#include <mach/mach_types.h>
#include <mach/mach_host.h>
#include <kern/host.h>
#include <kern/processor.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/trap.h>
#include <mach/i386/syscall_sw.h>
#include <chud/chud_xnu.h>
#if 0
#pragma mark **** cpu enable/disable ****
#endif
extern kern_return_t processor_start(processor_t processor); extern kern_return_t processor_exit(processor_t processor);
__private_extern__
kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable)
{
	processor_t	target;

	/*
	 * Bring a secondary CPU online (enable=TRUE) or take it offline
	 * (enable=FALSE).  The master CPU may never be stopped, and a CPU
	 * slot that was never brought up (NULL per-cpu data) is rejected.
	 * Returns KERN_FAILURE for any invalid target, otherwise the result
	 * of processor_start()/processor_exit().
	 */

	/* Drop any binding first so the caller isn't pinned to a CPU that
	 * may be about to go offline. */
	chudxnu_unbind_thread(current_thread(), 0);

	if (cpu < 0 || (unsigned int)cpu >= real_ncpus)
		return KERN_FAILURE;

	/* Never-started slot or the boot CPU: not a valid target. */
	if (cpu_data_ptr[cpu] == NULL || cpu == master_cpu)
		return KERN_FAILURE;

	target = cpu_to_processor(cpu);
	if (target == master_processor)
		return KERN_FAILURE;

	return enable ? processor_start(target) : processor_exit(target);
}
#if 0
#pragma mark **** perfmon facility ****
#endif
__private_extern__ kern_return_t
chudxnu_perfmon_acquire_facility(task_t task __unused)
{
	/*
	 * Claim the performance-monitor hardware on behalf of a task.
	 * No arbitration is performed here: the call unconditionally
	 * reports success and ignores its argument.
	 */
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
chudxnu_perfmon_release_facility(task_t task __unused)
{
	/*
	 * Release a previously acquired performance-monitor facility.
	 * Nothing is tracked, so there is nothing to release: the call
	 * unconditionally reports success and ignores its argument.
	 */
	return KERN_SUCCESS;
}
#if 0
#pragma mark **** interrupt counters ****
#endif
__private_extern__ kern_return_t
chudxnu_get_cpu_interrupt_counters(int cpu, interrupt_counters_t *rupts)
{
	cpu_data_t	*per_proc;
	boolean_t	oldlevel;

	/*
	 * Snapshot the per-cpu hardware interrupt counters for @cpu into
	 * the caller-supplied @rupts structure.  Counters are read with
	 * interrupts disabled so the snapshot is internally consistent.
	 * Returns KERN_FAILURE on a bad cpu index, NULL @rupts, or a CPU
	 * slot that was never brought up; KERN_SUCCESS otherwise.
	 */
	if (cpu < 0 || (unsigned int)cpu >= real_ncpus)
		return KERN_FAILURE;
	if (rupts == NULL)
		return KERN_FAILURE;

	/*
	 * Consistent with chudxnu_enable_cpu(): a NULL per-cpu slot means
	 * the CPU was never started, so there is nothing to read.  The
	 * original code dereferenced this unconditionally.
	 */
	per_proc = cpu_data_ptr[cpu];
	if (per_proc == NULL)
		return KERN_FAILURE;

	oldlevel = ml_set_interrupts_enabled(FALSE);

	rupts->hwResets = per_proc->cpu_hwIntCnt[T_NMI];
	rupts->hwMachineChecks = per_proc->cpu_hwIntCnt[T_MACHINE_CHECK];
	rupts->hwFloatPointUnavailable = per_proc->cpu_hwIntCnt[T_NO_FPU];
	rupts->hwDecrementers = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(TIMER)];
	rupts->hwIOErrors = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(ERROR)];
	/* All system-call classes are folded into a single count. */
	rupts->hwSystemCalls = per_proc->cpu_hwIntCnt[UNIX_INT] +
		per_proc->cpu_hwIntCnt[MACH_INT] +
		per_proc->cpu_hwIntCnt[MACHDEP_INT] +
		per_proc->cpu_hwIntCnt[DIAG_INT];
	rupts->hwTraces = per_proc->cpu_hwIntCnt[T_DEBUG];
	rupts->hwPerformanceMonitors =
		per_proc->cpu_hwIntCnt[LAPIC_VECTOR(PERFCNT)];
	rupts->hwInstBreakpoints = per_proc->cpu_hwIntCnt[T_INT3];
	rupts->hwThermal = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(THERMAL)];
	rupts->hwInstrumentations = per_proc->cpu_hwIntCnt[T_WATCHPOINT];

	/*
	 * The remaining fields have no counterpart on this architecture
	 * (presumably carried over from another port of this interface —
	 * verify against the interrupt_counters_t consumers); report zero.
	 */
	rupts->hwDSIs = 0;
	rupts->hwISIs = 0;
	rupts->hwExternals = 0;
	rupts->hwAlignments = 0;
	rupts->hwPrograms = 0;
	rupts->hwFloatingPointAssists = 0;
	rupts->hwAltivecs = 0;
	rupts->hwSystemManagements = 0;
	rupts->hwAltivecAssists = 0;
	rupts->hwSoftPatches = 0;
	rupts->hwMaintenances = 0;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
chudxnu_clear_cpu_interrupt_counters(int cpu)
{
	cpu_data_t	*per_proc;

	/*
	 * Zero all per-cpu hardware interrupt counters for @cpu.
	 * Returns KERN_FAILURE on a bad cpu index or a CPU slot that was
	 * never brought up; KERN_SUCCESS otherwise.
	 */
	if (cpu < 0 || (unsigned int)cpu >= real_ncpus)
		return KERN_FAILURE;

	/*
	 * Consistent with chudxnu_enable_cpu(): a NULL per-cpu slot means
	 * the CPU was never started.  The original code dereferenced this
	 * unconditionally.
	 */
	per_proc = cpu_data_ptr[cpu];
	if (per_proc == NULL)
		return KERN_FAILURE;

	/*
	 * cpu_hwIntCnt is indexed by interrupt vector; 256 is the full
	 * x86 vector space (0-255).  Size expression kept explicit to
	 * match the layout assumed by the readers above.
	 */
	bzero((char *)per_proc->cpu_hwIntCnt, sizeof(uint32_t)*256);
	return KERN_SUCCESS;
}