#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>
#include <vm/pmap.h>
#include <IOKit/IOHibernatePrivate.h>
#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/trap.h>
#include <ppc/machine_cpu.h>
#include <ppc/pms.h>
#include <ppc/rtclock.h>
/*
 * PowerPC CPU management: per-processor bootstrap, start/sleep,
 * inter-processor signalling (SIGP), timebase synchronization and
 * performance-monitor register control.
 */

/* Serializes registration of new per_proc entries in PerProcTable. */
decl_mutex_data(static,ppt_lock);

unsigned int real_ncpus = 1;		/* CPUs registered so far (boot CPU included) */
unsigned int max_ncpus = MAX_CPUS;	/* upper bound on CPUs this kernel manages */

/* Protects exclusive use of the shared reset-handler vector. */
decl_simple_lock_data(static,rht_lock);
static unsigned int rht_state = 0;	/* reset-handler ownership state */
#define RHT_WAIT	0x01		/* someone is sleeping for the handler */
#define RHT_BUSY	0x02		/* handler is claimed by a starting CPU */

/* Guards the SignalReady start-up handshake between starter and started CPU. */
decl_simple_lock_data(static,SignalReadyLock);

/* Mailbox used to pass the master's timebase to a synchronizing CPU. */
struct SIGtimebase {
	boolean_t	avail;		/* master: abstime field is valid */
	boolean_t	ready;		/* slave: timebase has been loaded */
	boolean_t	done;		/* master: handshake finished */
	uint64_t	abstime;	/* master's timebase snapshot */
};

/* Optional performance-tool hook invoked on CPRQchud signals. */
perfCallback perfCpuSigHook = 0;

extern int debugger_sync;

/* Forward declarations. */
void cpu_sync_timebase(
	void);

void cpu_timebase_signal_handler(
	struct per_proc_info *proc_info,
	struct SIGtimebase *timebaseAddr);
/*
 * cpu_bootstrap: one-time initialization of the locks used by this
 * module.  Runs once, early in boot, on the boot processor.
 */
void
cpu_bootstrap(
	void)
{
	mutex_init(&ppt_lock, 0);
	simple_lock_init(&rht_lock, 0);
	simple_lock_init(&SignalReadyLock, 0);
}
/*
 * cpu_init: per-CPU initialization, run on the processor itself.
 * Restores the timebase saved by cpu_sleep() (if any), requests the
 * next timer pop and publishes this CPU's type information.
 */
void
cpu_init(
void)
{
	struct per_proc_info *proc_info;

	proc_info = getPerProc();

	/*
	 * If cpu_sleep() stashed a timebase, restore it.  The low word is
	 * first zeroed so the upper word cannot carry while we load both.
	 */
	if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
		mttb(0);
		mttbu(proc_info->save_tbu);
		mttb(proc_info->save_tbl);
	}

	setTimerReq();		/* request the next timer pop */

	proc_info->cpu_type = CPU_TYPE_POWERPC;
	proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
	proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
	proc_info->running = TRUE;
}
/*
 * cpu_machine_init: second-phase initialization, run on the processor
 * itself once it is up.  For a secondary CPU this releases the shared
 * reset-handler vector, finishes any hibernation wakeup, synchronizes
 * the local timebase with the master, and completes the SignalReady
 * handshake with the CPU that started us.
 */
void
cpu_machine_init(
void)
{
	struct per_proc_info *proc_info;
	volatile struct per_proc_info *mproc_info;

	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
		/* Secondary CPU: done with the reset vector; wake any waiter. */
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t tbu, tbl;

		/* Snapshot a consistent 64-bit timebase (retry if TBU carried). */
		do {
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);

		proc_info->hibernate = 0;
		hibernate_machine_init();

		/*
		 * Restore the snapshot taken above; low word zeroed first so
		 * the upper word cannot carry mid-load.
		 */
		mttb(0);
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
		/* Wait until the master accepts timebase requests, then sync. */
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	ml_init_interrupt();

	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		/* Wake the starter if it is sleeping in cpu_start(). */
		if (proc_info->ppXFlags & SignalReadyWait) {
			hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);

		/* Park this CPU in the power-management stepper. */
		pmsPark();
	}
}
/*
 * cpu_per_proc_alloc: allocate and initialize a per_proc_info block
 * plus its interrupt and debugger stacks for a new CPU.
 *
 * Returns the initialized per_proc, or NULL if any allocation fails
 * (all partial allocations are released on the failure paths).
 */
struct per_proc_info *
cpu_per_proc_alloc(
	void)
{
	struct per_proc_info	*proc_info;
	void			*interrupt_stack;
	void			*debugger_stack;

	proc_info = (struct per_proc_info *)kalloc(sizeof(struct per_proc_info));
	if (proc_info == (struct per_proc_info *)0)
		return (struct per_proc_info *)NULL;

	interrupt_stack = kalloc(INTSTACK_SIZE);
	if (interrupt_stack == 0)
		goto fail_proc;

	debugger_stack = kalloc(KERNEL_STACK_SIZE);
	if (debugger_stack == 0)
		goto fail_istack;

	bzero((void *)proc_info, sizeof(struct per_proc_info));

	/* Physical address of the per_proc's second page. */
	proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT;
	proc_info->next_savearea = (uint64_t)save_get_init();
	proc_info->pf = BootProcInfo.pf;

	/* Stack pointers start at the top, below one frame of headroom. */
	proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
	proc_info->intstack_top_ss = proc_info->istackptr;
	proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
	proc_info->debstack_top_ss = proc_info->debstackptr;

	return proc_info;

fail_istack:
	kfree(interrupt_stack, INTSTACK_SIZE);
fail_proc:
	kfree(proc_info, sizeof(struct per_proc_info));
	return (struct per_proc_info *)NULL;
}
/*
 * cpu_per_proc_free: release a per_proc_info block and its stacks.
 * The boot processor's per_proc is never freed.
 */
void
cpu_per_proc_free(
	struct per_proc_info *proc_info
)
{
	void *istack_base;
	void *dstack_base;

	if (proc_info->cpu_number == master_cpu)
		return;

	/* Recover the allocation bases from the recorded stack tops. */
	istack_base = (void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE);
	dstack_base = (void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE);

	kfree(istack_base, INTSTACK_SIZE);
	kfree(dstack_base, KERNEL_STACK_SIZE);
	kfree((void *)proc_info, sizeof(struct per_proc_info));
}
/*
 * cpu_per_proc_register: publish a new per_proc in PerProcTable and
 * assign it the next CPU number.  Fails when max_ncpus is reached.
 */
kern_return_t
cpu_per_proc_register(
	struct per_proc_info *proc_info
)
{
	int slot;

	mutex_lock(&ppt_lock);

	if (real_ncpus >= max_ncpus) {
		mutex_unlock(&ppt_lock);
		return KERN_FAILURE;
	}

	slot = real_ncpus;
	proc_info->cpu_number = slot;
	PerProcTable[slot].ppe_vaddr = proc_info;
	PerProcTable[slot].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;

	eieio();	/* table entry must be visible before the count grows */
	real_ncpus++;

	mutex_unlock(&ppt_lock);
	return KERN_SUCCESS;
}
/*
 * cpu_start: bring the given CPU on line.
 *
 * For the calling CPU this just performs platform init and marks it
 * ready.  For another CPU it resets that CPU's per_proc dispatch
 * state, installs the shared reset-handler vector if needed, kicks
 * the CPU via PE_cpu_start() and waits for the SignalReady handshake
 * completed in cpu_machine_init() on the new CPU.
 */
kern_return_t
cpu_start(
int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;
	mapping_t		*mp;

	proc_info = PerProcTable[cpu].ppe_vaddr;

	if (cpu == cpu_number()) {
		/* Starting ourselves: no handshake required. */
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		/* Reset the target's dispatch state; keep only BootDone. */
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;
		proc_info->istackptr = proc_info->intstack_top_ss;
		proc_info->rtcPop = EndOfAllTime;
		proc_info->FPU_owner = 0;
		proc_info->VMX_owner = 0;
		proc_info->pms.pmsStamp = 0;			/* power-management stepper reset */
		proc_info->pms.pmsPop = EndOfAllTime;
		proc_info->pms.pmsState = pmsParked;
		proc_info->pms.pmsCSetCmd = pmsCInit;

		/* Initialize the ppUMWmp mapping header for this CPU. */
		mp = (mapping_t *)(&proc_info->ppUMWmp);
		mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
		mp->mpSpace = invalSpace;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			/*
			 * The CPU starts through the shared reset vector; take
			 * exclusive ownership (released by the started CPU in
			 * cpu_machine_init(), or below on start failure).
			 */
			simple_lock(&rht_lock);
			while (rht_state & RHT_BUSY) {
				rht_state |= RHT_WAIT;
				thread_sleep_usimple_lock((event_t)&rht_state,
							  &rht_lock, THREAD_UNINT);
			}
			rht_state |= RHT_BUSY;
			simple_unlock(&rht_lock);

			/* Plug entry point and per_proc pointer into the handler. */
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      (vm_offset_t)&PerProcTable[cpu]);
		}

		/* Timestamp the start attempt. */
		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

		__asm__ volatile("sync");	/* all prior stores visible before kick */
		__asm__ volatile("isync");

		ret = PE_cpu_start(proc_info->cpu_id,
				   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS) {
			/* Start failed: give the reset vector back. */
			if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
				simple_lock(&rht_lock);
				if (rht_state & RHT_WAIT)
					thread_wakeup(&rht_state);
				rht_state &= ~(RHT_BUSY|RHT_WAIT);
				simple_unlock(&rht_lock);
			};
		} else {
			/* Sleep until the new CPU raises SignalReady. */
			simple_lock(&SignalReadyLock);
			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
				hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
							 &SignalReadyLock, THREAD_UNINT);
			}
			simple_unlock(&SignalReadyLock);
		}
		return(ret);
	}
}
/*
 * cpu_exit_wait: spin until the given CPU has marked itself asleep.
 * The master CPU never powers down, so there is nothing to wait for
 * in that case.
 */
void
cpu_exit_wait(
	int cpu)
{
	struct per_proc_info *tpproc;

	if (cpu == master_cpu)
		return;

	tpproc = PerProcTable[cpu].ppe_vaddr;

	/* Busy-wait for the target to raise SleepState. */
	while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState))
		continue;
}
/*
 * cpu_doshutdown: run on a processor being taken offline.
 * Re-enables preemption and hands the processor to the scheduler's
 * offline machinery.
 */
void
cpu_doshutdown(
void)
{
	enable_preemption();
	processor_offline(current_processor());
}
/*
 * cpu_sleep: prepare the calling processor for sleep.
 * Flushes any live FPU/VMX context; on the master, additionally
 * re-arms the reset vector for wakeup and spins until every other
 * CPU has reached SleepState.  Finally saves the timebase (restored
 * by cpu_init() on wakeup) and quiesces via the platform expert.
 */
void
cpu_sleep(
void)
{
	struct per_proc_info	*proc_info;
	unsigned int		i;
	unsigned int		wait_ncpus_sleep, ncpus_sleep;
	facility_context	*fowner;

	proc_info = getPerProc();

	proc_info->running = FALSE;

	/* Flush any live floating-point / vector context to memory. */
	fowner = proc_info->FPU_owner;
	if(fowner) fpu_save(fowner);
	proc_info->FPU_owner = 0;
	fowner = proc_info->VMX_owner;
	if(fowner) vec_save(fowner);
	proc_info->VMX_owner = 0;

	if (proc_info->cpu_number == master_cpu) {
		/* Keep only BootDone; clear dispatch state for wakeup. */
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			/* Arrange for the reset vector to restart the master. */
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      (vm_offset_t)&PerProcTable[master_cpu]);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}

		/* The master sleeps last: spin until all others are asleep. */
		wait_ncpus_sleep = real_ncpus-1;
		ncpus_sleep = 0;
		while (wait_ncpus_sleep != ncpus_sleep) {
			ncpus_sleep = 0;
			for(i=1; i < real_ncpus ; i++) {
				if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
					ncpus_sleep++;
			}
		}
	}

	/* Save a consistent timebase snapshot (retry if TBU carried). */
	do {
		proc_info->save_tbu = mftbu();
		proc_info->save_tbl = mftb();
	} while (mftbu() != proc_info->save_tbu);

	PE_cpu_machine_quiesce(proc_info->cpu_id);
}
/*
 * cpu_signal: deliver a SIGP inter-processor message to another CPU.
 *
 *	target	CPU number to signal
 *	signal	message order (SIGPast, SIGPwake, SIGPcall, SIGPcpureq, ...)
 *	p1, p2	message-specific parameters
 *
 * The target's MPsigpStat word is a one-deep mailbox.  Redundant
 * wakes/ASTs are coalesced with an already-pending message instead of
 * waiting.  Returns KERN_FAILURE when the target is ourselves, not
 * running, not yet SignalReady, or the mailbox does not free within
 * the timeout.
 */
kern_return_t
cpu_signal(
int target,
int signal,
unsigned int p1,
unsigned int p2)
{
	unsigned int		holdStat;
	struct per_proc_info	*tpproc, *mpproc;
	int			busybitset = 0;

#if DEBUG
	if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

	mpproc = getPerProc();				/* our per_proc */
	tpproc = PerProcTable[target].ppe_vaddr;	/* target's per_proc */

	if(mpproc == tpproc) return KERN_FAILURE;	/* cannot signal ourselves */

	if(!tpproc->running) return KERN_FAILURE;

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* mailbox busy? */

		/* Any pending message will interrupt the target anyway. */
		if(signal == SIGPwake) {
			mpproc->hwCtr.numSIGPmwake++;	/* count merged wakes */
			return KERN_SUCCESS;
		}

		/* An AST request is already pending; no need to queue another. */
		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {
			mpproc->hwCtr.numSIGPmast++;	/* count merged ASTs */
			return KERN_SUCCESS;
		}

		if (tpproc->MPsigpParm0 == SIGPwake) {
			/*
			 * A wake is pending: try to reclaim the busy mailbox and
			 * replace the wake with our message (the wake's IPI is
			 * presumably already in flight).
			 */
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
					  (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
				busybitset = 1;
				mpproc->hwCtr.numSIGPmwake++;
			}
		}
	}

	/* Otherwise spin for the mailbox; timeout is timebase/2048 ticks (~0.5 ms). */
	if((busybitset == 0) &&
	   (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
			   (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {
		mpproc->hwCtr.numSIGPtimo++;	/* count timeouts */
		return KERN_FAILURE;
	}

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number;

	tpproc->MPsigpParm0 = signal;
	tpproc->MPsigpParm1 = p1;
	tpproc->MPsigpParm2 = p2;

	__asm__ volatile("sync");	/* parameters visible before status */

	tpproc->MPsigpStat = holdStat;	/* publish the message */

	__asm__ volatile("eieio");	/* push the status write out */

	if (busybitset == 0)
		PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* raise the IPI */

	return KERN_SUCCESS;
}
/*
 * cpu_signal_handler: field an incoming SIGP interrupt on this CPU.
 * Takes the one-deep mailbox contents, frees the mailbox, then
 * dispatches on the message order stored by cpu_signal().
 */
void
cpu_signal_handler(
void)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;	/* mtype unused here */
	unsigned int *parmAddr;
	struct per_proc_info	*proc_info;
	int cpu;
	broadcastFunc xfunc;

	cpu = cpu_number();

	proc_info = getPerProc();

	/* Take ownership of our mailbox (sender left it Busy|Pass). */
	if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
			  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	/* Copy the message out before releasing the mailbox. */
	holdStat = proc_info->MPsigpStat;
	holdParm0 = proc_info->MPsigpParm0;
	holdParm1 = proc_info->MPsigpParm1;
	holdParm2 = proc_info->MPsigpParm2;

	__asm__ volatile("isync");	/* reads complete before the release below */

	proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* free mailbox */

	switch ((holdStat & MPsigpFunc) >> 8) {	/* decode function code */

	case MPsigpIdle:	/* idle/spurious message: nothing to do */
		return;

	case MPsigpSigp:	/* signal-processor request */
		switch (holdParm0) {	/* decode message order */

		case SIGPast:	/* AST: run the scheduler's AST check */
			proc_info->hwCtr.numSIGPast++;
#if 0
			kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
			ast_check((processor_t)proc_info->processor);
			return;

		case SIGPcpureq:	/* CPU-specific request */
			proc_info->hwCtr.numSIGPcpureq++;
			switch (holdParm1) {	/* decode request type */

			case CPRQtimebase:	/* hand our timebase to a syncing CPU */
				cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
				return;

			case CPRQsegload:	/* no-op */
				return;

			case CPRQchud:		/* performance-tool (CHUD) hook */
				parmAddr = (unsigned int *)holdParm2;	/* request block */
				if(perfCpuSigHook) {
					struct savearea *ssp = current_thread()->machine.pcb;
					if(ssp) {
						(perfCpuSigHook)(parmAddr[1] , ssp, 0, 0);	/* parmAddr[1] is the request */
					}
				}
				parmAddr[1] = 0;	/* mark request handled */
				parmAddr[0] = 0;	/* mark hook finished */
				return;

			case CPRQscom:		/* SCOM register access on this CPU */
				if(((scomcomm *)holdParm2)->scomfunc) {	/* non-zero function: write */
					((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);
				}
				else {	/* zero: read */
					((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);
				}
				return;

			case CPRQsps:	/* slave side of a processor speed change */
				{
					ml_set_processor_speed_slave(holdParm2);
					return;
				}
			default:
				panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
				return;
			}

		case SIGPdebug:	/* enter the debugger as a slave */
			proc_info->hwCtr.numSIGPdebug++;
			proc_info->debugger_is_slave++;
			hw_atomic_sub(&debugger_sync, 1);	/* show we have stopped */
			__asm__ volatile("tw 4,r3,r3");		/* trap into the debugger */
			return;

		case SIGPwake:	/* wake: the interrupt itself did the work */
			proc_info->hwCtr.numSIGPwake++;
			return;

		case SIGPcall:	/* run a function shipped by cpu_broadcast() */
			proc_info->hwCtr.numSIGPcall++;
			xfunc = holdParm1;	/* function pointer travels as an int */
			xfunc(holdParm2);
			return;

		default:
			panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
			return;
		}

	default:
		panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
		return;
	}
	panic("cpu_signal_handler: we should never get here\n");
}
/*
 * cpu_sync_timebase: slave side of timebase synchronization.  Asks
 * the master (via SIGPcpureq/CPRQtimebase) for its timebase and loads
 * it locally, handshaking through a SIGtimebase mailbox on our stack.
 */
void
cpu_sync_timebase(
void)
{
	natural_t tbu, tbl;
	boolean_t	intr;
	struct SIGtimebase	syncClkSpot;

	intr = ml_set_interrupts_enabled(FALSE);	/* no interrupts during sync */

	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	/* Keep asking until the master accepts the request. */
	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
			  (unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	/* Wait for the master to post its timebase value. */
	while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
		continue;

	isync();

	/*
	 * Load the master's timebase; low word zeroed first so the upper
	 * word cannot carry while both halves are loaded.
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;
	mttb(0);
	mttbu(tbu);
	mttb(tbl);

	syncClkSpot.ready = TRUE;	/* tell the master we have it */

	/* Wait for the master to finish before our stack mailbox goes away. */
	while (*(volatile int *)&(syncClkSpot.done) == FALSE)
		continue;

	setTimerReq();	/* restore the timer request */

	(void)ml_set_interrupts_enabled(intr);
}
/*
 * cpu_timebase_signal_handler: master side of timebase sync.  Called
 * from the SIGP handler (CPRQtimebase) with the slave's mailbox;
 * optionally gates the timebase, posts a consistent snapshot and
 * handshakes with cpu_sync_timebase() on the slave.
 */
void
cpu_timebase_signal_handler(
struct per_proc_info *proc_info,
struct SIGtimebase *timebaseAddr)
{
	unsigned int tbu, tbu2, tbl;

	/* If the platform can gate the timebase, stop it during the copy. */
	if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, FALSE);

	timebaseAddr->abstime = 0;	/* mark invalid while we work */
	sync();				/* commit before reading the timebase */

	/* Read a consistent 64-bit timebase (retry if TBU carried). */
	do {
		asm volatile(" mftbu %0" : "=r" (tbu));
		asm volatile(" mftb %0" : "=r" (tbl));
		asm volatile(" mftbu %0" : "=r" (tbu2));
	} while (tbu != tbu2);

	timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
	sync();				/* value committed before the avail flag */

	timebaseAddr->avail = TRUE;	/* slave may load it now */

	/* Wait for the slave to load the timebase. */
	while (*(volatile int *)&(timebaseAddr->ready) == FALSE);

	/* Restart the timebase if we stopped it. */
	if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, TRUE);

	timebaseAddr->done = TRUE;	/* release the slave's mailbox */
}
kern_return_t
cpu_control(
int slot_num,
processor_info_t info,
unsigned int count)
{
struct per_proc_info *proc_info;
cpu_type_t tcpu_type;
cpu_subtype_t tcpu_subtype;
processor_pm_regs_t perf_regs;
processor_control_cmd_t cmd;
boolean_t oldlevel;
#define MMCR0_SUPPORT_MASK 0xf83f1fff
#define MMCR1_SUPPORT_MASK 0xffc00000
#define MMCR2_SUPPORT_MASK 0x80000000
proc_info = PerProcTable[slot_num].ppe_vaddr;
tcpu_type = proc_info->cpu_type;
tcpu_subtype = proc_info->cpu_subtype;
cmd = (processor_control_cmd_t) info;
if (count < PROCESSOR_CONTROL_CMD_COUNT)
return(KERN_FAILURE);
if ( tcpu_type != cmd->cmd_cpu_type ||
tcpu_subtype != cmd->cmd_cpu_subtype)
return(KERN_FAILURE);
if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
return(KERN_RESOURCE_SHORTAGE);
}
switch (cmd->cmd_op)
{
case PROCESSOR_PM_CLR_PMC:
switch (tcpu_subtype)
{
case CPU_SUBTYPE_POWERPC_750:
case CPU_SUBTYPE_POWERPC_7400:
case CPU_SUBTYPE_POWERPC_7450:
{
oldlevel = ml_set_interrupts_enabled(FALSE);
mtpmc1(0x0);
mtpmc2(0x0);
mtpmc3(0x0);
mtpmc4(0x0);
ml_set_interrupts_enabled(oldlevel);
return(KERN_SUCCESS);
}
default:
return(KERN_FAILURE);
}
case PROCESSOR_PM_SET_REGS:
switch (tcpu_subtype)
{
case CPU_SUBTYPE_POWERPC_750:
if (count < (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_750))
return(KERN_FAILURE);
else
{
perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
oldlevel = ml_set_interrupts_enabled(FALSE);
mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
mtpmc1(PERFMON_PMC1(perf_regs));
mtpmc2(PERFMON_PMC2(perf_regs));
mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
mtpmc3(PERFMON_PMC3(perf_regs));
mtpmc4(PERFMON_PMC4(perf_regs));
ml_set_interrupts_enabled(oldlevel);
return(KERN_SUCCESS);
}
case CPU_SUBTYPE_POWERPC_7400:
case CPU_SUBTYPE_POWERPC_7450:
if (count < (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
return(KERN_FAILURE);
else
{
perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
oldlevel = ml_set_interrupts_enabled(FALSE);
mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
mtpmc1(PERFMON_PMC1(perf_regs));
mtpmc2(PERFMON_PMC2(perf_regs));
mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
mtpmc3(PERFMON_PMC3(perf_regs));
mtpmc4(PERFMON_PMC4(perf_regs));
mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
ml_set_interrupts_enabled(oldlevel);
return(KERN_SUCCESS);
}
default:
return(KERN_FAILURE);
}
case PROCESSOR_PM_SET_MMCR:
switch (tcpu_subtype)
{
case CPU_SUBTYPE_POWERPC_750:
if (count < (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_750))
return(KERN_FAILURE);
else
{
perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
oldlevel = ml_set_interrupts_enabled(FALSE);
mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
ml_set_interrupts_enabled(oldlevel);
return(KERN_SUCCESS);
}
case CPU_SUBTYPE_POWERPC_7400:
case CPU_SUBTYPE_POWERPC_7450:
if (count < (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
return(KERN_FAILURE);
else
{
perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
oldlevel = ml_set_interrupts_enabled(FALSE);
mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
ml_set_interrupts_enabled(oldlevel);
return(KERN_SUCCESS);
}
default:
return(KERN_FAILURE);
}
default:
return(KERN_FAILURE);
}
}
/*
 * cpu_info_count: report, for the given flavor, how many ints a
 * subsequent cpu_info() call will fill in.  Sizing is based on the
 * master CPU's subtype.
 */
kern_return_t
cpu_info_count(
	processor_flavor_t	flavor,
	unsigned int		*count)
{
	cpu_subtype_t		master_subtype;

	master_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;

	if (flavor == PROCESSOR_TEMPERATURE) {
		*count = PROCESSOR_TEMPERATURE_COUNT;
		return (KERN_SUCCESS);
	}

	if (flavor != PROCESSOR_PM_REGS_INFO) {
		*count = 0;
		return(KERN_INVALID_ARGUMENT);
	}

	/* Performance-monitor register set size depends on the subtype. */
	switch (master_subtype) {
	case CPU_SUBTYPE_POWERPC_750:
		*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
		return(KERN_SUCCESS);
	case CPU_SUBTYPE_POWERPC_7400:
	case CPU_SUBTYPE_POWERPC_7450:
		*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
		return(KERN_SUCCESS);
	default:
		*count = 0;
		return(KERN_INVALID_ARGUMENT);
	}
}
/*
 * cpu_info: fill in processor information for the CPU in slot_num.
 * For PROCESSOR_PM_REGS_INFO, reads the performance-monitor registers
 * (with interrupts off) into the caller's buffer; *count is checked
 * against the subtype's register set size and updated on success.
 */
kern_return_t
cpu_info(
	processor_flavor_t	flavor,
	int			slot_num,
	processor_info_t	info,
	unsigned int		*count)
{
	cpu_subtype_t		tcpu_subtype;
	processor_pm_regs_t	perf_regs;
	boolean_t		oldlevel;
	boolean_t		has_mmcr2;
	unsigned int		regs_count;

	tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;

	switch (flavor) {
	case PROCESSOR_PM_REGS_INFO:
		perf_regs = (processor_pm_regs_t) info;

		/* Register set size (and MMCR2 presence) depends on subtype. */
		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			has_mmcr2 = FALSE;
			regs_count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			break;
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			has_mmcr2 = TRUE;
			regs_count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			break;
		default:
			return(KERN_FAILURE);
		}

		if (*count < regs_count)
			return(KERN_FAILURE);

		oldlevel = ml_set_interrupts_enabled(FALSE);
		PERFMON_MMCR0(perf_regs) = mfmmcr0();
		PERFMON_PMC1(perf_regs) = mfpmc1();
		PERFMON_PMC2(perf_regs) = mfpmc2();
		PERFMON_MMCR1(perf_regs) = mfmmcr1();
		PERFMON_PMC3(perf_regs) = mfpmc3();
		PERFMON_PMC4(perf_regs) = mfpmc4();
		if (has_mmcr2)
			PERFMON_MMCR2(perf_regs) = mfmmcr2();
		ml_set_interrupts_enabled(oldlevel);

		*count = regs_count;
		return(KERN_SUCCESS);

	case PROCESSOR_TEMPERATURE:	/* not supported on this platform */
		*info = -1;
		return(KERN_FAILURE);

	default:
		return(KERN_INVALID_ARGUMENT);
	}
}
processor_t
cpu_to_processor(
int cpu)
{
return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
}
cpu_type_t
slot_type(
int slot_num)
{
return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
}
cpu_subtype_t
slot_subtype(
int slot_num)
{
return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
}
cpu_threadtype_t
slot_threadtype(
int slot_num)
{
return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
}
cpu_type_t
cpu_type(void)
{
return (getPerProc()->cpu_type);
}
cpu_subtype_t
cpu_subtype(void)
{
return (getPerProc()->cpu_subtype);
}
cpu_threadtype_t
cpu_threadtype(void)
{
return (getPerProc()->cpu_threadtype);
}
/*
 * cpu_broadcast: request func(parm) on every other CPU via SIGPcall.
 *
 * *synch is incremented once per CPU successfully signalled; the
 * remote function is expected to decrement it and wake (event_t)synch
 * when done (protocol inferred from the wait below — confirm against
 * callers).  Blocks until woken, then returns the number of CPUs
 * signalled (0 on a uniprocessor).
 */
int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {

	int sigproc, cpu, ocpu;

	cpu = cpu_number();	/* our CPU number, skipped below */
	sigproc = 0;		/* CPUs successfully signalled */

	if(real_ncpus > 1) {
		/* Arm the wait before signalling so no wakeup can be lost. */
		assert_wait((event_t)synch, THREAD_UNINT);
		for(ocpu = 0; ocpu < real_ncpus; ocpu++) {
			if(ocpu == cpu) continue;	/* never signal ourselves */
			hw_atomic_add(synch, 1);	/* count before signalling */
			sigproc++;
			if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {
				/* Signal failed: back the count out. */
				hw_atomic_sub(synch, 1);
				sigproc--;
			}
		}

		/* Nothing was signalled: cancel the wait instead of blocking. */
		if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED);
		else thread_block(THREAD_CONTINUE_NULL);
	}

	return sigproc;
}