#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/trap.h>
/* Number of CPUs actually found and started; the boot CPU counts from the beginning. */
int real_ncpus = 1;
/* Number of CPU slots we are willing to use ("wanted" ncpus). */
int wncpu = NCPUS;
/* Parameter block written to physical memory at the reset vector for slave start. */
resethandler_t resethandler_target;
/* Masks of MMCR bits we allow user control over; unsupported bits are stripped. */
#define MMCR0_SUPPORT_MASK 0xf83f1fff
#define MMCR1_SUPPORT_MASK 0xffc00000
#define MMCR2_SUPPORT_MASK 0x80000000
/* Per-CPU debugger rendezvous state (defined in the debugger support code). */
extern int debugger_pending[NCPUS];
extern int debugger_is_slave[NCPUS];
extern int debugger_holdoff[NCPUS];
extern int debugger_sync;
/*
 * Handshake block used to copy the master's timebase to a waking slave.
 * Written by the master in cpu_signal_handler (CPRQtimebase) and polled
 * by the slave in cpu_sync_timebase.
 */
struct SIGtimebase {
boolean_t avail;	/* master: timebase value is valid */
boolean_t ready;	/* slave: timebase has been copied, master may resume */
boolean_t done;		/* master: timebase re-enabled, sync complete */
uint64_t abstime;	/* the captured 64-bit timebase value */
};
struct per_proc_info *pper_proc_info = per_proc_info;
extern struct SIGtimebase syncClkSpot;
void cpu_sync_timebase(void);
/*
 *	cpu_control - back end for processor_control() on PPC.
 *
 *	Validates a performance-monitor command against the target slot's
 *	CPU type/subtype and applies it to the performance monitor SPRs of
 *	the CPU executing this call.  Supported commands: clear the PMCs,
 *	load PMCs+MMCRs, or load MMCRs only.  Only the 750/7400/7450
 *	subtypes are handled; MMCR values are masked to the supported bits
 *	before being written.  All SPR writes are done with interrupts
 *	disabled so the register set is updated atomically.
 *
 *	slot_num - machine slot whose type/subtype the command must match.
 *	info	 - command buffer, interpreted as processor_control_cmd_t.
 *	count	 - size of the buffer (in ints); must cover the command
 *		   plus, for register-loading ops, the register set.
 *
 *	Returns KERN_SUCCESS, KERN_FAILURE on a short buffer / mismatched
 *	type / unknown op or subtype, or KERN_RESOURCE_SHORTAGE when the
 *	perfmon facility is owned by another task.
 */
kern_return_t
cpu_control(
int slot_num,
processor_info_t info,
unsigned int count)
{
cpu_type_t cpu_type;
cpu_subtype_t cpu_subtype;
processor_pm_regs_t perf_regs;
processor_control_cmd_t cmd;
boolean_t oldlevel;
cpu_type = machine_slot[slot_num].cpu_type;
cpu_subtype = machine_slot[slot_num].cpu_subtype;
cmd = (processor_control_cmd_t) info;
/* Buffer must at least hold the command header. */
if (count < PROCESSOR_CONTROL_CMD_COUNT)
return(KERN_FAILURE);
/* The command must be aimed at this processor model. */
if ( cpu_type != cmd->cmd_cpu_type ||
cpu_subtype != cmd->cmd_cpu_subtype)
return(KERN_FAILURE);
/* Only one task may drive the performance monitor at a time. */
if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
return(KERN_RESOURCE_SHORTAGE);
}
switch (cmd->cmd_op)
{
case PROCESSOR_PM_CLR_PMC:	/* Zero all four performance counters. */
switch (cpu_subtype)
{
case CPU_SUBTYPE_POWERPC_750:
case CPU_SUBTYPE_POWERPC_7400:
case CPU_SUBTYPE_POWERPC_7450:
{
oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
mtpmc1(0x0);
mtpmc2(0x0);
mtpmc3(0x0);
mtpmc4(0x0);
ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
return(KERN_SUCCESS);
}
default:
return(KERN_FAILURE);
}
case PROCESSOR_PM_SET_REGS:	/* Load both the counters and the control registers. */
switch (cpu_subtype)
{
case CPU_SUBTYPE_POWERPC_750:
if (count <  (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_750))
return(KERN_FAILURE);
else
{
perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
mtpmc1(PERFMON_PMC1(perf_regs));
mtpmc2(PERFMON_PMC2(perf_regs));
mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
mtpmc3(PERFMON_PMC3(perf_regs));
mtpmc4(PERFMON_PMC4(perf_regs));
ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
return(KERN_SUCCESS);
}
case CPU_SUBTYPE_POWERPC_7400:
case CPU_SUBTYPE_POWERPC_7450:
if (count <  (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
return(KERN_FAILURE);
else
{
perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
mtpmc1(PERFMON_PMC1(perf_regs));
mtpmc2(PERFMON_PMC2(perf_regs));
mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
mtpmc3(PERFMON_PMC3(perf_regs));
mtpmc4(PERFMON_PMC4(perf_regs));
mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);	/* 7400/7450 also have MMCR2 */
ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
return(KERN_SUCCESS);
}
default:
return(KERN_FAILURE);
}
case PROCESSOR_PM_SET_MMCR:	/* Load only the monitor control registers. */
switch (cpu_subtype)
{
case CPU_SUBTYPE_POWERPC_750:
if (count < (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_750))
return(KERN_FAILURE);
else
{
perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
return(KERN_SUCCESS);
}
case CPU_SUBTYPE_POWERPC_7400:
case CPU_SUBTYPE_POWERPC_7450:
if (count < (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
return(KERN_FAILURE);
else
{
perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
return(KERN_SUCCESS);
}
default:
return(KERN_FAILURE);
}
default:
return(KERN_FAILURE);
}
}
/*
 *	cpu_info_count - report the info-array size for a processor_info flavor.
 *
 *	flavor - which kind of processor information is being asked about.
 *	count  - out: number of ints the caller must supply to cpu_info();
 *		 set to 0 on failure.
 *
 *	Returns KERN_SUCCESS, or KERN_INVALID_ARGUMENT for an unknown
 *	flavor or an unsupported CPU subtype.  Subtype is taken from
 *	slot 0, so all processors are assumed identical here.
 */
kern_return_t
cpu_info_count(
	processor_flavor_t flavor,
	unsigned int *count)
{
	cpu_subtype_t subtype = machine_slot[0].cpu_subtype;

	if (flavor == PROCESSOR_TEMPERATURE) {
		*count = PROCESSOR_TEMPERATURE_COUNT;
		return KERN_SUCCESS;
	}

	if (flavor == PROCESSOR_PM_REGS_INFO) {
		if (subtype == CPU_SUBTYPE_POWERPC_750) {
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return KERN_SUCCESS;
		}
		if (subtype == CPU_SUBTYPE_POWERPC_7400 ||
		    subtype == CPU_SUBTYPE_POWERPC_7450) {
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return KERN_SUCCESS;
		}
	}

	/* Unknown flavor or unsupported subtype. */
	*count = 0;
	return KERN_INVALID_ARGUMENT;
}
/*
 *	cpu_info - back end for processor_info() on PPC.
 *
 *	PROCESSOR_PM_REGS_INFO: reads the performance monitor SPRs of the
 *	CPU executing this call (with interrupts disabled) into the
 *	caller's buffer.  PROCESSOR_TEMPERATURE: reads the thermal sensor,
 *	either locally or by signalling the target CPU and waiting for it
 *	to reply.
 *
 *	flavor	 - which information to return.
 *	slot_num - target processor slot.
 *	info	 - caller's buffer.
 *	count	 - in: buffer size in ints; out: ints actually filled.
 */
kern_return_t
cpu_info(
processor_flavor_t flavor,
int slot_num,
processor_info_t info,
unsigned int *count)
{
cpu_subtype_t cpu_subtype;
processor_pm_regs_t perf_regs;
boolean_t oldlevel;
unsigned int temp[2];	/* [0] = busy flag cleared by remote CPU, [1] = returned temperature */
cpu_subtype = machine_slot[slot_num].cpu_subtype;
switch (flavor) {
case PROCESSOR_PM_REGS_INFO:
perf_regs = (processor_pm_regs_t) info;
switch (cpu_subtype) {
case CPU_SUBTYPE_POWERPC_750:
if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
return(KERN_FAILURE);
oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
PERFMON_MMCR0(perf_regs) = mfmmcr0();
PERFMON_PMC1(perf_regs)  = mfpmc1();
PERFMON_PMC2(perf_regs)  = mfpmc2();
PERFMON_MMCR1(perf_regs) = mfmmcr1();
PERFMON_PMC3(perf_regs)  = mfpmc3();
PERFMON_PMC4(perf_regs)  = mfpmc4();
ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
return(KERN_SUCCESS);
case CPU_SUBTYPE_POWERPC_7400:
case CPU_SUBTYPE_POWERPC_7450:
if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
return(KERN_FAILURE);
oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
PERFMON_MMCR0(perf_regs) = mfmmcr0();
PERFMON_PMC1(perf_regs)  = mfpmc1();
PERFMON_PMC2(perf_regs)  = mfpmc2();
PERFMON_MMCR1(perf_regs) = mfmmcr1();
PERFMON_PMC3(perf_regs)  = mfpmc3();
PERFMON_PMC4(perf_regs)  = mfpmc4();
PERFMON_MMCR2(perf_regs) = mfmmcr2();	/* 7400/7450 also have MMCR2 */
ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
return(KERN_SUCCESS);
default:
return(KERN_FAILURE);
}
case PROCESSOR_TEMPERATURE:					/* Get the temperature of a processor */
disable_preemption();					/* Don't move me around while we do this */
if(slot_num == cpu_number()) {				/* Is this for the local CPU? */
*info = ml_read_temp();				/* Get the temperature directly */
}
else {							/* For another CPU */
temp[0] = -1;					/* Set sync flag */
eieio();
sync();
temp[1] = -1;					/* Set invalid temperature */
(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp ,(unsigned int)&temp);	/* Ask the other processor for its temperature */
/* NOTE(review): presumably hw_cpu_sync spins until the target clears temp[0] (see CPRQtemp handler) or LockTimeOut expires -- result is not checked here, so temp[1] may still be -1 on timeout */
(void)hw_cpu_sync(temp, LockTimeOut);		/* Wait for the other processor to get its temperature */
*info = temp[1];				/* Pass back the temperature */
}
enable_preemption();					/* Ok to move now */
return(KERN_SUCCESS);
default:
return(KERN_INVALID_ARGUMENT);
}
}
/*
 *	cpu_init - mark the calling CPU's machine slot as running and
 *	record its processor type and reported subtype.
 */
void
cpu_init(
	void)
{
	int my_cpu = cpu_number();

	machine_slot[my_cpu].running = TRUE;
	machine_slot[my_cpu].cpu_type = CPU_TYPE_POWERPC;
	machine_slot[my_cpu].cpu_subtype =
		(cpu_subtype_t)per_proc_info[my_cpu].pf.rptdProc;
}
/*
 *	cpu_machine_init - per-CPU machine-dependent initialization.
 *
 *	Runs the platform-expert init for this CPU, then, on a slave,
 *	spins until the master advertises SignalReady and syncs the local
 *	timebase with it.  Finally enables interrupts and marks this CPU
 *	as booted and signalable.
 */
void
cpu_machine_init(
void)
{
struct per_proc_info *tproc_info;
volatile struct per_proc_info *mproc_info;
int cpu;
cpu = cpu_number();
tproc_info = &per_proc_info[cpu];
mproc_info = &per_proc_info[master_cpu];	/* volatile: cpu_flags is written by the master CPU */
PE_cpu_machine_init(tproc_info->cpu_id, !(tproc_info->cpu_flags & BootDone));
if (cpu != master_cpu) {
/* Wait until the master can take our timebase-sync request. */
while (!((mproc_info->cpu_flags) & SignalReady))
continue;
cpu_sync_timebase();
}
ml_init_interrupt();
tproc_info->cpu_flags |= BootDone|SignalReady;	/* Now visible as fully booted */
}
kern_return_t
cpu_register(
int *target_cpu
)
{
int cpu;
*target_cpu = -1;
for(cpu=0; cpu < wncpu; cpu++) {
if(!machine_slot[cpu].is_cpu) {
machine_slot[cpu].is_cpu = TRUE;
*target_cpu = cpu;
break;
}
}
if (*target_cpu != -1) {
real_ncpus++;
return KERN_SUCCESS;
} else
return KERN_FAILURE;
}
/*
 *	cpu_start - start (or restart) a processor.
 *
 *	For the calling CPU this just re-runs machine init and marks it
 *	booted.  For another CPU it rebuilds that CPU's per_proc block
 *	(stacks, ownership, special mapping), plants the reset-handler
 *	parameter block in physical memory when the CPU starts at the
 *	reset vector, and asks the platform expert to kick it.
 *
 *	cpu - slot number of the processor to start.
 */
kern_return_t
cpu_start(
int cpu)
{
struct per_proc_info *proc_info;
kern_return_t ret;
mapping *mp;
extern vm_offset_t intstack;
extern vm_offset_t debstack;
proc_info = &per_proc_info[cpu];
if (cpu == cpu_number()) {
/* Starting ourselves: just finish machine-dependent init. */
PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
ml_init_interrupt();
proc_info->cpu_flags |= BootDone|SignalReady;
return KERN_SUCCESS;
} else {
extern void _start_cpu(void);
proc_info->cpu_number = cpu;
proc_info->cpu_flags &= BootDone;	/* Clear everything except BootDone */
/* Carve this CPU's interrupt (and debugger) stacks out of the static arrays. */
proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->debstack_top_ss = proc_info->debstackptr;
#endif
proc_info->interrupts_enabled = 0;
proc_info->need_ast = (unsigned int)&need_ast[cpu];
proc_info->FPU_owner = 0;
proc_info->VMX_owner = 0;
/* Initialize the CPU's special copy-in/out mapping. */
mp = (mapping *)(&proc_info->ppCIOmp);
mp->mpFlags = 0x01000000 | mpSpecial | 1;
mp->mpSpace = invalSpace;
if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
/* CPU comes up through the reset vector: plant the start parameters in physical memory. */
resethandler_target.type = RESET_HANDLER_START;
resethandler_target.call_paddr = (vm_offset_t)_start_cpu;
resethandler_target.arg__paddr = (vm_offset_t)proc_info;
ml_phys_write((vm_offset_t)&ResetHandler + 0,
resethandler_target.type);
ml_phys_write((vm_offset_t)&ResetHandler + 4,
resethandler_target.call_paddr);
ml_phys_write((vm_offset_t)&ResetHandler + 8,
resethandler_target.arg__paddr);
}
ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
__asm__ volatile("sync");	/* Commit everything before the CPU is poked */
__asm__ volatile("isync");
ret = PE_cpu_start(proc_info->cpu_id,
proc_info->start_paddr, (vm_offset_t)proc_info);
if (ret != KERN_SUCCESS &&
proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
/* NOTE(review): failure path is empty -- presumably the planted reset handler should be torn down here; confirm intent */
}
return(ret);
}
}
/* Performance-monitor signal hook, invoked from the CPRQchud handler when set; 0 when unset. */
perfTrap perfCpuSigHook = 0;
/*
 *	cpu_signal_handler - handle an incoming interprocessor (SIGP) message.
 *
 *	Takes the per_proc signalling area via the MPsigp handshake,
 *	snapshots the message words, releases the area for the next
 *	sender, and dispatches on the function / order codes.  Runs on
 *	the target CPU at interrupt level.
 *
 *	Fix: the CPRQtimebase path now waits on the *requester's* ready
 *	flag (timebaseAddr->ready, passed in via holdParm2) instead of
 *	the global syncClkSpot.ready.  The old code only worked because
 *	the sole caller happens to pass &syncClkSpot.
 */
void
cpu_signal_handler(
	void)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2;
	unsigned int *parmAddr;
	struct per_proc_info *pproc;			/* our per_proc block */
	int cpu;
	struct SIGtimebase *timebaseAddr;
	natural_t tbu, tbu2, tbl;

	cpu = cpu_number();				/* Get the CPU number */
	pproc = &per_proc_info[cpu];			/* Point to our block */

	/*
	 *	Wait for the message area to be passed to us (busy+pass set),
	 *	then take it by setting ack.  Timeout is ~1/32s of timebase ticks.
	 */
	if(!hw_lock_mbits(&pproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	holdStat = pproc->MPsigpStat;			/* Snapshot the message words... */
	holdParm0 = pproc->MPsigpParm0;
	holdParm1 = pproc->MPsigpParm1;
	holdParm2 = pproc->MPsigpParm2;

	__asm__ volatile("isync");			/* Make sure reads complete before we release */

	/* Release the message area for the next sender. */
	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);

	switch ((holdStat & MPsigpFunc) >> 8) {		/* Decode function code */

		case MPsigpIdle:			/* Message arrived too late; nothing to do */
			return;

		case MPsigpSigp:			/* Signal Processor message */

			switch (holdParm0) {		/* Decode SIGP message order */

				case SIGPast:		/* AST check request */
					pproc->hwCtr.numSIGPast++;	/* Count this one */
#if 0
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
					ast_check(cpu_to_processor(cpu));
					return;

				case SIGPcpureq:	/* CPU-specific function request */
					pproc->hwCtr.numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {

						case CPRQtemp:	/* Read our temperature for another CPU */
							parmAddr = (unsigned int *)holdParm2;	/* Requester's temp[] */
							parmAddr[1] = ml_read_temp();	/* Deliver the temperature */
							eieio();	/* Force order */
							sync();		/* Force to memory */
							parmAddr[0] = 0;	/* Clear busy flag: we are done */
							return;

						case CPRQtimebase:	/* Hand our timebase to a syncing CPU */
							timebaseAddr = (struct SIGtimebase *)holdParm2;

							/* Freeze the timebase while the slave copies it */
							if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
								pproc->time_base_enable(pproc->cpu_id, FALSE);

							timebaseAddr->abstime = 0;	/* Touch to force line into cache */
							sync();

							/* Read TB upper/lower without tearing across carries */
							do {
								asm volatile(" mftbu %0" : "=r" (tbu));
								asm volatile(" mftb %0" : "=r" (tbl));
								asm volatile(" mftbu %0" : "=r" (tbu2));
							} while (tbu != tbu2);

							timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
							sync();			/* Force order */

							timebaseAddr->avail = TRUE;	/* Tell the slave the value is there */

							/*
							 * Wait on the requester's own flag, not the global
							 * syncClkSpot: holdParm2 names the requester's block.
							 */
							while (*(volatile int *)&(timebaseAddr->ready) == FALSE);

							/* Slave has loaded its timebase; restart ours */
							if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
								pproc->time_base_enable(pproc->cpu_id, TRUE);

							timebaseAddr->done = TRUE;
							return;

						case CPRQsegload:	/* Obsolete; nothing to do */
							return;

						case CPRQchud:		/* Performance-monitor hook */
							parmAddr = (unsigned int *)holdParm2;
							if(perfCpuSigHook) {
								struct savearea *ssp = current_act()->mact.pcb;
								if(ssp) {
									(perfCpuSigHook)(parmAddr[1] , ssp, 0, 0);
								}
							}
							parmAddr[1] = 0;
							parmAddr[0] = 0;	/* Show we are done */
							return;

						case CPRQscom:		/* SCOM register access */
							if(((scomcomm *)holdParm2)->scomfunc) {	/* Writing? */
								((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);
							}
							else {			/* No, reading */
								((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);
							}
							return;

						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
							return;
					}

				case SIGPdebug:		/* Enter the debugger as a slave */
					pproc->hwCtr.numSIGPdebug++;	/* Count this one */
					debugger_is_slave[cpu]++;	/* Show we are here */
					hw_atomic_sub(&debugger_sync, 1);	/* Acknowledge the interrupt */
					__asm__ volatile("tw 4,r3,r3");	/* Trap into the debugger */
					return;

				case SIGPwake:		/* Wake up */
					pproc->hwCtr.numSIGPwake++;	/* Count this one */
					return;		/* The interrupt itself did all the work */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
					return;
			}

		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
			return;
	}
	panic("cpu_signal_handler: we should never get here\n");
}
/*
 *	cpu_signal - send an interprocessor (SIGP) message to another CPU.
 *
 *	Merges redundant SIGPwake / SIGPast requests with an already
 *	pending message, otherwise acquires the target's message area
 *	(with a short timeout), deposits the order and parameters, and
 *	kicks the target through the platform expert.
 *
 *	target - slot number of the destination CPU (must not be us).
 *	signal - SIGP order code.
 *	p1, p2 - order-specific parameters.
 *
 *	Returns KERN_SUCCESS (including merged sends) or KERN_FAILURE
 *	(self-send, target not running / not signal-ready, or timeout
 *	acquiring the message area).
 *
 *	Fix: the DEBUG bounds check used "target > NCPUS"; valid slot
 *	indices are 0..NCPUS-1, so target == NCPUS (or a negative value)
 *	would index past per_proc_info/machine_slot.  Also dropped the
 *	unused locals holdParm0/1/2 and mtype.
 */
kern_return_t
cpu_signal(
	int target,
	int signal,
	unsigned int p1,
	unsigned int p2)
{
	unsigned int holdStat;
	struct per_proc_info *tpproc, *mpproc;
	int cpu;
	int busybitset = 0;

#if DEBUG
	if(target < 0 || target >= NCPUS)	/* slots are 0..NCPUS-1 */
		panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

	cpu = cpu_number();				/* Get our CPU number */
	if(target == cpu) return KERN_FAILURE;		/* Refuse to signal ourselves */
	if(!machine_slot[target].running) return KERN_FAILURE;	/* Target is not up yet */

	mpproc = &per_proc_info[cpu];			/* Point to our block */
	tpproc = &per_proc_info[target];		/* Point to the target's block */

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Outstanding message? */

		if(signal == SIGPwake) {		/* Wakes merge into anything pending */
			mpproc->hwCtr.numSIGPmwake++;	/* Account for merged wakes */
			return KERN_SUCCESS;
		}

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* ASTs merge with ASTs */
			mpproc->hwCtr.numSIGPmast++;	/* Account for merged ASTs */
			return KERN_SUCCESS;
		}

		if (tpproc->MPsigpParm0 == SIGPwake) {
			/* Pending wake: try to take over its message slot in place */
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
				  (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
				busybitset = 1;
				mpproc->hwCtr.numSIGPmwake++;
			}
		}
	}

	if((busybitset == 0) &&
	   (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	   (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* ~0.5ms timeout */
		mpproc->hwCtr.numSIGPtimo++;		/* Account for timeouts */
		return KERN_FAILURE;
	}

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu;	/* Build the status word */
	tpproc->MPsigpParm0 = signal;			/* Set message order */
	tpproc->MPsigpParm1 = p1;			/* Set additional parm */
	tpproc->MPsigpParm2 = p2;			/* Set additional parm */

	__asm__ volatile("sync");			/* Parameters must be visible before the status */

	tpproc->MPsigpStat = holdStat;			/* Set status and pass the lock */
	__asm__ volatile("eieio");			/* Force ordering of the status store */

	if (busybitset == 0)
		PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the target processor */

	return KERN_SUCCESS;
}
/*
 *	cpu_doshutdown - take the calling processor offline.
 *
 *	Re-enables preemption (we arrive with it disabled) and hands the
 *	processor to the scheduler's offline path; does not return on
 *	success.
 */
void
cpu_doshutdown(
void)
{
enable_preemption();
processor_offline(current_processor());
}
/*
 *	cpu_sleep - put the calling processor to sleep.
 *
 *	Saves and releases the FPU/vector facility contexts, then, on the
 *	boot processor only, rebuilds its per_proc stacks, replants the
 *	reset handler so it can come back through the reset vector, and
 *	spins until every other CPU has reached SleepState.  Finally asks
 *	the platform expert to quiesce the hardware.
 */
void
cpu_sleep(
void)
{
struct per_proc_info *proc_info;
unsigned int cpu, i;
unsigned int wait_ncpus_sleep, ncpus_sleep;
facility_context *fowner;
extern vm_offset_t intstack;
extern vm_offset_t debstack;
extern void _restart_cpu(void);
cpu = cpu_number();
proc_info = &per_proc_info[cpu];
/* Flush and release any live floating point and vector contexts. */
fowner = proc_info->FPU_owner;	/* Cache this */
if(fowner) fpu_save(fowner);	/* If anyone owns FPU, save it */
proc_info->FPU_owner = 0;	/* Set no fpu owner now */
fowner = proc_info->VMX_owner;	/* Cache this */
if(fowner) vec_save(fowner);	/* If anyone owns vectors, save it */
proc_info->VMX_owner = 0;	/* Set no vector owner now */
if (proc_info->cpu_number == 0)  {	/* Boot processor: prepare for wake and wait for the others */
proc_info->cpu_flags &= BootDone;	/* Clear everything except BootDone */
proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->debstack_top_ss = proc_info->debstackptr;
#endif
proc_info->interrupts_enabled = 0;
if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
extern void _start_cpu(void);
/* Plant the reset-handler parameters so we restart through _start_cpu on wake. */
resethandler_target.type = RESET_HANDLER_START;
resethandler_target.call_paddr = (vm_offset_t)_start_cpu;
resethandler_target.arg__paddr = (vm_offset_t)proc_info;
ml_phys_write((vm_offset_t)&ResetHandler + 0,
resethandler_target.type);
ml_phys_write((vm_offset_t)&ResetHandler + 4,
resethandler_target.call_paddr);
ml_phys_write((vm_offset_t)&ResetHandler + 8,
resethandler_target.arg__paddr);
__asm__ volatile("sync");
__asm__ volatile("isync");
}
/* Spin until every other CPU reports SleepState. */
wait_ncpus_sleep = real_ncpus-1;
ncpus_sleep = 0;
while (wait_ncpus_sleep != ncpus_sleep) {
ncpus_sleep = 0;
for(i=1; i < real_ncpus ; i++) {
if ((*(volatile short *)&per_proc_info[i].cpu_flags) & SleepState)
ncpus_sleep++;
}
}
}
PE_cpu_machine_quiesce(proc_info->cpu_id);
}
/*
 *	cpu_sync_timebase - copy the master CPU's timebase into ours.
 *
 *	Slave side of the CPRQtimebase handshake: asks the master (via
 *	SIGPcpureq) to publish its frozen timebase in syncClkSpot, waits
 *	for it to become available, loads it into our TBU/TBL, then tells
 *	the master we are done so it can restart its own timebase.  Runs
 *	with interrupts disabled for the duration.
 */
void
cpu_sync_timebase(
void)
{
natural_t tbu, tbl;
boolean_t intr;
intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */
/* Reset the handshake flags before asking. */
syncClkSpot.avail = FALSE;
syncClkSpot.ready = FALSE;
syncClkSpot.done = FALSE;
/* Keep asking the master until the signal gets through. */
while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
(unsigned int)&syncClkSpot) != KERN_SUCCESS)
continue;
/* Wait for the master to publish its timebase. */
while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
continue;
isync();
/*
 * Load our timebase: zero TBL first so TBU can't carry between the
 * two writes, then set TBU and the real TBL.
 */
tbu = syncClkSpot.abstime >> 32;
tbl = (uint32_t)syncClkSpot.abstime;
mttb(0);
mttbu(tbu);
mttb(tbl);
syncClkSpot.ready = TRUE;	/* Tell the master we have it */
/* Wait for the master to restart its timebase. */
while (*(volatile int *)&(syncClkSpot.done) == FALSE)
continue;
(void)ml_set_interrupts_enabled(intr);
}