#include <cputypes.h>
#include <cpus.h>
#include <platforms.h>
#include <task_swapper.h>
#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <kern/processor.h>
#include <kern/spl.h>
#include <mach/policy.h>
#if TASK_SWAPPER
#include <kern/task_swap.h>
#endif
/*
 * Per-CPU pending-AST bitmask, indexed by cpu_number().
 * Read-modify-written under mp_disable_preemption() (see ast_taken);
 * volatile presumably because it is also set asynchronously — TODO confirm
 * against the ast_on_fast/ast_propagate implementations.
 */
volatile ast_t need_ast[NCPUS];
/*
 * ast_init:
 *
 *	Clear the pending-AST mask for every CPU at startup.
 *	When the machine layer provides its own AST state
 *	(MACHINE_AST defined), there is nothing to do here.
 */
void
ast_init(void)
{
#ifndef MACHINE_AST
	register int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		need_ast[cpu] = AST_NONE;
#endif	/* MACHINE_AST */
}
/*
 * ast_taken:
 *
 *	Process the ASTs pending for the current thread that fall within
 *	`mask`.  Called with `interrupt` indicating the interrupt-enable
 *	state to restore before processing begins.
 *
 *	NOTE(review): the `preemption` parameter is not referenced in this
 *	body — presumably kept for interface compatibility; confirm against
 *	callers.
 */
void
ast_taken(
boolean_t preemption,
ast_t mask,
boolean_t interrupt
)
{
register thread_t self = current_thread();
register processor_t mypr;
register ast_t reasons;
register int mycpu;
thread_act_t act = self->top_act;
#ifdef MACH_BSD
extern void bsd_ast(thread_act_t);
extern void bsdinit_task(void);
#endif
/*
 * Atomically (w.r.t. preemption) snapshot and clear the ASTs we
 * are going to handle; cpu_number() is only stable while
 * preemption is disabled.
 */
mp_disable_preemption();
mycpu = cpu_number();
reasons = need_ast[mycpu] & mask;
need_ast[mycpu] &= ~reasons;
mp_enable_preemption();
/* Restore the caller-specified interrupt state before doing work. */
ml_set_interrupts_enabled(interrupt);
/* The idle thread never processes ASTs. */
if (self->state & TH_IDLE)
return;
/*
 * Urgent (preemption) AST: if the thread can safely block, decide
 * whether a context switch is needed and block if so.
 */
if ((reasons & AST_URGENT) && (wait_queue_assert_possible(self))) {
reasons &= ~AST_URGENT;
if ((reasons & (AST_BLOCK|AST_QUANTUM)) == 0) {
/* No explicit block/quantum reason yet — ask the scheduler. */
mp_disable_preemption();
mypr = current_processor();
if (csw_needed(self, mypr)) {
/* Within first quantum: plain block; otherwise quantum expiry. */
reasons |= (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
}
mp_enable_preemption();
}
if (reasons & (AST_BLOCK | AST_QUANTUM)) {
counter(c_ast_taken_block++);
thread_block_reason((void (*)(void))0,
(reasons & (AST_BLOCK | AST_QUANTUM)));
}
/* Nothing else pending — done. */
if (reasons == 0)
return;
}
#ifdef MACH_BSD
/* Hand BSD-layer ASTs to the Unix side of the kernel. */
if (reasons & AST_BSD) {
thread_ast_clear(act,AST_BSD);
bsd_ast(act);
}
if (reasons & AST_BSD_INIT) {
thread_ast_clear(act,AST_BSD_INIT);
bsdinit_task();
}
#endif
#if TASK_SWAPPER
/*
 * Task swap-out: after swapout_ast(), re-sample the per-CPU mask
 * at splsched in case an APC was posted meanwhile, and fold it in.
 */
if (reasons & AST_SWAPOUT) {
spl_t s;
swapout_ast();
s = splsched();
mp_disable_preemption();
mycpu = cpu_number();
if (need_ast[mycpu] & AST_APC) {
reasons |= AST_APC;
need_ast[mycpu] &= ~AST_APC;
}
mp_enable_preemption();
splx(s);
}
#endif
/* Asynchronous procedure calls (return handlers) for this activation. */
if (reasons & AST_APC) {
act_execute_returnhandlers();
}
/*
 * Final pass: only block/quantum reasons survive.  If none remain,
 * re-check whether the scheduler now wants a context switch.
 */
reasons &= (AST_BLOCK | AST_QUANTUM);
if (reasons == 0) {
mp_disable_preemption();
mypr = current_processor();
if (csw_needed(self, mypr)) {
reasons = (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
}
mp_enable_preemption();
}
/* Block only if it is safe for this thread to wait. */
if ((reasons & (AST_BLOCK | AST_QUANTUM)) &&
(wait_queue_assert_possible(self))) {
counter(c_ast_taken_block++);
thread_block_reason((void (*)(void))0, reasons);
}
}
/*
 * ast_check:
 *
 *	Re-evaluate whether an AST should be posted on the current
 *	processor, based on its state and the scheduler's opinion.
 *	Runs at splsched with preemption disabled for the duration.
 */
void
ast_check(void)
{
register int mycpu;
register processor_t myprocessor;
register thread_t thread = current_thread();
spl_t s = splsched();
mp_disable_preemption();
mycpu = cpu_number();
myprocessor = cpu_to_processor(mycpu);
switch(myprocessor->state) {
/* No running thread to preempt in these states — nothing to post. */
case PROCESSOR_OFF_LINE:
case PROCESSOR_IDLE:
case PROCESSOR_DISPATCHING:
break;
#if NCPUS > 1
/* Processor being reassigned: force the current thread to block. */
case PROCESSOR_ASSIGN:
ast_on(AST_BLOCK);
break;
#endif
case PROCESSOR_RUNNING:
case PROCESSOR_SHUTDOWN:
/* Propagate the activation's private ASTs to the per-CPU mask. */
ast_propagate(current_act()->ast);
/* If something is already pending, no need to consult the scheduler. */
if (ast_needed(mycpu))
break;
if (csw_needed(thread, myprocessor)) {
/* First quantum: plain block; otherwise quantum expiry. */
ast_on((myprocessor->first_quantum ?
AST_BLOCK : AST_QUANTUM));
}
break;
default:
panic("ast_check: Bad processor state");
}
mp_enable_preemption();
splx(s);
}
/*
 * ast_on/ast_off exist as fast-path macros; undefine them here so we
 * can provide true function versions for callers that need them.
 */
#undef ast_on
#undef ast_off
/*
 * ast_on:
 *
 *	Post the given AST reason(s) via ast_on_fast, with interrupts
 *	disabled across the update, then restore the previous interrupt
 *	state.
 */
void
ast_on(ast_t reason)
{
	boolean_t intr;

	intr = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(reason);
	/*
	 * Discard the returned previous-state value; was `(void *)`,
	 * which wrongly cast a boolean_t to a pointer type.
	 */
	(void)ml_set_interrupts_enabled(intr);
}
/*
 * ast_off:
 *
 *	Clear the given AST reason(s) via ast_off_fast.
 *
 *	NOTE(review): unlike ast_on above, this does not disable
 *	interrupts around the fast-path call — presumably clearing is
 *	safe without the window, but confirm against ast_off_fast's
 *	implementation.
 */
void
ast_off(ast_t reason)
{
ast_off_fast(reason);
}