#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <mach/policy.h>
#include <machine/trap.h> // for CHUD AST hook
#include <machine/pal_routines.h>
#include <security/mac_mach_internal.h> // for MACF AST hook
/*
 * Performance-tooling (CHUD) callback, installed externally; sampled once
 * per ast_taken() pass and given first crack at AST_CHUD_ALL reasons.
 * volatile: may be swapped at any time by the tool that installs it.
 */
volatile perfASTCallback perfASTHook;

/*
 * ast_init: one-time initialization of the AST subsystem.
 * Currently a no-op; retained as a hook point for startup ordering.
 */
void
ast_init(void)
{
}
extern void chudxnu_thread_ast(thread_t);
/*
 * ast_taken: handle the pending asynchronous system traps (ASTs) selected
 * by `reasons` for the current thread.
 *
 *	reasons:	mask of AST_* bits the caller wants serviced; only
 *			bits also pending in the per-CPU AST state are acted on.
 *	enable:		interrupt-enable state to restore on return (and to
 *			apply while running the "safe" handlers below).
 *
 * Called with interrupts disabled.  May block (thread_block_reason) and,
 * on the final preemption path, resumes at thread_exception_return.
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable
)
{
	/* A pure preemption trap (reasons == AST_PREEMPTION exactly) skips
	 * the interrupt-enabled handler section below. */
	boolean_t	preempt_trap = (reasons == AST_PREEMPTION);
	ast_t		*myast = ast_pending();
	thread_t	thread = current_thread();
	perfASTCallback	perf_hook = perfASTHook;

	/*
	 * CHUD hook pointer is sampled once above; if installed, it gets
	 * first crack at any pending AST_CHUD_ALL reasons and may consume
	 * everything (in which case we are done).  If no hook is installed,
	 * stale CHUD bits are simply discarded.
	 */
	if (perf_hook) {
		if (*myast & AST_CHUD_ALL) {
			(*perf_hook)(reasons, myast);

			if (*myast == AST_NONE)
				return;
		}
	}
	else
		*myast &= ~AST_CHUD_ALL;

	/* Act only on reasons that are actually pending, and clear those
	 * bits from the pending set before handling them. */
	reasons &= *myast;
	*myast &= ~reasons;

	/* The idle thread handles none of the ASTs below. */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Urgent preemption is handled here, while interrupts are
		 * still disabled, but only if the thread is in a state where
		 * blocking on a waitq is possible.
		 */
		if ( (reasons & AST_URGENT) &&
		     waitq_wait_possible(thread) ) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
					reasons & AST_PREEMPTION);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The rest of the ASTs run with interrupts enabled (per the
		 * caller-supplied `enable` state), but never from a pure
		 * preemption trap.
		 */
		if (!preempt_trap) {
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/* BSD-layer per-thread work (signals, etc. — handled
			 * by bsd_ast; semantics live in the BSD layer). */
			if (reasons & AST_BSD) {
				thread_ast_clear(thread, AST_BSD);
				bsd_ast(thread);
			}
#endif

#if CONFIG_MACF
			/* MAC framework user-return hook. */
			if (reasons & AST_MACF) {
				thread_ast_clear(thread, AST_MACF);
				mac_thread_userret(thread);
			}
#endif

			if (reasons & AST_APC) {
				thread_ast_clear(thread, AST_APC);
				special_handler(thread);
			}

			if (reasons & AST_GUARD) {
				thread_ast_clear(thread, AST_GUARD);
				guard_ast(thread);
			}

			if (reasons & AST_LEDGER) {
				thread_ast_clear(thread, AST_LEDGER);
				ledger_ast(thread);
			}

			/* kperf sampling is routed through the CHUD thread
			 * AST entry point. */
			if (reasons & AST_KPERF) {
				thread_ast_clear(thread, AST_KPERF);
				chudxnu_thread_ast(thread);
			}

#if CONFIG_TELEMETRY
			if (reasons & AST_TELEMETRY_ALL) {
				boolean_t interrupted_userspace = FALSE;
				boolean_t is_windowed = FALSE;

				/* USER and WINDOWED telemetry must not both
				 * be pending at once. */
				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL);

				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
				is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
				thread_ast_clear(thread, AST_TELEMETRY_ALL);
				telemetry_ast(thread, interrupted_userspace, is_windowed);
			}
#endif

			/* Back to interrupts-off for the scheduling work. */
			ml_set_interrupts_enabled(FALSE);

#if CONFIG_SCHED_SFI
			/* Selective Forced Idle evaluation. */
			if (reasons & AST_SFI) {
				sfi_ast(thread);
			}
#endif

			/*
			 * Re-check for preemption under the thread lock: the
			 * handlers above ran with interrupts enabled, so the
			 * preemption picture may have changed.
			 */
			thread_lock(thread);
			if (reasons & AST_PREEMPT)
				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
			thread_unlock(thread);

			assert(waitq_wait_possible(thread));

			/* Block with a continuation: on resume, the thread
			 * re-enters user space via thread_exception_return. */
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
			}
		}
	}

	/* Restore the caller's requested interrupt state. */
	ml_set_interrupts_enabled(enable);
}
/*
 * ast_check: re-evaluate whether the thread currently running on
 * `processor` should be preempted, and post the resulting AST reasons
 * if so.  Also refreshes the processor's cached scheduling state
 * (priority, mode, SFI class) from the thread.
 */
void
ast_check(
	processor_t processor)
{
	thread_t	thread = processor->active_thread;
	ast_t		needed;

	/* Only meaningful for a processor that is running or shutting down. */
	if (processor->state != PROCESSOR_RUNNING &&
	    processor->state != PROCESSOR_SHUTDOWN)
		return;

	pal_ast_check(thread);

	ast_propagate(thread->ast);

	thread_lock(thread);

	/* Refresh cached scheduling state under the thread lock. */
	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;
	processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);

	needed = csw_check(processor, AST_NONE);
	if (needed != AST_NONE)
		ast_on(needed);

	thread_unlock(thread);
}
/*
 * ast_on: post the given AST reasons on the current processor by
 * setting the corresponding bits in the per-CPU pending mask.
 */
void
ast_on(ast_t reasons)
{
	ast_t *pending = ast_pending();

	*pending = *pending | reasons;
}
/*
 * ast_off: retract the given AST reasons on the current processor by
 * clearing the corresponding bits in the per-CPU pending mask.
 */
void
ast_off(ast_t reasons)
{
	ast_t *pending = ast_pending();

	*pending = *pending & ~reasons;
}
/*
 * ast_context: switch the per-CPU pending AST state to reflect `thread`
 * becoming current — keep the processor-wide bits, replace the
 * per-thread bits with those carried by the thread itself.
 */
void
ast_context(thread_t thread)
{
	ast_t	*pending = ast_pending();
	ast_t	preserved = *pending & ~AST_PER_THREAD;

	*pending = preserved | thread->ast;
}