#ifndef I386_CPU_DATA
#define I386_CPU_DATA
#include <mach_assert.h>
#if defined(__GNUC__)
#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <pexpert/pexpert.h>
struct cpu_core;
struct cpu_cons_buffer;
struct mp_desc_table;
typedef struct rtclock_timer {
	uint64_t	deadline;
	boolean_t	is_set;
	boolean_t	has_expired;
} rtclock_timer_t;
typedef struct {
	uint64_t	rnt_tsc;
	uint64_t	rnt_nanos;
	uint32_t	rnt_scale;
	uint32_t	rnt_shift;
	uint64_t	rnt_step_tsc;
	uint64_t	rnt_step_nanos;
} rtc_nanotime_t;
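/*
 * rtc_nanotime_t is the snapshot used to convert raw TSC readings into
 * nanoseconds.  Broadly (a sketch, not the exact implementation): given
 * the base pair (rnt_tsc, rnt_nanos) and the fixed-point factor
 * rnt_scale,
 *
 *	nanos = rnt_nanos + (((tsc - rnt_tsc) * rnt_scale) >> 32)
 *
 * rnt_shift and the rnt_step_* fields support rescaling; the rtclock
 * implementation is authoritative for the exact conversion.
 */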
typedef struct {
	struct i386_tss		*cdi_ktss;
#if	MACH_KDB
	struct i386_tss		*cdi_dbtss;
#endif	/* MACH_KDB */
	struct fake_descriptor	*cdi_gdt;
	struct fake_descriptor	*cdi_idt;
	struct fake_descriptor	*cdi_ldt;
} cpu_desc_index_t;
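/*
 * Per-CPU data.  Each processor's %gs segment base points at its own
 * cpu_data structure, so the CPU_DATA_GET() accessors below can fetch
 * per-CPU fields with a single %gs-relative load.
 */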
typedef struct cpu_data
{
	struct cpu_data		*cpu_this;		/* pointer to myself */
	thread_t		cpu_active_thread;
	thread_t		cpu_active_kloaded;
	vm_offset_t		cpu_active_stack;
	vm_offset_t		cpu_kernel_stack;
	vm_offset_t		cpu_int_stack_top;
	int			cpu_preemption_level;
	int			cpu_simple_lock_count;
	int			cpu_interrupt_level;
	int			cpu_number;		/* logical cpu number */
	int			cpu_phys_number;	/* physical cpu number */
	cpu_id_t		cpu_id;
	int			cpu_signals;
	int			cpu_mcount_off;
	ast_t			cpu_pending_ast;
	int			cpu_type;
	int			cpu_subtype;
	int			cpu_threadtype;
	int			cpu_running;
	struct cpu_core		*cpu_core;		/* this cpu's parent core */
	uint64_t		cpu_rtc_tick_deadline;
	uint64_t		cpu_rtc_intr_deadline;
	rtclock_timer_t		cpu_rtc_timer;
	rtc_nanotime_t		cpu_rtc_nanotime;
	void			*cpu_console_buf;
	struct processor	*cpu_processor;
	struct cpu_pmap		*cpu_pmap;
	struct mp_desc_table	*cpu_desc_tablep;
	cpu_desc_index_t	cpu_desc_index;
	boolean_t		cpu_iflag;
#ifdef	MACH_KDB
	int			cpu_db_pass_thru;
	vm_offset_t		cpu_db_stacks;
	struct i386_saved_state	*cpu_kdb_saved_state;
	spl_t			cpu_kdb_saved_ipl;
	int			cpu_kdb_is_slave;
	int			cpu_kdb_active;
#endif	/* MACH_KDB */
	int			cpu_hibernate;
} cpu_data_t;
extern cpu_data_t *cpu_data_ptr[];
extern cpu_data_t cpu_data_master;
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
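/*
 * CPU_DATA_GET(member,type) expands to the complete body of the inline
 * accessors below: it declares a local, loads the requested cpu_data
 * field through the current CPU's %gs segment, and returns it.  For
 * example, get_cpu_number() effectively expands to:
 *
 *	int ret;
 *	__asm__ volatile ("movl %%gs:%P1,%0"
 *		: "=r" (ret)
 *		: "i" (offsetof(cpu_data_t,cpu_number)));
 *	return ret;
 */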
#define CPU_DATA_GET(member,type)				\
	type ret;						\
	__asm__ volatile ("movl %%gs:%P1,%0"			\
		: "=r" (ret)					\
		: "i" (offsetof(cpu_data_t,member)));		\
	return ret;
static inline thread_t
get_active_thread(void)
{
	CPU_DATA_GET(cpu_active_thread,thread_t)
}
#define current_thread_fast()	get_active_thread()
#define current_thread()	current_thread_fast()
static inline int
get_preemption_level(void)
{
	CPU_DATA_GET(cpu_preemption_level,int)
}
static inline int
get_simple_lock_count(void)
{
	CPU_DATA_GET(cpu_simple_lock_count,int)
}
static inline int
get_interrupt_level(void)
{
	CPU_DATA_GET(cpu_interrupt_level,int)
}
static inline int
get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
static inline int
get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}
static inline struct cpu_core *
get_cpu_core(void)
{
	CPU_DATA_GET(cpu_core,struct cpu_core *)
}
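/*
 * Preemption control.  cpu_preemption_level is incremented and
 * decremented directly in the per-CPU area; when enable_preemption()
 * drops the count back to zero it calls kernel_preempt_check() so that
 * any pending preemption is taken.
 */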
static inline void
disable_preemption(void)
{
	__asm__ volatile ("incl %%gs:%P0"
		:
		: "i" (offsetof(cpu_data_t, cpu_preemption_level)));
}
static inline void
enable_preemption(void)
{
	assert(get_preemption_level() > 0);
	__asm__ volatile ("decl %%gs:%P0		\n\t"
			  "jne 1f			\n\t"
			  "call _kernel_preempt_check	\n\t"
			  "1:"
		:
		: "i" (offsetof(cpu_data_t, cpu_preemption_level))
		: "eax", "ecx", "edx", "cc", "memory");
}
static inline void
enable_preemption_no_check(void)
{
	assert(get_preemption_level() > 0);
	__asm__ volatile ("decl %%gs:%P0"
		:
		: "i" (offsetof(cpu_data_t, cpu_preemption_level))
		: "cc", "memory");
}
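/*
 * Typical usage (illustrative sketch only): bracket a critical section
 * so the running thread is not preempted while it manipulates per-CPU
 * state.
 *
 *	disable_preemption();
 *	... touch per-cpu state ...
 *	enable_preemption();	// may call kernel_preempt_check()
 */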
static inline void
mp_disable_preemption(void)
{
	disable_preemption();
}
static inline void
mp_enable_preemption(void)
{
	enable_preemption();
}
static inline void
mp_enable_preemption_no_check(void)
{
	enable_preemption_no_check();
}
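/*
 * Per-CPU data accessors: current_cpu_datap() returns the calling CPU's
 * cpu_data via %gs; cpu_datap(cpu) looks up any CPU's cpu_data in the
 * cpu_data_ptr[] table.
 */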
static inline cpu_data_t *
current_cpu_datap(void)
{
	CPU_DATA_GET(cpu_this, cpu_data_t *)
}
static inline cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu_data_ptr[cpu]);
	return cpu_data_ptr[cpu];
}
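/*
 * cpu_data_alloc() returns the cpu_data area for a processor; the
 * is_boot_cpu argument distinguishes the boot processor (likely served
 * by the statically allocated cpu_data_master declared above).
 */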
extern cpu_data_t *cpu_data_alloc(boolean_t is_boot_cpu);
#else	/* !defined(__GNUC__) */
#endif	/* defined(__GNUC__) */
#endif	/* I386_CPU_DATA */