#include <cpus.h>
#include <fast_idle.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>
#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <kern/etap_macros.h>
#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif
#include <i386/intel_read_fault.h>
#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif
#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif
#include <string.h>
#include <i386/io_emulate.h>
extern void user_page_fault_continue(
kern_return_t kr);
extern boolean_t v86_assist(
thread_t thread,
struct i386_saved_state *regs);
extern boolean_t check_io_fault(
struct i386_saved_state *regs);
extern int inst_fetch(
int eip,
int cs);
/*
 * thread_syscall_return:
 *
 *	Deliver a system-call result to the current thread.  The return
 *	code is planted in the saved user %eax, then control goes back
 *	to user mode via thread_exception_return(), which does not
 *	return here.
 */
void
thread_syscall_return(
	kern_return_t ret)
{
	struct i386_saved_state *state;

	state = USER_REGS(current_act());
	state->eax = ret;
	thread_exception_return();
	/*NOTREACHED*/
}
#if MACH_KDB
/* When TRUE, traps are offered to kdb before normal exception delivery. */
boolean_t	debug_all_traps_with_kdb = FALSE;

/* Debugger breakpoint/watchpoint state, owned by ddb. */
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
/*
 * thread_kdb_return:
 *
 *	Re-enter the kernel debugger on behalf of the current thread,
 *	presenting its saved user register state.  If kdb_trap()
 *	indicates the thread should resume, return to user mode.
 */
void
thread_kdb_return(void)
{
	register thread_act_t	thr_act = current_act();
	register thread_t	cur_thr = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	if (kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
		/* must not hold mutexes when returning to user mode */
		assert(cur_thr->mutex_count == 0);
#endif
		check_simple_locks();
		thread_exception_return();
		/*NOTREACHED*/
	}
}
/* When TRUE, page faults taken while ddb is active are serviced. */
boolean_t let_ddb_vm_fault = FALSE;

#if NCPUS > 1
/* Per-CPU flags: nonzero while that CPU is inside kdb. */
extern int kdb_active[NCPUS];
#endif

#endif	/* MACH_KDB */
/*
 * user_page_fault_continue:
 *
 *	Continuation run after vm_fault() has processed a user-mode
 *	page fault.  On success (or an aborted fault) the thread
 *	resumes in user mode; otherwise EXC_BAD_ACCESS is raised with
 *	the fault result code and the faulting address (saved %cr2).
 */
void
user_page_fault_continue(
	kern_return_t kr)
{
	register thread_act_t	thr_act = current_act();
	register thread_t	cur_thr = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		/* Report write faults that land on a debugger watchpoint. */
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (regs->err & T_PF_WRITE) &&
		    db_find_watchpoint(thr_act->map,
				       (vm_offset_t)regs->cr2,
				       regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		thread_exception_return();
		/*NOTREACHED*/
	}

#if MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
		/* cur_thr is only consumed by this debug assertion */
		assert(cur_thr->mutex_count == 0);
#endif
		check_simple_locks();
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
	/*NOTREACHED*/
}
/*
 * Fault recovery tables: pairs of (pc that may fault, pc to resume
 * at).  recover_table marks kernel text allowed to take user-space
 * faults; retry_table marks instructions to restart after a
 * successfully resolved fault.  Both are laid down in assembly.
 */
struct recovery {
	int	fault_addr;	/* pc at which a fault may occur */
	int	recover_addr;	/* pc to resume at after the fault */
};

extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];
extern struct recovery	retry_table[];
extern struct recovery	retry_table_end[];

/* Printable trap names, indexed by trap number. */
char *	trap_type[] = {TRAP_NAMES};
int	TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
/*
 * kernel_trap:
 *
 *	Handle a trap taken while running in kernel mode.  Returns
 *	TRUE if the trap was handled and the faulting instruction may
 *	be restarted; FALSE if the caller should treat it as fatal or
 *	hand it to the debugger.
 */
boolean_t
kernel_trap(
	register struct i386_saved_state *regs)
{
	int	exc;		/* NOTE(review): declared but unused here */
	int	code;
	int	subcode;
	int	interruptible;
	register int	type;
	vm_map_t	map;
	kern_return_t	result;
	register thread_t	thread;
	thread_act_t		thr_act;
	etap_data_t		probe_data;
	pt_entry_t		*pte;
	extern vm_offset_t	vm_last_phys;

	type = regs->trapno;
	code = regs->err;
	thread = current_thread();
	thr_act = current_act();

	/* Record the exception for ETAP event monitoring. */
	ETAP_DATA_LOAD(probe_data[0], regs->trapno);
	ETAP_DATA_LOAD(probe_data[1], MACH_PORT_NULL);
	ETAP_DATA_LOAD(probe_data[2], MACH_PORT_NULL);
	ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
			0,
			thread,
			&probe_data,
			ETAP_DATA_ENTRY*3);

	switch (type) {
	    case T_PREEMPT:
		/* preemption request: nothing else to do here */
		return (TRUE);

	    case T_NO_FPU:
		fpnoextflt();		/* give the thread the FPU */
		return (TRUE);

	    case T_FPU_FAULT:
		fpextovrflt();
		return (TRUE);

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return (TRUE);

	    case T_PAGE_FAULT:
#if MACH_KDB
		/*
		 * Refuse to take VM faults while the debugger is
		 * active unless it has explicitly allowed them.
		 */
		mp_disable_preemption();
		if (db_active
#if NCPUS > 1
		    && kdb_active[cpu_number()]
#endif
		    && !let_ddb_vm_fault) {
			mp_enable_preemption();
			return (FALSE);
		}
		mp_enable_preemption();
#endif
		subcode = regs->cr2;	/* faulting linear address */

		/*
		 * Pick the map to fault in.  Addresses above
		 * LINEAR_KERNEL_ADDRESS are kernel linear addresses
		 * and are rebased into the kernel map.
		 */
		if (subcode > LINEAR_KERNEL_ADDRESS) {
			map = kernel_map;
			subcode -= LINEAR_KERNEL_ADDRESS;
		} else if (thr_act == THR_ACT_NULL || thread == THREAD_NULL)
			map = kernel_map;
		else {
			map = thr_act->map;
		}

#if MACH_KDB
		/*
		 * Write fault on a page the debugger write-protected
		 * for a watchpoint: re-install a writable PTE instead
		 * of calling vm_fault, and report success.
		 */
		if (map == kernel_map &&
		    db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (code & T_PF_WRITE) &&
		    (vm_offset_t)subcode < vm_last_phys &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
			INTEL_PTE_WRITE) == 0) {
			*pte = INTEL_PTE_VALID | INTEL_PTE_WRITE |
				pa_to_pte(trunc_page((vm_offset_t)subcode) -
					  VM_MIN_KERNEL_ADDRESS);
			result = KERN_SUCCESS;
		} else
#endif	/* MACH_KDB */
		{
			if (map == kernel_map) {
				register struct recovery *rp;

				/*
				 * Kernel faults are uninterruptible
				 * unless the pc is in the recovery
				 * table (copyin/copyout style code).
				 */
				interruptible = THREAD_UNINT;
				for (rp = recover_table; rp < recover_table_end; rp++) {
					if (regs->eip == rp->fault_addr) {
						interruptible = THREAD_ABORTSAFE;
						break;
					}
				}
			}
			/* interruptible is only read when map == kernel_map */
			result = vm_fault(map,
					  trunc_page((vm_offset_t)subcode),
					  VM_PROT_READ|VM_PROT_WRITE,
					  FALSE,
					  (map == kernel_map) ? interruptible : THREAD_ABORTSAFE, NULL, 0);
		}
#if MACH_KDB
		if (result == KERN_SUCCESS) {
			/* Look for watchpoints on the written address. */
			if (db_watchpoint_list &&
			    db_watchpoints_inserted &&
			    (code & T_PF_WRITE) &&
			    db_find_watchpoint(map,
					       (vm_offset_t)subcode, regs))
				kdb_trap(T_WATCHPOINT, 0, regs);
		}
		else
#endif	/* MACH_KDB */
		/*
		 * A read protection fault may really be a read of a
		 * present-but-execute/read-only mapping: retry as a
		 * read-only fault.
		 */
		if ((code & T_PF_WRITE) == 0 &&
		    result == KERN_PROTECTION_FAILURE)
		{
			result = intel_read_fault(map,
						  trunc_page((vm_offset_t)subcode));
		}

		if (result == KERN_SUCCESS) {
			/*
			 * If the pc is in the retry table, resume at
			 * the matching recovery address instead of
			 * re-executing the faulting instruction.
			 */
			register struct recovery *rp;

			for (rp = retry_table; rp < retry_table_end; rp++) {
				if (regs->eip == rp->fault_addr) {
					regs->eip = rp->recover_addr;
					break;
				}
			}
			return (TRUE);
		}
		/* fall through: unresolved fault, try GP recovery */

	    case T_GENERAL_PROTECTION:
		/*
		 * If the pc is in the recovery table, transfer to the
		 * recovery address (used by instructions that probe
		 * possibly-bad addresses).
		 */
		{
			register struct recovery *rp;

			for (rp = recover_table;
			     rp < recover_table_end;
			     rp++) {
				if (regs->eip == rp->fault_addr) {
					regs->eip = rp->recover_addr;
					return (TRUE);
				}
			}
		}
		/* Per-thread recovery point (set e.g. by v86_assist). */
		if (thread->recover) {
			regs->eip = thread->recover;
			thread->recover = 0;
			return (TRUE);
		}
		/* fall through to default */

	    default:
#if MACH_KDP
		/*
		 * NOTE(review): "result" is uninitialized when this
		 * point is reached other than via the page-fault
		 * fallthrough -- confirm kdp_i386_trap's use of it.
		 */
		kdp_i386_trap(type, regs, result, regs->cr2);
#endif
		return (FALSE);
	}
	return (TRUE);	/* NOTE(review): unreachable; kept as-is */
}
/*
 * panic_trap:
 *
 *	Report an unrecoverable kernel trap (type, error code and pc)
 *	on the console and halt the system via panic().
 */
void
panic_trap(
	register struct i386_saved_state *regs)
{
	printf("trap type %d, code = %x, pc = %x\n",
	       regs->trapno, regs->err, regs->eip);
	panic("trap");
	/*NOTREACHED*/
}
/*
 * user_trap:
 *
 *	Handle a trap taken from user mode.  Either resolves the trap
 *	in place (FPU faults, page faults, emulated I/O, v86 assists)
 *	or converts it into a Mach exception (exc/code/subcode) and
 *	raises it via i386_exception().
 *
 *	FIX: in the kernel-loaded-activation page-fault branch, "map"
 *	was read to choose the fault interruptibility without ever
 *	being initialized when the fault address was at or below
 *	LINEAR_KERNEL_ADDRESS (undefined behavior).  It now defaults
 *	to the activation's map before the rebase check.
 */
void
user_trap(
	register struct i386_saved_state *regs)
{
	int	exc;
	int	code;
	int	subcode;
	register int	type;
	vm_map_t	map;
	vm_prot_t	prot;
	kern_return_t	result;
	register thread_act_t thr_act = current_act();
	thread_t	thread = (thr_act ? thr_act->thread : THREAD_NULL);
	boolean_t	kernel_act = thr_act->kernel_loaded;
	etap_data_t	probe_data;

	if (regs->efl & EFL_VM) {
		/*
		 * If the thread was running in V86 mode, let the v86
		 * assist try to emulate the faulting instruction.
		 */
		if (v86_assist(thread, regs))
			return;
	}

	type = regs->trapno;
	code = 0;
	subcode = 0;

	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;

	    case T_INT3:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	    case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    case T_NO_FPU:
	    case 32:		/* XXX trap 32 also means no-FPU -- confirm */
		fpnoextflt();
		return;

	    case T_FPU_FAULT:
		fpextovrflt();
		return;

	    case 10:		/* invalid TSS fault */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_GENERAL_PROTECTION:
		if (!(regs->efl & EFL_VM)) {
			/*
			 * GP faults from protected mode may be I/O
			 * instructions we can emulate.
			 */
			if (check_io_fault(regs))
				return;
		}
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_PAGE_FAULT:
		subcode = regs->cr2;	/* faulting linear address */
		prot = VM_PROT_READ|VM_PROT_WRITE;
		if (kernel_act == FALSE) {
			if (!(regs->err & T_PF_WRITE))
				prot = VM_PROT_READ;
			(void) user_page_fault_continue(vm_fault(thr_act->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				THREAD_ABORTSAFE, NULL, 0));
			/* NOTREACHED */
		}
		else {
			/*
			 * Kernel-loaded activation: default to the
			 * activation's map (BUG FIX -- map was left
			 * uninitialized on this path), then rebase
			 * kernel linear addresses into kernel_map.
			 */
			map = thr_act->map;
			if (subcode > LINEAR_KERNEL_ADDRESS) {
				map = kernel_map;
				subcode -= LINEAR_KERNEL_ADDRESS;
			}
			result = vm_fault(thr_act->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				(map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
			if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
				/*
				 * Retry as a read-only fault: the
				 * access may have been a legitimate
				 * read of a read-only mapping.
				 */
				result = intel_read_fault(thr_act->map,
					trunc_page((vm_offset_t)subcode));
			}
			user_page_fault_continue(result);
			/* NOTREACHED */
		}
		break;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    default:
#if MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if MACH_KDB
		if (kdb_trap(type, regs->err, regs))
			return;
#endif	/* MACH_KDB */
		printf("user trap type %d, code = %x, pc = %x\n",
		       type, regs->err, regs->eip);
		panic("user trap");
		return;
	}

#if MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(type, regs->err, regs))
		return;
#endif	/* MACH_KDB */

#if ETAP_EVENT_MONITOR
	if (thread != THREAD_NULL) {
		ETAP_DATA_LOAD(probe_data[0], regs->trapno);
		ETAP_DATA_LOAD(probe_data[1],
			       thr_act->exc_actions[exc].port);
		ETAP_DATA_LOAD(probe_data[2],
			       thr_act->task->exc_actions[exc].port);
		ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
				0,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}
#endif	/* ETAP_EVENT_MONITOR */

	i386_exception(exc, code, subcode);
	/* NOTREACHED */
}
/* Master switch: enable v86 instruction emulation at all. */
boolean_t	v86_assist_on = TRUE;
/* NOTE(review): not referenced in this file -- purpose unconfirmed. */
boolean_t	v86_unsafe_ok = FALSE;
/* Emulate cli/sti (virtualized interrupt flag). */
boolean_t	v86_do_sti_cli = TRUE;
/* If TRUE, sti takes effect immediately instead of being deferred. */
boolean_t	v86_do_sti_immediate = FALSE;

/* Private v86->flags bit: an emulated interrupt awaits its iret. */
#define	V86_IRET_PENDING 0x4000

int cli_count = 0;	/* statistics: emulated cli instructions */
int sti_count = 0;	/* statistics: emulated sti instructions */
/*
 * v86_assist:
 *
 *	Emulate flag-sensitive and privileged instructions for a
 *	thread running in virtual-8086 mode, and deliver pending
 *	simulated interrupts.  Returns TRUE if the trap was fully
 *	handled here; FALSE if it should become an exception.
 */
boolean_t
v86_assist(
	thread_t thread,
	register struct i386_saved_state *regs)
{
	register struct v86_assist_state *v86 = &thread->top_act->mact.pcb->ims.v86s;

/* Convert a 16-bit segment:offset pair to a linear address. */
#define	Addr8086(seg,off)	((((seg) & 0xffff) << 4) + (off))

/* EFLAGS bits v86 user code is allowed to set directly. */
#define	EFL_V86_SAFE		( EFL_OF | EFL_DF | EFL_TF \
				| EFL_SF | EFL_ZF | EFL_AF \
				| EFL_PF | EFL_CF )

	/* 32-bit iret stack frame layout. */
	struct iret_32 {
		int		eip;
		int		cs;
		int		eflags;
	};
	/* 16-bit iret stack frame layout. */
	struct iret_16 {
		unsigned short	ip;
		unsigned short	cs;
		unsigned short	flags;
	};
	union iret_struct {
		struct iret_32	iret_32;
		struct iret_16	iret_16;
	};
	/* Real-mode interrupt vector: ip, then cs. */
	struct int_vec {
		unsigned short	ip;
		unsigned short	cs;
	};

	if (!v86_assist_on)
		return FALSE;

	/*
	 * If a deferred sti is pending, enable simulated interrupts
	 * now; drop the single-step flag we forced unless the user
	 * was tracing.
	 */
	if (v86->flags & V86_IF_PENDING) {
		v86->flags &= ~V86_IF_PENDING;
		v86->flags |= EFL_IF;
		if ((v86->flags & EFL_TF) == 0)
			regs->efl &= ~EFL_TF;
	}

	if (regs->trapno == T_DEBUG) {
		if (v86->flags & EFL_TF) {
			/* User was single-stepping: pass the trap on. */
			return FALSE;
		}
		/* Otherwise it was the step we forced for a deferred
		   sti; fall through to interrupt delivery below. */
	}
	else if (regs->trapno == T_GENERAL_PROTECTION) {
		/*
		 * Emulate the privileged instruction at cs:eip.
		 */
		register int	eip;
		boolean_t	addr_32 = FALSE;
		boolean_t	data_32 = FALSE;
		int		io_port;

		/* Recovery point for faulting user-memory reads. */
		__asm__("movl $(addr_error), %0" : : "m" (thread->recover));

		eip = regs->eip;
		while (TRUE) {
			unsigned char	opcode;

			if (eip > 0xFFFF) {
				thread->recover = 0;
				return FALSE;
			}
			opcode = *(unsigned char *)Addr8086(regs->cs,eip);
			eip++;
			switch (opcode) {
			    case 0xf0:	/* lock */
			    case 0xf2:	/* repne */
			    case 0xf3:	/* rep */
			    case 0x2e:	/* cs override */
			    case 0x36:	/* ss override */
			    case 0x3e:	/* ds override */
			    case 0x26:	/* es override */
			    case 0x64:	/* fs override */
			    case 0x65:	/* gs override */
				continue;	/* skip prefix byte */

			    case 0x66:	/* operand-size prefix */
				data_32 = TRUE;
				continue;

			    case 0x67:	/* address-size prefix */
				addr_32 = TRUE;
				continue;

			    case 0xe4:	/* in  al,imm8 */
			    case 0xe5:	/* in  (e)ax,imm8 */
			    case 0xe6:	/* out imm8,al */
			    case 0xe7:	/* out imm8,(e)ax */
				/* port is an immediate byte */
				io_port = *(unsigned char *)Addr8086(regs->cs, eip);
				eip++;
				goto do_in_out;

			    case 0xec:	/* in  al,dx */
			    case 0xed:	/* in  (e)ax,dx */
			    case 0xee:	/* out dx,al */
			    case 0xef:	/* out dx,(e)ax */
			    case 0x6c:	/* insb */
			    case 0x6d:	/* ins(w|d) */
			    case 0x6e:	/* outsb */
			    case 0x6f:	/* outs(w|d) */
				/* port is in dx */
				io_port = regs->edx & 0xffff;

			do_in_out:
				/* no 0x66 prefix seen: flag 16-bit
				   operand for emulate_io */
				if (!data_32)
					opcode |= 0x6600;

				switch (emulate_io(regs, opcode, io_port)) {
				    case EM_IO_DONE:
					/* emulated; advance eip below */
					break;
				    case EM_IO_RETRY:
					/* restart the instruction */
					thread->recover = 0;
					return TRUE;
				    case EM_IO_ERROR:
					/* turn into an exception */
					thread->recover = 0;
					return FALSE;
				}
				break;

			    case 0xfa:	/* cli */
				if (!v86_do_sti_cli) {
					thread->recover = 0;
					return (FALSE);
				}
				/* disable simulated interrupts */
				v86->flags &= ~EFL_IF;
				cli_count++;
				break;

			    case 0xfb:	/* sti */
				if (!v86_do_sti_cli) {
					thread->recover = 0;
					return (FALSE);
				}
				if ((v86->flags & EFL_IF) == 0) {
					if (v86_do_sti_immediate) {
						v86->flags |= EFL_IF;
					} else {
						/* defer: single-step so
						   the next instruction
						   runs, then deliver */
						v86->flags |= V86_IF_PENDING;
						regs->efl |= EFL_TF;
					}
				}
				sti_count++;
				break;

			    case 0x9c:	/* pushf */
			    {
				int		flags;
				vm_offset_t	sp;
				int		size;

				flags = regs->efl;
				/* substitute the virtualized IF/TF */
				if ((v86->flags & EFL_IF) == 0)
					flags &= ~EFL_IF;
				if ((v86->flags & EFL_TF) == 0)
					flags &= ~EFL_TF;
				else	flags |= EFL_TF;

				sp = regs->uesp;
				if (!addr_32)
					sp &= 0xffff;
				else if (sp > 0xffff)
					goto stack_error;
				size = (data_32) ? 4 : 2;
				if (sp < size)
					goto stack_error;
				sp -= size;
				if (copyout((char *)&flags,
					    (char *)Addr8086(regs->ss,sp),
					    size))
					goto addr_error;
				if (addr_32)
					regs->uesp = sp;
				else
					regs->uesp = (regs->uesp & 0xffff0000) | sp;
				break;
			    }

			    case 0x9d:	/* popf */
			    {
				vm_offset_t	sp;
				int		nflags;

				sp = regs->uesp;
				if (!addr_32)
					sp &= 0xffff;
				else if (sp > 0xffff)
					goto stack_error;

				if (data_32) {
					if (sp > 0xffff - sizeof(int))
						goto stack_error;
					nflags = *(int *)Addr8086(regs->ss,sp);
					sp += sizeof(int);
				}
				else {
					if (sp > 0xffff - sizeof(short))
						goto stack_error;
					nflags = *(unsigned short *)
						Addr8086(regs->ss,sp);
					sp += sizeof(short);
				}
				if (addr_32)
					regs->uesp = sp;
				else
					regs->uesp = (regs->uesp & 0xffff0000) | sp;

				/* IF/TF go to the virtual flags; keep
				   the pending-iret marker if set */
				if (v86->flags & V86_IRET_PENDING) {
					v86->flags = nflags & (EFL_TF | EFL_IF);
					v86->flags |= V86_IRET_PENDING;
				} else {
					v86->flags = nflags & (EFL_TF | EFL_IF);
				}
				regs->efl = (regs->efl & ~EFL_V86_SAFE)
					| (nflags & EFL_V86_SAFE);
				break;
			    }

			    case 0xcf:	/* iret */
			    {
				vm_offset_t	sp;
				int		nflags;
				int		size;	/* NOTE(review): unused */
				union iret_struct iret_struct;

				v86->flags &= ~V86_IRET_PENDING;
				sp = regs->uesp;
				if (!addr_32)
					sp &= 0xffff;
				else if (sp > 0xffff)
					goto stack_error;

				if (data_32) {
					if (sp > 0xffff - sizeof(struct iret_32))
						goto stack_error;
					iret_struct.iret_32 =
						*(struct iret_32 *) Addr8086(regs->ss,sp);
					sp += sizeof(struct iret_32);
				}
				else {
					if (sp > 0xffff - sizeof(struct iret_16))
						goto stack_error;
					iret_struct.iret_16 =
						*(struct iret_16 *) Addr8086(regs->ss,sp);
					sp += sizeof(struct iret_16);
				}
				if (addr_32)
					regs->uesp = sp;
				else
					regs->uesp = (regs->uesp & 0xffff0000) | sp;

				if (data_32) {
					eip = iret_struct.iret_32.eip;
					regs->cs = iret_struct.iret_32.cs & 0xffff;
					nflags = iret_struct.iret_32.eflags;
				}
				else {
					eip = iret_struct.iret_16.ip;
					regs->cs = iret_struct.iret_16.cs;
					nflags = iret_struct.iret_16.flags;
				}
				v86->flags = nflags & (EFL_TF | EFL_IF);
				regs->efl = (regs->efl & ~EFL_V86_SAFE)
					| (nflags & EFL_V86_SAFE);
				break;
			    }

			    default:
				/* instruction we cannot emulate */
				thread->recover = 0;
				return FALSE;
			}
			/* one instruction was handled */
			break;
		}
		/* advance the (16-bit) instruction pointer */
		regs->eip = (regs->eip & 0xffff0000 | eip);
	}
	else {
		/* not a trap this assist handles */
		thread->recover = 0;
		return FALSE;
	}

	/*
	 * Simulated interrupt delivery: if virtual interrupts are
	 * enabled and no emulated iret is outstanding, dispatch the
	 * first pending, unmasked interrupt from the thread's table.
	 */
	if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) {
		struct v86_interrupt_table *int_table;
		int int_count;
		int vec;
		int i;

		int_table = (struct v86_interrupt_table *) v86->int_table;
		int_count = v86->int_count;

		vec = 0;
		for (i = 0; i < int_count; int_table++, i++) {
			if (!int_table->mask && int_table->count > 0) {
				int_table->count--;
				vec = int_table->vec;
				break;
			}
		}
		if (vec != 0) {
			vm_offset_t	sp;
			struct iret_16	iret_16;
			struct int_vec	int_vec;

			/* push an iret frame on the v86 stack */
			sp = regs->uesp & 0xffff;
			if (sp < sizeof(struct iret_16))
				goto stack_error;
			sp -= sizeof(struct iret_16);
			iret_16.ip = regs->eip;
			iret_16.cs = regs->cs;
			iret_16.flags = regs->efl & 0xFFFF;
			if ((v86->flags & EFL_TF) == 0)
				iret_16.flags &= ~EFL_TF;
			else	iret_16.flags |= EFL_TF;

			/* fetch handler cs:ip from the real-mode
			   interrupt vector table at linear address 0 */
			(void) memcpy((char *) &int_vec,
				      (char *) (sizeof(struct int_vec) * vec),
				      sizeof (struct int_vec));
			if (copyout((char *)&iret_16,
				    (char *)Addr8086(regs->ss,sp),
				    sizeof(struct iret_16)))
				goto addr_error;
			/* vector to the handler, interrupts disabled */
			regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
			regs->eip = int_vec.ip;
			regs->cs = int_vec.cs;
			regs->efl &= ~EFL_TF;
			v86->flags &= ~(EFL_IF | EFL_TF);
			v86->flags |= V86_IRET_PENDING;
		}
	}

	thread->recover = 0;
	return TRUE;

	/*
	 * Faulting user-memory access lands here (recovery point set
	 * via the inline asm above): abandon the emulation.
	 */
    addr_error:
	__asm__("addr_error:;");
	thread->recover = 0;
	return FALSE;

	/* v86 stack over/underflow: report as a stack fault. */
    stack_error:
	thread->recover = 0;
	regs->trapno = T_STACK_FAULT;
	return FALSE;
}
extern void log_thread_action (thread_t, char *);
/*
 * i386_astintr:
 *
 *	Take an AST (asynchronous system trap).  A pending FP-error
 *	AST is serviced directly; anything else is handed to
 *	ast_taken().  "preemption" is nonzero when called for a
 *	preemption request (only consumed by the disabled XXX block).
 */
void
i386_astintr(int preemption)
{
	int		mycpu;
	ast_t		mask = AST_ALL;
	spl_t		s;
	thread_t	self = current_thread();

	s = splsched();		/* block interrupts to check reasons */
	mp_disable_preemption();
	mycpu = cpu_number();
	if (need_ast[mycpu] & AST_I386_FP) {
		/*
		 * AST was for a delayed floating-point exception:
		 * deliver it with interrupts re-enabled.
		 */
		ast_off(AST_I386_FP);
		mp_enable_preemption();
		splx(s);
		fpexterrflt();
	}
	else {
#ifdef XXX
		/* NOTE(review): disabled preemptibility fast-path,
		   kept under #ifdef XXX as in the original. */
		if (preemption) {
			thread_lock (self);
			if (thread_not_preemptable(self) || self->preempt) {
				ast_off(AST_URGENT);
				thread_unlock (self);
				mp_enable_preemption();
				splx(s);
				return;
			}
			else mask = AST_PREEMPT;
			mp_enable_preemption();
			thread_unlock (self);
		} else {
			mp_enable_preemption();
		}
#else
		mp_enable_preemption();
#endif
		/* ast_taken is entered at splsched (s) */
		ast_taken(mask, s
#if FAST_IDLE
			  ,NO_IDLE_THREAD
#endif	/* FAST_IDLE */
			  );
	}
}
/*
 * i386_exception:
 *
 *	Raise a Mach exception from trap context.  Clears any pending
 *	FP AST first (at splsched, preemption disabled), then calls
 *	exception() with a two-element code vector.
 */
void
i386_exception(
	int exc,
	int code,
	int subcode)
{
	exception_data_type_t	codes[EXCEPTION_CODE_MAX];
	spl_t			s = splsched();

	/* Cancel a pending FP-error AST before delivery. */
	mp_disable_preemption();
	ast_off(AST_I386_FP);
	mp_enable_preemption();
	splx(s);

	codes[0] = code;	/* machine-dependent exception code */
	codes[1] = subcode;	/* machine-dependent subcode */
	exception(exc, codes, 2);
	/*NOTREACHED*/
}
/*
 * check_io_fault:
 *
 *	Decide whether a general protection fault was caused by an
 *	I/O instruction, and if so try to emulate it.  Returns TRUE
 *	if the fault has been handled (instruction emulated, or a
 *	retry arranged); FALSE if it was not an emulatable I/O fault.
 */
boolean_t
check_io_fault(
	struct i386_saved_state *regs)
{
	boolean_t	operand_16bit = FALSE;
	int		ip = regs->eip;
	int		byte;
	int		port = 0;

	/*
	 * Scan forward over prefix bytes until the I/O opcode itself
	 * is found; any other opcode means this was not an I/O fault.
	 */
	for (;;) {
		byte = inst_fetch(ip, regs->cs);
		ip++;

		if (byte == 0x66) {		/* operand-size prefix */
			operand_16bit = TRUE;
			continue;
		}
		if (byte == 0xf3 || byte == 0x26 || byte == 0x2e ||
		    byte == 0x36 || byte == 0x3e || byte == 0x64 ||
		    byte == 0x65)
			continue;		/* rep / segment prefixes */

		if (byte >= 0xE4 && byte <= 0xE7) {
			/* in/out with immediate port operand */
			port = inst_fetch(ip, regs->cs);
			ip++;
			break;
		}
		if ((byte >= 0xEC && byte <= 0xEF) ||
		    (byte >= 0x6C && byte <= 0x6F)) {
			/* in/out/ins/outs with port in %dx */
			port = regs->edx & 0xFFFF;
			break;
		}
		return FALSE;			/* not an I/O instruction */
	}

	/* flag a 16-bit operand (no 0x66 prefix) for emulate_io */
	if (operand_16bit)
		byte |= 0x6600;

	switch (emulate_io(regs, byte, port)) {
	    case EM_IO_DONE:
		regs->eip = ip;			/* skip the instruction */
		return TRUE;
	    case EM_IO_RETRY:
		return TRUE;			/* port enabled: retry */
	    case EM_IO_ERROR:
		return FALSE;
	}
	return FALSE;
}
/*
 * kernel_preempt_check:
 *
 *	If an urgent AST is pending and we are at the outermost
 *	interrupt level, trap to vector 0xff so the preemption AST is
 *	taken immediately; otherwise just re-enable preemption.
 */
void
kernel_preempt_check (void)
{
	mp_disable_preemption();
	if ((need_ast[cpu_number()] & AST_URGENT) &&
#if NCPUS > 1
	    get_interrupt_level() == 1	/* only this interrupt active */
#else
	    get_interrupt_level() == 0	/* not in interrupt context */
#endif
	    ) {
		mp_enable_preemption_no_check();
		/* software trap into the preemption handler */
		__asm__ volatile (" int $0xff");
	} else {
		mp_enable_preemption_no_check();
	}
}
#if MACH_KDB
extern void db_i386_state(struct i386_saved_state *regs);
#include <ddb/db_output.h>
/*
 * db_i386_state:
 *
 *	Dump the saved i386 register state to the debugger console,
 *	one register per line.  Segment registers are masked to their
 *	low byte, matching the original display.
 */
void
db_i386_state(
	struct i386_saved_state *regs)
{
	unsigned int i;
	struct {
		char	*fmt;
		int	val;
	} row[] = {
		{ "eip %8x\n",	regs->eip },
		{ "trap %8x\n",	regs->trapno },
		{ "err %8x\n",	regs->err },
		{ "efl %8x\n",	regs->efl },
		{ "ebp %8x\n",	regs->ebp },
		{ "esp %8x\n",	regs->esp },
		{ "uesp %8x\n",	regs->uesp },
		{ "cs %8x\n",	regs->cs & 0xff },
		{ "ds %8x\n",	regs->ds & 0xff },
		{ "es %8x\n",	regs->es & 0xff },
		{ "fs %8x\n",	regs->fs & 0xff },
		{ "gs %8x\n",	regs->gs & 0xff },
		{ "ss %8x\n",	regs->ss & 0xff },
		{ "eax %8x\n",	regs->eax },
		{ "ebx %8x\n",	regs->ebx },
		{ "ecx %8x\n",	regs->ecx },
		{ "edx %8x\n",	regs->edx },
		{ "esi %8x\n",	regs->esi },
		{ "edi %8x\n",	regs->edi },
	};

	for (i = 0; i < sizeof(row)/sizeof(row[0]); i++)
		db_printf(row[i].fmt, row[i].val);
}
#endif