#include <kern/ast.h>
#include <kern/locks.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_pageout.h>
#include <mach/sdt.h>
#include <sys/kdebug.h>
#if defined(__x86_64__) && CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <kern/hv_support.h>
/* Non-zero once hv_support_init() detects hardware virtualization support. */
int hv_support_available = 0;

/* Boot-arg style kill switch; not written anywhere in this file. */
int hv_disable = 0;

/*
 * Callbacks installed by the hypervisor kext via hv_set_callbacks().
 * All slots start NULL; writes are serialized by hv_support_lck_mtx.
 */
hv_callbacks_t hv_callbacks = {
	.dispatch = NULL,
	.preempt = NULL,
	.suspend = NULL,
	.thread_destroy = NULL,
	.task_destroy = NULL,
	.volatile_state = NULL,
	.resume = NULL,
	.memory_pressure = NULL,
};
/*
 * Per-target-type trap dispatch tables (task-level and thread-level traps),
 * filled in by hv_set_traps() and torn down by hv_release_traps().
 * trap_count == 0 means "no table installed"; readers in HV_TRAP_DISPATCH
 * check trap_count before dereferencing traps.
 */
static hv_trap_table_t hv_trap_table[] = {
	[HV_TASK_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	},
	[HV_THREAD_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	}
};

/* Non-zero while a callback set is installed; guarded by hv_support_lck_mtx. */
static int hv_callbacks_enabled = 0;

/* Mutex serializing installation/removal of traps and callbacks. */
static LCK_GRP_DECLARE(hv_support_lck_grp, "hv_support");
static LCK_MTX_DECLARE(hv_support_lck_mtx, &hv_support_lck_grp);
/*
 * Probe for hardware virtualization support at boot.
 * On x86_64 with CONFIG_VMX this queries VMX capability; on all other
 * configurations hv_support_available keeps its static 0.
 */
void
hv_support_init(void)
{
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif
}
/* Return the cached result of the boot-time hardware support probe. */
int
hv_get_support(void)
{
	return hv_support_available;
}
/*
 * Associate an opaque hypervisor object with the current task.
 * Ownership of `target` stays with the caller (the hypervisor kext).
 */
void
hv_set_task_target(void *target)
{
	current_task()->hv_task_target = target;
}
/*
 * Associate an opaque hypervisor object with the current thread.
 * Ownership of `target` stays with the caller (the hypervisor kext).
 */
void
hv_set_thread_target(void *target)
{
	current_thread()->hv_thread_target = target;
}
/* Return the opaque hypervisor object bound to the current task (may be NULL). */
void*
hv_get_task_target(void)
{
	return current_task()->hv_task_target;
}
/* Return the opaque hypervisor object bound to the current thread (may be NULL). */
void*
hv_get_thread_target(void)
{
	return current_thread()->hv_thread_target;
}
/*
 * Report whether the given per-thread state is "volatile" (modified while
 * the guest ran) for the current thread.  On x86_64, debug state is volatile
 * exactly when the thread has an allocated debug-state save area; every other
 * state (and every other architecture) reports not-volatile.
 */
int
hv_get_volatile_state(hv_volatile_state_t state)
{
#if (defined(__x86_64__))
	if (state == HV_DEBUG_STATE) {
		return current_thread()->machine.ids != NULL;
	}
#endif
	return 0;
}
/*
 * Install a trap dispatch table for task- or thread-level hypervisor traps.
 * Fails with KERN_FAILURE if a table is already installed for this type.
 *
 * Publication order matters: the traps pointer is stored first, then a
 * memory barrier, then the count.  Lock-free readers (HV_TRAP_DISPATCH)
 * check trap_count before dereferencing traps, so they can never observe
 * a non-zero count with a stale/NULL pointer.
 */
kern_return_t
hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
    unsigned trap_count)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	kern_return_t kr = KERN_FAILURE;
	lck_mtx_lock(&hv_support_lck_mtx);
	if (trap_table->trap_count == 0) {
		trap_table->traps = traps;
		OSMemoryBarrier();
		trap_table->trap_count = trap_count;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(&hv_support_lck_mtx);
	return kr;
}
/*
 * Remove the trap dispatch table for the given trap type.
 *
 * Teardown mirrors hv_set_traps() in reverse: zero the count first, then a
 * memory barrier, then clear the pointer — so lock-free readers that pass
 * the count check never dereference a cleared pointer.
 *
 * NOTE(review): a reader that loaded a non-zero trap_count just before this
 * runs may still be dispatching through the old table; presumably callers
 * guarantee quiescence before releasing — confirm against the kext.
 */
void
hv_release_traps(hv_trap_type_t trap_type)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	lck_mtx_lock(&hv_support_lck_mtx);
	trap_table->trap_count = 0;
	OSMemoryBarrier();
	trap_table->traps = NULL;
	lck_mtx_unlock(&hv_support_lck_mtx);
}
/*
 * Install the hypervisor callback set.  Only one set may be installed at a
 * time: returns KERN_FAILURE if callbacks are already enabled, KERN_SUCCESS
 * otherwise.  Serialized against release by hv_support_lck_mtx.
 */
kern_return_t
hv_set_callbacks(hv_callbacks_t callbacks)
{
	kern_return_t result = KERN_FAILURE;

	lck_mtx_lock(&hv_support_lck_mtx);
	if (hv_callbacks_enabled == 0) {
		/* Copy the whole callback struct, then flip the enable flag. */
		hv_callbacks = callbacks;
		hv_callbacks_enabled = 1;
		result = KERN_SUCCESS;
	}
	lck_mtx_unlock(&hv_support_lck_mtx);

	return result;
}
/*
 * Remove the installed hypervisor callback set and reset every slot to NULL.
 *
 * Fix: the reset literal previously omitted .memory_pressure.  C designated
 * initializers zero omitted members, so behavior was already correct, but the
 * asymmetry with the file-scope initializer of hv_callbacks was an accident
 * waiting to happen when new callback slots are added.  List every member
 * explicitly so the two initializers stay in lockstep.
 */
void
hv_release_callbacks(void)
{
	lck_mtx_lock(&hv_support_lck_mtx);
	hv_callbacks = (hv_callbacks_t) {
		.dispatch = NULL,
		.preempt = NULL,
		.suspend = NULL,
		.thread_destroy = NULL,
		.task_destroy = NULL,
		.volatile_state = NULL,
		.resume = NULL,
		.memory_pressure = NULL,
	};
	hv_callbacks_enabled = 0;
	lck_mtx_unlock(&hv_support_lck_mtx);
}
/*
 * Notify the hypervisor that the system is suspending.
 *
 * NOTE(review): unlike hv_resume(), this calls hv_callbacks.suspend without
 * a NULL check — presumably .suspend is a mandatory callback that any kext
 * enabling callbacks must supply; confirm before relying on that invariant.
 */
void
hv_suspend(void)
{
	if (hv_callbacks_enabled) {
		hv_callbacks.suspend();
	}
}
/*
 * Notify the hypervisor that the system is resuming from suspend.
 * The resume callback is optional, so it is invoked only when present.
 */
void
hv_resume(void)
{
	if (!hv_callbacks_enabled) {
		return;
	}
	if (hv_callbacks.resume) {
		hv_callbacks.resume();
	}
}
/*
 * Bounds-checked, lock-free dispatch into a trap table: invalid indices
 * (including the trap_count == 0 "no table" state) fall through to
 * KERN_INVALID_ARGUMENT.  `target`/`argument` are evaluated only on the
 * in-bounds path.  `index` is evaluated twice — callers must pass a
 * side-effect-free expression.
 */
#define HV_TRAP_DISPATCH(type, index, target, argument) \
	((__probable(index < hv_trap_table[type].trap_count)) ? \
	    hv_trap_table[type].traps[index](target, argument) \
	    : KERN_INVALID_ARGUMENT)
/*
 * Mach trap entry for task-level hypervisor traps: dispatch `index` into the
 * task trap table with the current task's hypervisor object as the target.
 * Returns KERN_INVALID_ARGUMENT for out-of-range indices or when no table
 * is installed.
 */
kern_return_t
hv_task_trap(uint64_t index, uint64_t arg)
{
	hv_trap_table_t *table = &hv_trap_table[HV_TASK_TRAP];

	if (__probable(index < table->trap_count)) {
		return table->traps[index](hv_get_task_target(), arg);
	}
	return KERN_INVALID_ARGUMENT;
}
/*
 * Mach trap entry for thread-level hypervisor traps: dispatch `index` into
 * the thread trap table with the current thread's hypervisor object as the
 * target.  Returns KERN_INVALID_ARGUMENT for out-of-range indices or when
 * no table is installed.
 */
kern_return_t
hv_thread_trap(uint64_t index, uint64_t arg)
{
	hv_trap_table_t *table = &hv_trap_table[HV_THREAD_TRAP];

	if (__probable(index < table->trap_count)) {
		return table->traps[index](hv_get_thread_target(), arg);
	}
	return KERN_INVALID_ARGUMENT;
}
boolean_t
hv_ast_pending(void)
{
return current_cpu_datap()->cpu_pending_ast != 0;
}
/*
 * Port-notification hook for hypervisor doorbell messages.  This build
 * configuration does not support it, so any call is a fatal kernel bug.
 */
void __attribute__((__noreturn__))
hv_port_notify(mach_msg_header_t *msg __unused)
{
	panic("%s: not supported in this configuration", __func__);
}
/*
 * Trace a guest-entry event: fire the DTrace guest__enter probe and open a
 * kdebug HV_GUEST_ENTER interval (closed by hv_trace_guest_exit/error).
 */
void
hv_trace_guest_enter(uint32_t vcpu_id, uint64_t *vcpu_regs)
{
	DTRACE_HV2(guest__enter, uint32_t, vcpu_id, uint64_t *, vcpu_regs);
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_START, vcpu_id);
}
/*
 * Trace a guest-exit event: close the kdebug HV_GUEST_ENTER interval opened
 * in hv_trace_guest_enter (same code, DBG_FUNC_END) with the exit reason,
 * then fire the DTrace guest__exit probe.
 */
void
hv_trace_guest_exit(uint32_t vcpu_id, uint64_t *vcpu_regs, uint32_t reason)
{
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_END, vcpu_id,
	    reason);
	DTRACE_HV2(guest__exit, uint32_t, vcpu_id, uint64_t *, vcpu_regs);
}
/*
 * Trace a failed guest entry/exit.  Deliberately reuses the HV_GUEST_ENTER
 * code with DBG_FUNC_END (reason -1) so the interval opened by
 * hv_trace_guest_enter is still closed, then emits a distinct HV_GUEST_ERROR
 * event and the DTrace guest__error probe with the failure detail.
 */
void
hv_trace_guest_error(uint32_t vcpu_id, uint64_t *vcpu_regs, uint32_t failure,
    uint32_t error)
{
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_END, vcpu_id,
	    -1, failure, error);
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ERROR), vcpu_id, failure, error);
	DTRACE_HV3(guest__error, uint32_t, vcpu_id, uint64_t *, vcpu_regs, uint32_t, failure);
}