#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>
#include <sys/kdebug.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>
#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
/*
 * pmap_zero_page: zero the full contents of one physical page.
 * pn must name a real page — neither fictitious nor a guard page.
 */
void
pmap_zero_page(
	ppnum_t pn)
{
	pmap_paddr_t paddr;

	assert(pn != vm_page_fictitious_addr);
	assert(pn != vm_page_guard_addr);

	/* Convert the page number to its physical byte address and clear it. */
	paddr = i386_ptob(pn);
	bzero_phys((addr64_t)paddr, PAGE_SIZE);
}
/*
 * pmap_zero_part_page: zero `len` bytes of the physical page `pn`,
 * starting `offset` bytes into the page.  The region must lie wholly
 * within the page, and pn must be a real (non-fictitious, non-guard) page.
 */
void
pmap_zero_part_page(
	ppnum_t pn,
	vm_offset_t offset,
	vm_size_t len)
{
	pmap_paddr_t base;

	assert(pn != vm_page_fictitious_addr);
	assert(pn != vm_page_guard_addr);
	assert(offset + len <= PAGE_SIZE);

	base = i386_ptob(pn);
	bzero_phys((addr64_t)(base + offset), len);
}
/*
 * pmap_copy_part_page: copy `len` bytes from offset `src_offset` of
 * physical page `psrc` to offset `dst_offset` of physical page `pdst`.
 * Both regions must fit entirely within their respective pages.
 */
void
pmap_copy_part_page(
	ppnum_t psrc,
	vm_offset_t src_offset,
	ppnum_t pdst,
	vm_offset_t dst_offset,
	vm_size_t len)
{
	pmap_paddr_t src_pa;
	pmap_paddr_t dst_pa;

	assert(psrc != vm_page_fictitious_addr);
	assert(pdst != vm_page_fictitious_addr);
	assert(psrc != vm_page_guard_addr);
	assert(pdst != vm_page_guard_addr);

	src_pa = i386_ptob(psrc);
	dst_pa = i386_ptob(pdst);

	/* Neither transfer may run off the end of its page. */
	assert((((uint32_t)dst_pa & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
	assert((((uint32_t)src_pa & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);

	bcopy_phys((addr64_t)src_pa + (src_offset & INTEL_OFFMASK),
	           (addr64_t)dst_pa + (dst_offset & INTEL_OFFMASK),
	           len);
}
/*
 * pmap_copy_part_lpage: copy `len` bytes from the kernel virtual
 * address `src` into physical page `pdst` at `dst_offset`.
 *
 * The destination page is reached through a temporary per-cpu mapping
 * window; preemption is disabled for the lifetime of that window so we
 * stay on the cpu whose window we hold.  The PTE is pre-marked
 * referenced and modified since we are about to write through it.
 */
void
pmap_copy_part_lpage(
	vm_offset_t src,
	ppnum_t pdst,
	vm_offset_t dst_offset,
	vm_size_t len)
{
	mapwindow_t *win;

	assert(pdst != vm_page_fictitious_addr);
	assert(pdst != vm_page_guard_addr);
	assert((dst_offset + len) <= PAGE_SIZE);

	mp_disable_preemption();

	win = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW |
	                         (i386_ptob(pdst) & PG_FRAME) |
	                         INTEL_PTE_REF | INTEL_PTE_MOD);
	memcpy((void *)(win->prv_CADDR + (dst_offset & INTEL_OFFMASK)),
	       (void *)src, len);
	pmap_put_mapwindow(win);

	mp_enable_preemption();
}
/*
 * pmap_copy_part_rpage: copy `len` bytes from physical page `psrc`
 * at `src_offset` to the kernel virtual address `dst`.
 *
 * The source page is reached through a temporary per-cpu mapping
 * window; preemption stays disabled while the window is held.  Only
 * INTEL_PTE_REF is pre-set (no MOD) since the window is read from,
 * not written.
 */
void
pmap_copy_part_rpage(
	ppnum_t psrc,
	vm_offset_t src_offset,
	vm_offset_t dst,
	vm_size_t len)
{
	mapwindow_t *win;

	assert(psrc != vm_page_fictitious_addr);
	assert(psrc != vm_page_guard_addr);
	assert((src_offset + len) <= PAGE_SIZE);

	mp_disable_preemption();

	win = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW |
	                         (i386_ptob(psrc) & PG_FRAME) |
	                         INTEL_PTE_REF);
	memcpy((void *)dst,
	       (void *)(win->prv_CADDR + (src_offset & INTEL_OFFMASK)), len);
	pmap_put_mapwindow(win);

	mp_enable_preemption();
}
/*
 * kvtophys: translate a kernel virtual address to its physical address
 * by walking the kernel pmap.  Returns 0 when no valid mapping exists.
 * Preemption is disabled around the pte lookup so the walk is against
 * a stable view; re-enabled without a preemption check on the way out.
 */
addr64_t
kvtophys(
	vm_offset_t addr)
{
	pt_entry_t *pte;
	pmap_paddr_t pa = 0;

	mp_disable_preemption();
	pte = pmap_pte(kernel_pmap, (vm_map_offset_t)addr);
	if (pte != PT_ENTRY_NULL) {
		/* Frame from the pte, page offset from the virtual address. */
		pa = pte_to_pa(*pte) | (addr & INTEL_OFFMASK);
	}
	mp_enable_preemption_no_check();

	return ((addr64_t)pa);
}