#ifdef KERNEL_PRIVATE
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1
#ifndef ASSEMBLER
#include <platforms.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/lock.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
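/*
 * Define the generic in terms of the specific.
 */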
#define INTEL_PGBYTES I386_PGBYTES
#define INTEL_PGSHIFT I386_PGSHIFT
#define intel_btop(x) i386_btop(x)
#define intel_ptob(x) i386_ptob(x)
#define intel_round_page(x) i386_round_page(x)
#define intel_trunc_page(x) i386_trunc_page(x)
#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
#define round_intel_to_vm(x) round_i386_to_vm(x)
#define vm_to_intel(x) vm_to_i386(x)
#endif /* ASSEMBLER */
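/*
 * PAE paging geometry: four page-directory pages (NPGPTD) together map
 * the 32-bit space; each table level holds 512 entries (PTEMASK), with
 * pdes covering 2MB (1 << PDESHIFT) and ptes 4KB (1 << PTESHIFT).
 */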
#define NPGPTD 4
#define PDESHIFT 21
#define PTEMASK 0x1ff
#define PTEINDX 3
#define PTESHIFT 12
#define PDESIZE sizeof(pd_entry_t)
#define PTESIZE sizeof(pt_entry_t)
#define INTEL_OFFMASK (I386_PGBYTES - 1)
#define PG_FRAME 0x000FFFFFFFFFF000ULL
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
#define NPTDPG (PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPTD (NPGPTD << PAGE_SHIFT)
#define NPDEPTD (NBPTD / (sizeof (pd_entry_t)))
#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPDE (1 << PDESHIFT)
#define PDEMASK (NBPDE - 1)
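/*
 * 64-bit (IA-32e) four-level page tables: PML4, PDPT, PD and PT entries
 * are all 64 bits wide; each table is one page of 512 (1 << 9) entries,
 * giving the 39/30/21/12-bit shifts below.
 */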
typedef uint64_t pml4_entry_t;
#define NPML4PG (PAGE_SIZE/(sizeof (pml4_entry_t)))
#define PML4SHIFT 39
#define PML4PGSHIFT 9
#define NBPML4 (1ULL << PML4SHIFT)
#define PML4MASK (NBPML4-1)
#define PML4_ENTRY_NULL ((pml4_entry_t *) 0)
typedef uint64_t pdpt_entry_t;
#define NPDPTPG (PAGE_SIZE/(sizeof (pdpt_entry_t)))
#define PDPTSHIFT 30
#define PDPTPGSHIFT 9
#define NBPDPT (1 << PDPTSHIFT)
#define PDPTMASK (NBPDPT-1)
#define PDPT_ENTRY_NULL ((pdpt_entry_t *) 0)
typedef uint64_t pd_entry_t;
#define NPDPG (PAGE_SIZE/(sizeof (pd_entry_t)))
#define PDSHIFT 21
#define PDPGSHIFT 9
#define NBPD (1 << PDSHIFT)
#define PDMASK (NBPD-1)
#define PD_ENTRY_NULL ((pd_entry_t *) 0)
typedef uint64_t pt_entry_t;
#define NPTPG (PAGE_SIZE/(sizeof (pt_entry_t)))
#define PTSHIFT 12
#define PTPGSHIFT 9
#define NBPT (1 << PTSHIFT)
#define PTMASK (NBPT-1)
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
typedef uint64_t pmap_paddr_t;
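/*
 * Atomic 64-bit store of a page table entry.
 * The old value is loaded into %edx:%eax, the new value into %ecx:%ebx,
 * and cmpxchg8b stores the new value at entryp (in %edi) only if the
 * entry still matches; otherwise the current value has been reloaded
 * into %edx:%eax and we retry.
 */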
static inline void
pmap_store_pte(pt_entry_t *entryp, pt_entry_t value)
{
__asm__ volatile(
" movl (%0), %%eax \n\t"
" movl 4(%0), %%edx \n\t"
"1: \n\t"
" cmpxchg8b (%0) \n\t"
" jnz 1b"
:
: "D" (entryp),
"b" ((uint32_t)value),
"c" ((uint32_t)(value >> 32))
: "eax", "edx", "memory");
}
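/*
 * Atomic 64-bit compare and exchange of a page table entry.
 * Returns TRUE if the comparison succeeded and the new value was
 * stored, FALSE if the entry changed underneath us.  cmpxchg8b
 * compares %edx:%eax against the memory operand and, on a match,
 * stores %ecx:%ebx; the lock prefix makes this atomic across cpus.
 */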
static inline boolean_t
pmap_cmpx_pte(pt_entry_t *entryp, pt_entry_t old, pt_entry_t new)
{
boolean_t ret;
asm volatile(
" lock; cmpxchg8b (%1) \n\t"
" setz %%al \n\t"
" movzbl %%al,%0"
: "=a" (ret)
: "D" (entryp),
"a" ((uint32_t)old),
"d" ((uint32_t)(old >> 32)),
"b" ((uint32_t)new),
"c" ((uint32_t)(new >> 32))
: "memory");
return ret;
}
#define pmap_update_pte(entryp, old, new) \
while (!pmap_cmpx_pte((entryp), (old), (new)))
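/*
 * Because pmap_update_pte() is a macro, the 'old' and 'new' argument
 * expressions are re-evaluated on every retry.  Callers typically pass
 * expressions that re-read the entry, e.g. (a sketch, not a required
 * idiom):
 *
 *	pmap_update_pte(pte, *pte, *pte | INTEL_PTE_WIRED);
 */

/*
 * Fan-out of a full 4-level space: each table page holds 512 entries,
 * so these give the maximum number of pages reachable at each level.
 */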
#define NPML4PGS (1ULL * (PAGE_SIZE/(sizeof (pml4_entry_t))))
#define NPDPTPGS (NPML4PGS * (PAGE_SIZE/(sizeof (pdpt_entry_t))))
#define NPDEPGS (NPDPTPGS * (PAGE_SIZE/(sizeof (pd_entry_t))))
#define NPTEPGS (NPDEPGS * (PAGE_SIZE/(sizeof (pt_entry_t))))
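/*
 * The 64-bit kernel runs "uber-mapped" in the topmost pml4 slot
 * (KERNEL_UBER_PML4_INDEX), i.e. in the highest 512GB of the virtual
 * address space.
 */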
#define KERNEL_UBER_PML4_INDEX 511
#define KERNEL_UBER_BASE (0ULL - NBPML4)
#define KERNEL_UBER_BASE_HI32 ((uint32_t)(KERNEL_UBER_BASE >> 32))
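/*
 * WIMG cache attribute encodings used when mapping pages: ordinary
 * memory is coherent/copyback; IO space is additionally uncached and
 * guarded.
 */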
#define VM_WIMG_COPYBACK VM_MEM_COHERENT
#define VM_WIMG_DEFAULT VM_MEM_COHERENT
#define VM_WIMG_IO (VM_MEM_COHERENT | \
VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))
#define VADDR64(pmi, pdi, pti) ((vm_offset_t)(((pmi)<<PML4SHIFT)|((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))
#ifndef KVA_PAGES
#define KVA_PAGES 1024
#endif
#ifndef NKPT
#define NKPT 500
#endif
#ifndef NKPDE
#define NKPDE (KVA_PAGES - 1)
#endif
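/*
 * Per-cpu items replicated into the high shared window, one slot set
 * per cpu: interrupt stacks, a descriptor page, and the LDT pages.
 */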
enum high_cpu_types {
HIGH_CPU_ISS0,
HIGH_CPU_ISS1,
HIGH_CPU_DESC,
HIGH_CPU_LDT_BEGIN,
HIGH_CPU_LDT_END = HIGH_CPU_LDT_BEGIN + (LDTSZ / 512) - 1,
HIGH_CPU_END
};
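/*
 * Layout of the fixed mappings in the high shared window: trampoline
 * code, GDT/IDT/LDT, the kernel/double-fault/debug TSSes, followed by
 * the per-cpu slots enumerated above.
 */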
enum high_fixed_addresses {
HIGH_FIXED_TRAMPS,
HIGH_FIXED_TRAMPS_END,
HIGH_FIXED_GDT,
HIGH_FIXED_IDT,
HIGH_FIXED_LDT_BEGIN,
HIGH_FIXED_LDT_END = HIGH_FIXED_LDT_BEGIN + (LDTSZ / 512) - 1,
HIGH_FIXED_KTSS,
HIGH_FIXED_DFTSS,
HIGH_FIXED_DBTSS,
HIGH_FIXED_CPUS_BEGIN,
HIGH_FIXED_CPUS_END = HIGH_FIXED_CPUS_BEGIN + (HIGH_CPU_END * MAX_CPUS) - 1,
};
#define KPTDI (0x000) /* start of kernel virtual pde's */
#define PTDPTDI (0x7F4) /* ptd entry that points to ptd */
#define APTDPTDI (0x7F8) /* alt ptd entry that points to APTD */
#define UMAXPTDI (0x7F8) /* ptd entry for user space end */
#define UMAXPTEOFF (NPTEPG) /* pte entry for user space end */
#define KERNBASE VADDR(KPTDI,0)
#define HIGH_MEM_BASE ((uint32_t)(-NBPDE)) /* pde-sized window at the top of 32-bit space */
#define pmap_index_to_virt(x) (HIGH_MEM_BASE | ((unsigned)(x) << PAGE_SHIFT))
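/*
 * Convert a virtual address to its page directory/table index at each
 * level, and back.
 */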
#define pdenum(pmap, a) (((vm_offset_t)(a) >> PDESHIFT) & PDEMASK)
#define pdeidx(pmap, a) (((a) >> PDSHIFT) & ((1ULL<<(48 - PDSHIFT)) -1))
#define pdptidx(pmap, a) (((a) >> PDPTSHIFT) & ((1ULL<<(48 - PDPTSHIFT)) -1))
#define pml4idx(pmap, a) (((a) >> PML4SHIFT) & ((1ULL<<(48 - PML4SHIFT)) -1))
#define pdetova(a) ((vm_offset_t)(a) << PDESHIFT)
#define ptenum(a) (((vm_offset_t)(a) >> PTESHIFT) & PTEMASK)
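/*
 * Hardware pte bit definitions (to be used directly against ptes,
 * without bit fields).
 */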
#define INTEL_PTE_VALID 0x00000001
#define INTEL_PTE_WRITE 0x00000002
#define INTEL_PTE_RW 0x00000002
#define INTEL_PTE_USER 0x00000004
#define INTEL_PTE_WTHRU 0x00000008
#define INTEL_PTE_NCACHE 0x00000010
#define INTEL_PTE_REF 0x00000020
#define INTEL_PTE_MOD 0x00000040
#define INTEL_PTE_PS 0x00000080
#define INTEL_PTE_GLOBAL 0x00000100
#define INTEL_PTE_WIRED 0x00000200
#define INTEL_PTE_PFN PG_FRAME
#define INTEL_PTE_PTA 0x00000080
#define INTEL_PTE_NX (1ULL << 63)
#define INTEL_PTE_INVALID 0
#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
#define pte_kernel_rw(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_RW))
#define pte_kernel_ro(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID))
#define pte_user_rw(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER|INTEL_PTE_RW))
#define pte_user_ro(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER))
#define PMAP_DEFAULT_CACHE 0
#define PMAP_INHIBIT_CACHE 1
#define PMAP_GUARDED_CACHE 2
#define PMAP_ACTIVATE_CACHE 4
#define PMAP_NO_GUARD_CACHE 8
#ifndef ASSEMBLER
#include <sys/queue.h>
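/*
 * Address of current and alternate address space page table maps and
 * directories.
 */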
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde[], APTDpde[], Upde;
extern pd_entry_t *IdlePTD;
extern pdpt_entry_t *IdlePDPT;
extern pmap_paddr_t lo_kernel_cr3;
extern pml4_entry_t *IdlePML4;
extern pdpt_entry_t *IdlePDPT64;
extern addr64_t kernel64_cr3;
extern boolean_t no_shared_cr3;
extern uint64_t pmap_pv_hashlist_walks;
extern uint64_t pmap_pv_hashlist_cnts;
extern uint32_t pmap_pv_hashlist_max;
#define vtopte(va) (PTmap + i386_btop((vm_offset_t)va))
typedef volatile long cpu_set;
struct md_page {
int pv_list_count;
TAILQ_HEAD(,pv_entry) pv_list;
};
#include <vm/vm_page.h>
struct pmap {
	pd_entry_t *dirbase; /* page directory pointer */
	pmap_paddr_t pdirbase; /* phys. address of dirbase */
	vm_object_t pm_obj; /* object to hold the page tables */
	int ref_count; /* reference count */
	int nx_enabled; /* no-execute enabled for this map */
	task_map_t pm_task_map; /* 32-bit, 64-bit or 64-bit-shared */
	decl_simple_lock_data(,lock) /* lock on map */
	struct pmap_statistics stats; /* map statistics */
	vm_offset_t pm_hold; /* true pdpt zalloc addr */
	pmap_paddr_t pm_cr3; /* physical addr of the top-level table */
	pdpt_entry_t *pm_pdpt; /* KVA of 3rd-level page */
	pml4_entry_t *pm_pml4; /* KVA of top-level PML4 page */
	vm_object_t pm_obj_pdpt; /* holds pdpt pages */
	vm_object_t pm_obj_pml4; /* holds pml4 pages */
	vm_object_t pm_obj_top; /* holds the single top-level page */
	boolean_t pm_shared; /* pmap is shared by multiple tasks */
};
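/*
 * Per-cpu windows for temporarily mapping page-table pages (pdpt, pde
 * and pte levels) plus a pool of general-purpose windows; each window
 * is a (CMAP pte, CADDR kernel va) pair.
 */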
#define PMAP_PDPT_FIRST_WINDOW 0
#define PMAP_PDPT_NWINDOWS 4
#define PMAP_PDE_FIRST_WINDOW (PMAP_PDPT_NWINDOWS)
#define PMAP_PDE_NWINDOWS 4
#define PMAP_PTE_FIRST_WINDOW (PMAP_PDE_FIRST_WINDOW + PMAP_PDE_NWINDOWS)
#define PMAP_PTE_NWINDOWS 4
#define PMAP_NWINDOWS_FIRSTFREE (PMAP_PTE_FIRST_WINDOW + PMAP_PTE_NWINDOWS)
#define PMAP_WINDOW_SIZE 8
#define PMAP_NWINDOWS (PMAP_NWINDOWS_FIRSTFREE + PMAP_WINDOW_SIZE)
typedef struct {
pt_entry_t *prv_CMAP;
caddr_t prv_CADDR;
} mapwindow_t;
typedef struct cpu_pmap {
int pdpt_window_index;
int pde_window_index;
int pte_window_index;
mapwindow_t mapwindow[PMAP_NWINDOWS];
} cpu_pmap_t;
extern mapwindow_t *pmap_get_mapwindow(pt_entry_t pentry);
extern void pmap_put_mapwindow(mapwindow_t *map);
typedef struct pmap_memory_regions {
ppnum_t base;
ppnum_t end;
ppnum_t alloc;
uint32_t type;
} pmap_memory_region_t;
extern unsigned pmap_memory_region_count;
extern unsigned pmap_memory_region_current;
#define PMAP_MEMORY_REGIONS_SIZE 128
extern pmap_memory_region_t pmap_memory_regions[];
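/*
 * Stage the target pmap's top-level physical address and address-map
 * type into this cpu's data; the actual cr3 load is picked up when the
 * cpu next switches to the task.
 */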
static inline void set_dirbase(pmap_t tpmap, __unused int tcpu) {
current_cpu_datap()->cpu_task_cr3 = (pmap_paddr_t)((tpmap)->pm_cr3);
current_cpu_datap()->cpu_task_map = tpmap->pm_task_map;
}
extern void process_pmap_updates(void);
extern void pmap_update_interrupt(void);
extern addr64_t (kvtophys)(
vm_offset_t addr);
extern void pmap_expand(
pmap_t pmap,
vm_map_offset_t addr);
extern pt_entry_t *pmap_pte(
struct pmap *pmap,
vm_map_offset_t addr);
extern pd_entry_t *pmap_pde(
struct pmap *pmap,
vm_map_offset_t addr);
extern pd_entry_t *pmap64_pde(
struct pmap *pmap,
vm_map_offset_t addr);
extern pdpt_entry_t *pmap64_pdpt(
struct pmap *pmap,
vm_map_offset_t addr);
extern vm_offset_t pmap_map(
vm_offset_t virt,
vm_map_offset_t start,
vm_map_offset_t end,
vm_prot_t prot,
unsigned int flags);
extern vm_offset_t pmap_map_bd(
vm_offset_t virt,
vm_map_offset_t start,
vm_map_offset_t end,
vm_prot_t prot,
unsigned int flags);
extern void pmap_bootstrap(
vm_offset_t load_start,
boolean_t IA32e);
extern boolean_t pmap_valid_page(
ppnum_t pn);
extern int pmap_list_resident_pages(
struct pmap *pmap,
vm_offset_t *listp,
int space);
extern void pmap_commpage32_init(
vm_offset_t kernel,
vm_offset_t user,
int count);
extern void pmap_commpage64_init(
vm_offset_t kernel,
vm_map_offset_t user,
int count);
extern struct cpu_pmap *pmap_cpu_alloc(
boolean_t is_boot_cpu);
extern void pmap_cpu_free(
struct cpu_pmap *cp);
extern void pmap_map_block(
pmap_t pmap,
addr64_t va,
ppnum_t pa,
uint32_t size,
vm_prot_t prot,
int attr,
unsigned int flags);
extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_cpu_init(void);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_4GB_pagezero(pmap_t pmap);
extern void pmap_clear_4GB_pagezero(pmap_t pmap);
extern void pmap_load_kernel_cr3(void);
extern vm_offset_t pmap_cpu_high_map_vaddr(int, enum high_cpu_types);
extern vm_offset_t pmap_high_map_vaddr(enum high_cpu_types);
extern vm_offset_t pmap_high_map(pt_entry_t, enum high_cpu_types);
extern vm_offset_t pmap_cpu_high_shared_remap(int, enum high_cpu_types, vm_offset_t, int);
extern vm_offset_t pmap_high_shared_remap(enum high_fixed_addresses, vm_offset_t, int);
extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *, int *, int *);
#include <kern/spl.h>
#if defined(PMAP_ACTIVATE_KERNEL)
#undef PMAP_ACTIVATE_KERNEL
#undef PMAP_DEACTIVATE_KERNEL
#undef PMAP_ACTIVATE_USER
#undef PMAP_DEACTIVATE_USER
#endif
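/*
 * Activation/deactivation macros.  Kernel activation just services any
 * deferred TLB invalidations at splhigh; user activation points this
 * cpu at the new map's page tables.
 */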
#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
spl_t spl; \
\
spl = splhigh(); \
if (current_cpu_datap()->cpu_tlb_invalid) \
process_pmap_updates(); \
splx(spl); \
}
#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
spl_t spl; \
\
spl = splhigh(); \
process_pmap_updates(); \
splx(spl); \
}
#define PMAP_ACTIVATE_MAP(map, my_cpu) { \
register pmap_t tpmap; \
\
tpmap = vm_map_pmap(map); \
set_dirbase(tpmap, my_cpu); \
}
#define PMAP_DEACTIVATE_MAP(map, my_cpu) \
if (vm_map_pmap(map)->pm_task_map == TASK_MAP_64BIT_SHARED) \
pmap_load_kernel_cr3();
#define PMAP_ACTIVATE_USER(th, my_cpu) { \
spl_t spl; \
\
spl = splhigh(); \
PMAP_ACTIVATE_MAP(th->map, my_cpu) \
splx(spl); \
}
#define PMAP_DEACTIVATE_USER(th, my_cpu)
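/* PMAP_DEACTIVATE_USER is intentionally empty. */

/*
 * Context switch: swap in the new thread's address map, then refresh
 * this cpu's copy-window pdes and physical window pte from the new
 * thread's saved state.  A full TLB flush is needed only if the new
 * thread already had copy windows open; otherwise an invlpg of the
 * physical window suffices.
 */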
#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \
spl_t spl; \
pt_entry_t *kpdp; \
pt_entry_t *updp; \
int i; \
int need_flush; \
\
need_flush = 0; \
spl = splhigh(); \
if (old_th->map != new_th->map) { \
PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \
PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \
} \
kpdp = current_cpu_datap()->cpu_copywindow_pdp; \
for (i = 0; i < NCOPY_WINDOWS; i++) { \
if (new_th->machine.copy_window[i].user_base != (user_addr_t)-1) { \
updp = pmap_pde(new_th->map->pmap, \
new_th->machine.copy_window[i].user_base);\
pmap_store_pte(kpdp, updp ? *updp : 0); \
} \
kpdp++; \
} \
splx(spl); \
if (new_th->machine.copyio_state == WINDOWS_OPENED) \
need_flush = 1; \
else \
new_th->machine.copyio_state = WINDOWS_DIRTY; \
if (new_th->machine.physwindow_pte) { \
pmap_store_pte((current_cpu_datap()->cpu_physwindow_ptep), \
new_th->machine.physwindow_pte); \
if (need_flush == 0) \
invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);\
} \
if (need_flush) \
flush_tlb(); \
}
#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
spl_t spl; \
\
spl = splhigh(); \
PMAP_DEACTIVATE_MAP(th->map, my_cpu); \
th->map = new_map; \
PMAP_ACTIVATE_MAP(th->map, my_cpu); \
splx(spl); \
inval_copy_windows(th); \
}
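/*
 * The low bit of cpu_active_cr3 marks the cpu as idle (set) or actively
 * using its pmap (clear); TLB shootdown code tests it to avoid
 * signalling idle cpus.
 */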
#define CPU_CR3_MARK_INACTIVE() \
current_cpu_datap()->cpu_active_cr3 |= 1
#define CPU_CR3_MARK_ACTIVE() \
current_cpu_datap()->cpu_active_cr3 &= ~1
#define CPU_CR3_IS_ACTIVE(cpu) \
((cpu_datap(cpu)->cpu_active_cr3 & 1) == 0)
#define CPU_GET_ACTIVE_CR3(cpu) \
(cpu_datap(cpu)->cpu_active_cr3 & ~1)
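/*
 * Mark this cpu idle, and remove it from the active set, since it is
 * not actively using any pmap.  The shootdown code will notice that it
 * is idle and avoid signalling it, but will queue the update request
 * for when the cpu wakes up.
 */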
#define MARK_CPU_IDLE(my_cpu) { \
\
int s = splhigh(); \
if (!cpu_mode_is64bit() || no_shared_cr3) \
process_pmap_updates(); \
else \
pmap_load_kernel_cr3(); \
CPU_CR3_MARK_INACTIVE(); \
__asm__ volatile("mfence"); \
splx(s); \
}
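/*
 * Mark this cpu active again, and service any pmap updates that were
 * requested while it was idle.
 */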
#define MARK_CPU_ACTIVE(my_cpu) { \
\
int s = splhigh(); \
\
CPU_CR3_MARK_ACTIVE(); \
__asm__ volatile("mfence"); \
\
if (current_cpu_datap()->cpu_tlb_invalid) \
process_pmap_updates(); \
splx(s); \
}
#define PMAP_CONTEXT(pmap, thread)
#define pmap_kernel_va(VA) \
((((vm_offset_t) (VA)) >= vm_min_kernel_address) && \
(((vm_offset_t) (VA)) <= vm_max_kernel_address))
#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
#define pmap_resident_max(pmap) ((pmap)->stats.resident_max)
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define pmap_attribute(pmap,addr,size,attr,value) \
(KERN_INVALID_ADDRESS)
#define pmap_attribute_cache_sync(addr,size,attr,value) \
(KERN_INVALID_ADDRESS)
#define MACHINE_PMAP_IS_EMPTY 1
extern boolean_t pmap_is_empty(pmap_t pmap,
vm_map_offset_t start,
vm_map_offset_t end);
#endif /* ASSEMBLER */
#endif /* _PMAP_MACHINE_ */
#endif /* KERNEL_PRIVATE */