/*
 * Machine-dependent structures and macros for the i386 physical map (pmap)
 * module: page table entry layout, pmap and per-cpu pmap structures, and
 * the activation/deactivation macros used on context switch.
 */
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1
#ifndef ASSEMBLER
#include <platforms.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/lock.h>
#define PMAP_QUEUE 1
#ifdef PMAP_QUEUE
#include <kern/queue.h>
#endif
#define INTEL_PGBYTES I386_PGBYTES
#define INTEL_PGSHIFT I386_PGSHIFT
#define intel_btop(x) i386_btop(x)
#define intel_ptob(x) i386_ptob(x)
#define intel_round_page(x) i386_round_page(x)
#define intel_trunc_page(x) i386_trunc_page(x)
#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
#define round_intel_to_vm(x) round_i386_to_vm(x)
#define vm_to_intel(x) vm_to_i386(x)
#ifdef PAE
typedef uint64_t pdpt_entry_t;
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pmap_paddr_t;
#else
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
typedef uint32_t pmap_paddr_t;
#endif
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#define PD_ENTRY_NULL ((pd_entry_t *) 0)
#endif /* !ASSEMBLER */
#ifdef PAE
#define NPGPTD 4
#define PDESHIFT 21
#define PTEMASK 0x1ff
#define PTEINDX 3
#else
#define NPGPTD 1
#define PDESHIFT 22
#define PTEMASK 0x3ff
#define PTEINDX 2
#endif
#define PTESHIFT 12
#define PDESIZE sizeof(pd_entry_t)
#define PTESIZE sizeof(pt_entry_t)
#define INTEL_OFFMASK (I386_PGBYTES - 1)
#define PG_FRAME (~((pmap_paddr_t)PAGE_MASK))
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
#define NBPTD (NPGPTD << PAGE_SHIFT)
#define NPDEPTD (NBPTD / (sizeof (pd_entry_t)))
#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPDE (1 << PDESHIFT)
#define PDEMASK (NBPDE - 1)
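/*
 * Worked values (illustrative, assuming 4K pages): in the non-PAE case
 * (32-bit entries, PDESHIFT 22) the definitions above give NPTEPG == 1024
 * and NBPDE == 4MB; under PAE (64-bit entries, PDESHIFT 21) they give
 * NPTEPG == 512 and NBPDE == 2MB.
 */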
#define VM_WIMG_COPYBACK VM_MEM_COHERENT
#define VM_WIMG_DEFAULT VM_MEM_COHERENT
#define VM_WIMG_IO (VM_MEM_COHERENT | \
VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
#ifndef KVA_PAGES
#define KVA_PAGES 256
#endif
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))
#ifndef NKPT
#ifdef PAE
#define NKPT 500
#else
#define NKPT 32
#endif
#endif
#ifndef NKPDE
#define NKPDE (KVA_PAGES - 1)
#endif
#ifdef PAE
#define KPTDI (0x600)
#define PTDPTDI (0x7F4)
#define APTDPTDI (0x7F8)
#define UMAXPTDI (0x5FC)
#define UMAXPTEOFF (NPTEPG)
#else
#define KPTDI (0x300)
#define PTDPTDI (0x3FD)
#define APTDPTDI (0x3FE)
#define UMAXPTDI (0x2FF)
#define UMAXPTEOFF (NPTEPG)
#endif
#define KERNBASE VADDR(KPTDI,0)
#define pdenum(pmap, a) (((a) >> PDESHIFT) & PDEMASK)
#define pdetova(a) ((vm_offset_t)(a) << PDESHIFT)
#define ptenum(a) (((a) >> PTESHIFT) & PTEMASK)
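/*
 * Illustrative sketch (not part of the original interface): decomposing a
 * non-PAE virtual address with the macros above.  For va == 0xC0401234,
 * with PDESHIFT 22 and PTESHIFT 12:
 *
 *	pdenum(pmap, va)    == 0x301	page directory index
 *	ptenum(va)          == 0x001	page table index
 *	va & INTEL_OFFMASK  == 0x234	byte offset within the page
 *
 * and VADDR(0x301, 0x001) | 0x234 reconstructs the original address.
 */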
#define INTEL_PTE_VALID 0x00000001
#define INTEL_PTE_WRITE 0x00000002
#define INTEL_PTE_RW 0x00000002
#define INTEL_PTE_USER 0x00000004
#define INTEL_PTE_WTHRU 0x00000008
#define INTEL_PTE_NCACHE 0x00000010
#define INTEL_PTE_REF 0x00000020
#define INTEL_PTE_MOD 0x00000040
#define INTEL_PTE_PS 0x00000080
#define INTEL_PTE_GLOBAL 0x00000100
#define INTEL_PTE_WIRED 0x00000200
#define INTEL_PTE_PFN PG_FRAME
#define INTEL_PTE_PTA 0x00000080
#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
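/*
 * Illustrative sketch (not part of the original interface): composing and
 * decoding a page table entry with the definitions above.
 *
 *	pt_entry_t pte = pa_to_pte(pa)
 *		| INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_GLOBAL;
 *
 *	pmap_paddr_t pa2 = pte_to_pa(pte);	recovers the frame address
 *	pte_increment_pa(pte);			advances pte to the next frame
 */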
#define PMAP_DEFAULT_CACHE 0
#define PMAP_INHIBIT_CACHE 1
#define PMAP_GUARDED_CACHE 2
#define PMAP_ACTIVATE_CACHE 4
#define PMAP_NO_GUARD_CACHE 8
#ifndef ASSEMBLER
#include <sys/queue.h>
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde[], APTDpde[], Upde;
extern pd_entry_t *IdlePTD;
#ifdef PAE
extern pdpt_entry_t *IdlePDPT;
#endif
#define vtopte(va) (PTmap + i386_btop(va))
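/*
 * Illustrative sketch (not part of the original interface): vtopte()
 * indexes the PTmap window onto the page tables, so once a virtual address
 * is known to be mapped its PTE can be read directly:
 *
 *	pt_entry_t pte = *vtopte(va);
 *	if (pte & INTEL_PTE_VALID)
 *		pa = pte_to_pa(pte) | ((pmap_paddr_t)va & INTEL_OFFMASK);
 */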
typedef volatile long cpu_set;
/*
 * Machine-dependent per-page information: the list of all currently valid
 * virtual mappings (pv entries) of the physical page.
 */
struct md_page {
int pv_list_count;
TAILQ_HEAD(,pv_entry) pv_list;
};
#include <vm/vm_page.h>
struct pmap {
#ifdef PMAP_QUEUE
queue_head_t pmap_link; /* unordered queue of in-use pmaps */
#endif
pd_entry_t *dirbase; /* page directory (kernel virtual address) */
pd_entry_t *pdirbase; /* physical address of the page directory */
vm_object_t pm_obj; /* object holding the page table pages */
int ref_count; /* reference count */
decl_simple_lock_data(,lock) /* lock on the map */
struct pmap_statistics stats; /* map statistics */
cpu_set cpus_using; /* bitmap of cpus using this pmap */
#ifdef PAE
vm_offset_t pm_hold; /* allocation address of the pdpt */
pdpt_entry_t *pm_pdpt; /* kernel virtual address of the pdpt */
vm_offset_t pm_ppdpt; /* physical address of the pdpt (loaded into cr3) */
#endif
};
#define PMAP_NWINDOWS 4
typedef struct {
pt_entry_t *prv_CMAP; /* pointer to the window's PTE */
caddr_t prv_CADDR; /* virtual address the window maps */
} mapwindow_t;
typedef struct cpu_pmap {
mapwindow_t mapwindow[PMAP_NWINDOWS]; /* per-cpu temporary mapping windows */
struct pmap *real_pmap; /* pmap whose page tables are currently loaded */
struct pmap_update_list *update_list; /* queued pmap update requests */
volatile boolean_t update_needed; /* updates pending for this cpu */
} cpu_pmap_t;
#define CM1 (current_cpu_datap()->cpu_pmap->mapwindow[0].prv_CMAP)
#define CM2 (current_cpu_datap()->cpu_pmap->mapwindow[1].prv_CMAP)
#define CM3 (current_cpu_datap()->cpu_pmap->mapwindow[2].prv_CMAP)
#define CM4 (current_cpu_datap()->cpu_pmap->mapwindow[3].prv_CMAP)
#define CA1 (current_cpu_datap()->cpu_pmap->mapwindow[0].prv_CADDR)
#define CA2 (current_cpu_datap()->cpu_pmap->mapwindow[1].prv_CADDR)
#define CA3 (current_cpu_datap()->cpu_pmap->mapwindow[2].prv_CADDR)
#define CA4 (current_cpu_datap()->cpu_pmap->mapwindow[3].prv_CADDR)
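/*
 * Illustrative sketch (not part of the original interface): each cpu owns
 * PMAP_NWINDOWS temporary mapping windows.  A physical page is typically
 * made addressable by storing a PTE through the window's prv_CMAP pointer
 * and then touching it through the matching prv_CADDR address, e.g.
 *
 *	*CM1 = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_RW;
 *	(flush the TLB entry for CA1, then copy through CA1)
 *	*CM1 = 0;
 */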
typedef struct pmap_memory_regions {
ppnum_t base; /* first physical page number in the region */
ppnum_t end; /* ending physical page number of the region */
ppnum_t alloc; /* next physical page number to be allocated */
uint32_t type; /* region attributes */
} pmap_memory_region_t;
unsigned pmap_memory_region_count; /* number of valid entries in pmap_memory_regions[] */
unsigned pmap_memory_region_current; /* region currently being allocated from */
#define PMAP_MEMORY_REGIONS_SIZE 32
extern pmap_memory_region_t pmap_memory_regions[];
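/*
 * Illustrative sketch (not part of the original interface): walking the
 * table of physical memory regions.
 *
 *	unsigned i;
 *	for (i = 0; i < pmap_memory_region_count; i++) {
 *		pmap_memory_region_t *rp = &pmap_memory_regions[i];
 *		(pages rp->base .. rp->end, allocation cursor rp->alloc)
 *	}
 */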
#define PMAP_REAL(my_cpu) (cpu_datap(my_cpu)->cpu_pmap->real_pmap)
#include <i386/proc_reg.h>
#define PMAP_CPU_SET(pmap, my_cpu) i_bit_set(my_cpu, &((pmap)->cpus_using))
#define PMAP_CPU_CLR(pmap, my_cpu) i_bit_clear(my_cpu, &((pmap)->cpus_using))
#ifdef PAE
#define PDIRBASE pm_ppdpt
#else
#define PDIRBASE pdirbase
#endif
#define set_dirbase(mypmap, my_cpu) { \
struct pmap **ppmap = &PMAP_REAL(my_cpu); \
pmap_paddr_t pdirbase = (pmap_paddr_t)((mypmap)->PDIRBASE); \
\
if (*ppmap == (struct pmap *)NULL) { \
*ppmap = (mypmap); \
PMAP_CPU_SET((mypmap), my_cpu); \
set_cr3(pdirbase); \
} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap ) { \
if (*ppmap != kernel_pmap) \
PMAP_CPU_CLR(*ppmap, my_cpu); \
*ppmap = (mypmap); \
PMAP_CPU_SET((mypmap), my_cpu); \
set_cr3(pdirbase); \
} \
assert((mypmap) == *ppmap || (mypmap) == kernel_pmap); \
}
extern cpu_set cpus_active; /* bitmap of cpus actively using a pmap */
extern cpu_set cpus_idle; /* bitmap of idle cpus */
#define cpu_update_needed(cpu) cpu_datap(cpu)->cpu_pmap->update_needed
#define cpu_update_list(cpu) cpu_datap(cpu)->cpu_pmap->update_list
extern void process_pmap_updates(struct pmap *pmap);
extern void pmap_update_interrupt(void);
extern vm_offset_t (kvtophys)(
vm_offset_t addr);
extern pt_entry_t *pmap_pte(
struct pmap *pmap,
vm_offset_t addr);
extern vm_offset_t pmap_map(
vm_offset_t virt,
vm_offset_t start,
vm_offset_t end,
vm_prot_t prot);
extern vm_offset_t pmap_map_bd(
vm_offset_t virt,
vm_offset_t start,
vm_offset_t end,
vm_prot_t prot);
extern void pmap_bootstrap(
vm_offset_t load_start);
extern boolean_t pmap_valid_page(
ppnum_t pn);
extern int pmap_list_resident_pages(
struct pmap *pmap,
vm_offset_t *listp,
int space);
extern void pmap_commpage_init(
vm_offset_t kernel,
vm_offset_t user,
int count);
extern struct cpu_pmap *pmap_cpu_alloc(
boolean_t is_boot_cpu);
extern void pmap_cpu_free(
struct cpu_pmap *cp);
extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
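/*
 * Illustrative sketch (not part of the original interface): a typical
 * virtual-to-physical lookup using the declarations above.
 *
 *	pt_entry_t *ptep = pmap_pte(kernel_pmap, va);
 *	if (ptep != PT_ENTRY_NULL && (*ptep & INTEL_PTE_VALID))
 *		pa = pte_to_pa(*ptep) | ((pmap_paddr_t)va & INTEL_OFFMASK);
 *
 * pmap_find_phys() returns the same translation as a page number (ppnum_t).
 */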
#include <kern/spl.h>
#if defined(PMAP_ACTIVATE_KERNEL)
#undef PMAP_ACTIVATE_KERNEL
#undef PMAP_DEACTIVATE_KERNEL
#undef PMAP_ACTIVATE_USER
#undef PMAP_DEACTIVATE_USER
#endif
#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
\
/* Let pmap updates proceed while we wait for this pmap. */ \
i_bit_clear((my_cpu), &cpus_active); \
\
/* Lock the pmap to put this cpu in its active set, and wait for updates here. */ \
simple_lock(&kernel_pmap->lock); \
\
/* Process invalidate requests for the kernel pmap that were queued while this cpu was idle. */ \
if (cpu_update_needed(my_cpu)) \
process_pmap_updates(kernel_pmap); \
\
/* Mark that this cpu is using the pmap. */ \
i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
\
/* Mark this cpu active. */ \
i_bit_set((my_cpu), &cpus_active); \
\
simple_unlock(&kernel_pmap->lock); \
}
#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
/* Mark the kernel pmap as no longer used by this cpu. */ \
i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
i_bit_clear((my_cpu), &cpus_active); \
PMAP_REAL(my_cpu) = NULL; \
}
#define PMAP_ACTIVATE_MAP(map, my_cpu) { \
register pmap_t tpmap; \
\
tpmap = vm_map_pmap(map); \
if (tpmap == kernel_pmap) { \
/* If this is the kernel pmap, switch to its page tables. */ \
set_dirbase(kernel_pmap, my_cpu); \
} \
else { \
/* Let pmap updates proceed while we wait for this pmap. */ \
i_bit_clear((my_cpu), &cpus_active); \
\
/* Lock the pmap to put this cpu in its active set, and wait for updates here. */ \
simple_lock(&tpmap->lock); \
\
/* No need to invalidate the TLB: reloading dirbase flushes the entire user pmap. */ \
set_dirbase(tpmap, my_cpu); \
\
/* Mark this cpu active again. */ \
i_bit_set((my_cpu), &cpus_active); \
\
simple_unlock(&tpmap->lock); \
} \
}
/* Deactivating a map requires no action on this architecture. */
#define PMAP_DEACTIVATE_MAP(map, my_cpu)
#define PMAP_ACTIVATE_USER(th, my_cpu) { \
spl_t spl; \
\
spl = splhigh(); \
PMAP_ACTIVATE_MAP(th->map, my_cpu) \
splx(spl); \
}
#define PMAP_DEACTIVATE_USER(th, my_cpu)
#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \
spl_t spl; \
\
if (old_th->map != new_th->map) { \
spl = splhigh(); \
PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \
PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \
splx(spl); \
} \
}
#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
spl_t spl; \
\
spl = splhigh(); \
PMAP_DEACTIVATE_MAP(th->map, my_cpu); \
th->map = new_map; \
PMAP_ACTIVATE_MAP(th->map, my_cpu); \
splx(spl); \
}
#define MARK_CPU_IDLE(my_cpu) { \
/* \
 * Mark this cpu idle and remove it from the active set, since it is \
 * not actively using any pmap.  Cpus initiating pmap updates will see \
 * that it is idle, skip signaling it, and queue the request for when \
 * it wakes up. \
 */ \
int s = splhigh(); \
i_bit_set((my_cpu), &cpus_idle); \
i_bit_clear((my_cpu), &cpus_active); \
splx(s); \
set_led(my_cpu); \
}
#define MARK_CPU_ACTIVE(my_cpu) { \
\
int s = splhigh(); \
/* \
 * If a kernel_pmap update was requested while this cpu was idle, \
 * process it as if the update interrupt had been taken.  Remove the \
 * cpu from the idle set first, so that an update started by another \
 * cpu while we flush our TLB will raise a new update interrupt. \
 */ \
i_bit_clear((my_cpu), &cpus_idle); \
\
if (cpu_update_needed(my_cpu)) \
pmap_update_interrupt(); \
\
/* Mark this cpu as in use. */ \
i_bit_set((my_cpu), &cpus_active); \
splx(s); \
clear_led(my_cpu); \
}
#define PMAP_CONTEXT(pmap, thread)
#define pmap_kernel_va(VA) \
(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))
#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define pmap_attribute(pmap,addr,size,attr,value) \
(KERN_INVALID_ADDRESS)
#define pmap_attribute_cache_sync(addr,size,attr,value) \
(KERN_INVALID_ADDRESS)
#endif /* !ASSEMBLER */
#endif /* _PMAP_MACHINE_ */