#include <cpus.h>
#include <string.h>
#include <norma_vm.h>
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <mach/machine/vm_types.h>
#include <mach/boolean.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/lock.h>
#include <kern/spl.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <mach/machine/vm_param.h>
#include <machine/thread.h>
#include <kern/misc_protos.h>
#include <i386/misc_protos.h>
#include <i386/cpuid.h>
#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif
#include <kern/xpr.h>
#if NCPUS > 1
#include <i386/AT386/mp/mp_events.h>
#endif
/* Forward declarations for this file's internal pmap helpers. */
void pmap_expand(
pmap_t map,
vm_offset_t v);
extern void pmap_remove_range(
pmap_t pmap,
vm_offset_t va,
pt_entry_t *spte,
pt_entry_t *epte);
/* Clear attribute bits (PHYS_MODIFIED/PHYS_REFERENCED) for a physical page. */
void phys_attribute_clear(
vm_offset_t phys,
int bits);
/* Test whether any of the given attribute bits are set on a physical page. */
boolean_t phys_attribute_test(
vm_offset_t phys,
int bits);
void pmap_set_modify(vm_offset_t phys);
/* Set attribute bits on a physical page. */
void phys_attribute_set(
vm_offset_t phys,
int bits);
#ifndef set_dirbase
void set_dirbase(vm_offset_t dirbase);
#endif
/* Convert a kernel virtual address to a PTE frame value (removes the KV bias). */
#define PA_TO_PTE(pa) (pa_to_pte((pa) - VM_MIN_KERNEL_ADDRESS))
#define iswired(pte) ((pte) & INTEL_PTE_WIRED)
/* Per-cpu: the pmap whose directory is currently loaded into CR3 on that cpu. */
pmap_t real_pmap[NCPUS];
#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
#define WRITE_PTE_FAST(pte_p, pte_entry) *(pte_p) = (pte_entry);
/*
 * pv_entry: one node of the physical-to-virtual list recording every
 * (pmap, va) pair that currently maps a given managed physical page.
 * The first entry for each page lives inline in pv_head_table.
 */
typedef struct pv_entry {
struct pv_entry *next;
pmap_t pmap;
vm_offset_t va;
} *pv_entry_t;
#define PV_ENTRY_NULL ((pv_entry_t) 0)
pv_entry_t pv_head_table;
pv_entry_t pv_free_list;
decl_simple_lock_data(,pv_free_list_lock)
/* Pop from the pv free list; pv_e is left null when the list is empty. */
#define PV_ALLOC(pv_e) { \
simple_lock(&pv_free_list_lock); \
if ((pv_e = pv_free_list) != 0) { \
pv_free_list = pv_e->next; \
} \
simple_unlock(&pv_free_list_lock); \
}
/* Push a pv_entry back onto the free list. */
#define PV_FREE(pv_e) { \
simple_lock(&pv_free_list_lock); \
pv_e->next = pv_free_list; \
pv_free_list = pv_e; \
simple_unlock(&pv_free_list_lock); \
}
zone_t pv_list_zone;
/* One lock bit per managed physical page, BYTE_SIZE bits packed per byte. */
char *pv_lock_table;
#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
/* Managed physical range; pages outside it carry no pv/attribute state. */
vm_offset_t vm_first_phys = (vm_offset_t) 0;
vm_offset_t vm_last_phys = (vm_offset_t) 0;
boolean_t pmap_initialized = FALSE;
/* Index of a physical page into pv_head_table / pmap_phys_attributes. */
#define pa_index(pa) (atop(pa - vm_first_phys))
#define pai_to_pvh(pai) (&pv_head_table[pai])
#define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table)
#define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table)
/* Per-page modified/referenced attribute bytes, encoded with the PTE bits. */
char *pmap_phys_attributes;
#define PHYS_MODIFIED INTEL_PTE_MOD
#define PHYS_REFERENCED INTEL_PTE_REF
#define PHYS_NCACHE INTEL_PTE_NCACHE
/* Virtual span covered by one page-directory entry. */
#define PDE_MAPPED_SIZE (pdetova(1))
/* VM object that owns the physical pages used as page-table pages. */
vm_object_t pmap_object = VM_OBJECT_NULL;
#if NCPUS > 1
/*
 * Multiprocessor locking protocol.  SPLVM marks this cpu as not "active"
 * (not using any pmap) so that remote TLB-shootdown initiators need not
 * wait for it; SPLX re-activates the cpu and restores spl.
 */
#define SPLVM(spl) { \
spl = splhigh(); \
mp_disable_preemption(); \
i_bit_clear(cpu_number(), &cpus_active); \
mp_enable_preemption(); \
}
#define SPLX(spl) { \
mp_disable_preemption(); \
i_bit_set(cpu_number(), &cpus_active); \
mp_enable_preemption(); \
splx(spl); \
}
/*
 * pmap_system_lock orders all pmap updates: readers may update mappings
 * in a single pmap (plus its simple lock); the writer side is used for
 * physical-page-wide operations that touch many pmaps at once.
 */
lock_t pmap_system_lock;
#define PMAP_READ_LOCK(pmap, spl) { \
SPLVM(spl); \
lock_read(&pmap_system_lock); \
simple_lock(&(pmap)->lock); \
}
#define PMAP_WRITE_LOCK(spl) { \
SPLVM(spl); \
lock_write(&pmap_system_lock); \
}
#define PMAP_READ_UNLOCK(pmap, spl) { \
simple_unlock(&(pmap)->lock); \
lock_read_done(&pmap_system_lock); \
SPLX(spl); \
}
#define PMAP_WRITE_UNLOCK(spl) { \
lock_write_done(&pmap_system_lock); \
SPLX(spl); \
}
#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
simple_lock(&(pmap)->lock); \
lock_write_to_read(&pmap_system_lock); \
}
#define LOCK_PVH(index) lock_pvh_pai(index)
#define UNLOCK_PVH(index) unlock_pvh_pai(index)
/* Flush the local TLB and ask every other cpu to do the same. */
#define PMAP_FLUSH_TLBS() \
{ \
flush_tlb(); \
i386_signal_cpus(MP_TLB_FLUSH); \
}
#define PMAP_RELOAD_TLBS() { \
i386_signal_cpus(MP_TLB_RELOAD); \
set_cr3(kernel_pmap->pdirbase); \
}
/*
 * Single-page invalidate: INVLPG suffices for the kernel pmap; a user
 * pmap gets a full flush.  Other cpus are signalled in either case.
 */
#define PMAP_INVALIDATE_PAGE(map, addr) { \
if (map == kernel_pmap) \
invlpg((vm_offset_t) addr); \
else \
flush_tlb(); \
i386_signal_cpus(MP_TLB_FLUSH); \
}
#else
/* Uniprocessor variants: spl protection only (plus preemption under MACH_RT). */
#if MACH_RT
#define SPLVM(spl) { (spl) = splhigh(); }
#define SPLX(spl) splx (spl)
#else
#define SPLVM(spl)
#define SPLX(spl)
#endif
#define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
#define PMAP_WRITE_LOCK(spl) SPLVM(spl)
#define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
#define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
#define PMAP_WRITE_TO_READ_LOCK(pmap)
#if MACH_RT
#define LOCK_PVH(index) disable_preemption()
#define UNLOCK_PVH(index) enable_preemption()
#else
#define LOCK_PVH(index)
#define UNLOCK_PVH(index)
#endif
#define PMAP_FLUSH_TLBS() flush_tlb()
#define PMAP_RELOAD_TLBS() set_cr3(kernel_pmap->pdirbase)
#define PMAP_INVALIDATE_PAGE(map, addr) { \
if (map == kernel_pmap) \
invlpg((vm_offset_t) addr); \
else \
flush_tlb(); \
}
#endif
/* Max pages for a ranged invalidate before falling back to a full flush. */
#define MAX_TBIS_SIZE 32
#if NCPUS > 1
/* Cpu sets used by the TLB-shootdown protocol (see SPLVM/SPLX above). */
cpu_set cpus_active;
cpu_set cpus_idle;
volatile boolean_t cpu_update_needed[NCPUS];
#endif
#define current_pmap() (vm_map_pmap(current_act()->map))
#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
/* The statically-allocated kernel pmap. */
struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
struct zone *pmap_zone;
int pmap_debug = 0;
/* Hardware 4K pages per VM page. */
int ptes_per_vm_page;
unsigned int inuse_ptepages_count = 0;
/*
 * Cache of retired pmaps kept for fast pmap_create; allocated in chunks
 * of pmap_alloc_chunk directory pages.
 */
int pmap_cache_max = 32;
int pmap_alloc_chunk = 8;
pmap_t pmap_cache_list;
int pmap_cache_count;
decl_simple_lock_data(,pmap_cache_lock)
extern vm_offset_t hole_start, hole_end;
extern char end;
/* Kernel page directory set up by the bootstrap code. */
pt_entry_t *kpde = 0;
#if DEBUG_ALIAS
/* Debug log of call sites that created aliased mappings. */
#define PMAP_ALIAS_MAX 32
struct pmap_alias {
vm_offset_t rpc;
pmap_t pmap;
vm_offset_t va;
int cookie;
#define PMAP_ALIAS_COOKIE 0xdeadbeef
} pmap_aliasbuf[PMAP_ALIAS_MAX];
int pmap_alias_index = 0;
extern vm_offset_t get_rpc();
#endif
pt_entry_t *
pmap_pte(
	register pmap_t pmap,
	register vm_offset_t addr)
{
	register pt_entry_t dir_entry;
	register pt_entry_t *table;

	/*
	 * Return a pointer to the PTE mapping addr in pmap, or
	 * PT_ENTRY_NULL when no page table covers that address.
	 */
	dir_entry = pmap->dirbase[pdenum(pmap, addr)];
	if (!(dir_entry & INTEL_PTE_VALID))
		return PT_ENTRY_NULL;
	/* The directory entry holds the page table's physical address. */
	table = (pt_entry_t *)ptetokv(dir_entry);
	return &table[ptenum(addr)];
}
/* Pointer to the page-directory entry covering addr in pmap. */
#define pmap_pde(pmap, addr) (&(pmap)->dirbase[pdenum(pmap, addr)])
#define DEBUG_PTE_PAGE 0
#if DEBUG_PTE_PAGE
/*
 * ptep_check: debug-only consistency check that a pte page's recorded
 * use/wired counts match the entries actually present in the table.
 */
void
ptep_check(
ptep_t ptep)
{
register pt_entry_t *pte, *epte;
int ctu, ctw;
if (ptep == PTE_PAGE_NULL)
return;
pte = pmap_pte(ptep->pmap, ptep->va);
epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
ctu = 0;
ctw = 0;
/* Count in-use and wired entries, one VM page at a time. */
while (pte < epte) {
if (pte->pfn != 0) {
ctu++;
if (pte->wired)
ctw++;
}
pte += ptes_per_vm_page;
}
if (ctu != ptep->use_count || ctw != ptep->wired_count) {
printf("use %d wired %d - actual use %d wired %d\n",
ptep->use_count, ptep->wired_count, ctu, ctw);
panic("pte count");
}
}
#endif
vm_offset_t
pmap_map(
	register vm_offset_t virt,
	register vm_offset_t start,
	register vm_offset_t end,
	register vm_prot_t prot)
{
	/*
	 * Map physical range [start, end) into the kernel pmap starting
	 * at virt, page by page; returns the first unmapped virtual
	 * address after the range.
	 */
	for (; start < end; start += PAGE_SIZE, virt += PAGE_SIZE)
		pmap_enter(kernel_pmap, virt, start, prot, 0, FALSE);
	return virt;
}
vm_offset_t
pmap_map_bd(
	register vm_offset_t virt,
	register vm_offset_t start,
	register vm_offset_t end,
	vm_prot_t prot)
{
	register pt_entry_t *pte;
	register pt_entry_t template;

	/*
	 * Back-door mapping for device memory: write wired, uncached
	 * PTEs directly into existing kernel page tables (no pv lists,
	 * no pmap_enter).  The tables must already exist.
	 */
	template = pa_to_pte(start)
		| INTEL_PTE_NCACHE
		| INTEL_PTE_REF
		| INTEL_PTE_MOD
		| INTEL_PTE_WIRED
		| INTEL_PTE_VALID;
	if (prot & VM_PROT_WRITE)
		template |= INTEL_PTE_WRITE;

	for (; start < end; start += PAGE_SIZE, virt += PAGE_SIZE) {
		pte = pmap_pte(kernel_pmap, virt);
		if (pte == PT_ENTRY_NULL)
			panic("pmap_map_bd: Invalid kernel address\n");
		WRITE_PTE_FAST(pte, template)
		pte_increment_pa(template);
	}
	PMAP_FLUSH_TLBS();
	return virt;
}
/* Boot-time memory layout, published by machine startup code. */
extern int cnvmem;
extern char *first_avail;
extern vm_offset_t virtual_avail, virtual_end;
extern vm_offset_t avail_start, avail_end, avail_next;
/*
 * pmap_bootstrap: set up the kernel pmap and carve the initial kernel
 * virtual space out of the boot-time page tables (kpde).  Runs once,
 * single-threaded, before pmap_init.
 */
void
pmap_bootstrap(
vm_offset_t load_start)
{
vm_offset_t va, tva, paddr;
pt_entry_t template;
pt_entry_t *pde, *pte, *ptend;
vm_size_t morevm;
/* Hardware 4K pages per VM page (PAGE_SIZE may be larger). */
ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;
kernel_pmap = &kernel_pmap_store;
#if NCPUS > 1
lock_init(&pmap_system_lock,
FALSE,
ETAP_VM_PMAP_SYS,
ETAP_VM_PMAP_SYS_I);
#endif
simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
simple_lock_init(&pv_free_list_lock, ETAP_VM_PMAP_FREE);
kernel_pmap->ref_count = 1;
/* The kernel maps all remaining physical memory at phystokv offsets. */
virtual_avail = phystokv(avail_start);
virtual_end = phystokv(avail_end);
/* Resume filling PTEs where the bootstrap tables left off. */
pde = kpde;
pde += pdenum(kernel_pmap, virtual_avail);
if (pte_to_pa(*pde) == 0) {
pte = 0; ptend = 0;
}
else {
pte = (pt_entry_t *)ptetokv(*pde);
ptend = pte+NPTES;
pte += ptenum(virtual_avail);
pde++;
}
template = pa_to_pte(avail_start)
| INTEL_PTE_VALID
| INTEL_PTE_WRITE;
/*
 * Map every remaining physical page at its kernel virtual address,
 * stealing fresh page-table pages from the front of virtual_avail
 * as needed (each steal advances virtual_avail past the new table,
 * skipping the VM hole if it is reached).
 */
for (va = virtual_avail; va < virtual_end; va += INTEL_PGBYTES) {
if (pte >= ptend) {
pte = (pt_entry_t *)phystokv(virtual_avail);
ptend = pte + NPTES;
virtual_avail = (vm_offset_t)ptend;
if (virtual_avail == hole_start)
virtual_avail = hole_end;
*pde = PA_TO_PTE((vm_offset_t) pte)
| INTEL_PTE_VALID
| INTEL_PTE_WRITE;
pde++;
}
WRITE_PTE_FAST(pte, template)
pte++;
pte_increment_pa(template);
}
avail_start = virtual_avail - VM_MIN_KERNEL_ADDRESS;
avail_next = avail_start;
/*
 * Extend the kernel virtual range beyond the physical image so kmem
 * has headroom; clamp/extend against the fixed kernel VA limits.
 */
morevm = 3*avail_end;
if (virtual_end + morevm > VM_MAX_KERNEL_ADDRESS)
morevm = VM_MAX_KERNEL_ADDRESS - virtual_end + 1;
*(int *)&template = 0;
if (virtual_end + morevm < VM_MAX_KERNEL_LOADED_ADDRESS + 1)
morevm = VM_MAX_KERNEL_LOADED_ADDRESS + 1 - virtual_end;
virtual_end += morevm;
/* Pre-allocate page tables full of invalid PTEs for the extra range. */
for (tva = va; tva < virtual_end; tva += INTEL_PGBYTES) {
if (pte >= ptend) {
pmap_next_page(&paddr);
pte = (pt_entry_t *)phystokv(paddr);
ptend = pte + NPTES;
*pde = PA_TO_PTE((vm_offset_t) pte)
| INTEL_PTE_VALID
| INTEL_PTE_WRITE;
pde++;
}
WRITE_PTE_FAST(pte, template)
pte++;
}
virtual_avail = va;
if (virtual_avail < hole_end)
virtual_avail = hole_end;
virtual_end = va + morevm;
/* Zero the unused tail of the last partially-filled page table. */
while (pte < ptend)
*pte++ = 0;
/* Invalidate user-space directory entries inherited from bootstrap. */
memset((char *)kpde,
0,
pdenum(kernel_pmap,VM_MIN_KERNEL_ADDRESS)*sizeof(pt_entry_t));
kernel_pmap->dirbase = kpde;
printf("Kernel virtual space from 0x%x to 0x%x.\n",
VM_MIN_KERNEL_ADDRESS, virtual_end);
avail_start = avail_next;
printf("Available physical space from 0x%x to 0x%x\n",
avail_start, avail_end);
kernel_pmap->pdirbase = kvtophys((vm_offset_t)kernel_pmap->dirbase);
/*
 * If the cpu supports PAT, reprogram PAT entry 3 (bits 48-51 of MSR
 * 0x277) to memory type 0x01.
 * NOTE(review): per the Intel SDM, encoding 0x01 is write-through —
 * confirm against the intended use of INTEL_PTE_PTA mappings.
 */
if (cpuid_features() & CPUID_FEATURE_PAT)
{
uint64_t pat;
uint32_t msr;
msr = 0x277;
asm volatile("rdmsr" : "=A" (pat) : "c" (msr));
pat &= ~(0xfULL << 48);
pat |= 0x01ULL << 48;
asm volatile("wrmsr" :: "A" (pat), "c" (msr));
}
}
void
pmap_virtual_space(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	/* Report the kernel virtual range left available after bootstrap. */
	*endp = virtual_end;
	*startp = virtual_avail;
}
/*
 * pmap_init: second-phase initialization, once kmem exists.  Allocates
 * the pv head table, the pv lock bitmap, and the per-page attribute
 * bytes from a single wired allocation, then creates the pmap and
 * pv-entry zones and marks the module fully initialized.
 */
void
pmap_init(void)
{
register long npages;
vm_offset_t addr;
register vm_size_t s;
int i;
npages = atop(avail_end - avail_start);
/* One pv_entry + one lock bit + one attribute byte per managed page. */
s = (vm_size_t) (sizeof(struct pv_entry) * npages
+ pv_lock_table_size(npages)
+ npages);
s = round_page(s);
if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
panic("pmap_init");
memset((char *)addr, 0, s);
/* Carve the single allocation into its three consecutive tables. */
pv_head_table = (pv_entry_t) addr;
addr = (vm_offset_t) (pv_head_table + npages);
pv_lock_table = (char *) addr;
addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
pmap_phys_attributes = (char *) addr;
s = (vm_size_t) sizeof(struct pmap);
pmap_zone = zinit(s, 400*s, 4096, "pmap");
s = (vm_size_t) sizeof(struct pv_entry);
pv_list_zone = zinit(s, 10000*s, 4096, "pv_list");
/* From here on, valid_page() recognizes managed physical pages. */
vm_first_phys = avail_start;
vm_last_phys = avail_end;
pmap_initialized = TRUE;
pmap_cache_list = PMAP_NULL;
pmap_cache_count = 0;
simple_lock_init(&pmap_cache_lock, ETAP_VM_PMAP_CACHE);
}
/* TRUE iff x lies in the managed range (pv/attribute state exists for it). */
#define pmap_valid_page(x) ((avail_start <= x) && (x < avail_end))
#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
/*
 * pmap_verify_free: TRUE if the physical page currently has no mappings.
 * Before pmap_init there are no pv lists, so every page reports free;
 * unmanaged pages conservatively report FALSE.
 */
boolean_t
pmap_verify_free(
vm_offset_t phys)
{
pv_entry_t pv_h;
int pai;
spl_t spl;
boolean_t result;
assert(phys != vm_page_fictitious_addr);
if (!pmap_initialized)
return(TRUE);
if (!pmap_valid_page(phys))
return(FALSE);
PMAP_WRITE_LOCK(spl);
pai = pa_index(phys);
pv_h = pai_to_pvh(pai);
/* An empty pv list is denoted by a null pmap in the inline head entry. */
result = (pv_h->pmap == PMAP_NULL);
PMAP_WRITE_UNLOCK(spl);
return(result);
}
/*
 * pmap_create: allocate a new user pmap.  size must be 0 (a non-zero
 * size historically requested a software page map, unsupported here).
 *
 * Pmaps are recycled through pmap_cache_list; while a pmap sits on that
 * list its ref_count field is punned to hold the next-pointer of the
 * list (see the (int)/(pmap_t) casts below).  When the cache is empty a
 * whole chunk of pmap_alloc_chunk pmaps is created, each with its own
 * page-directory page copied from the kernel directory.
 */
pmap_t
pmap_create(
vm_size_t size)
{
register pmap_t p;
register pmap_statistics_t stats;
if (size != 0) {
return(PMAP_NULL);
}
simple_lock(&pmap_cache_lock);
while ((p = pmap_cache_list) == PMAP_NULL) {
vm_offset_t dirbases;
register int i;
simple_unlock(&pmap_cache_lock);
#if NCPUS > 1
#endif
/* One contiguous wired allocation provides the chunk's directories. */
if (kmem_alloc_wired(kernel_map, &dirbases,
pmap_alloc_chunk * INTEL_PGBYTES)
!= KERN_SUCCESS)
panic("pmap_create.1");
for (i = pmap_alloc_chunk; i > 0 ; i--) {
p = (pmap_t) zalloc(pmap_zone);
if (p == PMAP_NULL)
panic("pmap_create.2");
/* New directory starts as a copy of the kernel's (kernel VA shared). */
p->dirbase = (pt_entry_t *) dirbases;
dirbases += INTEL_PGBYTES;
memcpy(p->dirbase, kpde, INTEL_PGBYTES);
p->pdirbase = kvtophys((vm_offset_t)p->dirbase);
simple_lock_init(&p->lock, ETAP_VM_PMAP);
p->cpus_using = 0;
stats = &p->stats;
stats->resident_count = 0;
stats->wired_count = 0;
/* Thread the fresh pmap onto the cache (ref_count punned as link). */
simple_lock(&pmap_cache_lock);
p->ref_count = (int) pmap_cache_list;
pmap_cache_list = p;
pmap_cache_count++;
simple_unlock(&pmap_cache_lock);
}
simple_lock(&pmap_cache_lock);
}
assert(p->stats.resident_count == 0);
assert(p->stats.wired_count == 0);
p->stats.resident_count = 0;
p->stats.wired_count = 0;
/* Unlink from the cache and restore ref_count to a real reference. */
pmap_cache_list = (pmap_t) p->ref_count;
p->ref_count = 1;
pmap_cache_count--;
simple_unlock(&pmap_cache_lock);
return(p);
}
/*
 * pmap_destroy: drop a reference; on the last reference tear down all
 * page-table pages and either return the pmap to the cache or free it.
 * Mappings must already have been removed (resident/wired asserts).
 */
void
pmap_destroy(
register pmap_t p)
{
register pt_entry_t *pdep;
register vm_offset_t pa;
register int c;
spl_t s;
register vm_page_t m;
if (p == PMAP_NULL)
return;
SPLVM(s);
simple_lock(&p->lock);
c = --p->ref_count;
if (c == 0) {
register int my_cpu;
mp_disable_preemption();
my_cpu = cpu_number();
/* If this cpu still runs on p, switch it back to the kernel pmap. */
if (real_pmap[my_cpu] == p) {
PMAP_CPU_CLR(p, my_cpu);
real_pmap[my_cpu] = kernel_pmap;
PMAP_RELOAD_TLBS();
}
mp_enable_preemption();
}
simple_unlock(&p->lock);
SPLX(s);
if (c != 0) {
return;
}
/* Free every user-range page-table page still hooked into the directory. */
pdep = p->dirbase;
while (pdep < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)]) {
if (*pdep & INTEL_PTE_VALID) {
pa = pte_to_pa(*pdep);
vm_object_lock(pmap_object);
m = vm_page_lookup(pmap_object, pa);
if (m == VM_PAGE_NULL)
panic("pmap_destroy: pte page not in object");
vm_page_lock_queues();
vm_page_free(m);
inuse_ptepages_count--;
vm_object_unlock(pmap_object);
vm_page_unlock_queues();
/* Clear all hardware PDEs that make up this one VM-page table. */
c = ptes_per_vm_page;
do {
*pdep = 0;
pdep++;
} while (--c > 0);
}
else {
pdep += ptes_per_vm_page;
}
}
assert(p->stats.resident_count == 0);
assert(p->stats.wired_count == 0);
/* Recycle through the pmap cache when there is room (ref_count = link). */
simple_lock(&pmap_cache_lock);
if (pmap_cache_count <= pmap_cache_max) {
p->ref_count = (int) pmap_cache_list;
pmap_cache_list = p;
pmap_cache_count++;
simple_unlock(&pmap_cache_lock);
}
else {
simple_unlock(&pmap_cache_lock);
kmem_free(kernel_map, (vm_offset_t)p->dirbase, INTEL_PGBYTES);
zfree(pmap_zone, (vm_offset_t) p);
}
}
void
pmap_reference(
	register pmap_t p)
{
	spl_t s;

	/* Taking a reference on the null pmap is a no-op. */
	if (p == PMAP_NULL)
		return;

	SPLVM(s);
	simple_lock(&p->lock);
	p->ref_count++;
	simple_unlock(&p->lock);
	SPLX(s);
}
/*
 * pmap_remove_range: remove the PTEs [spte, epte) of pmap, starting at
 * virtual address va.  For each managed page the modified/referenced
 * bits are folded into pmap_phys_attributes and the (pmap, va) node is
 * unlinked from the page's pv list.  Caller holds the pmap system lock
 * (read mode) and the pmap's simple lock; caller flushes TLBs.
 */
void
pmap_remove_range(
pmap_t pmap,
vm_offset_t va,
pt_entry_t *spte,
pt_entry_t *epte)
{
register pt_entry_t *cpte;
int num_removed, num_unwired;
int pai;
vm_offset_t pa;
#if DEBUG_PTE_PAGE
if (pmap != kernel_pmap)
ptep_check(get_pte_page(spte));
#endif
num_removed = 0;
num_unwired = 0;
for (cpte = spte; cpte < epte;
cpte += ptes_per_vm_page, va += PAGE_SIZE) {
pa = pte_to_pa(*cpte);
if (pa == 0)
continue;
num_removed++;
if (iswired(*cpte))
num_unwired++;
if (!valid_page(pa)) {
/* Unmanaged page: just clear the hardware PTEs, no pv work. */
register int i = ptes_per_vm_page;
register pt_entry_t *lpte = cpte;
do {
*lpte = 0;
lpte++;
} while (--i > 0);
continue;
}
pai = pa_index(pa);
LOCK_PVH(pai);
{
/* Save mod/ref state in the attribute byte before clearing. */
register int i;
register pt_entry_t *lpte;
i = ptes_per_vm_page;
lpte = cpte;
do {
pmap_phys_attributes[pai] |=
*lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
*lpte = 0;
lpte++;
} while (--i > 0);
}
{
/*
 * Unlink this mapping from the pv list.  The head entry is
 * stored inline, so removing it copies the second node up.
 */
register pv_entry_t pv_h, prev, cur;
pv_h = pai_to_pvh(pai);
if (pv_h->pmap == PMAP_NULL) {
panic("pmap_remove: null pv_list!");
}
if (pv_h->va == va && pv_h->pmap == pmap) {
cur = pv_h->next;
if (cur != PV_ENTRY_NULL) {
*pv_h = *cur;
PV_FREE(cur);
}
else {
pv_h->pmap = PMAP_NULL;
}
}
else {
cur = pv_h;
do {
prev = cur;
if ((cur = prev->next) == PV_ENTRY_NULL) {
panic("pmap-remove: mapping not in pv_list!");
}
} while (cur->va != va || cur->pmap != pmap);
prev->next = cur->next;
PV_FREE(cur);
}
UNLOCK_PVH(pai);
}
}
assert(pmap->stats.resident_count >= num_removed);
pmap->stats.resident_count -= num_removed;
assert(pmap->stats.wired_count >= num_unwired);
pmap->stats.wired_count -= num_unwired;
}
/*
 * pmap_remove_some_phys: interface stub — removing the mappings of a
 * single physical page from one pmap is not implemented on i386.
 */
void
pmap_remove_some_phys(
pmap_t map,
vm_offset_t phys_addr)
{
}
/*
 * pmap_remove: remove all mappings in [s, e) from map, walking one
 * page-directory entry (PDE_MAPPED_SIZE of VA) at a time and handing
 * each populated table slice to pmap_remove_range.
 */
void
pmap_remove(
pmap_t map,
addr64_t s,
addr64_t e)
{
spl_t spl;
register pt_entry_t *pde;
register pt_entry_t *spte, *epte;
vm_offset_t l;
if (map == PMAP_NULL)
return;
PMAP_READ_LOCK(map, spl);
pde = pmap_pde(map, s);
while (s < e) {
/* l = end of this pde's span, clipped to e. */
l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
if (l > e)
l = e;
if (*pde & INTEL_PTE_VALID) {
spte = (pt_entry_t *)ptetokv(*pde);
spte = &spte[ptenum(s)];
epte = &spte[intel_btop(l-s)];
pmap_remove_range(map, s, spte, epte);
}
s = l;
pde++;
}
PMAP_FLUSH_TLBS();
PMAP_READ_UNLOCK(map, spl);
}
/*
 * pmap_page_protect: lower the protection of every mapping of a
 * physical page.  Read-only requests clear the write bit; anything
 * more restrictive removes the mappings outright (kernel mappings are
 * always removed rather than downgraded).  Takes the pmap system lock
 * in write mode since multiple pmaps are touched.
 */
void
pmap_page_protect(
vm_offset_t phys,
vm_prot_t prot)
{
pv_entry_t pv_h, prev;
register pv_entry_t pv_e;
register pt_entry_t *pte;
int pai;
register pmap_t pmap;
spl_t spl;
boolean_t remove;
assert(phys != vm_page_fictitious_addr);
if (!valid_page(phys)) {
return;
}
/* Decide between write-protecting and fully removing the mappings. */
switch (prot) {
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
remove = FALSE;
break;
case VM_PROT_ALL:
return;
default:
remove = TRUE;
break;
}
PMAP_WRITE_LOCK(spl);
pai = pa_index(phys);
pv_h = pai_to_pvh(pai);
if (pv_h->pmap != PMAP_NULL) {
/* Walk the pv list; prev trails for unlinking removed nodes. */
prev = pv_e = pv_h;
do {
pmap = pv_e->pmap;
simple_lock(&pmap->lock);
{
register vm_offset_t va;
va = pv_e->va;
pte = pmap_pte(pmap, va);
PMAP_INVALIDATE_PAGE(pmap, va);
}
if (remove || pmap == kernel_pmap) {
{
/* Fold mod/ref into the attribute byte, then clear the PTEs. */
register int i = ptes_per_vm_page;
do {
pmap_phys_attributes[pai] |=
*pte & (PHYS_MODIFIED|PHYS_REFERENCED);
*pte++ = 0;
} while (--i > 0);
}
assert(pmap->stats.resident_count >= 1);
pmap->stats.resident_count--;
/* The inline head entry is only marked dead; fixed up below. */
if (pv_e == pv_h) {
pv_h->pmap = PMAP_NULL;
}
else {
prev->next = pv_e->next;
PV_FREE(pv_e);
}
}
else {
/* Downgrade: clear the write bit on each hardware PTE. */
register int i = ptes_per_vm_page;
do {
*pte &= ~INTEL_PTE_WRITE;
pte++;
} while (--i > 0);
prev = pv_e;
}
simple_unlock(&pmap->lock);
} while ((pv_e = prev->next) != PV_ENTRY_NULL);
/* If the head was removed, pull the second node into the inline slot. */
if (pv_h->pmap == PMAP_NULL) {
pv_e = pv_h->next;
if (pv_e != PV_ENTRY_NULL) {
*pv_h = *pv_e;
PV_FREE(pv_e);
}
}
}
PMAP_WRITE_UNLOCK(spl);
}
/*
 * pmap_protect: set protection on [s, e) in map.  Only write-protection
 * is applied here; protections that grant everything are no-ops and
 * anything more restrictive falls back to pmap_remove.  On 386-class
 * cpus the kernel cannot write-protect its own pages (no WP bit), so
 * kernel ranges are removed instead.
 */
void
pmap_protect(
pmap_t map,
vm_offset_t s,
vm_offset_t e,
vm_prot_t prot)
{
register pt_entry_t *pde;
register pt_entry_t *spte, *epte;
vm_offset_t l;
spl_t spl;
if (map == PMAP_NULL)
return;
switch (prot) {
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
break;
case VM_PROT_READ|VM_PROT_WRITE:
case VM_PROT_ALL:
return;
default:
pmap_remove(map, s, e);
return;
}
if (cpuid_family == CPUID_FAMILY_386)
if (map == kernel_pmap) {
pmap_remove(map, s, e);
return;
}
/* Only this pmap changes: its simple lock plus spl is sufficient. */
SPLVM(spl);
simple_lock(&map->lock);
pde = pmap_pde(map, s);
while (s < e) {
l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
if (l > e)
l = e;
if (*pde & INTEL_PTE_VALID) {
spte = (pt_entry_t *)ptetokv(*pde);
spte = &spte[ptenum(s)];
epte = &spte[intel_btop(l-s)];
while (spte < epte) {
if (*spte & INTEL_PTE_VALID)
*spte &= ~INTEL_PTE_WRITE;
spte++;
}
}
s = l;
pde++;
}
PMAP_FLUSH_TLBS();
simple_unlock(&map->lock);
SPLX(spl);
}
/*
 * pmap_enter: create a mapping v -> pa in pmap with the given
 * protection, cacheability flags, and wiring.  Any previous mapping at
 * v is first torn down.  May block in pmap_expand (to grow the page
 * tables) or in zalloc (for a pv entry) — both paths drop the locks and
 * retry from the top.
 */
void
pmap_enter(
register pmap_t pmap,
vm_offset_t v,
register vm_offset_t pa,
vm_prot_t prot,
unsigned int flags,
boolean_t wired)
{
register pt_entry_t *pte;
register pv_entry_t pv_h;
register int i, pai;
pv_entry_t pv_e;
pt_entry_t template;
spl_t spl;
vm_offset_t old_pa;
XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n",
current_thread()->top_act,
current_thread(),
pmap, v, pa);
assert(pa != vm_page_fictitious_addr);
if (pmap_debug)
printf("pmap(%x, %x)\n", v, pa);
if (pmap == PMAP_NULL)
return;
/*
 * 386-class cpus ignore the WP bit in kernel mode, so a read-only
 * kernel mapping cannot be enforced — remove the mapping instead.
 */
if (cpuid_family == CPUID_FAMILY_386)
if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
&& !wired ) {
PMAP_READ_LOCK(pmap, spl);
pte = pmap_pte(pmap, v);
if (pte != PT_ENTRY_NULL && pte_to_pa(*pte) != 0) {
PMAP_INVALIDATE_PAGE(pmap, v);
pmap_remove_range(pmap, v, pte,
pte + ptes_per_vm_page);
}
PMAP_READ_UNLOCK(pmap, spl);
return;
}
pv_e = PV_ENTRY_NULL;
Retry:
PMAP_READ_LOCK(pmap, spl);
/* Grow the page tables until a PTE slot for v exists. */
while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
PMAP_READ_UNLOCK(pmap, spl);
pmap_expand(pmap, v);
PMAP_READ_LOCK(pmap, spl);
}
old_pa = pte_to_pa(*pte);
/*
 * Fast path: same physical page already mapped here — just refresh
 * protection/wiring in place, preserving the hardware modify bit.
 */
if (old_pa == pa) {
template = pa_to_pte(pa) | INTEL_PTE_VALID;
if(flags & VM_MEM_NOT_CACHEABLE) {
if(!(flags & VM_MEM_GUARDED))
template |= INTEL_PTE_PTA;
template |= INTEL_PTE_NCACHE;
}
if (pmap != kernel_pmap)
template |= INTEL_PTE_USER;
if (prot & VM_PROT_WRITE)
template |= INTEL_PTE_WRITE;
if (wired) {
template |= INTEL_PTE_WIRED;
if (!iswired(*pte))
pmap->stats.wired_count++;
}
else {
if (iswired(*pte)) {
assert(pmap->stats.wired_count >= 1);
pmap->stats.wired_count--;
}
}
PMAP_INVALIDATE_PAGE(pmap, v);
i = ptes_per_vm_page;
do {
if (*pte & INTEL_PTE_MOD)
template |= INTEL_PTE_MOD;
WRITE_PTE(pte, template)
pte++;
pte_increment_pa(template);
} while (--i > 0);
goto Done;
}
/*
 * Different page was mapped at v: tear the old mapping down,
 * saving its mod/ref state and recycling its pv node into pv_e.
 */
if (old_pa != (vm_offset_t) 0) {
PMAP_INVALIDATE_PAGE(pmap, v);
#if DEBUG_PTE_PAGE
if (pmap != kernel_pmap)
ptep_check(get_pte_page(pte));
#endif
if (valid_page(old_pa)) {
pai = pa_index(old_pa);
LOCK_PVH(pai);
assert(pmap->stats.resident_count >= 1);
pmap->stats.resident_count--;
if (iswired(*pte)) {
assert(pmap->stats.wired_count >= 1);
pmap->stats.wired_count--;
}
i = ptes_per_vm_page;
do {
pmap_phys_attributes[pai] |=
*pte & (PHYS_MODIFIED|PHYS_REFERENCED);
WRITE_PTE(pte, 0)
pte++;
pte_increment_pa(template);
} while (--i > 0);
/* Leave pte pointing at the first slot for the re-write below. */
pte -= ptes_per_vm_page;
{
/* Unlink old (pmap, v) from old_pa's pv list; keep node in pv_e. */
register pv_entry_t prev, cur;
pv_h = pai_to_pvh(pai);
if (pv_h->pmap == PMAP_NULL) {
panic("pmap_enter: null pv_list!");
}
if (pv_h->va == v && pv_h->pmap == pmap) {
cur = pv_h->next;
if (cur != PV_ENTRY_NULL) {
*pv_h = *cur;
pv_e = cur;
}
else {
pv_h->pmap = PMAP_NULL;
}
}
else {
cur = pv_h;
do {
prev = cur;
if ((cur = prev->next) == PV_ENTRY_NULL) {
panic("pmap_enter: mapping not in pv_list!");
}
} while (cur->va != v || cur->pmap != pmap);
prev->next = cur->next;
pv_e = cur;
}
}
UNLOCK_PVH(pai);
}
else {
/* Old page was unmanaged: only the counters need adjusting. */
old_pa = (vm_offset_t) 0;
assert(pmap->stats.resident_count >= 1);
pmap->stats.resident_count--;
if (iswired(*pte)) {
assert(pmap->stats.wired_count >= 1);
pmap->stats.wired_count--;
}
}
}
/* Record the new mapping on pa's pv list (managed pages only). */
if (valid_page(pa)) {
pai = pa_index(pa);
#if SHARING_FAULTS
RetryPvList:
#endif
LOCK_PVH(pai);
pv_h = pai_to_pvh(pai);
if (pv_h->pmap == PMAP_NULL) {
/* First mapping of this page: use the inline head entry. */
pv_h->va = v;
pv_h->pmap = pmap;
pv_h->next = PV_ENTRY_NULL;
}
else {
#if DEBUG
{
pv_entry_t e = pv_h;
while (e != PV_ENTRY_NULL) {
if (e->pmap == pmap && e->va == v)
panic("pmap_enter: already in pv_list");
e = e->next;
}
}
#endif
#if SHARING_FAULTS
/* Enforce one mapping per pmap per page: remove any other alias. */
{
pv_entry_t e = pv_h;
pt_entry_t *opte;
while (e != PV_ENTRY_NULL) {
if (e->pmap == pmap) {
UNLOCK_PVH(pai);
opte = pmap_pte(pmap, e->va);
assert(opte != PT_ENTRY_NULL);
PMAP_INVALIDATE_PAGE(pmap, e->va);
pmap_remove_range(pmap, e->va, opte,
opte + ptes_per_vm_page);
goto RetryPvList;
}
e = e->next;
}
e = pv_h;
while (e != PV_ENTRY_NULL) {
if (e->pmap == pmap)
panic("pmap_enter: alias in pv_list");
e = e->next;
}
}
#endif
#if DEBUG_ALIAS
/* Log the first call site that creates each distinct alias. */
{
pv_entry_t e = pv_h;
vm_offset_t rpc = get_rpc();
while (e != PV_ENTRY_NULL) {
if (e->pmap == pmap) {
struct pmap_alias *pma;
int ii, logit;
logit = TRUE;
for (ii = 0; ii < pmap_alias_index; ii++) {
if (pmap_aliasbuf[ii].rpc == rpc) {
logit = FALSE;
break;
}
}
if (logit) {
pma = &pmap_aliasbuf[pmap_alias_index];
pma->pmap = pmap;
pma->va = v;
pma->rpc = rpc;
pma->cookie = PMAP_ALIAS_COOKIE;
if (++pmap_alias_index >= PMAP_ALIAS_MAX)
panic("pmap_enter: exhausted alias log");
}
}
e = e->next;
}
}
#endif
/*
 * Need a pv node: try the free list first, else drop the locks
 * and allocate from the zone (may block), then retry everything.
 */
if (pv_e == PV_ENTRY_NULL) {
PV_ALLOC(pv_e);
if (pv_e == PV_ENTRY_NULL) {
UNLOCK_PVH(pai);
PMAP_READ_UNLOCK(pmap, spl);
pv_e = (pv_entry_t) zalloc(pv_list_zone);
goto Retry;
}
}
pv_e->va = v;
pv_e->pmap = pmap;
pv_e->next = pv_h->next;
pv_h->next = pv_e;
pv_e = PV_ENTRY_NULL;
}
UNLOCK_PVH(pai);
}
pmap->stats.resident_count++;
/* Finally build and install the hardware PTE(s). */
template = pa_to_pte(pa) | INTEL_PTE_VALID;
if(flags & VM_MEM_NOT_CACHEABLE) {
if(!(flags & VM_MEM_GUARDED))
template |= INTEL_PTE_PTA;
template |= INTEL_PTE_NCACHE;
}
if (pmap != kernel_pmap)
template |= INTEL_PTE_USER;
if (prot & VM_PROT_WRITE)
template |= INTEL_PTE_WRITE;
if (wired) {
template |= INTEL_PTE_WIRED;
pmap->stats.wired_count++;
}
i = ptes_per_vm_page;
do {
WRITE_PTE(pte, template)
pte++;
pte_increment_pa(template);
} while (--i > 0);
Done:
/* Return an unused pv node (from the tear-down path) to the free list. */
if (pv_e != PV_ENTRY_NULL) {
PV_FREE(pv_e);
}
PMAP_READ_UNLOCK(pmap, spl);
}
/*
 * pmap_change_wiring: change the wired attribute of the mapping at v.
 * The real implementation is compiled out (#if 0); this function is
 * currently a no-op and the wired counts are maintained by
 * pmap_enter/pmap_remove instead.
 */
void
pmap_change_wiring(
register pmap_t map,
vm_offset_t v,
boolean_t wired)
{
register pt_entry_t *pte;
register int i;
spl_t spl;
#if 0
PMAP_READ_LOCK(map, spl);
if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
panic("pmap_change_wiring: pte missing");
if (wired && !iswired(*pte)) {
/* Wiring: set the wired bit in every hardware PTE of the VM page. */
map->stats.wired_count++;
i = ptes_per_vm_page;
do {
*pte++ |= INTEL_PTE_WIRED;
} while (--i > 0);
}
else if (!wired && iswired(*pte)) {
assert(map->stats.wired_count >= 1);
map->stats.wired_count--;
i = ptes_per_vm_page;
do {
*pte++ &= ~INTEL_PTE_WIRED;
} while (--i > 0);
}
PMAP_READ_UNLOCK(map, spl);
#else
return;
#endif
}
vm_offset_t
pmap_extract(
	register pmap_t pmap,
	vm_offset_t va)
{
	register pt_entry_t *pte;
	register vm_offset_t result;
	spl_t spl;

	/*
	 * Translate va through pmap; returns 0 when no valid mapping
	 * exists (0 is never a valid managed physical address here).
	 */
	result = (vm_offset_t) 0;
	SPLVM(spl);
	simple_lock(&pmap->lock);
	pte = pmap_pte(pmap, va);
	if (pte != PT_ENTRY_NULL && (*pte & INTEL_PTE_VALID))
		result = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
	simple_unlock(&pmap->lock);
	SPLX(spl);
	return result;
}
/*
 * pmap_expand: allocate and install a page-table page for the address
 * range containing v.  May block in vm_page_grab.  Called with no
 * locks held; races with other expanders are resolved by re-checking
 * pmap_pte under the lock and freeing the page if we lost.
 */
void
pmap_expand(
register pmap_t map,
register vm_offset_t v)
{
pt_entry_t *pdp;
register vm_page_t m;
register vm_offset_t pa;
register int i;
spl_t spl;
if (map == kernel_pmap)
panic("pmap_expand");
if (pmap_object == VM_OBJECT_NULL)
pmap_object = vm_object_allocate(avail_end);
while ((m = vm_page_grab()) == VM_PAGE_NULL)
VM_PAGE_WAIT();
/* Track the page in pmap_object, keyed by its physical address. */
pa = m->phys_page;
vm_object_lock(pmap_object);
vm_page_insert(m, pmap_object, pa);
vm_page_lock_queues();
vm_page_wire(m);
inuse_ptepages_count++;
vm_object_unlock(pmap_object);
vm_page_unlock_queues();
memset((void *)phystokv(pa), 0, PAGE_SIZE);
PMAP_READ_LOCK(map, spl);
/* Someone else may have expanded this range while we slept. */
if (pmap_pte(map, v) != PT_ENTRY_NULL) {
PMAP_READ_UNLOCK(map, spl);
vm_object_lock(pmap_object);
vm_page_lock_queues();
vm_page_free(m);
inuse_ptepages_count--;
vm_page_unlock_queues();
vm_object_unlock(pmap_object);
return;
}
/* Fill all hardware PDEs backing this VM page's directory slot. */
i = ptes_per_vm_page;
pdp = &map->dirbase[pdenum(map, v) & ~(i-1)];
do {
*pdp = pa_to_pte(pa)
| INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE;
pdp++;
pa += INTEL_PGBYTES;
} while (--i > 0);
PMAP_READ_UNLOCK(map, spl);
return;
}
#if 0
/*
 * pmap_copy: optional optimization to pre-copy mappings between pmaps;
 * intentionally not implemented on i386 (compiled out).
 */
void
pmap_copy(
pmap_t dst_pmap,
pmap_t src_pmap,
vm_offset_t dst_addr,
vm_size_t len,
vm_offset_t src_addr)
{
#ifdef lint
dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
#endif
}
#endif/* 0 */
void pmap_sync_caches_phys(ppnum_t pa)
{
	/*
	 * Cpus with self-snooping caches (CPUID SS feature) need no
	 * explicit synchronization; otherwise write back and invalidate
	 * the entire cache.
	 */
	if ((cpuid_features() & CPUID_FEATURE_SS) == 0)
		__asm__ volatile("wbinvd");
}
/* Counters: directory entries seen referenced vs. candidates for reclaim. */
int collect_ref;
int collect_unref;
/*
 * pmap_collect: garbage-collect page-table pages of a user pmap.  A
 * table whose directory entry has not been referenced since the last
 * pass, and which contains no wired mappings, has its mappings removed
 * and its page freed.  The kernel pmap is never collected.
 */
void
pmap_collect(
pmap_t p)
{
register pt_entry_t *pdp, *ptp;
pt_entry_t *eptp;
vm_offset_t pa;
int wired;
spl_t spl;
if (p == PMAP_NULL)
return;
if (p == kernel_pmap)
return;
PMAP_READ_LOCK(p, spl);
PMAP_FLUSH_TLBS();
for (pdp = p->dirbase;
pdp < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)];
pdp += ptes_per_vm_page)
{
if (*pdp & INTEL_PTE_VALID)
if(*pdp & INTEL_PTE_REF) {
/* Recently used: clear REF so it becomes a candidate next pass. */
*pdp &= ~INTEL_PTE_REF;
collect_ref++;
} else {
collect_unref++;
pa = pte_to_pa(*pdp);
ptp = (pt_entry_t *)phystokv(pa);
eptp = ptp + NPTES*ptes_per_vm_page;
/* A single wired entry keeps the whole table. */
wired = 0;
{
register pt_entry_t *ptep;
for (ptep = ptp; ptep < eptp; ptep++) {
if (iswired(*ptep)) {
wired = 1;
break;
}
}
}
if (!wired) {
pmap_remove_range(p,
pdetova(pdp - p->dirbase),
ptp,
eptp);
{
register int i = ptes_per_vm_page;
register pt_entry_t *pdep = pdp;
do {
*pdep++ = 0;
} while (--i > 0);
}
/* Drop the pmap lock to free the page through the VM system. */
PMAP_READ_UNLOCK(p, spl);
{
register vm_page_t m;
vm_object_lock(pmap_object);
m = vm_page_lookup(pmap_object, pa);
if (m == VM_PAGE_NULL)
panic("pmap_collect: pte page not in object");
vm_page_lock_queues();
vm_page_free(m);
inuse_ptepages_count--;
vm_page_unlock_queues();
vm_object_unlock(pmap_object);
}
PMAP_READ_LOCK(p, spl);
}
}
}
PMAP_READ_UNLOCK(p, spl);
return;
}
#if 0
/* pmap_kernel: superseded by the kernel_pmap global (compiled out). */
pmap_t
pmap_kernel(void)
{
return (kernel_pmap);
}
#endif/* 0 */
#if 0
/*
 * pmap_zero_page: zero a physical page via the physical window,
 * one hardware page at a time (compiled out; a machine-level
 * implementation is used instead).
 */
void
pmap_zero_page(
register vm_offset_t phys)
{
register int i;
assert(phys != vm_page_fictitious_addr);
i = PAGE_SIZE / INTEL_PGBYTES;
phys = intel_pfn(phys);
while (i--)
zero_phys(phys++);
}
#endif/* 0 */
#if 0
/* pmap_copy_page: physical page copy, hardware page at a time (compiled out). */
void
pmap_copy_page(
vm_offset_t src,
vm_offset_t dst)
{
int i;
assert(src != vm_page_fictitious_addr);
assert(dst != vm_page_fictitious_addr);
i = PAGE_SIZE / INTEL_PGBYTES;
while (i--) {
copy_phys(intel_pfn(src), intel_pfn(dst));
src += INTEL_PGBYTES;
dst += INTEL_PGBYTES;
}
}
#endif/* 0 */
/*
 * pmap_pageable: advisory hint that [start, end) can (or cannot) be
 * paged; nothing to do on i386 — wiring is handled via pmap_enter.
 */
void
pmap_pageable(
pmap_t pmap,
vm_offset_t start,
vm_offset_t end,
boolean_t pageable)
{
#ifdef lint
pmap++; start++; end++; pageable++;
#endif
}
/*
 * phys_attribute_clear: clear the given attribute bits (expressed as
 * PTE bits, e.g. PHYS_MODIFIED) both in the per-page attribute byte
 * and in every hardware PTE currently mapping the page.  Write-locks
 * the pmap system since multiple pmaps may be touched.
 */
void
phys_attribute_clear(
vm_offset_t phys,
int bits)
{
pv_entry_t pv_h;
register pv_entry_t pv_e;
register pt_entry_t *pte;
int pai;
register pmap_t pmap;
spl_t spl;
assert(phys != vm_page_fictitious_addr);
if (!valid_page(phys)) {
return;
}
PMAP_WRITE_LOCK(spl);
pai = pa_index(phys);
pv_h = pai_to_pvh(pai);
if (pv_h->pmap != PMAP_NULL) {
/* Walk every mapping of the page and clear the bits in its PTEs. */
for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
pmap = pv_e->pmap;
simple_lock(&pmap->lock);
{
register vm_offset_t va;
va = pv_e->va;
pte = pmap_pte(pmap, va);
#if 0
assert(*pte & INTEL_PTE_VALID);
#endif
PMAP_INVALIDATE_PAGE(pmap, va);
}
{
register int i = ptes_per_vm_page;
do {
*pte++ &= ~bits;
} while (--i > 0);
}
simple_unlock(&pmap->lock);
}
}
pmap_phys_attributes[pai] &= ~bits;
PMAP_WRITE_UNLOCK(spl);
}
/*
 * phys_attribute_test: TRUE if any of the given attribute bits are set
 * for the page — either already latched in the attribute byte, or live
 * in any hardware PTE currently mapping the page.
 */
boolean_t
phys_attribute_test(
vm_offset_t phys,
int bits)
{
pv_entry_t pv_h;
register pv_entry_t pv_e;
register pt_entry_t *pte;
int pai;
register pmap_t pmap;
spl_t spl;
assert(phys != vm_page_fictitious_addr);
if (!valid_page(phys)) {
return (FALSE);
}
PMAP_WRITE_LOCK(spl);
pai = pa_index(phys);
pv_h = pai_to_pvh(pai);
/* Fast path: the bit was already folded into the attribute byte. */
if (pmap_phys_attributes[pai] & bits) {
PMAP_WRITE_UNLOCK(spl);
return (TRUE);
}
if (pv_h->pmap != PMAP_NULL) {
for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
pmap = pv_e->pmap;
simple_lock(&pmap->lock);
{
register vm_offset_t va;
va = pv_e->va;
pte = pmap_pte(pmap, va);
/*
 * NOTE(review): pte is dereferenced below without a null
 * check; this relies on every pv-listed mapping having a
 * live page table — confirm that invariant holds.
 */
#if 0
assert(*pte & INTEL_PTE_VALID);
#endif
}
{
register int i = ptes_per_vm_page;
do {
if (*pte++ & bits) {
simple_unlock(&pmap->lock);
PMAP_WRITE_UNLOCK(spl);
return (TRUE);
}
} while (--i > 0);
}
simple_unlock(&pmap->lock);
}
}
PMAP_WRITE_UNLOCK(spl);
return (FALSE);
}
/*
 * phys_attribute_set: latch attribute bits in the per-page attribute
 * byte (hardware PTEs are left alone — they pick the bits up lazily).
 */
void
phys_attribute_set(
vm_offset_t phys,
int bits)
{
int spl;
assert(phys != vm_page_fictitious_addr);
if (!valid_page(phys)) {
return;
}
PMAP_WRITE_LOCK(spl);
pmap_phys_attributes[pa_index(phys)] |= bits;
PMAP_WRITE_UNLOCK(spl);
}
/* Mark a physical page as modified (e.g. after a DMA write). */
void pmap_set_modify(
register vm_offset_t phys)
{
phys_attribute_set(phys, PHYS_MODIFIED);
}
/* Clear the modify status (attribute byte and all mapping PTEs). */
void
pmap_clear_modify(
register vm_offset_t phys)
{
phys_attribute_clear(phys, PHYS_MODIFIED);
}
/* TRUE if the page has been modified since the last clear. */
boolean_t
pmap_is_modified(
register vm_offset_t phys)
{
return (phys_attribute_test(phys, PHYS_MODIFIED));
}
/* Clear the reference status (attribute byte and all mapping PTEs). */
void
pmap_clear_reference(
vm_offset_t phys)
{
phys_attribute_clear(phys, PHYS_REFERENCED);
}
/* TRUE if the page has been referenced since the last clear. */
boolean_t
pmap_is_referenced(
vm_offset_t phys)
{
return (phys_attribute_test(phys, PHYS_REFERENCED));
}
/*
 * pmap_modify_pages: force every valid PTE in [s, e) to writable and
 * modified (used to pre-dirty pages before DMA into them).
 */
void
pmap_modify_pages(
pmap_t map,
vm_offset_t s,
vm_offset_t e)
{
spl_t spl;
register pt_entry_t *pde;
register pt_entry_t *spte, *epte;
vm_offset_t l;
if (map == PMAP_NULL)
return;
PMAP_READ_LOCK(map, spl);
pde = pmap_pde(map, s);
/* The "s &&" guard also stops the walk if s wraps around to 0. */
while (s && s < e) {
l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
if (l > e)
l = e;
if (*pde & INTEL_PTE_VALID) {
spte = (pt_entry_t *)ptetokv(*pde);
/*
 * l == 0 means this pde's span wrapped past the top of the
 * address space: cover through the end of the page table.
 */
if (l) {
spte = &spte[ptenum(s)];
epte = &spte[intel_btop(l-s)];
} else {
epte = &spte[intel_btop(PDE_MAPPED_SIZE)];
spte = &spte[ptenum(s)];
}
while (spte < epte) {
if (*spte & INTEL_PTE_VALID) {
*spte |= (INTEL_PTE_MOD | INTEL_PTE_WRITE);
}
spte++;
}
}
s = l;
pde++;
}
PMAP_FLUSH_TLBS();
PMAP_READ_UNLOCK(map, spl);
}
/* i386 has coherent instruction fetch: no explicit icache invalidate needed. */
void
invalidate_icache(vm_offset_t addr, unsigned cnt, int phys)
{
return;
}
/* Data caches are coherent with DMA on this platform: nothing to flush. */
void
flush_dcache(vm_offset_t addr, unsigned count, int phys)
{
return;
}
#if NCPUS > 1
/*
 * pmap_wait_for_clear: spin until neither this cpu's current pmap nor
 * the kernel pmap is locked, so the interrupt-level TLB operation does
 * not run while a remote cpu is mid-update on those page tables.
 */
void inline
pmap_wait_for_clear()
{
register int my_cpu;
spl_t s;
register pmap_t my_pmap;
mp_disable_preemption();
my_cpu = cpu_number();
my_pmap = real_pmap[my_cpu];
if (!(my_pmap && pmap_in_use(my_pmap, my_cpu)))
my_pmap = kernel_pmap;
s = splhigh();
/* Busy-wait on the raw interlocks; no acquisition, just quiescence. */
while (*(volatile hw_lock_t)&my_pmap->lock.interlock ||
*(volatile hw_lock_t)&kernel_pmap->lock.interlock) {
continue;
}
splx(s);
mp_enable_preemption();
}
/* IPI handler for MP_TLB_FLUSH: flush this cpu's TLB once it is safe. */
void
pmap_flush_tlb_interrupt(void) {
pmap_wait_for_clear();
flush_tlb();
}
/* IPI handler for MP_TLB_RELOAD: reload CR3 with the kernel directory. */
void
pmap_reload_tlb_interrupt(void) {
pmap_wait_for_clear();
set_cr3(kernel_pmap->pdirbase);
}
#endif
#if MACH_KDB
extern void db_show_page(vm_offset_t pa);
void
db_show_page(vm_offset_t pa)
{
pv_entry_t pv_h;
int pai;
char attr;
pai = pa_index(pa);
pv_h = pai_to_pvh(pai);
attr = pmap_phys_attributes[pai];
printf("phys page %x ", pa);
if (attr & PHYS_MODIFIED)
printf("modified, ");
if (attr & PHYS_REFERENCED)
printf("referenced, ");
if (pv_h->pmap || pv_h->next)
printf(" mapped at\n");
else
printf(" not mapped\n");
for (; pv_h; pv_h = pv_h->next)
if (pv_h->pmap)
printf("%x in pmap %x\n", pv_h->va, pv_h->pmap);
}
#endif
#if MACH_KDB
void db_kvtophys(vm_offset_t);
void db_show_vaddrs(pt_entry_t *);
/* Kernel debugger: print the physical address behind a kernel virtual one. */
void
db_kvtophys(
vm_offset_t vaddr)
{
db_printf("0x%x", kvtophys(vaddr));
}
/*
 * Kernel debugger: dump every valid mapping reachable from the given
 * page directory (defaults to the kernel's when passed 0).
 */
void
db_show_vaddrs(
pt_entry_t *dirbase)
{
pt_entry_t *ptep, *pdep, tmp;
int x, y, pdecnt, ptecnt;
if (dirbase == 0) {
dirbase = kernel_pmap->dirbase;
}
if (dirbase == 0) {
db_printf("need a dirbase...\n");
return;
}
dirbase = (pt_entry_t *) ((unsigned long) dirbase & ~INTEL_OFFMASK);
db_printf("dirbase: 0x%x\n", dirbase);
pdecnt = ptecnt = 0;
pdep = &dirbase[0];
for (y = 0; y < NPDES; y++, pdep++) {
if (((tmp = *pdep) & INTEL_PTE_VALID) == 0) {
continue;
}
pdecnt++;
ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK);
db_printf("dir[%4d]: 0x%x\n", y, *pdep);
for (x = 0; x < NPTES; x++, ptep++) {
if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
continue;
}
ptecnt++;
/* va reconstructed from directory (y) and table (x) indices. */
db_printf(" tab[%4d]: 0x%x, va=0x%x, pa=0x%x\n",
x,
*ptep,
(y << 22) | (x << 12),
*ptep & ~INTEL_OFFMASK);
}
}
db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt);
}
#endif
#include <mach_vm_debug.h>
#if MACH_VM_DEBUG
#include <vm/vm_debug.h>
/*
 * pmap_list_resident_pages: debug interface to enumerate resident
 * pages of a pmap; not implemented on i386 (always reports zero).
 */
int
pmap_list_resident_pages(
register pmap_t pmap,
register vm_offset_t *listp,
register int space)
{
return 0;
}
#endif
#ifdef MACH_BSD
/*
 * pmap_movepage: move kernel mappings, one page at a time, from 'from'
 * to 'to'.  Each source PTE is snapshotted into saved_pte while the
 * pmap lock is held; all later uses go through the snapshot instead of
 * re-reading *pte after the lock has been dropped (the previous code
 * re-read *pte there, racing with concurrent PTE updates).
 */
void
pmap_movepage(unsigned long from, unsigned long to, vm_size_t size)
{
	spl_t spl;
	pt_entry_t *pte, saved_pte;

	while (size > 0) {
		/* Snapshot the source PTE under the lock. */
		PMAP_READ_LOCK(kernel_pmap, spl);
		pte = pmap_pte(kernel_pmap, from);
		if (pte == NULL)
			panic("pmap_pagemove from pte NULL");
		saved_pte = *pte;
		PMAP_READ_UNLOCK(kernel_pmap, spl);

		/* Re-enter the mapping at 'to', preserving the wired state. */
		pmap_enter(kernel_pmap, to, i386_trunc_page(saved_pte),
			VM_PROT_READ|VM_PROT_WRITE, 0,
			saved_pte & INTEL_PTE_WIRED);

		pmap_remove(kernel_pmap, from, from+PAGE_SIZE);

		/* Overwrite the new PTE with the exact saved bits. */
		PMAP_READ_LOCK(kernel_pmap, spl);
		pte = pmap_pte(kernel_pmap, to);
		if (pte == NULL)
			panic("pmap_pagemove 'to' pte NULL");
		*pte = saved_pte;
		PMAP_READ_UNLOCK(kernel_pmap, spl);

		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	PMAP_FLUSH_TLBS();
}
/* Return video console info to user space (body not implemented on i386). */
kern_return_t bmapvideo(vm_offset_t *info);
kern_return_t bmapvideo(vm_offset_t *info) {
extern struct vc_info vinfo;
#ifdef NOTIMPLEMENTED
(void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info));
#endif
return KERN_SUCCESS;
}
/* Map a physical block into the current task (body not implemented on i386). */
kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
#ifdef NOTIMPLEMENTED
pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr);
#endif
return KERN_SUCCESS;
}
/* Remove a bmapmap mapping (body not implemented on i386). */
kern_return_t bmapmapr(vm_offset_t va);
kern_return_t bmapmapr(vm_offset_t va) {
#ifdef NOTIMPLEMENTED
mapping_remove(current_act()->task->map->pmap, va);
#endif
return KERN_SUCCESS;
}
#endif
boolean_t
coredumpok(vm_map_t map, vm_offset_t va)
{
	pt_entry_t *ptep;

	/* Unmapped pages cannot be included in a core dump. */
	ptep = pmap_pte(map->pmap, va);
	if (ptep == 0)
		return FALSE;
	/*
	 * A page that is both wired and uncacheable is treated as device
	 * memory: refuse to read it during the dump.
	 */
	return ((*ptep & (INTEL_PTE_NCACHE|INTEL_PTE_WIRED))
		!= (INTEL_PTE_NCACHE|INTEL_PTE_WIRED));
}