#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1
#ifndef ASSEMBLER
#include <platforms.h>
#include <mp_v1_1.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread_act.h>
#include <kern/lock.h>
/*
 * Machine-independent "intel_*" names mapped onto their i386
 * implementations (page size, byte/page conversion, page rounding).
 */
#define INTEL_PGBYTES I386_PGBYTES
#define INTEL_PGSHIFT I386_PGSHIFT
#define intel_btop(x) i386_btop(x)
#define intel_ptob(x) i386_ptob(x)
#define intel_round_page(x) i386_round_page(x)
#define intel_trunc_page(x) i386_trunc_page(x)
#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
#define round_intel_to_vm(x) round_i386_to_vm(x)
#define vm_to_intel(x) vm_to_i386(x)

/* A page-directory or page-table entry is a single 32-bit word. */
typedef unsigned int pt_entry_t;
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#endif
#define INTEL_OFFMASK 0xfff
#define PDESHIFT 22
#define PDEMASK 0x3ff
#define PTESHIFT 12
#define PTEMASK 0x3ff
#define VM_WIMG_DEFAULT VM_MEM_COHERENT
#define kvtolinear(a) ((a)+LINEAR_KERNEL_ADDRESS)
#define pdenum(pmap, a) (((((pmap) == kernel_pmap) ? \
kvtolinear(a) : (a)) \
>> PDESHIFT) & PDEMASK)
#define pdetova(a) ((vm_offset_t)(a) << PDESHIFT)
#define ptenum(a) (((a) >> PTESHIFT) & PTEMASK)
#define NPTES (intel_ptob(1)/sizeof(pt_entry_t))
#define NPDES (intel_ptob(1)/sizeof(pt_entry_t))
#define INTEL_PTE_VALID 0x00000001
#define INTEL_PTE_WRITE 0x00000002
#define INTEL_PTE_USER 0x00000004
#define INTEL_PTE_WTHRU 0x00000008
#define INTEL_PTE_NCACHE 0x00000010
#define INTEL_PTE_REF 0x00000020
#define INTEL_PTE_MOD 0x00000040
#define INTEL_PTE_WIRED 0x00000200
#define INTEL_PTE_PFN 0xfffff000
#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
#define PMAP_DEFAULT_CACHE 0
#define PMAP_INHIBIT_CACHE 1
#define PMAP_GUARDED_CACHE 2
#define PMAP_ACTIVATE_CACHE 4
#define PMAP_NO_GUARD_CACHE 8
#define ptetokv(a) (phystokv(pte_to_pa(a)))
#ifndef ASSEMBLER
/* One bit per cpu; volatile because other cpus set/clear bits in it. */
typedef volatile long cpu_set;

/*
 * Machine-dependent physical map: the i386 page directory plus the
 * bookkeeping shared with the machine-independent VM layer.
 */
struct pmap {
pt_entry_t *dirbase;	/* page directory; presumably the kernel-virtual
			 * view (pdirbase is what reaches CR3) */
vm_offset_t pdirbase;	/* value loaded into CR3 by set_dirbase() */
int ref_count;		/* reference count */
decl_simple_lock_data(,lock)	/* protects this pmap */
struct pmap_statistics stats;	/* map statistics */
cpu_set cpus_using;	/* cpus currently running on this pmap */
};

/* Per-cpu cache of the pmap whose directory is live in CR3. */
extern struct pmap *real_pmap[NCPUS];
#include <i386/proc_reg.h>
#if NCPUS > 1
/* Atomically add/remove a cpu to/from a pmap's cpus_using set. */
#define PMAP_CPU_SET(pmap, my_cpu) i_bit_set(my_cpu, &((pmap)->cpus_using))
#define PMAP_CPU_CLR(pmap, my_cpu) i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else
/* Uniprocessor: cpus_using degenerates to a boolean. */
#define PMAP_CPU_SET(pmap,my_cpu) (pmap)->cpus_using = TRUE
#define PMAP_CPU_CLR(pmap,my_cpu) (pmap)->cpus_using = FALSE
#endif
/*
 * set_dirbase(mypmap, my_cpu): make "mypmap" the translation context of
 * this cpu.  real_pmap[my_cpu] caches the pmap whose directory is live
 * in CR3, so CR3 is only reloaded when that cache actually changes.
 * Once some pmap is cached, switching *to* the kernel pmap is a no-op
 * (second arm skips it) -- NOTE(review): presumably because the kernel
 * is mapped by every directory; confirm against pmap.c.
 */
#define set_dirbase(mypmap, my_cpu) { \
struct pmap **ppmap = &real_pmap[my_cpu]; \
vm_offset_t pdirbase = (mypmap)->pdirbase; \
 \
/* Fix: *ppmap is a struct pmap *; the former "(vm_offset_t)NULL" \
 * made this an invalid pointer/integer comparison. */ \
if (*ppmap == (struct pmap *) NULL) { \
*ppmap = (mypmap); \
PMAP_CPU_SET((mypmap), my_cpu); \
set_cr3(pdirbase); \
} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap ) { \
if (*ppmap != kernel_pmap) \
PMAP_CPU_CLR(*ppmap, my_cpu); \
*ppmap = (mypmap); \
PMAP_CPU_SET((mypmap), my_cpu); \
set_cr3(pdirbase); \
} \
/* On exit this cpu runs either the requested pmap or the kernel's. */ \
assert((mypmap) == *ppmap || (mypmap) == kernel_pmap); \
}
#if NCPUS > 1
/* Sets of cpus currently active / idle (one bit per cpu). */
extern cpu_set cpus_active;
extern cpu_set cpus_idle;
/* Pending-update machinery: apply queued updates for a pmap, and the
 * interrupt handler that drives them.  NOTE(review): presumably the
 * TLB-shootdown path -- confirm in pmap.c. */
extern void process_pmap_updates(struct pmap *pmap);
extern void pmap_update_interrupt(void);
#endif

/* Physical <-> kernel-virtual address translation.  The parentheses
 * around the names keep any same-named macros from expanding here. */
extern vm_offset_t (phystokv)(
vm_offset_t pa);
extern vm_offset_t (kvtophys)(
vm_offset_t addr);

/* Return the page-table entry for "addr" in "pmap". */
extern pt_entry_t *pmap_pte(
struct pmap *pmap,
vm_offset_t addr);

/* Map the physical range [start, end) at virtual address "virt" with
 * protection "prot"; returns the next free virtual address. */
extern vm_offset_t pmap_map(
vm_offset_t virt,
vm_offset_t start,
vm_offset_t end,
vm_prot_t prot);

/* Like pmap_map; NOTE(review): "_bd" variant presumably bypasses the
 * normal allocation path (boot/device use) -- confirm in pmap.c. */
extern vm_offset_t pmap_map_bd(
vm_offset_t virt,
vm_offset_t start,
vm_offset_t end,
vm_prot_t prot);

/* Bootstrap the pmap module, given the kernel's load address. */
extern void pmap_bootstrap(
vm_offset_t load_start);

/* True iff "pa" is a physical page the pmap module manages. */
extern boolean_t pmap_valid_page(
vm_offset_t pa);

/* Fill "listp" (capacity "space") with the pmap's resident pages;
 * returns the number of entries. */
extern int pmap_list_resident_pages(
struct pmap *pmap,
vm_offset_t *listp,
int space);

/* TLB and cache maintenance primitives. */
extern void flush_tlb(void);
extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
#if NCPUS > 1
#include <kern/spl.h>
/*
 * Make the kernel pmap active on this cpu.  The cpu leaves the active
 * set before taking the pmap lock -- NOTE(review): presumably so remote
 * pmap-update requests need not wait on this cpu while it spins on the
 * lock; confirm against pmap_update_interrupt().
 */
#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
 \
/* Drop out of the active set while waiting for the lock. */ \
i_bit_clear((my_cpu), &cpus_active); \
 \
/* The pmap lock serializes us against pmap updates. */ \
simple_lock(&kernel_pmap->lock); \
 \
/* Record this cpu as a user of the kernel pmap... */ \
i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
 \
/* ...then rejoin the active set while still holding the lock. */ \
i_bit_set((my_cpu), &cpus_active); \
 \
simple_unlock(&kernel_pmap->lock); \
}
/* Stop advertising this cpu as a user of the kernel pmap. */
#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
 \
i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
}
#define PMAP_ACTIVATE_MAP(map, my_cpu) { \
register struct pmap *tpmap; \
\
tpmap = vm_map_pmap(map); \
if (tpmap == kernel_pmap) { \
\
set_dirbase(kernel_pmap, my_cpu); \
} \
else { \
\
i_bit_clear((my_cpu), &cpus_active); \
\
\
simple_lock(&tpmap->lock); \
\
\
set_dirbase(tpmap, my_cpu); \
\
\
i_bit_set((my_cpu), &cpus_active); \
\
simple_unlock(&tpmap->lock); \
} \
}
#define PMAP_DEACTIVATE_MAP(map, my_cpu)
/*
 * Activate/deactivate a thread's address map on this cpu at splhigh, so
 * the switch cannot be interrupted midway.  No semicolon follows the
 * inner macro: it expands to a braced block (or to nothing).
 */
#define PMAP_ACTIVATE_USER(th, my_cpu) { \
spl_t spl; \
 \
spl = splhigh(); \
PMAP_ACTIVATE_MAP(th->map, my_cpu) \
splx(spl); \
}
#define PMAP_DEACTIVATE_USER(th, my_cpu) { \
spl_t spl; \
 \
spl = splhigh(); \
PMAP_DEACTIVATE_MAP(th->map, my_cpu) \
splx(spl); \
}
/*
 * Thread context switch: retarget this cpu from old_th's map to
 * new_th's.  Nothing to do when both threads share a map (the
 * same-task case).
 */
#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \
spl_t spl; \
 \
if (old_th->map != new_th->map) { \
spl = splhigh(); \
PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \
PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \
splx(spl); \
} \
}
/* Replace th's map with new_map and make it live on this cpu. */
#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
spl_t spl; \
 \
spl = splhigh(); \
PMAP_DEACTIVATE_MAP(th->map, my_cpu); \
th->map = new_map; \
PMAP_ACTIVATE_MAP(th->map, my_cpu); \
splx(spl); \
}
#if MP_V1_1
#define set_led(cpu)
#define clear_led(cpu)
#endif
#define MARK_CPU_IDLE(my_cpu) { \
\
int s = splhigh(); \
i_bit_set((my_cpu), &cpus_idle); \
i_bit_clear((my_cpu), &cpus_active); \
splx(s); \
set_led(my_cpu); \
}
#define MARK_CPU_ACTIVE(my_cpu) { \
\
int s = splhigh(); \
\
i_bit_clear((my_cpu), &cpus_idle); \
\
\
i_bit_set((my_cpu), &cpus_active); \
splx(s); \
clear_led(my_cpu); \
}
#else
/*
 * Uniprocessor versions: no cpu sets, spl protection, or locking
 * needed; cpus_using degenerates to a boolean.
 */
#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
kernel_pmap->cpus_using = TRUE; \
}
#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
kernel_pmap->cpus_using = FALSE; \
}
/* Load the map's directory into CR3 if not already there. */
#define PMAP_ACTIVATE_MAP(map, my_cpu) \
set_dirbase(vm_map_pmap(map), my_cpu)
/* Deactivation is lazy: the old directory stays live until replaced. */
#define PMAP_DEACTIVATE_MAP(map, my_cpu)
#define PMAP_ACTIVATE_USER(th, my_cpu) \
PMAP_ACTIVATE_MAP(th->map, my_cpu)
#define PMAP_DEACTIVATE_USER(th, my_cpu) \
PMAP_DEACTIVATE_MAP(th->map, my_cpu)
/* Switch maps only when the two threads' maps actually differ. */
#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \
if (old_th->map != new_th->map) { \
PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \
PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \
} \
}
/* Replace th's map with new_map and make it live. */
#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
PMAP_DEACTIVATE_MAP(th->map, my_cpu); \
th->map = new_map; \
PMAP_ACTIVATE_MAP(th->map, my_cpu); \
}
#endif
/* No machine-dependent per-thread pmap context on i386. */
#define PMAP_CONTEXT(pmap, thread)

/* True iff VA lies within the kernel's virtual address range. */
#define pmap_kernel_va(VA) \
(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)

/* Page frame number <-> physical address. */
#define pmap_phys_address(frame) ((vm_offset_t) (intel_ptob(frame)))
#define pmap_phys_to_frame(phys) ((int) (intel_btop(phys)))

/* pmap_copy is a no-op here.  NOTE(review): presumably mappings are
 * recreated on demand rather than copied -- confirm in vm_map code. */
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)

/* Attribute and cache-sync operations are unimplemented on this
 * machine; callers get KERN_INVALID_ADDRESS unconditionally. */
#define pmap_attribute(pmap,addr,size,attr,value) \
(KERN_INVALID_ADDRESS)
#define pmap_attribute_cache_sync(addr,size,attr,value) \
(KERN_INVALID_ADDRESS)
#define pmap_sync_caches_phys(pa) \
(KERN_INVALID_ADDRESS)
#endif
#endif