#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#ifdef KERNEL_PRIVATE
/*
 * copypv(): copy `size` bytes between `source` and `sink`; the cppv*
 * flags OR'ed into `which` select how each address is interpreted and
 * which ref/mod bookkeeping is skipped.
 */
extern kern_return_t copypv(
addr64_t source,
addr64_t sink,
unsigned int size,
int which);
/*
 * copypv() `which` flags.  Each option is defined twice: as a mask
 * (cppvXxx) and as its big-endian bit number (cppvXxxb), related by
 * mask == 1 << (31 - bit).  Meanings below are inferred from the
 * names — confirm against the copypv() implementation.
 */
#define cppvPsnk 1 /* sink address is physical */
#define cppvPsnkb 31
#define cppvPsrc 2 /* source address is physical */
#define cppvPsrcb 30
#define cppvFsnk 4 /* flush/sync sink */
#define cppvFsnkb 29
#define cppvFsrc 8 /* flush/sync source */
#define cppvFsrcb 28
#define cppvNoModSnk 16 /* don't set the sink's modified bit */
#define cppvNoModSnkb 27
#define cppvNoRefSrc 32 /* don't set the source's referenced bit */
#define cppvNoRefSrcb 26
#define cppvKmap 64 /* virtual addresses are in the kernel map */
#define cppvKmapb 25
/* Whether any physical page in [first, last] is a managed page. */
extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);
#ifdef MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <machine/pmap.h>
/*
 * Bootstrap interfaces, used while the VM system is initializing.
 */
extern void *pmap_steal_memory(vm_size_t size); /* early permanent allocation */
extern unsigned int pmap_free_pages(void); /* pages still available to hand out */
extern void pmap_startup(
vm_offset_t *startp,
vm_offset_t *endp);
extern void pmap_init(void); /* finish pmap module initialization */
extern void mapping_adjust(void);
extern void mapping_free_prime(void);
/*
 * Generic page-supply hooks; a machine that defines MACHINE_PAGES
 * supplies its own versions instead.
 */
#ifndef MACHINE_PAGES
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum);
extern void pmap_virtual_space(
vm_offset_t *virtual_start,
vm_offset_t *virtual_end);
#endif /* MACHINE_PAGES */
/*
 * pmap lifetime: creation, reference counting, destruction, switch.
 */
extern pmap_t pmap_create(
ledger_t ledger,
vm_map_size_t size,
boolean_t is_64bit);
#if __x86_64__
/* x86_64 variant taking PMAP_CREATE_* flags (defined later in this file). */
extern pmap_t pmap_create_options(
ledger_t ledger,
vm_map_size_t size,
int flags);
#endif
/* Parenthesized so the pmap_kernel() macro below does not expand here. */
extern pmap_t (pmap_kernel)(void);
extern void pmap_reference(pmap_t pmap); /* take a reference */
extern void pmap_destroy(pmap_t pmap); /* release a reference */
extern void pmap_switch(pmap_t);
#if MACH_ASSERT
/* Debug builds only: label a pmap with its owning process. */
extern void pmap_set_process(pmap_t pmap,
int pid,
char *procname);
#endif /* MACH_ASSERT */
/*
 * Enter/remove/protect operations on individual mappings and pages.
 */
/* Map physical page `pn` at virtual address `v` with protection `prot`. */
extern kern_return_t pmap_enter(
pmap_t pmap,
vm_map_offset_t v,
ppnum_t pn,
vm_prot_t prot,
vm_prot_t fault_type,
unsigned int flags,
boolean_t wired);
/* As pmap_enter(), plus PMAP_OPTIONS_* bits and an option-specific arg. */
extern kern_return_t pmap_enter_options(
pmap_t pmap,
vm_map_offset_t v,
ppnum_t pn,
vm_prot_t prot,
vm_prot_t fault_type,
unsigned int flags,
boolean_t wired,
unsigned int options,
void *arg);
/* Remove any mapping of physical page `pn` from `pmap`. */
extern void pmap_remove_some_phys(
pmap_t pmap,
ppnum_t pn);
extern void pmap_lock_phys_page(
ppnum_t pn);
extern void pmap_unlock_phys_page(
ppnum_t pn);
/* Lower the protection of mappings of physical page `phys`. */
extern void pmap_page_protect(
ppnum_t phys,
vm_prot_t prot);
extern void pmap_page_protect_options(
ppnum_t phys,
vm_prot_t prot,
unsigned int options,
void *arg);
/*
 * Physical-page zero/copy primitives.  Names are parenthesized so a
 * machine-dependent header may also supply macro implementations.
 */
extern void (pmap_zero_page)(
ppnum_t pn);
extern void (pmap_zero_part_page)(
ppnum_t pn,
vm_offset_t offset,
vm_size_t len);
extern void (pmap_copy_page)(
ppnum_t src,
ppnum_t dest);
extern void (pmap_copy_part_page)(
ppnum_t src,
vm_offset_t src_offset,
ppnum_t dst,
vm_offset_t dst_offset,
vm_size_t len);
/* Virtual source -> part of a physical page (per the signature). */
extern void (pmap_copy_part_lpage)(
vm_offset_t src,
ppnum_t dst,
vm_offset_t dst_offset,
vm_size_t len);
/* Part of a physical page -> virtual destination (per the signature). */
extern void (pmap_copy_part_rpage)(
ppnum_t src,
vm_offset_t src_offset,
vm_offset_t dst,
vm_size_t len);
/* Break mappings of `phys`; returns machine-dependent status bits. */
extern unsigned int (pmap_disconnect)(
ppnum_t phys);
extern unsigned int (pmap_disconnect_options)(
ppnum_t phys,
unsigned int options,
void *arg);
extern kern_return_t (pmap_attribute_cache_sync)(
ppnum_t pn,
vm_size_t size,
vm_machine_attribute_t attribute,
vm_machine_attribute_val_t* value);
extern unsigned int (pmap_cache_attributes)(
ppnum_t pn);
/* Set the cacheability (WIMG) attributes for a physical page. */
extern void pmap_set_cache_attributes(
ppnum_t,
unsigned int);
#if defined(__arm__) || defined(__arm64__)
/* ARM-only batched form; see PMAP_BATCH_SET_CACHE_ATTR below for usage. */
extern boolean_t pmap_batch_set_cache_attributes(
ppnum_t,
unsigned int,
unsigned int,
unsigned int,
boolean_t,
unsigned int*);
#endif
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
/* NOTE(review): presumably TRUE if `pn` is genuinely unmapped — confirm. */
extern boolean_t pmap_verify_free(ppnum_t pn);
/* Per-pmap page accounting queries. */
extern int (pmap_compressed)(pmap_t pmap);
extern int (pmap_resident_count)(pmap_t pmap);
extern int (pmap_resident_max)(pmap_t pmap);
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap);
#endif
/* Copy mappings in [source_va, source_va+size) from `source` to `dest`. */
extern void (pmap_copy)(
pmap_t dest,
pmap_t source,
vm_map_offset_t dest_va,
vm_map_size_t size,
vm_map_offset_t source_va);
/* Get/set a machine attribute over a virtual range. */
extern kern_return_t (pmap_attribute)(
pmap_t pmap,
vm_map_offset_t va,
vm_map_size_t size,
vm_machine_attribute_t attribute,
vm_machine_attribute_val_t* value);
/*
 * Context-activation hooks.  If the machine-dependent pmap header does
 * not define PMAP_ACTIVATE/PMAP_DEACTIVATE, the *_USER and *_KERNEL
 * forms collapse to no-ops; otherwise they forward to the machine
 * hooks, with the user forms skipping the kernel pmap.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else
#define PMAP_ACTIVATE_USER(thr, cpu) { \
pmap_t pmap; \
\
pmap = (thr)->map->pmap; \
if (pmap != pmap_kernel()) \
PMAP_ACTIVATE(pmap, (thr), (cpu)); \
}
#endif /* PMAP_ACTIVATE */
#endif /* PMAP_ACTIVATE_USER */
#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else
#define PMAP_DEACTIVATE_USER(thr, cpu) { \
pmap_t pmap; \
\
pmap = (thr)->map->pmap; \
if ((pmap) != pmap_kernel()) \
PMAP_DEACTIVATE(pmap, (thr), (cpu)); \
}
#endif /* PMAP_DEACTIVATE */
#endif /* PMAP_DEACTIVATE_USER */
#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else
#define PMAP_ACTIVATE_KERNEL(cpu) \
PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif /* PMAP_ACTIVATE */
#endif /* PMAP_ACTIVATE_KERNEL */
#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else
#define PMAP_DEACTIVATE_KERNEL(cpu) \
PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif /* PMAP_DEACTIVATE */
#endif /* PMAP_DEACTIVATE_KERNEL */
#ifndef PMAP_ENTER
/*
 * PMAP_ENTER: convenience wrapper around pmap_enter_options() for a
 * vm_page_t.  Validates the page (PMAP_ENTER_CHECK), derives
 * PMAP_OPTIONS_INTERNAL / PMAP_OPTIONS_REUSABLE from the page and its
 * object, and stores the kern_return_t into `result`.
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
flags, wired, result) \
MACRO_BEGIN \
pmap_t __pmap = (pmap); \
vm_page_t __page = (page); \
int __options = 0; \
vm_object_t __obj; \
\
PMAP_ENTER_CHECK(__pmap, __page) \
__obj = VM_PAGE_OBJECT(__page); \
if (__obj->internal) { \
__options |= PMAP_OPTIONS_INTERNAL; \
} \
if (__page->reusable || __obj->all_reusable) { \
__options |= PMAP_OPTIONS_REUSABLE; \
} \
result = pmap_enter_options(__pmap, \
(virtual_address), \
VM_PAGE_GET_PHYS_PAGE(__page), \
(protection), \
(fault_type), \
(flags), \
(wired), \
__options, \
NULL); \
MACRO_END
#endif /* PMAP_ENTER */
#ifndef PMAP_ENTER_OPTIONS
/*
 * PMAP_ENTER_OPTIONS: like PMAP_ENTER, but the caller also passes its
 * own PMAP_OPTIONS_* bits, which are OR'ed with the INTERNAL/REUSABLE
 * bits derived from the page and its object.
 */
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \
fault_type, flags, wired, options, result) \
MACRO_BEGIN \
pmap_t __pmap = (pmap); \
vm_page_t __page = (page); \
int __extra_options = 0; \
vm_object_t __obj; \
\
PMAP_ENTER_CHECK(__pmap, __page) \
__obj = VM_PAGE_OBJECT(__page); \
if (__obj->internal) { \
__extra_options |= PMAP_OPTIONS_INTERNAL; \
} \
if (__page->reusable || __obj->all_reusable) { \
__extra_options |= PMAP_OPTIONS_REUSABLE; \
} \
result = pmap_enter_options(__pmap, \
(virtual_address), \
VM_PAGE_GET_PHYS_PAGE(__page), \
(protection), \
(fault_type), \
(flags), \
(wired), \
(options) | __extra_options, \
NULL); \
MACRO_END
#endif /* PMAP_ENTER_OPTIONS */
#ifndef PMAP_SET_CACHE_ATTR
/*
 * PMAP_SET_CACHE_ATTR: apply `cache_attr` to one page immediately
 * unless the caller is batching (batch_pmap_op true, handled by
 * PMAP_BATCH_SET_CACHE_ATTR), and record on the object that a cache
 * attribute was set.
 */
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \
MACRO_BEGIN \
if (!batch_pmap_op) { \
pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
object->set_cache_attr = TRUE; \
} \
MACRO_END
#endif /* PMAP_SET_CACHE_ATTR */
#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if defined(__arm__) || defined(__arm64__)
/*
 * ARM variant: first does a dry run (doit=FALSE) over all pages to see
 * whether the batched path can be used; if any page refuses, falls back
 * to per-page pmap_set_cache_attributes().  Otherwise a second pass
 * (doit=TRUE) applies the attributes in batch.
 */
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \
cache_attr, num_pages, batch_pmap_op) \
MACRO_BEGIN \
if ((batch_pmap_op)) { \
unsigned int __page_idx=0; \
unsigned int res=0; \
boolean_t batch=TRUE; \
while (__page_idx < (num_pages)) { \
if (!pmap_batch_set_cache_attributes( \
user_page_list[__page_idx].phys_addr, \
(cache_attr), \
(num_pages), \
(__page_idx), \
FALSE, \
(&res))) { \
batch = FALSE; \
break; \
} \
__page_idx++; \
} \
__page_idx=0; \
res=0; \
while (__page_idx < (num_pages)) { \
if (batch) \
(void)pmap_batch_set_cache_attributes( \
user_page_list[__page_idx].phys_addr, \
(cache_attr), \
(num_pages), \
(__page_idx), \
TRUE, \
(&res)); \
else \
pmap_set_cache_attributes( \
user_page_list[__page_idx].phys_addr, \
(cache_attr)); \
__page_idx++; \
} \
(object)->set_cache_attr = TRUE; \
} \
MACRO_END
#else
/*
 * Generic variant: no batched primitive, so just apply the attribute
 * page by page and mark the object.
 */
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \
cache_attr, num_pages, batch_pmap_op) \
MACRO_BEGIN \
if ((batch_pmap_op)) { \
unsigned int __page_idx=0; \
while (__page_idx < (num_pages)) { \
pmap_set_cache_attributes( \
user_page_list[__page_idx].phys_addr, \
(cache_attr)); \
__page_idx++; \
} \
(object)->set_cache_attr = TRUE; \
} \
MACRO_END
#endif /* __arm__ || __arm64__ */
#endif /* PMAP_BATCH_SET_CACHE_ATTR */
/*
 * PMAP_ENTER_CHECK: panic if the page being mapped is marked in error.
 * Deliberately a bare block, not do{}while(0): PMAP_ENTER and
 * PMAP_ENTER_OPTIONS above invoke it without a trailing semicolon.
 */
#define PMAP_ENTER_CHECK(pmap, page) \
{ \
if ((page)->error) { \
panic("VM page %p should not have an error\n", \
(page)); \
} \
}
/*
 * Context for accumulating deferred TLB-flush work; initialized with
 * pmap_flush_context_init() and drained with pmap_flush() (see below).
 */
struct pfc {
long pfc_cpus; /* NOTE(review): looks like a bitmask of CPUs to flush — confirm */
long pfc_invalid_global;
};
typedef struct pfc pmap_flush_context;
/*
 * Referenced/modified bit maintenance for physical pages.
 */
extern void pmap_clear_reference(ppnum_t pn);
extern boolean_t (pmap_is_referenced)(ppnum_t pn);
extern void pmap_set_modify(ppnum_t pn);
extern void pmap_clear_modify(ppnum_t pn);
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Combined ref/mod query and clear; bits are the VM_MEM_* masks below. */
extern unsigned int pmap_get_refmod(ppnum_t pn);
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED 0x01 /* page has been modified */
#define VM_MEM_REFERENCED 0x02 /* page has been referenced */
extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
/* Change protection on the virtual range [s, e) in `map`. */
extern void pmap_protect(
pmap_t map,
vm_map_offset_t s,
vm_map_offset_t e,
vm_prot_t prot);
extern void pmap_protect_options(
pmap_t map,
vm_map_offset_t s,
vm_map_offset_t e,
vm_prot_t prot,
unsigned int options,
void *arg);
/* Hint whether a range's mappings may be paged out. */
extern void (pmap_pageable)(
pmap_t pmap,
vm_map_offset_t start,
vm_map_offset_t end,
boolean_t pageable);
/*
 * Nested pmaps: graft one pmap's translations into another (used for
 * sharing translations between maps).  Nesting granularity is bounded
 * by the min/max sizes below.
 */
extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;
extern kern_return_t pmap_nest(pmap_t,
pmap_t,
addr64_t,
addr64_t,
uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
addr64_t,
uint64_t);
#define PMAP_UNNEST_CLEAN 1
extern kern_return_t pmap_unnest_options(pmap_t,
addr64_t,
uint64_t,
unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
/* NOTE(review): presumably advises the pmap of the user pagezero region — confirm. */
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif /* MACH_KERNEL_PRIVATE */
/* Per-page "no encrypt" markers (machine-dependent meaning). */
extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);
/* The kernel's own physical map; pmap_kernel() resolves to it. */
extern pmap_t kernel_pmap;
#define pmap_kernel() (kernel_pmap)
/*
 * WIMG cacheability/attribute bits (low byte selected by VM_WIMG_MASK).
 */
#define VM_MEM_GUARDED 0x1 /* guarded */
#define VM_MEM_COHERENT 0x2 /* memory coherent */
#define VM_MEM_NOT_CACHEABLE 0x4 /* cache inhibited */
#define VM_MEM_WRITE_THROUGH 0x8 /* write-through */
#define VM_WIMG_USE_DEFAULT 0x80
#define VM_WIMG_MASK 0xFF
#define VM_MEM_SUPERPAGE 0x100
#define VM_MEM_STACK 0x200
#if __x86_64__
/* Flags accepted by pmap_create_options(). */
#define PMAP_CREATE_64BIT 0x1
#define PMAP_CREATE_EPT 0x2
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)
#endif
/*
 * PMAP_OPTIONS_* bits accepted by the *_options() entry points above.
 * Brief meanings paraphrased from the names and in-file usage —
 * consult the implementations for exact semantics.
 */
#define PMAP_OPTIONS_NOWAIT 0x1 /* don't block */
#define PMAP_OPTIONS_NOENTER 0x2
#define PMAP_OPTIONS_COMPRESSOR 0x4
#define PMAP_OPTIONS_INTERNAL 0x8 /* page belongs to an internal object (set by PMAP_ENTER) */
#define PMAP_OPTIONS_REUSABLE 0x10 /* page/object marked reusable (set by PMAP_ENTER) */
#define PMAP_OPTIONS_NOFLUSH 0x20 /* defer TLB flush (see pmap_flush_context) */
#define PMAP_OPTIONS_NOREFMOD 0x40 /* don't update ref/mod bits */
#define PMAP_OPTIONS_ALT_ACCT 0x80
#define PMAP_OPTIONS_REMOVE 0x100
#define PMAP_OPTIONS_SET_REUSABLE 0x200
#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000
#if !defined(__LP64__)
/* 32-bit only: translate a virtual address in `pmap` to physical. */
extern vm_offset_t pmap_extract(pmap_t pmap,
vm_map_offset_t va);
#endif
/* Set or clear the wired attribute of the mapping at `va`. */
extern void pmap_change_wiring(
pmap_t pmap,
vm_map_offset_t va,
boolean_t wired);
/* Remove all mappings in the virtual range [s, e). */
extern void pmap_remove(
pmap_t map,
vm_map_offset_t s,
vm_map_offset_t e);
extern void pmap_remove_options(
pmap_t map,
vm_map_offset_t s,
vm_map_offset_t e,
int options);
/* Fill physical page `pa` with the pattern `fill`. */
extern void fillPage(ppnum_t pa, unsigned int fill);
/* Map/unmap the per-task shared page in a pmap. */
extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
extern void pmap_unmap_sharedpage(pmap_t pmap);
#if defined(__LP64__)
/* Expand translation structures to cover `vaddr` ahead of time. */
void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif
/* Resident bytes for [s, e); optionally reports compressed bytes too. */
mach_vm_size_t pmap_query_resident(pmap_t pmap,
vm_map_offset_t s,
vm_map_offset_t e,
mach_vm_size_t *compressed_bytes_p);
extern void pmap_set_jit_entitled(pmap_t pmap);
/* Whether the pmap imposes its own policy for mappings with `prot`. */
bool pmap_has_prot_policy(vm_prot_t prot);
void pmap_release_pages_fast(void);
/*
 * Page-disposition bits reported via pmap_query_page_info()'s `disp`.
 */
#define PMAP_QUERY_PAGE_PRESENT 0x01
#define PMAP_QUERY_PAGE_REUSABLE 0x02
#define PMAP_QUERY_PAGE_INTERNAL 0x04
#define PMAP_QUERY_PAGE_ALTACCT 0x08
#define PMAP_QUERY_PAGE_COMPRESSED 0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT 0x20
extern kern_return_t pmap_query_page_info(
pmap_t pmap,
vm_map_offset_t va,
int *disp);
#if CONFIG_PGTRACE
/* Page-trace debugging hooks (CONFIG_PGTRACE builds only). */
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#endif /* CONFIG_PGTRACE */
#endif /* KERNEL_PRIVATE */
#endif /* _VM_PMAP_H_ */