#include <task_swapper.h>
#include <mach_assert.h>
#include <libkern/OSAtomic.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/vm_statistics.h>
#include <mach/memory_object.h>
#include <mach/mach_vm.h>
#include <machine/cpu_capabilities.h>
#include <mach/sdt.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <vm/cpm.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <machine/db_machdep.h>
#include <kern/xpr.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_map_store.h>
static void vm_map_simplify_range(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end);
static boolean_t vm_map_range_check(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_map_entry_t *entry);
static vm_map_entry_t _vm_map_entry_create(
struct vm_map_header *map_header);
static void _vm_map_entry_dispose(
struct vm_map_header *map_header,
vm_map_entry_t entry);
static void vm_map_pmap_enter(
vm_map_t map,
vm_map_offset_t addr,
vm_map_offset_t end_addr,
vm_object_t object,
vm_object_offset_t offset,
vm_prot_t protection);
static void _vm_map_clip_end(
struct vm_map_header *map_header,
vm_map_entry_t entry,
vm_map_offset_t end);
static void _vm_map_clip_start(
struct vm_map_header *map_header,
vm_map_entry_t entry,
vm_map_offset_t start);
static void vm_map_entry_delete(
vm_map_t map,
vm_map_entry_t entry);
static kern_return_t vm_map_delete(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
int flags,
vm_map_t zap_map);
static kern_return_t vm_map_copy_overwrite_unaligned(
vm_map_t dst_map,
vm_map_entry_t entry,
vm_map_copy_t copy,
vm_map_address_t start);
static kern_return_t vm_map_copy_overwrite_aligned(
vm_map_t dst_map,
vm_map_entry_t tmp_entry,
vm_map_copy_t copy,
vm_map_offset_t start,
pmap_t pmap);
static kern_return_t vm_map_copyin_kernel_buffer(
vm_map_t src_map,
vm_map_address_t src_addr,
vm_map_size_t len,
boolean_t src_destroy,
vm_map_copy_t *copy_result);
static kern_return_t vm_map_copyout_kernel_buffer(
vm_map_t map,
vm_map_address_t *addr,
vm_map_copy_t copy,
boolean_t overwrite);
static void vm_map_fork_share(
vm_map_t old_map,
vm_map_entry_t old_entry,
vm_map_t new_map);
static boolean_t vm_map_fork_copy(
vm_map_t old_map,
vm_map_entry_t *old_entry_p,
vm_map_t new_map);
void vm_map_region_top_walk(
vm_map_entry_t entry,
vm_region_top_info_t top);
void vm_map_region_walk(
vm_map_t map,
vm_map_offset_t va,
vm_map_entry_t entry,
vm_object_offset_t offset,
vm_object_size_t range,
vm_region_extended_info_t extended,
boolean_t look_for_pages);
static kern_return_t vm_map_wire_nested(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_prot_t access_type,
boolean_t user_wire,
pmap_t map_pmap,
vm_map_offset_t pmap_addr);
static kern_return_t vm_map_unwire_nested(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
boolean_t user_wire,
pmap_t map_pmap,
vm_map_offset_t pmap_addr);
static kern_return_t vm_map_overwrite_submap_recurse(
vm_map_t dst_map,
vm_map_offset_t dst_addr,
vm_map_size_t dst_size);
static kern_return_t vm_map_copy_overwrite_nested(
vm_map_t dst_map,
vm_map_offset_t dst_addr,
vm_map_copy_t copy,
boolean_t interruptible,
pmap_t pmap,
boolean_t discard_on_success);
static kern_return_t vm_map_remap_extract(
vm_map_t map,
vm_map_offset_t addr,
vm_map_size_t size,
boolean_t copy,
struct vm_map_header *map_header,
vm_prot_t *cur_protection,
vm_prot_t *max_protection,
vm_inherit_t inheritance,
boolean_t pageable);
static kern_return_t vm_map_remap_range_allocate(
vm_map_t map,
vm_map_address_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_entry_t *map_entry);
static void vm_map_region_look_for_page(
vm_map_t map,
vm_map_offset_t va,
vm_object_t object,
vm_object_offset_t offset,
int max_refcnt,
int depth,
vm_region_extended_info_t extended);
static int vm_map_region_count_obj_refs(
vm_map_entry_t entry,
vm_object_t object);
static kern_return_t vm_map_willneed(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end);
static kern_return_t vm_map_reuse_pages(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end);
static kern_return_t vm_map_reusable_pages(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end);
static kern_return_t vm_map_can_reuse(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end);
#if CONFIG_FREEZE
struct default_freezer_table;
__private_extern__ void* default_freezer_mapping_create(vm_object_t, vm_offset_t);
__private_extern__ void default_freezer_mapping_free(void**, boolean_t all);
#endif
#define vm_map_entry_copy(NEW,OLD) \
MACRO_BEGIN \
*(NEW) = *(OLD); \
(NEW)->is_shared = FALSE; \
(NEW)->needs_wakeup = FALSE; \
(NEW)->in_transition = FALSE; \
(NEW)->wired_count = 0; \
(NEW)->user_wired_count = 0; \
(NEW)->permanent = FALSE; \
MACRO_END
#define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD))
extern int allow_data_exec, allow_stack_exec;
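/*
 * override_nx:
 *
 * Decide whether execute permission may be granted to a mapping in
 * spite of the NX policy. Stack mappings consult allow_stack_exec,
 * everything else allow_data_exec, each masked by the map's ABI
 * (32- or 64-bit) and, for data, by the per-map
 * map_disallow_data_exec flag.
 */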
int
override_nx(vm_map_t map, uint32_t user_tag)
{
int current_abi;
if (vm_map_is_64bit(map))
current_abi = VM_ABI_64;
else
current_abi = VM_ABI_32;
if (user_tag == VM_MEMORY_STACK)
return allow_stack_exec & current_abi;
return (allow_data_exec & current_abi) && (map->map_disallow_data_exec == FALSE);
}
static zone_t vm_map_zone;
static zone_t vm_map_entry_zone;
static zone_t vm_map_kentry_zone;
static zone_t vm_map_copy_zone;
vm_object_t vm_submap_object;
static void *map_data;
static vm_size_t map_data_size;
static void *kentry_data;
static vm_size_t kentry_data_size;
static int kentry_count = 2048;
#if CONFIG_EMBEDDED
#define NO_COALESCE_LIMIT 0
#else
#define NO_COALESCE_LIMIT ((1024 * 128) - 1)
#endif
unsigned int not_in_kdp = 1;
unsigned int vm_map_set_cache_attr_count = 0;
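/*
 * vm_map_set_cache_attr:
 *
 * Flag the VM object backing the mapping at "va" so that non-default
 * cache attributes are honored when its pages are entered into the
 * pmap. Fails for submaps and for mappings without a backing object.
 */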
kern_return_t
vm_map_set_cache_attr(
vm_map_t map,
vm_map_offset_t va)
{
vm_map_entry_t map_entry;
vm_object_t object;
kern_return_t kr = KERN_SUCCESS;
vm_map_lock_read(map);
if (!vm_map_lookup_entry(map, va, &map_entry) ||
map_entry->is_sub_map) {
kr = KERN_INVALID_ARGUMENT;
goto done;
}
object = map_entry->object.vm_object;
if (object == VM_OBJECT_NULL) {
kr = KERN_INVALID_ARGUMENT;
goto done;
}
vm_object_lock(object);
object->set_cache_attr = TRUE;
vm_object_unlock(object);
vm_map_set_cache_attr_count++;
done:
vm_map_unlock_read(map);
return kr;
}
#if CONFIG_CODE_DECRYPTION
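/*
 * vm_map_apple_protected:
 *
 * Overwrite the mapping of [start, end) with an equivalent mapping
 * backed by an "apple protect" pager, which decrypts pages of the
 * original object on demand using the supplied crypt_info.
 */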
kern_return_t
vm_map_apple_protected(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
struct pager_crypt_info *crypt_info)
{
boolean_t map_locked;
kern_return_t kr;
vm_map_entry_t map_entry;
memory_object_t protected_mem_obj;
vm_object_t protected_object;
vm_map_offset_t map_addr;
vm_map_lock_read(map);
map_locked = TRUE;
if (!vm_map_lookup_entry(map,
start,
&map_entry) ||
map_entry->vme_end < end ||
map_entry->is_sub_map) {
kr = KERN_INVALID_ARGUMENT;
goto done;
}
protected_object = map_entry->object.vm_object;
if (protected_object == VM_OBJECT_NULL) {
kr = KERN_INVALID_ARGUMENT;
goto done;
}
vm_object_reference(protected_object);
vm_map_unlock_read(map);
map_locked = FALSE;
protected_mem_obj = apple_protect_pager_setup(protected_object, crypt_info);
vm_object_deallocate(protected_object);
if (protected_mem_obj == NULL) {
kr = KERN_FAILURE;
goto done;
}
map_addr = start;
kr = vm_map_enter_mem_object(map,
&map_addr,
end - start,
(mach_vm_offset_t) 0,
VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
(ipc_port_t) protected_mem_obj,
(map_entry->offset +
(start - map_entry->vme_start)),
TRUE,
map_entry->protection,
map_entry->max_protection,
map_entry->inheritance);
assert(map_addr == start);
memory_object_deallocate(protected_mem_obj);
done:
if (map_locked) {
vm_map_unlock_read(map);
}
return kr;
}
#endif
lck_grp_t vm_map_lck_grp;
lck_grp_attr_t vm_map_lck_grp_attr;
lck_attr_t vm_map_lck_attr;
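/*
 * vm_map_init:
 *
 * Initialize the vm_map module: create the zones backing vm_map
 * structures, pageable and kernel map entries, and map copies, and
 * set up the map lock group and attributes. Called once at bootstrap,
 * before any maps are created.
 */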
void
vm_map_init(
void)
{
vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40*1024,
PAGE_SIZE, "maps");
zone_change(vm_map_zone, Z_NOENCRYPT, TRUE);
vm_map_entry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
1024*1024, PAGE_SIZE*5,
"non-kernel map entries");
zone_change(vm_map_entry_zone, Z_NOENCRYPT, TRUE);
vm_map_kentry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
kentry_data_size, kentry_data_size,
"kernel map entries");
zone_change(vm_map_kentry_zone, Z_NOENCRYPT, TRUE);
vm_map_copy_zone = zinit((vm_map_size_t) sizeof(struct vm_map_copy),
16*1024, PAGE_SIZE, "map copies");
zone_change(vm_map_copy_zone, Z_NOENCRYPT, TRUE);
zone_change(vm_map_zone, Z_COLLECT, FALSE);
zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
zone_change(vm_map_kentry_zone, Z_FOREIGN, TRUE);
zone_change(vm_map_kentry_zone, Z_CALLERACCT, FALSE);
zone_change(vm_map_copy_zone, Z_CALLERACCT, FALSE);
zcram(vm_map_zone, map_data, map_data_size);
zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
lck_grp_attr_setdefault(&vm_map_lck_grp_attr);
lck_grp_init(&vm_map_lck_grp, "vm_map", &vm_map_lck_grp_attr);
lck_attr_setdefault(&vm_map_lck_attr);
}
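/*
 * vm_map_steal_memory:
 *
 * Steal boot-time physical memory to back the initial map structures
 * and the kernel map entries, since the zones that hold them cannot
 * be replenished through the normal allocator this early in startup.
 */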
void
vm_map_steal_memory(
void)
{
map_data_size = round_page(10 * sizeof(struct _vm_map));
map_data = pmap_steal_memory(map_data_size);
kentry_count = pmap_free_pages() / 8;
kentry_data_size =
round_page(kentry_count * sizeof(struct vm_map_entry));
kentry_data = pmap_steal_memory(kentry_data_size);
}
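/*
 * vm_map_create:
 *
 * Create and return an empty VM map backed by the given physical map,
 * spanning the given lower and upper address bounds.
 */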
vm_map_t
vm_map_create(
pmap_t pmap,
vm_map_offset_t min,
vm_map_offset_t max,
boolean_t pageable)
{
static int color_seed = 0;
register vm_map_t result;
result = (vm_map_t) zalloc(vm_map_zone);
if (result == VM_MAP_NULL)
panic("vm_map_create");
vm_map_first_entry(result) = vm_map_to_entry(result);
vm_map_last_entry(result) = vm_map_to_entry(result);
result->hdr.nentries = 0;
result->hdr.entries_pageable = pageable;
vm_map_store_init( &(result->hdr) );
result->size = 0;
result->user_wire_limit = MACH_VM_MAX_ADDRESS;
result->user_wire_size = 0;
result->ref_count = 1;
#if TASK_SWAPPER
result->res_count = 1;
result->sw_state = MAP_SW_IN;
#endif
result->pmap = pmap;
result->min_offset = min;
result->max_offset = max;
result->wiring_required = FALSE;
result->no_zero_fill = FALSE;
result->mapped = FALSE;
result->wait_for_space = FALSE;
result->switch_protect = FALSE;
result->disable_vmentry_reuse = FALSE;
result->map_disallow_data_exec = FALSE;
result->highest_entry_end = 0;
result->first_free = vm_map_to_entry(result);
result->hint = vm_map_to_entry(result);
result->color_rr = (color_seed++) & vm_color_mask;
result->jit_entry_exists = FALSE;
#if CONFIG_FREEZE
result->default_freezer_toc = NULL;
#endif
vm_map_lock_init(result);
lck_mtx_init_ext(&result->s_lock, &result->s_lock_ext, &vm_map_lck_grp, &vm_map_lck_attr);
return(result);
}
#define vm_map_entry_create(map) \
_vm_map_entry_create(&(map)->hdr)
#define vm_map_copy_entry_create(copy) \
_vm_map_entry_create(&(copy)->cpy_hdr)
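/*
 * _vm_map_entry_create:
 *
 * Allocate a map entry, drawing from the pageable zone for ordinary
 * maps and from the wired kernel-entry zone otherwise.
 */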
static vm_map_entry_t
_vm_map_entry_create(
register struct vm_map_header *map_header)
{
register zone_t zone;
register vm_map_entry_t entry;
if (map_header->entries_pageable)
zone = vm_map_entry_zone;
else
zone = vm_map_kentry_zone;
entry = (vm_map_entry_t) zalloc(zone);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
vm_map_store_update( (vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE);
return(entry);
}
#define vm_map_entry_dispose(map, entry) \
MACRO_BEGIN \
vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE); \
_vm_map_entry_dispose(&(map)->hdr, (entry)); \
MACRO_END
#define vm_map_copy_entry_dispose(copy, entry) \
_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
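/*
 * _vm_map_entry_dispose:
 *
 * Return a map entry to the zone it was allocated from.
 */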
static void
_vm_map_entry_dispose(
register struct vm_map_header *map_header,
register vm_map_entry_t entry)
{
register zone_t zone;
if (map_header->entries_pageable)
zone = vm_map_entry_zone;
else
zone = vm_map_kentry_zone;
zfree(zone, entry);
}
#if MACH_ASSERT
static boolean_t first_free_check = FALSE;
boolean_t
first_free_is_valid(
vm_map_t map)
{
if (!first_free_check)
return TRUE;
return( first_free_is_valid_store( map ));
}
#endif
#define vm_map_copy_entry_link(copy, after_where, entry) \
_vm_map_store_entry_link(&(copy)->cpy_hdr, after_where, (entry))
#define vm_map_copy_entry_unlink(copy, entry) \
_vm_map_store_entry_unlink(&(copy)->cpy_hdr, (entry))
#if MACH_ASSERT && TASK_SWAPPER
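/*
 * vm_map_res_reference / vm_map_reference_swap / vm_map_res_deallocate:
 *
 * Manage the map's residence count for the task swapper. Gaining the
 * first residence reference swaps the map in; dropping the last one
 * swaps it out. The map's s_lock is held by the caller as noted in
 * each routine.
 */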
void vm_map_res_reference(register vm_map_t map)
{
assert(map->res_count >= 0);
assert(map->ref_count >= map->res_count);
if (map->res_count == 0) {
lck_mtx_unlock(&map->s_lock);
vm_map_lock(map);
vm_map_swapin(map);
lck_mtx_lock(&map->s_lock);
++map->res_count;
vm_map_unlock(map);
} else
++map->res_count;
}
void vm_map_reference_swap(register vm_map_t map)
{
assert(map != VM_MAP_NULL);
lck_mtx_lock(&map->s_lock);
assert(map->res_count >= 0);
assert(map->ref_count >= map->res_count);
map->ref_count++;
vm_map_res_reference(map);
lck_mtx_unlock(&map->s_lock);
}
void vm_map_res_deallocate(register vm_map_t map)
{
assert(map->res_count > 0);
if (--map->res_count == 0) {
lck_mtx_unlock(&map->s_lock);
vm_map_lock(map);
vm_map_swapout(map);
vm_map_unlock(map);
lck_mtx_lock(&map->s_lock);
}
assert(map->ref_count >= map->res_count);
}
#endif
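/*
 * vm_map_destroy:
 *
 * Tear down the given map: delete every entry, including any special
 * mappings beyond the map's nominal range, release its pmap, and free
 * the map structure itself. The last reference must already be gone.
 */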
void
vm_map_destroy(
vm_map_t map,
int flags)
{
vm_map_lock(map);
(void) vm_map_delete(map, map->min_offset, map->max_offset,
flags, VM_MAP_NULL);
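/* clean up leftover special mappings (e.g. the commpage) */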
(void) vm_map_delete(map, 0x0, 0xFFFFFFFFFFFFF000ULL,
flags, VM_MAP_NULL);
#if CONFIG_FREEZE
if (map->default_freezer_toc){
default_freezer_mapping_free( &(map->default_freezer_toc), TRUE);
}
#endif
vm_map_unlock(map);
assert(map->hdr.nentries == 0);
if(map->pmap)
pmap_destroy(map->pmap);
zfree(vm_map_zone, map);
}
#if TASK_SWAPPER
int vm_map_swap_enable = 1;
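/*
 * vm_map_swapin/vm_map_swapout:
 *
 * Task-swapper support. res_count counts "residence" references:
 * when it drops to zero, vm_map_swapout releases the residence
 * references that the map's entries hold on their objects and
 * submaps, and the next residence reference takes them back.
 */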
void vm_map_swapin (vm_map_t map)
{
register vm_map_entry_t entry;
if (!vm_map_swap_enable)
return;
if (map->sw_state == MAP_SW_IN)
return;
assert(map->res_count == 0);
assert(map->sw_state == MAP_SW_OUT);
entry = vm_map_first_entry(map);
while (entry != vm_map_to_entry(map)) {
if (entry->object.vm_object != VM_OBJECT_NULL) {
if (entry->is_sub_map) {
vm_map_t lmap = entry->object.sub_map;
lck_mtx_lock(&lmap->s_lock);
vm_map_res_reference(lmap);
lck_mtx_unlock(&lmap->s_lock);
} else {
vm_object_t object = entry->object.vm_object;
vm_object_lock(object);
vm_object_res_reference(object);
vm_object_unlock(object);
}
}
entry = entry->vme_next;
}
assert(map->sw_state == MAP_SW_OUT);
map->sw_state = MAP_SW_IN;
}
void vm_map_swapout(vm_map_t map)
{
register vm_map_entry_t entry;
lck_mtx_lock(&map->s_lock);
if (map->res_count != 0) {
lck_mtx_unlock(&map->s_lock);
return;
}
lck_mtx_unlock(&map->s_lock);
assert(map->sw_state == MAP_SW_IN);
if (!vm_map_swap_enable)
return;
entry = vm_map_first_entry(map);
while (entry != vm_map_to_entry(map)) {
if (entry->object.vm_object != VM_OBJECT_NULL) {
if (entry->is_sub_map) {
vm_map_t lmap = entry->object.sub_map;
lck_mtx_lock(&lmap->s_lock);
vm_map_res_deallocate(lmap);
lck_mtx_unlock(&lmap->s_lock);
} else {
vm_object_t object = entry->object.vm_object;
vm_object_lock(object);
vm_object_res_deallocate(object);
vm_object_unlock(object);
}
}
entry = entry->vme_next;
}
assert(map->sw_state == MAP_SW_IN);
map->sw_state = MAP_SW_OUT;
}
#endif
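/*
 * vm_map_lookup_entry: [ internal use only ]
 *
 * Find the map entry containing (or immediately preceding) the given
 * address; the entry is returned in *entry, and the boolean result
 * indicates whether the address is actually contained in that entry.
 */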
boolean_t
vm_map_lookup_entry(
register vm_map_t map,
register vm_map_offset_t address,
vm_map_entry_t *entry)
{
return ( vm_map_store_lookup_entry( map, address, entry ));
}
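/*
 * vm_map_find_space:
 *
 * Allocate a range in the given map and return the new entry, with
 * its object/offset fields zeroed, in *o_entry. The map must be
 * unlocked on entry; it is returned locked on success.
 */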
kern_return_t
vm_map_find_space(
register vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_entry_t *o_entry)
{
register vm_map_entry_t entry, new_entry;
register vm_map_offset_t start;
register vm_map_offset_t end;
if (size == 0) {
*address = 0;
return KERN_INVALID_ARGUMENT;
}
if (flags & VM_FLAGS_GUARD_AFTER) {
size += PAGE_SIZE_64;
}
new_entry = vm_map_entry_create(map);
vm_map_lock(map);
if( map->disable_vmentry_reuse == TRUE) {
VM_MAP_HIGHEST_ENTRY(map, entry, start);
} else {
assert(first_free_is_valid(map));
if ((entry = map->first_free) == vm_map_to_entry(map))
start = map->min_offset;
else
start = entry->vme_end;
}
while (TRUE) {
register vm_map_entry_t next;
if (flags & VM_FLAGS_GUARD_BEFORE) {
start += PAGE_SIZE_64;
}
end = ((start + mask) & ~mask);
if (end < start) {
vm_map_entry_dispose(map, new_entry);
vm_map_unlock(map);
return(KERN_NO_SPACE);
}
start = end;
end += size;
if ((end > map->max_offset) || (end < start)) {
vm_map_entry_dispose(map, new_entry);
vm_map_unlock(map);
return(KERN_NO_SPACE);
}
next = entry->vme_next;
if (next == vm_map_to_entry(map))
break;
if (next->vme_start >= end)
break;
entry = next;
start = entry->vme_end;
}
if (flags & VM_FLAGS_GUARD_BEFORE) {
start -= PAGE_SIZE_64;
}
*address = start;
new_entry->vme_start = start;
new_entry->vme_end = end;
assert(page_aligned(new_entry->vme_start));
assert(page_aligned(new_entry->vme_end));
new_entry->is_shared = FALSE;
new_entry->is_sub_map = FALSE;
new_entry->use_pmap = FALSE;
new_entry->object.vm_object = VM_OBJECT_NULL;
new_entry->offset = (vm_object_offset_t) 0;
new_entry->needs_copy = FALSE;
new_entry->inheritance = VM_INHERIT_DEFAULT;
new_entry->protection = VM_PROT_DEFAULT;
new_entry->max_protection = VM_PROT_ALL;
new_entry->behavior = VM_BEHAVIOR_DEFAULT;
new_entry->wired_count = 0;
new_entry->user_wired_count = 0;
new_entry->in_transition = FALSE;
new_entry->needs_wakeup = FALSE;
new_entry->no_cache = FALSE;
new_entry->permanent = FALSE;
new_entry->superpage_size = 0;
new_entry->alias = 0;
new_entry->zero_wired_pages = FALSE;
VM_GET_FLAGS_ALIAS(flags, new_entry->alias);
vm_map_store_entry_link(map, entry, new_entry);
map->size += size;
SAVE_HINT_MAP_WRITE(map, new_entry);
*o_entry = new_entry;
return(KERN_SUCCESS);
}
int vm_map_pmap_enter_print = FALSE;
int vm_map_pmap_enter_enable = FALSE;
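/*
 * vm_map_pmap_enter:
 *
 * Pre-enter resident pages of "object" into the map's pmap over
 * [addr, end_addr) to avoid later soft faults. The scan stops at the
 * first page that is missing, busy, or otherwise unsafe to map.
 */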
static void
vm_map_pmap_enter(
vm_map_t map,
register vm_map_offset_t addr,
register vm_map_offset_t end_addr,
register vm_object_t object,
vm_object_offset_t offset,
vm_prot_t protection)
{
int type_of_fault;
kern_return_t kr;
if(map->pmap == 0)
return;
while (addr < end_addr) {
register vm_page_t m;
vm_object_lock(object);
m = vm_page_lookup(object, offset);
if (m == VM_PAGE_NULL || m->busy || m->encrypted ||
m->fictitious ||
(m->unusual && ( m->error || m->restart || m->absent))) {
vm_object_unlock(object);
return;
}
if (vm_map_pmap_enter_print) {
printf("vm_map_pmap_enter:");
printf("map: %p, addr: %llx, object: %p, offset: %llx\n",
map, (unsigned long long)addr, object, (unsigned long long)offset);
}
type_of_fault = DBG_CACHE_HIT_FAULT;
kr = vm_fault_enter(m, map->pmap, addr, protection, protection,
VM_PAGE_WIRED(m), FALSE, FALSE, FALSE,
&type_of_fault);
vm_object_unlock(object);
offset += PAGE_SIZE_64;
addr += PAGE_SIZE;
}
}
boolean_t vm_map_pmap_is_empty(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end);
boolean_t vm_map_pmap_is_empty(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end)
{
#ifdef MACHINE_PMAP_IS_EMPTY
return pmap_is_empty(map->pmap, start, end);
#else
vm_map_offset_t offset;
ppnum_t phys_page;
if (map->pmap == NULL) {
return TRUE;
}
for (offset = start;
offset < end;
offset += PAGE_SIZE) {
phys_page = pmap_find_phys(map->pmap, offset);
if (phys_page) {
kprintf("vm_map_pmap_is_empty(%p,0x%llx,0x%llx): "
"page %d at 0x%llx\n",
map, (long long)start, (long long)end,
phys_page, (long long)offset);
return FALSE;
}
}
return TRUE;
#endif
}
int _map_enter_debug = 0;
static unsigned int vm_map_enter_restore_successes = 0;
static unsigned int vm_map_enter_restore_failures = 0;
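/*
 * vm_map_enter:
 *
 * Allocate a range in the given map and map the supplied object (or
 * anonymous memory) there. With VM_FLAGS_ANYWHERE a free range is
 * found and returned in *address; otherwise the caller's address is
 * used, optionally displacing existing mappings (VM_FLAGS_OVERWRITE),
 * which are preserved in a "zap map" so they can be restored if the
 * operation fails partway through.
 */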
kern_return_t
vm_map_enter(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_object_t object,
vm_object_offset_t offset,
boolean_t needs_copy,
vm_prot_t cur_protection,
vm_prot_t max_protection,
vm_inherit_t inheritance)
{
vm_map_entry_t entry, new_entry;
vm_map_offset_t start, tmp_start, tmp_offset;
vm_map_offset_t end, tmp_end;
vm_map_offset_t tmp2_start, tmp2_end;
vm_map_offset_t step;
kern_return_t result = KERN_SUCCESS;
vm_map_t zap_old_map = VM_MAP_NULL;
vm_map_t zap_new_map = VM_MAP_NULL;
boolean_t map_locked = FALSE;
boolean_t pmap_empty = TRUE;
boolean_t new_mapping_established = FALSE;
boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0);
boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0);
boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0);
boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0);
boolean_t is_submap = ((flags & VM_FLAGS_SUBMAP) != 0);
boolean_t permanent = ((flags & VM_FLAGS_PERMANENT) != 0);
unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT);
char alias;
vm_map_offset_t effective_min_offset, effective_max_offset;
kern_return_t kr;
if (superpage_size) {
switch (superpage_size) {
#ifdef __x86_64__
case SUPERPAGE_SIZE_ANY:
size = (size + 2*1024*1024 - 1) & ~(2*1024*1024 - 1);
case SUPERPAGE_SIZE_2MB:
break;
#endif
default:
return KERN_INVALID_ARGUMENT;
}
mask = SUPERPAGE_SIZE-1;
if (size & (SUPERPAGE_SIZE-1))
return KERN_INVALID_ARGUMENT;
inheritance = VM_INHERIT_NONE;
}
#if CONFIG_EMBEDDED
if (cur_protection & VM_PROT_WRITE){
if ((cur_protection & VM_PROT_EXECUTE) && !(flags & VM_FLAGS_MAP_JIT)){
printf("EMBEDDED: %s curprot cannot be write+execute. turning off execute\n", __PRETTY_FUNCTION__);
cur_protection &= ~VM_PROT_EXECUTE;
}
}
#endif
if (is_submap) {
if (purgable) {
return KERN_INVALID_ARGUMENT;
}
if (object == VM_OBJECT_NULL) {
return KERN_INVALID_ARGUMENT;
}
}
if (flags & VM_FLAGS_ALREADY) {
if ((flags & VM_FLAGS_ANYWHERE) ||
(flags & VM_FLAGS_OVERWRITE)) {
return KERN_INVALID_ARGUMENT;
}
}
effective_min_offset = map->min_offset;
if (flags & VM_FLAGS_BEYOND_MAX) {
if (vm_map_is_64bit(map))
effective_max_offset = 0xFFFFFFFFFFFFF000ULL;
else
effective_max_offset = 0x00000000FFFFF000ULL;
} else {
effective_max_offset = map->max_offset;
}
if (size == 0 ||
(offset & PAGE_MASK_64) != 0) {
*address = 0;
return KERN_INVALID_ARGUMENT;
}
VM_GET_FLAGS_ALIAS(flags, alias);
#define RETURN(value) { result = value; goto BailOut; }
assert(page_aligned(*address));
assert(page_aligned(size));
if (purgable &&
(offset != 0 ||
(object != VM_OBJECT_NULL &&
(object->vo_size != size ||
object->purgable == VM_PURGABLE_DENY))
|| size > ANON_MAX_SIZE))
return KERN_INVALID_ARGUMENT;
if (!anywhere && overwrite) {
zap_old_map = vm_map_create(PMAP_NULL,
*address,
*address + size,
map->hdr.entries_pageable);
}
StartAgain: ;
start = *address;
if (anywhere) {
vm_map_lock(map);
map_locked = TRUE;
if ((flags & VM_FLAGS_MAP_JIT) && (map->jit_entry_exists)){
result = KERN_INVALID_ARGUMENT;
goto BailOut;
}
if (start < effective_min_offset)
start = effective_min_offset;
if (start > effective_max_offset)
RETURN(KERN_NO_SPACE);
if( map->disable_vmentry_reuse == TRUE) {
VM_MAP_HIGHEST_ENTRY(map, entry, start);
} else {
assert(first_free_is_valid(map));
entry = map->first_free;
if (entry == vm_map_to_entry(map)) {
entry = NULL;
} else {
if (entry->vme_next == vm_map_to_entry(map)){
entry = NULL;
} else {
if (start < (entry->vme_next)->vme_start ) {
start = entry->vme_end;
} else {
entry = NULL;
}
}
}
if (entry == NULL) {
vm_map_entry_t tmp_entry;
if (vm_map_lookup_entry(map, start, &tmp_entry))
start = tmp_entry->vme_end;
entry = tmp_entry;
}
}
while (TRUE) {
register vm_map_entry_t next;
end = ((start + mask) & ~mask);
if (end < start)
RETURN(KERN_NO_SPACE);
start = end;
end += size;
if ((end > effective_max_offset) || (end < start)) {
if (map->wait_for_space) {
if (size <= (effective_max_offset -
effective_min_offset)) {
assert_wait((event_t)map,
THREAD_ABORTSAFE);
vm_map_unlock(map);
map_locked = FALSE;
thread_block(THREAD_CONTINUE_NULL);
goto StartAgain;
}
}
RETURN(KERN_NO_SPACE);
}
next = entry->vme_next;
if (next == vm_map_to_entry(map))
break;
if (next->vme_start >= end)
break;
entry = next;
start = entry->vme_end;
}
*address = start;
} else {
vm_map_lock(map);
map_locked = TRUE;
if ((start & mask) != 0)
RETURN(KERN_NO_SPACE);
end = start + size;
if ((start < effective_min_offset) ||
(end > effective_max_offset) ||
(start >= end)) {
RETURN(KERN_INVALID_ADDRESS);
}
if (overwrite && zap_old_map != VM_MAP_NULL) {
(void) vm_map_delete(map, start, end,
VM_MAP_REMOVE_SAVE_ENTRIES,
zap_old_map);
}
if (vm_map_lookup_entry(map, start, &entry)) {
if (! (flags & VM_FLAGS_ALREADY)) {
RETURN(KERN_NO_SPACE);
}
tmp_start = start;
tmp_offset = offset;
if (entry->vme_start < start) {
tmp_start -= start - entry->vme_start;
tmp_offset -= start - entry->vme_start;
}
for (; entry->vme_start < end;
entry = entry->vme_next) {
if (entry == vm_map_to_entry(map) ||
entry->vme_start != tmp_start ||
entry->is_sub_map != is_submap ||
entry->offset != tmp_offset ||
entry->needs_copy != needs_copy ||
entry->protection != cur_protection ||
entry->max_protection != max_protection ||
entry->inheritance != inheritance ||
entry->alias != alias) {
RETURN(KERN_NO_SPACE);
}
if (is_submap) {
if (entry->object.sub_map !=
(vm_map_t) object) {
RETURN(KERN_NO_SPACE);
}
} else {
if (entry->object.vm_object != object) {
vm_object_t obj2;
obj2 = entry->object.vm_object;
if ((obj2 == VM_OBJECT_NULL ||
obj2->internal) &&
(object == VM_OBJECT_NULL ||
object->internal)) {
} else {
RETURN(KERN_NO_SPACE);
}
}
}
tmp_offset += entry->vme_end - entry->vme_start;
tmp_start += entry->vme_end - entry->vme_start;
if (entry->vme_end >= end) {
break;
}
}
RETURN(KERN_MEMORY_PRESENT);
}
if ((entry->vme_next != vm_map_to_entry(map)) &&
(entry->vme_next->vme_start < end))
RETURN(KERN_NO_SPACE);
}
if (purgable) {
if (object == VM_OBJECT_NULL) {
object = vm_object_allocate(size);
object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
object->purgable = VM_PURGABLE_NONVOLATILE;
offset = (vm_object_offset_t)0;
}
} else if ((is_submap == FALSE) &&
(object == VM_OBJECT_NULL) &&
(entry != vm_map_to_entry(map)) &&
(entry->vme_end == start) &&
(!entry->is_shared) &&
(!entry->is_sub_map) &&
((alias == VM_MEMORY_REALLOC) || (entry->alias == alias)) &&
(entry->inheritance == inheritance) &&
(entry->protection == cur_protection) &&
(entry->max_protection == max_protection) &&
(entry->behavior == VM_BEHAVIOR_DEFAULT) &&
(entry->in_transition == 0) &&
(entry->no_cache == no_cache) &&
((entry->vme_end - entry->vme_start) + size <=
(alias == VM_MEMORY_REALLOC ?
ANON_CHUNK_SIZE :
NO_COALESCE_LIMIT)) &&
(entry->wired_count == 0)) {
if (vm_object_coalesce(entry->object.vm_object,
VM_OBJECT_NULL,
entry->offset,
(vm_object_offset_t) 0,
(vm_map_size_t)(entry->vme_end - entry->vme_start),
(vm_map_size_t)(end - entry->vme_end))) {
map->size += (end - entry->vme_end);
entry->vme_end = end;
vm_map_store_update_first_free(map, map->first_free);
RETURN(KERN_SUCCESS);
}
}
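/*
 * Create the new entries: one per superpage when superpages were
 * requested, in ANON_CHUNK_SIZE pieces for large anonymous regions
 * so each chunk gets its own object, otherwise a single entry for
 * the whole range.
 */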
step = superpage_size ? SUPERPAGE_SIZE : (end - start);
new_entry = NULL;
for (tmp2_start = start; tmp2_start<end; tmp2_start += step) {
tmp2_end = tmp2_start + step;
tmp_start = tmp2_start;
if (object == VM_OBJECT_NULL &&
size > (vm_map_size_t)ANON_CHUNK_SIZE &&
max_protection != VM_PROT_NONE &&
superpage_size == 0)
tmp_end = tmp_start + (vm_map_size_t)ANON_CHUNK_SIZE;
else
tmp_end = tmp2_end;
do {
new_entry = vm_map_entry_insert(map, entry, tmp_start, tmp_end,
object, offset, needs_copy,
FALSE, FALSE,
cur_protection, max_protection,
VM_BEHAVIOR_DEFAULT,
(flags & VM_FLAGS_MAP_JIT)? VM_INHERIT_NONE: inheritance,
0, no_cache,
permanent, superpage_size);
new_entry->alias = alias;
if (flags & VM_FLAGS_MAP_JIT){
if (!(map->jit_entry_exists)){
new_entry->used_for_jit = TRUE;
map->jit_entry_exists = TRUE;
}
}
if (is_submap) {
vm_map_t submap;
boolean_t submap_is_64bit;
boolean_t use_pmap;
new_entry->is_sub_map = TRUE;
submap = (vm_map_t) object;
submap_is_64bit = vm_map_is_64bit(submap);
use_pmap = (alias == VM_MEMORY_SHARED_PMAP);
#ifndef NO_NESTED_PMAP
if (use_pmap && submap->pmap == NULL) {
submap->pmap = pmap_create(0, submap_is_64bit);
if (submap->pmap == NULL) {
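/* no pmap for the submap: proceed without nesting */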
}
}
if (use_pmap && submap->pmap != NULL) {
kr = pmap_nest(map->pmap,
submap->pmap,
tmp_start,
tmp_start,
tmp_end - tmp_start);
if (kr != KERN_SUCCESS) {
printf("vm_map_enter: "
"pmap_nest(0x%llx,0x%llx) "
"error 0x%x\n",
(long long)tmp_start,
(long long)tmp_end,
kr);
} else {
new_entry->use_pmap = TRUE;
pmap_empty = FALSE;
}
}
#endif
}
entry = new_entry;
if (superpage_size) {
vm_page_t pages, m;
vm_object_t sp_object;
entry->offset = 0;
kr = cpm_allocate(SUPERPAGE_SIZE, &pages, 0, SUPERPAGE_NBASEPAGES-1, TRUE, 0);
if (kr != KERN_SUCCESS) {
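/* mark the mapping established so BailOut tears down the whole range */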
new_mapping_established = TRUE;
RETURN(kr);
}
sp_object = vm_object_allocate((vm_map_size_t)(entry->vme_end - entry->vme_start));
sp_object->phys_contiguous = TRUE;
sp_object->vo_shadow_offset = (vm_object_offset_t)pages->phys_page*PAGE_SIZE;
entry->object.vm_object = sp_object;
vm_object_lock(sp_object);
for (offset = 0; offset < SUPERPAGE_SIZE; offset += PAGE_SIZE) {
m = pages;
pmap_zero_page(m->phys_page);
pages = NEXT_PAGE(m);
*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
vm_page_insert(m, sp_object, offset);
}
vm_object_unlock(sp_object);
}
} while (tmp_end != tmp2_end &&
(tmp_start = tmp_end) &&
(tmp_end = (tmp2_end - tmp_end > (vm_map_size_t)ANON_CHUNK_SIZE) ?
tmp_end + (vm_map_size_t)ANON_CHUNK_SIZE : tmp2_end));
}
vm_map_unlock(map);
map_locked = FALSE;
new_mapping_established = TRUE;
if ((map->wiring_required)||(superpage_size)) {
pmap_empty = FALSE;
result = vm_map_wire(map, start, end,
new_entry->protection, TRUE);
RETURN(result);
}
if ((object != VM_OBJECT_NULL) &&
(vm_map_pmap_enter_enable) &&
(!anywhere) &&
(!needs_copy) &&
(size < (128*1024))) {
pmap_empty = FALSE;
if (override_nx(map, alias) && cur_protection)
cur_protection |= VM_PROT_EXECUTE;
vm_map_pmap_enter(map, start, end,
object, offset, cur_protection);
}
BailOut: ;
if (result == KERN_SUCCESS) {
vm_prot_t pager_prot;
memory_object_t pager;
if (pmap_empty &&
!(flags & VM_FLAGS_NO_PMAP_CHECK)) {
assert(vm_map_pmap_is_empty(map,
*address,
*address+size));
}
pager_prot = max_protection;
if (needs_copy) {
pager_prot &= ~VM_PROT_WRITE;
}
if (!is_submap &&
object != VM_OBJECT_NULL &&
object->named &&
object->pager != MEMORY_OBJECT_NULL) {
vm_object_lock(object);
pager = object->pager;
if (object->named &&
pager != MEMORY_OBJECT_NULL) {
assert(object->pager_ready);
vm_object_mapping_wait(object, THREAD_UNINT);
vm_object_mapping_begin(object);
vm_object_unlock(object);
kr = memory_object_map(pager, pager_prot);
assert(kr == KERN_SUCCESS);
vm_object_lock(object);
vm_object_mapping_end(object);
}
vm_object_unlock(object);
}
} else {
if (new_mapping_established) {
zap_new_map = vm_map_create(PMAP_NULL,
*address,
*address + size,
map->hdr.entries_pageable);
if (!map_locked) {
vm_map_lock(map);
map_locked = TRUE;
}
(void) vm_map_delete(map, *address, *address+size,
VM_MAP_REMOVE_SAVE_ENTRIES,
zap_new_map);
}
if (zap_old_map != VM_MAP_NULL &&
zap_old_map->hdr.nentries != 0) {
vm_map_entry_t entry1, entry2;
if (!map_locked) {
vm_map_lock(map);
map_locked = TRUE;
}
start = vm_map_first_entry(zap_old_map)->vme_start;
end = vm_map_last_entry(zap_old_map)->vme_end;
if (vm_map_lookup_entry(map, start, &entry1) ||
vm_map_lookup_entry(map, end, &entry2) ||
entry1 != entry2) {
vm_map_enter_restore_failures++;
} else {
for (entry2 = vm_map_first_entry(zap_old_map);
entry2 != vm_map_to_entry(zap_old_map);
entry2 = vm_map_first_entry(zap_old_map)) {
vm_map_size_t entry_size;
entry_size = (entry2->vme_end -
entry2->vme_start);
vm_map_store_entry_unlink(zap_old_map,
entry2);
zap_old_map->size -= entry_size;
vm_map_store_entry_link(map, entry1, entry2);
map->size += entry_size;
entry1 = entry2;
}
if (map->wiring_required) {
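/* XXX TODO: we should rewire the old pages here... */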
}
vm_map_enter_restore_successes++;
}
}
}
if (map_locked) {
vm_map_unlock(map);
}
if (zap_old_map != VM_MAP_NULL) {
vm_map_destroy(zap_old_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP);
zap_old_map = VM_MAP_NULL;
}
if (zap_new_map != VM_MAP_NULL) {
vm_map_destroy(zap_new_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP);
zap_new_map = VM_MAP_NULL;
}
return result;
#undef RETURN
}
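/*
 * vm_map_enter_mem_object:
 *
 * Map the memory named by "port" into the target map. The port may
 * name a named entry (submap, pager, or object) or a raw memory
 * object; it is resolved to a VM object or submap and handed to
 * vm_map_enter.
 */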
kern_return_t
vm_map_enter_mem_object(
vm_map_t target_map,
vm_map_offset_t *address,
vm_map_size_t initial_size,
vm_map_offset_t mask,
int flags,
ipc_port_t port,
vm_object_offset_t offset,
boolean_t copy,
vm_prot_t cur_protection,
vm_prot_t max_protection,
vm_inherit_t inheritance)
{
vm_map_address_t map_addr;
vm_map_size_t map_size;
vm_object_t object;
vm_object_size_t size;
kern_return_t result;
boolean_t mask_cur_protection, mask_max_protection;
mask_cur_protection = cur_protection & VM_PROT_IS_MASK;
mask_max_protection = max_protection & VM_PROT_IS_MASK;
cur_protection &= ~VM_PROT_IS_MASK;
max_protection &= ~VM_PROT_IS_MASK;
if ((target_map == VM_MAP_NULL) ||
(cur_protection & ~VM_PROT_ALL) ||
(max_protection & ~VM_PROT_ALL) ||
(inheritance > VM_INHERIT_LAST_VALID) ||
initial_size == 0)
return KERN_INVALID_ARGUMENT;
map_addr = vm_map_trunc_page(*address);
map_size = vm_map_round_page(initial_size);
size = vm_object_round_page(initial_size);
if (!IP_VALID(port)) {
object = VM_OBJECT_NULL;
offset = 0;
copy = FALSE;
} else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
vm_named_entry_t named_entry;
named_entry = (vm_named_entry_t) port->ip_kobject;
if (size == 0) {
if (offset >= named_entry->size)
return KERN_INVALID_RIGHT;
size = named_entry->size - offset;
}
if (mask_max_protection) {
max_protection &= named_entry->protection;
}
if (mask_cur_protection) {
cur_protection &= named_entry->protection;
}
if ((named_entry->protection & max_protection) !=
max_protection)
return KERN_INVALID_RIGHT;
if ((named_entry->protection & cur_protection) !=
cur_protection)
return KERN_INVALID_RIGHT;
if (named_entry->size < (offset + size))
return KERN_INVALID_ARGUMENT;
offset = offset + named_entry->offset;
named_entry_lock(named_entry);
if (named_entry->is_sub_map) {
vm_map_t submap;
submap = named_entry->backing.map;
vm_map_lock(submap);
vm_map_reference(submap);
vm_map_unlock(submap);
named_entry_unlock(named_entry);
result = vm_map_enter(target_map,
&map_addr,
map_size,
mask,
flags | VM_FLAGS_SUBMAP,
(vm_object_t) submap,
offset,
copy,
cur_protection,
max_protection,
inheritance);
if (result != KERN_SUCCESS) {
vm_map_deallocate(submap);
} else {
if (submap->mapped == FALSE) {
vm_map_lock(submap);
submap->mapped = TRUE;
vm_map_unlock(submap);
}
*address = map_addr;
}
return result;
} else if (named_entry->is_pager) {
unsigned int access;
vm_prot_t protections;
unsigned int wimg_mode;
protections = named_entry->protection & VM_PROT_ALL;
access = GET_MAP_MEM(named_entry->protection);
object = vm_object_enter(named_entry->backing.pager,
named_entry->size,
named_entry->internal,
FALSE,
FALSE);
if (object == VM_OBJECT_NULL) {
named_entry_unlock(named_entry);
return KERN_INVALID_OBJECT;
}
vm_object_lock(object);
vm_object_reference_locked(object);
named_entry->backing.object = object;
named_entry->is_pager = FALSE;
named_entry_unlock(named_entry);
wimg_mode = object->wimg_bits;
if (access == MAP_MEM_IO) {
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
wimg_mode = VM_WIMG_USE_DEFAULT;
} else if (access == MAP_MEM_WTHRU) {
wimg_mode = VM_WIMG_WTHRU;
} else if (access == MAP_MEM_WCOMB) {
wimg_mode = VM_WIMG_WCOMB;
}
if (!named_entry->internal) {
while (!object->pager_ready) {
vm_object_wait(
object,
VM_OBJECT_EVENT_PAGER_READY,
THREAD_UNINT);
vm_object_lock(object);
}
}
if (object->wimg_bits != wimg_mode)
vm_object_change_wimg_mode(object, wimg_mode);
object->true_share = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
vm_object_unlock(object);
} else {
object = named_entry->backing.object;
assert(object != VM_OBJECT_NULL);
named_entry_unlock(named_entry);
vm_object_reference(object);
}
} else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
object = vm_object_enter((memory_object_t)port,
size, FALSE, FALSE, FALSE);
if (object == VM_OBJECT_NULL)
return KERN_INVALID_OBJECT;
if (object != VM_OBJECT_NULL) {
if (object == kernel_object) {
printf("Warning: Attempt to map kernel object"
" by a non-private kernel entity\n");
return KERN_INVALID_OBJECT;
}
if (!object->pager_ready) {
vm_object_lock(object);
while (!object->pager_ready) {
vm_object_wait(object,
VM_OBJECT_EVENT_PAGER_READY,
THREAD_UNINT);
vm_object_lock(object);
}
vm_object_unlock(object);
}
}
} else {
return KERN_INVALID_OBJECT;
}
if (object != VM_OBJECT_NULL &&
object->named &&
object->pager != MEMORY_OBJECT_NULL &&
object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
memory_object_t pager;
vm_prot_t pager_prot;
kern_return_t kr;
pager_prot = max_protection;
if (copy) {
pager_prot &= ~VM_PROT_WRITE;
}
vm_object_lock(object);
pager = object->pager;
if (object->named &&
pager != MEMORY_OBJECT_NULL &&
object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
assert(object->pager_ready);
vm_object_mapping_wait(object, THREAD_UNINT);
vm_object_mapping_begin(object);
vm_object_unlock(object);
kr = memory_object_map(pager, pager_prot);
assert(kr == KERN_SUCCESS);
vm_object_lock(object);
vm_object_mapping_end(object);
}
vm_object_unlock(object);
}
if (copy) {
vm_object_t new_object;
vm_object_offset_t new_offset;
result = vm_object_copy_strategically(object, offset, size,
&new_object, &new_offset,
&copy);
if (result == KERN_MEMORY_RESTART_COPY) {
boolean_t success;
boolean_t src_needs_copy;
new_object = object;
new_offset = offset;
success = vm_object_copy_quickly(&new_object,
new_offset, size,
&src_needs_copy,
&copy);
assert(success);
result = KERN_SUCCESS;
}
vm_object_deallocate(object);
if (result != KERN_SUCCESS)
return result;
object = new_object;
offset = new_offset;
}
result = vm_map_enter(target_map,
&map_addr, map_size,
(vm_map_offset_t)mask,
flags,
object, offset,
copy,
cur_protection, max_protection, inheritance);
if (result != KERN_SUCCESS)
vm_object_deallocate(object);
*address = map_addr;
return result;
}
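/*
 * vm_map_enter_mem_object_control:
 *
 * Like vm_map_enter_mem_object, but takes a memory object control
 * handle that resolves directly to a VM object.
 */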
kern_return_t
vm_map_enter_mem_object_control(
vm_map_t target_map,
vm_map_offset_t *address,
vm_map_size_t initial_size,
vm_map_offset_t mask,
int flags,
memory_object_control_t control,
vm_object_offset_t offset,
boolean_t copy,
vm_prot_t cur_protection,
vm_prot_t max_protection,
vm_inherit_t inheritance)
{
vm_map_address_t map_addr;
vm_map_size_t map_size;
vm_object_t object;
vm_object_size_t size;
kern_return_t result;
memory_object_t pager;
vm_prot_t pager_prot;
kern_return_t kr;
if ((target_map == VM_MAP_NULL) ||
(cur_protection & ~VM_PROT_ALL) ||
(max_protection & ~VM_PROT_ALL) ||
(inheritance > VM_INHERIT_LAST_VALID) ||
initial_size == 0)
return KERN_INVALID_ARGUMENT;
map_addr = vm_map_trunc_page(*address);
map_size = vm_map_round_page(initial_size);
size = vm_object_round_page(initial_size);
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return KERN_INVALID_OBJECT;
if (object == kernel_object) {
printf("Warning: Attempt to map kernel object"
" by a non-private kernel entity\n");
return KERN_INVALID_OBJECT;
}
vm_object_lock(object);
object->ref_count++;
vm_object_res_reference(object);
pager_prot = max_protection;
if (copy) {
pager_prot &= ~VM_PROT_WRITE;
}
pager = object->pager;
if (object->named &&
pager != MEMORY_OBJECT_NULL &&
object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
assert(object->pager_ready);
vm_object_mapping_wait(object, THREAD_UNINT);
vm_object_mapping_begin(object);
vm_object_unlock(object);
kr = memory_object_map(pager, pager_prot);
assert(kr == KERN_SUCCESS);
vm_object_lock(object);
vm_object_mapping_end(object);
}
vm_object_unlock(object);
if (copy) {
vm_object_t new_object;
vm_object_offset_t new_offset;
result = vm_object_copy_strategically(object, offset, size,
&new_object, &new_offset,
&copy);
if (result == KERN_MEMORY_RESTART_COPY) {
boolean_t success;
boolean_t src_needs_copy;
new_object = object;
new_offset = offset;
success = vm_object_copy_quickly(&new_object,
new_offset, size,
&src_needs_copy,
&copy);
assert(success);
result = KERN_SUCCESS;
}
vm_object_deallocate(object);
if (result != KERN_SUCCESS)
return result;
object = new_object;
offset = new_offset;
}
result = vm_map_enter(target_map,
&map_addr, map_size,
(vm_map_offset_t)mask,
flags,
object, offset,
copy,
cur_protection, max_protection, inheritance);
if (result != KERN_SUCCESS)
vm_object_deallocate(object);
*address = map_addr;
return result;
}
#if VM_CPM
#ifdef MACH_ASSERT
extern pmap_paddr_t avail_start, avail_end;
#endif
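/*
 * vm_map_enter_cpm:
 *
 * Allocate "size" bytes of physically contiguous, wired memory and
 * map it into the given map. This may fail if sufficient contiguous
 * memory cannot be found, and may cause severe paging activity.
 */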
kern_return_t
vm_map_enter_cpm(
vm_map_t map,
vm_map_offset_t *addr,
vm_map_size_t size,
int flags)
{
vm_object_t cpm_obj;
pmap_t pmap;
vm_page_t m, pages;
kern_return_t kr;
vm_map_offset_t va, start, end, offset;
#if MACH_ASSERT
vm_map_offset_t prev_addr;
#endif
boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
if (!vm_allocate_cpm_enabled)
return KERN_FAILURE;
if (size == 0) {
*addr = 0;
return KERN_SUCCESS;
}
if (anywhere)
*addr = vm_map_min(map);
else
*addr = vm_map_trunc_page(*addr);
size = vm_map_round_page(size);
if (size > VM_MAX_ADDRESS)
return KERN_RESOURCE_SHORTAGE;
if ((kr = cpm_allocate(CAST_DOWN(vm_size_t, size),
&pages, 0, 0, TRUE, flags)) != KERN_SUCCESS)
return kr;
cpm_obj = vm_object_allocate((vm_object_size_t)size);
assert(cpm_obj != VM_OBJECT_NULL);
assert(cpm_obj->internal);
assert(cpm_obj->size == (vm_object_size_t)size);
assert(cpm_obj->can_persist == FALSE);
assert(cpm_obj->pager_created == FALSE);
assert(cpm_obj->pageout == FALSE);
assert(cpm_obj->shadow == VM_OBJECT_NULL);
vm_object_lock(cpm_obj);
for (offset = 0; offset < size; offset += PAGE_SIZE) {
m = pages;
pages = NEXT_PAGE(m);
*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
assert(!m->gobbled);
assert(!m->wanted);
assert(!m->pageout);
assert(!m->tabled);
assert(VM_PAGE_WIRED(m));
ASSERT_PAGE_DECRYPTED(m);
assert(m->busy);
assert(m->phys_page>=(avail_start>>PAGE_SHIFT) && m->phys_page<=(avail_end>>PAGE_SHIFT));
m->busy = FALSE;
vm_page_insert(m, cpm_obj, offset);
}
assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
vm_object_unlock(cpm_obj);
vm_object_reference(cpm_obj);
kr = vm_map_enter(
map,
addr,
size,
(vm_map_offset_t)0,
flags,
cpm_obj,
(vm_object_offset_t)0,
FALSE,
VM_PROT_ALL,
VM_PROT_ALL,
VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS) {
assert(cpm_obj->pager_created == FALSE);
assert(cpm_obj->can_persist == FALSE);
assert(cpm_obj->pageout == FALSE);
assert(cpm_obj->shadow == VM_OBJECT_NULL);
vm_object_deallocate(cpm_obj); /* kill acquired ref */
vm_object_deallocate(cpm_obj); /* kill allocated ref */
return kr; /* don't touch the now-freed object or the unmapped range */
}
start = *addr;
end = start + size;
pmap = vm_map_pmap(map);
pmap_pageable(pmap, start, end, FALSE);
for (offset = 0, va = start; offset < size;
va += PAGE_SIZE, offset += PAGE_SIZE) {
int type_of_fault;
vm_object_lock(cpm_obj);
m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
assert(m != VM_PAGE_NULL);
vm_page_zero_fill(m);
type_of_fault = DBG_ZERO_FILL_FAULT;
vm_fault_enter(m, pmap, va, VM_PROT_ALL, VM_PROT_WRITE,
VM_PAGE_WIRED(m), FALSE, FALSE, FALSE,
&type_of_fault);
vm_object_unlock(cpm_obj);
}
#if MACH_ASSERT
for (offset = 0; offset < size; offset += PAGE_SIZE) {
vm_object_lock(cpm_obj);
m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
vm_object_unlock(cpm_obj);
if (m == VM_PAGE_NULL)
panic("vm_allocate_cpm: obj %p off 0x%llx no page",
cpm_obj, (uint64_t)offset);
assert(m->tabled);
assert(!m->busy);
assert(!m->wanted);
assert(!m->fictitious);
assert(!m->private);
assert(!m->absent);
assert(!m->error);
assert(!m->cleaning);
assert(!m->precious);
assert(!m->clustered);
if (offset != 0) {
if (m->phys_page != prev_addr + 1) {
printf("start 0x%llx end 0x%llx va 0x%llx\n",
(uint64_t)start, (uint64_t)end, (uint64_t)va);
printf("obj %p off 0x%llx\n", cpm_obj, (uint64_t)offset);
printf("m %p prev_address 0x%llx\n", m,
(uint64_t)prev_addr);
panic("vm_allocate_cpm: pages not contig!");
}
}
prev_addr = m->phys_page;
}
#endif
vm_object_deallocate(cpm_obj);
return kr;
}
#else
kern_return_t
vm_map_enter_cpm(
__unused vm_map_t map,
__unused vm_map_offset_t *addr,
__unused vm_map_size_t size,
__unused int flags)
{
return KERN_FAILURE;
}
#endif
#ifndef NO_NESTED_PMAP
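/*
 * vm_map_clip_unnest:
 *
 * Clip the given nested (submap) entry to the unnest range and undo
 * the pmap nesting, so the range can then be modified independently
 * of the shared submap.
 */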
static void
vm_map_clip_unnest(
vm_map_t map,
vm_map_entry_t entry,
vm_map_offset_t start_unnest,
vm_map_offset_t end_unnest)
{
vm_map_offset_t old_start_unnest = start_unnest;
vm_map_offset_t old_end_unnest = end_unnest;
assert(entry->is_sub_map);
assert(entry->object.sub_map != NULL);
if (pmap_adjust_unnest_parameters(map->pmap, &start_unnest, &end_unnest)) {
log_unnest_badness(map, old_start_unnest, old_end_unnest);
}
if (entry->vme_start > start_unnest ||
entry->vme_end < end_unnest) {
panic("vm_map_clip_unnest(0x%llx,0x%llx): "
"bad nested entry: start=0x%llx end=0x%llx\n",
(long long)start_unnest, (long long)end_unnest,
(long long)entry->vme_start, (long long)entry->vme_end);
}
if (start_unnest > entry->vme_start) {
_vm_map_clip_start(&map->hdr,
entry,
start_unnest);
vm_map_store_update_first_free(map, map->first_free);
}
if (entry->vme_end > end_unnest) {
_vm_map_clip_end(&map->hdr,
entry,
end_unnest);
vm_map_store_update_first_free(map, map->first_free);
}
pmap_unnest(map->pmap,
entry->vme_start,
entry->vme_end - entry->vme_start);
if ((map->mapped) && (map->ref_count)) {
vm_map_submap_pmap_clean(
map, entry->vme_start,
entry->vme_end,
entry->object.sub_map,
entry->offset);
}
entry->use_pmap = FALSE;
}
#endif
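/*
 * vm_map_clip_start: [ internal use only ]
 *
 * Ensure that the entry begins at or after the specified address;
 * if necessary, split it into two entries.
 */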
static void
vm_map_clip_start(
vm_map_t map,
vm_map_entry_t entry,
vm_map_offset_t startaddr)
{
#ifndef NO_NESTED_PMAP
if (entry->use_pmap &&
startaddr >= entry->vme_start) {
vm_map_offset_t start_unnest, end_unnest;
start_unnest = startaddr & ~(pmap_nesting_size_min - 1);
end_unnest = start_unnest + pmap_nesting_size_min;
vm_map_clip_unnest(map, entry, start_unnest, end_unnest);
}
#endif
if (startaddr > entry->vme_start) {
if (entry->object.vm_object &&
!entry->is_sub_map &&
entry->object.vm_object->phys_contiguous) {
pmap_remove(map->pmap,
(addr64_t)(entry->vme_start),
(addr64_t)(entry->vme_end));
}
_vm_map_clip_start(&map->hdr, entry, startaddr);
vm_map_store_update_first_free(map, map->first_free);
}
}
#define vm_map_copy_clip_start(copy, entry, startaddr) \
MACRO_BEGIN \
if ((startaddr) > (entry)->vme_start) \
_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
MACRO_END
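/*
 * _vm_map_clip_start:
 *
 * Split the entry at "start": the new entry covers the head of the
 * range and is linked in before the existing entry.
 */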
static void
_vm_map_clip_start(
register struct vm_map_header *map_header,
register vm_map_entry_t entry,
register vm_map_offset_t start)
{
register vm_map_entry_t new_entry;
new_entry = _vm_map_entry_create(map_header);
vm_map_entry_copy_full(new_entry, entry);
new_entry->vme_end = start;
entry->offset += (start - entry->vme_start);
entry->vme_start = start;
_vm_map_store_entry_link(map_header, entry->vme_prev, new_entry);
if (entry->is_sub_map)
vm_map_reference(new_entry->object.sub_map);
else
vm_object_reference(new_entry->object.vm_object);
}
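/*
 * vm_map_clip_end: [ internal use only ]
 *
 * Ensure that the entry ends at or before the specified address;
 * if necessary, split it into two entries.
 */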
static void
vm_map_clip_end(
vm_map_t map,
vm_map_entry_t entry,
vm_map_offset_t endaddr)
{
if (endaddr > entry->vme_end) {
endaddr = entry->vme_end;
}
#ifndef NO_NESTED_PMAP
if (entry->use_pmap) {
vm_map_offset_t start_unnest, end_unnest;
start_unnest = entry->vme_start;
end_unnest =
(endaddr + pmap_nesting_size_min - 1) &
~(pmap_nesting_size_min - 1);
vm_map_clip_unnest(map, entry, start_unnest, end_unnest);
}
#endif
if (endaddr < entry->vme_end) {
if (entry->object.vm_object &&
!entry->is_sub_map &&
entry->object.vm_object->phys_contiguous) {
pmap_remove(map->pmap,
(addr64_t)(entry->vme_start),
(addr64_t)(entry->vme_end));
}
_vm_map_clip_end(&map->hdr, entry, endaddr);
vm_map_store_update_first_free(map, map->first_free);
}
}
#define vm_map_copy_clip_end(copy, entry, endaddr) \
MACRO_BEGIN \
if ((endaddr) < (entry)->vme_end) \
_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
MACRO_END
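/*
 * _vm_map_clip_end:
 *
 * Split the entry at "end": the new entry covers the tail of the
 * range and is linked in after the existing entry.
 */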
static void
_vm_map_clip_end(
register struct vm_map_header *map_header,
register vm_map_entry_t entry,
register vm_map_offset_t end)
{
register vm_map_entry_t new_entry;
new_entry = _vm_map_entry_create(map_header);
vm_map_entry_copy_full(new_entry, entry);
new_entry->vme_start = entry->vme_end = end;
new_entry->offset += (end - entry->vme_start);
_vm_map_store_entry_link(map_header, entry, new_entry);
if (entry->is_sub_map)
vm_map_reference(new_entry->object.sub_map);
else
vm_object_reference(new_entry->object.vm_object);
}
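/*
 * VM_MAP_RANGE_CHECK: clip the given address range to the map's bounds.
 */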
#define VM_MAP_RANGE_CHECK(map, start, end) \
MACRO_BEGIN \
if (start < vm_map_min(map)) \
start = vm_map_min(map); \
if (end > vm_map_max(map)) \
end = vm_map_max(map); \
if (start > end) \
start = end; \
MACRO_END
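/*
 * vm_map_range_check: [ internal use only ]
 *
 * Check that [start, end) lies within the map and is covered by a
 * contiguous run of entries with no holes. If "entry" is non-null it
 * is set to the entry containing the start of the region. The map
 * must be locked.
 */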
static boolean_t
vm_map_range_check(
register vm_map_t map,
register vm_map_offset_t start,
register vm_map_offset_t end,
vm_map_entry_t *entry)
{
vm_map_entry_t cur;
register vm_map_offset_t prev;
if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
return (FALSE);
if (!vm_map_lookup_entry(map, start, &cur))
return (FALSE);
if (entry != (vm_map_entry_t *) NULL)
*entry = cur;
if (end <= cur->vme_end)
return (TRUE);
prev = cur->vme_end;
cur = cur->vme_next;
while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
if (end <= cur->vme_end)
return (TRUE);
prev = cur->vme_end;
cur = cur->vme_next;
}
return (FALSE);
}
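/*
 * vm_map_submap: [ kernel use only ]
 *
 * Mark the given range as handled by a subordinate map. The range
 * must have been created against vm_submap_object and left untouched.
 * With use_pmap, the submap's pmap is nested into the parent's.
 */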
kern_return_t
vm_map_submap(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_map_t submap,
vm_map_offset_t offset,
#ifdef NO_NESTED_PMAP
__unused
#endif
boolean_t use_pmap)
{
vm_map_entry_t entry;
register kern_return_t result = KERN_INVALID_ARGUMENT;
register vm_object_t object;
vm_map_lock(map);
if (! vm_map_lookup_entry(map, start, &entry)) {
entry = entry->vme_next;
}
if (entry == vm_map_to_entry(map) ||
entry->is_sub_map) {
vm_map_unlock(map);
return KERN_INVALID_ARGUMENT;
}
assert(!entry->use_pmap);
vm_map_clip_start(map, entry, start);
vm_map_clip_end(map, entry, end);
if ((entry->vme_start == start) && (entry->vme_end == end) &&
(!entry->is_sub_map) &&
((object = entry->object.vm_object) == vm_submap_object) &&
(object->resident_page_count == 0) &&
(object->copy == VM_OBJECT_NULL) &&
(object->shadow == VM_OBJECT_NULL) &&
(!object->pager_created)) {
entry->offset = (vm_object_offset_t)offset;
entry->object.vm_object = VM_OBJECT_NULL;
vm_object_deallocate(object);
entry->is_sub_map = TRUE;
entry->object.sub_map = submap;
vm_map_reference(submap);
submap->mapped = TRUE;
#ifndef NO_NESTED_PMAP
if (use_pmap) {
if(submap->pmap == NULL) {
submap->pmap = pmap_create((vm_map_size_t) 0, FALSE);
if(submap->pmap == PMAP_NULL) {
vm_map_unlock(map);
return(KERN_NO_SPACE);
}
}
result = pmap_nest(map->pmap,
(entry->object.sub_map)->pmap,
(addr64_t)start,
(addr64_t)start,
(uint64_t)(end - start));
if(result)
panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result);
entry->use_pmap = TRUE;
}
#else
pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end);
#endif
result = KERN_SUCCESS;
}
vm_map_unlock(map);
return(result);
}
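/*
 * vm_map_protect:
 *
 * Set the protection of the given address range in the target map.
 * If set_max is TRUE the maximum protection is changed and the
 * current protection is clipped to it; otherwise only the current
 * protection is affected.
 */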
kern_return_t
vm_map_protect(
register vm_map_t map,
register vm_map_offset_t start,
register vm_map_offset_t end,
register vm_prot_t new_prot,
register boolean_t set_max)
{
register vm_map_entry_t current;
register vm_map_offset_t prev;
vm_map_entry_t entry;
vm_prot_t new_max;
XPR(XPR_VM_MAP,
"vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
map, start, end, new_prot, set_max);
vm_map_lock(map);
if (start >= map->max_offset) {
vm_map_unlock(map);
return(KERN_INVALID_ADDRESS);
}
while(1) {
if (! vm_map_lookup_entry(map, start, &entry)) {
vm_map_unlock(map);
return(KERN_INVALID_ADDRESS);
}
if (entry->superpage_size && (start & (SUPERPAGE_SIZE-1))) {
start = SUPERPAGE_ROUND_DOWN(start);
continue;
}
break;
}
if (entry->superpage_size)
end = SUPERPAGE_ROUND_UP(end);
current = entry;
prev = current->vme_start;
while ((current != vm_map_to_entry(map)) &&
(current->vme_start < end)) {
if (current->vme_start != prev) {
vm_map_unlock(map);
return(KERN_INVALID_ADDRESS);
}
new_max = current->max_protection;
if(new_prot & VM_PROT_COPY) {
new_max |= VM_PROT_WRITE;
if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
vm_map_unlock(map);
return(KERN_PROTECTION_FAILURE);
}
} else {
if ((new_prot & new_max) != new_prot) {
vm_map_unlock(map);
return(KERN_PROTECTION_FAILURE);
}
}
#if CONFIG_EMBEDDED
if (new_prot & VM_PROT_WRITE) {
if ((new_prot & VM_PROT_EXECUTE) && !(current->used_for_jit)) {
printf("EMBEDDED: %s can't have both write and exec at the same time\n", __FUNCTION__);
new_prot &= ~VM_PROT_EXECUTE;
}
}
#endif
prev = current->vme_end;
current = current->vme_next;
}
if (end > prev) {
vm_map_unlock(map);
return(KERN_INVALID_ADDRESS);
}
current = entry;
if (current != vm_map_to_entry(map)) {
vm_map_clip_start(map, current, start);
}
while ((current != vm_map_to_entry(map)) &&
(current->vme_start < end)) {
vm_prot_t old_prot;
vm_map_clip_end(map, current, end);
assert(!current->use_pmap);
old_prot = current->protection;
if(new_prot & VM_PROT_COPY) {
if (current->is_sub_map == FALSE && current->object.vm_object == VM_OBJECT_NULL){
current->object.vm_object = vm_object_allocate((vm_map_size_t)(current->vme_end - current->vme_start));
current->offset = 0;
}
current->needs_copy = TRUE;
current->max_protection |= VM_PROT_WRITE;
}
if (set_max)
current->protection =
(current->max_protection =
new_prot & ~VM_PROT_COPY) &
old_prot;
else
current->protection = new_prot & ~VM_PROT_COPY;
if (current->protection != old_prot) {
vm_prot_t prot;
prot = current->protection & ~VM_PROT_WRITE;
if (override_nx(map, current->alias) && prot)
prot |= VM_PROT_EXECUTE;
if (current->is_sub_map && current->use_pmap) {
pmap_protect(current->object.sub_map->pmap,
current->vme_start,
current->vme_end,
prot);
} else {
pmap_protect(map->pmap,
current->vme_start,
current->vme_end,
prot);
}
}
current = current->vme_next;
}
current = entry;
while ((current != vm_map_to_entry(map)) &&
(current->vme_start <= end)) {
vm_map_simplify_entry(map, current);
current = current->vme_next;
}
vm_map_unlock(map);
return(KERN_SUCCESS);
}
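/*
 * vm_map_inherit:
 *
 * Set the inheritance of the given address range in the target map,
 * controlling how the range is passed on to child tasks at fork.
 * VM_INHERIT_COPY is rejected for submap entries.
 */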
kern_return_t
vm_map_inherit(
register vm_map_t map,
register vm_map_offset_t start,
register vm_map_offset_t end,
register vm_inherit_t new_inheritance)
{
register vm_map_entry_t entry;
vm_map_entry_t temp_entry;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (vm_map_lookup_entry(map, start, &temp_entry)) {
entry = temp_entry;
}
else {
temp_entry = temp_entry->vme_next;
entry = temp_entry;
}
while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
if(entry->is_sub_map) {
if(new_inheritance == VM_INHERIT_COPY) {
vm_map_unlock(map);
return(KERN_INVALID_ARGUMENT);
}
}
entry = entry->vme_next;
}
entry = temp_entry;
if (entry != vm_map_to_entry(map)) {
vm_map_clip_start(map, entry, start);
}
while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
vm_map_clip_end(map, entry, end);
assert(!entry->use_pmap);
entry->inheritance = new_inheritance;
entry = entry->vme_next;
}
vm_map_unlock(map);
return(KERN_SUCCESS);
}
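/*
 * add_wire_counts:
 *
 * Update the wired-memory accounting for an entry that is about to
 * be wired. User wiring is checked against the per-map and global
 * user-wire limits and fails with KERN_RESOURCE_SHORTAGE if they
 * would be exceeded; kernel wiring never fails this way.
 */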
static kern_return_t
add_wire_counts(
vm_map_t map,
vm_map_entry_t entry,
boolean_t user_wire)
{
vm_map_size_t size;
if (user_wire) {
unsigned int total_wire_count = vm_page_wire_count + vm_lopage_free_count;
if (entry->user_wired_count == 0) {
size = entry->vme_end - entry->vme_start;
if(size + map->user_wire_size > MIN(map->user_wire_limit, vm_user_wire_limit) ||
size + ptoa_64(total_wire_count) > vm_global_user_wire_limit ||
size + ptoa_64(total_wire_count) > max_mem - vm_global_no_user_wire_amount)
return KERN_RESOURCE_SHORTAGE;
if (entry->wired_count >= MAX_WIRE_COUNT)
return KERN_FAILURE;
entry->wired_count++;
map->user_wire_size += size;
}
if (entry->user_wired_count >= MAX_WIRE_COUNT)
return KERN_FAILURE;
entry->user_wired_count++;
} else {
if (entry->wired_count >= MAX_WIRE_COUNT)
panic("vm_map_wire: too many wirings");
entry->wired_count++;
}
return KERN_SUCCESS;
}
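/*
 * subtract_wire_counts:
 *
 * Undo the accounting done by add_wire_counts when an entry is
 * being unwired.
 */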
static void
subtract_wire_counts(
vm_map_t map,
vm_map_entry_t entry,
boolean_t user_wire)
{
if (user_wire) {
if (entry->user_wired_count == 1) {
assert(entry->wired_count >= 1);
entry->wired_count--;
map->user_wire_size -= entry->vme_end - entry->vme_start;
}
assert(entry->user_wired_count >= 1);
entry->user_wired_count--;
} else {
assert(entry->wired_count >= 1);
entry->wired_count--;
}
}
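/*
 * vm_map_wire_nested:
 *
 * Wire the given range of the map, faulting pages in as needed and
 * recursing into submaps. Entries already in transition are waited
 * on, and wire counts are rolled back on failure.
 */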
static kern_return_t
vm_map_wire_nested(
register vm_map_t map,
register vm_map_offset_t start,
register vm_map_offset_t end,
register vm_prot_t access_type,
boolean_t user_wire,
pmap_t map_pmap,
vm_map_offset_t pmap_addr)
{
register vm_map_entry_t entry;
struct vm_map_entry *first_entry, tmp_entry;
vm_map_t real_map;
register vm_map_offset_t s,e;
kern_return_t rc;
boolean_t need_wakeup;
boolean_t main_map = FALSE;
wait_interrupt_t interruptible_state;
thread_t cur_thread;
unsigned int last_timestamp;
vm_map_size_t size;
vm_map_lock(map);
if(map_pmap == NULL)
main_map = TRUE;
last_timestamp = map->timestamp;
VM_MAP_RANGE_CHECK(map, start, end);
assert(page_aligned(start));
assert(page_aligned(end));
if (start == end) {
vm_map_unlock(map);
return KERN_SUCCESS;
}
need_wakeup = FALSE;
cur_thread = current_thread();
s = start;
rc = KERN_SUCCESS;
if (vm_map_lookup_entry(map, s, &first_entry)) {
entry = first_entry;
} else {
rc = KERN_INVALID_ADDRESS;
goto done;
}
while ((entry != vm_map_to_entry(map)) && (s < end)) {
e = entry->vme_end;
if (e > end)
e = end;
if (entry->in_transition) {
wait_result_t wait_result;
entry->needs_wakeup = TRUE;
if (need_wakeup) {
vm_map_entry_wakeup(map);
need_wakeup = FALSE;
}
wait_result = vm_map_entry_wait(map,
(user_wire) ? THREAD_ABORTSAFE :
THREAD_UNINT);
if (user_wire && wait_result == THREAD_INTERRUPTED) {
rc = KERN_FAILURE;
goto done;
}
last_timestamp = map->timestamp;
if (!vm_map_lookup_entry(map, s, &first_entry)) {
rc = KERN_FAILURE;
goto done;
}
entry = first_entry;
continue;
}
if (entry->is_sub_map) {
vm_map_offset_t sub_start;
vm_map_offset_t sub_end;
vm_map_offset_t local_start;
vm_map_offset_t local_end;
pmap_t pmap;
vm_map_clip_start(map, entry, s);
vm_map_clip_end(map, entry, end);
sub_start = entry->offset;
sub_end = entry->vme_end;
sub_end += entry->offset - entry->vme_start;
local_end = entry->vme_end;
if(map_pmap == NULL) {
vm_object_t object;
vm_object_offset_t offset;
vm_prot_t prot;
boolean_t wired;
vm_map_entry_t local_entry;
vm_map_version_t version;
vm_map_t lookup_map;
if(entry->use_pmap) {
pmap = entry->object.sub_map->pmap;
#ifdef notdef
pmap_addr = sub_start;
#endif
pmap_addr = s;
} else {
pmap = map->pmap;
pmap_addr = s;
}
if (entry->wired_count) {
if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
goto done;
entry = entry->vme_next;
s = entry->vme_start;
continue;
}
local_start = entry->vme_start;
lookup_map = map;
vm_map_lock_write_to_read(map);
if(vm_map_lookup_locked(
&lookup_map, local_start,
access_type,
OBJECT_LOCK_EXCLUSIVE,
&version, &object,
&offset, &prot, &wired,
NULL,
&real_map)) {
vm_map_unlock_read(lookup_map);
vm_map_unwire(map, start,
s, user_wire);
return(KERN_FAILURE);
}
if(real_map != lookup_map)
vm_map_unlock(real_map);
vm_map_unlock_read(lookup_map);
vm_map_lock(map);
vm_object_unlock(object);
if (!vm_map_lookup_entry(map,
local_start,
&local_entry)) {
rc = KERN_FAILURE;
goto done;
}
entry = local_entry;
assert(s == local_start);
vm_map_clip_start(map, entry, s);
vm_map_clip_end(map, entry, end);
e = entry->vme_end;
if (e > end)
e = end;
if (!entry->is_sub_map) {
last_timestamp = map->timestamp;
continue;
}
} else {
local_start = entry->vme_start;
pmap = map_pmap;
}
if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
goto done;
entry->in_transition = TRUE;
vm_map_unlock(map);
rc = vm_map_wire_nested(entry->object.sub_map,
sub_start, sub_end,
access_type,
user_wire, pmap, pmap_addr);
vm_map_lock(map);
if (!vm_map_lookup_entry(map, local_start,
&first_entry))
panic("vm_map_wire: re-lookup failed");
entry = first_entry;
assert(local_start == s);
e = entry->vme_end;
if (e > end)
e = end;
last_timestamp = map->timestamp;
while ((entry != vm_map_to_entry(map)) &&
(entry->vme_start < e)) {
assert(entry->in_transition);
entry->in_transition = FALSE;
if (entry->needs_wakeup) {
entry->needs_wakeup = FALSE;
need_wakeup = TRUE;
}
if (rc != KERN_SUCCESS) {
subtract_wire_counts(map, entry, user_wire);
}
entry = entry->vme_next;
}
if (rc != KERN_SUCCESS) {
goto done;
}
s = entry->vme_start;
continue;
}
if (entry->wired_count) {
vm_map_clip_start(map, entry, s);
vm_map_clip_end(map, entry, end);
if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
goto done;
entry = entry->vme_next;
s = entry->vme_start;
continue;
}
size = entry->vme_end - entry->vme_start;
if (entry->needs_copy) {
vm_object_shadow(&entry->object.vm_object,
&entry->offset, size);
entry->needs_copy = FALSE;
} else if (entry->object.vm_object == VM_OBJECT_NULL) {
entry->object.vm_object = vm_object_allocate(size);
entry->offset = (vm_object_offset_t)0;
}
vm_map_clip_start(map, entry, s);
vm_map_clip_end(map, entry, end);
e = entry->vme_end;
if (e > end)
e = end;
if ((entry->vme_end < end) &&
((entry->vme_next == vm_map_to_entry(map)) ||
(entry->vme_next->vme_start > entry->vme_end))) {
rc = KERN_INVALID_ADDRESS;
goto done;
}
if ((entry->protection & access_type) != access_type) {
rc = KERN_PROTECTION_FAILURE;
goto done;
}
assert(entry->wired_count == 0 && entry->user_wired_count == 0);
if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
goto done;
entry->in_transition = TRUE;
tmp_entry = *entry;
vm_map_unlock(map);
if (!user_wire && cur_thread != THREAD_NULL)
interruptible_state = thread_interrupt_level(THREAD_UNINT);
else
interruptible_state = THREAD_UNINT;
if(map_pmap)
rc = vm_fault_wire(map,
&tmp_entry, map_pmap, pmap_addr);
else
rc = vm_fault_wire(map,
&tmp_entry, map->pmap,
tmp_entry.vme_start);
if (!user_wire && cur_thread != THREAD_NULL)
thread_interrupt_level(interruptible_state);
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
&first_entry))
panic("vm_map_wire: re-lookup failed");
entry = first_entry;
}
last_timestamp = map->timestamp;
while ((entry != vm_map_to_entry(map)) &&
(entry->vme_start < tmp_entry.vme_end)) {
assert(entry->in_transition);
entry->in_transition = FALSE;
if (entry->needs_wakeup) {
entry->needs_wakeup = FALSE;
need_wakeup = TRUE;
}
if (rc != KERN_SUCCESS) {
subtract_wire_counts(map, entry, user_wire);
}
entry = entry->vme_next;
}
if (rc != KERN_SUCCESS) {
goto done;
}
s = entry->vme_start;
}
done:
if (rc == KERN_SUCCESS) {
vm_map_simplify_range(map, start, end);
}
vm_map_unlock(map);
if (need_wakeup)
vm_map_entry_wakeup(map);
if (rc != KERN_SUCCESS) {
vm_map_unwire(map, start, s, user_wire);
}
return rc;
}
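/*
 * Exported wrapper for vm_map_wire_nested(). Illustrative sketch of a
 * hypothetical caller (not part of this file): wiring a page-aligned
 * user range for read/write access:
 *
 *	kr = vm_map_wire(current_map(),
 *		vm_map_trunc_page(addr),
 *		vm_map_round_page(addr + size),
 *		VM_PROT_READ | VM_PROT_WRITE,
 *		TRUE);
 *
 * The final argument selects a user wiring, which is accounted and
 * limited; kernel wirings pass FALSE.
 */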
kern_return_t
vm_map_wire(
register vm_map_t map,
register vm_map_offset_t start,
register vm_map_offset_t end,
register vm_prot_t access_type,
boolean_t user_wire)
{
kern_return_t kret;
kret = vm_map_wire_nested(map, start, end, access_type,
user_wire, (pmap_t)NULL, 0);
return kret;
}
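/*
 * vm_map_unwire:
 *
 * Sets the pageability of the specified address range in the target
 * map as pageable. The range must have been wired previously. For
 * kernel unwiring, inconsistencies (unwired entries, holes, failed
 * re-lookups) are fatal; for user unwiring they are tolerated so that
 * a faulty user request cannot panic the kernel.
 */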
static kern_return_t
vm_map_unwire_nested(
register vm_map_t map,
register vm_map_offset_t start,
register vm_map_offset_t end,
boolean_t user_wire,
pmap_t map_pmap,
vm_map_offset_t pmap_addr)
{
register vm_map_entry_t entry;
struct vm_map_entry *first_entry, tmp_entry;
boolean_t need_wakeup;
boolean_t main_map = FALSE;
unsigned int last_timestamp;
vm_map_lock(map);
if(map_pmap == NULL)
main_map = TRUE;
last_timestamp = map->timestamp;
VM_MAP_RANGE_CHECK(map, start, end);
assert(page_aligned(start));
assert(page_aligned(end));
if (start == end) {
vm_map_unlock(map);
return KERN_SUCCESS;
}
if (vm_map_lookup_entry(map, start, &first_entry)) {
entry = first_entry;
}
else {
if (!user_wire) {
panic("vm_map_unwire: start not found");
}
vm_map_unlock(map);
return(KERN_INVALID_ADDRESS);
}
if (entry->superpage_size) {
vm_map_unlock(map);
return KERN_INVALID_ADDRESS;
}
need_wakeup = FALSE;
while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
if (entry->in_transition) {
if (!user_wire) {
panic("vm_map_unwire: in_transition entry");
}
entry = entry->vme_next;
continue;
}
if (entry->is_sub_map) {
vm_map_offset_t sub_start;
vm_map_offset_t sub_end;
vm_map_offset_t local_end;
pmap_t pmap;
vm_map_clip_start(map, entry, start);
vm_map_clip_end(map, entry, end);
sub_start = entry->offset;
sub_end = entry->vme_end - entry->vme_start;
sub_end += entry->offset;
local_end = entry->vme_end;
if(map_pmap == NULL) {
if(entry->use_pmap) {
pmap = entry->object.sub_map->pmap;
pmap_addr = sub_start;
} else {
pmap = map->pmap;
pmap_addr = start;
}
if (entry->wired_count == 0 ||
(user_wire && entry->user_wired_count == 0)) {
if (!user_wire)
panic("vm_map_unwire: entry is unwired");
entry = entry->vme_next;
continue;
}
if (((entry->vme_end < end) &&
((entry->vme_next == vm_map_to_entry(map)) ||
(entry->vme_next->vme_start
> entry->vme_end)))) {
if (!user_wire)
panic("vm_map_unwire: non-contiguous region");
}
subtract_wire_counts(map, entry, user_wire);
if (entry->wired_count != 0) {
entry = entry->vme_next;
continue;
}
entry->in_transition = TRUE;
tmp_entry = *entry;
vm_map_unlock(map);
vm_map_unwire_nested(entry->object.sub_map,
sub_start, sub_end, user_wire, pmap, pmap_addr);
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
if (!vm_map_lookup_entry(map,
tmp_entry.vme_start,
&first_entry)) {
if (!user_wire)
panic("vm_map_unwire: re-lookup failed");
entry = first_entry->vme_next;
} else
entry = first_entry;
}
last_timestamp = map->timestamp;
while ((entry != vm_map_to_entry(map)) &&
(entry->vme_start < tmp_entry.vme_end)) {
assert(entry->in_transition);
entry->in_transition = FALSE;
if (entry->needs_wakeup) {
entry->needs_wakeup = FALSE;
need_wakeup = TRUE;
}
entry = entry->vme_next;
}
continue;
} else {
vm_map_unlock(map);
vm_map_unwire_nested(entry->object.sub_map,
sub_start, sub_end, user_wire, map_pmap,
pmap_addr);
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
if (!vm_map_lookup_entry(map,
tmp_entry.vme_start,
&first_entry)) {
if (!user_wire)
panic("vm_map_unwire: re-lookup failed");
entry = first_entry->vme_next;
} else
entry = first_entry;
}
last_timestamp = map->timestamp;
/* finished with this submap entry; advance to the next one */
continue;
}
}
if ((entry->wired_count == 0) ||
(user_wire && entry->user_wired_count == 0)) {
if (!user_wire)
panic("vm_map_unwire: entry is unwired");
entry = entry->vme_next;
continue;
}
assert(entry->wired_count > 0 &&
(!user_wire || entry->user_wired_count > 0));
vm_map_clip_start(map, entry, start);
vm_map_clip_end(map, entry, end);
if (((entry->vme_end < end) &&
((entry->vme_next == vm_map_to_entry(map)) ||
(entry->vme_next->vme_start > entry->vme_end)))) {
if (!user_wire)
panic("vm_map_unwire: non-contiguous region");
entry = entry->vme_next;
continue;
}
subtract_wire_counts(map, entry, user_wire);
if (entry->wired_count != 0) {
entry = entry->vme_next;
continue;
}
if(entry->zero_wired_pages) {
entry->zero_wired_pages = FALSE;
}
entry->in_transition = TRUE;
tmp_entry = *entry;
vm_map_unlock(map);
if(map_pmap) {
vm_fault_unwire(map,
&tmp_entry, FALSE, map_pmap, pmap_addr);
} else {
vm_fault_unwire(map,
&tmp_entry, FALSE, map->pmap,
tmp_entry.vme_start);
}
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
&first_entry)) {
if (!user_wire)
panic("vm_map_unwire: re-lookup failed");
entry = first_entry->vme_next;
} else
entry = first_entry;
}
last_timestamp = map->timestamp;
while ((entry != vm_map_to_entry(map)) &&
(entry->vme_start < tmp_entry.vme_end)) {
assert(entry->in_transition);
entry->in_transition = FALSE;
if (entry->needs_wakeup) {
entry->needs_wakeup = FALSE;
need_wakeup = TRUE;
}
entry = entry->vme_next;
}
}
vm_map_simplify_range(map, start, end);
vm_map_unlock(map);
if (need_wakeup)
vm_map_entry_wakeup(map);
return(KERN_SUCCESS);
}
kern_return_t
vm_map_unwire(
register vm_map_t map,
register vm_map_offset_t start,
register vm_map_offset_t end,
boolean_t user_wire)
{
return vm_map_unwire_nested(map, start, end,
user_wire, (pmap_t)NULL, 0);
}
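/*
 * vm_map_entry_delete: [ internal use only ]
 *
 * Deallocate the given entry from the target map. Called with the map
 * locked; the map is unlocked before the entry's object or submap
 * reference is released, since that release may be expensive.
 */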
static void
vm_map_entry_delete(
register vm_map_t map,
register vm_map_entry_t entry)
{
register vm_map_offset_t s, e;
register vm_object_t object;
register vm_map_t submap;
s = entry->vme_start;
e = entry->vme_end;
assert(page_aligned(s));
assert(page_aligned(e));
assert(entry->wired_count == 0);
assert(entry->user_wired_count == 0);
assert(!entry->permanent);
if (entry->is_sub_map) {
object = NULL;
submap = entry->object.sub_map;
} else {
submap = NULL;
object = entry->object.vm_object;
}
vm_map_store_entry_unlink(map, entry);
map->size -= e - s;
vm_map_entry_dispose(map, entry);
vm_map_unlock(map);
if (submap)
vm_map_deallocate(submap);
else
vm_object_deallocate(object);
}
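/*
 * vm_map_submap_pmap_clean:
 *
 * Remove the physical mappings established through a submap entry for
 * the range [start, end) of the parent map. Nested submaps are handled
 * recursively; objects that may be mapped elsewhere are cleaned with
 * vm_object_pmap_protect(), everything else with pmap_remove().
 */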
void
vm_map_submap_pmap_clean(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_map_t sub_map,
vm_map_offset_t offset)
{
vm_map_offset_t submap_start;
vm_map_offset_t submap_end;
vm_map_size_t remove_size;
vm_map_entry_t entry;
submap_end = offset + (end - start);
submap_start = offset;
vm_map_lock_read(sub_map);
if(vm_map_lookup_entry(sub_map, offset, &entry)) {
remove_size = (entry->vme_end - entry->vme_start);
if(offset > entry->vme_start)
remove_size -= offset - entry->vme_start;
if(submap_end < entry->vme_end) {
remove_size -=
entry->vme_end - submap_end;
}
if(entry->is_sub_map) {
vm_map_submap_pmap_clean(
sub_map,
start,
start + remove_size,
entry->object.sub_map,
entry->offset);
} else {
if((map->mapped) && (map->ref_count)
&& (entry->object.vm_object != NULL)) {
vm_object_pmap_protect(
entry->object.vm_object,
entry->offset+(offset-entry->vme_start),
remove_size,
PMAP_NULL,
entry->vme_start,
VM_PROT_NONE);
} else {
pmap_remove(map->pmap,
(addr64_t)start,
(addr64_t)(start + remove_size));
}
}
}
entry = entry->vme_next;
while((entry != vm_map_to_entry(sub_map))
&& (entry->vme_start < submap_end)) {
remove_size = (entry->vme_end - entry->vme_start);
if(submap_end < entry->vme_end) {
remove_size -= entry->vme_end - submap_end;
}
if(entry->is_sub_map) {
vm_map_submap_pmap_clean(
sub_map,
(start + entry->vme_start) - offset,
((start + entry->vme_start) - offset) + remove_size,
entry->object.sub_map,
entry->offset);
} else {
if((map->mapped) && (map->ref_count)
&& (entry->object.vm_object != NULL)) {
vm_object_pmap_protect(
entry->object.vm_object,
entry->offset,
remove_size,
PMAP_NULL,
entry->vme_start,
VM_PROT_NONE);
} else {
pmap_remove(map->pmap,
(addr64_t)((start + entry->vme_start)
- offset),
(addr64_t)(((start + entry->vme_start)
- offset) + remove_size));
}
}
entry = entry->vme_next;
}
vm_map_unlock_read(sub_map);
return;
}
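/*
 * vm_map_delete: [ internal use only ]
 *
 * Deallocates the given address range from the target map. Removes all
 * user wirings, unwires one kernel wiring if VM_MAP_REMOVE_KUNWIRE is
 * set, waits for kernel wirings to go away if
 * VM_MAP_REMOVE_WAIT_FOR_KWIRE is set, and sleeps interruptibly if
 * VM_MAP_REMOVE_INTERRUPTIBLE is set. With VM_MAP_REMOVE_SAVE_ENTRIES,
 * the entries are moved to zap_map instead of being disposed of.
 *
 * This routine is called with the map locked and leaves it locked.
 */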
static kern_return_t
vm_map_delete(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
int flags,
vm_map_t zap_map)
{
vm_map_entry_t entry, next;
struct vm_map_entry *first_entry, tmp_entry;
register vm_map_offset_t s;
register vm_object_t object;
boolean_t need_wakeup;
unsigned int last_timestamp = ~0;
int interruptible;
interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ?
THREAD_ABORTSAFE : THREAD_UNINT;
flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE;
while(1) {
if (vm_map_lookup_entry(map, start, &first_entry)) {
entry = first_entry;
if (entry->superpage_size && (start & ~SUPERPAGE_MASK)) {
/* extend the request to cover the whole superpage */
start = SUPERPAGE_ROUND_DOWN(start);
continue;
}
if (start != entry->vme_start) {
/* clip only when needed, to avoid unnecessary unnesting */
vm_map_clip_start(map, entry, start);
}
SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
} else {
entry = first_entry->vme_next;
}
break;
}
if (entry->superpage_size)
end = SUPERPAGE_ROUND_UP(end);
need_wakeup = FALSE;
s = entry->vme_start;
while ((entry != vm_map_to_entry(map)) && (s < end)) {
if (entry->vme_start < s) {
vm_map_clip_start(map, entry, s);
}
if (entry->vme_end > end) {
vm_map_clip_end(map, entry, end);
}
if (entry->permanent) {
panic("attempt to remove permanent VM map entry "
"%p [0x%llx:0x%llx]\n",
entry, (uint64_t) s, (uint64_t) end);
}
if (entry->in_transition) {
wait_result_t wait_result;
assert(s == entry->vme_start);
entry->needs_wakeup = TRUE;
if (need_wakeup) {
vm_map_entry_wakeup(map);
need_wakeup = FALSE;
}
wait_result = vm_map_entry_wait(map, interruptible);
if (interruptible &&
wait_result == THREAD_INTERRUPTED) {
vm_map_unlock(map);
return KERN_ABORTED;
}
if (!vm_map_lookup_entry(map, s, &first_entry)) {
assert((map != kernel_map) &&
(!entry->is_sub_map));
entry = first_entry->vme_next;
s = entry->vme_start;
} else {
entry = first_entry;
SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
}
last_timestamp = map->timestamp;
continue;
}
if (entry->wired_count) {
boolean_t user_wire;
user_wire = entry->user_wired_count > 0;
if (flags & VM_MAP_REMOVE_KUNWIRE) {
entry->wired_count--;
}
if (entry->user_wired_count > 0) {
while (entry->user_wired_count)
subtract_wire_counts(map, entry, user_wire);
}
if (entry->wired_count != 0) {
assert(map != kernel_map);
if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) {
wait_result_t wait_result;
assert(s == entry->vme_start);
entry->needs_wakeup = TRUE;
wait_result = vm_map_entry_wait(map,
interruptible);
if (interruptible &&
wait_result == THREAD_INTERRUPTED) {
vm_map_unlock(map);
return KERN_ABORTED;
}
if (!vm_map_lookup_entry(map, s,
&first_entry)) {
assert(map != kernel_map);
entry = first_entry->vme_next;
s = entry->vme_start;
} else {
entry = first_entry;
SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
}
last_timestamp = map->timestamp;
continue;
}
else {
return KERN_FAILURE;
}
}
entry->in_transition = TRUE;
tmp_entry = *entry;
assert(s == entry->vme_start);
vm_map_unlock(map);
if (tmp_entry.is_sub_map) {
vm_map_t sub_map;
vm_map_offset_t sub_start, sub_end;
pmap_t pmap;
vm_map_offset_t pmap_addr;
sub_map = tmp_entry.object.sub_map;
sub_start = tmp_entry.offset;
sub_end = sub_start + (tmp_entry.vme_end -
tmp_entry.vme_start);
if (tmp_entry.use_pmap) {
pmap = sub_map->pmap;
pmap_addr = tmp_entry.vme_start;
} else {
pmap = map->pmap;
pmap_addr = tmp_entry.vme_start;
}
(void) vm_map_unwire_nested(sub_map,
sub_start, sub_end,
user_wire,
pmap, pmap_addr);
} else {
vm_fault_unwire(map, &tmp_entry,
tmp_entry.object.vm_object == kernel_object,
map->pmap, tmp_entry.vme_start);
}
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
if (!vm_map_lookup_entry(map, s, &first_entry)){
assert((map != kernel_map) &&
(!entry->is_sub_map));
first_entry = first_entry->vme_next;
s = first_entry->vme_start;
} else {
SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
}
} else {
SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
first_entry = entry;
}
last_timestamp = map->timestamp;
entry = first_entry;
while ((entry != vm_map_to_entry(map)) &&
(entry->vme_start < tmp_entry.vme_end)) {
assert(entry->in_transition);
entry->in_transition = FALSE;
if (entry->needs_wakeup) {
entry->needs_wakeup = FALSE;
need_wakeup = TRUE;
}
entry = entry->vme_next;
}
entry = first_entry;
continue;
}
assert(entry->wired_count == 0);
assert(entry->user_wired_count == 0);
assert(s == entry->vme_start);
if (flags & VM_MAP_REMOVE_NO_PMAP_CLEANUP) {
/* the caller takes care of the pmap cleanup; skip it here */
} else if (entry->is_sub_map) {
if (entry->use_pmap) {
#ifndef NO_NESTED_PMAP
pmap_unnest(map->pmap,
(addr64_t)entry->vme_start,
entry->vme_end - entry->vme_start);
#endif
if ((map->mapped) && (map->ref_count)) {
vm_map_submap_pmap_clean(
map, entry->vme_start,
entry->vme_end,
entry->object.sub_map,
entry->offset);
}
} else {
vm_map_submap_pmap_clean(
map, entry->vme_start, entry->vme_end,
entry->object.sub_map,
entry->offset);
}
} else if (entry->object.vm_object != kernel_object) {
object = entry->object.vm_object;
if((map->mapped) && (map->ref_count)) {
vm_object_pmap_protect(
object, entry->offset,
entry->vme_end - entry->vme_start,
PMAP_NULL,
entry->vme_start,
VM_PROT_NONE);
} else {
pmap_remove(map->pmap,
(addr64_t)entry->vme_start,
(addr64_t)entry->vme_end);
}
}
assert(vm_map_pmap_is_empty(map,
entry->vme_start,
entry->vme_end));
next = entry->vme_next;
s = next->vme_start;
last_timestamp = map->timestamp;
if ((flags & VM_MAP_REMOVE_SAVE_ENTRIES) &&
zap_map != VM_MAP_NULL) {
vm_map_size_t entry_size;
vm_map_store_entry_unlink(map, entry);
vm_map_store_entry_link(zap_map,
vm_map_last_entry(zap_map),
entry);
entry_size = entry->vme_end - entry->vme_start;
map->size -= entry_size;
zap_map->size += entry_size;
last_timestamp--;
} else {
vm_map_entry_delete(map, entry);
vm_map_lock(map);
}
entry = next;
if(entry == vm_map_to_entry(map)) {
break;
}
if (last_timestamp+1 != map->timestamp) {
if (!vm_map_lookup_entry(map, s, &entry)){
entry = entry->vme_next;
s = entry->vme_start;
} else {
SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
}
if(entry == vm_map_to_entry(map)) {
break;
}
}
last_timestamp = map->timestamp;
}
if (map->wait_for_space)
thread_wakeup((event_t) map);
if (need_wakeup)
vm_map_entry_wakeup(map);
return KERN_SUCCESS;
}
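/*
 * vm_map_remove:
 *
 * Remove the given address range from the target map.
 * This is the exported form of vm_map_delete.
 */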
kern_return_t
vm_map_remove(
register vm_map_t map,
register vm_map_offset_t start,
register vm_map_offset_t end,
register boolean_t flags)
{
register kern_return_t result;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
result = vm_map_delete(map, start, end, flags, VM_MAP_NULL);
vm_map_unlock(map);
return(result);
}
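/*
 * vm_map_copy_discard:
 *
 * Dispose of a map copy object (returned by vm_map_copyin).
 */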
void
vm_map_copy_discard(
vm_map_copy_t copy)
{
if (copy == VM_MAP_COPY_NULL)
return;
switch (copy->type) {
case VM_MAP_COPY_ENTRY_LIST:
while (vm_map_copy_first_entry(copy) !=
vm_map_copy_to_entry(copy)) {
vm_map_entry_t entry = vm_map_copy_first_entry(copy);
vm_map_copy_entry_unlink(copy, entry);
vm_object_deallocate(entry->object.vm_object);
vm_map_copy_entry_dispose(copy, entry);
}
break;
case VM_MAP_COPY_OBJECT:
vm_object_deallocate(copy->cpy_object);
break;
case VM_MAP_COPY_KERNEL_BUFFER:
kfree(copy, copy->cpy_kalloc_size);
return;
}
zfree(vm_map_copy_zone, copy);
}
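/*
 * vm_map_copy_copy:
 *
 * Move the information in a map copy object to a new map copy object,
 * leaving the old one empty. This is used by routines that need to
 * look at out-of-line data before deciding whether to return success:
 * if the routine then fails and the original copy object is
 * deallocated, the moved copy survives intact.
 */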
vm_map_copy_t
vm_map_copy_copy(
vm_map_copy_t copy)
{
vm_map_copy_t new_copy;
if (copy == VM_MAP_COPY_NULL)
return VM_MAP_COPY_NULL;
new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
*new_copy = *copy;
if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
vm_map_copy_first_entry(copy)->vme_prev
= vm_map_copy_to_entry(new_copy);
vm_map_copy_last_entry(copy)->vme_next
= vm_map_copy_to_entry(new_copy);
}
copy->type = VM_MAP_COPY_OBJECT;
copy->cpy_object = VM_OBJECT_NULL;
return new_copy;
}
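/*
 * vm_map_overwrite_submap_recurse:
 *
 * Verify that a destination range can be overwritten: every entry must
 * be writeable and the range contiguous, descending into submaps as
 * they are encountered. No destination state is changed.
 */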
static kern_return_t
vm_map_overwrite_submap_recurse(
vm_map_t dst_map,
vm_map_offset_t dst_addr,
vm_map_size_t dst_size)
{
vm_map_offset_t dst_end;
vm_map_entry_t tmp_entry;
vm_map_entry_t entry;
kern_return_t result;
boolean_t encountered_sub_map = FALSE;
dst_end = vm_map_round_page(dst_addr + dst_size);
vm_map_lock(dst_map);
start_pass_1:
if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr));
assert(!tmp_entry->use_pmap);
for (entry = tmp_entry;;) {
vm_map_entry_t next;
next = entry->vme_next;
while(entry->is_sub_map) {
vm_map_offset_t sub_start;
vm_map_offset_t sub_end;
vm_map_offset_t local_end;
if (entry->in_transition) {
entry->needs_wakeup = TRUE;
vm_map_entry_wait(dst_map, THREAD_UNINT);
goto start_pass_1;
}
encountered_sub_map = TRUE;
sub_start = entry->offset;
if(entry->vme_end < dst_end)
sub_end = entry->vme_end;
else
sub_end = dst_end;
sub_end -= entry->vme_start;
sub_end += entry->offset;
local_end = entry->vme_end;
vm_map_unlock(dst_map);
result = vm_map_overwrite_submap_recurse(
entry->object.sub_map,
sub_start,
sub_end - sub_start);
if(result != KERN_SUCCESS)
return result;
if (dst_end <= entry->vme_end)
return KERN_SUCCESS;
vm_map_lock(dst_map);
if(!vm_map_lookup_entry(dst_map, local_end,
&tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
entry = tmp_entry;
next = entry->vme_next;
}
if ( ! (entry->protection & VM_PROT_WRITE)) {
vm_map_unlock(dst_map);
return(KERN_PROTECTION_FAILURE);
}
if (entry->in_transition) {
entry->needs_wakeup = TRUE;
vm_map_entry_wait(dst_map, THREAD_UNINT);
goto start_pass_1;
}
if (dst_end <= entry->vme_end) {
vm_map_unlock(dst_map);
return KERN_SUCCESS;
}
if ((next == vm_map_to_entry(dst_map)) ||
(next->vme_start != entry->vme_end)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
if ((entry->object.vm_object != VM_OBJECT_NULL) &&
((!entry->object.vm_object->internal) ||
(entry->object.vm_object->true_share))) {
if(encountered_sub_map) {
vm_map_unlock(dst_map);
return(KERN_FAILURE);
}
}
entry = next;
}
vm_map_unlock(dst_map);
return(KERN_SUCCESS);
}
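/*
 * vm_map_copy_overwrite_nested:
 *
 * Copy the memory described by the map copy object (returned by
 * vm_map_copyin) onto the specified destination region. Unlike
 * vm_map_copyout, this routine writes over previously mapped memory,
 * preserving the destination's attributes (protection, inheritance).
 * When the range spans submap boundaries, the copy object is split and
 * the pieces are pushed down into each submap in turn. If successful
 * and discard_on_success is set, consumes the copy object; otherwise
 * the caller remains responsible for it.
 */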
static kern_return_t
vm_map_copy_overwrite_nested(
vm_map_t dst_map,
vm_map_address_t dst_addr,
vm_map_copy_t copy,
boolean_t interruptible,
pmap_t pmap,
boolean_t discard_on_success)
{
vm_map_offset_t dst_end;
vm_map_entry_t tmp_entry;
vm_map_entry_t entry;
kern_return_t kr;
boolean_t aligned = TRUE;
boolean_t contains_permanent_objects = FALSE;
boolean_t encountered_sub_map = FALSE;
vm_map_offset_t base_addr;
vm_map_size_t copy_size;
vm_map_size_t total_size;
if (copy == VM_MAP_COPY_NULL)
return(KERN_SUCCESS);
if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
return(vm_map_copyout_kernel_buffer(
dst_map, &dst_addr,
copy, TRUE));
}
assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
if (copy->size == 0) {
if (discard_on_success)
vm_map_copy_discard(copy);
return(KERN_SUCCESS);
}
if (!page_aligned(copy->size) ||
!page_aligned (copy->offset) ||
!page_aligned (dst_addr))
{
aligned = FALSE;
dst_end = vm_map_round_page(dst_addr + copy->size);
} else {
dst_end = dst_addr + copy->size;
}
vm_map_lock(dst_map);
if (dst_addr >= dst_map->max_offset) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
start_pass_1:
if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr));
for (entry = tmp_entry;;) {
vm_map_entry_t next = entry->vme_next;
while(entry->is_sub_map) {
vm_map_offset_t sub_start;
vm_map_offset_t sub_end;
vm_map_offset_t local_end;
if (entry->in_transition) {
entry->needs_wakeup = TRUE;
vm_map_entry_wait(dst_map, THREAD_UNINT);
goto start_pass_1;
}
local_end = entry->vme_end;
if (!(entry->needs_copy)) {
encountered_sub_map = TRUE;
sub_start = entry->offset;
if(entry->vme_end < dst_end)
sub_end = entry->vme_end;
else
sub_end = dst_end;
sub_end -= entry->vme_start;
sub_end += entry->offset;
vm_map_unlock(dst_map);
kr = vm_map_overwrite_submap_recurse(
entry->object.sub_map,
sub_start,
sub_end - sub_start);
if(kr != KERN_SUCCESS)
return kr;
vm_map_lock(dst_map);
}
if (dst_end <= entry->vme_end)
goto start_overwrite;
if(!vm_map_lookup_entry(dst_map, local_end,
&entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
next = entry->vme_next;
}
if ( ! (entry->protection & VM_PROT_WRITE)) {
vm_map_unlock(dst_map);
return(KERN_PROTECTION_FAILURE);
}
if (entry->in_transition) {
entry->needs_wakeup = TRUE;
vm_map_entry_wait(dst_map, THREAD_UNINT);
goto start_pass_1;
}
if (dst_end <= entry->vme_end)
break;
if ((next == vm_map_to_entry(dst_map)) ||
(next->vme_start != entry->vme_end)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
if ((entry->object.vm_object != VM_OBJECT_NULL) &&
((!entry->object.vm_object->internal) ||
(entry->object.vm_object->true_share))) {
contains_permanent_objects = TRUE;
}
entry = next;
}
start_overwrite:
if (interruptible && contains_permanent_objects) {
vm_map_unlock(dst_map);
return(KERN_FAILURE);
}
total_size = copy->size;
if(encountered_sub_map) {
copy_size = 0;
if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
} else {
copy_size = copy->size;
}
base_addr = dst_addr;
while(TRUE) {
vm_map_entry_t copy_entry;
vm_map_entry_t previous_prev = VM_MAP_ENTRY_NULL;
vm_map_entry_t next_copy = VM_MAP_ENTRY_NULL;
int nentries;
int remaining_entries = 0;
vm_map_offset_t new_offset = 0;
for (entry = tmp_entry; copy_size == 0;) {
vm_map_entry_t next;
next = entry->vme_next;
if (entry->in_transition) {
entry->needs_wakeup = TRUE;
vm_map_entry_wait(dst_map, THREAD_UNINT);
if(!vm_map_lookup_entry(dst_map, base_addr,
&tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
copy_size = 0;
entry = tmp_entry;
continue;
}
if(entry->is_sub_map) {
vm_map_offset_t sub_start;
vm_map_offset_t sub_end;
vm_map_offset_t local_end;
if (entry->needs_copy) {
if(entry->vme_end < dst_end)
sub_end = entry->vme_end;
else
sub_end = dst_end;
if(entry->vme_start < base_addr)
sub_start = base_addr;
else
sub_start = entry->vme_start;
vm_map_clip_end(
dst_map, entry, sub_end);
vm_map_clip_start(
dst_map, entry, sub_start);
assert(!entry->use_pmap);
entry->is_sub_map = FALSE;
vm_map_deallocate(
entry->object.sub_map);
entry->object.sub_map = NULL;
entry->is_shared = FALSE;
entry->needs_copy = FALSE;
entry->offset = 0;
entry->protection = VM_PROT_ALL;
entry->max_protection = VM_PROT_ALL;
entry->wired_count = 0;
entry->user_wired_count = 0;
if(entry->inheritance
== VM_INHERIT_SHARE)
entry->inheritance = VM_INHERIT_COPY;
continue;
}
if(base_addr < entry->vme_start) {
copy_size =
entry->vme_start - base_addr;
break;
}
sub_start = entry->offset;
if(entry->vme_end < dst_end)
sub_end = entry->vme_end;
else
sub_end = dst_end;
sub_end -= entry->vme_start;
sub_end += entry->offset;
local_end = entry->vme_end;
vm_map_unlock(dst_map);
copy_size = sub_end - sub_start;
if (total_size > copy_size) {
vm_map_size_t local_size = 0;
vm_map_size_t entry_size;
nentries = 1;
new_offset = copy->offset;
copy_entry = vm_map_copy_first_entry(copy);
while(copy_entry !=
vm_map_copy_to_entry(copy)){
entry_size = copy_entry->vme_end -
copy_entry->vme_start;
if((local_size < copy_size) &&
((local_size + entry_size)
>= copy_size)) {
vm_map_copy_clip_end(copy,
copy_entry,
copy_entry->vme_start +
(copy_size - local_size));
entry_size = copy_entry->vme_end -
copy_entry->vme_start;
local_size += entry_size;
new_offset += entry_size;
}
if(local_size >= copy_size) {
next_copy = copy_entry->vme_next;
copy_entry->vme_next =
vm_map_copy_to_entry(copy);
previous_prev =
copy->cpy_hdr.links.prev;
copy->cpy_hdr.links.prev = copy_entry;
copy->size = copy_size;
remaining_entries =
copy->cpy_hdr.nentries;
remaining_entries -= nentries;
copy->cpy_hdr.nentries = nentries;
break;
} else {
local_size += entry_size;
new_offset += entry_size;
nentries++;
}
copy_entry = copy_entry->vme_next;
}
}
if((entry->use_pmap) && (pmap == NULL)) {
kr = vm_map_copy_overwrite_nested(
entry->object.sub_map,
sub_start,
copy,
interruptible,
entry->object.sub_map->pmap,
TRUE);
} else if (pmap != NULL) {
kr = vm_map_copy_overwrite_nested(
entry->object.sub_map,
sub_start,
copy,
interruptible, pmap,
TRUE);
} else {
kr = vm_map_copy_overwrite_nested(
entry->object.sub_map,
sub_start,
copy,
interruptible,
dst_map->pmap,
TRUE);
}
if(kr != KERN_SUCCESS) {
if(next_copy != NULL) {
copy->cpy_hdr.nentries +=
remaining_entries;
copy->cpy_hdr.links.prev->vme_next =
next_copy;
copy->cpy_hdr.links.prev
= previous_prev;
copy->size = total_size;
}
return kr;
}
if (dst_end <= local_end) {
return(KERN_SUCCESS);
}
copy = (vm_map_copy_t)
zalloc(vm_map_copy_zone);
vm_map_copy_first_entry(copy) =
vm_map_copy_last_entry(copy) =
vm_map_copy_to_entry(copy);
copy->type = VM_MAP_COPY_ENTRY_LIST;
copy->offset = new_offset;
total_size -= copy_size;
copy_size = 0;
if(next_copy != NULL) {
copy->cpy_hdr.nentries = remaining_entries;
copy->cpy_hdr.links.next = next_copy;
copy->cpy_hdr.links.prev = previous_prev;
copy->size = total_size;
next_copy->vme_prev =
vm_map_copy_to_entry(copy);
next_copy = NULL;
}
base_addr = local_end;
vm_map_lock(dst_map);
if(!vm_map_lookup_entry(dst_map,
local_end, &tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
entry = tmp_entry;
continue;
}
if (dst_end <= entry->vme_end) {
copy_size = dst_end - base_addr;
break;
}
if ((next == vm_map_to_entry(dst_map)) ||
(next->vme_start != entry->vme_end)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
entry = next;
}
next_copy = NULL;
nentries = 1;
if (total_size > copy_size) {
vm_map_size_t local_size = 0;
vm_map_size_t entry_size;
new_offset = copy->offset;
copy_entry = vm_map_copy_first_entry(copy);
while(copy_entry != vm_map_copy_to_entry(copy)) {
entry_size = copy_entry->vme_end -
copy_entry->vme_start;
if((local_size < copy_size) &&
((local_size + entry_size)
>= copy_size)) {
vm_map_copy_clip_end(copy, copy_entry,
copy_entry->vme_start +
(copy_size - local_size));
entry_size = copy_entry->vme_end -
copy_entry->vme_start;
local_size += entry_size;
new_offset += entry_size;
}
if(local_size >= copy_size) {
next_copy = copy_entry->vme_next;
copy_entry->vme_next =
vm_map_copy_to_entry(copy);
previous_prev =
copy->cpy_hdr.links.prev;
copy->cpy_hdr.links.prev = copy_entry;
copy->size = copy_size;
remaining_entries =
copy->cpy_hdr.nentries;
remaining_entries -= nentries;
copy->cpy_hdr.nentries = nentries;
break;
} else {
local_size += entry_size;
new_offset += entry_size;
nentries++;
}
copy_entry = copy_entry->vme_next;
}
}
if (aligned) {
pmap_t local_pmap;
if(pmap)
local_pmap = pmap;
else
local_pmap = dst_map->pmap;
if ((kr = vm_map_copy_overwrite_aligned(
dst_map, tmp_entry, copy,
base_addr, local_pmap)) != KERN_SUCCESS) {
if(next_copy != NULL) {
copy->cpy_hdr.nentries +=
remaining_entries;
copy->cpy_hdr.links.prev->vme_next =
next_copy;
copy->cpy_hdr.links.prev =
previous_prev;
copy->size += copy_size;
}
return kr;
}
vm_map_unlock(dst_map);
} else {
if ((kr = vm_map_copy_overwrite_unaligned( dst_map,
tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
if(next_copy != NULL) {
copy->cpy_hdr.nentries +=
remaining_entries;
copy->cpy_hdr.links.prev->vme_next =
next_copy;
copy->cpy_hdr.links.prev =
previous_prev;
copy->size += copy_size;
}
return kr;
}
}
total_size -= copy_size;
if(total_size == 0)
break;
base_addr += copy_size;
copy_size = 0;
copy->offset = new_offset;
if(next_copy != NULL) {
copy->cpy_hdr.nentries = remaining_entries;
copy->cpy_hdr.links.next = next_copy;
copy->cpy_hdr.links.prev = previous_prev;
next_copy->vme_prev = vm_map_copy_to_entry(copy);
copy->size = total_size;
}
vm_map_lock(dst_map);
while(TRUE) {
if (!vm_map_lookup_entry(dst_map,
base_addr, &tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
if (tmp_entry->in_transition) {
/* mark the in-transition entry itself so its owner wakes us */
tmp_entry->needs_wakeup = TRUE;
vm_map_entry_wait(dst_map, THREAD_UNINT);
} else {
break;
}
}
vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(base_addr));
entry = tmp_entry;
}
if (discard_on_success)
vm_map_copy_discard(copy);
return(KERN_SUCCESS);
}
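/*
 * vm_map_copy_overwrite:
 *
 * Front end for vm_map_copy_overwrite_nested(). If the copy and the
 * destination are misaligned but congruent modulo the page size, the
 * copy is split into a misaligned head, a page-aligned middle, and a
 * misaligned tail, so that the bulk of the data can move with
 * page-level VM tricks rather than a physical copy. On failure the
 * head and tail are stitched back onto the copy object so the caller
 * can retry or discard it whole.
 */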
kern_return_t
vm_map_copy_overwrite(
vm_map_t dst_map,
vm_map_offset_t dst_addr,
vm_map_copy_t copy,
boolean_t interruptible)
{
vm_map_size_t head_size, tail_size;
vm_map_copy_t head_copy, tail_copy;
vm_map_offset_t head_addr, tail_addr;
vm_map_entry_t entry;
kern_return_t kr;
head_size = 0;
tail_size = 0;
head_copy = NULL;
tail_copy = NULL;
head_addr = 0;
tail_addr = 0;
if (interruptible ||
copy == VM_MAP_COPY_NULL ||
copy->type != VM_MAP_COPY_ENTRY_LIST) {
blunt_copy:
return vm_map_copy_overwrite_nested(dst_map,
dst_addr,
copy,
interruptible,
(pmap_t) NULL,
TRUE);
}
if (copy->size < 3 * PAGE_SIZE) {
goto blunt_copy;
}
if ((dst_addr & PAGE_MASK) != (copy->offset & PAGE_MASK)) {
goto blunt_copy;
}
if (!page_aligned(dst_addr)) {
head_addr = dst_addr;
head_size = PAGE_SIZE - (copy->offset & PAGE_MASK);
}
if (!page_aligned(copy->offset + copy->size)) {
tail_size = (copy->offset + copy->size) & PAGE_MASK;
tail_addr = dst_addr + copy->size - tail_size;
}
if (head_size + tail_size == copy->size) {
goto blunt_copy;
}
vm_map_lock_read(dst_map);
if (! vm_map_lookup_entry(dst_map, dst_addr, &entry)) {
vm_map_unlock_read(dst_map);
goto blunt_copy;
}
for (;
(entry != vm_map_to_entry(dst_map) &&
entry->vme_start < dst_addr + copy->size);
entry = entry->vme_next) {
if (entry->is_sub_map) {
vm_map_unlock_read(dst_map);
goto blunt_copy;
}
}
vm_map_unlock_read(dst_map);
if (head_size) {
head_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
vm_map_copy_first_entry(head_copy) =
vm_map_copy_to_entry(head_copy);
vm_map_copy_last_entry(head_copy) =
vm_map_copy_to_entry(head_copy);
head_copy->type = VM_MAP_COPY_ENTRY_LIST;
head_copy->cpy_hdr.nentries = 0;
head_copy->cpy_hdr.entries_pageable =
copy->cpy_hdr.entries_pageable;
vm_map_store_init(&head_copy->cpy_hdr);
head_copy->offset = copy->offset;
head_copy->size = head_size;
copy->offset += head_size;
copy->size -= head_size;
entry = vm_map_copy_first_entry(copy);
vm_map_copy_clip_end(copy, entry, copy->offset);
vm_map_copy_entry_unlink(copy, entry);
vm_map_copy_entry_link(head_copy,
vm_map_copy_to_entry(head_copy),
entry);
kr = vm_map_copy_overwrite_nested(dst_map,
head_addr,
head_copy,
interruptible,
(pmap_t) NULL,
FALSE);
if (kr != KERN_SUCCESS)
goto done;
}
if (tail_size) {
tail_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
vm_map_copy_first_entry(tail_copy) =
vm_map_copy_to_entry(tail_copy);
vm_map_copy_last_entry(tail_copy) =
vm_map_copy_to_entry(tail_copy);
tail_copy->type = VM_MAP_COPY_ENTRY_LIST;
tail_copy->cpy_hdr.nentries = 0;
tail_copy->cpy_hdr.entries_pageable =
copy->cpy_hdr.entries_pageable;
vm_map_store_init(&tail_copy->cpy_hdr);
tail_copy->offset = copy->offset + copy->size - tail_size;
tail_copy->size = tail_size;
copy->size -= tail_size;
entry = vm_map_copy_last_entry(copy);
vm_map_copy_clip_start(copy, entry, tail_copy->offset);
entry = vm_map_copy_last_entry(copy);
vm_map_copy_entry_unlink(copy, entry);
vm_map_copy_entry_link(tail_copy,
vm_map_copy_last_entry(tail_copy),
entry);
}
kr = vm_map_copy_overwrite_nested(dst_map,
dst_addr + head_size,
copy,
interruptible,
(pmap_t) NULL,
FALSE);
if (kr != KERN_SUCCESS) {
goto done;
}
if (tail_size) {
kr = vm_map_copy_overwrite_nested(dst_map,
tail_addr,
tail_copy,
interruptible,
(pmap_t) NULL,
FALSE);
}
done:
assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
if (kr == KERN_SUCCESS) {
if (head_copy) {
vm_map_copy_discard(head_copy);
head_copy = NULL;
}
vm_map_copy_discard(copy);
if (tail_copy) {
vm_map_copy_discard(tail_copy);
tail_copy = NULL;
}
} else {
if (head_copy) {
entry = vm_map_copy_first_entry(head_copy);
vm_map_copy_entry_unlink(head_copy, entry);
vm_map_copy_entry_link(copy,
vm_map_copy_to_entry(copy),
entry);
copy->offset -= head_size;
copy->size += head_size;
vm_map_copy_discard(head_copy);
head_copy = NULL;
}
if (tail_copy) {
entry = vm_map_copy_last_entry(tail_copy);
vm_map_copy_entry_unlink(tail_copy, entry);
vm_map_copy_entry_link(copy,
vm_map_copy_last_entry(copy),
entry);
copy->size += tail_size;
vm_map_copy_discard(tail_copy);
tail_copy = NULL;
}
}
return kr;
}
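/*
 * vm_map_copy_overwrite_unaligned: [ internal use only ]
 *
 * Physically copy unaligned data. Unaligned parts of pages have to be
 * physically copied, using vm_fault_copy(), which understands
 * non-aligned page offsets and sizes. Each pass copies the smallest of
 * "amount left", "source object data size", and "destination object
 * data size".
 */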
static kern_return_t
vm_map_copy_overwrite_unaligned(
vm_map_t dst_map,
vm_map_entry_t entry,
vm_map_copy_t copy,
vm_map_offset_t start)
{
vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
vm_map_version_t version;
vm_object_t dst_object;
vm_object_offset_t dst_offset;
vm_object_offset_t src_offset;
vm_object_offset_t entry_offset;
vm_map_offset_t entry_end;
vm_map_size_t src_size,
dst_size,
copy_size,
amount_left;
kern_return_t kr = KERN_SUCCESS;
vm_map_lock_write_to_read(dst_map);
src_offset = copy->offset - vm_object_trunc_page(copy->offset);
amount_left = copy->size;
while (amount_left > 0) {
if (entry == vm_map_to_entry(dst_map)) {
vm_map_unlock_read(dst_map);
return KERN_INVALID_ADDRESS;
}
assert ((start>=entry->vme_start) && (start<entry->vme_end));
dst_offset = start - entry->vme_start;
dst_size = entry->vme_end - start;
src_size = copy_entry->vme_end -
(copy_entry->vme_start + src_offset);
if (dst_size < src_size) {
copy_size = dst_size;
} else {
copy_size = src_size;
}
if (copy_size > amount_left) {
copy_size = amount_left;
}
if (entry->needs_copy &&
((entry->protection & VM_PROT_WRITE) != 0))
{
if (vm_map_lock_read_to_write(dst_map)) {
vm_map_lock_read(dst_map);
goto RetryLookup;
}
vm_object_shadow(&entry->object.vm_object,
&entry->offset,
(vm_map_size_t)(entry->vme_end
- entry->vme_start));
entry->needs_copy = FALSE;
vm_map_lock_write_to_read(dst_map);
}
dst_object = entry->object.vm_object;
if (dst_object == VM_OBJECT_NULL) {
if (vm_map_lock_read_to_write(dst_map)) {
vm_map_lock_read(dst_map);
goto RetryLookup;
}
dst_object = vm_object_allocate((vm_map_size_t)
entry->vme_end - entry->vme_start);
entry->object.vm_object = dst_object;
entry->offset = 0;
vm_map_lock_write_to_read(dst_map);
}
vm_object_reference(dst_object);
version.main_timestamp = dst_map->timestamp;
entry_offset = entry->offset;
entry_end = entry->vme_end;
vm_map_unlock_read(dst_map);
kr = vm_fault_copy(
copy_entry->object.vm_object,
copy_entry->offset + src_offset,
&copy_size,
dst_object,
entry_offset + dst_offset,
dst_map,
&version,
THREAD_UNINT );
start += copy_size;
src_offset += copy_size;
amount_left -= copy_size;
vm_object_deallocate(dst_object);
if (kr != KERN_SUCCESS)
return kr;
if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
|| amount_left == 0)
{
vm_map_copy_entry_unlink(copy, copy_entry);
vm_object_deallocate(copy_entry->object.vm_object);
vm_map_copy_entry_dispose(copy, copy_entry);
if ((copy_entry = vm_map_copy_first_entry(copy))
== vm_map_copy_to_entry(copy) && amount_left) {
return KERN_INVALID_ADDRESS;
}
src_offset = 0;
}
if (amount_left == 0)
return KERN_SUCCESS;
vm_map_lock_read(dst_map);
if (version.main_timestamp == dst_map->timestamp) {
if (start == entry_end) {
entry = entry->vme_next;
if (start != entry->vme_start) {
vm_map_unlock_read(dst_map);
return KERN_INVALID_ADDRESS ;
}
}
} else {
RetryLookup:
if (!vm_map_lookup_entry(dst_map, start, &entry))
{
vm_map_unlock_read(dst_map);
return KERN_INVALID_ADDRESS ;
}
}
}
return KERN_SUCCESS;
}
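/*
 * vm_map_copy_overwrite_aligned: [ internal use only ]
 *
 * Does all the VM trickery possible for whole pages. If the
 * destination entry is not shared and holds no permanent object, the
 * destination object is simply replaced by the copy entry's object;
 * otherwise the data is moved with vm_fault_copy().
 */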
static kern_return_t
vm_map_copy_overwrite_aligned(
vm_map_t dst_map,
vm_map_entry_t tmp_entry,
vm_map_copy_t copy,
vm_map_offset_t start,
__unused pmap_t pmap)
{
vm_object_t object;
vm_map_entry_t copy_entry;
vm_map_size_t copy_size;
vm_map_size_t size;
vm_map_entry_t entry;
while ((copy_entry = vm_map_copy_first_entry(copy))
!= vm_map_copy_to_entry(copy))
{
copy_size = (copy_entry->vme_end - copy_entry->vme_start);
entry = tmp_entry;
assert(!entry->use_pmap);
if (entry == vm_map_to_entry(dst_map)) {
vm_map_unlock(dst_map);
return KERN_INVALID_ADDRESS;
}
size = (entry->vme_end - entry->vme_start);
if ((entry->vme_start != start) || ((entry->is_sub_map)
&& !entry->needs_copy)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
assert(entry != vm_map_to_entry(dst_map));
if ( ! (entry->protection & VM_PROT_WRITE)) {
vm_map_unlock(dst_map);
return(KERN_PROTECTION_FAILURE);
}
if (copy_size < size) {
vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
size = copy_size;
}
if (size < copy_size) {
vm_map_copy_clip_end(copy, copy_entry,
copy_entry->vme_start + size);
copy_size = size;
}
assert((entry->vme_end - entry->vme_start) == size);
assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
assert((copy_entry->vme_end - copy_entry->vme_start) == size);
object = entry->object.vm_object;
if ((!entry->is_shared &&
((object == VM_OBJECT_NULL) ||
(object->internal && !object->true_share))) ||
entry->needs_copy) {
vm_object_t old_object = entry->object.vm_object;
vm_object_offset_t old_offset = entry->offset;
vm_object_offset_t offset;
if (old_object == copy_entry->object.vm_object &&
old_offset == copy_entry->offset) {
vm_map_copy_entry_unlink(copy, copy_entry);
vm_map_copy_entry_dispose(copy, copy_entry);
if (old_object != VM_OBJECT_NULL)
vm_object_deallocate(old_object);
start = tmp_entry->vme_end;
tmp_entry = tmp_entry->vme_next;
continue;
}
if (old_object != VM_OBJECT_NULL) {
if(entry->is_sub_map) {
if(entry->use_pmap) {
#ifndef NO_NESTED_PMAP
pmap_unnest(dst_map->pmap,
(addr64_t)entry->vme_start,
entry->vme_end - entry->vme_start);
#endif
if(dst_map->mapped) {
vm_map_submap_pmap_clean(
dst_map, entry->vme_start,
entry->vme_end,
entry->object.sub_map,
entry->offset);
}
} else {
vm_map_submap_pmap_clean(
dst_map, entry->vme_start,
entry->vme_end,
entry->object.sub_map,
entry->offset);
}
vm_map_deallocate(
entry->object.sub_map);
} else {
if(dst_map->mapped) {
vm_object_pmap_protect(
entry->object.vm_object,
entry->offset,
entry->vme_end
- entry->vme_start,
PMAP_NULL,
entry->vme_start,
VM_PROT_NONE);
} else {
pmap_remove(dst_map->pmap,
(addr64_t)(entry->vme_start),
(addr64_t)(entry->vme_end));
}
vm_object_deallocate(old_object);
}
}
entry->is_sub_map = FALSE;
entry->object = copy_entry->object;
object = entry->object.vm_object;
entry->needs_copy = copy_entry->needs_copy;
entry->wired_count = 0;
entry->user_wired_count = 0;
offset = entry->offset = copy_entry->offset;
vm_map_copy_entry_unlink(copy, copy_entry);
vm_map_copy_entry_dispose(copy, copy_entry);
start = tmp_entry->vme_end;
tmp_entry = tmp_entry->vme_next;
} else {
vm_map_version_t version;
vm_object_t dst_object = entry->object.vm_object;
vm_object_offset_t dst_offset = entry->offset;
kern_return_t r;
vm_object_reference(dst_object);
version.main_timestamp = dst_map->timestamp + 1;
vm_map_unlock(dst_map);
copy_size = size;
r = vm_fault_copy(
copy_entry->object.vm_object,
copy_entry->offset,
&copy_size,
dst_object,
dst_offset,
dst_map,
&version,
THREAD_UNINT );
vm_object_deallocate(dst_object);
if (r != KERN_SUCCESS)
return(r);
if (copy_size != 0) {
vm_map_copy_clip_end(copy, copy_entry,
copy_entry->vme_start + copy_size);
vm_map_copy_entry_unlink(copy, copy_entry);
vm_object_deallocate(copy_entry->object.vm_object);
vm_map_copy_entry_dispose(copy, copy_entry);
}
start += copy_size;
vm_map_lock(dst_map);
if (version.main_timestamp == dst_map->timestamp) {
vm_map_clip_end(dst_map, tmp_entry, start);
tmp_entry = tmp_entry->vme_next;
} else {
if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
vm_map_clip_start(dst_map, tmp_entry, start);
}
}
}
return(KERN_SUCCESS);
}
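/*
 * vm_map_copyin_kernel_buffer: [ internal use only ]
 *
 * Copy in data to a kernel buffer from space in the source map. The
 * original space may be optionally deallocated. Used for copies small
 * enough that a physical copy beats VM manipulations.
 * If successful, returns a new copy object.
 */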
static kern_return_t
vm_map_copyin_kernel_buffer(
vm_map_t src_map,
vm_map_offset_t src_addr,
vm_map_size_t len,
boolean_t src_destroy,
vm_map_copy_t *copy_result)
{
kern_return_t kr;
vm_map_copy_t copy;
vm_size_t kalloc_size;
if ((vm_size_t) len != len) {
return KERN_RESOURCE_SHORTAGE;
}
kalloc_size = (vm_size_t) (sizeof(struct vm_map_copy) + len);
assert((vm_map_size_t) kalloc_size == sizeof (struct vm_map_copy) + len);
copy = (vm_map_copy_t) kalloc(kalloc_size);
if (copy == VM_MAP_COPY_NULL) {
return KERN_RESOURCE_SHORTAGE;
}
copy->type = VM_MAP_COPY_KERNEL_BUFFER;
copy->size = len;
copy->offset = 0;
copy->cpy_kdata = (void *) (copy + 1);
copy->cpy_kalloc_size = kalloc_size;
kr = copyinmap(src_map, src_addr, copy->cpy_kdata, (vm_size_t) len);
if (kr != KERN_SUCCESS) {
kfree(copy, kalloc_size);
return kr;
}
if (src_destroy) {
(void) vm_map_remove(src_map, vm_map_trunc_page(src_addr),
vm_map_round_page(src_addr + len),
VM_MAP_REMOVE_INTERRUPTIBLE |
VM_MAP_REMOVE_WAIT_FOR_KWIRE |
((src_map == kernel_map) ?
VM_MAP_REMOVE_KUNWIRE : 0));
}
*copy_result = copy;
return KERN_SUCCESS;
}
static int vm_map_copyout_kernel_buffer_failures = 0;
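/*
 * vm_map_copyout_kernel_buffer: [ internal use only ]
 *
 * Copy out data from a kernel buffer into space in the destination
 * map. If "overwrite" is not set, the space is allocated here. When
 * the destination is not the current map, the thread temporarily
 * switches onto the destination map so that copyout() can be used.
 * If successful, consumes the copy object.
 */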
static kern_return_t
vm_map_copyout_kernel_buffer(
vm_map_t map,
vm_map_address_t *addr,
vm_map_copy_t copy,
boolean_t overwrite)
{
kern_return_t kr = KERN_SUCCESS;
thread_t thread = current_thread();
if (!overwrite) {
*addr = 0;
kr = vm_map_enter(map,
addr,
vm_map_round_page(copy->size),
(vm_map_offset_t) 0,
VM_FLAGS_ANYWHERE,
VM_OBJECT_NULL,
(vm_object_offset_t) 0,
FALSE,
VM_PROT_DEFAULT,
VM_PROT_ALL,
VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS)
return kr;
}
if (thread->map == map) {
assert((vm_size_t) copy->size == copy->size);
if (copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size)) {
kr = KERN_INVALID_ADDRESS;
}
}
else {
vm_map_t oldmap;
vm_map_reference(map);
oldmap = vm_map_switch(map);
assert((vm_size_t) copy->size == copy->size);
if (copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size)) {
vm_map_copyout_kernel_buffer_failures++;
kr = KERN_INVALID_ADDRESS;
}
(void) vm_map_switch(oldmap);
vm_map_deallocate(map);
}
if (kr != KERN_SUCCESS) {
if (!overwrite) {
(void) vm_map_remove(map,
vm_map_trunc_page(*addr),
vm_map_round_page(*addr +
vm_map_round_page(copy->size)),
VM_MAP_NO_FLAGS);
*addr = 0;
}
} else {
kfree(copy, copy->cpy_kalloc_size);
}
return kr;
}
#define vm_map_copy_insert(map, where, copy) \
MACRO_BEGIN \
vm_map_store_copy_insert(map, where, copy); \
zfree(vm_map_copy_zone, copy); \
MACRO_END
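/*
 * vm_map_copyout:
 *
 * Copy out a copy chain ("copy") into newly-allocated space in the
 * destination map. If successful, consumes the copy object and returns
 * the allocated address in *dst_addr; otherwise the caller remains
 * responsible for the copy object.
 */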
kern_return_t
vm_map_copyout(
vm_map_t dst_map,
vm_map_address_t *dst_addr,
vm_map_copy_t copy)
{
vm_map_size_t size;
vm_map_size_t adjustment;
vm_map_offset_t start;
vm_object_offset_t vm_copy_start;
vm_map_entry_t last;
register
vm_map_entry_t entry;
if (copy == VM_MAP_COPY_NULL) {
*dst_addr = 0;
return(KERN_SUCCESS);
}
if (copy->type == VM_MAP_COPY_OBJECT) {
vm_object_t object = copy->cpy_object;
kern_return_t kr;
vm_object_offset_t offset;
offset = vm_object_trunc_page(copy->offset);
size = vm_map_round_page(copy->size +
(vm_map_size_t)(copy->offset - offset));
*dst_addr = 0;
kr = vm_map_enter(dst_map, dst_addr, size,
(vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
object, offset, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS)
return(kr);
*dst_addr += (vm_map_offset_t)(copy->offset - offset);
zfree(vm_map_copy_zone, copy);
return(KERN_SUCCESS);
}
if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
return(vm_map_copyout_kernel_buffer(dst_map, dst_addr,
copy, FALSE));
}
vm_copy_start = vm_object_trunc_page(copy->offset);
size = vm_map_round_page((vm_map_size_t)copy->offset + copy->size)
- vm_copy_start;
StartAgain: ;
vm_map_lock(dst_map);
if( dst_map->disable_vmentry_reuse == TRUE) {
VM_MAP_HIGHEST_ENTRY(dst_map, entry, start);
last = entry;
} else {
assert(first_free_is_valid(dst_map));
start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
vm_map_min(dst_map) : last->vme_end;
}
while (TRUE) {
vm_map_entry_t next = last->vme_next;
vm_map_offset_t end = start + size;
if ((end > dst_map->max_offset) || (end < start)) {
if (dst_map->wait_for_space) {
if (size <= (dst_map->max_offset - dst_map->min_offset)) {
assert_wait((event_t) dst_map,
THREAD_INTERRUPTIBLE);
vm_map_unlock(dst_map);
thread_block(THREAD_CONTINUE_NULL);
goto StartAgain;
}
}
vm_map_unlock(dst_map);
return(KERN_NO_SPACE);
}
if ((next == vm_map_to_entry(dst_map)) ||
(next->vme_start >= end))
break;
last = next;
start = last->vme_end;
}
if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
zone_t old_zone;
vm_map_entry_t next, new;
old_zone = (copy->cpy_hdr.entries_pageable)
? vm_map_entry_zone
: vm_map_kentry_zone;
entry = vm_map_copy_first_entry(copy);
vm_map_store_copy_reset(copy, entry);
copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
while (entry != vm_map_copy_to_entry(copy)) {
new = vm_map_copy_entry_create(copy);
vm_map_entry_copy_full(new, entry);
new->use_pmap = FALSE;
vm_map_copy_entry_link(copy,
vm_map_copy_last_entry(copy),
new);
next = entry->vme_next;
zfree(old_zone, entry);
entry = next;
}
}
adjustment = start - vm_copy_start;
for (entry = vm_map_copy_first_entry(copy);
entry != vm_map_copy_to_entry(copy);
entry = entry->vme_next) {
entry->vme_start += adjustment;
entry->vme_end += adjustment;
entry->inheritance = VM_INHERIT_DEFAULT;
entry->protection = VM_PROT_DEFAULT;
entry->max_protection = VM_PROT_ALL;
entry->behavior = VM_BEHAVIOR_DEFAULT;
if (entry->wired_count != 0) {
register vm_map_offset_t va;
vm_object_offset_t offset;
register vm_object_t object;
vm_prot_t prot;
int type_of_fault;
object = entry->object.vm_object;
offset = entry->offset;
va = entry->vme_start;
pmap_pageable(dst_map->pmap,
entry->vme_start,
entry->vme_end,
TRUE);
while (va < entry->vme_end) {
register vm_page_t m;
vm_object_lock(object);
m = vm_page_lookup(object, offset);
if (m == VM_PAGE_NULL || !VM_PAGE_WIRED(m) ||
m->absent)
panic("vm_map_copyout: wiring %p", m);
ASSERT_PAGE_DECRYPTED(m);
prot = entry->protection;
if (override_nx(dst_map, entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
type_of_fault = DBG_CACHE_HIT_FAULT;
vm_fault_enter(m, dst_map->pmap, va, prot, prot,
VM_PAGE_WIRED(m), FALSE, FALSE, FALSE,
&type_of_fault);
vm_object_unlock(object);
offset += PAGE_SIZE_64;
va += PAGE_SIZE;
}
}
}
*dst_addr = start + (copy->offset - vm_copy_start);
SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy));
dst_map->size += size;
vm_map_copy_insert(dst_map, last, copy);
vm_map_unlock(dst_map);
return(KERN_SUCCESS);
}
#undef vm_map_copyin
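/*
 * vm_map_copyin:
 *
 * See vm_map_copyin_common. Illustrative sketch of a hypothetical
 * caller (not part of this file): moving a region between two maps
 * with copy-on-write semantics:
 *
 *	vm_map_copy_t copy;
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 *
 * On success vm_map_copyout() consumes the copy object; on failure the
 * copy must be discarded explicitly.
 */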
kern_return_t
vm_map_copyin(
vm_map_t src_map,
vm_map_address_t src_addr,
vm_map_size_t len,
boolean_t src_destroy,
vm_map_copy_t *copy_result)
{
return(vm_map_copyin_common(src_map, src_addr, len, src_destroy,
FALSE, copy_result, FALSE));
}
typedef struct submap_map {
vm_map_t parent_map;
vm_map_offset_t base_start;
vm_map_offset_t base_end;
vm_map_size_t base_len;
struct submap_map *next;
} submap_map_t;
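/*
 * vm_map_copyin_common:
 *
 * Copy the specified region (src_addr, len) from the source address
 * space (src_map), possibly removing the region from the source
 * (src_destroy). Returns a vm_map_copy_t suitable for insertion into
 * another address space (vm_map_copyout), for overwriting another
 * region (vm_map_copy_overwrite), or for disposal (vm_map_copy_discard).
 * Submaps are traversed with a stack of submap_map_t records so that
 * the walk can resume in each parent map. The source map should not be
 * locked on entry.
 */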
kern_return_t
vm_map_copyin_common(
vm_map_t src_map,
vm_map_address_t src_addr,
vm_map_size_t len,
boolean_t src_destroy,
__unused boolean_t src_volatile,
vm_map_copy_t *copy_result,
boolean_t use_maxprot)
{
vm_map_entry_t tmp_entry;
register
vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL;
vm_map_offset_t src_start;
vm_map_offset_t src_end;
vm_map_offset_t src_base;
vm_map_t base_map = src_map;
boolean_t map_share=FALSE;
submap_map_t *parent_maps = NULL;
register
vm_map_copy_t copy;
vm_map_address_t copy_addr;
if (len == 0) {
*copy_result = VM_MAP_COPY_NULL;
return(KERN_SUCCESS);
}
src_end = src_addr + len;
if (src_end < src_addr)
return KERN_INVALID_ADDRESS;
if ((len < msg_ool_size_small) && !use_maxprot)
return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
src_destroy, copy_result);
src_start = vm_map_trunc_page(src_addr);
src_end = vm_map_round_page(src_end);
XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", src_map, src_addr, len, src_destroy, 0);
copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
vm_map_copy_first_entry(copy) =
vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
copy->type = VM_MAP_COPY_ENTRY_LIST;
copy->cpy_hdr.nentries = 0;
copy->cpy_hdr.entries_pageable = TRUE;
vm_map_store_init( &(copy->cpy_hdr) );
copy->offset = src_addr;
copy->size = len;
new_entry = vm_map_copy_entry_create(copy);
#define RETURN(x) \
MACRO_BEGIN \
vm_map_unlock(src_map); \
if(src_map != base_map) \
vm_map_deallocate(src_map); \
if (new_entry != VM_MAP_ENTRY_NULL) \
vm_map_copy_entry_dispose(copy,new_entry); \
vm_map_copy_discard(copy); \
{ \
submap_map_t *_ptr; \
\
for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \
parent_maps=parent_maps->next; \
if (_ptr->parent_map != base_map) \
vm_map_deallocate(_ptr->parent_map); \
kfree(_ptr, sizeof(submap_map_t)); \
} \
} \
MACRO_RETURN(x); \
MACRO_END
vm_map_lock(src_map);
if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
RETURN(KERN_INVALID_ADDRESS);
if(!tmp_entry->is_sub_map) {
vm_map_clip_start(src_map, tmp_entry, src_start);
}
copy_addr = src_start;
while (TRUE) {
register
vm_map_entry_t src_entry = tmp_entry;
vm_map_size_t src_size;
register
vm_object_t src_object;
vm_object_offset_t src_offset;
boolean_t src_needs_copy;
boolean_t new_entry_needs_copy;
boolean_t was_wired;
vm_map_version_t version;
kern_return_t result;
while(tmp_entry->is_sub_map) {
vm_map_size_t submap_len;
submap_map_t *ptr;
ptr = (submap_map_t *)kalloc(sizeof(submap_map_t));
ptr->next = parent_maps;
parent_maps = ptr;
ptr->parent_map = src_map;
ptr->base_start = src_start;
ptr->base_end = src_end;
submap_len = tmp_entry->vme_end - src_start;
if(submap_len > (src_end-src_start))
submap_len = src_end-src_start;
ptr->base_len = submap_len;
src_start -= tmp_entry->vme_start;
src_start += tmp_entry->offset;
src_end = src_start + submap_len;
src_map = tmp_entry->object.sub_map;
vm_map_lock(src_map);
vm_map_reference(src_map);
vm_map_unlock(ptr->parent_map);
if (!vm_map_lookup_entry(
src_map, src_start, &tmp_entry))
RETURN(KERN_INVALID_ADDRESS);
map_share = TRUE;
if(!tmp_entry->is_sub_map)
vm_map_clip_start(src_map, tmp_entry, src_start);
src_entry = tmp_entry;
}
if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) &&
(tmp_entry->object.vm_object->phys_contiguous)) {
RETURN(KERN_PROTECTION_FAILURE);
}
if (new_entry == VM_MAP_ENTRY_NULL) {
version.main_timestamp = src_map->timestamp;
vm_map_unlock(src_map);
new_entry = vm_map_copy_entry_create(copy);
vm_map_lock(src_map);
if ((version.main_timestamp + 1) != src_map->timestamp) {
if (!vm_map_lookup_entry(src_map, src_start,
&tmp_entry)) {
RETURN(KERN_INVALID_ADDRESS);
}
if (!tmp_entry->is_sub_map)
vm_map_clip_start(src_map, tmp_entry, src_start);
continue;
}
}
if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
!use_maxprot) ||
(src_entry->max_protection & VM_PROT_READ) == 0)
RETURN(KERN_PROTECTION_FAILURE);
vm_map_clip_end(src_map, src_entry, src_end);
src_size = src_entry->vme_end - src_start;
src_object = src_entry->object.vm_object;
src_offset = src_entry->offset;
was_wired = (src_entry->wired_count != 0);
vm_map_entry_copy(new_entry, src_entry);
new_entry->use_pmap = FALSE;
if (src_destroy &&
(src_object == VM_OBJECT_NULL ||
(src_object->internal && !src_object->true_share
&& !map_share))) {
vm_object_reference(src_object);
goto CopySuccessful;
}
RestartCopy:
XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
src_object, new_entry, new_entry->object.vm_object,
was_wired, 0);
if ((src_object == VM_OBJECT_NULL ||
(!was_wired && !map_share && !tmp_entry->is_shared)) &&
vm_object_copy_quickly(
&new_entry->object.vm_object,
src_offset,
src_size,
&src_needs_copy,
&new_entry_needs_copy)) {
new_entry->needs_copy = new_entry_needs_copy;
if (src_needs_copy && !tmp_entry->needs_copy) {
vm_prot_t prot;
prot = src_entry->protection & ~VM_PROT_WRITE;
if (override_nx(src_map, src_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
vm_object_pmap_protect(
src_object,
src_offset,
src_size,
(src_entry->is_shared ?
PMAP_NULL
: src_map->pmap),
src_entry->vme_start,
prot);
tmp_entry->needs_copy = TRUE;
}
goto CopySuccessful;
}
assert(src_object != VM_OBJECT_NULL);
vm_object_reference(src_object);
version.main_timestamp = src_map->timestamp;
vm_map_unlock(src_map);
if (was_wired) {
CopySlowly:
vm_object_lock(src_object);
result = vm_object_copy_slowly(
src_object,
src_offset,
src_size,
THREAD_UNINT,
&new_entry->object.vm_object);
new_entry->offset = 0;
new_entry->needs_copy = FALSE;
}
else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
(tmp_entry->is_shared || map_share)) {
vm_object_t new_object;
vm_object_lock_shared(src_object);
new_object = vm_object_copy_delayed(
src_object,
src_offset,
src_size,
TRUE);
if (new_object == VM_OBJECT_NULL)
goto CopySlowly;
new_entry->object.vm_object = new_object;
new_entry->needs_copy = TRUE;
result = KERN_SUCCESS;
} else {
result = vm_object_copy_strategically(src_object,
src_offset,
src_size,
&new_entry->object.vm_object,
&new_entry->offset,
&new_entry_needs_copy);
new_entry->needs_copy = new_entry_needs_copy;
}
if (result != KERN_SUCCESS &&
result != KERN_MEMORY_RESTART_COPY) {
vm_map_lock(src_map);
RETURN(result);
}
vm_object_deallocate(src_object);
vm_map_lock(src_map);
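/* Re-validate: if the map changed while unlocked, look the entry up again. */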
if ((version.main_timestamp + 1) == src_map->timestamp)
goto VerificationSuccessful;
if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
RETURN(KERN_INVALID_ADDRESS);
}
src_entry = tmp_entry;
vm_map_clip_start(src_map, src_entry, src_start);
if ((((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE) &&
!use_maxprot) ||
((src_entry->max_protection & VM_PROT_READ) == 0))
goto VerificationFailed;
if (src_entry->vme_end < new_entry->vme_end)
src_size = (new_entry->vme_end = src_entry->vme_end) - src_start;
if ((src_entry->object.vm_object != src_object) ||
(src_entry->offset != src_offset) ) {
VerificationFailed: ;
vm_object_deallocate(new_entry->object.vm_object);
tmp_entry = src_entry;
continue;
}
VerificationSuccessful: ;
if (result == KERN_MEMORY_RESTART_COPY)
goto RestartCopy;
CopySuccessful: ;
vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
new_entry);
src_base = src_start;
src_start = new_entry->vme_end;
new_entry = VM_MAP_ENTRY_NULL;
while ((src_start >= src_end) && (src_end != 0)) {
if (src_map != base_map) {
submap_map_t *ptr;
ptr = parent_maps;
assert(ptr != NULL);
parent_maps = parent_maps->next;
vm_map_simplify_range(src_map,
src_base,
src_end);
vm_map_unlock(src_map);
vm_map_deallocate(src_map);
vm_map_lock(ptr->parent_map);
src_map = ptr->parent_map;
src_base = ptr->base_start;
src_start = ptr->base_start + ptr->base_len;
src_end = ptr->base_end;
if ((src_end > src_start) &&
!vm_map_lookup_entry(
src_map, src_start, &tmp_entry))
RETURN(KERN_INVALID_ADDRESS);
kfree(ptr, sizeof(submap_map_t));
if(parent_maps == NULL)
map_share = FALSE;
src_entry = tmp_entry->vme_prev;
} else
break;
}
if ((src_start >= src_end) && (src_end != 0))
break;
tmp_entry = src_entry->vme_next;
if ((tmp_entry->vme_start != src_start) ||
(tmp_entry == vm_map_to_entry(src_map)))
RETURN(KERN_INVALID_ADDRESS);
}
if (src_destroy) {
(void) vm_map_delete(src_map,
vm_map_trunc_page(src_addr),
src_end,
(src_map == kernel_map) ?
VM_MAP_REMOVE_KUNWIRE :
VM_MAP_NO_FLAGS,
VM_MAP_NULL);
} else {
vm_map_simplify_range(src_map,
vm_map_trunc_page(src_addr),
vm_map_round_page(src_end));
}
vm_map_unlock(src_map);
tmp_entry = vm_map_copy_first_entry(copy);
while (tmp_entry != vm_map_copy_to_entry(copy)) {
tmp_entry->vme_end = copy_addr +
(tmp_entry->vme_end - tmp_entry->vme_start);
tmp_entry->vme_start = copy_addr;
copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
}
*copy_result = copy;
return(KERN_SUCCESS);
#undef RETURN
}
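/*
 * vm_map_copyin_object:
 *
 * Create a copy object from an object. The caller donates an object
 * reference, which the new VM_MAP_COPY_OBJECT copy assumes.
 */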
kern_return_t
vm_map_copyin_object(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
vm_map_copy_t *copy_result)
{
vm_map_copy_t copy;
copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
copy->type = VM_MAP_COPY_OBJECT;
copy->cpy_object = object;
copy->offset = offset;
copy->size = size;
*copy_result = copy;
return(KERN_SUCCESS);
}
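/*
 * vm_map_fork_share:
 *
 * Push a VM_INHERIT_SHARE entry from the parent map into the child
 * during fork. Nested pmap submaps are re-nested into the child,
 * anonymous entries get a backing object allocated, and symmetric-copy
 * objects are shadowed (or switched to delayed copy) so both maps can
 * share them safely; both entries are then marked is_shared.
 */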
static void
vm_map_fork_share(
vm_map_t old_map,
vm_map_entry_t old_entry,
vm_map_t new_map)
{
vm_object_t object;
vm_map_entry_t new_entry;
object = old_entry->object.vm_object;
if (old_entry->is_sub_map) {
assert(old_entry->wired_count == 0);
#ifndef NO_NESTED_PMAP
if(old_entry->use_pmap) {
kern_return_t result;
result = pmap_nest(new_map->pmap,
(old_entry->object.sub_map)->pmap,
(addr64_t)old_entry->vme_start,
(addr64_t)old_entry->vme_start,
(uint64_t)(old_entry->vme_end - old_entry->vme_start));
if(result)
panic("vm_map_fork_share: pmap_nest failed!");
}
#endif
} else if (object == VM_OBJECT_NULL) {
object = vm_object_allocate((vm_map_size_t)(old_entry->vme_end -
old_entry->vme_start));
old_entry->offset = 0;
old_entry->object.vm_object = object;
assert(!old_entry->needs_copy);
} else if (object->copy_strategy !=
MEMORY_OBJECT_COPY_SYMMETRIC) {
assert(! old_entry->needs_copy);
}
else if (old_entry->needs_copy ||
object->shadowed ||
(!object->true_share &&
!old_entry->is_shared &&
(object->vo_size >
(vm_map_size_t)(old_entry->vme_end -
old_entry->vme_start)))) {
vm_object_shadow(&old_entry->object.vm_object,
&old_entry->offset,
(vm_map_size_t) (old_entry->vme_end -
old_entry->vme_start));
if (!old_entry->needs_copy &&
(old_entry->protection & VM_PROT_WRITE)) {
vm_prot_t prot;
prot = old_entry->protection & ~VM_PROT_WRITE;
if (override_nx(old_map, old_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
if (old_map->mapped) {
vm_object_pmap_protect(
old_entry->object.vm_object,
old_entry->offset,
(old_entry->vme_end -
old_entry->vme_start),
PMAP_NULL,
old_entry->vme_start,
prot);
} else {
pmap_protect(old_map->pmap,
old_entry->vme_start,
old_entry->vme_end,
prot);
}
}
old_entry->needs_copy = FALSE;
object = old_entry->object.vm_object;
}
if(old_entry->is_sub_map) {
vm_map_lock(old_entry->object.sub_map);
vm_map_reference(old_entry->object.sub_map);
vm_map_unlock(old_entry->object.sub_map);
} else {
vm_object_lock(object);
vm_object_reference_locked(object);
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
}
vm_object_unlock(object);
}
new_entry = vm_map_entry_create(new_map);
vm_map_entry_copy(new_entry, old_entry);
old_entry->is_shared = TRUE;
new_entry->is_shared = TRUE;
vm_map_store_entry_link(new_map, vm_map_last_entry(new_map), new_entry);
if (old_entry->is_sub_map) {
} else {
pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start,
old_entry->vme_end - old_entry->vme_start,
old_entry->vme_start);
}
}
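/*
 * vm_map_fork_copy:
 *
 * Handle the hard VM_INHERIT_COPY cases (wired or truly shared
 * entries) by doing a full vm_map_copyin_maxprot() of the entry and
 * inserting the result into the child map. Returns FALSE if the copy
 * fails, leaving *old_entry_p at (or just past) the offending entry
 * so the caller can skip it.
 */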
static boolean_t
vm_map_fork_copy(
vm_map_t old_map,
vm_map_entry_t *old_entry_p,
vm_map_t new_map)
{
vm_map_entry_t old_entry = *old_entry_p;
vm_map_size_t entry_size = old_entry->vme_end - old_entry->vme_start;
vm_map_offset_t start = old_entry->vme_start;
vm_map_copy_t copy;
vm_map_entry_t last = vm_map_last_entry(new_map);
vm_map_unlock(old_map);
if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
!= KERN_SUCCESS) {
vm_map_lock(old_map);
if (!vm_map_lookup_entry(old_map, start, &last) ||
(last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
last = last->vme_next;
}
*old_entry_p = last;
return FALSE;
}
vm_map_copy_insert(new_map, last, copy);
vm_map_lock(old_map);
start += entry_size;
if (! vm_map_lookup_entry(old_map, start, &last)) {
last = last->vme_next;
} else {
if (last->vme_start == start) {
} else {
vm_map_clip_start(old_map, last, start);
}
}
*old_entry_p = last;
return TRUE;
}
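/*
 * vm_map_fork:
 *
 * Create and return a new map based on the old map, honoring each
 * entry's inheritance attribute (none, share, or copy). The source
 * map must be unlocked on entry; a swap reference is taken here and
 * dropped before returning.
 */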
vm_map_t
vm_map_fork(
vm_map_t old_map)
{
pmap_t new_pmap;
vm_map_t new_map;
vm_map_entry_t old_entry;
vm_map_size_t new_size = 0, entry_size;
vm_map_entry_t new_entry;
boolean_t src_needs_copy;
boolean_t new_entry_needs_copy;
new_pmap = pmap_create((vm_map_size_t) 0,
#if defined(__i386__) || defined(__x86_64__)
old_map->pmap->pm_task_map != TASK_MAP_32BIT
#else
0
#endif
);
#if defined(__i386__)
if (old_map->pmap->pm_task_map == TASK_MAP_64BIT_SHARED)
pmap_set_4GB_pagezero(new_pmap);
#endif
vm_map_reference_swap(old_map);
vm_map_lock(old_map);
new_map = vm_map_create(new_pmap,
old_map->min_offset,
old_map->max_offset,
old_map->hdr.entries_pageable);
for (
old_entry = vm_map_first_entry(old_map);
old_entry != vm_map_to_entry(old_map);
) {
entry_size = old_entry->vme_end - old_entry->vme_start;
switch (old_entry->inheritance) {
case VM_INHERIT_NONE:
break;
case VM_INHERIT_SHARE:
vm_map_fork_share(old_map, old_entry, new_map);
new_size += entry_size;
break;
case VM_INHERIT_COPY:
if(old_entry->is_sub_map)
break;
if ((old_entry->wired_count != 0) ||
((old_entry->object.vm_object != NULL) &&
(old_entry->object.vm_object->true_share))) {
goto slow_vm_map_fork_copy;
}
new_entry = vm_map_entry_create(new_map);
vm_map_entry_copy(new_entry, old_entry);
new_entry->use_pmap = FALSE;
if (! vm_object_copy_quickly(
&new_entry->object.vm_object,
old_entry->offset,
(old_entry->vme_end -
old_entry->vme_start),
&src_needs_copy,
&new_entry_needs_copy)) {
vm_map_entry_dispose(new_map, new_entry);
goto slow_vm_map_fork_copy;
}
if (src_needs_copy && !old_entry->needs_copy) {
vm_prot_t prot;
prot = old_entry->protection & ~VM_PROT_WRITE;
if (override_nx(old_map, old_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
vm_object_pmap_protect(
old_entry->object.vm_object,
old_entry->offset,
(old_entry->vme_end -
old_entry->vme_start),
((old_entry->is_shared
|| old_map->mapped)
? PMAP_NULL :
old_map->pmap),
old_entry->vme_start,
prot);
old_entry->needs_copy = TRUE;
}
new_entry->needs_copy = new_entry_needs_copy;
vm_map_store_entry_link(new_map, vm_map_last_entry(new_map),
new_entry);
new_size += entry_size;
break;
slow_vm_map_fork_copy:
if (vm_map_fork_copy(old_map, &old_entry, new_map)) {
new_size += entry_size;
}
continue;
}
old_entry = old_entry->vme_next;
}
new_map->size = new_size;
vm_map_unlock(old_map);
vm_map_deallocate(old_map);
return(new_map);
}
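/*
 * vm_map_exec:
 *
 * Populate a freshly exec'ed task's address space with the pieces it
 * expects to find there: the commpage and the shared region matching
 * the given filesystem root and CPU type.
 */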
kern_return_t
vm_map_exec(
vm_map_t new_map,
task_t task,
void *fsroot,
cpu_type_t cpu)
{
SHARED_REGION_TRACE_DEBUG(
("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): ->\n",
current_task(), new_map, task, fsroot, cpu));
(void) vm_commpage_enter(new_map, task);
(void) vm_shared_region_enter(new_map, task, fsroot, cpu);
SHARED_REGION_TRACE_DEBUG(
("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): <-\n",
current_task(), new_map, task, fsroot, cpu));
return KERN_SUCCESS;
}
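/*
 * vm_map_lookup_locked:
 *
 * Find the VM object, offset, and protection for the given virtual
 * address, assuming a page fault of the given type. Recurses into
 * submaps, performing copy-on-write of a submap's backing object in
 * the parent map when a write fault requires it, and returns the
 * object locked together with a map version for re-validation via
 * vm_map_verify(). The map must be read-locked on entry and remains
 * locked on exit; *real_map names the map actually left locked.
 */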
kern_return_t
vm_map_lookup_locked(
vm_map_t *var_map,
vm_map_offset_t vaddr,
vm_prot_t fault_type,
int object_lock_type,
vm_map_version_t *out_version,
vm_object_t *object,
vm_object_offset_t *offset,
vm_prot_t *out_prot,
boolean_t *wired,
vm_object_fault_info_t fault_info,
vm_map_t *real_map)
{
vm_map_entry_t entry;
register vm_map_t map = *var_map;
vm_map_t old_map = *var_map;
vm_map_t cow_sub_map_parent = VM_MAP_NULL;
vm_map_offset_t cow_parent_vaddr = 0;
vm_map_offset_t old_start = 0;
vm_map_offset_t old_end = 0;
register vm_prot_t prot;
boolean_t mask_protections;
vm_prot_t original_fault_type;
mask_protections = (fault_type & VM_PROT_IS_MASK) ? TRUE : FALSE;
fault_type &= ~VM_PROT_IS_MASK;
original_fault_type = fault_type;
*real_map = map;
RetryLookup:
fault_type = original_fault_type;
entry = map->hint;
if ((entry == vm_map_to_entry(map)) ||
(vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
vm_map_entry_t tmp_entry;
if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
if((cow_sub_map_parent) && (cow_sub_map_parent != map))
vm_map_unlock(cow_sub_map_parent);
if((*real_map != map)
&& (*real_map != cow_sub_map_parent))
vm_map_unlock(*real_map);
return KERN_INVALID_ADDRESS;
}
entry = tmp_entry;
}
if(map == old_map) {
old_start = entry->vme_start;
old_end = entry->vme_end;
}
submap_recurse:
if (entry->is_sub_map) {
vm_map_offset_t local_vaddr;
vm_map_offset_t end_delta;
vm_map_offset_t start_delta;
vm_map_entry_t submap_entry;
boolean_t mapped_needs_copy=FALSE;
local_vaddr = vaddr;
if ((entry->use_pmap && !(fault_type & VM_PROT_WRITE))) {
if ((*real_map != map) &&
(*real_map != cow_sub_map_parent))
vm_map_unlock(*real_map);
*real_map = entry->object.sub_map;
}
if(entry->needs_copy && (fault_type & VM_PROT_WRITE)) {
if (!mapped_needs_copy) {
if (vm_map_lock_read_to_write(map)) {
vm_map_lock_read(map);
if(*real_map == entry->object.sub_map)
*real_map = map;
goto RetryLookup;
}
vm_map_lock_read(entry->object.sub_map);
cow_sub_map_parent = map;
old_start = entry->vme_start;
old_end = entry->vme_end;
cow_parent_vaddr = vaddr;
mapped_needs_copy = TRUE;
} else {
vm_map_lock_read(entry->object.sub_map);
if((cow_sub_map_parent != map) &&
(*real_map != map))
vm_map_unlock(map);
}
} else {
vm_map_lock_read(entry->object.sub_map);
if((*real_map != map) && (map != cow_sub_map_parent))
vm_map_unlock_read(map);
}
*var_map = map = entry->object.sub_map;
local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;
RetrySubMap:
if(!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) {
if((cow_sub_map_parent) && (cow_sub_map_parent != map)){
vm_map_unlock(cow_sub_map_parent);
}
if((*real_map != map)
&& (*real_map != cow_sub_map_parent)) {
vm_map_unlock(*real_map);
}
*real_map = map;
return KERN_INVALID_ADDRESS;
}
start_delta = submap_entry->vme_start > entry->offset ?
submap_entry->vme_start - entry->offset : 0;
end_delta =
(entry->offset + start_delta + (old_end - old_start)) <=
submap_entry->vme_end ?
0 : (entry->offset +
(old_end - old_start))
- submap_entry->vme_end;
old_start += start_delta;
old_end -= end_delta;
if(submap_entry->is_sub_map) {
entry = submap_entry;
vaddr = local_vaddr;
goto submap_recurse;
}
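/*
 * Write fault through a copy-on-write submap entry: copy the affected
 * part of the submap's backing object into the parent map, so the
 * submap itself is never modified.
 */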
if(((fault_type & VM_PROT_WRITE) && cow_sub_map_parent)) {
vm_object_t sub_object, copy_object;
vm_object_offset_t copy_offset;
vm_map_offset_t local_start;
vm_map_offset_t local_end;
boolean_t copied_slowly = FALSE;
if (vm_map_lock_read_to_write(map)) {
vm_map_lock_read(map);
old_start -= start_delta;
old_end += end_delta;
goto RetrySubMap;
}
sub_object = submap_entry->object.vm_object;
if (sub_object == VM_OBJECT_NULL) {
sub_object =
vm_object_allocate(
(vm_map_size_t)
(submap_entry->vme_end -
submap_entry->vme_start));
submap_entry->object.vm_object = sub_object;
submap_entry->offset = 0;
}
local_start = local_vaddr -
(cow_parent_vaddr - old_start);
local_end = local_vaddr +
(old_end - cow_parent_vaddr);
vm_map_clip_start(map, submap_entry, local_start);
vm_map_clip_end(map, submap_entry, local_end);
assert(!submap_entry->use_pmap);
if(submap_entry->wired_count != 0 ||
(sub_object->copy_strategy ==
MEMORY_OBJECT_COPY_NONE)) {
vm_object_lock(sub_object);
vm_object_copy_slowly(sub_object,
submap_entry->offset,
(submap_entry->vme_end -
submap_entry->vme_start),
FALSE,
&copy_object);
copied_slowly = TRUE;
} else {
copy_object = sub_object;
vm_object_reference(copy_object);
sub_object->shadowed = TRUE;
submap_entry->needs_copy = TRUE;
prot = submap_entry->protection & ~VM_PROT_WRITE;
if (override_nx(map, submap_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
vm_object_pmap_protect(
sub_object,
submap_entry->offset,
submap_entry->vme_end -
submap_entry->vme_start,
(submap_entry->is_shared
|| map->mapped) ?
PMAP_NULL : map->pmap,
submap_entry->vme_start,
prot);
}
copy_offset = (local_vaddr -
submap_entry->vme_start +
submap_entry->offset);
vm_map_unlock(map);
local_start = old_start;
local_end = old_end;
map = cow_sub_map_parent;
*var_map = cow_sub_map_parent;
vaddr = cow_parent_vaddr;
cow_sub_map_parent = NULL;
if(!vm_map_lookup_entry(map,
vaddr, &entry)) {
vm_object_deallocate(
copy_object);
vm_map_lock_write_to_read(map);
return KERN_INVALID_ADDRESS;
}
local_start = vaddr & ~(pmap_nesting_size_min - 1);
local_end = local_start + pmap_nesting_size_min;
if (local_start < old_start) {
local_start = old_start;
}
if (local_end > old_end) {
local_end = old_end;
}
copy_offset -= (vaddr - local_start);
vm_map_clip_start(map, entry, local_start);
vm_map_clip_end(map, entry, local_end);
assert(!entry->use_pmap);
vm_map_deallocate(entry->object.sub_map);
entry->is_sub_map = FALSE;
entry->object.vm_object = copy_object;
entry->protection |= submap_entry->protection;
entry->max_protection |= submap_entry->max_protection;
if(copied_slowly) {
entry->offset = local_start - old_start;
entry->needs_copy = FALSE;
entry->is_shared = FALSE;
} else {
entry->offset = copy_offset;
entry->needs_copy = TRUE;
if(entry->inheritance == VM_INHERIT_SHARE)
entry->inheritance = VM_INHERIT_COPY;
if (map != old_map)
entry->is_shared = TRUE;
}
if(entry->inheritance == VM_INHERIT_SHARE)
entry->inheritance = VM_INHERIT_COPY;
vm_map_lock_write_to_read(map);
} else {
if((cow_sub_map_parent)
&& (cow_sub_map_parent != *real_map)
&& (cow_sub_map_parent != map)) {
vm_map_unlock(cow_sub_map_parent);
}
entry = submap_entry;
vaddr = local_vaddr;
}
}
prot = entry->protection;
if (override_nx(map, entry->alias) && prot) {
prot |= VM_PROT_EXECUTE;
}
if (mask_protections) {
fault_type &= prot;
if (fault_type == VM_PROT_NONE) {
goto protection_failure;
}
}
if ((fault_type & (prot)) != fault_type) {
protection_failure:
if (*real_map != map) {
vm_map_unlock(*real_map);
}
*real_map = map;
if ((fault_type & VM_PROT_EXECUTE) && prot)
log_stack_execution_failure((addr64_t)vaddr, prot);
DTRACE_VM2(prot_fault, int, 1, (uint64_t *), NULL);
return KERN_PROTECTION_FAILURE;
}
*wired = (entry->wired_count != 0);
if (*wired)
fault_type = prot;
if (entry->needs_copy) {
if ((fault_type & VM_PROT_WRITE) || *wired) {
if (vm_map_lock_read_to_write(map)) {
vm_map_lock_read(map);
goto RetryLookup;
}
vm_object_shadow(&entry->object.vm_object,
&entry->offset,
(vm_map_size_t) (entry->vme_end -
entry->vme_start));
entry->object.vm_object->shadowed = TRUE;
entry->needs_copy = FALSE;
vm_map_lock_write_to_read(map);
}
else {
prot &= (~VM_PROT_WRITE);
}
}
if (entry->object.vm_object == VM_OBJECT_NULL) {
if (vm_map_lock_read_to_write(map)) {
vm_map_lock_read(map);
goto RetryLookup;
}
entry->object.vm_object = vm_object_allocate(
(vm_map_size_t)(entry->vme_end - entry->vme_start));
entry->offset = 0;
vm_map_lock_write_to_read(map);
}
*offset = (vaddr - entry->vme_start) + entry->offset;
*object = entry->object.vm_object;
*out_prot = prot;
if (fault_info) {
fault_info->interruptible = THREAD_UNINT;
fault_info->cluster_size = 0;
fault_info->user_tag = entry->alias;
fault_info->behavior = entry->behavior;
fault_info->lo_offset = entry->offset;
fault_info->hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
fault_info->no_cache = entry->no_cache;
fault_info->stealth = FALSE;
fault_info->io_sync = FALSE;
fault_info->cs_bypass = (entry->used_for_jit)? TRUE : FALSE;
fault_info->mark_zf_absent = FALSE;
}
if (object_lock_type == OBJECT_LOCK_EXCLUSIVE)
vm_object_lock(*object);
else
vm_object_lock_shared(*object);
out_version->main_timestamp = map->timestamp;
return KERN_SUCCESS;
}
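/*
 * vm_map_verify:
 *
 * Check that the map has not changed since the given map version was
 * recorded. On success the map is left read-locked; on failure the
 * read lock is dropped before returning FALSE.
 */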
boolean_t
vm_map_verify(
register vm_map_t map,
register vm_map_version_t *version)
{
boolean_t result;
vm_map_lock_read(map);
result = (map->timestamp == version->main_timestamp);
if (!result)
vm_map_unlock_read(map);
return(result);
}
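/*
 * vm_map_region_recurse_64:
 *
 * Describe the mapping at (or after) *address, recursing into submaps
 * up to *nesting_depth levels. Depending on the caller-supplied count,
 * fills in either the full 64-bit submap info or its short variant,
 * and reports the region's address, size, and actual nesting depth
 * back to the caller.
 */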
kern_return_t
vm_map_region_recurse_64(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t *size,
natural_t *nesting_depth,
vm_region_submap_info_64_t submap_info,
mach_msg_type_number_t *count)
{
vm_region_extended_info_data_t extended;
vm_map_entry_t tmp_entry;
vm_map_offset_t user_address;
unsigned int user_max_depth;
vm_map_entry_t curr_entry;
vm_map_address_t curr_address;
vm_map_offset_t curr_offset;
vm_map_t curr_map;
unsigned int curr_depth;
vm_map_offset_t curr_max_below, curr_max_above;
vm_map_offset_t curr_skip;
vm_map_entry_t next_entry;
vm_map_offset_t next_offset;
vm_map_offset_t next_address;
vm_map_t next_map;
unsigned int next_depth;
vm_map_offset_t next_max_below, next_max_above;
vm_map_offset_t next_skip;
boolean_t look_for_pages;
vm_region_submap_short_info_64_t short_info;
if (map == VM_MAP_NULL) {
return KERN_INVALID_ARGUMENT;
}
if (*count < VM_REGION_SUBMAP_INFO_COUNT_64) {
if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) {
return KERN_INVALID_ARGUMENT;
} else {
look_for_pages = FALSE;
*count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
short_info = (vm_region_submap_short_info_64_t) submap_info;
submap_info = NULL;
}
} else {
look_for_pages = TRUE;
*count = VM_REGION_SUBMAP_INFO_COUNT_64;
short_info = NULL;
}
user_address = *address;
user_max_depth = *nesting_depth;
curr_entry = NULL;
curr_map = map;
curr_address = user_address;
curr_offset = 0;
curr_skip = 0;
curr_depth = 0;
curr_max_above = ((vm_map_offset_t) -1) - curr_address;
curr_max_below = curr_address;
next_entry = NULL;
next_map = NULL;
next_address = 0;
next_offset = 0;
next_skip = 0;
next_depth = 0;
next_max_above = (vm_map_offset_t) -1;
next_max_below = (vm_map_offset_t) -1;
if (not_in_kdp) {
vm_map_lock_read(curr_map);
}
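/*
 * Descend through the submaps mapped at user_address, remembering the
 * closest following mapping ("next_*") in case nothing is mapped at
 * the address itself and we must fall forward to it.
 */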
for (;;) {
if (vm_map_lookup_entry(curr_map,
curr_address,
&tmp_entry)) {
curr_entry = tmp_entry;
} else {
vm_map_offset_t skip;
curr_entry = tmp_entry->vme_next;
if (curr_entry == vm_map_to_entry(curr_map) ||
(curr_entry->vme_start >=
curr_address + curr_max_above)) {
if (not_in_kdp) {
vm_map_unlock_read(curr_map);
}
curr_entry = NULL;
curr_map = NULL;
curr_offset = 0;
curr_depth = 0;
curr_max_above = 0;
curr_max_below = 0;
break;
}
skip = curr_entry->vme_start - curr_address;
curr_address = curr_entry->vme_start;
curr_skip = skip;
curr_offset += skip;
curr_max_above -= skip;
curr_max_below = 0;
}
tmp_entry = curr_entry->vme_next;
if (tmp_entry == vm_map_to_entry(curr_map)) {
} else if (tmp_entry->vme_start >=
curr_address + curr_max_above) {
} else if ((next_entry == NULL) ||
(tmp_entry->vme_start + curr_offset <=
next_entry->vme_start + next_offset)) {
if (next_entry != NULL) {
if (next_map != curr_map && not_in_kdp) {
vm_map_unlock_read(next_map);
}
}
next_entry = tmp_entry;
next_map = curr_map;
next_depth = curr_depth;
next_address = next_entry->vme_start;
next_skip = curr_skip;
next_offset = curr_offset;
next_offset += (next_address - curr_address);
next_max_above = MIN(next_max_above, curr_max_above);
next_max_above = MIN(next_max_above,
next_entry->vme_end - next_address);
next_max_below = MIN(next_max_below, curr_max_below);
next_max_below = MIN(next_max_below,
next_address - next_entry->vme_start);
}
curr_max_above = MIN(curr_max_above,
curr_entry->vme_end - curr_address);
curr_max_below = MIN(curr_max_below,
curr_address - curr_entry->vme_start);
if (!curr_entry->is_sub_map ||
curr_depth >= user_max_depth) {
break;
}
if (not_in_kdp) {
vm_map_lock_read(curr_entry->object.sub_map);
}
if (curr_map == next_map) {
} else {
if (not_in_kdp)
vm_map_unlock_read(curr_map);
}
curr_offset +=
(curr_entry->offset - curr_entry->vme_start);
curr_address = user_address + curr_offset;
curr_map = curr_entry->object.sub_map;
curr_depth++;
curr_entry = NULL;
}
if (curr_entry == NULL) {
if (next_entry == NULL) {
return KERN_INVALID_ADDRESS;
}
curr_entry = next_entry;
curr_map = next_map;
curr_address = next_address;
curr_skip = next_skip;
curr_offset = next_offset;
curr_depth = next_depth;
curr_max_above = next_max_above;
curr_max_below = next_max_below;
if (curr_map == map) {
user_address = curr_address;
}
} else {
if (next_entry != NULL) {
if (next_map != curr_map && not_in_kdp) {
vm_map_unlock_read(next_map);
}
}
}
next_entry = NULL;
next_map = NULL;
next_offset = 0;
next_skip = 0;
next_depth = 0;
next_max_below = -1;
next_max_above = -1;
*nesting_depth = curr_depth;
*size = curr_max_above + curr_max_below;
*address = user_address + curr_skip - curr_max_below;
#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)p)
if (look_for_pages) {
submap_info->user_tag = curr_entry->alias;
submap_info->offset = curr_entry->offset;
submap_info->protection = curr_entry->protection;
submap_info->inheritance = curr_entry->inheritance;
submap_info->max_protection = curr_entry->max_protection;
submap_info->behavior = curr_entry->behavior;
submap_info->user_wired_count = curr_entry->user_wired_count;
submap_info->is_submap = curr_entry->is_sub_map;
submap_info->object_id = INFO_MAKE_OBJECT_ID(curr_entry->object.vm_object);
} else {
short_info->user_tag = curr_entry->alias;
short_info->offset = curr_entry->offset;
short_info->protection = curr_entry->protection;
short_info->inheritance = curr_entry->inheritance;
short_info->max_protection = curr_entry->max_protection;
short_info->behavior = curr_entry->behavior;
short_info->user_wired_count = curr_entry->user_wired_count;
short_info->is_submap = curr_entry->is_sub_map;
short_info->object_id = INFO_MAKE_OBJECT_ID(curr_entry->object.vm_object);
}
extended.pages_resident = 0;
extended.pages_swapped_out = 0;
extended.pages_shared_now_private = 0;
extended.pages_dirtied = 0;
extended.external_pager = 0;
extended.shadow_depth = 0;
if (not_in_kdp) {
if (!curr_entry->is_sub_map) {
vm_map_offset_t range_start, range_end;
range_start = MAX((curr_address - curr_max_below),
curr_entry->vme_start);
range_end = MIN((curr_address + curr_max_above),
curr_entry->vme_end);
vm_map_region_walk(curr_map,
range_start,
curr_entry,
(curr_entry->offset +
(range_start -
curr_entry->vme_start)),
range_end - range_start,
&extended,
look_for_pages);
if (extended.external_pager &&
extended.ref_count == 2 &&
extended.share_mode == SM_SHARED) {
extended.share_mode = SM_PRIVATE;
}
} else {
if (curr_entry->use_pmap) {
extended.share_mode = SM_TRUESHARED;
} else {
extended.share_mode = SM_PRIVATE;
}
extended.ref_count =
curr_entry->object.sub_map->ref_count;
}
}
if (look_for_pages) {
submap_info->pages_resident = extended.pages_resident;
submap_info->pages_swapped_out = extended.pages_swapped_out;
submap_info->pages_shared_now_private =
extended.pages_shared_now_private;
submap_info->pages_dirtied = extended.pages_dirtied;
submap_info->external_pager = extended.external_pager;
submap_info->shadow_depth = extended.shadow_depth;
submap_info->share_mode = extended.share_mode;
submap_info->ref_count = extended.ref_count;
} else {
short_info->external_pager = extended.external_pager;
short_info->shadow_depth = extended.shadow_depth;
short_info->share_mode = extended.share_mode;
short_info->ref_count = extended.ref_count;
}
if (not_in_kdp) {
vm_map_unlock_read(curr_map);
}
return KERN_SUCCESS;
}
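/*
 * vm_map_region:
 *
 * Return information about the region at (or after) *address, in the
 * requested flavor: basic info (32- or 64-bit offsets), extended
 * page-level statistics, or top-level sharing info.
 */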
kern_return_t
vm_map_region(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t *size,
vm_region_flavor_t flavor,
vm_region_info_t info,
mach_msg_type_number_t *count,
mach_port_t *object_name)
{
vm_map_entry_t tmp_entry;
vm_map_entry_t entry;
vm_map_offset_t start;
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
switch (flavor) {
case VM_REGION_BASIC_INFO:
{
vm_region_basic_info_t basic;
if (*count < VM_REGION_BASIC_INFO_COUNT)
return(KERN_INVALID_ARGUMENT);
basic = (vm_region_basic_info_t) info;
*count = VM_REGION_BASIC_INFO_COUNT;
vm_map_lock_read(map);
start = *address;
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
vm_map_unlock_read(map);
return(KERN_INVALID_ADDRESS);
}
} else {
entry = tmp_entry;
}
start = entry->vme_start;
basic->offset = (uint32_t)entry->offset;
basic->protection = entry->protection;
basic->inheritance = entry->inheritance;
basic->max_protection = entry->max_protection;
basic->behavior = entry->behavior;
basic->user_wired_count = entry->user_wired_count;
basic->reserved = entry->is_sub_map;
*address = start;
*size = (entry->vme_end - start);
if (object_name) *object_name = IP_NULL;
if (entry->is_sub_map) {
basic->shared = FALSE;
} else {
basic->shared = entry->is_shared;
}
vm_map_unlock_read(map);
return(KERN_SUCCESS);
}
case VM_REGION_BASIC_INFO_64:
{
vm_region_basic_info_64_t basic;
if (*count < VM_REGION_BASIC_INFO_COUNT_64)
return(KERN_INVALID_ARGUMENT);
basic = (vm_region_basic_info_64_t) info;
*count = VM_REGION_BASIC_INFO_COUNT_64;
vm_map_lock_read(map);
start = *address;
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
vm_map_unlock_read(map);
return(KERN_INVALID_ADDRESS);
}
} else {
entry = tmp_entry;
}
start = entry->vme_start;
basic->offset = entry->offset;
basic->protection = entry->protection;
basic->inheritance = entry->inheritance;
basic->max_protection = entry->max_protection;
basic->behavior = entry->behavior;
basic->user_wired_count = entry->user_wired_count;
basic->reserved = entry->is_sub_map;
*address = start;
*size = (entry->vme_end - start);
if (object_name) *object_name = IP_NULL;
if (entry->is_sub_map) {
basic->shared = FALSE;
} else {
basic->shared = entry->is_shared;
}
vm_map_unlock_read(map);
return(KERN_SUCCESS);
}
case VM_REGION_EXTENDED_INFO:
{
vm_region_extended_info_t extended;
if (*count < VM_REGION_EXTENDED_INFO_COUNT)
return(KERN_INVALID_ARGUMENT);
extended = (vm_region_extended_info_t) info;
*count = VM_REGION_EXTENDED_INFO_COUNT;
vm_map_lock_read(map);
start = *address;
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
vm_map_unlock_read(map);
return(KERN_INVALID_ADDRESS);
}
} else {
entry = tmp_entry;
}
start = entry->vme_start;
extended->protection = entry->protection;
extended->user_tag = entry->alias;
extended->pages_resident = 0;
extended->pages_swapped_out = 0;
extended->pages_shared_now_private = 0;
extended->pages_dirtied = 0;
extended->external_pager = 0;
extended->shadow_depth = 0;
vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, extended, TRUE);
if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
extended->share_mode = SM_PRIVATE;
if (object_name)
*object_name = IP_NULL;
*address = start;
*size = (entry->vme_end - start);
vm_map_unlock_read(map);
return(KERN_SUCCESS);
}
case VM_REGION_TOP_INFO:
{
vm_region_top_info_t top;
if (*count < VM_REGION_TOP_INFO_COUNT)
return(KERN_INVALID_ARGUMENT);
top = (vm_region_top_info_t) info;
*count = VM_REGION_TOP_INFO_COUNT;
vm_map_lock_read(map);
start = *address;
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
vm_map_unlock_read(map);
return(KERN_INVALID_ADDRESS);
}
} else {
entry = tmp_entry;
}
start = entry->vme_start;
top->private_pages_resident = 0;
top->shared_pages_resident = 0;
vm_map_region_top_walk(entry, top);
if (object_name)
*object_name = IP_NULL;
*address = start;
*size = (entry->vme_end - start);
vm_map_unlock_read(map);
return(KERN_SUCCESS);
}
default:
return(KERN_INVALID_ARGUMENT);
}
}
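/*
 * Number of resident pages an object contributes to a region of
 * "entry_size" pages: for all-reusable objects only the wired pages
 * count; otherwise the resident pages minus the reusable ones.
 */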
#define OBJ_RESIDENT_COUNT(obj, entry_size) \
MIN((entry_size), \
((obj)->all_reusable ? \
(obj)->wired_page_count : \
(obj)->resident_page_count - (obj)->reusable_page_count))
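/*
 * vm_map_region_top_walk:
 *
 * Compute the top-level share mode and private/shared resident page
 * counts for an entry by walking its object's shadow chain.
 */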
void
vm_map_region_top_walk(
vm_map_entry_t entry,
vm_region_top_info_t top)
{
if (entry->object.vm_object == 0 || entry->is_sub_map) {
top->share_mode = SM_EMPTY;
top->ref_count = 0;
top->obj_id = 0;
return;
}
{
struct vm_object *obj, *tmp_obj;
int ref_count;
uint32_t entry_size;
entry_size = (uint32_t) ((entry->vme_end - entry->vme_start) / PAGE_SIZE_64);
obj = entry->object.vm_object;
vm_object_lock(obj);
if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
ref_count--;
assert(obj->reusable_page_count <= obj->resident_page_count);
if (obj->shadow) {
if (ref_count == 1)
top->private_pages_resident =
OBJ_RESIDENT_COUNT(obj, entry_size);
else
top->shared_pages_resident =
OBJ_RESIDENT_COUNT(obj, entry_size);
top->ref_count = ref_count;
top->share_mode = SM_COW;
while ((tmp_obj = obj->shadow)) {
vm_object_lock(tmp_obj);
vm_object_unlock(obj);
obj = tmp_obj;
if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
ref_count--;
assert(obj->reusable_page_count <= obj->resident_page_count);
top->shared_pages_resident +=
OBJ_RESIDENT_COUNT(obj, entry_size);
top->ref_count += ref_count - 1;
}
} else {
if (entry->superpage_size) {
top->share_mode = SM_LARGE_PAGE;
top->shared_pages_resident = 0;
top->private_pages_resident = entry_size;
} else if (entry->needs_copy) {
top->share_mode = SM_COW;
top->shared_pages_resident =
OBJ_RESIDENT_COUNT(obj, entry_size);
} else {
if (ref_count == 1 ||
(ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
top->share_mode = SM_PRIVATE;
top->private_pages_resident =
OBJ_RESIDENT_COUNT(obj,
entry_size);
} else {
top->share_mode = SM_SHARED;
top->shared_pages_resident =
OBJ_RESIDENT_COUNT(obj,
entry_size);
}
}
top->ref_count = ref_count;
}
top->obj_id = (unsigned int) (uintptr_t)obj;
vm_object_unlock(obj);
}
}
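/*
 * vm_map_region_walk:
 *
 * Gather extended statistics (resident, dirtied and swapped-out pages,
 * shadow depth, share mode) for the given range of an entry, optionally
 * examining every page via vm_map_region_look_for_page().
 */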
void
vm_map_region_walk(
vm_map_t map,
vm_map_offset_t va,
vm_map_entry_t entry,
vm_object_offset_t offset,
vm_object_size_t range,
vm_region_extended_info_t extended,
boolean_t look_for_pages)
{
register struct vm_object *obj, *tmp_obj;
register vm_map_offset_t last_offset;
register int i;
register int ref_count;
struct vm_object *shadow_object;
int shadow_depth;
if ((entry->object.vm_object == 0) ||
(entry->is_sub_map) ||
(entry->object.vm_object->phys_contiguous &&
!entry->superpage_size)) {
extended->share_mode = SM_EMPTY;
extended->ref_count = 0;
return;
}
if (entry->superpage_size) {
extended->shadow_depth = 0;
extended->share_mode = SM_LARGE_PAGE;
extended->ref_count = 1;
extended->external_pager = 0;
extended->pages_resident = (unsigned int)(range >> PAGE_SHIFT);
extended->shadow_depth = 0;
return;
}
{
obj = entry->object.vm_object;
vm_object_lock(obj);
if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
ref_count--;
if (look_for_pages) {
for (last_offset = offset + range;
offset < last_offset;
offset += PAGE_SIZE_64, va += PAGE_SIZE)
vm_map_region_look_for_page(map, va, obj,
offset, ref_count,
0, extended);
} else {
shadow_object = obj->shadow;
shadow_depth = 0;
if ( !(obj->pager_trusted) && !(obj->internal))
extended->external_pager = 1;
if (shadow_object != VM_OBJECT_NULL) {
vm_object_lock(shadow_object);
for (;
shadow_object != VM_OBJECT_NULL;
shadow_depth++) {
vm_object_t next_shadow;
if ( !(shadow_object->pager_trusted) &&
!(shadow_object->internal))
extended->external_pager = 1;
next_shadow = shadow_object->shadow;
if (next_shadow) {
vm_object_lock(next_shadow);
}
vm_object_unlock(shadow_object);
shadow_object = next_shadow;
}
}
extended->shadow_depth = shadow_depth;
}
if (extended->shadow_depth || entry->needs_copy)
extended->share_mode = SM_COW;
else {
if (ref_count == 1)
extended->share_mode = SM_PRIVATE;
else {
if (obj->true_share)
extended->share_mode = SM_TRUESHARED;
else
extended->share_mode = SM_SHARED;
}
}
extended->ref_count = ref_count - extended->shadow_depth;
for (i = 0; i < extended->shadow_depth; i++) {
if ((tmp_obj = obj->shadow) == 0)
break;
vm_object_lock(tmp_obj);
vm_object_unlock(obj);
if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
ref_count--;
extended->ref_count += ref_count;
obj = tmp_obj;
}
vm_object_unlock(obj);
if (extended->share_mode == SM_SHARED) {
register vm_map_entry_t cur;
register vm_map_entry_t last;
int my_refs;
obj = entry->object.vm_object;
last = vm_map_to_entry(map);
my_refs = 0;
if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
ref_count--;
for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
my_refs += vm_map_region_count_obj_refs(cur, obj);
if (my_refs == ref_count)
extended->share_mode = SM_PRIVATE_ALIASED;
else if (my_refs > 1)
extended->share_mode = SM_SHARED_ALIASED;
}
}
}
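/*
 * vm_map_region_look_for_page:
 *
 * Classify a single page of a region: walk down the shadow chain from
 * the top object, counting the page as resident (and possibly dirtied
 * or shared-now-private) when found in memory, or as swapped out when
 * only a backing-store copy exists.
 */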
static void
vm_map_region_look_for_page(
__unused vm_map_t map,
__unused vm_map_offset_t va,
vm_object_t object,
vm_object_offset_t offset,
int max_refcnt,
int depth,
vm_region_extended_info_t extended)
{
register vm_page_t p;
register vm_object_t shadow;
register int ref_count;
vm_object_t caller_object;
#if MACH_PAGEMAP
kern_return_t kr;
#endif
shadow = object->shadow;
caller_object = object;
while (TRUE) {
if ( !(object->pager_trusted) && !(object->internal))
extended->external_pager = 1;
if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
if (shadow && (max_refcnt == 1))
extended->pages_shared_now_private++;
if (!p->fictitious &&
(p->dirty || pmap_is_modified(p->phys_page)))
extended->pages_dirtied++;
extended->pages_resident++;
if(object != caller_object)
vm_object_unlock(object);
return;
}
#if MACH_PAGEMAP
if (object->existence_map) {
if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {
extended->pages_swapped_out++;
if(object != caller_object)
vm_object_unlock(object);
return;
}
} else if (object->internal &&
object->alive &&
!object->terminating &&
object->pager_ready) {
memory_object_t pager;
vm_object_paging_begin(object);
pager = object->pager;
vm_object_unlock(object);
kr = memory_object_data_request(
pager,
offset + object->paging_offset,
0,
VM_PROT_READ,
NULL);
vm_object_lock(object);
vm_object_paging_end(object);
if (kr == KERN_SUCCESS) {
extended->pages_swapped_out++;
if (object != caller_object)
vm_object_unlock(object);
return;
}
}
#endif
if (shadow) {
vm_object_lock(shadow);
if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
ref_count--;
if (++depth > extended->shadow_depth)
extended->shadow_depth = depth;
if (ref_count > max_refcnt)
max_refcnt = ref_count;
if(object != caller_object)
vm_object_unlock(object);
offset = offset + object->vo_shadow_offset;
object = shadow;
shadow = object->shadow;
continue;
}
if(object != caller_object)
vm_object_unlock(object);
break;
}
}
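/*
 * vm_map_region_count_obj_refs:
 *
 * Count how many times the given object appears in the shadow chain of
 * the entry's object; used to detect aliased mappings within one map.
 */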
static int
vm_map_region_count_obj_refs(
vm_map_entry_t entry,
vm_object_t object)
{
register int ref_count;
register vm_object_t chk_obj;
register vm_object_t tmp_obj;
if (entry->object.vm_object == 0)
return(0);
if (entry->is_sub_map)
return(0);
else {
ref_count = 0;
chk_obj = entry->object.vm_object;
vm_object_lock(chk_obj);
while (chk_obj) {
if (chk_obj == object)
ref_count++;
tmp_obj = chk_obj->shadow;
if (tmp_obj)
vm_object_lock(tmp_obj);
vm_object_unlock(chk_obj);
chk_obj = tmp_obj;
}
}
return(ref_count);
}
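/*
 * vm_map_simplify_entry:
 *
 * Coalesce this_entry with its predecessor when the two are virtually
 * and object-contiguous and agree on every relevant attribute; the
 * predecessor is unlinked and disposed of. The map must be locked.
 */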
void
vm_map_simplify_entry(
vm_map_t map,
vm_map_entry_t this_entry)
{
vm_map_entry_t prev_entry;
counter(c_vm_map_simplify_entry_called++);
prev_entry = this_entry->vme_prev;
if ((this_entry != vm_map_to_entry(map)) &&
(prev_entry != vm_map_to_entry(map)) &&
(prev_entry->vme_end == this_entry->vme_start) &&
(prev_entry->is_sub_map == this_entry->is_sub_map) &&
(prev_entry->object.vm_object == this_entry->object.vm_object) &&
((prev_entry->offset + (prev_entry->vme_end -
prev_entry->vme_start))
== this_entry->offset) &&
(prev_entry->inheritance == this_entry->inheritance) &&
(prev_entry->protection == this_entry->protection) &&
(prev_entry->max_protection == this_entry->max_protection) &&
(prev_entry->behavior == this_entry->behavior) &&
(prev_entry->alias == this_entry->alias) &&
(prev_entry->zero_wired_pages == this_entry->zero_wired_pages) &&
(prev_entry->no_cache == this_entry->no_cache) &&
(prev_entry->wired_count == this_entry->wired_count) &&
(prev_entry->user_wired_count == this_entry->user_wired_count) &&
(prev_entry->needs_copy == this_entry->needs_copy) &&
(prev_entry->permanent == this_entry->permanent) &&
(prev_entry->use_pmap == FALSE) &&
(this_entry->use_pmap == FALSE) &&
(prev_entry->in_transition == FALSE) &&
(this_entry->in_transition == FALSE) &&
(prev_entry->needs_wakeup == FALSE) &&
(this_entry->needs_wakeup == FALSE) &&
(prev_entry->is_shared == FALSE) &&
(this_entry->is_shared == FALSE)
) {
_vm_map_store_entry_unlink(&map->hdr, prev_entry);
this_entry->vme_start = prev_entry->vme_start;
this_entry->offset = prev_entry->offset;
if (prev_entry->is_sub_map) {
vm_map_deallocate(prev_entry->object.sub_map);
} else {
vm_object_deallocate(prev_entry->object.vm_object);
}
vm_map_entry_dispose(map, prev_entry);
SAVE_HINT_MAP_WRITE(map, this_entry);
counter(c_vm_map_simplified++);
}
}
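/*
 * vm_map_simplify:
 *
 * Attempt to coalesce the entry at "start" with both of its neighbors.
 */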
void
vm_map_simplify(
vm_map_t map,
vm_map_offset_t start)
{
vm_map_entry_t this_entry;
vm_map_lock(map);
if (vm_map_lookup_entry(map, start, &this_entry)) {
vm_map_simplify_entry(map, this_entry);
vm_map_simplify_entry(map, this_entry->vme_next);
}
counter(c_vm_map_simplify_called++);
vm_map_unlock(map);
}
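/*
 * vm_map_simplify_range:
 *
 * Coalesce all eligible entries in the given (page-rounded) range.
 * The map must be locked by the caller.
 */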
static void
vm_map_simplify_range(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end)
{
vm_map_entry_t entry;
if (start >= end) {
return;
}
start = vm_map_trunc_page(start);
end = vm_map_round_page(end);
if (!vm_map_lookup_entry(map, start, &entry)) {
if (entry == vm_map_to_entry(map)) {
entry = vm_map_first_entry(map);
} else {
entry = entry->vme_next;
}
}
while (entry != vm_map_to_entry(map) &&
entry->vme_start <= end) {
vm_map_simplify_entry(map, entry);
entry = entry->vme_next;
}
}
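/*
 * vm_map_machine_attribute:
 *
 * Get or set machine-specific attributes for a range. Everything but
 * MATTR_CACHE is handed straight to the pmap layer; cache operations
 * are applied page by page, following shadow chains to reach each
 * resident physical page.
 */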
kern_return_t
vm_map_machine_attribute(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_machine_attribute_t attribute,
vm_machine_attribute_val_t* value)
{
kern_return_t ret;
vm_map_size_t sync_size;
vm_map_entry_t entry;
if (start < vm_map_min(map) || end > vm_map_max(map))
return KERN_INVALID_ADDRESS;
sync_size = end - start;
vm_map_lock(map);
if (attribute != MATTR_CACHE) {
ret = pmap_attribute(map->pmap, start, end-start,
attribute, value);
vm_map_unlock(map);
return ret;
}
ret = KERN_SUCCESS;
while(sync_size) {
if (vm_map_lookup_entry(map, start, &entry)) {
vm_map_size_t sub_size;
if((entry->vme_end - start) > sync_size) {
sub_size = sync_size;
sync_size = 0;
} else {
sub_size = entry->vme_end - start;
sync_size -= sub_size;
}
if(entry->is_sub_map) {
vm_map_offset_t sub_start;
vm_map_offset_t sub_end;
sub_start = (start - entry->vme_start)
+ entry->offset;
sub_end = sub_start + sub_size;
vm_map_machine_attribute(
entry->object.sub_map,
sub_start,
sub_end,
attribute, value);
} else {
if(entry->object.vm_object) {
vm_page_t m;
vm_object_t object;
vm_object_t base_object;
vm_object_t last_object;
vm_object_offset_t offset;
vm_object_offset_t base_offset;
vm_map_size_t range;
range = sub_size;
offset = (start - entry->vme_start)
+ entry->offset;
base_offset = offset;
object = entry->object.vm_object;
base_object = object;
last_object = NULL;
vm_object_lock(object);
while (range) {
m = vm_page_lookup(
object, offset);
if (m && !m->fictitious) {
ret =
pmap_attribute_cache_sync(
m->phys_page,
PAGE_SIZE,
attribute, value);
} else if (object->shadow) {
offset = offset + object->vo_shadow_offset;
last_object = object;
object = object->shadow;
vm_object_lock(last_object->shadow);
vm_object_unlock(last_object);
continue;
}
range -= PAGE_SIZE;
if (base_object != object) {
vm_object_unlock(object);
vm_object_lock(base_object);
object = base_object;
}
base_offset += PAGE_SIZE;
offset = base_offset;
}
vm_object_unlock(object);
}
}
start += sub_size;
} else {
vm_map_unlock(map);
return KERN_FAILURE;
}
}
vm_map_unlock(map);
return ret;
}
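/*
 * vm_map_behavior_set:
 *
 * Set the paging reference behavior for the given range. The simple
 * behaviors (default, random, sequential, ...) are recorded on the map
 * entries and influence pagein clustering; the madvise-style behaviors
 * (WILLNEED, DONTNEED, FREE, REUSABLE, ...) act on the pages at once.
 */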
kern_return_t
vm_map_behavior_set(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
vm_behavior_t new_behavior)
{
register vm_map_entry_t entry;
vm_map_entry_t temp_entry;
XPR(XPR_VM_MAP,
"vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
map, start, end, new_behavior, 0);
if (start > end ||
start < vm_map_min(map) ||
end > vm_map_max(map)) {
return KERN_NO_SPACE;
}
switch (new_behavior) {
case VM_BEHAVIOR_DEFAULT:
case VM_BEHAVIOR_RANDOM:
case VM_BEHAVIOR_SEQUENTIAL:
case VM_BEHAVIOR_RSEQNTL:
case VM_BEHAVIOR_ZERO_WIRED_PAGES:
vm_map_lock(map);
if (vm_map_range_check(map, start, end, &temp_entry)) {
entry = temp_entry;
vm_map_clip_start(map, entry, start);
}
else {
vm_map_unlock(map);
return(KERN_INVALID_ADDRESS);
}
while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
vm_map_clip_end(map, entry, end);
assert(!entry->use_pmap);
if( new_behavior == VM_BEHAVIOR_ZERO_WIRED_PAGES ) {
entry->zero_wired_pages = TRUE;
} else {
entry->behavior = new_behavior;
}
entry = entry->vme_next;
}
vm_map_unlock(map);
break;
case VM_BEHAVIOR_WILLNEED:
return vm_map_willneed(map, start, end);
case VM_BEHAVIOR_DONTNEED:
return vm_map_msync(map, start, end - start, VM_SYNC_DEACTIVATE | VM_SYNC_CONTIGUOUS);
case VM_BEHAVIOR_FREE:
return vm_map_msync(map, start, end - start, VM_SYNC_KILLPAGES | VM_SYNC_CONTIGUOUS);
case VM_BEHAVIOR_REUSABLE:
return vm_map_reusable_pages(map, start, end);
case VM_BEHAVIOR_REUSE:
return vm_map_reuse_pages(map, start, end);
case VM_BEHAVIOR_CAN_REUSE:
return vm_map_can_reuse(map, start, end);
default:
return(KERN_INVALID_ARGUMENT);
}
return(KERN_SUCCESS);
}
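/*
 * vm_map_willneed:
 *
 * madvise(WILLNEED): issue an asynchronous, "stealth" read-ahead
 * request to the pager for each readable, file-backed sub-range so the
 * data is resident by the time it is touched. Pager failures are not
 * reported back to the caller.
 */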
static kern_return_t
vm_map_willneed(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end
)
{
vm_map_entry_t entry;
vm_object_t object;
memory_object_t pager;
struct vm_object_fault_info fault_info;
kern_return_t kr;
vm_object_size_t len;
vm_object_offset_t offset;
fault_info.interruptible = THREAD_UNINT;
fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
fault_info.no_cache = FALSE;
fault_info.stealth = TRUE;
fault_info.io_sync = FALSE;
fault_info.cs_bypass = FALSE;
fault_info.mark_zf_absent = FALSE;
vm_map_lock_read(map);
if (! vm_map_range_check(map, start, end, &entry)) {
vm_map_unlock_read(map);
return KERN_INVALID_ADDRESS;
}
for (; entry != vm_map_to_entry(map) && start < end; ) {
offset = (start - entry->vme_start) + entry->offset;
len = MIN(entry->vme_end - start, end - start);
if ((vm_size_t) len != len) {
/* "len" doesn't fit in a vm_size_t: clamp to the largest page-aligned value */
len = (vm_size_t) (0 - PAGE_SIZE);
}
fault_info.cluster_size = (vm_size_t) len;
fault_info.lo_offset = offset;
fault_info.hi_offset = offset + len;
fault_info.user_tag = entry->alias;
if ((entry->protection & VM_PROT_READ) == 0) {
entry = entry->vme_next;
start = entry->vme_start;
continue;
}
if ((object = find_vnode_object(entry)) == VM_OBJECT_NULL) {
entry = entry->vme_next;
start = entry->vme_start;
continue;
}
vm_map_unlock_read(map);
vm_object_paging_begin(object);
pager = object->pager;
vm_object_unlock(object);
kr = memory_object_data_request(
pager,
offset + object->paging_offset,
0,
VM_PROT_READ,
(memory_object_fault_info_t)&fault_info);
vm_object_lock(object);
vm_object_paging_end(object);
vm_object_unlock(object);
if (kr != KERN_SUCCESS) {
return KERN_SUCCESS;
}
start += len;
if (start >= end) {
return KERN_SUCCESS;
}
vm_map_lock_read(map);
if (! vm_map_lookup_entry(map, start, &entry)) {
vm_map_unlock_read(map);
return KERN_INVALID_ADDRESS;
}
}
vm_map_unlock_read(map);
return KERN_SUCCESS;
}
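/*
 * vm_map_entry_is_reusable:
 *
 * TRUE if the entry is a plain, private, unwired anonymous mapping
 * whose pages may safely take part in the reuse/reusable protocol.
 */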
static boolean_t
vm_map_entry_is_reusable(
vm_map_entry_t entry)
{
vm_object_t object;
if (entry->is_shared ||
entry->is_sub_map ||
entry->in_transition ||
entry->protection != VM_PROT_DEFAULT ||
entry->max_protection != VM_PROT_ALL ||
entry->inheritance != VM_INHERIT_DEFAULT ||
entry->no_cache ||
entry->permanent ||
entry->superpage_size != 0 ||
entry->zero_wired_pages ||
entry->wired_count != 0 ||
entry->user_wired_count != 0) {
return FALSE;
}
object = entry->object.vm_object;
if (object == VM_OBJECT_NULL) {
return TRUE;
}
if (object->ref_count == 1 &&
object->wired_page_count == 0 &&
object->copy == VM_OBJECT_NULL &&
object->shadow == VM_OBJECT_NULL &&
object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
object->internal &&
!object->true_share &&
object->wimg_bits == VM_WIMG_USE_DEFAULT &&
!object->code_signed) {
return TRUE;
}
return FALSE;
}
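/*
 * vm_map_reuse_pages:
 *
 * madvise(REUSE): take the pages in the range back out of the
 * "reusable" state and update the malloc alias accordingly.
 */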
static kern_return_t
vm_map_reuse_pages(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end)
{
vm_map_entry_t entry;
vm_object_t object;
vm_object_offset_t start_offset, end_offset;
vm_map_lock_read(map);
if (!vm_map_range_check(map, start, end, &entry)) {
vm_map_unlock_read(map);
vm_page_stats_reusable.reuse_pages_failure++;
return KERN_INVALID_ADDRESS;
}
for (; entry != vm_map_to_entry(map) && entry->vme_start < end;
entry = entry->vme_next) {
if (! vm_map_entry_is_reusable(entry)) {
vm_map_unlock_read(map);
vm_page_stats_reusable.reuse_pages_failure++;
return KERN_INVALID_ADDRESS;
}
if (entry->vme_start < start) {
start_offset = start - entry->vme_start;
} else {
start_offset = 0;
}
end_offset = MIN(end, entry->vme_end) - entry->vme_start;
start_offset += entry->offset;
end_offset += entry->offset;
object = entry->object.vm_object;
if (object != VM_OBJECT_NULL) {
vm_object_lock(object);
vm_object_reuse_pages(object, start_offset, end_offset,
TRUE);
vm_object_unlock(object);
}
if (entry->alias == VM_MEMORY_MALLOC_LARGE_REUSABLE) {
entry->alias = VM_MEMORY_MALLOC_LARGE_REUSED;
}
}
vm_map_unlock_read(map);
vm_page_stats_reusable.reuse_pages_success++;
return KERN_SUCCESS;
}
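/*
 * vm_map_reusable_pages:
 *
 * madvise(REUSABLE): mark the pages in the range as reusable, i.e.
 * discardable by the pageout daemon without being pushed to backing
 * store. Pages of shared objects are only counted, not deactivated.
 */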
static kern_return_t
vm_map_reusable_pages(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end)
{
vm_map_entry_t entry;
vm_object_t object;
vm_object_offset_t start_offset, end_offset;
vm_map_lock_read(map);
if (!vm_map_range_check(map, start, end, &entry)) {
vm_map_unlock_read(map);
vm_page_stats_reusable.reusable_pages_failure++;
return KERN_INVALID_ADDRESS;
}
for (; entry != vm_map_to_entry(map) && entry->vme_start < end;
entry = entry->vme_next) {
int kill_pages = 0;
if (! vm_map_entry_is_reusable(entry)) {
vm_map_unlock_read(map);
vm_page_stats_reusable.reusable_pages_failure++;
return KERN_INVALID_ADDRESS;
}
if (entry->vme_start < start) {
start_offset = start - entry->vme_start;
} else {
start_offset = 0;
}
end_offset = MIN(end, entry->vme_end) - entry->vme_start;
start_offset += entry->offset;
end_offset += entry->offset;
object = entry->object.vm_object;
if (object == VM_OBJECT_NULL)
continue;
vm_object_lock(object);
if (object->ref_count == 1 && !object->shadow)
kill_pages = 1;
else
kill_pages = -1;
if (kill_pages != -1) {
vm_object_deactivate_pages(object,
start_offset,
end_offset - start_offset,
kill_pages,
TRUE );
} else {
vm_page_stats_reusable.reusable_pages_shared++;
}
vm_object_unlock(object);
if (entry->alias == VM_MEMORY_MALLOC_LARGE ||
entry->alias == VM_MEMORY_MALLOC_LARGE_REUSED) {
entry->alias = VM_MEMORY_MALLOC_LARGE_REUSABLE;
}
}
vm_map_unlock_read(map);
vm_page_stats_reusable.reusable_pages_success++;
return KERN_SUCCESS;
}
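/*
 * vm_map_can_reuse:
 *
 * Check that the entire range consists of entries eligible for the
 * reuse/reusable protocol, without changing anything.
 */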
static kern_return_t
vm_map_can_reuse(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end)
{
vm_map_entry_t entry;
vm_map_lock_read(map);
if (!vm_map_range_check(map, start, end, &entry)) {
vm_map_unlock_read(map);
vm_page_stats_reusable.can_reuse_failure++;
return KERN_INVALID_ADDRESS;
}
for (; entry != vm_map_to_entry(map) && entry->vme_start < end;
entry = entry->vme_next) {
if (! vm_map_entry_is_reusable(entry)) {
vm_map_unlock_read(map);
vm_page_stats_reusable.can_reuse_failure++;
return KERN_INVALID_ADDRESS;
}
}
vm_map_unlock_read(map);
vm_page_stats_reusable.can_reuse_success++;
return KERN_SUCCESS;
}
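/*
 * In-kernel debugger (ddb) support: routines to print maps, map
 * entries, and copy objects from within MACH_KDB.
 */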
#include <mach_kdb.h>
#if MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>
#define printf db_printf
extern void vm_map_links_print(
struct vm_map_links *links);
extern void vm_map_header_print(
struct vm_map_header *header);
extern void vm_map_entry_print(
vm_map_entry_t entry);
extern void vm_follow_entry(
vm_map_entry_t entry);
extern void vm_follow_map(
vm_map_t map);
void
vm_map_links_print(
struct vm_map_links *links)
{
iprintf("prev = %08X next = %08X start = %016llX end = %016llX\n",
links->prev,
links->next,
(unsigned long long)links->start,
(unsigned long long)links->end);
}
void
vm_map_header_print(
struct vm_map_header *header)
{
vm_map_links_print(&header->links);
iprintf("nentries = %08X, %sentries_pageable\n",
header->nentries,
(header->entries_pageable ? "" : "!"));
}
void
vm_follow_entry(
vm_map_entry_t entry)
{
int shadows;
iprintf("map entry %08X\n", entry);
db_indent += 2;
shadows = vm_follow_object(entry->object.vm_object);
iprintf("Total objects : %d\n",shadows);
db_indent -= 2;
}
void
vm_map_entry_print(
register vm_map_entry_t entry)
{
static const char *inheritance_name[4] =
{ "share", "copy", "none", "?"};
static const char *behavior_name[4] =
{ "dflt", "rand", "seqtl", "rseqntl" };
iprintf("map entry %08X - prev = %08X next = %08X\n", entry, entry->vme_prev, entry->vme_next);
db_indent += 2;
vm_map_links_print(&entry->links);
iprintf("start = %016llX end = %016llX - prot=%x/%x/%s\n",
(unsigned long long)entry->vme_start,
(unsigned long long)entry->vme_end,
entry->protection,
entry->max_protection,
inheritance_name[(entry->inheritance & 0x3)]);
iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
behavior_name[(entry->behavior & 0x3)],
entry->wired_count,
entry->user_wired_count);
iprintf("%sin_transition, %sneeds_wakeup\n",
(entry->in_transition ? "" : "!"),
(entry->needs_wakeup ? "" : "!"));
if (entry->is_sub_map) {
iprintf("submap = %08X - offset = %016llX\n",
entry->object.sub_map,
(unsigned long long)entry->offset);
} else {
iprintf("object = %08X offset = %016llX - ",
entry->object.vm_object,
(unsigned long long)entry->offset);
printf("%sis_shared, %sneeds_copy\n",
(entry->is_shared ? "" : "!"),
(entry->needs_copy ? "" : "!"));
}
db_indent -= 2;
}
void
vm_follow_map(
vm_map_t map)
{
register vm_map_entry_t entry;
iprintf("task map %08X\n", map);
db_indent += 2;
for (entry = vm_map_first_entry(map);
entry && entry != vm_map_to_entry(map);
entry = entry->vme_next) {
vm_follow_entry(entry);
}
db_indent -= 2;
}
void
vm_map_print(
db_addr_t inmap)
{
register vm_map_entry_t entry;
vm_map_t map;
#if TASK_SWAPPER
char *swstate;
#endif
map = (vm_map_t)(long)
inmap;
iprintf("task map %08X\n", map);
db_indent += 2;
vm_map_header_print(&map->hdr);
iprintf("pmap = %08X size = %08X ref = %d hint = %08X first_free = %08X\n",
map->pmap,
map->size,
map->ref_count,
map->hint,
map->first_free);
iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
(map->wait_for_space ? "" : "!"),
(map->wiring_required ? "" : "!"),
map->timestamp);
#if TASK_SWAPPER
switch (map->sw_state) {
case MAP_SW_IN:
swstate = "SW_IN";
break;
case MAP_SW_OUT:
swstate = "SW_OUT";
break;
default:
swstate = "????";
break;
}
iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
#endif
for (entry = vm_map_first_entry(map);
entry && entry != vm_map_to_entry(map);
entry = entry->vme_next) {
vm_map_entry_print(entry);
}
db_indent -= 2;
}
void
vm_map_copy_print(
db_addr_t incopy)
{
vm_map_copy_t copy;
vm_map_entry_t entry;
copy = (vm_map_copy_t)(long)
incopy;
printf("copy object 0x%x\n", copy);
db_indent += 2;
iprintf("type=%d", copy->type);
switch (copy->type) {
case VM_MAP_COPY_ENTRY_LIST:
printf("[entry_list]");
break;
case VM_MAP_COPY_OBJECT:
printf("[object]");
break;
case VM_MAP_COPY_KERNEL_BUFFER:
printf("[kernel_buffer]");
break;
default:
printf("[bad type]");
break;
}
printf(", offset=0x%llx", (unsigned long long)copy->offset);
printf(", size=0x%x\n", copy->size);
switch (copy->type) {
case VM_MAP_COPY_ENTRY_LIST:
vm_map_header_print(&copy->cpy_hdr);
for (entry = vm_map_copy_first_entry(copy);
entry && entry != vm_map_copy_to_entry(copy);
entry = entry->vme_next) {
vm_map_entry_print(entry);
}
break;
case VM_MAP_COPY_OBJECT:
iprintf("object=0x%x\n", copy->cpy_object);
break;
case VM_MAP_COPY_KERNEL_BUFFER:
iprintf("kernel buffer=0x%x", copy->cpy_kdata);
printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
break;
}
db_indent -=2;
}
vm_map_size_t
db_vm_map_total_size(
db_addr_t inmap)
{
vm_map_entry_t entry;
vm_map_size_t total;
vm_map_t map;
map = (vm_map_t)(long)
inmap;
total = 0;
for (entry = vm_map_first_entry(map);
entry != vm_map_to_entry(map);
entry = entry->vme_next) {
total += entry->vme_end - entry->vme_start;
}
return total;
}
#endif
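/*
 * vm_map_entry_insert:
 *
 * Allocate a map entry describing [start, end) with the given
 * attributes, link it into the map after insp_entry, grow the map's
 * size, and return the new entry. The map must be locked.
 */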
vm_map_entry_t
vm_map_entry_insert(
vm_map_t map,
vm_map_entry_t insp_entry,
vm_map_offset_t start,
vm_map_offset_t end,
vm_object_t object,
vm_object_offset_t offset,
boolean_t needs_copy,
boolean_t is_shared,
boolean_t in_transition,
vm_prot_t cur_protection,
vm_prot_t max_protection,
vm_behavior_t behavior,
vm_inherit_t inheritance,
unsigned wired_count,
boolean_t no_cache,
boolean_t permanent,
unsigned int superpage_size)
{
vm_map_entry_t new_entry;
assert(insp_entry != (vm_map_entry_t)0);
new_entry = vm_map_entry_create(map);
new_entry->vme_start = start;
new_entry->vme_end = end;
assert(page_aligned(new_entry->vme_start));
assert(page_aligned(new_entry->vme_end));
new_entry->object.vm_object = object;
new_entry->offset = offset;
new_entry->is_shared = is_shared;
new_entry->is_sub_map = FALSE;
new_entry->needs_copy = needs_copy;
new_entry->in_transition = in_transition;
new_entry->needs_wakeup = FALSE;
new_entry->inheritance = inheritance;
new_entry->protection = cur_protection;
new_entry->max_protection = max_protection;
new_entry->behavior = behavior;
new_entry->wired_count = wired_count;
new_entry->user_wired_count = 0;
new_entry->use_pmap = FALSE;
new_entry->alias = 0;
new_entry->zero_wired_pages = FALSE;
new_entry->no_cache = no_cache;
new_entry->permanent = permanent;
new_entry->superpage_size = superpage_size;
new_entry->used_for_jit = FALSE;
vm_map_store_entry_link(map, insp_entry, new_entry);
map->size += end - start;
SAVE_HINT_MAP_WRITE(map, new_entry);
return new_entry;
}
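/*
 * vm_map_remap_extract:
 *
 * Build, in map_header, a chain of entries describing [addr, addr+size)
 * of the source map for vm_map_remap(), either sharing the underlying
 * objects or copying them (quickly, slowly, or strategically, depending
 * on wiring and sharing). The applicable protections are returned via
 * cur_protection/max_protection.
 */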
static kern_return_t
vm_map_remap_extract(
vm_map_t map,
vm_map_offset_t addr,
vm_map_size_t size,
boolean_t copy,
struct vm_map_header *map_header,
vm_prot_t *cur_protection,
vm_prot_t *max_protection,
vm_inherit_t inheritance,
boolean_t pageable)
{
kern_return_t result;
vm_map_size_t mapped_size;
vm_map_size_t tmp_size;
vm_map_entry_t src_entry;
vm_map_entry_t new_entry;
vm_object_offset_t offset;
vm_map_offset_t map_address;
vm_map_offset_t src_start;
vm_map_offset_t src_end;
vm_object_t object;
vm_map_version_t version;
boolean_t src_needs_copy;
boolean_t new_entry_needs_copy;
assert(map != VM_MAP_NULL);
assert(size != 0 && size == vm_map_round_page(size));
assert(inheritance == VM_INHERIT_NONE ||
inheritance == VM_INHERIT_COPY ||
inheritance == VM_INHERIT_SHARE);
src_start = vm_map_trunc_page(addr);
src_end = vm_map_round_page(src_start + size);
map_header->links.next = (struct vm_map_entry *)&map_header->links;
map_header->links.prev = (struct vm_map_entry *)&map_header->links;
map_header->nentries = 0;
map_header->entries_pageable = pageable;
vm_map_store_init( map_header );
*cur_protection = VM_PROT_ALL;
*max_protection = VM_PROT_ALL;
map_address = 0;
mapped_size = 0;
result = KERN_SUCCESS;
vm_map_lock(map);
while (mapped_size != size) {
vm_map_size_t entry_size;
if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
result = KERN_INVALID_ADDRESS;
break;
}
if (src_start < src_entry->vme_start ||
(mapped_size && src_start != src_entry->vme_start)) {
result = KERN_INVALID_ADDRESS;
break;
}
tmp_size = size - mapped_size;
if (src_end > src_entry->vme_end)
tmp_size -= (src_end - src_entry->vme_end);
entry_size = (vm_map_size_t)(src_entry->vme_end -
src_entry->vme_start);
if(src_entry->is_sub_map) {
vm_map_reference(src_entry->object.sub_map);
object = VM_OBJECT_NULL;
} else {
object = src_entry->object.vm_object;
if (object == VM_OBJECT_NULL) {
object = vm_object_allocate(entry_size);
src_entry->offset = 0;
src_entry->object.vm_object = object;
} else if (object->copy_strategy !=
MEMORY_OBJECT_COPY_SYMMETRIC) {
assert(!src_entry->needs_copy);
} else if (src_entry->needs_copy || object->shadowed ||
(object->internal && !object->true_share &&
!src_entry->is_shared &&
object->vo_size > entry_size)) {
vm_object_shadow(&src_entry->object.vm_object,
&src_entry->offset,
entry_size);
if (!src_entry->needs_copy &&
(src_entry->protection & VM_PROT_WRITE)) {
vm_prot_t prot;
prot = src_entry->protection & ~VM_PROT_WRITE;
if (override_nx(map, src_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
if(map->mapped) {
vm_object_pmap_protect(
src_entry->object.vm_object,
src_entry->offset,
entry_size,
PMAP_NULL,
src_entry->vme_start,
prot);
} else {
pmap_protect(vm_map_pmap(map),
src_entry->vme_start,
src_entry->vme_end,
prot);
}
}
object = src_entry->object.vm_object;
src_entry->needs_copy = FALSE;
}
vm_object_lock(object);
vm_object_reference_locked(object);
if (object->copy_strategy ==
MEMORY_OBJECT_COPY_SYMMETRIC) {
object->copy_strategy =
MEMORY_OBJECT_COPY_DELAY;
}
vm_object_unlock(object);
}
offset = src_entry->offset + (src_start - src_entry->vme_start);
new_entry = _vm_map_entry_create(map_header);
vm_map_entry_copy(new_entry, src_entry);
new_entry->use_pmap = FALSE;
new_entry->vme_start = map_address;
new_entry->vme_end = map_address + tmp_size;
new_entry->inheritance = inheritance;
new_entry->offset = offset;
RestartCopy:
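/*
 * Three ways to satisfy the mapping: share the source entry
 * outright (!copy), set up a deferred copy-on-write copy via
 * vm_object_copy_quickly(), or fall back to copying the data
 * now (slowly for wired entries, strategically otherwise).
 */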
if (!copy) {
src_entry->is_shared = TRUE;
new_entry->is_shared = TRUE;
if (!(new_entry->is_sub_map))
new_entry->needs_copy = FALSE;
} else if (src_entry->is_sub_map) {
new_entry->needs_copy = TRUE;
object = VM_OBJECT_NULL;
} else if (src_entry->wired_count == 0 &&
vm_object_copy_quickly(&new_entry->object.vm_object,
new_entry->offset,
(new_entry->vme_end -
new_entry->vme_start),
&src_needs_copy,
&new_entry_needs_copy)) {
new_entry->needs_copy = new_entry_needs_copy;
new_entry->is_shared = FALSE;
if (src_needs_copy && !src_entry->needs_copy) {
vm_prot_t prot;
prot = src_entry->protection & ~VM_PROT_WRITE;
if (override_nx(map, src_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
vm_object_pmap_protect(object,
offset,
entry_size,
((src_entry->is_shared
|| map->mapped) ?
PMAP_NULL : map->pmap),
src_entry->vme_start,
prot);
src_entry->needs_copy = TRUE;
}
vm_object_deallocate(object);
} else {
new_entry->is_shared = FALSE;
version.main_timestamp = map->timestamp;
vm_map_unlock(map);
if (src_entry->wired_count > 0) {
vm_object_lock(object);
result = vm_object_copy_slowly(
object,
offset,
entry_size,
THREAD_UNINT,
&new_entry->object.vm_object);
new_entry->offset = 0;
new_entry->needs_copy = FALSE;
} else {
result = vm_object_copy_strategically(
object,
offset,
entry_size,
&new_entry->object.vm_object,
&new_entry->offset,
&new_entry_needs_copy);
new_entry->needs_copy = new_entry_needs_copy;
}
vm_object_deallocate(object);
if (result != KERN_SUCCESS &&
result != KERN_MEMORY_RESTART_COPY) {
_vm_map_entry_dispose(map_header, new_entry);
break;
}
vm_map_lock(map);
if (version.main_timestamp + 1 != map->timestamp) {
vm_object_deallocate(new_entry->object.vm_object);
_vm_map_entry_dispose(map_header, new_entry);
if (result == KERN_MEMORY_RESTART_COPY)
result = KERN_SUCCESS;
continue;
}
if (result == KERN_MEMORY_RESTART_COPY) {
vm_object_reference(object);
goto RestartCopy;
}
}
_vm_map_store_entry_link(map_header,
map_header->links.prev, new_entry);
if( !src_entry->is_sub_map ) {
*cur_protection &= src_entry->protection;
*max_protection &= src_entry->max_protection;
}
map_address += tmp_size;
mapped_size += tmp_size;
src_start += tmp_size;
}
vm_map_unlock(map);
if (result != KERN_SUCCESS) {
for (src_entry = map_header->links.next;
src_entry != (struct vm_map_entry *)&map_header->links;
src_entry = new_entry) {
new_entry = src_entry->vme_next;
_vm_map_store_entry_unlink(map_header, src_entry);
vm_object_deallocate(src_entry->object.vm_object);
_vm_map_entry_dispose(map_header, src_entry);
}
}
return result;
}
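/*
 * vm_map_remap:
 *
 * Map in a range of "src_map", either shared or copied, at an
 * address in "target_map" chosen per "flags" and "mask".  The
 * extracted entries are relocated to the chosen address and
 * linked in; on failure they are disposed of instead.  If the
 * target map requires wiring, the new range is wired before
 * returning.
 */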
kern_return_t
vm_map_remap(
vm_map_t target_map,
vm_map_address_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_t src_map,
vm_map_offset_t memory_address,
boolean_t copy,
vm_prot_t *cur_protection,
vm_prot_t *max_protection,
vm_inherit_t inheritance)
{
kern_return_t result;
vm_map_entry_t entry;
vm_map_entry_t insp_entry = VM_MAP_ENTRY_NULL;
vm_map_entry_t new_entry;
struct vm_map_header map_header;
if (target_map == VM_MAP_NULL)
return KERN_INVALID_ARGUMENT;
switch (inheritance) {
case VM_INHERIT_NONE:
case VM_INHERIT_COPY:
case VM_INHERIT_SHARE:
if (size != 0 && src_map != VM_MAP_NULL)
break;
default:
return KERN_INVALID_ARGUMENT;
}
size = vm_map_round_page(size);
result = vm_map_remap_extract(src_map, memory_address,
size, copy, &map_header,
cur_protection,
max_protection,
inheritance,
target_map->hdr.entries_pageable);
if (result != KERN_SUCCESS) {
return result;
}
*address = vm_map_trunc_page(*address);
vm_map_lock(target_map);
result = vm_map_remap_range_allocate(target_map, address, size,
mask, flags, &insp_entry);
for (entry = map_header.links.next;
entry != (struct vm_map_entry *)&map_header.links;
entry = new_entry) {
new_entry = entry->vme_next;
_vm_map_store_entry_unlink(&map_header, entry);
if (result == KERN_SUCCESS) {
entry->vme_start += *address;
entry->vme_end += *address;
vm_map_store_entry_link(target_map, insp_entry, entry);
insp_entry = entry;
} else {
if (!entry->is_sub_map) {
vm_object_deallocate(entry->object.vm_object);
} else {
vm_map_deallocate(entry->object.sub_map);
}
_vm_map_entry_dispose(&map_header, entry);
}
}
/*
 * insp_entry is still VM_MAP_ENTRY_NULL if the range allocation
 * failed above, so only update the highest-entry hint when
 * entries were actually linked in.
 */
if (target_map->disable_vmentry_reuse == TRUE &&
insp_entry != VM_MAP_ENTRY_NULL) {
if (target_map->highest_entry_end < insp_entry->vme_end) {
target_map->highest_entry_end = insp_entry->vme_end;
}
}
if (result == KERN_SUCCESS) {
target_map->size += size;
SAVE_HINT_MAP_WRITE(target_map, insp_entry);
}
vm_map_unlock(target_map);
if (result == KERN_SUCCESS && target_map->wiring_required)
result = vm_map_wire(target_map, *address,
*address + size, *cur_protection, TRUE);
return result;
}
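/*
 * vm_map_remap_range_allocate:
 *
 * Find or validate an address range of "size" bytes in "map" for
 * a remap, honoring "mask" alignment.  With VM_FLAGS_ANYWHERE a
 * free range is searched for (sleeping for space if the map
 * allows it); otherwise the fixed range is checked, optionally
 * deleting any existing mappings when VM_FLAGS_OVERWRITE is set.
 * Returns, in *map_entry, the entry after which the new range
 * should be linked.  The caller holds the map lock.
 */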
static kern_return_t
vm_map_remap_range_allocate(
vm_map_t map,
vm_map_address_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_entry_t *map_entry)
{
vm_map_entry_t entry;
vm_map_offset_t start;
vm_map_offset_t end;
kern_return_t kr;
StartAgain: ;
start = *address;
if (flags & VM_FLAGS_ANYWHERE)
{
if (start < map->min_offset)
start = map->min_offset;
if (start > map->max_offset)
return(KERN_NO_SPACE);
if( map->disable_vmentry_reuse == TRUE) {
VM_MAP_HIGHEST_ENTRY(map, entry, start);
} else {
assert(first_free_is_valid(map));
if (start == map->min_offset) {
if ((entry = map->first_free) != vm_map_to_entry(map))
start = entry->vme_end;
} else {
vm_map_entry_t tmp_entry;
if (vm_map_lookup_entry(map, start, &tmp_entry))
start = tmp_entry->vme_end;
entry = tmp_entry;
}
}
while (TRUE) {
register vm_map_entry_t next;
end = ((start + mask) & ~mask);
if (end < start)
return(KERN_NO_SPACE);
start = end;
end += size;
if ((end > map->max_offset) || (end < start)) {
if (map->wait_for_space) {
if (size <= (map->max_offset -
map->min_offset)) {
assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
vm_map_unlock(map);
thread_block(THREAD_CONTINUE_NULL);
vm_map_lock(map);
goto StartAgain;
}
}
return(KERN_NO_SPACE);
}
next = entry->vme_next;
if (next == vm_map_to_entry(map))
break;
if (next->vme_start >= end)
break;
entry = next;
start = entry->vme_end;
}
*address = start;
} else {
vm_map_entry_t temp_entry;
if ((start & mask) != 0)
return(KERN_NO_SPACE);
end = start + size;
if ((start < map->min_offset) ||
(end > map->max_offset) ||
(start >= end)) {
return(KERN_INVALID_ADDRESS);
}
if (flags & VM_FLAGS_OVERWRITE) {
vm_map_t zap_map;
zap_map = vm_map_create(PMAP_NULL,
start,
end - start,
map->hdr.entries_pageable);
if (zap_map == VM_MAP_NULL) {
return KERN_RESOURCE_SHORTAGE;
}
kr = vm_map_delete(map, start, end,
VM_MAP_REMOVE_SAVE_ENTRIES,
zap_map);
if (kr == KERN_SUCCESS) {
vm_map_destroy(zap_map,
VM_MAP_REMOVE_NO_PMAP_CLEANUP);
zap_map = VM_MAP_NULL;
}
}
if (vm_map_lookup_entry(map, start, &temp_entry))
return(KERN_NO_SPACE);
entry = temp_entry;
if ((entry->vme_next != vm_map_to_entry(map)) &&
(entry->vme_next->vme_start < end))
return(KERN_NO_SPACE);
}
*map_entry = entry;
return(KERN_SUCCESS);
}
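/*
 * vm_map_switch:
 *
 * Switch the current thread onto "map" for user address
 * translation and return the previously active map.  Preemption
 * is disabled around the PMAP_SWITCH_USER() call; the caller is
 * expected to switch back when done.
 */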
vm_map_t
vm_map_switch(
vm_map_t map)
{
int mycpu;
thread_t thread = current_thread();
vm_map_t oldmap = thread->map;
mp_disable_preemption();
mycpu = cpu_number();
PMAP_SWITCH_USER(thread, map, mycpu);
mp_enable_preemption();
return(oldmap);
}
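/*
 * vm_map_write_user:
 *
 * Copy out data from a kernel buffer into an address range of the
 * given user map.  If "map" is not the current map, the thread
 * temporarily switches onto it around the copyout.
 *
 * Illustrative call (names are hypothetical):
 *
 *	kr = vm_map_write_user(task->map, kbuf, user_addr, len);
 *	if (kr != KERN_SUCCESS)
 *		... user_addr was not writable for len bytes ...
 */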
kern_return_t
vm_map_write_user(
vm_map_t map,
void *src_p,
vm_map_address_t dst_addr,
vm_size_t size)
{
kern_return_t kr = KERN_SUCCESS;
if(current_map() == map) {
if (copyout(src_p, dst_addr, size)) {
kr = KERN_INVALID_ADDRESS;
}
} else {
vm_map_t oldmap;
vm_map_reference(map);
oldmap = vm_map_switch(map);
if (copyout(src_p, dst_addr, size)) {
kr = KERN_INVALID_ADDRESS;
}
vm_map_switch(oldmap);
vm_map_deallocate(map);
}
return kr;
}
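/*
 * vm_map_read_user:
 *
 * Copy in data from an address range of the given user map into a
 * kernel buffer.  If "map" is not the current map, the thread
 * temporarily switches onto it around the copyin.
 */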
kern_return_t
vm_map_read_user(
vm_map_t map,
vm_map_address_t src_addr,
void *dst_p,
vm_size_t size)
{
kern_return_t kr = KERN_SUCCESS;
if(current_map() == map) {
if (copyin(src_addr, dst_p, size)) {
kr = KERN_INVALID_ADDRESS;
}
} else {
vm_map_t oldmap;
vm_map_reference(map);
oldmap = vm_map_switch(map);
if (copyin(src_addr, dst_p, size)) {
kr = KERN_INVALID_ADDRESS;
}
vm_map_switch(oldmap);
vm_map_deallocate(map);
}
return kr;
}
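/*
 * vm_map_check_protection:
 *
 * Assert that the target map allows the specified privilege on
 * the entire address region given.  The region must be
 * contiguously mapped and every entry in it must allow
 * "protection".
 */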
boolean_t
vm_map_check_protection(vm_map_t map, vm_map_offset_t start,
vm_map_offset_t end, vm_prot_t protection)
{
vm_map_entry_t entry;
vm_map_entry_t tmp_entry;
vm_map_lock(map);
if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
{
vm_map_unlock(map);
return (FALSE);
}
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
vm_map_unlock(map);
return(FALSE);
}
entry = tmp_entry;
while (start < end) {
if (entry == vm_map_to_entry(map)) {
vm_map_unlock(map);
return(FALSE);
}
if (start < entry->vme_start) {
vm_map_unlock(map);
return(FALSE);
}
if ((entry->protection & protection) != protection) {
vm_map_unlock(map);
return(FALSE);
}
start = entry->vme_end;
entry = entry->vme_next;
}
vm_map_unlock(map);
return(TRUE);
}
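/*
 * vm_map_purgable_control:
 *
 * Set or query the purgability of the object mapped at "address",
 * or, with VM_PURGABLE_PURGE_ALL, purge every volatile object in
 * the system.  The entry must be writable and must map its
 * backing object exactly (offset 0 and matching size), since
 * purgability applies to whole objects only.
 */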
kern_return_t
vm_map_purgable_control(
vm_map_t map,
vm_map_offset_t address,
vm_purgable_t control,
int *state)
{
vm_map_entry_t entry;
vm_object_t object;
kern_return_t kr;
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
if (control != VM_PURGABLE_SET_STATE &&
control != VM_PURGABLE_GET_STATE &&
control != VM_PURGABLE_PURGE_ALL)
return(KERN_INVALID_ARGUMENT);
if (control == VM_PURGABLE_PURGE_ALL) {
vm_purgeable_object_purge_all();
return KERN_SUCCESS;
}
if (control == VM_PURGABLE_SET_STATE &&
(((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
return(KERN_INVALID_ARGUMENT);
vm_map_lock_read(map);
if (!vm_map_lookup_entry(map, address, &entry) || entry->is_sub_map) {
vm_map_unlock_read(map);
return(KERN_INVALID_ADDRESS);
}
if ((entry->protection & VM_PROT_WRITE) == 0) {
vm_map_unlock_read(map);
return(KERN_PROTECTION_FAILURE);
}
object = entry->object.vm_object;
if (object == VM_OBJECT_NULL) {
vm_map_unlock_read(map);
return KERN_INVALID_ARGUMENT;
}
vm_object_lock(object);
if (entry->offset != 0 ||
entry->vme_end - entry->vme_start != object->vo_size) {
vm_map_unlock_read(map);
vm_object_unlock(object);
return KERN_INVALID_ARGUMENT;
}
vm_map_unlock_read(map);
kr = vm_object_purgable_control(object, control, state);
vm_object_unlock(object);
return kr;
}
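/*
 * vm_map_page_query_internal:
 *
 * Thin wrapper around vm_map_page_info(VM_PAGE_INFO_BASIC) that
 * returns only the page disposition and reference count, zeroing
 * both on failure.
 *
 * Sketch of a hypothetical caller:
 *
 *	int disp, refs;
 *	if (vm_map_page_query_internal(map, addr, &disp, &refs) ==
 *	    KERN_SUCCESS && (disp & VM_PAGE_QUERY_PAGE_PRESENT))
 *		... the page at "addr" is resident ...
 */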
kern_return_t
vm_map_page_query_internal(
vm_map_t target_map,
vm_map_offset_t offset,
int *disposition,
int *ref_count)
{
kern_return_t kr;
vm_page_info_basic_data_t info;
mach_msg_type_number_t count;
count = VM_PAGE_INFO_BASIC_COUNT;
kr = vm_map_page_info(target_map,
offset,
VM_PAGE_INFO_BASIC,
(vm_page_info_t) &info,
&count);
if (kr == KERN_SUCCESS) {
*disposition = info.disposition;
*ref_count = info.ref_count;
} else {
*disposition = 0;
*ref_count = 0;
}
return kr;
}
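/*
 * vm_map_page_info:
 *
 * Return information (disposition, reference count, object id,
 * offset, shadow depth) about the page mapped at "offset",
 * descending through submaps and shadow objects as needed.  Only
 * the VM_PAGE_INFO_BASIC flavor is supported.
 */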
kern_return_t
vm_map_page_info(
vm_map_t map,
vm_map_offset_t offset,
vm_page_info_flavor_t flavor,
vm_page_info_t info,
mach_msg_type_number_t *count)
{
vm_map_entry_t map_entry;
vm_object_t object;
vm_page_t m;
kern_return_t kr;
kern_return_t retval = KERN_SUCCESS;
boolean_t top_object;
int disposition;
int ref_count;
vm_object_id_t object_id;
vm_page_info_basic_t basic_info;
int depth;
vm_map_offset_t offset_in_page;
switch (flavor) {
case VM_PAGE_INFO_BASIC:
if (*count != VM_PAGE_INFO_BASIC_COUNT) {
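/*
 * Allow the count to be one element short: callers built
 * against an unpadded vm_page_info_basic_data structure pass
 * VM_PAGE_INFO_BASIC_COUNT - 1 and are still accepted, for
 * binary compatibility.
 */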
if (*count != VM_PAGE_INFO_BASIC_COUNT - 1)
return KERN_INVALID_ARGUMENT;
}
break;
default:
return KERN_INVALID_ARGUMENT;
}
disposition = 0;
ref_count = 0;
object_id = 0;
top_object = TRUE;
depth = 0;
retval = KERN_SUCCESS;
offset_in_page = offset & PAGE_MASK;
offset = vm_map_trunc_page(offset);
vm_map_lock_read(map);
for (;;) {
if (!vm_map_lookup_entry(map, offset, &map_entry)) {
vm_map_unlock_read(map);
return KERN_INVALID_ADDRESS;
}
offset -= map_entry->vme_start;
offset += map_entry->offset;
if (map_entry->is_sub_map) {
vm_map_t sub_map;
sub_map = map_entry->object.sub_map;
vm_map_lock_read(sub_map);
vm_map_unlock_read(map);
map = sub_map;
ref_count = MAX(ref_count, map->ref_count);
continue;
}
break;
}
object = map_entry->object.vm_object;
if (object == VM_OBJECT_NULL) {
vm_map_unlock_read(map);
goto done;
}
vm_object_lock(object);
vm_map_unlock_read(map);
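/*
 * Walk down the shadow chain looking for the page; stop when it
 * is found resident, known to be paged out, or when there are no
 * more shadow objects to consult.
 */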
for (;;) {
ref_count = MAX(ref_count, object->ref_count);
m = vm_page_lookup(object, offset);
if (m != VM_PAGE_NULL) {
disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
break;
} else {
#if MACH_PAGEMAP
if (object->existence_map) {
if (vm_external_state_get(object->existence_map,
offset) ==
VM_EXTERNAL_STATE_EXISTS) {
disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT;
break;
}
} else
#endif
{
if (object->internal &&
object->alive &&
!object->terminating &&
object->pager_ready) {
memory_object_t pager;
vm_object_paging_begin(object);
pager = object->pager;
vm_object_unlock(object);
kr = memory_object_data_request(
pager,
offset + object->paging_offset,
0,
VM_PROT_READ,
NULL);
vm_object_lock(object);
vm_object_paging_end(object);
if (kr == KERN_SUCCESS) {
disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT;
break;
}
}
}
if (object->shadow != VM_OBJECT_NULL) {
vm_object_t shadow;
offset += object->vo_shadow_offset;
shadow = object->shadow;
vm_object_lock(shadow);
vm_object_unlock(object);
object = shadow;
top_object = FALSE;
depth++;
} else {
break;
}
}
}
if (top_object == TRUE && object->shadow)
disposition |= VM_PAGE_QUERY_PAGE_COPIED;
if (! object->internal)
disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL;
if (m == VM_PAGE_NULL)
goto done_with_object;
if (m->fictitious) {
disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
goto done_with_object;
}
if (m->dirty || pmap_is_modified(m->phys_page))
disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
if (m->reference || pmap_is_referenced(m->phys_page))
disposition |= VM_PAGE_QUERY_PAGE_REF;
if (m->speculative)
disposition |= VM_PAGE_QUERY_PAGE_SPECULATIVE;
if (m->cs_validated)
disposition |= VM_PAGE_QUERY_PAGE_CS_VALIDATED;
if (m->cs_tainted)
disposition |= VM_PAGE_QUERY_PAGE_CS_TAINTED;
done_with_object:
vm_object_unlock(object);
done:
switch (flavor) {
case VM_PAGE_INFO_BASIC:
basic_info = (vm_page_info_basic_t) info;
basic_info->disposition = disposition;
basic_info->ref_count = ref_count;
basic_info->object_id = (vm_object_id_t) (uintptr_t) object;
basic_info->offset =
(memory_object_offset_t) offset + offset_in_page;
basic_info->depth = depth;
break;
}
return retval;
}
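/*
 * vm_map_msync:
 *
 * Synchronize the memory range [address, address + size) with its
 * backing store according to "sync_flags": flush and/or
 * invalidate cached pages, synchronously or asynchronously, or
 * deactivate/kill pages.  A synchronization request is queued per
 * affected object and, at the end, all outstanding requests are
 * waited for.  Returns KERN_INVALID_ADDRESS if VM_SYNC_CONTIGUOUS
 * is set and the range contains a hole.
 *
 * Illustrative call (hypothetical names), flushing a range
 * synchronously and failing on holes:
 *
 *	kr = vm_map_msync(map, addr, len,
 *			  VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
 */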
kern_return_t
vm_map_msync(
vm_map_t map,
vm_map_address_t address,
vm_map_size_t size,
vm_sync_t sync_flags)
{
msync_req_t msr;
msync_req_t new_msr;
queue_chain_t req_q;
vm_map_entry_t entry;
vm_map_size_t amount_left;
vm_object_offset_t offset;
boolean_t do_sync_req;
boolean_t had_hole = FALSE;
memory_object_t pager;
if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
(sync_flags & VM_SYNC_SYNCHRONOUS))
return(KERN_INVALID_ARGUMENT);
size = vm_map_round_page(address + size) - vm_map_trunc_page(address);
address = vm_map_trunc_page(address);
if (map == VM_MAP_NULL)
return(KERN_INVALID_TASK);
if (size == 0)
return(KERN_SUCCESS);
queue_init(&req_q);
amount_left = size;
while (amount_left > 0) {
vm_object_size_t flush_size;
vm_object_t object;
vm_map_lock(map);
if (!vm_map_lookup_entry(map,
vm_map_trunc_page(address), &entry)) {
vm_map_size_t skip;
had_hole = TRUE;
if (entry == vm_map_to_entry(map) &&
entry->vme_next == entry) {
vm_map_unlock(map);
break;
}
if ((map->hdr.nentries == 0) ||
(entry->vme_next->vme_start < address)) {
vm_map_unlock(map);
break;
}
skip = (entry->vme_next->vme_start - address);
if (skip >= amount_left)
amount_left = 0;
else
amount_left -= skip;
address = entry->vme_next->vme_start;
vm_map_unlock(map);
continue;
}
offset = address - entry->vme_start;
if (amount_left + entry->vme_start + offset > entry->vme_end) {
flush_size = entry->vme_end -
(entry->vme_start + offset);
} else {
flush_size = amount_left;
}
amount_left -= flush_size;
address += flush_size;
if (entry->is_sub_map == TRUE) {
vm_map_t local_map;
vm_map_offset_t local_offset;
local_map = entry->object.sub_map;
local_offset = entry->offset;
vm_map_unlock(map);
if (vm_map_msync(
local_map,
local_offset,
flush_size,
sync_flags) == KERN_INVALID_ADDRESS) {
had_hole = TRUE;
}
continue;
}
object = entry->object.vm_object;
if (object == VM_OBJECT_NULL) {
vm_map_unlock(map);
continue;
}
offset += entry->offset;
vm_object_lock(object);
if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
int kill_pages = 0;
boolean_t reusable_pages = FALSE;
if (sync_flags & VM_SYNC_KILLPAGES) {
if (object->ref_count == 1 && !object->shadow)
kill_pages = 1;
else
kill_pages = -1;
}
if (kill_pages != -1)
vm_object_deactivate_pages(object, offset,
(vm_object_size_t)flush_size, kill_pages, reusable_pages);
vm_object_unlock(object);
vm_map_unlock(map);
continue;
}
if ((object->pager == MEMORY_OBJECT_NULL) ||
(object->internal) || (object->private)) {
vm_object_unlock(object);
vm_map_unlock(map);
continue;
}
vm_object_reference_locked(object);
vm_object_unlock(object);
vm_map_unlock(map);
do_sync_req = vm_object_sync(object,
offset,
flush_size,
sync_flags & VM_SYNC_INVALIDATE,
((sync_flags & VM_SYNC_SYNCHRONOUS) ||
(sync_flags & VM_SYNC_ASYNCHRONOUS)),
sync_flags & VM_SYNC_SYNCHRONOUS);
if (!do_sync_req) {
if ((sync_flags & VM_SYNC_INVALIDATE) && object->resident_page_count == 0) {
vm_object_lock(object);
object->pages_created = 0;
object->pages_used = 0;
object->sequential = 0;
object->last_alloc = 0;
vm_object_unlock(object);
}
vm_object_deallocate(object);
continue;
}
msync_req_alloc(new_msr);
vm_object_lock(object);
offset += object->paging_offset;
new_msr->offset = offset;
new_msr->length = flush_size;
new_msr->object = object;
new_msr->flag = VM_MSYNC_SYNCHRONIZING;
re_iterate:
pager = object->pager;
if (pager == MEMORY_OBJECT_NULL) {
vm_object_unlock(object);
vm_object_deallocate(object);
continue;
}
queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
msr_lock(msr);
if (msr->flag == VM_MSYNC_SYNCHRONIZING &&
((offset >= msr->offset &&
offset < (msr->offset + msr->length)) ||
(msr->offset >= offset &&
msr->offset < (offset + flush_size))))
{
assert_wait((event_t) msr,THREAD_INTERRUPTIBLE);
msr_unlock(msr);
vm_object_unlock(object);
thread_block(THREAD_CONTINUE_NULL);
vm_object_lock(object);
goto re_iterate;
}
msr_unlock(msr);
}
queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
vm_object_paging_begin(object);
vm_object_unlock(object);
queue_enter(&req_q, new_msr, msync_req_t, req_q);
(void) memory_object_synchronize(
pager,
offset,
flush_size,
sync_flags & ~VM_SYNC_CONTIGUOUS);
vm_object_lock(object);
vm_object_paging_end(object);
vm_object_unlock(object);
}
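/*
 * Wait for all outstanding synchronization requests issued above
 * to be marked VM_MSYNC_DONE, then release them.
 */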
while (!queue_empty(&req_q)) {
msr = (msync_req_t)queue_first(&req_q);
msr_lock(msr);
while(msr->flag != VM_MSYNC_DONE) {
assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
msr_unlock(msr);
thread_block(THREAD_CONTINUE_NULL);
msr_lock(msr);
}
queue_remove(&req_q, msr, msync_req_t, req_q);
msr_unlock(msr);
vm_object_deallocate(msr->object);
msync_req_free(msr);
}
if (had_hole == TRUE && (sync_flags & VM_SYNC_CONTIGUOUS))
return(KERN_INVALID_ADDRESS);
return(KERN_SUCCESS);
}
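/*
 * convert_port_entry_to_map:
 *
 * Convert a port, which may name a memory entry, to a map.  Only
 * a writable submap-backed named entry yields its map (with a
 * swap reference taken); any other named entry yields
 * VM_MAP_NULL, and a non-named-entry port falls through to
 * convert_port_to_map().
 */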
vm_map_t
convert_port_entry_to_map(
ipc_port_t port)
{
vm_map_t map;
vm_named_entry_t named_entry;
uint32_t try_failed_count = 0;
if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
while(TRUE) {
ip_lock(port);
if(ip_active(port) && (ip_kotype(port)
== IKOT_NAMED_ENTRY)) {
named_entry =
(vm_named_entry_t)port->ip_kobject;
if (!(lck_mtx_try_lock(&(named_entry)->Lock))) {
ip_unlock(port);
try_failed_count++;
mutex_pause(try_failed_count);
continue;
}
named_entry->ref_count++;
lck_mtx_unlock(&(named_entry)->Lock);
ip_unlock(port);
if ((named_entry->is_sub_map) &&
(named_entry->protection
& VM_PROT_WRITE)) {
map = named_entry->backing.map;
} else {
mach_destroy_memory_entry(port);
return VM_MAP_NULL;
}
vm_map_reference_swap(map);
mach_destroy_memory_entry(port);
break;
}
else
return VM_MAP_NULL;
}
}
else
map = convert_port_to_map(port);
return map;
}
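/*
 * convert_port_entry_to_object:
 *
 * Convert a port naming a memory entry to the VM object backing
 * it.  Only a writable named entry that is neither a submap nor a
 * pager yields its object (with a reference taken); everything
 * else returns NULL.
 */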
vm_object_t
convert_port_entry_to_object(
ipc_port_t port)
{
vm_object_t object;
vm_named_entry_t named_entry;
uint32_t try_failed_count = 0;
if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
while(TRUE) {
ip_lock(port);
if(ip_active(port) && (ip_kotype(port)
== IKOT_NAMED_ENTRY)) {
named_entry =
(vm_named_entry_t)port->ip_kobject;
if (!(lck_mtx_try_lock(&(named_entry)->Lock))) {
ip_unlock(port);
try_failed_count++;
mutex_pause(try_failed_count);
continue;
}
named_entry->ref_count++;
lck_mtx_unlock(&(named_entry)->Lock);
ip_unlock(port);
if ((!named_entry->is_sub_map) &&
(!named_entry->is_pager) &&
(named_entry->protection
& VM_PROT_WRITE)) {
object = named_entry->backing.object;
} else {
mach_destroy_memory_entry(port);
return (vm_object_t)NULL;
}
vm_object_reference(named_entry->backing.object);
mach_destroy_memory_entry(port);
break;
}
else
return (vm_object_t)NULL;
}
} else {
return (vm_object_t)NULL;
}
return object;
}
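/*
 * current_map:
 *
 * Out-of-line version of the current_map_fast() macro, kept for
 * callers that need a real function.
 */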
#undef current_map
vm_map_t
current_map(void)
{
return (current_map_fast());
}
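/*
 * vm_map_reference:
 *
 * Take an additional reference on the given map.  With
 * TASK_SWAPPER the residence count is bumped as well and must not
 * exceed the reference count.
 */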
#undef vm_map_reference
void
vm_map_reference(
register vm_map_t map)
{
if (map == VM_MAP_NULL)
return;
lck_mtx_lock(&map->s_lock);
#if TASK_SWAPPER
assert(map->res_count > 0);
assert(map->ref_count >= map->res_count);
map->res_count++;
#endif
map->ref_count++;
lck_mtx_unlock(&map->s_lock);
}
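/*
 * vm_map_deallocate:
 *
 * Release a reference on the given map; when the last reference
 * is dropped the map is destroyed.
 */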
void
vm_map_deallocate(
register vm_map_t map)
{
unsigned int ref;
if (map == VM_MAP_NULL)
return;
lck_mtx_lock(&map->s_lock);
ref = --map->ref_count;
if (ref > 0) {
vm_map_res_deallocate(map);
lck_mtx_unlock(&map->s_lock);
return;
}
assert(map->ref_count == 0);
lck_mtx_unlock(&map->s_lock);
vm_map_destroy(map, VM_MAP_NO_FLAGS);
}
void
vm_map_disable_NX(vm_map_t map)
{
if (map == NULL)
return;
if (map->pmap == NULL)
return;
pmap_disable_NX(map->pmap);
}
void
vm_map_disallow_data_exec(vm_map_t map)
{
if (map == NULL)
return;
map->map_disallow_data_exec = TRUE;
}
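/*
 * Address-space sizing helpers: vm_map_set_32bit() and
 * vm_map_set_64bit() pick the maximum user address for the task's
 * word size, vm_compute_max_offset() returns that maximum without
 * touching a map, and the 4GB-pagezero routines query and manage
 * the i386 pmap's page-zero state.
 */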
void
vm_map_set_32bit(vm_map_t map)
{
map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS;
}
void
vm_map_set_64bit(vm_map_t map)
{
map->max_offset = (vm_map_offset_t)MACH_VM_MAX_ADDRESS;
}
vm_map_offset_t
vm_compute_max_offset(unsigned is64)
{
return (is64 ? (vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS);
}
boolean_t
vm_map_is_64bit(
vm_map_t map)
{
return map->max_offset > ((vm_map_offset_t)VM_MAX_ADDRESS);
}
boolean_t
vm_map_has_4GB_pagezero(
vm_map_t map)
{
return (map->min_offset >= 0x100000000ULL);
}
void
vm_map_set_4GB_pagezero(vm_map_t map)
{
#if defined(__i386__)
pmap_set_4GB_pagezero(map->pmap);
#else
#pragma unused(map)
#endif
}
void
vm_map_clear_4GB_pagezero(vm_map_t map)
{
#if defined(__i386__)
pmap_clear_4GB_pagezero(map->pmap);
#else
#pragma unused(map)
#endif
}
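/*
 * vm_map_raise_min_offset:
 *
 * Raise the map's minimum offset (the lowest mappable user
 * address).  Fails with KERN_INVALID_ADDRESS if the offset would
 * be lowered and with KERN_NO_SPACE if something is already
 * mapped below the new minimum.
 */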
kern_return_t
vm_map_raise_min_offset(
vm_map_t map,
vm_map_offset_t new_min_offset)
{
vm_map_entry_t first_entry;
new_min_offset = vm_map_round_page(new_min_offset);
vm_map_lock(map);
if (new_min_offset < map->min_offset) {
vm_map_unlock(map);
return KERN_INVALID_ADDRESS;
}
first_entry = vm_map_first_entry(map);
if (first_entry != vm_map_to_entry(map) &&
first_entry->vme_start < new_min_offset) {
vm_map_unlock(map);
return KERN_NO_SPACE;
}
map->min_offset = new_min_offset;
vm_map_unlock(map);
return KERN_SUCCESS;
}
void
vm_map_set_user_wire_limit(vm_map_t map,
vm_size_t limit)
{
map->user_wire_limit = limit;
}
void
vm_map_switch_protect(
vm_map_t map,
boolean_t val)
{
vm_map_lock(map);
map->switch_protect = val;
vm_map_unlock(map);
}
#if CONFIG_DYNAMIC_CODE_SIGNING
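/*
 * vm_map_sign:
 *
 * Dynamically mark every resident page in [start, end) as
 * code-sign validated.  Each page is disconnected from all pmaps
 * so that a subsequent write will fault, be noticed, and taint
 * the page.  The range must be covered by a single, object-backed,
 * non-submap entry; busy or unusual pages cause KERN_FAILURE.
 */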
kern_return_t
vm_map_sign(
vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end)
{
vm_map_entry_t entry;
vm_page_t m;
vm_object_t object;
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
vm_map_lock_read(map);
if (!vm_map_lookup_entry(map, start, &entry) || entry->is_sub_map) {
vm_map_unlock_read(map);
return(KERN_INVALID_ADDRESS);
}
if((entry->vme_start > start) || (entry->vme_end < end)) {
vm_map_unlock_read(map);
return(KERN_INVALID_ARGUMENT);
}
object = entry->object.vm_object;
if (object == VM_OBJECT_NULL) {
vm_map_unlock_read(map);
return KERN_INVALID_ARGUMENT;
}
vm_object_lock(object);
vm_map_unlock_read(map);
while(start < end) {
uint32_t refmod;
m = vm_page_lookup(object, start - entry->vme_start + entry->offset);
if (m == VM_PAGE_NULL) {
vm_object_unlock(object);
return KERN_FAILURE;
}
if (m->busy ||
(m->unusual && (m->error || m->restart || m->private || m->absent))) {
vm_object_unlock(object);
return KERN_FAILURE;
}
m->cs_validated = TRUE;
m->wpmapped = FALSE;
refmod = pmap_disconnect(m->phys_page);
if ((refmod & VM_MEM_MODIFIED) && !m->dirty) {
m->dirty = TRUE;
}
start += PAGE_SIZE;
}
vm_object_unlock(object);
return KERN_SUCCESS;
}
#endif
#if CONFIG_FREEZE
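/*
 * vm_map_freeze_walk:
 *
 * Dry run of vm_map_freeze(): walk the map and tally the pages
 * that packing would classify as purgeable, wired, clean or
 * dirty, and note whether any eligible object is shared.
 */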
kern_return_t
vm_map_freeze_walk(
vm_map_t map,
unsigned int *purgeable_count,
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
boolean_t *has_shared)
{
vm_map_entry_t entry;
vm_map_lock_read(map);
*purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
*has_shared = FALSE;
for (entry = vm_map_first_entry(map);
entry != vm_map_to_entry(map);
entry = entry->vme_next) {
unsigned int purgeable, clean, dirty, wired;
boolean_t shared;
if ((entry->object.vm_object == 0) ||
(entry->is_sub_map) ||
(entry->object.vm_object->phys_contiguous)) {
continue;
}
vm_object_pack(&purgeable, &wired, &clean, &dirty, &shared, entry->object.vm_object, VM_OBJECT_NULL, NULL, NULL);
*purgeable_count += purgeable;
*wired_count += wired;
*clean_count += clean;
*dirty_count += dirty;
if (shared) {
*has_shared = TRUE;
}
}
vm_map_unlock_read(map);
return KERN_SUCCESS;
}
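/*
 * vm_map_freeze:
 *
 * Pack the map's eligible objects (object-backed, non-submap,
 * non-contiguous) into a single freshly allocated compact object
 * and page it out, recording the layout in a default-freezer
 * table of contents hung off the map.  If the map is already
 * frozen, or on error, the partially built state is torn down.
 */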
kern_return_t
vm_map_freeze(
vm_map_t map,
unsigned int *purgeable_count,
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
boolean_t *has_shared)
{
vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL;
vm_object_t compact_object = VM_OBJECT_NULL;
vm_object_offset_t offset = 0x0;
kern_return_t kr = KERN_SUCCESS;
void *default_freezer_toc = NULL;
boolean_t cleanup = FALSE;
*purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
*has_shared = FALSE;
compact_object = vm_object_allocate((vm_map_offset_t)(VM_MAX_ADDRESS) - (vm_map_offset_t)(VM_MIN_ADDRESS));
if (!compact_object) {
kr = KERN_FAILURE;
goto done;
}
default_freezer_toc = default_freezer_mapping_create(compact_object, offset);
if (!default_freezer_toc) {
kr = KERN_FAILURE;
goto done;
}
vm_map_lock(map);
if (map->default_freezer_toc != NULL) {
cleanup = TRUE;
kr = KERN_SUCCESS;
goto done;
}
map->default_freezer_toc = default_freezer_toc;
vm_object_lock(compact_object);
for (entry2 = vm_map_first_entry(map);
entry2 != vm_map_to_entry(map);
entry2 = entry2->vme_next) {
vm_object_t src_object = entry2->object.vm_object;
if (entry2->object.vm_object && !entry2->is_sub_map && !entry2->object.vm_object->phys_contiguous) {
unsigned int purgeable, clean, dirty, wired;
boolean_t shared;
vm_object_pack(&purgeable, &wired, &clean, &dirty, &shared,
src_object, compact_object, &default_freezer_toc, &offset);
*purgeable_count += purgeable;
*wired_count += wired;
*clean_count += clean;
*dirty_count += dirty;
if (shared) {
*has_shared = TRUE;
}
}
}
vm_object_unlock(compact_object);
vm_object_pageout(compact_object);
done:
vm_map_unlock(map);
if ((cleanup) || (KERN_SUCCESS != kr)) {
if (default_freezer_toc) {
default_freezer_mapping_free(&map->default_freezer_toc, TRUE);
}
if (compact_object) {
vm_object_deallocate(compact_object);
}
}
return kr;
}
__private_extern__ vm_object_t default_freezer_get_compact_vm_object(void **);
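/*
 * vm_map_thaw:
 *
 * Undo vm_map_freeze(): page the compact object back in, unpack
 * its contents into the original objects, and drop the table of
 * contents.
 */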
void
vm_map_thaw(
vm_map_t map)
{
void **default_freezer_toc;
vm_object_t compact_object;
vm_map_lock(map);
if (map->default_freezer_toc == NULL) {
goto out;
}
default_freezer_toc = &(map->default_freezer_toc);
compact_object = default_freezer_get_compact_vm_object(default_freezer_toc);
vm_object_pagein(compact_object);
vm_object_unpack(compact_object, default_freezer_toc);
vm_object_deallocate(compact_object);
map->default_freezer_toc = NULL;
out:
vm_map_unlock(map);
}
#endif