#include <debug.h>
#include <vm_cpm.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/std_types.h>
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
/* NOTE(review): not referenced in this view; presumably consumed by the
 * UPL pagelist code elsewhere in this file — confirm against full source. */
vm_size_t upl_offset_to_pagelist = 0;
#if VM_CPM
#include <vm/cpm.h>
#endif
/* Port for the dynamic pager; NULL until initialized elsewhere. */
ipc_port_t dynamic_pager_control_port=NULL;
/*
 * mach_vm_allocate:
 * Allocate zero-fill memory in the target map.  On success *addr holds
 * the chosen start address.  A zero size succeeds trivially.
 */
kern_return_t
mach_vm_allocate(
	vm_map_t		map,
	mach_vm_offset_t	*addr,
	mach_vm_size_t		size,
	int			flags)
{
	vm_map_offset_t	start;
	vm_map_size_t	aligned_size;
	kern_return_t	kr;

	/* Only user-allocate flags are legal here. */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (size == 0) {
		*addr = 0;
		return KERN_SUCCESS;
	}

	if ((flags & VM_FLAGS_ANYWHERE) != 0) {
		/* No placement hint: search from the map minimum, but
		 * never hand out page 0. */
		start = vm_map_min(map);
		if (start == 0)
			start += PAGE_SIZE;
	} else {
		start = vm_map_trunc_page(*addr);
	}

	aligned_size = vm_map_round_page(size);
	if (aligned_size == 0)
		return KERN_INVALID_ARGUMENT;	/* rounding overflowed */

	kr = vm_map_enter(map,
			  &start,
			  aligned_size,
			  (vm_map_offset_t)0,
			  flags,
			  VM_OBJECT_NULL,
			  (vm_object_offset_t)0,
			  FALSE,
			  VM_PROT_DEFAULT,
			  VM_PROT_ALL,
			  VM_INHERIT_DEFAULT);

	*addr = start;
	return kr;
}
/*
 * vm_allocate:
 * Legacy (address-width-limited) form of mach_vm_allocate.
 */
kern_return_t
vm_allocate(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size,
	int		flags)
{
	vm_map_offset_t	start;
	vm_map_size_t	aligned_size;
	kern_return_t	kr;

	/* Only user-allocate flags are legal here. */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (size == 0) {
		*addr = 0;
		return KERN_SUCCESS;
	}

	if ((flags & VM_FLAGS_ANYWHERE) != 0) {
		/* No placement hint: search from the map minimum, but
		 * never hand out page 0. */
		start = vm_map_min(map);
		if (start == 0)
			start += PAGE_SIZE;
	} else {
		start = vm_map_trunc_page(*addr);
	}

	aligned_size = vm_map_round_page(size);
	if (aligned_size == 0)
		return KERN_INVALID_ARGUMENT;	/* rounding overflowed */

	kr = vm_map_enter(map,
			  &start,
			  aligned_size,
			  (vm_map_offset_t)0,
			  flags,
			  VM_OBJECT_NULL,
			  (vm_object_offset_t)0,
			  FALSE,
			  VM_PROT_DEFAULT,
			  VM_PROT_ALL,
			  VM_INHERIT_DEFAULT);

	/* Narrow back to the legacy address width for the caller. */
	*addr = CAST_DOWN(vm_offset_t, start);
	return kr;
}
/*
 * mach_vm_deallocate:
 * Remove the given range of addresses from the target map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	/* Reject a null map or a range that wraps the address space. */
	if (map == VM_MAP_NULL || start + size < start)
		return KERN_INVALID_ARGUMENT;

	if (size == (mach_vm_offset_t)0)
		return KERN_SUCCESS;

	return vm_map_remove(map,
			     vm_map_trunc_page(start),
			     vm_map_round_page(start + size),
			     VM_MAP_NO_FLAGS);
}
/*
 * vm_deallocate:
 * Legacy form of mach_vm_deallocate.
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	/* Reject a null map or a range that wraps the address space. */
	if (map == VM_MAP_NULL || start + size < start)
		return KERN_INVALID_ARGUMENT;

	if (size == (vm_offset_t)0)
		return KERN_SUCCESS;

	return vm_map_remove(map,
			     vm_map_trunc_page(start),
			     vm_map_round_page(start + size),
			     VM_MAP_NO_FLAGS);
}
/*
 * mach_vm_inherit:
 * Set the inheritance attribute on a range of the target map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (start + size < start)
		return KERN_INVALID_ARGUMENT;	/* range wraps */
	if (new_inheritance > VM_INHERIT_LAST_VALID)
		return KERN_INVALID_ARGUMENT;

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start + size),
			      new_inheritance);
}
/*
 * vm_inherit:
 * Legacy form of mach_vm_inherit.
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (start + size < start)
		return KERN_INVALID_ARGUMENT;	/* range wraps */
	if (new_inheritance > VM_INHERIT_LAST_VALID)
		return KERN_INVALID_ARGUMENT;

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_inherit(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start + size),
			      new_inheritance);
}
/*
 * mach_vm_protect:
 * Change the current (or, if set_maximum, the maximum) protection on a
 * range of the target map.
 */
kern_return_t
mach_vm_protect(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (start + size < start)
		return KERN_INVALID_ARGUMENT;	/* range wraps */
	/* Only the standard protection bits plus VM_PROT_COPY are legal. */
	if (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))
		return KERN_INVALID_ARGUMENT;

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start + size),
			      new_protection,
			      set_maximum);
}
/*
 * vm_protect:
 * Legacy form of mach_vm_protect.
 */
kern_return_t
vm_protect(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	boolean_t	set_maximum,
	vm_prot_t	new_protection)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (start + size < start)
		return KERN_INVALID_ARGUMENT;	/* range wraps */
	/* Only the standard protection bits plus VM_PROT_COPY are legal. */
	if (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))
		return KERN_INVALID_ARGUMENT;

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_protect(map,
			      vm_map_trunc_page(start),
			      vm_map_round_page(start + size),
			      new_protection,
			      set_maximum);
}
/*
 * mach_vm_machine_attribute:
 * Get/set a machine-specific attribute over a range of the target map.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t			map,
	mach_vm_address_t		addr,
	mach_vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (addr + size < addr)
		return KERN_INVALID_ARGUMENT;	/* range wraps */

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(map,
					vm_map_trunc_page(addr),
					vm_map_round_page(addr + size),
					attribute,
					value);
}
/*
 * vm_machine_attribute:
 * Legacy form of mach_vm_machine_attribute.
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			addr,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (addr + size < addr)
		return KERN_INVALID_ARGUMENT;	/* range wraps */

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(map,
					vm_map_trunc_page(addr),
					vm_map_round_page(addr + size),
					attribute,
					value);
}
/*
 * mach_vm_read:
 * Copy a range of the target map into an out-of-line region; on success
 * *data holds the vm_map_copy_t (as a pointer_t) and *data_size the length.
 */
kern_return_t
mach_vm_read(
	vm_map_t		map,
	mach_vm_address_t	addr,
	mach_vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	vm_map_copy_t	ipc_copy;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	/* The reply carries the size as mach_msg_type_number_t; reject
	 * anything that would not round-trip through that type. */
	if ((mach_msg_type_number_t)size != size)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map,
			   (vm_map_address_t)addr,
			   (vm_map_size_t)size,
			   FALSE,		/* do not destroy the source */
			   &ipc_copy);
	if (kr == KERN_SUCCESS) {
		*data = (pointer_t)ipc_copy;
		*data_size = (mach_msg_type_number_t)size;
		assert(*data_size == size);
	}
	return kr;
}
/*
 * vm_read:
 * Legacy form of mach_vm_read.
 */
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		addr,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	vm_map_copy_t	ipc_copy;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	/* The reply carries the size as mach_msg_type_number_t; reject
	 * sizes that exceed that type's range. */
	if (size > (unsigned)(mach_msg_type_number_t)-1)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map,
			   (vm_map_address_t)addr,
			   (vm_map_size_t)size,
			   FALSE,		/* do not destroy the source */
			   &ipc_copy);
	if (kr == KERN_SUCCESS) {
		*data = (pointer_t)ipc_copy;
		*data_size = (mach_msg_type_number_t)size;
		assert(*data_size == size);
	}
	return kr;
}
/*
 * mach_vm_read_list:
 * Copy each requested range out of the source map and map it into the
 * calling task.  Entries that fail are zeroed in the list; the last
 * error (if any) is returned.
 *
 * FIX: the address-of operator on the local `copy` had been corrupted
 * by HTML-entity decoding ("&copy" rendered as "(c)" symbol); restored
 * to `&copy` so the code compiles.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t		map,
	mach_vm_read_entry_t	data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return KERN_INVALID_ARGUMENT;

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
					      map_addr,
					      map_size,
					      FALSE,	/* keep the source */
					      &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
					current_task()->map,
					&map_addr,
					copy);
				if (KERN_SUCCESS == error) {
					/* Record where it landed in the caller's map. */
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		/* Failed (or empty) entry: zero it out for the caller. */
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return error;
}
/*
 * vm_read_list:
 * Legacy form of mach_vm_read_list; resulting addresses are narrowed
 * to the legacy address width.
 *
 * FIX: restored `&copy` where HTML-entity decoding had corrupted it
 * into the copyright symbol — the code did not compile as shown.
 */
kern_return_t
vm_read_list(
	vm_map_t		map,
	vm_read_entry_t		data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return KERN_INVALID_ARGUMENT;

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
					      map_addr,
					      map_size,
					      FALSE,	/* keep the source */
					      &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
						       &map_addr,
						       copy);
				if (KERN_SUCCESS == error) {
					/* Narrow to the legacy address width. */
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		/* Failed (or empty) entry: zero it out for the caller. */
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return error;
}
/*
 * mach_vm_read_overwrite:
 * Copy `size` bytes from `address` in the source map over pre-existing
 * memory at `data` in the calling thread's map.
 *
 * FIX: restored `&copy` where HTML-entity decoding had corrupted it
 * into the copyright symbol — the code did not compile as shown.
 */
kern_return_t
mach_vm_read_overwrite(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);
	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
					      copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		/* Overwrite failed: the copy object is still ours to free. */
		vm_map_copy_discard(copy);
	}
	return error;
}
/*
 * vm_read_overwrite:
 * Legacy form of mach_vm_read_overwrite.
 *
 * FIX: restored `&copy` where HTML-entity decoding had corrupted it
 * into the copyright symbol — the code did not compile as shown.
 */
kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);
	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
					      copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		/* Overwrite failed: the copy object is still ours to free. */
		vm_map_copy_discard(copy);
	}
	return error;
}
/*
 * mach_vm_write:
 * Overwrite memory at `address` in the target map with the out-of-line
 * data received in the message (`data` is really a vm_map_copy_t).
 * `size` is implied by the copy object and is unused here.
 */
kern_return_t
mach_vm_write(
	vm_map_t			map,
	mach_vm_address_t		address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map,
				     (vm_map_address_t)address,
				     (vm_map_copy_t)data,
				     FALSE);
}
/*
 * vm_write:
 * Legacy form of mach_vm_write.
 */
kern_return_t
vm_write(
	vm_map_t			map,
	vm_address_t			address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map,
				     (vm_map_address_t)address,
				     (vm_map_copy_t)data,
				     FALSE);
}
/*
 * mach_vm_copy:
 * Copy a range within the same map from source_address to dest_address.
 *
 * FIX: restored `&copy` where HTML-entity decoding had corrupted it
 * into the copyright symbol — the code did not compile as shown.
 */
kern_return_t
mach_vm_copy(
	vm_map_t		map,
	mach_vm_address_t	source_address,
	mach_vm_size_t		size,
	mach_vm_address_t	dest_address)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);
	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
					   (vm_map_address_t)dest_address,
					   copy, FALSE);
		/* On failure the copy object must be released here. */
		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}
/*
 * vm_copy:
 * Legacy form of mach_vm_copy.
 *
 * FIX: restored `&copy` where HTML-entity decoding had corrupted it
 * into the copyright symbol — the code did not compile as shown.
 */
kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);
	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
					   (vm_map_address_t)dest_address,
					   copy, FALSE);
		/* On failure the copy object must be released here. */
		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}
/*
 * mach_vm_map:
 * Map a memory object (named by `port`) into the target map, after
 * filtering the flags down to the user-map set.
 */
kern_return_t
mach_vm_map(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	/* Only user-map flags are legal here. */
	if (flags & ~VM_FLAGS_USER_MAP)
		return KERN_INVALID_ARGUMENT;

	return vm_map_enter_mem_object(target_map, address, initial_size,
				       mask, flags, port, offset, copy,
				       cur_protection, max_protection,
				       inheritance);
}
/*
 * vm_map_64:
 * Legacy (address-width-limited) wrapper that widens its arguments and
 * forwards to mach_vm_map.
 */
kern_return_t
vm_map_64(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t	wide_addr = (mach_vm_address_t)*address;
	mach_vm_size_t		wide_size = (mach_vm_size_t)size;
	mach_vm_offset_t	wide_mask = (mach_vm_offset_t)mask;
	kern_return_t		kr;

	kr = mach_vm_map(target_map, &wide_addr, wide_size, wide_mask,
			 flags, port, offset, copy,
			 cur_protection, max_protection, inheritance);

	/* Narrow the result back to the legacy address width. */
	*address = CAST_DOWN(vm_offset_t, wide_addr);
	return kr;
}
/*
 * vm_map:
 * Oldest legacy wrapper (32-bit object offset); widens everything and
 * forwards to mach_vm_map.
 */
kern_return_t
vm_map(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t	wide_addr = (mach_vm_address_t)*address;
	mach_vm_size_t		wide_size = (mach_vm_size_t)size;
	mach_vm_offset_t	wide_mask = (mach_vm_offset_t)mask;
	vm_object_offset_t	wide_offset = (vm_object_offset_t)offset;
	kern_return_t		kr;

	kr = mach_vm_map(target_map, &wide_addr, wide_size, wide_mask,
			 flags, port, wide_offset, copy,
			 cur_protection, max_protection, inheritance);

	/* Narrow the result back to the legacy address width. */
	*address = CAST_DOWN(vm_offset_t, wide_addr);
	return kr;
}
/*
 * mach_vm_remap:
 * Remap a range of src_map into target_map; on success *address holds
 * the chosen location and *cur_protection/*max_protection the resulting
 * protections.
 */
kern_return_t
mach_vm_remap(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t	dest_addr;
	kern_return_t	kr;

	if (target_map == VM_MAP_NULL || src_map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	/* Only user-remap flags are legal here. */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	dest_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &dest_addr,
			  size,
			  mask,
			  flags,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);

	*address = dest_addr;
	return kr;
}
/*
 * vm_remap:
 * Legacy form of mach_vm_remap; the result address is narrowed to the
 * legacy width.
 */
kern_return_t
vm_remap(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags,
	vm_map_t	src_map,
	vm_offset_t	memory_address,
	boolean_t	copy,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
{
	vm_map_offset_t	dest_addr;
	kern_return_t	kr;

	if (target_map == VM_MAP_NULL || src_map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	/* Only user-remap flags are legal here. */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	dest_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &dest_addr,
			  size,
			  mask,
			  flags,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);

	*address = CAST_DOWN(vm_offset_t, dest_addr);
	return kr;
}
#include <mach/mach_host_server.h>
/*
 * mach_vm_wire:
 * Wire (access != VM_PROT_NONE) or unwire (access == VM_PROT_NONE) a
 * range of the target map.  Requires the host-priv port.
 *
 * FIX: added the size == 0 short-circuit that the sibling vm_wire()
 * already has.  Without it, a zero-length request with an unaligned
 * start would round to a one-page range and wire/unwire that page.
 */
kern_return_t
mach_vm_wire(
	host_priv_t		host_priv,
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t	rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	/* Reject bad protection bits or a range that wraps. */
	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		rc = KERN_SUCCESS;
	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map, vm_map_trunc_page(start),
				 vm_map_round_page(start+size), access, TRUE);
	} else {
		rc = vm_map_unwire(map, vm_map_trunc_page(start),
				   vm_map_round_page(start+size), TRUE);
	}
	return rc;
}
/*
 * vm_wire:
 * Legacy form of mach_vm_wire: wire the range when access is non-empty,
 * unwire it when access is VM_PROT_NONE.
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t	rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	/* Reject bad protection bits or a range that wraps. */
	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		rc = KERN_SUCCESS;		/* nothing to do */
	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
				 vm_map_trunc_page(start),
				 vm_map_round_page(start + size),
				 access, TRUE);
	} else {
		rc = vm_map_unwire(map,
				   vm_map_trunc_page(start),
				   vm_map_round_page(start + size),
				   TRUE);
	}
	return rc;
}
/*
 * mach_vm_msync:
 * Synchronize a range of the target map per sync_flags.
 */
kern_return_t
mach_vm_msync(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	return vm_map_msync(map,
			    (vm_map_address_t)address,
			    (vm_map_size_t)size,
			    sync_flags);
}
/*
 * vm_msync:
 * Legacy form of mach_vm_msync.
 */
kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	return vm_map_msync(map,
			    (vm_map_address_t)address,
			    (vm_map_size_t)size,
			    sync_flags);
}
int
vm_toggle_entry_reuse(int toggle, int *old_value)
{
vm_map_t map = current_map();
if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
*old_value = map->disable_vmentry_reuse;
} else if(toggle == VM_TOGGLE_SET){
vm_map_lock(map);
map->disable_vmentry_reuse = TRUE;
if (map->first_free == vm_map_to_entry(map)) {
map->highest_entry_end = vm_map_min(map);
} else {
map->highest_entry_end = map->first_free->vme_end;
}
vm_map_unlock(map);
} else if (toggle == VM_TOGGLE_CLEAR){
vm_map_lock(map);
map->disable_vmentry_reuse = FALSE;
vm_map_unlock(map);
} else
return KERN_INVALID_ARGUMENT;
return KERN_SUCCESS;
}
/*
 * mach_vm_behavior_set:
 * Set the paging behavior attribute on a range of the target map.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (start + size < start)
		return KERN_INVALID_ARGUMENT;	/* range wraps */

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_behavior_set(map,
				   vm_map_trunc_page(start),
				   vm_map_round_page(start + size),
				   new_behavior);
}
/*
 * vm_behavior_set:
 * Legacy form of mach_vm_behavior_set.
 */
kern_return_t
vm_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	vm_behavior_t	new_behavior)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;
	if (start + size < start)
		return KERN_INVALID_ARGUMENT;	/* range wraps */

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_behavior_set(map,
				   vm_map_trunc_page(start),
				   vm_map_round_page(start + size),
				   new_behavior);
}
/*
 * mach_vm_region:
 * Return information about the region containing (or following)
 * *address in the target map; updates *address and *size to the
 * region found.
 */
kern_return_t
mach_vm_region(
	vm_map_t		map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		*size,
	vm_region_flavor_t	flavor,
	vm_region_info_t	info,
	mach_msg_type_number_t	*count,
	mach_port_t		*object_name)
{
	vm_map_offset_t	region_start;
	vm_map_size_t	region_len;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	region_start = (vm_map_offset_t)*address;
	region_len = (vm_map_size_t)*size;

	/* Serve legacy basic-info requests with the 64-bit flavor. */
	if (flavor == VM_REGION_BASIC_INFO)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &region_start, &region_len,
			   flavor, info, count,
			   object_name);

	*address = region_start;
	*size = region_len;
	return kr;
}
/*
 * vm_region_64:
 * Legacy form of mach_vm_region; results that do not fit the legacy
 * address width are reported as KERN_INVALID_ADDRESS.
 */
kern_return_t
vm_region_64(
	vm_map_t		map,
	vm_offset_t		*address,
	vm_size_t		*size,
	vm_region_flavor_t	flavor,
	vm_region_info_t	info,
	mach_msg_type_number_t	*count,
	mach_port_t		*object_name)
{
	vm_map_offset_t	region_start;
	vm_map_size_t	region_len;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	region_start = (vm_map_offset_t)*address;
	region_len = (vm_map_size_t)*size;

	/* Serve legacy basic-info requests with the 64-bit flavor. */
	if (flavor == VM_REGION_BASIC_INFO)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &region_start, &region_len,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_offset_t, region_start);
	*size = CAST_DOWN(vm_size_t, region_len);

	/* The region may lie beyond what this legacy API can express. */
	if (kr == KERN_SUCCESS && region_start + region_len > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
/*
 * vm_region:
 * Oldest legacy region query; flavor is passed through unchanged.
 */
kern_return_t
vm_region(
	vm_map_t		map,
	vm_address_t		*address,
	vm_size_t		*size,
	vm_region_flavor_t	flavor,
	vm_region_info_t	info,
	mach_msg_type_number_t	*count,
	mach_port_t		*object_name)
{
	vm_map_address_t	region_start;
	vm_map_size_t		region_len;
	kern_return_t		kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	region_start = (vm_map_address_t)*address;
	region_len = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &region_start, &region_len,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_address_t, region_start);
	*size = CAST_DOWN(vm_size_t, region_len);

	/* The region may lie beyond what this legacy API can express. */
	if (kr == KERN_SUCCESS && region_start + region_len > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
/*
 * mach_vm_region_recurse:
 * Region query that descends into submaps up to *depth levels;
 * updates *address, *size and *depth with what was found.
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t			map,
	mach_vm_address_t		*address,
	mach_vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	region_start;
	vm_map_size_t		region_len;
	kern_return_t		kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	region_start = (vm_map_address_t)*address;
	region_len = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(map,
				      &region_start,
				      &region_len,
				      depth,
				      (vm_region_submap_info_64_t)info,
				      infoCnt);

	*address = region_start;
	*size = region_len;
	return kr;
}
/*
 * vm_region_recurse_64:
 * Legacy form of mach_vm_region_recurse; results that do not fit the
 * legacy address width are reported as KERN_INVALID_ADDRESS.
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t			map,
	vm_address_t			*address,
	vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	region_start;
	vm_map_size_t		region_len;
	kern_return_t		kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	region_start = (vm_map_address_t)*address;
	region_len = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(map,
				      &region_start,
				      &region_len,
				      depth,
				      (vm_region_submap_info_64_t)info,
				      infoCnt);

	*address = CAST_DOWN(vm_address_t, region_start);
	*size = CAST_DOWN(vm_size_t, region_len);

	/* The region may lie beyond what this legacy API can express. */
	if (kr == KERN_SUCCESS && region_start + region_len > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}
/*
 * vm_region_recurse:
 * Oldest legacy recursing region query.  Performs the 64-bit query
 * internally and then copies the fields the 32-bit
 * vm_region_submap_info layout shares, narrowing the 64-bit offset.
 * NOTE(review): the info fields are copied even when the query failed —
 * callers appear expected to check the return code first; confirm.
 */
kern_return_t
vm_region_recurse(
vm_map_t map,
vm_offset_t *address,
vm_size_t *size,
natural_t *depth,
vm_region_recurse_info_t info32,
mach_msg_type_number_t *infoCnt)
{
vm_region_submap_info_data_64_t info64;
vm_region_submap_info_t info;
vm_map_address_t map_addr;
vm_map_size_t map_size;
kern_return_t kr;
/* Caller must supply at least the 32-bit info buffer. */
if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
return KERN_INVALID_ARGUMENT;
map_addr = (vm_map_address_t)*address;
map_size = (vm_map_size_t)*size;
info = (vm_region_submap_info_t)info32;
/* Ask the 64-bit primitive for the full-size info. */
*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
depth, &info64, infoCnt);
/* Copy the shared fields into the caller's 32-bit layout. */
info->protection = info64.protection;
info->max_protection = info64.max_protection;
info->inheritance = info64.inheritance;
info->offset = (uint32_t)info64.offset; /* trunc_cast */
info->user_tag = info64.user_tag;
info->pages_resident = info64.pages_resident;
info->pages_shared_now_private = info64.pages_shared_now_private;
info->pages_swapped_out = info64.pages_swapped_out;
info->pages_dirtied = info64.pages_dirtied;
info->ref_count = info64.ref_count;
info->shadow_depth = info64.shadow_depth;
info->external_pager = info64.external_pager;
info->share_mode = info64.share_mode;
info->is_submap = info64.is_submap;
info->behavior = info64.behavior;
info->object_id = info64.object_id;
info->user_wired_count = info64.user_wired_count;
*address = CAST_DOWN(vm_address_t, map_addr);
*size = CAST_DOWN(vm_size_t, map_size);
/* Report the 32-bit count back to the caller. */
*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
/* The region may lie beyond what this legacy API can express. */
if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
return KERN_INVALID_ADDRESS;
return kr;
}
/*
 * mach_vm_purgable_control:
 * Get/set the purgeability state of the object backing `address`.
 */
kern_return_t
mach_vm_purgable_control(
	vm_map_t		map,
	mach_vm_offset_t	address,
	vm_purgable_t		control,
	int			*state)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address),
				       control,
				       state);
}
/*
 * vm_purgable_control:
 * Legacy form of mach_vm_purgable_control.
 */
kern_return_t
vm_purgable_control(
	vm_map_t	map,
	vm_offset_t	address,
	vm_purgable_t	control,
	int		*state)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address),
				       control,
				       state);
}
/* When non-zero, vm_allocate_cpm() requires the host-priv port. */
unsigned int vm_allocate_cpm_privileged = 0;
/*
 * vm_allocate_cpm:
 * Allocate contiguous physical memory in the target map.  If
 * vm_allocate_cpm_privileged is set, the caller must present the
 * host-priv port.
 */
kern_return_t
vm_allocate_cpm(
	host_priv_t	host_priv,
	vm_map_t	map,
	vm_address_t	*addr,
	vm_size_t	size,
	int		flags)
{
	vm_map_address_t	start;
	vm_map_size_t		len;
	kern_return_t		kr;

	/* Privilege check is policy-controlled by the global above. */
	if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	start = (vm_map_address_t)*addr;
	len = (vm_map_size_t)size;

	kr = vm_map_enter_cpm(map,
			      &start,
			      len,
			      flags);

	*addr = CAST_DOWN(vm_address_t, start);
	return kr;
}
/*
 * mach_vm_page_query:
 * Report the disposition and reference count of the page at `offset`.
 */
kern_return_t
mach_vm_page_query(
	vm_map_t		map,
	mach_vm_offset_t	offset,
	int			*disposition,
	int			*ref_count)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(map,
					  vm_map_trunc_page(offset),
					  disposition, ref_count);
}
/*
 * vm_map_page_query:
 * Legacy form of mach_vm_page_query.
 */
kern_return_t
vm_map_page_query(
	vm_map_t	map,
	vm_offset_t	offset,
	int		*disposition,
	int		*ref_count)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(map,
					  vm_map_trunc_page(offset),
					  disposition, ref_count);
}
/*
 * mach_vm_page_info:
 * Return flavor-specific info for the page containing `address`.
 */
kern_return_t
mach_vm_page_info(
	vm_map_t		map,
	mach_vm_address_t	address,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	return vm_map_page_info(map, address, flavor, info, count);
}
/*
 * vm_upl_map:
 * Map a UPL into the target map; on success *dst_addr holds the
 * (legacy-width) address where it was entered.
 */
kern_return_t
vm_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_address_t	*dst_addr)
{
	vm_map_offset_t	mapped_at;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_upl(map, upl, &mapped_at);
	*dst_addr = CAST_DOWN(vm_address_t, mapped_at);
	return kr;
}
/*
 * vm_upl_unmap:
 * Remove a previously mapped UPL from the target map.
 */
kern_return_t
vm_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_remove_upl(map, upl);
}
/*
 * vm_map_get_upl:
 * Create a UPL covering [map_offset, map_offset + *upl_size) of the
 * target map.  NOZEROFILL is always stripped; force_data_sync adds
 * UPL_FORCE_DATA_SYNC internally but is not reflected back in *flags.
 */
kern_return_t
vm_map_get_upl(
	vm_map_t		map,
	vm_map_offset_t		map_offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags,
	int			force_data_sync)
{
	int		effective_flags;
	kern_return_t	kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	effective_flags = *flags & ~UPL_NOZEROFILL;
	if (force_data_sync)
		effective_flags |= UPL_FORCE_DATA_SYNC;

	kr = vm_map_create_upl(map,
			       map_offset,
			       upl_size,
			       upl,
			       page_list,
			       count,
			       &effective_flags);

	/* Hand back the resulting flags, minus the internal sync bit. */
	*flags = effective_flags & ~UPL_FORCE_DATA_SYNC;
	return kr;
}
/*
 * mach_make_memory_entry_64:
 *	Create a "named entry" port that represents either a region of
 *	"target_map", a newly allocated anonymous object
 *	(MAP_MEM_NAMED_CREATE), or a sub-range of an existing named
 *	entry ("parent_handle").  With MAP_MEM_ONLY, no new entry is
 *	made; only the cache (WIMG) attributes of the parent entry's
 *	object are updated.
 *
 *	On success *object_handle receives a send right to the new
 *	entry and *size is rounded to the actual mapped size.  On
 *	failure the partially built entry/port (if any) is released
 *	at make_mem_done.
 */
kern_return_t
mach_make_memory_entry_64(
vm_map_t target_map,
memory_object_size_t *size,
memory_object_offset_t offset,
vm_prot_t permission,
ipc_port_t *object_handle,
ipc_port_t parent_handle)
{
vm_map_version_t version;
vm_named_entry_t parent_entry;
vm_named_entry_t user_entry;
ipc_port_t user_handle;
kern_return_t kr;
vm_map_t real_map;
boolean_t wired;
vm_object_offset_t obj_off;
vm_prot_t prot;
struct vm_object_fault_info fault_info;
vm_object_t object;
vm_object_t shadow_object;
vm_map_entry_t map_entry;
vm_map_entry_t next_entry;
vm_map_t local_map;
vm_map_t original_map = target_map;
vm_map_size_t total_size;
vm_map_size_t map_size;
vm_map_offset_t map_offset;
vm_map_offset_t local_offset;
vm_object_size_t mappable_size;
unsigned int access;
vm_prot_t protections;
vm_prot_t original_protections, mask_protections;
unsigned int wimg_mode;
/* Reject any mode bits (0x00FF0000) other than the ones we handle. */
if (((permission & 0x00FF0000) &
~(MAP_MEM_ONLY |
MAP_MEM_NAMED_CREATE |
MAP_MEM_PURGABLE |
MAP_MEM_NAMED_REUSE))) {
return KERN_INVALID_VALUE;
}
/* A parent handle is honored only if it really is a named entry. */
if (parent_handle != IP_NULL &&
ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
} else {
parent_entry = NULL;
}
original_protections = permission & VM_PROT_ALL;
protections = original_protections;
/* VM_PROT_IS_MASK: treat "protections" as an upper bound, not a demand. */
mask_protections = permission & VM_PROT_IS_MASK;
access = GET_MAP_MEM(permission);
user_handle = IP_NULL;
user_entry = NULL;
map_offset = vm_map_trunc_page(offset);
map_size = vm_map_round_page(*size);
if (permission & MAP_MEM_ONLY) {
/*
 * MAP_MEM_ONLY: just change the caching mode of the parent
 * entry's backing object; no new entry is created.
 */
boolean_t parent_is_object;
if (parent_entry == NULL) {
return KERN_INVALID_ARGUMENT;
}
parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
object = parent_entry->backing.object;
if(parent_is_object && object != VM_OBJECT_NULL)
wimg_mode = object->wimg_bits;
else
wimg_mode = VM_WIMG_USE_DEFAULT;
/* Changing the access mode requires write permission on the entry. */
if((access != GET_MAP_MEM(parent_entry->protection)) &&
!(parent_entry->protection & VM_PROT_WRITE)) {
return KERN_INVALID_RIGHT;
}
if(access == MAP_MEM_IO) {
SET_MAP_MEM(access, parent_entry->protection);
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
SET_MAP_MEM(access, parent_entry->protection);
wimg_mode = VM_WIMG_USE_DEFAULT;
} else if (access == MAP_MEM_WTHRU) {
SET_MAP_MEM(access, parent_entry->protection);
wimg_mode = VM_WIMG_WTHRU;
} else if (access == MAP_MEM_WCOMB) {
SET_MAP_MEM(access, parent_entry->protection);
wimg_mode = VM_WIMG_WCOMB;
}
/* Push the new WIMG mode down to the pages of the object. */
if (parent_is_object && object &&
(access != MAP_MEM_NOOP) &&
(!(object->nophyscache))) {
if (object->wimg_bits != wimg_mode) {
vm_object_lock(object);
vm_object_change_wimg_mode(object, wimg_mode);
vm_object_unlock(object);
}
}
if (object_handle)
*object_handle = IP_NULL;
return KERN_SUCCESS;
}
if(permission & MAP_MEM_NAMED_CREATE) {
/*
 * Create a fresh anonymous VM object (optionally purgable)
 * and wrap it in a new named entry.
 */
kr = mach_memory_entry_allocate(&user_entry, &user_handle);
if (kr != KERN_SUCCESS) {
return KERN_FAILURE;
}
if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
kr = KERN_FAILURE;
goto make_mem_done;
}
object = vm_object_allocate(map_size);
assert(object != VM_OBJECT_NULL);
if (permission & MAP_MEM_PURGABLE) {
/* A purgable entry must be writable to be useful. */
if (! (permission & VM_PROT_WRITE)) {
vm_object_deallocate(object);
kr = KERN_INVALID_ARGUMENT;
goto make_mem_done;
}
object->purgable = VM_PURGABLE_NONVOLATILE;
}
wimg_mode = object->wimg_bits;
if (access == MAP_MEM_IO) {
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
wimg_mode = VM_WIMG_USE_DEFAULT;
} else if (access == MAP_MEM_WTHRU) {
wimg_mode = VM_WIMG_WTHRU;
} else if (access == MAP_MEM_WCOMB) {
wimg_mode = VM_WIMG_WCOMB;
}
if (access != MAP_MEM_NOOP) {
object->wimg_bits = wimg_mode;
}
/* The object is brand new and unshared, so COPY_NONE is safe. */
object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
user_entry->backing.object = object;
user_entry->internal = TRUE;
user_entry->is_sub_map = FALSE;
user_entry->is_pager = FALSE;
user_entry->offset = 0;
user_entry->protection = protections;
SET_MAP_MEM(access, user_entry->protection);
user_entry->size = map_size;
*size = CAST_DOWN(vm_size_t, map_size);
*object_handle = user_handle;
return KERN_SUCCESS;
}
if (parent_entry == NULL ||
(permission & MAP_MEM_NAMED_REUSE)) {
/*
 * Make an entry for a region of target_map.  We may come
 * back to redo_lookup if the map needed to be re-locked
 * for writing and the world may have changed underneath us.
 */
if (target_map == VM_MAP_NULL) {
return KERN_INVALID_TASK;
}
redo_lookup:
protections = original_protections;
vm_map_lock_read(target_map);
/* Returns with the object locked (OBJECT_LOCK_EXCLUSIVE). */
kr = vm_map_lookup_locked(&target_map, map_offset,
protections | mask_protections,
OBJECT_LOCK_EXCLUSIVE, &version,
&object, &obj_off, &prot, &wired,
&fault_info,
&real_map);
if (kr != KERN_SUCCESS) {
vm_map_unlock_read(target_map);
goto make_mem_done;
}
if (mask_protections) {
/* Keep only what the mapping actually allows. */
protections &= prot;
}
if (((prot & protections) != protections)
|| (object == kernel_object)) {
kr = KERN_INVALID_RIGHT;
vm_object_unlock(object);
vm_map_unlock_read(target_map);
if(real_map != target_map)
vm_map_unlock_read(real_map);
if(object == kernel_object) {
printf("Warning: Attempt to create a named"
" entry from the kernel_object\n");
}
goto make_mem_done;
}
/* Hold a reference across the map-entry walk below. */
vm_object_reference_locked(object);
vm_object_unlock(object);
local_map = original_map;
local_offset = map_offset;
if(target_map != local_map) {
vm_map_unlock_read(target_map);
if(real_map != target_map)
vm_map_unlock_read(real_map);
vm_map_lock_read(local_map);
target_map = local_map;
real_map = local_map;
}
/*
 * Descend through submaps until we find the map entry whose
 * object is the one the lookup resolved to.
 */
while(TRUE) {
if(!vm_map_lookup_entry(local_map,
local_offset, &map_entry)) {
kr = KERN_INVALID_ARGUMENT;
vm_map_unlock_read(target_map);
if(real_map != target_map)
vm_map_unlock_read(real_map);
vm_object_deallocate(object);
object = VM_OBJECT_NULL;
goto make_mem_done;
}
if(!(map_entry->is_sub_map)) {
if(map_entry->object.vm_object != object) {
kr = KERN_INVALID_ARGUMENT;
vm_map_unlock_read(target_map);
if(real_map != target_map)
vm_map_unlock_read(real_map);
vm_object_deallocate(object);
object = VM_OBJECT_NULL;
goto make_mem_done;
}
break;
} else {
vm_map_t tmap;
/* Lock child before unlocking parent to keep the chain pinned. */
tmap = local_map;
local_map = map_entry->object.sub_map;
vm_map_lock_read(local_map);
vm_map_unlock_read(tmap);
target_map = local_map;
real_map = local_map;
local_offset = local_offset - map_entry->vme_start;
local_offset += map_entry->offset;
}
}
vm_object_lock(object);
/* Wired mappings imply the object is truly shared. */
if(map_entry->wired_count) {
object->true_share = TRUE;
}
if (mask_protections) {
protections &= map_entry->max_protection;
}
if(((map_entry->max_protection) & protections) != protections) {
kr = KERN_INVALID_RIGHT;
vm_object_unlock(object);
vm_map_unlock_read(target_map);
if(real_map != target_map)
vm_map_unlock_read(real_map);
vm_object_deallocate(object);
object = VM_OBJECT_NULL;
goto make_mem_done;
}
mappable_size = fault_info.hi_offset - obj_off;
total_size = map_entry->vme_end - map_entry->vme_start;
if(map_size > mappable_size) {
/*
 * The request spans several map entries.  Extend the
 * mappable range across subsequent entries as long as they
 * are virtually and object-offset contiguous with the same
 * object, permissions, and needs_copy state.
 */
next_entry = map_entry->vme_next;
while(map_size > mappable_size) {
if((next_entry->object.vm_object == object) &&
(next_entry->vme_start ==
next_entry->vme_prev->vme_end) &&
(next_entry->offset ==
next_entry->vme_prev->offset +
(next_entry->vme_prev->vme_end -
next_entry->vme_prev->vme_start))) {
if (mask_protections) {
protections &= next_entry->max_protection;
}
if(((next_entry->max_protection)
& protections) != protections) {
break;
}
if (next_entry->needs_copy !=
map_entry->needs_copy)
break;
mappable_size += next_entry->vme_end
- next_entry->vme_start;
total_size += next_entry->vme_end
- next_entry->vme_start;
next_entry = next_entry->vme_next;
} else {
break;
}
}
}
if(object->internal) {
/*
 * For an internal object still subject to COW, force a
 * shadow so the named entry sees a stable object.  This
 * needs the map write-locked; if upgrading the lock fails
 * we must restart the whole lookup.
 */
if ((map_entry->needs_copy || object->shadowed ||
(object->vo_size > total_size))
&& !object->true_share) {
vm_object_unlock(object);
if (vm_map_lock_read_to_write(target_map)) {
vm_object_deallocate(object);
target_map = original_map;
goto redo_lookup;
}
vm_object_lock(object);
vm_object_shadow(&map_entry->object.vm_object,
&map_entry->offset, total_size);
shadow_object = map_entry->object.vm_object;
vm_object_unlock(object);
/* Remove write access so future writes fault into the shadow. */
prot = map_entry->protection & ~VM_PROT_WRITE;
if (override_nx(target_map, map_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
vm_object_pmap_protect(
object, map_entry->offset,
total_size,
((map_entry->is_shared
|| target_map->mapped)
? PMAP_NULL :
target_map->pmap),
map_entry->vme_start,
prot);
total_size -= (map_entry->vme_end
- map_entry->vme_start);
next_entry = map_entry->vme_next;
map_entry->needs_copy = FALSE;
vm_object_lock(shadow_object);
/* Retarget every contiguous entry onto the new shadow. */
while (total_size) {
if(next_entry->object.vm_object == object) {
vm_object_reference_locked(shadow_object);
next_entry->object.vm_object
= shadow_object;
vm_object_deallocate(object);
next_entry->offset
= next_entry->vme_prev->offset +
(next_entry->vme_prev->vme_end
- next_entry->vme_prev->vme_start);
next_entry->needs_copy = FALSE;
} else {
panic("mach_make_memory_entry_64:"
" map entries out of sync\n");
}
total_size -=
next_entry->vme_end
- next_entry->vme_start;
next_entry = next_entry->vme_next;
}
/* From here on, work against the shadow object. */
vm_object_reference_locked(shadow_object);
vm_object_deallocate(object);
object = shadow_object;
obj_off = (local_offset - map_entry->vme_start)
+ map_entry->offset;
vm_map_lock_write_to_read(target_map);
}
}
/* Pick the cache mode requested by the caller, if any. */
wimg_mode = object->wimg_bits;
if(!(object->nophyscache)) {
if(access == MAP_MEM_IO) {
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
wimg_mode = VM_WIMG_USE_DEFAULT;
} else if (access == MAP_MEM_WTHRU) {
wimg_mode = VM_WIMG_WTHRU;
} else if (access == MAP_MEM_WCOMB) {
wimg_mode = VM_WIMG_WCOMB;
}
}
/* The object is now exported by name: no more symmetric COW. */
object->true_share = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
vm_map_unlock_read(target_map);
if(real_map != target_map)
vm_map_unlock_read(real_map);
if (object->wimg_bits != wimg_mode)
vm_object_change_wimg_mode(object, wimg_mode);
if(map_size > mappable_size)
map_size = mappable_size;
if (permission & MAP_MEM_NAMED_REUSE) {
/*
 * If the parent entry already describes exactly this
 * object/range/protection, hand back the parent instead
 * of minting a new entry.
 */
if (parent_entry != NULL &&
parent_entry->backing.object == object &&
parent_entry->internal == object->internal &&
parent_entry->is_sub_map == FALSE &&
parent_entry->is_pager == FALSE &&
parent_entry->offset == obj_off &&
parent_entry->protection == protections &&
parent_entry->size == map_size) {
vm_object_unlock(object);
vm_object_deallocate(object);
ipc_port_copy_send(parent_handle);
*object_handle = parent_handle;
return KERN_SUCCESS;
} else {
}
}
vm_object_unlock(object);
if (mach_memory_entry_allocate(&user_entry, &user_handle)
!= KERN_SUCCESS) {
vm_object_deallocate(object);
return KERN_FAILURE;
}
/* The entry consumes the object reference taken above. */
user_entry->backing.object = object;
user_entry->internal = object->internal;
user_entry->is_sub_map = FALSE;
user_entry->is_pager = FALSE;
user_entry->offset = obj_off;
user_entry->protection = protections;
SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
user_entry->size = map_size;
*size = CAST_DOWN(vm_size_t, map_size);
*object_handle = user_handle;
return KERN_SUCCESS;
} else {
/*
 * Derive a sub-range entry from an existing parent entry
 * (no target_map lookup involved).
 */
if (parent_entry == NULL) {
kr = KERN_INVALID_ARGUMENT;
goto make_mem_done;
}
/* NOTE(review): "offset + map_size" could wrap for huge values — verify callers. */
if((offset + map_size) > parent_entry->size) {
kr = KERN_INVALID_ARGUMENT;
goto make_mem_done;
}
if (mask_protections) {
protections &= parent_entry->protection;
}
if((protections & parent_entry->protection) != protections) {
kr = KERN_PROTECTION_FAILURE;
goto make_mem_done;
}
if (mach_memory_entry_allocate(&user_entry, &user_handle)
!= KERN_SUCCESS) {
kr = KERN_FAILURE;
goto make_mem_done;
}
user_entry->size = map_size;
user_entry->offset = parent_entry->offset + map_offset;
user_entry->is_sub_map = parent_entry->is_sub_map;
user_entry->is_pager = parent_entry->is_pager;
user_entry->internal = parent_entry->internal;
user_entry->protection = protections;
if(access != MAP_MEM_NOOP) {
SET_MAP_MEM(access, user_entry->protection);
}
/* Take a reference on whatever the parent is backed by. */
if(parent_entry->is_sub_map) {
user_entry->backing.map = parent_entry->backing.map;
vm_map_lock(user_entry->backing.map);
user_entry->backing.map->ref_count++;
vm_map_unlock(user_entry->backing.map);
}
else if (parent_entry->is_pager) {
user_entry->backing.pager = parent_entry->backing.pager;
} else {
object = parent_entry->backing.object;
assert(object != VM_OBJECT_NULL);
user_entry->backing.object = object;
vm_object_reference(object);
vm_object_lock(object);
object->true_share = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
vm_object_unlock(object);
}
*size = CAST_DOWN(vm_size_t, map_size);
*object_handle = user_handle;
return KERN_SUCCESS;
}
make_mem_done:
/* Error path: drop the entry's port (which tears down the entry). */
if (user_handle != IP_NULL) {
mach_memory_entry_port_release(user_handle);
}
return kr;
}
/*
 * _mach_make_memory_entry:
 *	Thin shim over mach_make_memory_entry_64 that round-trips the
 *	size through a memory_object_size_t temporary.
 */
kern_return_t
_mach_make_memory_entry(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	wide_size = (memory_object_size_t) *size;
	kern_return_t		result;

	result = mach_make_memory_entry_64(target_map, &wide_size,
	    (memory_object_offset_t) offset, permission, object_handle,
	    parent_entry);
	*size = wide_size;
	return result;
}
/*
 * mach_make_memory_entry:
 *	Legacy 32-bit-size entry point; widens the size for
 *	mach_make_memory_entry_64 and narrows the result back down.
 */
kern_return_t
mach_make_memory_entry(
	vm_map_t		target_map,
	vm_size_t		*size,
	vm_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	wide_size = (memory_object_size_t) *size;
	kern_return_t		result;

	result = mach_make_memory_entry_64(target_map, &wide_size,
	    (memory_object_offset_t) offset, permission, object_handle,
	    parent_entry);
	*size = CAST_DOWN(vm_size_t, wide_size);
	return result;
}
/*
 * task_wire:
 *	Set or clear the map's "wiring required" policy, making all
 *	future allocations in "map" wired (or not).
 */
kern_return_t
task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	map->wiring_required = must_wire ? TRUE : FALSE;
	return KERN_SUCCESS;
}
/*
 * mach_memory_entry_allocate:
 *	Allocate an empty vm_named_entry plus a kernel ipc_port bound
 *	to it (IKOT_NAMED_ENTRY), returning both through the out
 *	parameters.  The port is hand-converted from a receive right
 *	into a kernel-held send right with a no-senders request so
 *	the entry is torn down when the last send right dies.
 *	Returns KERN_FAILURE if either allocation fails.
 */
__private_extern__ kern_return_t
mach_memory_entry_allocate(
vm_named_entry_t *user_entry_p,
ipc_port_t *user_handle_p)
{
vm_named_entry_t user_entry;
ipc_port_t user_handle;
ipc_port_t previous;
user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
if (user_entry == NULL)
return KERN_FAILURE;
named_entry_lock_init(user_entry);
user_handle = ipc_port_alloc_kernel();
if (user_handle == IP_NULL) {
/* Undo the entry allocation; nothing else references it yet. */
kfree(user_entry, sizeof *user_entry);
return KERN_FAILURE;
}
ip_lock(user_handle);
/* Make a naked send-once right for the no-senders notification. */
user_handle->ip_sorights++;
ip_reference(user_handle);
/* Detach the port from any space: it is now a pure kernel object port. */
user_handle->ip_destination = IP_NULL;
user_handle->ip_receiver_name = MACH_PORT_NULL;
user_handle->ip_receiver = ipc_space_kernel;
/* Make a naked send right the named entry will hand out. */
user_handle->ip_mscount++;
user_handle->ip_srights++;
ip_reference(user_handle);
/* Arm no-senders: the port notifies itself when send rights vanish.
 * Note: ipc_port_nsrequest consumes the lock taken above. */
ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
/* Initialize the entry to a neutral, unbacked state. */
user_entry->backing.pager = NULL;
user_entry->is_sub_map = FALSE;
user_entry->is_pager = FALSE;
user_entry->internal = FALSE;
user_entry->size = 0;
user_entry->offset = 0;
user_entry->protection = VM_PROT_NONE;
user_entry->ref_count = 1;
ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
IKOT_NAMED_ENTRY);
*user_entry_p = user_entry;
*user_handle_p = user_handle;
return KERN_SUCCESS;
}
/*
 * mach_memory_object_memory_entry_64:
 *	Wrap an externally supplied memory object pager in a named
 *	entry of the given size and permissions.
 */
kern_return_t
mach_memory_object_memory_entry_64(
	host_t			host,
	boolean_t		internal,
	vm_object_offset_t	size,
	vm_prot_t		permission,
	memory_object_t		pager,
	ipc_port_t		*entry_handle)
{
	vm_named_entry_t	entry;
	ipc_port_t		entry_port;
	unsigned int		access_mode;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	if (mach_memory_entry_allocate(&entry, &entry_port) != KERN_SUCCESS)
		return KERN_FAILURE;

	entry->backing.pager = pager;
	entry->size = size;
	entry->offset = 0;
	entry->protection = permission & VM_PROT_ALL;
	/* Propagate the caller's cache-mode bits into the entry. */
	access_mode = GET_MAP_MEM(permission);
	SET_MAP_MEM(access_mode, entry->protection);
	entry->internal = internal;
	entry->is_sub_map = FALSE;
	entry->is_pager = TRUE;
	assert(entry->ref_count == 1);

	*entry_handle = entry_port;
	return KERN_SUCCESS;
}
/*
 * mach_memory_object_memory_entry:
 *	32-bit-size front end for mach_memory_object_memory_entry_64.
 */
kern_return_t
mach_memory_object_memory_entry(
	host_t		host,
	boolean_t	internal,
	vm_size_t	size,
	vm_prot_t	permission,
	memory_object_t	pager,
	ipc_port_t	*entry_handle)
{
	return mach_memory_object_memory_entry_64(host, internal,
	    (vm_object_offset_t) size, permission, pager, entry_handle);
}
/*
 * mach_memory_entry_purgable_control:
 *	Get or set the purgability state of the VM object backing a
 *	named-entry port.  Only entries that directly wrap an object
 *	(not submaps or pagers) and that cover the whole object
 *	(offset 0, full size) are eligible.
 *	"state" is in/out: the new state for SET, the current state
 *	for GET.
 */
kern_return_t
mach_memory_entry_purgable_control(
ipc_port_t entry_port,
vm_purgable_t control,
int *state)
{
kern_return_t kr;
vm_named_entry_t mem_entry;
vm_object_t object;
if (entry_port == IP_NULL ||
ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
return KERN_INVALID_ARGUMENT;
}
if (control != VM_PURGABLE_SET_STATE &&
control != VM_PURGABLE_GET_STATE)
return(KERN_INVALID_ARGUMENT);
/* For SET, reject unknown bits and out-of-range state values. */
if (control == VM_PURGABLE_SET_STATE &&
(((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
return(KERN_INVALID_ARGUMENT);
mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
named_entry_lock(mem_entry);
if (mem_entry->is_sub_map || mem_entry->is_pager) {
named_entry_unlock(mem_entry);
return KERN_INVALID_ARGUMENT;
}
object = mem_entry->backing.object;
if (object == VM_OBJECT_NULL) {
named_entry_unlock(mem_entry);
return KERN_INVALID_ARGUMENT;
}
vm_object_lock(object);
/* The entry must represent the entire object for purgability control. */
if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
vm_object_unlock(object);
named_entry_unlock(mem_entry);
return KERN_INVALID_ARGUMENT;
}
/* Object lock (still held) keeps the object alive past this unlock. */
named_entry_unlock(mem_entry);
kr = vm_object_purgable_control(object, control, state);
vm_object_unlock(object);
return kr;
}
/*
 * mach_memory_entry_port_release:
 *	Drop one send right on a named-entry port.
 */
void
mach_memory_entry_port_release(
	ipc_port_t	port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);

	ipc_port_release_send(port);
}
/*
 * mach_destroy_memory_entry:
 *	Drop one reference on the vm_named_entry attached to "port"
 *	and, when the count hits zero, release its backing (map or
 *	object) and free the entry.  Called when the port's send
 *	rights are gone (no-senders path).
 */
void
mach_destroy_memory_entry(
ipc_port_t port)
{
vm_named_entry_t named_entry;
#if MACH_ASSERT
assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif
named_entry = (vm_named_entry_t)port->ip_kobject;
lck_mtx_lock(&(named_entry)->Lock);
named_entry->ref_count -= 1;
if(named_entry->ref_count == 0) {
/* Release whichever backing store this entry holds. */
if (named_entry->is_sub_map) {
vm_map_deallocate(named_entry->backing.map);
} else if (!named_entry->is_pager) {
vm_object_deallocate(named_entry->backing.object);
}
/* Unlock before freeing the structure that contains the lock. */
lck_mtx_unlock(&(named_entry)->Lock);
kfree((void *) port->ip_kobject,
sizeof (struct vm_named_entry));
} else
lck_mtx_unlock(&(named_entry)->Lock);
}
/*
 * mach_memory_entry_page_op:
 *	Perform a page-level operation ("ops") at "offset" within the
 *	object backing a named-entry port; optionally reports the
 *	physical page number and flags.
 */
kern_return_t
mach_memory_entry_page_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_named_entry_t	entry;
	vm_object_t		backing_object;
	kern_return_t		result;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY)
		return KERN_INVALID_ARGUMENT;

	entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(entry);
	/* Only direct object-backed entries are eligible. */
	if (entry->is_sub_map || entry->is_pager ||
	    entry->backing.object == VM_OBJECT_NULL) {
		named_entry_unlock(entry);
		return KERN_INVALID_ARGUMENT;
	}
	backing_object = entry->backing.object;
	/* Pin the object before dropping the entry lock. */
	vm_object_reference(backing_object);
	named_entry_unlock(entry);

	result = vm_object_page_op(backing_object, offset, ops,
	    phys_entry, flags);

	vm_object_deallocate(backing_object);
	return result;
}
/*
 * mach_memory_entry_range_op:
 *	Perform "ops" over [offset_beg, offset_end) of the object
 *	backing a named-entry port; the result is reported through
 *	"range".
 */
kern_return_t
mach_memory_entry_range_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_named_entry_t	entry;
	vm_object_t		backing_object;
	kern_return_t		result;

	if (entry_port == IP_NULL ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY)
		return KERN_INVALID_ARGUMENT;

	entry = (vm_named_entry_t) entry_port->ip_kobject;

	named_entry_lock(entry);
	/* Only direct object-backed entries are eligible. */
	if (entry->is_sub_map || entry->is_pager ||
	    entry->backing.object == VM_OBJECT_NULL) {
		named_entry_unlock(entry);
		return KERN_INVALID_ARGUMENT;
	}
	backing_object = entry->backing.object;
	/* Pin the object before dropping the entry lock. */
	vm_object_reference(backing_object);
	named_entry_unlock(entry);

	result = vm_object_range_op(backing_object,
				    offset_beg,
				    offset_end,
				    ops,
				    (uint32_t *) range);

	vm_object_deallocate(backing_object);
	return result;
}
/*
 * set_dp_control_port:
 *	Install a new dynamic-pager control port (privileged),
 *	releasing the previously installed send right, if any.
 *	NOTE(review): the global is updated without a lock here,
 *	matching the original — concurrent callers could race; confirm
 *	callers serialize this.
 */
kern_return_t
set_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	if (IP_VALID(dynamic_pager_control_port))
		ipc_port_release_send(dynamic_pager_control_port);

	dynamic_pager_control_port = control_port;
	return KERN_SUCCESS;
}
/*
 * get_dp_control_port:
 *	Return a new send right for the dynamic-pager control port
 *	(privileged).
 */
kern_return_t
get_dp_control_port(
	host_priv_t	host_priv,
	ipc_port_t	*control_port)
{
	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
	return KERN_SUCCESS;
}
extern int kernel_upl_map(
vm_map_t map,
upl_t upl,
vm_offset_t *dst_addr);
extern int kernel_upl_unmap(
vm_map_t map,
upl_t upl);
extern int kernel_upl_commit(
upl_t upl,
upl_page_info_t *pl,
mach_msg_type_number_t count);
extern int kernel_upl_commit_range(
upl_t upl,
upl_offset_t offset,
upl_size_t size,
int flags,
upl_page_info_array_t pl,
mach_msg_type_number_t count);
extern int kernel_upl_abort(
upl_t upl,
int abort_type);
extern int kernel_upl_abort_range(
upl_t upl,
upl_offset_t offset,
upl_size_t size,
int abort_flags);
/*
 * kernel_upl_map: kernel-export alias for vm_upl_map().
 */
kern_return_t
kernel_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}
/*
 * kernel_upl_unmap: kernel-export alias for vm_upl_unmap().
 */
kern_return_t
kernel_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	return vm_upl_unmap(map, upl);
}
/*
 * kernel_upl_commit:
 *	Commit all pages of the UPL, then drop the kernel's UPL
 *	reference regardless of the commit outcome.
 */
kern_return_t
kernel_upl_commit(
	upl_t			upl,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	kern_return_t result = upl_commit(upl, pl, count);

	upl_deallocate(upl);
	return result;
}
/*
 * kernel_upl_commit_range:
 *	Commit a sub-range of the UPL.  UPL_COMMIT_FREE_ON_EMPTY
 *	implies UPL_COMMIT_NOTIFY_EMPTY; when the commit empties the
 *	UPL and notification was requested, the UPL is deallocated.
 *	Kernel-only commit flags are rejected.
 */
kern_return_t
kernel_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags,
	upl_page_info_array_t	pl,
	mach_msg_type_number_t	count)
{
	kern_return_t	result;
	boolean_t	emptied = FALSE;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS)
		return KERN_INVALID_ARGUMENT;

	result = upl_commit_range(upl, offset, size, flags, pl, count,
	    &emptied);

	if (emptied && (flags & UPL_COMMIT_NOTIFY_EMPTY))
		upl_deallocate(upl);

	return result;
}
/*
 * kernel_upl_abort_range:
 *	Abort a sub-range of the UPL.  UPL_COMMIT_FREE_ON_EMPTY
 *	implies UPL_COMMIT_NOTIFY_EMPTY; if the abort empties the UPL
 *	and freeing was requested, the UPL is deallocated.
 */
kern_return_t
kernel_upl_abort_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		abort_flags)
{
	kern_return_t	result;
	boolean_t	emptied = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

	result = upl_abort_range(upl, offset, size, abort_flags, &emptied);

	if (emptied && (abort_flags & UPL_COMMIT_FREE_ON_EMPTY))
		upl_deallocate(upl);

	return result;
}
/*
 * kernel_upl_abort:
 *	Abort the whole UPL, then drop the kernel's UPL reference
 *	regardless of the abort outcome.
 */
kern_return_t
kernel_upl_abort(
	upl_t		upl,
	int		abort_type)
{
	kern_return_t result = upl_abort(upl, abort_type);

	upl_deallocate(upl);
	return result;
}
/*
 * vm_region_object_create:
 *	Build a named entry backed by a fresh pageable sub-map of the
 *	requested (page-rounded) size, granting VM_PROT_ALL.
 */
kern_return_t
vm_region_object_create(
	__unused vm_map_t	target_map,
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	entry;
	ipc_port_t		entry_port;
	vm_map_t		backing_map;

	if (mach_memory_entry_allocate(&entry, &entry_port) != KERN_SUCCESS)
		return KERN_FAILURE;

	/* A pmap-less map: its contents only materialize when mapped. */
	backing_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
	    vm_map_round_page(size), TRUE);

	entry->backing.map = backing_map;
	entry->internal = TRUE;
	entry->is_sub_map = TRUE;
	entry->offset = 0;
	entry->protection = VM_PROT_ALL;
	entry->size = size;
	assert(entry->ref_count == 1);

	*object_handle = entry_port;
	return KERN_SUCCESS;
}
ppnum_t vm_map_get_phys_page(
vm_map_t map,
vm_offset_t offset);
/*
 * vm_map_get_phys_page:
 *	Translate a virtual address in "map" to the physical page
 *	number backing it, descending through submaps and the object
 *	shadow chain.  Returns 0 if the address is unmapped or no
 *	resident page is found.
 */
ppnum_t
vm_map_get_phys_page(
vm_map_t map,
vm_offset_t addr)
{
vm_object_offset_t offset;
vm_object_t object;
vm_map_offset_t map_offset;
vm_map_entry_t entry;
ppnum_t phys_page = 0;
map_offset = vm_map_trunc_page(addr);
vm_map_lock(map);
while (vm_map_lookup_entry(map, map_offset, &entry)) {
if (entry->object.vm_object == VM_OBJECT_NULL) {
vm_map_unlock(map);
return (ppnum_t) 0;
}
if (entry->is_sub_map) {
/* Descend: lock the submap before releasing the parent. */
vm_map_t old_map;
vm_map_lock(entry->object.sub_map);
old_map = map;
map = entry->object.sub_map;
map_offset = entry->offset + (map_offset - entry->vme_start);
vm_map_unlock(old_map);
continue;
}
if (entry->object.vm_object->phys_contiguous) {
if(entry->object.vm_object->vo_shadow_offset == 0) {
/*
 * Physical base not yet established; fault the
 * mapping in (lock dropped across the fault) and
 * retry the lookup.
 */
vm_map_unlock(map);
vm_fault(map, map_offset, VM_PROT_NONE,
FALSE, THREAD_UNINT, NULL, 0);
vm_map_lock(map);
continue;
}
/* Contiguous object: compute the page number directly. */
offset = entry->offset + (map_offset - entry->vme_start);
phys_page = (ppnum_t)
((entry->object.vm_object->vo_shadow_offset
+ offset) >> 12);
break;
}
offset = entry->offset + (map_offset - entry->vme_start);
object = entry->object.vm_object;
vm_object_lock(object);
/* Walk the shadow chain looking for a resident page. */
while (TRUE) {
vm_page_t dst_page = vm_page_lookup(object,offset);
if(dst_page == VM_PAGE_NULL) {
if(object->shadow) {
vm_object_t old_object;
/* Lock the shadow before unlocking the current object. */
vm_object_lock(object->shadow);
old_object = object;
offset = offset + object->vo_shadow_offset;
object = object->shadow;
vm_object_unlock(old_object);
} else {
/* End of chain, page not resident: phys_page stays 0. */
vm_object_unlock(object);
break;
}
} else {
phys_page = (ppnum_t)(dst_page->phys_page);
vm_object_unlock(object);
break;
}
}
break;
}
vm_map_unlock(map);
return phys_page;
}
kern_return_t kernel_object_iopl_request(
vm_named_entry_t named_entry,
memory_object_offset_t offset,
upl_size_t *upl_size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
int *flags);
/*
 * kernel_object_iopl_request:
 *	Build an I/O UPL against the object backing "named_entry",
 *	validating the caller's flags and protections first.  A
 *	*upl_size of 0 means "the rest of the entry from offset".
 *	If the entry is still pager-backed, the pager is converted
 *	into a VM object on first use.
 */
kern_return_t
kernel_object_iopl_request(
vm_named_entry_t named_entry,
memory_object_offset_t offset,
upl_size_t *upl_size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
int *flags)
{
vm_object_t object;
kern_return_t ret;
int caller_flags;
caller_flags = *flags;
if (caller_flags & ~UPL_VALID_FLAGS) {
/* Reject bogus high bits to avoid sign/unsigned trouble below. */
return KERN_INVALID_VALUE;
}
if(*upl_size == 0) {
/* Default the size to the remainder of the entry, if it fits. */
if(offset >= named_entry->size)
return(KERN_INVALID_RIGHT);
*upl_size = (upl_size_t) (named_entry->size - offset);
if (*upl_size != named_entry->size - offset)
return KERN_INVALID_ARGUMENT;
}
/* COPYOUT_FROM means the caller will only read the pages. */
if(caller_flags & UPL_COPYOUT_FROM) {
if((named_entry->protection & VM_PROT_READ)
!= VM_PROT_READ) {
return(KERN_INVALID_RIGHT);
}
} else {
if((named_entry->protection &
(VM_PROT_READ | VM_PROT_WRITE))
!= (VM_PROT_READ | VM_PROT_WRITE)) {
return(KERN_INVALID_RIGHT);
}
}
/* NOTE(review): "offset + *upl_size" could wrap for extreme values — confirm callers. */
if(named_entry->size < (offset + *upl_size))
return(KERN_INVALID_ARGUMENT);
offset = offset + named_entry->offset;
if(named_entry->is_sub_map)
return (KERN_INVALID_ARGUMENT);
named_entry_lock(named_entry);
if (named_entry->is_pager) {
/* First use: materialize a VM object from the pager. */
object = vm_object_enter(named_entry->backing.pager,
named_entry->offset + named_entry->size,
named_entry->internal,
FALSE,
FALSE);
if (object == VM_OBJECT_NULL) {
named_entry_unlock(named_entry);
return(KERN_INVALID_OBJECT);
}
vm_object_lock(object);
/* The entry now holds the object (reference taken here). */
vm_object_reference_locked(object);
named_entry->backing.object = object;
named_entry->is_pager = FALSE;
named_entry_unlock(named_entry);
/* For external objects, wait until the pager is ready. */
if (!named_entry->internal) {
while (!object->pager_ready) {
vm_object_wait(object,
VM_OBJECT_EVENT_PAGER_READY,
THREAD_UNINT);
vm_object_lock(object);
}
}
vm_object_unlock(object);
} else {
object = named_entry->backing.object;
vm_object_reference(object);
named_entry_unlock(named_entry);
}
if (!object->private) {
/* Ordinary memory: clamp to the UPL transfer limit. */
if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
if (object->phys_contiguous) {
*flags = UPL_PHYS_CONTIG;
} else {
*flags = 0;
}
} else {
/* Device memory is by definition contiguous. */
*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
}
ret = vm_object_iopl_request(object,
offset,
*upl_size,
upl_ptr,
user_page_list,
page_list_count,
caller_flags);
vm_object_deallocate(object);
return ret;
}