#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>
#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif
#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else
#define __DEBUG_ONLY
#endif
/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about the VM region at or above
 *		"address" in "map", plus a snapshot of the shadow chain
 *		of VM objects backing that region.  The object records
 *		are returned out-of-line in a vm_map_copy_t.
 *	Conditions:
 *		Nothing locked on entry or exit.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_FAILURE		Kernel built without MACH_VM_DEBUG.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		No entry at or above "address".
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate the reply buffer.
 */
kern_return_t
mach_vm_region_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_t		*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* reply buffer; valid only when size != 0 */
	vm_size_t size;		/* size of the buffer at addr */
	unsigned int room;	/* buffer holds this many object records */
	unsigned int used;	/* shadow chain actually has this many */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		/*
		 * Descend through submaps to the terminal entry,
		 * locking each inner map before unlocking the outer
		 * (hand-over-hand) so the chain can't change underneath.
		 */
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
				(vm_map_address_t)address, &entry)) {

				/* no entry at address; take the next one */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain, filling in records while there
		 * is room and counting the total in "used".  Locking is
		 * again hand-over-hand, object to shadow.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				/* collapse the tri-state purgable field */
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;

			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate a bigger buffer and redo the walk */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* double the count seen, in case the chain keeps growing */
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* wire the buffer so the walk can fill it under object locks */
		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; hand the rest to IPC as out-of-line data */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;
		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Routine:	mach_vm_region_info_64 [kernel call]
 *	Purpose:
 *		64-bit-region variant of mach_vm_region_info: retrieve
 *		information about the VM region at or above "address"
 *		in "map", plus a snapshot of the shadow chain of VM
 *		objects backing it, returned out-of-line.
 *	Conditions:
 *		Nothing locked on entry or exit.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_FAILURE		Kernel built without MACH_VM_DEBUG.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		No entry at or above "address".
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate the reply buffer.
 */
kern_return_t
mach_vm_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* reply buffer; valid only when size != 0 */
	vm_size_t size;		/* size of the buffer at addr */
	unsigned int room;	/* buffer holds this many object records */
	unsigned int used;	/* shadow chain actually has this many */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		/*
		 * Descend through submaps to the terminal entry,
		 * locking each inner map before unlocking the outer
		 * (hand-over-hand) so the chain can't change underneath.
		 */
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {

				/* no entry at address; take the next one */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain, filling in records while there
		 * is room and counting the total in "used".  Locking is
		 * again hand-over-hand, object to shadow.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				/* collapse the tri-state purgable field */
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;

			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate a bigger buffer and redo the walk */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* double the count seen, in case the chain keeps growing */
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* wire the buffer so the walk can fill it under object locks */
		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; hand the rest to IPC as out-of-line data */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;
		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Routine:	vm_mapped_pages_info [kernel call]
 *	Purpose:
 *		Return the list of resident page addresses mapped by the
 *		target map's pmap, out-of-line via vm_map_copyin.
 *	Conditions:
 *		Nothing locked on entry or exit.
 *	Returns:
 *		KERN_SUCCESS		Pages listed (possibly zero of them).
 *		KERN_FAILURE		Kernel built without MACH_VM_DEBUG.
 *		KERN_INVALID_ARGUMENT	The map is null.
 */
kern_return_t
vm_mapped_pages_info(
__DEBUG_ONLY vm_map_t map,
__DEBUG_ONLY page_address_array_t *pages,
__DEBUG_ONLY mach_msg_type_number_t *pages_count)
{
#if !MACH_VM_DEBUG
return KERN_FAILURE;
#else
pmap_t pmap;
vm_size_t size, size_used;
unsigned int actual, space;
page_address_array_t list;
vm_offset_t addr;

if (map == VM_MAP_NULL)
return (KERN_INVALID_ARGUMENT);

pmap = map->pmap;
/* initial estimate: one slot per currently-resident page */
size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
size = round_page_32(size);

/*
 * Allocate, ask the pmap layer to fill the list, and retry with a
 * larger buffer if more pages became resident in the meantime.
 * NOTE(review): the vm_allocate return value is deliberately ignored
 * here (historical behavior); a failure would fault below.
 */
for (;;) {
(void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
(void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
vm_map_round_page(addr + size), FALSE);

list = (page_address_array_t) addr;
space = size / sizeof(vm_offset_t);

actual = pmap_list_resident_pages(pmap,
list,
space);
if (actual <= space)
break;

/* out of space; retry with the count just reported */
(void) kmem_free(ipc_kernel_map, addr, size);
size = round_page_32(actual * sizeof(vm_offset_t));
}

if (actual == 0) {
*pages = 0;
*pages_count = 0;
(void) kmem_free(ipc_kernel_map, addr, size);
}
else {
*pages_count = actual;
size_used = round_page_32(actual * sizeof(vm_offset_t));
/*
 * Wire before handing the buffer to IPC.
 * NOTE(review): wires through addr + size but copies in only
 * size_used; the tail is then kmem_free'd below — looks
 * intentional, but confirm the size vs size_used bound.
 */
(void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
vm_map_round_page(addr + size),
VM_PROT_READ|VM_PROT_WRITE, FALSE);
(void) vm_map_copyin(ipc_kernel_map,
(vm_map_address_t)addr,
(vm_map_size_t)size_used,
TRUE,
(vm_map_copy_t *)pages);
if (size_used != size) {
/* release the unused tail of the buffer */
(void) kmem_free(ipc_kernel_map,
addr + size_used,
size - size_used);
}
}

return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}
/*
 *	Routine:	host_virtual_physical_table_info [kernel call]
 *	Purpose:
 *		Return statistics about the VP (virtual/physical page)
 *		hash table.  The caller supplies an inline buffer via
 *		*infop/*countp; if it is too small, a pageable kernel
 *		buffer is allocated and returned out-of-line instead.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_FAILURE		Kernel built without MACH_VM_DEBUG.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t			host,
	__DEBUG_ONLY hash_info_bucket_array_t	*infop,
	__DEBUG_ONLY mach_msg_type_number_t	*countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr;
	vm_size_t size = 0;	/* size of allocated buffer; 0 = none */
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with the caller-supplied inline buffer */
	info = *infop;
	potential = *countp;

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory; note a prior allocation was too small */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page_32(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = size/sizeof *info;
	}

	if (info == *infop) {
		/* data fit the caller's inline buffer */
		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);
		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page_32(actual * sizeof *info);

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		/* hand the buffer to IPC as out-of-line data */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}