#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>
#include <string.h>
/*
 * The kernel's own VM map, and a pageable submap of it.
 * NOTE(review): kernel_map is created in kmem_init() below;
 * kernel_pageable_map appears to be set up elsewhere — confirm.
 */
vm_map_t kernel_map;
vm_map_t kernel_pageable_map;

/* Forward declarations for helpers defined later in this file. */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);
/*
 * kmem_alloc_contig:
 *
 * Allocate "size" bytes of wired, physically contiguous kernel
 * memory in "map" at an address satisfying alignment "mask".
 * "flags" may be 0 or KMA_KOBJECT; with KMA_KOBJECT the pages are
 * entered in the shared kernel_object, otherwise a fresh anonymous
 * object backs the range.  On success *addrp receives the kernel
 * virtual address.
 */
kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	/* Reject a null map and any flag bits other than KMA_KOBJECT. */
	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/* Choose the backing object: shared kernel object or a new one. */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	/* On success the map is returned locked with "entry" inserted. */
	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	/*
	 * kernel_object offsets are biased by VM_MIN_KERNEL_ADDRESS so
	 * they mirror kernel virtual addresses; a private object starts
	 * at offset 0.
	 */
	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
	    map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Hold an extra object reference across the unlocked window. */
	vm_object_reference(object);
	vm_map_unlock(map);

	/* Grab a physically contiguous run of pages. */
	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);
	if (kr != KERN_SUCCESS) {
		/* No pages were inserted yet; just tear down the mapping. */
		vm_map_remove(map, vm_map_trunc_page(map_addr),
		    vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	/* Enter each page in the object, clearing busy as we go. */
	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
	    vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE))
	    != KERN_SUCCESS) {
		/*
		 * Wiring failed: strip our pages out of the shared kernel
		 * object before removing the mapping (a private object is
		 * torn down with its last reference below).
		 */
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
		    vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}

	/* Drop the extra reference taken above. */
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = map_addr;
	return KERN_SUCCESS;
}
/*
 * kernel_memory_allocate:
 *
 * Master entry point for wired kernel memory: carve "size" bytes of
 * virtual space out of "map" (alignment "mask"), back the range with
 * resident pages, and wire it.  flags:
 *   KMA_KOBJECT    - back the range with the shared kernel object
 *                    (otherwise a private anonymous object is used)
 *   KMA_NOPAGEWAIT - return KERN_RESOURCE_SHORTAGE instead of
 *                    sleeping when no free page is available
 * On success *addrp receives the allocated address.
 */
kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size;
	vm_map_size_t		i;
	kern_return_t		kr;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;

	/* Shared kernel object, or a private object sized to fit. */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	/* Returns with the map locked and "entry" inserted on success. */
	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	/*
	 * kernel_object offsets mirror kernel virtual addresses (biased
	 * by VM_MIN_KERNEL_ADDRESS); private objects start at 0.
	 */
	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
	    map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Keep an extra object reference while the map is unlocked. */
	vm_object_reference(object);
	vm_map_unlock(map);

	/* Populate the range one resident page at a time. */
	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while (VM_PAGE_NULL ==
		    (mem = vm_page_alloc(object, offset + i))) {
			if (flags & KMA_NOPAGEWAIT) {
				/*
				 * Undo: strip the pages entered so far
				 * (only needed for the shared object),
				 * then remove the mapping.
				 */
				if (object == kernel_object)
					vm_object_page_remove(object, offset, offset + i);
				vm_object_unlock(object);
				vm_map_remove(map, map_addr, map_addr + map_size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			/* Sleep for a free page with the object unlocked. */
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE))
	    != KERN_SUCCESS) {
		/* Wiring failed: pull pages out of the shared object first. */
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, map_addr, map_addr + map_size, 0);
		vm_object_deallocate(object);
		return (kr);
	}

	/* Drop the extra reference taken above. */
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
/*
 * kmem_alloc:
 *
 * Convenience wrapper: wired kernel memory with no alignment
 * constraint and default flags (private backing object).
 */
kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return (kernel_memory_allocate(map, addrp, size,
	    (vm_offset_t) 0, 0));
}
/*
 * kmem_realloc:
 *
 * Grow a wired allocation made from "map": extend the backing object
 * from its old (page-rounded) size to vm_map_round_page(newsize),
 * allocate pages for the tail, map and wire the whole object at a
 * new address returned in *newaddrp.  The old mapping is left in
 * place; the caller remains responsible for freeing it.  Panics if
 * [oldaddr, oldaddr+oldsize) is not a mapping whose object exactly
 * spans the old region.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;

	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);

	/*
	 * Find the VM object backing the old region.
	 */
	vm_map_lock(map);
	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 * Take our own reference, then grow the object so the new tail
	 * offsets are legal before allocating pages for them.
	 */
	vm_object_reference(object);
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldmapsize)
		panic("kmem_realloc");
	object->size = newmapsize;
	vm_object_unlock(object);

	/* Allocate resident pages for the newly added tail. */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
	    vm_object_round_page(newmapsize - oldmapsize));

	/*
	 * Find space for the enlarged mapping; on success the map is
	 * returned locked with "newentry" inserted.
	 */
	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
	    (vm_map_offset_t) 0, &newentry);
	if (kr != KERN_SUCCESS) {
		/* Back out: free the fresh tail pages and shrink the object. */
		vm_object_lock(object);
		for (offset = oldmapsize;
		    offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert(newentry->wired_count == 0);

	/* One more reference for the new map entry itself. */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize,
	    VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		/*
		 * BUG FIX: start the teardown at the page-rounded
		 * oldmapsize, not the raw oldsize, mirroring the error
		 * path above.  Starting at an unrounded offset could
		 * free a page still covered by the old mapping.
		 */
		for (offset = oldmapsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	/* Drop the reference taken before the page allocation. */
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}
/*
 * kmem_alloc_wired:
 *
 * Wired kernel memory backed by the shared kernel object, with no
 * alignment constraint.
 */
kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return (kernel_memory_allocate(map, addrp, size,
	    (vm_offset_t) 0, KMA_KOBJECT));
}
/*
 * kmem_alloc_aligned:
 *
 * Wired kernel-object memory whose address is aligned to "size",
 * which must be a power of two.
 */
kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	/* A power of two has exactly one bit set. */
	if (size & (size - 1))
		panic("kmem_alloc_aligned: size not aligned");
	return (kernel_memory_allocate(map, addrp, size,
	    size - 1, KMA_KOBJECT));
}
/*
 * kmem_alloc_pageable:
 *
 * Allocate "size" bytes of pageable (not wired) virtual memory
 * anywhere in "map"; no backing object is supplied to vm_map_enter.
 * On success *addrp receives the chosen address.
 */
kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	/*
	 * NOTE(review): starts the search one page past the map minimum
	 * — presumably to keep the first page unused; confirm what the
	 * "normal" conditional is meant to gate.
	 */
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
	    (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
	    VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
/*
 * kmem_free:
 *
 * Release memory allocated with the kmem_alloc family, unwiring it
 * in the process.  Panics if the region cannot be removed.
 */
void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	rc;

	rc = vm_map_remove(map,
	    vm_map_trunc_page(addr),
	    vm_map_round_page(addr + size),
	    VM_MAP_REMOVE_KUNWIRE);
	if (rc != KERN_SUCCESS)
		panic("kmem_free");
}
/*
 * kmem_alloc_pages:
 *
 * Allocate resident pages covering "size" bytes of "object" starting
 * at "offset", sleeping for free pages as necessary.  Pages are left
 * not busy.  Always returns KERN_SUCCESS.
 */
kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t	remaining;

	remaining = vm_object_round_page(size);
	vm_object_lock(object);
	for (; remaining != 0; remaining -= PAGE_SIZE, offset += PAGE_SIZE) {
		register vm_page_t	page;

		/*
		 * Block until a page is available; the object lock is
		 * dropped while we sleep.
		 */
		while ((page = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		page->busy = FALSE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
/*
 * kmem_remap_pages:
 *
 * Wire and enter, at kernel virtual range [start, end), the pages of
 * "object" beginning at "offset", with the given protection.  Every
 * page in the range must already be resident or we panic.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	vm_map_offset_t	map_start;
	vm_map_offset_t	map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	/* Mark the whole range not pageable in the kernel pmap. */
	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/* The page must already exist in the object. */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/* Wire the page so it cannot be reclaimed. */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		ASSERT_PAGE_DECRYPTED(mem);

		/* Enter the translation, honoring the object's WIMG bits. */
		PMAP_ENTER(kernel_pmap, map_start, mem, protection,
		    ((unsigned int)(mem->object->wimg_bits))
		    & VM_WIMG_MASK,
		    TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}
/*
 * kmem_suballoc:
 *
 * Carve a submap of "size" bytes out of "parent".  With
 * VM_FLAGS_ANYWHERE the range is chosen by the parent map; otherwise
 * the (truncated) *addr is used as a fixed address.  The submap
 * shares the parent's physical map.  Returns the chosen address in
 * *addr and the new map in *new_map.
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 * Take a reference on the submap object; it is released by hand
	 * on each failure path below.
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
	    vm_map_min(parent) : vm_map_trunc_page(*addr);

	/* Reserve the range in the parent, backed by vm_submap_object. */
	kr = vm_map_enter(parent, &map_addr, map_size,
	    (vm_map_offset_t) 0, flags,
	    vm_submap_object, (vm_object_offset_t) 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	/* The submap reuses the parent's pmap; bump its reference. */
	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");

	/* Install the new map over the reserved range. */
	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * NOTE(review): vm_map_remove() is presumed to drop the
		 * entry's reference on vm_submap_object; the explicit
		 * deallocate below drops ours — confirm the balance.
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}
/*
 * kmem_init:
 *
 * Boot-time initialization of the kernel map, spanning from
 * VM_MIN_KERNEL_ADDRESS to "end".  The region below "start" that is
 * already in use is reserved so later allocations avoid it.  Also
 * initializes vm_page_wire_count from the page queues.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t	map_start;
	vm_map_offset_t	map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
	    map_end, FALSE);

	/*
	 * Reserve virtual memory handed out before this point so the
	 * allocator never reuses it.
	 */
	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_map_offset_t map_addr;

		map_addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
		    &map_addr,
		    (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
		    (vm_map_offset_t) 0,
		    VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
		    VM_OBJECT_NULL,
		    (vm_object_offset_t) 0, FALSE,
		    VM_PROT_DEFAULT, VM_PROT_ALL,
		    VM_INHERIT_DEFAULT);
	}

	/*
	 * Every physical page not on the free, active, or inactive
	 * queues is counted as wired at this point.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
	    + vm_page_active_count
	    + vm_page_inactive_count));
}
/*
 * copyinmap:
 *
 * Copy "length" bytes from "fromaddr" in "map" into the kernel
 * buffer "todata".  The kernel map is copied directly, the current
 * task's map via copyin(), and any other map by temporarily
 * switching the calling thread's address space.
 */
kern_return_t
copyinmap(
	vm_map_t	map,
	vm_map_offset_t	fromaddr,
	void		*todata,
	vm_size_t	length)
{
	kern_return_t	result = KERN_SUCCESS;

	if (vm_map_pmap(map) == pmap_kernel()) {
		/* Kernel address space: a plain memory copy suffices. */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	} else if (current_map() == map) {
		if (copyin(fromaddr, todata, length) != 0)
			result = KERN_INVALID_ADDRESS;
	} else {
		vm_map_t	prior_map;

		/* Switch into the foreign map for the duration of the copy. */
		vm_map_reference(map);
		prior_map = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			result = KERN_INVALID_ADDRESS;
		vm_map_switch(prior_map);
		vm_map_deallocate(map);
	}
	return result;
}
/*
 * copyoutmap:
 *
 * Copy "length" bytes from kernel buffer "fromdata" to "toaddr"
 * within "map".  Only the kernel map and the current task's map are
 * supported; any other map yields KERN_NOT_SUPPORTED.
 */
kern_return_t
copyoutmap(
	vm_map_t	map,
	void		*fromdata,
	vm_map_address_t toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* Kernel address space: a plain memory copy suffices. */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}
	if (current_map() != map)
		return KERN_NOT_SUPPORTED;
	return (copyout(fromdata, toaddr, length) == 0) ?
	    KERN_SUCCESS : KERN_INVALID_ADDRESS;
}
/*
 * vm_conflict_check:
 *
 * Walk the mappings covering [off, off+len) in "map" and decide
 * whether they conflict with the file range [file_off, file_off+len)
 * served by "pager".  Returns:
 *   KERN_SUCCESS         - no overlap with pages from this pager
 *   KERN_ALREADY_WAITING - the range maps exactly this file range
 *   KERN_FAILURE         - a partial or repeated overlap was found
 */
kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		/* An unbacked entry cannot conflict. */
		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}

		/* Descend into a submap, translating the lookup offset. */
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}

		/* Resolve to the bottom object of the shadow chain. */
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}

		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				/*
				 * Exact match — acceptable only at the base
				 * offset; a match deeper in the range means
				 * the file range is mapped more than once.
				 */
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				/*
				 * Same page, different sub-page offset:
				 * unavoidable conflict.
				 */
				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				/* File range straddles the mapped page. */
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				/* Mapped range straddles the file's page. */
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if (kr != KERN_SUCCESS) {
			/* Already matched earlier, now a different pager: conflict. */
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		/* Done once the current entry covers the rest of the range. */
		if (len <= ((entry->vme_end - entry->vme_start) -
		    (off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
			    (off - entry->vme_start);
		}

		/* Advance past this entry, restarting from the base map. */
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);
			map = base_map;
		}
	}
	vm_map_unlock(map);
	return kr;
}