#include <cpus.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>
#include <string.h>
/* The kernel's master address map; all kernel submaps hang off this. */
vm_map_t kernel_map;
/* Submap of kernel_map used for pageable kernel allocations. */
vm_map_t kernel_pageable_map;
/*
 * Forward declarations for the page-populating helpers defined
 * later in this file (used by kmem_realloc).
 */
extern kern_return_t kmem_alloc_pages(
register vm_object_t object,
register vm_object_offset_t offset,
register vm_offset_t start,
register vm_offset_t end,
vm_prot_t protection);
extern void kmem_remap_pages(
register vm_object_t object,
register vm_object_offset_t offset,
register vm_offset_t start,
register vm_offset_t end,
vm_prot_t protection);
/*
 *	kmem_alloc_contig:
 *
 *	Allocate wired-down kernel memory backed by physically
 *	contiguous pages (via cpm_allocate).  The only flag honored
 *	is KMA_KOBJECT (back the range with kernel_object instead of
 *	a fresh object); any other flag bit is rejected.
 *
 *	Returns KERN_SUCCESS and the kernel virtual address in *addrp,
 *	or an error code (map full, no contiguous memory, wire failure).
 */
kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_page_t		m, pages;
	kern_return_t		kr;
	vm_offset_t		addr, i;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;

	/* Reject a null map or any flag other than KMA_KOBJECT. */
	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page(size);
	if ((flags & KMA_KOBJECT) == 0) {
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	} else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}

	/*
	 * BUG FIX: the failure check must precede any use of "entry".
	 * On failure vm_map_find_space does not initialize "entry",
	 * so the old code dereferenced an uninitialized pointer.
	 * (This now matches the ordering in kernel_memory_allocate.)
	 */
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	/* Map is returned locked by vm_map_find_space on success. */
	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;
		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}
	vm_map_unlock(map);

	/* Grab the physically contiguous pages. */
	kr = cpm_allocate(size, &pages, FALSE);
	if (kr != KERN_SUCCESS) {
		/* Undo the map reservation; entry removal drops the object ref. */
		vm_map_remove(map, addr, addr + size, 0);
		*addrp = 0;
		return kr;
	}

	/* Insert each contiguous page into the backing object. */
	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	/* Wire the range; on failure clean pages out of kernel_object
	 * explicitly since vm_map_remove won't free them from it. */
	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
	    != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return kr;
	}

	if (object == kernel_object)
		vm_map_simplify(map, addr);

	*addrp = addr;
	return KERN_SUCCESS;
}
/*
 *	kernel_memory_allocate:
 *
 *	Master entry point for wired kernel allocations (kmem_alloc,
 *	kmem_alloc_wired and kmem_alloc_aligned all funnel here).
 *	KMA_KOBJECT backs the range with kernel_object; otherwise a
 *	fresh object of the requested size is created.  KMA_NOPAGEWAIT
 *	makes page shortage an immediate KERN_RESOURCE_SHORTAGE
 *	instead of blocking in VM_PAGE_WAIT().
 */
kern_return_t
kernel_memory_allocate(
register vm_map_t map,
register vm_offset_t *addrp,
register vm_size_t size,
register vm_offset_t mask,
int flags)
{
vm_object_t object = VM_OBJECT_NULL;
vm_map_entry_t entry;
vm_object_offset_t offset;
vm_offset_t addr;
vm_offset_t i;
kern_return_t kr;
size = round_page(size);
if ((flags & KMA_KOBJECT) == 0) {
/* Private object: sized exactly to the allocation. */
object = vm_object_allocate(size);
kr = vm_map_find_space(map, &addr, size, mask, &entry);
}
else {
/* Shared kernel object; offsets are relative to kernel VA base. */
object = kernel_object;
kr = vm_map_find_space(map, &addr, size, mask, &entry);
}
if (kr != KERN_SUCCESS) {
/* Drop the private object we created; kernel_object is shared. */
if ((flags & KMA_KOBJECT) == 0)
vm_object_deallocate(object);
return kr;
}
/* vm_map_find_space returned with the map locked; wire the
 * object/offset into the new entry before unlocking. */
if ((flags & KMA_KOBJECT) == 0) {
entry->object.vm_object = object;
entry->offset = offset = 0;
} else {
offset = addr - VM_MIN_KERNEL_ADDRESS;
if (entry->object.vm_object == VM_OBJECT_NULL) {
vm_object_reference(object);
entry->object.vm_object = object;
entry->offset = offset;
}
}
vm_map_unlock(map);
/* Populate the object one page at a time, blocking (or bailing,
 * under KMA_NOPAGEWAIT) when the free list is empty. */
vm_object_lock(object);
for (i = 0; i < size; i += PAGE_SIZE) {
vm_page_t mem;
while ((mem = vm_page_alloc(object,
offset + (vm_object_offset_t)i))
== VM_PAGE_NULL) {
if (flags & KMA_NOPAGEWAIT) {
/* Pages already placed in kernel_object must be removed
 * by hand; vm_map_remove won't free them from it. */
if (object == kernel_object)
vm_object_page_remove(object, offset,
offset + (vm_object_offset_t)i);
vm_object_unlock(object);
vm_map_remove(map, addr, addr + size, 0);
return KERN_RESOURCE_SHORTAGE;
}
/* Drop the object lock while sleeping for a free page. */
vm_object_unlock(object);
VM_PAGE_WAIT();
vm_object_lock(object);
}
/* Page came back busy from vm_page_alloc; clear for use. */
mem->busy = FALSE;
}
vm_object_unlock(object);
if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
!= KERN_SUCCESS) {
if (object == kernel_object) {
vm_object_lock(object);
vm_object_page_remove(object, offset, offset + size);
vm_object_unlock(object);
}
vm_map_remove(map, addr, addr + size, 0);
return (kr);
}
if (object == kernel_object)
vm_map_simplify(map, addr);
#if (NCPUS > 1) && i860
/* i860 MP port zeroed fresh kernel memory here — platform quirk. */
bzero( addr, size );
#endif
*addrp = addr;
return KERN_SUCCESS;
}
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel map (or a submap).
 *	Memory is not zero-filled.  Convenience front end for
 *	kernel_memory_allocate with no flags and no alignment mask.
 */
kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, (vm_offset_t) 0, 0);
}
/*
 *	kmem_realloc:
 *
 *	Grow a wired kernel allocation by finding a new range,
 *	remapping the old pages there, and allocating fresh pages
 *	for the tail.  The old mapping at oldaddr is left in place;
 *	the caller is responsible for freeing it.
 *	Panics if the old range has no map entry or if the backing
 *	object's size disagrees with oldsize.
 */
kern_return_t
kmem_realloc(
vm_map_t map,
vm_offset_t oldaddr,
vm_size_t oldsize,
vm_offset_t *newaddrp,
vm_size_t newsize)
{
vm_offset_t oldmin, oldmax;
vm_offset_t newaddr;
vm_object_t object;
vm_map_entry_t oldentry, newentry;
kern_return_t kr;
/* Normalize both ranges to page boundaries. */
oldmin = trunc_page(oldaddr);
oldmax = round_page(oldaddr + oldsize);
oldsize = oldmax - oldmin;
newsize = round_page(newsize);
/* On success the map comes back locked with newentry reserved. */
kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0,
&newentry);
if (kr != KERN_SUCCESS) {
return kr;
}
/* The old range must still be mapped — we own it. */
if (!vm_map_lookup_entry(map, oldmin, &oldentry))
panic("kmem_realloc");
object = oldentry->object.vm_object;
/* Take a ref for the new entry and grow the object in place. */
vm_object_reference(object);
vm_object_lock(object);
if (object->size != oldsize)
panic("kmem_realloc");
object->size = newsize;
vm_object_unlock(object);
newentry->object.vm_object = object;
newentry->offset = 0;
assert (newentry->wired_count == 0);
/* Mark wired by hand; pages are entered directly below rather
 * than going through vm_map_wire. */
newentry->wired_count = 1;
vm_map_unlock(map);
/* Re-enter the existing pages at the new address... */
kmem_remap_pages(object, 0,
newaddr, newaddr + oldsize,
VM_PROT_DEFAULT);
/* ...and populate the newly grown tail. */
kmem_alloc_pages(object, oldsize,
newaddr + oldsize, newaddr + newsize,
VM_PROT_DEFAULT);
*newaddrp = newaddr;
return KERN_SUCCESS;
}
/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down kernel memory backed directly by the
 *	kernel object (KMA_KOBJECT), rather than a private object.
 */
kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size,
				      (vm_offset_t) 0, KMA_KOBJECT);
}
/*
 *	kmem_alloc_aligned:
 *
 *	Allocate wired, kernel-object-backed memory aligned to its
 *	own size.  The size must be a power of two so that
 *	(size - 1) forms a valid alignment mask.
 */
kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	/* Power-of-two check: a power of two has exactly one bit set. */
	if (size & (size - 1))
		panic("kmem_alloc_aligned: size not aligned");

	return kernel_memory_allocate(map, addrp, size,
				      size - 1, KMA_KOBJECT);
}
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable (not wired) memory in the kernel's
 *	pageable map via vm_map_enter with a null object.
 */
kern_return_t
kmem_alloc_pageable(
vm_map_t map,
vm_offset_t *addrp,
vm_size_t size)
{
vm_offset_t addr;
kern_return_t kr;
#ifndef normal
/* NOTE(review): "normal" is not defined anywhere visible, so this
 * branch is the one compiled: the search hint skips the first
 * 0x1000 bytes of the map — presumably to keep page 0 unmapped.
 * TODO confirm intent of this #ifndef hack. */
addr = (vm_map_min(map)) + 0x1000;
#else
addr = vm_map_min(map);
#endif
kr = vm_map_enter(map, &addr, round_page(size),
(vm_offset_t) 0, TRUE,
VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS)
return kr;
*addrp = addr;
return KERN_SUCCESS;
}
/*
 *	kmem_free:
 *
 *	Release memory previously allocated from a kernel map.
 *	The range is unwired (VM_MAP_REMOVE_KUNWIRE) and removed;
 *	any failure is fatal since kernel frees must not fail.
 */
void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t rc;

	rc = vm_map_remove(map,
			   trunc_page(addr),
			   round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (rc != KERN_SUCCESS)
		panic("kmem_free");
}
/*
 *	kmem_alloc_pages:
 *
 *	Allocate, wire, and pmap-enter pages for [start, end) backed
 *	by the given object starting at offset.  Blocks in
 *	VM_PAGE_WAIT() until pages are available.  Used by
 *	kmem_realloc to populate the grown tail of an allocation.
 */
kern_return_t
kmem_alloc_pages(
register vm_object_t object,
register vm_object_offset_t offset,
register vm_offset_t start,
register vm_offset_t end,
vm_prot_t protection)
{
/* The range is wired kernel memory — tell the pmap layer. */
pmap_pageable(kernel_pmap, start, end, FALSE);
while (start < end) {
register vm_page_t mem;
vm_object_lock(object);
/* Sleep (object unlocked) until a free page turns up. */
while ((mem = vm_page_alloc(object, offset))
== VM_PAGE_NULL) {
vm_object_unlock(object);
VM_PAGE_WAIT();
vm_object_lock(object);
}
/* Wire the page so the pageout daemon leaves it alone. */
vm_page_lock_queues();
vm_page_wire(mem);
vm_page_unlock_queues();
vm_object_unlock(object);
/* Enter the translation without holding the object lock. */
PMAP_ENTER(kernel_pmap, start, mem,
protection, TRUE);
/* Page was allocated busy; mark done and wake any waiters. */
vm_object_lock(object);
PAGE_WAKEUP_DONE(mem);
vm_object_unlock(object);
start += PAGE_SIZE;
offset += PAGE_SIZE_64;
}
return KERN_SUCCESS;
}
/*
 *	kmem_remap_pages:
 *
 *	Re-enter already-resident pages of an object into the kernel
 *	pmap at a new virtual range [start, end), wiring each page
 *	again.  Panics if a page in the range is missing — callers
 *	(kmem_realloc) guarantee full residency.
 */
void
kmem_remap_pages(
register vm_object_t object,
register vm_object_offset_t offset,
register vm_offset_t start,
register vm_offset_t end,
vm_prot_t protection)
{
/* Mark the new range as non-pageable kernel memory. */
pmap_pageable(kernel_pmap, start, end, FALSE);
while (start < end) {
register vm_page_t mem;
vm_object_lock(object);
/* Every page must already be resident in the object. */
if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
panic("kmem_remap_pages");
/* Extra wire count for the additional mapping. */
vm_page_lock_queues();
vm_page_wire(mem);
vm_page_unlock_queues();
vm_object_unlock(object);
PMAP_ENTER(kernel_pmap, start, mem,
protection, TRUE);
start += PAGE_SIZE;
offset += PAGE_SIZE;
}
}
/*
 *	kmem_suballoc:
 *
 *	Carve a submap of the given size out of a parent kernel map.
 *	If anywhere is TRUE the range is placed automatically,
 *	otherwise *addr is used as a fixed address.  On success the
 *	new submap is returned through new_map and its base through
 *	addr.  Panics if vm_map_create fails.
 */
kern_return_t
kmem_suballoc(
vm_map_t parent,
vm_offset_t *addr,
vm_size_t size,
boolean_t pageable,
boolean_t anywhere,
vm_map_t *new_map)
{
vm_map_t map;
kern_return_t kr;
size = round_page(size);
/* Ref for the vm_map_enter below; dropped on any failure path. */
vm_object_reference(vm_submap_object);
if (anywhere == TRUE)
*addr = (vm_offset_t)vm_map_min(parent);
/* Reserve the range in the parent, backed by the submap object. */
kr = vm_map_enter(parent, addr, size,
(vm_offset_t) 0, anywhere,
vm_submap_object, (vm_object_offset_t) 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS) {
vm_object_deallocate(vm_submap_object);
return (kr);
}
/* The submap shares the parent's pmap; take a reference on it. */
pmap_reference(vm_map_pmap(parent));
map = vm_map_create(vm_map_pmap(parent), *addr, *addr + size, pageable);
if (map == VM_MAP_NULL)
panic("kmem_suballoc: vm_map_create failed");
/* Splice the new map in over the reserved range. */
kr = vm_map_submap(parent, *addr, *addr + size, map, *addr, FALSE);
if (kr != KERN_SUCCESS) {
/* Undo the reservation, the map, and the object ref. */
vm_map_remove(parent, *addr, *addr + size, VM_MAP_NO_FLAGS);
vm_map_deallocate(map);
vm_object_deallocate(vm_submap_object);
return (kr);
}
*new_map = map;
return (KERN_SUCCESS);
}
/*
 *	kmem_init:
 *
 *	Bootstrap the kernel map.  [start, end) is the range of
 *	kernel virtual addresses still available; everything from
 *	VM_MIN_KERNEL_ADDRESS up to start is already in use by the
 *	booting kernel and gets a placeholder mapping so nothing
 *	else allocates it.
 */
void
kmem_init(
vm_offset_t start,
vm_offset_t end)
{
kernel_map = vm_map_create(pmap_kernel(),
VM_MIN_KERNEL_ADDRESS, end,
FALSE);
if (start != VM_MIN_KERNEL_ADDRESS) {
/* Reserve the already-occupied boot region with a null-object
 * mapping so vm_map_find_space skips it. */
vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
(void) vm_map_enter(kernel_map,
&addr, start - VM_MIN_KERNEL_ADDRESS,
(vm_offset_t) 0, TRUE,
VM_OBJECT_NULL,
(vm_object_offset_t) 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
}
/* Account for pages the boot kernel consumed: everything that is
 * neither free, active, nor inactive is considered wired. */
vm_page_wire_count = (atop(mem_size) - (vm_page_free_count
+ vm_page_active_count
+ vm_page_inactive_count));
}
/*
 *	kmem_io_map_copyout:
 *
 *	Map at least min_size bytes of a page-list vm_map_copy into
 *	the given map for device I/O.  Returns the mapped address
 *	(adjusted for the copy's sub-page offset) in *addr and the
 *	actual mapped size in *alloc_size.  Walks continuation
 *	copies as needed.  Unmap with kmem_io_map_deallocate.
 *
 *	BUG FIX: "&copy->cpy_page_list[0]" had been corrupted to
 *	"©->cpy_page_list[0]" (HTML-entity mangling of "&copy;"),
 *	which does not compile.  Restored in both places.
 */
kern_return_t
kmem_io_map_copyout(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	*alloc_size,
	vm_map_copy_t	copy,
	vm_size_t	min_size,
	vm_prot_t	prot)
{
	vm_offset_t	myaddr, offset;
	vm_size_t	mysize, copy_size;
	kern_return_t	ret;
	register
	vm_page_t	*page_list;
	vm_map_copy_t	new_copy;
	register
	int		i;

	assert(copy->type == VM_MAP_COPY_PAGE_LIST);
	assert(min_size != 0);

	/* Account for the copy's offset within its first page. */
	min_size += (vm_size_t)(copy->offset - trunc_page_64(copy->offset));
	min_size = round_page(min_size);

	/* Try to map the whole copy, but settle for what the current
	 * page list covers as long as it meets min_size. */
	mysize = (vm_size_t)(round_page_64(
		copy->offset + (vm_object_offset_t)copy->size) -
		trunc_page_64(copy->offset));
	copy_size = ptoa(copy->cpy_npages);
	if (mysize > copy_size && copy_size > min_size)
		mysize = copy_size;

	/* Reserve pageable VA; pages are entered by hand below. */
	myaddr = vm_map_min(map);
	ret = vm_map_enter(map, &myaddr, mysize,
			  (vm_offset_t) 0, TRUE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  prot, prot, VM_INHERIT_DEFAULT);
	if (ret != KERN_SUCCESS)
		return(ret);

	pmap_pageable(vm_map_pmap(map), myaddr, myaddr + mysize, TRUE);

	*addr = myaddr + (vm_offset_t)
		(copy->offset - trunc_page_64(copy->offset));
	*alloc_size = mysize;

	/* Enter every page of the (possibly chained) copy. */
	offset = myaddr;
	page_list = &copy->cpy_page_list[0];
	while (TRUE) {
		for ( i = 0; i < copy->cpy_npages; i++, offset+=PAGE_SIZE_64) {
			PMAP_ENTER(vm_map_pmap(map),
				   (vm_offset_t)offset, *page_list,
				   prot, TRUE);
			page_list++;
		}
		if (offset == (myaddr + mysize))
			break;

		/* Current page list exhausted — invoke the continuation
		 * to get the next chunk of pages. */
		vm_map_copy_invoke_extend_cont(copy, &new_copy, &ret);
		if (ret != KERN_SUCCESS) {
			kmem_io_map_deallocate(map, myaddr, mysize);
			return(ret);
		}
		/* Chain the new copy so discarding the head discards all. */
		copy->cpy_cont = vm_map_copy_discard_cont;
		copy->cpy_cont_args = (vm_map_copyin_args_t) new_copy;
		assert(new_copy != VM_MAP_COPY_NULL);
		assert(new_copy->type == VM_MAP_COPY_PAGE_LIST);
		copy = new_copy;
		page_list = &copy->cpy_page_list[0];
	}
	return(ret);
}
/*
 *	kmem_io_map_deallocate:
 *
 *	Tear down a range mapped by kmem_io_map_copyout: unwire each
 *	page's mapping, flush the translations, then remove the map
 *	entries themselves.
 */
void
kmem_io_map_deallocate(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	vm_offset_t	page, limit;

	/* Drop the per-page wiring installed when the range was entered. */
	limit = round_page(addr + size);
	for (page = trunc_page(addr); page < limit; page += PAGE_SIZE)
		pmap_change_wiring(vm_map_pmap(map), page, FALSE);

	/* Flush translations, then release the address range. */
	pmap_remove(vm_map_pmap(map), addr, addr + size);
	vm_map_remove(map, addr, addr + size, VM_MAP_REMOVE_KUNWIRE);
}
/*
 *	kmem_io_object_trunc:
 *
 *	Truncate an object-type vm_map_copy to new_size bytes:
 *	pages past the (rounded) new size are removed from the
 *	backing object, remaining pages are marked dirty and
 *	unwired, and both the copy's and the object's sizes are
 *	updated.  Always returns KERN_SUCCESS; panics if a page
 *	that should be resident is missing.
 *
 *	(Converted from a K&R-style definition to an ANSI prototype
 *	for consistency with every other function in this file —
 *	same types, same behavior.)
 */
kern_return_t
kmem_io_object_trunc(
	vm_map_copy_t		copy,
	register vm_size_t	new_size)
{
	register vm_size_t	offset, old_size;

	assert(copy->type == VM_MAP_COPY_OBJECT);

	old_size = (vm_size_t)round_page_64(copy->size);
	copy->size = new_size;
	new_size = round_page(new_size);

	vm_object_lock(copy->cpy_object);
	/* Throw away the pages beyond the truncated size. */
	vm_object_page_remove(copy->cpy_object,
		(vm_object_offset_t)new_size, (vm_object_offset_t)old_size);
	for (offset = 0; offset < new_size; offset += PAGE_SIZE) {
		register vm_page_t mem;

		if ((mem = vm_page_lookup(copy->cpy_object,
				(vm_object_offset_t)offset)) == VM_PAGE_NULL)
			panic("kmem_io_object_trunc: unable to find object page");

		/* I/O may have modified the page; make sure it is
		 * written back rather than discarded, and let the
		 * pageout daemon manage it again. */
		mem->dirty = TRUE;
		vm_page_lock_queues();
		vm_page_unwire(mem);
		vm_page_unlock_queues();
	}
	copy->cpy_object->size = new_size;
	vm_object_unlock(copy->cpy_object);
	return(KERN_SUCCESS);
}
/*
 *	kmem_io_object_deallocate:
 *
 *	Release an object-type vm_map_copy used for I/O: truncate it
 *	to zero (unwiring and dirtying its pages), then discard the
 *	copy structure.  A truncation failure is fatal.
 */
void
kmem_io_object_deallocate(
	vm_map_copy_t	copy)
{
	if (kmem_io_object_trunc(copy, 0) != KERN_SUCCESS)
		panic("kmem_io_object_deallocate: unable to truncate object");
	vm_map_copy_discard(copy);
}
/*
 *	copyinmap:
 *
 *	Copy length bytes from fromaddr in the given map to the
 *	kernel address toaddr.  For a kernel-pmap map this is a
 *	plain memcpy and FALSE is returned; for the current user
 *	map the result of copyin is returned; any other map is
 *	unreachable and yields TRUE.
 *	NOTE(review): FALSE appears to mean success here — verify
 *	against callers before changing the convention.
 */
boolean_t
copyinmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	/* Kernel map: the source is directly addressable. */
	if (vm_map_pmap(map) == pmap_kernel()) {
		memcpy((void *) toaddr, (void *) fromaddr, length);
		return FALSE;
	}

	/* A user map is only addressable when it is current. */
	if (current_map() != map)
		return TRUE;

	return copyin((char *) fromaddr, (char *) toaddr, length);
}
/*
 *	copyoutmap:
 *
 *	Copy length bytes from the kernel address fromaddr to
 *	toaddr in the given map.  For a kernel-pmap map this is a
 *	plain memcpy and FALSE is returned; for the current user
 *	map the result of copyout is returned; any other map is
 *	unreachable and yields TRUE.
 *	NOTE(review): FALSE appears to mean success here — verify
 *	against callers before changing the convention.
 */
boolean_t
copyoutmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	/* Kernel map: the destination is directly addressable. */
	if (vm_map_pmap(map) == pmap_kernel()) {
		memcpy((void *) toaddr, (void *) fromaddr, length);
		return FALSE;
	}

	/* A user map is only addressable when it is current. */
	if (current_map() != map)
		return TRUE;

	return copyout((char *) fromaddr, (char *) toaddr, length);
}