/* dp_memory_object.c -- default pager: memory object management. */
#include "default_pager_internal.h"
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
struct vstruct_list_head vstruct_list;
/*
 * vstruct_list_insert: add "vs" to the global list of vstructs.
 * Takes and drops the list lock itself and keeps vsl_count in
 * step with the queue contents.
 */
__private_extern__ void
vstruct_list_insert(
	vstruct_t vs)
{
	VSL_LOCK();
	queue_enter(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links);
	vstruct_list.vsl_count++;
	VSL_UNLOCK();
}
/*
 * vstruct_list_delete: remove "vs" from the global list of vstructs.
 *
 * NOTE(review): unlike vstruct_list_insert, no VSL_LOCK is taken
 * here; the caller must already hold it (the visible caller,
 * dp_memory_object_deallocate, calls this inside a VSL-locked
 * region).
 */
__private_extern__ void
vstruct_list_delete(
	vstruct_t vs)
{
	queue_remove(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links);
	vstruct_list.vsl_count--;
}
/*
 * Event counters, bumped at the corresponding blocking points below.
 * They are only ever incremented in this chunk (never read), so they
 * appear to be debugging/statistics aids.
 */
static unsigned int default_pager_total = 0;		/* entries via vs_lock()/data_return */
static unsigned int default_pager_wait_seqno = 0;	/* waits for sequence-number turn */
static unsigned int default_pager_wait_read = 0;	/* waits for readers to drain */
static unsigned int default_pager_wait_write = 0;	/* waits for writers to drain */
static unsigned int default_pager_wait_refs = 0;	/* not incremented in this chunk */
/*
 * vs_async_wait: block until all asynchronous work on "vs" has
 * drained (vs_async_pending == 0).
 *
 * Entered and exited with VS_LOCK(vs) held; the lock is dropped
 * around each thread_block() and re-taken before the count is
 * re-tested.  dp_memory_object_data_return posts the wakeup when
 * its async count reaches zero.
 */
__private_extern__ void
vs_async_wait(
	vstruct_t vs)
{
	ASSERT(vs->vs_async_pending >= 0);
	while (vs->vs_async_pending > 0) {
		vs->vs_waiting_async = TRUE;
		assert_wait(&vs->vs_async_pending, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
	ASSERT(vs->vs_async_pending == 0);
}
#if PARALLEL
/*
 * vs_lock: take VS_LOCK(vs), then wait for this caller's turn in the
 * ticket-style sequencing: each caller draws a ticket from
 * vs_next_seqno and sleeps until vs_seqno reaches it (vs_unlock
 * advances vs_seqno and posts the wakeup).  Returns with VS_LOCK
 * held and the caller first in sequence.
 */
__private_extern__ void
vs_lock(
	vstruct_t vs)
{
	mach_port_seqno_t seqno;

	default_pager_total++;
	VS_LOCK(vs);

	seqno = vs->vs_next_seqno++;

	while (vs->vs_seqno != seqno) {
		default_pager_wait_seqno++;
		vs->vs_waiting_seqno = TRUE;
		assert_wait(&vs->vs_seqno, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/*
 * vs_unlock: pass sequencing on to the next ticket-holder and drop
 * VS_LOCK.  The wakeup is deliberately posted after VS_UNLOCK so a
 * woken waiter in vs_lock() can immediately re-take the lock.
 */
__private_extern__ void
vs_unlock(vstruct_t vs)
{
	vs->vs_seqno++;
	if (vs->vs_waiting_seqno) {
		vs->vs_waiting_seqno = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_seqno);
		return;
	}
	VS_UNLOCK(vs);
}
/*
 * vs_start_read: account for a read in progress.  Callers in this
 * file invoke it with the vstruct locked (see
 * dp_memory_object_data_request); balanced by vs_finish_read.
 */
__private_extern__ void
vs_start_read(
	vstruct_t vs)
{
	vs->vs_readers++;
}
/*
 * vs_wait_for_readers: sleep until no reads are in progress.
 * Entered and exited with VS_LOCK(vs) held; the lock is dropped
 * around each thread_block().  vs_finish_read posts the wakeup when
 * the last reader leaves.
 */
__private_extern__ void
vs_wait_for_readers(
	vstruct_t vs)
{
	while (vs->vs_readers != 0) {
		default_pager_wait_read++;
		vs->vs_waiting_read = TRUE;
		assert_wait(&vs->vs_readers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/*
 * vs_finish_read: a read completed.  Takes VS_LOCK itself; if this
 * was the last reader and someone is blocked in
 * vs_wait_for_readers, wake them (wakeup posted after VS_UNLOCK so
 * the waiter can re-take the lock).
 */
__private_extern__ void
vs_finish_read(
	vstruct_t vs)
{
	VS_LOCK(vs);
	if (--vs->vs_readers == 0 && vs->vs_waiting_read) {
		vs->vs_waiting_read = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_readers);
		return;
	}
	VS_UNLOCK(vs);
}
/*
 * vs_start_write: account for a write in progress.  Callers in this
 * file invoke it with the vstruct locked (see
 * dp_memory_object_data_return); balanced by vs_finish_write.
 */
__private_extern__ void
vs_start_write(
	vstruct_t vs)
{
	vs->vs_writers++;
}
/*
 * vs_wait_for_writers: sleep until no writes are in progress, then
 * additionally drain asynchronous work via vs_async_wait().
 * Entered and exited with VS_LOCK(vs) held; vs_finish_write posts
 * the wakeup.
 */
__private_extern__ void
vs_wait_for_writers(
	vstruct_t vs)
{
	while (vs->vs_writers != 0) {
		default_pager_wait_write++;
		vs->vs_waiting_write = TRUE;
		assert_wait(&vs->vs_writers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
	vs_async_wait(vs);
}
/*
 * vs_wait_for_sync_writers: identical to vs_wait_for_writers except
 * it does NOT wait for asynchronous work to drain afterwards.
 * Entered and exited with VS_LOCK(vs) held.
 */
__private_extern__ void
vs_wait_for_sync_writers(
	vstruct_t vs)
{
	while (vs->vs_writers != 0) {
		default_pager_wait_write++;
		vs->vs_waiting_write = TRUE;
		assert_wait(&vs->vs_writers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/*
 * vs_finish_write: a write completed.  Takes VS_LOCK itself; if
 * this was the last writer and someone is blocked in one of the
 * writer-wait loops, wake them (wakeup posted after VS_UNLOCK).
 */
__private_extern__ void
vs_finish_write(
	vstruct_t vs)
{
	VS_LOCK(vs);
	if (--vs->vs_writers == 0 && vs->vs_waiting_write) {
		vs->vs_waiting_write = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_writers);
		return;
	}
	VS_UNLOCK(vs);
}
#endif
/*
 * vs_object_create: allocate a vstruct able to back "size" bytes.
 * Returns VSTRUCT_NULL (after logging a hint about backing store)
 * when ps_vstruct_create cannot satisfy the request.
 */
vstruct_t
vs_object_create(
	vm_size_t size)
{
	vstruct_t new_vs;

	/* ps_vstruct_create does the allocation and paging-file setup */
	new_vs = ps_vstruct_create(size);
	if (new_vs != VSTRUCT_NULL)
		return new_vs;

	dprintf(("vs_object_create: unable to allocate %s\n",
		 "-- either run swapon command or reboot"));
	return VSTRUCT_NULL;
}
#if 0
/* Forward declaration for the compiled-out routine below. */
void default_pager_add(vstruct_t, boolean_t);

/*
 * default_pager_add: DEAD CODE (inside #if 0).  Appears to have
 * registered the pager's memory-object port with the internal or
 * external port set and requested a no-senders notification.
 * NOTE(review): "kr" and "pset" are set but never used, and "here"
 * is unreferenced -- left verbatim since the block is not compiled.
 */
void
default_pager_add(
	vstruct_t vs,
	boolean_t internal)
{
	memory_object_t mem_obj = vs->vs_mem_obj;
	mach_port_t pset;
	mach_port_mscount_t sync;
	mach_port_t previous;
	kern_return_t kr;
	static char here[] = "default_pager_add";

	/* internal objects sync on the first no-senders; external on the second */
	if (internal) {
		sync = 0;
		pset = default_pager_internal_set;
	} else {
		sync = 1;
		pset = default_pager_external_set;
	}

	ipc_port_make_sonce(mem_obj);
	ip_lock(mem_obj);
	ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous);
}
#endif
/*
 * dp_memory_object_init: the kernel has started using this memory
 * object.  Take a reference on the control port and record it in
 * the vstruct.  A vstruct that already has a control is a protocol
 * violation (Panic).  The pager page size must equal the VM page
 * size (asserted).
 */
kern_return_t
dp_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	vm_size_t pager_page_size)
{
	vstruct_t vs;

	assert(pager_page_size == vm_page_size);

	/* keep our own reference on the control for the vstruct */
	memory_object_control_reference(control);

	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	if (vs->vs_control != MEMORY_OBJECT_CONTROL_NULL)
		Panic("bad request");

	vs->vs_control = control;
	vs_unlock(vs);

	return KERN_SUCCESS;
}
/*
 * dp_memory_object_synchronize: no flushing work is performed here;
 * the request simply passes through the sequencing point
 * (vs_lock/vs_unlock) and completion is reported immediately.
 * "flags" is accepted but unused.
 *
 * NOTE(review): vs->vs_control is read after vs_unlock(); presumably
 * a synchronize cannot race termination -- confirm against callers.
 */
kern_return_t
dp_memory_object_synchronize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t length,
	vm_sync_t flags)
{
	vstruct_t vs;

	vs_lookup(mem_obj, vs);
	vs_lock(vs);
	vs_unlock(vs);

	memory_object_synchronize_completed(vs->vs_control, offset, length);

	return KERN_SUCCESS;
}
/*
 * dp_memory_object_unmap: not supported by the default pager --
 * always panics.  The return statement exists only to satisfy the
 * interface signature.
 */
kern_return_t
dp_memory_object_unmap(
	memory_object_t mem_obj)
{
	panic("dp_memory_object_unmap");

	return KERN_FAILURE;
}
/*
 * dp_memory_object_terminate: the kernel has finished using this
 * memory object.  Wait for in-flight reads and writes (and async
 * work, via vs_wait_for_writers) to drain, detach the control port
 * so a future init can succeed, wake any remaining waiters, and
 * drop the reference taken in dp_memory_object_init.
 *
 * Fix: removed the unused local "kern_return_t kr;".
 */
kern_return_t
dp_memory_object_terminate(
	memory_object_t mem_obj)
{
	memory_object_control_t control;
	vstruct_t vs;

	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	/* quiesce all traffic on the object before detaching */
	vs_wait_for_readers(vs);
	vs_wait_for_writers(vs);

	/*
	 * Clear the control so the vstruct is ready for a possible
	 * re-init after termination.
	 */
	control = vs->vs_control;
	vs->vs_control = MEMORY_OBJECT_CONTROL_NULL;

	/* release any threads still parked on these events */
	thread_wakeup(&vs->vs_writers);
	thread_wakeup(&vs->vs_async_pending);

	vs_unlock(vs);

	/* drop the reference taken in dp_memory_object_init */
	memory_object_control_deallocate(control);

	return KERN_SUCCESS;
}
/*
 * dp_memory_object_reference: take one more reference on the
 * vstruct backing mem_obj.  Uses vs_lookup_safe and silently
 * ignores objects that cannot be found; referencing an object whose
 * count already reached zero is a bug (assert).
 */
void
dp_memory_object_reference(
	memory_object_t mem_obj)
{
	vstruct_t vs;

	vs_lookup_safe(mem_obj, vs);
	if (vs == VSTRUCT_NULL)
		return;

	VS_LOCK(vs);
	assert(vs->vs_references > 0);
	vs->vs_references++;
	VS_UNLOCK(vs);
}
extern ipc_port_t max_pages_trigger_port;
extern int dp_pages_free;
extern int maximum_pages_free;
/*
 * dp_memory_object_deallocate: drop one reference on the vstruct
 * backing mem_obj.  When the last reference is released, the
 * vstruct is unlinked from the global list and destroyed, and the
 * low-water space-alert trigger may be fired if enough pages were
 * freed.
 */
void
dp_memory_object_deallocate(
	memory_object_t mem_obj)
{
	vstruct_t vs;
	mach_port_seqno_t seqno;
	ipc_port_t trigger;

	vs_lookup_safe(mem_obj, vs);
	if (vs == VSTRUCT_NULL)
		return;

	VS_LOCK(vs);
	if (--vs->vs_references > 0) {
		/* not the last reference -- nothing more to do */
		VS_UNLOCK(vs);
		return;
	}

	/*
	 * Last reference.  Wait for our turn in the sequencing
	 * protocol; this repeats vs_lock()'s ticket loop inline
	 * because VS_LOCK is already held here.
	 */
	seqno = vs->vs_next_seqno++;
	while (vs->vs_seqno != seqno) {
		default_pager_wait_seqno++;
		vs->vs_waiting_seqno = TRUE;
		assert_wait(&vs->vs_seqno, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}

	/* drain asynchronous work before tearing down */
	vs_async_wait(vs);

	/*
	 * Acquire the list lock without ever blocking on it while
	 * holding the vstruct lock: on contention, drop VS_LOCK,
	 * wait for VSL to pass by, re-take VS_LOCK, re-drain async
	 * work, and retry the try-lock.
	 */
	while(!VSL_LOCK_TRY()) {
		VS_UNLOCK(vs);
		VSL_LOCK();
		VSL_UNLOCK();
		VS_LOCK(vs);
		vs_async_wait(vs);
	}

	/* an object that still has a control port must not be destroyed */
	if (vs->vs_control != MEMORY_OBJECT_CONTROL_NULL)
		Panic("bad request");
	VS_UNLOCK(vs);

	/*
	 * Hold off the backing-store release trigger while the
	 * vstruct is being torn down (waiters on this counter are
	 * woken when it returns to zero).
	 */
	backing_store_release_trigger_disable += 1;

	vstruct_list_delete(vs);	/* VSL lock held, see above */
	VSL_UNLOCK();

	ps_vstruct_dealloc(vs);

	VSL_LOCK();
	backing_store_release_trigger_disable -= 1;
	if(backing_store_release_trigger_disable == 0) {
		thread_wakeup((event_t)&backing_store_release_trigger_disable);
	}
	VSL_UNLOCK();

	/*
	 * Freeing backing store may have pushed dp_pages_free past
	 * the maximum; if so, consume the trigger port and send a
	 * LO_WAT_ALERT.
	 * NOTE(review): backing_store_release_trigger_disable is
	 * re-read here under PSL_LOCK rather than VSL -- verify that
	 * this is the intended synchronization.
	 */
	PSL_LOCK();
	if(max_pages_trigger_port
		&& (backing_store_release_trigger_disable == 0)
		&& (dp_pages_free > maximum_pages_free)) {
		trigger = max_pages_trigger_port;
		max_pages_trigger_port = NULL;
	} else
		trigger = IP_NULL;
	PSL_UNLOCK();

	if (trigger != IP_NULL) {
		default_pager_space_alert(trigger, LO_WAT_ALERT);
		ipc_port_release_send(trigger);
	}
}
/*
 * dp_memory_object_data_request: page-in of [offset, offset+length).
 *
 * Reads relax the strict seqno sequencing: if writers are active we
 * give up our sequencing slot (vs_unlock) and instead wait under
 * the raw vstruct lock for all writers and async work to drain
 * before starting the read.  Offset and length must be
 * page-aligned.
 */
kern_return_t
dp_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t length,
	vm_prot_t protection_required)
{
	vstruct_t vs;

	GSTAT(global_stats.gs_pagein_calls++);

	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	if (vs->vs_writers != 0) {
		/*
		 * Drop out of the sequencing protocol -- we cannot
		 * hold a seqno ticket across the sleeps below --
		 * then wait for every writer (and async work) to
		 * finish under the raw lock.
		 */
		vs_unlock(vs);
		VS_LOCK(vs);
		while (vs->vs_writers != 0) {
			default_pager_wait_write++;
			vs->vs_waiting_write = TRUE;
			assert_wait(&vs->vs_writers, THREAD_UNINT);
			VS_UNLOCK(vs);
			thread_block(THREAD_CONTINUE_NULL);
			VS_LOCK(vs);
			vs_async_wait(vs);
		}
		/* the object may have terminated while we slept */
		if(vs->vs_control == MEMORY_OBJECT_CONTROL_NULL) {
			VS_UNLOCK(vs);
			return KERN_FAILURE;
		}
		vs_start_read(vs);
		VS_UNLOCK(vs);
	} else {
		vs_start_read(vs);
		vs_unlock(vs);
	}

	/* request must be page-aligned in both offset and length */
	if ((offset & vm_page_mask) != 0 || (length & vm_page_mask) != 0)
		Panic("bad alignment");

	pvs_cluster_read(vs, (vm_offset_t)offset, length);

	vs_finish_read(vs);

	return KERN_SUCCESS;
}
/*
 * dp_memory_object_data_initialize: first-time supply of data pages
 * for the object; pushed out as a cluster write.  Writer accounting
 * (vs_start_write / vs_finish_write) brackets the write so waiters
 * see it as in-progress.
 */
kern_return_t
dp_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t size)
{
	vstruct_t vs;

	DEBUG(DEBUG_MO_EXTERNAL,
	      ("mem_obj=0x%x,offset=0x%x,cnt=0x%x\n",
	       (int)mem_obj, (int)offset, (int)size));
	GSTAT(global_stats.gs_pages_init += atop(size));

	vs_lookup(mem_obj, vs);
	vs_lock(vs);
	vs_start_write(vs);
	vs_unlock(vs);

	vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);

	vs_finish_write(vs);

	return KERN_SUCCESS;
}
/*
 * dp_memory_object_data_unlock: the default pager never issues lock
 * requests, so receiving an unlock is a protocol violation (Panic).
 * The return exists only to satisfy the interface signature.
 */
kern_return_t
dp_memory_object_data_unlock(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t size,
	vm_prot_t desired_access)
{
	Panic("dp_memory_object_data_unlock: illegal");
	return KERN_FAILURE;
}
/*
 * dp_memory_object_data_return: pageout of [offset, offset+size).
 *
 * If the vstruct is momentarily unavailable -- lock contended, out
 * of sequence, readers active, or a transfer pending -- the request
 * is not queued: a super-cluster UPL is built and immediately
 * aborted, handing the pages back to the VM system to be resent
 * later.  Otherwise the data is pushed via vs_cluster_write, with
 * async-pending accounting so vs_async_wait() callers can drain us.
 */
kern_return_t
dp_memory_object_data_return(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t size,
	boolean_t dirty,
	boolean_t kernel_copy)
{
	vstruct_t vs;

	DEBUG(DEBUG_MO_EXTERNAL,
	      ("mem_obj=0x%x,offset=0x%x,size=0x%x\n",
	       (int)mem_obj, (int)offset, (int)size));
	GSTAT(global_stats.gs_pageout_calls++);

	vs_lookup(mem_obj, vs);

	default_pager_total++;

	/*
	 * Lock contended: bounce the pages back instead of blocking.
	 * NOTE(review): vs_control is read without the vstruct lock
	 * in this path -- presumably safe against termination here;
	 * confirm.
	 */
	if(!VS_TRY_LOCK(vs)) {
		upl_t upl;
		int page_list_count = 0;

		memory_object_super_upl_request(vs->vs_control,
					(memory_object_offset_t)offset,
					size, size,
					&upl, NULL, &page_list_count,
					UPL_NOBLOCK | UPL_CLEAN_IN_PLACE
					| UPL_NO_SYNC | UPL_COPYOUT_FROM);
		upl_abort(upl,0);
		upl_deallocate(upl);
		return KERN_SUCCESS;
	}

	/*
	 * Out of sequence, readers in progress, or a transfer
	 * pending: put back the seqno ticket we just drew and bounce
	 * the pages back rather than blocking the pageout path.
	 */
	if ((vs->vs_seqno != vs->vs_next_seqno++)
		|| (vs->vs_readers)
		|| (vs->vs_xfer_pending)) {
		upl_t upl;
		int page_list_count = 0;

		vs->vs_next_seqno--;	/* revert the ticket */
		VS_UNLOCK(vs);

		memory_object_super_upl_request(vs->vs_control,
					(memory_object_offset_t)offset,
					size, size,
					&upl, NULL, &page_list_count,
					UPL_NOBLOCK | UPL_CLEAN_IN_PLACE
					| UPL_NO_SYNC | UPL_COPYOUT_FROM);
		upl_abort(upl,0);
		upl_deallocate(upl);
		return KERN_SUCCESS;
	}

	/* writes must be a whole number of pages */
	if ((size % vm_page_size) != 0)
		Panic("bad alignment");

	vs_start_write(vs);
	vs->vs_async_pending += 1;	/* count us as async work in flight */
	vs_unlock(vs);

	vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);

	vs_finish_write(vs);

	VS_LOCK(vs);
	vs->vs_async_pending -= 1;
	if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
		vs->vs_waiting_async = FALSE;
		VS_UNLOCK(vs);
		/* release threads blocked in vs_async_wait() */
		thread_wakeup(&vs->vs_async_pending);
	} else {
		VS_UNLOCK(vs);
	}

	return KERN_SUCCESS;
}
/*
 * default_pager_memory_object_create: create a new anonymous memory
 * object of new_size bytes backed by a fresh vstruct, link it onto
 * the global list, and hand it back as a memory_object_t.  Only the
 * default pager itself may be the target (asserted).
 */
kern_return_t
default_pager_memory_object_create(
	memory_object_default_t dmm,
	vm_size_t new_size,
	memory_object_t *new_mem_obj)
{
	vstruct_t vs;

	assert(dmm == default_pager_object);

	vs = vs_object_create(new_size);
	if (vs == VSTRUCT_NULL)
		return KERN_RESOURCE_SHORTAGE;

	vs->vs_next_seqno = 0;

	/* tag the object as vstruct-backed with memory-object kobject type */
	vs->vs_mem_obj = ISVS;
	vs->vs_mem_obj_ikot = IKOT_MEMORY_OBJECT;

	vstruct_list_insert(vs);

	*new_mem_obj = vs_to_mem_obj(vs);
	return KERN_SUCCESS;
}
/*
 * default_pager_object_create: create an anonymous memory object of
 * "size" bytes on behalf of a client of the default pager port and
 * return it through mem_objp.  Fails with KERN_INVALID_ARGUMENT if
 * called on anything but the default pager itself.
 *
 * Fix: removed the unused locals "result" and "alias_struct".
 */
kern_return_t
default_pager_object_create(
	default_pager_t pager,
	vm_size_t size,
	memory_object_t *mem_objp)
{
	vstruct_t vs;

	if (pager != default_pager_object)
		return KERN_INVALID_ARGUMENT;

	vs = vs_object_create(size);
	if (vs == VSTRUCT_NULL)
		return KERN_RESOURCE_SHORTAGE;

	/* tag the object as vstruct-backed and publish it on the list */
	vs->vs_mem_obj = ISVS;
	vstruct_list_insert(vs);

	*mem_objp = vs_to_mem_obj(vs);
	return KERN_SUCCESS;
}
/*
 * default_pager_objects: fill caller-supplied out-of-line arrays
 * with one (size, referenced pager port) pair per object the
 * default pager backs, growing the buffers if they are too small.
 *
 * Fixes:
 *  - VS_LOCK(entry) was taken twice in a row, which self-deadlocks
 *    on a non-recursive lock; the duplicate acquisition is removed.
 *  - "pager" was stored into pagers[] without ever being assigned
 *    (undefined behavior); it is now set from vs_to_mem_obj(entry)
 *    before the reference is taken.
 *
 * NOTE(review): the vm_map_wire() result is unchecked and "osize"
 * is zeroed immediately after being computed -- left as-is.
 */
kern_return_t
default_pager_objects(
	default_pager_t pager,
	default_pager_object_array_t *objectsp,
	mach_msg_type_number_t *ocountp,
	memory_object_array_t *pagersp,
	mach_msg_type_number_t *pcountp)
{
	vm_offset_t oaddr = 0;		/* memory for objects[] */
	vm_size_t osize = 0;		/* current size of that memory */
	default_pager_object_t * objects;
	unsigned int opotential;

	vm_offset_t paddr = 0;		/* memory for pagers[] */
	vm_size_t psize = 0;		/* current size of that memory */
	memory_object_t * pagers;
	unsigned int ppotential;

	unsigned int actual;
	unsigned int num_objects;
	kern_return_t kr;
	vstruct_t entry;

	/* map the caller's out-of-line object buffer into the kernel */
	kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&objects,
			    (vm_map_copy_t) *objectsp);
	if (kr != KERN_SUCCESS)
		return kr;

	osize = round_page(*ocountp * sizeof * objects);
	kr = vm_map_wire(ipc_kernel_map,
			 trunc_page((vm_offset_t)objects),
			 round_page(((vm_offset_t)objects) + osize),
			 VM_PROT_READ|VM_PROT_WRITE, FALSE);
	osize=0;

	*objectsp = objects;

	num_objects = 0;
	opotential = *ocountp;

	pagers = (memory_object_t *) *pagersp;
	ppotential = *pcountp;

	/* snapshot how many entries we will need */
	VSL_LOCK();
	actual = vstruct_list.vsl_count;
	VSL_UNLOCK();

	if (opotential < actual) {
		/* caller's object buffer is too small: allocate a bigger one */
		vm_offset_t newaddr;
		vm_size_t newsize;

		newsize = 2 * round_page(actual * sizeof * objects);

		kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE);
		if (kr != KERN_SUCCESS)
			goto nomemory;

		oaddr = newaddr;
		osize = newsize;
		opotential = osize / sizeof * objects;
		objects = (default_pager_object_t *)oaddr;
	}

	if (ppotential < actual) {
		/* caller's pager buffer is too small: allocate a bigger one */
		vm_offset_t newaddr;
		vm_size_t newsize;

		newsize = 2 * round_page(actual * sizeof * pagers);

		kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE);
		if (kr != KERN_SUCCESS)
			goto nomemory;

		paddr = newaddr;
		psize = newsize;
		ppotential = psize / sizeof * pagers;
		pagers = (memory_object_t *)paddr;
	}

	/* walk the global list, filling in both arrays in lockstep */
	VSL_LOCK();

	num_objects = 0;
	queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t, vs_links) {

		memory_object_t pager;
		vm_size_t size;

		if ((num_objects >= opotential) ||
		    (num_objects >= ppotential)) {
			/* list grew past our snapshot -- stop here */
			break;
		}

		/* never block on the map lock while holding VSL */
		if (!VS_MAP_TRY_LOCK(entry))
			goto not_this_one;

		size = ps_vstruct_allocated_size(entry);
		VS_MAP_UNLOCK(entry);

		/* skip entries already being terminated */
		VS_LOCK(entry);
		if (entry->vs_references == 0) {
			VS_UNLOCK(entry);
			goto not_this_one;
		}

		/* take a reference so the port survives the copy-out */
		pager = vs_to_mem_obj(entry);
		dp_memory_object_reference(pager);
		VS_UNLOCK(entry);

		objects[num_objects].dpo_object = (vm_offset_t) entry;
		objects[num_objects].dpo_size = size;
		pagers [num_objects++] = pager;
		continue;

	not_this_one:
		/*
		 * Keep the arrays aligned: emit an empty slot for an
		 * entry we could not examine.
		 */
		objects[num_objects].dpo_object = (vm_offset_t) 0;
		objects[num_objects].dpo_size = 0;
		pagers[num_objects++] = MEMORY_OBJECT_NULL;
	}

	VSL_UNLOCK();

	/* return the object array, trimming any over-allocation */
	if (objects == *objectsp) {
		*ocountp = num_objects;
	} else if (actual == 0) {
		(void) vm_deallocate(kernel_map, oaddr, osize);
		*ocountp = 0;
	} else {
		vm_offset_t used;

		used = round_page(actual * sizeof * objects);

		if (used != osize)
			(void) vm_deallocate(kernel_map,
					     oaddr + used, osize - used);

		*objectsp = objects;
		*ocountp = num_objects;
	}

	/* likewise for the pager array */
	if (pagers == (memory_object_t *)*pagersp) {
		*pcountp = num_objects;
	} else if (actual == 0) {
		(void) vm_deallocate(kernel_map, paddr, psize);
		*pcountp = 0;
	} else {
		vm_offset_t used;

		used = round_page(actual * sizeof * pagers);

		if (used != psize)
			(void) vm_deallocate(kernel_map,
					     paddr + used, psize - used);

		*pagersp = (memory_object_array_t)pagers;
		*pcountp = num_objects;
	}

	/* hand the object array back out-of-line */
	(void) vm_map_unwire(kernel_map, (vm_offset_t)objects,
			     *ocountp + (vm_offset_t)objects, FALSE);
	(void) vm_map_copyin(kernel_map, (vm_offset_t)objects,
			     *ocountp, TRUE, (vm_map_copy_t *)objectsp);

	return KERN_SUCCESS;

nomemory:
	{
		/* undo the references taken above */
		register int i;
		for (i = 0; i < num_objects; i++)
			if (pagers[i] != MEMORY_OBJECT_NULL)
				memory_object_deallocate(pagers[i]);
	}

	if (objects != *objectsp)
		(void) vm_deallocate(kernel_map, oaddr, osize);

	if (pagers != (memory_object_t *)*pagersp)
		(void) vm_deallocate(kernel_map, paddr, psize);

	return KERN_RESOURCE_SHORTAGE;
}
/*
 * default_pager_object_pages: return the list of allocated pages of
 * one memory object, growing the result buffer until it is large
 * enough.  The object is located by scanning the global vstruct
 * list; if its map lock is busy we back off for a tick and rescan
 * from the top.
 */
kern_return_t
default_pager_object_pages(
	default_pager_t pager,
	memory_object_t object,
	default_pager_page_array_t *pagesp,
	mach_msg_type_number_t *countp)
{
	vm_offset_t addr;		/* memory for page offsets */
	vm_size_t size = 0;		/* current size of that memory */
	default_pager_page_t * pages;
	unsigned int potential, actual;
	kern_return_t kr;

	if (pager != default_pager_object)
		return KERN_INVALID_ARGUMENT;

	/* map the caller's out-of-line buffer into the kernel */
	kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&pages,
			    (vm_map_copy_t) *pagesp);
	if (kr != KERN_SUCCESS)
		return kr;

	/* NOTE(review): vm_map_wire result unchecked; size zeroed below */
	size = round_page(*countp * sizeof * pages);
	kr = vm_map_wire(ipc_kernel_map,
			 trunc_page((vm_offset_t)pages),
			 round_page(((vm_offset_t)pages) + size),
			 VM_PROT_READ|VM_PROT_WRITE, FALSE);
	size=0;

	*pagesp = pages;
	addr = (vm_offset_t)pages;
	potential = *countp;

	for (;;) {
		vstruct_t entry;

		/* find the vstruct whose port matches "object" */
		VSL_LOCK();
		queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t,
			      vs_links) {
			VS_LOCK(entry);
			if (vs_to_mem_obj(entry) == object) {
				/* VS lock on "entry" stays held past the goto */
				VSL_UNLOCK();
				goto found_object;
			}
			VS_UNLOCK(entry);
		}
		VSL_UNLOCK();

		/* object is not a default-pager object */
		if (pages != *pagesp)
			(void) vm_deallocate(kernel_map, addr, size);
		return KERN_INVALID_ARGUMENT;

	found_object:
		if (!VS_MAP_TRY_LOCK(entry)) {
			/* map lock busy: nap one tick and rescan from the top */
			int wresult;

			VS_UNLOCK(entry);

			assert_wait_timeout( 1, THREAD_UNINT );
			wresult = thread_block(THREAD_CONTINUE_NULL);
			assert(wresult == THREAD_TIMED_OUT);
			continue;
		}

		actual = ps_vstruct_allocated_pages(entry, pages, potential);
		VS_MAP_UNLOCK(entry);
		VS_UNLOCK(entry);

		if (actual <= potential)
			break;

		/* buffer too small: grow it and retry the whole lookup */
		if (pages != *pagesp)
			(void) vm_deallocate(kernel_map, addr, size);

		size = round_page(actual * sizeof * pages);
		kr = vm_allocate(kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return kr;

		pages = (default_pager_page_t *)addr;
		potential = size / sizeof * pages;
	}

	/* return the page list, trimming any over-allocation */
	if (pages == *pagesp) {
		*countp = actual;
	} else if (actual == 0) {
		(void) vm_deallocate(kernel_map, addr, size);
		*countp = 0;
	} else {
		vm_offset_t used;

		used = round_page(actual * sizeof * pages);

		if (used != size)
			(void) vm_deallocate(kernel_map,
					     addr + used, size - used);

		*pagesp = pages;
		*countp = actual;
	}

	/* hand the buffer back out-of-line */
	(void) vm_map_unwire(kernel_map, (vm_offset_t)pages,
			     *countp + (vm_offset_t)pages, FALSE);
	(void) vm_map_copyin(kernel_map, (vm_offset_t)pages,
			     *countp, TRUE, (vm_map_copy_t *)pagesp);
	return KERN_SUCCESS;
}