#include <advisory_pageout.h>
#include <mach/std_types.h>
#include <mach/mach_types.h>
#include <mach/mig.h>
#include <mach/kern_return.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/host_priv_server.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/message.h>
#include <string.h>
#include <kern/xpr.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <kern/ipc_mig.h>
#include <kern/misc_protos.h>
#include <vm/vm_object.h>
#include <vm/vm_fault.h>
#include <vm/memory_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#if MACH_PAGEMAP
#include <vm/vm_external.h>
#endif
#include <vm/vm_protos.h>
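/*
 * The default memory manager (pager) used for anonymous memory, set via
 * host_default_memory_manager().  Access is serialized by
 * memory_manager_default_lock, which is also used to wait for a manager
 * to be registered.
 */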
memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
decl_lck_mtx_data(, memory_manager_default_lock)
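/*
 * Decide whether a page must be returned to its pager: a dirty page under
 * any policy other than RETURN_NONE, a precious page under RETURN_ALL, or
 * any page under RETURN_ANYTHING.  Note the side effect: the page's dirty
 * bit is refreshed from the pmap's modified bit.
 */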
#define memory_object_should_return_page(m, should_return) \
(should_return != MEMORY_OBJECT_RETURN_NONE && \
(((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
(should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
typedef int memory_object_lock_result_t;
#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE 3
memory_object_lock_result_t memory_object_lock_page(
vm_page_t m,
memory_object_return_t should_return,
boolean_t should_flush,
vm_prot_t prot);
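/*
 * Routine:	memory_object_lock_page
 *
 * Description:
 *	Perform the appropriate lock operations on the given page.
 *	See the description of "memory_object_lock_request" for the
 *	meanings of the arguments.
 *
 *	Returns an indication that the operation completed, that the
 *	caller must block, or that the page must be returned to the
 *	pager or freed.
 */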
memory_object_lock_result_t
memory_object_lock_page(
vm_page_t m,
memory_object_return_t should_return,
boolean_t should_flush,
vm_prot_t prot)
{
XPR(XPR_MEMORY_OBJECT,
"m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
m, should_return, should_flush, prot, 0);
if (m->busy || m->cleaning) {
if (m->list_req_pending &&
should_return == MEMORY_OBJECT_RETURN_NONE &&
should_flush == TRUE) {
if (m->absent) {
if (!VM_PAGE_WIRED(m)) {
return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
} else {
return (MEMORY_OBJECT_LOCK_RESULT_DONE);
}
}
if (m->pageout || m->cleaning) {
vm_pageout_queue_steal(m, FALSE);
PAGE_WAKEUP_DONE(m);
} else {
panic("list_req_pending on page %p without absent/pageout/cleaning set\n", m);
}
} else
return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
}
if (m->absent || m->error || m->restart) {
if (m->error && should_flush && !VM_PAGE_WIRED(m)) {
return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
}
return (MEMORY_OBJECT_LOCK_RESULT_DONE);
}
assert(!m->fictitious);
if (VM_PAGE_WIRED(m)) {
if (memory_object_should_return_page(m, should_return))
return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
return (MEMORY_OBJECT_LOCK_RESULT_DONE);
}
if (should_flush) {
if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
m->dirty = TRUE;
} else {
if (prot != VM_PROT_NO_CHANGE)
pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
}
if (memory_object_should_return_page(m, should_return)) {
return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
}
if (should_flush)
return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
return (MEMORY_OBJECT_LOCK_RESULT_DONE);
}
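/*
 * Routine:	memory_object_lock_request [user interface]
 *
 * Description:
 *	Control use of the data associated with the given memory object.
 *	For each page in the given range, in order: restrict access to
 *	the page (disallow the forms specified by "prot"), return data
 *	to the manager as directed by "should_return", and flush or free
 *	the cached copy if MEMORY_OBJECT_DATA_FLUSH is set in "flags".
 */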
kern_return_t
memory_object_lock_request(
memory_object_control_t control,
memory_object_offset_t offset,
memory_object_size_t size,
memory_object_offset_t * resid_offset,
int * io_errno,
memory_object_return_t should_return,
int flags,
vm_prot_t prot)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
return (KERN_INVALID_ARGUMENT);
size = round_page_64(size);
vm_object_lock(object);
vm_object_paging_begin(object);
if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
if ((should_return != MEMORY_OBJECT_RETURN_NONE) || offset || object->copy) {
flags &= ~MEMORY_OBJECT_DATA_FLUSH_ALL;
flags |= MEMORY_OBJECT_DATA_FLUSH;
}
}
offset -= object->paging_offset;
if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL)
vm_object_reap_pages(object, REAP_DATA_FLUSH);
else
(void)vm_object_update(object, offset, size, resid_offset,
io_errno, should_return, flags, prot);
vm_object_paging_end(object);
vm_object_unlock(object);
return (KERN_SUCCESS);
}
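/*
 * Routine:	memory_object_release_name [user interface]
 *
 * Description:
 *	Enforce the "name" reference semantics on the memory object:
 *	the caller gives up a name reference on the underlying object.
 *	The real work happens in vm_object_release_name().
 */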
kern_return_t
memory_object_release_name(
memory_object_control_t control,
int flags)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
return vm_object_release_name(object, flags);
}
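/*
 * Routine:	memory_object_destroy [user interface]
 *
 * Description:
 *	Shut down a memory object, despite the presence of address map
 *	(or other) references to the vm_object.
 */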
kern_return_t
memory_object_destroy(
memory_object_control_t control,
kern_return_t reason)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
return (vm_object_destroy(object, reason));
}
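/*
 * Routine:	vm_object_sync
 *
 * Kernel-internal function to sync out pages in a given range within an
 * object to its memory manager.  Much the same as
 * memory_object_lock_request(), except that page protection is never
 * changed.
 */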
boolean_t
vm_object_sync(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
boolean_t should_flush,
boolean_t should_return,
boolean_t should_iosync)
{
boolean_t rv;
int flags;
XPR(XPR_VM_OBJECT,
"vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
object, offset, size, should_flush, should_return);
vm_object_lock(object);
vm_object_paging_begin(object);
if (should_flush)
flags = MEMORY_OBJECT_DATA_FLUSH;
else
flags = 0;
if (should_iosync)
flags |= MEMORY_OBJECT_IO_SYNC;
rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
(should_return) ?
MEMORY_OBJECT_RETURN_ALL :
MEMORY_OBJECT_RETURN_NONE,
flags,
VM_PROT_NO_CHANGE);
vm_object_paging_end(object);
vm_object_unlock(object);
return rv;
}
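/*
 * Ask the pager to write back "data_cnt" bytes starting at offset "po".
 * The object lock is dropped around the (possibly blocking)
 * memory_object_data_return() call; a paging reference keeps the object
 * stable across the unlock.  Objects containing slid pages are refused.
 */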
#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync) \
MACRO_BEGIN \
\
int upl_flags; \
memory_object_t pager; \
\
if (object == slide_info.slide_object) { \
panic("Objects with slid pages not allowed\n"); \
} \
\
if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) { \
vm_object_paging_begin(object); \
vm_object_unlock(object); \
\
if (iosync) \
upl_flags = UPL_MSYNC | UPL_IOSYNC; \
else \
upl_flags = UPL_MSYNC; \
\
(void) memory_object_data_return(pager, \
po, \
(memory_object_cluster_size_t)data_cnt, \
ro, \
ioerr, \
FALSE, \
FALSE, \
upl_flags); \
\
vm_object_lock(object); \
vm_object_paging_end(object); \
} \
MACRO_END
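/*
 * Sweep one extent of the object, applying memory_object_lock_page() to
 * each resident page.  Contiguous "must return" pages are batched (up to
 * MAX_UPL_TRANSFER pages) into a single data-return request, and page
 * state changes are queued in dw_array and applied in bulk by
 * vm_page_do_delayed_work().
 */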
static int
vm_object_update_extent(
vm_object_t object,
vm_object_offset_t offset,
vm_object_offset_t offset_end,
vm_object_offset_t *offset_resid,
int *io_errno,
boolean_t should_flush,
memory_object_return_t should_return,
boolean_t should_iosync,
vm_prot_t prot)
{
vm_page_t m;
int retval = 0;
vm_object_offset_t paging_offset = 0;
vm_object_offset_t next_offset = offset;
memory_object_lock_result_t page_lock_result;
memory_object_cluster_size_t data_cnt = 0;
struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
struct vm_page_delayed_work *dwp;
int dw_count;
int dw_limit;
dwp = &dw_array[0];
dw_count = 0;
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
for (;
offset < offset_end && object->resident_page_count;
offset += PAGE_SIZE_64) {
if (data_cnt) {
if ((data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) || (next_offset != offset)) {
if (dw_count) {
vm_page_do_delayed_work(object, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
paging_offset, offset_resid, io_errno, should_iosync);
data_cnt = 0;
}
}
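/*
 * This while loop normally executes at most once per offset; "continue"
 * re-examines the same page after batched data has been pushed to the
 * pager or after sleeping on a busy page.
 */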
while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
dwp->dw_mask = 0;
page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
if (dw_count) {
vm_page_do_delayed_work(object, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
paging_offset, offset_resid, io_errno, should_iosync);
data_cnt = 0;
continue;
}
switch (page_lock_result) {
case MEMORY_OBJECT_LOCK_RESULT_DONE:
break;
case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
dwp->dw_mask |= DW_vm_page_free;
break;
case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
PAGE_SLEEP(object, m, THREAD_UNINT);
continue;
case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
if (data_cnt == 0)
paging_offset = offset;
data_cnt += PAGE_SIZE;
next_offset = offset + PAGE_SIZE_64;
m->list_req_pending = TRUE;
m->cleaning = TRUE;
if (!VM_PAGE_WIRED(m)) {
if (should_flush) {
m->busy = TRUE;
m->pageout = TRUE;
dwp->dw_mask |= DW_vm_page_wire;
}
}
retval = 1;
break;
}
if (dwp->dw_mask) {
VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
if (dw_count >= dw_limit) {
vm_page_do_delayed_work(object, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
break;
}
}
if (dw_count)
vm_page_do_delayed_work(object, &dw_array[0], dw_count);
if (data_cnt) {
LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
paging_offset, offset_resid, io_errno, should_iosync);
}
return (retval);
}
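/*
 * Routine:	vm_object_update
 *
 * Description:
 *	Work horse for memory_object_lock_request() and vm_object_sync().
 *	If the object is shadowed by a copy object, the affected range is
 *	first pushed to the copy so copy-on-write semantics survive the
 *	flush.  The resident pages are then grouped into at most
 *	MAX_EXTENTS extents and handed to vm_object_update_extent().
 *
 *	Called with the object locked; returns with it locked.  Despite
 *	the kern_return_t signature, the result is a boolean: TRUE if any
 *	data was returned to the pager.
 */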
kern_return_t
vm_object_update(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
vm_object_offset_t *resid_offset,
int *io_errno,
memory_object_return_t should_return,
int flags,
vm_prot_t protection)
{
vm_object_t copy_object = VM_OBJECT_NULL;
boolean_t data_returned = FALSE;
boolean_t update_cow;
boolean_t should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
boolean_t should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
vm_fault_return_t result;
int num_of_extents;
int n;
#define MAX_EXTENTS 8
#define EXTENT_SIZE (1024 * 1024 * 256)
#define RESIDENT_LIMIT (1024 * 32)
struct extent {
vm_object_offset_t e_base;
vm_object_offset_t e_min;
vm_object_offset_t e_max;
} extents[MAX_EXTENTS];
update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
&& (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
!(flags & MEMORY_OBJECT_DATA_PURGE)))
|| (flags & MEMORY_OBJECT_COPY_SYNC);
if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) {
int collisions = 0;
while ((copy_object = object->copy) != VM_OBJECT_NULL) {
if (vm_object_lock_try(copy_object)) {
vm_object_unlock(object);
vm_object_reference_locked(copy_object);
break;
}
vm_object_unlock(object);
collisions++;
mutex_pause(collisions);
vm_object_lock(object);
}
}
if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
vm_map_size_t i;
vm_map_size_t copy_size;
vm_map_offset_t copy_offset;
vm_prot_t prot;
vm_page_t page;
vm_page_t top_page;
kern_return_t error = 0;
struct vm_object_fault_info fault_info;
if (copy_object != VM_OBJECT_NULL) {
copy_offset = (offset >= copy_object->vo_shadow_offset) ?
(vm_map_offset_t)(offset - copy_object->vo_shadow_offset) :
(vm_map_offset_t) 0;
if (copy_offset > copy_object->vo_size)
copy_offset = copy_object->vo_size;
if (offset >= copy_object->vo_shadow_offset) {
copy_size = size;
} else if (size >= copy_object->vo_shadow_offset - offset) {
copy_size = size - (copy_object->vo_shadow_offset - offset);
} else {
copy_size = 0;
}
if (copy_offset + copy_size > copy_object->vo_size) {
if (copy_object->vo_size >= copy_offset) {
copy_size = copy_object->vo_size - copy_offset;
} else {
copy_size = 0;
}
}
copy_size += copy_offset; /* copy_size is now the end offset within copy_object */
} else {
copy_object = object;
copy_size = offset + size;
copy_offset = offset;
}
fault_info.interruptible = THREAD_UNINT;
fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
fault_info.user_tag = 0;
fault_info.lo_offset = copy_offset;
fault_info.hi_offset = copy_size;
fault_info.no_cache = FALSE;
fault_info.stealth = TRUE;
fault_info.io_sync = FALSE;
fault_info.cs_bypass = FALSE;
fault_info.mark_zf_absent = FALSE;
vm_object_paging_begin(copy_object);
for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
RETRY_COW_OF_LOCK_REQUEST:
fault_info.cluster_size = (vm_size_t) (copy_size - i);
assert(fault_info.cluster_size == copy_size - i);
prot = VM_PROT_WRITE|VM_PROT_READ;
result = vm_fault_page(copy_object, i,
VM_PROT_WRITE|VM_PROT_READ,
FALSE,
&prot,
&page,
&top_page,
(int *)0,
&error,
FALSE,
FALSE, &fault_info);
switch (result) {
case VM_FAULT_SUCCESS:
if (top_page) {
vm_fault_cleanup(
page->object, top_page);
vm_object_lock(copy_object);
vm_object_paging_begin(copy_object);
}
if (!page->active &&
!page->inactive &&
!page->throttled) {
vm_page_lockspin_queues();
if (!page->active &&
!page->inactive &&
!page->throttled)
vm_page_deactivate(page);
vm_page_unlock_queues();
}
PAGE_WAKEUP_DONE(page);
break;
case VM_FAULT_RETRY:
prot = VM_PROT_WRITE|VM_PROT_READ;
vm_object_lock(copy_object);
vm_object_paging_begin(copy_object);
goto RETRY_COW_OF_LOCK_REQUEST;
case VM_FAULT_INTERRUPTED:
prot = VM_PROT_WRITE|VM_PROT_READ;
vm_object_lock(copy_object);
vm_object_paging_begin(copy_object);
goto RETRY_COW_OF_LOCK_REQUEST;
case VM_FAULT_MEMORY_SHORTAGE:
VM_PAGE_WAIT();
prot = VM_PROT_WRITE|VM_PROT_READ;
vm_object_lock(copy_object);
vm_object_paging_begin(copy_object);
goto RETRY_COW_OF_LOCK_REQUEST;
case VM_FAULT_SUCCESS_NO_VM_PAGE:
vm_object_paging_end(copy_object);
vm_object_unlock(copy_object);
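/* success but no VM page: fall through and treat it as a memory error */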
case VM_FAULT_MEMORY_ERROR:
if (object != copy_object)
vm_object_deallocate(copy_object);
vm_object_lock(object);
goto BYPASS_COW_COPYIN;
default:
panic("vm_object_update: unexpected error 0x%x"
" from vm_fault_page()\n", result);
}
}
vm_object_paging_end(copy_object);
}
if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
if (copy_object != VM_OBJECT_NULL && copy_object != object) {
vm_object_unlock(copy_object);
vm_object_deallocate(copy_object);
vm_object_lock(object);
}
return KERN_SUCCESS;
}
if (copy_object != VM_OBJECT_NULL && copy_object != object) {
if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
copy_object->shadow_severed = TRUE;
copy_object->shadowed = FALSE;
copy_object->shadow = NULL;
vm_object_deallocate(object);
}
vm_object_unlock(copy_object);
vm_object_deallocate(copy_object);
vm_object_lock(object);
}
BYPASS_COW_COPYIN:
if ((object->resident_page_count < RESIDENT_LIMIT) &&
(atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) {
vm_page_t next;
vm_object_offset_t start;
vm_object_offset_t end;
vm_object_size_t e_mask;
vm_page_t m;
start = offset;
end = offset + size;
num_of_extents = 0;
e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));
m = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) m)) {
next = (vm_page_t) queue_next(&m->listq);
if ((m->offset >= start) && (m->offset < end)) {
for (n = 0; n < num_of_extents; n++) {
if ((m->offset & e_mask) == extents[n].e_base) {
if (m->offset < extents[n].e_min)
extents[n].e_min = m->offset;
else if ((m->offset + (PAGE_SIZE - 1)) > extents[n].e_max)
extents[n].e_max = m->offset + (PAGE_SIZE - 1);
break;
}
}
if (n == num_of_extents) {
if (n < MAX_EXTENTS) {
extents[n].e_base = m->offset & e_mask;
extents[n].e_min = m->offset;
extents[n].e_max = m->offset + (PAGE_SIZE - 1);
num_of_extents++;
} else {
for (n = 1; n < num_of_extents; n++) {
if (extents[n].e_min < extents[0].e_min)
extents[0].e_min = extents[n].e_min;
if (extents[n].e_max > extents[0].e_max)
extents[0].e_max = extents[n].e_max;
}
extents[0].e_base = 0;
e_mask = 0;
num_of_extents = 1;
continue;
}
}
}
m = next;
}
} else {
extents[0].e_min = offset;
extents[0].e_max = offset + (size - 1);
num_of_extents = 1;
}
for (n = 0; n < num_of_extents; n++) {
if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
should_flush, should_return, should_iosync, protection))
data_returned = TRUE;
}
return (data_returned);
}
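/*
 * Routine:	memory_object_synchronize_completed [user interface]
 *
 * Description:
 *	Tell the kernel that data previously synchronized via
 *	memory_object_synchronize() has been queued or placed on backing
 *	storage, and wake the thread waiting on the matching msync
 *	request.  There may be multiple synchronize requests outstanding
 *	for a given object, but they do not overlap.
 */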
kern_return_t
memory_object_synchronize_completed(
memory_object_control_t control,
memory_object_offset_t offset,
memory_object_size_t length)
{
vm_object_t object;
msync_req_t msr;
object = memory_object_control_to_vm_object(control);
XPR(XPR_MEMORY_OBJECT,
"m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
object, offset, length, 0, 0);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
vm_object_lock(object);
queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
if (msr->offset == offset && msr->length == length) {
queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
break;
}
}
if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
vm_object_unlock(object);
return KERN_INVALID_ARGUMENT;
}
msr_lock(msr);
vm_object_unlock(object);
msr->flag = VM_MSYNC_DONE;
msr_unlock(msr);
thread_wakeup((event_t) msr);
return KERN_SUCCESS;
}
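/*
 * Set cacheability, copy strategy and related attributes on a pager-
 * backed object.  The first time attributes are established, the object
 * is marked pager-ready and any waiters are woken.
 */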
static kern_return_t
vm_object_set_attributes_common(
vm_object_t object,
boolean_t may_cache,
memory_object_copy_strategy_t copy_strategy,
boolean_t temporary,
boolean_t silent_overwrite,
boolean_t advisory_pageout)
{
boolean_t object_became_ready;
XPR(XPR_MEMORY_OBJECT,
"m_o_set_attr_com, object 0x%X flg %x strat %d\n",
object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
switch(copy_strategy) {
case MEMORY_OBJECT_COPY_NONE:
case MEMORY_OBJECT_COPY_DELAY:
break;
default:
return(KERN_INVALID_ARGUMENT);
}
#if !ADVISORY_PAGEOUT
if (silent_overwrite || advisory_pageout)
return(KERN_INVALID_ARGUMENT);
#endif
if (may_cache)
may_cache = TRUE;
if (temporary)
temporary = TRUE;
vm_object_lock(object);
assert(!object->internal);
object_became_ready = !object->pager_ready;
object->copy_strategy = copy_strategy;
object->can_persist = may_cache;
object->temporary = temporary;
object->silent_overwrite = silent_overwrite;
object->advisory_pageout = advisory_pageout;
if (object_became_ready) {
object->pager_ready = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
}
vm_object_unlock(object);
return(KERN_SUCCESS);
}
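/*
 * Routine:	memory_object_change_attributes [user interface]
 *
 * Description:
 *	Decode the caller-supplied attribute structure (several legacy
 *	flavors are accepted) and apply the result through
 *	vm_object_set_attributes_common().
 */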
kern_return_t
memory_object_change_attributes(
memory_object_control_t control,
memory_object_flavor_t flavor,
memory_object_info_t attributes,
mach_msg_type_number_t count)
{
vm_object_t object;
kern_return_t result = KERN_SUCCESS;
boolean_t temporary;
boolean_t may_cache;
boolean_t invalidate;
memory_object_copy_strategy_t copy_strategy;
boolean_t silent_overwrite;
boolean_t advisory_pageout;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
vm_object_lock(object);
temporary = object->temporary;
may_cache = object->can_persist;
copy_strategy = object->copy_strategy;
silent_overwrite = object->silent_overwrite;
advisory_pageout = object->advisory_pageout;
#if notyet
invalidate = object->invalidate;
#endif
vm_object_unlock(object);
switch (flavor) {
case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
{
old_memory_object_behave_info_t behave;
if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
result = KERN_INVALID_ARGUMENT;
break;
}
behave = (old_memory_object_behave_info_t) attributes;
temporary = behave->temporary;
invalidate = behave->invalidate;
copy_strategy = behave->copy_strategy;
break;
}
case MEMORY_OBJECT_BEHAVIOR_INFO:
{
memory_object_behave_info_t behave;
if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
result = KERN_INVALID_ARGUMENT;
break;
}
behave = (memory_object_behave_info_t) attributes;
temporary = behave->temporary;
invalidate = behave->invalidate;
copy_strategy = behave->copy_strategy;
silent_overwrite = behave->silent_overwrite;
advisory_pageout = behave->advisory_pageout;
break;
}
case MEMORY_OBJECT_PERFORMANCE_INFO:
{
memory_object_perf_info_t perf;
if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
result = KERN_INVALID_ARGUMENT;
break;
}
perf = (memory_object_perf_info_t) attributes;
may_cache = perf->may_cache;
break;
}
case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
{
old_memory_object_attr_info_t attr;
if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
result = KERN_INVALID_ARGUMENT;
break;
}
attr = (old_memory_object_attr_info_t) attributes;
may_cache = attr->may_cache;
copy_strategy = attr->copy_strategy;
break;
}
case MEMORY_OBJECT_ATTRIBUTE_INFO:
{
memory_object_attr_info_t attr;
if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
result = KERN_INVALID_ARGUMENT;
break;
}
attr = (memory_object_attr_info_t) attributes;
copy_strategy = attr->copy_strategy;
may_cache = attr->may_cache_object;
temporary = attr->temporary;
break;
}
default:
result = KERN_INVALID_ARGUMENT;
break;
}
if (result != KERN_SUCCESS)
return(result);
if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
copy_strategy = MEMORY_OBJECT_COPY_DELAY;
temporary = TRUE;
} else {
temporary = FALSE;
}
return (vm_object_set_attributes_common(object,
may_cache,
copy_strategy,
temporary,
silent_overwrite,
advisory_pageout));
}
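/*
 * Routine:	memory_object_get_attributes [user interface]
 *
 * Description:
 *	Report the object's current attributes in the flavor requested
 *	by the caller.
 */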
kern_return_t
memory_object_get_attributes(
memory_object_control_t control,
memory_object_flavor_t flavor,
memory_object_info_t attributes,
mach_msg_type_number_t *count)
{
kern_return_t ret = KERN_SUCCESS;
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
vm_object_lock(object);
switch (flavor) {
case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
{
old_memory_object_behave_info_t behave;
if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
ret = KERN_INVALID_ARGUMENT;
break;
}
behave = (old_memory_object_behave_info_t) attributes;
behave->copy_strategy = object->copy_strategy;
behave->temporary = object->temporary;
#if notyet
behave->invalidate = object->invalidate;
#else
behave->invalidate = FALSE;
#endif
*count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
break;
}
case MEMORY_OBJECT_BEHAVIOR_INFO:
{
memory_object_behave_info_t behave;
if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
ret = KERN_INVALID_ARGUMENT;
break;
}
behave = (memory_object_behave_info_t) attributes;
behave->copy_strategy = object->copy_strategy;
behave->temporary = object->temporary;
#if notyet
behave->invalidate = object->invalidate;
#else
behave->invalidate = FALSE;
#endif
behave->advisory_pageout = object->advisory_pageout;
behave->silent_overwrite = object->silent_overwrite;
*count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
break;
}
case MEMORY_OBJECT_PERFORMANCE_INFO:
{
memory_object_perf_info_t perf;
if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
ret = KERN_INVALID_ARGUMENT;
break;
}
perf = (memory_object_perf_info_t) attributes;
perf->cluster_size = PAGE_SIZE;
perf->may_cache = object->can_persist;
*count = MEMORY_OBJECT_PERF_INFO_COUNT;
break;
}
case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
{
old_memory_object_attr_info_t attr;
if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
ret = KERN_INVALID_ARGUMENT;
break;
}
attr = (old_memory_object_attr_info_t) attributes;
attr->may_cache = object->can_persist;
attr->copy_strategy = object->copy_strategy;
*count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
break;
}
case MEMORY_OBJECT_ATTRIBUTE_INFO:
{
memory_object_attr_info_t attr;
if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
ret = KERN_INVALID_ARGUMENT;
break;
}
attr = (memory_object_attr_info_t) attributes;
attr->copy_strategy = object->copy_strategy;
attr->cluster_size = PAGE_SIZE;
attr->may_cache_object = object->can_persist;
attr->temporary = object->temporary;
*count = MEMORY_OBJECT_ATTR_INFO_COUNT;
break;
}
default:
ret = KERN_INVALID_ARGUMENT;
break;
}
vm_object_unlock(object);
return(ret);
}
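/*
 * Routine:	memory_object_iopl_request [interface]
 *
 * Description:
 *	Create an I/O UPL against the VM object named by either a named
 *	memory entry port or a memory object control port, after
 *	validating the caller's flags and access rights.
 */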
kern_return_t
memory_object_iopl_request(
ipc_port_t port,
memory_object_offset_t offset,
upl_size_t *upl_size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
int *flags)
{
vm_object_t object;
kern_return_t ret;
int caller_flags;
caller_flags = *flags;
if (caller_flags & ~UPL_VALID_FLAGS) {
return KERN_INVALID_VALUE;
}
if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
vm_named_entry_t named_entry;
named_entry = (vm_named_entry_t)port->ip_kobject;
if(*upl_size == 0) {
if(offset >= named_entry->size)
return(KERN_INVALID_RIGHT);
*upl_size = (upl_size_t)(named_entry->size - offset);
if (*upl_size != named_entry->size - offset)
return KERN_INVALID_ARGUMENT;
}
if(caller_flags & UPL_COPYOUT_FROM) {
if((named_entry->protection & VM_PROT_READ)
!= VM_PROT_READ) {
return(KERN_INVALID_RIGHT);
}
} else {
if((named_entry->protection &
(VM_PROT_READ | VM_PROT_WRITE))
!= (VM_PROT_READ | VM_PROT_WRITE)) {
return(KERN_INVALID_RIGHT);
}
}
if(named_entry->size < (offset + *upl_size))
return(KERN_INVALID_ARGUMENT);
offset = offset + named_entry->offset;
if(named_entry->is_sub_map)
return (KERN_INVALID_ARGUMENT);
named_entry_lock(named_entry);
if (named_entry->is_pager) {
object = vm_object_enter(named_entry->backing.pager,
named_entry->offset + named_entry->size,
named_entry->internal,
FALSE,
FALSE);
if (object == VM_OBJECT_NULL) {
named_entry_unlock(named_entry);
return(KERN_INVALID_OBJECT);
}
vm_object_lock(object);
vm_object_reference_locked(object);
named_entry->backing.object = object;
named_entry->is_pager = FALSE;
named_entry_unlock(named_entry);
while (!object->pager_ready) {
vm_object_wait(object,
VM_OBJECT_EVENT_PAGER_READY,
THREAD_UNINT);
vm_object_lock(object);
}
vm_object_unlock(object);
} else {
object = named_entry->backing.object;
vm_object_reference(object);
named_entry_unlock(named_entry);
}
} else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
memory_object_control_t control;
control = (memory_object_control_t) port;
if (control == NULL)
return (KERN_INVALID_ARGUMENT);
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
vm_object_reference(object);
} else {
return KERN_INVALID_ARGUMENT;
}
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
if (!object->private) {
if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
if (object->phys_contiguous) {
*flags = UPL_PHYS_CONTIG;
} else {
*flags = 0;
}
} else {
*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
}
ret = vm_object_iopl_request(object,
offset,
*upl_size,
upl_ptr,
user_page_list,
page_list_count,
caller_flags);
vm_object_deallocate(object);
return ret;
}
kern_return_t
memory_object_upl_request(
memory_object_control_t control,
memory_object_offset_t offset,
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
int cntrl_flags)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_TERMINATED);
return vm_object_upl_request(object,
offset,
size,
upl_ptr,
user_page_list,
page_list_count,
cntrl_flags);
}
kern_return_t
memory_object_super_upl_request(
memory_object_control_t control,
memory_object_offset_t offset,
upl_size_t size,
upl_size_t super_cluster,
upl_t *upl,
upl_page_info_t *user_page_list,
unsigned int *page_list_count,
int cntrl_flags)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
return vm_object_super_upl_request(object,
offset,
size,
super_cluster,
upl,
user_page_list,
page_list_count,
cntrl_flags);
}
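/*
 * Translate the pager-relative "*start" into an object offset, ask the
 * VM for the preferred page-in cluster, then translate the result back
 * into the pager's name space.
 */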
kern_return_t
memory_object_cluster_size(memory_object_control_t control, memory_object_offset_t *start,
vm_size_t *length, uint32_t *io_streaming, memory_object_fault_info_t fault_info)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL || object->paging_offset > *start)
return (KERN_INVALID_ARGUMENT);
*start -= object->paging_offset;
vm_object_cluster_size(object, (vm_object_offset_t *)start, length, (vm_object_fault_info_t)fault_info, io_streaming);
*start += object->paging_offset;
return (KERN_SUCCESS);
}
int vm_stat_discard_cleared_reply = 0;
int vm_stat_discard_cleared_unset = 0;
int vm_stat_discard_cleared_too_late = 0;
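/*
 * Routine:	host_default_memory_manager [interface]
 *
 * Purpose:
 *	Set/get the default memory manager port.  A null "new" manager
 *	simply returns a reference to the current one; otherwise the
 *	supplied naked send right is consumed, and the internal pageout
 *	mechanism is started the first time a manager is registered.
 */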
kern_return_t
host_default_memory_manager(
host_priv_t host_priv,
memory_object_default_t *default_manager,
__unused memory_object_cluster_size_t cluster_size)
{
memory_object_default_t current_manager;
memory_object_default_t new_manager;
memory_object_default_t returned_manager;
kern_return_t result = KERN_SUCCESS;
if (host_priv == HOST_PRIV_NULL)
return(KERN_INVALID_HOST);
assert(host_priv == &realhost);
new_manager = *default_manager;
lck_mtx_lock(&memory_manager_default_lock);
current_manager = memory_manager_default;
returned_manager = MEMORY_OBJECT_DEFAULT_NULL;
if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
returned_manager = current_manager;
memory_object_default_reference(returned_manager);
} else {
if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
result = vm_pageout_internal_start();
if (result != KERN_SUCCESS)
goto out;
}
returned_manager = current_manager;
memory_manager_default = new_manager;
memory_object_default_reference(new_manager);
thread_wakeup((event_t) &memory_manager_default);
#ifndef CONFIG_FREEZE
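/*
 * Now that we have a default pager for anonymous memory, reactivate
 * all the throttled pages (i.e. dirty pages with no pager).
 */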
if (current_manager == MEMORY_OBJECT_DEFAULT_NULL)
{
vm_page_reactivate_all_throttled();
}
#endif
}
out:
lck_mtx_unlock(&memory_manager_default_lock);
*default_manager = returned_manager;
return(result);
}
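/*
 * Routine:	memory_manager_default_reference
 *
 * Purpose:
 *	Return a naked send right for the default memory manager,
 *	blocking until one has been registered.
 */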
__private_extern__ memory_object_default_t
memory_manager_default_reference(void)
{
memory_object_default_t current_manager;
lck_mtx_lock(&memory_manager_default_lock);
current_manager = memory_manager_default;
while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
wait_result_t res;
res = lck_mtx_sleep(&memory_manager_default_lock,
LCK_SLEEP_DEFAULT,
(event_t) &memory_manager_default,
THREAD_UNINT);
assert(res == THREAD_AWAKENED);
current_manager = memory_manager_default;
}
memory_object_default_reference(current_manager);
lck_mtx_unlock(&memory_manager_default_lock);
return current_manager;
}
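/*
 * Routine:	memory_manager_default_check
 *
 * Purpose:
 *	Check whether a default memory manager has been set up yet:
 *	KERN_SUCCESS if so, KERN_FAILURE if not.  The warning is logged
 *	only the first time no manager is found.
 */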
__private_extern__ kern_return_t
memory_manager_default_check(void)
{
memory_object_default_t current;
lck_mtx_lock(&memory_manager_default_lock);
current = memory_manager_default;
if (current == MEMORY_OBJECT_DEFAULT_NULL) {
static boolean_t logged;
boolean_t complain = !logged;
logged = TRUE;
lck_mtx_unlock(&memory_manager_default_lock);
if (complain)
printf("Warning: No default memory manager\n");
return(KERN_FAILURE);
} else {
lck_mtx_unlock(&memory_manager_default_lock);
return(KERN_SUCCESS);
}
}
__private_extern__ void
memory_manager_default_init(void)
{
memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
lck_mtx_init(&memory_manager_default_lock, &vm_object_lck_grp, &vm_object_lck_attr);
}
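/*
 * The memory_object_control operations below are thin wrappers: each
 * resolves the control to its VM object and forwards the request.
 */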
kern_return_t
memory_object_page_op(
memory_object_control_t control,
memory_object_offset_t offset,
int ops,
ppnum_t *phys_entry,
int *flags)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
return vm_object_page_op(object, offset, ops, phys_entry, flags);
}
kern_return_t
memory_object_range_op(
memory_object_control_t control,
memory_object_offset_t offset_beg,
memory_object_offset_t offset_end,
int ops,
int *range)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
return vm_object_range_op(object,
offset_beg,
offset_end,
ops,
(uint32_t *) range);
}
void
memory_object_mark_used(
memory_object_control_t control)
{
vm_object_t object;
if (control == NULL)
return;
object = memory_object_control_to_vm_object(control);
if (object != VM_OBJECT_NULL)
vm_object_cache_remove(object);
}
void
memory_object_mark_unused(
memory_object_control_t control,
__unused boolean_t rage)
{
vm_object_t object;
if (control == NULL)
return;
object = memory_object_control_to_vm_object(control);
if (object != VM_OBJECT_NULL)
vm_object_cache_add(object);
}
kern_return_t
memory_object_pages_resident(
memory_object_control_t control,
boolean_t * has_pages_resident)
{
vm_object_t object;
*has_pages_resident = FALSE;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
if (object->resident_page_count)
*has_pages_resident = TRUE;
return (KERN_SUCCESS);
}
kern_return_t
memory_object_signed(
memory_object_control_t control,
boolean_t is_signed)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return KERN_INVALID_ARGUMENT;
vm_object_lock(object);
object->code_signed = is_signed;
vm_object_unlock(object);
return KERN_SUCCESS;
}
boolean_t
memory_object_is_slid(
memory_object_control_t control)
{
vm_object_t object = VM_OBJECT_NULL;
vm_object_t slide_object = slide_info.slide_object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL)
return FALSE;
return (object == slide_object);
}
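/*
 * Zone from which memory_object_control structures are allocated,
 * set up once at bootstrap.
 */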
static zone_t mem_obj_control_zone;
__private_extern__ void
memory_object_control_bootstrap(void)
{
vm_size_t i;
i = sizeof (struct memory_object_control);
mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
zone_change(mem_obj_control_zone, Z_CALLERACCT, FALSE);
zone_change(mem_obj_control_zone, Z_NOENCRYPT, TRUE);
return;
}
__private_extern__ memory_object_control_t
memory_object_control_allocate(
vm_object_t object)
{
memory_object_control_t control;
control = (memory_object_control_t)zalloc(mem_obj_control_zone);
if (control != MEMORY_OBJECT_CONTROL_NULL) {
control->moc_object = object;
control->moc_ikot = IKOT_MEM_OBJ_CONTROL;
}
return (control);
}
__private_extern__ void
memory_object_control_collapse(
memory_object_control_t control,
vm_object_t object)
{
assert((control->moc_object != VM_OBJECT_NULL) &&
(control->moc_object != object));
control->moc_object = object;
}
__private_extern__ vm_object_t
memory_object_control_to_vm_object(
memory_object_control_t control)
{
if (control == MEMORY_OBJECT_CONTROL_NULL ||
control->moc_ikot != IKOT_MEM_OBJ_CONTROL)
return VM_OBJECT_NULL;
return (control->moc_object);
}
memory_object_control_t
convert_port_to_mo_control(
__unused mach_port_t port)
{
return MEMORY_OBJECT_CONTROL_NULL;
}
mach_port_t
convert_mo_control_to_port(
__unused memory_object_control_t control)
{
return MACH_PORT_NULL;
}
void
memory_object_control_reference(
__unused memory_object_control_t control)
{
return;
}
void
memory_object_control_deallocate(
memory_object_control_t control)
{
zfree(mem_obj_control_zone, control);
}
void
memory_object_control_disable(
memory_object_control_t control)
{
assert(control->moc_object != VM_OBJECT_NULL);
control->moc_object = VM_OBJECT_NULL;
}
void
memory_object_default_reference(
memory_object_default_t dmm)
{
ipc_port_make_send(dmm);
}
void
memory_object_default_deallocate(
memory_object_default_t dmm)
{
ipc_port_release_send(dmm);
}
memory_object_t
convert_port_to_memory_object(
__unused mach_port_t port)
{
return (MEMORY_OBJECT_NULL);
}
mach_port_t
convert_memory_object_to_port(
__unused memory_object_t object)
{
return (MACH_PORT_NULL);
}
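/*
 * The routines below dispatch through the pager's operation vector
 * (mo_pager_ops), simply forwarding their arguments to the registered
 * pager.
 */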
void memory_object_reference(
memory_object_t memory_object)
{
(memory_object->mo_pager_ops->memory_object_reference)(
memory_object);
}
void memory_object_deallocate(
memory_object_t memory_object)
{
(memory_object->mo_pager_ops->memory_object_deallocate)(
memory_object);
}
kern_return_t memory_object_init
(
memory_object_t memory_object,
memory_object_control_t memory_control,
memory_object_cluster_size_t memory_object_page_size
)
{
return (memory_object->mo_pager_ops->memory_object_init)(
memory_object,
memory_control,
memory_object_page_size);
}
kern_return_t memory_object_terminate
(
memory_object_t memory_object
)
{
return (memory_object->mo_pager_ops->memory_object_terminate)(
memory_object);
}
kern_return_t memory_object_data_request
(
memory_object_t memory_object,
memory_object_offset_t offset,
memory_object_cluster_size_t length,
vm_prot_t desired_access,
memory_object_fault_info_t fault_info
)
{
return (memory_object->mo_pager_ops->memory_object_data_request)(
memory_object,
offset,
length,
desired_access,
fault_info);
}
kern_return_t memory_object_data_return
(
memory_object_t memory_object,
memory_object_offset_t offset,
memory_object_cluster_size_t size,
memory_object_offset_t *resid_offset,
int *io_error,
boolean_t dirty,
boolean_t kernel_copy,
int upl_flags
)
{
return (memory_object->mo_pager_ops->memory_object_data_return)(
memory_object,
offset,
size,
resid_offset,
io_error,
dirty,
kernel_copy,
upl_flags);
}
kern_return_t memory_object_data_initialize
(
memory_object_t memory_object,
memory_object_offset_t offset,
memory_object_cluster_size_t size
)
{
return (memory_object->mo_pager_ops->memory_object_data_initialize)(
memory_object,
offset,
size);
}
kern_return_t memory_object_data_unlock
(
memory_object_t memory_object,
memory_object_offset_t offset,
memory_object_size_t size,
vm_prot_t desired_access
)
{
return (memory_object->mo_pager_ops->memory_object_data_unlock)(
memory_object,
offset,
size,
desired_access);
}
kern_return_t memory_object_synchronize
(
memory_object_t memory_object,
memory_object_offset_t offset,
memory_object_size_t size,
vm_sync_t sync_flags
)
{
return (memory_object->mo_pager_ops->memory_object_synchronize)(
memory_object,
offset,
size,
sync_flags);
}
kern_return_t memory_object_map
(
memory_object_t memory_object,
vm_prot_t prot
)
{
return (memory_object->mo_pager_ops->memory_object_map)(
memory_object,
prot);
}
kern_return_t memory_object_last_unmap
(
memory_object_t memory_object
)
{
return (memory_object->mo_pager_ops->memory_object_last_unmap)(
memory_object);
}
kern_return_t memory_object_data_reclaim
(
memory_object_t memory_object,
boolean_t reclaim_backing_store
)
{
if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL)
return KERN_NOT_SUPPORTED;
return (memory_object->mo_pager_ops->memory_object_data_reclaim)(
memory_object,
reclaim_backing_store);
}
kern_return_t memory_object_create
(
memory_object_default_t default_memory_manager,
vm_size_t new_memory_object_size,
memory_object_t *new_memory_object
)
{
return default_pager_memory_object_create(default_memory_manager,
new_memory_object_size,
new_memory_object);
}
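/*
 * Convert a port into a UPL, taking a reference on the UPL if the port
 * is an active UPL kobject port; returns NULL otherwise.
 */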
upl_t
convert_port_to_upl(
ipc_port_t port)
{
upl_t upl;
ip_lock(port);
if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
ip_unlock(port);
return (upl_t)NULL;
}
upl = (upl_t) port->ip_kobject;
ip_unlock(port);
upl_lock(upl);
upl->ref_count += 1;
upl_unlock(upl);
return upl;
}
mach_port_t
convert_upl_to_port(
__unused upl_t upl)
{
return MACH_PORT_NULL;
}
__private_extern__ void
upl_no_senders(
__unused ipc_port_t port,
__unused mach_port_mscount_t mscount)
{
return;
}