#include <debug.h>
#include <mach_pagemap.h>
#include <task_swapper.h>
#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/xpr.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <kern/misc_protos.h>
#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor.h>
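/*
 * Virtual memory object module.
 *
 * A vm_object manages a range of backing store and tracks its
 * resident pages, pager association, shadow chain and
 * copy-on-write state.
 */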
static kern_return_t vm_object_terminate(
vm_object_t object);
extern void vm_object_remove(
vm_object_t object);
static kern_return_t vm_object_copy_call(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
vm_object_t *_result_object);
static void vm_object_do_collapse(
vm_object_t object,
vm_object_t backing_object);
static void vm_object_do_bypass(
vm_object_t object,
vm_object_t backing_object);
static void vm_object_release_pager(
memory_object_t pager,
boolean_t hashed);
static zone_t vm_object_zone;
static struct vm_object kernel_object_store;
vm_object_t kernel_object;
static struct vm_object compressor_object_store;
vm_object_t compressor_object = &compressor_object_store;
static struct vm_object vm_submap_object_store;
static struct vm_object vm_object_template;
unsigned int vm_page_purged_wired = 0;
unsigned int vm_page_purged_busy = 0;
unsigned int vm_page_purged_others = 0;
#if VM_OBJECT_CACHE
static vm_object_t vm_object_cache_trim(
boolean_t called_from_vm_object_deallocate);
static void vm_object_deactivate_all_pages(
vm_object_t object);
static int vm_object_cached_high;
static int vm_object_cached_max = 512;
#define vm_object_cache_lock() \
lck_mtx_lock(&vm_object_cached_lock_data)
#define vm_object_cache_lock_try() \
lck_mtx_try_lock(&vm_object_cached_lock_data)
#endif
static queue_head_t vm_object_cached_list;
static uint32_t vm_object_cache_pages_freed = 0;
static uint32_t vm_object_cache_pages_moved = 0;
static uint32_t vm_object_cache_pages_skipped = 0;
static uint32_t vm_object_cache_adds = 0;
static uint32_t vm_object_cached_count = 0;
static lck_mtx_t vm_object_cached_lock_data;
static lck_mtx_ext_t vm_object_cached_lock_data_ext;
static uint32_t vm_object_page_grab_failed = 0;
static uint32_t vm_object_page_grab_skipped = 0;
static uint32_t vm_object_page_grab_returned = 0;
static uint32_t vm_object_page_grab_pmapped = 0;
static uint32_t vm_object_page_grab_reactivations = 0;
#define vm_object_cache_lock_spin() \
lck_mtx_lock_spin(&vm_object_cached_lock_data)
#define vm_object_cache_unlock() \
lck_mtx_unlock(&vm_object_cached_lock_data)
static void vm_object_cache_remove_locked(vm_object_t);
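/*
 * Externally-managed memory objects are hashed by pager port so
 * that the vm_object backed by a given pager can be found quickly.
 * A smaller array of mutexes guards the buckets, each lock covering
 * a slice of the table.
 */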
#define VM_OBJECT_HASH_COUNT 1024
#define VM_OBJECT_HASH_LOCK_COUNT 512
static lck_mtx_t vm_object_hashed_lock_data[VM_OBJECT_HASH_LOCK_COUNT];
static lck_mtx_ext_t vm_object_hashed_lock_data_ext[VM_OBJECT_HASH_LOCK_COUNT];
static queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
static struct zone *vm_object_hash_zone;
struct vm_object_hash_entry {
queue_chain_t hash_link;
memory_object_t pager;
vm_object_t object;
boolean_t waiting;
};
typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
#define VM_OBJECT_HASH_ENTRY_NULL ((vm_object_hash_entry_t) 0)
#define VM_OBJECT_HASH_SHIFT 5
#define vm_object_hash(pager) \
((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT))
#define vm_object_lock_hash(pager) \
((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_LOCK_COUNT))
void vm_object_hash_entry_free(
vm_object_hash_entry_t entry);
static void vm_object_reap(vm_object_t object);
static void vm_object_reap_async(vm_object_t object);
static void vm_object_reaper_thread(void);
static lck_mtx_t vm_object_reaper_lock_data;
static lck_mtx_ext_t vm_object_reaper_lock_data_ext;
static queue_head_t vm_object_reaper_queue;
unsigned int vm_object_reap_count = 0;
unsigned int vm_object_reap_count_async = 0;
#define vm_object_reaper_lock() \
lck_mtx_lock(&vm_object_reaper_lock_data)
#define vm_object_reaper_lock_spin() \
lck_mtx_lock_spin(&vm_object_reaper_lock_data)
#define vm_object_reaper_unlock() \
lck_mtx_unlock(&vm_object_reaper_lock_data)
#if 0
#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#endif
static lck_mtx_t *
vm_object_hash_lock_spin(
memory_object_t pager)
{
int index;
index = vm_object_lock_hash(pager);
lck_mtx_lock_spin(&vm_object_hashed_lock_data[index]);
return (&vm_object_hashed_lock_data[index]);
}
static void
vm_object_hash_unlock(lck_mtx_t *lck)
{
lck_mtx_unlock(lck);
}
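/*
 * vm_object_hash_lookup looks up a pager in the hash table and
 * returns the matching entry, optionally removing it from its
 * bucket.  The caller must hold the bucket's hash lock.
 */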
static vm_object_hash_entry_t
vm_object_hash_lookup(
memory_object_t pager,
boolean_t remove_entry)
{
queue_t bucket;
vm_object_hash_entry_t entry;
bucket = &vm_object_hashtable[vm_object_hash(pager)];
entry = (vm_object_hash_entry_t)queue_first(bucket);
while (!queue_end(bucket, (queue_entry_t)entry)) {
if (entry->pager == pager) {
if (remove_entry) {
queue_remove(bucket, entry,
vm_object_hash_entry_t, hash_link);
}
return(entry);
}
entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
}
return(VM_OBJECT_HASH_ENTRY_NULL);
}
static void
vm_object_hash_insert(
vm_object_hash_entry_t entry,
vm_object_t object)
{
queue_t bucket;
bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];
queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
entry->object = object;
object->hashed = TRUE;
}
static vm_object_hash_entry_t
vm_object_hash_entry_alloc(
memory_object_t pager)
{
vm_object_hash_entry_t entry;
entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
entry->pager = pager;
entry->object = VM_OBJECT_NULL;
entry->waiting = FALSE;
return(entry);
}
void
vm_object_hash_entry_free(
vm_object_hash_entry_t entry)
{
zfree(vm_object_hash_zone, entry);
}
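/*
 * _vm_object_allocate:
 *
 * Initialize a pre-allocated object from the template and set its
 * size.  vm_object_allocate() wraps this with a zone allocation.
 */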
__private_extern__ void
_vm_object_allocate(
vm_object_size_t size,
vm_object_t object)
{
XPR(XPR_VM_OBJECT,
"vm_object_allocate, object 0x%X size 0x%X\n",
object, size, 0,0,0);
*object = vm_object_template;
queue_init(&object->memq);
queue_init(&object->msr_q);
#if UPL_DEBUG
queue_init(&object->uplq);
#endif
vm_object_lock_init(object);
object->vo_size = size;
}
__private_extern__ vm_object_t
vm_object_allocate(
vm_object_size_t size)
{
register vm_object_t object;
object = (vm_object_t) zalloc(vm_object_zone);
if (object != VM_OBJECT_NULL)
_vm_object_allocate(size, object);
return object;
}
lck_grp_t vm_object_lck_grp;
lck_grp_t vm_object_cache_lck_grp;
lck_grp_attr_t vm_object_lck_grp_attr;
lck_attr_t vm_object_lck_attr;
lck_attr_t kernel_object_lck_attr;
lck_attr_t compressor_object_lck_attr;
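/*
 * vm_object_bootstrap:
 *
 * Initialize the VM objects module: the object and hash-entry
 * zones, the lock groups, the cached-object list, the reaper
 * queue, the pager hash table, the object template, and the
 * kernel, compressor and submap objects.
 */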
__private_extern__ void
vm_object_bootstrap(void)
{
register int i;
vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
round_page(512*1024),
round_page(12*1024),
"vm objects");
zone_change(vm_object_zone, Z_CALLERACCT, FALSE);
zone_change(vm_object_zone, Z_NOENCRYPT, TRUE);
vm_object_init_lck_grp();
queue_init(&vm_object_cached_list);
lck_mtx_init_ext(&vm_object_cached_lock_data,
&vm_object_cached_lock_data_ext,
&vm_object_cache_lck_grp,
&vm_object_lck_attr);
queue_init(&vm_object_reaper_queue);
for (i = 0; i < VM_OBJECT_HASH_LOCK_COUNT; i++) {
lck_mtx_init_ext(&vm_object_hashed_lock_data[i],
&vm_object_hashed_lock_data_ext[i],
&vm_object_lck_grp,
&vm_object_lck_attr);
}
lck_mtx_init_ext(&vm_object_reaper_lock_data,
&vm_object_reaper_lock_data_ext,
&vm_object_lck_grp,
&vm_object_lck_attr);
vm_object_hash_zone =
zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
round_page(512*1024),
round_page(12*1024),
"vm object hash entries");
zone_change(vm_object_hash_zone, Z_CALLERACCT, FALSE);
zone_change(vm_object_hash_zone, Z_NOENCRYPT, TRUE);
for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
queue_init(&vm_object_hashtable[i]);
vm_object_template.memq.prev = NULL;
vm_object_template.memq.next = NULL;
#if 0
vm_object_lock_init(&vm_object_template);
#endif
vm_object_template.vo_size = 0;
vm_object_template.memq_hint = VM_PAGE_NULL;
vm_object_template.ref_count = 1;
#if TASK_SWAPPER
vm_object_template.res_count = 1;
#endif
vm_object_template.resident_page_count = 0;
vm_object_template.wired_page_count = 0;
vm_object_template.reusable_page_count = 0;
vm_object_template.copy = VM_OBJECT_NULL;
vm_object_template.shadow = VM_OBJECT_NULL;
vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0;
vm_object_template.pager = MEMORY_OBJECT_NULL;
vm_object_template.paging_offset = 0;
vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
vm_object_template.paging_in_progress = 0;
vm_object_template.activity_in_progress = 0;
vm_object_template.all_wanted = 0;
vm_object_template.pager_created = FALSE;
vm_object_template.pager_initialized = FALSE;
vm_object_template.pager_ready = FALSE;
vm_object_template.pager_trusted = FALSE;
vm_object_template.can_persist = FALSE;
vm_object_template.internal = TRUE;
vm_object_template.temporary = TRUE;
vm_object_template.private = FALSE;
vm_object_template.pageout = FALSE;
vm_object_template.alive = TRUE;
vm_object_template.purgable = VM_PURGABLE_DENY;
vm_object_template.purgeable_when_ripe = FALSE;
vm_object_template.shadowed = FALSE;
vm_object_template.advisory_pageout = FALSE;
vm_object_template.true_share = FALSE;
vm_object_template.terminating = FALSE;
vm_object_template.named = FALSE;
vm_object_template.shadow_severed = FALSE;
vm_object_template.phys_contiguous = FALSE;
vm_object_template.nophyscache = FALSE;
vm_object_template.cached_list.prev = NULL;
vm_object_template.cached_list.next = NULL;
vm_object_template.msr_q.prev = NULL;
vm_object_template.msr_q.next = NULL;
vm_object_template.last_alloc = (vm_object_offset_t) 0;
vm_object_template.sequential = (vm_object_offset_t) 0;
vm_object_template.pages_created = 0;
vm_object_template.pages_used = 0;
vm_object_template.scan_collisions = 0;
#if MACH_PAGEMAP
vm_object_template.existence_map = VM_EXTERNAL_NULL;
#endif
vm_object_template.cow_hint = ~(vm_offset_t)0;
#if MACH_ASSERT
vm_object_template.paging_object = VM_OBJECT_NULL;
#endif
vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT;
vm_object_template.set_cache_attr = FALSE;
vm_object_template.object_slid = FALSE;
vm_object_template.code_signed = FALSE;
vm_object_template.hashed = FALSE;
vm_object_template.transposed = FALSE;
vm_object_template.mapping_in_progress = FALSE;
vm_object_template.volatile_empty = FALSE;
vm_object_template.volatile_fault = FALSE;
vm_object_template.all_reusable = FALSE;
vm_object_template.blocked_access = FALSE;
vm_object_template.__object2_unused_bits = 0;
#if UPL_DEBUG
vm_object_template.uplq.prev = NULL;
vm_object_template.uplq.next = NULL;
#endif
#ifdef VM_PIP_DEBUG
bzero(&vm_object_template.pip_holders,
sizeof (vm_object_template.pip_holders));
#endif
vm_object_template.objq.next = NULL;
vm_object_template.objq.prev = NULL;
vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
vm_object_template.purgeable_queue_group = 0;
vm_object_template.vo_cache_ts = 0;
kernel_object = &kernel_object_store;
#ifdef ppc
_vm_object_allocate(vm_last_addr + 1,
kernel_object);
#else
_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
kernel_object);
_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
compressor_object);
#endif
kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
vm_submap_object = &vm_submap_object_store;
#ifdef ppc
_vm_object_allocate(vm_last_addr + 1,
vm_submap_object);
#else
_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
vm_submap_object);
#endif
vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
vm_object_reference(vm_submap_object);
#if MACH_PAGEMAP
vm_external_module_initialize();
#endif
}
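/*
 * vm_object_reaper_init:
 *
 * Start the kernel thread that reaps objects whose termination
 * had to be deferred.
 */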
void
vm_object_reaper_init(void)
{
kern_return_t kr;
thread_t thread;
kr = kernel_thread_start_priority(
(thread_continue_t) vm_object_reaper_thread,
NULL,
BASEPRI_PREEMPT - 1,
&thread);
if (kr != KERN_SUCCESS) {
panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
}
thread_deallocate(thread);
}
__private_extern__ void
vm_object_init(void)
{
}
__private_extern__ void
vm_object_init_lck_grp(void)
{
lck_grp_attr_setdefault(&vm_object_lck_grp_attr);
lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr);
lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr);
lck_attr_setdefault(&vm_object_lck_attr);
lck_attr_setdefault(&kernel_object_lck_attr);
lck_attr_cleardebug(&kernel_object_lck_attr);
lck_attr_setdefault(&compressor_object_lck_attr);
lck_attr_cleardebug(&compressor_object_lck_attr);
}
#if VM_OBJECT_CACHE
#define MIGHT_NOT_CACHE_SHADOWS 1
#if MIGHT_NOT_CACHE_SHADOWS
static int cache_shadows = TRUE;
#endif
#endif
unsigned long vm_object_deallocate_shared_successes = 0;
unsigned long vm_object_deallocate_shared_failures = 0;
unsigned long vm_object_deallocate_shared_swap_failures = 0;
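/*
 * vm_object_deallocate:
 *
 * Release a reference to the specified object, gained either
 * through a vm_object_allocate or a vm_object_reference call.
 * When all references are gone, the object is cached (if it can
 * persist) or terminated; terminating an object may in turn drop
 * a reference on its shadow, so the routine iterates down the
 * shadow chain.
 *
 * No object may be locked.
 */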
__private_extern__ void
vm_object_deallocate(
register vm_object_t object)
{
#if VM_OBJECT_CACHE
boolean_t retry_cache_trim = FALSE;
uint32_t try_failed_count = 0;
#endif
vm_object_t shadow = VM_OBJECT_NULL;
if (object == VM_OBJECT_NULL)
return;
if (object == kernel_object || object == compressor_object) {
vm_object_lock_shared(object);
OSAddAtomic(-1, &object->ref_count);
if (object->ref_count == 0) {
if (object == kernel_object)
panic("vm_object_deallocate: losing kernel_object\n");
else
panic("vm_object_deallocate: losing compressor_object\n");
}
vm_object_unlock(object);
return;
}
if (object->ref_count > 2 ||
(!object->named && object->ref_count > 1)) {
UInt32 original_ref_count;
volatile UInt32 *ref_count_p;
Boolean atomic_swap;
vm_object_lock_shared(object);
ref_count_p = (volatile UInt32 *) &object->ref_count;
original_ref_count = object->ref_count;
if (original_ref_count > 2 ||
(!object->named && original_ref_count > 1)) {
atomic_swap = OSCompareAndSwap(
original_ref_count,
original_ref_count - 1,
(UInt32 *) &object->ref_count);
if (atomic_swap == FALSE) {
vm_object_deallocate_shared_swap_failures++;
}
} else {
atomic_swap = FALSE;
}
vm_object_unlock(object);
if (atomic_swap) {
vm_object_deallocate_shared_successes++;
return;
}
vm_object_deallocate_shared_failures++;
}
while (object != VM_OBJECT_NULL) {
vm_object_lock(object);
assert(object->ref_count > 0);
if ((object->ref_count == 2) && (object->named)) {
memory_object_t pager = object->pager;
if (pager != MEMORY_OBJECT_NULL) {
vm_object_mapping_wait(object, THREAD_UNINT);
vm_object_mapping_begin(object);
vm_object_unlock(object);
memory_object_last_unmap(pager);
vm_object_lock(object);
vm_object_mapping_end(object);
}
assert(object->ref_count > 0);
}
if ((object->ref_count > 1) || object->terminating) {
vm_object_lock_assert_exclusive(object);
object->ref_count--;
vm_object_res_deallocate(object);
if (object->ref_count == 1 &&
object->shadow != VM_OBJECT_NULL) {
vm_object_collapse(object, 0, FALSE);
}
vm_object_unlock(object);
#if VM_OBJECT_CACHE
if (retry_cache_trim &&
((object = vm_object_cache_trim(TRUE)) !=
VM_OBJECT_NULL)) {
continue;
}
#endif
return;
}
if (object->pager_created && ! object->pager_initialized) {
assert(! object->can_persist);
vm_object_assert_wait(object,
VM_OBJECT_EVENT_INITIALIZED,
THREAD_UNINT);
vm_object_unlock(object);
thread_block(THREAD_CONTINUE_NULL);
continue;
}
#if VM_OBJECT_CACHE
if ((object->can_persist) && (object->alive)) {
vm_object_lock_assert_exclusive(object);
if (--object->ref_count > 0) {
vm_object_res_deallocate(object);
vm_object_unlock(object);
if (retry_cache_trim &&
((object = vm_object_cache_trim(TRUE)) !=
VM_OBJECT_NULL)) {
continue;
}
return;
}
#if MIGHT_NOT_CACHE_SHADOWS
if (! cache_shadows) {
shadow = object->shadow;
object->shadow = VM_OBJECT_NULL;
}
#endif
assert(object->shadow == VM_OBJECT_NULL);
VM_OBJ_RES_DECR(object);
XPR(XPR_VM_OBJECT,
"vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
object,
vm_object_cached_list.next,
vm_object_cached_list.prev,0,0);
vm_object_unlock(object);
try_failed_count = 0;
for (;;) {
vm_object_cache_lock();
if (vm_object_lock_try(object))
break;
vm_object_cache_unlock();
try_failed_count++;
mutex_pause(try_failed_count);
}
vm_object_cached_count++;
if (vm_object_cached_count > vm_object_cached_high)
vm_object_cached_high = vm_object_cached_count;
queue_enter(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cache_unlock();
vm_object_deactivate_all_pages(object);
vm_object_unlock(object);
#if MIGHT_NOT_CACHE_SHADOWS
if (! cache_shadows && shadow != VM_OBJECT_NULL) {
object = shadow;
retry_cache_trim = TRUE;
continue;
}
#endif
object = vm_object_cache_trim(TRUE);
if (object == VM_OBJECT_NULL) {
return;
}
retry_cache_trim = TRUE;
} else
#endif
{
XPR(XPR_VM_OBJECT,
"vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
object, object->resident_page_count,
object->paging_in_progress,
(void *)current_thread(),object->ref_count);
VM_OBJ_RES_DECR(object);
shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
if (vm_object_terminate(object) != KERN_SUCCESS) {
return;
}
if (shadow != VM_OBJECT_NULL) {
object = shadow;
continue;
}
#if VM_OBJECT_CACHE
if (retry_cache_trim &&
((object = vm_object_cache_trim(TRUE)) !=
VM_OBJECT_NULL)) {
continue;
}
#endif
return;
}
}
#if VM_OBJECT_CACHE
assert(! retry_cache_trim);
#endif
}
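/*
 * vm_object_page_grab:
 *
 * Scan up to 50 resident pages for one that is neither wired,
 * busy, dirty, precious nor referenced; disconnect it from any
 * pmaps and free it from the object so the caller can reuse it.
 * Returns NULL if no page qualified within the scan limit.
 * The object must be locked exclusively.
 */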
vm_page_t
vm_object_page_grab(
vm_object_t object)
{
vm_page_t p, next_p;
int p_limit = 0;
int p_skipped = 0;
vm_object_lock_assert_exclusive(object);
next_p = (vm_page_t)queue_first(&object->memq);
p_limit = MIN(50, object->resident_page_count);
while (!queue_end(&object->memq, (queue_entry_t)next_p) && --p_limit > 0) {
p = next_p;
next_p = (vm_page_t)queue_next(&next_p->listq);
if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry || p->fictitious)
goto move_page_in_obj;
if (p->pmapped || p->dirty || p->precious) {
vm_page_lockspin_queues();
if (p->pmapped) {
int refmod_state;
vm_object_page_grab_pmapped++;
if (p->reference == FALSE || p->dirty == FALSE) {
refmod_state = pmap_get_refmod(p->phys_page);
if (refmod_state & VM_MEM_REFERENCED)
p->reference = TRUE;
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(p, FALSE);
}
}
if (p->dirty == FALSE && p->precious == FALSE) {
refmod_state = pmap_disconnect(p->phys_page);
if (refmod_state & VM_MEM_REFERENCED)
p->reference = TRUE;
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(p, FALSE);
}
if (p->dirty == FALSE)
goto take_page;
}
}
if (p->inactive && p->reference == TRUE) {
vm_page_activate(p);
VM_STAT_INCR(reactivations);
vm_object_page_grab_reactivations++;
}
vm_page_unlock_queues();
move_page_in_obj:
queue_remove(&object->memq, p, vm_page_t, listq);
queue_enter(&object->memq, p, vm_page_t, listq);
p_skipped++;
continue;
}
vm_page_lockspin_queues();
take_page:
vm_page_free_prepare_queues(p);
vm_object_page_grab_returned++;
vm_object_page_grab_skipped += p_skipped;
vm_page_unlock_queues();
vm_page_free_prepare_object(p, TRUE);
return (p);
}
vm_object_page_grab_skipped += p_skipped;
vm_object_page_grab_failed++;
return (NULL);
}
#define EVICT_PREPARE_LIMIT 64
#define EVICT_AGE 10
static clock_sec_t vm_object_cache_aging_ts = 0;
static void
vm_object_cache_remove_locked(
vm_object_t object)
{
queue_remove(&vm_object_cached_list, object, vm_object_t, objq);
object->objq.next = NULL;
object->objq.prev = NULL;
vm_object_cached_count--;
}
void
vm_object_cache_remove(
vm_object_t object)
{
vm_object_cache_lock_spin();
if (object->objq.next || object->objq.prev)
vm_object_cache_remove_locked(object);
vm_object_cache_unlock();
}
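/*
 * vm_object_cache_add:
 *
 * Place an object with resident pages on the cached-object list,
 * stamping it with an eviction age EVICT_AGE seconds in the
 * future; vm_object_cache_evict() won't touch it before then.
 */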
void
vm_object_cache_add(
vm_object_t object)
{
clock_sec_t sec;
clock_nsec_t nsec;
if (object->resident_page_count == 0)
return;
clock_get_system_nanotime(&sec, &nsec);
vm_object_cache_lock_spin();
if (object->objq.next == NULL && object->objq.prev == NULL) {
queue_enter(&vm_object_cached_list, object, vm_object_t, objq);
object->vo_cache_ts = sec + EVICT_AGE;
object->vo_cache_pages_to_scan = object->resident_page_count;
vm_object_cached_count++;
vm_object_cache_adds++;
}
vm_object_cache_unlock();
}
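/*
 * vm_object_cache_evict:
 *
 * Walk the cached-object list, reclaiming up to num_to_evict
 * pages from at most max_objects_to_examine objects.  Clean,
 * unmapped pages are freed; dirty, precious or recently mapped
 * pages are recycled to the inactive queue.  Called and returns
 * with the page queues lock held; the lock is dropped while
 * scanning.
 */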
int
vm_object_cache_evict(
int num_to_evict,
int max_objects_to_examine)
{
vm_object_t object = VM_OBJECT_NULL;
vm_object_t next_obj = VM_OBJECT_NULL;
vm_page_t local_free_q = VM_PAGE_NULL;
vm_page_t p;
vm_page_t next_p;
int object_cnt = 0;
vm_page_t ep_array[EVICT_PREPARE_LIMIT];
int ep_count;
int ep_limit;
int ep_index;
int ep_freed = 0;
int ep_moved = 0;
uint32_t ep_skipped = 0;
clock_sec_t sec;
clock_nsec_t nsec;
KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
if (queue_empty(&vm_object_cached_list)) {
KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
return (0);
}
clock_get_system_nanotime(&sec, &nsec);
if (sec < vm_object_cache_aging_ts) {
KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
return (0);
}
vm_page_unlock_queues();
vm_object_cache_lock_spin();
for (;;) {
next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
object = next_obj;
next_obj = (vm_object_t)queue_next(&next_obj->objq);
if (sec < object->vo_cache_ts) {
KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
vm_object_cache_aging_ts = object->vo_cache_ts;
object = VM_OBJECT_NULL;
break;
}
if (!vm_object_lock_try_scan(object)) {
KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);
object = VM_OBJECT_NULL;
continue;
}
if (queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
vm_object_cache_remove_locked(object);
vm_object_unlock(object);
object = VM_OBJECT_NULL;
continue;
}
break;
}
vm_object_cache_unlock();
if (object == VM_OBJECT_NULL)
break;
next_p = (vm_page_t)queue_first(&object->memq);
if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT)
ep_limit = EVICT_PREPARE_LIMIT;
ep_count = 0;
while (!queue_end(&object->memq, (queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
p = next_p;
next_p = (vm_page_t)queue_next(&next_p->listq);
object->vo_cache_pages_to_scan--;
if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry) {
queue_remove(&object->memq, p, vm_page_t, listq);
queue_enter(&object->memq, p, vm_page_t, listq);
ep_skipped++;
continue;
}
if (p->wpmapped || p->dirty || p->precious) {
queue_remove(&object->memq, p, vm_page_t, listq);
queue_enter(&object->memq, p, vm_page_t, listq);
pmap_clear_reference(p->phys_page);
}
ep_array[ep_count++] = p;
}
KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);
vm_page_lockspin_queues();
for (ep_index = 0; ep_index < ep_count; ep_index++) {
p = ep_array[ep_index];
if (p->wpmapped || p->dirty || p->precious) {
p->reference = FALSE;
p->no_cache = FALSE;
assert(!p->pageout_queue);
VM_PAGE_QUEUES_REMOVE(p);
VM_PAGE_ENQUEUE_INACTIVE(p, TRUE);
ep_moved++;
} else {
vm_page_free_prepare_queues(p);
assert(p->pageq.next == NULL && p->pageq.prev == NULL);
p->pageq.next = (queue_entry_t) local_free_q;
local_free_q = p;
ep_freed++;
}
}
vm_page_unlock_queues();
KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);
if (local_free_q) {
vm_page_free_list(local_free_q, TRUE);
local_free_q = VM_PAGE_NULL;
}
if (object->vo_cache_pages_to_scan == 0) {
KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);
vm_object_cache_remove(object);
KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
}
vm_object_unlock(object);
object = VM_OBJECT_NULL;
if ((ep_freed + ep_moved) >= num_to_evict) {
break;
}
vm_object_cache_lock_spin();
}
vm_page_lock_queues();
vm_object_cache_pages_freed += ep_freed;
vm_object_cache_pages_moved += ep_moved;
vm_object_cache_pages_skipped += ep_skipped;
KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
return (ep_freed);
}
#if VM_OBJECT_CACHE
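/*
 * vm_object_cache_trim:
 *
 * Shrink the cache of persistable objects back down to
 * vm_object_cached_max by terminating objects from the front of
 * the list.  When called from vm_object_deallocate, a surviving
 * shadow object is returned to the caller for deallocation
 * instead of being deallocated recursively here.
 */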
vm_object_t
vm_object_cache_trim(
boolean_t called_from_vm_object_deallocate)
{
register vm_object_t object = VM_OBJECT_NULL;
vm_object_t shadow;
for (;;) {
if (vm_object_cached_count <= vm_object_cached_max)
return VM_OBJECT_NULL;
vm_object_cache_lock();
if (vm_object_cached_count <= vm_object_cached_max) {
vm_object_cache_unlock();
return VM_OBJECT_NULL;
}
XPR(XPR_VM_OBJECT,
"vm_object_cache_trim: removing from front of cache (%x, %x)\n",
vm_object_cached_list.next,
vm_object_cached_list.prev, 0, 0, 0);
object = (vm_object_t) queue_first(&vm_object_cached_list);
if (object == (vm_object_t) &vm_object_cached_list) {
if (vm_object_cached_max < 0)
vm_object_cached_max = 0;
vm_object_cached_count = 0;
vm_object_cache_unlock();
return VM_OBJECT_NULL;
}
vm_object_lock(object);
queue_remove(&vm_object_cached_list, object, vm_object_t,
cached_list);
vm_object_cached_count--;
vm_object_cache_unlock();
assert(object->pager_initialized);
assert(object->ref_count == 0);
vm_object_lock_assert_exclusive(object);
object->ref_count++;
shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
if (vm_object_terminate(object) != KERN_SUCCESS)
continue;
if (shadow != VM_OBJECT_NULL) {
if (called_from_vm_object_deallocate) {
return shadow;
} else {
vm_object_deallocate(shadow);
}
}
}
}
#endif
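/*
 * vm_object_terminate:
 *
 * Free all resources associated with a vm_object.  The caller
 * must hold the object lock and a single reference; both are
 * consumed on success.  Returns KERN_FAILURE (leaving the object
 * alive) if it was already terminating or had gained extra
 * references, and also after handing the object to the reaper
 * thread when paging or other activity is still in progress.
 */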
static kern_return_t
vm_object_terminate(
vm_object_t object)
{
vm_object_t shadow_object;
XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
object, object->ref_count, 0, 0, 0);
if (!object->pageout && (!object->temporary || object->can_persist) &&
(object->pager != NULL || object->shadow_severed)) {
object->pager_trusted = FALSE;
vm_object_reap_pages(object, REAP_TERMINATE);
}
if (object->terminating) {
vm_object_lock_assert_exclusive(object);
object->ref_count--;
assert(object->ref_count > 0);
vm_object_unlock(object);
return KERN_FAILURE;
}
if (object->ref_count != 1) {
vm_object_lock_assert_exclusive(object);
object->ref_count--;
assert(object->ref_count > 0);
vm_object_res_deallocate(object);
vm_object_unlock(object);
return KERN_FAILURE;
}
object->terminating = TRUE;
object->alive = FALSE;
if ( !object->internal && (object->objq.next || object->objq.prev))
vm_object_cache_remove(object);
if (object->hashed) {
lck_mtx_t *lck;
lck = vm_object_hash_lock_spin(object->pager);
vm_object_remove(object);
vm_object_hash_unlock(lck);
}
if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
!(object->pageout)) {
vm_object_lock(shadow_object);
if (shadow_object->copy == object)
shadow_object->copy = VM_OBJECT_NULL;
vm_object_unlock(shadow_object);
}
if (object->paging_in_progress != 0 ||
object->activity_in_progress != 0) {
vm_object_reap_async(object);
vm_object_unlock(object);
return KERN_FAILURE;
}
vm_object_reap(object);
object = VM_OBJECT_NULL;
return KERN_SUCCESS;
}
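/*
 * vm_object_reap():
 *
 * Complete the termination of a vm_object marked "terminating":
 * detach and release the pager, pull the object off the
 * purgeable queues, free its remaining pages and finally the
 * object itself.  Requires the object locked exclusively with no
 * paging or other activity in progress; the lock is consumed.
 */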
void
vm_object_reap(
vm_object_t object)
{
memory_object_t pager;
vm_object_lock_assert_exclusive(object);
assert(object->paging_in_progress == 0);
assert(object->activity_in_progress == 0);
vm_object_reap_count++;
pager = object->pager;
object->pager = MEMORY_OBJECT_NULL;
if (pager != MEMORY_OBJECT_NULL)
memory_object_control_disable(object->pager_control);
object->ref_count--;
#if TASK_SWAPPER
assert(object->res_count == 0);
#endif
assert (object->ref_count == 0);
if (object->internal && (object->objq.next || object->objq.prev)) {
purgeable_q_t queue = vm_purgeable_object_remove(object);
assert(queue);
if (object->purgeable_when_ripe) {
vm_page_lock_queues();
vm_purgeable_token_delete_first(queue);
assert(queue->debug_count_objects>=0);
vm_page_unlock_queues();
}
}
if (object->pageout) {
assert(object->shadow != VM_OBJECT_NULL);
vm_pageout_object_terminate(object);
} else if (((object->temporary && !object->can_persist) || (pager == MEMORY_OBJECT_NULL))) {
vm_object_reap_pages(object, REAP_REAP);
}
assert(queue_empty(&object->memq));
assert(object->paging_in_progress == 0);
assert(object->activity_in_progress == 0);
assert(object->ref_count == 0);
if (pager != MEMORY_OBJECT_NULL) {
vm_object_unlock(object);
vm_object_release_pager(pager, object->hashed);
vm_object_lock(object);
}
object->terminating = FALSE;
vm_object_paging_begin(object);
vm_object_paging_end(object);
vm_object_unlock(object);
#if MACH_PAGEMAP
vm_external_destroy(object->existence_map, object->vo_size);
#endif
object->shadow = VM_OBJECT_NULL;
vm_object_lock_destroy(object);
zfree(vm_object_zone, object);
object = VM_OBJECT_NULL;
}
unsigned int vm_max_batch = 256;
#define V_O_R_MAX_BATCH 128
#define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch)
#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
MACRO_BEGIN \
if (_local_free_q) { \
if (do_disconnect) { \
vm_page_t m; \
for (m = _local_free_q; \
m != VM_PAGE_NULL; \
m = (vm_page_t) m->pageq.next) { \
if (m->pmapped) { \
pmap_disconnect(m->phys_page); \
} \
} \
} \
vm_page_free_list(_local_free_q, TRUE); \
_local_free_q = VM_PAGE_NULL; \
} \
MACRO_END
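/*
 * vm_object_reap_pages:
 *
 * Free an object's resident pages according to reap_type:
 * REAP_REAP frees everything outright, REAP_TERMINATE pushes
 * dirty pages to the pager first, REAP_PURGEABLE discards pages
 * without cleaning them, and REAP_DATA_FLUSH additionally
 * disconnects pages from their pmaps before freeing.  Pages are
 * batched onto a local free list to limit lock hold times.
 */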
void
vm_object_reap_pages(
vm_object_t object,
int reap_type)
{
vm_page_t p;
vm_page_t next;
vm_page_t local_free_q = VM_PAGE_NULL;
int loop_count;
boolean_t disconnect_on_release;
pmap_flush_context pmap_flush_context_storage;
if (reap_type == REAP_DATA_FLUSH) {
disconnect_on_release = TRUE;
} else {
disconnect_on_release = FALSE;
}
restart_after_sleep:
if (queue_empty(&object->memq))
return;
loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
if (reap_type == REAP_PURGEABLE)
pmap_flush_context_init(&pmap_flush_context_storage);
vm_page_lockspin_queues();
next = (vm_page_t)queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t)next)) {
p = next;
next = (vm_page_t)queue_next(&next->listq);
if (--loop_count == 0) {
vm_page_unlock_queues();
if (local_free_q) {
if (reap_type == REAP_PURGEABLE) {
pmap_flush(&pmap_flush_context_storage);
pmap_flush_context_init(&pmap_flush_context_storage);
}
VM_OBJ_REAP_FREELIST(local_free_q,
disconnect_on_release);
} else
mutex_pause(0);
loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
vm_page_lockspin_queues();
}
if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
if (p->busy || p->cleaning) {
vm_page_unlock_queues();
VM_OBJ_REAP_FREELIST(local_free_q,
disconnect_on_release);
PAGE_SLEEP(object, p, THREAD_UNINT);
goto restart_after_sleep;
}
if (p->laundry) {
p->pageout = FALSE;
vm_pageout_steal_laundry(p, TRUE);
}
}
switch (reap_type) {
case REAP_DATA_FLUSH:
if (VM_PAGE_WIRED(p)) {
continue;
}
break;
case REAP_PURGEABLE:
if (VM_PAGE_WIRED(p)) {
vm_page_purged_wired++;
continue;
}
if (p->laundry && !p->busy && !p->cleaning) {
p->pageout = FALSE;
vm_pageout_steal_laundry(p, TRUE);
}
if (p->cleaning || p->laundry) {
vm_page_purged_others++;
continue;
}
if (p->busy) {
vm_page_deactivate(p);
vm_page_purged_busy++;
continue;
}
assert(p->object != kernel_object);
if (p->pmapped == TRUE) {
pmap_disconnect_options(p->phys_page, PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
}
vm_page_purged_count++;
break;
case REAP_TERMINATE:
if (p->absent || p->private) {
break;
}
if (p->fictitious) {
assert (p->phys_page == vm_page_guard_addr);
break;
}
if (!p->dirty && p->wpmapped)
p->dirty = pmap_is_modified(p->phys_page);
if ((p->dirty || p->precious) && !p->error && object->alive) {
if (!p->laundry) {
VM_PAGE_QUEUES_REMOVE(p);
vm_pageout_cluster(p, TRUE);
}
vm_page_unlock_queues();
VM_OBJ_REAP_FREELIST(local_free_q,
disconnect_on_release);
vm_object_paging_wait(object, THREAD_UNINT);
goto restart_after_sleep;
}
break;
case REAP_REAP:
break;
}
vm_page_free_prepare_queues(p);
assert(p->pageq.next == NULL && p->pageq.prev == NULL);
p->pageq.next = (queue_entry_t) local_free_q;
local_free_q = p;
}
vm_page_unlock_queues();
if (reap_type == REAP_PURGEABLE)
pmap_flush(&pmap_flush_context_storage);
VM_OBJ_REAP_FREELIST(local_free_q,
disconnect_on_release);
}
void
vm_object_reap_async(
vm_object_t object)
{
vm_object_lock_assert_exclusive(object);
vm_object_reaper_lock_spin();
vm_object_reap_count_async++;
queue_enter(&vm_object_reaper_queue, object,
vm_object_t, cached_list);
vm_object_reaper_unlock();
thread_wakeup((event_t) &vm_object_reaper_queue);
}
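/*
 * vm_object_reaper_thread:
 *
 * Daemon that drains vm_object_reaper_queue, waiting for each
 * object's paging and other activity to subside before reaping
 * it and dropping the reference on its shadow.
 */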
void
vm_object_reaper_thread(void)
{
vm_object_t object, shadow_object;
vm_object_reaper_lock_spin();
while (!queue_empty(&vm_object_reaper_queue)) {
queue_remove_first(&vm_object_reaper_queue,
object,
vm_object_t,
cached_list);
vm_object_reaper_unlock();
vm_object_lock(object);
assert(object->terminating);
assert(!object->alive);
while (object->paging_in_progress != 0 ||
object->activity_in_progress != 0) {
vm_object_wait(object,
VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
THREAD_UNINT);
vm_object_lock(object);
}
shadow_object =
object->pageout ? VM_OBJECT_NULL : object->shadow;
vm_object_reap(object);
object = VM_OBJECT_NULL;
if (shadow_object != VM_OBJECT_NULL) {
vm_object_deallocate(shadow_object);
shadow_object = VM_OBJECT_NULL;
}
vm_object_reaper_lock_spin();
}
assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
vm_object_reaper_unlock();
thread_block((thread_continue_t) vm_object_reaper_thread);
}
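/*
 * Routine:	vm_object_pager_wakeup
 * Purpose:	Wake up anyone waiting for the termination of a
 *		pager, and retire its hash entry.
 */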
static void
vm_object_pager_wakeup(
memory_object_t pager)
{
vm_object_hash_entry_t entry;
boolean_t waiting = FALSE;
lck_mtx_t *lck;
lck = vm_object_hash_lock_spin(pager);
entry = vm_object_hash_lookup(pager, TRUE);
if (entry != VM_OBJECT_HASH_ENTRY_NULL)
waiting = entry->waiting;
vm_object_hash_unlock(lck);
if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
if (waiting)
thread_wakeup((event_t) pager);
vm_object_hash_entry_free(entry);
}
}
static void
vm_object_release_pager(
memory_object_t pager,
boolean_t hashed)
{
(void) memory_object_terminate(pager);
if (hashed == TRUE) {
vm_object_pager_wakeup(pager);
}
memory_object_deallocate(pager);
}
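/*
 * Routine:	vm_object_destroy
 * Purpose:
 *	Shut down a VM object, despite the presence of address
 *	map (or other) references to the vm_object.
 */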
kern_return_t
vm_object_destroy(
vm_object_t object,
__unused kern_return_t reason)
{
memory_object_t old_pager;
if (object == VM_OBJECT_NULL)
return(KERN_SUCCESS);
vm_object_lock(object);
object->can_persist = FALSE;
object->named = FALSE;
object->alive = FALSE;
if (object->hashed) {
lck_mtx_t *lck;
lck = vm_object_hash_lock_spin(object->pager);
vm_object_remove(object);
vm_object_hash_unlock(lck);
}
old_pager = object->pager;
object->pager = MEMORY_OBJECT_NULL;
if (old_pager != MEMORY_OBJECT_NULL)
memory_object_control_disable(object->pager_control);
vm_object_paging_wait(object, THREAD_UNINT);
vm_object_unlock(object);
if (old_pager != MEMORY_OBJECT_NULL) {
vm_object_release_pager(old_pager, object->hashed);
vm_object_deallocate(object);
}
return(KERN_SUCCESS);
}
#if VM_OBJECT_CACHE
#define VM_OBJ_DEACT_ALL_STATS DEBUG
#if VM_OBJ_DEACT_ALL_STATS
uint32_t vm_object_deactivate_all_pages_batches = 0;
uint32_t vm_object_deactivate_all_pages_pages = 0;
#endif
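/*
 * vm_object_deactivate_all_pages
 *
 * Deactivate all pages in the specified object, keeping them in
 * memory even though the object is no longer referenced.  The
 * object must be locked.
 */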
static void
vm_object_deactivate_all_pages(
register vm_object_t object)
{
register vm_page_t p;
int loop_count;
#if VM_OBJ_DEACT_ALL_STATS
int pages_count;
#endif
#define V_O_D_A_P_MAX_BATCH 256
loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
#if VM_OBJ_DEACT_ALL_STATS
pages_count = 0;
#endif
vm_page_lock_queues();
queue_iterate(&object->memq, p, vm_page_t, listq) {
if (--loop_count == 0) {
#if VM_OBJ_DEACT_ALL_STATS
hw_atomic_add(&vm_object_deactivate_all_pages_batches,
1);
hw_atomic_add(&vm_object_deactivate_all_pages_pages,
pages_count);
pages_count = 0;
#endif
lck_mtx_yield(&vm_page_queue_lock);
loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
}
if (!p->busy && !p->throttled) {
#if VM_OBJ_DEACT_ALL_STATS
pages_count++;
#endif
vm_page_deactivate(p);
}
}
#if VM_OBJ_DEACT_ALL_STATS
if (pages_count) {
hw_atomic_add(&vm_object_deactivate_all_pages_batches, 1);
hw_atomic_add(&vm_object_deactivate_all_pages_pages,
pages_count);
pages_count = 0;
}
#endif
vm_page_unlock_queues();
}
#endif
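/*
 * Page deactivation proceeds in chunks of PAGES_IN_A_CHUNK pages.
 * A chunk_state_t holds one bit per page in the chunk; a set bit
 * means the page still needs to be handled.  As the scan descends
 * the shadow chain, pages already dealt with in a shallower
 * object are skipped via this bitmap.
 */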
#define PAGES_IN_A_CHUNK 64
#define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64)
typedef uint64_t chunk_state_t;
#define CHUNK_INIT(c, len) \
MACRO_BEGIN \
uint64_t p; \
\
(c) = 0xffffffffffffffffLL; \
\
for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \
MARK_PAGE_HANDLED(c, p); \
MACRO_END
#define CHUNK_NOT_COMPLETE(c) ((c) != 0)
#define PAGE_ALREADY_HANDLED(c, p) (((c) & (1LL << (p))) == 0)
#define MARK_PAGE_HANDLED(c, p) \
MACRO_BEGIN \
(c) = (c) & ~(1LL << (p)); \
MACRO_END
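/*
 * page_is_paged_out determines whether a page exists on backing
 * store at the given offset: via the existence map when one is
 * available, via the compressor, or by probing the pager with a
 * zero-length memory_object_data_request.  The object lock is
 * dropped and retaken around the pager call.
 */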
static boolean_t
page_is_paged_out(
vm_object_t object,
vm_object_offset_t offset)
{
kern_return_t kr;
memory_object_t pager;
#if MACH_PAGEMAP
if (object->existence_map) {
if (vm_external_state_get(object->existence_map, offset)
== VM_EXTERNAL_STATE_EXISTS) {
return TRUE;
}
} else
#endif
if (object->internal &&
object->alive &&
!object->terminating &&
object->pager_ready) {
if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
== VM_EXTERNAL_STATE_EXISTS) {
return TRUE;
} else {
return FALSE;
}
}
assert(object->paging_in_progress);
pager = object->pager;
vm_object_unlock(object);
kr = memory_object_data_request(
pager,
offset + object->paging_offset,
0,
VM_PROT_READ,
NULL);
vm_object_lock(object);
if (kr == KERN_SUCCESS) {
return TRUE;
}
}
return FALSE;
}
#if DEVELOPMENT || DEBUG
int madvise_free_debug = 1;
#else
int madvise_free_debug = 0;
#endif
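/*
 * deactivate_pages_in_object:
 *
 * Deactivate the resident pages of one object in the given range,
 * recording progress in *chunk_state so shadow objects don't
 * re-examine the same pages.  With kill_page set on an internal
 * object the page contents are discarded; with reusable_page set
 * the pages are accounted as reusable.  Work is batched through
 * the delayed-work mechanism.
 */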
static void
deactivate_pages_in_object(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
boolean_t kill_page,
boolean_t reusable_page,
boolean_t all_reusable,
chunk_state_t *chunk_state,
pmap_flush_context *pfc)
{
vm_page_t m;
int p;
struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
struct vm_page_delayed_work *dwp;
int dw_count;
int dw_limit;
unsigned int reusable = 0;
dwp = &dw_array[0];
dw_count = 0;
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64) {
if (PAGE_ALREADY_HANDLED(*chunk_state, p))
continue;
if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
MARK_PAGE_HANDLED(*chunk_state, p);
if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) && (!m->laundry)) {
int clear_refmod;
dwp->dw_mask = 0;
clear_refmod = VM_MEM_REFERENCED;
dwp->dw_mask |= DW_clear_reference;
if ((kill_page) && (object->internal)) {
if (madvise_free_debug) {
pmap_zero_page(m->phys_page);
}
m->precious = FALSE;
m->dirty = FALSE;
clear_refmod |= VM_MEM_MODIFIED;
if (m->throttled) {
dwp->dw_mask |= DW_move_page;
}
#if MACH_PAGEMAP
vm_external_state_clr(object->existence_map, offset);
#endif
VM_COMPRESSOR_PAGER_STATE_CLR(object,
offset);
if (reusable_page && !m->reusable) {
assert(!all_reusable);
assert(!object->all_reusable);
m->reusable = TRUE;
object->reusable_page_count++;
assert(object->resident_page_count >= object->reusable_page_count);
reusable++;
}
}
pmap_clear_refmod_options(m->phys_page, clear_refmod, PMAP_OPTIONS_NOFLUSH, (void *)pfc);
if (!m->throttled && !(reusable_page || all_reusable))
dwp->dw_mask |= DW_move_page;
if (dwp->dw_mask)
VM_PAGE_ADD_DELAYED_WORK(dwp, m,
dw_count);
if (dw_count >= dw_limit) {
if (reusable) {
OSAddAtomic(reusable,
&vm_page_stats_reusable.reusable_count);
vm_page_stats_reusable.reusable += reusable;
reusable = 0;
}
vm_page_do_delayed_work(object, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
} else {
if (page_is_paged_out(object, offset)) {
MARK_PAGE_HANDLED(*chunk_state, p);
if ((kill_page) && (object->internal)) {
#if MACH_PAGEMAP
vm_external_state_clr(object->existence_map, offset);
#endif
VM_COMPRESSOR_PAGER_STATE_CLR(object,
offset);
}
}
}
}
if (reusable) {
OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
vm_page_stats_reusable.reusable += reusable;
reusable = 0;
}
if (dw_count)
vm_page_do_delayed_work(object, &dw_array[0], dw_count);
}
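/*
 * deactivate_a_chunk:
 *
 * Deactivate one chunk (at most CHUNK_SIZE bytes) of the given
 * range, following the shadow chain until every page of the chunk
 * has been handled.  kill_page and the reusable flags apply only
 * to the top-level object.  Returns the number of bytes covered.
 */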
static vm_object_size_t
deactivate_a_chunk(
vm_object_t orig_object,
vm_object_offset_t offset,
vm_object_size_t size,
boolean_t kill_page,
boolean_t reusable_page,
boolean_t all_reusable,
pmap_flush_context *pfc)
{
vm_object_t object;
vm_object_t tmp_object;
vm_object_size_t length;
chunk_state_t chunk_state;
length = MIN(size, CHUNK_SIZE);
CHUNK_INIT(chunk_state, length);
object = orig_object;
while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
vm_object_paging_begin(object);
deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc);
vm_object_paging_end(object);
tmp_object = object->shadow;
if (tmp_object) {
kill_page = FALSE;
reusable_page = FALSE;
all_reusable = FALSE;
offset += object->vo_shadow_offset;
vm_object_lock(tmp_object);
}
if (object != orig_object)
vm_object_unlock(object);
object = tmp_object;
}
if (object && object != orig_object)
vm_object_unlock(object);
return length;
}
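/*
 * vm_object_deactivate_pages:
 *
 * Deactivate the given range of the object, one chunk at a time.
 * If the range spans the entire internal object and no pages are
 * individually marked reusable, the whole object is flagged
 * all_reusable instead of marking each page.
 */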
__private_extern__ void
vm_object_deactivate_pages(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
boolean_t kill_page,
boolean_t reusable_page)
{
vm_object_size_t length;
boolean_t all_reusable;
pmap_flush_context pmap_flush_context_storage;
all_reusable = FALSE;
if (reusable_page &&
object->internal &&
object->vo_size != 0 &&
object->vo_size == size &&
object->reusable_page_count == 0) {
all_reusable = TRUE;
reusable_page = FALSE;
}
if ((reusable_page || all_reusable) && object->all_reusable) {
return;
}
pmap_flush_context_init(&pmap_flush_context_storage);
while (size) {
length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage);
size -= length;
offset += length;
}
pmap_flush(&pmap_flush_context_storage);
if (all_reusable) {
if (!object->all_reusable) {
unsigned int reusable;
object->all_reusable = TRUE;
assert(object->reusable_page_count == 0);
reusable = object->resident_page_count;
OSAddAtomic(reusable,
&vm_page_stats_reusable.reusable_count);
vm_page_stats_reusable.reusable += reusable;
vm_page_stats_reusable.all_reusable_calls++;
}
} else if (reusable_page) {
vm_page_stats_reusable.partial_reusable_calls++;
}
}
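/*
 * vm_object_reuse_pages:
 *
 * Undo the "reusable" accounting for pages in the given range,
 * choosing between an offset-by-offset lookup and a walk of the
 * resident-page queue depending on which is cheaper.  The object
 * must be locked exclusively.
 */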
void
vm_object_reuse_pages(
vm_object_t object,
vm_object_offset_t start_offset,
vm_object_offset_t end_offset,
boolean_t allow_partial_reuse)
{
vm_object_offset_t cur_offset;
vm_page_t m;
unsigned int reused, reusable;
#define VM_OBJECT_REUSE_PAGE(object, m, reused) \
MACRO_BEGIN \
if ((m) != VM_PAGE_NULL && \
(m)->reusable) { \
assert((object)->reusable_page_count <= \
(object)->resident_page_count); \
assert((object)->reusable_page_count > 0); \
(object)->reusable_page_count--; \
(m)->reusable = FALSE; \
(reused)++; \
} \
MACRO_END
reused = 0;
reusable = 0;
vm_object_lock_assert_exclusive(object);
if (object->all_reusable) {
assert(object->reusable_page_count == 0);
object->all_reusable = FALSE;
if (end_offset - start_offset == object->vo_size ||
!allow_partial_reuse) {
vm_page_stats_reusable.all_reuse_calls++;
reused = object->resident_page_count;
} else {
vm_page_stats_reusable.partial_reuse_calls++;
queue_iterate(&object->memq, m, vm_page_t, listq) {
if (m->offset < start_offset ||
m->offset >= end_offset) {
m->reusable = TRUE;
object->reusable_page_count++;
assert(object->resident_page_count >= object->reusable_page_count);
continue;
} else {
assert(!m->reusable);
reused++;
}
}
}
} else if (object->resident_page_count >
((end_offset - start_offset) >> PAGE_SHIFT)) {
vm_page_stats_reusable.partial_reuse_calls++;
for (cur_offset = start_offset;
cur_offset < end_offset;
cur_offset += PAGE_SIZE_64) {
if (object->reusable_page_count == 0) {
break;
}
m = vm_page_lookup(object, cur_offset);
VM_OBJECT_REUSE_PAGE(object, m, reused);
}
} else {
vm_page_stats_reusable.partial_reuse_calls++;
queue_iterate(&object->memq, m, vm_page_t, listq) {
if (object->reusable_page_count == 0) {
break;
}
if (m->offset < start_offset ||
m->offset >= end_offset) {
continue;
}
VM_OBJECT_REUSE_PAGE(object, m, reused);
}
}
OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count);
vm_page_stats_reusable.reused += reused;
vm_page_stats_reusable.reusable += reusable;
}
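/*
 * Routine:	vm_object_pmap_protect
 *
 * Purpose:
 *	Reduces the permission for all physical pages in the
 *	specified object range.
 *
 *	If removing write permission only, it is sufficient to
 *	protect only the pages in the top-level object; only those
 *	pages may have write permission.  If removing all access,
 *	we must follow the shadow chain from the top-level object
 *	to remove access to all pages in shadowed objects.
 *
 *	The object must *not* be locked.  The object must be
 *	internal.
 */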
__private_extern__ void
vm_object_pmap_protect(
register vm_object_t object,
register vm_object_offset_t offset,
vm_object_size_t size,
pmap_t pmap,
vm_map_offset_t pmap_start,
vm_prot_t prot)
{
vm_object_pmap_protect_options(object, offset, size,
pmap, pmap_start, prot, 0);
}
__private_extern__ void
vm_object_pmap_protect_options(
register vm_object_t object,
register vm_object_offset_t offset,
vm_object_size_t size,
pmap_t pmap,
vm_map_offset_t pmap_start,
vm_prot_t prot,
int options)
{
pmap_flush_context pmap_flush_context_storage;
boolean_t delayed_pmap_flush = FALSE;
if (object == VM_OBJECT_NULL)
return;
size = vm_object_round_page(size);
offset = vm_object_trunc_page(offset);
vm_object_lock(object);
if (object->phys_contiguous) {
if (pmap != NULL) {
vm_object_unlock(object);
pmap_protect_options(pmap,
pmap_start,
pmap_start + size,
prot,
options & ~PMAP_OPTIONS_NOFLUSH,
NULL);
} else {
vm_object_offset_t phys_start, phys_end, phys_addr;
phys_start = object->vo_shadow_offset + offset;
phys_end = phys_start + size;
assert(phys_start <= phys_end);
assert(phys_end <= object->vo_shadow_offset + object->vo_size);
vm_object_unlock(object);
pmap_flush_context_init(&pmap_flush_context_storage);
delayed_pmap_flush = FALSE;
for (phys_addr = phys_start;
phys_addr < phys_end;
phys_addr += PAGE_SIZE_64) {
pmap_page_protect_options(
(ppnum_t) (phys_addr >> PAGE_SHIFT),
prot,
options | PMAP_OPTIONS_NOFLUSH,
(void *)&pmap_flush_context_storage);
delayed_pmap_flush = TRUE;
}
if (delayed_pmap_flush == TRUE)
pmap_flush(&pmap_flush_context_storage);
}
return;
}
assert(object->internal);
while (TRUE) {
if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
vm_object_unlock(object);
pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
options & ~PMAP_OPTIONS_NOFLUSH, NULL);
return;
}
pmap_flush_context_init(&pmap_flush_context_storage);
delayed_pmap_flush = FALSE;
if (ptoa_64(object->resident_page_count / 4) < size) {
vm_page_t p;
vm_object_offset_t end;
end = offset + size;
queue_iterate(&object->memq, p, vm_page_t, listq) {
if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) {
vm_map_offset_t start;
start = pmap_start + p->offset - offset;
if (pmap != PMAP_NULL)
pmap_protect_options(
pmap,
start,
start + PAGE_SIZE_64,
prot,
options | PMAP_OPTIONS_NOFLUSH,
&pmap_flush_context_storage);
else
pmap_page_protect_options(
p->phys_page,
prot,
options | PMAP_OPTIONS_NOFLUSH,
&pmap_flush_context_storage);
delayed_pmap_flush = TRUE;
}
}
} else {
vm_page_t p;
vm_object_offset_t end;
vm_object_offset_t target_off;
end = offset + size;
for (target_off = offset;
target_off < end; target_off += PAGE_SIZE) {
p = vm_page_lookup(object, target_off);
if (p != VM_PAGE_NULL) {
vm_object_offset_t start;
start = pmap_start + (p->offset - offset);
if (pmap != PMAP_NULL)
pmap_protect_options(
pmap,
start,
start + PAGE_SIZE_64,
prot,
options | PMAP_OPTIONS_NOFLUSH,
&pmap_flush_context_storage);
else
pmap_page_protect_options(
p->phys_page,
prot,
options | PMAP_OPTIONS_NOFLUSH,
&pmap_flush_context_storage);
delayed_pmap_flush = TRUE;
}
}
}
if (delayed_pmap_flush == TRUE)
pmap_flush(&pmap_flush_context_storage);
if (prot == VM_PROT_NONE) {
register vm_object_t next_object;
next_object = object->shadow;
if (next_object != VM_OBJECT_NULL) {
offset += object->vo_shadow_offset;
vm_object_lock(next_object);
vm_object_unlock(object);
object = next_object;
}
else {
break;
}
}
else {
break;
}
}
vm_object_unlock(object);
}
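/*
 * Routine:	vm_object_copy_slowly
 *
 * Description:
 *	Copy the specified range of the source virtual memory
 *	object without using protection-based optimizations (such
 *	as copy-on-write).  The pages in the region are actually
 *	copied.
 *
 * In/out conditions:
 *	The caller must hold a reference and a lock for the source
 *	virtual memory object.  The source object will be returned
 *	*unlocked*.
 *
 * Results:
 *	If the copy is completed successfully, KERN_SUCCESS is
 *	returned.  If the caller asserted the interruptible
 *	argument, and an interruption occurred while waiting for a
 *	user-generated event, an interruption code is returned.
 */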
__private_extern__ kern_return_t
vm_object_copy_slowly(
register vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
boolean_t interruptible,
vm_object_t *_result_object)
{
vm_object_t new_object;
vm_object_offset_t new_offset;
struct vm_object_fault_info fault_info;
XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
src_object, src_offset, size, 0, 0);
if (size == 0) {
vm_object_unlock(src_object);
*_result_object = VM_OBJECT_NULL;
return(KERN_INVALID_ARGUMENT);
}
vm_object_reference_locked(src_object);
vm_object_unlock(src_object);
new_object = vm_object_allocate(size);
new_offset = 0;
assert(size == trunc_page_64(size));
fault_info.interruptible = interruptible;
fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
fault_info.user_tag = 0;
fault_info.lo_offset = src_offset;
fault_info.hi_offset = src_offset + size;
fault_info.no_cache = FALSE;
fault_info.stealth = TRUE;
fault_info.io_sync = FALSE;
fault_info.cs_bypass = FALSE;
fault_info.mark_zf_absent = FALSE;
fault_info.batch_pmap_op = FALSE;
for ( ;
size != 0 ;
src_offset += PAGE_SIZE_64,
new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
) {
vm_page_t new_page;
vm_fault_return_t result;
vm_object_lock(new_object);
while ((new_page = vm_page_alloc(new_object, new_offset))
== VM_PAGE_NULL) {
vm_object_unlock(new_object);
if (!vm_page_wait(interruptible)) {
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
return(MACH_SEND_INTERRUPTED);
}
vm_object_lock(new_object);
}
vm_object_unlock(new_object);
do {
vm_prot_t prot = VM_PROT_READ;
vm_page_t _result_page;
vm_page_t top_page;
register
vm_page_t result_page;
kern_return_t error_code;
vm_object_lock(src_object);
vm_object_paging_begin(src_object);
if (size > (vm_size_t) -1) {
/* 32-bit overflow */
fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
} else {
fault_info.cluster_size = (vm_size_t) size;
assert(fault_info.cluster_size == size);
}
XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
_result_page = VM_PAGE_NULL;
result = vm_fault_page(src_object, src_offset,
VM_PROT_READ, FALSE,
FALSE,
&prot, &_result_page, &top_page,
(int *)0,
&error_code, FALSE, FALSE, &fault_info);
switch(result) {
case VM_FAULT_SUCCESS:
result_page = _result_page;
vm_page_copy(result_page, new_page);
vm_object_unlock(result_page->object);
vm_object_lock(new_object);
SET_PAGE_DIRTY(new_page, FALSE);
PAGE_WAKEUP_DONE(new_page);
vm_object_unlock(new_object);
vm_object_lock(result_page->object);
PAGE_WAKEUP_DONE(result_page);
vm_page_lockspin_queues();
if (!result_page->active &&
!result_page->inactive &&
!result_page->throttled)
vm_page_activate(result_page);
vm_page_activate(new_page);
vm_page_unlock_queues();
vm_fault_cleanup(result_page->object,
top_page);
break;
case VM_FAULT_RETRY:
break;
case VM_FAULT_MEMORY_SHORTAGE:
if (vm_page_wait(interruptible))
break;
/* fall thru */
case VM_FAULT_INTERRUPTED:
vm_object_lock(new_object);
VM_PAGE_FREE(new_page);
vm_object_unlock(new_object);
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
return(MACH_SEND_INTERRUPTED);
case VM_FAULT_SUCCESS_NO_VM_PAGE:
/* success but no VM page: fail */
vm_object_paging_end(src_object);
vm_object_unlock(src_object);
/* fall thru */
case VM_FAULT_MEMORY_ERROR:
vm_object_lock(new_object);
VM_PAGE_FREE(new_page);
vm_object_unlock(new_object);
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
return(error_code ? error_code:
KERN_MEMORY_ERROR);
default:
panic("vm_object_copy_slowly: unexpected error"
" 0x%x from vm_fault_page()\n", result);
}
} while (result != VM_FAULT_SUCCESS);
}
vm_object_deallocate(src_object);
*_result_object = new_object;
return(KERN_SUCCESS);
}
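/*
 * Routine:	vm_object_copy_quickly
 *
 * Purpose:
 *	Copy the specified range of the source virtual memory
 *	object, if it can be done without waiting for
 *	user-generated events.
 *
 * Results:
 *	If the copy is successful, the copy is returned in the
 *	arguments; otherwise, the arguments are not affected.
 *
 * In/out conditions:
 *	The object should be unlocked on entry and exit.
 */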
__private_extern__ boolean_t
vm_object_copy_quickly(
vm_object_t *_object,
__unused vm_object_offset_t offset,
__unused vm_object_size_t size,
boolean_t *_src_needs_copy,
boolean_t *_dst_needs_copy)
{
vm_object_t object = *_object;
memory_object_copy_strategy_t copy_strategy;
XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
*_object, offset, size, 0, 0);
if (object == VM_OBJECT_NULL) {
*_src_needs_copy = FALSE;
*_dst_needs_copy = FALSE;
return(TRUE);
}
vm_object_lock(object);
copy_strategy = object->copy_strategy;
switch (copy_strategy) {
case MEMORY_OBJECT_COPY_SYMMETRIC:
vm_object_reference_locked(object);
object->shadowed = TRUE;
vm_object_unlock(object);
*_src_needs_copy = TRUE;
*_dst_needs_copy = TRUE;
break;
case MEMORY_OBJECT_COPY_DELAY:
vm_object_unlock(object);
return(FALSE);
default:
vm_object_unlock(object);
return(FALSE);
}
return(TRUE);
}
static int copy_call_count = 0;
static int copy_call_sleep_count = 0;
static int copy_call_restart_count = 0;
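/*
 * Routine:	vm_object_copy_call [internal]
 *
 * Description:
 *	Copy the source object by asking its memory manager for a
 *	copy (the "copy-call" strategy).
 *
 * In/out conditions:
 *	The source object must be locked on entry.  It will be
 *	*unlocked* on exit.
 */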
static kern_return_t
vm_object_copy_call(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
vm_object_t *_result_object)
{
kern_return_t kr;
vm_object_t copy;
boolean_t check_ready = FALSE;
uint32_t try_failed_count = 0;
copy_call_count++;
while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
THREAD_UNINT);
copy_call_restart_count++;
}
vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
vm_object_unlock(src_object);
/*
 * Ask the memory manager for a copy of the source object.  The
 * copy-call path is not wired up to the pager interface here, so
 * the request fails unconditionally.
 */
kr = KERN_FAILURE;	/* XXX need to change memory_object.defs */
if (kr != KERN_SUCCESS) {
return kr;
}
vm_object_lock(src_object);
while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
THREAD_UNINT);
copy_call_sleep_count++;
}
Retry:
assert(src_object->copy != VM_OBJECT_NULL);
copy = src_object->copy;
if (!vm_object_lock_try(copy)) {
vm_object_unlock(src_object);
try_failed_count++;
mutex_pause(try_failed_count);
vm_object_lock(src_object);
goto Retry;
}
if (copy->vo_size < src_offset+size)
copy->vo_size = src_offset+size;
if (!copy->pager_ready)
check_ready = TRUE;
*_result_object = copy;
vm_object_unlock(copy);
vm_object_unlock(src_object);
if (check_ready == TRUE) {
vm_object_lock(copy);
while (!copy->pager_ready) {
vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
}
vm_object_unlock(copy);
}
return KERN_SUCCESS;
}
static int copy_delayed_lock_collisions = 0;
static int copy_delayed_max_collisions = 0;
static int copy_delayed_lock_contention = 0;
static int copy_delayed_protect_iterate = 0;
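/*
 * Routine:	vm_object_copy_delayed [internal]
 *
 * Description:
 *	Copy the specified virtual memory object, using the
 *	asymmetric copy-on-write algorithm: existing pages are
 *	write-protected and an (initially pageless) copy object is
 *	interposed to receive originals of pages that are
 *	subsequently modified.
 *
 * In/out conditions:
 *	The src_object must be locked on entry.  It will be
 *	unlocked on exit.
 */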
__private_extern__ vm_object_t
vm_object_copy_delayed(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
boolean_t src_object_shared)
{
vm_object_t new_copy = VM_OBJECT_NULL;
vm_object_t old_copy;
vm_page_t p;
vm_object_size_t copy_size = src_offset + size;
pmap_flush_context pmap_flush_context_storage;
boolean_t delayed_pmap_flush = FALSE;
int collisions = 0;
Retry:
if (!src_object->true_share &&
(src_object->paging_in_progress != 0 ||
src_object->activity_in_progress != 0)) {
if (src_object_shared == TRUE) {
vm_object_unlock(src_object);
vm_object_lock(src_object);
src_object_shared = FALSE;
goto Retry;
}
vm_object_paging_wait(src_object, THREAD_UNINT);
}
old_copy = src_object->copy;
if (old_copy != VM_OBJECT_NULL) {
int lock_granted;
if (src_object_shared == TRUE)
lock_granted = vm_object_lock_try_shared(old_copy);
else
lock_granted = vm_object_lock_try(old_copy);
if (!lock_granted) {
vm_object_unlock(src_object);
if (collisions++ == 0)
copy_delayed_lock_contention++;
mutex_pause(collisions);
copy_delayed_lock_collisions++;
if (collisions > copy_delayed_max_collisions)
copy_delayed_max_collisions = collisions;
if (src_object_shared == TRUE)
vm_object_lock_shared(src_object);
else
vm_object_lock(src_object);
goto Retry;
}
if (old_copy->resident_page_count == 0 &&
!old_copy->pager_created) {
if (old_copy->vo_size < copy_size) {
if (src_object_shared == TRUE) {
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
vm_object_lock(src_object);
src_object_shared = FALSE;
goto Retry;
}
copy_delayed_protect_iterate++;
pmap_flush_context_init(&pmap_flush_context_storage);
delayed_pmap_flush = FALSE;
queue_iterate(&src_object->memq, p, vm_page_t, listq) {
if (!p->fictitious &&
p->offset >= old_copy->vo_size &&
p->offset < copy_size) {
if (VM_PAGE_WIRED(p)) {
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
if (new_copy != VM_OBJECT_NULL) {
vm_object_unlock(new_copy);
vm_object_deallocate(new_copy);
}
if (delayed_pmap_flush == TRUE)
pmap_flush(&pmap_flush_context_storage);
return VM_OBJECT_NULL;
} else {
pmap_page_protect_options(p->phys_page, (VM_PROT_ALL & ~VM_PROT_WRITE),
PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
delayed_pmap_flush = TRUE;
}
}
}
if (delayed_pmap_flush == TRUE)
pmap_flush(&pmap_flush_context_storage);
old_copy->vo_size = copy_size;
}
if (src_object_shared == TRUE)
vm_object_reference_shared(old_copy);
else
vm_object_reference_locked(old_copy);
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
if (new_copy != VM_OBJECT_NULL) {
vm_object_unlock(new_copy);
vm_object_deallocate(new_copy);
}
return(old_copy);
}
if (old_copy->vo_size > copy_size)
copy_size = old_copy->vo_size;
if (new_copy == VM_OBJECT_NULL) {
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
new_copy = vm_object_allocate(copy_size);
vm_object_lock(src_object);
vm_object_lock(new_copy);
src_object_shared = FALSE;
goto Retry;
}
new_copy->vo_size = copy_size;
assert((old_copy->shadow == src_object) &&
(old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
} else if (new_copy == VM_OBJECT_NULL) {
vm_object_unlock(src_object);
new_copy = vm_object_allocate(copy_size);
vm_object_lock(src_object);
vm_object_lock(new_copy);
src_object_shared = FALSE;
goto Retry;
}
copy_delayed_protect_iterate++;
pmap_flush_context_init(&pmap_flush_context_storage);
delayed_pmap_flush = FALSE;
queue_iterate(&src_object->memq, p, vm_page_t, listq) {
if (!p->fictitious && p->offset < copy_size) {
if (VM_PAGE_WIRED(p)) {
if (old_copy)
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
vm_object_unlock(new_copy);
vm_object_deallocate(new_copy);
if (delayed_pmap_flush == TRUE)
pmap_flush(&pmap_flush_context_storage);
return VM_OBJECT_NULL;
} else {
pmap_page_protect_options(p->phys_page, (VM_PROT_ALL & ~VM_PROT_WRITE),
PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
delayed_pmap_flush = TRUE;
}
}
}
if (delayed_pmap_flush == TRUE)
pmap_flush(&pmap_flush_context_storage);
if (old_copy != VM_OBJECT_NULL) {
vm_object_lock_assert_exclusive(src_object);
src_object->ref_count--;
assert(src_object->ref_count > 0);
vm_object_lock_assert_exclusive(old_copy);
old_copy->shadow = new_copy;
vm_object_lock_assert_exclusive(new_copy);
assert(new_copy->ref_count > 0);
new_copy->ref_count++;
#if TASK_SWAPPER
if (old_copy->res_count) {
VM_OBJ_RES_INCR(new_copy);
VM_OBJ_RES_DECR(src_object);
}
#endif
vm_object_unlock(old_copy);
}
vm_object_lock_assert_exclusive(new_copy);
new_copy->shadow = src_object;
new_copy->vo_shadow_offset = 0;
new_copy->shadowed = TRUE;
vm_object_lock_assert_exclusive(src_object);
vm_object_reference_locked(src_object);
src_object->copy = new_copy;
vm_object_unlock(src_object);
vm_object_unlock(new_copy);
XPR(XPR_VM_OBJECT,
"vm_object_copy_delayed: used copy object %X for source %X\n",
new_copy, src_object, 0, 0, 0);
return new_copy;
}
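/*
 * vm_object_copy_strategically:
 *
 *	Perform a copy according to the source object's copy strategy:
 *	delayed (copy-on-write), none (eager slow copy), copy-call, or
 *	symmetric (the caller must restart the copy at the map level;
 *	KERN_MEMORY_RESTART_COPY is returned).  The source object must
 *	be unlocked on entry.
 */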
__private_extern__ kern_return_t
vm_object_copy_strategically(
register vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
vm_object_t *dst_object,
vm_object_offset_t *dst_offset,
boolean_t *dst_needs_copy)
{
boolean_t result;
boolean_t interruptible = THREAD_ABORTSAFE;
boolean_t object_lock_shared = FALSE;
memory_object_copy_strategy_t copy_strategy;
assert(src_object != VM_OBJECT_NULL);
copy_strategy = src_object->copy_strategy;
if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
vm_object_lock_shared(src_object);
object_lock_shared = TRUE;
} else
vm_object_lock(src_object);
while (!src_object->internal && !src_object->pager_ready) {
wait_result_t wait_result;
if (object_lock_shared == TRUE) {
vm_object_unlock(src_object);
vm_object_lock(src_object);
object_lock_shared = FALSE;
continue;
}
wait_result = vm_object_sleep( src_object,
VM_OBJECT_EVENT_PAGER_READY,
interruptible);
if (wait_result != THREAD_AWAKENED) {
vm_object_unlock(src_object);
*dst_object = VM_OBJECT_NULL;
*dst_offset = 0;
*dst_needs_copy = FALSE;
return(MACH_SEND_INTERRUPTED);
}
}
switch (copy_strategy) {
case MEMORY_OBJECT_COPY_DELAY:
*dst_object = vm_object_copy_delayed(src_object,
src_offset, size, object_lock_shared);
if (*dst_object != VM_OBJECT_NULL) {
*dst_offset = src_offset;
*dst_needs_copy = TRUE;
result = KERN_SUCCESS;
break;
}
vm_object_lock(src_object);
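/* delayed copy failed (e.g. wired pages): fall through to the slow copy */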
case MEMORY_OBJECT_COPY_NONE:
result = vm_object_copy_slowly(src_object, src_offset, size,
interruptible, dst_object);
if (result == KERN_SUCCESS) {
*dst_offset = 0;
*dst_needs_copy = FALSE;
}
break;
case MEMORY_OBJECT_COPY_CALL:
result = vm_object_copy_call(src_object, src_offset, size,
dst_object);
if (result == KERN_SUCCESS) {
*dst_offset = src_offset;
*dst_needs_copy = TRUE;
}
break;
case MEMORY_OBJECT_COPY_SYMMETRIC:
XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0);
vm_object_unlock(src_object);
result = KERN_MEMORY_RESTART_COPY;
break;
default:
panic("copy_strategically: bad strategy");
result = KERN_INVALID_ARGUMENT;
}
return(result);
}
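/*
 * vm_object_shadow:
 *
 *	Create a new object that shadows the given source object,
 *	redirecting *object and *offset to the new shadow.  When
 *	vm_object_shadow_check is set and the source is not actually
 *	shared (single reference, no copy object involved), the shadow
 *	is deemed unnecessary and FALSE is returned instead.
 */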
boolean_t vm_object_shadow_check = TRUE;
__private_extern__ boolean_t
vm_object_shadow(
vm_object_t *object,
vm_object_offset_t *offset,
vm_object_size_t length)
{
register vm_object_t source;
register vm_object_t result;
source = *object;
assert(source != VM_OBJECT_NULL);
if (source == VM_OBJECT_NULL)
return FALSE;
#if 0
assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
#endif
if (vm_object_shadow_check &&
source->vo_size == length &&
source->ref_count == 1 &&
(source->shadow == VM_OBJECT_NULL ||
source->shadow->copy == VM_OBJECT_NULL) )
{
source->shadowed = FALSE;
return FALSE;
}
if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
panic("vm_object_shadow: no object for shadowing");
result->shadow = source;
result->vo_shadow_offset = *offset;
*offset = 0;
*object = result;
return TRUE;
}
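/*
 * vm_object_enter:
 *
 *	Find the VM object associated with the given memory object
 *	(pager) in the pager-to-object hash table, creating and
 *	initializing a new object if none exists.  Concurrent lookups
 *	for the same pager block on the hash entry until the winner
 *	has published its object.
 */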
vm_object_t
vm_object_enter(
memory_object_t pager,
vm_object_size_t size,
boolean_t internal,
boolean_t init,
boolean_t named)
{
register vm_object_t object;
vm_object_t new_object;
boolean_t must_init;
vm_object_hash_entry_t entry, new_entry;
uint32_t try_failed_count = 0;
lck_mtx_t *lck;
if (pager == MEMORY_OBJECT_NULL)
return(vm_object_allocate(size));
new_object = VM_OBJECT_NULL;
new_entry = VM_OBJECT_HASH_ENTRY_NULL;
must_init = init;
Retry:
lck = vm_object_hash_lock_spin(pager);
do {
entry = vm_object_hash_lookup(pager, FALSE);
if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
if (new_object == VM_OBJECT_NULL) {
vm_object_hash_unlock(lck);
assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
new_entry = vm_object_hash_entry_alloc(pager);
new_object = vm_object_allocate(size);
lck = vm_object_hash_lock_spin(pager);
} else {
vm_object_hash_insert(new_entry, new_object);
entry = new_entry;
new_entry = VM_OBJECT_HASH_ENTRY_NULL;
new_object = VM_OBJECT_NULL;
must_init = TRUE;
}
} else if (entry->object == VM_OBJECT_NULL) {
entry->waiting = TRUE;
entry = VM_OBJECT_HASH_ENTRY_NULL;
assert_wait((event_t) pager, THREAD_UNINT);
vm_object_hash_unlock(lck);
thread_block(THREAD_CONTINUE_NULL);
lck = vm_object_hash_lock_spin(pager);
}
} while (entry == VM_OBJECT_HASH_ENTRY_NULL);
object = entry->object;
assert(object != VM_OBJECT_NULL);
if (!must_init) {
if ( !vm_object_lock_try(object)) {
vm_object_hash_unlock(lck);
try_failed_count++;
mutex_pause(try_failed_count);
goto Retry;
}
assert(!internal || object->internal);
#if VM_OBJECT_CACHE
if (object->ref_count == 0) {
if ( !vm_object_cache_lock_try()) {
vm_object_hash_unlock(lck);
vm_object_unlock(object);
try_failed_count++;
mutex_pause(try_failed_count);
goto Retry;
}
XPR(XPR_VM_OBJECT_CACHE,
"vm_object_enter: removing %x from cache, head (%x, %x)\n",
object,
vm_object_cached_list.next,
vm_object_cached_list.prev, 0,0);
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cached_count--;
vm_object_cache_unlock();
}
#endif
if (named) {
assert(!object->named);
object->named = TRUE;
}
vm_object_lock_assert_exclusive(object);
object->ref_count++;
vm_object_res_reference(object);
vm_object_hash_unlock(lck);
vm_object_unlock(object);
VM_STAT_INCR(hits);
} else
vm_object_hash_unlock(lck);
assert(object->ref_count > 0);
VM_STAT_INCR(lookups);
XPR(XPR_VM_OBJECT,
"vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
pager, object, must_init, 0, 0);
if (new_object != VM_OBJECT_NULL)
vm_object_deallocate(new_object);
if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
vm_object_hash_entry_free(new_entry);
if (must_init) {
memory_object_control_t control;
control = memory_object_control_allocate(object);
assert (control != MEMORY_OBJECT_CONTROL_NULL);
vm_object_lock(object);
assert(object != kernel_object);
memory_object_reference(pager);
object->pager_created = TRUE;
object->pager = pager;
object->internal = internal;
object->pager_trusted = internal;
if (!internal) {
object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
}
object->pager_control = control;
object->pager_ready = FALSE;
vm_object_unlock(object);
(void) memory_object_init(pager,
object->pager_control,
PAGE_SIZE);
vm_object_lock(object);
if (named)
object->named = TRUE;
if (internal) {
object->pager_ready = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
}
object->pager_initialized = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
} else {
vm_object_lock(object);
}
while (!object->pager_initialized) {
vm_object_sleep(object,
VM_OBJECT_EVENT_INITIALIZED,
THREAD_UNINT);
}
vm_object_unlock(object);
XPR(XPR_VM_OBJECT,
"vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
object, object->pager, internal, 0,0);
return(object);
}
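/*
 * vm_object_pager_create:
 *
 *	Create a memory object for an internal object by asking the
 *	default memory manager, then enter it into the pager-to-object
 *	hash table via vm_object_enter().  Called with the object
 *	locked; returns with it locked.
 */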
void
vm_object_pager_create(
register vm_object_t object)
{
memory_object_t pager;
vm_object_hash_entry_t entry;
lck_mtx_t *lck;
#if MACH_PAGEMAP
vm_object_size_t size;
vm_external_map_t map;
#endif
XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
object, 0,0,0,0);
assert(object != kernel_object);
if (memory_manager_default_check() != KERN_SUCCESS)
return;
vm_object_paging_begin(object);
if (object->pager_created) {
while (!object->pager_initialized) {
vm_object_sleep(object,
VM_OBJECT_EVENT_INITIALIZED,
THREAD_UNINT);
}
vm_object_paging_end(object);
return;
}
object->pager_created = TRUE;
object->paging_offset = 0;
#if MACH_PAGEMAP
size = object->vo_size;
#endif
vm_object_unlock(object);
#if MACH_PAGEMAP
if (DEFAULT_PAGER_IS_ACTIVE) {
map = vm_external_create(size);
vm_object_lock(object);
assert(object->vo_size == size);
object->existence_map = map;
vm_object_unlock(object);
}
#endif
if ((uint32_t) object->vo_size != object->vo_size) {
panic("vm_object_pager_create(): object size 0x%llx >= 4GB\n",
(uint64_t) object->vo_size);
}
{
memory_object_default_t dmm;
dmm = memory_manager_default_reference();
assert(object->temporary);
assert((vm_size_t) object->vo_size == object->vo_size);
(void) memory_object_create(dmm, (vm_size_t) object->vo_size,
&pager);
memory_object_default_deallocate(dmm);
}
entry = vm_object_hash_entry_alloc(pager);
lck = vm_object_hash_lock_spin(pager);
vm_object_hash_insert(entry, object);
vm_object_hash_unlock(lck);
if (vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE) != object)
panic("vm_object_pager_create: mismatch");
memory_object_deallocate(pager);
vm_object_lock(object);
vm_object_paging_end(object);
}
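/*
 * vm_object_compressor_pager_create:
 *
 *	Same as vm_object_pager_create(), but backs the object with the
 *	VM compressor pager instead of the default memory manager.
 */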
void
vm_object_compressor_pager_create(
register vm_object_t object)
{
memory_object_t pager;
vm_object_hash_entry_t entry;
lck_mtx_t *lck;
assert(object != kernel_object);
vm_object_paging_begin(object);
if (object->pager_created) {
while (!object->pager_initialized) {
vm_object_sleep(object,
VM_OBJECT_EVENT_INITIALIZED,
THREAD_UNINT);
}
vm_object_paging_end(object);
return;
}
object->pager_created = TRUE;
object->paging_offset = 0;
vm_object_unlock(object);
if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
(object->vo_size/PAGE_SIZE)) {
panic("vm_object_compressor_pager_create(%p): "
"object size 0x%llx >= 0x%llx\n",
object,
(uint64_t) object->vo_size,
0x0FFFFFFFFULL*PAGE_SIZE);
}
{
assert(object->temporary);
assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
(object->vo_size/PAGE_SIZE));
(void) compressor_memory_object_create(
(memory_object_size_t) object->vo_size,
&pager);
if (pager == NULL) {
panic("vm_object_compressor_pager_create(): "
"no pager for object %p size 0x%llx\n",
object, (uint64_t) object->vo_size);
}
}
entry = vm_object_hash_entry_alloc(pager);
lck = vm_object_hash_lock_spin(pager);
vm_object_hash_insert(entry, object);
vm_object_hash_unlock(lck);
if (vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE) != object)
panic("vm_object_compressor_pager_create: mismatch");
memory_object_deallocate(pager);
vm_object_lock(object);
vm_object_paging_end(object);
}
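/*
 * vm_object_remove:
 *
 *	Disassociate a terminating object from its pager's hash table
 *	entry; the entry itself is left in place for any waiters.
 */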
__private_extern__ void
vm_object_remove(
vm_object_t object)
{
memory_object_t pager;
if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
vm_object_hash_entry_t entry;
entry = vm_object_hash_lookup(pager, FALSE);
if (entry != VM_OBJECT_HASH_ENTRY_NULL)
entry->object = VM_OBJECT_NULL;
}
}
static long object_collapses = 0;
static long object_bypasses = 0;
static boolean_t vm_object_collapse_allowed = TRUE;
static boolean_t vm_object_bypass_allowed = TRUE;
#if MACH_PAGEMAP
static int vm_external_discarded;
static int vm_external_collapsed;
#endif
unsigned long vm_object_collapse_encrypted = 0;
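/*
 * vm_object_do_collapse:
 *
 *	Collapse backing_object into object: move (or free) all of the
 *	backing object's resident pages, take over its pager and its
 *	shadow, then destroy it.  Both objects must be locked
 *	exclusively, and the backing object must have exactly one
 *	reference (the one held through object).
 */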
static void
vm_object_do_collapse(
vm_object_t object,
vm_object_t backing_object)
{
vm_page_t p, pp;
vm_object_offset_t new_offset, backing_offset;
vm_object_size_t size;
vm_object_lock_assert_exclusive(object);
vm_object_lock_assert_exclusive(backing_object);
backing_offset = object->vo_shadow_offset;
size = object->vo_size;
while (!queue_empty(&backing_object->memq)) {
p = (vm_page_t) queue_first(&backing_object->memq);
new_offset = (p->offset - backing_offset);
assert(!p->busy || p->absent);
if (p->offset < backing_offset || new_offset >= size) {
VM_PAGE_FREE(p);
} else {
if (p->encrypted) {
vm_object_collapse_encrypted++;
}
pp = vm_page_lookup(object, new_offset);
if (pp == VM_PAGE_NULL) {
vm_page_rename(p, object, new_offset, TRUE);
#if MACH_PAGEMAP
} else if (pp->absent) {
VM_PAGE_FREE(pp);
vm_page_rename(p, object, new_offset, TRUE);
#endif
} else {
assert(! pp->absent);
VM_PAGE_FREE(p);
}
}
}
#if !MACH_PAGEMAP
assert((!object->pager_created && (object->pager == MEMORY_OBJECT_NULL))
|| (!backing_object->pager_created
&& (backing_object->pager == MEMORY_OBJECT_NULL)));
#else
assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL);
#endif
if (backing_object->pager != MEMORY_OBJECT_NULL) {
vm_object_hash_entry_t entry;
#if 0
if (COMPRESSED_PAGER_IS_ACTIVE) {
panic("vm_object_do_collapse(%p,%p): "
"backing_object has a compressor pager",
object, backing_object);
}
#endif
assert(!object->paging_in_progress);
assert(!object->activity_in_progress);
object->pager = backing_object->pager;
if (backing_object->hashed) {
lck_mtx_t *lck;
lck = vm_object_hash_lock_spin(backing_object->pager);
entry = vm_object_hash_lookup(object->pager, FALSE);
assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
entry->object = object;
vm_object_hash_unlock(lck);
object->hashed = TRUE;
}
object->pager_created = backing_object->pager_created;
object->pager_control = backing_object->pager_control;
object->pager_ready = backing_object->pager_ready;
object->pager_initialized = backing_object->pager_initialized;
object->paging_offset =
backing_object->paging_offset + backing_offset;
if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
memory_object_control_collapse(object->pager_control,
object);
}
}
#if MACH_PAGEMAP
assert(object->existence_map == VM_EXTERNAL_NULL);
if (backing_offset || (size != backing_object->vo_size)) {
vm_external_discarded++;
vm_external_destroy(backing_object->existence_map,
backing_object->vo_size);
}
else {
vm_external_collapsed++;
object->existence_map = backing_object->existence_map;
}
backing_object->existence_map = VM_EXTERNAL_NULL;
#endif
assert(!object->phys_contiguous);
assert(!backing_object->phys_contiguous);
object->shadow = backing_object->shadow;
if (object->shadow) {
object->vo_shadow_offset += backing_object->vo_shadow_offset;
} else {
object->vo_shadow_offset = 0;
}
assert((object->shadow == VM_OBJECT_NULL) ||
(object->shadow->copy != backing_object));
assert((backing_object->ref_count == 1) &&
(backing_object->resident_page_count == 0) &&
(backing_object->paging_in_progress == 0) &&
(backing_object->activity_in_progress == 0));
backing_object->alive = FALSE;
vm_object_unlock(backing_object);
XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
backing_object, 0,0,0,0);
vm_object_lock_destroy(backing_object);
zfree(vm_object_zone, backing_object);
object_collapses++;
}
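/*
 * vm_object_do_bypass:
 *
 *	Remove backing_object from the shadow chain without moving any
 *	pages: object is re-pointed at the backing object's own shadow.
 *	Used when the backing object is shared but the caller
 *	(vm_object_collapse) has determined that none of its pages are
 *	visible through object.
 */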
static void
vm_object_do_bypass(
vm_object_t object,
vm_object_t backing_object)
{
vm_object_lock_assert_exclusive(object);
vm_object_lock_assert_exclusive(backing_object);
#if TASK_SWAPPER
if (backing_object->shadow != VM_OBJECT_NULL) {
vm_object_lock(backing_object->shadow);
vm_object_lock_assert_exclusive(backing_object->shadow);
backing_object->shadow->ref_count++;
if (object->res_count != 0)
vm_object_res_reference(backing_object->shadow);
vm_object_unlock(backing_object->shadow);
}
#else
vm_object_reference(backing_object->shadow);
#endif
assert(!object->phys_contiguous);
assert(!backing_object->phys_contiguous);
object->shadow = backing_object->shadow;
if (object->shadow) {
object->vo_shadow_offset += backing_object->vo_shadow_offset;
} else {
object->vo_shadow_offset = 0;
}
if (backing_object->copy == object) {
backing_object->copy = VM_OBJECT_NULL;
}
if (backing_object->ref_count > 2 ||
(!backing_object->named && backing_object->ref_count > 1)) {
vm_object_lock_assert_exclusive(backing_object);
backing_object->ref_count--;
#if TASK_SWAPPER
if (object->res_count != 0)
vm_object_res_deallocate(backing_object);
assert(backing_object->ref_count > 0);
#endif
vm_object_unlock(backing_object);
} else {
#if TASK_SWAPPER
if (object->res_count == 0) {
vm_object_res_reference(backing_object);
}
#endif
vm_object_activity_begin(object);
vm_object_unlock(object);
vm_object_unlock(backing_object);
vm_object_deallocate(backing_object);
vm_object_lock(object);
vm_object_activity_end(object);
}
object_bypasses++;
}
static unsigned long vm_object_collapse_calls = 0;
static unsigned long vm_object_collapse_objects = 0;
static unsigned long vm_object_collapse_do_collapse = 0;
static unsigned long vm_object_collapse_do_bypass = 0;
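/*
 * vm_object_collapse:
 *
 *	Walk the shadow chain, collapsing or bypassing backing objects
 *	where possible.  A backing object with a single reference can be
 *	collapsed outright; a shared one can only be bypassed if a scan
 *	(accelerated by the cow_hint) shows that none of its pages are
 *	still visible through the front object.
 */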
__private_extern__ void
vm_object_collapse(
register vm_object_t object,
register vm_object_offset_t hint_offset,
boolean_t can_bypass)
{
register vm_object_t backing_object;
register unsigned int rcount;
register unsigned int size;
vm_object_t original_object;
int object_lock_type;
int backing_object_lock_type;
vm_object_collapse_calls++;
if (! vm_object_collapse_allowed &&
! (can_bypass && vm_object_bypass_allowed)) {
return;
}
XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
object, 0,0,0,0);
if (object == VM_OBJECT_NULL)
return;
original_object = object;
object_lock_type = OBJECT_LOCK_EXCLUSIVE;
backing_object_lock_type = OBJECT_LOCK_SHARED;
retry:
object = original_object;
vm_object_lock_assert_exclusive(object);
while (TRUE) {
vm_object_collapse_objects++;
backing_object = object->shadow;
if (backing_object == VM_OBJECT_NULL) {
if (object != original_object) {
vm_object_unlock(object);
}
return;
}
if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
vm_object_lock_shared(backing_object);
} else {
vm_object_lock(backing_object);
}
if (object->paging_in_progress != 0 ||
object->activity_in_progress != 0) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
if (!backing_object->internal ||
backing_object->paging_in_progress != 0 ||
backing_object->activity_in_progress != 0) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
if (backing_object->shadow != VM_OBJECT_NULL &&
backing_object->shadow->copy == backing_object) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
if (backing_object->ref_count == 1 &&
(!object->pager_created
#if !MACH_PAGEMAP
|| (!backing_object->pager_created)
#endif
) && vm_object_collapse_allowed) {
if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
vm_object_unlock(backing_object);
if (object != original_object)
vm_object_unlock(object);
object_lock_type = OBJECT_LOCK_EXCLUSIVE;
backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
goto retry;
}
XPR(XPR_VM_OBJECT,
"vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
backing_object, object,
backing_object->pager,
backing_object->pager_control, 0);
vm_object_do_collapse(object, backing_object);
vm_object_collapse_do_collapse++;
continue;
}
if (! (can_bypass && vm_object_bypass_allowed)) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
size = (unsigned int)atop(object->vo_size);
rcount = object->resident_page_count;
if (rcount != size) {
vm_object_offset_t offset;
vm_object_offset_t backing_offset;
unsigned int backing_rcount;
if (backing_object->pager_created
#if MACH_PAGEMAP
&& (backing_object->existence_map == VM_EXTERNAL_NULL)
#endif
) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
if (object->pager_created
#if MACH_PAGEMAP
&& (object->existence_map == VM_EXTERNAL_NULL)
#endif
) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
backing_offset = object->vo_shadow_offset;
backing_rcount = backing_object->resident_page_count;
if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
#if MACH_PAGEMAP
#define EXISTS_IN_OBJECT(obj, off, rc) \
((vm_external_state_get((obj)->existence_map, \
(vm_offset_t)(off)) \
== VM_EXTERNAL_STATE_EXISTS) || \
(VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
== VM_EXTERNAL_STATE_EXISTS) || \
((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
#else
#define EXISTS_IN_OBJECT(obj, off, rc) \
((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
== VM_EXTERNAL_STATE_EXISTS) || \
((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
#endif
if (object->cow_hint != ~(vm_offset_t)0)
hint_offset = (vm_object_offset_t)object->cow_hint;
else
hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
(hint_offset - 8 * PAGE_SIZE_64) : 0;
if (EXISTS_IN_OBJECT(backing_object, hint_offset +
backing_offset, backing_rcount) &&
!EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
object->cow_hint = (vm_offset_t) hint_offset;
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
if (backing_rcount && backing_rcount < (size / 8)) {
unsigned int rc = rcount;
vm_page_t p;
backing_rcount = backing_object->resident_page_count;
p = (vm_page_t)queue_first(&backing_object->memq);
do {
offset = (p->offset - backing_offset);
if (offset < object->vo_size &&
offset != hint_offset &&
!EXISTS_IN_OBJECT(object, offset, rc)) {
object->cow_hint = (vm_offset_t) offset;
break;
}
p = (vm_page_t) queue_next(&p->listq);
} while (--backing_rcount);
if (backing_rcount != 0 ) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
}
if (backing_rcount
#if MACH_PAGEMAP
|| backing_object->existence_map
#endif
) {
offset = hint_offset;
while((offset =
(offset + PAGE_SIZE_64 < object->vo_size) ?
(offset + PAGE_SIZE_64) : 0) != hint_offset) {
if (EXISTS_IN_OBJECT(backing_object, offset +
backing_offset, backing_rcount) &&
!EXISTS_IN_OBJECT(object, offset, rcount)) {
object->cow_hint = (vm_offset_t) offset;
break;
}
}
if (offset != hint_offset) {
if (object != original_object) {
vm_object_unlock(object);
}
object = backing_object;
object_lock_type = backing_object_lock_type;
continue;
}
}
}
if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
vm_object_unlock(backing_object);
if (object != original_object)
vm_object_unlock(object);
object_lock_type = OBJECT_LOCK_EXCLUSIVE;
backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
goto retry;
}
object->cow_hint = (vm_offset_t)0;
vm_object_do_bypass(object, backing_object);
vm_object_collapse_do_bypass++;
continue;
}
if (object != original_object) {
vm_object_unlock(object);
}
}
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;
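/*
 * vm_object_page_remove:
 *
 *	Free all resident pages of the object in [start, end).  Chooses
 *	between per-offset lookups and a full memq walk based on which
 *	is expected to touch fewer pages.
 */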
__private_extern__ void
vm_object_page_remove(
register vm_object_t object,
register vm_object_offset_t start,
register vm_object_offset_t end)
{
register vm_page_t p, next;
if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
vm_object_page_remove_lookup++;
for (; start < end; start += PAGE_SIZE_64) {
p = vm_page_lookup(object, start);
if (p != VM_PAGE_NULL) {
assert(!p->cleaning && !p->pageout && !p->laundry);
if (!p->fictitious && p->pmapped)
pmap_disconnect(p->phys_page);
VM_PAGE_FREE(p);
}
}
} else {
vm_object_page_remove_iterate++;
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
next = (vm_page_t) queue_next(&p->listq);
if ((start <= p->offset) && (p->offset < end)) {
assert(!p->cleaning && !p->pageout && !p->laundry);
if (!p->fictitious && p->pmapped)
pmap_disconnect(p->phys_page);
VM_PAGE_FREE(p);
}
p = next;
}
}
}
static int vm_object_coalesce_count = 0;
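/*
 * vm_object_coalesce:
 *
 *	Try to extend prev_object to cover the next_size bytes that
 *	follow it, so a new anonymous mapping can reuse it instead of
 *	allocating a fresh object.  Only possible when prev_object is
 *	unshared, pagerless and idle; returns TRUE on success.
 */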
__private_extern__ boolean_t
vm_object_coalesce(
register vm_object_t prev_object,
vm_object_t next_object,
vm_object_offset_t prev_offset,
__unused vm_object_offset_t next_offset,
vm_object_size_t prev_size,
vm_object_size_t next_size)
{
vm_object_size_t newsize;
#ifdef lint
next_offset++;
#endif
if (next_object != VM_OBJECT_NULL) {
return(FALSE);
}
if (prev_object == VM_OBJECT_NULL) {
return(TRUE);
}
XPR(XPR_VM_OBJECT,
"vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
prev_object, prev_offset, prev_size, next_size, 0);
vm_object_lock(prev_object);
vm_object_collapse(prev_object, prev_offset, TRUE);
if ((prev_object->ref_count > 1) ||
prev_object->pager_created ||
(prev_object->shadow != VM_OBJECT_NULL) ||
(prev_object->copy != VM_OBJECT_NULL) ||
(prev_object->true_share != FALSE) ||
(prev_object->purgable != VM_PURGABLE_DENY) ||
(prev_object->paging_in_progress != 0) ||
(prev_object->activity_in_progress != 0)) {
vm_object_unlock(prev_object);
return(FALSE);
}
vm_object_coalesce_count++;
vm_object_page_remove(prev_object,
prev_offset + prev_size,
prev_offset + prev_size + next_size);
newsize = prev_offset + prev_size + next_size;
if (newsize > prev_object->vo_size) {
#if MACH_PAGEMAP
assert(prev_object->existence_map == VM_EXTERNAL_NULL);
#endif
prev_object->vo_size = newsize;
}
vm_object_unlock(prev_object);
return(TRUE);
}
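/*
 * vm_object_page_map:
 *
 *	Populate the given range of the object with private, wired
 *	pages whose physical addresses are supplied by the map_fn
 *	callback; any pages already resident at those offsets are freed.
 */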
void
vm_object_page_map(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
vm_object_offset_t (*map_fn)(void *map_fn_data,
vm_object_offset_t offset),
void *map_fn_data)
{
int64_t num_pages;
int i;
vm_page_t m;
vm_page_t old_page;
vm_object_offset_t addr;
num_pages = atop_64(size);
for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {
addr = (*map_fn)(map_fn_data, offset);
while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
vm_page_more_fictitious();
vm_object_lock(object);
if ((old_page = vm_page_lookup(object, offset))
!= VM_PAGE_NULL)
{
VM_PAGE_FREE(old_page);
}
assert((ppnum_t) addr == addr);
vm_page_init(m, (ppnum_t) addr, FALSE);
m->private = TRUE;
m->wire_count = 1;
vm_page_insert(m, object, offset);
PAGE_WAKEUP_DONE(m);
vm_object_unlock(object);
}
}
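/*
 * vm_object_populate_with_private:
 *
 *	Attach the given run of physical pages to a "private" object,
 *	either page by page, or, for a physically contiguous object, by
 *	recording the base page in vo_shadow_offset.
 */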
kern_return_t
vm_object_populate_with_private(
vm_object_t object,
vm_object_offset_t offset,
ppnum_t phys_page,
vm_size_t size)
{
ppnum_t base_page;
vm_object_offset_t base_offset;
if (!object->private)
return KERN_FAILURE;
base_page = phys_page;
vm_object_lock(object);
if (!object->phys_contiguous) {
vm_page_t m;
if ((base_offset = trunc_page_64(offset)) != offset) {
vm_object_unlock(object);
return KERN_FAILURE;
}
base_offset += object->paging_offset;
while (size) {
m = vm_page_lookup(object, base_offset);
if (m != VM_PAGE_NULL) {
if (m->fictitious) {
if (m->phys_page != vm_page_guard_addr) {
vm_page_lockspin_queues();
m->private = TRUE;
vm_page_unlock_queues();
m->fictitious = FALSE;
m->phys_page = base_page;
}
} else if (m->phys_page != base_page) {
if ( !m->private) {
panic("vm_object_populate_with_private - %p not private", m);
}
if (m->pmapped) {
pmap_disconnect(m->phys_page);
}
m->phys_page = base_page;
}
if (m->encrypted) {
panic("vm_object_populate_with_private - %p encrypted", m);
}
} else {
while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
vm_page_more_fictitious();
m->private = TRUE;
m->fictitious = FALSE;
m->phys_page = base_page;
m->unusual = TRUE;
m->busy = FALSE;
vm_page_insert(m, object, base_offset);
}
base_page++;
base_offset += PAGE_SIZE;
size -= PAGE_SIZE;
}
} else {
object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
object->vo_size = size;
}
vm_object_unlock(object);
return KERN_SUCCESS;
}
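/*
 * memory_object_free_from_cache:
 *
 *	Terminate cached objects whose pager is managed by the given
 *	pager_ops, up to the limit in *count.  Only meaningful when
 *	VM_OBJECT_CACHE is configured; otherwise reports zero released.
 */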
__private_extern__ kern_return_t
memory_object_free_from_cache(
__unused host_t host,
__unused memory_object_pager_ops_t pager_ops,
int *count)
{
#if VM_OBJECT_CACHE
int object_released = 0;
register vm_object_t object = VM_OBJECT_NULL;
vm_object_t shadow;
try_again:
vm_object_cache_lock();
queue_iterate(&vm_object_cached_list, object,
vm_object_t, cached_list) {
if (object->pager &&
(pager_ops == object->pager->mo_pager_ops)) {
vm_object_lock(object);
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cached_count--;
vm_object_cache_unlock();
assert(object->pager_initialized);
assert(object->ref_count == 0);
vm_object_lock_assert_exclusive(object);
object->ref_count++;
shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
if ((vm_object_terminate(object) == KERN_SUCCESS)
&& (shadow != VM_OBJECT_NULL)) {
vm_object_deallocate(shadow);
}
if(object_released++ == *count)
return KERN_SUCCESS;
goto try_again;
}
}
vm_object_cache_unlock();
*count = object_released;
#else
*count = 0;
#endif
return KERN_SUCCESS;
}
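/*
 * memory_object_create_named:
 *
 *	Enter the pager into the VM system as a named object and hand
 *	back its pager control.  Panics if the caller already holds the
 *	naming right.
 */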
kern_return_t
memory_object_create_named(
memory_object_t pager,
memory_object_offset_t size,
memory_object_control_t *control)
{
vm_object_t object;
vm_object_hash_entry_t entry;
lck_mtx_t *lck;
*control = MEMORY_OBJECT_CONTROL_NULL;
if (pager == MEMORY_OBJECT_NULL)
return KERN_INVALID_ARGUMENT;
lck = vm_object_hash_lock_spin(pager);
entry = vm_object_hash_lookup(pager, FALSE);
if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
(entry->object != VM_OBJECT_NULL)) {
if (entry->object->named == TRUE)
panic("memory_object_create_named: caller already holds the right");
}
vm_object_hash_unlock(lck);
if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
return(KERN_INVALID_OBJECT);
}
if (object != VM_OBJECT_NULL) {
vm_object_lock(object);
object->named = TRUE;
while (!object->pager_ready) {
vm_object_sleep(object,
VM_OBJECT_EVENT_PAGER_READY,
THREAD_UNINT);
}
*control = object->pager_control;
vm_object_unlock(object);
}
return (KERN_SUCCESS);
}
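/*
 * memory_object_recover_named:
 *
 *	Re-acquire the "named" reference on an object that is still
 *	alive, optionally waiting for a termination in progress to
 *	finish first.
 */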
kern_return_t
memory_object_recover_named(
memory_object_control_t control,
boolean_t wait_on_terminating)
{
vm_object_t object;
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL) {
return (KERN_INVALID_ARGUMENT);
}
restart:
vm_object_lock(object);
if (object->terminating && wait_on_terminating) {
vm_object_wait(object,
VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
THREAD_UNINT);
goto restart;
}
if (!object->alive) {
vm_object_unlock(object);
return KERN_FAILURE;
}
if (object->named == TRUE) {
vm_object_unlock(object);
return KERN_SUCCESS;
}
#if VM_OBJECT_CACHE
if ((object->ref_count == 0) && (!object->terminating)) {
if (!vm_object_cache_lock_try()) {
vm_object_unlock(object);
goto restart;
}
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cached_count--;
XPR(XPR_VM_OBJECT_CACHE,
"memory_object_recover_named: removing %X, head (%X, %X)\n",
object,
vm_object_cached_list.next,
vm_object_cached_list.prev, 0,0);
vm_object_cache_unlock();
}
#endif
object->named = TRUE;
vm_object_lock_assert_exclusive(object);
object->ref_count++;
vm_object_res_reference(object);
while (!object->pager_ready) {
vm_object_sleep(object,
VM_OBJECT_EVENT_PAGER_READY,
THREAD_UNINT);
}
vm_object_unlock(object);
return (KERN_SUCCESS);
}
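/*
 * vm_object_release_name:
 *
 *	Drop the "named" reference on the object, subject to the given
 *	MEMORY_OBJECT_* flags, terminating the object (and walking down
 *	its shadow chain) when that was the last reference.
 */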
__private_extern__ kern_return_t
vm_object_release_name(
vm_object_t object,
int flags)
{
vm_object_t shadow;
boolean_t original_object = TRUE;
while (object != VM_OBJECT_NULL) {
vm_object_lock(object);
assert(object->alive);
if (original_object)
assert(object->named);
assert(object->ref_count > 0);
if (object->pager_created && !object->pager_initialized) {
assert(!object->can_persist);
vm_object_assert_wait(object,
VM_OBJECT_EVENT_INITIALIZED,
THREAD_UNINT);
vm_object_unlock(object);
thread_block(THREAD_CONTINUE_NULL);
continue;
}
if (((object->ref_count > 1)
&& (flags & MEMORY_OBJECT_TERMINATE_IDLE))
|| (object->terminating)) {
vm_object_unlock(object);
return KERN_FAILURE;
} else {
if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
vm_object_unlock(object);
return KERN_SUCCESS;
}
}
if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
(object->ref_count == 1)) {
if (original_object)
object->named = FALSE;
vm_object_unlock(object);
vm_object_deallocate(object);
return KERN_SUCCESS;
}
VM_OBJ_RES_DECR(object);
shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
if (object->ref_count == 1) {
if (vm_object_terminate(object) != KERN_SUCCESS) {
if (original_object) {
return KERN_FAILURE;
} else {
return KERN_SUCCESS;
}
}
if (shadow != VM_OBJECT_NULL) {
original_object = FALSE;
object = shadow;
continue;
}
return KERN_SUCCESS;
} else {
vm_object_lock_assert_exclusive(object);
object->ref_count--;
assert(object->ref_count > 0);
if(original_object)
object->named = FALSE;
vm_object_unlock(object);
return KERN_SUCCESS;
}
}
assert(0);
return KERN_FAILURE;
}
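/*
 * vm_object_lock_request:
 *
 *	Restrict access to the given range of the object: pages may be
 *	returned to the pager and/or flushed and their protection
 *	lowered, per should_return/flags/prot.  Thin wrapper around
 *	vm_object_update().
 */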
__private_extern__ kern_return_t
vm_object_lock_request(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
memory_object_return_t should_return,
int flags,
vm_prot_t prot)
{
__unused boolean_t should_flush;
should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
XPR(XPR_MEMORY_OBJECT,
"vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
object, offset, size,
(((should_return&1)<<1)|should_flush), prot);
if (object == VM_OBJECT_NULL)
return (KERN_INVALID_ARGUMENT);
if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
return (KERN_INVALID_ARGUMENT);
size = round_page_64(size);
vm_object_lock(object);
vm_object_paging_begin(object);
(void)vm_object_update(object,
offset, size, NULL, NULL, should_return, flags, prot);
vm_object_paging_end(object);
vm_object_unlock(object);
return (KERN_SUCCESS);
}
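/*
 * vm_object_purge:
 *
 *	Empty a purgeable object: mark it VM_PURGABLE_EMPTY and reap its
 *	pages, updating the global purgeable page counts.  The object
 *	must be locked exclusively.
 */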
void
vm_object_purge(vm_object_t object)
{
vm_object_lock_assert_exclusive(object);
if (object->purgable == VM_PURGABLE_DENY)
return;
assert(object->copy == VM_OBJECT_NULL);
assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
if(object->purgable == VM_PURGABLE_VOLATILE) {
unsigned int delta;
assert(object->resident_page_count >=
object->wired_page_count);
delta = (object->resident_page_count -
object->wired_page_count);
if (delta != 0) {
assert(vm_page_purgeable_count >=
delta);
OSAddAtomic(-delta,
(SInt32 *)&vm_page_purgeable_count);
}
if (object->wired_page_count != 0) {
assert(vm_page_purgeable_wired_count >=
object->wired_page_count);
OSAddAtomic(-object->wired_page_count,
(SInt32 *)&vm_page_purgeable_wired_count);
}
}
object->purgable = VM_PURGABLE_EMPTY;
vm_object_reap_pages(object, REAP_PURGEABLE);
}
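/*
 * vm_object_purgable_control:
 *
 *	Query or change the purgeability state of an object
 *	(nonvolatile, volatile, empty), moving it between the purgeable
 *	queues and keeping the global purgeable page counts and aging
 *	tokens consistent.  On return, *state holds the previous state.
 */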
kern_return_t
vm_object_purgable_control(
vm_object_t object,
vm_purgable_t control,
int *state)
{
int old_state;
int new_state;
if (object == VM_OBJECT_NULL) {
return KERN_INVALID_ARGUMENT;
}
old_state = object->purgable;
if (old_state == VM_PURGABLE_DENY)
return KERN_INVALID_ARGUMENT;
assert(object->copy == VM_OBJECT_NULL);
assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
if (control == VM_PURGABLE_GET_STATE) {
*state = old_state;
return KERN_SUCCESS;
}
if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
object->volatile_empty = TRUE;
}
if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
object->volatile_fault = TRUE;
}
new_state = *state & VM_PURGABLE_STATE_MASK;
if (new_state == VM_PURGABLE_VOLATILE &&
object->volatile_empty) {
new_state = VM_PURGABLE_EMPTY;
}
switch (new_state) {
case VM_PURGABLE_DENY:
case VM_PURGABLE_NONVOLATILE:
object->purgable = new_state;
if (old_state == VM_PURGABLE_VOLATILE) {
unsigned int delta;
assert(object->resident_page_count >=
object->wired_page_count);
delta = (object->resident_page_count -
object->wired_page_count);
assert(vm_page_purgeable_count >= delta);
if (delta != 0) {
OSAddAtomic(-delta,
(SInt32 *)&vm_page_purgeable_count);
}
if (object->wired_page_count != 0) {
assert(vm_page_purgeable_wired_count >=
object->wired_page_count);
OSAddAtomic(-object->wired_page_count,
(SInt32 *)&vm_page_purgeable_wired_count);
}
vm_page_lock_queues();
assert(object->objq.next != NULL && object->objq.prev != NULL);
purgeable_q_t queue = vm_purgeable_object_remove(object);
assert(queue);
if (object->purgeable_when_ripe) {
vm_purgeable_token_delete_last(queue);
}
assert(queue->debug_count_objects>=0);
vm_page_unlock_queues();
}
break;
case VM_PURGABLE_VOLATILE:
if (object->volatile_fault) {
vm_page_t p;
int refmod;
queue_iterate(&object->memq, p, vm_page_t, listq) {
if (p->busy ||
VM_PAGE_WIRED(p) ||
p->fictitious) {
continue;
}
refmod = pmap_disconnect(p->phys_page);
if ((refmod & VM_MEM_MODIFIED) &&
!p->dirty) {
SET_PAGE_DIRTY(p, FALSE);
}
}
}
if (old_state == VM_PURGABLE_EMPTY &&
object->resident_page_count == 0)
break;
purgeable_q_t queue;
if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
else {
if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
else
queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
}
if (old_state == VM_PURGABLE_NONVOLATILE ||
old_state == VM_PURGABLE_EMPTY) {
unsigned int delta;
if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
VM_PURGABLE_NO_AGING) {
object->purgeable_when_ripe = FALSE;
} else {
object->purgeable_when_ripe = TRUE;
}
if (object->purgeable_when_ripe) {
kern_return_t result;
vm_page_lock_queues();
result = vm_purgeable_token_add(queue);
if (result != KERN_SUCCESS) {
vm_page_unlock_queues();
return result;
}
vm_page_unlock_queues();
}
assert(object->resident_page_count >=
object->wired_page_count);
delta = (object->resident_page_count -
object->wired_page_count);
if (delta != 0) {
OSAddAtomic(delta,
&vm_page_purgeable_count);
}
if (object->wired_page_count != 0) {
OSAddAtomic(object->wired_page_count,
&vm_page_purgeable_wired_count);
}
object->purgable = new_state;
assert(object->objq.next == NULL && object->objq.prev == NULL);
}
else if (old_state == VM_PURGABLE_VOLATILE) {
purgeable_q_t old_queue;
boolean_t purgeable_when_ripe;
assert(object->objq.next != NULL && object->objq.prev != NULL);
old_queue = vm_purgeable_object_remove(object);
assert(old_queue);
if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
VM_PURGABLE_NO_AGING) {
purgeable_when_ripe = FALSE;
} else {
purgeable_when_ripe = TRUE;
}
if (old_queue != queue ||
(purgeable_when_ripe !=
object->purgeable_when_ripe)) {
kern_return_t result;
vm_page_lock_queues();
if (object->purgeable_when_ripe) {
vm_purgeable_token_delete_last(old_queue);
}
object->purgeable_when_ripe = purgeable_when_ripe;
if (object->purgeable_when_ripe) {
result = vm_purgeable_token_add(queue);
assert(result==KERN_SUCCESS);
}
vm_page_unlock_queues();
}
}
vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
assert(queue->debug_count_objects>=0);
break;
case VM_PURGABLE_EMPTY:
if (object->volatile_fault) {
vm_page_t p;
int refmod;
queue_iterate(&object->memq, p, vm_page_t, listq) {
if (p->busy ||
VM_PAGE_WIRED(p) ||
p->fictitious) {
continue;
}
refmod = pmap_disconnect(p->phys_page);
if ((refmod & VM_MEM_MODIFIED) &&
!p->dirty) {
SET_PAGE_DIRTY(p, FALSE);
}
}
}
if (old_state != new_state) {
assert(old_state == VM_PURGABLE_NONVOLATILE ||
old_state == VM_PURGABLE_VOLATILE);
if (old_state == VM_PURGABLE_VOLATILE) {
purgeable_q_t old_queue;
assert(object->objq.next != NULL &&
object->objq.prev != NULL);
old_queue = vm_purgeable_object_remove(object);
assert(old_queue);
if (object->purgeable_when_ripe) {
vm_page_lock_queues();
vm_purgeable_token_delete_first(old_queue);
vm_page_unlock_queues();
}
}
(void) vm_object_purge(object);
}
break;
}
*state = old_state;
return KERN_SUCCESS;
}
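/*
 * vm_object_get_page_counts:
 *
 *	Count resident (and optionally dirty) pages in the given range,
 *	walking either the memq or per-offset lookups depending on
 *	which is cheaper.
 */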
kern_return_t
vm_object_get_page_counts(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
unsigned int *resident_page_count,
unsigned int *dirty_page_count)
{
kern_return_t kr = KERN_SUCCESS;
boolean_t count_dirty_pages = FALSE;
vm_page_t p = VM_PAGE_NULL;
unsigned int local_resident_count = 0;
unsigned int local_dirty_count = 0;
vm_object_offset_t cur_offset = 0;
vm_object_offset_t end_offset = 0;
if (object == VM_OBJECT_NULL)
return KERN_INVALID_ARGUMENT;
cur_offset = offset;
end_offset = offset + size;
vm_object_lock_assert_exclusive(object);
if (dirty_page_count != NULL) {
count_dirty_pages = TRUE;
}
if (resident_page_count != NULL && count_dirty_pages == FALSE) {
if (offset == 0 && (object->vo_size == size)) {
*resident_page_count = object->resident_page_count;
goto out;
}
}
if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
queue_iterate(&object->memq, p, vm_page_t, listq) {
if (p->offset >= cur_offset && p->offset < end_offset) {
local_resident_count++;
if (count_dirty_pages) {
if (p->dirty || (p->wpmapped && pmap_is_modified(p->phys_page))) {
local_dirty_count++;
}
}
}
}
} else {
for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
p = vm_page_lookup(object, cur_offset);
if (p != VM_PAGE_NULL) {
local_resident_count++;
if (count_dirty_pages) {
if (p->dirty || (p->wpmapped && pmap_is_modified(p->phys_page))) {
local_dirty_count++;
}
}
}
}
}
if (resident_page_count != NULL) {
*resident_page_count = local_resident_count;
}
if (dirty_page_count != NULL) {
*dirty_page_count = local_dirty_count;
}
out:
return kr;
}
#if TASK_SWAPPER
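/*
 * Residence counting for the task swapper: res_count tracks how many
 * "resident" references exist.  When it drops to zero the object's
 * pages are deactivated, propagating down the shadow chain as needed.
 */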
__private_extern__ void
vm_object_res_deallocate(
vm_object_t object)
{
vm_object_t orig_object = object;
assert(object->res_count > 0);
while (--object->res_count == 0) {
assert(object->ref_count >= object->res_count);
vm_object_deactivate_all_pages(object);
if (object->shadow != VM_OBJECT_NULL) {
vm_object_t tmp_object = object->shadow;
vm_object_lock(tmp_object);
if (object != orig_object)
vm_object_unlock(object);
object = tmp_object;
assert(object->res_count > 0);
} else
break;
}
if (object != orig_object)
vm_object_unlock(object);
}
__private_extern__ void
vm_object_res_reference(
vm_object_t object)
{
vm_object_t orig_object = object;
while ((++object->res_count == 1) &&
(object->shadow != VM_OBJECT_NULL)) {
vm_object_t tmp_object = object->shadow;
assert(object->ref_count >= object->res_count);
vm_object_lock(tmp_object);
if (object != orig_object)
vm_object_unlock(object);
object = tmp_object;
}
if (object != orig_object)
vm_object_unlock(object);
assert(orig_object->ref_count >= orig_object->res_count);
}
#endif
#ifdef vm_object_reference
#undef vm_object_reference
#endif
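/*
 * vm_object_reference:
 *
 *	Take an additional reference on the object (out-of-line version
 *	of the vm_object_reference macro undefined above).
 */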
__private_extern__ void
vm_object_reference(
register vm_object_t object)
{
if (object == VM_OBJECT_NULL)
return;
vm_object_lock(object);
assert(object->ref_count > 0);
vm_object_reference_locked(object);
vm_object_unlock(object);
}
#ifdef MACH_BSD
kern_return_t
adjust_vm_object_cache(
__unused vm_size_t oval,
__unused vm_size_t nval)
{
#if VM_OBJECT_CACHE
vm_object_cached_max = nval;
vm_object_cache_trim(FALSE);
#endif
return (KERN_SUCCESS);
}
#endif
unsigned int vm_object_transpose_count = 0;
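/*
 * vm_object_transpose:
 *
 *	Exchange the backing store (pages, pager and most attributes) of
 *	two objects of identical size.  The caller must have quiesced
 *	both objects -- access blocked, no paging in progress -- as the
 *	asserts below enforce.
 */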
kern_return_t
vm_object_transpose(
vm_object_t object1,
vm_object_t object2,
vm_object_size_t transpose_size)
{
vm_object_t tmp_object;
kern_return_t retval;
boolean_t object1_locked, object2_locked;
vm_page_t page;
vm_object_offset_t page_offset;
lck_mtx_t *hash_lck;
vm_object_hash_entry_t hash_entry;
tmp_object = VM_OBJECT_NULL;
object1_locked = FALSE;
object2_locked = FALSE;
if (object1 == object2 ||
object1 == VM_OBJECT_NULL ||
object2 == VM_OBJECT_NULL) {
retval = KERN_INVALID_VALUE;
goto done;
}
if (object1 > object2) {
tmp_object = object1;
object1 = object2;
object2 = tmp_object;
}
tmp_object = vm_object_allocate(transpose_size);
vm_object_lock(tmp_object);
tmp_object->can_persist = FALSE;
vm_object_lock(object1);
object1_locked = TRUE;
if (!object1->alive || object1->terminating ||
object1->copy || object1->shadow || object1->shadowed ||
object1->purgable != VM_PURGABLE_DENY) {
retval = KERN_INVALID_VALUE;
goto done;
}
vm_object_paging_only_wait(object1, THREAD_UNINT);
vm_object_lock(object2);
object2_locked = TRUE;
if (! object2->alive || object2->terminating ||
object2->copy || object2->shadow || object2->shadowed ||
object2->purgable != VM_PURGABLE_DENY) {
retval = KERN_INVALID_VALUE;
goto done;
}
vm_object_paging_only_wait(object2, THREAD_UNINT);
if (object1->vo_size != object2->vo_size ||
object1->vo_size != transpose_size) {
retval = KERN_INVALID_VALUE;
goto done;
}
if (object1->phys_contiguous || queue_empty(&object1->memq)) {
while (!queue_empty(&object2->memq)) {
page = (vm_page_t) queue_first(&object2->memq);
vm_page_rename(page, object1, page->offset, FALSE);
}
assert(queue_empty(&object2->memq));
} else if (object2->phys_contiguous || queue_empty(&object2->memq)) {
while (!queue_empty(&object1->memq)) {
page = (vm_page_t) queue_first(&object1->memq);
vm_page_rename(page, object2, page->offset, FALSE);
}
assert(queue_empty(&object1->memq));
} else {
while (!queue_empty(&object1->memq)) {
page = (vm_page_t) queue_first(&object1->memq);
page_offset = page->offset;
vm_page_remove(page, TRUE);
page->offset = page_offset;
queue_enter(&tmp_object->memq, page, vm_page_t, listq);
}
assert(queue_empty(&object1->memq));
while (!queue_empty(&object2->memq)) {
page = (vm_page_t) queue_first(&object2->memq);
vm_page_rename(page, object1, page->offset, FALSE);
}
assert(queue_empty(&object2->memq));
while (!queue_empty(&tmp_object->memq)) {
page = (vm_page_t) queue_first(&tmp_object->memq);
queue_remove(&tmp_object->memq, page,
vm_page_t, listq);
vm_page_insert(page, object2, page->offset);
}
assert(queue_empty(&tmp_object->memq));
}
#define __TRANSPOSE_FIELD(field) \
MACRO_BEGIN \
tmp_object->field = object1->field; \
object1->field = object2->field; \
object2->field = tmp_object->field; \
MACRO_END
assert(object1->vo_size == object2->vo_size);
#if TASK_SWAPPER
#endif
assert(!object1->copy);
assert(!object2->copy);
assert(!object1->shadow);
assert(!object2->shadow);
__TRANSPOSE_FIELD(vo_shadow_offset);
__TRANSPOSE_FIELD(pager);
__TRANSPOSE_FIELD(paging_offset);
__TRANSPOSE_FIELD(pager_control);
if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
memory_object_control_collapse(object1->pager_control,
object1);
}
if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
memory_object_control_collapse(object2->pager_control,
object2);
}
__TRANSPOSE_FIELD(copy_strategy);
assert(!object1->paging_in_progress);
assert(!object2->paging_in_progress);
assert(object1->activity_in_progress);
assert(object2->activity_in_progress);
__TRANSPOSE_FIELD(pager_created);
__TRANSPOSE_FIELD(pager_initialized);
__TRANSPOSE_FIELD(pager_ready);
__TRANSPOSE_FIELD(pager_trusted);
__TRANSPOSE_FIELD(can_persist);
__TRANSPOSE_FIELD(internal);
__TRANSPOSE_FIELD(temporary);
__TRANSPOSE_FIELD(private);
__TRANSPOSE_FIELD(pageout);
assert(object1->alive);
assert(object2->alive);
assert(object1->purgable == VM_PURGABLE_DENY);
assert(object2->purgable == VM_PURGABLE_DENY);
__TRANSPOSE_FIELD(purgeable_when_ripe);
__TRANSPOSE_FIELD(advisory_pageout);
__TRANSPOSE_FIELD(true_share);
assert(!object1->terminating);
assert(!object2->terminating);
__TRANSPOSE_FIELD(named);
__TRANSPOSE_FIELD(phys_contiguous);
__TRANSPOSE_FIELD(nophyscache);
object1->cached_list.next = (queue_entry_t) object2;
object2->cached_list.next = (queue_entry_t) object1;
assert(object1->cached_list.prev == NULL);
assert(object2->cached_list.prev == NULL);
assert(queue_empty(&object1->msr_q));
assert(queue_empty(&object2->msr_q));
__TRANSPOSE_FIELD(last_alloc);
__TRANSPOSE_FIELD(sequential);
__TRANSPOSE_FIELD(pages_created);
__TRANSPOSE_FIELD(pages_used);
__TRANSPOSE_FIELD(scan_collisions);
#if MACH_PAGEMAP
__TRANSPOSE_FIELD(existence_map);
#endif
__TRANSPOSE_FIELD(cow_hint);
#if MACH_ASSERT
__TRANSPOSE_FIELD(paging_object);
#endif
__TRANSPOSE_FIELD(wimg_bits);
__TRANSPOSE_FIELD(set_cache_attr);
__TRANSPOSE_FIELD(code_signed);
if (object1->hashed) {
hash_lck = vm_object_hash_lock_spin(object2->pager);
hash_entry = vm_object_hash_lookup(object2->pager, FALSE);
assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
hash_entry->object = object2;
vm_object_hash_unlock(hash_lck);
}
if (object2->hashed) {
hash_lck = vm_object_hash_lock_spin(object1->pager);
hash_entry = vm_object_hash_lookup(object1->pager, FALSE);
assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
hash_entry->object = object1;
vm_object_hash_unlock(hash_lck);
}
__TRANSPOSE_FIELD(hashed);
object1->transposed = TRUE;
object2->transposed = TRUE;
__TRANSPOSE_FIELD(mapping_in_progress);
__TRANSPOSE_FIELD(volatile_empty);
__TRANSPOSE_FIELD(volatile_fault);
__TRANSPOSE_FIELD(all_reusable);
assert(object1->blocked_access);
assert(object2->blocked_access);
assert(object1->__object2_unused_bits == 0);
assert(object2->__object2_unused_bits == 0);
#if UPL_DEBUG
#endif
assert(object1->objq.next == NULL);
assert(object1->objq.prev == NULL);
assert(object2->objq.next == NULL);
assert(object2->objq.prev == NULL);
#undef __TRANSPOSE_FIELD
retval = KERN_SUCCESS;
done:
if (tmp_object != VM_OBJECT_NULL) {
vm_object_unlock(tmp_object);
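/*
 * Re-initialize the temporary object so that
 * vm_object_deallocate() sees a clean, never-used object.
 */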
_vm_object_allocate(transpose_size, tmp_object);
vm_object_deallocate(tmp_object);
tmp_object = VM_OBJECT_NULL;
}
if (object1_locked) {
vm_object_unlock(object1);
object1_locked = FALSE;
}
if (object2_locked) {
vm_object_unlock(object2);
object2_locked = FALSE;
}
vm_object_transpose_count++;
return retval;
}
extern int speculative_reads_disabled;
extern int ignore_is_ssd;
unsigned int preheat_pages_max = MAX_UPL_TRANSFER;
unsigned int preheat_pages_min = 8;
uint32_t pre_heat_scaling[MAX_UPL_TRANSFER + 1];
uint32_t pre_heat_cluster[MAX_UPL_TRANSFER + 1];
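/*
 * vm_object_cluster_size:
 *
 *	Compute the cluster of pages to read around a fault: starting
 *	offset and length, bounded by the preheat limits, the device
 *	throttle, memory pressure, and the object's observed access
 *	pattern (sequential runs, pages_used vs. pages_created).
 */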
__private_extern__ void
vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
{
vm_size_t pre_heat_size;
vm_size_t tail_size;
vm_size_t head_size;
vm_size_t max_length;
vm_size_t cluster_size;
vm_object_offset_t object_size;
vm_object_offset_t orig_start;
vm_object_offset_t target_start;
vm_object_offset_t offset;
vm_behavior_t behavior;
boolean_t look_behind = TRUE;
boolean_t look_ahead = TRUE;
boolean_t isSSD = FALSE;
uint32_t throttle_limit;
int sequential_run;
int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
unsigned int max_ph_size;
unsigned int min_ph_size;
unsigned int min_ph_size_in_bytes;
assert( !(*length & PAGE_MASK));
assert( !(*start & PAGE_MASK_64));
max_length = *length;
*length = PAGE_SIZE;
*io_streaming = 0;
if (speculative_reads_disabled || fault_info == NULL) {
return;
}
orig_start = *start;
target_start = orig_start;
cluster_size = round_page(fault_info->cluster_size);
behavior = fault_info->behavior;
vm_object_lock(object);
if (object->pager == MEMORY_OBJECT_NULL)
goto out;
if (!ignore_is_ssd)
vnode_pager_get_isSSD(object->pager, &isSSD);
min_ph_size = preheat_pages_min;
max_ph_size = preheat_pages_max;
if (isSSD) {
min_ph_size /= 2;
max_ph_size /= 8;
}
if (min_ph_size < 1)
min_ph_size = 1;
if (max_ph_size < 1)
max_ph_size = 1;
else if (max_ph_size > MAX_UPL_TRANSFER)
max_ph_size = MAX_UPL_TRANSFER;
if (max_length > (max_ph_size * PAGE_SIZE))
max_length = max_ph_size * PAGE_SIZE;
if (max_length <= PAGE_SIZE)
goto out;
min_ph_size_in_bytes = min_ph_size * PAGE_SIZE;
if (object->internal)
object_size = object->vo_size;
else
vnode_pager_get_object_size(object->pager, &object_size);
object_size = round_page_64(object_size);
if (orig_start >= object_size) {
goto out;
}
if (object->pages_used > object->pages_created) {
object->pages_used = object->pages_created = 0;
}
if ((sequential_run = object->sequential)) {
if (sequential_run < 0) {
sequential_behavior = VM_BEHAVIOR_RSEQNTL;
sequential_run = 0 - sequential_run;
} else {
sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
}
}
switch (behavior) {
default:
behavior = VM_BEHAVIOR_DEFAULT;
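/* unknown behaviors are treated as default: fall through */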
case VM_BEHAVIOR_DEFAULT:
if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
goto out;
if (sequential_run >= (3 * PAGE_SIZE)) {
pre_heat_size = sequential_run + PAGE_SIZE;
if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL)
look_behind = FALSE;
else
look_ahead = FALSE;
*io_streaming = 1;
} else {
if (object->pages_created < (20 * min_ph_size)) {
pre_heat_size = min_ph_size_in_bytes;
} else {
pre_heat_size = (max_length * object->pages_used) / object->pages_created;
if (pre_heat_size < min_ph_size_in_bytes)
pre_heat_size = min_ph_size_in_bytes;
else
pre_heat_size = round_page(pre_heat_size);
}
}
break;
case VM_BEHAVIOR_RANDOM:
if ((pre_heat_size = cluster_size) <= PAGE_SIZE)
goto out;
break;
case VM_BEHAVIOR_SEQUENTIAL:
if ((pre_heat_size = cluster_size) == 0)
pre_heat_size = sequential_run + PAGE_SIZE;
look_behind = FALSE;
*io_streaming = 1;
break;
case VM_BEHAVIOR_RSEQNTL:
if ((pre_heat_size = cluster_size) == 0)
pre_heat_size = sequential_run + PAGE_SIZE;
look_ahead = FALSE;
*io_streaming = 1;
break;
}
throttle_limit = (uint32_t) max_length;
assert(throttle_limit == max_length);
if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
if (max_length > throttle_limit)
max_length = throttle_limit;
}
if (pre_heat_size > max_length)
pre_heat_size = max_length;
if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size_in_bytes)) {
unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
if (consider_free < vm_page_throttle_limit) {
pre_heat_size = trunc_page(pre_heat_size / 16);
} else if (consider_free < vm_page_free_target) {
pre_heat_size = trunc_page(pre_heat_size / 4);
}
if (pre_heat_size < min_ph_size_in_bytes)
pre_heat_size = min_ph_size_in_bytes;
}
if (look_ahead == TRUE) {
if (look_behind == TRUE) {
head_size = trunc_page(pre_heat_size / 2);
if (target_start > head_size)
target_start -= head_size;
else
target_start = 0;
}
if ((target_start + pre_heat_size) > object_size)
pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
} else {
if (pre_heat_size > target_start) {
pre_heat_size = (vm_size_t) target_start;
}
tail_size = 0;
}
assert( !(target_start & PAGE_MASK_64));
assert( !(pre_heat_size & PAGE_MASK));
pre_heat_scaling[pre_heat_size / PAGE_SIZE]++;
if (pre_heat_size <= PAGE_SIZE)
goto out;
if (look_behind == TRUE) {
head_size = pre_heat_size - tail_size - PAGE_SIZE;
for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
if (offset < fault_info->lo_offset)
break;
#if MACH_PAGEMAP
if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
break;
}
#endif
if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
== VM_EXTERNAL_STATE_ABSENT) {
break;
}
if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
break;
}
*start = offset;
*length += PAGE_SIZE;
}
}
if (look_ahead == TRUE) {
for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
if (offset >= fault_info->hi_offset)
break;
assert(offset < object_size);
#if MACH_PAGEMAP
if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
break;
}
#endif
if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
== VM_EXTERNAL_STATE_ABSENT) {
break;
}
if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
break;
}
*length += PAGE_SIZE;
}
}
out:
if (*length > max_length)
*length = max_length;
pre_heat_cluster[*length / PAGE_SIZE]++;
vm_object_unlock(object);
DTRACE_VM1(clustersize, vm_size_t, *length);
}
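/*
 * vm_object_page_op:
 *
 *	Operate on a single page of the object: dump (free) it, or get
 *	and/or set its dirty/pageout/precious/absent/busy flags, and
 *	optionally report its physical page number.
 */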
kern_return_t
vm_object_page_op(
vm_object_t object,
vm_object_offset_t offset,
int ops,
ppnum_t *phys_entry,
int *flags)
{
vm_page_t dst_page;
vm_object_lock(object);
if(ops & UPL_POP_PHYSICAL) {
if(object->phys_contiguous) {
if (phys_entry) {
*phys_entry = (ppnum_t)
(object->vo_shadow_offset >> PAGE_SHIFT);
}
vm_object_unlock(object);
return KERN_SUCCESS;
} else {
vm_object_unlock(object);
return KERN_INVALID_OBJECT;
}
}
if(object->phys_contiguous) {
vm_object_unlock(object);
return KERN_INVALID_OBJECT;
}
while(TRUE) {
if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
vm_object_unlock(object);
return KERN_FAILURE;
}
if((dst_page->busy || dst_page->cleaning) &&
(((ops & UPL_POP_SET) &&
(ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
if (ops & UPL_POP_DUMP) {
if (dst_page->pmapped == TRUE)
pmap_disconnect(dst_page->phys_page);
VM_PAGE_FREE(dst_page);
break;
}
if (flags) {
*flags = 0;
if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
if(dst_page->absent) *flags |= UPL_POP_ABSENT;
if(dst_page->busy) *flags |= UPL_POP_BUSY;
}
if (ops & UPL_POP_SET) {
/* bits may only be set on a page that is (or becomes) busy */
assert(dst_page->busy || (ops & UPL_POP_BUSY));
if (ops & UPL_POP_DIRTY) {
SET_PAGE_DIRTY(dst_page, FALSE);
}
if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
}
if (ops & UPL_POP_CLR) {
assert(dst_page->busy);
if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
if (ops & UPL_POP_BUSY) {
/* clearing busy wakes up anyone waiting on the page */
dst_page->busy = FALSE;
PAGE_WAKEUP(dst_page);
}
}
if (dst_page->encrypted) {
/*
 * ENCRYPTED SWAP:
 * the page's contents can only be handed out if the caller
 * holds it "busy", which keeps it from being recycled or
 * re-encrypted underneath them
 */
if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
dst_page->busy) {
/* stable enough to access... decrypt in place */
vm_page_decrypt(dst_page, 0);
} else {
/*
 * not busy, so the page could be recycled at any
 * time... we must not hand out its physical address
 */
assert(!phys_entry);
}
}
if (phys_entry) {
assert(dst_page->busy);
assert(!dst_page->encrypted);
*phys_entry = dst_page->phys_page;
}
break;
}
vm_object_unlock(object);
return KERN_SUCCESS;
}
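/*
 * vm_object_range_op offers a performance enhancement over
 * vm_object_page_op for page_op functions that do not require
 * page-level state to be returned from the call; the lack of per-page
 * state handling lets a caller sweep a span of pages without the
 * overhead of UPL structures.
 *
 * A minimal usage sketch (hypothetical caller; "object", "start" and
 * "size" are assumed to be valid and page-aligned):
 *
 *	uint32_t	dumped;
 *
 *	vm_object_range_op(object, start, start + size,
 *			   UPL_ROP_DUMP, &dumped);
 */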
kern_return_t
vm_object_range_op(
vm_object_t object,
vm_object_offset_t offset_beg,
vm_object_offset_t offset_end,
int ops,
uint32_t *range)
{
vm_object_offset_t offset;
vm_page_t dst_page;
/*
 * the range is reported through a 32-bit out parameter, so
 * reject spans that cannot be represented
 */
if (offset_end - offset_beg > (uint32_t) -1) {
return KERN_INVALID_ARGUMENT;
}
if (object->resident_page_count == 0) {
/*
 * empty object: none of the span can be present, so all
 * of it is absent
 */
if (range) {
if (ops & UPL_ROP_PRESENT) {
*range = 0;
} else {
*range = (uint32_t) (offset_end - offset_beg);
assert(*range == (offset_end - offset_beg));
}
}
return KERN_SUCCESS;
}
vm_object_lock(object);
if (object->phys_contiguous) {
vm_object_unlock(object);
return KERN_INVALID_OBJECT;
}
/* page-align the starting offset */
offset = offset_beg & ~PAGE_MASK_64;
while (offset < offset_end) {
dst_page = vm_page_lookup(object, offset);
if (dst_page != VM_PAGE_NULL) {
if (ops & UPL_ROP_DUMP) {
if (dst_page->busy || dst_page->cleaning) {
/*
 * someone else is playing with the page... we
 * have to wait and then look it back up, since
 * its state may have changed while we slept
 */
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
if (dst_page->laundry) {
/* pull it back from the pageout queue */
dst_page->pageout = FALSE;
vm_pageout_steal_laundry(dst_page, FALSE);
}
if (dst_page->pmapped == TRUE)
pmap_disconnect(dst_page->phys_page);
VM_PAGE_FREE(dst_page);
} else if ((ops & UPL_ROP_ABSENT) && !dst_page->absent)
break;
} else if (ops & UPL_ROP_PRESENT)
break;
offset += PAGE_SIZE;
}
vm_object_unlock(object);
if (range) {
/*
 * report how far the scan got past offset_beg, clipped
 * to the requested span
 */
if (offset > offset_end)
offset = offset_end;
if (offset > offset_beg) {
*range = (uint32_t) (offset - offset_beg);
assert(*range == (offset - offset_beg));
} else {
*range = 0;
}
}
return KERN_SUCCESS;
}
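/*
 * pager_map_to_phys_contiguous:
 *
 * Given a kernel virtual address backed by physically contiguous
 * memory, mark the pager's VM object "private" and populate it with
 * the corresponding physical pages.  If population fails, the object's
 * previous private setting is restored.
 */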
kern_return_t pager_map_to_phys_contiguous(
memory_object_control_t object,
memory_object_offset_t offset,
addr64_t base_vaddr,
vm_size_t size)
{
ppnum_t page_num;
boolean_t clobbered_private;
kern_return_t retval;
vm_object_t pager_object;
page_num = pmap_find_phys(kernel_pmap, base_vaddr);
if (!page_num) {
/* no physical translation for the supplied address */
retval = KERN_FAILURE;
goto out;
}
pager_object = memory_object_control_to_vm_object(object);
if (!pager_object) {
retval = KERN_FAILURE;
goto out;
}
/*
 * the object must be marked private to accept these pages;
 * remember the old setting so it can be restored on failure
 */
clobbered_private = pager_object->private;
pager_object->private = TRUE;
retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
if (retval != KERN_SUCCESS)
pager_object->private = clobbered_private;
out:
return retval;
}
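/*
 * Object-lock helpers.  vm_pageout_scan() advertises the object it
 * wants to work on in vm_pageout_scan_wants_object; rather than fight
 * it for the lock, the helpers below pause briefly (mutex_pause) and
 * count each collision in scan_object_collision.
 */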
uint32_t scan_object_collision = 0;
void
vm_object_lock(vm_object_t object)
{
if (object == vm_pageout_scan_wants_object) {
scan_object_collision++;
mutex_pause(2);
}
lck_rw_lock_exclusive(&object->Lock);
}
boolean_t
vm_object_lock_avoid(vm_object_t object)
{
if (object == vm_pageout_scan_wants_object) {
scan_object_collision++;
return TRUE;
}
return FALSE;
}
boolean_t
_vm_object_lock_try(vm_object_t object)
{
return (lck_rw_try_lock_exclusive(&object->Lock));
}
boolean_t
vm_object_lock_try(vm_object_t object)
{
/* called from hibernate path so check before blocking */
if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
mutex_pause(2);
}
return _vm_object_lock_try(object);
}
void
vm_object_lock_shared(vm_object_t object)
{
if (vm_object_lock_avoid(object)) {
mutex_pause(2);
}
lck_rw_lock_shared(&object->Lock);
}
boolean_t
vm_object_lock_try_shared(vm_object_t object)
{
if (vm_object_lock_avoid(object)) {
mutex_pause(2);
}
return (lck_rw_try_lock_shared(&object->Lock));
}
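/*
 * vm_object_change_wimg_mode:
 *
 * Retarget the WIMG (cacheability) attributes of every resident page
 * in the object and record the new mode on the object so that pages
 * grabbed later inherit it.  The caller must hold the object lock
 * exclusively.
 */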
unsigned int vm_object_change_wimg_mode_count = 0;
void
vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
{
vm_page_t p;
vm_object_lock_assert_exclusive(object);
/* wait for any paging in flight, then retarget each resident page */
vm_object_paging_wait(object, THREAD_UNINT);
queue_iterate(&object->memq, p, vm_page_t, listq) {
if (!p->fictitious)
pmap_set_cache_attributes(p->phys_page, wimg_mode);
}
if (wimg_mode == VM_WIMG_USE_DEFAULT)
object->set_cache_attr = FALSE;
else
object->set_cache_attr = TRUE;
object->wimg_bits = wimg_mode;
vm_object_change_wimg_mode_count++;
}
#if CONFIG_FREEZE
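/*
 * vm_object_pack:
 *
 * Called by the freezer.  Classifies the object's resident pages as
 * purgeable, wired, clean or dirty.  A volatile object is purged
 * outright (when a freezer handle is supplied).  If the object is
 * unshared (ref_count == 1), its pages are handed to
 * vm_object_pack_pages(); otherwise an internal object is reported
 * back as shared.
 */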
kern_return_t vm_object_pack(
unsigned int *purgeable_count,
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
unsigned int dirty_budget,
boolean_t *shared,
vm_object_t src_object,
struct default_freezer_handle *df_handle)
{
kern_return_t kr = KERN_SUCCESS;
vm_object_lock(src_object);
*purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
*shared = FALSE;
if (!src_object->alive || src_object->terminating){
kr = KERN_FAILURE;
goto done;
}
if (src_object->purgable == VM_PURGABLE_VOLATILE) {
*purgeable_count = src_object->resident_page_count;
if (df_handle != NULL) {
purgeable_q_t queue;
/* object should be on a purgeable queue */
assert(src_object->objq.next != NULL &&
src_object->objq.prev != NULL);
queue = vm_purgeable_object_remove(src_object);
assert(queue);
if (src_object->purgeable_when_ripe) {
/* the object's token is no longer needed */
vm_page_lock_queues();
vm_purgeable_token_delete_first(queue);
vm_page_unlock_queues();
}
vm_object_purge(src_object);
}
goto done;
}
if (src_object->ref_count == 1) {
vm_object_pack_pages(wired_count, clean_count, dirty_count, dirty_budget, src_object, df_handle);
} else {
if (src_object->internal) {
*shared = TRUE;
}
}
done:
vm_object_unlock(src_object);
return kr;
}
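/*
 * vm_object_pack_pages:
 *
 * Helper for vm_object_pack().  With no freezer handle this only
 * counts wired, clean and dirty pages (stopping after dirty_budget
 * dirty pages); with a handle, dirty pages are packed via
 * default_freezer_pack_page() and clean pages are freed.
 */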
void
vm_object_pack_pages(
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
unsigned int dirty_budget,
vm_object_t src_object,
struct default_freezer_handle *df_handle)
{
vm_page_t p, next;
next = (vm_page_t)queue_first(&src_object->memq);
while (!queue_end(&src_object->memq, (queue_entry_t)next)) {
p = next;
next = (vm_page_t)queue_next(&next->listq);
/* finish up if we've hit our pageout limit */
if (dirty_budget && (dirty_budget == *dirty_count)) {
break;
}
assert(!p->laundry);
/* skip pages the freezer cannot handle */
if (p->fictitious || p->busy)
continue;
if (p->absent || p->unusual || p->error)
continue;
/* wired pages are counted but never packed */
if (VM_PAGE_WIRED(p)) {
(*wired_count)++;
continue;
}
if (df_handle == NULL) {
/* counting-only mode: just tally clean vs. dirty */
if (p->dirty || pmap_is_modified(p->phys_page)) {
(*dirty_count)++;
} else {
(*clean_count)++;
}
continue;
}
if (p->cleaning) {
/* already being cleaned... just flag it for pageout */
p->pageout = TRUE;
continue;
}
if (p->pmapped == TRUE) {
int refmod_state;
/*
 * sever all mappings and fold any modified state the
 * pmap layer was tracking into the dirty bit
 */
refmod_state = pmap_disconnect(p->phys_page);
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(p, FALSE);
}
}
if (p->dirty) {
default_freezer_pack_page(p, df_handle);
(*dirty_count)++;
} else {
VM_PAGE_FREE(p);
(*clean_count)++;
}
}
}
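/*
 * vm_object_pageout:
 *
 * Push every dirty page of the object out to its pager / the
 * compressor, throttling against the internal pageout queue and
 * freeing pages that prove to be clean once their pmap mappings have
 * been severed.
 */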
void
vm_object_pageout(
vm_object_t object)
{
vm_page_t p, next;
struct vm_pageout_queue *iq;
iq = &vm_pageout_queue_internal;
assert(object != VM_OBJECT_NULL );
vm_object_lock(object);
if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) {
if (!object->pager_initialized) {
/*
 * if there is no memory object for the object, create
 * one and hand it to the default pager
 */
vm_object_pager_create(object);
}
}
ReScan:
next = (vm_page_t)queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t)next)) {
p = next;
next = (vm_page_t)queue_next(&next->listq);
/* throw to the pageout queue */
vm_page_lockspin_queues();
/*
 * see if the page is already in the process of being
 * cleaned... if so, leave it alone
 */
if (!p->laundry) {
if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
if (VM_PAGE_Q_THROTTLED(iq)) {
/*
 * the internal queue is throttled... mark it as
 * draining, sleep until the pageout thread makes
 * progress, then rescan since the object was
 * unlocked
 */
iq->pgo_draining = TRUE;
assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
vm_page_unlock_queues();
vm_object_unlock(object);
thread_block(THREAD_CONTINUE_NULL);
vm_object_lock(object);
goto ReScan;
}
/* skip pages the compressor cannot take */
if (p->fictitious || p->busy) {
vm_page_unlock_queues();
continue;
}
if (p->absent || p->unusual || p->error || VM_PAGE_WIRED(p)) {
vm_page_unlock_queues();
continue;
}
if (p->cleaning) {
/* already on its way out... just flag it */
p->pageout = TRUE;
vm_page_unlock_queues();
continue;
}
if (p->pmapped == TRUE) {
int refmod_state;
/*
 * disconnect all mappings, letting the pmap layer know
 * the page is headed to the compressor, and fold any
 * modified state into the dirty bit
 */
refmod_state = pmap_disconnect_options(p->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(p, FALSE);
}
}
if (p->dirty == FALSE) {
/* clean pages can simply be tossed */
vm_page_unlock_queues();
VM_PAGE_FREE(p);
continue;
}
}
/* hand the page to the pageout cluster mechanism */
VM_PAGE_QUEUES_REMOVE(p);
vm_pageout_cluster(p, TRUE);
}
vm_page_unlock_queues();
}
vm_object_unlock(object);
}
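/*
 * vm_object_pagein:
 *
 * Ask the pager to bring the object's frozen/swapped-out data back
 * into memory via memory_object_data_reclaim(), blocking other access
 * to the object for the duration.
 */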
kern_return_t
vm_object_pagein(
vm_object_t object)
{
memory_object_t pager;
kern_return_t kr;
vm_object_lock(object);
pager = object->pager;
if (!object->pager_ready || pager == MEMORY_OBJECT_NULL) {
vm_object_unlock(object);
return KERN_FAILURE;
}
/*
 * hold off other paging activity and block new accesses while
 * the pager pulls the data back in
 */
vm_object_paging_wait(object, THREAD_UNINT);
vm_object_paging_begin(object);
object->blocked_access = TRUE;
vm_object_unlock(object);
kr = memory_object_data_reclaim(pager, TRUE);
vm_object_lock(object);
object->blocked_access = FALSE;
vm_object_paging_end(object);
vm_object_unlock(object);
return kr;
}
#endif