#include <stdint.h>
#include <debug.h>
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>
#include <mach_kdb.h>
#include <advisory_pageout.h>
#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/upl.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/sdt.h>
#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/xpr.h>
#include <kern/kalloc.h>
#include <machine/vm_tuning.h>
#include <machine/commpage.h>
#if CONFIG_EMBEDDED
#include <sys/kern_memorystatus.h>
#endif
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <../bsd/crypto/aes/aes.h>
extern u_int32_t random(void);
#if UPL_DEBUG
#include <libkern/OSDebug.h>
#endif
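/*
 * Tunable pageout parameters.  Each #ifndef below supplies a default that
 * an earlier header (e.g. <machine/vm_tuning.h>) may override; the *_WAIT
 * values are in milliseconds, the throttles and targets in pages.
 */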
#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE
#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE 100
#endif
#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE
#ifdef CONFIG_EMBEDDED
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
#else
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
#endif
#endif
#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF 100
#endif
#ifndef VM_PAGEOUT_INACTIVE_RELIEF
#define VM_PAGEOUT_INACTIVE_RELIEF 50
#endif
#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX 16UL
#endif
#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT 30
#endif
#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT 200
#endif
#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT 300
#endif
#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT 10
#endif
#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / 20)
#endif
#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
#endif
#ifndef VM_PAGE_INACTIVE_TARGET
#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3)
#endif
#ifndef VM_PAGE_FREE_TARGET
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
#else
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
#endif
#endif
#ifndef VM_PAGE_FREE_MIN
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
#else
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
#endif
#endif
#define VM_PAGE_FREE_MIN_LIMIT 1500
#define VM_PAGE_FREE_TARGET_LIMIT 2000
#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED(n) \
((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif
#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
#ifndef VM_PAGE_REACTIVATE_LIMIT
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
#else
#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif
#endif
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 100
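/*
 * One laundry queue per pager type: anonymous/internal pages are sent to
 * the default pager, file-backed/external pages to their vnode pager.
 * pgo_laundry counts pages queued or in flight; once it reaches
 * pgo_maxlaundry the queue is considered throttled (VM_PAGE_Q_THROTTLED).
 */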
struct vm_pageout_queue {
queue_head_t pgo_pending;
unsigned int pgo_laundry;
unsigned int pgo_maxlaundry;
unsigned int pgo_idle:1,
pgo_busy:1,
pgo_throttled:1,
:0;
};
#define VM_PAGE_Q_THROTTLED(q) \
((q)->pgo_laundry >= (q)->pgo_maxlaundry)
unsigned int vm_pageout_scan_event_counter = 0;
static void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(void);
extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);
static thread_t vm_pageout_external_iothread = THREAD_NULL;
static thread_t vm_pageout_internal_iothread = THREAD_NULL;
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;
unsigned int vm_pageout_idle_wait = 0;
unsigned int vm_pageout_empty_wait = 0;
unsigned int vm_pageout_burst_wait = 0;
unsigned int vm_pageout_deadlock_wait = 0;
unsigned int vm_pageout_deadlock_relief = 0;
unsigned int vm_pageout_inactive_relief = 0;
unsigned int vm_pageout_burst_active_throttle = 0;
unsigned int vm_pageout_burst_inactive_throttle = 0;
unsigned int vm_accellerate_zf_pageout_trigger = 400;
unsigned int zf_queue_min_count = 100;
unsigned int vm_zf_queue_count = 0;
#if defined(__ppc__)
unsigned int vm_zf_count = 0;
#else
uint64_t vm_zf_count __attribute__((aligned(8))) = 0;
#endif
unsigned int vm_pageout_active = 0;
unsigned int vm_pageout_inactive = 0;
unsigned int vm_pageout_inactive_throttled = 0;
unsigned int vm_pageout_inactive_forced = 0;
unsigned int vm_pageout_inactive_nolock = 0;
unsigned int vm_pageout_inactive_avoid = 0;
unsigned int vm_pageout_inactive_busy = 0;
unsigned int vm_pageout_inactive_absent = 0;
unsigned int vm_pageout_inactive_used = 0;
unsigned int vm_pageout_inactive_clean = 0;
unsigned int vm_pageout_inactive_dirty = 0;
unsigned int vm_pageout_inactive_deactivated = 0;
unsigned int vm_pageout_inactive_zf = 0;
unsigned int vm_pageout_dirty_no_pager = 0;
unsigned int vm_pageout_purged_objects = 0;
unsigned int vm_stat_discard = 0;
unsigned int vm_stat_discard_sent = 0;
unsigned int vm_stat_discard_failure = 0;
unsigned int vm_stat_discard_throttle = 0;
unsigned int vm_pageout_reactivation_limit_exceeded = 0;
unsigned int vm_pageout_catch_ups = 0;
unsigned int vm_pageout_inactive_force_reclaim = 0;
unsigned int vm_pageout_scan_active_throttled = 0;
unsigned int vm_pageout_scan_inactive_throttled = 0;
unsigned int vm_pageout_scan_throttle = 0;
unsigned int vm_pageout_scan_throttle_aborted = 0;
unsigned int vm_pageout_scan_burst_throttle = 0;
unsigned int vm_pageout_scan_empty_throttle = 0;
unsigned int vm_pageout_scan_deadlock_detected = 0;
unsigned int vm_pageout_scan_active_throttle_success = 0;
unsigned int vm_pageout_scan_inactive_throttle_success = 0;
unsigned int vm_page_speculative_count_drifts = 0;
unsigned int vm_page_speculative_count_drift_max = 0;
unsigned int vm_backing_store_low = 0;
unsigned int vm_pageout_out_of_line = 0;
unsigned int vm_pageout_in_place = 0;
unsigned int vm_page_steal_pageout_page = 0;
unsigned long vm_page_decrypt_counter = 0;
unsigned long vm_page_decrypt_for_upl_counter = 0;
unsigned long vm_page_encrypt_counter = 0;
unsigned long vm_page_encrypt_abort_counter = 0;
unsigned long vm_page_encrypt_already_encrypted_counter = 0;
boolean_t vm_pages_encrypted = FALSE;
struct vm_pageout_queue vm_pageout_queue_internal;
struct vm_pageout_queue vm_pageout_queue_external;
unsigned int vm_page_speculative_target = 0;
vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
static boolean_t (* volatile consider_buffer_cache_collect)(void) = NULL;
#if DEVELOPMENT || DEBUG
unsigned long vm_cs_validated_resets = 0;
#endif
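/*
 * vm_backing_store_disable:
 * Set or clear the vm_backing_store_low flag (presumably raised when
 * backing store / swap space is running low); clearing it wakes any
 * thread blocked waiting for backing store to recover.
 */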
void
vm_backing_store_disable(
boolean_t disable)
{
if (disable) {
vm_backing_store_low = 1;
} else {
if (vm_backing_store_low) {
vm_backing_store_low = 0;
thread_wakeup((event_t) &vm_backing_store_low);
}
}
}
#if MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
#define CLUSTER_STAT(clause) clause
#else
#define CLUSTER_STAT(clause)
#endif
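/*
 * vm_pageout_object_terminate:
 * A pageout (shadow) object is being shut down.  For each private page
 * still in it, free the alias page and finish cleaning the real page in
 * the shadow object: target pages are freed (or unwired and reactivated
 * if they were redirtied), other pages simply have their cleaning state
 * cleared.  The object must be locked by the caller.
 */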
void
vm_pageout_object_terminate(
vm_object_t object)
{
vm_object_t shadow_object;
assert(object->pageout);
shadow_object = object->shadow;
vm_object_lock(shadow_object);
while (!queue_empty(&object->memq)) {
vm_page_t p, m;
vm_object_offset_t offset;
p = (vm_page_t) queue_first(&object->memq);
assert(p->private);
assert(p->pageout);
p->pageout = FALSE;
assert(!p->cleaning);
offset = p->offset;
VM_PAGE_FREE(p);
p = VM_PAGE_NULL;
m = vm_page_lookup(shadow_object,
offset + object->shadow_offset);
if (m == VM_PAGE_NULL)
continue;
assert(m->cleaning);
m->dump_cleaning = FALSE;
assert((m->dirty) || (m->precious) ||
(m->busy && m->cleaning));
vm_page_lock_queues();
if (m->laundry) {
vm_pageout_throttle_up(m);
}
if (m->pageout) {
assert(m->busy);
assert(m->wire_count == 1);
m->cleaning = FALSE;
m->encrypted_cleaning = FALSE;
m->pageout = FALSE;
#if MACH_CLUSTER_STATS
if (m->wanted) vm_pageout_target_collisions++;
#endif
if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
m->dirty = TRUE;
else
m->dirty = FALSE;
if (m->dirty) {
CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
vm_page_unwire(m);
VM_STAT_INCR(reactivations);
PAGE_WAKEUP_DONE(m);
} else {
CLUSTER_STAT(vm_pageout_target_page_freed++;)
vm_page_free(m);
}
vm_page_unlock_queues();
continue;
}
if (!m->active && !m->inactive && !m->throttled && !m->private) {
if (m->reference)
vm_page_activate(m);
else
vm_page_deactivate(m);
}
if (m->busy && m->cleaning) {
m->busy = FALSE;
pmap_clear_modify(m->phys_page);
m->absent = FALSE;
m->overwriting = FALSE;
} else if (m->overwriting) {
assert(VM_PAGE_WIRED(m));
vm_page_unwire(m);
m->overwriting = FALSE;
} else {
#if MACH_CLUSTER_STATS
m->dirty = pmap_is_modified(m->phys_page);
if (m->dirty) vm_pageout_cluster_dirtied++;
else vm_pageout_cluster_cleaned++;
if (m->wanted) vm_pageout_cluster_collisions++;
#else
m->dirty = 0;
#endif
}
m->cleaning = FALSE;
m->encrypted_cleaning = FALSE;
PAGE_WAKEUP(m);
vm_page_unlock_queues();
}
vm_object_activity_end(shadow_object);
vm_object_unlock(shadow_object);
assert(object->ref_count == 0);
assert(object->paging_in_progress == 0);
assert(object->activity_in_progress == 0);
assert(object->resident_page_count == 0);
return;
}
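/*
 * vm_pageclean_setup:
 * Set page m up for cleaning in place: mark it cleaning/dirty, then turn
 * the fictitious page new_m into a private alias for m's physical page,
 * wire it, and insert it into the pageout object at new_offset.
 */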
void
vm_pageclean_setup(
vm_page_t m,
vm_page_t new_m,
vm_object_t new_object,
vm_object_offset_t new_offset)
{
assert(!m->busy);
#if 0
assert(!m->cleaning);
#endif
XPR(XPR_VM_PAGEOUT,
"vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
m->object, m->offset, m,
new_m, new_offset);
pmap_clear_modify(m->phys_page);
m->cleaning = TRUE;
m->dirty = TRUE;
m->precious = FALSE;
assert(new_m->fictitious);
assert(new_m->phys_page == vm_page_fictitious_addr);
new_m->fictitious = FALSE;
new_m->private = TRUE;
new_m->pageout = TRUE;
new_m->phys_page = m->phys_page;
vm_page_lockspin_queues();
vm_page_wire(new_m);
vm_page_unlock_queues();
vm_page_insert(new_m, new_object, new_offset);
assert(!new_m->wanted);
new_m->busy = FALSE;
}
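/*
 * vm_pageout_initialize_page:
 * Push a dirty page out to the default pager via
 * memory_object_data_initialize().  Called with the page busy and its
 * object locked; the object lock is dropped around the pager call and
 * retaken before returning.
 */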
void
vm_pageout_initialize_page(
vm_page_t m)
{
vm_object_t object;
vm_object_offset_t paging_offset;
vm_page_t holding_page;
memory_object_t pager;
XPR(XPR_VM_PAGEOUT,
"vm_pageout_initialize_page, page 0x%X\n",
m, 0, 0, 0, 0);
assert(m->busy);
assert(!m->absent);
assert(!m->error);
assert(m->dirty);
object = m->object;
paging_offset = m->offset + object->paging_offset;
if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
VM_PAGE_FREE(m);
panic("reservation without pageout?");
vm_object_unlock(object);
return;
}
pager = object->pager;
if (pager == MEMORY_OBJECT_NULL) {
VM_PAGE_FREE(m);
panic("missing pager for copy object");
return;
}
vm_object_paging_begin(object);
holding_page = NULL;
pmap_clear_modify(m->phys_page);
m->dirty = TRUE;
m->busy = TRUE;
m->list_req_pending = TRUE;
m->cleaning = TRUE;
m->pageout = TRUE;
vm_page_lockspin_queues();
vm_page_wire(m);
vm_page_unlock_queues();
vm_object_unlock(object);
memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
vm_object_lock(object);
vm_object_paging_end(object);
}
#if MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES 16
struct {
unsigned long pages_in_cluster;
unsigned long pages_at_higher_offsets;
unsigned long pages_at_lower_offsets;
} cluster_stats[MAXCLUSTERPAGES];
#endif
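/*
 * vm_pageout_cluster:
 * Hand a busy, dirty (or precious) page to the appropriate pageout queue
 * (internal vs. external pager) and wake that queue's iothread if idle.
 * Takes a paging reference on the object and wires the page.  Called
 * with both the object and the page queues locked.
 */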
void
vm_pageout_cluster(vm_page_t m)
{
vm_object_t object = m->object;
struct vm_pageout_queue *q;
XPR(XPR_VM_PAGEOUT,
"vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
object, m->offset, m, 0, 0);
VM_PAGE_CHECK(m);
assert(m->busy && (m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
assert(!m->cleaning && !m->pageout && !m->inactive && !m->active);
assert(!m->throttled);
vm_object_paging_begin(object);
vm_page_wire(m);
m->list_req_pending = TRUE;
m->cleaning = TRUE;
m->pageout = TRUE;
m->laundry = TRUE;
if (object->internal == TRUE)
q = &vm_pageout_queue_internal;
else
q = &vm_pageout_queue_external;
q->pgo_laundry++;
m->pageout_queue = TRUE;
queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
if (q->pgo_idle == TRUE) {
q->pgo_idle = FALSE;
thread_wakeup((event_t) &q->pgo_pending);
}
VM_PAGE_CHECK(m);
}
unsigned long vm_pageout_throttle_up_count = 0;
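/*
 * vm_pageout_throttle_up:
 * A page is coming off laundry (or being stolen back from a pageout
 * queue): if still queued, unlink it and drop the object's paging
 * reference, then credit the queue's laundry count and wake
 * vm_pageout_scan if it was throttled waiting on this queue.
 * Called with the page queues locked.
 */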
void
vm_pageout_throttle_up(
vm_page_t m)
{
struct vm_pageout_queue *q;
assert(m->laundry);
assert(m->object != VM_OBJECT_NULL);
assert(m->object != kernel_object);
vm_pageout_throttle_up_count++;
if (m->object->internal == TRUE)
q = &vm_pageout_queue_internal;
else
q = &vm_pageout_queue_external;
if (m->pageout_queue == TRUE) {
m->pageout_queue = FALSE;
queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
m->pageq.next = NULL;
m->pageq.prev = NULL;
vm_object_paging_end(m->object);
}
m->laundry = FALSE;
q->pgo_laundry--;
if (q->pgo_throttled == TRUE) {
q->pgo_throttled = FALSE;
thread_wakeup((event_t) &q->pgo_laundry);
}
}
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT (3 * MAX_UPL_TRANSFER)
#define FCS_IDLE 0
#define FCS_DELAYED 1
#define FCS_DEADLOCK_DETECTED 2
struct flow_control {
int state;
mach_timespec_t ts;
};
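/*
 * Ring buffer of recent pageout statistics.  compute_memory_pressure()
 * (presumably run once per second by the averaging machinery) advances
 * vm_pageout_stat_now, so the ring holds the last VM_PAGEOUT_STAT_SIZE
 * samples of pages considered and reclaimed.
 */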
#define VM_PAGEOUT_STAT_SIZE 31
struct vm_pageout_stat {
unsigned int considered;
unsigned int reclaimed;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
unsigned int vm_pageout_stat_now = 0;
unsigned int vm_memory_pressure = 0;
#define VM_PAGEOUT_STAT_BEFORE(i) \
(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
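/*
 * compute_memory_pressure:
 * Publish the most recently completed sample's reclaim count to the
 * commpage as the current memory-pressure value, then open the next
 * slot in the statistics ring.
 */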
void
compute_memory_pressure(
__unused void *arg)
{
unsigned int vm_pageout_next;
vm_memory_pressure =
vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;
commpage_set_memory_pressure( vm_memory_pressure );
vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
vm_pageout_stats[vm_pageout_next].considered = 0;
vm_pageout_stats[vm_pageout_next].reclaimed = 0;
vm_pageout_stat_now = vm_pageout_next;
}
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
unsigned int page_free_target, page_free_count, page_free_wanted;
page_free_target = vm_page_free_target;
page_free_count = vm_page_free_count;
if (page_free_target > page_free_count) {
page_free_wanted = page_free_target - page_free_count;
} else {
page_free_wanted = 0;
}
return page_free_wanted;
}
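/*
 * mach_vm_pressure_monitor:
 * Optionally block until free pages drop below target, then report how
 * many pages were reclaimed over the last "nsecs_monitored" samples and
 * how many pages the pageout daemon still wants.  The do/while re-walks
 * the ring if vm_pageout_stat_now moved while we were summing.
 *
 * Hypothetical usage, from some monitoring thread:
 *	unsigned int reclaimed, wanted;
 *	kr = mach_vm_pressure_monitor(TRUE, 5, &reclaimed, &wanted);
 */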
kern_return_t
mach_vm_pressure_monitor(
boolean_t wait_for_pressure,
unsigned int nsecs_monitored,
unsigned int *pages_reclaimed_p,
unsigned int *pages_wanted_p)
{
wait_result_t wr;
unsigned int vm_pageout_then, vm_pageout_now;
unsigned int pages_reclaimed;
if (wait_for_pressure) {
while (vm_page_free_count >= vm_page_free_target) {
wr = assert_wait((event_t) &vm_page_free_wanted,
THREAD_INTERRUPTIBLE);
if (wr == THREAD_WAITING) {
wr = thread_block(THREAD_CONTINUE_NULL);
}
if (wr == THREAD_INTERRUPTED) {
return KERN_ABORTED;
}
if (wr == THREAD_AWAKENED) {
break;
}
}
}
if (pages_wanted_p != NULL) {
*pages_wanted_p = mach_vm_ctl_page_free_wanted();
}
if (pages_reclaimed_p == NULL) {
return KERN_SUCCESS;
}
do {
vm_pageout_now = vm_pageout_stat_now;
pages_reclaimed = 0;
for (vm_pageout_then =
VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
vm_pageout_then != vm_pageout_now &&
nsecs_monitored-- != 0;
vm_pageout_then =
VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
}
} while (vm_pageout_now != vm_pageout_stat_now);
*pages_reclaimed_p = pages_reclaimed;
return KERN_SUCCESS;
}
#define PAGE_STATE_SPECULATIVE 1
#define PAGE_STATE_THROTTLED 2
#define PAGE_STATE_ZEROFILL 3
#define PAGE_STATE_INACTIVE 4
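/*
 * If a page (or its whole object) is marked reusable, the pageout daemon
 * looking at it ends the "reuse" window: fold the page back into the
 * object's normal accounting before deciding its fate.
 */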
#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m) \
MACRO_BEGIN \
\
if ((m)->reusable || \
(m)->object->all_reusable) { \
vm_object_reuse_pages((m)->object, \
(m)->offset, \
(m)->offset + PAGE_SIZE_64, \
FALSE); \
} \
MACRO_END
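/*
 * vm_pageout_scan:
 * The core of the pageout daemon.  Trims the active queue toward the
 * inactive target, ages speculative pages, purges volatile objects, and
 * then reclaims or launders pages from the speculative, zero-fill and
 * inactive queues until the free target is met, throttling against the
 * pageout queues and backing off through the flow_control state machine
 * when the default pager appears wedged.  Takes the page-queue lock on
 * entry; the lock is dropped and retaken around free-list processing and
 * blocking waits, and released before returning.
 */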
void
vm_pageout_scan(void)
{
unsigned int loop_count = 0;
unsigned int inactive_burst_count = 0;
unsigned int active_burst_count = 0;
unsigned int reactivated_this_call;
unsigned int reactivate_limit;
vm_page_t local_freeq = NULL;
int local_freed = 0;
int delayed_unlock;
int refmod_state = 0;
int vm_pageout_deadlock_target = 0;
struct vm_pageout_queue *iq;
struct vm_pageout_queue *eq;
struct vm_speculative_age_q *sq;
struct flow_control flow_control = { 0, { 0, 0 } };
boolean_t inactive_throttled = FALSE;
boolean_t try_failed;
mach_timespec_t ts;
unsigned int msecs = 0;
vm_object_t object;
vm_object_t last_object_tried;
#if defined(__ppc__)
unsigned int zf_ratio;
unsigned int zf_run_count;
#else
uint64_t zf_ratio;
uint64_t zf_run_count;
#endif
uint32_t catch_up_count = 0;
uint32_t inactive_reclaim_run;
boolean_t forced_reclaim;
int page_prev_state = 0;
flow_control.state = FCS_IDLE;
iq = &vm_pageout_queue_internal;
eq = &vm_pageout_queue_external;
sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
vm_page_lock_queues();
delayed_unlock = 1;
reactivated_this_call = 0;
reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
vm_page_inactive_count);
inactive_reclaim_run = 0;
Restart:
assert(delayed_unlock != 0);
{
#if defined(__ppc__)
uint32_t total = vm_page_active_count + vm_page_inactive_count;
uint32_t normal = total - vm_zf_count;
#else
uint64_t total = vm_page_active_count + vm_page_inactive_count;
uint64_t normal = total - vm_zf_count;
#endif
if (vm_zf_count < vm_accellerate_zf_pageout_trigger)
zf_ratio = 0;
else if ((vm_zf_count <= normal) || (normal == 0))
zf_ratio = 1;
else
zf_ratio = vm_zf_count / normal;
zf_run_count = 0;
}
vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
vm_page_inactive_count +
vm_page_speculative_count);
vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);
vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
vm_page_inactive_count);
object = NULL;
last_object_tried = NULL;
try_failed = FALSE;
if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
catch_up_count = vm_page_inactive_count + vm_page_speculative_count;
else
catch_up_count = 0;
for (;;) {
vm_page_t m;
DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
if (delayed_unlock == 0) {
vm_page_lock_queues();
delayed_unlock = 1;
}
active_burst_count = MIN(vm_pageout_burst_active_throttle,
vm_page_active_count);
if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
goto done_moving_active_pages;
while (!queue_empty(&vm_page_queue_active) && active_burst_count) {
active_burst_count--;
vm_pageout_active++;
m = (vm_page_t) queue_first(&vm_page_queue_active);
assert(m->active && !m->inactive);
assert(!m->laundry);
assert(m->object != kernel_object);
assert(m->phys_page != vm_page_guard_addr);
DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
if (m->object != object) {
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
if (!vm_object_lock_try_scan(m->object)) {
queue_remove(&vm_page_queue_active, m,
vm_page_t, pageq);
queue_enter(&vm_page_queue_active, m,
vm_page_t, pageq);
try_failed = TRUE;
m = (vm_page_t) queue_first(&vm_page_queue_active);
vm_pageout_scan_wants_object = m->object;
goto done_with_activepage;
}
object = m->object;
try_failed = FALSE;
}
if (m->busy) {
queue_remove(&vm_page_queue_active, m,
vm_page_t, pageq);
m->pageq.next = NULL;
m->pageq.prev = NULL;
if (!m->fictitious)
vm_page_active_count--;
m->active = FALSE;
goto done_with_activepage;
}
VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
vm_page_deactivate(m);
done_with_activepage:
if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
if (object != NULL) {
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
vm_object_unlock(object);
object = NULL;
}
if (local_freeq) {
vm_page_unlock_queues();
vm_page_free_list(local_freeq, TRUE);
local_freeq = NULL;
local_freed = 0;
vm_page_lock_queues();
} else
lck_mtx_yield(&vm_page_queue_lock);
delayed_unlock = 1;
}
}
done_moving_active_pages:
if (vm_page_free_count + local_freed >= vm_page_free_target) {
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
}
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
if (local_freeq) {
vm_page_unlock_queues();
vm_page_free_list(local_freeq, TRUE);
local_freeq = NULL;
local_freed = 0;
vm_page_lock_queues();
}
vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
vm_page_inactive_count +
vm_page_speculative_count);
#ifndef CONFIG_EMBEDDED
if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
!queue_empty(&vm_page_queue_active))
continue;
#endif
lck_mtx_lock(&vm_page_queue_free_lock);
if ((vm_page_free_count >= vm_page_free_target) &&
(vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
vm_page_unlock_queues();
thread_wakeup((event_t) &vm_pageout_garbage_collect);
assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
return;
}
lck_mtx_unlock(&vm_page_queue_free_lock);
}
assert(available_for_purge >= 0);
if (available_for_purge) {
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
}
if (vm_purgeable_object_purge_one() == TRUE) {
continue;
}
}
if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
struct vm_speculative_age_q *aq;
mach_timespec_t ts_fully_aged;
boolean_t can_steal = FALSE;
int num_scanned_queues;
aq = &vm_page_queue_speculative[speculative_steal_index];
num_scanned_queues = 0;
while (queue_empty(&aq->age_q) &&
num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
speculative_steal_index++;
if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
aq = &vm_page_queue_speculative[speculative_steal_index];
}
if (num_scanned_queues ==
VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
printf("vm_pageout_scan: "
"all speculative queues empty "
"but count=%d. Re-adjusting.\n",
vm_page_speculative_count);
if (vm_page_speculative_count >
vm_page_speculative_count_drift_max)
vm_page_speculative_count_drift_max = vm_page_speculative_count;
vm_page_speculative_count_drifts++;
#if 6553678
Debugger("vm_pageout_scan: no speculative pages");
#endif
vm_page_speculative_count = 0;
continue;
}
if (vm_page_speculative_count > vm_page_speculative_target)
can_steal = TRUE;
else {
ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) / 1000;
ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) % 1000)
* 1000 * NSEC_PER_USEC;
ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
clock_sec_t sec;
clock_nsec_t nsec;
clock_get_system_nanotime(&sec, &nsec);
ts.tv_sec = (unsigned int) sec;
ts.tv_nsec = nsec;
if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
can_steal = TRUE;
}
if (can_steal == TRUE)
vm_page_speculate_ageit(aq);
}
if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_zf) && queue_empty(&sq->age_q) &&
(VM_PAGE_Q_THROTTLED(iq) || queue_empty(&vm_page_queue_throttled))) {
vm_pageout_scan_empty_throttle++;
msecs = vm_pageout_empty_wait;
goto vm_pageout_scan_delay;
} else if (inactive_burst_count >=
MIN(vm_pageout_burst_inactive_throttle,
(vm_page_inactive_count +
vm_page_speculative_count))) {
vm_pageout_scan_burst_throttle++;
msecs = vm_pageout_burst_wait;
goto vm_pageout_scan_delay;
} else if (VM_PAGE_Q_THROTTLED(iq) && IP_VALID(memory_manager_default)) {
clock_sec_t sec;
clock_nsec_t nsec;
switch (flow_control.state) {
case FCS_IDLE:
reset_deadlock_timer:
ts.tv_sec = vm_pageout_deadlock_wait / 1000;
ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
clock_get_system_nanotime(&sec, &nsec);
flow_control.ts.tv_sec = (unsigned int) sec;
flow_control.ts.tv_nsec = nsec;
ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
flow_control.state = FCS_DELAYED;
msecs = vm_pageout_deadlock_wait;
break;
case FCS_DELAYED:
clock_get_system_nanotime(&sec, &nsec);
ts.tv_sec = (unsigned int) sec;
ts.tv_nsec = nsec;
if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
vm_pageout_scan_deadlock_detected++;
flow_control.state = FCS_DEADLOCK_DETECTED;
thread_wakeup((event_t) &vm_pageout_garbage_collect);
goto consider_inactive;
}
msecs = vm_pageout_idle_wait;
break;
case FCS_DEADLOCK_DETECTED:
if (vm_pageout_deadlock_target)
goto consider_inactive;
goto reset_deadlock_timer;
}
vm_pageout_scan_throttle++;
iq->pgo_throttled = TRUE;
vm_pageout_scan_delay:
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
}
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
if (local_freeq) {
vm_page_unlock_queues();
vm_page_free_list(local_freeq, TRUE);
local_freeq = NULL;
local_freed = 0;
vm_page_lock_queues();
if (flow_control.state == FCS_DELAYED &&
!VM_PAGE_Q_THROTTLED(iq)) {
flow_control.state = FCS_IDLE;
vm_pageout_scan_throttle_aborted++;
goto consider_inactive;
}
}
#if CONFIG_EMBEDDED
{
int percent_avail;
percent_avail =
(vm_page_active_count + vm_page_inactive_count +
vm_page_speculative_count + vm_page_free_count +
(IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
atop_64(max_mem);
if (percent_avail >= (kern_memorystatus_level + 5) ||
percent_avail <= (kern_memorystatus_level - 5)) {
kern_memorystatus_level = percent_avail;
thread_wakeup((event_t)&kern_memorystatus_wakeup);
}
}
#endif
assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);
counter(c_vm_pageout_scan_block++);
vm_page_unlock_queues();
assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
thread_block(THREAD_CONTINUE_NULL);
vm_page_lock_queues();
delayed_unlock = 1;
iq->pgo_throttled = FALSE;
if (loop_count >= vm_page_inactive_count)
loop_count = 0;
inactive_burst_count = 0;
goto Restart;
}
flow_control.state = FCS_IDLE;
consider_inactive:
loop_count++;
inactive_burst_count++;
vm_pageout_inactive++;
while (1) {
m = NULL;
if (IP_VALID(memory_manager_default)) {
assert(vm_page_throttled_count == 0);
assert(queue_empty(&vm_page_queue_throttled));
}
if ( !queue_empty(&sq->age_q) ) {
m = (vm_page_t) queue_first(&sq->age_q);
break;
}
if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
queue_empty(&vm_page_queue_inactive)) {
if ( !queue_empty(&vm_page_queue_zf) ) {
m = (vm_page_t) queue_first(&vm_page_queue_zf);
zf_run_count++;
break;
}
}
if ( !queue_empty(&vm_page_queue_inactive) ) {
m = (vm_page_t) queue_first(&vm_page_queue_inactive);
zf_run_count = 0;
break;
}
panic("vm_pageout: no victim");
}
assert(!m->active && (m->inactive || m->speculative || m->throttled));
assert(!m->laundry);
assert(m->object != kernel_object);
assert(m->phys_page != vm_page_guard_addr);
if (!m->speculative) {
vm_pageout_stats[vm_pageout_stat_now].considered++;
}
DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
if (m->object != object) {
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
if (!vm_object_lock_try_scan(m->object)) {
vm_pageout_inactive_nolock++;
requeue_page:
if (m->zero_fill) {
if (m->speculative) {
panic("vm_pageout_scan(): page %p speculative and zero-fill !?\n", m);
}
assert(!m->speculative);
queue_remove(&vm_page_queue_zf, m,
vm_page_t, pageq);
queue_enter(&vm_page_queue_zf, m,
vm_page_t, pageq);
} else if (m->speculative) {
remque(&m->pageq);
m->speculative = FALSE;
vm_page_speculative_count--;
queue_enter_first(&vm_page_queue_inactive, m,
vm_page_t, pageq);
m->inactive = TRUE;
vm_page_inactive_count++;
token_new_pagecount++;
} else if (m->throttled) {
queue_remove(&vm_page_queue_throttled, m,
vm_page_t, pageq);
m->throttled = FALSE;
vm_page_throttled_count--;
queue_enter(&vm_page_queue_inactive, m,
vm_page_t, pageq);
m->inactive = TRUE;
vm_page_inactive_count++;
token_new_pagecount++;
} else {
queue_remove(&vm_page_queue_inactive, m,
vm_page_t, pageq);
#if MACH_ASSERT
vm_page_inactive_count--;
#endif
vm_purgeable_q_advance_all();
queue_enter(&vm_page_queue_inactive, m,
vm_page_t, pageq);
#if MACH_ASSERT
vm_page_inactive_count++;
#endif
token_new_pagecount++;
}
pmap_clear_reference(m->phys_page);
m->reference = FALSE;
if ( !queue_empty(&sq->age_q) )
m = (vm_page_t) queue_first(&sq->age_q);
else if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
queue_empty(&vm_page_queue_inactive)) {
if ( !queue_empty(&vm_page_queue_zf) )
m = (vm_page_t) queue_first(&vm_page_queue_zf);
} else if ( !queue_empty(&vm_page_queue_inactive) ) {
m = (vm_page_t) queue_first(&vm_page_queue_inactive);
}
vm_pageout_scan_wants_object = m->object;
try_failed = TRUE;
goto done_with_inactivepage;
}
object = m->object;
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
try_failed = FALSE;
}
if (!object->pager_initialized && object->pager_created) {
vm_pageout_inactive_avoid++;
goto requeue_page;
}
if (m->speculative) {
remque(&m->pageq);
page_prev_state = PAGE_STATE_SPECULATIVE;
m->speculative = FALSE;
vm_page_speculative_count--;
} else if (m->throttled) {
queue_remove(&vm_page_queue_throttled, m, vm_page_t, pageq);
page_prev_state = PAGE_STATE_THROTTLED;
m->throttled = FALSE;
vm_page_throttled_count--;
} else {
if (m->zero_fill) {
queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
page_prev_state = PAGE_STATE_ZEROFILL;
vm_zf_queue_count--;
} else {
page_prev_state = PAGE_STATE_INACTIVE;
queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
}
m->inactive = FALSE;
if (!m->fictitious)
vm_page_inactive_count--;
vm_purgeable_q_advance_all();
}
m->pageq.next = NULL;
m->pageq.prev = NULL;
if ( !m->fictitious && catch_up_count)
catch_up_count--;
if ( !m->encrypted_cleaning && (m->busy || !object->alive)) {
vm_pageout_inactive_busy++;
goto done_with_inactivepage;
}
if (m->absent || m->error) {
vm_pageout_inactive_absent++;
reclaim_page:
if (vm_pageout_deadlock_target) {
vm_pageout_scan_inactive_throttle_success++;
vm_pageout_deadlock_target--;
}
DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
if (object->internal) {
DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
} else {
DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
}
vm_page_free_prepare_queues(m);
if (m->tabled)
vm_page_remove(m, TRUE);
assert(m->pageq.next == NULL &&
m->pageq.prev == NULL);
m->pageq.next = (queue_entry_t)local_freeq;
local_freeq = m;
local_freed++;
inactive_burst_count = 0;
if (page_prev_state != PAGE_STATE_SPECULATIVE) {
vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
page_prev_state = 0;
}
goto done_with_inactivepage;
}
assert(!m->private);
assert(!m->fictitious);
if (m->cleaning) {
m->busy = TRUE;
m->pageout = TRUE;
m->dump_cleaning = TRUE;
vm_page_wire(m);
CLUSTER_STAT(vm_pageout_cluster_conversions++);
inactive_burst_count = 0;
goto done_with_inactivepage;
}
if (object->copy == VM_OBJECT_NULL) {
if (object->purgable == VM_PURGABLE_EMPTY) {
m->busy = TRUE;
if (m->pmapped == TRUE) {
refmod_state = pmap_disconnect(m->phys_page);
if (refmod_state & VM_MEM_MODIFIED) {
m->dirty = TRUE;
}
}
if (m->dirty || m->precious) {
vm_page_purged_count++;
}
goto reclaim_page;
}
if (object->purgable == VM_PURGABLE_VOLATILE) {
assert(!VM_PAGE_WIRED(m));
goto reactivate_page;
}
}
refmod_state = -1;
if (m->reference == FALSE && m->pmapped == TRUE) {
refmod_state = pmap_get_refmod(m->phys_page);
if (refmod_state & VM_MEM_REFERENCED)
m->reference = TRUE;
if (refmod_state & VM_MEM_MODIFIED)
m->dirty = TRUE;
}
if (m->reference || m->dirty) {
VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
}
if (m->reference && !m->no_cache) {
if (++reactivated_this_call >= reactivate_limit) {
vm_pageout_reactivation_limit_exceeded++;
} else if (catch_up_count) {
vm_pageout_catch_ups++;
} else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
vm_pageout_inactive_force_reclaim++;
} else {
uint32_t isinuse;
reactivate_page:
if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
vm_page_deactivate(m);
vm_pageout_inactive_deactivated++;
} else {
vm_page_activate(m);
VM_STAT_INCR(reactivations);
}
vm_pageout_inactive_used++;
inactive_burst_count = 0;
goto done_with_inactivepage;
}
if ((refmod_state == -1) && !m->dirty && m->pmapped) {
refmod_state = pmap_get_refmod(m->phys_page);
if (refmod_state & VM_MEM_MODIFIED)
m->dirty = TRUE;
}
forced_reclaim = TRUE;
} else {
forced_reclaim = FALSE;
}
XPR(XPR_VM_PAGEOUT,
"vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
object, m->offset, m, 0,0);
inactive_throttled = FALSE;
if (m->dirty || m->precious) {
if (object->internal) {
if (VM_PAGE_Q_THROTTLED(iq))
inactive_throttled = TRUE;
} else if (VM_PAGE_Q_THROTTLED(eq)) {
inactive_throttled = TRUE;
}
}
if (inactive_throttled == TRUE) {
throttle_inactive:
if (!IP_VALID(memory_manager_default) &&
object->internal &&
(object->purgable == VM_PURGABLE_DENY ||
object->purgable == VM_PURGABLE_NONVOLATILE ||
object->purgable == VM_PURGABLE_VOLATILE )) {
queue_enter(&vm_page_queue_throttled, m,
vm_page_t, pageq);
m->throttled = TRUE;
vm_page_throttled_count++;
} else {
if (m->zero_fill) {
queue_enter(&vm_page_queue_zf, m,
vm_page_t, pageq);
vm_zf_queue_count++;
} else
queue_enter(&vm_page_queue_inactive, m,
vm_page_t, pageq);
m->inactive = TRUE;
if (!m->fictitious) {
vm_page_inactive_count++;
token_new_pagecount++;
}
}
vm_pageout_scan_inactive_throttled++;
goto done_with_inactivepage;
}
m->busy = TRUE;
if (m->pmapped == TRUE) {
refmod_state = pmap_disconnect(m->phys_page);
if (refmod_state & VM_MEM_MODIFIED)
m->dirty = TRUE;
if (refmod_state & VM_MEM_REFERENCED) {
if ( ! m->reference ) {
m->reference = TRUE;
if (forced_reclaim ||
++reactivated_this_call >= reactivate_limit)
vm_pageout_reactivation_limit_exceeded++;
else {
PAGE_WAKEUP_DONE(m);
goto reactivate_page;
}
}
}
}
inactive_reclaim_run = 0;
if (!m->dirty && !m->precious) {
if (m->zero_fill)
vm_pageout_inactive_zf++;
vm_pageout_inactive_clean++;
goto reclaim_page;
}
{
boolean_t disconnect_throttled = FALSE;
if (object->internal) {
if (VM_PAGE_Q_THROTTLED(iq))
disconnect_throttled = TRUE;
} else if (VM_PAGE_Q_THROTTLED(eq)) {
disconnect_throttled = TRUE;
}
if (disconnect_throttled == TRUE) {
PAGE_WAKEUP_DONE(m);
goto throttle_inactive;
}
}
vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
vm_pageout_cluster(m);
if (m->zero_fill)
vm_pageout_inactive_zf++;
vm_pageout_inactive_dirty++;
inactive_burst_count = 0;
done_with_inactivepage:
if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
if (object != NULL) {
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
vm_object_unlock(object);
object = NULL;
}
if (local_freeq) {
vm_page_unlock_queues();
vm_page_free_list(local_freeq, TRUE);
local_freeq = NULL;
local_freed = 0;
vm_page_lock_queues();
} else
lck_mtx_yield(&vm_page_queue_lock);
delayed_unlock = 1;
}
}
}
int vm_page_free_count_init;
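/*
 * vm_page_free_reserve:
 * Grow the reserved-page pool by "pages" and recompute the free-page
 * min/target/throttle thresholds that depend on it.
 */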
void
vm_page_free_reserve(
int pages)
{
int free_after_reserve;
vm_page_free_reserved += pages;
free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
vm_page_free_min = vm_page_free_reserved +
VM_PAGE_FREE_MIN(free_after_reserve);
if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
vm_page_free_target = vm_page_free_reserved +
VM_PAGE_FREE_TARGET(free_after_reserve);
if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
if (vm_page_free_target < vm_page_free_min + 5)
vm_page_free_target = vm_page_free_min + 5;
vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
vm_page_creation_throttle = vm_page_free_target / 2;
}
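/*
 * vm_pageout_continue:
 * Continuation for the pageout daemon: run one full scan, then block on
 * vm_page_free_wanted with ourselves as the continuation, so each wakeup
 * restarts here on a fresh stack.
 */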
void
vm_pageout_continue(void)
{
DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
vm_pageout_scan_event_counter++;
vm_pageout_scan();
assert(vm_page_free_wanted == 0);
assert(vm_page_free_wanted_privileged == 0);
assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
lck_mtx_unlock(&vm_page_queue_free_lock);
counter(c_vm_pageout_block++);
thread_block((thread_continue_t)vm_pageout_continue);
}
#ifdef FAKE_DEADLOCK
#define FAKE_COUNT 5000
int internal_count = 0;
int fake_deadlock = 0;
#endif
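/*
 * vm_pageout_iothread_continue:
 * Per-queue laundering thread.  Pops pages off pgo_pending, creates a
 * pager for the object if needed (re-activating the page if that fails),
 * and pushes each page to its pager via memory_object_data_return().
 * When the queue drains, unthrottle any waiter and block with this
 * function as the continuation.
 */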
static void
vm_pageout_iothread_continue(struct vm_pageout_queue *q)
{
vm_page_t m = NULL;
vm_object_t object;
boolean_t need_wakeup;
memory_object_t pager;
thread_t self = current_thread();
if ((vm_pageout_internal_iothread != THREAD_NULL)
&& (self == vm_pageout_external_iothread)
&& (self->options & TH_OPT_VMPRIV))
self->options &= ~TH_OPT_VMPRIV;
vm_page_lockspin_queues();
while ( !queue_empty(&q->pgo_pending) ) {
q->pgo_busy = TRUE;
queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
VM_PAGE_CHECK(m);
m->pageout_queue = FALSE;
m->pageq.next = NULL;
m->pageq.prev = NULL;
vm_page_unlock_queues();
#ifdef FAKE_DEADLOCK
if (q == &vm_pageout_queue_internal) {
vm_offset_t addr;
int pg_count;
internal_count++;
if (internal_count == FAKE_COUNT) {
pg_count = vm_page_free_count + vm_page_free_reserved;
if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
}
internal_count = 0;
fake_deadlock++;
}
}
#endif
object = m->object;
vm_object_lock(object);
if (!object->pager_initialized) {
if (!object->pager_initialized)
vm_object_collapse(object,
(vm_object_offset_t) 0,
TRUE);
if (!object->pager_initialized)
vm_object_pager_create(object);
if (!object->pager_initialized) {
vm_page_lockspin_queues();
vm_pageout_queue_steal(m, TRUE);
vm_pageout_dirty_no_pager++;
vm_page_activate(m);
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(m);
vm_object_paging_end(object);
vm_object_unlock(object);
vm_page_lockspin_queues();
continue;
}
}
pager = object->pager;
if (pager == MEMORY_OBJECT_NULL) {
VM_PAGE_FREE(m);
vm_object_paging_end(object);
vm_object_unlock(object);
vm_page_lockspin_queues();
continue;
}
VM_PAGE_CHECK(m);
vm_object_unlock(object);
memory_object_data_return(pager,
m->offset + object->paging_offset,
PAGE_SIZE,
NULL,
NULL,
FALSE,
FALSE,
0);
vm_object_lock(object);
vm_object_paging_end(object);
vm_object_unlock(object);
vm_page_lockspin_queues();
}
assert_wait((event_t) q, THREAD_UNINT);
if (q->pgo_throttled == TRUE && !VM_PAGE_Q_THROTTLED(q)) {
q->pgo_throttled = FALSE;
need_wakeup = TRUE;
} else
need_wakeup = FALSE;
q->pgo_busy = FALSE;
q->pgo_idle = TRUE;
vm_page_unlock_queues();
if (need_wakeup == TRUE)
thread_wakeup((event_t) &q->pgo_laundry);
thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) &q->pgo_pending);
}
static void
vm_pageout_iothread_external(void)
{
thread_t self = current_thread();
self->options |= TH_OPT_VMPRIV;
vm_pageout_iothread_continue(&vm_pageout_queue_external);
}
static void
vm_pageout_iothread_internal(void)
{
thread_t self = current_thread();
self->options |= TH_OPT_VMPRIV;
vm_pageout_iothread_continue(&vm_pageout_queue_internal);
}
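/*
 * vm_set_buffer_cleanup_callout:
 * Register (once) the buffer-cache shrink callback consulted by
 * vm_pageout_garbage_collect(); fails if one is already installed.
 */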
kern_return_t
vm_set_buffer_cleanup_callout(boolean_t (*func)(void))
{
if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
return KERN_SUCCESS;
} else {
return KERN_FAILURE;
}
}
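/*
 * vm_pageout_garbage_collect:
 * Kernel garbage-collection thread: reap kernel stacks, give the machine
 * layer and the buffer cache a chance to shed memory, run the zone GC,
 * then sleep until the pageout daemon kicks us again.
 */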
static void
vm_pageout_garbage_collect(int collect)
{
if (collect) {
boolean_t buf_large_zfree = FALSE;
stack_collect();
consider_machine_collect();
if (consider_buffer_cache_collect != NULL) {
buf_large_zfree = (*consider_buffer_cache_collect)();
}
consider_zone_gc(buf_large_zfree);
consider_machine_adjust();
}
assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
}
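/*
 * vm_pageout:
 * Entry point of the pageout daemon.  Raise our priority, pin a reserved
 * stack, apply defaults for any tuning parameter left at zero, set up
 * the internal/external pageout queues, spawn the external iothread and
 * the garbage collector, then become vm_pageout_continue().
 */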
void
vm_pageout(void)
{
thread_t self = current_thread();
thread_t thread;
kern_return_t result;
spl_t s;
s = splsched();
thread_lock(self);
self->priority = BASEPRI_PREEMPT - 1;
set_sched_pri(self, self->priority);
thread_unlock(self);
if (!self->reserved_stack)
self->reserved_stack = self->kernel_stack;
splx(s);
if (vm_pageout_idle_wait == 0)
vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
if (vm_pageout_burst_wait == 0)
vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
if (vm_pageout_empty_wait == 0)
vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
if (vm_pageout_deadlock_wait == 0)
vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
if (vm_pageout_deadlock_relief == 0)
vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
if (vm_pageout_inactive_relief == 0)
vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
if (vm_pageout_burst_active_throttle == 0)
vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
if (vm_pageout_burst_inactive_throttle == 0)
vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
task_lock(kernel_task);
kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
task_unlock(kernel_task);
vm_page_free_count_init = vm_page_free_count;
if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
} else
vm_page_free_reserve(0);
queue_init(&vm_pageout_queue_external.pgo_pending);
vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
vm_pageout_queue_external.pgo_laundry = 0;
vm_pageout_queue_external.pgo_idle = FALSE;
vm_pageout_queue_external.pgo_busy = FALSE;
vm_pageout_queue_external.pgo_throttled = FALSE;
queue_init(&vm_pageout_queue_internal.pgo_pending);
vm_pageout_queue_internal.pgo_maxlaundry = 0;
vm_pageout_queue_internal.pgo_laundry = 0;
vm_pageout_queue_internal.pgo_idle = FALSE;
vm_pageout_queue_internal.pgo_busy = FALSE;
vm_pageout_queue_internal.pgo_throttled = FALSE;
result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
BASEPRI_PREEMPT - 1,
&vm_pageout_external_iothread);
if (result != KERN_SUCCESS)
panic("vm_pageout_iothread_external: create failed");
thread_deallocate(vm_pageout_external_iothread);
result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
MINPRI_KERNEL,
&thread);
if (result != KERN_SUCCESS)
panic("vm_pageout_garbage_collect: create failed");
thread_deallocate(thread);
vm_object_reaper_init();
vm_pageout_continue();
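/*
 * NOTREACHED: vm_pageout_continue() never returns.  The DTrace probe
 * points below are declared here, in unreachable code, so that scripts
 * referencing these standard VM probe names can still compile against
 * this kernel.
 */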
DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
}
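/*
 * vm_pageout_internal_start:
 * Spawn the internal (default-pager) iothread and give its queue a real
 * laundry limit; presumably called once a default pager has registered.
 */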
kern_return_t
vm_pageout_internal_start(void)
{
kern_return_t result;
vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, NULL, BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
if (result == KERN_SUCCESS)
thread_deallocate(vm_pageout_internal_iothread);
return result;
}
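/*
 * Delayed-work machinery: page-queue operations discovered while only
 * the object lock is held are batched into dw_array, up to
 * DELAYED_WORK_LIMIT entries at a time, and later applied in one pass
 * by dw_do_work().
 */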
#define DELAYED_WORK_LIMIT 32
#define DW_vm_page_unwire 0x01
#define DW_vm_page_wire 0x02
#define DW_vm_page_free 0x04
#define DW_vm_page_activate 0x08
#define DW_vm_page_deactivate_internal 0x10
#define DW_vm_page_speculate 0x20
#define DW_vm_page_lru 0x40
#define DW_vm_pageout_throttle_up 0x80
#define DW_PAGE_WAKEUP 0x100
#define DW_clear_busy 0x200
#define DW_clear_reference 0x400
#define DW_set_reference 0x800
struct dw {
vm_page_t dw_m;
int dw_mask;
};
static void dw_do_work(vm_object_t object, struct dw *dwp, int dw_count);
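/*
 * upl_create:
 * Allocate and initialize a upl; UPL_CREATE_INTERNAL appends space for
 * the page-info array and UPL_CREATE_LITE appends a page bitmap, so the
 * allocation size varies with the requested type.
 */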
static upl_t
upl_create(int type, int flags, upl_size_t size)
{
upl_t upl;
int page_field_size = 0;
int upl_flags = 0;
int upl_size = sizeof(struct upl);
size = round_page_32(size);
if (type & UPL_CREATE_LITE) {
page_field_size = (atop(size) + 7) >> 3;
page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
upl_flags |= UPL_LITE;
}
if (type & UPL_CREATE_INTERNAL) {
upl_size += (int) sizeof(struct upl_page_info) * atop(size);
upl_flags |= UPL_INTERNAL;
}
upl = (upl_t)kalloc(upl_size + page_field_size);
if (page_field_size)
bzero((char *)upl + upl_size, page_field_size);
upl->flags = upl_flags | flags;
upl->src_object = NULL;
upl->kaddr = (vm_offset_t)0;
upl->size = 0;
upl->map_object = NULL;
upl->ref_count = 1;
upl->highest_page = 0;
upl_lock_init(upl);
upl->vector_upl = NULL;
#if UPL_DEBUG
upl->ubc_alias1 = 0;
upl->ubc_alias2 = 0;
upl->upl_creator = current_thread();
upl->upl_state = 0;
upl->upl_commit_index = 0;
bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif
return(upl);
}
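/*
 * upl_destroy:
 * Free a upl whose last reference is gone: drop the shadow object if one
 * was created, and release the variable-sized allocation to match
 * upl_create() (debug builds also unhook it from the object's upl list).
 */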
static void
upl_destroy(upl_t upl)
{
int page_field_size;
int size;
#if UPL_DEBUG
{
vm_object_t object;
if (upl->flags & UPL_SHADOWED) {
object = upl->map_object->shadow;
} else {
object = upl->map_object;
}
vm_object_lock(object);
queue_remove(&object->uplq, upl, upl_t, uplq);
vm_object_unlock(object);
}
#endif
if (upl->flags & UPL_SHADOWED)
vm_object_deallocate(upl->map_object);
if (upl->flags & UPL_DEVICE_MEMORY)
size = PAGE_SIZE;
else
size = upl->size;
page_field_size = 0;
if (upl->flags & UPL_LITE) {
page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
}
upl_lock_destroy(upl);
upl->vector_upl = (vector_upl_t) 0xfeedbeef;
if (upl->flags & UPL_INTERNAL) {
kfree(upl,
sizeof(struct upl) +
(sizeof(struct upl_page_info) * (size/PAGE_SIZE))
+ page_field_size);
} else {
kfree(upl, sizeof(struct upl) + page_field_size);
}
}
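/*
 * Reference-count release paths for UPLs; upl_deallocate() additionally
 * tears down vector-upl state before destroying the final reference.
 */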
void uc_upl_dealloc(upl_t upl);
__private_extern__ void
uc_upl_dealloc(upl_t upl)
{
if (--upl->ref_count == 0)
upl_destroy(upl);
}
void
upl_deallocate(upl_t upl)
{
if (--upl->ref_count == 0) {
if (vector_upl_is_valid(upl))
vector_upl_deallocate(upl);
upl_destroy(upl);
}
}
#if DEVELOPMENT || DEBUG
unsigned long upl_cow = 0;
unsigned long upl_cow_again = 0;
unsigned long upl_cow_pages = 0;
unsigned long upl_cow_again_pages = 0;
unsigned long iopl_cow = 0;
unsigned long iopl_cow_pages = 0;
#endif
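/*
 * vm_object_upl_request:
 * Build a upl covering "size" bytes of "object" starting at "offset".
 * With UPL_COPYOUT_FROM the pages are gathered for pageout (busied,
 * wired, marked cleaning); otherwise they are prepared for incoming I/O
 * (absent pages created, copy objects synchronized).  Page-queue updates
 * are batched through the delayed-work array and flushed by dw_do_work().
 */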
__private_extern__ kern_return_t
vm_object_upl_request(
vm_object_t object,
vm_object_offset_t offset,
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
int cntrl_flags)
{
vm_page_t dst_page = VM_PAGE_NULL;
vm_object_offset_t dst_offset;
upl_size_t xfer_size;
boolean_t dirty;
boolean_t hw_dirty;
upl_t upl = NULL;
unsigned int entry;
#if MACH_CLUSTER_STATS
boolean_t encountered_lrp = FALSE;
#endif
vm_page_t alias_page = NULL;
int refmod_state = 0;
wpl_array_t lite_list = NULL;
vm_object_t last_copy_object;
struct dw dw_array[DELAYED_WORK_LIMIT];
struct dw *dwp;
int dw_count;
if (cntrl_flags & ~UPL_VALID_FLAGS) {
return KERN_INVALID_VALUE;
}
if ( (!object->internal) && (object->paging_offset != 0) )
panic("vm_object_upl_request: external object with non-zero paging offset\n");
if (object->phys_contiguous)
panic("vm_object_upl_request: contiguous object specified\n");
if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
size = MAX_UPL_SIZE * PAGE_SIZE;
if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
*page_list_count = MAX_UPL_SIZE;
if (cntrl_flags & UPL_SET_INTERNAL) {
if (cntrl_flags & UPL_SET_LITE) {
upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
lite_list = (wpl_array_t)
(((uintptr_t)user_page_list) +
((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
if (size == 0) {
user_page_list = NULL;
lite_list = NULL;
}
} else {
upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
user_page_list = NULL;
}
}
} else {
if (cntrl_flags & UPL_SET_LITE) {
upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
lite_list = NULL;
}
} else {
upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
}
}
*upl_ptr = upl;
if (user_page_list)
user_page_list[0].device = FALSE;
if (cntrl_flags & UPL_SET_LITE) {
upl->map_object = object;
} else {
upl->map_object = vm_object_allocate(size);
upl->map_object->shadow = object;
upl->map_object->pageout = TRUE;
upl->map_object->can_persist = FALSE;
upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
upl->map_object->shadow_offset = offset;
upl->map_object->wimg_bits = object->wimg_bits;
VM_PAGE_GRAB_FICTITIOUS(alias_page);
upl->flags |= UPL_SHADOWED;
}
if (cntrl_flags & UPL_ENCRYPT)
upl->flags |= UPL_ENCRYPTED;
if (cntrl_flags & UPL_FOR_PAGEOUT)
upl->flags |= UPL_PAGEOUT;
vm_object_lock(object);
vm_object_activity_begin(object);
upl->size = size;
upl->offset = offset + object->paging_offset;
#if UPL_DEBUG
queue_enter(&object->uplq, upl, upl_t, uplq);
#endif
if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
vm_object_update(object,
offset,
size,
NULL,
NULL,
FALSE,
MEMORY_OBJECT_COPY_SYNC,
VM_PROT_NO_CHANGE);
#if DEVELOPMENT || DEBUG
upl_cow++;
upl_cow_pages += size >> PAGE_SHIFT;
#endif
}
last_copy_object = object->copy;
entry = 0;
xfer_size = size;
dst_offset = offset;
dwp = &dw_array[0];
dw_count = 0;
while (xfer_size) {
dwp->dw_mask = 0;
if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
vm_object_unlock(object);
VM_PAGE_GRAB_FICTITIOUS(alias_page);
vm_object_lock(object);
}
if (cntrl_flags & UPL_COPYOUT_FROM) {
upl->flags |= UPL_PAGE_SYNC_DONE;
if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
dst_page->fictitious ||
dst_page->absent ||
dst_page->error ||
(VM_PAGE_WIRED(dst_page) && !dst_page->pageout && !dst_page->list_req_pending)) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
if (dst_page->pmapped)
refmod_state = pmap_get_refmod(dst_page->phys_page);
else
refmod_state = 0;
if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
dwp->dw_mask |= DW_vm_page_activate;
}
if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
if (dst_page->list_req_pending || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
goto check_busy;
goto dont_return;
}
if ( !(refmod_state & VM_MEM_REFERENCED) &&
((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
goto check_busy;
}
dont_return:
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
check_busy:
if (dst_page->busy && (!(dst_page->list_req_pending && dst_page->pageout))) {
if (cntrl_flags & UPL_NOBLOCK) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
if ((dst_page->cleaning || dst_page->absent || VM_PAGE_WIRED(dst_page)) && !dst_page->list_req_pending) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
int was_busy;
was_busy = dst_page->busy;
dst_page->busy = TRUE;
vm_page_decrypt(dst_page, 0);
vm_page_decrypt_for_upl_counter++;
dst_page->busy = was_busy;
}
if (dst_page->pageout_queue == TRUE) {
vm_page_lockspin_queues();
if (dst_page->pageout_queue == TRUE) {
vm_pageout_throttle_up(dst_page);
}
vm_page_unlock_queues();
}
#if MACH_CLUSTER_STATS
if (dst_page->list_req_pending)
encountered_lrp = TRUE;
if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious)) && !dst_page->list_req_pending) {
if (encountered_lrp)
CLUSTER_STAT(pages_at_higher_offsets++;)
else
CLUSTER_STAT(pages_at_lower_offsets++;)
}
#endif
dst_page->list_req_pending = FALSE;
dst_page->busy = FALSE;
hw_dirty = refmod_state & VM_MEM_MODIFIED;
dirty = hw_dirty ? TRUE : dst_page->dirty;
if (dst_page->phys_page > upl->highest_page)
upl->highest_page = dst_page->phys_page;
if (cntrl_flags & UPL_SET_LITE) {
unsigned int pg_num;
pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
lite_list[pg_num>>5] |= 1 << (pg_num & 31);
if (hw_dirty)
pmap_clear_modify(dst_page->phys_page);
dst_page->cleaning = TRUE;
dst_page->precious = FALSE;
} else {
vm_object_lock(upl->map_object);
vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
vm_object_unlock(upl->map_object);
alias_page->absent = FALSE;
alias_page = NULL;
}
#if MACH_PAGEMAP
vm_external_state_set(object->existence_map, dst_page->offset);
#endif
dst_page->dirty = dirty;
if (!dirty)
dst_page->precious = TRUE;
if (dst_page->pageout)
dst_page->busy = TRUE;
if ( (cntrl_flags & UPL_ENCRYPT) ) {
dst_page->busy = TRUE;
dst_page->encrypted_cleaning = TRUE;
}
if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
if ((!dst_page->pageout) && ( !VM_PAGE_WIRED(dst_page))) {
dst_page->busy = TRUE;
dst_page->pageout = TRUE;
dwp->dw_mask |= DW_vm_page_wire;
}
}
} else {
if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
if (object->copy != VM_OBJECT_NULL) {
vm_object_update(
object,
dst_offset,
xfer_size,
NULL,
NULL,
FALSE,
MEMORY_OBJECT_COPY_SYNC,
VM_PROT_NO_CHANGE);
#if DEVELOPMENT || DEBUG
upl_cow_again++;
upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
#endif
}
last_copy_object = object->copy;
}
dst_page = vm_page_lookup(object, dst_offset);
if (dst_page != VM_PAGE_NULL) {
if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
if ( !(dst_page->absent && dst_page->list_req_pending) ) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
}
if ( !(dst_page->list_req_pending) ) {
if (dst_page->cleaning) {
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
} else {
if (dst_page->fictitious &&
dst_page->phys_page == vm_page_fictitious_addr) {
assert( !dst_page->speculative);
dst_page->list_req_pending = FALSE;
VM_PAGE_FREE(dst_page);
dst_page = NULL;
} else if (dst_page->absent) {
dst_page->list_req_pending = FALSE;
dst_page->busy = FALSE;
} else if (dst_page->pageout) {
dst_page->busy = FALSE;
vm_pageout_queue_steal(dst_page, FALSE);
}
}
}
if (dst_page == VM_PAGE_NULL) {
if (object->private) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
dst_page = vm_page_grab();
if (dst_page == VM_PAGE_NULL) {
if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
vm_object_unlock(object);
VM_PAGE_WAIT();
vm_object_lock(object);
continue;
}
vm_page_insert(dst_page, object, dst_offset);
dst_page->absent = TRUE;
dst_page->busy = FALSE;
if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
dst_page->clustered = TRUE;
}
}
if (dst_page->fictitious) {
panic("need corner case for fictitious page");
}
if (dst_page->busy) {
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
if (cntrl_flags & UPL_ENCRYPT) {
dst_page->encrypted = TRUE;
} else {
dst_page->encrypted = FALSE;
}
dst_page->overwriting = TRUE;
if (dst_page->pmapped) {
if ( !(cntrl_flags & UPL_FILE_IO))
refmod_state = pmap_disconnect(dst_page->phys_page);
else
refmod_state = pmap_get_refmod(dst_page->phys_page);
} else
refmod_state = 0;
hw_dirty = refmod_state & VM_MEM_MODIFIED;
dirty = hw_dirty ? TRUE : dst_page->dirty;
if (cntrl_flags & UPL_SET_LITE) {
unsigned int pg_num;
pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
lite_list[pg_num>>5] |= 1 << (pg_num & 31);
if (hw_dirty)
pmap_clear_modify(dst_page->phys_page);
dst_page->cleaning = TRUE;
dst_page->precious = FALSE;
} else {
vm_object_lock(upl->map_object);
vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
vm_object_unlock(upl->map_object);
alias_page->absent = FALSE;
alias_page = NULL;
}
if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
upl->flags |= UPL_CLEAR_DIRTY;
}
dst_page->dirty = dirty;
if (!dirty)
dst_page->precious = TRUE;
if ( !VM_PAGE_WIRED(dst_page)) {
dst_page->busy = TRUE;
} else
dwp->dw_mask |= DW_vm_page_wire;
dst_page->restart = FALSE;
if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
dwp->dw_mask |= DW_set_reference;
}
dst_page->precious = (cntrl_flags & UPL_PRECIOUS) ? TRUE : FALSE;
}
if (dst_page->phys_page > upl->highest_page)
upl->highest_page = dst_page->phys_page;
if (user_page_list) {
user_page_list[entry].phys_addr = dst_page->phys_page;
user_page_list[entry].pageout = dst_page->pageout;
user_page_list[entry].absent = dst_page->absent;
user_page_list[entry].dirty = dst_page->dirty;
user_page_list[entry].precious = dst_page->precious;
user_page_list[entry].device = FALSE;
if (dst_page->clustered == TRUE)
user_page_list[entry].speculative = dst_page->speculative;
else
user_page_list[entry].speculative = FALSE;
user_page_list[entry].cs_validated = dst_page->cs_validated;
user_page_list[entry].cs_tainted = dst_page->cs_tainted;
}
if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
VM_PAGE_CONSUME_CLUSTERED(dst_page);
}
try_next_page:
if (dwp->dw_mask) {
if (dwp->dw_mask & DW_vm_page_activate)
VM_STAT_INCR(reactivations);
if (dst_page->busy == FALSE) {
dst_page->busy = TRUE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
dwp->dw_m = dst_page;
dwp++;
dw_count++;
if (dw_count >= DELAYED_WORK_LIMIT) {
dw_do_work(object, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
entry++;
dst_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
}
if (dw_count)
dw_do_work(object, &dw_array[0], dw_count);
if (alias_page != NULL) {
VM_PAGE_FREE(alias_page);
}
if (page_list_count != NULL) {
if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
else if (*page_list_count > entry)
*page_list_count = entry;
}
#if UPL_DEBUG
upl->upl_state = 1;
#endif
vm_object_unlock(object);
return KERN_SUCCESS;
}
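/*
 * vm_fault_list_request:
 *
 * Thin wrapper around memory_object_upl_request().  Rejects vectored
 * UPLs, forwards the caller's page-list buffer (if any), and, for
 * internal UPLs, returns a pointer to the page list embedded in the
 * newly created UPL.
 */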
kern_return_t
vm_fault_list_request(
memory_object_control_t control,
vm_object_offset_t offset,
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_t **user_page_list_ptr,
unsigned int page_list_count,
int cntrl_flags);
kern_return_t
vm_fault_list_request(
memory_object_control_t control,
vm_object_offset_t offset,
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_t **user_page_list_ptr,
unsigned int page_list_count,
int cntrl_flags)
{
unsigned int local_list_count;
upl_page_info_t *user_page_list;
kern_return_t kr;
if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
return KERN_INVALID_ARGUMENT;
if (user_page_list_ptr != NULL) {
local_list_count = page_list_count;
user_page_list = *user_page_list_ptr;
} else {
local_list_count = 0;
user_page_list = NULL;
}
kr = memory_object_upl_request(control,
offset,
size,
upl_ptr,
user_page_list,
&local_list_count,
cntrl_flags);
if(kr != KERN_SUCCESS)
return kr;
if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
*user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
}
return KERN_SUCCESS;
}
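/*
 * vm_object_super_upl_request:
 *
 * Round the requested range out to "super_cluster" alignment, doubling
 * the cluster when the request straddles a boundary and clipping at
 * the end of the object, before issuing the underlying
 * vm_object_upl_request().
 */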
__private_extern__ kern_return_t
vm_object_super_upl_request(
vm_object_t object,
vm_object_offset_t offset,
upl_size_t size,
upl_size_t super_cluster,
upl_t *upl,
upl_page_info_t *user_page_list,
unsigned int *page_list_count,
int cntrl_flags)
{
if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
return KERN_FAILURE;
assert(object->paging_in_progress);
offset = offset - object->paging_offset;
if (super_cluster > size) {
vm_object_offset_t base_offset;
upl_size_t super_size;
vm_object_size_t super_size_64;
base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
super_size_64 = ((base_offset + super_size) > object->size) ? (object->size - base_offset) : super_size;
super_size = (upl_size_t) super_size_64;
assert(super_size == super_size_64);
if (offset > (base_offset + super_size)) {
panic("vm_object_super_upl_request: Missed target pageout"
" %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
offset, base_offset, super_size, super_cluster,
size, object->paging_offset);
}
if ((offset + size) > (base_offset + super_size)) {
super_size_64 = (offset + size) - base_offset;
super_size = (upl_size_t) super_size_64;
assert(super_size == super_size_64);
}
offset = base_offset;
size = super_size;
}
return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
}
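/*
 * vm_map_create_upl:
 *
 * Resolve a range of a VM map down to its backing VM object
 * (recursing through submaps, resolving copy-on-write shadows, and
 * honoring UPL_FORCE_DATA_SYNC along the way) and build an I/O UPL
 * against that object via vm_object_iopl_request().
 */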
kern_return_t
vm_map_create_upl(
vm_map_t map,
vm_map_address_t offset,
upl_size_t *upl_size,
upl_t *upl,
upl_page_info_array_t page_list,
unsigned int *count,
int *flags)
{
vm_map_entry_t entry;
int caller_flags;
int force_data_sync;
int sync_cow_data;
vm_object_t local_object;
vm_map_offset_t local_offset;
vm_map_offset_t local_start;
kern_return_t ret;
caller_flags = *flags;
if (caller_flags & ~UPL_VALID_FLAGS) {
return KERN_INVALID_VALUE;
}
force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
if (upl == NULL)
return KERN_INVALID_ARGUMENT;
REDISCOVER_ENTRY:
vm_map_lock_read(map);
if (vm_map_lookup_entry(map, offset, &entry)) {
if ((entry->vme_end - offset) < *upl_size) {
*upl_size = (upl_size_t) (entry->vme_end - offset);
assert(*upl_size == entry->vme_end - offset);
}
if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
*flags = 0;
if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
if (entry->object.vm_object->private)
*flags = UPL_DEV_MEMORY;
if (entry->object.vm_object->phys_contiguous)
*flags |= UPL_PHYS_CONTIG;
}
vm_map_unlock_read(map);
return KERN_SUCCESS;
}
if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
if ((*upl_size/PAGE_SIZE) > MAX_UPL_SIZE)
*upl_size = MAX_UPL_SIZE * PAGE_SIZE;
}
if (entry->object.vm_object == VM_OBJECT_NULL) {
if (vm_map_lock_read_to_write(map))
goto REDISCOVER_ENTRY;
entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
entry->offset = 0;
vm_map_lock_write_to_read(map);
}
if (!(caller_flags & UPL_COPYOUT_FROM)) {
if (!(entry->protection & VM_PROT_WRITE)) {
vm_map_unlock_read(map);
return KERN_PROTECTION_FAILURE;
}
if (entry->needs_copy) {
vm_map_t local_map;
vm_object_t object;
vm_object_offset_t new_offset;
vm_prot_t prot;
boolean_t wired;
vm_map_version_t version;
vm_map_t real_map;
local_map = map;
if (vm_map_lookup_locked(&local_map,
offset, VM_PROT_WRITE,
OBJECT_LOCK_EXCLUSIVE,
&version, &object,
&new_offset, &prot, &wired,
NULL,
&real_map) != KERN_SUCCESS) {
vm_map_unlock_read(local_map);
return KERN_FAILURE;
}
if (real_map != map)
vm_map_unlock(real_map);
vm_map_unlock_read(local_map);
vm_object_unlock(object);
goto REDISCOVER_ENTRY;
}
}
if (entry->is_sub_map) {
vm_map_t submap;
submap = entry->object.sub_map;
local_start = entry->vme_start;
local_offset = entry->offset;
vm_map_reference(submap);
vm_map_unlock_read(map);
ret = vm_map_create_upl(submap,
local_offset + (offset - local_start),
upl_size, upl, page_list, count, flags);
vm_map_deallocate(submap);
return ret;
}
if (sync_cow_data) {
if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
local_object = entry->object.vm_object;
local_start = entry->vme_start;
local_offset = entry->offset;
vm_object_reference(local_object);
vm_map_unlock_read(map);
if (local_object->shadow && local_object->copy) {
vm_object_lock_request(
local_object->shadow,
(vm_object_offset_t)
((offset - local_start) +
local_offset) +
local_object->shadow_offset,
*upl_size, FALSE,
MEMORY_OBJECT_DATA_SYNC,
VM_PROT_NO_CHANGE);
}
sync_cow_data = FALSE;
vm_object_deallocate(local_object);
goto REDISCOVER_ENTRY;
}
}
if (force_data_sync) {
local_object = entry->object.vm_object;
local_start = entry->vme_start;
local_offset = entry->offset;
vm_object_reference(local_object);
vm_map_unlock_read(map);
vm_object_lock_request(
local_object,
(vm_object_offset_t)
((offset - local_start) + local_offset),
(vm_object_size_t)*upl_size, FALSE,
MEMORY_OBJECT_DATA_SYNC,
VM_PROT_NO_CHANGE);
force_data_sync = FALSE;
vm_object_deallocate(local_object);
goto REDISCOVER_ENTRY;
}
if (entry->object.vm_object->private)
*flags = UPL_DEV_MEMORY;
else
*flags = 0;
if (entry->object.vm_object->phys_contiguous)
*flags |= UPL_PHYS_CONTIG;
local_object = entry->object.vm_object;
local_offset = entry->offset;
local_start = entry->vme_start;
vm_object_reference(local_object);
vm_map_unlock_read(map);
ret = vm_object_iopl_request(local_object,
(vm_object_offset_t) ((offset - local_start) + local_offset),
*upl_size,
upl,
page_list,
count,
caller_flags);
vm_object_deallocate(local_object);
return(ret);
}
vm_map_unlock_read(map);
return(KERN_FAILURE);
}
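/*
 * vm_map_enter_upl:
 *
 * Map a UPL into the given map, or, for a vectored UPL, map every
 * sub-UPL into a dedicated submap.  UPLs that are neither device
 * memory nor I/O-wired are first given a shadow map object populated
 * with wired fictitious alias pages; the pages are then entered into
 * the pmap and the UPL is marked UPL_PAGE_LIST_MAPPED.
 */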
kern_return_t
vm_map_enter_upl(
vm_map_t map,
upl_t upl,
vm_map_offset_t *dst_addr)
{
vm_map_size_t size;
vm_object_offset_t offset;
vm_map_offset_t addr;
vm_page_t m;
kern_return_t kr;
int isVectorUPL = 0, curr_upl=0;
upl_t vector_upl = NULL;
vm_offset_t vector_upl_dst_addr = 0;
vm_map_t vector_upl_submap = NULL;
upl_offset_t subupl_offset = 0;
upl_size_t subupl_size = 0;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
if((isVectorUPL = vector_upl_is_valid(upl))) {
int mapped=0,valid_upls=0;
vector_upl = upl;
upl_lock(vector_upl);
for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
if(upl == NULL)
continue;
valid_upls++;
if (UPL_PAGE_LIST_MAPPED & upl->flags)
mapped++;
}
if(mapped) {
if(mapped != valid_upls)
panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
else {
upl_unlock(vector_upl);
return KERN_FAILURE;
}
}
kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
if( kr != KERN_SUCCESS )
panic("Vector UPL submap allocation failed\n");
map = vector_upl_submap;
vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
curr_upl=0;
}
else
upl_lock(upl);
process_upl_to_enter:
if(isVectorUPL){
if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
*dst_addr = vector_upl_dst_addr;
upl_unlock(vector_upl);
return KERN_SUCCESS;
}
upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
if(upl == NULL)
goto process_upl_to_enter;
vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
*dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
}
if (UPL_PAGE_LIST_MAPPED & upl->flags) {
upl_unlock(upl);
return KERN_FAILURE;
}
if ((!(upl->flags & UPL_SHADOWED)) && !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) ||
(upl->map_object->phys_contiguous))) {
vm_object_t object;
vm_page_t alias_page;
vm_object_offset_t new_offset;
unsigned int pg_num;
wpl_array_t lite_list;
if (upl->flags & UPL_INTERNAL) {
lite_list = (wpl_array_t)
((((uintptr_t)upl) + sizeof(struct upl))
+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
} else {
lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
}
object = upl->map_object;
upl->map_object = vm_object_allocate(upl->size);
vm_object_lock(upl->map_object);
upl->map_object->shadow = object;
upl->map_object->pageout = TRUE;
upl->map_object->can_persist = FALSE;
upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
upl->map_object->shadow_offset = upl->offset - object->paging_offset;
upl->map_object->wimg_bits = object->wimg_bits;
offset = upl->map_object->shadow_offset;
new_offset = 0;
size = upl->size;
upl->flags |= UPL_SHADOWED;
while (size) {
pg_num = (unsigned int) (new_offset / PAGE_SIZE);
assert(pg_num == new_offset / PAGE_SIZE);
if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
VM_PAGE_GRAB_FICTITIOUS(alias_page);
vm_object_lock(object);
m = vm_page_lookup(object, offset);
if (m == VM_PAGE_NULL) {
panic("vm_upl_map: page missing\n");
}
assert(alias_page->fictitious);
alias_page->fictitious = FALSE;
alias_page->private = TRUE;
alias_page->pageout = TRUE;
alias_page->phys_page = m->phys_page;
vm_object_unlock(object);
vm_page_lockspin_queues();
vm_page_wire(alias_page);
vm_page_unlock_queues();
ASSERT_PAGE_DECRYPTED(m);
vm_page_insert(alias_page, upl->map_object, new_offset);
assert(!alias_page->wanted);
alias_page->busy = FALSE;
alias_page->absent = FALSE;
}
size -= PAGE_SIZE;
offset += PAGE_SIZE_64;
new_offset += PAGE_SIZE_64;
}
vm_object_unlock(upl->map_object);
}
if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || upl->map_object->phys_contiguous)
offset = upl->offset - upl->map_object->paging_offset;
else
offset = 0;
size = upl->size;
vm_object_reference(upl->map_object);
if(!isVectorUPL) {
*dst_addr = 0;
kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
else {
kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if(kr)
panic("vm_map_enter failed for a Vector UPL\n");
}
if (kr != KERN_SUCCESS) {
upl_unlock(upl);
return(kr);
}
vm_object_lock(upl->map_object);
for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
m = vm_page_lookup(upl->map_object, offset);
if (m) {
unsigned int cache_attr;
cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
m->pmapped = TRUE;
assert(map==kernel_map);
PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, cache_attr, TRUE);
}
offset += PAGE_SIZE_64;
}
vm_object_unlock(upl->map_object);
upl->ref_count++;
upl->flags |= UPL_PAGE_LIST_MAPPED;
upl->kaddr = (vm_offset_t) *dst_addr;
assert(upl->kaddr == *dst_addr);
if(!isVectorUPL)
upl_unlock(upl);
else
goto process_upl_to_enter;
return KERN_SUCCESS;
}
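/*
 * vm_map_remove_upl:
 *
 * Undo vm_map_enter_upl(): tear down the mapping of a UPL, or of all
 * sub-UPLs plus the containing submap for a vectored UPL, and drop
 * the reference taken when the mapping was established.
 */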
kern_return_t
vm_map_remove_upl(
vm_map_t map,
upl_t upl)
{
vm_address_t addr;
upl_size_t size;
int isVectorUPL = 0, curr_upl = 0;
upl_t vector_upl = NULL;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
if((isVectorUPL = vector_upl_is_valid(upl))) {
int unmapped=0, valid_upls=0;
vector_upl = upl;
upl_lock(vector_upl);
for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
if(upl == NULL)
continue;
valid_upls++;
if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
unmapped++;
}
if(unmapped) {
if(unmapped != valid_upls)
panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
else {
upl_unlock(vector_upl);
return KERN_FAILURE;
}
}
curr_upl=0;
}
else
upl_lock(upl);
process_upl_to_remove:
if(isVectorUPL) {
if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
vm_map_t v_upl_submap;
vm_offset_t v_upl_submap_dst_addr;
vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
vm_map_deallocate(v_upl_submap);
upl_unlock(vector_upl);
return KERN_SUCCESS;
}
upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
if(upl == NULL)
goto process_upl_to_remove;
}
if (upl->flags & UPL_PAGE_LIST_MAPPED) {
addr = upl->kaddr;
size = upl->size;
assert(upl->ref_count > 1);
upl->ref_count--;
upl->flags &= ~UPL_PAGE_LIST_MAPPED;
upl->kaddr = (vm_offset_t) 0;
if(!isVectorUPL) {
upl_unlock(upl);
vm_map_remove(map,
vm_map_trunc_page(addr),
vm_map_round_page(addr + size),
VM_MAP_NO_FLAGS);
return KERN_SUCCESS;
}
else {
goto process_upl_to_remove;
}
}
upl_unlock(upl);
return KERN_FAILURE;
}
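/*
 * dw_do_work:
 *
 * Flush a batch of delayed page operations.  Acquires the page queues
 * lock (backing off the object lock when the spin attempt fails) and
 * applies each queued DW_* action to its page in turn.
 */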
static void
dw_do_work(
vm_object_t object,
struct dw *dwp,
int dw_count)
{
int j;
boolean_t held_as_spin = TRUE;
if (!vm_page_trylockspin_queues()) {
vm_object_unlock(object);
vm_page_lockspin_queues();
for (j = 0; ; j++) {
if (!vm_object_lock_avoid(object) &&
_vm_object_lock_try(object))
break;
vm_page_unlock_queues();
mutex_pause(j);
vm_page_lockspin_queues();
}
}
for (j = 0; j < dw_count; j++, dwp++) {
if (dwp->dw_mask & DW_vm_pageout_throttle_up)
vm_pageout_throttle_up(dwp->dw_m);
if (dwp->dw_mask & DW_vm_page_wire)
vm_page_wire(dwp->dw_m);
else if (dwp->dw_mask & DW_vm_page_unwire)
vm_page_unwire(dwp->dw_m);
if (dwp->dw_mask & DW_vm_page_free) {
if (held_as_spin == TRUE) {
vm_page_lockconvert_queues();
held_as_spin = FALSE;
}
vm_page_free(dwp->dw_m);
} else {
if (dwp->dw_mask & DW_vm_page_deactivate_internal)
vm_page_deactivate_internal(dwp->dw_m, FALSE);
else if (dwp->dw_mask & DW_vm_page_activate)
vm_page_activate(dwp->dw_m);
else if (dwp->dw_mask & DW_vm_page_speculate)
vm_page_speculate(dwp->dw_m, TRUE);
else if (dwp->dw_mask & DW_vm_page_lru)
vm_page_lru(dwp->dw_m);
if (dwp->dw_mask & DW_set_reference)
dwp->dw_m->reference = TRUE;
else if (dwp->dw_mask & DW_clear_reference)
dwp->dw_m->reference = FALSE;
if (dwp->dw_mask & DW_clear_busy)
dwp->dw_m->busy = FALSE;
if (dwp->dw_mask & DW_PAGE_WAKEUP)
PAGE_WAKEUP(dwp->dw_m);
}
}
vm_page_unlock_queues();
}
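/*
 * upl_commit_range:
 *
 * Commit a range of a UPL, iterating over the sub-UPLs of a vectored
 * UPL.  Clears the lite-list bits, updates each page's dirty,
 * precious, and code-signing state, frees or reactivates pageout
 * targets, and releases the pages through the delayed-work machinery.
 * *empty is set once no pages remain associated with the UPL.
 */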
kern_return_t
upl_commit_range(
upl_t upl,
upl_offset_t offset,
upl_size_t size,
int flags,
upl_page_info_t *page_list,
mach_msg_type_number_t count,
boolean_t *empty)
{
upl_size_t xfer_size, subupl_size = size;
vm_object_t shadow_object;
vm_object_t object;
vm_object_offset_t target_offset;
upl_offset_t subupl_offset = offset;
int entry;
wpl_array_t lite_list;
int occupied;
int clear_refmod = 0;
int pgpgout_count = 0;
struct dw dw_array[DELAYED_WORK_LIMIT];
struct dw *dwp;
int dw_count, isVectorUPL = 0;
upl_t vector_upl = NULL;
*empty = FALSE;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
if (count == 0)
page_list = NULL;
if((isVectorUPL = vector_upl_is_valid(upl))) {
vector_upl = upl;
upl_lock(vector_upl);
}
else
upl_lock(upl);
process_upl_to_commit:
if(isVectorUPL) {
size = subupl_size;
offset = subupl_offset;
if(size == 0) {
upl_unlock(vector_upl);
return KERN_SUCCESS;
}
upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
if(upl == NULL) {
upl_unlock(vector_upl);
return KERN_FAILURE;
}
page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
subupl_size -= size;
subupl_offset += size;
}
#if UPL_DEBUG
if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
upl->upl_commit_index++;
}
#endif
if (upl->flags & UPL_DEVICE_MEMORY)
xfer_size = 0;
else if ((offset + size) <= upl->size)
xfer_size = size;
else {
if(!isVectorUPL)
upl_unlock(upl);
else {
upl_unlock(vector_upl);
}
return KERN_FAILURE;
}
if (upl->flags & UPL_CLEAR_DIRTY)
flags |= UPL_COMMIT_CLEAR_DIRTY;
if (upl->flags & UPL_INTERNAL)
lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
else
lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
object = upl->map_object;
if (upl->flags & UPL_SHADOWED) {
vm_object_lock(object);
shadow_object = object->shadow;
} else {
shadow_object = object;
}
entry = offset/PAGE_SIZE;
target_offset = (vm_object_offset_t)offset;
if (upl->flags & UPL_KERNEL_OBJECT)
vm_object_lock_shared(shadow_object);
else
vm_object_lock(shadow_object);
if (upl->flags & UPL_ACCESS_BLOCKED) {
assert(shadow_object->blocked_access);
shadow_object->blocked_access = FALSE;
vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
}
if (shadow_object->code_signed) {
flags &= ~UPL_COMMIT_CS_VALIDATED;
}
if (! page_list) {
flags &= ~UPL_COMMIT_CS_VALIDATED;
}
dwp = &dw_array[0];
dw_count = 0;
while (xfer_size) {
vm_page_t t, m;
dwp->dw_mask = 0;
clear_refmod = 0;
m = VM_PAGE_NULL;
if (upl->flags & UPL_LITE) {
unsigned int pg_num;
pg_num = (unsigned int) (target_offset/PAGE_SIZE);
assert(pg_num == target_offset/PAGE_SIZE);
if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
if (!(upl->flags & UPL_KERNEL_OBJECT))
m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
}
}
if (upl->flags & UPL_SHADOWED) {
if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
t->pageout = FALSE;
VM_PAGE_FREE(t);
if (m == VM_PAGE_NULL)
m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
}
}
if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
goto commit_next_page;
if (flags & UPL_COMMIT_CS_VALIDATED) {
m->cs_validated = page_list[entry].cs_validated;
m->cs_tainted = page_list[entry].cs_tainted;
}
if (upl->flags & UPL_IO_WIRE) {
dwp->dw_mask |= DW_vm_page_unwire;
if (page_list)
page_list[entry].phys_addr = 0;
if (flags & UPL_COMMIT_SET_DIRTY)
m->dirty = TRUE;
else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
m->dirty = FALSE;
if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
m->cs_validated && !m->cs_tainted) {
m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
vm_cs_validated_resets++;
#endif
pmap_disconnect(m->phys_page);
}
clear_refmod |= VM_MEM_MODIFIED;
}
if (flags & UPL_COMMIT_INACTIVATE) {
dwp->dw_mask |= DW_vm_page_deactivate_internal;
clear_refmod |= VM_MEM_REFERENCED;
}
if (upl->flags & UPL_ACCESS_BLOCKED) {
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
goto commit_next_page;
}
if (flags & UPL_COMMIT_CLEAR_DIRTY) {
m->dirty = FALSE;
if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
m->cs_validated && !m->cs_tainted) {
m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
vm_cs_validated_resets++;
#endif
pmap_disconnect(m->phys_page);
}
clear_refmod |= VM_MEM_MODIFIED;
}
if (page_list) {
upl_page_info_t *p;
p = &(page_list[entry]);
if (p->phys_addr && p->pageout && !m->pageout) {
m->busy = TRUE;
m->pageout = TRUE;
dwp->dw_mask |= DW_vm_page_wire;
} else if (p->phys_addr &&
!p->pageout && m->pageout &&
!m->dump_cleaning) {
m->pageout = FALSE;
m->absent = FALSE;
m->overwriting = FALSE;
dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
}
page_list[entry].phys_addr = 0;
}
m->dump_cleaning = FALSE;
if (m->laundry)
dwp->dw_mask |= DW_vm_pageout_throttle_up;
if (m->pageout) {
m->cleaning = FALSE;
m->encrypted_cleaning = FALSE;
m->pageout = FALSE;
#if MACH_CLUSTER_STATS
if (m->wanted) vm_pageout_target_collisions++;
#endif
m->dirty = FALSE;
if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
m->cs_validated && !m->cs_tainted) {
m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
vm_cs_validated_resets++;
#endif
pmap_disconnect(m->phys_page);
}
if ((flags & UPL_COMMIT_SET_DIRTY) ||
(m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)))
m->dirty = TRUE;
if (m->dirty) {
dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
if (upl->flags & UPL_PAGEOUT) {
CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
VM_STAT_INCR(reactivations);
DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
}
} else {
if (m->object->internal) {
DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
} else {
DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
}
dwp->dw_mask |= DW_vm_page_free;
if (upl->flags & UPL_PAGEOUT) {
CLUSTER_STAT(vm_pageout_target_page_freed++;)
if (page_list[entry].dirty) {
VM_STAT_INCR(pageouts);
DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
pgpgout_count++;
}
}
}
goto commit_next_page;
}
#if MACH_CLUSTER_STATS
if (m->wpmapped)
m->dirty = pmap_is_modified(m->phys_page);
if (m->dirty) vm_pageout_cluster_dirtied++;
else vm_pageout_cluster_cleaned++;
if (m->wanted) vm_pageout_cluster_collisions++;
#endif
m->dirty = FALSE;
if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
m->cs_validated && !m->cs_tainted) {
m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
vm_cs_validated_resets++;
#endif
pmap_disconnect(m->phys_page);
}
if ((m->busy) && (m->cleaning)) {
m->absent = FALSE;
m->overwriting = FALSE;
dwp->dw_mask |= DW_clear_busy;
} else if (m->overwriting) {
assert(VM_PAGE_WIRED(m));
m->overwriting = FALSE;
dwp->dw_mask |= DW_vm_page_unwire;
}
m->cleaning = FALSE;
m->encrypted_cleaning = FALSE;
if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
m->precious = FALSE;
if (flags & UPL_COMMIT_SET_DIRTY)
m->dirty = TRUE;
if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
dwp->dw_mask |= DW_vm_page_deactivate_internal;
clear_refmod |= VM_MEM_REFERENCED;
} else if (!m->active && !m->inactive && !m->speculative) {
if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
dwp->dw_mask |= DW_vm_page_speculate;
else if (m->reference)
dwp->dw_mask |= DW_vm_page_activate;
else {
dwp->dw_mask |= DW_vm_page_deactivate_internal;
clear_refmod |= VM_MEM_REFERENCED;
}
}
if (upl->flags & UPL_ACCESS_BLOCKED) {
dwp->dw_mask |= DW_clear_busy;
}
dwp->dw_mask |= DW_PAGE_WAKEUP;
commit_next_page:
if (clear_refmod)
pmap_clear_refmod(m->phys_page, clear_refmod);
target_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
entry++;
if (dwp->dw_mask) {
if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
if (m->busy == FALSE) {
m->busy = TRUE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
dwp->dw_m = m;
dwp++;
dw_count++;
if (dw_count >= DELAYED_WORK_LIMIT) {
dw_do_work(shadow_object, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
} else {
if (dwp->dw_mask & DW_clear_busy)
m->busy = FALSE;
if (dwp->dw_mask & DW_PAGE_WAKEUP)
PAGE_WAKEUP(m);
}
}
}
if (dw_count)
dw_do_work(shadow_object, &dw_array[0], dw_count);
occupied = 1;
if (upl->flags & UPL_DEVICE_MEMORY) {
occupied = 0;
} else if (upl->flags & UPL_LITE) {
int pg_num;
int i;
pg_num = upl->size/PAGE_SIZE;
pg_num = (pg_num + 31) >> 5;
occupied = 0;
for (i = 0; i < pg_num; i++) {
if (lite_list[i] != 0) {
occupied = 1;
break;
}
}
} else {
if (queue_empty(&upl->map_object->memq))
occupied = 0;
}
if (occupied == 0) {
if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
*empty = TRUE;
if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
vm_object_activity_end(shadow_object);
}
}
vm_object_unlock(shadow_object);
if (object != shadow_object)
vm_object_unlock(object);
if(!isVectorUPL)
upl_unlock(upl);
else {
if(*empty==TRUE) {
*empty = vector_upl_set_subupl(vector_upl, upl, 0);
upl_deallocate(upl);
}
goto process_upl_to_commit;
}
if (pgpgout_count) {
DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
}
return KERN_SUCCESS;
}
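/*
 * upl_abort_range:
 *
 * Abort a range of a UPL.  Absent pages are freed or marked
 * restart/unavailable/error according to the abort flags; pages
 * caught mid-pageout are unwired; UPL_ABORT_DUMP_PAGES frees pages
 * outright.  An I/O-wire UPL aborted without DUMP_PAGES degenerates
 * to a plain commit of the range.
 */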
kern_return_t
upl_abort_range(
upl_t upl,
upl_offset_t offset,
upl_size_t size,
int error,
boolean_t *empty)
{
upl_size_t xfer_size, subupl_size = size;
vm_object_t shadow_object;
vm_object_t object;
vm_object_offset_t target_offset;
upl_offset_t subupl_offset = offset;
int entry;
wpl_array_t lite_list;
int occupied;
struct dw dw_array[DELAYED_WORK_LIMIT];
struct dw *dwp;
int dw_count, isVectorUPL = 0;
upl_t vector_upl = NULL;
*empty = FALSE;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
return upl_commit_range(upl, offset, size, 0, NULL, 0, empty);
if((isVectorUPL = vector_upl_is_valid(upl))) {
vector_upl = upl;
upl_lock(vector_upl);
}
else
upl_lock(upl);
process_upl_to_abort:
if(isVectorUPL) {
size = subupl_size;
offset = subupl_offset;
if(size == 0) {
upl_unlock(vector_upl);
return KERN_SUCCESS;
}
upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
if(upl == NULL) {
upl_unlock(vector_upl);
return KERN_FAILURE;
}
subupl_size -= size;
subupl_offset += size;
}
*empty = FALSE;
#if UPL_DEBUG
if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
upl->upl_commit_index++;
}
#endif
if (upl->flags & UPL_DEVICE_MEMORY)
xfer_size = 0;
else if ((offset + size) <= upl->size)
xfer_size = size;
else {
if(!isVectorUPL)
upl_unlock(upl);
else {
upl_unlock(vector_upl);
}
return KERN_FAILURE;
}
if (upl->flags & UPL_INTERNAL) {
lite_list = (wpl_array_t)
((((uintptr_t)upl) + sizeof(struct upl))
+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
} else {
lite_list = (wpl_array_t)
(((uintptr_t)upl) + sizeof(struct upl));
}
object = upl->map_object;
if (upl->flags & UPL_SHADOWED) {
vm_object_lock(object);
shadow_object = object->shadow;
} else
shadow_object = object;
entry = offset/PAGE_SIZE;
target_offset = (vm_object_offset_t)offset;
if (upl->flags & UPL_KERNEL_OBJECT)
vm_object_lock_shared(shadow_object);
else
vm_object_lock(shadow_object);
if (upl->flags & UPL_ACCESS_BLOCKED) {
assert(shadow_object->blocked_access);
shadow_object->blocked_access = FALSE;
vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
}
dwp = &dw_array[0];
dw_count = 0;
if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
panic("upl_abort_range: kernel_object being DUMPED");
while (xfer_size) {
vm_page_t t, m;
dwp->dw_mask = 0;
m = VM_PAGE_NULL;
if (upl->flags & UPL_LITE) {
unsigned int pg_num;
pg_num = (unsigned int) (target_offset/PAGE_SIZE);
assert(pg_num == target_offset/PAGE_SIZE);
if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
if ( !(upl->flags & UPL_KERNEL_OBJECT))
m = vm_page_lookup(shadow_object, target_offset +
(upl->offset - shadow_object->paging_offset));
}
}
if (upl->flags & UPL_SHADOWED) {
if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
t->pageout = FALSE;
VM_PAGE_FREE(t);
if (m == VM_PAGE_NULL)
m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
}
}
if ((upl->flags & UPL_KERNEL_OBJECT))
goto abort_next_page;
if (m != VM_PAGE_NULL) {
if (m->absent) {
boolean_t must_free = TRUE;
m->clustered = FALSE;
if (error & UPL_ABORT_RESTART) {
m->restart = TRUE;
m->absent = FALSE;
m->unusual = TRUE;
must_free = FALSE;
} else if (error & UPL_ABORT_UNAVAILABLE) {
m->restart = FALSE;
m->unusual = TRUE;
must_free = FALSE;
} else if (error & UPL_ABORT_ERROR) {
m->restart = FALSE;
m->absent = FALSE;
m->error = TRUE;
m->unusual = TRUE;
must_free = FALSE;
}
m->cleaning = FALSE;
m->encrypted_cleaning = FALSE;
m->overwriting = FALSE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
if (must_free == TRUE)
dwp->dw_mask |= DW_vm_page_free;
else
dwp->dw_mask |= DW_vm_page_activate;
} else {
if (m->laundry)
dwp->dw_mask |= DW_vm_pageout_throttle_up;
if (m->pageout) {
assert(m->busy);
assert(m->wire_count == 1);
m->pageout = FALSE;
dwp->dw_mask |= DW_vm_page_unwire;
}
m->dump_cleaning = FALSE;
m->cleaning = FALSE;
m->encrypted_cleaning = FALSE;
m->overwriting = FALSE;
#if MACH_PAGEMAP
vm_external_state_clr(m->object->existence_map, m->offset);
#endif
if (error & UPL_ABORT_DUMP_PAGES) {
pmap_disconnect(m->phys_page);
dwp->dw_mask |= DW_vm_page_free;
} else {
if (error & UPL_ABORT_REFERENCE) {
dwp->dw_mask |= DW_vm_page_lru;
}
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
}
}
abort_next_page:
target_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
entry++;
if (dwp->dw_mask) {
if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
if (m->busy == FALSE) {
m->busy = TRUE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
dwp->dw_m = m;
dwp++;
dw_count++;
if (dw_count >= DELAYED_WORK_LIMIT) {
dw_do_work(shadow_object, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
} else {
if (dwp->dw_mask & DW_clear_busy)
m->busy = FALSE;
if (dwp->dw_mask & DW_PAGE_WAKEUP)
PAGE_WAKEUP(m);
}
}
}
if (dw_count)
dw_do_work(shadow_object, &dw_array[0], dw_count);
occupied = 1;
if (upl->flags & UPL_DEVICE_MEMORY) {
occupied = 0;
} else if (upl->flags & UPL_LITE) {
int pg_num;
int i;
pg_num = upl->size/PAGE_SIZE;
pg_num = (pg_num + 31) >> 5;
occupied = 0;
for (i = 0; i < pg_num; i++) {
if (lite_list[i] != 0) {
occupied = 1;
break;
}
}
} else {
if (queue_empty(&upl->map_object->memq))
occupied = 0;
}
if (occupied == 0) {
if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
*empty = TRUE;
if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
vm_object_activity_end(shadow_object);
}
}
vm_object_unlock(shadow_object);
if (object != shadow_object)
vm_object_unlock(object);
if(!isVectorUPL)
upl_unlock(upl);
else {
if(*empty == TRUE) {
*empty = vector_upl_set_subupl(vector_upl, upl,0);
upl_deallocate(upl);
}
goto process_upl_to_abort;
}
return KERN_SUCCESS;
}
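/* Convenience wrappers: abort or commit an entire UPL in one call. */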
kern_return_t
upl_abort(
upl_t upl,
int error)
{
boolean_t empty;
return upl_abort_range(upl, 0, upl->size, error, &empty);
}
kern_return_t
upl_commit(
upl_t upl,
upl_page_info_t *page_list,
mach_msg_type_number_t count)
{
boolean_t empty;
return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
}
unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
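/*
 * vm_object_iopl_request:
 *
 * Build an I/O UPL directly against a VM object: fault in (or
 * zero-fill) missing pages, optionally replace pages that sit above
 * the 32-bit DMA limit with low-memory copies, wire everything, and
 * record physical addresses in the UPL's lite list and page list.
 */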
kern_return_t
vm_object_iopl_request(
vm_object_t object,
vm_object_offset_t offset,
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
int cntrl_flags)
{
vm_page_t dst_page;
vm_object_offset_t dst_offset;
upl_size_t xfer_size;
upl_t upl = NULL;
unsigned int entry;
wpl_array_t lite_list = NULL;
int no_zero_fill = FALSE;
u_int32_t psize;
kern_return_t ret;
vm_prot_t prot;
struct vm_object_fault_info fault_info;
struct dw dw_array[DELAYED_WORK_LIMIT];
struct dw *dwp;
int dw_count;
int dw_index;
if (cntrl_flags & ~UPL_VALID_FLAGS) {
return KERN_INVALID_VALUE;
}
if (vm_lopage_poolsize == 0)
cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
return KERN_INVALID_VALUE;
if (object->phys_contiguous) {
if ((offset + object->shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
return KERN_INVALID_ADDRESS;
if (((offset + object->shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
return KERN_INVALID_ADDRESS;
}
}
if (cntrl_flags & UPL_ENCRYPT) {
assert(! (cntrl_flags & UPL_ENCRYPT));
}
if (cntrl_flags & UPL_NOZEROFILL)
no_zero_fill = TRUE;
if (cntrl_flags & UPL_COPYOUT_FROM)
prot = VM_PROT_READ;
else
prot = VM_PROT_READ | VM_PROT_WRITE;
if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
size = MAX_UPL_SIZE * PAGE_SIZE;
if (cntrl_flags & UPL_SET_INTERNAL) {
if (page_list_count != NULL)
*page_list_count = MAX_UPL_SIZE;
}
if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
return KERN_INVALID_ARGUMENT;
if ((!object->internal) && (object->paging_offset != 0))
panic("vm_object_iopl_request: external object with non-zero paging offset\n");
if (object->phys_contiguous)
psize = PAGE_SIZE;
else
psize = size;
if (cntrl_flags & UPL_SET_INTERNAL) {
upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
if (size == 0) {
user_page_list = NULL;
lite_list = NULL;
}
} else {
upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
lite_list = NULL;
}
}
if (user_page_list)
user_page_list[0].device = FALSE;
*upl_ptr = upl;
upl->map_object = object;
upl->size = size;
if (object == kernel_object &&
!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
upl->flags |= UPL_KERNEL_OBJECT;
#if UPL_DEBUG
vm_object_lock(object);
#else
vm_object_lock_shared(object);
#endif
} else {
vm_object_lock(object);
vm_object_activity_begin(object);
}
upl->offset = offset + object->paging_offset;
if (cntrl_flags & UPL_BLOCK_ACCESS) {
upl->flags |= UPL_ACCESS_BLOCKED;
}
if (object->phys_contiguous) {
#if UPL_DEBUG
queue_enter(&object->uplq, upl, upl_t, uplq);
#endif
if (upl->flags & UPL_ACCESS_BLOCKED) {
assert(!object->blocked_access);
object->blocked_access = TRUE;
}
vm_object_unlock(object);
upl->flags |= UPL_DEVICE_MEMORY;
upl->highest_page = (ppnum_t) ((offset + object->shadow_offset + size - 1)>>PAGE_SHIFT);
if (user_page_list) {
user_page_list[0].phys_addr = (ppnum_t) ((offset + object->shadow_offset)>>PAGE_SHIFT);
user_page_list[0].device = TRUE;
}
if (page_list_count != NULL) {
if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
else
*page_list_count = 1;
}
return KERN_SUCCESS;
}
if (object != kernel_object) {
object->true_share = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
}
#if UPL_DEBUG
queue_enter(&object->uplq, upl, upl_t, uplq);
#endif
if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
object->copy != VM_OBJECT_NULL) {
vm_object_update(object,
offset,
size,
NULL,
NULL,
FALSE,
MEMORY_OBJECT_COPY_SYNC,
VM_PROT_NO_CHANGE);
#if DEVELOPMENT || DEBUG
iopl_cow++;
iopl_cow_pages += size >> PAGE_SHIFT;
#endif
}
entry = 0;
xfer_size = size;
dst_offset = offset;
fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
fault_info.user_tag = 0;
fault_info.lo_offset = offset;
fault_info.hi_offset = offset + xfer_size;
fault_info.no_cache = FALSE;
fault_info.stealth = FALSE;
dwp = &dw_array[0];
dw_count = 0;
while (xfer_size) {
vm_fault_return_t result;
unsigned int pg_num;
dwp->dw_mask = 0;
dst_page = vm_page_lookup(object, dst_offset);
if (dst_page == VM_PAGE_NULL ||
dst_page->busy ||
dst_page->encrypted ||
dst_page->error ||
dst_page->restart ||
dst_page->absent ||
dst_page->fictitious) {
if (object == kernel_object)
panic("vm_object_iopl_request: missing/bad page in kernel object\n");
do {
vm_page_t top_page;
kern_return_t error_code;
int interruptible;
if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
interruptible = THREAD_ABORTSAFE;
else
interruptible = THREAD_UNINT;
fault_info.interruptible = interruptible;
fault_info.cluster_size = xfer_size;
vm_object_paging_begin(object);
result = vm_fault_page(object, dst_offset,
prot | VM_PROT_WRITE, FALSE,
&prot, &dst_page, &top_page,
(int *)0,
&error_code, no_zero_fill,
FALSE, &fault_info);
switch (result) {
case VM_FAULT_SUCCESS:
PAGE_WAKEUP_DONE(dst_page);
if (top_page != VM_PAGE_NULL) {
vm_object_t local_object;
local_object = top_page->object;
if (top_page->object != dst_page->object) {
vm_object_lock(local_object);
VM_PAGE_FREE(top_page);
vm_object_paging_end(local_object);
vm_object_unlock(local_object);
} else {
VM_PAGE_FREE(top_page);
vm_object_paging_end(local_object);
}
}
vm_object_paging_end(object);
break;
case VM_FAULT_RETRY:
vm_object_lock(object);
break;
case VM_FAULT_FICTITIOUS_SHORTAGE:
vm_page_more_fictitious();
vm_object_lock(object);
break;
case VM_FAULT_MEMORY_SHORTAGE:
if (vm_page_wait(interruptible)) {
vm_object_lock(object);
break;
}
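/* fall thru: the wait for more memory was interrupted */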
case VM_FAULT_INTERRUPTED:
error_code = MACH_SEND_INTERRUPTED;
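/* fall thru */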
case VM_FAULT_MEMORY_ERROR:
memory_error:
ret = (error_code ? error_code: KERN_MEMORY_ERROR);
vm_object_lock(object);
goto return_err;
case VM_FAULT_SUCCESS_NO_VM_PAGE:
vm_object_paging_end(object);
vm_object_unlock(object);
goto memory_error;
default:
panic("vm_object_iopl_request: unexpected error"
" 0x%x from vm_fault_page()\n", result);
}
} while (result != VM_FAULT_SUCCESS);
}
if (upl->flags & UPL_KERNEL_OBJECT)
goto record_phys_addr;
if (dst_page->cleaning) {
vm_object_iopl_request_sleep_for_cleaning++;
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
vm_page_t low_page;
int refmod;
if (VM_PAGE_WIRED(dst_page)) {
ret = KERN_PROTECTION_FAILURE;
goto return_err;
}
low_page = vm_page_grablo();
if (low_page == VM_PAGE_NULL) {
ret = KERN_RESOURCE_SHORTAGE;
goto return_err;
}
if (dst_page->pmapped)
refmod = pmap_disconnect(dst_page->phys_page);
else
refmod = 0;
vm_page_copy(dst_page, low_page);
low_page->reference = dst_page->reference;
low_page->dirty = dst_page->dirty;
if (refmod & VM_MEM_REFERENCED)
low_page->reference = TRUE;
if (refmod & VM_MEM_MODIFIED)
low_page->dirty = TRUE;
vm_page_replace(low_page, object, dst_offset);
dst_page = low_page;
dst_page->busy = FALSE;
}
dwp->dw_mask |= DW_vm_page_wire;
if (cntrl_flags & UPL_BLOCK_ACCESS) {
assert(!dst_page->fictitious);
dst_page->busy = TRUE;
}
dwp->dw_mask |= DW_set_reference;
if (!(cntrl_flags & UPL_COPYOUT_FROM))
dst_page->dirty = TRUE;
record_phys_addr:
pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
lite_list[pg_num>>5] |= 1 << (pg_num & 31);
if (dst_page->phys_page > upl->highest_page)
upl->highest_page = dst_page->phys_page;
if (user_page_list) {
user_page_list[entry].phys_addr = dst_page->phys_page;
user_page_list[entry].pageout = dst_page->pageout;
user_page_list[entry].absent = dst_page->absent;
user_page_list[entry].dirty = dst_page->dirty;
user_page_list[entry].precious = dst_page->precious;
user_page_list[entry].device = FALSE;
if (dst_page->clustered == TRUE)
user_page_list[entry].speculative = dst_page->speculative;
else
user_page_list[entry].speculative = FALSE;
user_page_list[entry].cs_validated = dst_page->cs_validated;
user_page_list[entry].cs_tainted = dst_page->cs_tainted;
}
if (object != kernel_object) {
VM_PAGE_CONSUME_CLUSTERED(dst_page);
}
entry++;
dst_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
if (dwp->dw_mask) {
if (dst_page->busy == FALSE) {
dst_page->busy = TRUE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
dwp->dw_m = dst_page;
dwp++;
dw_count++;
if (dw_count >= DELAYED_WORK_LIMIT) {
dw_do_work(object, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
}
if (dw_count)
dw_do_work(object, &dw_array[0], dw_count);
if (page_list_count != NULL) {
if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
else if (*page_list_count > entry)
*page_list_count = entry;
}
vm_object_unlock(object);
if (cntrl_flags & UPL_BLOCK_ACCESS) {
vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
PMAP_NULL, 0, VM_PROT_NONE);
assert(!object->blocked_access);
object->blocked_access = TRUE;
}
return KERN_SUCCESS;
return_err:
dw_index = 0;
for (; offset < dst_offset; offset += PAGE_SIZE) {
dst_page = vm_page_lookup(object, offset);
if (dst_page == VM_PAGE_NULL)
panic("vm_object_iopl_request: Wired pages missing. \n");
if (dw_count) {
if (dw_array[dw_index].dw_m == dst_page) {
dw_index++;
dw_count--;
continue;
}
}
vm_page_lockspin_queues();
vm_page_unwire(dst_page);
vm_page_unlock_queues();
VM_STAT_INCR(reactivations);
}
#if UPL_DEBUG
upl->upl_state = 2;
#endif
if (! (upl->flags & UPL_KERNEL_OBJECT)) {
vm_object_activity_end(object);
}
vm_object_unlock(object);
upl_destroy(upl);
return ret;
}
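/*
 * upl_transpose:
 *
 * Exchange the backing objects of two equally-sized, zero-offset,
 * non-vectored UPLs via vm_object_transpose(), then swap the UPLs'
 * map_object pointers so each keeps tracking its own pages.
 */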
kern_return_t
upl_transpose(
upl_t upl1,
upl_t upl2)
{
kern_return_t retval;
boolean_t upls_locked;
vm_object_t object1, object2;
if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR) || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
return KERN_INVALID_ARGUMENT;
}
upls_locked = FALSE;
if (upl1 < upl2) {
upl_lock(upl1);
upl_lock(upl2);
} else {
upl_lock(upl2);
upl_lock(upl1);
}
upls_locked = TRUE;
object1 = upl1->map_object;
object2 = upl2->map_object;
if (upl1->offset != 0 || upl2->offset != 0 ||
upl1->size != upl2->size) {
retval = KERN_INVALID_VALUE;
goto done;
}
retval = vm_object_transpose(object1, object2,
(vm_object_size_t) upl1->size);
if (retval == KERN_SUCCESS) {
#if UPL_DEBUG
queue_remove(&object1->uplq, upl1, upl_t, uplq);
queue_remove(&object2->uplq, upl2, upl_t, uplq);
#endif
upl1->map_object = object2;
upl2->map_object = object1;
#if UPL_DEBUG
queue_enter(&object1->uplq, upl2, upl_t, uplq);
queue_enter(&object2->uplq, upl1, upl_t, uplq);
#endif
}
done:
if (upls_locked) {
upl_unlock(upl1);
upl_unlock(upl2);
upls_locked = FALSE;
}
return retval;
}
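/*
 * A small window of kernel virtual address space, reserved once by
 * vm_paging_map_init(), from which individual pages can be mapped
 * for paging and encryption work without allocating kernel_map
 * entries on every call.
 */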
decl_simple_lock_data(,vm_paging_lock)
#define VM_PAGING_NUM_PAGES 64
vm_map_offset_t vm_paging_base_address = 0;
boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
int vm_paging_max_index = 0;
int vm_paging_page_waiter = 0;
int vm_paging_page_waiter_total = 0;
unsigned long vm_paging_no_kernel_page = 0;
unsigned long vm_paging_objects_mapped = 0;
unsigned long vm_paging_pages_mapped = 0;
unsigned long vm_paging_objects_mapped_slow = 0;
unsigned long vm_paging_pages_mapped_slow = 0;
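/*
 * vm_paging_map_init:
 *
 * Carve VM_PAGING_NUM_PAGES pages of virtual space out of the kernel
 * map, backed by kernel_object, for later per-page mappings.
 */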
void
vm_paging_map_init(void)
{
kern_return_t kr;
vm_map_offset_t page_map_offset;
vm_map_entry_t map_entry;
assert(vm_paging_base_address == 0);
page_map_offset = 0;
kr = vm_map_find_space(kernel_map,
&page_map_offset,
VM_PAGING_NUM_PAGES * PAGE_SIZE,
0,
0,
&map_entry);
if (kr != KERN_SUCCESS) {
panic("vm_paging_map_init: kernel_map full\n");
}
map_entry->object.vm_object = kernel_object;
map_entry->offset = page_map_offset;
vm_object_reference(kernel_object);
vm_map_unlock(kernel_map);
assert(vm_paging_base_address == 0);
vm_paging_base_address = page_map_offset;
}
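/*
 * vm_paging_map_object:
 *
 * Map a single page (fast path: one of the preallocated vm_paging
 * slots, sleeping for a free slot when the object may not be
 * unlocked) or an arbitrary range of an object (slow path:
 * vm_map_enter() on the kernel map, which requires temporarily
 * unlocking the object) into kernel virtual space.
 */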
kern_return_t
vm_paging_map_object(
vm_map_offset_t *address,
vm_page_t page,
vm_object_t object,
vm_object_offset_t offset,
vm_map_size_t *size,
vm_prot_t protection,
boolean_t can_unlock_object)
{
kern_return_t kr;
vm_map_offset_t page_map_offset;
vm_map_size_t map_size;
vm_object_offset_t object_offset;
int i;
if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
assert(page->busy);
simple_lock(&vm_paging_lock);
page_map_offset = 0;
for (;;) {
for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
if (vm_paging_page_inuse[i] == FALSE) {
page_map_offset =
vm_paging_base_address +
(i * PAGE_SIZE);
break;
}
}
if (page_map_offset != 0) {
break;
}
if (can_unlock_object) {
break;
}
vm_paging_page_waiter_total++;
vm_paging_page_waiter++;
thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
&vm_paging_lock,
THREAD_UNINT);
vm_paging_page_waiter--;
}
if (page_map_offset != 0) {
if (i > vm_paging_max_index) {
vm_paging_max_index = i;
}
vm_paging_page_inuse[i] = TRUE;
simple_unlock(&vm_paging_lock);
if (page->pmapped == FALSE) {
pmap_sync_page_data_phys(page->phys_page);
}
page->pmapped = TRUE;
PMAP_ENTER(kernel_pmap,
page_map_offset,
page,
protection,
((int) page->object->wimg_bits &
VM_WIMG_MASK),
TRUE);
vm_paging_objects_mapped++;
vm_paging_pages_mapped++;
*address = page_map_offset;
return KERN_SUCCESS;
}
vm_paging_no_kernel_page++;
simple_unlock(&vm_paging_lock);
}
if (! can_unlock_object) {
return KERN_NOT_SUPPORTED;
}
object_offset = vm_object_trunc_page(offset);
map_size = vm_map_round_page(*size);
vm_object_reference_locked(object);
vm_object_unlock(object);
kr = vm_map_enter(kernel_map,
address,
map_size,
0,
VM_FLAGS_ANYWHERE,
object,
object_offset,
FALSE,
protection,
VM_PROT_ALL,
VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
*address = 0;
*size = 0;
vm_object_deallocate(object);
vm_object_lock(object);
return kr;
}
*size = map_size;
vm_object_lock(object);
for (page_map_offset = 0;
map_size != 0;
map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
unsigned int cache_attr;
page = vm_page_lookup(object, offset + page_map_offset);
if (page == VM_PAGE_NULL) {
printf("vm_paging_map_object: no page !?");
vm_object_unlock(object);
kr = vm_map_remove(kernel_map, *address, *size,
VM_MAP_NO_FLAGS);
assert(kr == KERN_SUCCESS);
*address = 0;
*size = 0;
vm_object_lock(object);
return KERN_MEMORY_ERROR;
}
if (page->pmapped == FALSE) {
pmap_sync_page_data_phys(page->phys_page);
}
page->pmapped = TRUE;
cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
PMAP_ENTER(kernel_pmap,
*address + page_map_offset,
page,
protection,
cache_attr,
TRUE);
}
vm_paging_objects_mapped_slow++;
vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
return KERN_SUCCESS;
}
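/*
 * vm_paging_unmap_object:
 *
 * Release a mapping set up by vm_paging_map_object(): either return
 * the preallocated slot (waking any waiter) or remove the kernel-map
 * entry created by the slow path.
 */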
void
vm_paging_unmap_object(
vm_object_t object,
vm_map_offset_t start,
vm_map_offset_t end)
{
kern_return_t kr;
int i;
if ((vm_paging_base_address == 0) ||
(start < vm_paging_base_address) ||
(end > (vm_paging_base_address
+ (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
if (object != VM_OBJECT_NULL) {
vm_object_unlock(object);
}
kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
if (object != VM_OBJECT_NULL) {
vm_object_lock(object);
}
assert(kr == KERN_SUCCESS);
} else {
assert(end - start == PAGE_SIZE);
i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
pmap_remove(kernel_pmap, start, end);
simple_lock(&vm_paging_lock);
vm_paging_page_inuse[i] = FALSE;
if (vm_paging_page_waiter) {
thread_wakeup(&vm_paging_page_waiter);
}
simple_unlock(&vm_paging_lock);
}
}
#if CRYPTO
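/*
 * ENCRYPTED SWAP:
 * AES context and lazily initialized random key used to encrypt page
 * contents in place before they are paged out.
 */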
#define SWAP_CRYPT_AES_KEY_SIZE 128
boolean_t swap_crypt_ctx_initialized = FALSE;
aes_32t swap_crypt_key[8];
aes_ctx swap_crypt_ctx;
const unsigned char swap_crypt_null_iv[AES_BLOCK_SIZE] = {0xa, };
#if DEBUG
boolean_t swap_crypt_ctx_tested = FALSE;
unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
#endif
void swap_crypt_ctx_initialize(void);
void
swap_crypt_ctx_initialize(void)
{
unsigned int i;
if (swap_crypt_ctx_initialized == FALSE) {
for (i = 0;
i < (sizeof (swap_crypt_key) /
sizeof (swap_crypt_key[0]));
i++) {
swap_crypt_key[i] = random();
}
aes_encrypt_key((const unsigned char *) swap_crypt_key,
SWAP_CRYPT_AES_KEY_SIZE,
&swap_crypt_ctx.encrypt);
aes_decrypt_key((const unsigned char *) swap_crypt_key,
SWAP_CRYPT_AES_KEY_SIZE,
&swap_crypt_ctx.decrypt);
swap_crypt_ctx_initialized = TRUE;
}
#if DEBUG
if (swap_crypt_ctx_tested == FALSE) {
for (i = 0; i < 4096; i++) {
swap_crypt_test_page_ref[i] = (char) i;
}
aes_encrypt_cbc(swap_crypt_test_page_ref,
swap_crypt_null_iv,
PAGE_SIZE / AES_BLOCK_SIZE,
swap_crypt_test_page_encrypt,
&swap_crypt_ctx.encrypt);
aes_decrypt_cbc(swap_crypt_test_page_encrypt,
swap_crypt_null_iv,
PAGE_SIZE / AES_BLOCK_SIZE,
swap_crypt_test_page_decrypt,
&swap_crypt_ctx.decrypt);
for (i = 0; i < 4096; i ++) {
if (swap_crypt_test_page_decrypt[i] !=
swap_crypt_test_page_ref[i]) {
panic("encryption test failed");
}
}
aes_encrypt_cbc(swap_crypt_test_page_decrypt,
swap_crypt_null_iv,
PAGE_SIZE / AES_BLOCK_SIZE,
swap_crypt_test_page_decrypt,
&swap_crypt_ctx.encrypt);
aes_decrypt_cbc(swap_crypt_test_page_decrypt,
swap_crypt_null_iv,
PAGE_SIZE / AES_BLOCK_SIZE,
swap_crypt_test_page_decrypt,
&swap_crypt_ctx.decrypt);
for (i = 0; i < 4096; i ++) {
if (swap_crypt_test_page_decrypt[i] !=
swap_crypt_test_page_ref[i]) {
panic("in place encryption test failed");
}
}
swap_crypt_ctx_tested = TRUE;
}
#endif
}
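/*
 * vm_page_encrypt:
 *
 * Encrypt the page's contents in place, mapping it into the kernel
 * first if the caller didn't provide a mapping.  The AES-CBC IV is
 * derived from the page's pager and paging offset, so identical
 * pages still encrypt to distinct ciphertexts.
 */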
void
vm_page_encrypt(
vm_page_t page,
vm_map_offset_t kernel_mapping_offset)
{
kern_return_t kr;
vm_map_size_t kernel_mapping_size;
vm_offset_t kernel_vaddr;
union {
unsigned char aes_iv[AES_BLOCK_SIZE];
struct {
memory_object_t pager_object;
vm_object_offset_t paging_offset;
} vm;
} encrypt_iv;
if (! vm_pages_encrypted) {
vm_pages_encrypted = TRUE;
}
assert(page->busy);
assert(page->dirty || page->precious);
if (page->encrypted) {
vm_page_encrypt_already_encrypted_counter++;
return;
}
ASSERT_PAGE_DECRYPTED(page);
vm_object_paging_begin(page->object);
if (kernel_mapping_offset == 0) {
kernel_mapping_size = PAGE_SIZE;
kr = vm_paging_map_object(&kernel_mapping_offset,
page,
page->object,
page->offset,
&kernel_mapping_size,
VM_PROT_READ | VM_PROT_WRITE,
FALSE);
if (kr != KERN_SUCCESS) {
panic("vm_page_encrypt: "
"could not map page in kernel: 0x%x\n",
kr);
}
} else {
kernel_mapping_size = 0;
}
kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
if (swap_crypt_ctx_initialized == FALSE) {
swap_crypt_ctx_initialize();
}
assert(swap_crypt_ctx_initialized);
bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
encrypt_iv.vm.pager_object = page->object->pager;
encrypt_iv.vm.paging_offset =
page->object->paging_offset + page->offset;
aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
swap_crypt_null_iv,
1,
&encrypt_iv.aes_iv[0],
&swap_crypt_ctx.encrypt);
aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
&encrypt_iv.aes_iv[0],
PAGE_SIZE / AES_BLOCK_SIZE,
(unsigned char *) kernel_vaddr,
&swap_crypt_ctx.encrypt);
vm_page_encrypt_counter++;
if (kernel_mapping_size != 0) {
vm_paging_unmap_object(page->object,
kernel_mapping_offset,
kernel_mapping_offset + kernel_mapping_size);
}
pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
page->encrypted = TRUE;
vm_object_paging_end(page->object);
}
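/*
 * vm_page_decrypt:
 *
 * Reverse vm_page_encrypt() using the same derived IV, then clear the
 * page's pmap state, since its contents were rewritten beneath any
 * existing mappings.
 */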
void
vm_page_decrypt(
vm_page_t page,
vm_map_offset_t kernel_mapping_offset)
{
kern_return_t kr;
vm_map_size_t kernel_mapping_size;
vm_offset_t kernel_vaddr;
union {
unsigned char aes_iv[AES_BLOCK_SIZE];
struct {
memory_object_t pager_object;
vm_object_offset_t paging_offset;
} vm;
} decrypt_iv;
assert(page->busy);
assert(page->encrypted);
vm_object_paging_begin(page->object);
if (kernel_mapping_offset == 0) {
kernel_mapping_size = PAGE_SIZE;
kr = vm_paging_map_object(&kernel_mapping_offset,
page,
page->object,
page->offset,
&kernel_mapping_size,
VM_PROT_READ | VM_PROT_WRITE,
FALSE);
if (kr != KERN_SUCCESS) {
panic("vm_page_decrypt: "
"could not map page in kernel: 0x%x\n",
kr);
}
} else {
kernel_mapping_size = 0;
}
kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
assert(swap_crypt_ctx_initialized);
bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
decrypt_iv.vm.pager_object = page->object->pager;
decrypt_iv.vm.paging_offset =
page->object->paging_offset + page->offset;
aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
swap_crypt_null_iv,
1,
&decrypt_iv.aes_iv[0],
&swap_crypt_ctx.encrypt);
aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
&decrypt_iv.aes_iv[0],
PAGE_SIZE / AES_BLOCK_SIZE,
(unsigned char *) kernel_vaddr,
&swap_crypt_ctx.decrypt);
vm_page_decrypt_counter++;
if (kernel_mapping_size != 0) {
vm_paging_unmap_object(page->object,
kernel_vaddr,
kernel_vaddr + PAGE_SIZE);
}
page->dirty = FALSE;
assert (page->cs_validated == FALSE);
pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
page->encrypted = FALSE;
pmap_sync_page_attributes_phys(page->phys_page);
assert(pmap_verify_free(page->phys_page));
page->pmapped = FALSE;
page->wpmapped = FALSE;
vm_object_paging_end(page->object);
}
#if DEVELOPMENT || DEBUG
unsigned long upl_encrypt_upls = 0;
unsigned long upl_encrypt_pages = 0;
#endif
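/*
 * upl_encrypt:
 *
 * Encrypt every page covered by the given range of a UPL (walking
 * sub-UPLs for a vectored UPL), disconnecting each page from all
 * pmaps before vm_page_encrypt() rewrites it in place.
 */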
void
upl_encrypt(
upl_t upl,
upl_offset_t crypt_offset,
upl_size_t crypt_size)
{
upl_size_t upl_size, subupl_size=crypt_size;
upl_offset_t offset_in_upl, subupl_offset=crypt_offset;
vm_object_t upl_object;
vm_object_offset_t upl_offset;
vm_page_t page;
vm_object_t shadow_object;
vm_object_offset_t shadow_offset;
vm_object_offset_t paging_offset;
vm_object_offset_t base_offset;
int isVectorUPL = 0;
upl_t vector_upl = NULL;
if((isVectorUPL = vector_upl_is_valid(upl)))
vector_upl = upl;
process_upl_to_encrypt:
if(isVectorUPL) {
crypt_size = subupl_size;
crypt_offset = subupl_offset;
upl = vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
if(upl == NULL)
panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
subupl_size -= crypt_size;
subupl_offset += crypt_size;
}
#if DEVELOPMENT || DEBUG
upl_encrypt_upls++;
upl_encrypt_pages += crypt_size / PAGE_SIZE;
#endif
upl_object = upl->map_object;
upl_offset = upl->offset;
upl_size = upl->size;
vm_object_lock(upl_object);
if (upl_object->pageout) {
shadow_object = upl_object->shadow;
shadow_offset = 0;
assert(upl_object->paging_offset == 0);
vm_object_lock(shadow_object);
} else {
shadow_object = upl_object;
shadow_offset = 0;
}
paging_offset = shadow_object->paging_offset;
vm_object_paging_begin(shadow_object);
if (shadow_object != upl_object)
vm_object_unlock(upl_object);
base_offset = shadow_offset;
base_offset += upl_offset;
base_offset += crypt_offset;
base_offset -= paging_offset;
assert(crypt_offset + crypt_size <= upl_size);
for (offset_in_upl = 0;
offset_in_upl < crypt_size;
offset_in_upl += PAGE_SIZE) {
page = vm_page_lookup(shadow_object,
base_offset + offset_in_upl);
if (page == VM_PAGE_NULL) {
panic("upl_encrypt: "
"no page for (obj=%p,off=%lld+%d)!\n",
shadow_object,
base_offset,
offset_in_upl);
}
pmap_disconnect(page->phys_page);
vm_page_encrypt(page, 0);
if (vm_object_lock_avoid(shadow_object)) {
vm_object_unlock(shadow_object);
mutex_pause(2);
vm_object_lock(shadow_object);
}
}
vm_object_paging_end(shadow_object);
vm_object_unlock(shadow_object);
if(isVectorUPL && subupl_size)
goto process_upl_to_encrypt;
}
#else
void
upl_encrypt(
__unused upl_t upl,
__unused upl_offset_t crypt_offset,
__unused upl_size_t crypt_size)
{
}
void
vm_page_encrypt(
__unused vm_page_t page,
__unused vm_map_offset_t kernel_mapping_offset)
{
}
void
vm_page_decrypt(
__unused vm_page_t page,
__unused vm_map_offset_t kernel_mapping_offset)
{
}
#endif
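/*
 * vm_pageout_queue_steal:
 *
 * Reclaim a page that had been handed to the pageout daemon: clear
 * its pending-request/cleaning/pageout state, credit the pageout
 * throttle, and unwire the page.
 */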
void
vm_pageout_queue_steal(vm_page_t page, boolean_t queues_locked)
{
page->list_req_pending = FALSE;
page->cleaning = FALSE;
page->pageout = FALSE;
if (!queues_locked) {
vm_page_lockspin_queues();
}
vm_pageout_throttle_up(page);
vm_page_unwire(page);
vm_page_steal_pageout_page++;
if (!queues_locked) {
vm_page_unlock_queues();
}
}
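/*
 * Vectored UPLs: a container UPL aggregating up to
 * MAX_VECTOR_UPL_ELEMENTS sub-UPLs, with a merged page list and
 * per-sub-UPL I/O state, so one I/O can span several independently
 * created UPLs.  The routines below manage the container.
 */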
upl_t
vector_upl_create(vm_offset_t upl_offset)
{
int vector_upl_size = sizeof(struct _vector_upl);
int i=0;
upl_t upl;
vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
upl = upl_create(0,UPL_VECTOR,0);
upl->vector_upl = vector_upl;
upl->offset = upl_offset;
vector_upl->size = 0;
vector_upl->offset = upl_offset;
vector_upl->invalid_upls=0;
vector_upl->num_upls=0;
vector_upl->pagelist = NULL;
for(i=0; i < MAX_VECTOR_UPL_ELEMENTS ; i++) {
vector_upl->upl_iostates[i].size = 0;
vector_upl->upl_iostates[i].offset = 0;
}
return upl;
}
void
vector_upl_deallocate(upl_t upl)
{
if(upl) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
if(vector_upl->invalid_upls != vector_upl->num_upls)
panic("Deallocating non-empty Vectored UPL\n");
kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
vector_upl->invalid_upls=0;
vector_upl->num_upls = 0;
vector_upl->pagelist = NULL;
vector_upl->size = 0;
vector_upl->offset = 0;
kfree(vector_upl, sizeof(struct _vector_upl));
upl->vector_upl = (vector_upl_t)0xdeadbeef;
}
else
panic("vector_upl_deallocate was passed a non-vectored upl\n");
}
else
panic("vector_upl_deallocate was passed a NULL upl\n");
}
boolean_t
vector_upl_is_valid(upl_t upl)
{
if(upl && ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl == NULL || vector_upl == (vector_upl_t)0xdeadbeef || vector_upl == (vector_upl_t)0xfeedbeef)
return FALSE;
else
return TRUE;
}
return FALSE;
}
boolean_t
vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
{
	if (vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			if (subupl) {
				if (io_size) {
					/*
					 * Append "subupl" to the vector, linking it
					 * back to the vector bookkeeping and growing
					 * the vector's size by this sub-I/O's size
					 * (rounded up to at least one page).
					 */
					if (io_size < PAGE_SIZE)
						io_size = PAGE_SIZE;
					subupl->vector_upl = (void *)vector_upl;
					vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
					vector_upl->size += io_size;
					upl->size += io_size;
				} else {
					/*
					 * io_size == 0: invalidate "subupl".  Return
					 * TRUE once every sub-UPL has been invalidated,
					 * i.e. the vector is fully drained.
					 */
					uint32_t i = 0, invalid_upls = 0;

					for (i = 0; i < vector_upl->num_upls; i++) {
						if (vector_upl->upl_elems[i] == subupl)
							break;
					}
					if (i == vector_upl->num_upls)
						panic("Trying to remove sub-upl when none exists");

					vector_upl->upl_elems[i] = NULL;
					invalid_upls = hw_atomic_add(&vector_upl->invalid_upls, 1);
					if (invalid_upls == vector_upl->num_upls)
						return TRUE;
					else
						return FALSE;
				}
			} else
				panic("vector_upl_set_subupl was passed a NULL upl element\n");
		} else
			panic("vector_upl_set_subupl was passed a non-vectored upl\n");
	} else
		panic("vector_upl_set_subupl was passed a NULL upl\n");
	return FALSE;
}
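/*
 * vector_upl_set_subupl() thus serves double duty: with a non-zero
 * io_size it appends a sub-UPL, and with io_size == 0 it marks an
 * existing sub-UPL invalid.  The TRUE return in the latter case tells
 * the caller the vector is now empty and may be deallocated.
 */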
void
vector_upl_set_pagelist(upl_t upl)
{
	if (vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;

			/*
			 * Build the vector's page list by concatenating the
			 * page lists of the individual sub-UPLs, and track
			 * the highest physical page seen across all of them.
			 */
			vector_upl->pagelist = (upl_page_info_array_t)
			    kalloc(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE));

			for (i = 0; i < vector_upl->num_upls; i++) {
				cur_upl_pagelist_size = sizeof(struct upl_page_info) *
				    vector_upl->upl_elems[i]->size / PAGE_SIZE;
				bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]),
				      (char *)vector_upl->pagelist + pagelist_size,
				      cur_upl_pagelist_size);
				pagelist_size += cur_upl_pagelist_size;
				if (vector_upl->upl_elems[i]->highest_page > upl->highest_page)
					upl->highest_page = vector_upl->upl_elems[i]->highest_page;
			}
			assert(pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
		} else
			panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
	} else
		panic("vector_upl_set_pagelist was passed a NULL upl\n");
}
upl_t
vector_upl_subupl_byindex(upl_t upl, uint32_t index)
{
	if (vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			if (index < vector_upl->num_upls)
				return vector_upl->upl_elems[index];
		} else
			panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
	}
	return NULL;
}
upl_t
vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
{
	if (vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			upl_t subupl = NULL;
			vector_upl_iostates_t subupl_state;

			/*
			 * Find the first sub-UPL whose I/O span covers
			 * *upl_offset, clip *upl_size so it does not run
			 * past that span, and translate *upl_offset from
			 * vector-relative to sub-UPL-relative.
			 */
			for (i = 0; i < vector_upl->num_upls; i++) {
				subupl = vector_upl->upl_elems[i];
				subupl_state = vector_upl->upl_iostates[i];
				if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
					if (subupl == NULL)
						return NULL;
					if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
						*upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
						if (*upl_size > subupl_state.size)
							*upl_size = subupl_state.size;
					}
					if (*upl_offset >= subupl_state.offset)
						*upl_offset -= subupl_state.offset;
					else if (i)
						panic("Vector UPL offset miscalculation\n");
					return subupl;
				}
			}
		} else
			panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
	}
	return NULL;
}
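/*
 * Worked example of the lookup above, assuming two sub-UPLs with
 * iostates {offset 0, size 0x2000} and {offset 0x2000, size 0x1000}
 * (hypothetical numbers): a request with *upl_offset = 0x2800 and
 * *upl_size = 0x1000 skips the first span (0x2800 > 0x1fff), matches
 * the second, is clipped to *upl_size = 0x3000 - 0x2800 = 0x800 so it
 * ends at the span boundary, and is translated to the sub-UPL-relative
 * *upl_offset = 0x2800 - 0x2000 = 0x800.
 */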
void
vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
{
	*v_upl_submap = NULL;

	if (vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			*v_upl_submap = vector_upl->submap;
			*submap_dst_addr = vector_upl->submap_dst_addr;
		} else
			panic("vector_upl_get_submap was passed a non-vectored UPL\n");
	} else
		panic("vector_upl_get_submap was passed a NULL UPL\n");
}

void
vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
{
	if (vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			vector_upl->submap = submap;
			vector_upl->submap_dst_addr = submap_dst_addr;
		} else
			panic("vector_upl_set_submap was passed a non-vectored UPL\n");
	} else
		panic("vector_upl_set_submap was passed a NULL UPL\n");
}
void
vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
{
	if (vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			for (i = 0; i < vector_upl->num_upls; i++) {
				if (vector_upl->upl_elems[i] == subupl)
					break;
			}
			if (i == vector_upl->num_upls)
				panic("setting sub-upl iostate when none exists");

			vector_upl->upl_iostates[i].offset = offset;
			if (size < PAGE_SIZE)
				size = PAGE_SIZE;
			vector_upl->upl_iostates[i].size = size;
		} else
			panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
	} else
		panic("vector_upl_set_iostate was passed a NULL UPL\n");
}

void
vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
{
	if (vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			for (i = 0; i < vector_upl->num_upls; i++) {
				if (vector_upl->upl_elems[i] == subupl)
					break;
			}
			if (i == vector_upl->num_upls)
				panic("getting sub-upl iostate when none exists");

			*offset = vector_upl->upl_iostates[i].offset;
			*size = vector_upl->upl_iostates[i].size;
		} else
			panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
	} else
		panic("vector_upl_get_iostate was passed a NULL UPL\n");
}
void
vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
{
	if (vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			if (index < vector_upl->num_upls) {
				*offset = vector_upl->upl_iostates[index].offset;
				*size = vector_upl->upl_iostates[index].size;
			} else
				*offset = *size = 0;
		} else
			panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
	} else
		panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
}
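/*
 * The iostate accessors above record and report each sub-UPL's span
 * (offset and size) within the vector; vector_upl_subupl_byoffset()
 * consumes the same state to route a vector-relative request to the
 * right sub-UPL.
 */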
upl_page_info_t *
upl_get_internal_vectorupl_pagelist(upl_t upl)
{
	/* Caller must pass a valid vector UPL; this is not re-checked here. */
	return ((vector_upl_t)(upl->vector_upl))->pagelist;
}

void *
upl_get_internal_vectorupl(upl_t upl)
{
	return upl->vector_upl;
}

vm_size_t
upl_get_internal_pagelist_offset(void)
{
	/* The internal page list is laid out immediately after the upl structure. */
	return sizeof(struct upl);
}

void
upl_clear_dirty(
	upl_t		upl,
	boolean_t	value)
{
	/*
	 * UPL_CLEAR_DIRTY asks that the covered pages be marked clean
	 * when this UPL is committed.
	 */
	if (value) {
		upl->flags |= UPL_CLEAR_DIRTY;
	} else {
		upl->flags &= ~UPL_CLEAR_DIRTY;
	}
}
#ifdef MACH_BSD

boolean_t upl_device_page(upl_page_info_t *upl)
{
	return (UPL_DEVICE_PAGE(upl));
}
boolean_t upl_page_present(upl_page_info_t *upl, int index)
{
	return (UPL_PAGE_PRESENT(upl, index));
}
boolean_t upl_speculative_page(upl_page_info_t *upl, int index)
{
	return (UPL_SPECULATIVE_PAGE(upl, index));
}
boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
{
	return (UPL_DIRTY_PAGE(upl, index));
}
boolean_t upl_valid_page(upl_page_info_t *upl, int index)
{
	return (UPL_VALID_PAGE(upl, index));
}
ppnum_t upl_phys_page(upl_page_info_t *upl, int index)
{
	return (UPL_PHYS_PAGE(upl, index));
}
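/*
 * The thin wrappers above exist so that BSD-side code can query UPL
 * page state through real functions rather than reaching into the
 * upl_page_info structure via the UPL_* macros directly.
 */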
void
vm_countdirtypages(void)
{
	vm_page_t m;
	int dpages;
	int pgopages;
	int precpages;

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	/*
	 * Walk the inactive, throttled and zero-fill queues, tallying
	 * dirty, pageout and precious pages.
	 */
	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	do {
		if (m == (vm_page_t)0) break;

		if (m->dirty) dpages++;
		if (m->pageout) pgopages++;
		if (m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;
	} while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_throttled);
	do {
		if (m == (vm_page_t)0) break;

		dpages++;
		assert(m->dirty);
		assert(!m->pageout);
		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;
	} while (!queue_end(&vm_page_queue_throttled, (queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_zf);
	do {
		if (m == (vm_page_t)0) break;

		if (m->dirty) dpages++;
		if (m->pageout) pgopages++;
		if (m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;
	} while (!queue_end(&vm_page_queue_zf, (queue_entry_t) m));
	vm_page_unlock_queues();

	printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	/*
	 * Same tally for the active queue.
	 */
	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_active);
	do {
		if (m == (vm_page_t)0) break;

		if (m->dirty) dpages++;
		if (m->pageout) pgopages++;
		if (m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;
	} while (!queue_end(&vm_page_queue_active, (queue_entry_t) m));
	vm_page_unlock_queues();

	printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
}
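/*
 * vm_countdirtypages() is a debugging aid: each printf line reports
 * dirty : pageout : precious counts, first for the inactive-class
 * queues combined ("IN Q"), then for the active queue ("AC Q").
 */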
#endif /* MACH_BSD */
ppnum_t upl_get_highest_page(
	upl_t	upl)
{
	return upl->highest_page;
}

upl_size_t upl_get_size(
	upl_t	upl)
{
	return upl->size;
}

#if UPL_DEBUG
kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
	upl->ubc_alias1 = alias1;
	upl->ubc_alias2 = alias2;
	return KERN_SUCCESS;
}
int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
	if (al)
		*al = upl->ubc_alias1;
	if (al2)
		*al2 = upl->ubc_alias2;
	return KERN_SUCCESS;
}
#endif /* UPL_DEBUG */
#if MACH_KDB
#include <ddb/db_output.h>
#include <ddb/db_print.h>
#include <vm/vm_print.h>

/* Route printf to the kernel debugger's output for these routines. */
#define printf kdbprintf

void db_pageout(void);

void
db_vm(void)
{
	iprintf("VM Statistics:\n");
	db_indent += 2;
	iprintf("pages:\n");
	db_indent += 2;
	iprintf("activ %5d inact %5d free %5d",
		vm_page_active_count, vm_page_inactive_count,
		vm_page_free_count);
	printf(" wire %5d gobbl %5d\n",
	       vm_page_wire_count, vm_page_gobble_count);
	db_indent -= 2;
	iprintf("target:\n");
	db_indent += 2;
	iprintf("min %5d inact %5d free %5d",
		vm_page_free_min, vm_page_inactive_target,
		vm_page_free_target);
	printf(" resrv %5d\n", vm_page_free_reserved);
	db_indent -= 2;
	iprintf("pause:\n");
	db_pageout();
	db_indent -= 2;
}

#if MACH_COUNTERS
extern int c_laundry_pages_freed;
#endif /* MACH_COUNTERS */

void
db_pageout(void)
{
	iprintf("Pageout Statistics:\n");
	db_indent += 2;
	iprintf("active %5d inactv %5d\n",
		vm_pageout_active, vm_pageout_inactive);
	iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
		vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
		vm_pageout_inactive_busy, vm_pageout_inactive_absent);
	iprintf("used %5d clean %5d dirty %5d\n",
		vm_pageout_inactive_used, vm_pageout_inactive_clean,
		vm_pageout_inactive_dirty);
#if MACH_COUNTERS
	iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
#endif /* MACH_COUNTERS */
#if MACH_CLUSTER_STATS
	iprintf("Cluster Statistics:\n");
	db_indent += 2;
	iprintf("dirtied %5d cleaned %5d collisions %5d\n",
		vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
		vm_pageout_cluster_collisions);
	iprintf("clusters %5d conversions %5d\n",
		vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);
	db_indent -= 2;
	iprintf("Target Statistics:\n");
	db_indent += 2;
	iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
		vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
		vm_pageout_target_page_freed);
	db_indent -= 2;
#endif /* MACH_CLUSTER_STATS */
	db_indent -= 2;
}
#endif /* MACH_KDB */