#include <stdint.h>
#include <debug.h>
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>
#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/upl.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/sdt.h>
#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/xpr.h>
#include <kern/kalloc.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>
#include <machine/vm_tuning.h>
#include <machine/commpage.h>
#include <vm/pmap.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>
#include <san/kasan.h>
#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif
extern int cs_debug;
#if UPL_DEBUG
#include <libkern/OSDebug.h>
#endif
extern void m_drain(void);
#if VM_PRESSURE_EVENTS
#if CONFIG_JETSAM
extern unsigned int memorystatus_available_pages;
extern unsigned int memorystatus_available_pages_pressure;
extern unsigned int memorystatus_available_pages_critical;
#else
extern uint64_t memorystatus_available_pages;
extern uint64_t memorystatus_available_pages_pressure;
extern uint64_t memorystatus_available_pages_critical;
#endif
extern unsigned int memorystatus_frozen_count;
extern unsigned int memorystatus_suspended_count;
extern vm_pressure_level_t memorystatus_vm_pressure_level;
int memorystatus_purge_on_warning = 2;
int memorystatus_purge_on_urgent = 5;
int memorystatus_purge_on_critical = 8;
void vm_pressure_response(void);
boolean_t vm_pressure_thread_running = FALSE;
extern void consider_vm_pressure_events(void);
#define MEMORYSTATUS_SUSPENDED_THRESHOLD 4
#endif
boolean_t vm_pressure_changed = FALSE;
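/*
 * Pageout daemon tunables.  The *_WAIT values are sleep intervals in
 * milliseconds (they are passed to assert_wait_timeout with a 1 ms
 * scale); the throttle, relief and laundry values are page counts.
 * The #ifndef guards allow per-platform configuration to override
 * these defaults.
 */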
#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE
#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE 100
#endif
#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE
#ifdef CONFIG_EMBEDDED
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
#else
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
#endif
#endif
#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF 100
#endif
#ifndef VM_PAGEOUT_INACTIVE_RELIEF
#define VM_PAGEOUT_INACTIVE_RELIEF 50
#endif
#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX 128UL
#endif
#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT 10
#endif
#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT 200
#endif
#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT 300
#endif
#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT 10
#endif
#ifndef VM_PAGEOUT_SWAP_WAIT
#define VM_PAGEOUT_SWAP_WAIT 50
#endif
#ifndef VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED
#define VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED 1000
#endif
#ifndef VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS
#define VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS 5
#endif
unsigned int vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
unsigned int vm_page_speculative_percentage = 5;
#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_page_speculative_percentage))
#endif
#ifndef VM_PAGE_INACTIVE_TARGET
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3)
#else
#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2)
#endif
#endif
#ifndef VM_PAGE_FREE_TARGET
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
#else
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
#endif
#endif
#ifndef VM_PAGE_FREE_MIN
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
#else
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
#endif
#endif
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_RESERVED_LIMIT 100
#define VM_PAGE_FREE_MIN_LIMIT 1500
#define VM_PAGE_FREE_TARGET_LIMIT 2000
#else
#define VM_PAGE_FREE_RESERVED_LIMIT 1700
#define VM_PAGE_FREE_MIN_LIMIT 3500
#define VM_PAGE_FREE_TARGET_LIMIT 4000
#endif
#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED(n) \
((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif
#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
#ifndef VM_PAGE_REACTIVATE_LIMIT
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
#else
#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif
#endif
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000
extern boolean_t hibernate_cleaning_in_progress;
unsigned int vm_pageout_scan_event_counter = 0;
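/*
 * Per-thread context for the internal (compressor) pageout I/O threads;
 * ciq[] is indexed by compressor thread id.
 */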
struct cq {
struct vm_pageout_queue *q;
void *current_chead;
char *scratch_buf;
int id;
};
struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];
#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);
boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);
boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
#endif
void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);
extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);
void vm_tests(void);
boolean_t vm_restricted_to_single_processor = FALSE;
#if !CONFIG_EMBEDDED
static boolean_t vm_pageout_waiter = FALSE;
static boolean_t vm_pageout_running = FALSE;
#endif
static thread_t vm_pageout_external_iothread = THREAD_NULL;
static thread_t vm_pageout_internal_iothread = THREAD_NULL;
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;
unsigned int vm_pageout_swap_wait = 0;
unsigned int vm_pageout_idle_wait = 0;
unsigned int vm_pageout_empty_wait = 0;
unsigned int vm_pageout_burst_wait = 0;
unsigned int vm_pageout_deadlock_wait = 0;
unsigned int vm_pageout_deadlock_relief = 0;
unsigned int vm_pageout_inactive_relief = 0;
unsigned int vm_pageout_burst_active_throttle = 0;
unsigned int vm_pageout_burst_inactive_throttle = 0;
int vm_upl_wait_for_pages = 0;
unsigned int vm_pageout_active = 0;
unsigned int vm_pageout_inactive = 0;
unsigned int vm_pageout_inactive_throttled = 0;
unsigned int vm_pageout_inactive_forced = 0;
unsigned int vm_pageout_inactive_nolock = 0;
unsigned int vm_pageout_inactive_avoid = 0;
unsigned int vm_pageout_inactive_busy = 0;
unsigned int vm_pageout_inactive_error = 0;
unsigned int vm_pageout_inactive_absent = 0;
unsigned int vm_pageout_inactive_notalive = 0;
unsigned int vm_pageout_inactive_used = 0;
unsigned int vm_pageout_cache_evicted = 0;
unsigned int vm_pageout_inactive_clean = 0;
unsigned int vm_pageout_speculative_clean = 0;
unsigned int vm_pageout_speculative_dirty = 0;
unsigned int vm_pageout_freed_from_cleaned = 0;
unsigned int vm_pageout_freed_from_speculative = 0;
unsigned int vm_pageout_freed_from_inactive_clean = 0;
unsigned int vm_pageout_freed_after_compression = 0;
extern uint32_t vm_compressor_pages_grabbed;
extern uint32_t c_segment_pages_compressed;
unsigned int vm_pageout_enqueued_cleaned_from_inactive_dirty = 0;
unsigned int vm_pageout_cleaned_reclaimed = 0;
unsigned int vm_pageout_cleaned_reactivated = 0;
unsigned int vm_pageout_cleaned_reference_reactivated = 0;
unsigned int vm_pageout_cleaned_volatile_reactivated = 0;
unsigned int vm_pageout_cleaned_fault_reactivated = 0;
unsigned int vm_pageout_cleaned_commit_reactivated = 0;
unsigned int vm_pageout_cleaned_busy = 0;
unsigned int vm_pageout_cleaned_nolock = 0;
unsigned int vm_pageout_inactive_dirty_internal = 0;
unsigned int vm_pageout_inactive_dirty_external = 0;
unsigned int vm_pageout_inactive_deactivated = 0;
unsigned int vm_pageout_inactive_anonymous = 0;
unsigned int vm_pageout_dirty_no_pager = 0;
unsigned int vm_pageout_purged_objects = 0;
unsigned int vm_stat_discard = 0;
unsigned int vm_stat_discard_sent = 0;
unsigned int vm_stat_discard_failure = 0;
unsigned int vm_stat_discard_throttle = 0;
unsigned int vm_pageout_reactivation_limit_exceeded = 0;
unsigned int vm_pageout_inactive_force_reclaim = 0;
unsigned int vm_pageout_skipped_external = 0;
unsigned int vm_pageout_scan_reclaimed_throttled = 0;
unsigned int vm_pageout_scan_active_throttled = 0;
unsigned int vm_pageout_scan_inactive_throttled_internal = 0;
unsigned int vm_pageout_scan_inactive_throttled_external = 0;
unsigned int vm_pageout_scan_throttle = 0;
unsigned int vm_pageout_scan_burst_throttle = 0;
unsigned int vm_pageout_scan_empty_throttle = 0;
unsigned int vm_pageout_scan_swap_throttle = 0;
unsigned int vm_pageout_scan_deadlock_detected = 0;
unsigned int vm_pageout_scan_active_throttle_success = 0;
unsigned int vm_pageout_scan_inactive_throttle_success = 0;
unsigned int vm_pageout_inactive_external_forced_jetsam_count = 0;
unsigned int vm_pageout_scan_throttle_deferred = 0;
unsigned int vm_pageout_scan_yield_unthrottled = 0;
unsigned int vm_page_speculative_count_drifts = 0;
unsigned int vm_page_speculative_count_drift_max = 0;
uint32_t vm_compressor_failed;
unsigned int vm_backing_store_low = 0;
unsigned int vm_pageout_out_of_line = 0;
unsigned int vm_pageout_in_place = 0;
unsigned int vm_page_steal_pageout_page = 0;
struct vm_config vm_config;
struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
unsigned int vm_page_speculative_target = 0;
vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL;
#if DEVELOPMENT || DEBUG
unsigned long vm_cs_validated_resets = 0;
#endif
int vm_debug_events = 0;
#if CONFIG_MEMORYSTATUS
#if !CONFIG_JETSAM
extern boolean_t memorystatus_idle_exit_from_VM(void);
#endif
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
extern void memorystatus_on_pageout_scan_end(void);
uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
#if DEVELOPMENT || DEBUG
uint32_t vm_grab_anon_overrides = 0;
uint32_t vm_grab_anon_nops = 0;
#endif
#endif
#if MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
#define CLUSTER_STAT(clause) clause
#else
#define CLUSTER_STAT(clause)
#endif
#if DEVELOPMENT || DEBUG
vmct_stats_t vmct_stats;
#endif
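/*
 * Routine:	vm_pageout_object_terminate
 * Purpose:	Destroy a pageout object and perform the required cleanup:
 *		each page in the object describes a page in the shadow
 *		object whose pageout has completed (or been abandoned) and
 *		which is freed, unwired or reactivated accordingly.
 * Conditions:	The object must be locked, and will be returned locked.
 */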
void
vm_pageout_object_terminate(
vm_object_t object)
{
vm_object_t shadow_object;
assert(object->pageout);
shadow_object = object->shadow;
vm_object_lock(shadow_object);
while (!vm_page_queue_empty(&object->memq)) {
vm_page_t p, m;
vm_object_offset_t offset;
p = (vm_page_t) vm_page_queue_first(&object->memq);
assert(p->private);
assert(p->free_when_done);
p->free_when_done = FALSE;
assert(!p->cleaning);
assert(!p->laundry);
offset = p->offset;
VM_PAGE_FREE(p);
p = VM_PAGE_NULL;
m = vm_page_lookup(shadow_object,
offset + object->vo_shadow_offset);
if (m == VM_PAGE_NULL)
continue;
assert((m->dirty) || (m->precious) ||
(m->busy && m->cleaning));
vm_page_lock_queues();
if (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)
vm_pageout_throttle_up(m);
if (m->free_when_done) {
assert(m->busy);
assert(m->vm_page_q_state == VM_PAGE_IS_WIRED);
assert(m->wire_count == 1);
m->cleaning = FALSE;
m->free_when_done = FALSE;
#if MACH_CLUSTER_STATS
if (m->wanted) vm_pageout_target_collisions++;
#endif
if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
} else {
m->dirty = FALSE;
}
if (m->dirty) {
CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
vm_page_unwire(m, TRUE);
VM_STAT_INCR(reactivations);
PAGE_WAKEUP_DONE(m);
} else {
CLUSTER_STAT(vm_pageout_target_page_freed++;)
vm_page_free(m);
}
vm_page_unlock_queues();
continue;
}
if ((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) && !m->private) {
if (m->reference)
vm_page_activate(m);
else
vm_page_deactivate(m);
}
if (m->overwriting) {
if (m->busy) {
pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
m->busy = FALSE;
m->absent = FALSE;
} else {
assert(VM_PAGE_WIRED(m));
vm_page_unwire(m, TRUE);
}
m->overwriting = FALSE;
} else {
#if MACH_CLUSTER_STATS
m->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m));
if (m->dirty) vm_pageout_cluster_dirtied++;
else vm_pageout_cluster_cleaned++;
if (m->wanted) vm_pageout_cluster_collisions++;
#else
m->dirty = FALSE;
#endif
}
m->cleaning = FALSE;
PAGE_WAKEUP(m);
vm_page_unlock_queues();
}
vm_object_activity_end(shadow_object);
vm_object_unlock(shadow_object);
assert(object->ref_count == 0);
assert(object->paging_in_progress == 0);
assert(object->activity_in_progress == 0);
assert(object->resident_page_count == 0);
return;
}
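/*
 * vm_pageclean_setup:  prepare page "m" for a pageout in place by
 * marking it clean-in-progress and transferring its physical page to
 * the fictitious page "new_m", which is then wired and inserted into
 * the pageout object at "new_offset".
 */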
static void
vm_pageclean_setup(
vm_page_t m,
vm_page_t new_m,
vm_object_t new_object,
vm_object_offset_t new_offset)
{
assert(!m->busy);
#if 0
assert(!m->cleaning);
#endif
XPR(XPR_VM_PAGEOUT,
"vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
VM_PAGE_OBJECT(m), m->offset, m,
new_m, new_offset);
pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
m->cleaning = TRUE;
SET_PAGE_DIRTY(m, FALSE);
m->precious = FALSE;
assert(new_m->fictitious);
assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
new_m->fictitious = FALSE;
new_m->private = TRUE;
new_m->free_when_done = TRUE;
VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));
vm_page_lockspin_queues();
vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
vm_page_unlock_queues();
vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
assert(!new_m->wanted);
new_m->busy = FALSE;
}
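/*
 * Routine:	vm_pageout_initialize_page
 * Purpose:	Cause the specified page to be initialized in its pager
 *		via memory_object_data_initialize(); used to push pages
 *		into a copy object when they are modified in the
 *		permanent object.
 * Conditions:	The page must be busy and not on any pageout queue;
 *		its object must be locked.
 */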
void
vm_pageout_initialize_page(
vm_page_t m)
{
vm_object_t object;
vm_object_offset_t paging_offset;
memory_object_t pager;
XPR(XPR_VM_PAGEOUT,
"vm_pageout_initialize_page, page 0x%X\n",
m, 0, 0, 0, 0);
assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
object = VM_PAGE_OBJECT(m);
assert(m->busy);
assert(object->internal);
assert(!m->absent);
assert(!m->error);
assert(m->dirty);
paging_offset = m->offset + object->paging_offset;
if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
panic("reservation without pageout?");
VM_PAGE_FREE(m);
vm_object_unlock(object);
return;
}
pager = object->pager;
if (pager == MEMORY_OBJECT_NULL) {
panic("missing pager for copy object");
VM_PAGE_FREE(m);
return;
}
pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
SET_PAGE_DIRTY(m, FALSE);
vm_object_paging_begin(object);
vm_object_unlock(object);
memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
vm_object_lock(object);
vm_object_paging_end(object);
}
#if MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES 16
struct {
unsigned long pages_in_cluster;
unsigned long pages_at_higher_offsets;
unsigned long pages_at_lower_offsets;
} cluster_stats[MAXCLUSTERPAGES];
#endif
int32_t vmct_active = 0;
typedef enum vmct_state_t {
VMCT_IDLE,
VMCT_AWAKENED,
VMCT_ACTIVE,
} vmct_state_t;
vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
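/*
 * Routine:	vm_pageout_cluster
 * Purpose:	Queue the given page to the appropriate pageout I/O
 *		thread (internal/compressor or external) and wake that
 *		thread if it is idle.
 * Conditions:	The page queues and the page's object must be locked
 *		(exclusive); the page must be dirty or precious, not
 *		wired, and not already on a pageout queue.
 */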
void
vm_pageout_cluster(vm_page_t m)
{
vm_object_t object = VM_PAGE_OBJECT(m);
struct vm_pageout_queue *q;
XPR(XPR_VM_PAGEOUT,
"vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
object, m->offset, m, 0, 0);
VM_PAGE_CHECK(m);
LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
vm_object_lock_assert_exclusive(object);
assert((m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
assert(!m->cleaning && !m->laundry);
assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
vm_object_activity_begin(object);
if (object->internal == TRUE) {
assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
m->busy = TRUE;
q = &vm_pageout_queue_internal;
} else
q = &vm_pageout_queue_external;
m->laundry = TRUE;
q->pgo_laundry++;
m->vm_page_q_state = VM_PAGE_ON_PAGEOUT_Q;
vm_page_queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
if (q->pgo_idle == TRUE) {
q->pgo_idle = FALSE;
thread_wakeup((event_t) &q->pgo_pending);
}
VM_PAGE_CHECK(m);
}
unsigned long vm_pageout_throttle_up_count = 0;
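/*
 * A page is back from laundry or is being stolen back from the
 * laundering state:  take it off its pageout queue, drop the queue's
 * laundry count, and wake anyone throttled on (or draining) that queue.
 *
 * The page queues and the page's object must be locked.
 */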
void
vm_pageout_throttle_up(
vm_page_t m)
{
struct vm_pageout_queue *q;
vm_object_t m_object;
m_object = VM_PAGE_OBJECT(m);
assert(m_object != VM_OBJECT_NULL);
assert(m_object != kernel_object);
LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
vm_object_lock_assert_exclusive(m_object);
vm_pageout_throttle_up_count++;
if (m_object->internal == TRUE)
q = &vm_pageout_queue_internal;
else
q = &vm_pageout_queue_external;
if (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) {
vm_page_queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
VM_PAGE_ZERO_PAGEQ_ENTRY(m);
vm_object_activity_end(m_object);
}
if (m->laundry == TRUE) {
m->laundry = FALSE;
q->pgo_laundry--;
if (q->pgo_throttled == TRUE) {
q->pgo_throttled = FALSE;
thread_wakeup((event_t) &q->pgo_laundry);
}
if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
q->pgo_draining = FALSE;
thread_wakeup((event_t) (&q->pgo_laundry + 1));
}
}
}
static void
vm_pageout_throttle_up_batch(
struct vm_pageout_queue *q,
int batch_cnt)
{
LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
vm_pageout_throttle_up_count += batch_cnt;
q->pgo_laundry -= batch_cnt;
if (q->pgo_throttled == TRUE) {
q->pgo_throttled = FALSE;
thread_wakeup((event_t) &q->pgo_laundry);
}
if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
q->pgo_draining = FALSE;
thread_wakeup((event_t) (&q->pgo_laundry + 1));
}
}
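/*
 * Ring buffer of per-sample pageout statistics:  one slot is filled
 * per sampling period, vm_pageout_stat_now names the slot currently
 * being accumulated into, and the BEFORE/AFTER macros step through
 * the ring with wraparound.
 */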
#define VM_PAGEOUT_STAT_SIZE 31
struct vm_pageout_stat {
unsigned int considered;
unsigned int reclaimed_clean;
unsigned int pages_compressed;
unsigned int pages_grabbed_by_compressor;
unsigned int cleaned_dirty_external;
unsigned int throttled_internal_q;
unsigned int throttled_external_q;
unsigned int failed_compressions;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0,0,0,0,0,0,0}, };
unsigned int vm_pageout_stat_now = 0;
unsigned int vm_memory_pressure = 0;
#define VM_PAGEOUT_STAT_BEFORE(i) \
(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
#if VM_PAGE_BUCKETS_CHECK
int vm_page_buckets_check_interval = 10;
#endif
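/*
 * compute_memory_pressure is invoked periodically (once per sampling
 * interval) to publish the most recent clean-reclaim rate as the
 * commpage memory-pressure value and to advance (and zero) the next
 * slot of the vm_pageout_stats ring.
 */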
void
compute_memory_pressure(
__unused void *arg)
{
unsigned int vm_pageout_next;
#if VM_PAGE_BUCKETS_CHECK
static int counter = 0;
if ((++counter % vm_page_buckets_check_interval) == 0) {
vm_page_buckets_check();
}
#endif
vm_memory_pressure =
vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed_clean;
commpage_set_memory_pressure( vm_memory_pressure );
vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
vm_pageout_stats[vm_pageout_next].considered = 0;
vm_pageout_stats[vm_pageout_next].reclaimed_clean = 0;
vm_pageout_stats[vm_pageout_next].throttled_internal_q = 0;
vm_pageout_stats[vm_pageout_next].throttled_external_q = 0;
vm_pageout_stats[vm_pageout_next].cleaned_dirty_external = 0;
vm_pageout_stats[vm_pageout_next].pages_compressed = 0;
vm_pageout_stats[vm_pageout_next].pages_grabbed_by_compressor = 0;
vm_pageout_stats[vm_pageout_next].failed_compressions = 0;
vm_pageout_stat_now = vm_pageout_next;
}
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
unsigned int page_free_target, page_free_count, page_free_wanted;
page_free_target = vm_page_free_target;
page_free_count = vm_page_free_count;
if (page_free_target > page_free_count) {
page_free_wanted = page_free_target - page_free_count;
} else {
page_free_wanted = 0;
}
return page_free_wanted;
}
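/*
 * mach_vm_pressure_monitor:  optionally block until memory pressure
 * builds (free count below target), then report how many pages the
 * pageout daemon still wants and, if requested, how many clean pages
 * were reclaimed over the last "nsecs_monitored" sampling periods of
 * the vm_pageout_stats ring.
 */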
kern_return_t
mach_vm_pressure_monitor(
boolean_t wait_for_pressure,
unsigned int nsecs_monitored,
unsigned int *pages_reclaimed_p,
unsigned int *pages_wanted_p)
{
wait_result_t wr;
unsigned int vm_pageout_then, vm_pageout_now;
unsigned int pages_reclaimed;
if (wait_for_pressure) {
while (vm_page_free_count >= vm_page_free_target) {
wr = assert_wait((event_t) &vm_page_free_wanted,
THREAD_INTERRUPTIBLE);
if (wr == THREAD_WAITING) {
wr = thread_block(THREAD_CONTINUE_NULL);
}
if (wr == THREAD_INTERRUPTED) {
return KERN_ABORTED;
}
if (wr == THREAD_AWAKENED) {
break;
}
}
}
if (pages_wanted_p != NULL) {
*pages_wanted_p = mach_vm_ctl_page_free_wanted();
}
if (pages_reclaimed_p == NULL) {
return KERN_SUCCESS;
}
vm_pageout_now = vm_pageout_stat_now;
pages_reclaimed = 0;
for (vm_pageout_then =
VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
vm_pageout_then != vm_pageout_now &&
nsecs_monitored-- != 0;
vm_pageout_then =
VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed_clean;
}
*pages_reclaimed_p = pages_reclaimed;
return KERN_SUCCESS;
}
#if DEVELOPMENT || DEBUG
static void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);
boolean_t vm_pageout_disconnect_all_pages_active = FALSE;
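/*
 * DEVELOPMENT/DEBUG only:  walk the throttled, anonymous and active
 * queues and pmap_disconnect every eligible page, so that reference
 * and modify state must be re-established through new faults.
 */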
void
vm_pageout_disconnect_all_pages()
{
vm_page_lock_queues();
if (vm_pageout_disconnect_all_pages_active == TRUE) {
vm_page_unlock_queues();
return;
}
vm_pageout_disconnect_all_pages_active = TRUE;
vm_page_unlock_queues();
vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);
vm_pageout_disconnect_all_pages_active = FALSE;
}
void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
{
vm_page_t m;
vm_object_t t_object = NULL;
vm_object_t l_object = NULL;
vm_object_t m_object = NULL;
int delayed_unlock = 0;
int try_failed_count = 0;
int disconnected_count = 0;
int paused_count = 0;
int object_locked_count = 0;
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
q, qcount, 0, 0, 0);
vm_page_lock_queues();
while (qcount && !vm_page_queue_empty(q)) {
LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
m = (vm_page_t) vm_page_queue_first(q);
m_object = VM_PAGE_OBJECT(m);
if (m_object != l_object) {
if (l_object != NULL) {
vm_object_unlock(l_object);
l_object = NULL;
}
if (m_object != t_object)
try_failed_count = 0;
if ( !vm_object_lock_try_scan(m_object)) {
if (try_failed_count > 20) {
goto reenter_pg_on_q;
}
vm_page_unlock_queues();
mutex_pause(try_failed_count++);
vm_page_lock_queues();
delayed_unlock = 0;
paused_count++;
t_object = m_object;
continue;
}
object_locked_count++;
l_object = m_object;
}
if ( !m_object->alive || m->cleaning || m->laundry || m->busy || m->absent || m->error || m->free_when_done) {
goto reenter_pg_on_q;
}
if (m->pmapped == TRUE) {
pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
disconnected_count++;
}
reenter_pg_on_q:
vm_page_queue_remove(q, m, vm_page_t, pageq);
vm_page_queue_enter(q, m, vm_page_t, pageq);
qcount--;
try_failed_count = 0;
if (delayed_unlock++ > 128) {
if (l_object != NULL) {
vm_object_unlock(l_object);
l_object = NULL;
}
lck_mtx_yield(&vm_page_queue_lock);
delayed_unlock = 0;
}
}
if (l_object != NULL) {
vm_object_unlock(l_object);
l_object = NULL;
}
vm_page_unlock_queues();
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
q, disconnected_count, object_locked_count, paused_count, 0);
}
#endif
static void
vm_pageout_page_queue(vm_page_queue_head_t *, int);
boolean_t vm_pageout_anonymous_pages_active = FALSE;
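/*
 * vm_pageout_anonymous_pages:  if the compressor is configured, push
 * every eligible anonymous page from the throttled, anonymous and
 * active queues out through the compressor, then consider swapping
 * out the resulting segments.
 */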
void
vm_pageout_anonymous_pages()
{
if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
vm_page_lock_queues();
if (vm_pageout_anonymous_pages_active == TRUE) {
vm_page_unlock_queues();
return;
}
vm_pageout_anonymous_pages_active = TRUE;
vm_page_unlock_queues();
vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);
if (VM_CONFIG_SWAP_IS_PRESENT)
vm_consider_swapping();
vm_page_lock_queues();
vm_pageout_anonymous_pages_active = FALSE;
vm_page_unlock_queues();
}
}
void
vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
{
vm_page_t m;
vm_object_t t_object = NULL;
vm_object_t l_object = NULL;
vm_object_t m_object = NULL;
int delayed_unlock = 0;
int try_failed_count = 0;
int refmod_state;
int pmap_options;
struct vm_pageout_queue *iq;
ppnum_t phys_page;
iq = &vm_pageout_queue_internal;
vm_page_lock_queues();
while (qcount && !vm_page_queue_empty(q)) {
LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
if (VM_PAGE_Q_THROTTLED(iq)) {
if (l_object != NULL) {
vm_object_unlock(l_object);
l_object = NULL;
}
iq->pgo_draining = TRUE;
assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
vm_page_unlock_queues();
thread_block(THREAD_CONTINUE_NULL);
vm_page_lock_queues();
delayed_unlock = 0;
continue;
}
m = (vm_page_t) vm_page_queue_first(q);
m_object = VM_PAGE_OBJECT(m);
if (m_object != l_object) {
if ( !m_object->internal)
goto reenter_pg_on_q;
if (l_object != NULL) {
vm_object_unlock(l_object);
l_object = NULL;
}
if (m_object != t_object)
try_failed_count = 0;
if ( !vm_object_lock_try_scan(m_object)) {
if (try_failed_count > 20) {
goto reenter_pg_on_q;
}
vm_page_unlock_queues();
mutex_pause(try_failed_count++);
vm_page_lock_queues();
delayed_unlock = 0;
t_object = m_object;
continue;
}
l_object = m_object;
}
if ( !m_object->alive || m->cleaning || m->laundry || m->busy || m->absent || m->error || m->free_when_done) {
goto reenter_pg_on_q;
}
phys_page = VM_PAGE_GET_PHYS_PAGE(m);
if (m->reference == FALSE && m->pmapped == TRUE) {
refmod_state = pmap_get_refmod(phys_page);
if (refmod_state & VM_MEM_REFERENCED)
m->reference = TRUE;
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
}
if (m->reference == TRUE) {
m->reference = FALSE;
pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
goto reenter_pg_on_q;
}
if (m->pmapped == TRUE) {
if (m->dirty || m->precious) {
pmap_options = PMAP_OPTIONS_COMPRESSOR;
} else {
pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
}
refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
}
if ( !m->dirty && !m->precious) {
vm_page_unlock_queues();
VM_PAGE_FREE(m);
vm_page_lock_queues();
delayed_unlock = 0;
goto next_pg;
}
if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
if (!m_object->pager_initialized) {
vm_page_unlock_queues();
vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);
if (!m_object->pager_initialized)
vm_object_compressor_pager_create(m_object);
vm_page_lock_queues();
delayed_unlock = 0;
}
if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL)
goto reenter_pg_on_q;
continue;
}
vm_page_queues_remove(m, TRUE);
LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
vm_pageout_cluster(m);
goto next_pg;
reenter_pg_on_q:
vm_page_queue_remove(q, m, vm_page_t, pageq);
vm_page_queue_enter(q, m, vm_page_t, pageq);
next_pg:
qcount--;
try_failed_count = 0;
if (delayed_unlock++ > 128) {
if (l_object != NULL) {
vm_object_unlock(l_object);
l_object = NULL;
}
lck_mtx_yield(&vm_page_queue_lock);
delayed_unlock = 0;
}
}
if (l_object != NULL) {
vm_object_unlock(l_object);
l_object = NULL;
}
vm_page_unlock_queues();
}
extern void vm_pageout_io_throttle(void);
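/*
 * If a page being taken off the inactive path is marked reusable (or
 * belongs to an all-reusable object), drop the reusable state for that
 * page first so the reuse accounting stays accurate.
 */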
#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj) \
MACRO_BEGIN \
\
assert(VM_PAGE_OBJECT((m)) == (obj)); \
if ((m)->reusable || \
(obj)->all_reusable) { \
vm_object_reuse_pages((obj), \
(m)->offset, \
(m)->offset + PAGE_SIZE_64, \
FALSE); \
} \
MACRO_END
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 64
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024
#define FCS_IDLE 0
#define FCS_DELAYED 1
#define FCS_DEADLOCK_DETECTED 2
struct flow_control {
int state;
mach_timespec_t ts;
};
#if CONFIG_BACKGROUND_QUEUE
uint64_t vm_pageout_skipped_bq_internal = 0;
uint64_t vm_pageout_considered_bq_internal = 0;
uint64_t vm_pageout_considered_bq_external = 0;
uint64_t vm_pageout_rejected_bq_internal = 0;
uint64_t vm_pageout_rejected_bq_external = 0;
#endif
uint32_t vm_pageout_no_victim = 0;
uint32_t vm_pageout_considered_page = 0;
uint32_t vm_page_filecache_min = 0;
#define ANONS_GRABBED_LIMIT 2
#if CONFIG_SECLUDED_MEMORY
extern vm_page_t vm_page_grab_secluded(void);
uint64_t vm_pageout_secluded_burst_count = 0;
#endif
static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);
static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);
#define VM_PAGEOUT_PB_NO_ACTION 0
#define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
#define VM_PAGEOUT_PB_THREAD_YIELD 2
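/*
 * Helpers for vm_pageout_scan:  flush the locally batched free list
 * (or yield the page-queue lock) when the delayed-unlock limit is
 * reached, and quiesce scan state before blocking.
 */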
static void
vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
{
if (*local_freeq) {
vm_page_unlock_queues();
VM_DEBUG_EVENT(
vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
vm_page_free_count, *local_freed, 0, 1);
vm_page_free_list(*local_freeq, TRUE);
VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
vm_page_free_count, 0, 0, 1);
*local_freeq = NULL;
*local_freed = 0;
vm_page_lock_queues();
} else {
lck_mtx_yield(&vm_page_queue_lock);
}
*delayed_unlock = 1;
}
static void
vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
vm_page_t *local_freeq, int *local_freed, int action)
{
vm_page_unlock_queues();
if (*object != NULL) {
vm_object_unlock(*object);
*object = NULL;
}
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
if (*local_freeq) {
VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
vm_page_free_count, *local_freed, 0, 2);
vm_page_free_list(*local_freeq, TRUE);
VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
vm_page_free_count, 0, 0, 2);
*local_freeq = NULL;
*local_freed = 0;
}
*delayed_unlock = 1;
switch (action) {
case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
vm_consider_waking_compactor_swapper();
break;
case VM_PAGEOUT_PB_THREAD_YIELD:
thread_yield_internal(1);
break;
case VM_PAGEOUT_PB_NO_ACTION:
default:
break;
}
vm_page_lock_queues();
}
int last_vm_pageout_freed_from_inactive_clean = 0;
int last_vm_pageout_freed_from_cleaned = 0;
int last_vm_pageout_freed_from_speculative = 0;
int last_vm_pageout_freed_after_compression = 0;
int last_vm_pageout_enqueued_cleaned_from_inactive_dirty = 0;
int last_vm_pageout_inactive_force_reclaim = 0;
int last_vm_pageout_scan_inactive_throttled_external = 0;
int last_vm_pageout_scan_inactive_throttled_internal = 0;
int last_vm_pageout_reactivation_limit_exceeded = 0;
int last_vm_pageout_considered_page = 0;
int last_vm_compressor_pages_grabbed = 0;
int last_vm_compressor_failed = 0;
int last_vm_pageout_skipped_external = 0;
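/*
 * update_vm_info:  emit the VM_INFO1..VM_INFO7 kdebug tracepoints with
 * current page counts and with deltas of the pageout counters since
 * the last call, folding those deltas into the current
 * vm_pageout_stats slot.
 */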
void update_vm_info(void)
{
int tmp1, tmp2, tmp3, tmp4;
if (!kdebug_enable)
return;
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
vm_page_active_count,
vm_page_speculative_count,
vm_page_inactive_count,
vm_page_anonymous_count,
0);
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
vm_page_free_count,
vm_page_wire_count,
VM_PAGE_COMPRESSOR_COUNT,
0, 0);
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
c_segment_pages_compressed,
vm_page_internal_count,
vm_page_external_count,
vm_page_xpmapped_external_count,
0);
if ((vm_pageout_considered_page - last_vm_pageout_considered_page) == 0 &&
(vm_pageout_enqueued_cleaned_from_inactive_dirty - last_vm_pageout_enqueued_cleaned_from_inactive_dirty == 0) &&
(vm_pageout_freed_after_compression - last_vm_pageout_freed_after_compression == 0))
return;
tmp1 = vm_pageout_considered_page;
tmp2 = vm_pageout_freed_from_speculative;
tmp3 = vm_pageout_freed_from_inactive_clean;
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
tmp1 - last_vm_pageout_considered_page,
tmp2 - last_vm_pageout_freed_from_speculative,
tmp3 - last_vm_pageout_freed_from_inactive_clean,
0, 0);
last_vm_pageout_considered_page = tmp1;
last_vm_pageout_freed_from_speculative = tmp2;
last_vm_pageout_freed_from_inactive_clean = tmp3;
tmp1 = vm_pageout_scan_inactive_throttled_external;
tmp2 = vm_pageout_enqueued_cleaned_from_inactive_dirty;
tmp3 = vm_pageout_freed_from_cleaned;
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
tmp1 - last_vm_pageout_scan_inactive_throttled_external,
tmp2 - last_vm_pageout_enqueued_cleaned_from_inactive_dirty,
tmp3 - last_vm_pageout_freed_from_cleaned,
0, 0);
vm_pageout_stats[vm_pageout_stat_now].throttled_external_q += (tmp1 - last_vm_pageout_scan_inactive_throttled_external);
vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external += (tmp2 - last_vm_pageout_enqueued_cleaned_from_inactive_dirty);
last_vm_pageout_scan_inactive_throttled_external = tmp1;
last_vm_pageout_enqueued_cleaned_from_inactive_dirty = tmp2;
last_vm_pageout_freed_from_cleaned = tmp3;
tmp1 = vm_pageout_scan_inactive_throttled_internal;
tmp2 = vm_pageout_freed_after_compression;
tmp3 = vm_compressor_pages_grabbed;
tmp4 = vm_pageout_skipped_external;
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
tmp1 - last_vm_pageout_scan_inactive_throttled_internal,
tmp2 - last_vm_pageout_freed_after_compression,
tmp3 - last_vm_compressor_pages_grabbed,
tmp4 - last_vm_pageout_skipped_external,
0);
vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q += (tmp1 - last_vm_pageout_scan_inactive_throttled_internal);
vm_pageout_stats[vm_pageout_stat_now].pages_compressed += (tmp2 - last_vm_pageout_freed_after_compression);
vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor += (tmp3 - last_vm_compressor_pages_grabbed);
last_vm_pageout_scan_inactive_throttled_internal = tmp1;
last_vm_pageout_freed_after_compression = tmp2;
last_vm_compressor_pages_grabbed = tmp3;
last_vm_pageout_skipped_external = tmp4;
if ((vm_pageout_reactivation_limit_exceeded - last_vm_pageout_reactivation_limit_exceeded) == 0 &&
(vm_pageout_inactive_force_reclaim - last_vm_pageout_inactive_force_reclaim) == 0 &&
(vm_compressor_failed - last_vm_compressor_failed) == 0)
return;
tmp1 = vm_pageout_reactivation_limit_exceeded;
tmp2 = vm_pageout_inactive_force_reclaim;
tmp3 = vm_compressor_failed;
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
tmp1 - last_vm_pageout_reactivation_limit_exceeded,
tmp2 - last_vm_pageout_inactive_force_reclaim,
tmp3 - last_vm_compressor_failed,
0, 0);
vm_pageout_stats[vm_pageout_stat_now].failed_compressions += (tmp3 - last_vm_compressor_failed);
last_vm_pageout_reactivation_limit_exceeded = tmp1;
last_vm_pageout_inactive_force_reclaim = tmp2;
last_vm_compressor_failed = tmp3;
}
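/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
 * held and vm_page_free_wanted == 0.
 */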
void
vm_pageout_scan(void)
{
unsigned int loop_count = 0;
unsigned int inactive_burst_count = 0;
unsigned int active_burst_count = 0;
unsigned int reactivated_this_call;
unsigned int reactivate_limit;
vm_page_t local_freeq = NULL;
int local_freed = 0;
int delayed_unlock;
int delayed_unlock_limit = 0;
int refmod_state = 0;
int vm_pageout_deadlock_target = 0;
struct vm_pageout_queue *iq;
struct vm_pageout_queue *eq;
struct vm_speculative_age_q *sq;
struct flow_control flow_control = { 0, { 0, 0 } };
boolean_t inactive_throttled = FALSE;
boolean_t try_failed;
mach_timespec_t ts;
unsigned int msecs = 0;
vm_object_t object = NULL;
uint32_t inactive_reclaim_run;
boolean_t exceeded_burst_throttle;
boolean_t grab_anonymous = FALSE;
boolean_t force_anonymous = FALSE;
boolean_t force_speculative_aging = FALSE;
int anons_grabbed = 0;
int page_prev_q_state = 0;
#if CONFIG_BACKGROUND_QUEUE
boolean_t page_from_bg_q = FALSE;
#endif
int cache_evict_throttle = 0;
uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
int force_purge = 0;
#define DELAY_SPECULATIVE_AGE 1000
int delay_speculative_age = 0;
vm_object_t m_object = VM_OBJECT_NULL;
#if VM_PRESSURE_EVENTS
vm_pressure_level_t pressure_level;
#endif
VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
vm_pageout_speculative_clean, vm_pageout_inactive_clean,
vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
flow_control.state = FCS_IDLE;
iq = &vm_pageout_queue_internal;
eq = &vm_pageout_queue_external;
sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
pmap_release_pages_fast();
vm_page_lock_queues();
delayed_unlock = 1;
reactivated_this_call = 0;
reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
vm_page_inactive_count);
inactive_reclaim_run = 0;
vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
Restart:
assert(object == NULL);
assert(delayed_unlock != 0);
vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
vm_page_inactive_count +
vm_page_speculative_count);
vm_page_anonymous_min = vm_page_inactive_target / 20;
vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);
if (vm_page_speculative_percentage > 50)
vm_page_speculative_percentage = 50;
else if (vm_page_speculative_percentage <= 0)
vm_page_speculative_percentage = 1;
vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
vm_page_inactive_count);
try_failed = FALSE;
for (;;) {
vm_page_t m;
DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
if (vm_upl_wait_for_pages < 0)
vm_upl_wait_for_pages = 0;
delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX)
delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
#if CONFIG_SECLUDED_MEMORY
if (vm_page_secluded_count > vm_page_secluded_target) {
unsigned int secluded_overflow;
vm_page_t secluded_page;
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
active_burst_count = MIN(vm_pageout_burst_active_throttle,
vm_page_secluded_count_inuse);
secluded_overflow = (vm_page_secluded_count -
vm_page_secluded_target);
while (secluded_overflow-- > 0 &&
vm_page_secluded_count > vm_page_secluded_target) {
assert((vm_page_secluded_count_free +
vm_page_secluded_count_inuse) ==
vm_page_secluded_count);
secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
assert(secluded_page->vm_page_q_state ==
VM_PAGE_ON_SECLUDED_Q);
vm_page_queues_remove(secluded_page, FALSE);
assert(!secluded_page->fictitious);
assert(!VM_PAGE_WIRED(secluded_page));
if (secluded_page->vm_page_object == 0) {
assert(secluded_page->busy);
secluded_page->snext = local_freeq;
local_freeq = secluded_page;
local_freed++;
} else {
vm_page_enqueue_active(secluded_page, FALSE);
if (active_burst_count-- == 0) {
vm_pageout_secluded_burst_count++;
break;
}
}
secluded_page = VM_PAGE_NULL;
if (delayed_unlock++ > delayed_unlock_limit) {
vm_pageout_delayed_unlock(&delayed_unlock, &local_freed, &local_freeq);
}
}
}
#endif
assert(delayed_unlock);
if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
goto done_moving_active_pages;
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
active_burst_count = MIN(vm_pageout_burst_active_throttle, vm_page_active_count);
VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_START,
vm_pageout_inactive, vm_pageout_inactive_used, vm_page_free_count, local_freed);
VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_NONE,
vm_pageout_speculative_clean, vm_pageout_inactive_clean,
vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_START);
while (!vm_page_queue_empty(&vm_page_queue_active) && active_burst_count--) {
vm_pageout_active++;
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);
assert(!m->laundry);
assert(VM_PAGE_OBJECT(m) != kernel_object);
assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
vm_page_deactivate_internal(m, FALSE);
if (delayed_unlock++ > delayed_unlock_limit) {
vm_pageout_delayed_unlock(&delayed_unlock, &local_freed, &local_freeq);
}
}
VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_END,
vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, vm_page_inactive_target);
memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_END);
done_moving_active_pages:
if (vm_page_free_count + local_freed >= vm_page_free_target)
{
vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
vm_pageout_adjust_eq_iothrottle(eq, TRUE);
vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
vm_page_inactive_count +
vm_page_speculative_count);
#ifndef CONFIG_EMBEDDED
if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
!vm_page_queue_empty(&vm_page_queue_active)) {
continue;
}
#endif
lck_mtx_lock(&vm_page_queue_free_lock);
if ((vm_page_free_count >= vm_page_free_target) &&
(vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
return_from_scan:
assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
vm_pageout_inactive, vm_pageout_inactive_used, 0, 0);
VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
vm_pageout_speculative_clean, vm_pageout_inactive_clean,
vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
return;
}
lck_mtx_unlock(&vm_page_queue_free_lock);
}
assert(available_for_purge >= 0);
force_purge = 0;
#if VM_PRESSURE_EVENTS
pressure_level = memorystatus_vm_pressure_level;
if (pressure_level > kVMPressureNormal) {
if (pressure_level >= kVMPressureCritical) {
force_purge = memorystatus_purge_on_critical;
} else if (pressure_level >= kVMPressureUrgent) {
force_purge = memorystatus_purge_on_urgent;
} else if (pressure_level >= kVMPressureWarning) {
force_purge = memorystatus_purge_on_warning;
}
}
#endif
if (available_for_purge || force_purge) {
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
}
memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
vm_pageout_purged_objects++;
VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
continue;
}
VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
}
if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
struct vm_speculative_age_q *aq;
boolean_t can_steal = FALSE;
int num_scanned_queues;
aq = &vm_page_queue_speculative[speculative_steal_index];
num_scanned_queues = 0;
while (vm_page_queue_empty(&aq->age_q) &&
num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
speculative_steal_index++;
if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
aq = &vm_page_queue_speculative[speculative_steal_index];
}
if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
printf("vm_pageout_scan: "
"all speculative queues empty "
"but count=%d. Re-adjusting.\n",
vm_page_speculative_count);
if (vm_page_speculative_count > vm_page_speculative_count_drift_max)
vm_page_speculative_count_drift_max = vm_page_speculative_count;
vm_page_speculative_count_drifts++;
#if DEVELOPMENT || DEBUG
panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
#endif
vm_page_speculative_count = 0;
continue;
}
if (vm_page_speculative_count > vm_page_speculative_target || force_speculative_aging == TRUE)
can_steal = TRUE;
else {
if (!delay_speculative_age) {
mach_timespec_t ts_fully_aged;
ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
* 1000 * NSEC_PER_USEC;
ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
clock_sec_t sec;
clock_nsec_t nsec;
clock_get_system_nanotime(&sec, &nsec);
ts.tv_sec = (unsigned int) sec;
ts.tv_nsec = nsec;
if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
can_steal = TRUE;
else
delay_speculative_age++;
} else {
delay_speculative_age++;
if (delay_speculative_age == DELAY_SPECULATIVE_AGE)
delay_speculative_age = 0;
}
}
if (can_steal == TRUE)
vm_page_speculate_ageit(aq);
}
force_speculative_aging = FALSE;
#if CONFIG_BACKGROUND_QUEUE
if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0 &&
((vm_page_background_mode == VM_PAGE_BG_DISABLED) || (vm_page_background_count <= vm_page_background_target)))
#else
if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0)
#endif
{
int pages_evicted;
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
}
pages_evicted = vm_object_cache_evict(100, 10);
if (pages_evicted) {
vm_pageout_cache_evicted += pages_evicted;
VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
vm_page_free_count, pages_evicted, vm_pageout_cache_evicted, 0);
memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
continue;
} else
cache_evict_throttle = 1000;
}
if (cache_evict_throttle)
cache_evict_throttle--;
#if CONFIG_JETSAM
if (vm_compressor_low_on_space())
vm_page_filecache_min = 0;
else
vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 7);
#else
if (vm_compressor_out_of_space())
vm_page_filecache_min = 0;
else {
vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 3);
}
#endif
if (vm_page_free_count < (vm_page_free_reserved / 4))
vm_page_filecache_min = 0;
exceeded_burst_throttle = FALSE;
if (vm_page_queue_empty(&vm_page_queue_inactive) &&
vm_page_queue_empty(&vm_page_queue_anonymous) &&
vm_page_queue_empty(&sq->age_q)) {
vm_pageout_scan_empty_throttle++;
msecs = vm_pageout_empty_wait;
goto vm_pageout_scan_delay;
} else if (inactive_burst_count >=
MIN(vm_pageout_burst_inactive_throttle,
(vm_page_inactive_count +
vm_page_speculative_count))) {
vm_pageout_scan_burst_throttle++;
msecs = vm_pageout_burst_wait;
exceeded_burst_throttle = TRUE;
goto vm_pageout_scan_delay;
} else if (vm_page_free_count > (vm_page_free_reserved / 4) &&
VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()) {
vm_pageout_scan_swap_throttle++;
msecs = vm_pageout_swap_wait;
goto vm_pageout_scan_delay;
} else if (VM_PAGE_Q_THROTTLED(iq) &&
VM_DYNAMIC_PAGING_ENABLED()) {
clock_sec_t sec;
clock_nsec_t nsec;
switch (flow_control.state) {
case FCS_IDLE:
if ((vm_page_free_count + local_freed) < vm_page_free_target) {
vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
VM_PAGEOUT_PB_THREAD_YIELD);
if (!VM_PAGE_Q_THROTTLED(iq)) {
vm_pageout_scan_yield_unthrottled++;
continue;
}
if (vm_page_pageable_external_count > vm_page_filecache_min &&
!vm_page_queue_empty(&vm_page_queue_inactive)) {
anons_grabbed = ANONS_GRABBED_LIMIT;
vm_pageout_scan_throttle_deferred++;
goto consider_inactive;
}
if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) && vm_page_active_count)
continue;
}
reset_deadlock_timer:
ts.tv_sec = vm_pageout_deadlock_wait / 1000;
ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
clock_get_system_nanotime(&sec, &nsec);
flow_control.ts.tv_sec = (unsigned int) sec;
flow_control.ts.tv_nsec = nsec;
ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
flow_control.state = FCS_DELAYED;
msecs = vm_pageout_deadlock_wait;
break;
case FCS_DELAYED:
clock_get_system_nanotime(&sec, &nsec);
ts.tv_sec = (unsigned int) sec;
ts.tv_nsec = nsec;
if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
vm_pageout_scan_deadlock_detected++;
flow_control.state = FCS_DEADLOCK_DETECTED;
thread_wakeup((event_t) &vm_pageout_garbage_collect);
goto consider_inactive;
}
msecs = vm_pageout_idle_wait;
break;
case FCS_DEADLOCK_DETECTED:
if (vm_pageout_deadlock_target)
goto consider_inactive;
goto reset_deadlock_timer;
}
vm_pageout_scan_delay:
vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
if (flow_control.state == FCS_DELAYED &&
!VM_PAGE_Q_THROTTLED(iq)) {
flow_control.state = FCS_IDLE;
goto consider_inactive;
}
if (vm_page_free_count >= vm_page_free_target) {
vm_pageout_adjust_eq_iothrottle(eq, TRUE);
}
lck_mtx_lock(&vm_page_queue_free_lock);
if (vm_page_free_count >= vm_page_free_target &&
(vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
goto return_from_scan;
}
lck_mtx_unlock(&vm_page_queue_free_lock);
if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
vm_pageout_adjust_eq_iothrottle(eq, FALSE);
}
if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
flow_control.state = FCS_IDLE;
goto consider_inactive;
}
VM_CHECK_MEMORYSTATUS;
if (flow_control.state != FCS_IDLE)
vm_pageout_scan_throttle++;
iq->pgo_throttled = TRUE;
assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
counter(c_vm_pageout_scan_block++);
vm_page_unlock_queues();
assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
thread_block(THREAD_CONTINUE_NULL);
VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
vm_page_lock_queues();
iq->pgo_throttled = FALSE;
if (loop_count >= vm_page_inactive_count)
loop_count = 0;
inactive_burst_count = 0;
goto Restart;
}
flow_control.state = FCS_IDLE;
consider_inactive:
vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
vm_pageout_inactive_external_forced_reactivate_limit);
loop_count++;
inactive_burst_count++;
vm_pageout_inactive++;
while (1) {
uint32_t inactive_external_count;
#if CONFIG_BACKGROUND_QUEUE
page_from_bg_q = FALSE;
#endif
m = NULL;
m_object = VM_OBJECT_NULL;
if (VM_DYNAMIC_PAGING_ENABLED()) {
assert(vm_page_throttled_count == 0);
assert(vm_page_queue_empty(&vm_page_queue_throttled));
}
if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
break;
}
if (!vm_page_queue_empty(&sq->age_q)) {
m = (vm_page_t) vm_page_queue_first(&sq->age_q);
assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);
if (!m->dirty || force_anonymous == FALSE)
break;
else
m = NULL;
}
#if CONFIG_BACKGROUND_QUEUE
if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
vm_object_t bg_m_object = NULL;
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
bg_m_object = VM_PAGE_OBJECT(m);
if (!VM_PAGE_PAGEABLE(m)) {
} else if (force_anonymous == FALSE || bg_m_object->internal) {
if (bg_m_object->internal &&
((vm_compressor_out_of_space() == TRUE) ||
(vm_page_free_count < (vm_page_free_reserved / 4)))) {
vm_pageout_skipped_bq_internal++;
} else {
page_from_bg_q = TRUE;
if (bg_m_object->internal)
vm_pageout_considered_bq_internal++;
else
vm_pageout_considered_bq_external++;
break;
}
}
}
#endif
grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
if ((vm_page_pageable_external_count < vm_page_filecache_min || force_anonymous == TRUE) ||
((inactive_external_count < vm_page_anonymous_count) && (inactive_external_count < (vm_page_pageable_external_count / 3)))) {
grab_anonymous = TRUE;
anons_grabbed = 0;
vm_pageout_skipped_external++;
goto want_anonymous;
}
#if CONFIG_JETSAM
if (grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
if (vm_page_pageable_external_count >
vm_page_filecache_min) {
if ((vm_page_pageable_external_count *
vm_pageout_memorystatus_fb_factor_dr) >
(memorystatus_available_pages_critical *
vm_pageout_memorystatus_fb_factor_nr)) {
grab_anonymous = FALSE;
#if DEVELOPMENT || DEBUG
vm_grab_anon_overrides++;
#endif
}
}
#if DEVELOPMENT || DEBUG
if (grab_anonymous) {
vm_grab_anon_nops++;
}
#endif
}
#endif
want_anonymous:
if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
if ( !vm_page_queue_empty(&vm_page_queue_inactive) ) {
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
anons_grabbed = 0;
if (vm_page_pageable_external_count < vm_page_filecache_min) {
if ((++reactivated_this_call % 100))
goto must_activate_page;
}
break;
}
}
if ( !vm_page_queue_empty(&vm_page_queue_anonymous) ) {
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
anons_grabbed++;
break;
}
force_anonymous = FALSE;
vm_pageout_no_victim++;
if ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target)
goto done_with_inactivepage;
if (!vm_page_queue_empty(&sq->age_q))
goto done_with_inactivepage;
if (vm_page_speculative_count) {
force_speculative_aging = TRUE;
goto done_with_inactivepage;
}
panic("vm_pageout: no victim");
}
assert(VM_PAGE_PAGEABLE(m));
m_object = VM_PAGE_OBJECT(m);
force_anonymous = FALSE;
page_prev_q_state = m->vm_page_q_state;
vm_page_queues_remove(m, TRUE);
assert(!m->laundry);
assert(!m->private);
assert(!m->fictitious);
assert(m_object != kernel_object);
assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
vm_pageout_stats[vm_pageout_stat_now].considered++;
vm_pageout_considered_page++;
DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
if (m_object != object) {
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
if (!vm_object_lock_try_scan(m_object)) {
vm_page_t m_want = NULL;
vm_pageout_inactive_nolock++;
if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_nolock++;
pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
m->reference = FALSE;
#if !CONFIG_EMBEDDED
m_object->scan_collisions = 1;
#endif
if ( !vm_page_queue_empty(&sq->age_q) )
m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
else if ( !vm_page_queue_empty(&vm_page_queue_cleaned))
m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
else if ( !vm_page_queue_empty(&vm_page_queue_inactive) &&
(anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)))
m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
else if ( !vm_page_queue_empty(&vm_page_queue_anonymous))
m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
if (m_want)
vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
try_failed = TRUE;
goto requeue_page;
}
object = m_object;
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
try_failed = FALSE;
}
assert(m_object == object);
assert(VM_PAGE_OBJECT(m) == m_object);
if (m->busy) {
vm_pageout_inactive_busy++;
if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_busy++;
requeue_page:
if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
vm_page_enqueue_inactive(m, FALSE);
else
vm_page_activate(m);
#if CONFIG_BACKGROUND_QUEUE
if (page_from_bg_q == TRUE) {
if (m_object->internal)
vm_pageout_rejected_bq_internal++;
else
vm_pageout_rejected_bq_external++;
}
#endif
goto done_with_inactivepage;
}
if (m->absent || m->error || !object->alive) {
if (m->absent)
vm_pageout_inactive_absent++;
else if (!object->alive)
vm_pageout_inactive_notalive++;
else
vm_pageout_inactive_error++;
reclaim_page:
if (vm_pageout_deadlock_target) {
vm_pageout_scan_inactive_throttle_success++;
vm_pageout_deadlock_target--;
}
DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
if (object->internal) {
DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
} else {
DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
}
assert(!m->cleaning);
assert(!m->laundry);
m->busy = TRUE;
if (m->tabled)
vm_page_remove(m, TRUE);
assert(m->pageq.next == 0 && m->pageq.prev == 0);
m->snext = local_freeq;
local_freeq = m;
local_freed++;
if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
vm_pageout_freed_from_speculative++;
else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_freed_from_cleaned++;
else
vm_pageout_freed_from_inactive_clean++;
vm_pageout_stats[vm_pageout_stat_now].reclaimed_clean++;
inactive_burst_count = 0;
goto done_with_inactivepage;
}
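		/*
		 * Purgeable objects never have a copy object.  Pages of an
		 * "empty" purgeable object are reclaimed outright; pages of
		 * a "volatile" one are reactivated unless the compressor is
		 * active (see below).
		 */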
if (object->copy == VM_OBJECT_NULL) {
if (object->purgable == VM_PURGABLE_EMPTY) {
if (m->pmapped == TRUE) {
refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
}
if (m->dirty || m->precious) {
vm_page_purged_count++;
}
goto reclaim_page;
}
if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
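			/*
			 * With the compressor, reclaiming a page is cheap (no
			 * I/O), so it's better to let a "volatile" page get
			 * compressed than to have it occupy a full page until
			 * it gets purged; no need to check for "volatile" here.
			 */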
} else if (object->purgable == VM_PURGABLE_VOLATILE) {
assert(!VM_PAGE_WIRED(m));
reactivated_this_call++;
if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_volatile_reactivated++;
goto reactivate_page;
}
}
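		/*
		 * If the page is being used, it will be reactivated below.
		 * First refresh its reference and dirty state from the pmap.
		 */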
refmod_state = -1;
if (m->reference == FALSE && m->pmapped == TRUE) {
refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
if (refmod_state & VM_MEM_REFERENCED)
m->reference = TRUE;
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
}
if (m->free_when_done) {
goto done_with_inactivepage;
}
if (m->cleaning) {
if (m->reference == TRUE) {
reactivated_this_call++;
goto reactivate_page;
} else {
goto done_with_inactivepage;
}
}
if (m->reference || m->dirty) {
VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
}
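		/*
		 * A referenced page is normally reactivated, but other CPUs
		 * can touch pages faster than we can scan them, so cap the
		 * reactivations per call; past the limit the page is paged
		 * out despite the reference.
		 */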
if (!m->no_cache &&
#if CONFIG_BACKGROUND_QUEUE
page_from_bg_q == FALSE &&
#endif
(m->reference ||
(m->xpmapped && !object->internal && (vm_page_xpmapped_external_count < (vm_page_external_count / 4))))) {
if (++reactivated_this_call >= reactivate_limit) {
vm_pageout_reactivation_limit_exceeded++;
} else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
vm_pageout_inactive_force_reclaim++;
} else {
uint32_t isinuse;
if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_reference_reactivated++;
reactivate_page:
if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
vm_page_deactivate(m);
vm_pageout_inactive_deactivated++;
} else {
must_activate_page:
vm_page_activate(m);
VM_STAT_INCR(reactivations);
inactive_burst_count = 0;
}
#if CONFIG_BACKGROUND_QUEUE
if (page_from_bg_q == TRUE) {
if (m_object->internal)
vm_pageout_rejected_bq_internal++;
else
vm_pageout_rejected_bq_external++;
}
#endif
if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_reactivated++;
vm_pageout_inactive_used++;
goto done_with_inactivepage;
}
if ((refmod_state == -1) && !m->dirty && m->pmapped) {
refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
}
}
XPR(XPR_VM_PAGEOUT,
"vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
object, m->offset, m, 0,0);
inactive_throttled = FALSE;
if (m->dirty || m->precious) {
if (object->internal) {
if (VM_PAGE_Q_THROTTLED(iq))
inactive_throttled = TRUE;
} else if (VM_PAGE_Q_THROTTLED(eq)) {
inactive_throttled = TRUE;
}
}
throttle_inactive:
if (!VM_DYNAMIC_PAGING_ENABLED() &&
object->internal && m->dirty &&
(object->purgable == VM_PURGABLE_DENY ||
object->purgable == VM_PURGABLE_NONVOLATILE ||
object->purgable == VM_PURGABLE_VOLATILE)) {
vm_page_check_pageable_safe(m);
assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
vm_page_queue_enter(&vm_page_queue_throttled, m,
vm_page_t, pageq);
m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
vm_page_throttled_count++;
vm_pageout_scan_reclaimed_throttled++;
inactive_burst_count = 0;
goto done_with_inactivepage;
}
if (inactive_throttled == TRUE) {
if (object->internal == FALSE) {
vm_pageout_scan_inactive_throttled_external++;
vm_page_check_pageable_safe(m);
assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
vm_page_queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
vm_page_active_count++;
vm_page_pageable_external_count++;
vm_pageout_adjust_eq_iothrottle(eq, FALSE);
#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
vm_pageout_inactive_external_forced_reactivate_limit--;
if (vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
assert(object);
vm_object_unlock(object);
object = VM_OBJECT_NULL;
vm_page_unlock_queues();
VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
if (memorystatus_kill_on_VM_page_shortage(FALSE) == FALSE) {
panic("vm_pageout_scan: Jetsam request failed\n");
}
VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);
vm_pageout_inactive_external_forced_jetsam_count++;
vm_page_lock_queues();
delayed_unlock = 1;
}
#else
force_anonymous = TRUE;
#endif
inactive_burst_count = 0;
goto done_with_inactivepage;
} else {
vm_pageout_scan_inactive_throttled_internal++;
goto must_activate_page;
}
}
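		/*
		 * We've committed to stealing this page: disconnect all of
		 * its mappings to obtain the authoritative modified state,
		 * passing the PMAP_OPTIONS_COMPRESSOR* hints when an
		 * internal page is headed for the compressor.
		 */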
if (m->pmapped == TRUE) {
int pmap_options;
if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
object->internal == FALSE) {
pmap_options = 0;
} else if (m->dirty || m->precious) {
pmap_options = PMAP_OPTIONS_COMPRESSOR;
} else {
pmap_options =
PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
}
refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
pmap_options,
NULL);
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
}
inactive_reclaim_run = 0;
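		/*
		 * A page that is clean and not precious can be freed now.
		 */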
if (!m->dirty && !m->precious) {
if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
vm_pageout_speculative_clean++;
else {
if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q)
vm_pageout_inactive_anonymous++;
else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_reclaimed++;
vm_pageout_inactive_clean++;
}
#if CONFIG_PHANTOM_CACHE
if (!object->internal)
vm_phantom_cache_add_ghost(m);
#endif
goto reclaim_page;
}
if (object->internal) {
if (VM_PAGE_Q_THROTTLED(iq))
inactive_throttled = TRUE;
} else if (VM_PAGE_Q_THROTTLED(eq)) {
inactive_throttled = TRUE;
}
if (inactive_throttled == TRUE)
goto throttle_inactive;
#if VM_PRESSURE_EVENTS
#if CONFIG_JETSAM
#else
vm_pressure_response();
#endif
#endif
if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
vm_pageout_speculative_dirty++;
else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q)
vm_pageout_inactive_anonymous++;
if (object->internal)
vm_pageout_inactive_dirty_internal++;
else
vm_pageout_inactive_dirty_external++;
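		/*
		 * The page is dirty or precious: hand it to the laundry.
		 * vm_pageout_cluster() queues internal pages for the
		 * compressor and external pages for their pager.
		 */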
vm_pageout_cluster(m);
done_with_inactivepage:
if (delayed_unlock++ > delayed_unlock_limit || try_failed == TRUE) {
vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
if (try_failed == TRUE)
lck_mtx_yield(&vm_page_queue_lock);
}
}
}
int vm_page_free_count_init;
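/*
 *	vm_page_free_reserve:
 *
 *	Bump the free-page reservation (clamped to a limit) and
 *	recompute the free-page minimum, target and throttle
 *	thresholds that are derived from it.
 */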
void
vm_page_free_reserve(
int pages)
{
int free_after_reserve;
if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT))
vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
else
vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
} else {
if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT)
vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
else
vm_page_free_reserved += pages;
}
free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
vm_page_free_min = vm_page_free_reserved +
VM_PAGE_FREE_MIN(free_after_reserve);
if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
vm_page_free_target = vm_page_free_reserved +
VM_PAGE_FREE_TARGET(free_after_reserve);
if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
if (vm_page_free_target < vm_page_free_min + 5)
vm_page_free_target = vm_page_free_min + 5;
vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
}
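/*
 *	vm_pageout_continue:
 *
 *	Main loop of the pageout daemon: run vm_pageout_scan(), then
 *	block until more free pages are wanted.
 */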
void
vm_pageout_continue(void)
{
DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
vm_pageout_scan_event_counter++;
#if !CONFIG_EMBEDDED
lck_mtx_lock(&vm_page_queue_free_lock);
vm_pageout_running = TRUE;
lck_mtx_unlock(&vm_page_queue_free_lock);
#endif
vm_pageout_scan();
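	/*
	 * vm_pageout_scan() returns holding both the free-page lock and
	 * the page queues lock; the asserts and unlocks below rely on it.
	 */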
assert(vm_page_free_wanted == 0);
assert(vm_page_free_wanted_privileged == 0);
assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
#if !CONFIG_EMBEDDED
vm_pageout_running = FALSE;
if (vm_pageout_waiter) {
vm_pageout_waiter = FALSE;
thread_wakeup((event_t)&vm_pageout_waiter);
}
#endif
lck_mtx_unlock(&vm_page_queue_free_lock);
vm_page_unlock_queues();
counter(c_vm_pageout_block++);
thread_block((thread_continue_t)vm_pageout_continue);
}
#if !CONFIG_EMBEDDED
kern_return_t
vm_pageout_wait(uint64_t deadline)
{
kern_return_t kr;
lck_mtx_lock(&vm_page_queue_free_lock);
for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr); ) {
vm_pageout_waiter = TRUE;
if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
&vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
(event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
kr = KERN_OPERATION_TIMED_OUT;
}
}
lck_mtx_unlock(&vm_page_queue_free_lock);
return (kr);
}
#endif
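/*
 * Main loop of the external (file-backed) pageout thread: pop pages
 * off the external pageout queue, push each one to its pager with
 * memory_object_data_return(), and block when the queue drains.
 */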
static void
vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
{
vm_page_t m = NULL;
vm_object_t object;
vm_object_offset_t offset;
memory_object_t pager;
if (vm_pageout_internal_iothread != THREAD_NULL)
current_thread()->options &= ~TH_OPT_VMPRIV;
vm_page_lockspin_queues();
while ( !vm_page_queue_empty(&q->pgo_pending) ) {
q->pgo_busy = TRUE;
vm_page_queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
assert(m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q);
VM_PAGE_CHECK(m);
object = VM_PAGE_OBJECT(m);
offset = m->offset;
if (object->object_slid) {
panic("slid page %p not allowed on this path\n", m);
}
m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
VM_PAGE_ZERO_PAGEQ_ENTRY(m);
vm_page_unlock_queues();
vm_object_lock(object);
m = vm_page_lookup(object, offset);
if (m == NULL ||
m->busy || m->cleaning || !m->laundry || (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
vm_object_activity_end(object);
vm_object_unlock(object);
vm_page_lockspin_queues();
continue;
}
pager = object->pager;
if (pager == MEMORY_OBJECT_NULL) {
if (m->free_when_done) {
VM_PAGE_FREE(m);
} else {
vm_page_lockspin_queues();
vm_pageout_throttle_up(m);
vm_page_activate(m);
vm_page_unlock_queues();
}
vm_object_activity_end(object);
vm_object_unlock(object);
vm_page_lockspin_queues();
continue;
}
#if 0
VM_PAGE_CHECK(m);
#endif
vm_object_activity_end(object);
vm_object_paging_begin(object);
vm_object_unlock(object);
memory_object_data_return(pager,
m->offset + object->paging_offset,
PAGE_SIZE,
NULL,
NULL,
FALSE,
FALSE,
0);
vm_object_lock(object);
vm_object_paging_end(object);
vm_object_unlock(object);
vm_pageout_io_throttle();
vm_page_lockspin_queues();
}
q->pgo_busy = FALSE;
q->pgo_idle = TRUE;
assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
vm_page_unlock_queues();
thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
}
#define MAX_FREE_BATCH 32
uint32_t vm_compressor_time_thread;
#if DEVELOPMENT || DEBUG
uint64_t compressor_epoch_start, compressor_epoch_stop, compressor_threads_runtime;
#endif
void
vm_pageout_iothread_internal_continue(struct cq *);
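/*
 * Main loop of an internal pageout (compressor) thread: pull batches
 * of pages off the internal pageout queue, compress each page, and
 * free the ones that were compressed successfully.
 */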
void
vm_pageout_iothread_internal_continue(struct cq *cq)
{
struct vm_pageout_queue *q;
vm_page_t m = NULL;
boolean_t pgo_draining;
vm_page_t local_q;
int local_cnt;
vm_page_t local_freeq = NULL;
int local_freed = 0;
int local_batch_size;
int ncomps = 0;
#if DEVELOPMENT || DEBUG
boolean_t marked_active = FALSE;
#endif
KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
q = cq->q;
local_batch_size = q->pgo_maxlaundry / (vm_compressor_thread_count * 2);
#if RECORD_THE_COMPRESSED_DATA
if (q->pgo_laundry)
c_compressed_record_init();
#endif
while (TRUE) {
int pages_left_on_q = 0;
local_cnt = 0;
local_q = NULL;
KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
vm_page_lock_queues();
#if DEVELOPMENT || DEBUG
if (marked_active == FALSE) {
vmct_active++;
vmct_state[cq->id] = VMCT_ACTIVE;
marked_active = TRUE;
if (vmct_active == 1) {
compressor_epoch_start = mach_absolute_time();
}
}
#endif
KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
while ( !vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
vm_page_queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
assert(m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q);
VM_PAGE_CHECK(m);
m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
VM_PAGE_ZERO_PAGEQ_ENTRY(m);
m->laundry = FALSE;
m->snext = local_q;
local_q = m;
local_cnt++;
}
if (local_q == NULL)
break;
q->pgo_busy = TRUE;
if ((pgo_draining = q->pgo_draining) == FALSE) {
vm_pageout_throttle_up_batch(q, local_cnt);
pages_left_on_q = q->pgo_laundry;
} else
pages_left_on_q = q->pgo_laundry - local_cnt;
vm_page_unlock_queues();
#if !RECORD_THE_COMPRESSED_DATA
if (pages_left_on_q >= local_batch_size && cq->id < (vm_compressor_thread_count - 1)) {
thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
}
#endif
KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
while (local_q) {
KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
m = local_q;
local_q = m->snext;
m->snext = NULL;
if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m, FALSE) == KERN_SUCCESS) {
ncomps++;
m->snext = local_freeq;
local_freeq = m;
local_freed++;
if (local_freed >= MAX_FREE_BATCH) {
vm_pageout_freed_after_compression += local_freed;
vm_page_free_list(local_freeq, TRUE);
local_freeq = NULL;
local_freed = 0;
}
}
#if !CONFIG_JETSAM
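			/*
			 * Without jetsam, stall the compressor whenever the
			 * free list dips below the compressor reserve: flush
			 * our local free list first, then wait as a privileged
			 * thread for pages to be freed.
			 */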
while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
kern_return_t wait_result;
int need_wakeup = 0;
if (local_freeq) {
vm_pageout_freed_after_compression += local_freed;
vm_page_free_list(local_freeq, TRUE);
local_freeq = NULL;
local_freed = 0;
continue;
}
lck_mtx_lock_spin(&vm_page_queue_free_lock);
if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
if (vm_page_free_wanted_privileged++ == 0)
need_wakeup = 1;
wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
lck_mtx_unlock(&vm_page_queue_free_lock);
if (need_wakeup)
thread_wakeup((event_t)&vm_page_free_wanted);
if (wait_result == THREAD_WAITING)
thread_block(THREAD_CONTINUE_NULL);
} else
lck_mtx_unlock(&vm_page_queue_free_lock);
}
#endif
}
if (local_freeq) {
vm_pageout_freed_after_compression += local_freed;
vm_page_free_list(local_freeq, TRUE);
local_freeq = NULL;
local_freed = 0;
}
if (pgo_draining == TRUE) {
vm_page_lockspin_queues();
vm_pageout_throttle_up_batch(q, local_cnt);
vm_page_unlock_queues();
}
}
KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
q->pgo_busy = FALSE;
q->pgo_idle = TRUE;
assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
#if DEVELOPMENT || DEBUG
if (marked_active == TRUE) {
vmct_active--;
vmct_state[cq->id] = VMCT_IDLE;
if (vmct_active == 0) {
compressor_epoch_stop = mach_absolute_time();
assert(compressor_epoch_stop > compressor_epoch_start);
vmct_stats.vmct_cthreads_total += compressor_epoch_stop - compressor_epoch_start;
}
}
#endif
vm_page_unlock_queues();
#if DEVELOPMENT || DEBUG
if (__improbable(vm_compressor_time_thread)) {
vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
vmct_stats.vmct_pages[cq->id] += ncomps;
vmct_stats.vmct_iterations[cq->id]++;
if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
vmct_stats.vmct_maxpages[cq->id] = ncomps;
}
if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
vmct_stats.vmct_minpages[cq->id] = ncomps;
}
}
#endif
KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
}
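/*
 * Compress a single page through its object's compressor pager.  On
 * success the page is removed from its object so the caller can free
 * it; on failure the page is reactivated.
 */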
kern_return_t
vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m, boolean_t object_locked_by_caller)
{
vm_object_t object;
memory_object_t pager;
int compressed_count_delta;
kern_return_t retval;
object = VM_PAGE_OBJECT(m);
if (object->object_slid) {
panic("slid page %p not allowed on this path\n", m);
}
assert(!m->free_when_done);
assert(!m->laundry);
pager = object->pager;
if (object_locked_by_caller == FALSE && (!object->pager_initialized || pager == MEMORY_OBJECT_NULL)) {
KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
vm_object_lock(object);
if (!object->pager_initialized)
vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
if (!object->pager_initialized)
vm_object_compressor_pager_create(object);
pager = object->pager;
if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
PAGE_WAKEUP_DONE(m);
vm_page_lockspin_queues();
vm_page_activate(m);
vm_pageout_dirty_no_pager++;
vm_page_unlock_queues();
vm_object_activity_end(object);
vm_object_unlock(object);
return KERN_FAILURE;
}
vm_object_unlock(object);
KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
}
assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
if (object_locked_by_caller == FALSE)
assert(object->activity_in_progress > 0);
retval = vm_compressor_pager_put(
pager,
m->offset + object->paging_offset,
VM_PAGE_GET_PHYS_PAGE(m),
current_chead,
scratch_buf,
&compressed_count_delta);
if (object_locked_by_caller == FALSE) {
vm_object_lock(object);
assert(object->activity_in_progress > 0);
assert(VM_PAGE_OBJECT(m) == object);
}
vm_compressor_pager_count(pager,
compressed_count_delta,
FALSE,
object);
assert( !VM_PAGE_WIRED(m));
if (retval == KERN_SUCCESS) {
if (object->purgable != VM_PURGABLE_DENY &&
object->vo_purgeable_owner != NULL) {
vm_purgeable_compressed_update(object,
+1);
}
VM_STAT_INCR(compressions);
if (m->tabled)
vm_page_remove(m, TRUE);
} else {
PAGE_WAKEUP_DONE(m);
vm_page_lockspin_queues();
vm_page_activate(m);
vm_compressor_failed++;
vm_page_unlock_queues();
}
if (object_locked_by_caller == FALSE) {
vm_object_activity_end(object);
vm_object_unlock(object);
}
return retval;
}
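/*
 * Raise or lower the I/O priority of the external pageout thread to
 * track the current laundering load.
 */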
static void
vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
{
uint32_t policy;
if (hibernate_cleaning_in_progress == TRUE)
req_lowpriority = FALSE;
if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
vm_page_unlock_queues();
if (req_lowpriority == TRUE) {
policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
DTRACE_VM(laundrythrottle);
} else {
policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
DTRACE_VM(laundryunthrottle);
}
proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
eq->pgo_lowpriority = req_lowpriority;
vm_page_lock_queues();
}
}
static void
vm_pageout_iothread_external(void)
{
thread_t self = current_thread();
self->options |= TH_OPT_VMPRIV;
DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
vm_page_lock_queues();
vm_pageout_queue_external.pgo_tid = self->thread_id;
vm_pageout_queue_external.pgo_lowpriority = TRUE;
vm_pageout_queue_external.pgo_inited = TRUE;
vm_page_unlock_queues();
vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
}
static void
vm_pageout_iothread_internal(struct cq *cq)
{
thread_t self = current_thread();
self->options |= TH_OPT_VMPRIV;
vm_page_lock_queues();
vm_pageout_queue_internal.pgo_tid = self->thread_id;
vm_pageout_queue_internal.pgo_lowpriority = TRUE;
vm_pageout_queue_internal.pgo_inited = TRUE;
vm_page_unlock_queues();
if (vm_restricted_to_single_processor == TRUE)
thread_vm_bind_group_add();
thread_set_thread_name(current_thread(), "VM_compressor");
#if DEVELOPMENT || DEBUG
vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
#endif
vm_pageout_iothread_internal_continue(cq);
}
kern_return_t
vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
{
if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
return KERN_SUCCESS;
} else {
return KERN_FAILURE;
}
}
extern boolean_t memorystatus_manual_testing_on;
extern unsigned int memorystatus_level;
#if VM_PRESSURE_EVENTS
boolean_t vm_pressure_events_enabled = FALSE;
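/*
 * Recompute memorystatus_level from the available non-compressed
 * memory and run the pressure-level state machine, waking the
 * pressure thread and any level-change waiters when the level moves.
 */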
void
vm_pressure_response(void)
{
vm_pressure_level_t old_level = kVMPressureNormal;
int new_level = -1;
unsigned int total_pages;
uint64_t available_memory = 0;
if (vm_pressure_events_enabled == FALSE)
return;
#if CONFIG_EMBEDDED
available_memory = (uint64_t) memorystatus_available_pages;
#else
available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
#endif
total_pages = (unsigned int) atop_64(max_mem);
#if CONFIG_SECLUDED_MEMORY
total_pages -= vm_page_secluded_count;
#endif
memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
if (memorystatus_manual_testing_on) {
return;
}
old_level = memorystatus_vm_pressure_level;
switch (memorystatus_vm_pressure_level) {
case kVMPressureNormal:
{
if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
new_level = kVMPressureCritical;
} else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
new_level = kVMPressureWarning;
}
break;
}
case kVMPressureWarning:
case kVMPressureUrgent:
{
if (VM_PRESSURE_WARNING_TO_NORMAL()) {
new_level = kVMPressureNormal;
} else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
new_level = kVMPressureCritical;
}
break;
}
case kVMPressureCritical:
{
if (VM_PRESSURE_WARNING_TO_NORMAL()) {
new_level = kVMPressureNormal;
} else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
new_level = kVMPressureWarning;
}
break;
}
default:
return;
}
if (new_level != -1) {
memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != new_level)) {
if (vm_pressure_thread_running == FALSE) {
thread_wakeup(&vm_pressure_thread);
}
if (old_level != new_level) {
thread_wakeup(&vm_pressure_changed);
}
}
}
}
#endif
kern_return_t
mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
{
#if CONFIG_EMBEDDED
return KERN_FAILURE;
#elif !VM_PRESSURE_EVENTS
return KERN_FAILURE;
#else
kern_return_t kr = KERN_SUCCESS;
if (pressure_level != NULL) {
vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
if (wait_for_pressure == TRUE) {
wait_result_t wr = 0;
while (old_level == *pressure_level) {
wr = assert_wait((event_t) &vm_pressure_changed,
THREAD_INTERRUPTIBLE);
if (wr == THREAD_WAITING) {
wr = thread_block(THREAD_CONTINUE_NULL);
}
if (wr == THREAD_INTERRUPTED) {
return KERN_ABORTED;
}
if (wr == THREAD_AWAKENED) {
old_level = memorystatus_vm_pressure_level;
if (old_level != *pressure_level) {
break;
}
}
}
}
*pressure_level = old_level;
kr = KERN_SUCCESS;
} else {
kr = KERN_INVALID_ARGUMENT;
}
return kr;
#endif
}
#if VM_PRESSURE_EVENTS
void
vm_pressure_thread(void)
{
static boolean_t thread_initialized = FALSE;
if (thread_initialized == TRUE) {
vm_pressure_thread_running = TRUE;
consider_vm_pressure_events();
vm_pressure_thread_running = FALSE;
}
thread_initialized = TRUE;
assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
thread_block((thread_continue_t)vm_pressure_thread);
}
#endif
uint32_t vm_pageout_considered_page_last = 0;
void
compute_pageout_gc_throttle(__unused void *arg)
{
if (vm_pageout_considered_page != vm_pageout_considered_page_last) {
vm_pageout_considered_page_last = vm_pageout_considered_page;
thread_wakeup((event_t) &vm_pageout_garbage_collect);
}
}
extern boolean_t is_zone_map_nearing_exhaustion(void);
void
vm_pageout_garbage_collect(int collect)
{
if (collect) {
if (is_zone_map_nearing_exhaustion()) {
consider_zone_gc(TRUE);
} else {
boolean_t buf_large_zfree = FALSE;
boolean_t first_try = TRUE;
stack_collect();
consider_machine_collect();
m_drain();
do {
if (consider_buffer_cache_collect != NULL) {
buf_large_zfree = (*consider_buffer_cache_collect)(0);
}
if (first_try == TRUE || buf_large_zfree == TRUE) {
consider_zone_gc(FALSE);
}
first_try = FALSE;
} while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
consider_machine_adjust();
}
}
assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
}
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif
#endif
void
vm_set_restrictions(void)
{
host_basic_info_data_t hinfo;
mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
#define BSD_HOST 1
host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
assert(hinfo.max_cpus > 0);
if (hinfo.max_cpus <= 3) {
vm_restricted_to_single_processor = TRUE;
}
}
void
vm_pageout(void)
{
thread_t self = current_thread();
thread_t thread;
kern_return_t result;
spl_t s;
s = splsched();
thread_lock(self);
self->options |= TH_OPT_VMPRIV;
sched_set_thread_base_priority(self, BASEPRI_VM);
thread_unlock(self);
if (!self->reserved_stack)
self->reserved_stack = self->kernel_stack;
if (vm_restricted_to_single_processor == TRUE)
thread_vm_bind_group_add();
splx(s);
thread_set_thread_name(current_thread(), "VM_pageout_scan");
if (vm_pageout_swap_wait == 0)
vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
if (vm_pageout_idle_wait == 0)
vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
if (vm_pageout_burst_wait == 0)
vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
if (vm_pageout_empty_wait == 0)
vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
if (vm_pageout_deadlock_wait == 0)
vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
if (vm_pageout_deadlock_relief == 0)
vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
if (vm_pageout_inactive_relief == 0)
vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
if (vm_pageout_burst_active_throttle == 0)
vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
if (vm_pageout_burst_inactive_throttle == 0)
vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
task_lock(kernel_task);
kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
task_unlock(kernel_task);
vm_page_free_count_init = vm_page_free_count;
if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
} else
vm_page_free_reserve(0);
vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
vm_pageout_queue_external.pgo_laundry = 0;
vm_pageout_queue_external.pgo_idle = FALSE;
vm_pageout_queue_external.pgo_busy = FALSE;
vm_pageout_queue_external.pgo_throttled = FALSE;
vm_pageout_queue_external.pgo_draining = FALSE;
vm_pageout_queue_external.pgo_lowpriority = FALSE;
vm_pageout_queue_external.pgo_tid = -1;
vm_pageout_queue_external.pgo_inited = FALSE;
vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
vm_pageout_queue_internal.pgo_maxlaundry = 0;
vm_pageout_queue_internal.pgo_laundry = 0;
vm_pageout_queue_internal.pgo_idle = FALSE;
vm_pageout_queue_internal.pgo_busy = FALSE;
vm_pageout_queue_internal.pgo_throttled = FALSE;
vm_pageout_queue_internal.pgo_draining = FALSE;
vm_pageout_queue_internal.pgo_lowpriority = FALSE;
vm_pageout_queue_internal.pgo_tid = -1;
vm_pageout_queue_internal.pgo_inited = FALSE;
result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
BASEPRI_VM,
&vm_pageout_external_iothread);
if (result != KERN_SUCCESS)
panic("vm_pageout_iothread_external: create failed");
thread_deallocate(vm_pageout_external_iothread);
result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
BASEPRI_DEFAULT,
&thread);
if (result != KERN_SUCCESS)
panic("vm_pageout_garbage_collect: create failed");
thread_deallocate(thread);
#if VM_PRESSURE_EVENTS
result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
BASEPRI_DEFAULT,
&thread);
if (result != KERN_SUCCESS)
panic("vm_pressure_thread: create failed");
thread_deallocate(thread);
#endif
vm_object_reaper_init();
bzero(&vm_config, sizeof(vm_config));
switch (vm_compressor_mode) {
case VM_PAGER_DEFAULT:
printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
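		/* FALLTHROUGH */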
case VM_PAGER_COMPRESSOR_WITH_SWAP:
vm_config.compressor_is_present = TRUE;
vm_config.swap_is_present = TRUE;
vm_config.compressor_is_active = TRUE;
vm_config.swap_is_active = TRUE;
break;
case VM_PAGER_COMPRESSOR_NO_SWAP:
vm_config.compressor_is_present = TRUE;
vm_config.swap_is_present = TRUE;
vm_config.compressor_is_active = TRUE;
break;
case VM_PAGER_FREEZER_DEFAULT:
printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
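		/* FALLTHROUGH */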
case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
vm_config.compressor_is_present = TRUE;
vm_config.swap_is_present = TRUE;
break;
case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
vm_config.compressor_is_present = TRUE;
vm_config.swap_is_present = TRUE;
vm_config.compressor_is_active = TRUE;
vm_config.freezer_swap_is_active = TRUE;
break;
case VM_PAGER_NOT_CONFIGURED:
break;
default:
printf("unknown compressor mode - %x\n", vm_compressor_mode);
break;
}
if (VM_CONFIG_COMPRESSOR_IS_PRESENT)
vm_compressor_pager_init();
#if VM_PRESSURE_EVENTS
vm_pressure_events_enabled = TRUE;
#endif
#if CONFIG_PHANTOM_CACHE
vm_phantom_cache_init();
#endif
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
(uint64_t) vm_page_fake_buckets_start,
(uint64_t) vm_page_fake_buckets_end);
pmap_protect(kernel_pmap,
vm_page_fake_buckets_start,
vm_page_fake_buckets_end,
VM_PROT_READ);
#endif
#endif
#if VM_OBJECT_TRACKING
vm_object_tracking_init();
#endif
vm_tests();
vm_pageout_continue();
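	/*
	 * Unreached: vm_pageout_continue() never returns.  The DTrace
	 * probes below exist only so that Solaris-style scripts that
	 * reference them still compile; they never fire.
	 */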
DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
}
#if CONFIG_EMBEDDED
int vm_compressor_thread_count = 1;
#else
int vm_compressor_thread_count = 2;
#endif
kern_return_t
vm_pageout_internal_start(void)
{
kern_return_t result;
int i;
host_basic_info_data_t hinfo;
assert (VM_CONFIG_COMPRESSOR_IS_PRESENT);
mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
#define BSD_HOST 1
host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
assert(hinfo.max_cpus > 0);
PE_parse_boot_argn("vmcomp_threads", &vm_compressor_thread_count, sizeof(vm_compressor_thread_count));
if (vm_compressor_thread_count >= hinfo.max_cpus)
vm_compressor_thread_count = hinfo.max_cpus - 1;
if (vm_compressor_thread_count <= 0)
vm_compressor_thread_count = 1;
else if (vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT)
vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
vm_pageout_queue_internal.pgo_maxlaundry = (vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
PE_parse_boot_argn("vmpgoi_maxlaundry", &vm_pageout_queue_internal.pgo_maxlaundry, sizeof(vm_pageout_queue_internal.pgo_maxlaundry));
for (i = 0; i < vm_compressor_thread_count; i++) {
ciq[i].id = i;
ciq[i].q = &vm_pageout_queue_internal;
ciq[i].current_chead = NULL;
ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], BASEPRI_VM, &vm_pageout_internal_iothread);
if (result == KERN_SUCCESS)
thread_deallocate(vm_pageout_internal_iothread);
else
break;
}
return result;
}
#if CONFIG_IOSCHED
static void
upl_set_decmp_info(upl_t upl, upl_t src_upl)
{
assert((src_upl->flags & UPL_DECMP_REQ) != 0);
upl_lock(src_upl);
if (src_upl->decmp_io_upl) {
upl_unlock(src_upl);
return;
}
src_upl->decmp_io_upl = (void *)upl;
src_upl->ref_count++;
upl->flags |= UPL_DECMP_REAL_IO;
upl->decmp_io_upl = (void *)src_upl;
upl_unlock(src_upl);
}
#endif
#if UPL_DEBUG
int upl_debug_enabled = 1;
#else
int upl_debug_enabled = 0;
#endif
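/*
 * Allocate and initialize a UPL, including the trailing page-info
 * array and/or "lite" page bitmap implied by the creation type.
 */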
static upl_t
upl_create(int type, int flags, upl_size_t size)
{
upl_t upl;
vm_size_t page_field_size = 0;
int upl_flags = 0;
vm_size_t upl_size = sizeof(struct upl);
size = round_page_32(size);
if (type & UPL_CREATE_LITE) {
page_field_size = (atop(size) + 7) >> 3;
page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
upl_flags |= UPL_LITE;
}
if (type & UPL_CREATE_INTERNAL) {
upl_size += sizeof(struct upl_page_info) * atop(size);
upl_flags |= UPL_INTERNAL;
}
upl = (upl_t)kalloc(upl_size + page_field_size);
if (page_field_size)
bzero((char *)upl + upl_size, page_field_size);
upl->flags = upl_flags | flags;
upl->kaddr = (vm_offset_t)0;
upl->size = 0;
upl->map_object = NULL;
upl->ref_count = 1;
upl->ext_ref_count = 0;
upl->highest_page = 0;
upl_lock_init(upl);
upl->vector_upl = NULL;
upl->associated_upl = NULL;
#if CONFIG_IOSCHED
if (type & UPL_CREATE_IO_TRACKING) {
upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
}
upl->upl_reprio_info = 0;
upl->decmp_io_upl = 0;
if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
thread_t curthread = current_thread();
upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
upl->flags |= UPL_EXPEDITE_SUPPORTED;
if (curthread->decmp_upl != NULL)
upl_set_decmp_info(upl, curthread->decmp_upl);
}
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
upl->upl_creator = current_thread();
upl->uplq.next = 0;
upl->uplq.prev = 0;
upl->flags |= UPL_TRACKED_BY_OBJECT;
}
#endif
#if UPL_DEBUG
upl->ubc_alias1 = 0;
upl->ubc_alias2 = 0;
upl->upl_state = 0;
upl->upl_commit_index = 0;
bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif
return(upl);
}
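/*
 * Tear down a UPL: detach it from any decompression source UPL and
 * from its object's UPL queue, then free the structure along with
 * its trailing page list and bitmap.
 */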
static void
upl_destroy(upl_t upl)
{
int page_field_size;
int size;
if (upl->ext_ref_count) {
panic("upl(%p) ext_ref_count", upl);
}
#if CONFIG_IOSCHED
if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
upl_t src_upl;
src_upl = upl->decmp_io_upl;
assert((src_upl->flags & UPL_DECMP_REQ) != 0);
upl_lock(src_upl);
src_upl->decmp_io_upl = NULL;
upl_unlock(src_upl);
upl_deallocate(src_upl);
}
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) {
vm_object_t object;
if (upl->flags & UPL_SHADOWED) {
object = upl->map_object->shadow;
} else {
object = upl->map_object;
}
vm_object_lock(object);
queue_remove(&object->uplq, upl, upl_t, uplq);
vm_object_activity_end(object);
vm_object_collapse(object, 0, TRUE);
vm_object_unlock(object);
}
#endif
if (upl->flags & UPL_SHADOWED)
vm_object_deallocate(upl->map_object);
if (upl->flags & UPL_DEVICE_MEMORY)
size = PAGE_SIZE;
else
size = upl->size;
page_field_size = 0;
if (upl->flags & UPL_LITE) {
page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
}
upl_lock_destroy(upl);
upl->vector_upl = (vector_upl_t) 0xfeedbeef;
#if CONFIG_IOSCHED
if (upl->flags & UPL_EXPEDITE_SUPPORTED)
kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size/PAGE_SIZE));
#endif
if (upl->flags & UPL_INTERNAL) {
kfree(upl,
sizeof(struct upl) +
(sizeof(struct upl_page_info) * (size/PAGE_SIZE))
+ page_field_size);
} else {
kfree(upl, sizeof(struct upl) + page_field_size);
}
}
void
upl_deallocate(upl_t upl)
{
upl_lock(upl);
	if (--upl->ref_count == 0) {
		if (vector_upl_is_valid(upl))
			vector_upl_deallocate(upl);
		upl_unlock(upl);
		upl_destroy(upl);
	} else
		upl_unlock(upl);
}
#if CONFIG_IOSCHED
void
upl_mark_decmp(upl_t upl)
{
if (upl->flags & UPL_TRACKED_BY_OBJECT) {
upl->flags |= UPL_DECMP_REQ;
upl->upl_creator->decmp_upl = (void *)upl;
}
}
void
upl_unmark_decmp(upl_t upl)
{
	if (upl && (upl->flags & UPL_DECMP_REQ)) {
upl->upl_creator->decmp_upl = NULL;
}
}
#endif
#define VM_PAGE_Q_BACKING_UP(q) \
((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
boolean_t must_throttle_writes(void);
boolean_t
must_throttle_writes(void)
{
if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10)
return (TRUE);
return (FALSE);
}
#if DEVELOPMENT || DEBUG
unsigned long upl_cow = 0;
unsigned long upl_cow_again = 0;
unsigned long upl_cow_pages = 0;
unsigned long upl_cow_again_pages = 0;
unsigned long iopl_cow = 0;
unsigned long iopl_cow_pages = 0;
#endif
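/*
 *	vm_object_upl_request:
 *
 *	Populate the given range of a VM object with pages and wrap
 *	them in a UPL, honoring the control flags (lite bitmap, COW
 *	synchronization, dirty/precious handling, ...).
 */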
__private_extern__ kern_return_t
vm_object_upl_request(
vm_object_t object,
vm_object_offset_t offset,
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
upl_control_flags_t cntrl_flags,
vm_tag_t tag)
{
vm_page_t dst_page = VM_PAGE_NULL;
vm_object_offset_t dst_offset;
upl_size_t xfer_size;
unsigned int size_in_pages;
boolean_t dirty;
boolean_t hw_dirty;
upl_t upl = NULL;
unsigned int entry;
#if MACH_CLUSTER_STATS
boolean_t encountered_lrp = FALSE;
#endif
vm_page_t alias_page = NULL;
int refmod_state = 0;
wpl_array_t lite_list = NULL;
vm_object_t last_copy_object;
struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
struct vm_page_delayed_work *dwp;
int dw_count;
int dw_limit;
int io_tracking_flag = 0;
int grab_options;
ppnum_t phys_page;
if (cntrl_flags & ~UPL_VALID_FLAGS) {
return KERN_INVALID_VALUE;
}
if ( (!object->internal) && (object->paging_offset != 0) )
panic("vm_object_upl_request: external object with non-zero paging offset\n");
if (object->phys_contiguous)
panic("vm_object_upl_request: contiguous object specified\n");
if (size > MAX_UPL_SIZE_BYTES)
size = MAX_UPL_SIZE_BYTES;
if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
*page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
#if CONFIG_IOSCHED || UPL_DEBUG
if (object->io_tracking || upl_debug_enabled)
io_tracking_flag |= UPL_CREATE_IO_TRACKING;
#endif
#if CONFIG_IOSCHED
if (object->io_tracking)
io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
#endif
if (cntrl_flags & UPL_SET_INTERNAL) {
if (cntrl_flags & UPL_SET_LITE) {
upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
lite_list = (wpl_array_t)
(((uintptr_t)user_page_list) +
((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
if (size == 0) {
user_page_list = NULL;
lite_list = NULL;
}
} else {
upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
user_page_list = NULL;
}
}
} else {
if (cntrl_flags & UPL_SET_LITE) {
upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
lite_list = NULL;
}
} else {
upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
}
}
*upl_ptr = upl;
if (user_page_list)
user_page_list[0].device = FALSE;
if (cntrl_flags & UPL_SET_LITE) {
upl->map_object = object;
} else {
upl->map_object = vm_object_allocate(size);
upl->map_object->shadow = object;
upl->map_object->pageout = TRUE;
upl->map_object->can_persist = FALSE;
upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
upl->map_object->vo_shadow_offset = offset;
upl->map_object->wimg_bits = object->wimg_bits;
VM_PAGE_GRAB_FICTITIOUS(alias_page);
upl->flags |= UPL_SHADOWED;
}
if (cntrl_flags & UPL_FOR_PAGEOUT)
upl->flags |= UPL_PAGEOUT;
vm_object_lock(object);
vm_object_activity_begin(object);
grab_options = 0;
#if CONFIG_SECLUDED_MEMORY
if (object->can_grab_secluded) {
grab_options |= VM_PAGE_GRAB_SECLUDED;
}
#endif
upl->size = size;
upl->offset = offset + object->paging_offset;
#if CONFIG_IOSCHED || UPL_DEBUG
if (object->io_tracking || upl_debug_enabled) {
vm_object_activity_begin(object);
queue_enter(&object->uplq, upl, upl_t, uplq);
}
#endif
if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
vm_object_update(object,
offset,
size,
NULL,
NULL,
FALSE,
MEMORY_OBJECT_COPY_SYNC,
VM_PROT_NO_CHANGE);
#if DEVELOPMENT || DEBUG
upl_cow++;
upl_cow_pages += size >> PAGE_SHIFT;
#endif
}
last_copy_object = object->copy;
entry = 0;
xfer_size = size;
dst_offset = offset;
size_in_pages = size / PAGE_SIZE;
dwp = &dw_array[0];
dw_count = 0;
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT))
object->scan_collisions = 0;
if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
boolean_t isSSD = FALSE;
#if CONFIG_EMBEDDED
isSSD = TRUE;
#else
vnode_pager_get_isSSD(object->pager, &isSSD);
#endif
vm_object_unlock(object);
OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
if (isSSD == TRUE)
delay(1000 * size_in_pages);
else
delay(5000 * size_in_pages);
OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
vm_object_lock(object);
}
while (xfer_size) {
dwp->dw_mask = 0;
if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
vm_object_unlock(object);
VM_PAGE_GRAB_FICTITIOUS(alias_page);
vm_object_lock(object);
}
if (cntrl_flags & UPL_COPYOUT_FROM) {
upl->flags |= UPL_PAGE_SYNC_DONE;
if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
dst_page->fictitious ||
dst_page->absent ||
dst_page->error ||
dst_page->cleaning ||
(VM_PAGE_WIRED(dst_page))) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
if (dst_page->pmapped)
refmod_state = pmap_get_refmod(phys_page);
else
refmod_state = 0;
if ( (refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
dwp->dw_mask |= DW_vm_page_activate;
}
if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
if (dst_page->laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
goto check_busy;
goto dont_return;
}
if ( (hibernate_cleaning_in_progress == TRUE ||
(!((refmod_state & VM_MEM_REFERENCED) || dst_page->reference) ||
(dst_page->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
goto check_busy;
}
dont_return:
if (dst_page->laundry == TRUE) {
vm_page_lockspin_queues();
vm_pageout_steal_laundry(dst_page, TRUE);
vm_page_activate(dst_page);
vm_page_unlock_queues();
}
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
check_busy:
if (dst_page->busy) {
if (cntrl_flags & UPL_NOBLOCK) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
dwp->dw_mask = 0;
goto try_next_page;
}
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
if (dst_page->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) {
vm_page_lockspin_queues();
if (dst_page->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) {
vm_pageout_throttle_up(dst_page);
}
vm_page_unlock_queues();
}
#if MACH_CLUSTER_STATS
if (dst_page->pageout)
encountered_lrp = TRUE;
if ((dst_page->dirty || (object->internal && dst_page->precious))) {
if (encountered_lrp)
CLUSTER_STAT(pages_at_higher_offsets++;)
else
CLUSTER_STAT(pages_at_lower_offsets++;)
}
#endif
hw_dirty = refmod_state & VM_MEM_MODIFIED;
dirty = hw_dirty ? TRUE : dst_page->dirty;
if (phys_page > upl->highest_page)
upl->highest_page = phys_page;
assert (!pmap_is_noencrypt(phys_page));
if (cntrl_flags & UPL_SET_LITE) {
unsigned int pg_num;
pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
lite_list[pg_num>>5] |= 1 << (pg_num & 31);
if (hw_dirty)
pmap_clear_modify(phys_page);
dst_page->cleaning = TRUE;
dst_page->precious = FALSE;
} else {
vm_object_lock(upl->map_object);
vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
vm_object_unlock(upl->map_object);
alias_page->absent = FALSE;
alias_page = NULL;
}
if (dirty) {
SET_PAGE_DIRTY(dst_page, FALSE);
} else {
dst_page->dirty = FALSE;
}
if (!dirty)
dst_page->precious = TRUE;
if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
if ( !VM_PAGE_WIRED(dst_page))
dst_page->free_when_done = TRUE;
}
} else {
if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
if (object->copy != VM_OBJECT_NULL) {
vm_object_update(
object,
dst_offset,
xfer_size,
NULL,
NULL,
FALSE,
MEMORY_OBJECT_COPY_SYNC,
VM_PROT_NO_CHANGE);
#if DEVELOPMENT || DEBUG
upl_cow_again++;
upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
#endif
}
last_copy_object = object->copy;
}
dst_page = vm_page_lookup(object, dst_offset);
if (dst_page != VM_PAGE_NULL) {
if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
if (dst_page->fictitious) {
panic("need corner case for fictitious page");
}
if (dst_page->busy || dst_page->cleaning) {
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
if (dst_page->laundry)
vm_pageout_steal_laundry(dst_page, FALSE);
} else {
if (object->private) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
if (object->scan_collisions) {
dst_page = vm_object_page_grab(object);
if (dst_page != VM_PAGE_NULL)
vm_page_release(dst_page,
FALSE);
dst_page = vm_object_page_grab(object);
}
if (dst_page == VM_PAGE_NULL) {
dst_page = vm_page_grab_options(grab_options);
}
if (dst_page == VM_PAGE_NULL) {
if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
goto try_next_page;
}
vm_object_unlock(object);
OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
VM_PAGE_WAIT();
OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
vm_object_lock(object);
continue;
}
vm_page_insert(dst_page, object, dst_offset);
dst_page->absent = TRUE;
dst_page->busy = FALSE;
if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
dst_page->clustered = TRUE;
if ( !(cntrl_flags & UPL_FILE_IO))
VM_STAT_INCR(pageins);
}
}
phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
dst_page->overwriting = TRUE;
if (dst_page->pmapped) {
if ( !(cntrl_flags & UPL_FILE_IO))
refmod_state = pmap_disconnect(phys_page);
else
refmod_state = pmap_get_refmod(phys_page);
} else
refmod_state = 0;
hw_dirty = refmod_state & VM_MEM_MODIFIED;
dirty = hw_dirty ? TRUE : dst_page->dirty;
if (cntrl_flags & UPL_SET_LITE) {
unsigned int pg_num;
pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
lite_list[pg_num>>5] |= 1 << (pg_num & 31);
if (hw_dirty)
pmap_clear_modify(phys_page);
dst_page->cleaning = TRUE;
dst_page->precious = FALSE;
} else {
vm_object_lock(upl->map_object);
vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
vm_object_unlock(upl->map_object);
alias_page->absent = FALSE;
alias_page = NULL;
}
if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
upl->flags &= ~UPL_CLEAR_DIRTY;
upl->flags |= UPL_SET_DIRTY;
dirty = TRUE;
} else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
upl->flags |= UPL_CLEAR_DIRTY;
}
dst_page->dirty = dirty;
if (!dirty)
dst_page->precious = TRUE;
if ( !VM_PAGE_WIRED(dst_page)) {
dst_page->busy = TRUE;
} else
dwp->dw_mask |= DW_vm_page_wire;
dst_page->restart = FALSE;
if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
dwp->dw_mask |= DW_set_reference;
}
if (cntrl_flags & UPL_PRECIOUS) {
if (object->internal) {
SET_PAGE_DIRTY(dst_page, FALSE);
dst_page->precious = FALSE;
} else {
dst_page->precious = TRUE;
}
} else {
dst_page->precious = FALSE;
}
}
if (dst_page->busy)
upl->flags |= UPL_HAS_BUSY;
if (phys_page > upl->highest_page)
upl->highest_page = phys_page;
assert (!pmap_is_noencrypt(phys_page));
if (user_page_list) {
user_page_list[entry].phys_addr = phys_page;
user_page_list[entry].free_when_done = dst_page->free_when_done;
user_page_list[entry].absent = dst_page->absent;
user_page_list[entry].dirty = dst_page->dirty;
user_page_list[entry].precious = dst_page->precious;
user_page_list[entry].device = FALSE;
user_page_list[entry].needed = FALSE;
if (dst_page->clustered == TRUE)
user_page_list[entry].speculative = (dst_page->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
else
user_page_list[entry].speculative = FALSE;
user_page_list[entry].cs_validated = dst_page->cs_validated;
user_page_list[entry].cs_tainted = dst_page->cs_tainted;
user_page_list[entry].cs_nx = dst_page->cs_nx;
user_page_list[entry].mark = FALSE;
}
if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
if (dst_page->clustered)
VM_PAGE_CONSUME_CLUSTERED(dst_page);
}
try_next_page:
if (dwp->dw_mask) {
if (dwp->dw_mask & DW_vm_page_activate)
VM_STAT_INCR(reactivations);
VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
if (dw_count >= dw_limit) {
vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
entry++;
dst_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
}
if (dw_count)
vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
if (alias_page != NULL) {
VM_PAGE_FREE(alias_page);
}
if (page_list_count != NULL) {
if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
else if (*page_list_count > entry)
*page_list_count = entry;
}
#if UPL_DEBUG
upl->upl_state = 1;
#endif
vm_object_unlock(object);
return KERN_SUCCESS;
}
__private_extern__ kern_return_t
vm_object_super_upl_request(
vm_object_t object,
vm_object_offset_t offset,
upl_size_t size,
upl_size_t super_cluster,
upl_t *upl,
upl_page_info_t *user_page_list,
unsigned int *page_list_count,
upl_control_flags_t cntrl_flags,
vm_tag_t tag)
{
if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
return KERN_FAILURE;
assert(object->paging_in_progress);
offset = offset - object->paging_offset;
if (super_cluster > size) {
vm_object_offset_t base_offset;
upl_size_t super_size;
vm_object_size_t super_size_64;
base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
super_size = (upl_size_t) super_size_64;
assert(super_size == super_size_64);
if (offset > (base_offset + super_size)) {
panic("vm_object_super_upl_request: Missed target pageout"
" %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
offset, base_offset, super_size, super_cluster,
size, object->paging_offset);
}
if ((offset + size) > (base_offset + super_size)) {
super_size_64 = (offset + size) - base_offset;
super_size = (upl_size_t) super_size_64;
assert(super_size == super_size_64);
}
offset = base_offset;
size = super_size;
}
return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
}
#if CONFIG_EMBEDDED
int cs_executable_create_upl = 0;
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);
#endif
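/*
 *	vm_map_create_upl:
 *
 *	Translate a map-relative request into a UPL on the backing VM
 *	object, resolving submaps, copy-on-write and data-sync cases
 *	before handing off to vm_object_iopl_request().
 */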
kern_return_t
vm_map_create_upl(
vm_map_t map,
vm_map_address_t offset,
upl_size_t *upl_size,
upl_t *upl,
upl_page_info_array_t page_list,
unsigned int *count,
upl_control_flags_t *flags,
vm_tag_t tag)
{
vm_map_entry_t entry;
upl_control_flags_t caller_flags;
int force_data_sync;
int sync_cow_data;
vm_object_t local_object;
vm_map_offset_t local_offset;
vm_map_offset_t local_start;
kern_return_t ret;
assert(page_aligned(offset));
caller_flags = *flags;
if (caller_flags & ~UPL_VALID_FLAGS) {
return KERN_INVALID_VALUE;
}
force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
if (upl == NULL)
return KERN_INVALID_ARGUMENT;
REDISCOVER_ENTRY:
vm_map_lock_read(map);
if (!vm_map_lookup_entry(map, offset, &entry)) {
vm_map_unlock_read(map);
return KERN_FAILURE;
}
if ((entry->vme_end - offset) < *upl_size) {
*upl_size = (upl_size_t) (entry->vme_end - offset);
assert(*upl_size == entry->vme_end - offset);
}
if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
*flags = 0;
if (!entry->is_sub_map &&
VME_OBJECT(entry) != VM_OBJECT_NULL) {
if (VME_OBJECT(entry)->private)
*flags = UPL_DEV_MEMORY;
if (VME_OBJECT(entry)->phys_contiguous)
*flags |= UPL_PHYS_CONTIG;
}
vm_map_unlock_read(map);
return KERN_SUCCESS;
}
if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
!VME_OBJECT(entry)->phys_contiguous) {
if (*upl_size > MAX_UPL_SIZE_BYTES)
*upl_size = MAX_UPL_SIZE_BYTES;
}
if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
if (vm_map_lock_read_to_write(map))
goto REDISCOVER_ENTRY;
VME_OBJECT_SET(entry,
vm_object_allocate((vm_size_t)
(entry->vme_end -
entry->vme_start)));
VME_OFFSET_SET(entry, 0);
vm_map_lock_write_to_read(map);
}
if (!(caller_flags & UPL_COPYOUT_FROM) &&
!(entry->protection & VM_PROT_WRITE)) {
vm_map_unlock_read(map);
return KERN_PROTECTION_FAILURE;
}
#if CONFIG_EMBEDDED
if (map->pmap != kernel_pmap &&
(caller_flags & UPL_COPYOUT_FROM) &&
(entry->protection & VM_PROT_EXECUTE) &&
!(entry->protection & VM_PROT_WRITE)) {
vm_offset_t kaddr;
vm_size_t ksize;
vm_map_unlock_read(map);
ksize = round_page(*upl_size);
kaddr = 0;
ret = kmem_alloc_pageable(kernel_map,
&kaddr,
ksize,
tag);
if (ret == KERN_SUCCESS) {
assert(page_aligned(offset));
ret = copyinmap(map, offset, (void *)kaddr, *upl_size);
}
if (ret == KERN_SUCCESS) {
if (ksize > *upl_size) {
memset((void *)(kaddr + *upl_size),
0,
ksize - *upl_size);
}
ret = vm_map_create_upl(kernel_map, kaddr, upl_size,
upl, page_list, count, flags, tag);
}
if (kaddr != 0) {
kmem_free(kernel_map, kaddr, ksize);
kaddr = 0;
ksize = 0;
}
#if DEVELOPMENT || DEBUG
DTRACE_VM4(create_upl_from_executable,
vm_map_t, map,
vm_map_address_t, offset,
upl_size_t, *upl_size,
kern_return_t, ret);
#endif
return ret;
}
#endif
local_object = VME_OBJECT(entry);
assert(local_object != VM_OBJECT_NULL);
if (!entry->is_sub_map &&
!entry->needs_copy &&
*upl_size != 0 &&
local_object->vo_size > *upl_size &&
entry->wired_count == 0 &&
(map->pmap != kernel_pmap) &&
(vm_map_entry_should_cow_for_true_share(entry)
||
(
local_object->internal &&
(local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
local_object->ref_count > 1))) {
vm_prot_t prot;
if (vm_map_lock_read_to_write(map)) {
goto REDISCOVER_ENTRY;
}
vm_map_lock_assert_exclusive(map);
assert(VME_OBJECT(entry) == local_object);
vm_map_clip_start(map,
entry,
vm_map_trunc_page(offset,
VM_MAP_PAGE_MASK(map)));
vm_map_clip_end(map,
entry,
vm_map_round_page(offset + *upl_size,
VM_MAP_PAGE_MASK(map)));
if ((entry->vme_end - offset) < *upl_size) {
*upl_size = (upl_size_t) (entry->vme_end - offset);
assert(*upl_size == entry->vme_end - offset);
}
prot = entry->protection & ~VM_PROT_WRITE;
if (override_nx(map, VME_ALIAS(entry)) && prot)
prot |= VM_PROT_EXECUTE;
vm_object_pmap_protect(local_object,
VME_OFFSET(entry),
entry->vme_end - entry->vme_start,
((entry->is_shared ||
map->mapped_in_other_pmaps)
? PMAP_NULL
: map->pmap),
entry->vme_start,
prot);
assert(entry->wired_count == 0);
vm_object_lock(local_object);
if (local_object->true_share) {
assert(local_object->copy_strategy !=
MEMORY_OBJECT_COPY_SYMMETRIC);
} else {
assert(local_object->copy_strategy ==
MEMORY_OBJECT_COPY_SYMMETRIC);
entry->needs_copy = TRUE;
}
vm_object_unlock(local_object);
vm_map_lock_write_to_read(map);
}
if (entry->needs_copy) {
vm_map_t local_map;
vm_object_t object;
vm_object_offset_t new_offset;
vm_prot_t prot;
boolean_t wired;
vm_map_version_t version;
vm_map_t real_map;
vm_prot_t fault_type;
local_map = map;
if (caller_flags & UPL_COPYOUT_FROM) {
fault_type = VM_PROT_READ | VM_PROT_COPY;
vm_counters.create_upl_extra_cow++;
vm_counters.create_upl_extra_cow_pages +=
(entry->vme_end - entry->vme_start) / PAGE_SIZE;
} else {
fault_type = VM_PROT_WRITE;
}
if (vm_map_lookup_locked(&local_map,
offset, fault_type,
OBJECT_LOCK_EXCLUSIVE,
&version, &object,
&new_offset, &prot, &wired,
NULL,
&real_map) != KERN_SUCCESS) {
if (fault_type == VM_PROT_WRITE) {
vm_counters.create_upl_lookup_failure_write++;
} else {
vm_counters.create_upl_lookup_failure_copy++;
}
vm_map_unlock_read(local_map);
return KERN_FAILURE;
}
if (real_map != map)
vm_map_unlock(real_map);
vm_map_unlock_read(local_map);
vm_object_unlock(object);
goto REDISCOVER_ENTRY;
}
if (entry->is_sub_map) {
vm_map_t submap;
submap = VME_SUBMAP(entry);
local_start = entry->vme_start;
local_offset = VME_OFFSET(entry);
vm_map_reference(submap);
vm_map_unlock_read(map);
ret = vm_map_create_upl(submap,
local_offset + (offset - local_start),
upl_size, upl, page_list, count, flags, tag);
vm_map_deallocate(submap);
return ret;
}
if (sync_cow_data &&
(VME_OBJECT(entry)->shadow ||
VME_OBJECT(entry)->copy)) {
local_object = VME_OBJECT(entry);
local_start = entry->vme_start;
local_offset = VME_OFFSET(entry);
vm_object_reference(local_object);
vm_map_unlock_read(map);
if (local_object->shadow && local_object->copy) {
vm_object_lock_request(local_object->shadow,
((vm_object_offset_t)
((offset - local_start) +
local_offset) +
local_object->vo_shadow_offset),
*upl_size, FALSE,
MEMORY_OBJECT_DATA_SYNC,
VM_PROT_NO_CHANGE);
}
sync_cow_data = FALSE;
vm_object_deallocate(local_object);
goto REDISCOVER_ENTRY;
}
if (force_data_sync) {
local_object = VME_OBJECT(entry);
local_start = entry->vme_start;
local_offset = VME_OFFSET(entry);
vm_object_reference(local_object);
vm_map_unlock_read(map);
vm_object_lock_request(local_object,
((vm_object_offset_t)
((offset - local_start) +
local_offset)),
(vm_object_size_t)*upl_size,
FALSE,
MEMORY_OBJECT_DATA_SYNC,
VM_PROT_NO_CHANGE);
force_data_sync = FALSE;
vm_object_deallocate(local_object);
goto REDISCOVER_ENTRY;
}
if (VME_OBJECT(entry)->private)
*flags = UPL_DEV_MEMORY;
else
*flags = 0;
if (VME_OBJECT(entry)->phys_contiguous)
*flags |= UPL_PHYS_CONTIG;
local_object = VME_OBJECT(entry);
local_offset = VME_OFFSET(entry);
local_start = entry->vme_start;
#if CONFIG_EMBEDDED
if (entry->protection & VM_PROT_EXECUTE) {
#if MACH_ASSERT
printf("pid %d[%s] create_upl out of executable range from "
"0x%llx to 0x%llx: side effects may include "
"code-signing violations later on\n",
proc_selfpid(),
(current_task()->bsd_info
? proc_name_address(current_task()->bsd_info)
: "?"),
(uint64_t) entry->vme_start,
(uint64_t) entry->vme_end);
#endif
DTRACE_VM2(cs_executable_create_upl,
uint64_t, (uint64_t)entry->vme_start,
uint64_t, (uint64_t)entry->vme_end);
cs_executable_create_upl++;
}
#endif
vm_object_lock(local_object);
if (local_object->true_share) {
assert(local_object->copy_strategy !=
MEMORY_OBJECT_COPY_SYMMETRIC);
} else if (local_object != kernel_object &&
local_object != compressor_object &&
!local_object->phys_contiguous) {
#if VM_OBJECT_TRACKING_OP_TRUESHARE
if (!local_object->true_share &&
vm_object_tracking_inited) {
void *bt[VM_OBJECT_TRACKING_BTDEPTH];
int num = 0;
num = OSBacktrace(bt,
VM_OBJECT_TRACKING_BTDEPTH);
btlog_add_entry(vm_object_tracking_btlog,
local_object,
VM_OBJECT_TRACKING_OP_TRUESHARE,
bt,
num);
}
#endif
local_object->true_share = TRUE;
if (local_object->copy_strategy ==
MEMORY_OBJECT_COPY_SYMMETRIC) {
local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
}
}
vm_object_reference_locked(local_object);
vm_object_unlock(local_object);
vm_map_unlock_read(map);
ret = vm_object_iopl_request(local_object,
((vm_object_offset_t)
((offset - local_start) + local_offset)),
*upl_size,
upl,
page_list,
count,
caller_flags,
tag);
vm_object_deallocate(local_object);
return ret;
}
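/*
 * vm_map_enter_upl:
 *
 * Map a UPL (or every sub-UPL of a vector UPL) into "map" and return
 * the base address of the mapping in *dst_addr.  The sub-UPLs of a
 * vector UPL are laid out contiguously inside a dedicated submap.
 */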
kern_return_t
vm_map_enter_upl(
vm_map_t map,
upl_t upl,
vm_map_offset_t *dst_addr)
{
vm_map_size_t size;
vm_object_offset_t offset;
vm_map_offset_t addr;
vm_page_t m;
kern_return_t kr;
int isVectorUPL = 0, curr_upl=0;
upl_t vector_upl = NULL;
vm_offset_t vector_upl_dst_addr = 0;
vm_map_t vector_upl_submap = NULL;
upl_offset_t subupl_offset = 0;
upl_size_t subupl_size = 0;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
if((isVectorUPL = vector_upl_is_valid(upl))) {
int mapped=0,valid_upls=0;
vector_upl = upl;
upl_lock(vector_upl);
for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
if(upl == NULL)
continue;
valid_upls++;
if (UPL_PAGE_LIST_MAPPED & upl->flags)
mapped++;
}
if(mapped) {
if(mapped != valid_upls)
panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
else {
upl_unlock(vector_upl);
return KERN_FAILURE;
}
}
kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE,
VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
&vector_upl_submap);
if( kr != KERN_SUCCESS )
panic("Vector UPL submap allocation failed\n");
map = vector_upl_submap;
vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
curr_upl=0;
}
else
upl_lock(upl);
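/*
 * For a vector UPL we come back to this point once per sub-UPL
 * until every sub-UPL has been entered into the submap.
 */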
process_upl_to_enter:
if(isVectorUPL){
if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
*dst_addr = vector_upl_dst_addr;
upl_unlock(vector_upl);
return KERN_SUCCESS;
}
upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
if(upl == NULL)
goto process_upl_to_enter;
vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
*dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
} else {
if (UPL_PAGE_LIST_MAPPED & upl->flags) {
upl_unlock(upl);
return KERN_FAILURE;
}
}
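/*
 * Unless the UPL describes device memory or is fully IO-wired, build
 * a shadow object and populate it with wired fictitious alias pages,
 * one per page named in the lite list, so that mapping the UPL cannot
 * disturb the original pages.
 */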
if ((!(upl->flags & UPL_SHADOWED)) &&
((upl->flags & UPL_HAS_BUSY) ||
!((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
vm_object_t object;
vm_page_t alias_page;
vm_object_offset_t new_offset;
unsigned int pg_num;
wpl_array_t lite_list;
if (upl->flags & UPL_INTERNAL) {
lite_list = (wpl_array_t)
((((uintptr_t)upl) + sizeof(struct upl))
+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
} else {
lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
}
object = upl->map_object;
upl->map_object = vm_object_allocate(upl->size);
vm_object_lock(upl->map_object);
upl->map_object->shadow = object;
upl->map_object->pageout = TRUE;
upl->map_object->can_persist = FALSE;
upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset;
upl->map_object->wimg_bits = object->wimg_bits;
offset = upl->map_object->vo_shadow_offset;
new_offset = 0;
size = upl->size;
upl->flags |= UPL_SHADOWED;
while (size) {
pg_num = (unsigned int) (new_offset / PAGE_SIZE);
assert(pg_num == new_offset / PAGE_SIZE);
if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
VM_PAGE_GRAB_FICTITIOUS(alias_page);
vm_object_lock(object);
m = vm_page_lookup(object, offset);
if (m == VM_PAGE_NULL) {
panic("vm_upl_map: page missing\n");
}
assert(alias_page->fictitious);
alias_page->fictitious = FALSE;
alias_page->private = TRUE;
alias_page->free_when_done = TRUE;
VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m));
vm_object_unlock(object);
vm_page_lockspin_queues();
vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
vm_page_unlock_queues();
vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
assert(!alias_page->wanted);
alias_page->busy = FALSE;
alias_page->absent = FALSE;
}
size -= PAGE_SIZE;
offset += PAGE_SIZE_64;
new_offset += PAGE_SIZE_64;
}
vm_object_unlock(upl->map_object);
}
if (upl->flags & UPL_SHADOWED)
offset = 0;
else
offset = upl->offset - upl->map_object->paging_offset;
size = upl->size;
vm_object_reference(upl->map_object);
if(!isVectorUPL) {
*dst_addr = 0;
kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
upl->map_object, offset, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS) {
vm_object_deallocate(upl->map_object);
upl_unlock(upl);
return(kr);
}
}
else {
kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
upl->map_object, offset, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS)
panic("vm_map_enter failed for a Vector UPL\n");
}
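/*
 * Pre-enter each resident page into the (kernel) pmap so the caller
 * will not fault on first touch of the new mapping.
 */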
vm_object_lock(upl->map_object);
for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
m = vm_page_lookup(upl->map_object, offset);
if (m) {
m->pmapped = TRUE;
assert(map->pmap == kernel_pmap);
PMAP_ENTER(map->pmap, addr, m, VM_PROT_DEFAULT, VM_PROT_NONE, 0, TRUE, kr);
assert(kr == KERN_SUCCESS);
#if KASAN
kasan_notify_address(addr, PAGE_SIZE_64);
#endif
}
offset += PAGE_SIZE_64;
}
vm_object_unlock(upl->map_object);
upl->ref_count++;
upl->flags |= UPL_PAGE_LIST_MAPPED;
upl->kaddr = (vm_offset_t) *dst_addr;
assert(upl->kaddr == *dst_addr);
if(isVectorUPL)
goto process_upl_to_enter;
upl_unlock(upl);
return KERN_SUCCESS;
}
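/*
 * vm_map_remove_upl:
 *
 * Undo vm_map_enter_upl: unmap the UPL (or every sub-UPL of a vector
 * UPL, along with the submap holding them) from "map" and drop the
 * reference taken when it was mapped.
 */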
kern_return_t
vm_map_remove_upl(
vm_map_t map,
upl_t upl)
{
vm_address_t addr;
upl_size_t size;
int isVectorUPL = 0, curr_upl = 0;
upl_t vector_upl = NULL;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
if((isVectorUPL = vector_upl_is_valid(upl))) {
int unmapped=0, valid_upls=0;
vector_upl = upl;
upl_lock(vector_upl);
for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
if(upl == NULL)
continue;
valid_upls++;
if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
unmapped++;
}
if(unmapped) {
if(unmapped != valid_upls)
panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
else {
upl_unlock(vector_upl);
return KERN_FAILURE;
}
}
curr_upl=0;
}
else
upl_lock(upl);
process_upl_to_remove:
if(isVectorUPL) {
if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
vm_map_t v_upl_submap;
vm_offset_t v_upl_submap_dst_addr;
vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
vm_map_deallocate(v_upl_submap);
upl_unlock(vector_upl);
return KERN_SUCCESS;
}
upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
if(upl == NULL)
goto process_upl_to_remove;
}
if (upl->flags & UPL_PAGE_LIST_MAPPED) {
addr = upl->kaddr;
size = upl->size;
assert(upl->ref_count > 1);
upl->ref_count--;
upl->flags &= ~UPL_PAGE_LIST_MAPPED;
upl->kaddr = (vm_offset_t) 0;
if(!isVectorUPL) {
upl_unlock(upl);
vm_map_remove(
map,
vm_map_trunc_page(addr,
VM_MAP_PAGE_MASK(map)),
vm_map_round_page(addr + size,
VM_MAP_PAGE_MASK(map)),
VM_MAP_NO_FLAGS);
return KERN_SUCCESS;
}
else {
goto process_upl_to_remove;
}
}
upl_unlock(upl);
return KERN_FAILURE;
}
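/*
 * upl_commit_range:
 *
 * Commit a range of the UPL: release the pages from their busy/wired
 * state, apply the dirty/precious updates requested in "flags", and
 * requeue them.  *empty is set when the commit leaves no pages on
 * the UPL.
 */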
kern_return_t
upl_commit_range(
upl_t upl,
upl_offset_t offset,
upl_size_t size,
int flags,
upl_page_info_t *page_list,
mach_msg_type_number_t count,
boolean_t *empty)
{
upl_size_t xfer_size, subupl_size = size;
vm_object_t shadow_object;
vm_object_t object;
vm_object_t m_object;
vm_object_offset_t target_offset;
upl_offset_t subupl_offset = offset;
int entry;
wpl_array_t lite_list;
int occupied;
int clear_refmod = 0;
int pgpgout_count = 0;
struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
struct vm_page_delayed_work *dwp;
int dw_count;
int dw_limit;
int isVectorUPL = 0;
upl_t vector_upl = NULL;
boolean_t should_be_throttled = FALSE;
vm_page_t nxt_page = VM_PAGE_NULL;
int fast_path_possible = 0;
int fast_path_full_commit = 0;
int throttle_page = 0;
int unwired_count = 0;
int local_queue_count = 0;
vm_page_t first_local, last_local;
*empty = FALSE;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
if (count == 0)
page_list = NULL;
if((isVectorUPL = vector_upl_is_valid(upl))) {
vector_upl = upl;
upl_lock(vector_upl);
}
else
upl_lock(upl);
process_upl_to_commit:
if(isVectorUPL) {
size = subupl_size;
offset = subupl_offset;
if(size == 0) {
upl_unlock(vector_upl);
return KERN_SUCCESS;
}
upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
if(upl == NULL) {
upl_unlock(vector_upl);
return KERN_FAILURE;
}
page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
subupl_size -= size;
subupl_offset += size;
}
#if UPL_DEBUG
if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
upl->upl_commit_index++;
}
#endif
if (upl->flags & UPL_DEVICE_MEMORY)
xfer_size = 0;
else if ((offset + size) <= upl->size)
xfer_size = size;
else {
if(!isVectorUPL)
upl_unlock(upl);
else {
upl_unlock(vector_upl);
}
return KERN_FAILURE;
}
if (upl->flags & UPL_SET_DIRTY)
flags |= UPL_COMMIT_SET_DIRTY;
if (upl->flags & UPL_CLEAR_DIRTY)
flags |= UPL_COMMIT_CLEAR_DIRTY;
if (upl->flags & UPL_INTERNAL)
lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
else
lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
object = upl->map_object;
if (upl->flags & UPL_SHADOWED) {
vm_object_lock(object);
shadow_object = object->shadow;
} else {
shadow_object = object;
}
entry = offset/PAGE_SIZE;
target_offset = (vm_object_offset_t)offset;
assert(!(target_offset & PAGE_MASK));
assert(!(xfer_size & PAGE_MASK));
if (upl->flags & UPL_KERNEL_OBJECT)
vm_object_lock_shared(shadow_object);
else
vm_object_lock(shadow_object);
VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
if (upl->flags & UPL_ACCESS_BLOCKED) {
assert(shadow_object->blocked_access);
shadow_object->blocked_access = FALSE;
vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
}
if (shadow_object->code_signed) {
flags &= ~UPL_COMMIT_CS_VALIDATED;
}
if (! page_list) {
flags &= ~UPL_COMMIT_CS_VALIDATED;
}
if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal)
should_be_throttled = TRUE;
dwp = &dw_array[0];
dw_count = 0;
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
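/*
 * Fast path: a single (non-vector) commit of an IO-wired UPL against
 * a non-purgeable object can unwire the pages and collect them on a
 * local list, batching the page-queue updates at the end instead of
 * paying for per-page delayed work.
 */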
if ((upl->flags & UPL_IO_WIRE) &&
!(flags & UPL_COMMIT_FREE_ABSENT) &&
!isVectorUPL &&
shadow_object->purgable != VM_PURGABLE_VOLATILE &&
shadow_object->purgable != VM_PURGABLE_EMPTY) {
if (!vm_page_queue_empty(&shadow_object->memq)) {
if (size == shadow_object->vo_size) {
nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
fast_path_full_commit = 1;
}
fast_path_possible = 1;
if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
(shadow_object->purgable == VM_PURGABLE_DENY ||
shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
throttle_page = 1;
}
}
}
first_local = VM_PAGE_NULL;
last_local = VM_PAGE_NULL;
while (xfer_size) {
vm_page_t t, m;
dwp->dw_mask = 0;
clear_refmod = 0;
m = VM_PAGE_NULL;
if (upl->flags & UPL_LITE) {
unsigned int pg_num;
if (nxt_page != VM_PAGE_NULL) {
m = nxt_page;
nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->listq);
target_offset = m->offset;
}
pg_num = (unsigned int) (target_offset/PAGE_SIZE);
assert(pg_num == target_offset/PAGE_SIZE);
if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL)
m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
} else
m = NULL;
}
if (upl->flags & UPL_SHADOWED) {
if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
t->free_when_done = FALSE;
VM_PAGE_FREE(t);
if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL)
m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
}
}
if (m == VM_PAGE_NULL)
goto commit_next_page;
m_object = VM_PAGE_OBJECT(m);
if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
assert(m->busy);
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
goto commit_next_page;
}
if (flags & UPL_COMMIT_CS_VALIDATED) {
m->cs_validated = page_list[entry].cs_validated;
m->cs_tainted = page_list[entry].cs_tainted;
m->cs_nx = page_list[entry].cs_nx;
}
if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL)
m->written_by_kernel = TRUE;
if (upl->flags & UPL_IO_WIRE) {
if (page_list)
page_list[entry].phys_addr = 0;
if (flags & UPL_COMMIT_SET_DIRTY) {
SET_PAGE_DIRTY(m, FALSE);
} else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
m->dirty = FALSE;
if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
m->cs_validated && !m->cs_tainted) {
if (m->slid) {
panic("upl_commit_range(%p): page %p was slid\n",
upl, m);
}
assert(!m->slid);
m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
vm_cs_validated_resets++;
#endif
pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
}
clear_refmod |= VM_MEM_MODIFIED;
}
if (upl->flags & UPL_ACCESS_BLOCKED) {
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
if (fast_path_possible) {
assert(m_object->purgable != VM_PURGABLE_EMPTY);
assert(m_object->purgable != VM_PURGABLE_VOLATILE);
if (m->absent) {
assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
assert(m->wire_count == 0);
assert(m->busy);
m->absent = FALSE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
} else {
if (m->wire_count == 0)
panic("wire_count == 0, m = %p, obj = %p\n", m, shadow_object);
assert(m->vm_page_q_state == VM_PAGE_IS_WIRED);
assert(m->wire_count > 0);
m->wire_count--;
if (m->wire_count == 0) {
m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
unwired_count++;
}
}
if (m->wire_count == 0) {
assert(m->pageq.next == 0 && m->pageq.prev == 0);
if (last_local == VM_PAGE_NULL) {
assert(first_local == VM_PAGE_NULL);
last_local = m;
first_local = m;
} else {
assert(first_local != VM_PAGE_NULL);
m->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
first_local->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
first_local = m;
}
local_queue_count++;
if (throttle_page) {
m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
} else {
if (flags & UPL_COMMIT_INACTIVATE) {
if (shadow_object->internal)
m->vm_page_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
else
m->vm_page_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
} else
m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
}
}
} else {
if (flags & UPL_COMMIT_INACTIVATE) {
dwp->dw_mask |= DW_vm_page_deactivate_internal;
clear_refmod |= VM_MEM_REFERENCED;
}
if (m->absent) {
if (flags & UPL_COMMIT_FREE_ABSENT)
dwp->dw_mask |= DW_vm_page_free;
else {
m->absent = FALSE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal))
dwp->dw_mask |= DW_vm_page_activate;
}
} else
dwp->dw_mask |= DW_vm_page_unwire;
}
goto commit_next_page;
}
assert(m->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);
if (page_list)
page_list[entry].phys_addr = 0;
if (flags & UPL_COMMIT_CLEAR_DIRTY) {
m->dirty = FALSE;
clear_refmod |= VM_MEM_MODIFIED;
}
if (m->laundry)
dwp->dw_mask |= DW_vm_pageout_throttle_up;
if (VM_PAGE_WIRED(m))
m->free_when_done = FALSE;
if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
m->cs_validated && !m->cs_tainted) {
if (m->slid) {
panic("upl_commit_range(%p): page %p was slid\n",
upl, m);
}
assert(!m->slid);
m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
vm_cs_validated_resets++;
#endif
pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
}
if (m->overwriting) {
if (m->busy) {
#if CONFIG_PHANTOM_CACHE
if (m->absent && !m_object->internal)
dwp->dw_mask |= DW_vm_phantom_cache_update;
#endif
m->absent = FALSE;
dwp->dw_mask |= DW_clear_busy;
} else {
assert(VM_PAGE_WIRED(m));
dwp->dw_mask |= DW_vm_page_unwire;
}
m->overwriting = FALSE;
}
m->cleaning = FALSE;
if (m->free_when_done) {
assert(!(flags & UPL_PAGEOUT));
assert(!m_object->internal);
m->free_when_done = FALSE;
#if MACH_CLUSTER_STATS
if (m->wanted) vm_pageout_target_collisions++;
#endif
if ((flags & UPL_COMMIT_SET_DIRTY) ||
(m->pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
SET_PAGE_DIRTY(m, FALSE);
dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
if (upl->flags & UPL_PAGEOUT) {
CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
VM_STAT_INCR(reactivations);
DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
}
} else {
if (m_object->internal) {
DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
} else {
DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
}
m->dirty = FALSE;
m->busy = TRUE;
dwp->dw_mask |= DW_vm_page_free;
}
goto commit_next_page;
}
#if MACH_CLUSTER_STATS
if (m->wpmapped)
m->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m));
if (m->dirty) vm_pageout_cluster_dirtied++;
else vm_pageout_cluster_cleaned++;
if (m->wanted) vm_pageout_cluster_collisions++;
#endif
if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
m->precious = FALSE;
if (flags & UPL_COMMIT_SET_DIRTY) {
SET_PAGE_DIRTY(m, FALSE);
} else {
m->dirty = FALSE;
}
if (hibernate_cleaning_in_progress == FALSE && !m->dirty && (upl->flags & UPL_PAGEOUT)) {
pgpgout_count++;
VM_STAT_INCR(pageouts);
DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
dwp->dw_mask |= DW_enqueue_cleaned;
vm_pageout_enqueued_cleaned_from_inactive_dirty++;
} else if (should_be_throttled == TRUE && (m->vm_page_q_state == VM_PAGE_NOT_ON_Q)) {
SET_PAGE_DIRTY(m, FALSE);
dwp->dw_mask |= DW_vm_page_activate;
} else {
if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && (m->vm_page_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
dwp->dw_mask |= DW_vm_page_deactivate_internal;
clear_refmod |= VM_MEM_REFERENCED;
} else if ( !VM_PAGE_PAGEABLE(m)) {
if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
dwp->dw_mask |= DW_vm_page_speculate;
else if (m->reference)
dwp->dw_mask |= DW_vm_page_activate;
else {
dwp->dw_mask |= DW_vm_page_deactivate_internal;
clear_refmod |= VM_MEM_REFERENCED;
}
}
}
if (upl->flags & UPL_ACCESS_BLOCKED) {
dwp->dw_mask |= DW_clear_busy;
}
dwp->dw_mask |= DW_PAGE_WAKEUP;
commit_next_page:
if (clear_refmod)
pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
target_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
entry++;
if (dwp->dw_mask) {
if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
if (dw_count >= dw_limit) {
vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
} else {
if (dwp->dw_mask & DW_clear_busy)
m->busy = FALSE;
if (dwp->dw_mask & DW_PAGE_WAKEUP)
PAGE_WAKEUP(m);
}
}
}
if (dw_count)
vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
if (fast_path_possible) {
assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
if (local_queue_count || unwired_count) {
if (local_queue_count) {
vm_page_t first_target;
vm_page_queue_head_t *target_queue;
if (throttle_page)
target_queue = &vm_page_queue_throttled;
else {
if (flags & UPL_COMMIT_INACTIVATE) {
if (shadow_object->internal)
target_queue = &vm_page_queue_anonymous;
else
target_queue = &vm_page_queue_inactive;
} else
target_queue = &vm_page_queue_active;
}
vm_page_lockspin_queues();
first_target = (vm_page_t) vm_page_queue_first(target_queue);
if (vm_page_queue_empty(target_queue))
target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
else
first_target->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
first_local->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
last_local->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
if (throttle_page) {
vm_page_throttled_count += local_queue_count;
} else {
if (flags & UPL_COMMIT_INACTIVATE) {
if (shadow_object->internal)
vm_page_anonymous_count += local_queue_count;
vm_page_inactive_count += local_queue_count;
token_new_pagecount += local_queue_count;
} else
vm_page_active_count += local_queue_count;
if (shadow_object->internal)
vm_page_pageable_internal_count += local_queue_count;
else
vm_page_pageable_external_count += local_queue_count;
}
} else {
vm_page_lockspin_queues();
}
if (unwired_count) {
vm_page_wire_count -= unwired_count;
VM_CHECK_MEMORYSTATUS;
}
vm_page_unlock_queues();
VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
}
}
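/*
 * Determine whether the UPL still holds any pages; once it is empty
 * we can end the paging activity on the shadow object and attempt
 * to collapse it.
 */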
occupied = 1;
if (upl->flags & UPL_DEVICE_MEMORY) {
occupied = 0;
} else if (upl->flags & UPL_LITE) {
int pg_num;
int i;
occupied = 0;
if (!fast_path_full_commit) {
pg_num = upl->size/PAGE_SIZE;
pg_num = (pg_num + 31) >> 5;
for (i = 0; i < pg_num; i++) {
if (lite_list[i] != 0) {
occupied = 1;
break;
}
}
}
} else {
if (vm_page_queue_empty(&upl->map_object->memq))
occupied = 0;
}
if (occupied == 0) {
if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
*empty = TRUE;
if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
vm_object_activity_end(shadow_object);
vm_object_collapse(shadow_object, 0, TRUE);
} else {
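/*
 * Shadowed or kernel-object UPL: no paging reference to drop here.
 */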
}
}
VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
vm_object_unlock(shadow_object);
if (object != shadow_object)
vm_object_unlock(object);
if(!isVectorUPL)
upl_unlock(upl);
else {
if(*empty==TRUE) {
*empty = vector_upl_set_subupl(vector_upl, upl, 0);
upl_deallocate(upl);
}
goto process_upl_to_commit;
}
if (pgpgout_count) {
DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
}
return KERN_SUCCESS;
}
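/*
 * upl_abort_range:
 *
 * Abort a range of the UPL: absent pages are freed, restarted or
 * marked unusual depending on "error"; everything else is unwired
 * and requeued.  An IO-wired UPL is simply committed unless the
 * caller asked for its pages to be dumped.
 */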
kern_return_t
upl_abort_range(
upl_t upl,
upl_offset_t offset,
upl_size_t size,
int error,
boolean_t *empty)
{
upl_page_info_t *user_page_list = NULL;
upl_size_t xfer_size, subupl_size = size;
vm_object_t shadow_object;
vm_object_t object;
vm_object_offset_t target_offset;
upl_offset_t subupl_offset = offset;
int entry;
wpl_array_t lite_list;
int occupied;
struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
struct vm_page_delayed_work *dwp;
int dw_count;
int dw_limit;
int isVectorUPL = 0;
upl_t vector_upl = NULL;
*empty = FALSE;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
if((isVectorUPL = vector_upl_is_valid(upl))) {
vector_upl = upl;
upl_lock(vector_upl);
}
else
upl_lock(upl);
process_upl_to_abort:
if(isVectorUPL) {
size = subupl_size;
offset = subupl_offset;
if(size == 0) {
upl_unlock(vector_upl);
return KERN_SUCCESS;
}
upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
if(upl == NULL) {
upl_unlock(vector_upl);
return KERN_FAILURE;
}
subupl_size -= size;
subupl_offset += size;
}
*empty = FALSE;
#if UPL_DEBUG
if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
upl->upl_commit_index++;
}
#endif
if (upl->flags & UPL_DEVICE_MEMORY)
xfer_size = 0;
else if ((offset + size) <= upl->size)
xfer_size = size;
else {
if(!isVectorUPL)
upl_unlock(upl);
else {
upl_unlock(vector_upl);
}
return KERN_FAILURE;
}
if (upl->flags & UPL_INTERNAL) {
lite_list = (wpl_array_t)
((((uintptr_t)upl) + sizeof(struct upl))
+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
} else {
lite_list = (wpl_array_t)
(((uintptr_t)upl) + sizeof(struct upl));
}
object = upl->map_object;
if (upl->flags & UPL_SHADOWED) {
vm_object_lock(object);
shadow_object = object->shadow;
} else
shadow_object = object;
entry = offset/PAGE_SIZE;
target_offset = (vm_object_offset_t)offset;
assert(!(target_offset & PAGE_MASK));
assert(!(xfer_size & PAGE_MASK));
if (upl->flags & UPL_KERNEL_OBJECT)
vm_object_lock_shared(shadow_object);
else
vm_object_lock(shadow_object);
if (upl->flags & UPL_ACCESS_BLOCKED) {
assert(shadow_object->blocked_access);
shadow_object->blocked_access = FALSE;
vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
}
dwp = &dw_array[0];
dw_count = 0;
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
panic("upl_abort_range: kernel_object being DUMPED");
while (xfer_size) {
vm_page_t t, m;
unsigned int pg_num;
boolean_t needed;
pg_num = (unsigned int) (target_offset/PAGE_SIZE);
assert(pg_num == target_offset/PAGE_SIZE);
needed = FALSE;
if (user_page_list)
needed = user_page_list[pg_num].needed;
dwp->dw_mask = 0;
m = VM_PAGE_NULL;
if (upl->flags & UPL_LITE) {
if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
if ( !(upl->flags & UPL_KERNEL_OBJECT))
m = vm_page_lookup(shadow_object, target_offset +
(upl->offset - shadow_object->paging_offset));
}
}
if (upl->flags & UPL_SHADOWED) {
if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
t->free_when_done = FALSE;
VM_PAGE_FREE(t);
if (m == VM_PAGE_NULL)
m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
}
}
if ((upl->flags & UPL_KERNEL_OBJECT))
goto abort_next_page;
if (m != VM_PAGE_NULL) {
assert(m->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);
if (m->absent) {
boolean_t must_free = TRUE;
if (error & UPL_ABORT_RESTART) {
m->restart = TRUE;
m->absent = FALSE;
m->unusual = TRUE;
must_free = FALSE;
} else if (error & UPL_ABORT_UNAVAILABLE) {
m->restart = FALSE;
m->unusual = TRUE;
must_free = FALSE;
} else if (error & UPL_ABORT_ERROR) {
m->restart = FALSE;
m->absent = FALSE;
m->error = TRUE;
m->unusual = TRUE;
must_free = FALSE;
}
if (m->clustered && needed == FALSE) {
must_free = TRUE;
}
m->cleaning = FALSE;
if (m->overwriting && !m->busy) {
dwp->dw_mask |= DW_vm_page_unwire;
}
m->overwriting = FALSE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
if (must_free == TRUE)
dwp->dw_mask |= DW_vm_page_free;
else
dwp->dw_mask |= DW_vm_page_activate;
} else {
if (m->laundry)
dwp->dw_mask |= DW_vm_pageout_throttle_up;
if (upl->flags & UPL_ACCESS_BLOCKED) {
dwp->dw_mask |= DW_clear_busy;
}
if (m->overwriting) {
if (m->busy)
dwp->dw_mask |= DW_clear_busy;
else {
dwp->dw_mask |= DW_vm_page_unwire;
}
m->overwriting = FALSE;
}
m->free_when_done = FALSE;
m->cleaning = FALSE;
if (error & UPL_ABORT_DUMP_PAGES) {
pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
dwp->dw_mask |= DW_vm_page_free;
} else {
if (!(dwp->dw_mask & DW_vm_page_unwire)) {
if (error & UPL_ABORT_REFERENCE) {
dwp->dw_mask |= DW_vm_page_lru;
} else if ( !VM_PAGE_PAGEABLE(m))
dwp->dw_mask |= DW_vm_page_deactivate_internal;
}
dwp->dw_mask |= DW_PAGE_WAKEUP;
}
}
}
abort_next_page:
target_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
entry++;
if (dwp->dw_mask) {
if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
if (dw_count >= dw_limit) {
vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
} else {
if (dwp->dw_mask & DW_clear_busy)
m->busy = FALSE;
if (dwp->dw_mask & DW_PAGE_WAKEUP)
PAGE_WAKEUP(m);
}
}
}
if (dw_count)
vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
occupied = 1;
if (upl->flags & UPL_DEVICE_MEMORY) {
occupied = 0;
} else if (upl->flags & UPL_LITE) {
int pg_num;
int i;
pg_num = upl->size/PAGE_SIZE;
pg_num = (pg_num + 31) >> 5;
occupied = 0;
for (i = 0; i < pg_num; i++) {
if (lite_list[i] != 0) {
occupied = 1;
break;
}
}
} else {
if (vm_page_queue_empty(&upl->map_object->memq))
occupied = 0;
}
if (occupied == 0) {
if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
*empty = TRUE;
if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
vm_object_activity_end(shadow_object);
vm_object_collapse(shadow_object, 0, TRUE);
} else {
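/*
 * Shadowed or kernel-object UPL: no paging reference to drop here.
 */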
}
}
vm_object_unlock(shadow_object);
if (object != shadow_object)
vm_object_unlock(object);
if(!isVectorUPL)
upl_unlock(upl);
else {
if(*empty == TRUE) {
*empty = vector_upl_set_subupl(vector_upl, upl,0);
upl_deallocate(upl);
}
goto process_upl_to_abort;
}
return KERN_SUCCESS;
}
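/*
 * upl_abort: abort the entire UPL.
 */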
kern_return_t
upl_abort(
upl_t upl,
int error)
{
boolean_t empty;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
return upl_abort_range(upl, 0, upl->size, error, &empty);
}
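/*
 * upl_commit: commit the entire UPL.
 */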
kern_return_t
upl_commit(
upl_t upl,
upl_page_info_t *page_list,
mach_msg_type_number_t count)
{
boolean_t empty;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
}
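/*
 * iopl_valid_data:
 *
 * The caller has filled the previously absent pages of an IO-wired
 * UPL with valid data: clear their absent state, mark them dirty
 * and wire them under "tag".
 */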
void
iopl_valid_data(
upl_t upl,
vm_tag_t tag)
{
vm_object_t object;
vm_offset_t offset;
vm_page_t m, nxt_page = VM_PAGE_NULL;
upl_size_t size;
int wired_count = 0;
if (upl == NULL)
panic("iopl_valid_data: NULL upl");
if (vector_upl_is_valid(upl))
panic("iopl_valid_data: vector upl");
if ((upl->flags & (UPL_DEVICE_MEMORY|UPL_SHADOWED|UPL_ACCESS_BLOCKED|UPL_IO_WIRE|UPL_INTERNAL)) != UPL_IO_WIRE)
panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
object = upl->map_object;
if (object == kernel_object || object == compressor_object)
panic("iopl_valid_data: object == kernel or compressor");
if (object->purgable == VM_PURGABLE_VOLATILE ||
object->purgable == VM_PURGABLE_EMPTY)
panic("iopl_valid_data: object %p purgable %d",
object, object->purgable);
size = upl->size;
vm_object_lock(object);
VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE))
nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
else
offset = 0 + upl->offset - object->paging_offset;
while (size) {
if (nxt_page != VM_PAGE_NULL) {
m = nxt_page;
nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->listq);
} else {
m = vm_page_lookup(object, offset);
offset += PAGE_SIZE;
if (m == VM_PAGE_NULL)
panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
}
if (m->busy) {
if (!m->absent)
panic("iopl_valid_data: busy page w/o absent");
if (m->pageq.next || m->pageq.prev)
panic("iopl_valid_data: busy+absent page on page queue");
if (m->reusable) {
panic("iopl_valid_data: %p is reusable", m);
}
m->absent = FALSE;
m->dirty = TRUE;
assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
assert(m->wire_count == 0);
m->wire_count++;
assert(m->wire_count);
if (m->wire_count == 1) {
m->vm_page_q_state = VM_PAGE_IS_WIRED;
wired_count++;
} else {
panic("iopl_valid_data: %p already wired\n", m);
}
PAGE_WAKEUP_DONE(m);
}
size -= PAGE_SIZE;
}
if (wired_count) {
VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count);
assert(object->resident_page_count >= object->wired_page_count);
assert(object->purgable != VM_PURGABLE_VOLATILE);
assert(object->purgable != VM_PURGABLE_EMPTY);
vm_page_lockspin_queues();
vm_page_wire_count += wired_count;
vm_page_unlock_queues();
}
VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
vm_object_unlock(object);
}
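/*
 * Propagate the object's WIMG cache attributes into the pmap for the
 * pages in the list, unless the object uses the default attributes.
 */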
void
vm_object_set_pmap_cache_attr(
vm_object_t object,
upl_page_info_array_t user_page_list,
unsigned int num_pages,
boolean_t batch_pmap_op)
{
unsigned int cache_attr = 0;
cache_attr = object->wimg_bits & VM_WIMG_MASK;
assert(user_page_list);
if (cache_attr != VM_WIMG_USE_DEFAULT) {
PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
}
}
boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int);
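/*
 * vm_object_iopl_wire_full:
 *
 * Fast path for vm_object_iopl_request when every page of the object
 * is resident: wire each page where it sits.  Returns FALSE, forcing
 * the slow path, if any page is busy, absent, in error, being cleaned
 * or in the laundry.
 */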
boolean_t
vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag)
{
vm_page_t dst_page;
unsigned int entry;
int page_count;
int delayed_unlock = 0;
boolean_t retval = TRUE;
ppnum_t phys_page;
vm_object_lock_assert_exclusive(object);
assert(object->purgable != VM_PURGABLE_VOLATILE);
assert(object->purgable != VM_PURGABLE_EMPTY);
assert(object->pager == NULL);
assert(object->copy == NULL);
assert(object->shadow == NULL);
page_count = object->resident_page_count;
dst_page = (vm_page_t)vm_page_queue_first(&object->memq);
vm_page_lock_queues();
while (page_count--) {
if (dst_page->busy ||
dst_page->fictitious ||
dst_page->absent ||
dst_page->error ||
dst_page->cleaning ||
dst_page->restart ||
dst_page->laundry) {
retval = FALSE;
goto done;
}
if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->written_by_kernel == TRUE) {
retval = FALSE;
goto done;
}
dst_page->reference = TRUE;
vm_page_wire(dst_page, tag, FALSE);
if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
SET_PAGE_DIRTY(dst_page, FALSE);
}
entry = (unsigned int)(dst_page->offset / PAGE_SIZE);
assert(entry >= 0 && entry < object->resident_page_count);
lite_list[entry>>5] |= 1 << (entry & 31);
phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
if (phys_page > upl->highest_page)
upl->highest_page = phys_page;
if (user_page_list) {
user_page_list[entry].phys_addr = phys_page;
user_page_list[entry].absent = dst_page->absent;
user_page_list[entry].dirty = dst_page->dirty;
user_page_list[entry].free_when_done = dst_page->free_when_done;
user_page_list[entry].precious = dst_page->precious;
user_page_list[entry].device = FALSE;
user_page_list[entry].speculative = FALSE;
user_page_list[entry].cs_validated = FALSE;
user_page_list[entry].cs_tainted = FALSE;
user_page_list[entry].cs_nx = FALSE;
user_page_list[entry].needed = FALSE;
user_page_list[entry].mark = FALSE;
}
if (delayed_unlock++ > 256) {
delayed_unlock = 0;
lck_mtx_yield(&vm_page_queue_lock);
VM_CHECK_MEMORYSTATUS;
}
dst_page = (vm_page_t)vm_page_queue_next(&dst_page->listq);
}
done:
vm_page_unlock_queues();
VM_CHECK_MEMORYSTATUS;
return (retval);
}
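/*
 * vm_object_iopl_wire_empty:
 *
 * Fast path for vm_object_iopl_request when the object has no
 * resident pages: grab fresh pages, insert them wired (or absent if
 * zero-fill was suppressed) and fill in the page list, deferring the
 * ledger updates until the end.
 */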
kern_return_t
vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset, int page_count)
{
vm_page_t dst_page;
boolean_t no_zero_fill = FALSE;
int interruptible;
int pages_wired = 0;
int pages_inserted = 0;
int entry = 0;
uint64_t delayed_ledger_update = 0;
kern_return_t ret = KERN_SUCCESS;
int grab_options;
ppnum_t phys_page;
vm_object_lock_assert_exclusive(object);
assert(object->purgable != VM_PURGABLE_VOLATILE);
assert(object->purgable != VM_PURGABLE_EMPTY);
assert(object->pager == NULL);
assert(object->copy == NULL);
assert(object->shadow == NULL);
if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
interruptible = THREAD_ABORTSAFE;
else
interruptible = THREAD_UNINT;
if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO))
no_zero_fill = TRUE;
grab_options = 0;
#if CONFIG_SECLUDED_MEMORY
if (object->can_grab_secluded) {
grab_options |= VM_PAGE_GRAB_SECLUDED;
}
#endif
while (page_count--) {
while ((dst_page = vm_page_grab_options(grab_options))
== VM_PAGE_NULL) {
OSAddAtomic(page_count, &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
if (vm_page_wait(interruptible) == FALSE) {
OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
ret = MACH_SEND_INTERRUPTED;
goto done;
}
OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
}
if (no_zero_fill == FALSE)
vm_page_zero_fill(dst_page);
else
dst_page->absent = TRUE;
dst_page->reference = TRUE;
if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
SET_PAGE_DIRTY(dst_page, FALSE);
}
if (dst_page->absent == FALSE) {
assert(dst_page->vm_page_q_state == VM_PAGE_NOT_ON_Q);
assert(dst_page->wire_count == 0);
dst_page->wire_count++;
dst_page->vm_page_q_state = VM_PAGE_IS_WIRED;
assert(dst_page->wire_count);
pages_wired++;
PAGE_WAKEUP_DONE(dst_page);
}
pages_inserted++;
vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
lite_list[entry>>5] |= 1 << (entry & 31);
phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
if (phys_page > upl->highest_page)
upl->highest_page = phys_page;
if (user_page_list) {
user_page_list[entry].phys_addr = phys_page;
user_page_list[entry].absent = dst_page->absent;
user_page_list[entry].dirty = dst_page->dirty;
user_page_list[entry].free_when_done = FALSE;
user_page_list[entry].precious = FALSE;
user_page_list[entry].device = FALSE;
user_page_list[entry].speculative = FALSE;
user_page_list[entry].cs_validated = FALSE;
user_page_list[entry].cs_tainted = FALSE;
user_page_list[entry].cs_nx = FALSE;
user_page_list[entry].needed = FALSE;
user_page_list[entry].mark = FALSE;
}
entry++;
*dst_offset += PAGE_SIZE_64;
}
done:
if (pages_wired) {
vm_page_lockspin_queues();
vm_page_wire_count += pages_wired;
vm_page_unlock_queues();
}
if (pages_inserted) {
if (object->internal) {
OSAddAtomic(pages_inserted, &vm_page_internal_count);
} else {
OSAddAtomic(pages_inserted, &vm_page_external_count);
}
}
if (delayed_ledger_update) {
task_t owner;
owner = object->vo_purgeable_owner;
assert(owner);
ledger_credit(owner->ledger,
task_ledgers.purgeable_nonvolatile,
delayed_ledger_update);
ledger_credit(owner->ledger,
task_ledgers.phys_footprint,
delayed_ledger_update);
}
return (ret);
}
unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
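/*
 * vm_object_iopl_request:
 *
 * Build an IO-wired UPL directly against "object", faulting in and
 * wiring any pages that are not already resident.  A physically
 * contiguous object is described as device memory without touching
 * individual pages.
 */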
kern_return_t
vm_object_iopl_request(
vm_object_t object,
vm_object_offset_t offset,
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
upl_control_flags_t cntrl_flags,
vm_tag_t tag)
{
vm_page_t dst_page;
vm_object_offset_t dst_offset;
upl_size_t xfer_size;
upl_t upl = NULL;
unsigned int entry;
wpl_array_t lite_list = NULL;
int no_zero_fill = FALSE;
unsigned int size_in_pages;
u_int32_t psize;
kern_return_t ret;
vm_prot_t prot;
struct vm_object_fault_info fault_info;
struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
struct vm_page_delayed_work *dwp;
int dw_count;
int dw_limit;
int dw_index;
boolean_t caller_lookup;
int io_tracking_flag = 0;
int interruptible;
ppnum_t phys_page;
boolean_t set_cache_attr_needed = FALSE;
boolean_t free_wired_pages = FALSE;
boolean_t fast_path_empty_req = FALSE;
boolean_t fast_path_full_req = FALSE;
if (cntrl_flags & ~UPL_VALID_FLAGS) {
return KERN_INVALID_VALUE;
}
if (vm_lopage_needed == FALSE)
cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
return KERN_INVALID_VALUE;
if (object->phys_contiguous) {
if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
return KERN_INVALID_ADDRESS;
if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
return KERN_INVALID_ADDRESS;
}
}
if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO))
no_zero_fill = TRUE;
if (cntrl_flags & UPL_COPYOUT_FROM)
prot = VM_PROT_READ;
else
prot = VM_PROT_READ | VM_PROT_WRITE;
if ((!object->internal) && (object->paging_offset != 0))
panic("vm_object_iopl_request: external object with non-zero paging offset\n");
#if CONFIG_IOSCHED || UPL_DEBUG
if ((object->io_tracking && object != kernel_object) || upl_debug_enabled)
io_tracking_flag |= UPL_CREATE_IO_TRACKING;
#endif
#if CONFIG_IOSCHED
if (object->io_tracking) {
if (object != kernel_object)
io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
}
#endif
if (object->phys_contiguous)
psize = PAGE_SIZE;
else
psize = size;
if (cntrl_flags & UPL_SET_INTERNAL) {
upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
if (size == 0) {
user_page_list = NULL;
lite_list = NULL;
}
} else {
upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
lite_list = NULL;
}
}
if (user_page_list)
user_page_list[0].device = FALSE;
*upl_ptr = upl;
upl->map_object = object;
upl->size = size;
size_in_pages = size / PAGE_SIZE;
if (object == kernel_object &&
!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
upl->flags |= UPL_KERNEL_OBJECT;
#if UPL_DEBUG
vm_object_lock(object);
#else
vm_object_lock_shared(object);
#endif
} else {
vm_object_lock(object);
vm_object_activity_begin(object);
}
upl->offset = offset + object->paging_offset;
if (cntrl_flags & UPL_BLOCK_ACCESS) {
upl->flags |= UPL_ACCESS_BLOCKED;
}
#if CONFIG_IOSCHED || UPL_DEBUG
if (upl->flags & UPL_TRACKED_BY_OBJECT) {
vm_object_activity_begin(object);
queue_enter(&object->uplq, upl, upl_t, uplq);
}
#endif
if (object->phys_contiguous) {
if (upl->flags & UPL_ACCESS_BLOCKED) {
assert(!object->blocked_access);
object->blocked_access = TRUE;
}
vm_object_unlock(object);
upl->flags |= UPL_DEVICE_MEMORY;
upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1)>>PAGE_SHIFT);
if (user_page_list) {
user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset)>>PAGE_SHIFT);
user_page_list[0].device = TRUE;
}
if (page_list_count != NULL) {
if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
else
*page_list_count = 1;
}
return KERN_SUCCESS;
}
if (object != kernel_object && object != compressor_object) {
#if VM_OBJECT_TRACKING_OP_TRUESHARE
if (!object->true_share &&
vm_object_tracking_inited) {
void *bt[VM_OBJECT_TRACKING_BTDEPTH];
int num = 0;
num = OSBacktrace(bt,
VM_OBJECT_TRACKING_BTDEPTH);
btlog_add_entry(vm_object_tracking_btlog,
object,
VM_OBJECT_TRACKING_OP_TRUESHARE,
bt,
num);
}
#endif
vm_object_lock_assert_exclusive(object);
object->true_share = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
}
if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
object->copy != VM_OBJECT_NULL) {
vm_object_update(object,
offset,
size,
NULL,
NULL,
FALSE,
MEMORY_OBJECT_COPY_SYNC,
VM_PROT_NO_CHANGE);
#if DEVELOPMENT || DEBUG
iopl_cow++;
iopl_cow_pages += size >> PAGE_SHIFT;
#endif
}
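/*
 * Fast paths: when the request spans the whole object and the object
 * has no pager, shadow or copy, we can either wire every resident
 * page in place (full) or grab and insert brand-new pages (empty)
 * without going through vm_fault_page.
 */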
if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
object->purgable != VM_PURGABLE_VOLATILE &&
object->purgable != VM_PURGABLE_EMPTY &&
object->copy == NULL &&
size == object->vo_size &&
offset == 0 &&
object->shadow == NULL &&
object->pager == NULL)
{
if (object->resident_page_count == size_in_pages)
{
assert(object != compressor_object);
assert(object != kernel_object);
fast_path_full_req = TRUE;
}
else if (object->resident_page_count == 0)
{
assert(object != compressor_object);
assert(object != kernel_object);
fast_path_empty_req = TRUE;
set_cache_attr_needed = TRUE;
}
}
if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
interruptible = THREAD_ABORTSAFE;
else
interruptible = THREAD_UNINT;
entry = 0;
xfer_size = size;
dst_offset = offset;
dw_count = 0;
if (fast_path_full_req) {
if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE)
goto finish;
} else if (fast_path_empty_req) {
if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
ret = KERN_MEMORY_ERROR;
goto return_err;
}
ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, tag, &dst_offset, size_in_pages);
if (ret) {
free_wired_pages = TRUE;
goto return_err;
}
goto finish;
}
fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
fault_info.user_tag = 0;
fault_info.lo_offset = offset;
fault_info.hi_offset = offset + xfer_size;
fault_info.no_cache = FALSE;
fault_info.stealth = FALSE;
fault_info.io_sync = FALSE;
fault_info.cs_bypass = FALSE;
fault_info.mark_zf_absent = TRUE;
fault_info.interruptible = interruptible;
fault_info.batch_pmap_op = TRUE;
dwp = &dw_array[0];
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
while (xfer_size) {
vm_fault_return_t result;
dwp->dw_mask = 0;
if (fast_path_full_req) {
if (lite_list[entry>>5] & (1 << (entry & 31)))
goto skip_page;
}
dst_page = vm_page_lookup(object, dst_offset);
if (dst_page == VM_PAGE_NULL ||
dst_page->busy ||
dst_page->error ||
dst_page->restart ||
dst_page->absent ||
dst_page->fictitious) {
if (object == kernel_object)
panic("vm_object_iopl_request: missing/bad page in kernel object\n");
if (object == compressor_object)
panic("vm_object_iopl_request: missing/bad page in compressor object\n");
if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
ret = KERN_MEMORY_ERROR;
goto return_err;
}
set_cache_attr_needed = TRUE;
caller_lookup = TRUE;
do {
vm_page_t top_page;
kern_return_t error_code;
fault_info.cluster_size = xfer_size;
vm_object_paging_begin(object);
result = vm_fault_page(object, dst_offset,
prot | VM_PROT_WRITE, FALSE,
caller_lookup,
&prot, &dst_page, &top_page,
(int *)0,
&error_code, no_zero_fill,
FALSE, &fault_info);
caller_lookup = FALSE;
switch (result) {
case VM_FAULT_SUCCESS:
if ( !dst_page->absent) {
PAGE_WAKEUP_DONE(dst_page);
} else {
}
if (top_page != VM_PAGE_NULL) {
vm_object_t local_object;
local_object = VM_PAGE_OBJECT(top_page);
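/* comparing 2 packed pointers */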
if (top_page->vm_page_object != dst_page->vm_page_object) {
vm_object_lock(local_object);
VM_PAGE_FREE(top_page);
vm_object_paging_end(local_object);
vm_object_unlock(local_object);
} else {
VM_PAGE_FREE(top_page);
vm_object_paging_end(local_object);
}
}
vm_object_paging_end(object);
break;
case VM_FAULT_RETRY:
vm_object_lock(object);
break;
case VM_FAULT_MEMORY_SHORTAGE:
OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
if (vm_page_wait(interruptible)) {
OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
vm_object_lock(object);
break;
}
OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
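/* fall thru */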
case VM_FAULT_INTERRUPTED:
error_code = MACH_SEND_INTERRUPTED;
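/* fall thru */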
case VM_FAULT_MEMORY_ERROR:
memory_error:
ret = (error_code ? error_code: KERN_MEMORY_ERROR);
vm_object_lock(object);
goto return_err;
case VM_FAULT_SUCCESS_NO_VM_PAGE:
vm_object_paging_end(object);
vm_object_unlock(object);
goto memory_error;
default:
panic("vm_object_iopl_request: unexpected error"
" 0x%x from vm_fault_page()\n", result);
}
} while (result != VM_FAULT_SUCCESS);
}
phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
if (upl->flags & UPL_KERNEL_OBJECT)
goto record_phys_addr;
if (dst_page->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
dst_page->busy = TRUE;
goto record_phys_addr;
}
if (dst_page->cleaning) {
vm_object_iopl_request_sleep_for_cleaning++;
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
if (dst_page->laundry)
vm_pageout_steal_laundry(dst_page, FALSE);
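/*
 * If the caller needs pages below max_valid_dma_address and this one
 * lies above it, replace it with a page from the lo-page pool,
 * preserving its contents and ref/mod state.
 */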
if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
vm_page_t low_page;
int refmod;
if (VM_PAGE_WIRED(dst_page)) {
ret = KERN_PROTECTION_FAILURE;
goto return_err;
}
low_page = vm_page_grablo();
if (low_page == VM_PAGE_NULL) {
ret = KERN_RESOURCE_SHORTAGE;
goto return_err;
}
if (dst_page->pmapped)
refmod = pmap_disconnect(phys_page);
else
refmod = 0;
if (!dst_page->absent)
vm_page_copy(dst_page, low_page);
low_page->reference = dst_page->reference;
low_page->dirty = dst_page->dirty;
low_page->absent = dst_page->absent;
if (refmod & VM_MEM_REFERENCED)
low_page->reference = TRUE;
if (refmod & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(low_page, FALSE);
}
vm_page_replace(low_page, object, dst_offset);
dst_page = low_page;
if ( !dst_page->absent)
dst_page->busy = FALSE;
phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
}
if ( !dst_page->busy)
dwp->dw_mask |= DW_vm_page_wire;
if (cntrl_flags & UPL_BLOCK_ACCESS) {
assert(!dst_page->fictitious);
dst_page->busy = TRUE;
}
dwp->dw_mask |= DW_set_reference;
if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
SET_PAGE_DIRTY(dst_page, TRUE);
}
if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->written_by_kernel == TRUE) {
pmap_sync_page_attributes_phys(phys_page);
dst_page->written_by_kernel = FALSE;
}
record_phys_addr:
if (dst_page->busy)
upl->flags |= UPL_HAS_BUSY;
lite_list[entry>>5] |= 1 << (entry & 31);
if (phys_page > upl->highest_page)
upl->highest_page = phys_page;
if (user_page_list) {
user_page_list[entry].phys_addr = phys_page;
user_page_list[entry].free_when_done = dst_page->free_when_done;
user_page_list[entry].absent = dst_page->absent;
user_page_list[entry].dirty = dst_page->dirty;
user_page_list[entry].precious = dst_page->precious;
user_page_list[entry].device = FALSE;
user_page_list[entry].needed = FALSE;
if (dst_page->clustered == TRUE)
user_page_list[entry].speculative = (dst_page->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
else
user_page_list[entry].speculative = FALSE;
user_page_list[entry].cs_validated = dst_page->cs_validated;
user_page_list[entry].cs_tainted = dst_page->cs_tainted;
user_page_list[entry].cs_nx = dst_page->cs_nx;
user_page_list[entry].mark = FALSE;
}
if (object != kernel_object && object != compressor_object) {
if (dst_page->clustered)
VM_PAGE_CONSUME_CLUSTERED(dst_page);
}
skip_page:
entry++;
dst_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
if (dwp->dw_mask) {
VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
if (dw_count >= dw_limit) {
vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
}
assert(entry == size_in_pages);
if (dw_count)
vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
finish:
if (user_page_list && set_cache_attr_needed == TRUE)
vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE);
if (page_list_count != NULL) {
if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
else if (*page_list_count > size_in_pages)
*page_list_count = size_in_pages;
}
vm_object_unlock(object);
if (cntrl_flags & UPL_BLOCK_ACCESS) {
vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
PMAP_NULL, 0, VM_PROT_NONE);
assert(!object->blocked_access);
object->blocked_access = TRUE;
}
return KERN_SUCCESS;
return_err:
dw_index = 0;
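/*
 * Error unwind: every page between the original offset and dst_offset
 * has been wired, except those still sitting in the pending
 * delayed-work array; unwire or free each one before destroying
 * the UPL.
 */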
for (; offset < dst_offset; offset += PAGE_SIZE) {
boolean_t need_unwire;
dst_page = vm_page_lookup(object, offset);
if (dst_page == VM_PAGE_NULL)
panic("vm_object_iopl_request: Wired page missing. \n");
need_unwire = TRUE;
if (dw_count) {
if (dw_array[dw_index].dw_m == dst_page) {
need_unwire = FALSE;
dw_index++;
dw_count--;
}
}
vm_page_lock_queues();
if (dst_page->absent || free_wired_pages == TRUE) {
vm_page_free(dst_page);
need_unwire = FALSE;
} else {
if (need_unwire == TRUE)
vm_page_unwire(dst_page, TRUE);
PAGE_WAKEUP_DONE(dst_page);
}
vm_page_unlock_queues();
if (need_unwire == TRUE)
VM_STAT_INCR(reactivations);
}
#if UPL_DEBUG
upl->upl_state = 2;
#endif
if (! (upl->flags & UPL_KERNEL_OBJECT)) {
vm_object_activity_end(object);
vm_object_collapse(object, 0, TRUE);
}
vm_object_unlock(object);
upl_destroy(upl);
return ret;
}
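/*
 * upl_transpose:
 *
 * Exchange the contents of the two UPLs' objects via
 * vm_object_transpose and swap the UPLs' map_object pointers (and
 * object tracking queues) to match.
 */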
kern_return_t
upl_transpose(
upl_t upl1,
upl_t upl2)
{
kern_return_t retval;
boolean_t upls_locked;
vm_object_t object1, object2;
if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR) || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
return KERN_INVALID_ARGUMENT;
}
upls_locked = FALSE;
if (upl1 < upl2) {
upl_lock(upl1);
upl_lock(upl2);
} else {
upl_lock(upl2);
upl_lock(upl1);
}
upls_locked = TRUE;
object1 = upl1->map_object;
object2 = upl2->map_object;
if (upl1->offset != 0 || upl2->offset != 0 ||
upl1->size != upl2->size) {
retval = KERN_INVALID_VALUE;
goto done;
}
retval = vm_object_transpose(object1, object2,
(vm_object_size_t) upl1->size);
if (retval == KERN_SUCCESS) {
#if CONFIG_IOSCHED || UPL_DEBUG
if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
vm_object_lock(object1);
vm_object_lock(object2);
}
if (upl1->flags & UPL_TRACKED_BY_OBJECT)
queue_remove(&object1->uplq, upl1, upl_t, uplq);
if (upl2->flags & UPL_TRACKED_BY_OBJECT)
queue_remove(&object2->uplq, upl2, upl_t, uplq);
#endif
upl1->map_object = object2;
upl2->map_object = object1;
#if CONFIG_IOSCHED || UPL_DEBUG
if (upl1->flags & UPL_TRACKED_BY_OBJECT)
queue_enter(&object2->uplq, upl1, upl_t, uplq);
if (upl2->flags & UPL_TRACKED_BY_OBJECT)
queue_enter(&object1->uplq, upl2, upl_t, uplq);
if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
vm_object_unlock(object2);
vm_object_unlock(object1);
}
#endif
}
done:
if (upls_locked) {
upl_unlock(upl1);
upl_unlock(upl2);
upls_locked = FALSE;
}
return retval;
}
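/*
 * Mark "count" pages starting at "index" in the UPL's page list as
 * needed, so that speculatively clustered pages in that range are
 * not discarded by an abort.
 */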
void
upl_range_needed(
upl_t upl,
int index,
int count)
{
upl_page_info_t *user_page_list;
int size_in_pages;
if ( !(upl->flags & UPL_INTERNAL) || count <= 0)
return;
size_in_pages = upl->size / PAGE_SIZE;
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
while (count-- && index < size_in_pages)
user_page_list[index++].needed = TRUE;
}
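/*
 * A small window of kernel virtual space, reserved at startup, used
 * to map single pages for paging purposes when no 1-to-1 physical
 * mapping is available.
 */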
decl_simple_lock_data(,vm_paging_lock)
#define VM_PAGING_NUM_PAGES 64
vm_map_offset_t vm_paging_base_address = 0;
boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
int vm_paging_max_index = 0;
int vm_paging_page_waiter = 0;
int vm_paging_page_waiter_total = 0;
unsigned long vm_paging_no_kernel_page = 0;
unsigned long vm_paging_objects_mapped = 0;
unsigned long vm_paging_pages_mapped = 0;
unsigned long vm_paging_objects_mapped_slow = 0;
unsigned long vm_paging_pages_mapped_slow = 0;
void
vm_paging_map_init(void)
{
kern_return_t kr;
vm_map_offset_t page_map_offset;
vm_map_entry_t map_entry;
assert(vm_paging_base_address == 0);
page_map_offset = 0;
kr = vm_map_find_space(kernel_map,
&page_map_offset,
VM_PAGING_NUM_PAGES * PAGE_SIZE,
0,
0,
VM_MAP_KERNEL_FLAGS_NONE,
VM_KERN_MEMORY_NONE,
&map_entry);
if (kr != KERN_SUCCESS) {
panic("vm_paging_map_init: kernel_map full\n");
}
VME_OBJECT_SET(map_entry, kernel_object);
VME_OFFSET_SET(map_entry, page_map_offset);
map_entry->protection = VM_PROT_NONE;
map_entry->max_protection = VM_PROT_NONE;
map_entry->permanent = TRUE;
vm_object_reference(kernel_object);
vm_map_unlock(kernel_map);
assert(vm_paging_base_address == 0);
vm_paging_base_address = page_map_offset;
}
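/*
 * vm_paging_map_object:
 *
 * Map "page" (or, when page is VM_PAGE_NULL, *size bytes of "object"
 * starting at "offset") into kernel virtual space.  A single page is
 * served from the physical map or the reserved paging window when
 * possible; otherwise the object may be temporarily unlocked and
 * mapped through vm_map_enter.  *need_unmap tells the caller whether
 * vm_paging_unmap_object must be called afterwards.
 */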
kern_return_t
vm_paging_map_object(
vm_page_t page,
vm_object_t object,
vm_object_offset_t offset,
vm_prot_t protection,
boolean_t can_unlock_object,
vm_map_size_t *size,
vm_map_offset_t *address,
boolean_t *need_unmap)
{
kern_return_t kr;
vm_map_offset_t page_map_offset;
vm_map_size_t map_size;
vm_object_offset_t object_offset;
int i;
if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
#if __x86_64__
*address = (vm_map_offset_t)
PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) <<
PAGE_SHIFT);
*need_unmap = FALSE;
return KERN_SUCCESS;
#elif __arm__ || __arm64__
*address = (vm_map_offset_t)
phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT);
*need_unmap = FALSE;
return KERN_SUCCESS;
#else
#warn "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..."
#endif
assert(page->busy);
simple_lock(&vm_paging_lock);
page_map_offset = 0;
for (;;) {
for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
if (vm_paging_page_inuse[i] == FALSE) {
page_map_offset =
vm_paging_base_address +
(i * PAGE_SIZE);
break;
}
}
if (page_map_offset != 0) {
break;
}
if (can_unlock_object) {
break;
}
vm_paging_page_waiter_total++;
vm_paging_page_waiter++;
kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT);
if (kr == THREAD_WAITING) {
simple_unlock(&vm_paging_lock);
kr = thread_block(THREAD_CONTINUE_NULL);
simple_lock(&vm_paging_lock);
}
vm_paging_page_waiter--;
}
if (page_map_offset != 0) {
if (i > vm_paging_max_index) {
vm_paging_max_index = i;
}
vm_paging_page_inuse[i] = TRUE;
simple_unlock(&vm_paging_lock);
page->pmapped = TRUE;
PMAP_ENTER(kernel_pmap,
page_map_offset,
page,
protection,
VM_PROT_NONE,
0,
TRUE,
kr);
assert(kr == KERN_SUCCESS);
vm_paging_objects_mapped++;
vm_paging_pages_mapped++;
*address = page_map_offset;
*need_unmap = TRUE;
#if KASAN
kasan_notify_address(page_map_offset, PAGE_SIZE);
#endif
return KERN_SUCCESS;
}
vm_paging_no_kernel_page++;
simple_unlock(&vm_paging_lock);
}
if (! can_unlock_object) {
*address = 0;
*size = 0;
*need_unmap = FALSE;
return KERN_NOT_SUPPORTED;
}
object_offset = vm_object_trunc_page(offset);
map_size = vm_map_round_page(*size,
VM_MAP_PAGE_MASK(kernel_map));
vm_object_reference_locked(object);
vm_object_unlock(object);
kr = vm_map_enter(kernel_map,
address,
map_size,
0,
VM_FLAGS_ANYWHERE,
VM_MAP_KERNEL_FLAGS_NONE,
VM_KERN_MEMORY_NONE,
object,
object_offset,
FALSE,
protection,
VM_PROT_ALL,
VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
*address = 0;
*size = 0;
*need_unmap = FALSE;
vm_object_deallocate(object);
vm_object_lock(object);
return kr;
}
*size = map_size;
vm_object_lock(object);
for (page_map_offset = 0;
map_size != 0;
map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
page = vm_page_lookup(object, offset + page_map_offset);
if (page == VM_PAGE_NULL) {
printf("vm_paging_map_object: no page !?");
vm_object_unlock(object);
kr = vm_map_remove(kernel_map, *address, *size,
VM_MAP_NO_FLAGS);
assert(kr == KERN_SUCCESS);
*address = 0;
*size = 0;
*need_unmap = FALSE;
vm_object_lock(object);
return KERN_MEMORY_ERROR;
}
page->pmapped = TRUE;
PMAP_ENTER(kernel_pmap,
*address + page_map_offset,
page,
protection,
VM_PROT_NONE,
0,
TRUE,
kr);
assert(kr == KERN_SUCCESS);
#if KASAN
kasan_notify_address(*address + page_map_offset, PAGE_SIZE);
#endif
}
vm_paging_objects_mapped_slow++;
vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
*need_unmap = TRUE;
return KERN_SUCCESS;
}
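/*
 * vm_paging_unmap_object:
 *
 * Undo a mapping set up by vm_paging_map_object().  Addresses within
 * the reserved pool are pmap-removed and their slot released (waking
 * any waiter); any other range is assumed to be a vm_map_enter()
 * mapping and is removed from kernel_map, dropping the object lock
 * around vm_map_remove() to avoid lock-ordering trouble.
 */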
void
vm_paging_unmap_object(
vm_object_t object,
vm_map_offset_t start,
vm_map_offset_t end)
{
kern_return_t kr;
int i;
if ((vm_paging_base_address == 0) ||
(start < vm_paging_base_address) ||
(end > (vm_paging_base_address
+ (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
if (object != VM_OBJECT_NULL) {
vm_object_unlock(object);
}
kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
if (object != VM_OBJECT_NULL) {
vm_object_lock(object);
}
assert(kr == KERN_SUCCESS);
} else {
assert(end - start == PAGE_SIZE);
i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
pmap_remove(kernel_pmap, start, end);
simple_lock(&vm_paging_lock);
vm_paging_page_inuse[i] = FALSE;
if (vm_paging_page_waiter) {
thread_wakeup(&vm_paging_page_waiter);
}
simple_unlock(&vm_paging_lock);
}
}
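/*
 * vm_pageout_steal_laundry:
 *
 * The caller is taking ownership of a page that is on its way through
 * the pageout machinery ("laundry"): cancel its free_when_done intent
 * and pull it back via vm_pageout_throttle_up(), grabbing the page
 * queues lock if the caller does not already hold it.
 */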
void
vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
{
if (!queues_locked) {
vm_page_lockspin_queues();
}
page->free_when_done = FALSE;
vm_pageout_throttle_up(page);
vm_page_steal_pageout_page++;
if (!queues_locked) {
vm_page_unlock_queues();
}
}
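/*
 * Vector UPLs.  A vector UPL aggregates up to MAX_VECTOR_UPL_ELEMENTS
 * sub-UPLs so that a logically contiguous I/O built from discontiguous
 * pieces can be handled as a single wrapper UPL.  The routines below
 * create the wrapper, attach/invalidate sub-UPLs, and translate
 * offsets and per-element I/O state.
 */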
upl_t
vector_upl_create(vm_offset_t upl_offset)
{
int vector_upl_size = sizeof(struct _vector_upl);
int i=0;
upl_t upl;
vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
upl = upl_create(0,UPL_VECTOR,0);
upl->vector_upl = vector_upl;
upl->offset = upl_offset;
vector_upl->size = 0;
vector_upl->offset = upl_offset;
vector_upl->invalid_upls=0;
vector_upl->num_upls=0;
vector_upl->pagelist = NULL;
for(i=0; i < MAX_VECTOR_UPL_ELEMENTS ; i++) {
vector_upl->upl_iostates[i].size = 0;
vector_upl->upl_iostates[i].offset = 0;
}
return upl;
}
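/*
 * Release a vector UPL's bookkeeping.  Every sub-UPL must already
 * have been invalidated (invalid_upls == num_upls) or we panic.  The
 * 0xfeedfeed store only poisons the local pointer; it is one of the
 * poison values that vector_upl_is_valid() screens for.
 */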
void
vector_upl_deallocate(upl_t upl)
{
if(upl) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
if(vector_upl->invalid_upls != vector_upl->num_upls)
panic("Deallocating non-empty Vectored UPL\n");
kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
vector_upl->invalid_upls=0;
vector_upl->num_upls = 0;
vector_upl->pagelist = NULL;
vector_upl->size = 0;
vector_upl->offset = 0;
kfree(vector_upl, sizeof(struct _vector_upl));
vector_upl = (vector_upl_t)0xfeedfeed;
}
else
panic("vector_upl_deallocate was passed a non-vectored upl\n");
}
else
panic("vector_upl_deallocate was passed a NULL upl\n");
}
boolean_t
vector_upl_is_valid(upl_t upl)
{
if(upl && ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef)
return FALSE;
else
return TRUE;
}
return FALSE;
}
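/*
 * vector_upl_set_subupl:
 *
 * Dual-purpose.  With a non-zero "io_size", append "subupl" to the
 * vector, growing the vector and its wrapper UPL by at least one
 * page; the caller must not exceed MAX_VECTOR_UPL_ELEMENTS.  With
 * io_size == 0, invalidate an existing sub-UPL slot.  Returns TRUE
 * once the last sub-UPL has been invalidated, i.e. when the wrapper
 * itself may be deallocated.
 */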
boolean_t
vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size)
{
if(vector_upl_is_valid(upl)) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
if(subupl) {
if(io_size) {
if(io_size < PAGE_SIZE)
io_size = PAGE_SIZE;
subupl->vector_upl = (void*)vector_upl;
vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
vector_upl->size += io_size;
upl->size += io_size;
}
else {
uint32_t i=0,invalid_upls=0;
for(i = 0; i < vector_upl->num_upls; i++) {
if(vector_upl->upl_elems[i] == subupl)
break;
}
if(i == vector_upl->num_upls)
panic("Trying to remove sub-upl when none exists");
vector_upl->upl_elems[i] = NULL;
invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
if(invalid_upls == vector_upl->num_upls)
return TRUE;
else
return FALSE;
}
}
else
panic("vector_upl_set_subupl was passed a NULL upl element\n");
}
else
panic("vector_upl_set_subupl was passed a non-vectored upl\n");
}
else
panic("vector_upl_set_subupl was passed a NULL upl\n");
return FALSE;
}
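/*
 * vector_upl_set_pagelist:
 *
 * Build the wrapper's flat page list by concatenating the internal
 * page lists of all sub-UPLs, and propagate the highest physical
 * page number.  The list is sized from vector_upl->size, so every
 * sub-UPL must be attached before this runs.
 */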
void
vector_upl_set_pagelist(upl_t upl)
{
if(vector_upl_is_valid(upl)) {
uint32_t i=0;
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
vm_offset_t pagelist_size=0, cur_upl_pagelist_size=0;
vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));
for(i=0; i < vector_upl->num_upls; i++) {
cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
pagelist_size += cur_upl_pagelist_size;
if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
upl->highest_page = vector_upl->upl_elems[i]->highest_page;
}
assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
}
else
panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
}
else
panic("vector_upl_set_pagelist was passed a NULL upl\n");
}
upl_t
vector_upl_subupl_byindex(upl_t upl, uint32_t index)
{
if(vector_upl_is_valid(upl)) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
if(index < vector_upl->num_upls)
return vector_upl->upl_elems[index];
}
else
panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
}
return NULL;
}
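/*
 * vector_upl_subupl_byoffset:
 *
 * Find the sub-UPL whose recorded iostate covers *upl_offset, clip
 * *upl_size to that sub-UPL's extent, and rewrite *upl_offset to be
 * relative to the sub-UPL.  Returns NULL if no sub-UPL covers the
 * offset.
 */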
upl_t
vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
{
if(vector_upl_is_valid(upl)) {
uint32_t i=0;
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
upl_t subupl = NULL;
vector_upl_iostates_t subupl_state;
for(i=0; i < vector_upl->num_upls; i++) {
subupl = vector_upl->upl_elems[i];
subupl_state = vector_upl->upl_iostates[i];
if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
if(subupl == NULL)
return NULL;
if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
*upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
if(*upl_size > subupl_state.size)
*upl_size = subupl_state.size;
}
if(*upl_offset >= subupl_state.offset)
*upl_offset -= subupl_state.offset;
else if(i)
panic("Vector UPL offset miscalculation\n");
return subupl;
}
}
}
else
panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
}
return NULL;
}
void
vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
{
*v_upl_submap = NULL;
if(vector_upl_is_valid(upl)) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
*v_upl_submap = vector_upl->submap;
*submap_dst_addr = vector_upl->submap_dst_addr;
}
else
panic("vector_upl_get_submap was passed a non-vectored UPL\n");
}
else
panic("vector_upl_get_submap was passed a null UPL\n");
}
void
vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
{
if(vector_upl_is_valid(upl)) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
vector_upl->submap = submap;
vector_upl->submap_dst_addr = submap_dst_addr;
}
else
panic("vector_upl_get_submap was passed a non-vectored UPL\n");
}
else
panic("vector_upl_get_submap was passed a NULL UPL\n");
}
void
vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
{
if(vector_upl_is_valid(upl)) {
uint32_t i = 0;
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
for(i = 0; i < vector_upl->num_upls; i++) {
if(vector_upl->upl_elems[i] == subupl)
break;
}
if(i == vector_upl->num_upls)
panic("setting sub-upl iostate when none exists");
vector_upl->upl_iostates[i].offset = offset;
if(size < PAGE_SIZE)
size = PAGE_SIZE;
vector_upl->upl_iostates[i].size = size;
}
else
panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
}
else
panic("vector_upl_set_iostate was passed a NULL UPL\n");
}
void
vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
{
if(vector_upl_is_valid(upl)) {
uint32_t i = 0;
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
for(i = 0; i < vector_upl->num_upls; i++) {
if(vector_upl->upl_elems[i] == subupl)
break;
}
if(i == vector_upl->num_upls)
panic("getting sub-upl iostate when none exists");
*offset = vector_upl->upl_iostates[i].offset;
*size = vector_upl->upl_iostates[i].size;
}
else
panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
}
else
panic("vector_upl_get_iostate was passed a NULL UPL\n");
}
void
vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
{
if(vector_upl_is_valid(upl)) {
vector_upl_t vector_upl = upl->vector_upl;
if(vector_upl) {
if(index < vector_upl->num_upls) {
*offset = vector_upl->upl_iostates[index].offset;
*size = vector_upl->upl_iostates[index].size;
}
else
*offset = *size = 0;
}
else
panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
}
else
panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
}
upl_page_info_t *
upl_get_internal_vectorupl_pagelist(upl_t upl)
{
return ((vector_upl_t)(upl->vector_upl))->pagelist;
}
void *
upl_get_internal_vectorupl(upl_t upl)
{
return upl->vector_upl;
}
vm_size_t
upl_get_internal_pagelist_offset(void)
{
return sizeof(struct upl);
}
void
upl_clear_dirty(
upl_t upl,
boolean_t value)
{
if (value) {
upl->flags |= UPL_CLEAR_DIRTY;
} else {
upl->flags &= ~UPL_CLEAR_DIRTY;
}
}
void
upl_set_referenced(
upl_t upl,
boolean_t value)
{
upl_lock(upl);
if (value) {
upl->ext_ref_count++;
} else {
if (!upl->ext_ref_count) {
panic("upl_set_referenced not %p\n", upl);
}
upl->ext_ref_count--;
}
upl_unlock(upl);
}
#if CONFIG_IOSCHED
void
upl_set_blkno(
upl_t upl,
vm_offset_t upl_offset,
int io_size,
int64_t blkno)
{
int i,j;
if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0)
return;
assert(upl->upl_reprio_info != 0);
for(i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
}
}
#endif
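/*
 * Shared-region sliding.  Pages of a "slid" object (the shared cache)
 * contain embedded addresses that must be rebased by the randomized
 * slide amount before use; vm_page_is_slideable() reports whether a
 * page still needs that treatment.
 */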
boolean_t
vm_page_is_slideable(vm_page_t m)
{
boolean_t result = FALSE;
vm_shared_region_slide_info_t si;
vm_object_t m_object;
m_object = VM_PAGE_OBJECT(m);
vm_object_lock_assert_held(m_object);
if (!m_object->object_slid) {
goto done;
}
si = m_object->vo_slide_info;
if (si == NULL) {
goto done;
}
if(!m->slid && (si->start <= m->offset && si->end > m->offset)) {
result = TRUE;
}
done:
return result;
}
int vm_page_slide_counter = 0;
int vm_page_slide_errors = 0;
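/*
 * vm_page_slide:
 *
 * Rebase a shared-region page in place.  The page is mapped into the
 * kernel via vm_paging_map_object() unless the caller supplied a
 * mapping, each PAGE_SIZE_FOR_SR_SLIDE chunk is slid, and the page is
 * marked clean so the pager's unslid copy stays authoritative.  On
 * failure the page is marked in error and counted.
 */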
kern_return_t
vm_page_slide(
vm_page_t page,
vm_map_offset_t kernel_mapping_offset)
{
kern_return_t kr;
vm_map_size_t kernel_mapping_size;
boolean_t kernel_mapping_needs_unmap;
vm_offset_t kernel_vaddr;
uint32_t pageIndex;
uint32_t slide_chunk;
vm_object_t page_object;
page_object = VM_PAGE_OBJECT(page);
assert(!page->slid);
assert(page_object->object_slid);
vm_object_lock_assert_exclusive(page_object);
if (page->error)
return KERN_FAILURE;
vm_object_paging_begin(page_object);
if (kernel_mapping_offset == 0) {
kernel_mapping_size = PAGE_SIZE;
kernel_mapping_needs_unmap = FALSE;
kr = vm_paging_map_object(page,
page_object,
page->offset,
VM_PROT_READ | VM_PROT_WRITE,
FALSE,
&kernel_mapping_size,
&kernel_mapping_offset,
&kernel_mapping_needs_unmap);
if (kr != KERN_SUCCESS) {
panic("vm_page_slide: "
"could not map page in kernel: 0x%x\n",
kr);
}
} else {
kernel_mapping_size = 0;
kernel_mapping_needs_unmap = FALSE;
}
kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
assert(!page->slid);
assert(page_object->object_slid);
pageIndex = (uint32_t)((page->offset -
page_object->vo_slide_info->start) /
PAGE_SIZE_FOR_SR_SLIDE);
for (slide_chunk = 0;
slide_chunk < PAGE_SIZE / PAGE_SIZE_FOR_SR_SLIDE;
slide_chunk++) {
kr = vm_shared_region_slide_page(page_object->vo_slide_info,
(kernel_vaddr +
(slide_chunk *
PAGE_SIZE_FOR_SR_SLIDE)),
(pageIndex + slide_chunk));
if (kr != KERN_SUCCESS) {
break;
}
}
vm_page_slide_counter++;
if (kernel_mapping_needs_unmap) {
vm_paging_unmap_object(page_object,
kernel_vaddr,
kernel_vaddr + PAGE_SIZE);
}
page->dirty = FALSE;
pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(page), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
if (kr != KERN_SUCCESS || cs_debug > 1) {
printf("vm_page_slide(%p): "
"obj %p off 0x%llx mobj %p moff 0x%llx\n",
page,
page_object, page->offset,
page_object->pager,
page->offset + page_object->paging_offset);
}
if (kr == KERN_SUCCESS) {
page->slid = TRUE;
} else {
page->error = TRUE;
vm_page_slide_errors++;
}
vm_object_paging_end(page_object);
return kr;
}
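/*
 * memoryshot:
 *
 * When vm_debug_events is set, emit a kdebug tracepoint carrying a
 * snapshot of the major page counts; otherwise do nothing.
 */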
void inline memoryshot(unsigned int event, unsigned int control)
{
if (vm_debug_events) {
KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
vm_page_active_count, vm_page_inactive_count,
vm_page_free_count, vm_page_speculative_count,
vm_page_throttled_count);
} else {
(void) event;
(void) control;
}
}
#ifdef MACH_BSD
boolean_t upl_device_page(upl_page_info_t *upl)
{
return(UPL_DEVICE_PAGE(upl));
}
boolean_t upl_page_present(upl_page_info_t *upl, int index)
{
return(UPL_PAGE_PRESENT(upl, index));
}
boolean_t upl_speculative_page(upl_page_info_t *upl, int index)
{
return(UPL_SPECULATIVE_PAGE(upl, index));
}
boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
{
return(UPL_DIRTY_PAGE(upl, index));
}
boolean_t upl_valid_page(upl_page_info_t *upl, int index)
{
return(UPL_VALID_PAGE(upl, index));
}
ppnum_t upl_phys_page(upl_page_info_t *upl, int index)
{
return(UPL_PHYS_PAGE(upl, index));
}
void upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
{
upl[index].mark = v;
}
boolean_t upl_page_get_mark(upl_page_info_t *upl, int index)
{
return upl[index].mark;
}
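/*
 * vm_countdirtypages:
 *
 * Debug accounting: walk the inactive, throttled, and anonymous
 * queues (reported as "IN Q"), then the active queue ("AC Q"),
 * printing the number of dirty, pageout ("free_when_done"), and
 * precious pages found on each pass.
 */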
void
vm_countdirtypages(void)
{
vm_page_t m;
int dpages;
int pgopages;
int precpages;
dpages=0;
pgopages=0;
precpages=0;
vm_page_lock_queues();
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
do {
if (m == (vm_page_t)0) break;
if (m->dirty) dpages++;
if (m->free_when_done) pgopages++;
if (m->precious) precpages++;
assert(VM_PAGE_OBJECT(m) != kernel_object);
m = (vm_page_t) vm_page_queue_next(&m->pageq);
if (m == (vm_page_t)0) break;
} while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
vm_page_unlock_queues();
vm_page_lock_queues();
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
do {
if (m == (vm_page_t)0) break;
dpages++;
assert(m->dirty);
assert(!m->free_when_done);
assert(VM_PAGE_OBJECT(m) != kernel_object);
m = (vm_page_t) vm_page_queue_next(&m->pageq);
if (m == (vm_page_t)0) break;
} while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
vm_page_unlock_queues();
vm_page_lock_queues();
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
do {
if (m == (vm_page_t)0) break;
if (m->dirty) dpages++;
if (m->free_when_done) pgopages++;
if (m->precious) precpages++;
assert(VM_PAGE_OBJECT(m) != kernel_object);
m = (vm_page_t) vm_page_queue_next(&m->pageq);
if (m == (vm_page_t)0) break;
} while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
vm_page_unlock_queues();
printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
dpages=0;
pgopages=0;
precpages=0;
vm_page_lock_queues();
m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
do {
if (m == (vm_page_t)0) break;
if (m->dirty) dpages++;
if (m->free_when_done) pgopages++;
if (m->precious) precpages++;
assert(VM_PAGE_OBJECT(m) != kernel_object);
m = (vm_page_t) vm_page_queue_next(&m->pageq);
if (m == (vm_page_t)0) break;
} while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
vm_page_unlock_queues();
printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
}
#endif
#if CONFIG_IOSCHED
int upl_get_cached_tier(upl_t upl)
{
assert(upl);
if (upl->flags & UPL_TRACKED_BY_OBJECT)
return (upl->upl_priority);
return (-1);
}
#endif
ppnum_t upl_get_highest_page(
upl_t upl)
{
return upl->highest_page;
}
upl_size_t upl_get_size(
upl_t upl)
{
return upl->size;
}
upl_t upl_associated_upl(upl_t upl)
{
return upl->associated_upl;
}
void upl_set_associated_upl(upl_t upl, upl_t associated_upl)
{
upl->associated_upl = associated_upl;
}
struct vnode * upl_lookup_vnode(upl_t upl)
{
if (!upl->map_object->internal)
return vnode_pager_lookup_vnode(upl->map_object->pager);
else
return NULL;
}
#if UPL_DEBUG
kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
upl->ubc_alias1 = alias1;
upl->ubc_alias2 = alias2;
return KERN_SUCCESS;
}
int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
if(al)
*al = upl->ubc_alias1;
if(al2)
*al2 = upl->ubc_alias2;
return KERN_SUCCESS;
}
#endif
#if VM_PRESSURE_EVENTS
extern boolean_t vm_compressor_low_on_space(void);
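/*
 * Memory pressure state transitions.  Without an active compressor,
 * decisions key off the memorystatus_available_pages thresholds; with
 * the compressor, off how much uncompressed memory remains.  The
 * downward transitions (WARNING_TO_NORMAL, CRITICAL_TO_WARNING)
 * require roughly 15% headroom above the corresponding threshold,
 * which provides hysteresis against oscillating between levels.
 */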
boolean_t
VM_PRESSURE_NORMAL_TO_WARNING(void) {
if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
if (memorystatus_frozen_count == 0) {
if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
return TRUE;
}
}
}
return FALSE;
} else {
return ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0);
}
}
boolean_t
VM_PRESSURE_WARNING_TO_CRITICAL(void) {
if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
if (memorystatus_available_pages < memorystatus_available_pages_critical) {
return TRUE;
}
return FALSE;
} else {
return (vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0);
}
}
boolean_t
VM_PRESSURE_WARNING_TO_NORMAL(void) {
if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
if (memorystatus_available_pages > target_threshold) {
return TRUE;
}
return FALSE;
} else {
return ((AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0);
}
}
boolean_t
VM_PRESSURE_CRITICAL_TO_WARNING(void) {
if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
if (memorystatus_available_pages > target_threshold) {
return TRUE;
}
return FALSE;
} else {
return ((AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0);
}
}
#endif
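/*
 * Compile-time-gated VM self-tests.  Each VM_TEST_* switch enables
 * one test; when a switch is off, the corresponding vm_test_*() call
 * compiles away to an empty macro, so vm_tests() is always safe to
 * call.
 */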
#define VM_TEST_COLLAPSE_COMPRESSOR 0
#define VM_TEST_WIRE_AND_EXTRACT 0
#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
#if __arm64__
#define VM_TEST_KERNEL_OBJECT_FAULT 0
#endif
#define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
#if VM_TEST_COLLAPSE_COMPRESSOR
extern boolean_t vm_object_collapse_compressor_allowed;
#include <IOKit/IOLib.h>
static void
vm_test_collapse_compressor(void)
{
vm_object_size_t backing_size, top_size;
vm_object_t backing_object, top_object;
vm_map_offset_t backing_offset, top_offset;
unsigned char *backing_address, *top_address;
kern_return_t kr;
printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");
backing_size = 15 * PAGE_SIZE;
backing_object = vm_object_allocate(backing_size);
assert(backing_object != VM_OBJECT_NULL);
printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
backing_object);
backing_offset = 0;
kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
backing_object, 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
assert(kr == KERN_SUCCESS);
backing_address = (unsigned char *) backing_offset;
printf("VM_TEST_COLLAPSE_COMPRESSOR: "
"mapped backing object %p at 0x%llx\n",
backing_object, (uint64_t) backing_offset);
backing_address[0x1*PAGE_SIZE] = 0xB1;
backing_address[0x4*PAGE_SIZE] = 0xB4;
backing_address[0x7*PAGE_SIZE] = 0xB7;
backing_address[0xa*PAGE_SIZE] = 0xBA;
backing_address[0xd*PAGE_SIZE] = 0xBD;
printf("VM_TEST_COLLAPSE_COMPRESSOR: "
"populated pages to be compressed in "
"backing_object %p\n", backing_object);
vm_object_pageout(backing_object);
printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
backing_object);
while (*(volatile int *)&backing_object->resident_page_count != 0)
IODelay(10);
printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
backing_object);
backing_address[0x0*PAGE_SIZE] = 0xB0;
backing_address[0x3*PAGE_SIZE] = 0xB3;
backing_address[0x6*PAGE_SIZE] = 0xB6;
backing_address[0x9*PAGE_SIZE] = 0xB9;
backing_address[0xc*PAGE_SIZE] = 0xBC;
printf("VM_TEST_COLLAPSE_COMPRESSOR: "
"populated pages to be resident in "
"backing_object %p\n", backing_object);
assert(backing_object->paging_offset == 0);
backing_object->paging_offset = 0x3000;
top_size = 9 * PAGE_SIZE;
top_object = vm_object_allocate(top_size);
assert(top_object != VM_OBJECT_NULL);
printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
top_object);
top_offset = 0;
kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
top_object, 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
assert(kr == KERN_SUCCESS);
top_address = (unsigned char *) top_offset;
printf("VM_TEST_COLLAPSE_COMPRESSOR: "
"mapped top object %p at 0x%llx\n",
top_object, (uint64_t) top_offset);
top_address[0x3*PAGE_SIZE] = 0xA3;
top_address[0x4*PAGE_SIZE] = 0xA4;
top_address[0x5*PAGE_SIZE] = 0xA5;
printf("VM_TEST_COLLAPSE_COMPRESSOR: "
"populated pages to be compressed in "
"top_object %p\n", top_object);
vm_object_pageout(top_object);
printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
top_object);
while (top_object->resident_page_count != 0)
IODelay(10);
printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
top_object);
top_address[0x0*PAGE_SIZE] = 0xA0;
top_address[0x1*PAGE_SIZE] = 0xA1;
top_address[0x2*PAGE_SIZE] = 0xA2;
printf("VM_TEST_COLLAPSE_COMPRESSOR: "
"populated pages to be resident in "
"top_object %p\n", top_object);
vm_object_reference(backing_object);
top_object->shadow = backing_object;
top_object->vo_shadow_offset = 0x3000;
printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
top_object, backing_object);
vm_map_remove(kernel_map,
backing_offset,
backing_offset + backing_size,
0);
printf("VM_TEST_COLLAPSE_COMPRESSOR: "
"unmapped backing_object %p [0x%llx:0x%llx]\n",
backing_object,
(uint64_t) backing_offset,
(uint64_t) (backing_offset + backing_size));
printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
vm_object_lock(top_object);
vm_object_collapse(top_object, 0, FALSE);
vm_object_unlock(top_object);
printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
if (top_object->shadow != VM_OBJECT_NULL) {
printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
if (vm_object_collapse_compressor_allowed) {
panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
}
} else {
unsigned char expect[9] =
{ 0xA0, 0xA1, 0xA2,
0xA3, 0xA4, 0xA5,
0xB9,
0xBD,
0x00 };
unsigned char actual[9];
unsigned int i, errors;
errors = 0;
for (i = 0; i < sizeof (actual); i++) {
actual[i] = (unsigned char) top_address[i*PAGE_SIZE];
if (actual[i] != expect[i]) {
errors++;
}
}
printf("VM_TEST_COLLAPSE_COMPRESSOR: "
"actual [%x %x %x %x %x %x %x %x %x] "
"expect [%x %x %x %x %x %x %x %x %x] "
"%d errors\n",
actual[0], actual[1], actual[2], actual[3],
actual[4], actual[5], actual[6], actual[7],
actual[8],
expect[0], expect[1], expect[2], expect[3],
expect[4], expect[5], expect[6], expect[7],
expect[8],
errors);
if (errors) {
panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
} else {
printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
}
}
}
#else
#define vm_test_collapse_compressor()
#endif
#if VM_TEST_WIRE_AND_EXTRACT
extern ledger_template_t task_ledger_template;
#include <mach/mach_vm.h>
extern ppnum_t vm_map_get_phys_page(vm_map_t map,
vm_offset_t offset);
static void
vm_test_wire_and_extract(void)
{
ledger_t ledger;
vm_map_t user_map, wire_map;
mach_vm_address_t user_addr, wire_addr;
mach_vm_size_t user_size, wire_size;
mach_vm_offset_t cur_offset;
vm_prot_t cur_prot, max_prot;
ppnum_t user_ppnum, wire_ppnum;
kern_return_t kr;
ledger = ledger_instantiate(task_ledger_template,
LEDGER_CREATE_ACTIVE_ENTRIES);
user_map = vm_map_create(pmap_create(ledger, 0, PMAP_CREATE_64BIT),
0x100000000ULL,
0x200000000ULL,
TRUE);
wire_map = vm_map_create(NULL,
0x100000000ULL,
0x200000000ULL,
TRUE);
user_addr = 0;
user_size = 0x10000;
kr = mach_vm_allocate(user_map,
&user_addr,
user_size,
VM_FLAGS_ANYWHERE);
assert(kr == KERN_SUCCESS);
wire_addr = 0;
wire_size = user_size;
kr = mach_vm_remap(wire_map,
&wire_addr,
wire_size,
0,
VM_FLAGS_ANYWHERE,
user_map,
user_addr,
FALSE,
&cur_prot,
&max_prot,
VM_INHERIT_NONE);
assert(kr == KERN_SUCCESS);
for (cur_offset = 0;
cur_offset < wire_size;
cur_offset += PAGE_SIZE) {
kr = vm_map_wire_and_extract(wire_map,
wire_addr + cur_offset,
VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
TRUE,
&wire_ppnum);
assert(kr == KERN_SUCCESS);
user_ppnum = vm_map_get_phys_page(user_map,
user_addr + cur_offset);
printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
"user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
kr,
user_map, user_addr + cur_offset, user_ppnum,
wire_map, wire_addr + cur_offset, wire_ppnum);
if (kr != KERN_SUCCESS ||
wire_ppnum == 0 ||
wire_ppnum != user_ppnum) {
panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
}
}
cur_offset -= PAGE_SIZE;
kr = vm_map_wire_and_extract(wire_map,
wire_addr + cur_offset,
VM_PROT_DEFAULT,
TRUE,
&wire_ppnum);
assert(kr == KERN_SUCCESS);
printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
"user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
kr,
user_map, user_addr + cur_offset, user_ppnum,
wire_map, wire_addr + cur_offset, wire_ppnum);
if (kr != KERN_SUCCESS ||
wire_ppnum == 0 ||
wire_ppnum != user_ppnum) {
panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
}
printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
}
#else
#define vm_test_wire_and_extract()
#endif
#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
static void
vm_test_page_wire_overflow_panic(void)
{
vm_object_t object;
vm_page_t page;
printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");
object = vm_object_allocate(PAGE_SIZE);
vm_object_lock(object);
page = vm_page_alloc(object, 0x0);
vm_page_lock_queues();
do {
vm_page_wire(page, 1, FALSE);
} while (page->wire_count != 0);
vm_page_unlock_queues();
vm_object_unlock(object);
panic("FBDP(%p,%p): wire_count overflow not detected\n",
object, page);
}
#else
#define vm_test_page_wire_overflow_panic()
#endif
#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
static void
vm_test_kernel_object_fault(void)
{
kern_return_t kr;
vm_offset_t stack;
uintptr_t frameb[2];
int ret;
kr = kernel_memory_allocate(kernel_map, &stack,
kernel_stack_size + (2*PAGE_SIZE),
0,
(KMA_KSTACK | KMA_KOBJECT |
KMA_GUARD_FIRST | KMA_GUARD_LAST),
VM_KERN_MEMORY_STACK);
if (kr != KERN_SUCCESS) {
panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr);
}
ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
if (ret != 0) {
printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
} else {
printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
}
vm_map_remove(kernel_map,
stack,
stack + kernel_stack_size + (2*PAGE_SIZE),
VM_MAP_REMOVE_KUNWIRE);
stack = 0;
}
#else
#define vm_test_kernel_object_fault()
#endif
#if VM_TEST_DEVICE_PAGER_TRANSPOSE
static void
vm_test_device_pager_transpose(void)
{
memory_object_t device_pager;
vm_object_t anon_object, device_object;
vm_size_t size;
vm_map_offset_t anon_mapping, device_mapping;
kern_return_t kr;
size = 3 * PAGE_SIZE;
anon_object = vm_object_allocate(size);
assert(anon_object != VM_OBJECT_NULL);
device_pager = device_pager_setup(NULL, 0, size, 0);
assert(device_pager != NULL);
device_object = memory_object_to_vm_object(device_pager);
assert(device_object != VM_OBJECT_NULL);
anon_mapping = 0;
kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
assert(kr == KERN_SUCCESS);
device_mapping = 0;
kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0,
VM_FLAGS_ANYWHERE,
VM_MAP_KERNEL_FLAGS_NONE,
VM_KERN_MEMORY_NONE,
(void *)device_pager, 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
assert(kr == KERN_SUCCESS);
memory_object_deallocate(device_pager);
vm_object_lock(anon_object);
vm_object_activity_begin(anon_object);
anon_object->blocked_access = TRUE;
vm_object_unlock(anon_object);
vm_object_lock(device_object);
vm_object_activity_begin(device_object);
device_object->blocked_access = TRUE;
vm_object_unlock(device_object);
assert(anon_object->ref_count == 1);
assert(!anon_object->named);
assert(device_object->ref_count == 2);
assert(device_object->named);
kr = vm_object_transpose(device_object, anon_object, size);
assert(kr == KERN_SUCCESS);
vm_object_lock(anon_object);
vm_object_activity_end(anon_object);
anon_object->blocked_access = FALSE;
vm_object_unlock(anon_object);
vm_object_lock(device_object);
vm_object_activity_end(device_object);
device_object->blocked_access = FALSE;
vm_object_unlock(device_object);
assert(anon_object->ref_count == 2);
assert(anon_object->named);
kr = vm_deallocate(kernel_map, anon_mapping, size);
assert(kr == KERN_SUCCESS);
assert(device_object->ref_count == 1);
assert(!device_object->named);
kr = vm_deallocate(kernel_map, device_mapping, size);
assert(kr == KERN_SUCCESS);
printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
}
#else
#define vm_test_device_pager_transpose()
#endif
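/*
 * vm_tests:
 *
 * Run whichever of the self-tests above were compiled in.
 */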
void
vm_tests(void)
{
vm_test_collapse_compressor();
vm_test_wire_and_extract();
vm_test_page_wire_overflow_panic();
vm_test_kernel_object_fault();
vm_test_device_pager_transpose();
}