#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_
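/*
 * Virtual memory object module definitions.
 */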
#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>
#if MACH_PAGEMAP
#include <vm/vm_external.h>
#endif
#include <vm/vm_options.h>
struct vm_page;
/*
 * vm_object_fault_info: parameters that describe the current fault,
 * passed down the page-fault path.
 */
struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
	__vm_object_fault_info_unused_bits:26;
};
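/*
 * Illustrative sketch (hypothetical values): the fault path fills
 * one of these in before walking the object's shadow chain.
 * entry_start/entry_end stand in for the faulting map entry's range.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.user_tag = 0;
 *	fault_info.cluster_size = PAGE_SIZE;
 *	fault_info.behavior = VM_BEHAVIOR_DEFAULT;
 *	fault_info.lo_offset = entry_start;
 *	fault_info.hi_offset = entry_end;
 *	fault_info.no_cache = FALSE;
 */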
/*
 * Shorthand for the discriminated-union fields of struct vm_object
 * (defined below).
 */
#define vo_size			vo_un1.vou_size
#define vo_cache_pages_to_scan	vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset	vo_un2.vou_shadow_offset
#define vo_cache_ts		vo_un2.vou_cache_ts
struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	union {
		vm_object_size_t vou_size;	/* Object size (only valid
						 * if internal) */
		int		vou_cache_pages_to_scan;
						/* pages yet to be visited in
						 * a cached external object */
	} vo_un1;

	struct vm_page		*memq_hint;	/* hint for page lookups */
	int			ref_count;	/* Number of references */
#if TASK_SWAPPER
	int			res_count;	/* Residency references (swap) */
#endif /* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count;
						/* number of wired pages */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of a cached external
						 * object */
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

	/*
	 * Both in-progress counts are protected by the object lock;
	 * the object may not be terminated while either is nonzero.
	 */
	short			paging_in_progress;
	short			activity_in_progress;

	unsigned int
		all_wanted:11,		/* Bit array of "wanted" events */
		pager_created:1,	/* Has pager been created? */
		pager_initialized:1,	/* Are fields ready to use? */
		pager_ready:1,		/* Will pager take requests? */
		pager_trusted:1,	/* Pager may change page contents */
		can_persist:1,		/* May keep data after last unmap */
		internal:1,		/* Created by the kernel */
		temporary:1,		/* Changes not reflected to pager */
		private:1,		/* Holds private pages only */
		pageout:1,		/* Pageout object */
		alive:1,		/* Not yet terminated */
		purgable:2,		/* Purgable state (vm_purgable_t) */
		shadowed:1,		/* Shadow may exist */
		silent_overwrite:1,	/* Allow full-page overwrite without
					 * data_request if page is absent */
		advisory_pageout:1,	/* Notify pager instead of sending
					 * the page out-of-line */
		true_share:1,		/* Mapped in more than one place;
					 * cannot be coalesced */
		terminating:1,		/* Termination in progress */
		named:1,		/* Object has a "named" reference */
		shadow_severed:1,	/* Backing object went away; fault
					 * must error instead of zero-fill */
		phys_contiguous:1,	/* Wired, physically contiguous */
		nophyscache:1;		/* Uncached at the pmap level (I/O) */

	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of cached objects */
	queue_head_t		msr_q;		/* memory object synchronise
						 * request queue */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */
	uint32_t		pages_created;
	uint32_t		pages_used;
#if MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif
	vm_offset_t		cow_hint;	/* last page present in shadow
						 * but not in object */
#if MACH_ASSERT
	struct vm_object	*paging_object;	/* object whose pages are kept
						 * busy on our behalf */
#endif
	unsigned int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed; signatures are
					 * stored with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		__object2_unused_bits:15;	/* for expansion */

	uint32_t		scan_collisions;
#if UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding UPLs */
#endif
#ifdef VM_PIP_DEBUG
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each backtrace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* backtraces kept per object */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for
					 * the purgable queues */
};
/*
 * A fault on a volatile or empty purgeable object must fail: its
 * pages may be (or may already have been) reclaimed at any time.
 */
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
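/*
 * Sketch: the fault path checks this before touching the pages of
 * a purgeable object, e.g.
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *		return VM_FAULT_MEMORY_ERROR;
 *	}
 */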
/*
 * VM_PAGE_REMOVE takes a page off its object's resident-page queue,
 * first repairing memq_hint if the page being removed is the hint.
 * Caller must hold the object lock exclusively.
 */
#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END

/*
 * VM_PAGE_INSERT queues a page on an object's resident-page queue
 * and makes it the new lookup hint. Caller must hold the object
 * lock exclusively.
 */
#define VM_PAGE_INSERT(page, object)					\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = (object);				\
	queue_enter(&__object->memq, __page, vm_page_t, listq);		\
	__object->memq_hint = __page;					\
	MACRO_END
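/*
 * Usage sketch: both macros modify memq and memq_hint, so the
 * object must be locked exclusively around them.
 *
 *	vm_object_lock(object);
 *	VM_PAGE_INSERT(page, object);
 *	...
 *	VM_PAGE_REMOVE(page);
 *	vm_object_unlock(object);
 */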
__private_extern__
vm_object_t	kernel_object;		/* the single kernel object */

__private_extern__
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */
/*
 * msync_req: keeps track of memory-object synchronize requests;
 * one per outstanding request, queued on the object's msr_q.
 */
#define VM_MSYNC_INITIALIZED		0
#define VM_MSYNC_SYNCHRONIZING		1
#define VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;		/* VM_MSYNC_* state */
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)
extern lck_grp_t vm_map_lck_grp;
extern lck_attr_t vm_map_lck_attr;
/*
 * Macros to allocate and free msync_reqs. Note that a kalloc()
 * failure is not handled here.
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	(kfree((msr), sizeof(struct msync_req)))

#define msr_lock(msr)	lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	lck_mtx_unlock(&(msr)->msync_req_lock)
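/*
 * Illustrative lifecycle (sketch, not a real call site): requests
 * are queued on the object's msr_q and freed once synchronization
 * completes.
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	msr_unlock(msr);
 *	... wait for completion, flag becomes VM_MSYNC_DONE ...
 *	msync_req_free(msr);
 */

/*
 * Declare procedures that operate on VM objects.
 */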
__private_extern__ void vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));
__private_extern__ void vm_object_init(void);
__private_extern__ void vm_object_init_lck_grp(void);
__private_extern__ void vm_object_reaper_init(void);
__private_extern__ vm_object_t vm_object_allocate(
vm_object_size_t size);
__private_extern__ void _vm_object_allocate(vm_object_size_t size,
vm_object_t object);
/*
 * Residency ("res") reference accounting only exists when the task
 * swapper is configured; otherwise these macros compile away.
 */
#if TASK_SWAPPER
__private_extern__ void vm_object_res_reference(
vm_object_t object);
__private_extern__ void vm_object_res_deallocate(
vm_object_t object);
#define VM_OBJ_RES_INCR(object) (object)->res_count++
#define VM_OBJ_RES_DECR(object) (object)->res_count--
#else
#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)
#endif
/*
 * Take an additional reference on an object whose lock is already
 * held. The exclusive-lock variant may use a plain increment; the
 * shared-lock variant must bump ref_count atomically.
 */
#define vm_object_reference_locked(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_exclusive(RLObject);			\
	assert((RLObject)->ref_count > 0);				\
	(RLObject)->ref_count++;					\
	assert((RLObject)->ref_count > 1);				\
	vm_object_res_reference(RLObject);				\
	MACRO_END

#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(RLObject);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	vm_object_res_reference(RLObject);				\
	MACRO_END
__private_extern__ void vm_object_reference(
vm_object_t object);
/*
 * When assertions are disabled, vm_object_reference() reduces to
 * taking the lock shared and bumping the count atomically.
 */
#if !MACH_ASSERT
#define vm_object_reference(object)					\
	MACRO_BEGIN							\
	vm_object_t RObject = (object);					\
	if (RObject) {							\
		vm_object_lock_shared(RObject);				\
		vm_object_reference_shared(RObject);			\
		vm_object_unlock(RObject);				\
	}								\
	MACRO_END
#endif /* !MACH_ASSERT */
__private_extern__ void vm_object_deallocate(
vm_object_t object);
__private_extern__ kern_return_t vm_object_release_name(
vm_object_t object,
int flags);
__private_extern__ void vm_object_pmap_protect(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
pmap_t pmap,
vm_map_offset_t pmap_start,
vm_prot_t prot);
__private_extern__ void vm_object_page_remove(
vm_object_t object,
vm_object_offset_t start,
vm_object_offset_t end);
__private_extern__ void vm_object_deactivate_pages(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
boolean_t kill_page,
boolean_t reusable_page);
__private_extern__ void vm_object_reuse_pages(
vm_object_t object,
vm_object_offset_t start_offset,
vm_object_offset_t end_offset,
boolean_t allow_partial_reuse);
__private_extern__ void vm_object_purge(
vm_object_t object);
__private_extern__ kern_return_t vm_object_purgable_control(
vm_object_t object,
vm_purgable_t control,
int *state);
__private_extern__ boolean_t vm_object_coalesce(
vm_object_t prev_object,
vm_object_t next_object,
vm_object_offset_t prev_offset,
vm_object_offset_t next_offset,
vm_object_size_t prev_size,
vm_object_size_t next_size);
__private_extern__ boolean_t vm_object_shadow(
vm_object_t *object,
vm_object_offset_t *offset,
vm_object_size_t length);
__private_extern__ void vm_object_collapse(
vm_object_t object,
vm_object_offset_t offset,
boolean_t can_bypass);
__private_extern__ boolean_t vm_object_copy_quickly(
vm_object_t *_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
boolean_t *_src_needs_copy,
boolean_t *_dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_strategically(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
vm_object_t *dst_object,
vm_object_offset_t *dst_offset,
boolean_t *dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_slowly(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
boolean_t interruptible,
vm_object_t *_result_object);
__private_extern__ vm_object_t vm_object_copy_delayed(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
boolean_t src_object_shared);
__private_extern__ kern_return_t vm_object_destroy(
vm_object_t object,
kern_return_t reason);
__private_extern__ void vm_object_pager_create(
vm_object_t object);
__private_extern__ void vm_object_page_map(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
vm_object_offset_t (*map_fn)
(void *, vm_object_offset_t),
void *map_fn_data);
__private_extern__ kern_return_t vm_object_upl_request(
vm_object_t object,
vm_object_offset_t offset,
upl_size_t size,
upl_t *upl,
upl_page_info_t *page_info,
unsigned int *count,
int flags);
__private_extern__ kern_return_t vm_object_transpose(
vm_object_t object1,
vm_object_t object2,
vm_object_size_t transpose_size);
__private_extern__ boolean_t vm_object_sync(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
boolean_t should_flush,
boolean_t should_return,
boolean_t should_iosync);
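/*
 * Sketch: vm_object_sync() is the object-level worker for
 * msync-style cleaning; per the prototype above, the three booleans
 * are, in order, should_flush, should_return and should_iosync.
 *
 *	(void) vm_object_sync(object, offset, size, TRUE, TRUE, FALSE);
 */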
__private_extern__ kern_return_t vm_object_update(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
vm_object_offset_t *error_offset,
int *io_errno,
memory_object_return_t should_return,
int flags,
vm_prot_t prot);
__private_extern__ kern_return_t vm_object_lock_request(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
memory_object_return_t should_return,
int flags,
vm_prot_t prot);
__private_extern__ vm_object_t vm_object_enter(
memory_object_t pager,
vm_object_size_t size,
boolean_t internal,
boolean_t init,
boolean_t check_named);
__private_extern__ void vm_object_cluster_size(
vm_object_t object,
vm_object_offset_t *start,
vm_size_t *length,
vm_object_fault_info_t fault_info,
uint32_t *io_streaming);
__private_extern__ kern_return_t vm_object_populate_with_private(
vm_object_t object,
vm_object_offset_t offset,
ppnum_t phys_page,
vm_size_t size);
__private_extern__ void vm_object_change_wimg_mode(
vm_object_t object,
unsigned int wimg_mode);
extern kern_return_t adjust_vm_object_cache(
vm_size_t oval,
vm_size_t nval);
extern kern_return_t vm_object_page_op(
vm_object_t object,
vm_object_offset_t offset,
int ops,
ppnum_t *phys_entry,
int *flags);
extern kern_return_t vm_object_range_op(
vm_object_t object,
vm_object_offset_t offset_beg,
vm_object_offset_t offset_end,
int ops,
uint32_t *range);
__private_extern__ void vm_object_reap_pages(
vm_object_t object,
int reap_type);
/* Modes for vm_object_reap_pages(): */
#define REAP_REAP		0
#define REAP_TERMINATE		1
#define REAP_PURGEABLE		2
#define REAP_DATA_FLUSH		3
#if CONFIG_FREEZE
struct default_freezer_handle;
__private_extern__ kern_return_t
vm_object_pack(
unsigned int *purgeable_count,
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
unsigned int dirty_budget,
boolean_t *shared,
vm_object_t src_object,
struct default_freezer_handle *df_handle);
__private_extern__ void
vm_object_pack_pages(
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
unsigned int dirty_budget,
vm_object_t src_object,
struct default_freezer_handle *df_handle);
__private_extern__ void
vm_object_pageout(
vm_object_t object);
__private_extern__ kern_return_t
vm_object_pagein(
vm_object_t object);
#endif
/*
 * Event waiting is done with the object lock held; a thread
 * registers interest in one of the events below, then sleeps on
 * an event address derived from the object.
 */
#define VM_OBJECT_EVENT_INITIALIZED		0
#define VM_OBJECT_EVENT_PAGER_READY		1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define VM_OBJECT_EVENT_UNCACHING		5
#define VM_OBJECT_EVENT_COPY_CALL		6
#define VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9
#define vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

/* Unlocks the object before blocking. */
#define vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	 vm_object_unlock(object),					\
	 thread_block(THREAD_CONTINUE_NULL))

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))

/* Sleeps on the event and re-acquires the object lock on wakeup. */
#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))
#define vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
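/*
 * Sketch of the event handshake: a waiter records interest in
 * all_wanted and sleeps on (object + event); vm_object_wakeup()
 * only issues a thread_wakeup() if interest was recorded. E.g.
 * waiting for the pager, with the object locked:
 *
 *	while (!object->pager_ready)
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *			THREAD_UNINT);
 */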
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
/*
 * Record a backtrace for each of the first VM_PIP_DEBUG_MAX_REFS
 * paging/activity references taken on the object.
 */
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */
/*
 * Activity and paging references both keep the object from being
 * terminated; the "paging only" event fires when paging_in_progress
 * alone drains. All four macros require the exclusive object lock.
 */
#define vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	MACRO_END

#define vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->activity_in_progress > 0);			\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	MACRO_END

#define vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress > 0);			\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
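/*
 * Usage sketch: bracket any operation that must keep the object
 * from being terminated. Both macros require the exclusive lock.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue pager request ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */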
#define vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS,		\
			(interruptible));				\
		(void) _wr;	/* wait result currently ignored */	\
	}								\
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
			VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,	\
			(interruptible));				\
		(void) _wr;	/* wait result currently ignored */	\
	}								\
	MACRO_END
/*
 * "Mapping in progress" serializes map/unmap transitions on the
 * object's pager; only one thread may be in the window at a time.
 */
#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(!(object)->mapping_in_progress);				\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
			VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,		\
			(interruptible));				\
		(void) _wr;	/* wait result currently ignored */	\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
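/*
 * Sketch: serializing a pager map/unmap transition. Waiters queue
 * on VM_OBJECT_EVENT_MAPPING_IN_PROGRESS until the current holder
 * calls vm_object_mapping_end().
 *
 *	vm_object_lock(object);
 *	vm_object_mapping_wait(object, THREAD_UNINT);
 *	vm_object_mapping_begin(object);
 *	vm_object_unlock(object);
 *	... map or unmap the pager ...
 *	vm_object_lock(object);
 *	vm_object_mapping_end(object);
 *	vm_object_unlock(object);
 */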
#define OBJECT_LOCK_SHARED 0
#define OBJECT_LOCK_EXCLUSIVE 1
extern lck_grp_t vm_object_lck_grp;
extern lck_grp_attr_t vm_object_lck_grp_attr;
extern lck_attr_t vm_object_lck_attr;
extern lck_attr_t kernel_object_lck_attr;
extern vm_object_t vm_pageout_scan_wants_object;
extern void vm_object_lock(vm_object_t);
extern boolean_t vm_object_lock_try(vm_object_t);
extern boolean_t _vm_object_lock_try(vm_object_t);
extern boolean_t vm_object_lock_avoid(vm_object_t);
extern void vm_object_lock_shared(vm_object_t);
extern boolean_t vm_object_lock_try_shared(vm_object_t);
/*
 * The kernel and submap objects are initialized with their own
 * lock attribute (kernel_object_lck_attr).
 */
#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     &vm_object_lck_attr))

#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
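/*
 * Sketch of the shared-to-exclusive pattern; needs_modify() is a
 * hypothetical predicate. lck_rw_lock_shared_to_exclusive() drops
 * the lock entirely on failure, so the caller must relock and
 * revalidate.
 *
 *	vm_object_lock_shared(object);
 *	if (needs_modify(object)) {
 *		if (!vm_object_lock_upgrade(object)) {
 *			vm_object_lock(object);
 *			// exclusive now, but state may have changed
 *		}
 *		... modify ...
 *	}
 *	vm_object_unlock(object);
 */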
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif
#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
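/*
 * Example, assuming 4 KB pages (PAGE_MASK == 0xFFF):
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 */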
extern void vm_object_cache_add(vm_object_t);
extern void vm_object_cache_remove(vm_object_t);
extern int vm_object_cache_evict(int, int);
#endif /* _VM_VM_OBJECT_H_ */