/* default_pager_internal.h — internal declarations for the default pager */
#ifndef _DEFAULT_PAGER_INTERNAL_H_
#define _DEFAULT_PAGER_INTERNAL_H_
#include <default_pager/diag.h>
#include <default_pager/default_pager_types.h>
#include <mach/mach_types.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <kern/lock.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <device/device_types.h>
#ifndef PARALLEL
#define PARALLEL 1
#endif
#ifndef CHECKSUM
#define CHECKSUM 0
#endif
#define MACH_PORT_FACE mach_port_t
#if 0
#ifndef USE_PRECIOUS
#define USE_PRECIOUS TRUE
#endif
#endif
#ifdef USER_PAGER
#define UP(stuff) stuff
#else
#define UP(stuff)
#endif
#ifndef MACH_KERNEL
/*
 * User-space build: serialize diagnostic output from multiple pager
 * threads with a dedicated mutex so interleaved printf()s stay readable.
 */
extern struct mutex dprintf_lock;
#define PRINTF_LOCK_INIT() mutex_init(&dprintf_lock)
#define PRINTF_LOCK() mutex_lock(&dprintf_lock)
#define PRINTF_UNLOCK() mutex_unlock(&dprintf_lock)
#endif
#ifndef MACH_KERNEL
/*
 * dprintf(args): diagnostic printf, prefixed with the pager name and
 * (user-space only) the calling thread id.
 *
 * NOTE: "args" must be a parenthesized argument list, so call sites
 * look like: dprintf(("bad bs %p\n", bs));  The inner parentheses are
 * what makes "printf args" expand to a valid call.
 */
#define dprintf(args) \
do { \
PRINTF_LOCK(); \
printf("%s[%d]: ", my_name, dp_thread_id()); \
printf args; \
PRINTF_UNLOCK(); \
} while (0)
#else
/* In-kernel variant: no per-thread id and no lock is taken. */
#define dprintf(args) \
do { \
printf("%s[KERNEL]: ", my_name); \
printf args; \
} while (0)
#endif
__private_extern__ char my_name[];
#define DEFAULT_PAGER_DEBUG 0
#if DEFAULT_PAGER_DEBUG
extern int debug_mask;
#define DEBUG_MSG_EXTERNAL 0x00000001
#define DEBUG_MSG_INTERNAL 0x00000002
#define DEBUG_MO_EXTERNAL 0x00000100
#define DEBUG_MO_INTERNAL 0x00000200
#define DEBUG_VS_EXTERNAL 0x00010000
#define DEBUG_VS_INTERNAL 0x00020000
#define DEBUG_BS_EXTERNAL 0x01000000
#define DEBUG_BS_INTERNAL 0x02000000
#define DP_DEBUG(level, args) \
do { \
if (debug_mask & (level)) \
dprintf(args); \
} while (0)
/*
 * ASSERT(expr): panic with full context when "expr" is false.
 *
 * Preprocessor directives are not allowed inside a macro replacement
 * list (the embedded "#ifndef" in the original made the "#" parse as a
 * stringize operator and the header failed to preprocess), so the
 * user-space and in-kernel flavors must be two complete #define's
 * selected up front.
 *
 * NOTE(review): the user-space flavor references a variable "here";
 * presumably each call site declares a local "here" naming the current
 * routine — confirm against the .c files that use ASSERT.
 */
#ifndef MACH_KERNEL
#define ASSERT(expr) \
do { \
if (!(expr)) \
panic("%s[%d]%s: assertion failed in %s line %d: %s",\
my_name, dp_thread_id(), here, \
__FILE__, __LINE__, # expr); \
} while (0)
#else
#define ASSERT(expr) \
do { \
if (!(expr)) \
panic("%s[KERNEL]: assertion failed in %s line %d: %s",\
my_name, __FILE__, __LINE__, # expr); \
} while (0)
#endif
#else
#define DP_DEBUG(level, args) do {} while(0)
#define ASSERT(clause) do {} while(0)
#endif
#ifndef MACH_KERNEL
extern char *mach_error_string(kern_return_t);
#endif
#define PAGER_SUCCESS 0
#define PAGER_FULL 1
#define PAGER_ERROR 2
#ifdef MACH_KERNEL
#define vm_page_size page_size
#else
extern vm_object_size_t vm_page_size;
#endif
extern unsigned long long vm_page_mask;
extern int vm_page_shift;
#ifndef MACH_KERNEL
#define ptoa(p) ((p)*vm_page_size)
#define atop(a) ((a)/vm_page_size)
#endif
#define howmany(a,b) (((a) + (b) - 1)/(b))
extern memory_object_default_t default_pager_object;
#ifdef MACH_KERNEL
extern mutex_t dpt_lock;
extern int default_pager_internal_count;
extern MACH_PORT_FACE default_pager_host_port;
extern MACH_PORT_FACE default_pager_internal_set;
extern MACH_PORT_FACE default_pager_external_set;
extern MACH_PORT_FACE default_pager_default_set;
#else
extern mach_port_t default_pager_host_port;
extern task_port_t default_pager_self;
extern mach_port_t default_pager_internal_set;
extern mach_port_t default_pager_external_set;
extern mach_port_t default_pager_default_set;
#endif
/*
 * Per-thread state for the default pager's service threads.
 * The kernel and user-space builds carry different bookkeeping fields.
 */
typedef struct default_pager_thread {
#ifndef MACH_KERNEL
cthread_t dpt_thread; /* underlying cthread (user-space only) */
#endif
vm_offset_t dpt_buffer; /* buffer used by this thread */
boolean_t dpt_internal; /* TRUE: serves internal-only objects */
#ifndef MACH_KERNEL
int dpt_id; /* thread id, used in diagnostics */
#else
int checked_out; /* in-kernel: thread currently claimed from dpt_array */
#endif
boolean_t dpt_initialized_p; /* TRUE once init of this thread completed */
} default_pager_thread_t;
#ifdef MACH_KERNEL
extern default_pager_thread_t **dpt_array;
#endif
/*
 * Global pager activity counters, updated under gs_lock via GSTAT().
 *
 * NOTE(review): this is an anonymous-struct variable DEFINITION in a
 * header — every .c that includes this file emits a tentative
 * definition, which only links because of the legacy common-symbol
 * model (-fcommon). Consider "extern struct global_stats {...}" plus a
 * single definition in one .c file — verify before changing.
 */
struct {
unsigned int gs_pageout_calls; /* # pageout calls */
unsigned int gs_pagein_calls; /* # pagein calls */
unsigned int gs_pages_in; /* # pages paged in (from storage) */
unsigned int gs_pages_out; /* # pages paged out (to storage) */
unsigned int gs_pages_unavail; /* # requested pages not on backing store */
unsigned int gs_pages_init; /* # pages initialized (first write) */
unsigned int gs_pages_init_writes; /* # of above that caused real writes */
VSTATS_LOCK_DECL(gs_lock) /* lock protecting the counters (may be empty) */
} global_stats;
/* Perform "clause" while holding the global-stats lock. */
#define GSTAT(clause) VSTATS_ACTION(&global_stats.gs_lock, (clause))
/* Cluster geometry: pages are grouped into clusters on backing store. */
#define MAX_CLUSTER_SIZE 8 /* maximum pages per cluster */
#define MAX_CLUSTER_SHIFT 3 /* log2(MAX_CLUSTER_SIZE) */
#define NO_CLSIZE 0 /* "no cluster size chosen yet" sentinel */
/* Classic byte/bit-map helpers ("i" is a bit index into byte array "a"). */
#define NBBY 8 /* bits per byte */
#define BYTEMASK 0xff
#define setbit(a,i) (*(((char *)(a)) + ((i)/NBBY)) |= 1<<((i)%NBBY))
#define clrbit(a,i) (*(((char *)(a)) + ((i)/NBBY)) &= ~(1<<((i)%NBBY)))
#define isset(a,i) (*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY)))
#define isclr(a,i) ((*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) == 0)
/* Backing-store priority range, plus two out-of-band markers. */
#define BS_MAXPRI 4
#define BS_MINPRI 0
#define BS_NOPRI -1 /* not eligible for allocation */
#define BS_FULLPRI -2 /* store is full */
/*
 * One backing store (a device or file used for paging), linked on
 * backing_store_list.  bs_lock protects the statistics; BS_STAT()
 * below is the accessor for them.
 */
struct backing_store {
queue_chain_t bs_links; /* link on backing_store_list */
#ifdef MACH_KERNEL
mutex_t bs_lock; /* lock for the structure */
#else
struct mutex bs_lock;
#endif
MACH_PORT_FACE bs_port; /* backing store port */
int bs_priority; /* BS_MINPRI..BS_MAXPRI, or BS_NOPRI/BS_FULLPRI */
int bs_clsize; /* cluster size, in pages */
unsigned int bs_pages_free; /* # unallocated pages */
unsigned int bs_pages_total; /* # total pages */
unsigned int bs_pages_in; /* # pages paged in from this store */
unsigned int bs_pages_in_fail; /* # failed page-ins */
unsigned int bs_pages_out; /* # pages paged out to this store */
unsigned int bs_pages_out_fail; /* # failed page-outs */
};
typedef struct backing_store *backing_store_t;
#define BACKING_STORE_NULL ((backing_store_t) 0)
#define BS_STAT(bs, clause) VSTATS_ACTION(&(bs)->bs_lock, (clause))
#ifdef MACH_KERNEL
#define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock, 0)
#else
#define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock)
#endif
#define BS_LOCK(bs) mutex_lock(&(bs)->bs_lock)
#define BS_UNLOCK(bs) mutex_unlock(&(bs)->bs_lock)
struct backing_store_list_head {
queue_head_t bsl_queue;
#ifdef MACH_KERNEL
mutex_t bsl_lock;
#else
struct mutex bsl_lock;
#endif
};
extern struct backing_store_list_head backing_store_list;
extern int backing_store_release_trigger_disable;
#ifdef MACH_KERNEL
#define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock, 0)
#else
#define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock)
#endif
#define BSL_LOCK() mutex_lock(&backing_store_list.bsl_lock)
#define BSL_UNLOCK() mutex_unlock(&backing_store_list.bsl_lock)
/*
 * One paging segment: a contiguous region of a device or a file where
 * clusters are allocated, tracked by the ps_bmap bitmap.
 */
struct paging_segment {
/* Underlying storage: either a device port or a vnode. */
union {
MACH_PORT_FACE dev; /* device port */
struct vnode *vnode; /* vnode of backing file */
} storage_type;
unsigned int ps_segtype; /* PS_PARTITION or PS_FILE */
/*
 * NOTE(review): this literal ps_device member is shadowed by the
 * "#define ps_device storage_type.dev" below, so after that point it
 * can never be named — it appears to be redundant with the union's
 * dev field.  Removing it would change the struct layout; confirm
 * against the .c files before touching it.
 */
MACH_PORT_FACE ps_device;
vm_offset_t ps_offset; /* segment offset on device, in records */
vm_offset_t ps_recnum; /* # records in segment */
unsigned int ps_pgnum; /* # pages in segment */
unsigned int ps_record_shift; /* log2(records per page) */
unsigned int ps_clshift; /* log2(pages per cluster) */
unsigned int ps_ncls; /* # clusters in segment */
unsigned int ps_clcount; /* # free clusters */
unsigned int ps_pgcount; /* # free pages */
unsigned long ps_hint; /* allocation hint: first cluster to try */
#ifdef MACH_KERNEL
mutex_t ps_lock; /* lock for the structure */
#else
struct mutex ps_lock;
#endif
unsigned char *ps_bmap; /* cluster allocation bitmap */
backing_store_t ps_bs; /* backing store this segment belongs to */
boolean_t ps_going_away; /* segment is being shut down */
};
/* Convenience names for the storage_type union members. */
#define ps_vnode storage_type.vnode
#define ps_device storage_type.dev
/* ps_segtype values */
#define PS_PARTITION 1
#define PS_FILE 2
typedef struct paging_segment *paging_segment_t;
#define PAGING_SEGMENT_NULL ((paging_segment_t) 0)
#ifdef MACH_KERNEL
#define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock, 0)
#else
#define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock)
#endif
#define PS_LOCK(ps) mutex_lock(&(ps)->ps_lock)
#define PS_UNLOCK(ps) mutex_unlock(&(ps)->ps_lock)
typedef unsigned int pseg_index_t;
#define INVALID_PSEG_INDEX ((pseg_index_t)-1)
#define NULL_PSEG_INDEX ((pseg_index_t) 0)
#define MAX_PSEG_INDEX 63
#define MAX_NUM_PAGING_SEGMENTS MAX_PSEG_INDEX
extern paging_segment_t paging_segments[MAX_NUM_PAGING_SEGMENTS];
#ifdef MACH_KERNEL
extern mutex_t paging_segments_lock;
#else
extern struct mutex paging_segments_lock;
#endif
extern int paging_segment_count;
extern int paging_segment_max;
extern int ps_select_array[DEFAULT_PAGER_BACKING_STORE_MAXPRI+1];
#ifdef MACH_KERNEL
#define PSL_LOCK_INIT() mutex_init(&paging_segments_lock, 0)
#else
#define PSL_LOCK_INIT() mutex_init(&paging_segments_lock)
#endif
#define PSL_LOCK() mutex_lock(&paging_segments_lock)
#define PSL_UNLOCK() mutex_unlock(&paging_segments_lock)
/*
 * vs_map: maps one cluster of a vstruct to its location on backing
 * store.  vsmap_entry is the cluster offset within the paging segment
 * named by vsmap_psindex; vsmap_bmap tracks which pages of the cluster
 * are written; vsmap_alloc tracks which are allocated.  On error,
 * vsmap_error is set and the error code is stored in vsmap_entry.
 */
struct vs_map {
unsigned int vsmap_entry:23, /* cluster offset in segment (or error code) */
vsmap_psindex:8, /* paging segment index */
vsmap_error:1, /* entry holds an error code, not an offset */
vsmap_bmap:16, /* per-page "written" bitmap */
vsmap_alloc:16; /* per-page "allocated" bitmap */
};
typedef struct vs_map *vs_map_t;
#define VSM_ENTRY_NULL 0x7fffff /* all-ones 23-bit entry: no cluster */
/* Pages per cluster for a vstruct. */
#define VSCLSIZE(vs) (1UL << (vs)->vs_clshift)
/* A map entry is clear when it names no cluster and carries no error. */
#define VSM_ISCLR(vsm) (((vsm).vsmap_entry == VSM_ENTRY_NULL) && \
((vsm).vsmap_error == 0))
#define VSM_ISERR(vsm) ((vsm).vsmap_error)
#define VSM_SETCLOFF(vsm, val) ((vsm).vsmap_entry = (val))
/* Record an error: flag it and reuse the entry field for the code. */
#define VSM_SETERR(vsm, err) ((vsm).vsmap_error = 1, \
(vsm).vsmap_entry = (err))
#define VSM_GETERR(vsm) ((vsm).vsmap_entry)
#define VSM_SETPG(vsm, page) ((vsm).vsmap_bmap |= (1 << (page)))
#define VSM_CLRPG(vsm, page) ((vsm).vsmap_bmap &= ~(1 << (page)))
#define VSM_SETPS(vsm, psindx) ((vsm).vsmap_psindex = (psindx))
#define VSM_PSINDEX(vsm) ((vsm).vsmap_psindex)
#define VSM_PS(vsm) paging_segments[(vsm).vsmap_psindex]
#define VSM_BMAP(vsm) ((vsm).vsmap_bmap)
#define VSM_CLOFF(vsm) ((vsm).vsmap_entry)
/* Reset an entry to the pristine "no cluster, no error" state. */
#define VSM_CLR(vsm) ((vsm).vsmap_entry = VSM_ENTRY_NULL, \
(vsm).vsmap_psindex = 0, \
(vsm).vsmap_error = 0, \
(vsm).vsmap_bmap = 0, \
(vsm).vsmap_alloc = 0)
#define VSM_ALLOC(vsm) ((vsm).vsmap_alloc)
#define VSM_SETALLOC(vsm, page) ((vsm).vsmap_alloc |= (1 << (page)))
#define VSM_CLRALLOC(vsm, page) ((vsm).vsmap_alloc &= ~(1 << (page)))
/*
 * Cluster-map sizing.  Maps up to CLMAP_THRESHOLD bytes are a single
 * flat array of vs_map entries; larger maps go indirect (an array of
 * pointers to flat blocks of CLMAP_ENTRIES entries each).
 *
 * The macro arguments below are fully parenthesized: the original
 * "ncls*sizeof(...)" / "ncls-1" forms mis-evaluated compound
 * arguments such as CLMAP_SIZE(a + b) (CERT PRE01-C).
 */
#define CLMAP_THRESHOLD 512 /* bytes */
#define CLMAP_ENTRIES (CLMAP_THRESHOLD/sizeof(struct vs_map))
#define CLMAP_SIZE(ncls) ((ncls)*sizeof(struct vs_map))
#define INDIRECT_CLMAP_ENTRIES(ncls) ((((ncls)-1)/CLMAP_ENTRIES) + 1)
#define INDIRECT_CLMAP_SIZE(ncls) (INDIRECT_CLMAP_ENTRIES(ncls) * sizeof(struct vs_map *))
#define INDIRECT_CLMAP(size) (CLMAP_SIZE(size) > CLMAP_THRESHOLD)
#define RMAPSIZE(blocks) (howmany(blocks,NBBY))
/* ps_clmap() operation codes */
#define CL_FIND 1
#define CL_ALLOC 2
/* A cluster's page bitmap, as returned to ps_clmap() callers. */
struct clbmap {
unsigned int clb_map;
};
/*
 * Result of a ps_clmap() lookup/allocation: which paging segment the
 * cluster lives in, how many pages it covers, its written/allocated
 * bitmaps, and any recorded error.
 */
struct clmap {
paging_segment_t cl_ps; /* paging segment */
int cl_numpages; /* number of valid pages */
struct clbmap cl_bmap; /* "written" bitmap */
int cl_error; /* error, if any */
struct clbmap cl_alloc; /* "allocated" bitmap */
};
#define CLMAP_ERROR(clm) (clm).cl_error
#define CLMAP_PS(clm) (clm).cl_ps
#define CLMAP_NPGS(clm) (clm).cl_numpages
#define CLMAP_ISSET(clm,i) ((1<<(i))&((clm).cl_bmap.clb_map))
#define CLMAP_ALLOC(clm) (clm).cl_alloc.clb_map
/*
 * Right-align the bitmaps when fewer than a full cluster's worth of
 * pages is returned ("clm" is a pointer here, unlike the macros above).
 */
#define CLMAP_SHIFT(clm,vs) \
(clm)->cl_bmap.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
#define CLMAP_SHIFTALLOC(clm,vs) \
(clm)->cl_alloc.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
/*
 * Alias record pairing a pager-ops identity with its vstruct.
 * NOTE(review): "name" holding pager ops suggests it identifies the
 * pager type for the aliased object — confirm against the users.
 */
typedef struct vstruct_alias {
memory_object_pager_ops_t name;
struct vstruct *vs;
} vstruct_alias_t;
#ifdef MACH_KERNEL
#define DPT_LOCK_INIT(lock) mutex_init(&(lock), 0)
#define DPT_LOCK(lock) mutex_lock(&(lock))
#define DPT_UNLOCK(lock) mutex_unlock(&(lock))
#define DPT_SLEEP(lock, e, i) thread_sleep_mutex(&(lock), (event_t)(e), i)
#define VS_LOCK_TYPE hw_lock_data_t
#define VS_LOCK_INIT(vs) hw_lock_init(&(vs)->vs_lock)
#define VS_TRY_LOCK(vs) (VS_LOCK(vs),TRUE)
#define VS_LOCK(vs) hw_lock_lock(&(vs)->vs_lock)
#define VS_UNLOCK(vs) hw_lock_unlock(&(vs)->vs_lock)
#define VS_MAP_LOCK_TYPE mutex_t
#define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock, 0)
#define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
#define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
#define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
#else
#define VS_LOCK_TYPE struct mutex
#define VS_LOCK_INIT(vs) mutex_init(&(vs)->vs_lock, 0)
#define VS_TRY_LOCK(vs) mutex_try(&(vs)->vs_lock)
#define VS_LOCK(vs) mutex_lock(&(vs)->vs_lock)
#define VS_UNLOCK(vs) mutex_unlock(&(vs)->vs_lock)
#define VS_MAP_LOCK_TYPE struct mutex
#define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock)
#define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
#define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
#define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
#endif
/*
 * vstruct: per-memory-object state for the default pager.  Tracks
 * reader/writer counts and sequencing, plus the cluster map (flat or
 * indirect, selected by vs_indirect) that locates each cluster on
 * backing store.  vs_lock protects the counters; vs_map_lock protects
 * the map.
 */
typedef struct vstruct {
memory_object_pager_ops_t vs_pager_ops; /* == &default_pager_ops */
int vs_mem_obj_ikot; /* JMM: fake ip_kotype() */
memory_object_control_t vs_control; /* our mem obj control ref */
VS_LOCK_TYPE vs_lock; /* lock for the structure */
unsigned int vs_next_seqno; /* next sequence number to issue */
unsigned int vs_seqno; /* sequence number currently in progress */
unsigned int vs_readers; /* # of in-progress read operations */
unsigned int vs_writers; /* # of in-progress write operations */
#ifdef MACH_KERNEL
/* In-kernel: waiters are single-bit flags (sleep/wakeup on the vs). */
unsigned int
vs_waiting_seqno:1, /* to wait on seqno */
vs_waiting_read:1, /* waiting on readers? */
vs_waiting_write:1, /* waiting on writers? */
vs_waiting_async:1, /* waiting on async_pending? */
vs_indirect:1, /* map is indirect (two-level)? */
vs_xfer_pending:1; /* a transfer is in progress */
#else
/* User-space: waiters are recorded as explicit wakeup events. */
event_t vs_waiting_seqno;
event_t vs_waiting_read;
event_t vs_waiting_write;
event_t vs_waiting_async;
int vs_indirect:1,
vs_xfer_pending:1;
#endif
unsigned int vs_async_pending; /* # of pending async writes */
unsigned int vs_errors; /* # of pageout errors recorded */
unsigned int vs_references; /* references held to this vstruct */
queue_chain_t vs_links; /* link on vstruct_list */
unsigned int vs_clshift; /* log2(pages per cluster) */
unsigned int vs_size; /* object size, in clusters */
#ifdef MACH_KERNEL
mutex_t vs_map_lock; /* to protect map below */
#else
struct mutex vs_map_lock;
#endif
union {
struct vs_map *vsu_dmap; /* direct map of clusters */
struct vs_map **vsu_imap; /* indirect map of map blocks */
} vs_un;
} *vstruct_t;
/* Accessors for whichever map form vs_indirect selects. */
#define vs_dmap vs_un.vsu_dmap
#define vs_imap vs_un.vsu_imap
#define VSTRUCT_NULL ((vstruct_t) 0)
__private_extern__ void vs_async_wait(vstruct_t);
#if PARALLEL
__private_extern__ void vs_lock(vstruct_t);
__private_extern__ void vs_unlock(vstruct_t);
__private_extern__ void vs_start_read(vstruct_t);
__private_extern__ void vs_finish_read(vstruct_t);
__private_extern__ void vs_wait_for_readers(vstruct_t);
__private_extern__ void vs_start_write(vstruct_t);
__private_extern__ void vs_finish_write(vstruct_t);
__private_extern__ void vs_wait_for_writers(vstruct_t);
__private_extern__ void vs_wait_for_sync_writers(vstruct_t);
#else
#define vs_lock(vs)
#define vs_unlock(vs)
#define vs_start_read(vs)
#define vs_wait_for_readers(vs)
#define vs_finish_read(vs)
#define vs_start_write(vs)
#define vs_wait_for_writers(vs)
#define vs_wait_for_sync_writers(vs)
#define vs_finish_write(vs)
#endif
/*
 * Descriptor for one asynchronous paging I/O on a vstruct; chained via
 * vsa_next.  vsa_flags is a combination of the VSA_* bits below.
 */
struct vs_async {
struct vs_async *vsa_next; /* pointer to next structure */
vstruct_t vsa_vs; /* the vstruct being operated on */
vm_offset_t vsa_addr; /* address of the I/O buffer */
vm_offset_t vsa_offset; /* offset within the object */
vm_size_t vsa_size; /* size of the transfer */
paging_segment_t vsa_ps; /* target paging segment */
int vsa_flags; /* VSA_READ / VSA_WRITE / VSA_TRANSFER */
int vsa_error; /* error from the completed I/O, if any */
mutex_t vsa_lock;
MACH_PORT_FACE reply_port; /* where the completion reply goes */
};
/* vsa_flags bits */
#define VSA_READ 0x0001
#define VSA_WRITE 0x0002
#define VSA_TRANSFER 0x0004
/*
 * Head of the global list of all vstructs, protected by vsl_lock
 * (see the VSL_* macros below).
 */
struct vstruct_list_head {
queue_head_t vsl_queue; /* list of vstructs */
#ifdef MACH_KERNEL
mutex_t vsl_lock; /* lock for the list and count */
#else
struct mutex vsl_lock;
#endif
int vsl_count; /* # of vstructs on the list */
};
__private_extern__ struct vstruct_list_head vstruct_list;
__private_extern__ void vstruct_list_insert(vstruct_t vs);
__private_extern__ void vstruct_list_delete(vstruct_t vs);
#ifdef MACH_KERNEL
#define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock, 0)
#else
#define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock)
#endif
#define VSL_LOCK() mutex_lock(&vstruct_list.vsl_lock)
#define VSL_LOCK_TRY() mutex_try(&vstruct_list.vsl_lock)
#define VSL_UNLOCK() mutex_unlock(&vstruct_list.vsl_lock)
#define VSL_SLEEP(e,i) thread_sleep_mutex((e), &vstruct_list.vsl_lock, (i))
#ifdef MACH_KERNEL
__private_extern__ zone_t vstruct_zone;
#endif
#ifdef MACH_KERNEL
/*
 * In-kernel build: a memory_object handed to the default pager IS the
 * vstruct, so conversion is a cast; identity is checked by comparing
 * the object's pager ops against default_pager_ops.
 */
extern const struct memory_object_pager_ops default_pager_ops;
#define mem_obj_is_vs(_mem_obj_) \
(((_mem_obj_) != NULL) && \
((_mem_obj_)->mo_pager_ops == &default_pager_ops))
#define mem_obj_to_vs(_mem_obj_) \
((vstruct_t)(_mem_obj_))
#define vs_to_mem_obj(_vs_) ((memory_object_t)(_vs_))
/* Convert, panicking if the object is not one of ours. */
#define vs_lookup(_mem_obj_, _vs_) \
do { \
if (!mem_obj_is_vs(_mem_obj_)) \
panic("bad dp memory object"); \
_vs_ = mem_obj_to_vs(_mem_obj_); \
} while (0)
/* Convert, yielding VSTRUCT_NULL instead of panicking. */
#define vs_lookup_safe(_mem_obj_, _vs_) \
do { \
if (!mem_obj_is_vs(_mem_obj_)) \
_vs_ = VSTRUCT_NULL; \
else \
_vs_ = mem_obj_to_vs(_mem_obj_); \
} while (0)
#else
/*
 * User-space build: the pager port name encodes the vstruct address
 * with a low-bit tag of 1 (vstruct allocations are assumed at least
 * 4-byte aligned, so the low two bits are free for tagging).
 *
 * NOTE(review): vs_lookup below dereferences a "vs_mem_obj" field that
 * the vstruct defined in this header does not declare (it has
 * vs_mem_obj_ikot) — this branch looks stale; confirm it still builds
 * before relying on it.
 */
#define vs_to_port(_vs_) (((vm_offset_t)(_vs_))+1)
#define port_to_vs(_port_) ((vstruct_t)(((vm_offset_t)(_port_))&~3))
#define port_is_vs(_port_) ((((vm_offset_t)(_port_))&3) == 1)
#define vs_lookup(_port_, _vs_) \
do { \
if (!MACH_PORT_VALID(_port_) || !port_is_vs(_port_) \
|| port_to_vs(_port_)->vs_mem_obj != (_port_)) \
Panic("bad pager port"); \
_vs_ = port_to_vs(_port_); \
} while (0)
#endif
#ifndef MACH_KERNEL
extern int dp_thread_id(void);
#endif
extern boolean_t device_reply_server(mach_msg_header_t *,
mach_msg_header_t *);
#ifdef MACH_KERNEL
extern boolean_t default_pager_no_senders(memory_object_t,
mach_port_mscount_t);
#else
extern void default_pager_no_senders(memory_object_t,
mach_port_seqno_t,
mach_port_mscount_t);
#endif
extern int local_log2(unsigned int);
extern void bs_initialize(void);
extern void bs_global_info(vm_size_t *,
vm_size_t *);
extern boolean_t bs_add_device(char *,
MACH_PORT_FACE);
extern vstruct_t ps_vstruct_create(vm_size_t);
extern void ps_vstruct_dealloc(vstruct_t);
extern kern_return_t pvs_cluster_read(vstruct_t,
vm_offset_t,
vm_size_t,
void *);
extern kern_return_t vs_cluster_write(vstruct_t,
upl_t,
upl_offset_t,
upl_size_t,
boolean_t,
int);
extern vm_offset_t ps_clmap(vstruct_t,
vm_offset_t,
struct clmap *,
int,
vm_size_t,
int);
extern vm_size_t ps_vstruct_allocated_size(vstruct_t);
extern size_t ps_vstruct_allocated_pages(vstruct_t,
default_pager_page_t *,
size_t);
extern boolean_t bs_set_default_clsize(unsigned int);
extern boolean_t verbose;
extern thread_call_t default_pager_backing_store_monitor_callout;
extern void default_pager_backing_store_monitor(thread_call_param_t, thread_call_param_t);
extern ipc_port_t max_pages_trigger_port;
extern unsigned int dp_pages_free;
extern unsigned int maximum_pages_free;
extern boolean_t dp_encryption_inited;
extern boolean_t dp_encryption;
#endif