/* dp_backing_store.c */
#include <mach/host_priv.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_server.h>
#include <mach/upl.h>
#include <default_pager/default_pager_internal.h>
#include <default_pager/default_pager_alerts.h>
#include <default_pager/default_pager_object_server.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/queue.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
/* Bytes written to one paging segment before rotating to the next (1 GB). */
#define ALLOC_STRIDE (1024 * 1024 * 1024)
int physical_transfer_cluster_count = 0;
/* A "super cluster" is the unit used for segment-to-segment transfers:
 * 0x40000 bytes == 64 pages (with 4 KB pages). */
#define VM_SUPER_CLUSTER 0x40000
#define VM_SUPER_PAGES 64
/* Default cluster shift: 2^2 = 4 pages per cluster, until overridden. */
#define VSTRUCT_DEF_CLSHIFT 2
int vstruct_def_clshift = VSTRUCT_DEF_CLSHIFT;
int default_pager_clsize = 0;
/* Histograms of pages per clustered write/read, buckets 0..VM_SUPER_PAGES. */
unsigned int clustered_writes[VM_SUPER_PAGES+1];
unsigned int clustered_reads[VM_SUPER_PAGES+1];
#if 0
struct vs_async *vs_async_list;
int async_num_queued;
int async_requests_out;
#endif
#define VS_ASYNC_REUSE 1
/* Free list of reusable vs_async structures; guarded by
 * default_pager_async_lock (see VS_ASYNC_LOCK below). */
struct vs_async *vs_async_free_list;
mutex_t default_pager_async_lock;
int vs_alloc_async_failed = 0;
int vs_alloc_async_count = 0;
struct vs_async *vs_alloc_async(void);
void vs_free_async(struct vs_async *vsa);
#define VS_ALLOC_ASYNC() vs_alloc_async()
#define VS_FREE_ASYNC(vsa) vs_free_async(vsa)
#define VS_ASYNC_LOCK() mutex_lock(&default_pager_async_lock)
#define VS_ASYNC_UNLOCK() mutex_unlock(&default_pager_async_lock)
#define VS_ASYNC_LOCK_INIT() mutex_init(&default_pager_async_lock, 0)
#define VS_ASYNC_LOCK_ADDR() (&default_pager_async_lock)
/* Water marks and one-shot notification ports for paging-space alerts. */
unsigned int minimum_pages_remaining = 0;
unsigned int maximum_pages_free = 0;
ipc_port_t min_pages_trigger_port = NULL;
ipc_port_t max_pages_trigger_port = NULL;
boolean_t bs_low = FALSE;
int backing_store_release_trigger_disable = 0;
/* Swap-encryption state; set elsewhere, read by the paging I/O paths. */
boolean_t dp_encryption_inited = FALSE;
boolean_t dp_encryption = FALSE;
vm_size_t max_doubled_size = 4 * 1024 * 1024;
/* List of all backing stores; guarded by the BSL lock. */
struct backing_store_list_head backing_store_list;
/* Global table of paging segments; guarded by the PSL lock. */
paging_segment_t paging_segments[MAX_NUM_PAGING_SEGMENTS];
mutex_t paging_segments_lock;
int paging_segment_max = 0;
int paging_segment_count = 0;
/* Per-priority round-robin cursor into paging_segments[]; BS_NOPRI and
 * BS_FULLPRI act as sentinels for "no segment" / "all segments full". */
int ps_select_array[BS_MAXPRI+1] = { -1,-1,-1,-1,-1 };
/* Count of free backing-store pages across all segments (PSL lock). */
unsigned int dp_pages_free = 0;
unsigned int cluster_transfer_minimum = 100;
/* Forward declarations for helpers defined later in this file. */
kern_return_t ps_write_file(paging_segment_t, upl_t, upl_offset_t, vm_offset_t, unsigned int, int);
kern_return_t ps_read_file (paging_segment_t, upl_t, upl_offset_t, vm_offset_t, unsigned int, unsigned int *, int);
default_pager_thread_t *get_read_buffer( void );
kern_return_t ps_vstruct_transfer_from_segment(
vstruct_t vs,
paging_segment_t segment,
upl_t upl);
kern_return_t ps_read_device(paging_segment_t, vm_offset_t, vm_offset_t *, unsigned int, unsigned int *, int);
kern_return_t ps_write_device(paging_segment_t, vm_offset_t, vm_offset_t, unsigned int, struct vs_async *);
kern_return_t vs_cluster_transfer(
vstruct_t vs,
upl_offset_t offset,
upl_size_t cnt,
upl_t upl);
vs_map_t vs_get_map_entry(
vstruct_t vs,
vm_offset_t offset);
/*
 * Claim an internal default-pager thread buffer.  Scans dpt_array for a
 * slot that is not checked out, marks it checked out and returns it.
 * Blocks (sleeping on &dpt_array under the DPT lock) until one becomes
 * available; the DPT lock is dropped before returning.
 */
default_pager_thread_t *
get_read_buffer( void )
{
int i;
DPT_LOCK(dpt_lock);
while(TRUE) {
for (i=0; i<default_pager_internal_count; i++) {
/* First free buffer wins; mark it busy before dropping the lock. */
if(dpt_array[i]->checked_out == FALSE) {
dpt_array[i]->checked_out = TRUE;
DPT_UNLOCK(dpt_lock);
return dpt_array[i];
}
}
/* All buffers busy: sleep until a holder wakes us, then rescan. */
DPT_SLEEP(dpt_lock, &dpt_array, THREAD_UNINT);
}
}
/*
 * One-time module initialization: set up the backing-store list lock and
 * queue, the paging-segment lock, and the async-structure lock; reset the
 * reusable vs_async free list (when reuse is compiled in); and zero the
 * clustered read/write histograms (buckets 0..VM_SUPER_PAGES inclusive).
 */
void
bs_initialize(void)
{
	int bucket;

	BSL_LOCK_INIT();
	queue_init(&backing_store_list.bsl_queue);
	PSL_LOCK_INIT();
	VS_ASYNC_LOCK_INIT();
#if VS_ASYNC_REUSE
	vs_async_free_list = NULL;
#endif /* VS_ASYNC_REUSE */

	/* Clear every histogram bucket, including the extra bucket. */
	for (bucket = VM_SUPER_PAGES; bucket >= 0; bucket--) {
		clustered_writes[bucket] = 0;
		clustered_reads[bucket] = 0;
	}
}
void bs_no_paging_space(boolean_t);
/*
 * Terminal handler for exhausted paging space.  Optionally logs that
 * physical memory is gone too, then panics -- there is no recovery path.
 */
void
bs_no_paging_space(
boolean_t out_of_memory)
{
if (out_of_memory)
dprintf(("*** OUT OF MEMORY ***\n"));
panic("bs_no_paging_space: NOT ENOUGH PAGING SPACE");
}
void bs_more_space(int);
void bs_commit(int);
/* Over-commit accounting shared by bs_more_space()/bs_commit(); all of
 * these are read and written under the BSL lock. */
boolean_t user_warned = FALSE;
unsigned int clusters_committed = 0;
unsigned int clusters_available = 0;
unsigned int clusters_committed_peak = 0;
/*
 * Account for nclusters of newly available paging space.  If we are back
 * at or above the committed level, clear any outstanding shortage warning;
 * otherwise report how far short we still are.  Messages are only printed
 * while a prior warning is outstanding (user_warned) and verbose is set.
 */
void
bs_more_space(
int nclusters)
{
BSL_LOCK();
clusters_available += nclusters;
if (clusters_available >= clusters_committed) {
if (verbose && user_warned) {
printf("%s%s - %d excess clusters now.\n",
my_name,
"paging space is OK now",
clusters_available - clusters_committed);
/* Shortage resolved: reset warning state and the peak tracker. */
user_warned = FALSE;
clusters_committed_peak = 0;
}
} else {
if (verbose && user_warned) {
printf("%s%s - still short of %d clusters.\n",
my_name,
"WARNING: paging space over-committed",
clusters_committed - clusters_available);
/* Still short: shrink the recorded peak by what just arrived. */
clusters_committed_peak -= nclusters;
}
}
BSL_UNLOCK();
return;
}
/*
 * Commit (positive nclusters) or release (negative) paging-space clusters.
 * Emits a one-shot warning when commitments first exceed availability,
 * tracks the high-water mark of commitments, and clears the warning once
 * we drop back to a non-over-committed state.  All under the BSL lock.
 */
void
bs_commit(
int nclusters)
{
BSL_LOCK();
clusters_committed += nclusters;
if (clusters_committed > clusters_available) {
if (verbose && !user_warned) {
/* First transition into over-commit: warn once. */
user_warned = TRUE;
printf("%s%s - short of %d clusters.\n",
my_name,
"WARNING: paging space over-committed",
clusters_committed - clusters_available);
}
if (clusters_committed > clusters_committed_peak) {
clusters_committed_peak = clusters_committed;
}
} else {
if (verbose && user_warned) {
printf("%s%s - was short of up to %d clusters.\n",
my_name,
"paging space is OK now",
clusters_committed_peak - clusters_available);
user_warned = FALSE;
clusters_committed_peak = 0;
}
}
BSL_UNLOCK();
return;
}
int default_pager_info_verbose = 1;
/*
 * Report global paging-space totals: *totalp gets the page capacity of all
 * paging segments, *freep the free pages (free clusters << cluster shift).
 * Walks the segment table under the PSL lock, skipping empty slots, and
 * optionally logs the over-commit state while a warning is outstanding.
 */
void
bs_global_info(
vm_size_t *totalp,
vm_size_t *freep)
{
vm_size_t pages_total, pages_free;
paging_segment_t ps;
int i;
PSL_LOCK();
pages_total = pages_free = 0;
for (i = 0; i <= paging_segment_max; i++) {
ps = paging_segments[i];
if (ps == PAGING_SEGMENT_NULL)
continue;
pages_total += ps->ps_pgnum;
/* Free space is tracked in clusters; convert to pages. */
pages_free += ps->ps_clcount << ps->ps_clshift;
DP_DEBUG(DEBUG_BS_INTERNAL,
("segment #%d: %d total, %d free\n",
i, ps->ps_pgnum, ps->ps_clcount << ps->ps_clshift));
}
*totalp = pages_total;
*freep = pages_free;
if (verbose && user_warned && default_pager_info_verbose) {
if (clusters_available < clusters_committed) {
printf("%s %d clusters committed, %d available.\n",
my_name,
clusters_committed,
clusters_available);
}
}
PSL_UNLOCK();
}
backing_store_t backing_store_alloc(void);
/*
 * Allocate and initialize one backing_store structure.  Panics on
 * allocation failure rather than returning NULL (callers do not check).
 * The lock is initialized and all fields/statistics start zeroed; the
 * port is attached later by default_pager_backing_store_create().
 */
backing_store_t
backing_store_alloc(void)
{
backing_store_t bs;
bs = (backing_store_t) kalloc(sizeof (struct backing_store));
if (bs == BACKING_STORE_NULL)
panic("backing_store_alloc: no memory");
BS_LOCK_INIT(bs);
bs->bs_port = MACH_PORT_NULL;
bs->bs_priority = 0;
bs->bs_clsize = 0;
bs->bs_pages_total = 0;
bs->bs_pages_in = 0;
bs->bs_pages_in_fail = 0;
bs->bs_pages_out = 0;
bs->bs_pages_out_fail = 0;
return bs;
}
backing_store_t backing_store_lookup(MACH_PORT_FACE);
/*
 * Find the backing store identified by "port".
 *
 * IMPORTANT CONTRACT: on success the matching bs is returned with its
 * BS lock HELD (the lock taken at the top of the loop is not released
 * on the match path) -- the caller must BS_UNLOCK it.  Returns
 * BACKING_STORE_NULL, with no locks held, when port is null or no
 * backing store matches.
 */
backing_store_t
backing_store_lookup(
MACH_PORT_FACE port)
{
backing_store_t bs;
if ((port == MACH_PORT_NULL))
return BACKING_STORE_NULL;
BSL_LOCK();
queue_iterate(&backing_store_list.bsl_queue, bs, backing_store_t,
bs_links) {
BS_LOCK(bs);
if (bs->bs_port == port) {
/* Match: drop the list lock but keep bs locked for the caller. */
BSL_UNLOCK();
return bs;
}
BS_UNLOCK(bs);
}
BSL_UNLOCK();
return BACKING_STORE_NULL;
}
void backing_store_add(backing_store_t);
/*
 * Vestigial hook called after a backing store is queued.  kr is always
 * KERN_SUCCESS here, so the panic branch is unreachable; the function is
 * effectively a no-op kept for its call site.
 */
void
backing_store_add(
__unused backing_store_t bs)
{
kern_return_t kr = KERN_SUCCESS;
if (kr != KERN_SUCCESS)
panic("backing_store_add: add to set");
}
/*
 * Validate and optionally install a default cluster size.  Only the
 * power-of-two sizes 1, 2, 4 and 8 pages are accepted.  The default
 * cluster shift is updated only while the global cluster size is still
 * unset (default_pager_clsize == 0); once it has been fixed, a valid
 * request is still reported as TRUE but has no effect.
 */
boolean_t
bs_set_default_clsize(unsigned int npages)
{
	if (npages == 1 || npages == 2 || npages == 4 || npages == 8) {
		if (default_pager_clsize == 0)
			vstruct_def_clshift = local_log2(npages);
		return TRUE;
	}
	/* Unsupported cluster size. */
	return FALSE;
}
int bs_get_global_clsize(int clsize);
/*
 * Return the global cluster size in pages, fixing it on first use.
 * If not yet set: a nonzero clsize request is rounded up to the next
 * power of two (shift capped at MAX_CLUSTER_SHIFT), the size is frozen,
 * and this pager registers itself as the host's default memory manager.
 * Panics if registration fails or another default pager already exists.
 */
int
bs_get_global_clsize(
int clsize)
{
int i;
memory_object_default_t dmm;
kern_return_t kr;
if (default_pager_clsize == 0) {
if (clsize != NO_CLSIZE) {
/* Smallest i with (1 << i) >= clsize, i.e. ceil(log2(clsize)). */
for (i = 0; (1 << i) < clsize; i++);
if (i > MAX_CLUSTER_SHIFT)
i = MAX_CLUSTER_SHIFT;
vstruct_def_clshift = i;
}
default_pager_clsize = (1 << vstruct_def_clshift);
if (verbose)
printf("%scluster size = %d page%s\n",
my_name, default_pager_clsize,
(default_pager_clsize == 1) ? "" : "s");
/* Register ourselves; dmm comes back as the previous manager. */
dmm = default_pager_object;
clsize = default_pager_clsize * vm_page_size;
kr = host_default_memory_manager(host_priv_self(),
&dmm,
clsize);
memory_object_default_deallocate(dmm);
if (kr != KERN_SUCCESS) {
panic("bs_get_global_cl_size:host_default_memory_manager");
}
if (dmm != default_pager_object) {
panic("bs_get_global_cl_size:there is another default pager");
}
}
/* Must be a positive power of two by construction. */
ASSERT(default_pager_clsize > 0 &&
(default_pager_clsize & (default_pager_clsize - 1)) == 0);
return default_pager_clsize;
}
/*
 * Create a new backing store and return its (kernel-allocated) port in
 * *backing_store.  Only accepted when "pager" is this default pager.
 * A vstruct_alias is hung off the port so incoming messages can be
 * routed back to the bs; on alias allocation failure the port and bs
 * are released and KERN_RESOURCE_SHORTAGE is returned.  Priority is
 * clamped: MAXPRI or NOPRI map to BS_MAXPRI, anything else to
 * BS_MINPRI.  The new bs is queued on the global list under BSL.
 */
kern_return_t
default_pager_backing_store_create(
memory_object_default_t pager,
int priority,
int clsize,
MACH_PORT_FACE *backing_store)
{
backing_store_t bs;
MACH_PORT_FACE port;
struct vstruct_alias *alias_struct;
if (pager != default_pager_object)
return KERN_INVALID_ARGUMENT;
bs = backing_store_alloc();
port = ipc_port_alloc_kernel();
/* The caller gets a send right to the new port. */
ipc_port_make_send(port);
assert (port != IP_NULL);
DP_DEBUG(DEBUG_BS_EXTERNAL,
("priority=%d clsize=%d bs_port=0x%x\n",
priority, clsize, (int) backing_store));
alias_struct = (struct vstruct_alias *)
kalloc(sizeof (struct vstruct_alias));
if(alias_struct != NULL) {
alias_struct->vs = (struct vstruct *)bs;
alias_struct->name = &default_pager_ops;
port->alias = (int) alias_struct;
}
else {
/* Undo the port and bs allocations; nothing was published yet. */
ipc_port_dealloc_kernel((MACH_PORT_FACE)(port));
kfree(bs, sizeof (struct backing_store));
return KERN_RESOURCE_SHORTAGE;
}
bs->bs_port = port;
if (priority == DEFAULT_PAGER_BACKING_STORE_MAXPRI)
priority = BS_MAXPRI;
else if (priority == BS_NOPRI)
priority = BS_MAXPRI;
else
priority = BS_MINPRI;
bs->bs_priority = priority;
/* clsize arrives in bytes; bs_get_global_clsize works in pages. */
bs->bs_clsize = bs_get_global_clsize(atop_32(clsize));
BSL_LOCK();
queue_enter(&backing_store_list.bsl_queue, bs, backing_store_t,
bs_links);
BSL_UNLOCK();
backing_store_add(bs);
*backing_store = port;
return KERN_SUCCESS;
}
/*
 * Fill in BACKING_STORE_BASIC_INFO for one backing store.  Global pager
 * statistics are snapshotted first under the stats lock; then the bs is
 * looked up (backing_store_lookup returns it BS-locked) and its per-store
 * counters copied out.  Free pages are recomputed on the fly by summing
 * the free clusters of this store's paging segments under PSL/PS locks.
 * The BS lock taken by the lookup is released before returning.
 */
kern_return_t
default_pager_backing_store_info(
MACH_PORT_FACE backing_store,
backing_store_flavor_t flavour,
backing_store_info_t info,
mach_msg_type_number_t *size)
{
backing_store_t bs;
backing_store_basic_info_t basic;
int i;
paging_segment_t ps;
if (flavour != BACKING_STORE_BASIC_INFO ||
*size < BACKING_STORE_BASIC_INFO_COUNT)
return KERN_INVALID_ARGUMENT;
basic = (backing_store_basic_info_t)info;
*size = BACKING_STORE_BASIC_INFO_COUNT;
VSTATS_LOCK(&global_stats.gs_lock);
basic->pageout_calls = global_stats.gs_pageout_calls;
basic->pagein_calls = global_stats.gs_pagein_calls;
basic->pages_in = global_stats.gs_pages_in;
basic->pages_out = global_stats.gs_pages_out;
basic->pages_unavail = global_stats.gs_pages_unavail;
basic->pages_init = global_stats.gs_pages_init;
basic->pages_init_writes= global_stats.gs_pages_init_writes;
VSTATS_UNLOCK(&global_stats.gs_lock);
if ((bs = backing_store_lookup(backing_store)) == BACKING_STORE_NULL)
return KERN_INVALID_ARGUMENT;
basic->bs_pages_total = bs->bs_pages_total;
PSL_LOCK();
/* Recompute free pages from this store's segments. */
bs->bs_pages_free = 0;
for (i = 0; i <= paging_segment_max; i++) {
ps = paging_segments[i];
if (ps != PAGING_SEGMENT_NULL && ps->ps_bs == bs) {
PS_LOCK(ps);
bs->bs_pages_free += ps->ps_clcount << ps->ps_clshift;
PS_UNLOCK(ps);
}
}
PSL_UNLOCK();
basic->bs_pages_free = bs->bs_pages_free;
basic->bs_pages_in = bs->bs_pages_in;
basic->bs_pages_in_fail = bs->bs_pages_in_fail;
basic->bs_pages_out = bs->bs_pages_out;
basic->bs_pages_out_fail= bs->bs_pages_out_fail;
basic->bs_priority = bs->bs_priority;
/* Cluster size is reported in bytes. */
basic->bs_clsize = ptoa_32(bs->bs_clsize);
/* Release the lock acquired by backing_store_lookup(). */
BS_UNLOCK(bs);
return KERN_SUCCESS;
}
int ps_delete(paging_segment_t);
/*
 * Drain paging segment "ps" by walking every vstruct and migrating its
 * clusters off the segment via ps_vstruct_transfer_from_segment().
 *
 * Protocol: each vstruct is pinned by bumping vs_async_pending before its
 * transfer and unpinned afterwards (waking any waiter when the count hits
 * zero).  The next vstruct is pinned before the current one is released,
 * so the walk never holds an unreferenced list position.  The walk also
 * waits out backing_store_release_trigger_disable before touching the
 * vstruct list.  Returns KERN_FAILURE if free paging space drops below
 * cluster_transfer_minimum or a transfer fails; KERN_SUCCESS otherwise.
 */
int
ps_delete(
paging_segment_t ps)
{
vstruct_t vs;
kern_return_t error = KERN_SUCCESS;
int vs_count;
VSL_LOCK();
while(backing_store_release_trigger_disable != 0) {
VSL_SLEEP(&backing_store_release_trigger_disable, THREAD_UNINT);
}
vs_count = vstruct_list.vsl_count;
vs = (vstruct_t) queue_first((queue_entry_t)&(vstruct_list.vsl_queue));
if(vs == (vstruct_t)&vstruct_list) {
/* Empty vstruct list: nothing to migrate. */
VSL_UNLOCK();
return KERN_SUCCESS;
}
VS_LOCK(vs);
vs_async_wait(vs);
if ((vs_count != 0) && (vs != NULL))
vs->vs_async_pending += 1;
VS_UNLOCK(vs);
VSL_UNLOCK();
while((vs_count != 0) && (vs != NULL)) {
vstruct_t next_vs;
if(dp_pages_free < cluster_transfer_minimum)
error = KERN_FAILURE;
else {
vm_object_t transfer_object;
unsigned int count;
upl_t upl;
/* Stage the transfer through a transient VM object + UPL
 * covering one super cluster. */
transfer_object = vm_object_allocate((vm_object_size_t)VM_SUPER_CLUSTER);
count = 0;
error = vm_object_upl_request(transfer_object,
(vm_object_offset_t)0, VM_SUPER_CLUSTER,
&upl, NULL, &count,
UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_LITE | UPL_SET_INTERNAL);
if(error == KERN_SUCCESS) {
error = ps_vstruct_transfer_from_segment(
vs, ps, upl);
upl_commit(upl, NULL, 0);
upl_deallocate(upl);
} else {
error = KERN_FAILURE;
}
vm_object_deallocate(transfer_object);
}
if(error) {
/* Unpin the current vstruct before bailing out. */
VS_LOCK(vs);
vs->vs_async_pending -= 1;
if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
vs->vs_waiting_async = FALSE;
VS_UNLOCK(vs);
thread_wakeup(&vs->vs_async_pending);
} else {
VS_UNLOCK(vs);
}
return KERN_FAILURE;
}
VSL_LOCK();
while(backing_store_release_trigger_disable != 0) {
VSL_SLEEP(&backing_store_release_trigger_disable,
THREAD_UNINT);
}
next_vs = (vstruct_t) queue_next(&(vs->vs_links));
/* Pin the successor before releasing the current vstruct. */
if((next_vs != (vstruct_t)&vstruct_list) &&
(vs != next_vs) && (vs_count != 1)) {
VS_LOCK(next_vs);
vs_async_wait(next_vs);
next_vs->vs_async_pending += 1;
VS_UNLOCK(next_vs);
}
VSL_UNLOCK();
VS_LOCK(vs);
vs->vs_async_pending -= 1;
if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
vs->vs_waiting_async = FALSE;
VS_UNLOCK(vs);
thread_wakeup(&vs->vs_async_pending);
} else {
VS_UNLOCK(vs);
}
/* Stop at the list head or when the walk has wrapped onto itself. */
if((vs == next_vs) || (next_vs == (vstruct_t)&vstruct_list))
vs = NULL;
else
vs = next_vs;
vs_count--;
}
return KERN_SUCCESS;
}
/*
 * Delete a backing store: drain and free all of its paging segments, then
 * tear down its port and bookkeeping.
 *
 * Phase 1: for each of this store's segments, mark it going-away, reserve
 * its pages out of dp_pages_free, and drain it with ps_delete(); after
 * each successful drain we "goto restart" because the segment table may
 * have changed.  If free space is too low or a drain fails, phase 2 rolls
 * everything back (clears ps_going_away, restores dp_pages_free) and
 * returns the error with the bs still valid.  Phase 3 frees the drained
 * segments and their bitmaps, recomputes paging_segment_max, releases the
 * port's alias and the port, unlinks the bs from the global list and
 * frees it.
 */
kern_return_t
default_pager_backing_store_delete(
MACH_PORT_FACE backing_store)
{
backing_store_t bs;
int i;
paging_segment_t ps;
int error;
int interim_pages_removed = 0;
/* Lookup returns the bs with its BS lock held. */
if ((bs = backing_store_lookup(backing_store)) == BACKING_STORE_NULL)
return KERN_INVALID_ARGUMENT;
#if 0
BS_UNLOCK(bs);
return KERN_FAILURE;
#endif
restart:
PSL_LOCK();
error = KERN_SUCCESS;
for (i = 0; i <= paging_segment_max; i++) {
ps = paging_segments[i];
if (ps != PAGING_SEGMENT_NULL &&
ps->ps_bs == bs &&
! ps->ps_going_away) {
PS_LOCK(ps);
ps->ps_going_away = TRUE;
PS_UNLOCK(ps);
if(dp_pages_free < (cluster_transfer_minimum
+ ps->ps_pgcount)) {
/* Not enough slack to absorb this segment's pages. */
error = KERN_FAILURE;
PSL_UNLOCK();
}
else {
/* Reserve this segment's pages while it drains. */
dp_pages_free -= ps->ps_pgcount;
interim_pages_removed += ps->ps_pgcount;
PSL_UNLOCK();
error = ps_delete(ps);
}
if (error != KERN_SUCCESS) {
PSL_LOCK();
break;
}
/* Segment drained; table may have changed -- rescan from 0. */
goto restart;
}
}
if (error != KERN_SUCCESS) {
/* Roll back: revive marked segments and restore the free count. */
for (i = 0; i <= paging_segment_max; i++) {
ps = paging_segments[i];
if (ps != PAGING_SEGMENT_NULL &&
ps->ps_bs == bs &&
ps->ps_going_away) {
PS_LOCK(ps);
ps->ps_going_away = FALSE;
PS_UNLOCK(ps);
}
}
dp_pages_free += interim_pages_removed;
PSL_UNLOCK();
BS_UNLOCK(bs);
return error;
}
/* All segments drained: free them and shrink the table. */
for (i = 0; i <= paging_segment_max; i++) {
ps = paging_segments[i];
if (ps != PAGING_SEGMENT_NULL &&
ps->ps_bs == bs) {
if(ps->ps_going_away) {
paging_segments[i] = PAGING_SEGMENT_NULL;
paging_segment_count--;
/* NOTE(review): PS_LOCK is taken and ps is then freed without
 * an unlock -- presumably safe because the segment is no longer
 * reachable from the table; confirm against lock discipline. */
PS_LOCK(ps);
kfree(ps->ps_bmap, RMAPSIZE(ps->ps_ncls));
kfree(ps, sizeof *ps);
}
}
}
/* Recompute the highest occupied slot. */
for (i = 0; i < MAX_NUM_PAGING_SEGMENTS; i++) {
if(paging_segments[i] != PAGING_SEGMENT_NULL)
paging_segment_max = i;
}
PSL_UNLOCK();
/* Release the port->bs alias, the port itself, then the bs. */
if((void *)bs->bs_port->alias != NULL)
kfree((void *) bs->bs_port->alias,
sizeof (struct vstruct_alias));
ipc_port_dealloc_kernel((ipc_port_t) (bs->bs_port));
bs->bs_port = MACH_PORT_NULL;
BS_UNLOCK(bs);
BSL_LOCK();
queue_remove(&backing_store_list.bsl_queue, bs, backing_store_t,
bs_links);
BSL_UNLOCK();
kfree(bs, sizeof *bs);
return KERN_SUCCESS;
}
int ps_enter(paging_segment_t);
/*
 * Publish a new paging segment in the global paging_segments[] table.
 * Takes the first free slot, updates paging_segment_max/count, and
 * re-opens the segment's priority in ps_select_array if it was parked at
 * BS_NOPRI or BS_FULLPRI.  Returns 0 on success, or
 * KERN_RESOURCE_SHORTAGE when the table is full.  PSL lock held for the
 * whole operation.
 */
int
ps_enter(
	paging_segment_t ps)
{
	int slot;

	PSL_LOCK();

	/* Find the first empty table slot. */
	slot = 0;
	while (slot < MAX_NUM_PAGING_SEGMENTS &&
	       paging_segments[slot] != PAGING_SEGMENT_NULL)
		slot++;

	if (slot >= MAX_NUM_PAGING_SEGMENTS) {
		PSL_UNLOCK();
		return KERN_RESOURCE_SHORTAGE;
	}

	paging_segments[slot] = ps;
	if (slot > paging_segment_max)
		paging_segment_max = slot;
	paging_segment_count++;

	/* Re-enable selection for this priority if it was parked. */
	if ((ps_select_array[ps->ps_bs->bs_priority] == BS_NOPRI) ||
	    (ps_select_array[ps->ps_bs->bs_priority] == BS_FULLPRI))
		ps_select_array[ps->ps_bs->bs_priority] = 0;

	PSL_UNLOCK();
	return 0;
}
#ifdef DEVICE_PAGING
/*
 * Attach a device-backed paging segment to "backing_store".  The new
 * region [offset, offset+count) of "device" must not overlap any existing
 * segment on the same device.  Sizes are converted from device records to
 * pages via ps_record_shift, then to clusters via the store's cluster
 * shift; the allocation bitmap (one bit per cluster) starts all-clear.
 * On success the store's page totals and the global free-page count are
 * credited and bs_more_space() is notified.
 *
 * NOTE: backing_store_lookup() returns bs BS-locked; every exit path
 * below releases that lock.
 */
kern_return_t
default_pager_add_segment(
MACH_PORT_FACE backing_store,
MACH_PORT_FACE device,
recnum_t offset,
recnum_t count,
int record_size)
{
backing_store_t bs;
paging_segment_t ps;
int i;
int error;
if ((bs = backing_store_lookup(backing_store))
== BACKING_STORE_NULL)
return KERN_INVALID_ARGUMENT;
PSL_LOCK();
for (i = 0; i <= paging_segment_max; i++) {
ps = paging_segments[i];
if (ps == PAGING_SEGMENT_NULL)
continue;
/* Reject if the new range overlaps an existing segment on
 * the same device. */
if (!(ps->ps_device != device
|| offset >= ps->ps_offset + ps->ps_recnum
|| offset + count <= ps->ps_offset)) {
PSL_UNLOCK();
BS_UNLOCK(bs);
return KERN_INVALID_ARGUMENT;
}
}
PSL_UNLOCK();
ps = (paging_segment_t) kalloc(sizeof (struct paging_segment));
if (ps == PAGING_SEGMENT_NULL) {
BS_UNLOCK(bs);
return KERN_RESOURCE_SHORTAGE;
}
ps->ps_segtype = PS_PARTITION;
ps->ps_device = device;
ps->ps_offset = offset;
/* records-per-page shift: converts record counts to page counts. */
ps->ps_record_shift = local_log2(vm_page_size / record_size);
ps->ps_recnum = count;
ps->ps_pgnum = count >> ps->ps_record_shift;
ps->ps_pgcount = ps->ps_pgnum;
ps->ps_clshift = local_log2(bs->bs_clsize);
ps->ps_clcount = ps->ps_ncls = ps->ps_pgcount >> ps->ps_clshift;
ps->ps_hint = 0;
PS_LOCK_INIT(ps);
ps->ps_bmap = (unsigned char *) kalloc(RMAPSIZE(ps->ps_ncls));
if (!ps->ps_bmap) {
kfree(ps, sizeof *ps);
BS_UNLOCK(bs);
return KERN_RESOURCE_SHORTAGE;
}
/* All clusters start free (bit clear == free). */
for (i = 0; i < ps->ps_ncls; i++) {
clrbit(ps->ps_bmap, i);
}
ps->ps_going_away = FALSE;
ps->ps_bs = bs;
if ((error = ps_enter(ps)) != 0) {
kfree(ps->ps_bmap, RMAPSIZE(ps->ps_ncls));
kfree(ps, sizeof *ps);
BS_UNLOCK(bs);
return KERN_RESOURCE_SHORTAGE;
}
bs->bs_pages_free += ps->ps_clcount << ps->ps_clshift;
bs->bs_pages_total += ps->ps_clcount << ps->ps_clshift;
BS_UNLOCK(bs);
PSL_LOCK();
dp_pages_free += ps->ps_pgcount;
PSL_UNLOCK();
bs_more_space(ps->ps_clcount);
DP_DEBUG(DEBUG_BS_INTERNAL,
("device=0x%x,offset=0x%x,count=0x%x,record_size=0x%x,shift=%d,total_size=0x%x\n",
device, offset, count, record_size,
ps->ps_record_shift, ps->ps_pgnum));
return KERN_SUCCESS;
}
/*
 * Open the named device and turn its entire capacity into a new
 * maximum-priority backing store with one paging segment.  Returns TRUE
 * on success (the device send right is then retained by the segment);
 * on failure the device right is released and FALSE is returned.
 */
boolean_t
bs_add_device(
char *dev_name,
MACH_PORT_FACE master)
{
security_token_t null_security_token = {
{ 0, 0 }
};
MACH_PORT_FACE device;
int info[DEV_GET_SIZE_COUNT];
mach_msg_type_number_t info_count;
MACH_PORT_FACE bs = MACH_PORT_NULL;
unsigned int rec_size;
recnum_t count;
int clsize;
MACH_PORT_FACE reply_port;
if (ds_device_open_sync(master, MACH_PORT_NULL, D_READ | D_WRITE,
null_security_token, dev_name, &device))
return FALSE;
info_count = DEV_GET_SIZE_COUNT;
if (!ds_device_get_status(device, DEV_GET_SIZE, info, &info_count)) {
/* Device size in records; convert the store's cluster size
 * request to bytes for backing-store creation. */
rec_size = info[DEV_GET_SIZE_RECORD_SIZE];
count = info[DEV_GET_SIZE_DEVICE_SIZE] / rec_size;
clsize = bs_get_global_clsize(0);
if (!default_pager_backing_store_create(
default_pager_object,
DEFAULT_PAGER_BACKING_STORE_MAXPRI,
(clsize * vm_page_size),
&bs)) {
if (!default_pager_add_segment(bs, device,
0, count, rec_size)) {
return TRUE;
}
/* Segment attach failed: tear the store's port down. */
ipc_port_release_receive(bs);
}
}
ipc_port_release_send(device);
return FALSE;
}
#endif
#if VS_ASYNC_REUSE
/*
 * Allocate a vs_async structure (VS_ASYNC_REUSE variant).  Pops one off
 * the free list when available; otherwise allocates a fresh structure
 * together with a kernel reply port whose alias routes replies back to
 * it.  Returns NULL on allocation failure (vs_alloc_async_failed is
 * bumped).
 *
 * NOTE(review): the result of ipc_port_alloc_kernel() is dereferenced
 * without a null check -- presumably it cannot fail here; confirm.
 */
struct vs_async *
vs_alloc_async(void)
{
struct vs_async *vsa;
MACH_PORT_FACE reply_port;
VS_ASYNC_LOCK();
if (vs_async_free_list == NULL) {
VS_ASYNC_UNLOCK();
vsa = (struct vs_async *) kalloc(sizeof (struct vs_async));
if (vsa != NULL) {
struct vstruct_alias *alias_struct;
reply_port = ipc_port_alloc_kernel();
alias_struct = (struct vstruct_alias *)
kalloc(sizeof (struct vstruct_alias));
if(alias_struct != NULL) {
alias_struct->vs = (struct vstruct *)vsa;
alias_struct->name = &default_pager_ops;
reply_port->alias = (int) alias_struct;
vsa->reply_port = reply_port;
vs_alloc_async_count++;
}
else {
/* Alias allocation failed: undo the port and vsa. */
vs_alloc_async_failed++;
ipc_port_dealloc_kernel((MACH_PORT_FACE)
(reply_port));
kfree(vsa, sizeof (struct vs_async));
vsa = NULL;
}
}
} else {
/* Reuse path: pop the head of the free list. */
vsa = vs_async_free_list;
vs_async_free_list = vs_async_free_list->vsa_next;
VS_ASYNC_UNLOCK();
}
return vsa;
}
/*
 * Return a vs_async structure to the free list for reuse (the reply
 * port and alias stay attached for the next vs_alloc_async()).
 */
void
vs_free_async(
struct vs_async *vsa)
{
VS_ASYNC_LOCK();
vsa->vsa_next = vs_async_free_list;
vs_async_free_list = vsa;
VS_ASYNC_UNLOCK();
}
#else
struct vs_async *
vs_alloc_async(void)
{
struct vs_async *vsa;
MACH_PORT_FACE reply_port;
kern_return_t kr;
vsa = (struct vs_async *) kalloc(sizeof (struct vs_async));
if (vsa != NULL) {
reply_port = ipc_port_alloc_kernel();
alias_struct = (vstruct_alias *)
kalloc(sizeof (struct vstruct_alias));
if(alias_struct != NULL) {
alias_struct->vs = reply_port;
alias_struct->name = &default_pager_ops;
reply_port->alias = (int) vsa;
vsa->reply_port = reply_port;
vs_alloc_async_count++;
}
else {
vs_alloc_async_failed++;
ipc_port_dealloc_kernel((MACH_PORT_FACE)
(reply_port));
kfree(vsa, sizeof (struct vs_async));
vsa = NULL;
}
}
return vsa;
}
void
vs_free_async(
struct vs_async *vsa)
{
MACH_PORT_FACE reply_port;
kern_return_t kr;
reply_port = vsa->reply_port;
kfree(reply_port->alias, sizeof (struct vstuct_alias));
kfree(vsa, sizeof (struct vs_async));
ipc_port_dealloc_kernel((MACH_PORT_FACE) (reply_port));
#if 0
VS_ASYNC_LOCK();
vs_alloc_async_count--;
VS_ASYNC_UNLOCK();
#endif
}
#endif
/* Zone from which vstructs are allocated (initialized elsewhere). */
zone_t vstruct_zone;
/*
 * Allocate and initialize a vstruct describing a memory object of
 * "size" bytes.  vs_size is measured in clusters:
 * ((pages(round_page(size)) - 1) >> clshift) + 1.  Small objects get a
 * direct cluster map (vs_dmap); larger ones (per INDIRECT_CLMAP) get an
 * indirect map of lazily-allocated second-level blocks (vs_imap).
 * Commits vs_size clusters of paging space via bs_commit().  Returns
 * VSTRUCT_NULL on allocation failure.
 */
vstruct_t
ps_vstruct_create(
vm_size_t size)
{
vstruct_t vs;
unsigned int i;
vs = (vstruct_t) zalloc(vstruct_zone);
if (vs == VSTRUCT_NULL) {
return VSTRUCT_NULL;
}
VS_LOCK_INIT(vs);
vs->vs_pager_ops = NULL;
vs->vs_control = MEMORY_OBJECT_CONTROL_NULL;
vs->vs_references = 1;
vs->vs_seqno = 0;
#ifdef MACH_KERNEL
vs->vs_waiting_seqno = FALSE;
vs->vs_waiting_read = FALSE;
vs->vs_waiting_write = FALSE;
vs->vs_waiting_async = FALSE;
#else
mutex_init(&vs->vs_waiting_seqno, 0);
mutex_init(&vs->vs_waiting_read, 0);
mutex_init(&vs->vs_waiting_write, 0);
mutex_init(&vs->vs_waiting_refs, 0);
mutex_init(&vs->vs_waiting_async, 0);
#endif
vs->vs_readers = 0;
vs->vs_writers = 0;
vs->vs_errors = 0;
vs->vs_clshift = local_log2(bs_get_global_clsize(0));
vs->vs_size = ((atop_32(round_page_32(size)) - 1) >> vs->vs_clshift) + 1;
vs->vs_async_pending = 0;
if (INDIRECT_CLMAP(vs->vs_size)) {
vs->vs_imap = (struct vs_map **)
kalloc(INDIRECT_CLMAP_SIZE(vs->vs_size));
vs->vs_indirect = TRUE;
} else {
vs->vs_dmap = (struct vs_map *)
kalloc(CLMAP_SIZE(vs->vs_size));
vs->vs_indirect = FALSE;
}
vs->vs_xfer_pending = FALSE;
DP_DEBUG(DEBUG_VS_INTERNAL,
("map=0x%x, indirect=%d\n", (int) vs->vs_dmap, vs->vs_indirect));
/* NOTE(review): this null check covers both the direct and indirect
 * allocation above -- presumably vs_dmap and vs_imap share storage
 * (a union) in struct vstruct; confirm in the header. */
if (!vs->vs_dmap) {
kfree(vs, sizeof *vs);
return VSTRUCT_NULL;
}
/* Start with a fully clear map: no clusters allocated yet. */
if (vs->vs_indirect)
memset(vs->vs_imap, 0,
INDIRECT_CLMAP_SIZE(vs->vs_size));
else
for (i = 0; i < vs->vs_size; i++)
VSM_CLR(vs->vs_dmap[i]);
VS_MAP_LOCK_INIT(vs);
bs_commit(vs->vs_size);
return vs;
}
paging_segment_t ps_select_segment(unsigned int, int *);
/*
 * Pick a paging segment with at least one free cluster of shift >= "shift"
 * and reserve that cluster (decrementing ps_clcount and dp_pages_free).
 * *psindex receives the table index of the chosen segment.
 *
 * Fast path: with exactly one segment, use it directly (this path also
 * sets bs_low when the low-space trigger fires).  General path: scan
 * priorities from BS_MAXPRI down, round-robining within a priority via
 * ps_select_array[]; ALLOC_STRIDE keeps allocations on one segment until
 * a stride's worth of clusters has been placed, then rotates.  A priority
 * whose segments are all full is parked at BS_FULLPRI.  When the free
 * count drops below minimum_pages_remaining, the (one-shot) trigger port
 * is fired with HI_WAT_ALERT after all locks are dropped.
 * Returns PAGING_SEGMENT_NULL when nothing suitable exists.
 */
paging_segment_t
ps_select_segment(
unsigned int shift,
int *psindex)
{
paging_segment_t ps;
int i;
int j;
PSL_LOCK();
if (paging_segment_count == 1) {
paging_segment_t lps;
ipc_port_t trigger = IP_NULL;
ps = paging_segments[paging_segment_max];
*psindex = paging_segment_max;
PS_LOCK(ps);
if (ps->ps_going_away) {
/* Segment is being deleted: nothing to hand out. */
lps = PAGING_SEGMENT_NULL;
} else {
ASSERT(ps->ps_clshift >= shift);
if (ps->ps_clcount) {
ps->ps_clcount--;
dp_pages_free -= 1 << ps->ps_clshift;
if(min_pages_trigger_port &&
(dp_pages_free < minimum_pages_remaining)) {
/* Take the trigger port; fire it after unlocking. */
trigger = min_pages_trigger_port;
min_pages_trigger_port = NULL;
bs_low = TRUE;
}
lps = ps;
} else
lps = PAGING_SEGMENT_NULL;
}
PS_UNLOCK(ps);
PSL_UNLOCK();
if (trigger != IP_NULL) {
default_pager_space_alert(trigger, HI_WAT_ALERT);
ipc_port_release_send(trigger);
}
return lps;
}
if (paging_segment_count == 0) {
PSL_UNLOCK();
return PAGING_SEGMENT_NULL;
}
for (i = BS_MAXPRI;
i >= BS_MINPRI; i--) {
int start_index;
if ((ps_select_array[i] == BS_NOPRI) ||
(ps_select_array[i] == BS_FULLPRI))
continue;
start_index = ps_select_array[i];
if(!(paging_segments[start_index])) {
j = start_index+1;
physical_transfer_cluster_count = 0;
}
else if ((physical_transfer_cluster_count+1) == (ALLOC_STRIDE >>
(((paging_segments[start_index])->ps_clshift)
+ vm_page_shift))) {
/* Stride exhausted on this segment: rotate to the next. */
physical_transfer_cluster_count = 0;
j = start_index + 1;
} else {
/* Keep striping onto the current segment. */
physical_transfer_cluster_count+=1;
j = start_index;
if(start_index == 0)
start_index = paging_segment_max;
else
start_index = start_index - 1;
}
while (1) {
if (j > paging_segment_max)
j = 0;
if ((ps = paging_segments[j]) &&
(ps->ps_bs->bs_priority == i)) {
PS_LOCK(ps);
if (ps->ps_going_away) {
/* Skip segments being deleted. */
} else if ((ps->ps_clcount) &&
(ps->ps_clshift >= shift)) {
ipc_port_t trigger = IP_NULL;
ps->ps_clcount--;
dp_pages_free -= 1 << ps->ps_clshift;
if(min_pages_trigger_port &&
(dp_pages_free <
minimum_pages_remaining)) {
trigger = min_pages_trigger_port;
min_pages_trigger_port = NULL;
}
PS_UNLOCK(ps);
/* Remember where to resume next time. */
ps_select_array[i] = j;
PSL_UNLOCK();
if (trigger != IP_NULL) {
default_pager_space_alert(
trigger,
HI_WAT_ALERT);
ipc_port_release_send(trigger);
}
*psindex = j;
return ps;
}
PS_UNLOCK(ps);
}
if (j == start_index) {
/* Wrapped: every segment at this priority is full. */
ps_select_array[i] = BS_FULLPRI;
break;
}
j++;
}
}
PSL_UNLOCK();
return PAGING_SEGMENT_NULL;
}
vm_offset_t ps_allocate_cluster(vstruct_t, int *, paging_segment_t);
/*
 * Allocate one cluster for "vs", returning its cluster number within the
 * chosen segment, or (vm_offset_t)-1 when no space exists anywhere.
 * If use_ps is given, the cluster is taken from that segment (caller
 * guarantees ps_clcount != 0); otherwise ps_select_segment() picks one
 * and *psindex receives its table index.  When everything is full,
 * dp_pages_free is zeroed, bs_low set, and the low-space trigger fired.
 * The actual cluster is found by scanning the segment's bitmap from
 * ps_hint for the first clear bit, which is then set.
 */
vm_offset_t
ps_allocate_cluster(
vstruct_t vs,
int *psindex,
paging_segment_t use_ps)
{
unsigned int byte_num;
int bit_num = 0;
paging_segment_t ps;
vm_offset_t cluster;
ipc_port_t trigger = IP_NULL;
if (use_ps != PAGING_SEGMENT_NULL) {
ps = use_ps;
PSL_LOCK();
PS_LOCK(ps);
ASSERT(ps->ps_clcount != 0);
ps->ps_clcount--;
dp_pages_free -= 1 << ps->ps_clshift;
if(min_pages_trigger_port &&
(dp_pages_free < minimum_pages_remaining)) {
/* Take the one-shot trigger; fire it after unlocking. */
trigger = min_pages_trigger_port;
min_pages_trigger_port = NULL;
}
PSL_UNLOCK();
PS_UNLOCK(ps);
if (trigger != IP_NULL) {
default_pager_space_alert(trigger, HI_WAT_ALERT);
ipc_port_release_send(trigger);
}
} else if ((ps = ps_select_segment(vs->vs_clshift, psindex)) ==
PAGING_SEGMENT_NULL) {
/* No segment has space: rate-limited log (one per 5 s). */
static uint32_t lastnotify = 0;
uint32_t now, nanoseconds_dummy;
clock_get_system_nanotime(&now, &nanoseconds_dummy);
if (now > lastnotify + 5) {
dprintf(("no space in available paging segments\n"));
lastnotify = now;
}
PSL_LOCK();
dp_pages_free = 0;
if(min_pages_trigger_port) {
trigger = min_pages_trigger_port;
min_pages_trigger_port = NULL;
bs_low = TRUE;
}
PSL_UNLOCK();
if (trigger != IP_NULL) {
default_pager_space_alert(trigger, HI_WAT_ALERT);
ipc_port_release_send(trigger);
}
return (vm_offset_t) -1;
}
/* Scan the bitmap from the hint for the first free (clear) bit. */
PS_LOCK(ps);
byte_num = ps->ps_hint;
for (; byte_num < howmany(ps->ps_ncls, NBBY); byte_num++) {
if (*(ps->ps_bmap + byte_num) != BYTEMASK) {
for (bit_num = 0; bit_num < NBBY; bit_num++) {
if (isclr((ps->ps_bmap + byte_num), bit_num))
break;
}
ASSERT(bit_num != NBBY);
break;
}
}
ps->ps_hint = byte_num;
cluster = (byte_num*NBBY) + bit_num;
ASSERT(cluster < ps->ps_ncls);
/* Mark the cluster allocated. */
setbit(ps->ps_bmap, cluster);
PS_UNLOCK(ps);
return cluster;
}
void ps_deallocate_cluster(paging_segment_t, vm_offset_t);
/*
 * Return cluster "cluster" to segment "ps": clear its bitmap bit, credit
 * the free-cluster and free-page counts, lower the allocation hint if
 * this cluster precedes it, and re-open the segment's priority in
 * ps_select_array if it had been parked at BS_FULLPRI.  Panics on an
 * out-of-range cluster number.
 */
void
ps_deallocate_cluster(
paging_segment_t ps,
vm_offset_t cluster)
{
if (cluster >= (vm_offset_t) ps->ps_ncls)
panic("ps_deallocate_cluster: Invalid cluster number");
PSL_LOCK();
PS_LOCK(ps);
clrbit(ps->ps_bmap, cluster);
++ps->ps_clcount;
dp_pages_free += 1 << ps->ps_clshift;
PSL_UNLOCK();
/* Move the scan hint back so this cluster is found quickly. */
if ((cluster/NBBY) < ps->ps_hint) {
ps->ps_hint = (cluster/NBBY);
}
PS_UNLOCK(ps);
/* This priority has free space again: make it selectable. */
PSL_LOCK();
if (ps_select_array[ps->ps_bs->bs_priority] == BS_FULLPRI)
ps_select_array[ps->ps_bs->bs_priority] = 0;
PSL_UNLOCK();
return;
}
void ps_dealloc_vsmap(struct vs_map *, vm_size_t);
/*
 * Release every backing-store cluster referenced by a cluster-map array:
 * for each entry that is neither clear nor marked in error, hand its
 * cluster back to its paging segment.
 */
void
ps_dealloc_vsmap(
	struct vs_map	*vsmap,
	vm_size_t	size)
{
	unsigned int entry;

	for (entry = 0; entry < size; entry++) {
		/* Skip unused slots and slots recording an error. */
		if (VSM_ISCLR(vsmap[entry]) || VSM_ISERR(vsmap[entry]))
			continue;
		ps_deallocate_cluster(VSM_PS(vsmap[entry]),
				      VSM_CLOFF(vsmap[entry]));
	}
}
/*
 * Tear down a vstruct: release every cluster it maps (via
 * ps_dealloc_vsmap), free the direct map or the indirect map and all of
 * its allocated second-level blocks, give back the committed paging
 * space, and return the vstruct to its zone.
 */
void
ps_vstruct_dealloc(
vstruct_t vs)
{
unsigned int i;
VS_MAP_LOCK(vs);
if (vs->vs_indirect) {
/* Free each lazily-allocated second-level map block. */
for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
if (vs->vs_imap[i] != NULL) {
ps_dealloc_vsmap(vs->vs_imap[i], CLMAP_ENTRIES);
kfree(vs->vs_imap[i], CLMAP_THRESHOLD);
}
}
kfree(vs->vs_imap, INDIRECT_CLMAP_SIZE(vs->vs_size));
} else {
ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size);
kfree(vs->vs_dmap, CLMAP_SIZE(vs->vs_size));
}
VS_MAP_UNLOCK(vs);
/* Return the paging-space commitment made at creation. */
bs_commit(- vs->vs_size);
zfree(vstruct_zone, vs);
}
int ps_map_extend(vstruct_t, unsigned int);
/*
 * Grow a vstruct's cluster map to cover new_size clusters.  Returns 0 on
 * success (also when already big enough), -1 on allocation failure (the
 * vstruct is left unchanged).  Handles all three growth shapes:
 * indirect->indirect (copy first-level pointers; or just bump vs_size if
 * the first level doesn't grow), direct->indirect (new first level whose
 * block 0 receives the old direct entries), and direct->direct (larger
 * direct array).  Adjusts the paging-space commitment by the delta.
 * Caller is expected to hold the VS map lock.
 */
int ps_map_extend(
vstruct_t vs,
unsigned int new_size)
{
struct vs_map **new_imap;
struct vs_map *new_dmap = NULL;
int newdsize;
int i;
void *old_map = NULL;
int old_map_size = 0;
if (vs->vs_size >= new_size) {
/* Already large enough. */
return 0;
}
if (INDIRECT_CLMAP(new_size)) {
int new_map_size = INDIRECT_CLMAP_SIZE(new_size);
old_map_size = INDIRECT_CLMAP_SIZE(vs->vs_size);
if (vs->vs_indirect &&
(new_map_size == old_map_size)) {
/* First level already spans new_size: bookkeeping only. */
bs_commit(new_size - vs->vs_size);
vs->vs_size = new_size;
return 0;
}
new_imap = (struct vs_map **)kalloc(new_map_size);
if (new_imap == NULL) {
return -1;
}
memset(new_imap, 0, new_map_size);
if (vs->vs_indirect) {
/* indirect -> larger indirect: copy first-level pointers. */
memcpy(new_imap, vs->vs_imap, old_map_size);
old_map = (void *) vs->vs_imap;
newdsize = 0;
} else {
/* direct -> indirect: old direct entries move into block 0. */
if ((new_imap[0] = (struct vs_map *)
kalloc(CLMAP_THRESHOLD)) == NULL) {
kfree(new_imap, new_map_size);
return -1;
}
new_dmap = new_imap[0];
newdsize = CLMAP_ENTRIES;
}
} else {
/* direct -> larger direct. */
new_imap = NULL;
newdsize = new_size;
if ((new_dmap = (struct vs_map *)
kalloc(CLMAP_SIZE(new_size))) == NULL) {
return -1;
}
}
if (newdsize) {
/* Copy the old direct entries and clear the new tail. */
old_map = (void *) vs->vs_dmap;
old_map_size = CLMAP_SIZE(vs->vs_size);
memcpy(new_dmap, vs->vs_dmap, old_map_size);
for (i = vs->vs_size; i < newdsize; i++)
VSM_CLR(new_dmap[i]);
}
if (new_imap) {
vs->vs_imap = new_imap;
vs->vs_indirect = TRUE;
} else
vs->vs_dmap = new_dmap;
bs_commit(new_size - vs->vs_size);
vs->vs_size = new_size;
if (old_map)
kfree(old_map, old_map_size);
return 0;
}
/*
 * Look up (CL_FIND) or allocate (CL_ALLOC) the backing-store mapping for
 * "offset" within "vs".  On success returns the byte offset into the
 * paging segment and fills *clmap with the segment, the page count
 * remaining in the cluster from "offset", and the written/allocated
 * bitmaps (shifted so they start at "offset").  Returns (vm_offset_t)-1
 * when no mapping exists (CL_FIND), when extension or cluster allocation
 * fails, or when the cluster is marked in error.
 *
 * Special case: size != 0 with CL_FIND is the write-completion path --
 * with no error the written pages are marked (VSM_SETPG); with an error
 * the failure is charged to the store's stats, the cluster is marked in
 * error and released.  That path returns 0, not a segment offset.
 */
vm_offset_t
ps_clmap(
vstruct_t vs,
vm_offset_t offset,
struct clmap *clmap,
int flag,
vm_size_t size,
int error)
{
vm_offset_t cluster;
vm_offset_t newcl;
vm_offset_t newoff;
unsigned int i;
struct vs_map *vsmap;
VS_MAP_LOCK(vs);
ASSERT(vs->vs_dmap);
/* Which cluster of the object does "offset" fall in? */
cluster = atop_32(offset) >> vs->vs_clshift;
clmap->cl_error = 0;
if (cluster >= vs->vs_size) {
if (flag == CL_FIND) {
/* Beyond the map and not allowed to grow it. */
VS_MAP_UNLOCK(vs);
return (vm_offset_t) -1;
}
if (ps_map_extend(vs, cluster + 1)) {
VS_MAP_UNLOCK(vs);
return (vm_offset_t) -1;
}
}
if (vs->vs_indirect) {
long ind_block = cluster/CLMAP_ENTRIES;
/* Second-level blocks are allocated lazily. */
vsmap = vs->vs_imap[ind_block];
if (vsmap == NULL) {
if (flag == CL_FIND) {
VS_MAP_UNLOCK(vs);
return (vm_offset_t) -1;
}
vsmap = (struct vs_map *) kalloc(CLMAP_THRESHOLD);
if (vsmap == NULL) {
VS_MAP_UNLOCK(vs);
return (vm_offset_t) -1;
}
for (i = 0; i < CLMAP_ENTRIES; i++)
VSM_CLR(vsmap[i]);
vs->vs_imap[ind_block] = vsmap;
}
} else
vsmap = vs->vs_dmap;
ASSERT(vsmap);
vsmap += cluster%CLMAP_ENTRIES;
if (VSM_ISERR(*vsmap)) {
/* Cluster previously marked in error: report it. */
clmap->cl_error = VSM_GETERR(*vsmap);
VS_MAP_UNLOCK(vs);
return (vm_offset_t) -1;
} else if (VSM_ISCLR(*vsmap)) {
int psindex;
if (flag == CL_FIND) {
/* Nothing mapped; optionally record an error for later reads. */
if (error) {
VSM_SETERR(*vsmap, error);
}
VS_MAP_UNLOCK(vs);
return (vm_offset_t) -1;
} else {
/* CL_ALLOC: back this cluster with real paging space. */
newcl = ps_allocate_cluster(vs, &psindex,
PAGING_SEGMENT_NULL);
if (newcl == (vm_offset_t) -1) {
VS_MAP_UNLOCK(vs);
return (vm_offset_t) -1;
}
VSM_CLR(*vsmap);
VSM_SETCLOFF(*vsmap, newcl);
VSM_SETPS(*vsmap, psindex);
}
} else
newcl = VSM_CLOFF(*vsmap);
clmap->cl_ps = VSM_PS(*vsmap);
clmap->cl_numpages = VSCLSIZE(vs);
clmap->cl_bmap.clb_map = (unsigned int) VSM_BMAP(*vsmap);
ASSERT(trunc_page(offset) == offset);
/* Convert cluster number to a byte offset within the segment,
 * and compute the byte offset within the cluster. */
newcl = ptoa_32(newcl) << vs->vs_clshift;
newoff = offset & ((1<<(vm_page_shift + vs->vs_clshift)) - 1);
if (flag == CL_ALLOC) {
/* Mark the pages being allocated, starting at newoff. */
i = atop_32(newoff);
while ((size > 0) && (i < VSCLSIZE(vs))) {
VSM_SETALLOC(*vsmap, i);
i++;
size -= vm_page_size;
}
}
clmap->cl_alloc.clb_map = (unsigned int) VSM_ALLOC(*vsmap);
if (newoff) {
/* Shift the returned bitmaps so they start at "offset". */
clmap->cl_numpages -= atop_32(newoff);
CLMAP_SHIFT(clmap, vs);
CLMAP_SHIFTALLOC(clmap, vs);
}
if (size && flag == CL_FIND) {
/* Write-completion path (see header comment). */
vm_offset_t off = (vm_offset_t) 0;
if (!error) {
for (i = VSCLSIZE(vs) - clmap->cl_numpages; size > 0;
i++) {
VSM_SETPG(*vsmap, i);
size -= vm_page_size;
}
ASSERT(i <= VSCLSIZE(vs));
} else {
/* Failed write: record the error and release the cluster. */
BS_STAT(clmap->cl_ps->ps_bs,
clmap->cl_ps->ps_bs->bs_pages_out_fail +=
atop_32(size));
off = VSM_CLOFF(*vsmap);
VSM_SETERR(*vsmap, error);
}
if (off != (vm_offset_t) 0)
ps_deallocate_cluster(clmap->cl_ps, off);
VS_MAP_UNLOCK(vs);
return (vm_offset_t) 0;
} else
VS_MAP_UNLOCK(vs);
DP_DEBUG(DEBUG_VS_INTERNAL,
("returning 0x%X,vs=0x%X,vsmap=0x%X,flag=%d\n",
newcl+newoff, (int) vs, (int) vsmap, flag));
DP_DEBUG(DEBUG_VS_INTERNAL,
(" clmap->cl_ps=0x%X,cl_numpages=%d,clbmap=0x%x,cl_alloc=%x\n",
(int) clmap->cl_ps, clmap->cl_numpages,
(int) clmap->cl_bmap.clb_map, (int) clmap->cl_alloc.clb_map));
return (newcl + newoff);
}
void ps_clunmap(vstruct_t, vm_offset_t, vm_size_t);
/*
 * ps_clunmap: release the backing store underlying the byte range
 * [offset, offset + length) of vstruct "vs".
 *
 * Walks the range page by page under VS_MAP_LOCK, clearing each page's
 * "paged out" (PG) and "allocated" bits in its cluster's vs_map entry.
 * When a cluster's allocation bitmap drains to zero, the cluster is
 * returned to its paging segment and the map entry is reset.
 */
void
ps_clunmap(
	vstruct_t	vs,
	vm_offset_t	offset,
	vm_size_t	length)
{
	vm_offset_t	cluster;	/* cluster index of current offset */
	struct vs_map	*vsmap;		/* map entry for that cluster */
	VS_MAP_LOCK(vs);
	/* Process one cluster (or the tail of one) per iteration. */
	while (length > 0) {
		vm_offset_t	newoff;
		unsigned int	i;
		cluster = atop_32(offset) >> vs->vs_clshift;
		if (vs->vs_indirect)	/* two-level map */
			vsmap = vs->vs_imap[cluster/CLMAP_ENTRIES];
		else
			vsmap = vs->vs_dmap;
		if (vsmap == NULL) {
			/*
			 * NOTE(review): a missing indirect map block aborts
			 * the WHOLE remaining range, not just this block --
			 * confirm that later clusters can never be mapped
			 * when an earlier block is absent.
			 */
			VS_MAP_UNLOCK(vs);
			return;
		}
		vsmap += cluster%CLMAP_ENTRIES;
		if (VSM_ISCLR(*vsmap)) {
			/* Cluster never allocated: skip one page, retry. */
			length -= vm_page_size;
			offset += vm_page_size;
			continue;
		}
		/*
		 * Offset not cluster-aligned: start clearing at the
		 * in-cluster page index instead of page 0.
		 */
		if ( (newoff = (offset&((1<<(vm_page_shift+vs->vs_clshift))-1))) ) {
			ASSERT(trunc_page(newoff) == newoff);
			i = atop_32(newoff);
		} else
			i = 0;
		/* Clear written + allocated bits for each page in range. */
		while ((i < VSCLSIZE(vs)) && (length > 0)) {
			VSM_CLRPG(*vsmap, i);
			VSM_CLRALLOC(*vsmap, i);
			length -= vm_page_size;
			offset += vm_page_size;
			i++;
		}
		/*
		 * Nothing left allocated in this cluster: give the
		 * cluster back to its paging segment and reset the entry.
		 */
		if (!VSM_ALLOC(*vsmap)) {
			ps_deallocate_cluster(VSM_PS(*vsmap),
					      VSM_CLOFF(*vsmap));
			VSM_CLR(*vsmap);
		}
	}
	VS_MAP_UNLOCK(vs);
}
void ps_vs_write_complete(vstruct_t, vm_offset_t, vm_size_t, int);
/*
 * ps_vs_write_complete: record the outcome of a write of "size" bytes
 * at "offset" into vstruct "vs".  Delegates to ps_clmap() in CL_FIND
 * mode, which marks the pages as written on success or, when "error"
 * is non-zero, records the error in the cluster map and releases the
 * cluster's backing store.
 */
void
ps_vs_write_complete(
	vstruct_t	vs,
	vm_offset_t	offset,
	vm_size_t	size,
	int		error)
{
	struct clmap	map_info;

	/* Result is irrelevant here; ps_clmap updates the map in place. */
	(void) ps_clmap(vs, offset, &map_info, CL_FIND, size, error);
}
void vs_cl_write_complete(vstruct_t, paging_segment_t, vm_offset_t, vm_offset_t, vm_size_t, boolean_t, int);
/*
 * vs_cl_write_complete: completion handler for a cluster write.
 * Accounts the pages written, propagates success/failure into the
 * cluster map via ps_vs_write_complete(), and for asynchronous writes
 * retires the outstanding-I/O count, waking any thread sleeping on
 * &vs->vs_async_pending once it drains to zero.
 */
void
vs_cl_write_complete(
	vstruct_t	vs,
	__unused paging_segment_t	ps,
	vm_offset_t	offset,
	__unused vm_offset_t	addr,
	vm_size_t	size,
	boolean_t	async,
	int	error)
{
	if (error) {
		dprintf(("write failed error = 0x%x\n", error));
	} else
		GSTAT(global_stats.gs_pages_out += atop_32(size));
	/* Mark pages written (or record "error") in the cluster map. */
	ps_vs_write_complete(vs, offset, size, error);
	if (async) {
		VS_LOCK(vs);
		ASSERT(vs->vs_async_pending > 0);
		/*
		 * vs_async_pending is decremented by "size" (bytes) --
		 * assumes the issuing side incremented it by the same
		 * amount; TODO confirm against the enqueue path (not
		 * visible in this chunk).
		 */
		vs->vs_async_pending -= size;
		if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
			vs->vs_waiting_async = FALSE;
			VS_UNLOCK(vs);
			/* Last async write drained: wake the waiter. */
			thread_wakeup(&vs->vs_async_pending);
		} else {
			VS_UNLOCK(vs);
		}
	}
}
#ifdef DEVICE_PAGING
kern_return_t device_write_reply(MACH_PORT_FACE, kern_return_t, io_buf_len_t);
/*
 * device_write_reply: MiG completion callback for an asynchronous
 * device write issued by ps_write_device().  The vs_async tracking the
 * request is recovered from the reply port's alias field.  A short
 * write (bytes_written != vsa_size) is converted into KERN_FAILURE.
 * VSA_TRANSFER requests discard their vm_map_copy payload on error and
 * complete via ps_vs_write_complete(); ordinary cluster writes complete
 * via vs_cl_write_complete() with async == TRUE.
 */
kern_return_t
device_write_reply(
	MACH_PORT_FACE	reply_port,
	kern_return_t	device_code,
	io_buf_len_t	bytes_written)
{
	struct vs_async	*vsa;
	/* The vs_async for this request was stashed in the port alias. */
	vsa = (struct vs_async *)
		((struct vstruct_alias *)(reply_port->alias))->vs;
	if (device_code == KERN_SUCCESS && bytes_written != vsa->vsa_size) {
		/* Treat a short write as a device failure. */
		device_code = KERN_FAILURE;
	}
	vsa->vsa_error = device_code;
	ASSERT(vsa->vsa_vs != VSTRUCT_NULL);
	if(vsa->vsa_flags & VSA_TRANSFER) {
		/* Segment-transfer write: payload is a vm_map_copy. */
		if(vsa->vsa_error) {
			vm_map_copy_discard((vm_map_copy_t)vsa->vsa_addr);
		}
		ps_vs_write_complete(vsa->vsa_vs, vsa->vsa_offset,
				     vsa->vsa_size, vsa->vsa_error);
	} else {
		vs_cl_write_complete(vsa->vsa_vs, vsa->vsa_ps, vsa->vsa_offset,
				     vsa->vsa_addr, vsa->vsa_size, TRUE,
				     vsa->vsa_error);
	}
	VS_FREE_ASYNC(vsa);
	return KERN_SUCCESS;
}
kern_return_t device_write_reply_inband(MACH_PORT_FACE, kern_return_t, io_buf_len_t);
/*
 * device_write_reply_inband: the default pager never issues inband
 * device writes, so receiving this reply indicates a protocol error.
 */
kern_return_t
device_write_reply_inband(
	MACH_PORT_FACE	reply_port,
	kern_return_t	return_code,
	io_buf_len_t	bytes_written)
{
	panic("device_write_reply_inband: illegal");
	return KERN_SUCCESS;	/* not reached */
}
kern_return_t device_read_reply(MACH_PORT_FACE, kern_return_t, io_buf_ptr_t, mach_msg_type_number_t);
/*
 * device_read_reply: MiG completion callback for an asynchronous device
 * read.  Stores the returned buffer, size, and status into the vs_async
 * recovered from the reply port's alias, then wakes the thread blocked
 * in ps_read_device() (which sleeps on &vsa->vsa_lock).
 */
kern_return_t
device_read_reply(
	MACH_PORT_FACE	reply_port,
	kern_return_t	return_code,
	io_buf_ptr_t	data,
	mach_msg_type_number_t	dataCnt)
{
	struct vs_async	*vsa;
	/* Recover the request state stashed in the port alias. */
	vsa = (struct vs_async *)
		((struct vstruct_alias *)(reply_port->alias))->vs;
	vsa->vsa_addr = (vm_offset_t)data;
	vsa->vsa_size = (vm_size_t)dataCnt;
	vsa->vsa_error = return_code;
	/* Waiter in ps_read_device() sleeps on this address. */
	thread_wakeup(&vsa->vsa_lock);
	return KERN_SUCCESS;
}
kern_return_t device_read_reply_inband(MACH_PORT_FACE, kern_return_t, io_buf_ptr_inband_t, mach_msg_type_number_t);
/*
 * device_read_reply_inband: inband device reads are never issued by the
 * default pager; this reply must not arrive.
 */
kern_return_t
device_read_reply_inband(
	MACH_PORT_FACE	reply_port,
	kern_return_t	return_code,
	io_buf_ptr_inband_t	data,
	mach_msg_type_number_t	dataCnt)
{
	panic("device_read_reply_inband: illegal");
	return KERN_SUCCESS;	/* not reached */
}
kern_return_t device_read_reply_overwrite(MACH_PORT_FACE, kern_return_t, io_buf_len_t);
/*
 * device_read_reply_overwrite: overwrite-style device reads are never
 * issued by the default pager; this reply must not arrive.
 */
kern_return_t
device_read_reply_overwrite(
	MACH_PORT_FACE	reply_port,
	kern_return_t	return_code,
	io_buf_len_t	bytes_read)
{
	panic("device_read_reply_overwrite: illegal\n");
	return KERN_SUCCESS;	/* not reached */
}
kern_return_t device_open_reply(MACH_PORT_FACE, kern_return_t, MACH_PORT_FACE);
/*
 * device_open_reply: asynchronous device opens are never issued by the
 * default pager; this reply must not arrive.
 */
kern_return_t
device_open_reply(
	MACH_PORT_FACE	reply_port,
	kern_return_t	return_code,
	MACH_PORT_FACE	device_port)
{
	panic("device_open_reply: illegal\n");
	return KERN_SUCCESS;	/* not reached */
}
/*
 * ps_read_device: read "size" bytes at "offset" from the device backing
 * paging segment "ps".  On return, *bufferp holds a vm_map_copy_t of
 * the data (or NULL) and *residualp the number of bytes NOT read.
 * Always returns KERN_SUCCESS; failures are reported via *residualp.
 *
 * The device read is issued asynchronously; when the device answers
 * MIG_NO_REPLY we sleep until device_read_reply() wakes us.  Short
 * reads are accumulated into a bounce buffer borrowed from the
 * default-pager thread pool (get_read_buffer()).
 *
 * Fixes over the previous revision:
 *  - a NULL return from VS_ALLOC_ASYNC() was dereferenced
 *    (mutex_init/ip_lock); we now bail out of the loop instead.
 *  - dev_buffer was read uninitialized in the post-loop comparison
 *    when the loop exited before the first successful transfer.
 *  - removed the unused local "vs_waiting_read_reply".
 */
kern_return_t
ps_read_device(
	paging_segment_t	ps,
	vm_offset_t		offset,
	vm_offset_t		*bufferp,
	unsigned int		size,
	unsigned int		*residualp,
	int			flags)
{
	kern_return_t		kr;
	recnum_t		dev_offset;
	unsigned int		bytes_wanted;
	unsigned int		bytes_read;
	unsigned int		total_read;
	vm_offset_t		dev_buffer;
	vm_offset_t		buf_ptr;
	unsigned int		records_read;
	struct vs_async		*vsa;
	device_t		device;
	vm_map_copy_t		device_data = NULL;
	default_pager_thread_t	*dpt = NULL;

	device = dev_port_lookup(ps->ps_device);
	clustered_reads[atop_32(size)]++;

	/* Translate the page-based segment offset into device records. */
	dev_offset = (ps->ps_offset +
		      (offset >> (vm_page_shift - ps->ps_record_shift)));
	bytes_wanted = size;
	total_read = 0;
	/*
	 * Initialize so the post-loop "(dev_buffer != *bufferp)" test is
	 * well defined even when the loop exits before the first read.
	 */
	dev_buffer = (vm_offset_t)NULL;
	buf_ptr = (vm_offset_t)NULL;
	*bufferp = (vm_offset_t)NULL;

	do {
		vsa = VS_ALLOC_ASYNC();
		if (vsa == NULL) {
			/*
			 * Cannot track the async reply without a vs_async;
			 * fail this transfer.  The caller sees the failure
			 * through *residualp, like any other device error.
			 */
			kr = KERN_RESOURCE_SHORTAGE;
			break;
		}
		vsa->vsa_vs = NULL;
		vsa->vsa_addr = 0;
		vsa->vsa_offset = 0;
		vsa->vsa_size = 0;
		vsa->vsa_ps = NULL;
		mutex_init(&vsa->vsa_lock, 0);
		/* Hand the device a send-once right to our reply port. */
		ip_lock(vsa->reply_port);
		vsa->reply_port->ip_sorights++;
		ip_reference(vsa->reply_port);
		ip_unlock(vsa->reply_port);
		kr = ds_device_read_common(device,
					   vsa->reply_port,
					   (mach_msg_type_name_t)
					   MACH_MSG_TYPE_MOVE_SEND_ONCE,
					   (dev_mode_t) 0,
					   dev_offset,
					   bytes_wanted,
					   (IO_READ | IO_CALL),
					   (io_buf_ptr_t *) &dev_buffer,
					   (mach_msg_type_number_t *) &bytes_read);
		if (kr == MIG_NO_REPLY) {
			/*
			 * Read completes asynchronously: sleep until
			 * device_read_reply() fills in the vs_async and
			 * wakes us on &vsa->vsa_lock.
			 */
			assert_wait(&vsa->vsa_lock, THREAD_UNINT);
			thread_block(THREAD_CONTINUE_NULL);
			dev_buffer = vsa->vsa_addr;
			bytes_read = (unsigned int)vsa->vsa_size;
			kr = vsa->vsa_error;
		}
		VS_FREE_ASYNC(vsa);
		if (kr != KERN_SUCCESS || bytes_read == 0) {
			break;
		}
		total_read += bytes_read;
		if (bytes_read == size) {
			/* Whole request satisfied in a single transfer. */
			*bufferp = (vm_offset_t)dev_buffer;
			break;
		}
		dprintf(("read only %d bytes out of %d\n",
			 bytes_read, bytes_wanted));
		/*
		 * Short read: switch to a bounce buffer from the pager
		 * thread pool and accumulate the pieces there.
		 */
		if (dpt == NULL) {
			dpt = get_read_buffer();
			buf_ptr = dpt->dpt_buffer;
			*bufferp = (vm_offset_t)buf_ptr;
		}
		memcpy((void *) buf_ptr, (void *) dev_buffer, bytes_read);
		buf_ptr += bytes_read;
		bytes_wanted -= bytes_read;
		records_read = (bytes_read >>
				(vm_page_shift - ps->ps_record_shift));
		dev_offset += records_read;
		DP_DEBUG(DEBUG_VS_INTERNAL,
			 ("calling vm_deallocate(addr=0x%X,size=0x%X)\n",
			  dev_buffer, bytes_read));
		/* The device's buffer has been copied out; release it. */
		if (vm_deallocate(kernel_map, dev_buffer, bytes_read)
		    != KERN_SUCCESS)
			Panic("dealloc buf");
	} while (bytes_wanted);

	*residualp = size - total_read;
	if ((dev_buffer != *bufferp) && (total_read != 0)) {
		/*
		 * Data was accumulated in the bounce buffer: copy it to a
		 * fresh allocation and steal those pages into a map copy.
		 */
		vm_offset_t temp_buffer;
		vm_allocate(kernel_map, &temp_buffer, total_read, VM_FLAGS_ANYWHERE);
		memcpy((void *) temp_buffer, (void *) *bufferp, total_read);
		if (vm_map_copyin_page_list(kernel_map, temp_buffer, total_read,
					    VM_MAP_COPYIN_OPT_SRC_DESTROY |
					    VM_MAP_COPYIN_OPT_STEAL_PAGES |
					    VM_MAP_COPYIN_OPT_PMAP_ENTER,
					    (vm_map_copy_t *)&device_data, FALSE))
			panic("ps_read_device: cannot copyin locally provided buffer\n");
	}
	else if ((kr == KERN_SUCCESS) && (total_read != 0) && (dev_buffer != 0)) {
		/* Single full transfer: steal the device buffer's pages. */
		if (vm_map_copyin_page_list(kernel_map, dev_buffer, bytes_read,
					    VM_MAP_COPYIN_OPT_SRC_DESTROY |
					    VM_MAP_COPYIN_OPT_STEAL_PAGES |
					    VM_MAP_COPYIN_OPT_PMAP_ENTER,
					    (vm_map_copy_t *)&device_data, FALSE))
			panic("ps_read_device: cannot copyin backing store provided buffer\n");
	}
	else {
		device_data = NULL;
	}
	/* Result is handed back as a vm_map_copy_t (or NULL on failure). */
	*bufferp = (vm_offset_t)device_data;

	if (dpt != NULL) {
		/* Return the borrowed read buffer to the pool. */
		dpt->checked_out = 0;
		thread_wakeup(&dpt_array);
	}
	return KERN_SUCCESS;
}
/*
 * ps_write_device: write "size" bytes from kernel address "addr" to the
 * device backing paging segment "ps", at segment offset "offset".
 *
 * Two modes:
 *  - vsa != NULL: asynchronous.  A send-once right to vsa->reply_port
 *    is handed to the device; completion arrives in
 *    device_write_reply().  MIG_NO_REPLY from the device is the
 *    expected "in progress" answer.
 *  - vsa == NULL: synchronous.  Loops issuing IO_SYNC writes until the
 *    full size is written, advancing the record offset after each
 *    partial write.
 *
 * Returns PAGER_SUCCESS or PAGER_ERROR (failed pages are charged to
 * the backing store's bs_pages_out_fail statistic).
 */
kern_return_t
ps_write_device(
	paging_segment_t	ps,
	vm_offset_t	offset,
	vm_offset_t	addr,
	unsigned int	size,
	struct vs_async	*vsa)
{
	recnum_t	dev_offset;
	io_buf_len_t	bytes_to_write, bytes_written;
	recnum_t	records_written;
	kern_return_t	kr;
	MACH_PORT_FACE	reply_port;
	clustered_writes[atop_32(size)]++;
	/* Translate the page-based segment offset into device records. */
	dev_offset = (ps->ps_offset +
		      (offset >> (vm_page_shift - ps->ps_record_shift)));
	bytes_to_write = size;
	if (vsa) {
		/*
		 * Asynchronous write: donate a send-once right on the
		 * reply port so the device can answer via
		 * device_write_reply().
		 */
		reply_port = vsa->reply_port;
		ip_lock(reply_port);
		reply_port->ip_sorights++;
		ip_reference(reply_port);
		ip_unlock(reply_port);
		{
			device_t	device;
			device = dev_port_lookup(ps->ps_device);
			vsa->vsa_addr = addr;
			kr=ds_device_write_common(device,
						  reply_port,
						  (mach_msg_type_name_t) MACH_MSG_TYPE_MOVE_SEND_ONCE,
						  (dev_mode_t) 0,
						  dev_offset,
						  (io_buf_ptr_t) addr,
						  size,
						  (IO_WRITE | IO_CALL),
						  &bytes_written);
		}
		if ((kr != KERN_SUCCESS) && (kr != MIG_NO_REPLY)) {
			/* Immediate failure: fake the reply ourselves. */
			if (verbose)
				dprintf(("%s0x%x, addr=0x%x,"
					 "size=0x%x,offset=0x%x\n",
					 "device_write_request returned ",
					 kr, addr, size, offset));
			BS_STAT(ps->ps_bs,
				ps->ps_bs->bs_pages_out_fail += atop_32(size));
			/* Consumes the send-once right taken above. */
			device_write_reply(reply_port, kr, 0);
			return PAGER_ERROR;
		}
	} else do {
		/* Synchronous write, retried until fully written. */
		{
			device_t	device;
			device = dev_port_lookup(ps->ps_device);
			kr=ds_device_write_common(device,
						  IP_NULL, 0,
						  (dev_mode_t) 0,
						  dev_offset,
						  (io_buf_ptr_t) addr,
						  size,
						  (IO_WRITE | IO_SYNC | IO_KERNEL_BUF),
						  &bytes_written);
		}
		if (kr != KERN_SUCCESS) {
			dprintf(("%s0x%x, addr=0x%x,size=0x%x,offset=0x%x\n",
				 "device_write returned ",
				 kr, addr, size, offset));
			BS_STAT(ps->ps_bs,
				ps->ps_bs->bs_pages_out_fail += atop_32(size));
			return PAGER_ERROR;
		}
		/* Partial writes must land on record boundaries. */
		if (bytes_written & ((vm_page_size >> ps->ps_record_shift) - 1))
			Panic("fragmented write");
		records_written = (bytes_written >>
				   (vm_page_shift - ps->ps_record_shift));
		dev_offset += records_written;
#if 1
		if (bytes_written != bytes_to_write) {
			dprintf(("wrote only %d bytes out of %d\n",
				 bytes_written, bytes_to_write));
		}
#endif
		bytes_to_write -= bytes_written;
		addr += bytes_written;
	} while (bytes_to_write > 0);
	return PAGER_SUCCESS;
}
#else
/*
 * ps_read_device (stub): device-backed paging segments are compiled
 * out (!DEVICE_PAGING); reaching this is a programming error.
 */
kern_return_t
ps_read_device(
	__unused paging_segment_t	ps,
	__unused vm_offset_t		offset,
	__unused vm_offset_t		*bufferp,
	__unused unsigned int		size,
	__unused unsigned int		*residualp,
	__unused int			flags)
{
	panic("ps_read_device not supported");
	return KERN_FAILURE;	/* not reached */
}
/*
 * ps_write_device (stub): device-backed paging segments are compiled
 * out (!DEVICE_PAGING); reaching this is a programming error.
 */
kern_return_t
ps_write_device(
	__unused paging_segment_t	ps,
	__unused vm_offset_t		offset,
	__unused vm_offset_t		addr,
	__unused unsigned int		size,
	__unused struct vs_async	*vsa)
{
	panic("ps_write_device not supported");
	return KERN_FAILURE;	/* not reached */
}
#endif
void pvs_object_data_provided(vstruct_t, upl_t, upl_offset_t, upl_size_t);
/*
 * pvs_object_data_provided: accounting hook invoked after pagein data
 * has been supplied to the kernel.  Bumps the global pages-in counter
 * and, when USE_PRECIOUS is configured, releases the backing store for
 * the range just provided.  The __unused annotations on vs/upl/offset
 * apply to the !USE_PRECIOUS configuration, where only "size" is used.
 */
void
pvs_object_data_provided(
	__unused vstruct_t	vs,
	__unused upl_t	upl,
	__unused upl_offset_t	offset,
	upl_size_t	size)
{
	DP_DEBUG(DEBUG_VS_INTERNAL,
		 ("buffer=0x%x,offset=0x%x,size=0x%x\n",
		  upl, offset, size));
	ASSERT(size > 0);
	GSTAT(global_stats.gs_pages_in += atop_32(size));
#if USE_PRECIOUS
	/* Pages were handed over; drop our copy of the backing store. */
	ps_clunmap(vs, offset, size);
#endif
}
/*
 * Most recent cluster range chosen in pvs_cluster_read().  Written
 * there but never read in this file chunk -- presumably a debugging
 * aid; verify before removing.
 */
static memory_object_offset_t last_start;
static vm_size_t last_length;
/*
 * pvs_cluster_read: page in data for vstruct "vs" starting at byte
 * offset "vs_offset".  "cnt" is the requested byte count (0 means
 * "probe only": report whether the first page is present).
 *
 * The requested range is expanded to a cluster-aligned window chosen
 * by memory_object_cluster_size(), then satisfied by issuing UPL-based
 * reads (ps_read_file) against runs of contiguous, committed pages in
 * the paging segments.  Absent pages at the start of the window are
 * aborted/skipped; runs that precede the originally requested offset
 * are skipped without I/O.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE when a probe finds no data or
 * a read fails.
 */
kern_return_t
pvs_cluster_read(
	vstruct_t	vs,
	vm_offset_t	vs_offset,
	vm_size_t	cnt,
	void		*fault_info)
{
	kern_return_t		error = KERN_SUCCESS;
	unsigned int		size;
	unsigned int		residual;
	unsigned int		request_flags;
	int			seg_index;
	int			pages_in_cl;
	int			cl_size;
	int			cl_mask;
	int			cl_index;
	unsigned int		xfer_size;
	vm_offset_t		orig_vs_offset;
	/* One slot per cluster that can fit in a super-cluster window. */
	vm_offset_t		ps_offset[(VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT];
	paging_segment_t	psp[(VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT];
	struct clmap		clmap;
	upl_t			upl;
	unsigned int		page_list_count;
	memory_object_offset_t	start;
	pages_in_cl = 1 << vs->vs_clshift;
	cl_size = pages_in_cl * vm_page_size;
	cl_mask = cl_size - 1;
#if USE_PRECIOUS
	request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS | UPL_RET_ONLY_ABSENT | UPL_SET_LITE;
#else
	request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_RET_ONLY_ABSENT | UPL_SET_LITE;
#endif
	cl_index = (vs_offset & cl_mask) / vm_page_size;
	/*
	 * First page absent (no cluster, or bit clear in the cluster
	 * map): for a probe (cnt == 0) report failure; otherwise abort a
	 * single-page UPL so the kernel sees error/unavailable.
	 */
	if ((ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0) == (vm_offset_t)-1) ||
	    !CLMAP_ISSET(clmap, cl_index)) {
		if (cnt == 0) {
			return KERN_FAILURE;
		}
		page_list_count = 0;
		memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
						PAGE_SIZE, PAGE_SIZE,
						&upl, NULL, &page_list_count,
						request_flags);
		if (clmap.cl_error)
			upl_abort(upl, UPL_ABORT_ERROR);
		else
			upl_abort(upl, UPL_ABORT_UNAVAILABLE);
		upl_deallocate(upl);
		return KERN_SUCCESS;
	}
	if (cnt == 0) {
		/* Probe: the page exists. */
		return KERN_SUCCESS;
	}
	assert(dp_encryption_inited);
	if (dp_encryption) {
		/* Pages must be decrypted after they come off disk. */
		request_flags |= UPL_ENCRYPT;
	}
	orig_vs_offset = vs_offset;
	/*
	 * Ask the kernel for the preferred cluster window around the
	 * fault; fall back to a single page if it has no opinion.
	 */
	start = (memory_object_offset_t)vs_offset;
	assert(cnt != 0);
	cnt = VM_SUPER_CLUSTER;
	if (memory_object_cluster_size(vs->vs_control, &start, &cnt, (memory_object_fault_info_t)fault_info) == KERN_SUCCESS) {
		assert(vs_offset >= (vm_offset_t) start &&
		       vs_offset < (vm_offset_t) (start + cnt));
		vs_offset = (vm_offset_t)start;
	} else
		cnt = PAGE_SIZE;
	/* Debug record of the window chosen (not read elsewhere here). */
	last_start = start;
	last_length = cnt;
	while (cnt && error == KERN_SUCCESS) {
		int	ps_info_valid;
		/* Carve the window into at most super-cluster chunks. */
		if ((vs_offset & cl_mask) && (cnt > (VM_SUPER_CLUSTER - (vs_offset & cl_mask)))) {
			size = VM_SUPER_CLUSTER;
			size -= vs_offset & cl_mask;
		} else if (cnt > VM_SUPER_CLUSTER)
			size = VM_SUPER_CLUSTER;
		else
			size = cnt;
		cnt -= size;
		ps_info_valid = 0;
		seg_index = 0;
		while (size > 0 && error == KERN_SUCCESS) {
			unsigned int	abort_size;
			int		failed_size;
			int		beg_pseg;
			int		beg_indx;
			vm_offset_t	cur_offset;
			if ( !ps_info_valid) {
				ps_offset[seg_index] = ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
				psp[seg_index] = CLMAP_PS(clmap);
				ps_info_valid = 1;
			}
			/* Unallocated cluster: skip it entirely. */
			if (ps_offset[seg_index] == (vm_offset_t) -1) {
				abort_size = cl_size - (vs_offset & cl_mask);
				abort_size = MIN(abort_size, size);
				size -= abort_size;
				vs_offset += abort_size;
				seg_index++;
				ps_info_valid = 0;
				continue;
			}
			/* Skip leading absent pages within the cluster. */
			cl_index = (vs_offset & cl_mask) / vm_page_size;
			for (abort_size = 0; cl_index < pages_in_cl && abort_size < size; cl_index++) {
				if (CLMAP_ISSET(clmap, cl_index))
					break;
				abort_size += vm_page_size;
			}
			if (abort_size) {
				size -= abort_size;
				vs_offset += abort_size;
				if (cl_index == pages_in_cl) {
					/* Whole cluster absent. */
					seg_index++;
					ps_info_valid = 0;
					continue;
				}
				if (size == 0)
					break;
			}
			/*
			 * Build the longest run of committed pages that is
			 * physically contiguous across clusters/segments.
			 */
			beg_pseg = seg_index;
			beg_indx = cl_index;
			cur_offset = vs_offset;
			for (xfer_size = 0; xfer_size < size; ) {
				while (cl_index < pages_in_cl && xfer_size < size) {
					if (CLMAP_ISSET(clmap, cl_index)) {
						xfer_size += vm_page_size;
						cur_offset += vm_page_size;
						cl_index++;
						BS_STAT(psp[seg_index]->ps_bs,
							psp[seg_index]->ps_bs->bs_pages_in++);
					} else
						break;
				}
				if (cl_index < pages_in_cl || xfer_size >= size) {
					/* Hit a hole or satisfied size. */
					break;
				}
				/* Cluster exhausted: peek at the next one. */
				seg_index++;
				ps_offset[seg_index] = ps_clmap(vs, cur_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
				psp[seg_index] = CLMAP_PS(clmap);
				ps_info_valid = 1;
				if ((ps_offset[seg_index - 1] != (ps_offset[seg_index] - cl_size)) || (psp[seg_index - 1] != psp[seg_index])) {
					/* Not physically contiguous. */
					break;
				}
				cl_index = 0;
			}
			if (xfer_size == 0) {
				continue;
			}
			/*
			 * Entire run lies before the faulting offset:
			 * speculation only -- skip without I/O.
			 */
			if (cur_offset <= orig_vs_offset) {
				size -= xfer_size;
				vs_offset += xfer_size;
				continue;
			}
			page_list_count = 0;
			memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
							xfer_size, xfer_size,
							&upl, NULL, &page_list_count,
							request_flags | UPL_SET_INTERNAL | UPL_NOBLOCK);
			error = ps_read_file(psp[beg_pseg],
					     upl, (upl_offset_t) 0,
					     ps_offset[beg_pseg] + (beg_indx * vm_page_size),
					     xfer_size, &residual, 0);
			failed_size = 0;
			if ((error == KERN_SUCCESS) && (residual == 0)) {
				/* Full read: all pages provided. */
				pvs_object_data_provided(vs, upl, vs_offset, xfer_size);
			} else {
				failed_size = xfer_size;
				if (error == KERN_SUCCESS) {
					if (residual == xfer_size) {
						/* Nothing arrived at all. */
						error = KERN_FAILURE;
					} else {
						int		fill;
						unsigned int	lsize;
						/*
						 * NOTE(review): "residual &
						 * ~vm_page_size" clears only a
						 * single bit; page rounding
						 * would be
						 * "& ~(vm_page_size - 1)".
						 * Suspect bug -- confirm
						 * upstream before changing.
						 */
						fill = residual & ~vm_page_size;
						lsize = (xfer_size - residual) + fill;
						pvs_object_data_provided(vs, upl, vs_offset, lsize);
						if (lsize < xfer_size) {
							failed_size = xfer_size - lsize;
							error = KERN_FAILURE;
						}
					}
				}
			}
			if (error != KERN_SUCCESS) {
				BS_STAT(psp[beg_pseg]->ps_bs,
					psp[beg_pseg]->ps_bs->bs_pages_in_fail += atop_32(failed_size));
			}
			/*
			 * NOTE(review): returns after the FIRST transaction,
			 * so each call issues at most one I/O; the enclosing
			 * loops only shape that single run.  Confirm this
			 * early return is intentional.
			 */
			return (error);
		}
	}
	return error;
}
/* Tuning/debug knob; not consulted within this chunk of the file. */
int vs_do_async_write = 1;
/*
 * vs_cluster_write: push "cnt" bytes of dirty data at "offset" from
 * the vstruct's memory object out to its paging segments.
 *
 * Two modes:
 *  - !dp_internal: build a UPL (up to VM_SUPER_CLUSTER, shrunk to one
 *    cluster when backing store is low), pre-allocate the backing
 *    clusters it spans, then walk the page list committing clean
 *    pages and writing out maximal runs of dirty/precious pages that
 *    are physically contiguous on one segment.
 *  - dp_internal: "internal_upl" already describes the pages; allocate
 *    clusters and write directly, at most one cluster per iteration.
 *
 * Returns KERN_SUCCESS or KERN_FAILURE (allocation or write error).
 */
kern_return_t
vs_cluster_write(
	vstruct_t	vs,
	upl_t		internal_upl,
	upl_offset_t	offset,
	upl_size_t	cnt,
	boolean_t	dp_internal,
	int		flags)
{
	upl_size_t	transfer_size;
	int		error = 0;
	struct clmap	clmap;
	vm_offset_t	actual_offset;	/* Offset within paging segment */
	paging_segment_t	ps;
	vm_offset_t	mobj_base_addr;
	vm_offset_t	mobj_target_addr;
	upl_t		upl;
	upl_page_info_t	*pl;
	int		page_index;
	int		list_size;
	int		pages_in_cl;
	unsigned int	cl_size;
	int		base_index;
	unsigned int	seg_size;
	pages_in_cl = 1 << vs->vs_clshift;
	cl_size = pages_in_cl * vm_page_size;
	if (!dp_internal) {
		unsigned int	page_list_count;
		int		request_flags;
		unsigned int	super_size;
		int		first_dirty;
		int		num_dirty;
		int		num_of_pages;
		int		seg_index;
		upl_offset_t	upl_offset;
		vm_offset_t	seg_offset;
		vm_offset_t	ps_offset[((VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT) + 1];
		paging_segment_t	psp[((VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT) + 1];
		if (bs_low) {
			/* Low on backing store: write one cluster at a time. */
			super_size = cl_size;
			request_flags = UPL_NOBLOCK |
				UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
				UPL_NO_SYNC |  UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			super_size = VM_SUPER_CLUSTER;
			request_flags = UPL_NOBLOCK | UPL_CLEAN_IN_PLACE |
				UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
				UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE;
		}
		if (!dp_encryption_inited) {
			/* First pageout locks in the encryption setting. */
			dp_encryption_inited = TRUE;
		}
		if (dp_encryption) {
			/* Pages must be encrypted before going to disk. */
			request_flags |= UPL_ENCRYPT;
			flags |= UPL_PAGING_ENCRYPTED;
		}
		page_list_count = 0;
		memory_object_super_upl_request(vs->vs_control,
						(memory_object_offset_t)offset,
						cnt, super_size,
						&upl, NULL, &page_list_count,
						request_flags | UPL_FOR_PAGEOUT);
		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
		/*
		 * Pre-allocate a backing cluster for every cluster the
		 * UPL touches; bail out if any allocation fails.
		 */
		seg_size = cl_size - (upl->offset % cl_size);
		upl_offset = upl->offset & ~(cl_size - 1);
		for (seg_index = 0, transfer_size = upl->size;
		     transfer_size > 0; ) {
			ps_offset[seg_index] =
				ps_clmap(vs,
					 upl_offset,
					 &clmap, CL_ALLOC,
					 cl_size, 0);
			if (ps_offset[seg_index] == (vm_offset_t) -1) {
				upl_abort(upl, 0);
				upl_deallocate(upl);
				return KERN_FAILURE;
			}
			psp[seg_index] = CLMAP_PS(clmap);
			if (transfer_size > seg_size) {
				transfer_size -= seg_size;
				upl_offset += cl_size;
				seg_size = cl_size;
				seg_index++;
			} else
				transfer_size = 0;
		}
		/* Trim trailing absent pages off the UPL. */
		for (page_index = upl->size / vm_page_size; page_index > 0;)
			if (UPL_PAGE_PRESENT(pl, --page_index))
				break;
		num_of_pages = page_index + 1;
		base_index = (upl->offset % cl_size) / PAGE_SIZE;
		for (page_index = 0; page_index < num_of_pages; ) {
			/*
			 * Commit clean-but-present pages individually
			 * until the next dirty/precious page.
			 */
			for ( ; page_index < num_of_pages; page_index++) {
				if (UPL_DIRTY_PAGE(pl, page_index)
				    || UPL_PRECIOUS_PAGE(pl, page_index))
					break;
				if (UPL_PAGE_PRESENT(pl, page_index)) {
					boolean_t empty = FALSE;
					upl_commit_range(upl,
							 page_index * vm_page_size,
							 vm_page_size,
							 UPL_COMMIT_NOTIFY_EMPTY,
							 pl,
							 page_list_count,
							 &empty);
					if (empty) {
						assert(page_index ==
						       num_of_pages - 1);
						upl_deallocate(upl);
					}
				}
			}
			if (page_index == num_of_pages)
				/* no more pages to look at, we're out of here */
				break;
			/*
			 * Gather the longest dirty/precious run that stays
			 * physically contiguous on a single segment.
			 */
			for (first_dirty = page_index;
			     page_index < num_of_pages; ) {
				if ( !UPL_DIRTY_PAGE(pl, page_index)
				     && !UPL_PRECIOUS_PAGE(pl, page_index))
					break;
				page_index++;
				if (page_index < num_of_pages) {
					int	cur_seg;
					int	nxt_seg;
					cur_seg = (base_index + (page_index - 1))/pages_in_cl;
					nxt_seg = (base_index + page_index)/pages_in_cl;
					if (cur_seg != nxt_seg) {
						/* Run crosses a cluster: must stay contiguous. */
						if ((ps_offset[cur_seg] != (ps_offset[nxt_seg] - cl_size)) || (psp[cur_seg] != psp[nxt_seg]))
							break;
					}
				}
			}
			num_dirty = page_index - first_dirty;
			if (num_dirty) {
				/*
				 * Mark the pages written in the cluster
				 * map cluster by cluster, then issue one
				 * write for the whole run.
				 */
				upl_offset = first_dirty * vm_page_size;
				transfer_size = num_dirty * vm_page_size;
				while (transfer_size) {
					if ((seg_size = cl_size -
					     ((upl->offset + upl_offset) % cl_size))
					    > transfer_size)
						seg_size = transfer_size;
					ps_vs_write_complete(vs,
							     upl->offset + upl_offset,
							     seg_size, error);
					transfer_size -= seg_size;
					upl_offset += seg_size;
				}
				upl_offset = first_dirty * vm_page_size;
				transfer_size = num_dirty * vm_page_size;
				seg_index = (base_index + first_dirty) / pages_in_cl;
				seg_offset = (upl->offset + upl_offset) % cl_size;
				error = ps_write_file(psp[seg_index],
						      upl, upl_offset,
						      ps_offset[seg_index]
						      + seg_offset,
						      transfer_size, flags);
			} else {
				boolean_t empty = FALSE;
				/*
				 * NOTE(review): reached only when
				 * num_dirty == 0, so this aborts a
				 * zero-length range -- confirm the
				 * NOTIFY_EMPTY side effect is the intent.
				 */
				upl_abort_range(upl,
						first_dirty * vm_page_size,
						num_dirty * vm_page_size,
						UPL_ABORT_NOTIFY_EMPTY,
						&empty);
				if (empty) {
					assert(page_index == num_of_pages);
					upl_deallocate(upl);
				}
			}
		}
	} else {
		/* Internal path: caller supplied the UPL; at most one
		 * cluster is allocated and written per iteration. */
		assert(cnt <= (vm_page_size << vs->vs_clshift));
		list_size = cnt;
		page_index = 0;
		mobj_base_addr = offset;
		mobj_target_addr = mobj_base_addr;
		for (transfer_size = list_size; transfer_size != 0;) {
			actual_offset = ps_clmap(vs, mobj_target_addr,
						 &clmap, CL_ALLOC,
						 transfer_size < cl_size ?
						 transfer_size : cl_size, 0);
			if(actual_offset == (vm_offset_t) -1) {
				error = 1;
				break;
			}
			cnt = MIN(transfer_size,
				  CLMAP_NPGS(clmap) * vm_page_size);
			ps = CLMAP_PS(clmap);
			/* Claim to the map that the write succeeded, then do it. */
			if(cnt) {
				ps_vs_write_complete(vs, mobj_target_addr,
						     cnt, error);
				error = ps_write_file(ps, internal_upl,
						      0, actual_offset,
						      cnt, flags);
				if (error)
					break;
			}
			if (error)
				break;
			actual_offset += cnt;
			mobj_target_addr += cnt;
			transfer_size -= cnt;
			cnt = 0;
			if (error)
				break;
		}
	}
	if(error)
		return KERN_FAILURE;
	else
		return KERN_SUCCESS;
}
/*
 * ps_vstruct_allocated_size: return the number of BYTES of backing
 * store currently committed to vstruct "vs" (pages with their bit set
 * in each cluster's bitmap, converted via ptoa_32).
 * NOTE(review): no VS_MAP_LOCK is taken here -- presumably callers
 * serialize; confirm before relying on the result.
 */
vm_size_t
ps_vstruct_allocated_size(
	vstruct_t	vs)
{
	int		num_pages;
	struct vs_map	*vsmap;
	unsigned int	i, j, k;
	num_pages = 0;
	if (vs->vs_indirect) {
		/* Two-level map: scan each allocated indirect block. */
		for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
			vsmap = vs->vs_imap[i];
			if (vsmap == NULL)
				continue;
			for (j = 0; j < CLMAP_ENTRIES; j++) {
				if (VSM_ISCLR(vsmap[j]) ||
				    VSM_ISERR(vsmap[j]))
					continue;
				/* Count set bits in the cluster bitmap. */
				for (k = 0; k < VSCLSIZE(vs); k++) {
					if ((VSM_BMAP(vsmap[j])) & (1 << k))
						num_pages++;
				}
			}
		}
	} else {
		/* Direct map: single block of CLMAP_ENTRIES entries. */
		vsmap = vs->vs_dmap;
		if (vsmap == NULL)
			return 0;
		for (j = 0; j < CLMAP_ENTRIES; j++) {
			if (VSM_ISCLR(vsmap[j]) ||
			    VSM_ISERR(vsmap[j]))
				continue;
			for (k = 0; k < VSCLSIZE(vs); k++) {
				if ((VSM_BMAP(vsmap[j])) & (1 << k))
					num_pages++;
			}
		}
	}
	return ptoa_32(num_pages);
}
/*
 * ps_vstruct_allocated_pages: enumerate the pages of "vs" that have
 * backing store, storing each page's byte offset into the caller's
 * "pages" array (capacity "pages_size" entries).  Returns the TOTAL
 * number of allocated pages, which may exceed pages_size -- callers
 * can use that to retry with a larger buffer.
 * NOTE(review): the guard "num_pages < pages_size" runs after the
 * increment, so at most pages_size - 1 entries are ever stored --
 * looks like an off-by-one ("<=" intended?); confirm against callers
 * before changing.
 */
size_t
ps_vstruct_allocated_pages(
	vstruct_t	vs,
	default_pager_page_t	*pages,
	size_t		pages_size)
{
	unsigned int	num_pages;
	struct vs_map	*vsmap;
	vm_offset_t	offset;
	unsigned int	i, j, k;
	num_pages = 0;
	offset = 0;
	if (vs->vs_indirect) {
		/* Two-level map: walk all indirect blocks in order. */
		for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
			vsmap = vs->vs_imap[i];
			if (vsmap == NULL) {
				/* Unallocated block: skip its whole span. */
				offset += (vm_page_size * CLMAP_ENTRIES *
					   VSCLSIZE(vs));
				continue;
			}
			for (j = 0; j < CLMAP_ENTRIES; j++) {
				if (VSM_ISCLR(vsmap[j]) ||
				    VSM_ISERR(vsmap[j])) {
					offset += vm_page_size * VSCLSIZE(vs);
					continue;
				}
				for (k = 0; k < VSCLSIZE(vs); k++) {
					if ((VSM_BMAP(vsmap[j])) & (1 << k)) {
						num_pages++;
						if (num_pages < pages_size)
							pages++->dpp_offset =
								offset;
					}
					offset += vm_page_size;
				}
			}
		}
	} else {
		vsmap = vs->vs_dmap;
		if (vsmap == NULL)
			return 0;
		for (j = 0; j < CLMAP_ENTRIES; j++) {
			if (VSM_ISCLR(vsmap[j]) ||
			    VSM_ISERR(vsmap[j])) {
				offset += vm_page_size * VSCLSIZE(vs);
				continue;
			}
			for (k = 0; k < VSCLSIZE(vs); k++) {
				if ((VSM_BMAP(vsmap[j])) & (1 << k)) {
					num_pages++;
					if (num_pages < pages_size)
						pages++->dpp_offset = offset;
				}
				offset += vm_page_size;
			}
		}
	}
	return num_pages;
}
/*
 * ps_vstruct_transfer_from_segment: migrate every cluster of "vs" that
 * lives on paging segment "segment" to some other segment, using
 * vs_cluster_transfer() (read back through "upl", rewrite elsewhere).
 *
 * Protocol: vs_xfer_pending + the writer/reader barriers quiesce the
 * vstruct around each cluster move.  After each transfer the barriers
 * are dropped and re-acquired; if the map representation flipped
 * between direct and indirect in that window, the scan restarts at
 * "vs_changed".  NOTE(review): the direct-map branch re-acquires the
 * barriers in a slightly different order (VS_UNLOCK before
 * vs_wait_for_sync_writers) than the indirect branch -- confirm this
 * asymmetry is intentional.
 */
kern_return_t
ps_vstruct_transfer_from_segment(
	vstruct_t	vs,
	paging_segment_t	segment,
	upl_t		upl)
{
	struct vs_map	*vsmap;
	unsigned int	i, j;
	VS_LOCK(vs);	/* block all work on this vstruct */
	/* quiesce all other activity on the vstruct */
	vs->vs_xfer_pending = TRUE;
	vs_wait_for_sync_writers(vs);
	vs_start_write(vs);
	vs_wait_for_readers(vs);
	VS_UNLOCK(vs);
vs_changed:
	if (vs->vs_indirect) {
		unsigned int	vsmap_size;
		int		clmap_off;
		/* loop on indirect maps */
		for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
			vsmap = vs->vs_imap[i];
			if (vsmap == NULL)
				continue;
			/* Byte offset covered by this indirect block. */
			clmap_off = (vm_page_size * CLMAP_ENTRIES *
				     VSCLSIZE(vs) * i);
			/* Last block may be partially populated. */
			if(i+1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size))
				vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i);
			else
				vsmap_size = CLMAP_ENTRIES;
			for (j = 0; j < vsmap_size; j++) {
				if (VSM_ISCLR(vsmap[j]) ||
				    VSM_ISERR(vsmap[j]) ||
				    (VSM_PS(vsmap[j]) != segment))
					continue;
				if(vs_cluster_transfer(vs,
						       (vm_page_size * (j << vs->vs_clshift))
						       + clmap_off,
						       vm_page_size << vs->vs_clshift,
						       upl)
				   != KERN_SUCCESS) {
					VS_LOCK(vs);
					vs->vs_xfer_pending = FALSE;
					VS_UNLOCK(vs);
					vs_finish_write(vs);
					return KERN_FAILURE;
				}
				/* Drop and re-acquire the quiesce barriers. */
				VS_LOCK(vs);
				vs->vs_xfer_pending = FALSE;
				VS_UNLOCK(vs);
				vs_finish_write(vs);
				VS_LOCK(vs);
				vs->vs_xfer_pending = TRUE;
				vs_wait_for_sync_writers(vs);
				vs_start_write(vs);
				vs_wait_for_readers(vs);
				VS_UNLOCK(vs);
				/* Map collapsed to direct: rescan. */
				if (!(vs->vs_indirect)) {
					goto vs_changed;
				}
			}
		}
	} else {
		vsmap = vs->vs_dmap;
		if (vsmap == NULL) {
			/* Nothing mapped: release barriers and succeed. */
			VS_LOCK(vs);
			vs->vs_xfer_pending = FALSE;
			VS_UNLOCK(vs);
			vs_finish_write(vs);
			return KERN_SUCCESS;
		}
		/* loop on clusters in the direct map */
		for (j = 0; j < vs->vs_size; j++) {
			if (VSM_ISCLR(vsmap[j]) ||
			    VSM_ISERR(vsmap[j]) ||
			    (VSM_PS(vsmap[j]) != segment))
				continue;
			if(vs_cluster_transfer(vs,
					       vm_page_size * (j << vs->vs_clshift),
					       vm_page_size << vs->vs_clshift,
					       upl) != KERN_SUCCESS) {
				VS_LOCK(vs);
				vs->vs_xfer_pending = FALSE;
				VS_UNLOCK(vs);
				vs_finish_write(vs);
				return KERN_FAILURE;
			}
			/* Drop and re-acquire the quiesce barriers. */
			VS_LOCK(vs);
			vs->vs_xfer_pending = FALSE;
			VS_UNLOCK(vs);
			vs_finish_write(vs);
			VS_LOCK(vs);
			vs->vs_xfer_pending = TRUE;
			VS_UNLOCK(vs);
			vs_wait_for_sync_writers(vs);
			vs_start_write(vs);
			vs_wait_for_readers(vs);
			/* Map grew to indirect: rescan. */
			if (vs->vs_indirect) {
				goto vs_changed;
			}
		}
	}
	VS_LOCK(vs);
	vs->vs_xfer_pending = FALSE;
	VS_UNLOCK(vs);
	vs_finish_write(vs);
	return KERN_SUCCESS;
}
/*
 * vs_get_map_entry: return a pointer to the vs_map entry that covers
 * byte "offset" within vstruct "vs", or NULL when the indirect map
 * block for that cluster has not been allocated.  (The direct map is
 * not NULL-checked here, matching historical behavior.)
 */
vs_map_t
vs_get_map_entry(
	vstruct_t	vs,
	vm_offset_t	offset)
{
	struct vs_map	*base;
	vm_offset_t	clno;

	/* Which cluster does this byte offset fall in? */
	clno = atop_32(offset) >> vs->vs_clshift;

	if (!vs->vs_indirect) {
		base = vs->vs_dmap;
	} else {
		/* Two-level map: find the block holding this cluster. */
		base = vs->vs_imap[clno / CLMAP_ENTRIES];
		if (base == (vs_map_t) NULL)
			return (vs_map_t) NULL;
	}
	return base + (clno % CLMAP_ENTRIES);
}
/*
 * vs_cluster_transfer: move "cnt" bytes at "offset" of vstruct "vs" to
 * freshly allocated backing store, by reading each run of committed
 * pages into "upl" (ps_read_file) and rewriting it through
 * vs_cluster_write().  Used by ps_vstruct_transfer_from_segment() to
 * drain a paging segment.
 *
 * write_vsmap / read_vsmap / original_read_vsmap shadow the cluster's
 * map entry so the in-progress entry can be swapped between "where the
 * data was read from" and "where it is being written to", and restored
 * on failure.  Returns KERN_SUCCESS or an error from the read/write
 * paths.
 */
kern_return_t
vs_cluster_transfer(
	vstruct_t	vs,
	vm_offset_t	offset,
	vm_size_t	cnt,
	upl_t		upl)
{
	vm_offset_t		actual_offset;
	paging_segment_t	ps;
	struct clmap		clmap;
	kern_return_t		error = KERN_SUCCESS;
	unsigned int		size, size_wanted;
	int			i;
	unsigned int		residual = 0;
	unsigned int		unavail_size;
	struct vs_map		*vsmap_ptr = NULL;
	struct vs_map		read_vsmap;
	struct vs_map		original_read_vsmap;
	struct vs_map		write_vsmap;
	VSM_CLR(write_vsmap);
	VSM_CLR(original_read_vsmap);
	/* Loop until either cnt is exhausted or an error occurs. */
	while (cnt && (error == KERN_SUCCESS)) {
		vsmap_ptr = vs_get_map_entry(vs, offset);
		actual_offset = ps_clmap(vs, offset, &clmap, CL_FIND, 0, 0);
		if (actual_offset == (vm_offset_t) -1) {
			/*
			 * Cluster not committed: install any pending
			 * write map entry and skip to the next cluster
			 * boundary (or the end of the request).
			 */
			unsigned int	local_size, clmask, clsize;
			clsize = vm_page_size << vs->vs_clshift;
			clmask = clsize - 1;
			local_size = clsize - (offset & clmask);
			ASSERT(local_size);
			local_size = MIN(local_size, cnt);
			/* This cluster is in an unallocated area of map */
			*vsmap_ptr = write_vsmap;
			VSM_CLR(write_vsmap);
			VSM_CLR(original_read_vsmap);
			cnt -= local_size;
			offset += local_size;
			continue;
		}
		/*
		 * We have a valid cluster: determine how many contiguous
		 * pages in it are committed (size) or absent
		 * (unavail_size) starting from the current position.
		 */
		ps = CLMAP_PS(clmap);
		ASSERT(ps);
		size = 0;
		unavail_size = 0;
		for (i = 0;
		     (size < cnt) && (unavail_size < cnt) &&
			     (i < CLMAP_NPGS(clmap)); i++) {
			if (CLMAP_ISSET(clmap, i)) {
				if (unavail_size != 0)
					break;
				size += vm_page_size;
				BS_STAT(ps->ps_bs,
					ps->ps_bs->bs_pages_in++);
			} else {
				if (size != 0)
					break;
				unavail_size += vm_page_size;
			}
		}
		if (size == 0) {
			/* Leading pages absent: skip them. */
			ASSERT(unavail_size);
			cnt -= unavail_size;
			offset += unavail_size;
			if((offset & ((vm_page_size << vs->vs_clshift) - 1))
			   == 0) {
				/* Cluster boundary: flush pending entry. */
				*vsmap_ptr = write_vsmap;
				VSM_CLR(write_vsmap);
				VSM_CLR(original_read_vsmap);
			}
			continue;
		}
		/* Remember the entry to restore if the rewrite fails. */
		if(VSM_ISCLR(original_read_vsmap))
			original_read_vsmap = *vsmap_ptr;
		if(ps->ps_segtype == PS_PARTITION) {
			panic("swap partition not supported\n");
			/*NOTREACHED*/
			error = KERN_FAILURE;
			residual = size;
		} else {
			/* Read the committed run into the transfer UPL. */
			error = ps_read_file(ps, upl, (upl_offset_t) 0, actual_offset,
					     size, &residual,
					     (UPL_IOSYNC | UPL_NOCOMMIT));
		}
		read_vsmap = *vsmap_ptr;
		if ((error == KERN_SUCCESS) && (residual == 0)) {
			/*
			 * Swap in the (possibly clear) write entry so the
			 * rewrite allocates NEW backing store, then write
			 * the data back out synchronously.
			 */
			*vsmap_ptr = write_vsmap;
			if(vs_cluster_write(vs, upl, offset,
					    size, TRUE, UPL_IOSYNC | UPL_NOCOMMIT ) != KERN_SUCCESS) {
				error = KERN_FAILURE;
				if(!(VSM_ISCLR(*vsmap_ptr))) {
					/* Unmap the new backing store and restore the old. */
					ps_clunmap(vs, offset, size);
				}
				*vsmap_ptr = original_read_vsmap;
				VSM_CLR(write_vsmap);
			} else {
				if((offset + size) &
				   ((vm_page_size << vs->vs_clshift)
				    - 1)) {
					/* Mid-cluster: keep both entries live. */
					write_vsmap = *vsmap_ptr;
					*vsmap_ptr = read_vsmap;
				} else {
					/* Cluster complete: free the OLD store. */
					write_vsmap = *vsmap_ptr;
					*vsmap_ptr = read_vsmap;
					ps_clunmap(vs, offset, size);
					*vsmap_ptr = write_vsmap;
					VSM_CLR(write_vsmap);
					VSM_CLR(original_read_vsmap);
				}
			}
		} else {
			size_wanted = size;
			if (error == KERN_SUCCESS) {
				/*
				 * NOTE(review): the residual == size and
				 * residual != size branches below are
				 * byte-for-byte identical -- likely a lost
				 * distinction (partial reads could salvage
				 * the transferred prefix); confirm upstream
				 * before merging them.
				 */
				if (residual == size) {
					error = KERN_FAILURE;
					*vsmap_ptr = write_vsmap;
					if(!(VSM_ISCLR(*vsmap_ptr))) {
						ps_clunmap(vs, offset, size);
					}
					*vsmap_ptr = original_read_vsmap;
					VSM_CLR(write_vsmap);
					continue;
				} else {
					error = KERN_FAILURE;
					*vsmap_ptr = write_vsmap;
					if(!(VSM_ISCLR(*vsmap_ptr))) {
						ps_clunmap(vs, offset, size);
					}
					*vsmap_ptr = original_read_vsmap;
					VSM_CLR(write_vsmap);
					continue;
				}
			}
		}
		cnt -= size;
		offset += size;
	} /* END while */
	/* Flush any still-pending write entry back into the map. */
	if(!VSM_ISCLR(write_vsmap))
		*vsmap_ptr = write_vsmap;
	return error;
}
/*
 * default_pager_add_file: register vnode "vp" as a new file-backed
 * paging segment of "size" records ("record_size" bytes each) under
 * backing store "backing_store".
 *
 * Rejects a vnode that is already in use by another segment, builds
 * and initializes the paging_segment structure (page/cluster geometry
 * derived from record_size and the backing store's cluster size),
 * enters it in the global segment table, and credits the new space to
 * the backing store and the global free-page pool.
 * backing_store_lookup() returns with the BS lock held; every exit
 * path releases it.
 */
kern_return_t
default_pager_add_file(
	MACH_PORT_FACE	backing_store,
	vnode_ptr_t	vp,
	int		record_size,
	vm_size_t	size)
{
	backing_store_t	bs;
	paging_segment_t	ps;
	int		i;
	unsigned int	j;
	int		error;
	if ((bs = backing_store_lookup(backing_store))
	    == BACKING_STORE_NULL)
		return KERN_INVALID_ARGUMENT;
	/* Refuse a vnode already backing another segment. */
	PSL_LOCK();
	for (i = 0; i <= paging_segment_max; i++) {
		ps = paging_segments[i];
		if (ps == PAGING_SEGMENT_NULL)
			continue;
		if (ps->ps_segtype != PS_FILE)
			continue;
		/*
		 * Check for overlap on same device.
		 */
		if (ps->ps_vnode == (struct vnode *)vp) {
			PSL_UNLOCK();
			BS_UNLOCK(bs);
			return KERN_INVALID_ARGUMENT;
		}
	}
	PSL_UNLOCK();
	ps = (paging_segment_t) kalloc(sizeof (struct paging_segment));
	if (ps == PAGING_SEGMENT_NULL) {
		BS_UNLOCK(bs);
		return KERN_RESOURCE_SHORTAGE;
	}
	ps->ps_segtype = PS_FILE;
	ps->ps_vnode = (struct vnode *)vp;
	ps->ps_offset = 0;
	/* Records per page / pages per cluster geometry. */
	ps->ps_record_shift = local_log2(vm_page_size / record_size);
	ps->ps_recnum = size;
	ps->ps_pgnum = size >> ps->ps_record_shift;
	ps->ps_pgcount = ps->ps_pgnum;
	ps->ps_clshift = local_log2(bs->bs_clsize);
	ps->ps_clcount = ps->ps_ncls = ps->ps_pgcount >> ps->ps_clshift;
	ps->ps_hint = 0;
	PS_LOCK_INIT(ps);
	/* Allocation bitmap: one bit per cluster, initially all free. */
	ps->ps_bmap = (unsigned char *) kalloc(RMAPSIZE(ps->ps_ncls));
	if (!ps->ps_bmap) {
		kfree(ps, sizeof *ps);
		BS_UNLOCK(bs);
		return KERN_RESOURCE_SHORTAGE;
	}
	for (j = 0; j < ps->ps_ncls; j++) {
		clrbit(ps->ps_bmap, j);
	}
	ps->ps_going_away = FALSE;
	ps->ps_bs = bs;
	if ((error = ps_enter(ps)) != 0) {
		kfree(ps->ps_bmap, RMAPSIZE(ps->ps_ncls));
		kfree(ps, sizeof *ps);
		BS_UNLOCK(bs);
		return KERN_RESOURCE_SHORTAGE;
	}
	/* Credit the new capacity to the BS and the global pool. */
	bs->bs_pages_free += ps->ps_clcount << ps->ps_clshift;
	bs->bs_pages_total += ps->ps_clcount << ps->ps_clshift;
	PSL_LOCK();
	dp_pages_free += ps->ps_pgcount;
	PSL_UNLOCK();
	BS_UNLOCK(bs);
	bs_more_space(ps->ps_clcount);
	/*
	 * NOTE(review): "device" and "offset" are not identifiers in this
	 * scope; this DP_DEBUG only compiles when the debug macro expands
	 * its arguments.  Confirm and fix the argument list if debug
	 * printing is ever enabled.
	 */
	DP_DEBUG(DEBUG_BS_INTERNAL,
		 ("device=0x%x,offset=0x%x,count=0x%x,record_size=0x%x,shift=%d,total_size=0x%x\n",
		  device, offset, size, record_size,
		  ps->ps_record_shift, ps->ps_pgnum));
	return KERN_SUCCESS;
}
/*
 * ps_read_file: page "size" bytes at segment offset "offset" of the
 * file-backed paging segment "ps" into "upl" at "upl_offset", via
 * vnode_pagein().  On success *residualp is set to 0; on failure it is
 * left untouched and KERN_FAILURE is returned.
 */
kern_return_t
ps_read_file(
	paging_segment_t	ps,
	upl_t			upl,
	upl_offset_t		upl_offset,
	vm_offset_t		offset,
	upl_size_t		size,
	unsigned int		*residualp,
	int			flags)
{
	vm_object_offset_t	f_offset;
	int			pagein_err;

	assert(dp_encryption_inited);

	clustered_reads[atop_32(size)]++;

	/* Absolute byte offset within the backing file. */
	f_offset = (vm_object_offset_t)(ps->ps_offset + offset);

	pagein_err = vnode_pagein(ps->ps_vnode, upl, upl_offset, f_offset,
				  (vm_size_t)size, flags, NULL);
	if (pagein_err)
		return KERN_FAILURE;

	*residualp = 0;
	return KERN_SUCCESS;
}
/*
 * ps_write_file: page out "size" bytes from "upl" (at "upl_offset") to
 * segment offset "offset" of the file-backed paging segment "ps", via
 * vnode_pageout().  When the UPL_PAGING_ENCRYPTED flag is set, the
 * pages are encrypted in place before being written.
 * Returns KERN_SUCCESS or KERN_FAILURE.
 */
kern_return_t
ps_write_file(
	paging_segment_t	ps,
	upl_t			upl,
	upl_offset_t		upl_offset,
	vm_offset_t		offset,
	unsigned int		size,
	int			flags)
{
	vm_object_offset_t	f_offset;

	assert(dp_encryption_inited);

	clustered_writes[atop_32(size)]++;

	/* Absolute byte offset within the backing file. */
	f_offset = (vm_object_offset_t)(ps->ps_offset + offset);

	if (flags & UPL_PAGING_ENCRYPTED) {
		/* Encrypt before the data ever reaches the file. */
		upl_encrypt(upl, upl_offset, size);
	}

	if (vnode_pageout(ps->ps_vnode, upl, upl_offset, f_offset,
			  (vm_size_t)size, flags, NULL) != 0)
		return KERN_FAILURE;

	return KERN_SUCCESS;
}
/*
 * default_pager_triggers: control interface for backing-store alerts
 * and swap encryption.
 *
 * SWAP_ENCRYPT_ON/OFF may only be set once, before the first pageout
 * locks dp_encryption_inited; later attempts fail.  HI_WAT_ALERT and
 * LO_WAT_ALERT register "trigger_port" as the notification port for
 * low-space / free-space thresholds (converted from bytes to pages),
 * taking ownership of the send right and releasing the previously
 * registered one.  For all other cases the caller's right is released
 * (the "release" variable always names the right we must drop on exit).
 */
kern_return_t
default_pager_triggers( __unused MACH_PORT_FACE default_pager,
			int		hi_wat,
			int		lo_wat,
			int		flags,
			MACH_PORT_FACE	trigger_port)
{
	MACH_PORT_FACE release;
	kern_return_t kr;
	PSL_LOCK();
	if (flags == SWAP_ENCRYPT_ON) {
		/* ENCRYPTED SWAP: turn encryption on */
		release = trigger_port;
		if (!dp_encryption_inited) {
			dp_encryption_inited = TRUE;
			dp_encryption = TRUE;
			kr = KERN_SUCCESS;
		} else {
			/* Too late: a pageout already fixed the setting. */
			kr = KERN_FAILURE;
		}
	} else if (flags == SWAP_ENCRYPT_OFF) {
		/* ENCRYPTED SWAP: turn encryption off */
		release = trigger_port;
		if (!dp_encryption_inited) {
			dp_encryption_inited = TRUE;
			dp_encryption = FALSE;
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_FAILURE;
		}
	} else if (flags == HI_WAT_ALERT) {
		/* Replace the low-space trigger port. */
		release = min_pages_trigger_port;
		min_pages_trigger_port = trigger_port;
		minimum_pages_remaining = hi_wat/vm_page_size;
		bs_low = FALSE;
		kr = KERN_SUCCESS;
	} else if (flags == LO_WAT_ALERT) {
		/* Replace the excess-free-space trigger port. */
		release = max_pages_trigger_port;
		max_pages_trigger_port = trigger_port;
		maximum_pages_free = lo_wat/vm_page_size;
		kr = KERN_SUCCESS;
	} else {
		release = trigger_port;
		kr =  KERN_INVALID_ARGUMENT;
	}
	PSL_UNLOCK();
	/* Drop the displaced (or rejected) send right outside the lock. */
	if (IP_VALID(release))
		ipc_port_release_send(release);
	return kr;
}
#define PF_INTERVAL	3	/* monitor poll interval, seconds */
#define PF_LATENCY	10	/* polls excess-free must persist before alerting */
/* Consecutive polls with dp_pages_free above maximum_pages_free. */
static int dp_pages_free_low_count = 0;
/* Periodic callout running default_pager_backing_store_monitor(). */
thread_call_t default_pager_backing_store_monitor_callout;
/*
 * default_pager_backing_store_monitor: periodic (PF_INTERVAL seconds)
 * thread_call that watches for a sustained excess of free backing-store
 * pages.  After more than PF_LATENCY consecutive polls above
 * maximum_pages_free, it fires a LO_WAT_ALERT on the registered
 * max_pages_trigger_port (consuming the registration) so user space can
 * shrink the backing store.  Re-arms itself at the end of every run.
 */
void
default_pager_backing_store_monitor(__unused thread_call_param_t p1,
				    __unused thread_call_param_t p2)
{
	ipc_port_t	trigger;
	uint64_t	deadline;
	/* Track how long the free-page excess has persisted. */
	if (dp_pages_free > maximum_pages_free) {
		dp_pages_free_low_count++;
	} else {
		dp_pages_free_low_count = 0;
	}
	trigger = IP_NULL;
	if (max_pages_trigger_port &&
	    (backing_store_release_trigger_disable == 0) &&
	    (dp_pages_free_low_count > PF_LATENCY)) {
		/* Claim the port; registration is one-shot. */
		trigger = max_pages_trigger_port;
		max_pages_trigger_port = NULL;
	}
	if (trigger != IP_NULL) {
		/*
		 * If a backing-store release is in progress, wait for it
		 * to finish before alerting.  NOTE(review): after the
		 * thread_block we proceed without re-checking the flag --
		 * presumably the wakeup guarantees it cleared; confirm.
		 */
		VSL_LOCK();
		if(backing_store_release_trigger_disable != 0) {
			assert_wait((event_t)
				    &backing_store_release_trigger_disable,
				    THREAD_UNINT);
			VSL_UNLOCK();
			thread_block(THREAD_CONTINUE_NULL);
		} else {
			VSL_UNLOCK();
		}
		default_pager_space_alert(trigger, LO_WAT_ALERT);
		ipc_port_release_send(trigger);
		dp_pages_free_low_count = 0;
	}
	/* Re-arm the periodic callout. */
	clock_interval_to_deadline(PF_INTERVAL, NSEC_PER_SEC, &deadline);
	thread_call_enter_delayed(default_pager_backing_store_monitor_callout, deadline);
}