#ifdef MACH_BSD
extern int vnode_pager_workaround;
extern int device_pager_workaround;
#endif
#include <mach_cluster_stats.h>
#include <mach_pagemap.h>
#include <mach_kdb.h>
#include <vm/vm_fault.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <kern/host_statistics.h>
#include <kern/counters.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/xpr.h>
#include <ppc/proc_reg.h>
#include <vm/task_working_set.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/memory_object.h>
#include <kern/mach_param.h>
#include <kern/macro_help.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#include <sys/kdebug.h>
#define VM_FAULT_CLASSIFY 0
#define VM_FAULT_STATIC_CONFIG 1
#define TRACEFAULTPAGE 0
int vm_object_absent_max = 50;
int vm_fault_debug = 0;
#if !VM_FAULT_STATIC_CONFIG
boolean_t vm_fault_dirty_handling = FALSE;
boolean_t vm_fault_interruptible = FALSE;
boolean_t software_reference_bits = TRUE;
#endif
#if MACH_KDB
extern struct db_watchpoint *db_watchpoint_list;
#endif
extern kern_return_t vm_fault_wire_fast(
vm_map_t map,
vm_offset_t va,
vm_map_entry_t entry,
pmap_t pmap,
vm_offset_t pmap_addr);
extern void vm_fault_continue(void);
extern void vm_fault_copy_cleanup(
vm_page_t page,
vm_page_t top_page);
extern void vm_fault_copy_dst_cleanup(
vm_page_t page);
#if VM_FAULT_CLASSIFY
extern void vm_fault_classify(vm_object_t object,
vm_object_offset_t offset,
vm_prot_t fault_type);
extern void vm_fault_classify_init(void);
#endif
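/*
 *	vm_fault_init:
 *	Module initialization.  Nothing to set up at present.
 */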
void
vm_fault_init(void)
{
}
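/*
 *	Routine:	vm_fault_cleanup
 *	Purpose:
 *		Clean up the result of vm_fault_page().
 *	Results:
 *		The paging reference for "object" is released and
 *		"object" is unlocked.  If "top_page" is not null,
 *		it is freed and the paging reference for its object
 *		is released.
 *	In/out conditions:
 *		"object" must be locked.
 */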
void
vm_fault_cleanup(
register vm_object_t object,
register vm_page_t top_page)
{
vm_object_paging_end(object);
vm_object_unlock(object);
if (top_page != VM_PAGE_NULL) {
object = top_page->object;
vm_object_lock(object);
VM_PAGE_FREE(top_page);
vm_object_paging_end(object);
vm_object_unlock(object);
}
}
#if MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES 16
struct {
unsigned long pages_in_cluster;
unsigned long pages_at_higher_offsets;
unsigned long pages_at_lower_offsets;
} cluster_stats_in[MAXCLUSTERPAGES];
#define CLUSTER_STAT(clause) clause
#define CLUSTER_STAT_HIGHER(x) \
((cluster_stats_in[(x)].pages_at_higher_offsets)++)
#define CLUSTER_STAT_LOWER(x) \
((cluster_stats_in[(x)].pages_at_lower_offsets)++)
#define CLUSTER_STAT_CLUSTER(x) \
((cluster_stats_in[(x)].pages_in_cluster)++)
#else
#define CLUSTER_STAT(clause)
#endif
boolean_t vm_allow_clustered_pagein = FALSE;
int vm_pagein_cluster_used = 0;
#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
boolean_t vm_page_deactivate_behind = TRUE;
int vm_default_ahead = 0;
int vm_default_behind = MAX_UPL_TRANSFER;
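/*
 *	vm_fault_deactivate_behind:
 *	Track the access pattern recorded in object->sequential and
 *	object->last_alloc and, once a run looks sequential for the
 *	given behavior, look up the page a fixed distance behind the
 *	current fault so it can be deactivated; this keeps sequential
 *	scans from flooding the active queue.  Returns TRUE if a
 *	candidate page was found.
 */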
static
boolean_t
vm_fault_deactivate_behind(
vm_object_t object,
vm_offset_t offset,
vm_behavior_t behavior)
{
vm_page_t m;
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind);
#endif
switch (behavior) {
case VM_BEHAVIOR_RANDOM:
object->sequential = PAGE_SIZE_64;
m = VM_PAGE_NULL;
break;
case VM_BEHAVIOR_SEQUENTIAL:
if (offset &&
object->last_alloc == offset - PAGE_SIZE_64) {
object->sequential += PAGE_SIZE_64;
m = vm_page_lookup(object, offset - PAGE_SIZE_64);
} else {
object->sequential = PAGE_SIZE_64;
m = VM_PAGE_NULL;
}
break;
case VM_BEHAVIOR_RSEQNTL:
if (object->last_alloc &&
object->last_alloc == offset + PAGE_SIZE_64) {
object->sequential += PAGE_SIZE_64;
m = vm_page_lookup(object, offset + PAGE_SIZE_64);
} else {
object->sequential = PAGE_SIZE_64;
m = VM_PAGE_NULL;
}
break;
case VM_BEHAVIOR_DEFAULT:
default:
if (offset &&
object->last_alloc == offset - PAGE_SIZE_64) {
vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
object->sequential += PAGE_SIZE_64;
m = (offset >= behind &&
object->sequential >= behind) ?
vm_page_lookup(object, offset - behind) :
VM_PAGE_NULL;
} else if (object->last_alloc &&
object->last_alloc == offset + PAGE_SIZE_64) {
vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
object->sequential += PAGE_SIZE_64;
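/*
 * offset and behind are unsigned 64-bit quantities, so
 * "offset < -behind" appears intended to guard against
 * offset + behind wrapping around.
 */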
m = (offset < -behind &&
object->sequential >= behind) ?
vm_page_lookup(object, offset + behind) :
VM_PAGE_NULL;
} else {
object->sequential = PAGE_SIZE_64;
m = VM_PAGE_NULL;
}
break;
}
object->last_alloc = offset;
if (m) {
if (!m->busy) {
vm_page_lock_queues();
vm_page_deactivate(m);
vm_page_unlock_queues();
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);
#endif
}
return TRUE;
}
return FALSE;
}
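/*
 *	Routine:	vm_fault_page
 *	Purpose:
 *		Find the resident page for the virtual memory
 *		object and offset given, requesting it from the
 *		pager or zero-filling it if necessary, and handling
 *		copy-on-write along the shadow chain.
 *	Results:
 *		On VM_FAULT_SUCCESS, "*result_page" (which may be
 *		VM_PAGE_NULL for physically contiguous objects) is
 *		left busy with its object locked, and "*top_page"
 *		holds a busy placeholder page in the first object,
 *		or VM_PAGE_NULL; release both with vm_fault_cleanup().
 *	In/out conditions:
 *		The caller must lock the first object and take a
 *		paging reference on it; on any result other than
 *		VM_FAULT_SUCCESS that reference and lock have been
 *		dropped.
 */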
vm_fault_return_t
vm_fault_page(
vm_object_t first_object,
vm_object_offset_t first_offset,
vm_prot_t fault_type,
boolean_t must_be_resident,
int interruptible,
vm_object_offset_t lo_offset,
vm_object_offset_t hi_offset,
vm_behavior_t behavior,
vm_prot_t *protection,
vm_page_t *result_page,
vm_page_t *top_page,
int *type_of_fault,
kern_return_t *error_code,
boolean_t no_zero_fill,
boolean_t data_supply,
vm_map_t map,
vm_offset_t vaddr)
{
register
vm_page_t m;
register
vm_object_t object;
register
vm_object_offset_t offset;
vm_page_t first_m;
vm_object_t next_object;
vm_object_t copy_object;
boolean_t look_for_page;
vm_prot_t access_required = fault_type;
vm_prot_t wants_copy_flag;
vm_size_t length;
vm_object_offset_t cluster_start, cluster_end;
CLUSTER_STAT(int pages_at_higher_offsets;)
CLUSTER_STAT(int pages_at_lower_offsets;)
kern_return_t wait_result;
boolean_t interruptible_state;
boolean_t bumped_pagein = FALSE;
#if MACH_PAGEMAP
#define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
!= VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
== VM_EXTERNAL_STATE_EXISTS)
#else
#define LOOK_FOR(o, f) TRUE
#define PAGED_OUT(o, f) FALSE
#endif
#define PREPARE_RELEASE_PAGE(m) \
MACRO_BEGIN \
vm_page_lock_queues(); \
MACRO_END
#define DO_RELEASE_PAGE(m) \
MACRO_BEGIN \
PAGE_WAKEUP_DONE(m); \
if (!m->active && !m->inactive) \
vm_page_activate(m); \
vm_page_unlock_queues(); \
MACRO_END
#define RELEASE_PAGE(m) \
MACRO_BEGIN \
PREPARE_RELEASE_PAGE(m); \
DO_RELEASE_PAGE(m); \
MACRO_END
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset);
#endif
#if !VM_FAULT_STATIC_CONFIG
if (vm_fault_dirty_handling
#if MACH_KDB
|| db_watchpoint_list
#endif
) {
if (!(fault_type & VM_PROT_WRITE))
*protection &= ~VM_PROT_WRITE;
}
if (!vm_fault_interruptible)
interruptible = THREAD_UNINT;
#else
#if MACH_KDB
if (db_watchpoint_list) {
if (!(fault_type & VM_PROT_WRITE))
*protection &= ~VM_PROT_WRITE;
}
#endif
#endif
interruptible_state = thread_interrupt_level(interruptible);
object = first_object;
offset = first_offset;
first_m = VM_PAGE_NULL;
access_required = fault_type;
XPR(XPR_VM_FAULT,
"vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n",
(integer_t)object, offset, fault_type, *protection, 0);
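/*
 * Search the shadow chain for the page, starting at
 * (first_object, first_offset).  The loop exits with the page
 * found (busy, in the locked "object") or with a freshly
 * supplied zero-fill page.
 */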
while (TRUE) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0);
#endif
if (!object->alive) {
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_MEMORY_ERROR);
}
m = vm_page_lookup(object, offset);
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object);
#endif
if (m != VM_PAGE_NULL) {
if (m->clustered) {
vm_pagein_cluster_used++;
m->clustered = FALSE;
}
if (m->busy) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);
#endif
wait_result = PAGE_SLEEP(object, m, interruptible);
XPR(XPR_VM_FAULT,
"vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
(integer_t)object, offset,
(integer_t)m, 0, 0);
counter(c_vm_fault_page_block_busy_kernel++);
if (wait_result != THREAD_AWAKENED) {
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
if (wait_result == THREAD_RESTART)
{
return(VM_FAULT_RETRY);
}
else
{
return(VM_FAULT_INTERRUPTED);
}
}
continue;
}
if (m->error) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code);
#endif
if (error_code)
*error_code = m->page_error;
VM_PAGE_FREE(m);
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_MEMORY_ERROR);
}
if (m->restart) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0);
#endif
VM_PAGE_FREE(m);
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_RETRY);
}
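/*
 * The page is marked absent: its contents are not valid.
 * If this object has no shadow, supply a zero-filled page;
 * otherwise keep the placeholder and continue the search in
 * the object behind this one.
 */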
if (m->absent) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow);
#endif
next_object = object->shadow;
if (next_object == VM_OBJECT_NULL) {
vm_page_t real_m;
assert(!must_be_resident);
if (object->shadow_severed) {
vm_fault_cleanup(
object, first_m);
thread_interrupt_level(interruptible_state);
return VM_FAULT_MEMORY_ERROR;
}
if (VM_PAGE_THROTTLED() ||
(real_m = vm_page_grab())
== VM_PAGE_NULL) {
vm_fault_cleanup(
object, first_m);
thread_interrupt_level(
interruptible_state);
return(
VM_FAULT_MEMORY_SHORTAGE);
}
if(vm_backing_store_low) {
if(!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV)) {
assert_wait((event_t)
&vm_backing_store_low,
THREAD_UNINT);
vm_fault_cleanup(object,
first_m);
thread_block((void(*)(void)) 0);
thread_interrupt_level(
interruptible_state);
return(VM_FAULT_RETRY);
}
}
XPR(XPR_VM_FAULT,
"vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
(integer_t)object, offset,
(integer_t)m,
(integer_t)first_object, 0);
if (object != first_object) {
VM_PAGE_FREE(m);
vm_object_paging_end(object);
vm_object_unlock(object);
object = first_object;
offset = first_offset;
m = first_m;
first_m = VM_PAGE_NULL;
vm_object_lock(object);
}
VM_PAGE_FREE(m);
assert(real_m->busy);
vm_page_insert(real_m, object, offset);
m = real_m;
m->no_isync = FALSE;
if (!no_zero_fill) {
vm_object_unlock(object);
vm_page_zero_fill(m);
vm_object_lock(object);
}
if (type_of_fault)
*type_of_fault = DBG_ZERO_FILL_FAULT;
VM_STAT(zero_fill_count++);
if (bumped_pagein == TRUE) {
VM_STAT(pageins--);
current_task()->pageins--;
}
#if 0
pmap_clear_modify(m->phys_page);
#endif
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(m);
m->page_ticket = vm_page_ticket;
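/*
 * Zero-fill pages of large (> 512K) objects go on their own
 * queue; the 0x80000 threshold appears to be empirical.
 */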
if(m->object->size > 0x80000) {
m->zero_fill = TRUE;
vm_zf_count += 1;
queue_enter(&vm_page_queue_zf,
m, vm_page_t, pageq);
} else {
queue_enter(
&vm_page_queue_inactive,
m, vm_page_t, pageq);
}
vm_page_ticket_roll++;
if(vm_page_ticket_roll ==
VM_PAGE_TICKETS_IN_ROLL) {
vm_page_ticket_roll = 0;
if(vm_page_ticket ==
VM_PAGE_TICKET_ROLL_IDS)
vm_page_ticket= 0;
else
vm_page_ticket++;
}
m->inactive = TRUE;
vm_page_inactive_count++;
vm_page_unlock_queues();
break;
} else {
if (must_be_resident) {
vm_object_paging_end(object);
} else if (object != first_object) {
vm_object_paging_end(object);
VM_PAGE_FREE(m);
} else {
first_m = m;
m->absent = FALSE;
m->unusual = FALSE;
vm_object_absent_release(object);
m->busy = TRUE;
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(m);
vm_page_unlock_queues();
}
XPR(XPR_VM_FAULT,
"vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
(integer_t)object, offset,
(integer_t)next_object,
offset+object->shadow_offset,0);
offset += object->shadow_offset;
hi_offset += object->shadow_offset;
lo_offset += object->shadow_offset;
access_required = VM_PROT_READ;
vm_object_lock(next_object);
vm_object_unlock(object);
object = next_object;
vm_object_paging_begin(object);
continue;
}
}
if ((m->cleaning)
&& ((object != first_object) ||
(object->copy != VM_OBJECT_NULL))
&& (fault_type & VM_PROT_WRITE)) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset);
#endif
XPR(XPR_VM_FAULT,
"vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
(integer_t)object, offset,
(integer_t)m, 0, 0);
assert(object->ref_count > 0);
object->ref_count++;
vm_object_res_reference(object);
vm_fault_cleanup(object, first_m);
counter(c_vm_fault_page_block_backoff_kernel++);
vm_object_lock(object);
assert(object->ref_count > 0);
m = vm_page_lookup(object, offset);
if (m != VM_PAGE_NULL && m->cleaning) {
PAGE_ASSERT_WAIT(m, interruptible);
vm_object_unlock(object);
wait_result = thread_block(THREAD_CONTINUE_NULL);
vm_object_deallocate(object);
goto backoff;
} else {
vm_object_unlock(object);
vm_object_deallocate(object);
thread_interrupt_level(interruptible_state);
return VM_FAULT_RETRY;
}
}
if (access_required & m->page_lock) {
if ((access_required & m->unlock_request) != access_required) {
vm_prot_t new_unlock_request;
kern_return_t rc;
#if TRACEFAULTPAGE
dbgTrace(0xBEEF000A, (unsigned int) m, (unsigned int) object->pager_ready);
#endif
if (!object->pager_ready) {
XPR(XPR_VM_FAULT,
"vm_f_page: ready wait acc_req %d, obj 0x%X, offset 0x%X, page 0x%X\n",
access_required,
(integer_t)object, offset,
(integer_t)m, 0);
assert(object->ref_count > 0);
object->ref_count++;
vm_object_res_reference(object);
vm_fault_cleanup(object,
first_m);
counter(c_vm_fault_page_block_backoff_kernel++);
vm_object_lock(object);
assert(object->ref_count > 0);
if (!object->pager_ready) {
wait_result = vm_object_assert_wait(
object,
VM_OBJECT_EVENT_PAGER_READY,
interruptible);
vm_object_unlock(object);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
vm_object_deallocate(object);
goto backoff;
} else {
vm_object_unlock(object);
vm_object_deallocate(object);
thread_interrupt_level(interruptible_state);
return VM_FAULT_RETRY;
}
}
new_unlock_request = m->unlock_request =
(access_required | m->unlock_request);
vm_object_unlock(object);
XPR(XPR_VM_FAULT,
"vm_f_page: unlock obj 0x%X, offset 0x%X, page 0x%X, unl_req %d\n",
(integer_t)object, offset,
(integer_t)m, new_unlock_request, 0);
if ((rc = memory_object_data_unlock(
object->pager,
offset + object->paging_offset,
PAGE_SIZE,
new_unlock_request))
!= KERN_SUCCESS) {
if (vm_fault_debug)
printf("vm_fault: memory_object_data_unlock failed\n");
vm_object_lock(object);
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return((rc == MACH_SEND_INTERRUPTED) ?
VM_FAULT_INTERRUPTED :
VM_FAULT_MEMORY_ERROR);
}
vm_object_lock(object);
continue;
}
XPR(XPR_VM_FAULT,
"vm_f_page: access wait acc_req %d, obj 0x%X, offset 0x%X, page 0x%X\n",
access_required, (integer_t)object,
offset, (integer_t)m, 0);
assert(object->ref_count > 0);
object->ref_count++;
vm_object_res_reference(object);
vm_fault_cleanup(object, first_m);
counter(c_vm_fault_page_block_backoff_kernel++);
vm_object_lock(object);
assert(object->ref_count > 0);
m = vm_page_lookup(object, offset);
if (m != VM_PAGE_NULL &&
(access_required & m->page_lock) &&
!((access_required & m->unlock_request) != access_required)) {
PAGE_ASSERT_WAIT(m, interruptible);
vm_object_unlock(object);
wait_result = thread_block(THREAD_CONTINUE_NULL);
vm_object_deallocate(object);
goto backoff;
} else {
vm_object_unlock(object);
vm_object_deallocate(object);
thread_interrupt_level(interruptible_state);
return VM_FAULT_RETRY;
}
}
#if TRACEFAULTPAGE
dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0);
#endif
#if !VM_FAULT_STATIC_CONFIG
if (!software_reference_bits) {
vm_page_lock_queues();
if (m->inactive)
vm_stat.reactivations++;
VM_PAGE_QUEUES_REMOVE(m);
vm_page_unlock_queues();
}
#endif
XPR(XPR_VM_FAULT,
"vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
(integer_t)object, offset, (integer_t)m, 0, 0);
assert(!m->busy);
m->busy = TRUE;
assert(!m->absent);
break;
}
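/*
 * The page is not resident at this level.  Decide whether to
 * ask the pager for it; if so, allocate a fictitious page to
 * act as a placeholder for the request.
 */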
look_for_page =
(object->pager_created) &&
LOOK_FOR(object, offset) &&
(!data_supply);
#if TRACEFAULTPAGE
dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);
#endif
if ((look_for_page || (object == first_object))
&& !must_be_resident
&& !(object->phys_contiguous)) {
m = vm_page_grab_fictitious();
#if TRACEFAULTPAGE
dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);
#endif
if (m == VM_PAGE_NULL) {
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_FICTITIOUS_SHORTAGE);
}
vm_page_insert(m, object, offset);
}
if ((look_for_page && !must_be_resident)) {
kern_return_t rc;
if (!object->pager_ready) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0);
#endif
if(m != VM_PAGE_NULL)
VM_PAGE_FREE(m);
XPR(XPR_VM_FAULT,
"vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
(integer_t)object, offset, 0, 0, 0);
assert(object->ref_count > 0);
object->ref_count++;
vm_object_res_reference(object);
vm_fault_cleanup(object, first_m);
counter(c_vm_fault_page_block_backoff_kernel++);
vm_object_lock(object);
assert(object->ref_count > 0);
if (!object->pager_ready) {
wait_result = vm_object_assert_wait(object,
VM_OBJECT_EVENT_PAGER_READY,
interruptible);
vm_object_unlock(object);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
vm_object_deallocate(object);
goto backoff;
} else {
vm_object_unlock(object);
vm_object_deallocate(object);
thread_interrupt_level(interruptible_state);
return VM_FAULT_RETRY;
}
}
if(object->phys_contiguous) {
if(m != VM_PAGE_NULL) {
VM_PAGE_FREE(m);
m = VM_PAGE_NULL;
}
goto no_clustering;
}
if (object->internal) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF000F, (unsigned int) m, (unsigned int) 0);
#endif
if (m->fictitious && !vm_page_convert(m)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_MEMORY_SHORTAGE);
}
} else if (object->absent_count >
vm_object_absent_max) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0);
#endif
if(m != VM_PAGE_NULL)
VM_PAGE_FREE(m);
assert(object->ref_count > 0);
object->ref_count++;
vm_object_res_reference(object);
vm_fault_cleanup(object, first_m);
counter(c_vm_fault_page_block_backoff_kernel++);
vm_object_lock(object);
assert(object->ref_count > 0);
if (object->absent_count > vm_object_absent_max) {
vm_object_absent_assert_wait(object,
interruptible);
vm_object_unlock(object);
wait_result = thread_block(THREAD_CONTINUE_NULL);
vm_object_deallocate(object);
goto backoff;
} else {
vm_object_unlock(object);
vm_object_deallocate(object);
thread_interrupt_level(interruptible_state);
return VM_FAULT_RETRY;
}
}
if(m != VM_PAGE_NULL) {
m->list_req_pending = TRUE;
m->absent = TRUE;
m->unusual = TRUE;
object->absent_count++;
}
no_clustering:
cluster_start = offset;
length = PAGE_SIZE;
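/*
 * Let the task working set lengthen the request; the start
 * and end values it computes are only hints.
 */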
if((map != NULL) &&
(current_task()->dynamic_working_set != 0)) {
cluster_end = offset + PAGE_SIZE_64;
tws_build_cluster((tws_hash_t)
current_task()->dynamic_working_set,
object, &cluster_start,
&cluster_end, 0x40000);
length = cluster_end - cluster_start;
}
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);
#endif
vm_object_unlock(object);
if (type_of_fault)
*type_of_fault = (length << 8) | DBG_PAGEIN_FAULT;
VM_STAT(pageins++);
current_task()->pageins++;
bumped_pagein = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL &&
object != first_object) {
wants_copy_flag = VM_PROT_WANTS_COPY;
} else {
wants_copy_flag = VM_PROT_NONE;
}
XPR(XPR_VM_FAULT,
"vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
(integer_t)object, offset, (integer_t)m,
access_required | wants_copy_flag, 0);
rc = memory_object_data_request(object->pager,
cluster_start + object->paging_offset,
length,
access_required | wants_copy_flag);
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc);
#endif
if (rc != KERN_SUCCESS) {
if (rc != MACH_SEND_INTERRUPTED
&& vm_fault_debug)
printf("%s(0x%x, 0x%x, 0x%x, 0x%x) failed, rc=%d\n",
"memory_object_data_request",
object->pager,
cluster_start + object->paging_offset,
length, access_required, rc);
if(!object->phys_contiguous) {
vm_object_lock(object);
for (; length; length -= PAGE_SIZE,
cluster_start += PAGE_SIZE_64) {
vm_page_t p;
if ((p = vm_page_lookup(object,
cluster_start))
&& p->absent && p->busy
&& p != first_m) {
VM_PAGE_FREE(p);
}
}
}
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return((rc == MACH_SEND_INTERRUPTED) ?
VM_FAULT_INTERRUPTED :
VM_FAULT_MEMORY_ERROR);
} else {
#ifdef notdefcdy
tws_hash_line_t line;
task_t task;
task = current_task();
if((map != NULL) &&
(task->dynamic_working_set != 0) &&
!(object->private)) {
vm_object_t base_object;
vm_object_offset_t base_offset;
base_object = object;
base_offset = offset;
while(base_object->shadow) {
base_offset +=
base_object->shadow_offset;
base_object =
base_object->shadow;
}
if(tws_lookup
((tws_hash_t)
task->dynamic_working_set,
base_offset, base_object,
&line) == KERN_SUCCESS) {
tws_line_signal((tws_hash_t)
task->dynamic_working_set,
map, line, vaddr);
}
}
#endif
}
vm_object_lock(object);
if ((interruptible != THREAD_UNINT) &&
(current_thread()->state & TH_ABORT)) {
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_INTERRUPTED);
}
if(m == VM_PAGE_NULL)
break;
continue;
}
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m);
#endif
if (object == first_object)
first_m = m;
else
assert(m == VM_PAGE_NULL);
XPR(XPR_VM_FAULT,
"vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
(integer_t)object, offset, (integer_t)m,
(integer_t)object->shadow, 0);
next_object = object->shadow;
if (next_object == VM_OBJECT_NULL) {
assert(!must_be_resident);
if (object != first_object) {
vm_object_paging_end(object);
vm_object_unlock(object);
object = first_object;
offset = first_offset;
vm_object_lock(object);
}
m = first_m;
assert(m->object == object);
first_m = VM_PAGE_NULL;
if(m == VM_PAGE_NULL) {
m = vm_page_grab();
if (m == VM_PAGE_NULL) {
vm_fault_cleanup(
object, VM_PAGE_NULL);
thread_interrupt_level(
interruptible_state);
return(VM_FAULT_MEMORY_SHORTAGE);
}
vm_page_insert(
m, object, offset);
}
if (object->shadow_severed) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, VM_PAGE_NULL);
thread_interrupt_level(interruptible_state);
return VM_FAULT_MEMORY_ERROR;
}
if(vm_backing_store_low) {
if(!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV)) {
assert_wait((event_t)
&vm_backing_store_low,
THREAD_UNINT);
VM_PAGE_FREE(m);
vm_fault_cleanup(object, VM_PAGE_NULL);
thread_block((void (*)(void)) 0);
thread_interrupt_level(
interruptible_state);
return(VM_FAULT_RETRY);
}
}
if (VM_PAGE_THROTTLED() ||
(m->fictitious && !vm_page_convert(m))) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, VM_PAGE_NULL);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_MEMORY_SHORTAGE);
}
m->no_isync = FALSE;
if (!no_zero_fill) {
vm_object_unlock(object);
vm_page_zero_fill(m);
vm_object_lock(object);
}
if (type_of_fault)
*type_of_fault = DBG_ZERO_FILL_FAULT;
VM_STAT(zero_fill_count++);
if (bumped_pagein == TRUE) {
VM_STAT(pageins--);
current_task()->pageins--;
}
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(m);
if(m->object->size > 0x80000) {
m->zero_fill = TRUE;
vm_zf_count += 1;
queue_enter(&vm_page_queue_zf,
m, vm_page_t, pageq);
} else {
queue_enter(
&vm_page_queue_inactive,
m, vm_page_t, pageq);
}
m->page_ticket = vm_page_ticket;
vm_page_ticket_roll++;
if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
vm_page_ticket_roll = 0;
if(vm_page_ticket ==
VM_PAGE_TICKET_ROLL_IDS)
vm_page_ticket= 0;
else
vm_page_ticket++;
}
m->inactive = TRUE;
vm_page_inactive_count++;
vm_page_unlock_queues();
#if 0
pmap_clear_modify(m->phys_page);
#endif
break;
}
else {
if ((object != first_object) || must_be_resident)
vm_object_paging_end(object);
offset += object->shadow_offset;
hi_offset += object->shadow_offset;
lo_offset += object->shadow_offset;
access_required = VM_PROT_READ;
vm_object_lock(next_object);
vm_object_unlock(object);
object = next_object;
vm_object_paging_begin(object);
}
}
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);
#endif
#if EXTRA_ASSERTIONS
if(m != VM_PAGE_NULL) {
assert(m->busy && !m->absent);
assert((first_m == VM_PAGE_NULL) ||
(first_m->busy && !first_m->absent &&
!first_m->active && !first_m->inactive));
}
#endif
XPR(XPR_VM_FAULT,
"vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
(integer_t)object, offset, (integer_t)m,
(integer_t)first_object, (integer_t)first_m);
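/*
 * If the page came from an object behind the first one and
 * this is a write fault, push a copy of it up to the first
 * object now (classic copy-on-write); otherwise just drop
 * write permission.
 */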
if ((object != first_object) && (m != VM_PAGE_NULL)) {
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type);
#endif
if (fault_type & VM_PROT_WRITE) {
vm_page_t copy_m;
assert(!must_be_resident);
if(vm_backing_store_low) {
if(!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV)) {
assert_wait((event_t)
&vm_backing_store_low,
THREAD_UNINT);
RELEASE_PAGE(m);
vm_fault_cleanup(object, first_m);
thread_block((void (*)(void)) 0);
thread_interrupt_level(
interruptible_state);
return(VM_FAULT_RETRY);
}
}
copy_m = vm_page_grab();
if (copy_m == VM_PAGE_NULL) {
RELEASE_PAGE(m);
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_MEMORY_SHORTAGE);
}
XPR(XPR_VM_FAULT,
"vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n",
(integer_t)object, offset,
(integer_t)m, (integer_t)copy_m, 0);
vm_page_copy(m, copy_m);
vm_page_lock_queues();
assert(!m->cleaning);
pmap_page_protect(m->phys_page, VM_PROT_NONE);
vm_page_deactivate(m);
copy_m->dirty = TRUE;
first_m->reference = TRUE;
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(m);
vm_object_paging_end(object);
vm_object_unlock(object);
if (type_of_fault)
*type_of_fault = DBG_COW_FAULT;
VM_STAT(cow_faults++);
current_task()->cow_faults++;
object = first_object;
offset = first_offset;
vm_object_lock(object);
VM_PAGE_FREE(first_m);
first_m = VM_PAGE_NULL;
assert(copy_m->busy);
vm_page_insert(copy_m, object, offset);
m = copy_m;
vm_object_paging_end(object);
vm_object_collapse(object, offset);
vm_object_paging_begin(object);
}
else {
*protection &= (~VM_PROT_WRITE);
}
}
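/*
 * If the top object has a copy object, a write may also
 * require pushing the page into the copy before it can be
 * modified (copy-on-write in the other direction).
 */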
while ((copy_object = first_object->copy) != VM_OBJECT_NULL &&
(m!= VM_PAGE_NULL)) {
vm_object_offset_t copy_offset;
vm_page_t copy_m;
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type);
#endif
if ((fault_type & VM_PROT_WRITE) == 0) {
*protection &= ~VM_PROT_WRITE;
break;
}
if (must_be_resident)
break;
if (!vm_object_lock_try(copy_object)) {
vm_object_unlock(object);
mutex_pause();
vm_object_lock(object);
continue;
}
assert(copy_object->ref_count > 0);
copy_object->ref_count++;
VM_OBJ_RES_INCR(copy_object);
copy_offset = first_offset - copy_object->shadow_offset;
if (copy_object->size <= copy_offset)
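/*
 * Copy object doesn't cover this page -- do nothing.
 */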
;
else if ((copy_m =
vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
if (copy_m->busy) {
RELEASE_PAGE(m);
assert(copy_object->ref_count > 0);
copy_object->ref_count++;
vm_object_res_reference(copy_object);
vm_object_unlock(copy_object);
vm_fault_cleanup(object, first_m);
counter(c_vm_fault_page_block_backoff_kernel++);
vm_object_lock(copy_object);
assert(copy_object->ref_count > 0);
VM_OBJ_RES_DECR(copy_object);
copy_object->ref_count--;
assert(copy_object->ref_count > 0);
copy_m = vm_page_lookup(copy_object, copy_offset);
if (copy_m != VM_PAGE_NULL && copy_m->busy) {
PAGE_ASSERT_WAIT(copy_m, interruptible);
vm_object_unlock(copy_object);
wait_result = thread_block(THREAD_CONTINUE_NULL);
vm_object_deallocate(copy_object);
goto backoff;
} else {
vm_object_unlock(copy_object);
vm_object_deallocate(copy_object);
thread_interrupt_level(interruptible_state);
return VM_FAULT_RETRY;
}
}
}
else if (!PAGED_OUT(copy_object, copy_offset)) {
if(vm_backing_store_low) {
if(!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV)) {
assert_wait((event_t)
&vm_backing_store_low,
THREAD_UNINT);
RELEASE_PAGE(m);
VM_OBJ_RES_DECR(copy_object);
copy_object->ref_count--;
assert(copy_object->ref_count > 0);
vm_object_unlock(copy_object);
vm_fault_cleanup(object, first_m);
thread_block((void (*)(void)) 0);
thread_interrupt_level(
interruptible_state);
return(VM_FAULT_RETRY);
}
}
copy_m = vm_page_alloc(copy_object, copy_offset);
if (copy_m == VM_PAGE_NULL) {
RELEASE_PAGE(m);
VM_OBJ_RES_DECR(copy_object);
copy_object->ref_count--;
assert(copy_object->ref_count > 0);
vm_object_unlock(copy_object);
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return(VM_FAULT_MEMORY_SHORTAGE);
}
vm_page_copy(m, copy_m);
vm_page_lock_queues();
assert(!m->cleaning);
pmap_page_protect(m->phys_page, VM_PROT_NONE);
copy_m->dirty = TRUE;
vm_page_unlock_queues();
if
#if MACH_PAGEMAP
((!copy_object->pager_created) ||
vm_external_state_get(
copy_object->existence_map, copy_offset)
== VM_EXTERNAL_STATE_ABSENT)
#else
(!copy_object->pager_created)
#endif
{
vm_page_lock_queues();
vm_page_activate(copy_m);
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(copy_m);
}
else {
assert(copy_m->busy == TRUE);
vm_object_unlock(object);
vm_pageout_initialize_page(copy_m);
if ((copy_object->shadow != object) ||
(copy_object->ref_count == 1)) {
vm_object_unlock(copy_object);
vm_object_deallocate(copy_object);
vm_object_lock(object);
continue;
}
vm_object_lock(object);
}
if (m->wanted) {
m->wanted = FALSE;
thread_wakeup_with_result((event_t) m,
THREAD_RESTART);
}
}
copy_object->ref_count--;
assert(copy_object->ref_count > 0);
VM_OBJ_RES_DECR(copy_object);
vm_object_unlock(copy_object);
break;
}
*result_page = m;
*top_page = first_m;
XPR(XPR_VM_FAULT,
"vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
(integer_t)object, offset, (integer_t)m, (integer_t)first_m, 0);
if(m != VM_PAGE_NULL) {
#if !VM_FAULT_STATIC_CONFIG
if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
m->dirty = TRUE;
#endif
if (vm_page_deactivate_behind)
vm_fault_deactivate_behind(object, offset, behavior);
} else {
vm_object_unlock(object);
}
thread_interrupt_level(interruptible_state);
#if TRACEFAULTPAGE
dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);
#endif
return(VM_FAULT_SUCCESS);
#if 0
block_and_backoff:
vm_fault_cleanup(object, first_m);
counter(c_vm_fault_page_block_backoff_kernel++);
thread_block(THREAD_CONTINUE_NULL);
#endif
backoff:
thread_interrupt_level(interruptible_state);
if (wait_result == THREAD_INTERRUPTED)
return VM_FAULT_INTERRUPTED;
return VM_FAULT_RETRY;
#undef RELEASE_PAGE
}
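/*
 *	Routine:	vm_fault_tws_insert
 *	Purpose:
 *		Add the (object, offset) pair backing "vaddr" to the
 *		task's working set, first walking to the bottom of
 *		the shadow chain.
 *	Returns:
 *		TRUE if the startup file should be written out, i.e.
 *		a working-set operation timed out.
 */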
static boolean_t
vm_fault_tws_insert(
vm_map_t map,
vm_map_t pmap_map,
vm_offset_t vaddr,
vm_object_t object,
vm_object_offset_t offset)
{
tws_hash_line_t line;
task_t task;
kern_return_t kr;
boolean_t result = FALSE;
extern vm_map_t kalloc_map;
if (map == kernel_map || map == kalloc_map ||
pmap_map == kernel_map || pmap_map == kalloc_map)
return result;
task = current_task();
if (task->dynamic_working_set != 0) {
vm_object_t base_object;
vm_object_t base_shadow;
vm_object_offset_t base_offset;
base_object = object;
base_offset = offset;
while ((base_shadow = base_object->shadow) != VM_OBJECT_NULL) {
vm_object_lock(base_shadow);
vm_object_unlock(base_object);
base_offset +=
base_object->shadow_offset;
base_object = base_shadow;
}
kr = tws_lookup((tws_hash_t)
task->dynamic_working_set,
base_offset, base_object,
&line);
if (kr == KERN_OPERATION_TIMED_OUT){
result = TRUE;
if (base_object != object) {
vm_object_unlock(base_object);
vm_object_lock(object);
}
} else if (kr != KERN_SUCCESS) {
if(base_object != object)
vm_object_reference_locked(base_object);
kr = tws_insert((tws_hash_t)
task->dynamic_working_set,
base_offset, base_object,
vaddr, pmap_map);
if(base_object != object) {
vm_object_unlock(base_object);
vm_object_deallocate(base_object);
}
if(kr == KERN_NO_SPACE) {
if (base_object == object)
vm_object_unlock(object);
tws_expand_working_set(
task->dynamic_working_set,
TWS_HASH_LINE_COUNT,
FALSE);
if (base_object == object)
vm_object_lock(object);
} else if(kr == KERN_OPERATION_TIMED_OUT) {
result = TRUE;
}
if(base_object != object)
vm_object_lock(object);
} else if (base_object != object) {
vm_object_unlock(base_object);
vm_object_lock(object);
}
}
return result;
}
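/*
 *	Routine:	vm_fault
 *	Purpose:
 *		Handle a page fault at "vaddr" in "map".  A fast
 *		path maps resident pages (including the simple
 *		zero-fill and copy-on-write cases) inline; anything
 *		else goes through vm_fault_page() and the result is
 *		then entered in the physical map.
 *	Returns:
 *		KERN_SUCCESS, or a kern_return_t error translated
 *		from the vm_fault_page() result.
 */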
kern_return_t
vm_fault(
vm_map_t map,
vm_offset_t vaddr,
vm_prot_t fault_type,
boolean_t change_wiring,
int interruptible,
pmap_t caller_pmap,
vm_offset_t caller_pmap_addr)
{
vm_map_version_t version;
boolean_t wired;
vm_object_t object;
vm_object_offset_t offset;
vm_prot_t prot;
vm_behavior_t behavior;
vm_object_offset_t lo_offset, hi_offset;
vm_object_t old_copy_object;
vm_page_t result_page;
vm_page_t top_page;
kern_return_t kr;
register
vm_page_t m;
kern_return_t error_code;
register
vm_object_t cur_object;
register
vm_object_offset_t cur_offset;
vm_page_t cur_m;
vm_object_t new_object;
int type_of_fault;
vm_map_t pmap_map = map;
vm_map_t original_map = map;
pmap_t pmap = NULL;
boolean_t funnel_set = FALSE;
funnel_t *curflock;
thread_t cur_thread;
boolean_t interruptible_state;
unsigned int cache_attr;
int write_startup_file = 0;
vm_prot_t full_fault_type;
if (get_preemption_level() != 0)
return (KERN_FAILURE);
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START,
vaddr,
0,
0,
0,
0);
full_fault_type = fault_type;
if(fault_type & VM_PROT_EXECUTE) {
fault_type &= ~VM_PROT_EXECUTE;
fault_type |= VM_PROT_READ;
}
interruptible_state = thread_interrupt_level(interruptible);
type_of_fault = DBG_CACHE_HIT_FAULT;
VM_STAT(faults++);
current_task()->faults++;
cur_thread = current_thread();
if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
funnel_set = TRUE;
curflock = cur_thread->funnel_lock;
thread_funnel_set( curflock , FALSE);
}
RetryFault: ;
map = original_map;
vm_map_lock_read(map);
kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version,
&object, &offset,
&prot, &wired,
&behavior, &lo_offset, &hi_offset, &pmap_map);
pmap = pmap_map->pmap;
if (kr != KERN_SUCCESS) {
vm_map_unlock_read(map);
goto done;
}
if (wired)
fault_type = prot | VM_PROT_WRITE;
#if VM_FAULT_CLASSIFY
vm_fault_classify(object, offset, fault_type);
#endif
if (object->copy_strategy != MEMORY_OBJECT_COPY_DELAY ||
object->copy == VM_OBJECT_NULL ||
(fault_type & VM_PROT_WRITE) == 0) {
cur_object = object;
cur_offset = offset;
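/*
 * Fast path: walk the shadow chain looking for a resident
 * page that can be mapped without the full vm_fault_page()
 * machinery.
 */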
while (TRUE) {
m = vm_page_lookup(cur_object, cur_offset);
if (m != VM_PAGE_NULL) {
if (m->busy) {
wait_result_t result;
if (object != cur_object)
vm_object_unlock(object);
vm_map_unlock_read(map);
if (pmap_map != map)
vm_map_unlock(pmap_map);
#if !VM_FAULT_STATIC_CONFIG
if (!vm_fault_interruptible)
interruptible = THREAD_UNINT;
#endif
result = PAGE_ASSERT_WAIT(m, interruptible);
vm_object_unlock(cur_object);
if (result == THREAD_WAITING) {
result = thread_block(THREAD_CONTINUE_NULL);
counter(c_vm_fault_page_block_busy_kernel++);
}
if (result == THREAD_AWAKENED || result == THREAD_RESTART)
goto RetryFault;
kr = KERN_ABORTED;
goto done;
}
if (m->unusual && (m->error || m->restart || m->private
|| m->absent || (fault_type & m->page_lock))) {
break;
}
if (object == cur_object &&
object->copy == VM_OBJECT_NULL)
goto FastMapInFault;
if ((fault_type & VM_PROT_WRITE) == 0) {
boolean_t sequential;
prot &= ~VM_PROT_WRITE;
if (object != cur_object) {
vm_object_unlock(object);
object = cur_object;
}
FastMapInFault:
m->busy = TRUE;
vm_object_paging_begin(object);
FastPmapEnter:
#if !VM_FAULT_STATIC_CONFIG
if (vm_fault_dirty_handling
#if MACH_KDB
|| db_watchpoint_list
#endif
&& (fault_type & VM_PROT_WRITE) == 0)
prot &= ~VM_PROT_WRITE;
#else
#if MACH_KDB
if (db_watchpoint_list
&& (fault_type & VM_PROT_WRITE) == 0)
prot &= ~VM_PROT_WRITE;
#endif
#endif
cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
sequential = FALSE;
if (m->no_isync == TRUE) {
m->no_isync = FALSE;
pmap_sync_caches_phys(m->phys_page);
if (type_of_fault == DBG_CACHE_HIT_FAULT) {
VM_STAT(pageins++);
current_task()->pageins++;
type_of_fault = DBG_PAGEIN_FAULT;
sequential = TRUE;
}
} else if (cache_attr != VM_WIMG_DEFAULT) {
pmap_sync_caches_phys(m->phys_page);
}
if(caller_pmap) {
PMAP_ENTER(caller_pmap,
caller_pmap_addr, m,
prot, cache_attr, wired);
} else {
PMAP_ENTER(pmap, vaddr, m,
prot, cache_attr, wired);
}
vm_page_lock_queues();
if (m->clustered) {
vm_pagein_cluster_used++;
m->clustered = FALSE;
}
m->reference = TRUE;
if (change_wiring) {
if (wired)
vm_page_wire(m);
else
vm_page_unwire(m);
}
#if VM_FAULT_STATIC_CONFIG
else {
if (!m->active && !m->inactive)
vm_page_activate(m);
}
#else
else if (software_reference_bits) {
if (!m->active && !m->inactive)
vm_page_activate(m);
}
else if (!m->active) {
vm_page_activate(m);
}
#endif
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(m);
sequential = (sequential && vm_page_deactivate_behind) ?
vm_fault_deactivate_behind(object, cur_offset, behavior) :
FALSE;
if (!sequential && !object->private) {
write_startup_file =
vm_fault_tws_insert(map, pmap_map, vaddr,
object, cur_offset);
}
vm_object_paging_end(object);
vm_object_unlock(object);
vm_map_unlock_read(map);
if(pmap_map != map)
vm_map_unlock(pmap_map);
if(write_startup_file)
tws_send_startup_info(current_task());
if (funnel_set)
thread_funnel_set( curflock, TRUE);
thread_interrupt_level(interruptible_state);
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
vaddr,
type_of_fault & 0xff,
KERN_SUCCESS,
type_of_fault >> 8,
0);
return KERN_SUCCESS;
}
if (cur_object == object)
break;
cur_m = m;
m = vm_page_grab();
if (m == VM_PAGE_NULL) {
break;
}
cur_m->busy = TRUE;
vm_page_copy(cur_m, m);
vm_page_insert(m, object, offset);
vm_object_paging_begin(cur_object);
vm_object_paging_begin(object);
type_of_fault = DBG_COW_FAULT;
VM_STAT(cow_faults++);
current_task()->cow_faults++;
vm_page_lock_queues();
vm_page_deactivate(cur_m);
m->dirty = TRUE;
pmap_page_protect(cur_m->phys_page,
VM_PROT_NONE);
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(cur_m);
vm_object_paging_end(cur_object);
vm_object_unlock(cur_object);
vm_object_paging_end(object);
vm_object_collapse(object, offset);
vm_object_paging_begin(object);
goto FastPmapEnter;
}
else {
if (cur_object->pager_created) {
break;
}
if (cur_object->shadow == VM_OBJECT_NULL) {
if (cur_object->shadow_severed) {
vm_object_paging_end(object);
vm_object_unlock(object);
vm_map_unlock_read(map);
if(pmap_map != map)
vm_map_unlock(pmap_map);
if(write_startup_file)
tws_send_startup_info(
current_task());
if (funnel_set) {
thread_funnel_set( curflock, TRUE);
funnel_set = FALSE;
}
thread_interrupt_level(interruptible_state);
return KERN_MEMORY_ERROR;
}
if (VM_PAGE_THROTTLED()) {
break;
}
if(vm_backing_store_low) {
if(!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV))
break;
}
m = vm_page_alloc(object, offset);
if (m == VM_PAGE_NULL) {
break;
}
m->no_isync = FALSE;
if (cur_object != object)
vm_object_unlock(cur_object);
vm_object_paging_begin(object);
vm_object_unlock(object);
if (!map->no_zero_fill) {
vm_page_zero_fill(m);
type_of_fault = DBG_ZERO_FILL_FAULT;
VM_STAT(zero_fill_count++);
}
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(m);
m->page_ticket = vm_page_ticket;
if(m->object->size > 0x80000) {
m->zero_fill = TRUE;
vm_zf_count += 1;
queue_enter(&vm_page_queue_zf,
m, vm_page_t, pageq);
} else {
queue_enter(
&vm_page_queue_inactive,
m, vm_page_t, pageq);
}
vm_page_ticket_roll++;
if(vm_page_ticket_roll ==
VM_PAGE_TICKETS_IN_ROLL) {
vm_page_ticket_roll = 0;
if(vm_page_ticket ==
VM_PAGE_TICKET_ROLL_IDS)
vm_page_ticket= 0;
else
vm_page_ticket++;
}
m->inactive = TRUE;
vm_page_inactive_count++;
vm_page_unlock_queues();
vm_object_lock(object);
goto FastPmapEnter;
}
cur_offset += cur_object->shadow_offset;
new_object = cur_object->shadow;
vm_object_lock(new_object);
if (cur_object != object)
vm_object_unlock(cur_object);
cur_object = new_object;
continue;
}
}
if (object != cur_object)
vm_object_unlock(cur_object);
}
vm_map_unlock_read(map);
if(pmap_map != map)
vm_map_unlock(pmap_map);
assert(object->ref_count > 0);
object->ref_count++;
vm_object_res_reference(object);
vm_object_paging_begin(object);
XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0);
if (!object->private) {
write_startup_file =
vm_fault_tws_insert(map, pmap_map, vaddr, object, offset);
}
kr = vm_fault_page(object, offset, fault_type,
(change_wiring && !wired),
interruptible,
lo_offset, hi_offset, behavior,
&prot, &result_page, &top_page,
&type_of_fault,
&error_code, map->no_zero_fill, FALSE, map, vaddr);
if (kr != VM_FAULT_SUCCESS)
vm_object_deallocate(object);
switch (kr) {
case VM_FAULT_SUCCESS:
break;
case VM_FAULT_MEMORY_SHORTAGE:
if (vm_page_wait((change_wiring) ?
THREAD_UNINT :
THREAD_ABORTSAFE))
goto RetryFault;
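/* fall thru */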
case VM_FAULT_INTERRUPTED:
kr = KERN_ABORTED;
goto done;
case VM_FAULT_RETRY:
goto RetryFault;
case VM_FAULT_FICTITIOUS_SHORTAGE:
vm_page_more_fictitious();
goto RetryFault;
case VM_FAULT_MEMORY_ERROR:
if (error_code)
kr = error_code;
else
kr = KERN_MEMORY_ERROR;
goto done;
}
m = result_page;
if(m != VM_PAGE_NULL) {
assert((change_wiring && !wired) ?
(top_page == VM_PAGE_NULL) :
((top_page == VM_PAGE_NULL) == (m->object == object)));
}
#define UNLOCK_AND_DEALLOCATE \
MACRO_BEGIN \
vm_fault_cleanup(m->object, top_page); \
vm_object_deallocate(object); \
MACRO_END
#define RELEASE_PAGE(m) \
MACRO_BEGIN \
PAGE_WAKEUP_DONE(m); \
vm_page_lock_queues(); \
if (!m->active && !m->inactive) \
vm_page_activate(m); \
vm_page_unlock_queues(); \
MACRO_END
if(m != VM_PAGE_NULL) {
old_copy_object = m->object->copy;
vm_object_unlock(m->object);
} else {
old_copy_object = VM_OBJECT_NULL;
}
if ((map != original_map) || !vm_map_verify(map, &version)) {
vm_object_t retry_object;
vm_object_offset_t retry_offset;
vm_prot_t retry_prot;
map = original_map;
vm_map_lock_read(map);
kr = vm_map_lookup_locked(&map, vaddr,
fault_type & ~VM_PROT_WRITE, &version,
&retry_object, &retry_offset, &retry_prot,
&wired, &behavior, &lo_offset, &hi_offset,
&pmap_map);
pmap = pmap_map->pmap;
if (kr != KERN_SUCCESS) {
vm_map_unlock_read(map);
if(m != VM_PAGE_NULL) {
vm_object_lock(m->object);
RELEASE_PAGE(m);
UNLOCK_AND_DEALLOCATE;
} else {
vm_object_deallocate(object);
}
goto done;
}
vm_object_unlock(retry_object);
if(m != VM_PAGE_NULL) {
vm_object_lock(m->object);
} else {
vm_object_lock(object);
}
if ((retry_object != object) ||
(retry_offset != offset)) {
vm_map_unlock_read(map);
if(pmap_map != map)
vm_map_unlock(pmap_map);
if(m != VM_PAGE_NULL) {
RELEASE_PAGE(m);
UNLOCK_AND_DEALLOCATE;
} else {
vm_object_deallocate(object);
}
goto RetryFault;
}
prot &= retry_prot;
if(m != VM_PAGE_NULL) {
vm_object_unlock(m->object);
} else {
vm_object_unlock(object);
}
}
if(m != VM_PAGE_NULL) {
vm_object_lock(m->object);
} else {
vm_object_lock(object);
}
if(m != VM_PAGE_NULL) {
if (m->object->copy != old_copy_object)
prot &= ~VM_PROT_WRITE;
}
if (wired && (fault_type != (prot|VM_PROT_WRITE))) {
vm_map_verify_done(map, &version);
if(pmap_map != map)
vm_map_unlock(pmap_map);
if(m != VM_PAGE_NULL) {
RELEASE_PAGE(m);
UNLOCK_AND_DEALLOCATE;
} else {
vm_object_deallocate(object);
}
goto RetryFault;
}
if (m != VM_PAGE_NULL) {
if (m->no_isync == TRUE) {
pmap_sync_caches_phys(m->phys_page);
if (type_of_fault == DBG_CACHE_HIT_FAULT) {
VM_STAT(pageins++);
current_task()->pageins++;
type_of_fault = DBG_PAGEIN_FAULT;
}
m->no_isync = FALSE;
}
cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
if(caller_pmap) {
PMAP_ENTER(caller_pmap,
caller_pmap_addr, m,
prot, cache_attr, wired);
} else {
PMAP_ENTER(pmap, vaddr, m,
prot, cache_attr, wired);
}
if (m->object->private) {
write_startup_file =
vm_fault_tws_insert(map, pmap_map, vaddr,
m->object, m->offset);
}
} else {
#ifndef i386
int memattr;
vm_map_entry_t entry;
vm_offset_t laddr;
vm_offset_t ldelta, hdelta;
if((full_fault_type & VM_PROT_EXECUTE) &&
(pmap_canExecute((ppnum_t)
(object->shadow_offset >> 12)) < 1)) {
vm_map_verify_done(map, &version);
if(pmap_map != map)
vm_map_unlock(pmap_map);
vm_fault_cleanup(object, top_page);
vm_object_deallocate(object);
kr = KERN_PROTECTION_FAILURE;
goto done;
}
if(pmap_map != map) {
vm_map_unlock(pmap_map);
}
if (original_map != map) {
vm_map_unlock_read(map);
vm_map_lock_read(original_map);
map = original_map;
}
pmap_map = map;
laddr = vaddr;
hdelta = 0xFFFFF000;
ldelta = 0xFFFFF000;
while(vm_map_lookup_entry(map, laddr, &entry)) {
if(ldelta > (laddr - entry->vme_start))
ldelta = laddr - entry->vme_start;
if(hdelta > (entry->vme_end - laddr))
hdelta = entry->vme_end - laddr;
if(entry->is_sub_map) {
laddr = (laddr - entry->vme_start)
+ entry->offset;
vm_map_lock_read(entry->object.sub_map);
if(map != pmap_map)
vm_map_unlock_read(map);
if(entry->use_pmap) {
vm_map_unlock_read(pmap_map);
pmap_map = entry->object.sub_map;
}
map = entry->object.sub_map;
} else {
break;
}
}
if(vm_map_lookup_entry(map, laddr, &entry) &&
(entry->object.vm_object != NULL) &&
(entry->object.vm_object == object)) {
if(caller_pmap) {
pmap_map_block(caller_pmap,
(addr64_t)(caller_pmap_addr - ldelta),
(((vm_offset_t)
(entry->object.vm_object->shadow_offset))
+ entry->offset +
(laddr - entry->vme_start)
- ldelta)>>12,
ldelta + hdelta, prot,
(VM_WIMG_MASK & (int)object->wimg_bits), 0);
} else {
pmap_map_block(pmap_map->pmap,
(addr64_t)(vaddr - ldelta),
(((vm_offset_t)
(entry->object.vm_object->shadow_offset))
+ entry->offset +
(laddr - entry->vme_start) - ldelta)>>12,
ldelta + hdelta, prot,
(VM_WIMG_MASK & (int)object->wimg_bits), 0);
}
}
#else
#ifdef notyet
if(caller_pmap) {
pmap_enter(caller_pmap, caller_pmap_addr,
object->shadow_offset>>12, prot, 0, TRUE);
} else {
pmap_enter(pmap, vaddr,
object->shadow_offset>>12, prot, 0, TRUE);
}
#endif
#endif
}
if(m != VM_PAGE_NULL) {
vm_page_lock_queues();
if (change_wiring) {
if (wired)
vm_page_wire(m);
else
vm_page_unwire(m);
}
#if VM_FAULT_STATIC_CONFIG
else {
if (!m->active && !m->inactive)
vm_page_activate(m);
m->reference = TRUE;
}
#else
else if (software_reference_bits) {
if (!m->active && !m->inactive)
vm_page_activate(m);
m->reference = TRUE;
} else {
vm_page_activate(m);
}
#endif
vm_page_unlock_queues();
}
vm_map_verify_done(map, &version);
if(pmap_map != map)
vm_map_unlock(pmap_map);
if(m != VM_PAGE_NULL) {
PAGE_WAKEUP_DONE(m);
UNLOCK_AND_DEALLOCATE;
} else {
vm_fault_cleanup(object, top_page);
vm_object_deallocate(object);
}
kr = KERN_SUCCESS;
#undef UNLOCK_AND_DEALLOCATE
#undef RELEASE_PAGE
done:
if(write_startup_file)
tws_send_startup_info(current_task());
if (funnel_set) {
thread_funnel_set( curflock, TRUE);
funnel_set = FALSE;
}
thread_interrupt_level(interruptible_state);
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
vaddr,
type_of_fault & 0xff,
kr,
type_of_fault >> 8,
0);
return(kr);
}
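/*
 *	vm_fault_wire:
 *	Wire down the given map entry, faulting pages in as
 *	needed.  The entry must be marked in_transition.  On
 *	failure, the partially wired range is unwired before
 *	returning.
 */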
kern_return_t
vm_fault_wire(
vm_map_t map,
vm_map_entry_t entry,
pmap_t pmap,
vm_offset_t pmap_addr)
{
register vm_offset_t va;
register vm_offset_t end_addr = entry->vme_end;
register kern_return_t rc;
assert(entry->in_transition);
if ((entry->object.vm_object != NULL) &&
!entry->is_sub_map &&
entry->object.vm_object->phys_contiguous) {
return KERN_SUCCESS;
}
pmap_pageable(pmap, pmap_addr,
pmap_addr + (end_addr - entry->vme_start), FALSE);
for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
if ((rc = vm_fault_wire_fast(
map, va, entry, pmap,
pmap_addr + (va - entry->vme_start)
)) != KERN_SUCCESS) {
rc = vm_fault(map, va, VM_PROT_NONE, TRUE,
(pmap == kernel_pmap) ?
THREAD_UNINT : THREAD_ABORTSAFE,
pmap, pmap_addr + (va - entry->vme_start));
}
if (rc != KERN_SUCCESS) {
struct vm_map_entry tmp_entry = *entry;
tmp_entry.vme_end = va;
vm_fault_unwire(map,
&tmp_entry, FALSE, pmap, pmap_addr);
return rc;
}
}
return KERN_SUCCESS;
}
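/*
 *	vm_fault_unwire:
 *	Unwire the given map entry, unwiring (or freeing, if
 *	"deallocate" is set) each page in the range.
 */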
void
vm_fault_unwire(
vm_map_t map,
vm_map_entry_t entry,
boolean_t deallocate,
pmap_t pmap,
vm_offset_t pmap_addr)
{
register vm_offset_t va;
register vm_offset_t end_addr = entry->vme_end;
vm_object_t object;
object = (entry->is_sub_map)
? VM_OBJECT_NULL : entry->object.vm_object;
for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
pmap_change_wiring(pmap,
pmap_addr + (va - entry->vme_start), FALSE);
if (object == VM_OBJECT_NULL) {
(void) vm_fault(map, va, VM_PROT_NONE,
TRUE, THREAD_UNINT, pmap, pmap_addr);
} else if (object->phys_contiguous) {
continue;
} else {
vm_prot_t prot;
vm_page_t result_page;
vm_page_t top_page;
vm_object_t result_object;
vm_fault_return_t result;
do {
prot = VM_PROT_NONE;
vm_object_lock(object);
vm_object_paging_begin(object);
XPR(XPR_VM_FAULT,
"vm_fault_unwire -> vm_fault_page\n",
0,0,0,0,0);
result = vm_fault_page(object,
entry->offset +
(va - entry->vme_start),
VM_PROT_NONE, TRUE,
THREAD_UNINT,
entry->offset,
entry->offset +
(entry->vme_end
- entry->vme_start),
entry->behavior,
&prot,
&result_page,
&top_page,
(int *)0,
0, map->no_zero_fill,
FALSE, NULL, 0);
} while (result == VM_FAULT_RETRY);
if (result != VM_FAULT_SUCCESS)
panic("vm_fault_unwire: failure");
result_object = result_page->object;
if (deallocate) {
assert(!result_page->fictitious);
pmap_page_protect(result_page->phys_page,
VM_PROT_NONE);
VM_PAGE_FREE(result_page);
} else {
vm_page_lock_queues();
vm_page_unwire(result_page);
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(result_page);
}
vm_fault_cleanup(result_object, top_page);
}
}
pmap_pageable(pmap, pmap_addr,
pmap_addr + (end_addr - entry->vme_start), TRUE);
}
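/*
 *	vm_fault_wire_fast:
 *	Try to wire a page without the overhead of a full fault:
 *	only the trivial resident, non-copy case is handled here;
 *	KERN_FAILURE tells the caller to fall back to vm_fault().
 */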
kern_return_t
vm_fault_wire_fast(
vm_map_t map,
vm_offset_t va,
vm_map_entry_t entry,
pmap_t pmap,
vm_offset_t pmap_addr)
{
vm_object_t object;
vm_object_offset_t offset;
register vm_page_t m;
vm_prot_t prot;
thread_act_t thr_act;
unsigned int cache_attr;
VM_STAT(faults++);
if((thr_act=current_act()) && (thr_act->task != TASK_NULL))
thr_act->task->faults++;
#undef RELEASE_PAGE
#define RELEASE_PAGE(m) { \
PAGE_WAKEUP_DONE(m); \
vm_page_lock_queues(); \
vm_page_unwire(m); \
vm_page_unlock_queues(); \
}
#undef UNLOCK_THINGS
#define UNLOCK_THINGS { \
object->paging_in_progress--; \
vm_object_unlock(object); \
}
#undef UNLOCK_AND_DEALLOCATE
#define UNLOCK_AND_DEALLOCATE { \
UNLOCK_THINGS; \
vm_object_deallocate(object); \
}
#define GIVE_UP { \
UNLOCK_AND_DEALLOCATE; \
return(KERN_FAILURE); \
}
if (entry->is_sub_map)
return(KERN_FAILURE);
object = entry->object.vm_object;
offset = (va - entry->vme_start) + entry->offset;
prot = entry->protection;
vm_object_lock(object);
assert(object->ref_count > 0);
object->ref_count++;
vm_object_res_reference(object);
object->paging_in_progress++;
m = vm_page_lookup(object, offset);
if ((m == VM_PAGE_NULL) || (m->busy) ||
(m->unusual && ( m->error || m->restart || m->absent ||
prot & m->page_lock))) {
GIVE_UP;
}
vm_page_lock_queues();
vm_page_wire(m);
vm_page_unlock_queues();
assert(!m->busy);
m->busy = TRUE;
assert(!m->absent);
if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
RELEASE_PAGE(m);
GIVE_UP;
}
if (m->no_isync == TRUE) {
pmap_sync_caches_phys(m->phys_page);
m->no_isync = FALSE;
}
cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
PMAP_ENTER(pmap, pmap_addr, m, prot, cache_attr, TRUE);
PAGE_WAKEUP_DONE(m);
UNLOCK_AND_DEALLOCATE;
return(KERN_SUCCESS);
}
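/*
 *	vm_fault_copy_cleanup:
 *	Release a page returned by vm_fault_page() on behalf of
 *	vm_fault_copy(): wake it up, reactivate it if necessary,
 *	and clean up the top page.
 */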
void
vm_fault_copy_cleanup(
vm_page_t page,
vm_page_t top_page)
{
vm_object_t object = page->object;
vm_object_lock(object);
PAGE_WAKEUP_DONE(page);
vm_page_lock_queues();
if (!page->active && !page->inactive)
vm_page_activate(page);
vm_page_unlock_queues();
vm_fault_cleanup(object, top_page);
}
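/*
 *	vm_fault_copy_dst_cleanup:
 *	Release a destination page that vm_fault_copy() had
 *	wired down.
 */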
void
vm_fault_copy_dst_cleanup(
vm_page_t page)
{
vm_object_t object;
if (page != VM_PAGE_NULL) {
object = page->object;
vm_object_lock(object);
vm_page_lock_queues();
vm_page_unwire(page);
vm_page_unlock_queues();
vm_object_paging_end(object);
vm_object_unlock(object);
}
}
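/*
 *	Routine:	vm_fault_copy
 *	Purpose:
 *		Copy "*src_size" bytes from "src_object" to
 *		"dst_object", faulting both sides in page by page
 *		and handling unaligned, partial pages.  A null
 *		source object means zero-fill.
 *	Results:
 *		"*src_size" is updated to the number of bytes
 *		actually copied.
 *	In/out conditions:
 *		"dst_version" must be a map version obtained from,
 *		and verifiable against, "dst_map".
 */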
kern_return_t
vm_fault_copy(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_size_t *src_size,
vm_object_t dst_object,
vm_object_offset_t dst_offset,
vm_map_t dst_map,
vm_map_version_t *dst_version,
int interruptible)
{
vm_page_t result_page;
vm_page_t src_page;
vm_page_t src_top_page;
vm_prot_t src_prot;
vm_page_t dst_page;
vm_page_t dst_top_page;
vm_prot_t dst_prot;
vm_size_t amount_left;
vm_object_t old_copy_object;
kern_return_t error = 0;
vm_size_t part_size;
vm_object_offset_t src_lo_offset = trunc_page_64(src_offset);
vm_object_offset_t dst_lo_offset = trunc_page_64(dst_offset);
vm_object_offset_t src_hi_offset = round_page_64(src_offset + *src_size);
vm_object_offset_t dst_hi_offset = round_page_64(dst_offset + *src_size);
#define RETURN(x) \
MACRO_BEGIN \
*src_size -= amount_left; \
MACRO_RETURN(x); \
MACRO_END
amount_left = *src_size;
do {
RetryDestinationFault: ;
dst_prot = VM_PROT_WRITE|VM_PROT_READ;
vm_object_lock(dst_object);
vm_object_paging_begin(dst_object);
XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
switch (vm_fault_page(dst_object,
trunc_page_64(dst_offset),
VM_PROT_WRITE|VM_PROT_READ,
FALSE,
interruptible,
dst_lo_offset,
dst_hi_offset,
VM_BEHAVIOR_SEQUENTIAL,
&dst_prot,
&dst_page,
&dst_top_page,
(int *)0,
&error,
dst_map->no_zero_fill,
FALSE, NULL, 0)) {
case VM_FAULT_SUCCESS:
break;
case VM_FAULT_RETRY:
goto RetryDestinationFault;
case VM_FAULT_MEMORY_SHORTAGE:
if (vm_page_wait(interruptible))
goto RetryDestinationFault;
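/* fall thru */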
case VM_FAULT_INTERRUPTED:
RETURN(MACH_SEND_INTERRUPTED);
case VM_FAULT_FICTITIOUS_SHORTAGE:
vm_page_more_fictitious();
goto RetryDestinationFault;
case VM_FAULT_MEMORY_ERROR:
if (error)
return (error);
else
return(KERN_MEMORY_ERROR);
}
assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
old_copy_object = dst_page->object->copy;
vm_page_lock_queues();
vm_page_wire(dst_page);
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(dst_page);
vm_object_unlock(dst_page->object);
if (dst_top_page != VM_PAGE_NULL) {
vm_object_lock(dst_object);
VM_PAGE_FREE(dst_top_page);
vm_object_paging_end(dst_object);
vm_object_unlock(dst_object);
}
RetrySourceFault: ;
if (src_object == VM_OBJECT_NULL) {
src_page = VM_PAGE_NULL;
result_page = VM_PAGE_NULL;
} else {
vm_object_lock(src_object);
src_page = vm_page_lookup(src_object,
trunc_page_64(src_offset));
if (src_page == dst_page) {
src_prot = dst_prot;
result_page = VM_PAGE_NULL;
} else {
src_prot = VM_PROT_READ;
vm_object_paging_begin(src_object);
XPR(XPR_VM_FAULT,
"vm_fault_copy(2) -> vm_fault_page\n",
0,0,0,0,0);
switch (vm_fault_page(src_object,
trunc_page_64(src_offset),
VM_PROT_READ,
FALSE,
interruptible,
src_lo_offset,
src_hi_offset,
VM_BEHAVIOR_SEQUENTIAL,
&src_prot,
&result_page,
&src_top_page,
(int *)0,
&error,
FALSE,
FALSE, NULL, 0)) {
case VM_FAULT_SUCCESS:
break;
case VM_FAULT_RETRY:
goto RetrySourceFault;
case VM_FAULT_MEMORY_SHORTAGE:
if (vm_page_wait(interruptible))
goto RetrySourceFault;
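/* fall thru */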
case VM_FAULT_INTERRUPTED:
vm_fault_copy_dst_cleanup(dst_page);
RETURN(MACH_SEND_INTERRUPTED);
case VM_FAULT_FICTITIOUS_SHORTAGE:
vm_page_more_fictitious();
goto RetrySourceFault;
case VM_FAULT_MEMORY_ERROR:
vm_fault_copy_dst_cleanup(dst_page);
if (error)
return (error);
else
return(KERN_MEMORY_ERROR);
}
assert((src_top_page == VM_PAGE_NULL) ==
(result_page->object == src_object));
}
assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE);
vm_object_unlock(result_page->object);
}
if (!vm_map_verify(dst_map, dst_version)) {
if (result_page != VM_PAGE_NULL && src_page != dst_page)
vm_fault_copy_cleanup(result_page, src_top_page);
vm_fault_copy_dst_cleanup(dst_page);
break;
}
vm_object_lock(dst_page->object);
if (dst_page->object->copy != old_copy_object) {
vm_object_unlock(dst_page->object);
vm_map_verify_done(dst_map, dst_version);
if (result_page != VM_PAGE_NULL && src_page != dst_page)
vm_fault_copy_cleanup(result_page, src_top_page);
vm_fault_copy_dst_cleanup(dst_page);
break;
}
vm_object_unlock(dst_page->object);
if (!page_aligned(src_offset) ||
!page_aligned(dst_offset) ||
!page_aligned(amount_left)) {
vm_object_offset_t src_po,
dst_po;
src_po = src_offset - trunc_page_64(src_offset);
dst_po = dst_offset - trunc_page_64(dst_offset);
if (dst_po > src_po) {
part_size = PAGE_SIZE - dst_po;
} else {
part_size = PAGE_SIZE - src_po;
}
if (part_size > (amount_left)){
part_size = amount_left;
}
if (result_page == VM_PAGE_NULL) {
vm_page_part_zero_fill(dst_page,
dst_po, part_size);
} else {
vm_page_part_copy(result_page, src_po,
dst_page, dst_po, part_size);
if(!dst_page->dirty){
vm_object_lock(dst_object);
dst_page->dirty = TRUE;
vm_object_unlock(dst_object);
}
}
} else {
part_size = PAGE_SIZE;
if (result_page == VM_PAGE_NULL)
vm_page_zero_fill(dst_page);
else{
vm_page_copy(result_page, dst_page);
if(!dst_page->dirty){
vm_object_lock(dst_object);
dst_page->dirty = TRUE;
vm_object_unlock(dst_object);
}
}
}
vm_map_verify_done(dst_map, dst_version);
if (result_page != VM_PAGE_NULL && src_page != dst_page)
vm_fault_copy_cleanup(result_page, src_top_page);
vm_fault_copy_dst_cleanup(dst_page);
amount_left -= part_size;
src_offset += part_size;
dst_offset += part_size;
} while (amount_left > 0);
RETURN(KERN_SUCCESS);
#undef RETURN
}
#ifdef notdef
vm_fault_return_t
vm_fault_page_overwrite(
register
vm_object_t dst_object,
vm_object_offset_t dst_offset,
vm_page_t *result_page)
{
register
vm_page_t dst_page;
kern_return_t wait_result;
#define interruptible THREAD_UNINT
while (TRUE) {
while ((dst_page = vm_page_lookup(dst_object, dst_offset))
== VM_PAGE_NULL) {
dst_page = vm_page_alloc(dst_object, dst_offset);
if (dst_page == VM_PAGE_NULL) {
vm_object_unlock(dst_object);
VM_PAGE_WAIT();
vm_object_lock(dst_object);
continue;
}
dst_page->overwriting = TRUE;
dst_page->page_lock = VM_PROT_WRITE;
dst_page->absent = TRUE;
dst_page->unusual = TRUE;
dst_object->absent_count++;
break;
#define DISCARD_PAGE \
MACRO_BEGIN \
vm_object_lock(dst_object); \
dst_page = vm_page_lookup(dst_object, dst_offset); \
if ((dst_page != VM_PAGE_NULL) && dst_page->overwriting) \
VM_PAGE_FREE(dst_page); \
vm_object_unlock(dst_object); \
MACRO_END
}
if (dst_page->page_lock & VM_PROT_WRITE) {
if ( ! (dst_page->unlock_request & VM_PROT_WRITE)) {
vm_prot_t u;
kern_return_t rc;
if (!dst_object->pager_ready) {
wait_result = vm_object_assert_wait(dst_object,
VM_OBJECT_EVENT_PAGER_READY,
interruptible);
vm_object_unlock(dst_object);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
if (wait_result != THREAD_AWAKENED) {
DISCARD_PAGE;
return(VM_FAULT_INTERRUPTED);
}
continue;
}
u = dst_page->unlock_request |= VM_PROT_WRITE;
vm_object_unlock(dst_object);
if ((rc = memory_object_data_unlock(
dst_object->pager,
dst_offset + dst_object->paging_offset,
PAGE_SIZE,
u)) != KERN_SUCCESS) {
if (vm_fault_debug)
printf("vm_object_overwrite: memory_object_data_unlock failed\n");
DISCARD_PAGE;
return((rc == MACH_SEND_INTERRUPTED) ?
VM_FAULT_INTERRUPTED :
VM_FAULT_MEMORY_ERROR);
}
vm_object_lock(dst_object);
continue;
}
} else {
if ( ! (dst_page->busy || dst_page->absent ||
dst_page->error || dst_page->restart) )
break;
}
wait_result = PAGE_ASSERT_WAIT(dst_page, interruptible);
vm_object_unlock(dst_object);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
if (wait_result != THREAD_AWAKENED) {
DISCARD_PAGE;
return(VM_FAULT_INTERRUPTED);
}
}
*result_page = dst_page;
return(VM_FAULT_SUCCESS);
#undef interruptible
#undef DISCARD_PAGE
}
#endif
#if VM_FAULT_CLASSIFY
#define VM_FAULT_TYPES_MAX 5
#define VM_FAULT_LEVEL_MAX 8
int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
#define VM_FAULT_TYPE_ZERO_FILL 0
#define VM_FAULT_TYPE_MAP_IN 1
#define VM_FAULT_TYPE_PAGER 2
#define VM_FAULT_TYPE_COPY 3
#define VM_FAULT_TYPE_OTHER 4
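/*
 *	vm_fault_classify:
 *	Debug instrumentation: bucket each fault by type and by
 *	depth in the shadow chain.
 */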
void
vm_fault_classify(vm_object_t object,
vm_object_offset_t offset,
vm_prot_t fault_type)
{
int type, level = 0;
vm_page_t m;
while (TRUE) {
m = vm_page_lookup(object, offset);
if (m != VM_PAGE_NULL) {
if (m->busy || m->error || m->restart || m->absent ||
fault_type & m->page_lock) {
type = VM_FAULT_TYPE_OTHER;
break;
}
if (((fault_type & VM_PROT_WRITE) == 0) ||
((level == 0) && object->copy == VM_OBJECT_NULL)) {
type = VM_FAULT_TYPE_MAP_IN;
break;
}
type = VM_FAULT_TYPE_COPY;
break;
}
else {
if (object->pager_created) {
type = VM_FAULT_TYPE_PAGER;
break;
}
if (object->shadow == VM_OBJECT_NULL) {
type = VM_FAULT_TYPE_ZERO_FILL;
break;
}
offset += object->shadow_offset;
object = object->shadow;
level++;
continue;
}
}
if (level > VM_FAULT_LEVEL_MAX)
level = VM_FAULT_LEVEL_MAX;
vm_fault_stats[type][level] += 1;
return;
}
void
vm_fault_classify_init(void)
{
int type, level;
for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
vm_fault_stats[type][level] = 0;
}
}
return;
}
#endif