/* vm_compressor_pager.c */
#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
#include <mach/upl.h>
#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
/*
 * Forward declarations for the compressor pager's implementations of the
 * Mach memory_object interface.  These are collected into the
 * compressor_pager_ops vtable below; several entry points are stubs that
 * panic if ever invoked (see their definitions).
 */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	memory_object_fault_info_t fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t length,
	__unused vm_sync_t flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t mem_obj,
	__unused boolean_t reclaim_backing_store);
/*
 * Vtable wiring the compressor pager into the generic VM memory-object
 * machinery.  This is a positional initializer: entries must stay in the
 * order declared by struct memory_object_pager_ops.  The trailing string
 * names the pager for debugging.
 */
const struct memory_object_pager_ops compressor_pager_ops = {
	compressor_memory_object_reference,
	compressor_memory_object_deallocate,
	compressor_memory_object_init,
	compressor_memory_object_terminate,
	compressor_memory_object_data_request,
	compressor_memory_object_data_return,
	compressor_memory_object_data_initialize,
	compressor_memory_object_data_unlock,
	compressor_memory_object_synchronize,
	compressor_memory_object_map,
	compressor_memory_object_last_unmap,
	compressor_memory_object_data_reclaim,
	"compressor pager"
};
/*
 * Global (unlocked, best-effort) counters of pager activity; updated by the
 * data_return/data_request paths and the state_clr/state_get accessors.
 */
struct {
	uint64_t data_returns;	/* pages handed to the compressor (puts) */
	uint64_t data_requests;	/* page lookups / probes (gets) */
	uint64_t state_clr;	/* vm_compressor_pager_state_clr() calls */
	uint64_t state_get;	/* vm_compressor_pager_state_get() calls */
} compressor_pager_stats;
/* Per-page handle into the compressor; 0 means "no compressed copy". */
typedef int compressor_slot_t;

/*
 * One pager per VM object backed by the compressor.  The slot table maps a
 * page index (offset / PAGE_SIZE) to its compressor_slot_t.  Small objects
 * (one chunk's worth of slots or fewer) use a single direct array
 * (cpgr_dslots); larger objects use an array of lazily-allocated chunk
 * pointers (cpgr_islots).  See COMPRESSOR_SLOTS_PER_CHUNK below.
 */
typedef struct compressor_pager {
	struct ipc_object_header cpgr_pager_header;	/* first field — presumably overlays the Mach object header; confirm */
	memory_object_pager_ops_t cpgr_pager_ops;	/* always &compressor_pager_ops */
	memory_object_control_t cpgr_control;		/* VM object control, set in ..._init() */
	lck_mtx_t cpgr_lock;				/* protects refcount, control, islots entries */
	unsigned int cpgr_references;			/* refcount; pager freed when it hits 0 */
	unsigned int cpgr_num_slots;			/* object size in pages; valid indices 0..num_slots-1 */
	union {
		compressor_slot_t *cpgr_dslots;		/* direct slots (single-chunk objects) */
		compressor_slot_t **cpgr_islots;	/* indirect: array of chunk pointers */
	} cpgr_slots;
} *compressor_pager_t;
/*
 * Recover the compressor_pager_t from a memory_object_t, yielding NULL when
 * the object does not belong to this pager (its ops vtable isn't ours).
 * NOTE(review): most callers do not check for NULL — a foreign mem_obj would
 * crash on the first dereference.
 */
#define compressor_pager_lookup(_mem_obj_, _cpgr_) \
	MACRO_BEGIN \
	if (_mem_obj_ == NULL || \
	_mem_obj_->mo_pager_ops != &compressor_pager_ops) { \
	_cpgr_ = NULL; \
	} else { \
	_cpgr_ = (compressor_pager_t) _mem_obj_; \
	} \
	MACRO_END
/* Allocation zone for struct compressor_pager (set up in vm_compressor_pager_init). */
zone_t compressor_pager_zone;

/* Lock group/attributes shared by every pager's cpgr_lock. */
lck_grp_t compressor_pager_lck_grp;
lck_grp_attr_t compressor_pager_lck_grp_attr;
lck_attr_t compressor_pager_lck_attr;

/* Convenience wrappers around the per-pager mutex. */
#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

/*
 * Slot chunks: 512 bytes of compressor_slot_t per chunk.  Objects needing
 * more than one chunk use the indirect (cpgr_islots) layout.
 */
#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))

/* Free every occupied slot in a chunk (does not free the chunk itself). */
void compressor_pager_slots_chunk_free(compressor_slot_t *chunk, int num_slots);
/* Map a byte offset to its slot pointer; optionally allocate the chunk. */
void compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	uint32_t offset,
	compressor_slot_t **slot_pp);
/*
 * memory_object_init() handler: attach the VM object's control port to this
 * pager.  May only be called once per pager (a second call panics).
 *
 * Takes a reference on `control` which is released in ..._terminate().
 */
kern_return_t
compressor_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		/*
		 * mem_obj is not one of ours: fail loudly instead of
		 * dereferencing a NULL pager below.
		 */
		panic("compressor_memory_object_init: bad memory object");
	}
	compressor_pager_lock(pager);

	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_init: bad request");
	pager->cpgr_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
/*
 * memory_object_synchronize() handler: there is nothing to flush here, so
 * acknowledge completion of the requested range immediately.
 */
kern_return_t
compressor_memory_object_synchronize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t length,
	__unused vm_sync_t flags)
{
	compressor_pager_t cpgr;

	compressor_pager_lookup(mem_obj, cpgr);

	memory_object_synchronize_completed(cpgr->cpgr_control, offset, length);

	return KERN_SUCCESS;
}
/*
 * memory_object_map() handler: compressor-backed objects are never mapped
 * directly, so this entry point must not be reached.
 */
kern_return_t
compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	panic("compressor_memory_object_map");

	/* not reached */
	return KERN_FAILURE;
}
/*
 * memory_object_last_unmap() handler: never expected, since these objects
 * are never mapped in the first place.
 */
kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t mem_obj)
{
	panic("compressor_memory_object_last_unmap");

	/* not reached */
	return KERN_FAILURE;
}
/*
 * memory_object_data_reclaim() handler: unsupported for compressor pagers;
 * reaching this is a bug.
 */
kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t mem_obj,
	__unused boolean_t reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");

	/* not reached */
	return KERN_FAILURE;
}
/*
 * memory_object_terminate() handler: detach the VM object control from the
 * pager and drop the reference taken on it in ..._init().  The pager itself
 * stays alive until its own refcount drops (see ..._deallocate()).
 */
kern_return_t
compressor_memory_object_terminate(
	memory_object_t mem_obj)
{
	compressor_pager_t cpgr;
	memory_object_control_t ctrl;

	compressor_pager_lookup(mem_obj, cpgr);

	/* detach under the pager lock, release the reference outside it */
	compressor_pager_lock(cpgr);
	ctrl = cpgr->cpgr_control;
	cpgr->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;
	compressor_pager_unlock(cpgr);

	memory_object_control_deallocate(ctrl);

	return KERN_SUCCESS;
}
/*
 * memory_object_reference() handler: take one more reference on the pager.
 * A foreign or NULL memory object is silently ignored.
 */
void
compressor_memory_object_reference(
	memory_object_t mem_obj)
{
	compressor_pager_t cpgr;

	compressor_pager_lookup(mem_obj, cpgr);
	if (cpgr == NULL)
		return;

	compressor_pager_lock(cpgr);
	/* a zero refcount would mean someone referenced a dead pager */
	assert(cpgr->cpgr_references > 0);
	cpgr->cpgr_references++;
	compressor_pager_unlock(cpgr);
}
/*
 * memory_object_deallocate() handler: drop one reference; on the last one,
 * tear the pager down — free every compressed page, the slot table(s), the
 * lock, and finally the pager structure itself.
 *
 * The teardown runs after dropping the lock: we hold the last reference, so
 * no one else can reach the pager any more.
 */
void
compressor_memory_object_deallocate(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		/* not the last reference: nothing more to do */
		compressor_pager_unlock(pager);
		return;
	}

	/* the control must have been detached by ..._terminate() already */
	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_deallocate(): bad request");
	compressor_pager_unlock(pager);

	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	/* same chunk-count computation as in compressor_memory_object_create() */
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK -1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* indirect layout: free each allocated chunk, then the pointer array */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				/* release the compressed pages before freeing the chunk */
				compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
			num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else {
		/* direct layout: single array sized by cpgr_num_slots */
		chunk = pager->cpgr_slots.cpgr_dslots;
		compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
			(pager->cpgr_num_slots *
			sizeof (pager->cpgr_slots.cpgr_dslots[0])));
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
/*
 * memory_object_data_request() handler.  Only zero-length "probe" requests
 * are supported: the caller is asking whether the compressor holds the page
 * at `offset`.  Returns KERN_SUCCESS if a compressed copy exists,
 * KERN_FAILURE otherwise.
 */
kern_return_t
compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.data_requests++;

	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
		panic("compressor_memory_object_data_request(): bad alignment");
	assert((uint32_t) offset == offset);

	compressor_pager_lookup(mem_obj, pager);

	if (length != 0) {
		/* real data requests are not implemented: probes only */
		panic("compressor: data_request");
	}

	/*
	 * Bounds-check BEFORE the slot lookup.  Valid page indices are
	 * 0 .. cpgr_num_slots-1, so the comparison must be ">=": the old
	 * ">" let the one-past-the-end page through to the slot table.
	 */
	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		return KERN_FAILURE;
	}

	compressor_pager_slot_lookup(pager, FALSE, (uint32_t) offset, &slot_p);
	if (slot_p == NULL || *slot_p == 0) {
		/* the compressor has no copy of this page */
		kr = KERN_FAILURE;
	} else {
		/* compressed copy exists */
		kr = KERN_SUCCESS;
	}

	return kr;
}
/*
 * memory_object_data_initialize() handler: not implemented — any non-empty
 * range panics on its first page.  An empty range (size == 0) is a no-op
 * and returns KERN_SUCCESS.
 */
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size)
{
	compressor_pager_t cpgr;
	memory_object_offset_t off;

	compressor_pager_lookup(mem_obj, cpgr);
	compressor_pager_lock(cpgr);

	/* walk the range page by page (panics immediately if non-empty) */
	for (off = offset; off < offset + size; off += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(cpgr);

	return KERN_SUCCESS;
}
/*
 * memory_object_data_unlock() handler: never expected for compressor-backed
 * objects; reaching this is a bug.
 */
kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	panic("compressor_memory_object_data_unlock()");

	/* not reached */
	return KERN_FAILURE;
}
/*
 * memory_object_data_return() handler: pages are handed to the compressor
 * via vm_compressor_pager_put(), not through this interface — reaching it
 * is a bug.
 */
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("compressor: data_return");

	/* not reached */
	return KERN_FAILURE;
}
/*
 * Create a compressor pager for an object of `new_size` bytes and return it
 * as a memory_object_t in *new_mem_obj.
 *
 * Returns KERN_INVALID_ARGUMENT if new_size doesn't fit in 32 bits,
 * KERN_RESOURCE_SHORTAGE if any allocation fails (partially-built pager is
 * torn down), KERN_SUCCESS otherwise.
 */
kern_return_t
compressor_memory_object_create(
	vm_size_t new_size,
	memory_object_t *new_mem_obj)
{
	compressor_pager_t pager;
	int num_chunks;

	if ((uint32_t) new_size != new_size) {
		/* object too large for the 32-bit slot-count bookkeeping */
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t) (new_size / PAGE_SIZE);

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* indirect layout: array of (initially NULL) chunk pointers */
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		if (pager->cpgr_slots.cpgr_islots == NULL) {
			/* don't bzero() / hand out a pager with no slot table */
			compressor_pager_lock_destroy(pager);
			zfree(compressor_pager_zone, pager);
			return KERN_RESOURCE_SHORTAGE;
		}
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
	} else {
		/* direct layout: one flat slot array */
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
		if (pager->cpgr_slots.cpgr_dslots == NULL) {
			compressor_pager_lock_destroy(pager);
			zfree(compressor_pager_zone, pager);
			return KERN_RESOURCE_SHORTAGE;
		}
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
	}

	pager->cpgr_pager_ops = &compressor_pager_ops;
	pager->cpgr_pager_header.io_bits = IKOT_MEMORY_OBJECT;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
/*
 * Release every compressed page recorded in a chunk of slots.  The chunk
 * memory itself is NOT freed here — the caller owns it.
 *
 * (The old "#if 00" branch calling vm_compressor_free(chunk, num_slots) was
 * dead code with the wrong arity; removed.)
 */
void
compressor_pager_slots_chunk_free(
	compressor_slot_t *chunk,
	int num_slots)
{
	int i;

	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0) {
			/* slot holds a compressed page: give it back */
			vm_compressor_free(&chunk[i]);
		}
	}
}
/*
 * Translate a byte offset into a pointer to its compressor slot.
 *
 * Sets *slot_pp to the slot address, or NULL when the offset is out of
 * range, or when the indirect chunk holding it does not exist and either
 * do_alloc is FALSE or the chunk allocation failed.
 *
 * With do_alloc, chunk installation is serialized under the pager lock;
 * a racing loser frees its redundant chunk.
 */
void
compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	uint32_t offset,
	compressor_slot_t **slot_pp)
{
	int num_chunks;
	uint32_t page_num;
	int chunk_idx;
	int slot_idx;
	compressor_slot_t *chunk;
	compressor_slot_t *t_chunk;

	page_num = offset / PAGE_SIZE;
	if (page_num >= pager->cpgr_num_slots) {
		/*
		 * Out of range: valid indices are 0 .. cpgr_num_slots-1.
		 * (Was ">", which let page_num == cpgr_num_slots index one
		 * past the end of the dslots array / islots pointer array.)
		 */
		*slot_pp = NULL;
		return;
	}

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* indirect layout: locate (and possibly create) the chunk */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			if (t_chunk != NULL) {
				bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

				compressor_pager_lock(pager);
				if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
					/* we won the race: install our chunk */
					chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
					t_chunk = NULL;
				}
				compressor_pager_unlock(pager);

				if (t_chunk) {
					/* lost the race: discard the spare */
					kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
			/* on kalloc failure, chunk stays NULL and we report no slot */
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else {
		/* direct layout: index straight into the flat array */
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	}
}
/*
 * One-time bootstrap of the compressor pager subsystem: lock group/attrs,
 * the pager allocation zone, and finally the compressor itself.
 * Call order matters — the zone and locks must exist before
 * vm_compressor_init() can create pagers.
 */
void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

	/* zone sized for up to 10000 pagers, filled 8192 bytes at a time */
	compressor_pager_zone = zinit(sizeof (struct compressor_pager),
		10000 * sizeof (struct compressor_pager),
		8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}
/*
 * Hand the page at (mem_obj, offset), currently resident at physical page
 * `ppnum`, to the compressor.  Any previously compressed copy of the page
 * is released first.
 *
 * Returns KERN_RESOURCE_SHORTAGE if the compressor could not take the page,
 * KERN_SUCCESS otherwise.  Panics if the offset is outside the object.
 */
kern_return_t
vm_compressor_pager_put(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	void **current_chead,
	char *scratch_buf)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	compressor_pager_stats.data_returns++;

	compressor_pager_lookup(mem_obj, pager);

	assert((upl_offset_t) offset == offset);

	/*
	 * Bounds-check before the lookup so an out-of-range offset panics
	 * here rather than ever yielding a pointer past the slot array.
	 */
	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		panic("compressor_pager_put: out of range");
	}

	compressor_pager_slot_lookup(pager, TRUE, (uint32_t) offset, &slot_p);
	if (slot_p == NULL) {
		/* out of range (or no chunk could be provided) */
		panic("compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/* already compressed: drop the stale copy before overwriting */
		vm_compressor_free(slot_p);
	}
	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf))
		return (KERN_RESOURCE_SHORTAGE);

	return (KERN_SUCCESS);
}
/*
 * Retrieve the compressed page at (mem_obj, offset) into physical page
 * `ppnum`.  *my_fault_type reports whether this was an in-memory
 * decompression (DBG_COMPRESSOR_FAULT) or required a swap-in
 * (DBG_COMPRESSOR_SWAPIN_FAULT).
 *
 * Returns KERN_MEMORY_FAILURE for out-of-range offsets or decompression
 * failure, KERN_MEMORY_ERROR when no compressed copy exists, KERN_FAILURE
 * when C_DONT_BLOCK prevented the operation, KERN_SUCCESS otherwise.
 */
kern_return_t
vm_compressor_pager_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	int *my_fault_type,
	int flags)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.data_requests++;

	assert((uint32_t) offset == offset);

	compressor_pager_lookup(mem_obj, pager);

	/*
	 * Bounds-check before the slot lookup; ">=" because valid page
	 * indices are 0 .. cpgr_num_slots-1 (the old ">" let the
	 * one-past-the-end page reach the slot table).
	 */
	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		slot_p = NULL;
		kr = KERN_MEMORY_FAILURE;
	} else {
		compressor_pager_slot_lookup(pager, FALSE, (uint32_t) offset, &slot_p);
		if (slot_p == NULL || *slot_p == 0) {
			/* compressor has no copy of this page */
			kr = KERN_MEMORY_ERROR;
		} else {
			kr = KERN_SUCCESS;
		}
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int retval;

		/* decompress (or swap in) the page */
		if ((retval = vm_compressor_get(ppnum, slot_p, flags)) == -1)
			kr = KERN_MEMORY_FAILURE;
		else if (retval == 1)
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		else if (retval == -2) {
			/* would have blocked and the caller forbade it */
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}
	return kr;
}
/*
 * Discard any compressed copy of the page at (mem_obj, offset).
 * A no-op when the offset is out of range or the slot is empty.
 */
void
vm_compressor_pager_state_clr(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t cpgr;
	compressor_slot_t *slot;

	compressor_pager_stats.state_clr++;

	assert((uint32_t) offset == offset);

	compressor_pager_lookup(mem_obj, cpgr);

	compressor_pager_slot_lookup(cpgr, FALSE, (uint32_t) offset, &slot);
	if (slot && *slot != 0) {
		/* release the compressed page and zero the slot */
		vm_compressor_free(slot);
	}
}
/*
 * Report whether the compressor holds a copy of the page at
 * (mem_obj, offset): VM_EXTERNAL_STATE_EXISTS if so,
 * VM_EXTERNAL_STATE_ABSENT otherwise (including out-of-range offsets).
 */
vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	compressor_pager_stats.state_get++;

	assert((uint32_t) offset == offset);

	compressor_pager_lookup(mem_obj, pager);

	/*
	 * Bounds-check before the slot lookup; ">=" because valid page
	 * indices are 0 .. cpgr_num_slots-1 (the old ">" let the
	 * one-past-the-end page reach the slot table).
	 */
	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_slot_lookup(pager, FALSE, (uint32_t) offset, &slot_p);
	if (slot_p == NULL || *slot_p == 0) {
		/* no compressed copy */
		return VM_EXTERNAL_STATE_ABSENT;
	}
	return VM_EXTERNAL_STATE_EXISTS;
}