/* vm_shared_memory_server.c */
#include <debug.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
#if DEBUG
/* Runtime-settable switches for load-file-server debug output. */
int lsf_debug = 0;
int lsf_alloc_debug = 0;
/* LSF_DEBUG((fmt, ...)): kprintf the args when lsf_debug is non-zero. */
#define LSF_DEBUG(args) \
MACRO_BEGIN \
if (lsf_debug) { \
kprintf args; \
} \
MACRO_END
/* LSF_ALLOC_DEBUG((fmt, ...)): kprintf the args when lsf_alloc_debug is non-zero. */
#define LSF_ALLOC_DEBUG(args) \
MACRO_BEGIN \
if (lsf_alloc_debug) { \
kprintf args; \
} \
MACRO_END
#else
/* Non-DEBUG builds compile the debug macros away entirely. */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif
/*
 * Forward declarations for the file-local helpers below.
 */

/* Create a named-entry submap of 'size' bytes, returned as a send right. */
static kern_return_t
shared_region_object_create(
    vm_size_t size,
    ipc_port_t *object_handle);

/* Drop a reference on a shared region chain; lock flags select which
 * locks the caller already holds (see the definition). */
static kern_return_t
shared_region_mapping_dealloc_lock(
    shared_region_mapping_t shared_region,
    int need_sfh_lock,
    int need_drl_lock);

/* Create the text/data region submaps and the mapping-info table. */
static kern_return_t
shared_file_init(
    ipc_port_t *text_region_handle,
    vm_size_t text_region_size,
    ipc_port_t *data_region_handle,
    vm_size_t data_region_size,
    vm_offset_t *file_mapping_array);

/* Lazily initialize the shared-file hash table header. */
static kern_return_t
shared_file_header_init(
    shared_file_info_t *shared_file_header);

/* Look up a previously loaded file in the hash table. */
static load_struct_t *
lsf_hash_lookup(
    queue_head_t *hash_table,
    void *file_object,
    vm_offset_t recognizableOffset,
    int size,
    boolean_t regular,
    boolean_t alternate,
    shared_region_task_mappings_t sm_info);

/* Remove a file's entry from the hash table. */
static load_struct_t *
lsf_hash_delete(
    void *file_object,
    vm_offset_t base_offset,
    shared_region_task_mappings_t sm_info);

/* Insert an entry into the hash table. */
static void
lsf_hash_insert(
    load_struct_t *entry,
    shared_region_task_mappings_t sm_info);

/* Choose a slid base offset for a set of mappings. */
static kern_return_t
lsf_slide(
    unsigned int map_cnt,
    struct shared_file_mapping_np *mappings,
    shared_region_task_mappings_t sm_info,
    mach_vm_offset_t *base_offset_p);

/* Establish the mappings of a file into the shared region. */
static kern_return_t
lsf_map(
    struct shared_file_mapping_np *mappings,
    int map_cnt,
    void *file_control,
    memory_object_size_t file_size,
    shared_region_task_mappings_t sm_info,
    mach_vm_offset_t base_offset,
    mach_vm_offset_t *slide_p);

/* Tear down a file's mappings (and deallocate its bookkeeping). */
static void
lsf_unload(
    void *file_object,
    vm_offset_t base_offset,
    shared_region_task_mappings_t sm_info);

static void
lsf_deallocate(
    void *file_object,
    vm_offset_t base_offset,
    shared_region_task_mappings_t sm_info,
    boolean_t unload);

/* Hash bucket index: low 24 bits of the file object pointer, mod table size. */
#define load_file_hash(file_object, size) \
((((natural_t)file_object) & 0xffffff) % size)
/* Handles and addresses for the global (default) shared text/data regions. */
vm_offset_t shared_file_text_region;
vm_offset_t shared_file_data_region;
ipc_port_t shared_text_region_handle;
ipc_port_t shared_data_region_handle;
/* Kernel address of the shared-file mapping bookkeeping table (0 until
 * the first shared_file_init()). */
vm_offset_t shared_file_mapping_array = 0;
/* Head of the per-environment default shared region list, protected by
 * default_regions_list_lock_data. */
shared_region_mapping_t default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)
#define default_regions_list_lock() \
mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
mutex_unlock(&default_regions_list_lock_data)
/* Memory entry handle covering the mapping table, shared with data regions. */
ipc_port_t sfma_handle = NULL;
/* Zone feeding load_file_ele allocations; crammed from wired table pages. */
zone_t lsf_zone;
int shared_file_available_hash_ele;
/* 32- and 64-bit commpage region state, set up at boot. */
ipc_port_t com_region_handle32 = NULL;
ipc_port_t com_region_handle64 = NULL;
vm_map_t com_region_map32 = NULL;
vm_map_t com_region_map64 = NULL;
vm_size_t com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t com_mapping_resource = NULL;
#if DEBUG
int shared_region_debug = 0;
#endif
/*
 * vm_get_shared_region:
 * Return the shared region currently attached to 'task' (may be NULL).
 * No reference is taken on behalf of the caller.
 */
kern_return_t
vm_get_shared_region(
    task_t task,
    shared_region_mapping_t *shared_region)
{
    shared_region_mapping_t region;

    region = (shared_region_mapping_t) task->system_shared_region;
    if (region != NULL) {
        /* A region attached to a task must hold at least one ref. */
        assert(region->ref_count > 0);
    }
    *shared_region = region;
    SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
        task, *shared_region));
    return KERN_SUCCESS;
}
/*
 * vm_set_shared_region:
 * Attach 'shared_region' (which may be NULL) to 'task'. The caller is
 * responsible for reference accounting on both the old and new regions.
 */
kern_return_t
vm_set_shared_region(
    task_t task,
    shared_region_mapping_t shared_region)
{
    SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
        "shared_region=%p)\n",
        task, shared_region));
    if (shared_region != NULL) {
        /* Sanity: never attach a dead region. */
        assert(shared_region->ref_count > 0);
    }
    task->system_shared_region = shared_region;
    return KERN_SUCCESS;
}
/*
 * shared_region_object_chain_detached:
 * Mark 'target_region' as standalone (no longer chained to a parent
 * object chain), under the region's own lock.
 */
void
shared_region_object_chain_detached(
    shared_region_mapping_t target_region)
{
    shared_region_mapping_lock(target_region);
    target_region->flags |= SHARED_REGION_STANDALONE;
    shared_region_mapping_unlock(target_region);
}
/*
 * shared_region_object_chain_attach:
 * Link 'target_region' onto the object chain headed by
 * 'object_chain_region'. Fails if the target already has a chain.
 *
 * Fix: the kalloc() result was dereferenced without a NULL check;
 * a failed allocation would have been stored in target_region->object_chain
 * and then dereferenced.
 */
kern_return_t
shared_region_object_chain_attach(
    shared_region_mapping_t target_region,
    shared_region_mapping_t object_chain_region)
{
    shared_region_object_chain_t object_ele;

    SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
        "target_region=%p, object_chain_region=%p\n",
        target_region, object_chain_region));
    assert(target_region->ref_count > 0);
    assert(object_chain_region->ref_count > 0);
    if(target_region->object_chain)
        return KERN_FAILURE;
    object_ele = (shared_region_object_chain_t)
        kalloc(sizeof (struct shared_region_object_chain));
    if (object_ele == NULL) {
        /* Allocation failed: leave target_region unmodified. */
        return KERN_FAILURE;
    }
    /* NOTE(review): only object_chain_region is locked here while
     * target_region is also written — presumably the caller serializes
     * access to target_region; verify at call sites. */
    shared_region_mapping_lock(object_chain_region);
    target_region->object_chain = object_ele;
    object_ele->object_chain_region = object_chain_region;
    object_ele->next = object_chain_region->object_chain;
    object_ele->depth = object_chain_region->depth;
    object_chain_region->depth++;
    target_region->alternate_next = object_chain_region->alternate_next;
    shared_region_mapping_unlock(object_chain_region);
    return KERN_SUCCESS;
}
/*
 * shared_region_mapping_create:
 * Allocate and initialize a shared_region_mapping structure describing
 * a text/data region pair. On success *shared_region points at the new
 * structure (ref_count == 1); on allocation failure *shared_region is
 * NULL and KERN_FAILURE is returned.
 */
kern_return_t
shared_region_mapping_create(
    ipc_port_t text_region,
    vm_size_t text_size,
    ipc_port_t data_region,
    vm_size_t data_size,
    vm_offset_t region_mappings,
    vm_offset_t client_base,
    shared_region_mapping_t *shared_region,
    vm_offset_t alt_base,
    vm_offset_t alt_next)
{
    shared_region_mapping_t new_region;

    SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
    new_region = (shared_region_mapping_t)
        kalloc(sizeof (struct shared_region_mapping));
    *shared_region = new_region;
    if(new_region == NULL) {
        SHARED_REGION_DEBUG(("shared_region_mapping_create: "
            "failure\n"));
        return KERN_FAILURE;
    }
    shared_region_mapping_lock_init(new_region);
    new_region->text_region = text_region;
    new_region->text_size = text_size;
    new_region->fs_base = ENV_DEFAULT_ROOT;
    new_region->system = cpu_type();
    new_region->data_region = data_region;
    new_region->data_size = data_size;
    new_region->region_mappings = region_mappings;
    new_region->client_base = client_base;
    new_region->ref_count = 1;              /* caller's reference */
    new_region->next = NULL;
    new_region->object_chain = NULL;
    new_region->self = new_region;
    new_region->flags = 0;
    new_region->depth = 0;
    new_region->default_env_list = NULL;
    new_region->alternate_base = alt_base;
    new_region->alternate_next = alt_next;
    SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
        *shared_region));
    return KERN_SUCCESS;
}
/*
 * shared_region_mapping_info:
 * Snapshot all descriptive fields of 'shared_region' into the caller's
 * out-parameters, under the region lock.
 *
 * Fix: the function is declared kern_return_t but had no return
 * statement — using the (indeterminate) return value at a call site is
 * undefined behavior. It now returns KERN_SUCCESS.
 */
kern_return_t
shared_region_mapping_info(
    shared_region_mapping_t shared_region,
    ipc_port_t *text_region,
    vm_size_t *text_size,
    ipc_port_t *data_region,
    vm_size_t *data_size,
    vm_offset_t *region_mappings,
    vm_offset_t *client_base,
    vm_offset_t *alt_base,
    vm_offset_t *alt_next,
    unsigned int *fs_base,
    unsigned int *system,
    int *flags,
    shared_region_mapping_t *next)
{
    shared_region_mapping_lock(shared_region);
    SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
        shared_region));
    assert(shared_region->ref_count > 0);
    *text_region = shared_region->text_region;
    *text_size = shared_region->text_size;
    *data_region = shared_region->data_region;
    *data_size = shared_region->data_size;
    *region_mappings = shared_region->region_mappings;
    *client_base = shared_region->client_base;
    *alt_base = shared_region->alternate_base;
    *alt_next = shared_region->alternate_next;
    *flags = shared_region->flags;
    *fs_base = shared_region->fs_base;
    *system = shared_region->system;
    *next = shared_region->next;
    shared_region_mapping_unlock(shared_region);
    return KERN_SUCCESS;
}
/*
 * shared_region_mapping_ref:
 * Atomically take an additional reference on 'shared_region'.
 * A NULL region is tolerated and treated as success.
 */
kern_return_t
shared_region_mapping_ref(
    shared_region_mapping_t shared_region)
{
    SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
        "ref_count=%d + 1\n",
        shared_region,
        shared_region ? shared_region->ref_count : 0));
    if (shared_region != NULL) {
        /* Must already be live before bumping. */
        assert(shared_region->ref_count > 0);
        hw_atomic_add(&shared_region->ref_count, 1);
    }
    return KERN_SUCCESS;
}
/*
 * shared_region_mapping_dealloc_lock:
 * Drop one reference on 'shared_region'. If the count hits zero, tear
 * the region down (unmap its file mappings, remove pmap entries for the
 * client range, release the region port send rights, free the object
 * chain link and the structure itself) and continue iteratively with the
 * chained parent region, which inherited a reference.
 *
 * need_sfh_lock / need_drl_lock indicate whether this path still needs
 * to take the shared-file-header lock and default-regions-list lock
 * respectively (callers that already hold them pass 0).
 */
static kern_return_t
shared_region_mapping_dealloc_lock(
    shared_region_mapping_t shared_region,
    int need_sfh_lock,
    int need_drl_lock)
{
    struct shared_region_task_mappings sm_info;
    shared_region_mapping_t next = NULL;
    int ref_count;

    SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
    "(shared_region=%p,%d,%d) ref_count=%d\n",
    shared_region, need_sfh_lock, need_drl_lock,
    shared_region ? shared_region->ref_count : 0));
    /* Loop instead of recursing: each destroyed region hands us its
     * object-chain parent as the next candidate. */
    while (shared_region) {
    SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
    "ref_count=%d\n",
    shared_region, shared_region->ref_count));
    assert(shared_region->ref_count > 0);
    if ((ref_count =
    hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
    /* Last reference gone: dismantle the region. */
    shared_region_mapping_lock(shared_region);
    /* Snapshot the fields lsf_remove_regions_mappings_lock needs. */
    sm_info.text_region = shared_region->text_region;
    sm_info.text_size = shared_region->text_size;
    sm_info.data_region = shared_region->data_region;
    sm_info.data_size = shared_region->data_size;
    sm_info.region_mappings = shared_region->region_mappings;
    sm_info.client_base = shared_region->client_base;
    sm_info.alternate_base = shared_region->alternate_base;
    sm_info.alternate_next = shared_region->alternate_next;
    sm_info.flags = shared_region->flags;
    sm_info.self = (vm_offset_t)shared_region;
    if(shared_region->region_mappings) {
    lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
    }
    /* Flush any pmap entries covering the client's view of the
     * text region submap. */
    if(((vm_named_entry_t)
    (shared_region->text_region->ip_kobject))
    ->backing.map->pmap) {
    pmap_remove(((vm_named_entry_t)
    (shared_region->text_region->ip_kobject))
    ->backing.map->pmap,
    sm_info.client_base,
    sm_info.client_base + sm_info.text_size);
    }
    ipc_port_release_send(shared_region->text_region);
    if(shared_region->data_region)
    ipc_port_release_send(shared_region->data_region);
    if (shared_region->object_chain) {
    /* The chained parent inherited our reference; continue
     * the loop with it after freeing the link. */
    next = shared_region->object_chain->object_chain_region;
    kfree(shared_region->object_chain,
    sizeof (struct shared_region_object_chain));
    } else {
    next = NULL;
    }
    shared_region_mapping_unlock(shared_region);
    SHARED_REGION_DEBUG(
    ("shared_region_mapping_dealloc_lock(%p): "
    "freeing\n",
    shared_region));
    /* Scrub before freeing to catch use-after-free. */
    bzero((void *)shared_region,
    sizeof (*shared_region));
    kfree(shared_region,
    sizeof (struct shared_region_mapping));
    shared_region = next;
    } else {
    /* Down to the default-regions-list's own reference on a
     * system region: retire it from the list so it can die. */
    if((ref_count == 1) &&
    (shared_region->flags & SHARED_REGION_SYSTEM)
    && !(shared_region->flags & SHARED_REGION_STALE)) {
    SHARED_REGION_DEBUG(
    ("shared_region_mapping_dealloc_lock"
    "(%p): removing stale\n",
    shared_region));
    remove_default_shared_region_lock(shared_region,need_sfh_lock, need_drl_lock);
    }
    break;
    }
    }
    SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
    shared_region));
    return KERN_SUCCESS;
}
/*
 * shared_region_mapping_dealloc:
 * Public wrapper around shared_region_mapping_dealloc_lock() that takes
 * both the shared-file-header and default-regions-list locks itself.
 */
kern_return_t
shared_region_mapping_dealloc(
    shared_region_mapping_t shared_region)
{
    SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
        "(shared_region=%p)\n",
        shared_region));
    if (shared_region != NULL) {
        assert(shared_region->ref_count > 0);
    }
    return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}
/*
 * shared_region_object_create:
 * Build a named-entry describing a fresh 'size'-byte submap and wrap it
 * in a kernel IPC port; the send right is returned in *object_handle.
 *
 * Fix: the ipc_port_alloc_kernel() result was used without checking for
 * IP_NULL, and the already-allocated vm_named_entry leaked on that path.
 */
static
kern_return_t
shared_region_object_create(
    vm_size_t size,
    ipc_port_t *object_handle)
{
    vm_named_entry_t user_entry;
    ipc_port_t user_handle;
    ipc_port_t previous;
    vm_map_t new_map;

    user_entry = (vm_named_entry_t)
        kalloc(sizeof (struct vm_named_entry));
    if(user_entry == NULL) {
        return KERN_FAILURE;
    }
    named_entry_lock_init(user_entry);
    user_handle = ipc_port_alloc_kernel();
    if (user_handle == IP_NULL) {
        /* Port allocation failed: don't leak the named entry. */
        kfree(user_entry, sizeof (struct vm_named_entry));
        return KERN_FAILURE;
    }
    ip_lock(user_handle);
    /* Make the port look like a naked send-once/send-right holder
     * owned by the kernel IPC space. */
    user_handle->ip_sorights++;
    ip_reference(user_handle);
    user_handle->ip_destination = IP_NULL;
    user_handle->ip_receiver_name = MACH_PORT_NULL;
    user_handle->ip_receiver = ipc_space_kernel;
    user_handle->ip_mscount++;
    user_handle->ip_srights++;
    ip_reference(user_handle);
    /* Request a no-senders notification so the entry can be torn down
     * when all send rights are gone. */
    ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
    /* Back the entry with a brand-new pageable submap. */
    new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
    user_entry->backing.map = new_map;
    user_entry->internal = TRUE;
    user_entry->is_sub_map = TRUE;
    user_entry->is_pager = FALSE;
    user_entry->offset = 0;
    user_entry->protection = VM_PROT_ALL;
    user_entry->size = size;
    user_entry->ref_count = 1;
    ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
        IKOT_NAMED_ENTRY);
    *object_handle = user_handle;
    return KERN_SUCCESS;
}
/*
 * shared_file_create_system_region:
 * Create a complete system shared region: 256MB text + 256MB data
 * submaps plus the mapping-info table, wrapped in a new
 * shared_region_mapping. Chains the commpage region (if initialized)
 * behind it.
 */
kern_return_t
shared_file_create_system_region(
    shared_region_mapping_t *shared_region)
{
    ipc_port_t text_handle;
    ipc_port_t data_handle;
    long text_size;
    long data_size;
    vm_offset_t mapping_array;
    kern_return_t kret;

    SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));
    /* Fixed 256MB each for text and data. */
    text_size = 0x10000000;
    data_size = 0x10000000;
    kret = shared_file_init(&text_handle,
    text_size, &data_handle, data_size, &mapping_array);
    if(kret) {
    SHARED_REGION_DEBUG(("shared_file_create_system_region: "
    "shared_file_init failed kret=0x%x\n",
    kret));
    return kret;
    }
    kret = shared_region_mapping_create(text_handle,
    text_size, data_handle, data_size, mapping_array,
    GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
    SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
    if(kret) {
    /* NOTE(review): text_handle/data_handle send rights appear to
     * leak on this path — confirm whether shared_file_init's
     * handles should be released here. */
    SHARED_REGION_DEBUG(("shared_file_create_system_region: "
    "shared_region_mapping_create failed "
    "kret=0x%x\n",
    kret));
    return kret;
    }
    (*shared_region)->flags = 0;
    /* Chain the commpage region so clients see it after this region. */
    if(com_mapping_resource) {
    shared_region_mapping_ref(com_mapping_resource);
    (*shared_region)->next = com_mapping_resource;
    }
    SHARED_REGION_DEBUG(("shared_file_create_system_region() "
    "-> shared_region=%p\n",
    *shared_region));
    return KERN_SUCCESS;
}
/*
 * update_default_shared_region:
 * Install 'new_system_region' as the default shared region for its
 * (fs_base, system) environment. If an existing default for that
 * environment is found it is replaced in the list, marked STALE, and
 * returned so the caller can drop its list reference; otherwise the new
 * region is appended (or becomes the list head) and NULL is returned.
 * All list surgery happens under the default-regions-list lock.
 */
shared_region_mapping_t
update_default_shared_region(
    shared_region_mapping_t new_system_region)
{
    shared_region_mapping_t old_system_region;
    unsigned int fs_base;
    unsigned int system;

    SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
    new_system_region));
    assert(new_system_region->ref_count > 0);
    fs_base = new_system_region->fs_base;
    system = new_system_region->system;
    new_system_region->flags |= SHARED_REGION_SYSTEM;
    default_regions_list_lock();
    old_system_region = default_environment_shared_regions;
    /* Case 1: the list head matches this environment — swap it out. */
    if((old_system_region != NULL) &&
    (old_system_region->fs_base == fs_base) &&
    (old_system_region->system == system)) {
    new_system_region->default_env_list =
    old_system_region->default_env_list;
    old_system_region->default_env_list = NULL;
    default_environment_shared_regions = new_system_region;
    old_system_region->flags |= SHARED_REGION_STALE;
    default_regions_list_unlock();
    SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
    "old=%p stale 1\n",
    new_system_region, old_system_region));
    assert(old_system_region->ref_count > 0);
    return old_system_region;
    }
    /* Case 2: scan the rest of the list for a matching environment. */
    if (old_system_region) {
    while(old_system_region->default_env_list != NULL) {
    if((old_system_region->default_env_list->fs_base == fs_base) &&
    (old_system_region->default_env_list->system == system)) {
    shared_region_mapping_t tmp_system_region;

    /* Splice new in, unlink the matching old entry. */
    tmp_system_region =
    old_system_region->default_env_list;
    new_system_region->default_env_list =
    tmp_system_region->default_env_list;
    tmp_system_region->default_env_list = NULL;
    old_system_region->default_env_list =
    new_system_region;
    old_system_region = tmp_system_region;
    old_system_region->flags |= SHARED_REGION_STALE;
    default_regions_list_unlock();
    SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
    ": old=%p stale 2\n",
    new_system_region,
    old_system_region));
    assert(old_system_region->ref_count > 0);
    return old_system_region;
    }
    old_system_region = old_system_region->default_env_list;
    }
    }
    /* Case 3: no match — append to the tail, or start the list. */
    if(old_system_region) {
    SHARED_REGION_DEBUG(("update_default_system_region(%p): "
    "adding after old=%p\n",
    new_system_region, old_system_region));
    assert(old_system_region->ref_count > 0);
    old_system_region->default_env_list = new_system_region;
    } else {
    SHARED_REGION_DEBUG(("update_default_system_region(%p): "
    "new default\n",
    new_system_region));
    default_environment_shared_regions = new_system_region;
    }
    assert(new_system_region->ref_count > 0);
    default_regions_list_unlock();
    return NULL;
}
/*
 * lookup_default_shared_region:
 * Find the default shared region registered for (fs_base, system),
 * taking a reference on it for the caller. Returns NULL if no region
 * matches.
 *
 * Fix: the final SHARED_REGION_DEBUG had three format specifiers
 * ("(0x%x,0x%x) -> %p") but passed only system_region, so enabling
 * debug output would print garbage / read stray stack arguments. It
 * also misnamed the function. Both corrected.
 */
shared_region_mapping_t
lookup_default_shared_region(
    unsigned int fs_base,
    unsigned int system)
{
    shared_region_mapping_t system_region;

    default_regions_list_lock();
    system_region = default_environment_shared_regions;
    SHARED_REGION_DEBUG(("lookup_default_shared_region"
    "(base=0x%x, system=0x%x)\n",
    fs_base, system));
    /* Walk the singly-linked default list for a matching environment. */
    while(system_region != NULL) {
    SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
    ": system_region=%p base=0x%x system=0x%x"
    " ref_count=%d\n",
    fs_base, system, system_region,
    system_region->fs_base,
    system_region->system,
    system_region->ref_count));
    assert(system_region->ref_count > 0);
    if((system_region->fs_base == fs_base) &&
    (system_region->system == system)) {
    break;
    }
    system_region = system_region->default_env_list;
    }
    /* Take a reference for the caller before dropping the list lock. */
    if(system_region)
    shared_region_mapping_ref(system_region);
    default_regions_list_unlock();
    SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
    fs_base, system, system_region));
    return system_region;
}
/*
 * remove_default_shared_region_lock:
 * Unlink 'system_region' from the default-regions list (if present),
 * mark it STALE, and drop the list's reference on it. need_drl_lock
 * says whether this function must take the list lock itself;
 * need_sfh_lock is passed through to the dealloc path. The nested
 * dealloc is always called with need_drl_lock==0 because the list lock
 * is already held here.
 */
__private_extern__ void
remove_default_shared_region_lock(
    shared_region_mapping_t system_region,
    int need_sfh_lock,
    int need_drl_lock)
{
    shared_region_mapping_t old_system_region;

    SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
    "(system_region=%p, %d, %d)\n",
    system_region, need_sfh_lock, need_drl_lock));
    if (need_drl_lock) {
    default_regions_list_lock();
    }
    old_system_region = default_environment_shared_regions;
    /* Empty list: nothing to remove. */
    if(old_system_region == NULL) {
    SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
    "-> default_env=NULL\n",
    system_region));
    if (need_drl_lock) {
    default_regions_list_unlock();
    }
    return;
    }
    SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
    "default_env=%p\n",
    system_region, old_system_region));
    assert(old_system_region->ref_count > 0);
    /* Target is the list head: advance the head and release it. */
    if (old_system_region == system_region) {
    default_environment_shared_regions
    = old_system_region->default_env_list;
    old_system_region->default_env_list = NULL;
    old_system_region->flags |= SHARED_REGION_STALE;
    SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
    "old=%p ref_count=%d STALE\n",
    system_region, old_system_region,
    old_system_region->ref_count));
    shared_region_mapping_dealloc_lock(old_system_region,
    need_sfh_lock,
    0);
    if (need_drl_lock) {
    default_regions_list_unlock();
    }
    return;
    }
    /* Otherwise scan for the predecessor of the target. */
    while(old_system_region->default_env_list != NULL) {
    SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
    "old=%p->default_env=%p\n",
    system_region, old_system_region,
    old_system_region->default_env_list));
    assert(old_system_region->default_env_list->ref_count > 0);
    if(old_system_region->default_env_list == system_region) {
    shared_region_mapping_t dead_region;

    dead_region = old_system_region->default_env_list;
    old_system_region->default_env_list =
    dead_region->default_env_list;
    dead_region->default_env_list = NULL;
    dead_region->flags |= SHARED_REGION_STALE;
    SHARED_REGION_DEBUG(
    ("remove_default_shared_region_lock(%p): "
    "dead=%p ref_count=%d stale\n",
    system_region, dead_region,
    dead_region->ref_count));
    shared_region_mapping_dealloc_lock(dead_region,
    need_sfh_lock,
    0);
    if (need_drl_lock) {
    default_regions_list_unlock();
    }
    return;
    }
    old_system_region = old_system_region->default_env_list;
    }
    /* Not found: just drop the lock if we took it. */
    if (need_drl_lock) {
    default_regions_list_unlock();
    }
}
/*
 * remove_default_shared_region:
 * Public wrapper: remove 'system_region' from the default list, taking
 * both the shared-file-header and default-regions-list locks.
 */
void
remove_default_shared_region(
    shared_region_mapping_t system_region)
{
    SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
        system_region));
    if (system_region != NULL) {
        assert(system_region->ref_count > 0);
    }
    remove_default_shared_region_lock(system_region, 1, 1);
}
/*
 * remove_all_shared_regions:
 * Empty the default-regions list entirely, marking every region STALE
 * and dropping the list's reference on each. The dealloc calls pass
 * need_drl_lock==0 because the list lock is held across the whole walk.
 */
void
remove_all_shared_regions(void)
{
    shared_region_mapping_t system_region;
    shared_region_mapping_t next_system_region;

    SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
    LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
    LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
    default_regions_list_lock();
    system_region = default_environment_shared_regions;
    if(system_region == NULL) {
    default_regions_list_unlock();
    return;
    }
    while(system_region != NULL) {
    /* Capture the next link before this region may be freed. */
    next_system_region = system_region->default_env_list;
    system_region->default_env_list = NULL;
    system_region->flags |= SHARED_REGION_STALE;
    SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
    "%p ref_count=%d stale\n",
    system_region, system_region->ref_count));
    assert(system_region->ref_count > 0);
    shared_region_mapping_dealloc_lock(system_region, 1, 0);
    system_region = next_system_region;
    }
    default_environment_shared_regions = NULL;
    default_regions_list_unlock();
    SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
    LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
    LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}
void shared_com_boot_time_init(void);
/*
 * shared_com_boot_time_init:
 * One-time boot setup of the 32-bit and 64-bit commpage regions:
 * creates the backing named-entry submaps, records their vm_maps, and
 * wraps the 32-bit one in com_mapping_resource. Any failure at this
 * stage is fatal (panic); the `return` statements after panic() are
 * unreachable.
 */
void
shared_com_boot_time_init(void)
{
    kern_return_t kret;
    vm_named_entry_t named_entry;

    SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
    /* Guard against double initialization. */
    if(com_region_handle32) {
    panic("shared_com_boot_time_init: "
    "com_region_handle32 already set\n");
    }
    if(com_region_handle64) {
    panic("shared_com_boot_time_init: "
    "com_region_handle64 already set\n");
    }
    if((kret = shared_region_object_create(
    com_region_size,
    &com_region_handle32))) {
    panic("shared_com_boot_time_init: "
    "unable to create 32-bit comm page\n");
    return;
    }
    if((kret = shared_region_object_create(
    com_region_size,
    &com_region_handle64))) {
    panic("shared_com_boot_time_init: "
    "unable to create 64-bit comm page\n");
    return;
    }
    /* Cache the submaps backing each commpage handle. */
    named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
    com_region_map32 = named_entry->backing.map;
    named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
    com_region_map64 = named_entry->backing.map;
    /* The commpage mapping resource: text only, no data region. */
    kret = shared_region_mapping_create(com_region_handle32,
    com_region_size, NULL, 0, 0,
    _COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
    0, 0);
    if (kret) {
    panic("shared_region_mapping_create failed for commpage");
    }
}
void
shared_file_boot_time_init(
unsigned int fs_base,
unsigned int system)
{
long text_region_size;
long data_region_size;
shared_region_mapping_t new_system_region;
shared_region_mapping_t old_default_env;
SHARED_REGION_DEBUG(("shared_file_boot_time_init"
"(base=0x%x,system=0x%x)\n",
fs_base, system));
text_region_size = 0x10000000;
data_region_size = 0x10000000;
shared_file_init(&shared_text_region_handle,
text_region_size,
&shared_data_region_handle,
data_region_size,
&shared_file_mapping_array);
shared_region_mapping_create(shared_text_region_handle,
text_region_size,
shared_data_region_handle,
data_region_size,
shared_file_mapping_array,
GLOBAL_SHARED_TEXT_SEGMENT,
&new_system_region,
SHARED_ALTERNATE_LOAD_BASE,
SHARED_ALTERNATE_LOAD_BASE);
new_system_region->fs_base = fs_base;
new_system_region->system = system;
new_system_region->flags = SHARED_REGION_SYSTEM;
shared_region_mapping_ref(new_system_region);
old_default_env = update_default_shared_region(new_system_region);
if(old_default_env)
shared_region_mapping_dealloc(old_default_env);
if(com_mapping_resource == NULL) {
shared_com_boot_time_init();
}
shared_region_mapping_ref(com_mapping_resource);
new_system_region->next = com_mapping_resource;
vm_set_shared_region(current_task(), new_system_region);
SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
fs_base, system));
}
/*
 * shared_file_init:
 * Create the text and data region submaps and (on first call) the
 * global shared-file mapping table: a kernel-mapped, wired buffer
 * holding the hash header/buckets plus a zone feeding load_file_ele
 * allocations. A read-only view of the table is then mapped into the
 * top of the new data region so clients can inspect it.
 */
static kern_return_t
shared_file_init(
    ipc_port_t *text_region_handle,
    vm_size_t text_region_size,
    ipc_port_t *data_region_handle,
    vm_size_t data_region_size,
    vm_offset_t *file_mapping_array)
{
    shared_file_info_t *sf_head;
    vm_offset_t table_mapping_address;
    int data_table_size;
    int hash_size;
    kern_return_t kret;
    vm_object_t buf_object;
    vm_map_entry_t entry;
    vm_size_t alloced;
    vm_offset_t b;
    vm_page_t p;

    SHARED_REGION_DEBUG(("shared_file_init()\n"));
    kret = shared_region_object_create(
    text_region_size,
    text_region_handle);
    if (kret) {
    return kret;
    }
    kret = shared_region_object_create(
    data_region_size,
    data_region_handle);
    if (kret) {
    /* Don't leak the text handle created above. */
    ipc_port_release_send(*text_region_handle);
    return kret;
    }
    /* Table is 1/512th of the data region; hash area 1/16384th.
     * The table lives at the very top of the data region. */
    data_table_size = data_region_size >> 9;
    hash_size = data_region_size >> 14;
    table_mapping_address = data_region_size - data_table_size;
    /* First caller builds the (global) mapping table. */
    if(shared_file_mapping_array == 0) {
    vm_map_address_t map_addr;

    buf_object = vm_object_allocate(data_table_size);
    if(vm_map_find_space(kernel_map, &map_addr,
    data_table_size, 0, &entry)
    != KERN_SUCCESS) {
    panic("shared_file_init: no space");
    }
    shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
    *file_mapping_array = shared_file_mapping_array;
    vm_map_unlock(kernel_map);
    entry->object.vm_object = buf_object;
    entry->offset = 0;
    /* Hand-populate and pmap-enter the pages covering the hash
     * header + buckets so they are resident before use. */
    for (b = *file_mapping_array, alloced = 0;
    alloced < (hash_size +
    round_page(sizeof(struct sf_mapping)));
    alloced += PAGE_SIZE, b += PAGE_SIZE) {
    vm_object_lock(buf_object);
    p = vm_page_alloc(buf_object, alloced);
    if (p == VM_PAGE_NULL) {
    panic("shared_file_init: no space");
    }
    p->busy = FALSE;
    vm_object_unlock(buf_object);
    pmap_enter(kernel_pmap, b, p->phys_page,
    VM_PROT_READ | VM_PROT_WRITE,
    ((unsigned int)(p->object->wimg_bits))
    & VM_WIMG_MASK,
    TRUE);
    }
    /* Lay out the header: hash buckets immediately follow it. */
    sf_head = (shared_file_info_t *)*file_mapping_array;
    sf_head->hash = (queue_head_t *)
    (((int)*file_mapping_array) +
    sizeof(struct shared_file_info));
    sf_head->hash_size = hash_size/sizeof(queue_head_t);
    mutex_init(&(sf_head->lock), 0);
    sf_head->hash_init = FALSE;
    /* Export a read-only memory entry over the table (sfma_handle)
     * for mapping into each data region. */
    mach_make_memory_entry(kernel_map, &data_table_size,
    *file_mapping_array, VM_PROT_READ, &sfma_handle,
    NULL);
    if (vm_map_wire(kernel_map,
    vm_map_trunc_page(*file_mapping_array),
    vm_map_round_page(*file_mapping_array +
    hash_size +
    round_page(sizeof(struct sf_mapping))),
    VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
    panic("shared_file_init: No memory for data table");
    }
    /* Remainder of the table backs the load_file_server zone;
     * exhaustible, non-collectable, fed only by explicit zcram. */
    lsf_zone = zinit(sizeof(struct load_file_ele),
    data_table_size -
    (hash_size + round_page_32(sizeof(struct sf_mapping))),
    0, "load_file_server");
    zone_change(lsf_zone, Z_EXHAUST, TRUE);
    zone_change(lsf_zone, Z_COLLECT, FALSE);
    zone_change(lsf_zone, Z_EXPAND, FALSE);
    zone_change(lsf_zone, Z_FOREIGN, TRUE);
    mutex_init(&default_regions_list_lock_data, 0);
    } else {
    /* Table already exists: just report its address. */
    *file_mapping_array = shared_file_mapping_array;
    }
    /* Map the read-only table view at the top of the new data region. */
    kret = vm_map(((vm_named_entry_t)
    (*data_region_handle)->ip_kobject)->backing.map,
    &table_mapping_address,
    data_table_size, 0,
    SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
    sfma_handle, 0, FALSE,
    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
    SHARED_REGION_DEBUG(("shared_file_init() done\n"));
    return kret;
}
/*
 * shared_file_header_init:
 * Lazy second-stage initialization of the shared-file hash header:
 * initialize every bucket queue, then wire and zcram an initial batch
 * of pages (up to 3) from the table area into lsf_zone so hash elements
 * can be allocated. Called under the header's lock (see map_shared_file).
 */
static kern_return_t
shared_file_header_init(
    shared_file_info_t *shared_file_header)
{
    vm_size_t hash_table_size;
    vm_size_t hash_table_offset;
    int i;
    /* Persist across calls: remaining crammable pages and the next
     * address to cram from. */
    static int allocable_hash_pages;
    static vm_offset_t hash_cram_address;

    hash_table_size = shared_file_header->hash_size
    * sizeof (struct queue_entry);
    hash_table_offset = hash_table_size +
    round_page(sizeof (struct sf_mapping));
    for (i = 0; i < shared_file_header->hash_size; i++)
    queue_init(&shared_file_header->hash[i]);
    /* NOTE(review): the "<< 5" sizing of the crammable area mirrors
     * shared_file_init's data_table_size/hash_size ratio (2^14/2^9);
     * confirm if the ratios ever change. */
    allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
    / PAGE_SIZE);
    hash_cram_address = ((vm_offset_t) shared_file_header)
    + hash_table_offset;
    shared_file_available_hash_ele = 0;
    shared_file_header->hash_init = TRUE;
    /* Prime the zone with up to 3 wired pages worth of elements. */
    if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
    int cram_pages, cram_size;

    cram_pages = allocable_hash_pages > 3 ?
    3 : allocable_hash_pages;
    cram_size = cram_pages * PAGE_SIZE;
    if (vm_map_wire(kernel_map, hash_cram_address,
    hash_cram_address + cram_size,
    VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
    printf("shared_file_header_init: "
    "No memory for data table\n");
    return KERN_NO_SPACE;
    }
    allocable_hash_pages -= cram_pages;
    zcram(lsf_zone, (void *) hash_cram_address, cram_size);
    shared_file_available_hash_ele
    += cram_size/sizeof(struct load_file_ele);
    hash_cram_address += cram_size;
    }
    return KERN_SUCCESS;
}
/*
 * map_shared_file:
 * Map (or re-find) a file's segments in the shared region described by
 * sm_info. If the file was already loaded with an identical mapping
 * list, report its slide; otherwise establish new mappings via lsf_map.
 * On KERN_NO_SPACE the region is marked FULL and, if it is the current
 * default for its environment, a replacement default region is built.
 */
kern_return_t
map_shared_file(
    int map_cnt,
    struct shared_file_mapping_np *mappings,
    memory_object_control_t file_control,
    memory_object_size_t file_size,
    shared_region_task_mappings_t sm_info,
    mach_vm_offset_t base_offset,
    mach_vm_offset_t *slide_p)
{
    vm_object_t file_object;
    shared_file_info_t *shared_file_header;
    load_struct_t *file_entry;
    loaded_mapping_t *file_mapping;
    int i;
    kern_return_t ret;
    mach_vm_offset_t slide;

    SHARED_REGION_DEBUG(("map_shared_file()\n"));
    shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
    mutex_lock(&shared_file_header->lock);
    /* First use of this header: build the hash table. */
    if(shared_file_header->hash_init == FALSE) {
    ret = shared_file_header_init(shared_file_header);
    if (ret != KERN_SUCCESS) {
    mutex_unlock(&shared_file_header->lock);
    return KERN_NO_SPACE;
    }
    }
    file_object = memory_object_control_to_vm_object(file_control);
    file_entry = lsf_hash_lookup(shared_file_header->hash,
    (void *) file_object,
    mappings[0].sfm_file_offset,
    shared_file_header->hash_size,
    TRUE, TRUE, sm_info);
    if (file_entry) {
    /* Already loaded: the caller's mapping list must exactly match
     * the recorded one (address offset, size, file offset, prot). */
    i = 0;
    file_mapping = file_entry->mappings;
    while(file_mapping != NULL) {
    if(i>=map_cnt) {
    mutex_unlock(&shared_file_header->lock);
    return KERN_INVALID_ARGUMENT;
    }
    if(((mappings[i].sfm_address)
    & SHARED_DATA_REGION_MASK) !=
    file_mapping->mapping_offset ||
    mappings[i].sfm_size != file_mapping->size ||
    mappings[i].sfm_file_offset != file_mapping->file_offset ||
    mappings[i].sfm_init_prot != file_mapping->protection) {
    break;
    }
    file_mapping = file_mapping->next;
    i++;
    }
    if(i!=map_cnt) {
    mutex_unlock(&shared_file_header->lock);
    return KERN_INVALID_ARGUMENT;
    }
    /* Report the existing slide; without a slide_p out-param the
     * caller can only accept an unslid load. */
    slide = file_entry->base_address - base_offset;
    if (slide_p != NULL) {
    *slide_p = slide;
    ret = KERN_SUCCESS;
    } else {
    if (slide != 0) {
    ret = KERN_FAILURE;
    } else {
    ret = KERN_SUCCESS;
    }
    }
    mutex_unlock(&shared_file_header->lock);
    return ret;
    } else {
    /* Not yet loaded: create the mappings. */
    ret = lsf_map(mappings, map_cnt,
    (void *)file_control,
    file_size,
    sm_info,
    base_offset,
    slide_p);
    if(ret == KERN_NO_SPACE) {
    shared_region_mapping_t regions;
    shared_region_mapping_t system_region;

    /* Region exhausted: flag it FULL, and if it is still the
     * default for its environment, build a fresh default so
     * future tasks get a non-full region. The current task
     * keeps using 'regions'. */
    regions = (shared_region_mapping_t)sm_info->self;
    regions->flags |= SHARED_REGION_FULL;
    system_region = lookup_default_shared_region(
    regions->fs_base, regions->system);
    if (system_region == regions) {
    shared_region_mapping_t new_system_shared_region;

    shared_file_boot_time_init(
    regions->fs_base, regions->system);
    /* Release the ref the new region got from its
     * attachment to the current task, then re-attach
     * the original (full) region. */
    vm_get_shared_region(current_task(),
    &new_system_shared_region);
    shared_region_mapping_dealloc_lock(
    new_system_shared_region, 0, 1);
    vm_set_shared_region(current_task(), regions);
    } else if (system_region != NULL) {
    /* Drop the lookup's reference. */
    shared_region_mapping_dealloc_lock(
    system_region, 0, 1);
    }
    }
    mutex_unlock(&shared_file_header->lock);
    return ret;
    }
}
/*
 * shared_region_cleanup:
 *
 * Deallocate, from the shared region's text and data submaps, all the
 * address space that is NOT covered by one of the given address ranges.
 * The ranges describe the pieces the caller wants to keep; every gap
 * between them (and everything after the last one) is vm_deallocate()'d.
 *
 * "ranges" is first sorted in place by ascending srr_address, then
 * walked once while a cursor (submap_base + submap_offset) advances
 * through the text submap and then the data submap.
 *
 * Always returns KERN_SUCCESS; vm_deallocate() failures are ignored.
 */
kern_return_t
shared_region_cleanup(
    unsigned int range_count,
    struct shared_region_range_np *ranges,
    shared_region_task_mappings_t sm_info)
{
    kern_return_t kr;
    ipc_port_t region_handle;
    vm_named_entry_t region_named_entry;
    /* NOTE(review): "next_submap" is assigned once below but never read. */
    vm_map_t text_submap, data_submap, submap, next_submap;
    unsigned int i_range;
    vm_map_offset_t range_start, range_end;
    vm_map_offset_t submap_base, submap_end, submap_offset;
    vm_map_size_t delete_size;

    struct shared_region_range_np tmp_range;
    unsigned int sort_index, sorted_index;
    vm_map_offset_t sort_min_address;
    unsigned int sort_min_index;

    /*
     * Sort the ranges by ascending address.  Selection sort: O(n^2)
     * but simple, and range counts are expected to be small.
     */
    for (sorted_index = 0;
         sorted_index < range_count;
         sorted_index++) {
        /* find the lowest-addressed range among the unsorted tail... */
        sort_min_index = sorted_index;
        sort_min_address = ranges[sort_min_index].srr_address;
        for (sort_index = sorted_index + 1;
             sort_index < range_count;
             sort_index++) {
            if (ranges[sort_index].srr_address < sort_min_address) {
                sort_min_index = sort_index;
                sort_min_address =
                    ranges[sort_min_index].srr_address;
            }
        }
        /* ...and swap it into the sorted position */
        if (sort_min_index != sorted_index) {
            tmp_range = ranges[sort_min_index];
            ranges[sort_min_index] = ranges[sorted_index];
            ranges[sorted_index] = tmp_range;
        }
    }

    /* extract the VM submaps backing the text and data named entries */
    region_handle = (ipc_port_t) sm_info->text_region;
    region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
    text_submap = region_named_entry->backing.map;

    region_handle = (ipc_port_t) sm_info->data_region;
    region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
    data_submap = region_named_entry->backing.map;

    /* start the walk in the text submap, at the client's base address */
    submap = text_submap;
    next_submap = submap;
    submap_base = sm_info->client_base;
    submap_offset = 0;
    submap_end = submap_base + sm_info->text_size;
    for (i_range = 0;
         i_range < range_count;
         i_range++) {

        /* page-align the next range of addresses to keep */
        range_start = ranges[i_range].srr_address;
        range_end = range_start + ranges[i_range].srr_size;
        range_start = vm_map_trunc_page(range_start);
        range_end = vm_map_round_page(range_end);

        /* clip the range to the current submap's bounds */
        if (range_start < submap_base) {
            range_start = submap_base;
        } else if (range_start >= submap_end) {
            range_start = submap_end;
        }
        if (range_end < submap_base) {
            range_end = submap_base;
        } else if (range_end >= submap_end) {
            range_end = submap_end;
        }

        /* deallocate the gap between the cursor and this range */
        if (range_start > submap_base + submap_offset) {
            delete_size = range_start -
                (submap_base + submap_offset);
            (void) vm_deallocate(submap,
                submap_offset,
                delete_size);
        } else {
            delete_size = 0;
        }

        /* advance the cursor past the gap and the kept range */
        submap_offset += delete_size + (range_end - range_start);

        if (submap_base + submap_offset >= submap_end) {
            /* the cursor reached the end of the current submap */
            if (submap == data_submap) {
                /* no further submaps: done */
                break;
            }

            /*
             * If the (unclipped) range extends past the end of
             * the text submap, it straddles the text/data
             * boundary: arrange to re-process this same range
             * against the data submap.
             */
            range_start = ranges[i_range].srr_address;
            range_end = range_start + ranges[i_range].srr_size;
            range_start = vm_map_trunc_page(range_start);
            range_end = vm_map_round_page(range_end);
            if (range_end > submap_end) {
                i_range--;      /* revisit this range */
            }

            /* switch the cursor over to the data submap */
            if (submap == text_submap) {
                submap = data_submap;
                submap_offset = 0;
                submap_base = sm_info->client_base +
                    sm_info->text_size;
                submap_end = submap_base + sm_info->data_size;
            }
        }
    }

    /* deallocate whatever remains past the last kept range */
    if (submap_base + submap_offset < submap_end) {
        (void) vm_deallocate(submap,
            submap_offset,
            submap_end - submap_base - submap_offset);
        if (submap == text_submap) {
            /*
             * The last range still fell in the text submap, so
             * the entire data submap is unused: release all of it.
             */
            submap = data_submap;
            submap_offset = 0;
            submap_base = sm_info->client_base + sm_info->text_size;
            submap_end = submap_base + sm_info->data_size;
            (void) vm_deallocate(data_submap,
                0,
                submap_end - submap_base);
        }
    }

    kr = KERN_SUCCESS;
    return kr;
}
/*
 * lsf_hash_lookup:
 *
 * Look up, in the given hash table, the load_struct_t that records a
 * previously loaded shared file, keyed by its VM object ("file_object")
 * and the file offset of its first mapping ("recognizableOffset").
 * "size" is the number of buckets in the hash table.
 *
 * A match must also be visible from the current region (sm_info->self)
 * or from one of the regions chained behind it via object_chain, at a
 * chain depth no shallower than the one it was loaded at.  "alternate"
 * accepts entries loaded at or above sm_info->alternate_base;
 * "regular" accepts entries below it.
 *
 * Returns the matching entry, or NULL when none qualifies.
 */
static load_struct_t *
lsf_hash_lookup(
    queue_head_t *hash_table,
    void *file_object,
    vm_offset_t recognizableOffset,
    int size,
    boolean_t regular,
    boolean_t alternate,
    shared_region_task_mappings_t sm_info)
{
    register queue_t bucket;
    load_struct_t *entry;
    shared_region_mapping_t target_region;
    int depth;

    LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
        "reg=%d alt=%d sm_info=%p\n",
        hash_table, file_object, recognizableOffset, size,
        regular, alternate, sm_info));

    bucket = &(hash_table[load_file_hash((int)file_object, size)]);
    for (entry = (load_struct_t *)queue_first(bucket);
         !queue_end(bucket, &entry->links);
         entry = (load_struct_t *)queue_next(&entry->links)) {

        if ((entry->file_object == (int)file_object) &&
            (entry->file_offset == recognizableOffset)) {
            /*
             * Same file and offset: now verify the entry belongs
             * to the current region or a chained ancestor region.
             */
            target_region = (shared_region_mapping_t)sm_info->self;
            /*
             * NOTE(review): "depth" is written here and below but
             * never read in this function -- apparently vestigial.
             */
            depth = target_region->depth;
            while(target_region) {
                if((!(sm_info->self)) ||
                   ((target_region == entry->regions_instance) &&
                    (target_region->depth >= entry->depth))) {
                    if(alternate &&
                       entry->base_address >= sm_info->alternate_base) {
                        /* entry lives in the "alternate" area */
                        LSF_DEBUG(("lsf_hash_lookup: "
                            "alt=%d found entry %p "
                            "(base=0x%x "
                            "alt_base=0x%x)\n",
                            alternate, entry,
                            entry->base_address,
                            sm_info->alternate_base));
                        return entry;
                    }
                    if (regular &&
                        entry->base_address < sm_info->alternate_base) {
                        /* entry lives in the "regular" area */
                        LSF_DEBUG(("lsf_hash_lookup: "
                            "reg=%d found entry %p "
                            "(base=0x%x "
                            "alt_base=0x%x)\n",
                            regular, entry,
                            entry->base_address,
                            sm_info->alternate_base));
                        return entry;
                    }
                }
                /* follow the object chain to the next region */
                if(target_region->object_chain) {
                    target_region = (shared_region_mapping_t)
                        target_region->object_chain->object_chain_region;
                    /*
                     * NOTE(review): this reads through the *new*
                     * region's object_chain; assumes chained
                     * regions always have a non-NULL object_chain
                     * -- verify (harmless today since "depth" is
                     * never read).
                     */
                    depth = target_region->object_chain->depth;
                } else {
                    target_region = NULL;
                }
            }
        }
    }

    LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
        "reg=%d alt=%d sm_info=%p NOT FOUND\n",
        hash_table, file_object, recognizableOffset, size,
        regular, alternate, sm_info));
    return (load_struct_t *)0;
}
/*
 * lsf_remove_regions_mappings_lock:
 *
 * Unload every shared file (load_struct_t) in the shared file hash
 * table that was loaded into the given region.  The shared file header
 * lock is taken and released here only when "need_sfh_lock" is non-zero;
 * callers that already hold it pass 0.
 *
 * Always returns NULL.
 */
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
    shared_region_mapping_t region,
    shared_region_task_mappings_t sm_info,
    int need_sfh_lock)
{
    int bucket_index;
    register queue_t bucket;
    shared_file_info_t *sfh;
    load_struct_t *cur;
    load_struct_t *nxt;

    sfh = (shared_file_info_t *)sm_info->region_mappings;
    LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
        "sfh=%p\n",
        region, sm_info, sfh));

    if (need_sfh_lock)
        mutex_lock(&sfh->lock);

    if (sfh->hash_init == FALSE) {
        /* the hash table was never populated: nothing to unload */
        if (need_sfh_lock)
            mutex_unlock(&sfh->lock);
        LSF_DEBUG(("lsf_remove_regions_mappings_lock"
            "(region=%p,sm_info=%p): not inited\n",
            region, sm_info));
        return NULL;
    }

    /* sweep every bucket, unloading entries that belong to "region" */
    for (bucket_index = 0; bucket_index < sfh->hash_size; bucket_index++) {
        bucket = &sfh->hash[bucket_index];
        cur = (load_struct_t *)queue_first(bucket);
        while (!queue_end(bucket, &cur->links)) {
            /* grab the successor first: lsf_unload() frees "cur" */
            nxt = (load_struct_t *)queue_next(&cur->links);
            if (region == cur->regions_instance) {
                LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
                    "entry %p region %p: "
                    "unloading\n",
                    cur, region));
                lsf_unload((void *)cur->file_object,
                    cur->base_address, sm_info);
            } else {
                LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
                    "entry %p region %p target region %p: "
                    "not unloading\n",
                    cur, cur->regions_instance, region));
            }
            cur = nxt;
        }
    }

    if (need_sfh_lock)
        mutex_unlock(&sfh->lock);
    LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));

    return NULL;
}
/*
 * lsf_remove_regions_mappings:
 *
 * Convenience wrapper around lsf_remove_regions_mappings_lock() for
 * callers that do not already hold the shared file header lock.
 * Always returns NULL.
 */
load_struct_t *
lsf_remove_regions_mappings(
    shared_region_mapping_t region,
    shared_region_task_mappings_t sm_info)
{
    /* 1 == take (and release) the shared file header lock for us */
    return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/*
 * lsf_hash_delete:
 *
 * Remove from the shared file hash table the load_struct_t matching the
 * given file VM object and base offset -- restricted to the current
 * region instance when sm_info->self is set -- and return it.  Returns
 * NULL when no such entry exists.  Ownership of the returned entry
 * passes to the caller (who is expected to free it).
 */
static load_struct_t *
lsf_hash_delete(
    void *file_object,
    vm_offset_t base_offset,
    shared_region_task_mappings_t sm_info)
{
    register queue_t bucket;
    shared_file_info_t *sfh;
    load_struct_t *ls;

    LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
        file_object, base_offset, sm_info));

    sfh = (shared_file_info_t *)sm_info->region_mappings;
    bucket = &sfh->hash
        [load_file_hash((int)file_object, sfh->hash_size)];

    for (ls = (load_struct_t *)queue_first(bucket);
         !queue_end(bucket, &ls->links);
         ls = (load_struct_t *)queue_next(&ls->links)) {
        /* skip entries that belong to a different region instance */
        if (sm_info->self &&
            (shared_region_mapping_t)sm_info->self !=
            ls->regions_instance)
            continue;
        if ((ls->file_object == (int)file_object) &&
            (ls->base_address == base_offset)) {
            queue_remove(bucket, ls,
                load_struct_ptr_t, links);
            LSF_DEBUG(("lsf_hash_delete: found it\n"));
            return ls;
        }
    }

    LSF_DEBUG(("lsf_hash_delete; not found\n"));
    return (load_struct_t *)0;
}
/*
 * lsf_hash_insert:
 *
 * Insert a load_struct_t into the shared file hash table, in the
 * bucket selected by hashing its file object.
 */
static void
lsf_hash_insert(
    load_struct_t *entry,
    shared_region_task_mappings_t sm_info)
{
    shared_file_info_t *sfh;
    queue_t bucket;

    LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
        entry, sm_info, entry->file_object, entry->base_address));

    sfh = (shared_file_info_t *)sm_info->region_mappings;
    bucket = &sfh->hash[load_file_hash(entry->file_object,
        sfh->hash_size)];
    queue_enter(bucket, entry, load_struct_ptr_t, links);
}
/*
 * lsf_slide:
 *
 * Search for a slid base offset, within the shared text region, at
 * which all of the given mappings would land in currently unmapped
 * address space of both the text and data submaps.
 *
 * The search starts from the highest possible offset
 * (text_size - max_mapping_offset) and works downwards: whenever a
 * mapping collides with an existing map entry the base offset is
 * reduced ("wiggled") and, if the adjustment exceeds the slack
 * ("wiggle_room") already validated for earlier mappings, the whole
 * placement is re-checked from scratch (goto start_over).
 *
 * On success *base_offset_p is set and KERN_SUCCESS returned.
 * Returns KERN_FAILURE when no offset can accommodate every mapping,
 * and KERN_NO_SPACE when the temporary copy of the mappings cannot
 * be allocated.
 */
static kern_return_t
lsf_slide(
    unsigned int map_cnt,
    struct shared_file_mapping_np *mappings_in,
    shared_region_task_mappings_t sm_info,
    mach_vm_offset_t *base_offset_p)
{
    mach_vm_offset_t max_mapping_offset;
    int i;
    vm_map_entry_t map_entry, prev_entry, next_entry;
    mach_vm_offset_t prev_hole_start, prev_hole_end;
    mach_vm_offset_t mapping_offset, mapping_end_offset;
    mach_vm_offset_t base_offset;
    mach_vm_size_t mapping_size;
    mach_vm_offset_t wiggle_room, wiggle;
    vm_map_t text_map, data_map, map;
    vm_named_entry_t region_entry;
    ipc_port_t region_handle;
    kern_return_t kr;

    struct shared_file_mapping_np *mappings, tmp_mapping;
    unsigned int sort_index, sorted_index;
    vm_map_offset_t sort_min_address;
    unsigned int sort_min_index;

    /*
     * Make a private copy of the mappings: they get sorted below and
     * the caller's array must not be disturbed.
     */
    kr = kmem_alloc(kernel_map,
        (vm_offset_t *) &mappings,
        (vm_size_t) (map_cnt * sizeof (mappings[0])));
    if (kr != KERN_SUCCESS) {
        return KERN_NO_SPACE;
    }
    bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));

    /*
     * Sort the mappings by ascending *end* offset within the text
     * region (selection sort), tracking the overall maximum end
     * offset, i.e. the total span the mappings need.
     */
    max_mapping_offset = 0;
    for (sorted_index = 0;
         sorted_index < map_cnt;
         sorted_index++) {

        sort_min_index = sorted_index;
        mapping_end_offset = ((mappings[sort_min_index].sfm_address &
            SHARED_TEXT_REGION_MASK) +
            mappings[sort_min_index].sfm_size);
        sort_min_address = mapping_end_offset;

        if (mapping_end_offset > max_mapping_offset) {
            max_mapping_offset = mapping_end_offset;
        }

        for (sort_index = sorted_index + 1;
             sort_index < map_cnt;
             sort_index++) {

            mapping_end_offset =
                ((mappings[sort_index].sfm_address &
                SHARED_TEXT_REGION_MASK) +
                mappings[sort_index].sfm_size);

            if (mapping_end_offset < sort_min_address) {
                sort_min_index = sort_index;
                sort_min_address = mapping_end_offset;
            }
        }

        if (sort_min_index != sorted_index) {
            tmp_mapping = mappings[sort_min_index];
            mappings[sort_min_index] = mappings[sorted_index];
            mappings[sorted_index] = tmp_mapping;
        }
    }

    max_mapping_offset = vm_map_round_page(max_mapping_offset);

    /* start with the highest base offset the span allows */
    base_offset = sm_info->text_size;
    if (max_mapping_offset > base_offset) {
        /* the mappings don't even fit in an empty region */
        kmem_free(kernel_map,
            (vm_offset_t) mappings,
            map_cnt * sizeof (mappings[0]));
        return KERN_FAILURE;
    }
    base_offset -= max_mapping_offset;

    /* extract the VM submaps backing the text and data named entries */
    region_handle = (ipc_port_t) sm_info->text_region;
    region_entry = (vm_named_entry_t) region_handle->ip_kobject;
    text_map = region_entry->backing.map;

    region_handle = (ipc_port_t) sm_info->data_region;
    region_entry = (vm_named_entry_t) region_handle->ip_kobject;
    data_map = region_entry->backing.map;

    vm_map_lock_read(text_map);
    vm_map_lock_read(data_map);

start_over:
    /* we may lower base_offset by at most base_offset itself */
    wiggle_room = base_offset;

    /*
     * Check the mappings from highest end offset to lowest; COW
     * mappings are checked against the data submap, others against
     * the text submap.
     */
    for (i = (signed) map_cnt - 1; i >= 0; i--) {
        if (mappings[i].sfm_init_prot & VM_PROT_COW) {
            map = data_map;
        } else {
            map = text_map;
        }
        /* page-align the mapping's extent within the region */
        mapping_offset = (mappings[i].sfm_address &
            SHARED_TEXT_REGION_MASK);
        mapping_size = mappings[i].sfm_size;
        mapping_end_offset = mapping_offset + mapping_size;
        mapping_offset = vm_map_trunc_page(mapping_offset);
        mapping_end_offset = vm_map_round_page(mapping_end_offset);
        mapping_size = mapping_end_offset - mapping_offset;

        for (;;) {
            /*
             * Locate the hole (unmapped gap) in "map" that
             * contains or precedes this mapping's slid start.
             */
            if (vm_map_lookup_entry(map,
                base_offset + mapping_offset,
                &map_entry)) {
                /* start address is inside an existing entry */
                prev_hole_end = map_entry->vme_start;
                prev_entry = map_entry->vme_prev;
                if (prev_entry == vm_map_to_entry(map)) {
                    prev_hole_start = map->min_offset;
                } else {
                    prev_hole_start = prev_entry->vme_end;
                }
            } else {
                /* start address falls in a hole */
                if (map_entry == vm_map_to_entry(map)) {
                    prev_hole_start = map->min_offset;
                } else {
                    prev_hole_start = map_entry->vme_end;
                }
                next_entry = map_entry->vme_next;
                if (next_entry == vm_map_to_entry(map)) {
                    prev_hole_end = map->max_offset;
                } else {
                    prev_hole_end = next_entry->vme_start;
                }
            }

            if (prev_hole_end <= base_offset + mapping_offset) {
                /*
                 * The hole ends before the mapping even starts:
                 * slide the whole placement down far enough to
                 * bring this mapping inside the hole.
                 */
                wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
                if (wiggle > base_offset) {
                    /* we can't go that low: give up */
                    kr = KERN_FAILURE;
                    goto done;
                }
                base_offset -= wiggle;
                if (wiggle > wiggle_room) {
                    /* earlier mappings must be re-checked */
                    goto start_over;
                }
                wiggle_room -= wiggle;
            }

            /* clamp the hole's end to the mapping's end */
            if (prev_hole_end >
                base_offset + mapping_offset + mapping_size) {
                prev_hole_end = (base_offset + mapping_offset +
                    mapping_size);
            }

            if (prev_hole_end <
                base_offset + mapping_offset + mapping_size) {
                /*
                 * The hole ends before the mapping does:
                 * slide down so the mapping's tail fits,
                 * then re-evaluate this same mapping.
                 */
                wiggle = base_offset + mapping_offset
                    + mapping_size - prev_hole_end;
                if (wiggle > base_offset) {
                    kr = KERN_FAILURE;
                    goto done;
                }
                base_offset -= wiggle;
                if (wiggle > wiggle_room) {
                    goto start_over;
                }
                wiggle_room -= wiggle;
                continue;
            }

            if (prev_hole_start > base_offset + mapping_offset) {
                /* mapping starts before the hole: re-evaluate */
                continue;
            }

            /*
             * The mapping fits in this hole; remember how much
             * slack remains below it for later adjustments.
             */
            wiggle = base_offset + mapping_offset - prev_hole_start;
            if (wiggle < wiggle_room) {
                wiggle_room = wiggle;
            }
            break;
        }
    }

    /* every mapping fits at this base offset */
    *base_offset_p = base_offset;
    kr = KERN_SUCCESS;

done:
    vm_map_unlock_read(text_map);
    vm_map_unlock_read(data_map);

    kmem_free(kernel_map,
        (vm_offset_t) mappings,
        map_cnt * sizeof (mappings[0]));

    return kr;
}
/*
 * lsf_map:
 *
 * Map a shared file's segments, described by "mappings", into the
 * shared region's text and data submaps at "base_offset", and record
 * the load in the shared file hash table (as a load_struct_t with a
 * list of loaded_mapping_t).
 *
 * Parameters:
 *   mappings/map_cnt  the segments to map (COW segments go in the data
 *                     region, everything else in the text region)
 *   file_control      memory object control for the backing file
 *   file_size         size of the backing file, for bounds checking
 *   sm_info           the target shared region's mapping info
 *   base_offset       offset within the region to map at
 *   slide_p           when non-NULL, the caller accepts sliding; on
 *                     success *slide_p is set to the applied slide
 *
 * Returns KERN_SUCCESS, KERN_NO_SPACE (zone exhausted),
 * KERN_INVALID_ARGUMENT (mapping outside its segment or past EOF),
 * or KERN_FAILURE (could not map, even after attempting to slide).
 */
static kern_return_t
lsf_map(
    struct shared_file_mapping_np *mappings,
    int map_cnt,
    void *file_control,
    memory_object_offset_t file_size,
    shared_region_task_mappings_t sm_info,
    mach_vm_offset_t base_offset,
    mach_vm_offset_t *slide_p)
{
    load_struct_t *entry;
    loaded_mapping_t *file_mapping;
    loaded_mapping_t **tptr;
    ipc_port_t region_handle;
    vm_named_entry_t region_entry;
    mach_port_t map_port;
    vm_object_t file_object;
    kern_return_t kr;
    int i;
    mach_vm_offset_t original_base_offset;

    /* get the VM object from the file's memory object control handle */
    file_object = memory_object_control_to_vm_object(file_control);

    original_base_offset = base_offset;

    LSF_DEBUG(("lsf_map"
        "(cnt=%d,file=%p,sm_info=%p)"
        "\n",
        map_cnt, file_object,
        sm_info));

restart_after_slide:
    /* get a new load_struct_t to describe this mapped file */
    entry = (load_struct_t *)zalloc(lsf_zone);
    LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
    LSF_DEBUG(("lsf_map"
        "(cnt=%d,file=%p,sm_info=%p) "
        "entry=%p\n",
        map_cnt, file_object,
        sm_info, entry));
    if (entry == NULL) {
        printf("lsf_map: unable to allocate memory\n");
        return KERN_NO_SPACE;
    }
    shared_file_available_hash_ele--;
    entry->file_object = (int)file_object;
    entry->mapping_cnt = map_cnt;
    entry->mappings = NULL;
    entry->links.prev = (queue_entry_t) 0;
    entry->links.next = (queue_entry_t) 0;
    entry->regions_instance = (shared_region_mapping_t)sm_info->self;
    entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
    entry->file_offset = mappings[0].sfm_file_offset;

    /* insert the entry now, so error paths can lsf_unload() it */
    lsf_hash_insert(entry, sm_info);
    tptr = &(entry->mappings);

    entry->base_address = base_offset;

    /* establish each of the requested mappings */
    for (i = 0; i < map_cnt; i++) {
        mach_vm_offset_t target_address;
        mach_vm_offset_t region_mask;

        if (mappings[i].sfm_init_prot & VM_PROT_COW) {
            /* copy-on-write mappings go in the data region... */
            region_handle = (ipc_port_t)sm_info->data_region;
            region_mask = SHARED_DATA_REGION_MASK;
            /* ...and must lie entirely within the data segment */
            if ((((mappings[i].sfm_address + base_offset)
                & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
                (((mappings[i].sfm_address + base_offset +
                mappings[i].sfm_size - 1)
                & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
                lsf_unload(file_object,
                    entry->base_address, sm_info);
                return KERN_INVALID_ARGUMENT;
            }
        } else {
            /* other mappings go in the text region/segment */
            region_mask = SHARED_TEXT_REGION_MASK;
            region_handle = (ipc_port_t)sm_info->text_region;
            if (((mappings[i].sfm_address + base_offset)
                & GLOBAL_SHARED_SEGMENT_MASK) ||
                ((mappings[i].sfm_address + base_offset +
                mappings[i].sfm_size - 1)
                & GLOBAL_SHARED_SEGMENT_MASK)) {
                lsf_unload(file_object,
                    entry->base_address, sm_info);
                return KERN_INVALID_ARGUMENT;
            }
        }

        /* file-backed mappings may not extend past end of file */
        if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
            ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
            (file_size))) {
            lsf_unload(file_object, entry->base_address, sm_info);
            return KERN_INVALID_ARGUMENT;
        }

        target_address = entry->base_address +
            ((mappings[i].sfm_address) & region_mask);

        /* zero-fill mappings are anonymous; others use the pager */
        if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
            map_port = MACH_PORT_NULL;
        } else {
            map_port = (ipc_port_t) file_object->pager;
        }

        region_entry = (vm_named_entry_t) region_handle->ip_kobject;

        if (mach_vm_map(region_entry->backing.map,
            &target_address,
            vm_map_round_page(mappings[i].sfm_size),
            0,
            VM_FLAGS_FIXED,
            map_port,
            mappings[i].sfm_file_offset,
            TRUE,
            (mappings[i].sfm_init_prot &
            (VM_PROT_READ|VM_PROT_EXECUTE)),
            (mappings[i].sfm_max_prot &
            (VM_PROT_READ|VM_PROT_EXECUTE)),
            VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
            /*
             * lsf_unload() removes "entry" from the hash table
             * and zfree()s it: "entry" must NOT be dereferenced
             * after this point.
             */
            lsf_unload(file_object, entry->base_address, sm_info);

            if (slide_p != NULL) {
                /*
                 * The caller tolerates sliding: look for a
                 * base offset where the mappings would fit.
                 */
                kr = lsf_slide(map_cnt, mappings,
                    sm_info, &base_offset);
                if (kr == KERN_SUCCESS) {
                    /*
                     * Retry at the slid base offset.  The
                     * fresh entry allocated at
                     * restart_after_slide records the new
                     * base address; this code used to also
                     * store it into the just-freed "entry",
                     * a use-after-free that could corrupt
                     * the zone free list.
                     */
                    goto restart_after_slide;
                }
                /* couldn't slide anywhere: fail below */
            }

            return KERN_FAILURE;
        }

        /* record this mapping on the entry's list */
        file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
        if (file_mapping == NULL) {
            lsf_unload(file_object, entry->base_address, sm_info);
            printf("lsf_map: unable to allocate memory\n");
            return KERN_NO_SPACE;
        }
        shared_file_available_hash_ele--;
        file_mapping->mapping_offset = (mappings[i].sfm_address)
            & region_mask;
        file_mapping->size = mappings[i].sfm_size;
        file_mapping->file_offset = mappings[i].sfm_file_offset;
        file_mapping->protection = mappings[i].sfm_init_prot;
        file_mapping->next = NULL;
        LSF_DEBUG(("lsf_map: file_mapping %p "
            "for offset=0x%x size=0x%x\n",
            file_mapping, file_mapping->mapping_offset,
            file_mapping->size));
        *tptr = file_mapping;
        tptr = &(file_mapping->next);
    }

    /* report the slide actually applied, if the caller asked */
    if (slide_p != NULL) {
        *slide_p = base_offset - original_base_offset;
    }

    if (sm_info->flags & SHARED_REGION_STANDALONE) {
        /*
         * Standalone regions keep the mappings but not the
         * bookkeeping: release the hash entry without unmapping
         * (unload == FALSE).
         */
        lsf_deallocate(file_object, entry->base_address, sm_info,
            FALSE);
    }

    LSF_DEBUG(("lsf_map: done\n"));
    return KERN_SUCCESS;
}
/*
 * lsf_unload:
 *
 * Remove the mapped shared file described by (file_object, base_offset)
 * from the shared file hash table and deallocate its mappings from the
 * shared region submaps.
 */
static void
lsf_unload(
    void *file_object,
    vm_offset_t base_offset,
    shared_region_task_mappings_t sm_info)
{
    /* TRUE: vm_deallocate() each mapping, not just the bookkeeping */
    lsf_deallocate(file_object, base_offset, sm_info, TRUE);
}
/*
 * lsf_deallocate:
 *
 * Remove the load_struct_t recorded for (file_object, base_offset)
 * from the shared file hash table and free it, along with its list of
 * loaded_mapping_t elements.  When "unload" is TRUE each mapping is
 * also vm_deallocate()'d from the text or data submap it lives in;
 * when FALSE only the bookkeeping is released (used for standalone
 * regions, which keep the mappings themselves).
 *
 * Silently does nothing if no matching entry is found.
 */
static void
lsf_deallocate(
    void *file_object,
    vm_offset_t base_offset,
    shared_region_task_mappings_t sm_info,
    boolean_t unload)
{
    load_struct_t *entry;
    loaded_mapping_t *map_ele;
    loaded_mapping_t *back_ptr;

    LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
        file_object, base_offset, sm_info, unload));
    entry = lsf_hash_delete(file_object, base_offset, sm_info);
    if (entry) {
        map_ele = entry->mappings;
        while (map_ele != NULL) {
            if (unload) {
                ipc_port_t region_handle;
                vm_named_entry_t region_entry;

                /* COW mappings live in the data region, others in text */
                if (map_ele->protection & VM_PROT_COW) {
                    region_handle = (ipc_port_t)
                        sm_info->data_region;
                } else {
                    region_handle = (ipc_port_t)
                        sm_info->text_region;
                }
                region_entry = (vm_named_entry_t)
                    region_handle->ip_kobject;

                vm_deallocate(region_entry->backing.map,
                    (entry->base_address +
                    map_ele->mapping_offset),
                    map_ele->size);
            }
            /* advance before freeing the current element */
            back_ptr = map_ele;
            map_ele = map_ele->next;
            LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
                "offset 0x%x size 0x%x\n",
                back_ptr, back_ptr->mapping_offset,
                back_ptr->size));
            zfree(lsf_zone, back_ptr);
            shared_file_available_hash_ele++;
        }
        LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
        /* fixed: this trace line previously lacked its newline */
        LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p\n", entry));
        zfree(lsf_zone, entry);
        shared_file_available_hash_ele++;
    }
    /* fixed: this trace previously said "lsf_unload: done", which was
     * misleading since lsf_deallocate() is also called directly. */
    LSF_DEBUG(("lsf_deallocate: done\n"));
}
/*
 * lsf_mapping_pool_gauge:
 *
 * Return how full the load-struct/mapping zone (lsf_zone) is, as a
 * percentage (0-100) of its maximum size.
 *
 * The intermediate product is computed in 64 bits: count * elem_size
 * can approach the zone's maximum size in bytes, so the additional
 * "* 100" could overflow a 32-bit intermediate and yield a bogus
 * percentage for large zones.
 */
unsigned int
lsf_mapping_pool_gauge(void)
{
    unsigned long long used_bytes;

    used_bytes = (unsigned long long) lsf_zone->count * lsf_zone->elem_size;
    return (unsigned int) ((used_bytes * 100) / lsf_zone->max_size);
}