#include <kern/backtrace.h>
#include <mach/sdt.h>
#include <vm/vm_map_store.h>
#include <vm/vm_pageout.h>
#if MACH_ASSERT
/*
 * Debug-only consistency check: verify that the map's cached
 * "first_free" hint agrees with the backing store.  Delegates
 * entirely to the linked-list store's validator.
 */
boolean_t
first_free_is_valid_store( vm_map_t map )
{
	return first_free_is_valid_ll( map );
}
#endif
/*
 * Report whether this map header participates in the red-black
 * tree store.  A header opts out by carrying the SKIP_RB_TREE
 * sentinel in its RB root slot.
 */
boolean_t
vm_map_store_has_RB_support( struct vm_map_header *hdr )
{
	return ((void*)hdr->rb_head_store.rbh_root != (void*)(int)SKIP_RB_TREE) ? TRUE : FALSE;
}
/*
 * Initialize the backing store(s) of a freshly created map header:
 * the linked list unconditionally, plus the red-black tree when this
 * header has not opted out of RB support.
 */
void
vm_map_store_init( struct vm_map_header *hdr )
{
	vm_map_store_init_ll( hdr );
#ifdef VM_MAP_STORE_USE_RB
	boolean_t rb_enabled;

	rb_enabled = vm_map_store_has_RB_support( hdr );
	if (rb_enabled) {
		vm_map_store_init_rb( hdr );
	}
#endif
}
/*
 * Look up the map entry containing (or preceding) "address".
 * Returns TRUE and sets *entry when an entry contains the address;
 * otherwise returns FALSE with *entry set to the predecessor.
 * Dispatches to whichever store implementation is compiled in;
 * kept out-of-line to keep backtraces readable on the hot path.
 */
__attribute__((noinline))
boolean_t
vm_map_store_lookup_entry(
	vm_map_t map,
	vm_map_offset_t address,
	vm_map_entry_t *entry)
{
#ifdef VM_MAP_STORE_USE_LL
	return vm_map_store_lookup_entry_ll( map, address, entry );
#elif defined VM_MAP_STORE_USE_RB
	/* Guard clause: a map without RB support cannot be searched here. */
	if (!vm_map_store_has_RB_support( &map->hdr )) {
		panic("VM map lookups need RB tree support.\n");
		return FALSE;
	}
	return vm_map_store_lookup_entry_rb( map, address, entry );
#endif
}
/*
 * Fix up the map's cached entry pointers around a store mutation.
 * Only VM_MAP_ENTRY_DELETE needs work: a deleted entry must not be
 * left dangling in the "first_free" or "hint" caches.
 */
void
vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type )
{
	if (update_type != VM_MAP_ENTRY_DELETE) {
		/* VM_MAP_ENTRY_CREATE and any other type: nothing to fix up. */
		return;
	}

	/* Drop stale cached pointers to the entry being deleted. */
	if ((map->holelistenabled == FALSE) && (entry == map->first_free)) {
		map->first_free = vm_map_to_entry(map);
	}
	if (entry == map->hint) {
		map->hint = vm_map_to_entry(map);
	}
}
/*
 * Find the last free gap in the map, returning it via *o_entry.
 * Delegates to the linked-list store implementation.
 */
void
vm_map_store_find_last_free(
	vm_map_t map,
	vm_map_entry_t *o_entry)
{
	vm_map_store_find_last_free_ll(map, o_entry);
}
/*
 * Low-level link: insert "entry" into the header's backing store(s)
 * immediately after "after_where".  Always updates the linked list;
 * also updates the RB tree when this header supports it.
 */
void
_vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	assert(entry->vme_start < entry->vme_end);
	if (__improbable(vm_debug_events)) {
		/*
		 * NOTE(review): recovers the owning vm_map_t by stepping back
		 * over the map's lock, which is assumed to directly precede
		 * the header in struct vm_map — confirm against vm_map layout.
		 */
		DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
	}
	vm_map_store_entry_link_ll(mapHdr, after_where, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_link_rb(mapHdr, after_where, entry);
	}
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	/* Record the entry's original bounds once (first insertion only). */
	if (entry->vme_start_original == 0 && entry->vme_end_original == 0) {
		entry->vme_start_original = entry->vme_start;
		entry->vme_end_original = entry->vme_end;
	}
	/* Capture the insertion backtrace for debugging map corruption. */
	backtrace(&entry->vme_insertion_bt[0],
	    (sizeof(entry->vme_insertion_bt) / sizeof(uintptr_t)), NULL);
#endif
}
/*
 * Public link: insert "entry" into "map" after "after_where", then
 * refresh the map's free-space bookkeeping.  When entry reuse is
 * disabled the highest-entry watermark is advanced instead of the
 * first-free caches.  vmk_flags is currently unused.
 */
void
vm_map_store_entry_link(
	vm_map_t map,
	vm_map_entry_t after_where,
	vm_map_entry_t entry,
	vm_map_kernel_flags_t vmk_flags)
{
	if (entry->is_sub_map) {
		/* A submap must not use larger pages than its parent map. */
		assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
		    "map %p (%d) entry %p submap %p (%d)\n",
		    map, VM_MAP_PAGE_SHIFT(map), entry,
		    VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
	}

	_vm_map_store_entry_link(&map->hdr, after_where, entry);

	if (map->disable_vmentry_reuse == TRUE) {
		UPDATE_HIGHEST_ENTRY_END( map, entry);
	} else {
		update_first_free_ll(map, map->first_free);
#ifdef VM_MAP_STORE_USE_RB
		if (vm_map_store_has_RB_support( &map->hdr )) {
			update_first_free_rb(map, entry, TRUE);
		}
#endif
	}

	(void) vmk_flags;
}
/*
 * Low-level unlink: remove "entry" from the header's backing
 * store(s).  Always updates the linked list; also updates the RB
 * tree when this header supports it.
 */
void
_vm_map_store_entry_unlink( struct vm_map_header * mapHdr, vm_map_entry_t entry)
{
	if (__improbable(vm_debug_events)) {
		/*
		 * NOTE(review): same pointer arithmetic as the link path —
		 * assumes the map lock immediately precedes the header in
		 * struct vm_map; confirm against vm_map layout.
		 */
		DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
	}
	vm_map_store_entry_unlink_ll(mapHdr, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_unlink_rb(mapHdr, entry);
	}
#endif
}
/*
 * Public unlink: remove "entry" from "map" and repair the map's
 * free-space caches.  The replacement first-free candidate must be
 * computed BEFORE the entry is unlinked, while its list neighbors
 * are still reachable.
 */
void
vm_map_store_entry_unlink( vm_map_t map, vm_map_entry_t entry)
{
	vm_map_t VMEU_map;
	vm_map_entry_t VMEU_entry = NULL;
	vm_map_entry_t VMEU_first_free = NULL;
	VMEU_map = (map);
	VMEU_entry = (entry);

	if (map->holelistenabled == FALSE) {
		/*
		 * If the removed entry sits at or before the cached first
		 * free spot, the gap may now start at its predecessor;
		 * otherwise the existing cache remains valid.
		 */
		if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {
			VMEU_first_free = VMEU_entry->vme_prev;
		} else {
			VMEU_first_free = VMEU_map->first_free;
		}
	}

	_vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry);
	/* Clear any cached hint/first_free pointers to the dead entry. */
	vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE);
	update_first_free_ll(VMEU_map, VMEU_first_free);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &VMEU_map->hdr )) {
		update_first_free_rb(VMEU_map, entry, FALSE);
	}
#endif
}
/*
 * Reset the store(s) backing a vm_map_copy, detaching its entries
 * starting at "entry".  The entry count is snapshotted before the
 * linked-list reset so the RB-tree reset sees the same value.
 *
 * Fix: the RB-support check read "©->c_u.hdr" — the token
 * "&copy" had been corrupted into the HTML-entity character for
 * "&copy;".  Restored to take the address of the copy's header.
 */
void
vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry)
{
	int nentries = copy->cpy_hdr.nentries;

	vm_map_store_copy_reset_ll(copy, entry, nentries);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &copy->c_u.hdr )) {
		vm_map_store_copy_reset_rb(copy, entry, nentries);
	}
#endif
}
/*
 * Refresh the map's first-free bookkeeping after a store change.
 * "first_free_entry" is the new candidate; "new_entry_creation"
 * tells the RB-tree side whether this update accompanies an entry
 * insertion (TRUE) or a removal (FALSE).
 */
void
vm_map_store_update_first_free( vm_map_t map, vm_map_entry_t first_free_entry, boolean_t new_entry_creation)
{
	update_first_free_ll(map, first_free_entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		update_first_free_rb(map, first_free_entry, new_entry_creation);
	}
#endif
}