#include <zone_debug.h>
#include <zone_alias_addr.h>
#include <norma_vm.h>
#include <mach_kdb.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <machine/machparam.h>
#include <libkern/OSDebug.h>
#include <sys/kdebug.h>
#if defined(__ppc__)
#include <ppc/savearea.h>
#include <ppc/mappings.h>
#endif
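/*
 * Zone element debugging (see zone_bootstrap for the boot-args):
 * check_freed_element ("-zc") validates free-list pointers when an
 * element is reallocated; zfree_clear ("-zp") fills freed elements
 * with 0xdeadbeef so writes to freed memory can be caught.
 */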
boolean_t check_freed_element = FALSE;
boolean_t zfree_clear = FALSE;
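/*
 * A stored free-list pointer is considered sane if it is NULL or a
 * 4-byte-aligned address at or above vm_min_kernel_address.
 */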
#define is_kernel_data_addr(a) (!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))
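/*
 * ADD_TO_ZONE pushes an element onto the zone's free list, using the
 * element's first word as the link.  When check_freed_element is set
 * and the element is large enough, the link is mirrored in the last
 * word so later modification can be detected.
 */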
#define ADD_TO_ZONE(zone, element) \
MACRO_BEGIN \
if (zfree_clear) \
{ unsigned int i; \
for (i=0; \
i < zone->elem_size/sizeof(uint32_t); \
i++) \
((uint32_t *)(element))[i] = 0xdeadbeef; \
} \
*((vm_offset_t *)(element)) = (zone)->free_elements; \
if (check_freed_element) { \
if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
((vm_offset_t *)(element))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
(zone)->free_elements; \
} \
(zone)->free_elements = (vm_offset_t) (element); \
(zone)->count--; \
MACRO_END
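/*
 * REMOVE_FROM_ZONE pops the free-list head into (ret), panicking if
 * the link fails the sanity check, the mirrored link disagrees, or
 * (with zfree_clear) the 0xdeadbeef fill has been disturbed.
 */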
#define REMOVE_FROM_ZONE(zone, ret, type) \
MACRO_BEGIN \
(ret) = (type) (zone)->free_elements; \
if ((ret) != (type) 0) { \
if (check_freed_element) { \
if (!is_kernel_data_addr(((vm_offset_t *)(ret))[0]) || \
((zone)->elem_size >= (2 * sizeof(vm_offset_t)) && \
((vm_offset_t *)(ret))[((zone)->elem_size/sizeof(vm_offset_t))-1] != \
((vm_offset_t *)(ret))[0])) \
panic("a freed zone element has been modified");\
if (zfree_clear) { \
unsigned int ii; \
for (ii = sizeof(vm_offset_t) / sizeof(uint32_t); \
ii < zone->elem_size/sizeof(uint32_t) - sizeof(vm_offset_t) / sizeof(uint32_t); \
ii++) \
if (((uint32_t *)(ret))[ii] != (uint32_t)0xdeadbeef) \
panic("a freed zone element has been modified");\
} \
} \
(zone)->count++; \
(zone)->free_elements = *((vm_offset_t *)(ret)); \
} \
MACRO_END
#if ZONE_DEBUG
#define zone_debug_enabled(z) z->active_zones.next
#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
#define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16)
#endif
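/*
 * One entry per zone_map page: alloc_count is the number of elements
 * resident on the page (ZONE_PAGE_UNUSED if the page is free), and
 * collect_count counts the page's elements found on a free list
 * during garbage collection.
 */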
struct zone_page_table_entry {
struct zone_page_table_entry *link;
short alloc_count;
short collect_count;
};
void zone_page_init(
vm_offset_t addr,
vm_size_t size,
int value);
void zone_page_alloc(
vm_offset_t addr,
vm_size_t size);
void zone_page_free_element(
struct zone_page_table_entry **free_pages,
vm_offset_t addr,
vm_size_t size);
void zone_page_collect(
vm_offset_t addr,
vm_size_t size);
boolean_t zone_page_collectable(
vm_offset_t addr,
vm_size_t size);
void zone_page_keep(
vm_offset_t addr,
vm_size_t size);
void zalloc_async(
thread_call_param_t p0,
thread_call_param_t p1);
void zone_display_zprint( void );
#if ZONE_DEBUG && MACH_KDB
int zone_count(
zone_t z,
int tail);
#endif
vm_map_t zone_map = VM_MAP_NULL;
zone_t zone_zone = ZONE_NULL;
vm_offset_t zdata;
vm_size_t zdata_size;
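/*
 * zone_zone is the "zone of zones", from which struct zone itself is
 * allocated; zdata/zdata_size describe the bootstrap region stolen
 * before zone_map exists.  Zone mutexes are taken in spin mode for
 * short critical sections; zone_sleep drops the lock while waiting
 * for an in-progress expansion or GC to complete.
 */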
#define lock_zone(zone) \
MACRO_BEGIN \
lck_mtx_lock_spin(&(zone)->lock); \
MACRO_END
#define unlock_zone(zone) \
MACRO_BEGIN \
lck_mtx_unlock(&(zone)->lock); \
MACRO_END
#define zone_wakeup(zone) thread_wakeup((event_t)(zone))
#define zone_sleep(zone) \
(void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
#define lock_zone_init(zone) \
MACRO_BEGIN \
char _name[32]; \
(void) snprintf(_name, sizeof (_name), "zone.%s", (zone)->zone_name); \
lck_grp_attr_setdefault(&(zone)->lock_grp_attr); \
lck_grp_init(&(zone)->lock_grp, _name, &(zone)->lock_grp_attr); \
lck_attr_setdefault(&(zone)->lock_attr); \
lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
&(zone)->lock_grp, &(zone)->lock_attr); \
MACRO_END
#define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
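/*
 * zget_space hands out permanent, never-freed memory by advancing a
 * cursor through a linear region; it backs the zone of zones and
 * non-collectable zones.
 */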
kern_return_t zget_space(
vm_offset_t size,
vm_offset_t *result);
decl_simple_lock_data(,zget_space_lock)
vm_offset_t zalloc_next_space;
vm_offset_t zalloc_end_of_space;
vm_size_t zalloc_wasted_space;
struct zone_page_table_entry * zone_page_table;
vm_offset_t zone_map_min_address;
vm_offset_t zone_map_max_address;
unsigned int zone_pages;
decl_lck_mtx_data(, zone_gc_lock)
lck_attr_t zone_lck_attr;
lck_grp_t zone_lck_grp;
lck_grp_attr_t zone_lck_grp_attr;
lck_mtx_ext_t zone_lck_ext;
#if !ZONE_ALIAS_ADDR
#define from_zone_map(addr, size) \
((vm_offset_t)(addr) >= zone_map_min_address && \
((vm_offset_t)(addr) + size -1) < zone_map_max_address)
#else
#define from_zone_map(addr, size) \
((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) >= zone_map_min_address && \
((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) + size -1) < zone_map_max_address)
#endif
#define ZONE_PAGE_USED 0
#define ZONE_PAGE_UNUSED -1
decl_simple_lock_data(, all_zones_lock)
zone_t first_zone;
zone_t *last_zone;
unsigned int num_zones;
boolean_t zone_gc_allowed = TRUE;
boolean_t zone_gc_forced = FALSE;
boolean_t panic_include_zprint = FALSE;
unsigned zone_gc_last_tick = 0;
unsigned zone_gc_max_rate = 0;
static int log_records;
#define MAX_ZONE_NAME 32
static char zone_name_to_log[MAX_ZONE_NAME] = "";
#define ZRECORDS_MAX 8000
#define ZRECORDS_DEFAULT 4000
#define MAX_DEPTH 15
struct zrecord {
void *z_element;
uint32_t z_opcode:1,
z_time:31;
void *z_pc[MAX_DEPTH];
};
#define ZOP_ALLOC 1
#define ZOP_FREE 0
static struct zrecord *zrecords;
static int zcurrent = 0;
static int zrecorded = 0;
static unsigned int ztime = 0;
static zone_t zone_of_interest = NULL;
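/*
 * Zone logging, enabled with the "zlog=<zone name>" boot-arg, records
 * allocations and frees for a single zone of interest in a buffer of
 * up to "zrecs=" records, each with a truncated backtrace.  Because
 * boot-args cannot contain spaces, a '.' in the log name matches a
 * ' ' in the zone name.
 */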
static int
log_this_zone(const char *zonename, const char *logname)
{
int len;
const char *zc = zonename;
const char *lc = logname;
for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
if (*zc != *lc && !(*zc == ' ' && *lc == '.'))
break;
if (*zc == '\0')
return TRUE;
}
return FALSE;
}
#define DO_LOGGING(z) (zrecords && (z) == zone_of_interest)
extern boolean_t zlog_ready;
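/*
 * zinit initializes a new zone.  The zone data structures themselves
 * are stored in a zone, which is initially a static structure that
 * is initialized by zone_bootstrap.
 */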
zone_t
zinit(
vm_size_t size,
vm_size_t max,
vm_size_t alloc,
const char *name)
{
zone_t z;
if (zone_zone == ZONE_NULL) {
if (zget_space(sizeof(struct zone), (vm_offset_t *)&z)
!= KERN_SUCCESS)
return(ZONE_NULL);
} else
z = (zone_t) zalloc(zone_zone);
if (z == ZONE_NULL)
return(ZONE_NULL);
if (size < sizeof(z->free_elements))
size = sizeof(z->free_elements);
size = ((size-1) + sizeof(z->free_elements)) -
((size-1) % sizeof(z->free_elements));
if (alloc == 0)
alloc = PAGE_SIZE;
alloc = round_page(alloc);
max = round_page(max);
#if ZONE_ALIAS_ADDR
if ((size < PAGE_SIZE) && (PAGE_SIZE % size <= PAGE_SIZE / 10))
alloc = PAGE_SIZE;
else
#endif
{ vm_size_t best, waste; unsigned int i;
best = PAGE_SIZE;
waste = best % size;
for (i = 1; i <= 5; i++) {
vm_size_t tsize, twaste;
tsize = i * PAGE_SIZE;
if ((tsize % size) < (tsize / 100)) {
alloc = tsize;
goto use_this_allocation;
}
twaste = tsize % size;
if (twaste < waste)
best = tsize, waste = twaste;
}
if (alloc <= best || (alloc % size >= waste))
alloc = best;
}
use_this_allocation:
if (max && (max < alloc))
max = alloc;
z->free_elements = 0;
z->cur_size = 0;
z->max_size = max;
z->elem_size = size;
z->alloc_size = alloc;
z->zone_name = name;
z->count = 0;
z->doing_alloc = FALSE;
z->doing_gc = FALSE;
z->exhaustible = FALSE;
z->collectable = TRUE;
z->allows_foreign = FALSE;
z->expandable = TRUE;
z->waiting = FALSE;
z->async_pending = FALSE;
#if ZONE_DEBUG
z->active_zones.next = z->active_zones.prev = NULL;
zone_debug_enable(z);
#endif
lock_zone_init(z);
z->next_zone = ZONE_NULL;
thread_call_setup(&z->call_async_alloc, zalloc_async, z);
simple_lock(&all_zones_lock);
*last_zone = z;
last_zone = &z->next_zone;
num_zones++;
simple_unlock(&all_zones_lock);
if (log_this_zone(z->zone_name, zone_name_to_log)) {
zone_of_interest = z;
}
if (zone_of_interest != NULL && zrecords == NULL && zlog_ready) {
if (kmem_alloc(kernel_map, (vm_offset_t *)&zrecords, log_records * sizeof(struct zrecord)) == KERN_SUCCESS) {
bzero((void *)zrecords, log_records * sizeof(struct zrecord));
printf("zone: logging started for zone %s (%p)\n", zone_of_interest->zone_name, zone_of_interest);
} else {
printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
zone_of_interest = NULL;
}
}
return(z);
}
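/*
 * Cram the given memory into the specified zone, carving it into
 * elements and pushing each onto the free list.
 */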
void
zcram(
register zone_t zone,
void *newaddr,
vm_size_t size)
{
register vm_size_t elem_size;
vm_offset_t newmem = (vm_offset_t) newaddr;
assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
assert(!zone->collectable || zone->allows_foreign
|| (from_zone_map(newmem, size)));
elem_size = zone->elem_size;
lock_zone(zone);
while (size >= elem_size) {
ADD_TO_ZONE(zone, newmem);
if (from_zone_map(newmem, elem_size))
zone_page_alloc(newmem, elem_size);
zone->count++;
size -= elem_size;
newmem += elem_size;
zone->cur_size += elem_size;
}
unlock_zone(zone);
}
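/*
 * Contiguous space allocator for non-paged zones: advances a shared
 * cursor through wired memory allocated from zone_map; space handed
 * out here is never returned.
 */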
kern_return_t
zget_space(
vm_offset_t size,
vm_offset_t *result)
{
vm_offset_t new_space = 0;
vm_size_t space_to_add = 0;
simple_lock(&zget_space_lock);
while ((zalloc_next_space + size) > zalloc_end_of_space) {
space_to_add = round_page(size);
if (new_space == 0) {
kern_return_t retval;
simple_unlock(&zget_space_lock);
retval = kernel_memory_allocate(zone_map, &new_space,
space_to_add, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
if (retval != KERN_SUCCESS)
return(retval);
#if ZONE_ALIAS_ADDR
if (space_to_add == PAGE_SIZE)
new_space = zone_alias_addr(new_space);
#endif
zone_page_init(new_space, space_to_add,
ZONE_PAGE_USED);
simple_lock(&zget_space_lock);
continue;
}
if (new_space != zalloc_end_of_space) {
zalloc_wasted_space +=
zalloc_end_of_space - zalloc_next_space;
zalloc_next_space = new_space;
}
zalloc_end_of_space = new_space + space_to_add;
new_space = 0;
}
*result = zalloc_next_space;
zalloc_next_space += size;
simple_unlock(&zget_space_lock);
if (new_space != 0)
kmem_free(zone_map, new_space, space_to_add);
return(KERN_SUCCESS);
}
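/*
 * Steal memory for the zone package.  Called from vm_page_bootstrap().
 */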
void
zone_steal_memory(void)
{
zdata_size = round_page(128*sizeof(struct zone));
zdata = (vm_offset_t)((char *)pmap_steal_memory(zdata_size) - (char *)0);
}
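/*
 * Fill a zone with enough memory to contain at least nelem elements.
 * Returns the number of elements actually put into the zone, which
 * may be more than asked for since the allocation is rounded up to
 * whole pages.
 */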
int
zfill(
zone_t zone,
int nelem)
{
kern_return_t kr;
vm_size_t size;
vm_offset_t memory;
int nalloc;
assert(nelem > 0);
if (nelem <= 0)
return 0;
size = nelem * zone->elem_size;
size = round_page(size);
kr = kmem_alloc_kobject(kernel_map, &memory, size);
if (kr != KERN_SUCCESS)
return 0;
zone_change(zone, Z_FOREIGN, TRUE);
zcram(zone, (void *)memory, size);
nalloc = (int)(size / zone->elem_size);
assert(nalloc >= nelem);
return nalloc;
}
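/*
 * Initialize the "zone of zones", which uses fixed memory stolen
 * earlier in memory initialization.  zone_bootstrap is called before
 * zone_init.
 */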
void
zone_bootstrap(void)
{
vm_size_t zone_zone_size;
vm_offset_t zone_zone_space;
char temp_buf[16];
if (PE_parse_boot_argn("-zc", temp_buf, sizeof (temp_buf))) {
check_freed_element = TRUE;
}
if (PE_parse_boot_argn("-zp", temp_buf, sizeof (temp_buf))) {
zfree_clear = TRUE;
}
if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) {
log_records = MIN(ZRECORDS_MAX, log_records);
} else {
log_records = ZRECORDS_DEFAULT;
}
}
simple_lock_init(&all_zones_lock, 0);
first_zone = ZONE_NULL;
last_zone = &first_zone;
num_zones = 0;
simple_lock_init(&zget_space_lock, 0);
zalloc_next_space = zdata;
zalloc_end_of_space = zdata + zdata_size;
zalloc_wasted_space = 0;
assert(zone_zone == ZONE_NULL);
zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
sizeof(struct zone), "zones");
zone_change(zone_zone, Z_COLLECT, FALSE);
zone_zone_size = zalloc_end_of_space - zalloc_next_space;
zget_space(zone_zone_size, &zone_zone_space);
zcram(zone_zone, (void *)zone_zone_space, zone_zone_size);
}
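/*
 * Create the zone_map submap and the page table that tracks element
 * residency for garbage collection.
 */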
void
zone_init(
vm_size_t max_zonemap_size)
{
kern_return_t retval;
vm_offset_t zone_min;
vm_offset_t zone_max;
vm_size_t zone_table_size;
retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
&zone_map);
if (retval != KERN_SUCCESS)
panic("zone_init: kmem_suballoc failed");
zone_max = zone_min + round_page(max_zonemap_size);
zone_table_size = atop_kernel(zone_max - zone_min) *
sizeof(struct zone_page_table_entry);
if (kmem_alloc_kobject(zone_map, (vm_offset_t *) &zone_page_table,
zone_table_size) != KERN_SUCCESS)
panic("zone_init");
zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
zone_map_min_address = zone_min;
zone_map_max_address = zone_max;
lck_grp_attr_setdefault(&zone_lck_grp_attr);
lck_grp_init(&zone_lck_grp, "zones", &zone_lck_grp_attr);
lck_attr_setdefault(&zone_lck_attr);
lck_mtx_init_ext(&zone_gc_lock, &zone_lck_ext, &zone_lck_grp, &zone_lck_attr);
zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
}
extern volatile SInt32 kfree_nop_count;
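/*
 * zalloc returns an element from the specified zone.  If the free
 * list is empty and canblock is set, the zone is grown: collectable
 * zones get fresh pages from zone_map, non-collectable zones get
 * permanent space from zget_space.
 */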
void *
zalloc_canblock(
register zone_t zone,
boolean_t canblock)
{
vm_offset_t addr;
kern_return_t retval;
void *bt[MAX_DEPTH];
int numsaved = 0;
int i;
assert(zone != ZONE_NULL);
if (DO_LOGGING(zone))
numsaved = OSBacktrace(&bt[0], MAX_DEPTH);
lock_zone(zone);
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
while ((addr == 0) && canblock && (zone->doing_gc)) {
zone->waiting = TRUE;
zone_sleep(zone);
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
}
while ((addr == 0) && canblock) {
if (zone->doing_alloc) {
zone->waiting = TRUE;
zone_sleep(zone);
}
else {
if ((zone->cur_size + zone->elem_size) >
zone->max_size) {
if (zone->exhaustible)
break;
if (zone->expandable) {
zone->max_size += (zone->max_size >> 1);
} else {
unlock_zone(zone);
panic("zalloc: zone \"%s\" empty.", zone->zone_name);
}
}
zone->doing_alloc = TRUE;
unlock_zone(zone);
if (zone->collectable) {
vm_offset_t space;
vm_size_t alloc_size;
int retry = 0;
for (;;) {
if (vm_pool_low() || retry >= 1)
alloc_size =
round_page(zone->elem_size);
else
alloc_size = zone->alloc_size;
retval = kernel_memory_allocate(zone_map,
&space, alloc_size, 0,
KMA_KOBJECT|KMA_NOPAGEWAIT);
if (retval == KERN_SUCCESS) {
#if ZONE_ALIAS_ADDR
if (alloc_size == PAGE_SIZE)
space = zone_alias_addr(space);
#endif
zone_page_init(space, alloc_size,
ZONE_PAGE_USED);
zcram(zone, (void *)space, alloc_size);
break;
} else if (retval != KERN_RESOURCE_SHORTAGE) {
retry++;
if (retry == 2) {
zone_gc();
printf("zalloc did gc\n");
zone_display_zprint();
}
if (retry == 3) {
panic_include_zprint = TRUE;
panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
}
} else {
break;
}
}
lock_zone(zone);
zone->doing_alloc = FALSE;
if (zone->waiting) {
zone->waiting = FALSE;
zone_wakeup(zone);
}
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
if (addr == 0 &&
retval == KERN_RESOURCE_SHORTAGE) {
unlock_zone(zone);
VM_PAGE_WAIT();
lock_zone(zone);
}
} else {
vm_offset_t space;
retval = zget_space(zone->elem_size, &space);
lock_zone(zone);
zone->doing_alloc = FALSE;
if (zone->waiting) {
zone->waiting = FALSE;
thread_wakeup((event_t)zone);
}
if (retval == KERN_SUCCESS) {
zone->count++;
zone->cur_size += zone->elem_size;
#if ZONE_DEBUG
if (zone_debug_enabled(zone)) {
enqueue_tail(&zone->active_zones, (queue_entry_t)space);
}
#endif
unlock_zone(zone);
zone_page_alloc(space, zone->elem_size);
#if ZONE_DEBUG
if (zone_debug_enabled(zone))
space += ZONE_DEBUG_OFFSET;
#endif
addr = space;
goto success;
}
if (retval == KERN_RESOURCE_SHORTAGE) {
unlock_zone(zone);
VM_PAGE_WAIT();
lock_zone(zone);
} else {
panic("zalloc: \"%s\" (%d elements) zget_space returned %d", zone->zone_name, zone->count, retval);
}
}
}
if (addr == 0)
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
}
if (DO_LOGGING(zone) && addr) {
if (!check_freed_element && zrecords[zcurrent].z_element && zrecorded < log_records) {
for (i = zcurrent; i < log_records; i++) {
if (zrecords[i].z_element == NULL) {
zcurrent = i;
goto empty_slot;
}
}
for (i = 0; i < zcurrent; i++) {
if (zrecords[i].z_element == NULL) {
zcurrent = i;
goto empty_slot;
}
}
}
empty_slot:
if (zrecords[zcurrent].z_element == NULL)
zrecorded++;
zrecords[zcurrent].z_element = (void *)addr;
zrecords[zcurrent].z_time = ztime++;
zrecords[zcurrent].z_opcode = ZOP_ALLOC;
for (i = 0; i < numsaved; i++)
zrecords[zcurrent].z_pc[i] = bt[i];
for (; i < MAX_DEPTH; i++)
zrecords[zcurrent].z_pc[i] = 0;
zcurrent++;
if (zcurrent >= log_records)
zcurrent = 0;
}
if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
zone->async_pending = TRUE;
unlock_zone(zone);
thread_call_enter(&zone->call_async_alloc);
lock_zone(zone);
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
}
#if ZONE_DEBUG
if (addr && zone_debug_enabled(zone)) {
enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
addr += ZONE_DEBUG_OFFSET;
}
#endif
unlock_zone(zone);
success:
TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
return((void *)addr);
}
void *
zalloc(
register zone_t zone)
{
return( zalloc_canblock(zone, TRUE) );
}
void *
zalloc_noblock(
register zone_t zone)
{
return( zalloc_canblock(zone, FALSE) );
}
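/*
 * zalloc_async: thread-call target that replenishes a zone after a
 * failed zalloc_noblock, by allocating (blocking) and then freeing
 * one element.
 */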
void
zalloc_async(
thread_call_param_t p0,
__unused thread_call_param_t p1)
{
void *elt;
elt = zalloc_canblock((zone_t)p0, TRUE);
zfree((zone_t)p0, elt);
lock_zone(((zone_t)p0));
((zone_t)p0)->async_pending = FALSE;
unlock_zone(((zone_t)p0));
}
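/*
 * zget returns an element from the specified zone and immediately
 * returns nothing if there is nothing there.
 *
 * This form should be used when you can not block (like when
 * processing an interrupt).
 */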
void *
zget(
register zone_t zone)
{
register vm_offset_t addr;
assert( zone != ZONE_NULL );
if (!lock_try_zone(zone))
return NULL;
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
#if ZONE_DEBUG
if (addr && zone_debug_enabled(zone)) {
enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
addr += ZONE_DEBUG_OFFSET;
}
#endif
unlock_zone(zone);
return((void *) addr);
}
boolean_t zone_check = FALSE;
static zone_t zone_last_bogus_zone = ZONE_NULL;
static vm_offset_t zone_last_bogus_elem = 0;
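/*
 * Free an element back to its zone, optionally logging the free,
 * validating zone membership, and (when zone_check is set) walking
 * the free list to catch double frees and corrupt links.
 */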
void
zfree(
register zone_t zone,
void *addr)
{
vm_offset_t elem = (vm_offset_t) addr;
void *bt[MAX_DEPTH];
int numsaved = 0;
assert(zone != ZONE_NULL);
if (DO_LOGGING(zone))
numsaved = OSBacktrace(&bt[0], MAX_DEPTH);
#if MACH_ASSERT
if (zone == ZONE_NULL || elem == (vm_offset_t)0)
panic("zfree: NULL");
if (zone == zone_zone)
panic("zfree: freeing to zone_zone breaks zone_gc!");
#endif
TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);
if (zone->collectable && !zone->allows_foreign &&
!from_zone_map(elem, zone->elem_size)) {
#if MACH_ASSERT
panic("zfree: non-allocated memory in collectable zone!");
#endif
zone_last_bogus_zone = zone;
zone_last_bogus_elem = elem;
return;
}
lock_zone(zone);
if (DO_LOGGING(zone)) {
int i;
if (check_freed_element) {
if (zrecords[zcurrent].z_element == NULL)
zrecorded++;
zrecords[zcurrent].z_element = (void *)addr;
zrecords[zcurrent].z_time = ztime++;
zrecords[zcurrent].z_opcode = ZOP_FREE;
for (i = 0; i < numsaved; i++)
zrecords[zcurrent].z_pc[i] = bt[i];
for (; i < MAX_DEPTH; i++)
zrecords[zcurrent].z_pc[i] = 0;
zcurrent++;
if (zcurrent >= log_records)
zcurrent = 0;
} else {
for (i = 0; i < log_records; i++) {
if (zrecords[i].z_element == addr) {
zrecords[i].z_element = NULL;
zcurrent = i;
zrecorded--;
break;
}
}
}
}
#if ZONE_DEBUG
if (zone_debug_enabled(zone)) {
queue_t tmp_elem;
elem -= ZONE_DEBUG_OFFSET;
if (zone_check) {
for (tmp_elem = queue_first(&zone->active_zones);
!queue_end(tmp_elem, &zone->active_zones);
tmp_elem = queue_next(tmp_elem))
if (elem == (vm_offset_t)tmp_elem)
break;
if (elem != (vm_offset_t)tmp_elem)
panic("zfree()ing element from wrong zone");
}
remqueue(&zone->active_zones, (queue_t) elem);
}
#endif
if (zone_check) {
vm_offset_t this;
for (this = zone->free_elements;
this != 0;
this = * (vm_offset_t *) this)
if (!pmap_kernel_va(this) || this == elem)
panic("zfree");
}
ADD_TO_ZONE(zone, elem);
#if MACH_ASSERT
if (zone->count < 0)
panic("zfree: count < 0!");
#endif
if (zone->elem_size >= PAGE_SIZE &&
vm_pool_low()){
zone_gc_forced = TRUE;
}
unlock_zone(zone);
}
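/*
 * Change a zone's flags.  This routine must be called immediately
 * after zinit.
 */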
void
zone_change(
zone_t zone,
unsigned int item,
boolean_t value)
{
assert( zone != ZONE_NULL );
assert( value == TRUE || value == FALSE );
switch(item){
case Z_EXHAUST:
zone->exhaustible = value;
break;
case Z_COLLECT:
zone->collectable = value;
break;
case Z_EXPAND:
zone->expandable = value;
break;
case Z_FOREIGN:
zone->allows_foreign = value;
break;
#if MACH_ASSERT
default:
panic("Zone_change: Wrong Item Type!");
#endif
}
}
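/*
 * Return the expected number of free elements in the zone.  The
 * calculation will be wrong if items were zfree'd that were never
 * zalloc'd or zget'd; the correct way to stuff memory into a zone
 * is by zcram or zfill.
 */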
integer_t
zone_free_count(zone_t zone)
{
integer_t free_count;
lock_zone(zone);
free_count = (integer_t)(zone->cur_size/zone->elem_size - zone->count);
unlock_zone(zone);
assert(free_count >= 0);
return(free_count);
}
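/*
 * zprealloc preallocates wired memory, expanding the specified zone
 * to the specified size.
 */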
void
zprealloc(
zone_t zone,
vm_size_t size)
{
vm_offset_t addr;
if (size != 0) {
if (kmem_alloc_kobject(zone_map, &addr, size) != KERN_SUCCESS)
panic("zprealloc");
zone_page_init(addr, size, ZONE_PAGE_USED);
zcram(zone, (void *)addr, size);
}
}
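/*
 * Zone page accounting for the garbage collector: a page is
 * collectable once every resident element has been found on the free
 * list (collect_count == alloc_count).
 */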
boolean_t
zone_page_collectable(
vm_offset_t addr,
vm_size_t size)
{
struct zone_page_table_entry *zp;
natural_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
#endif
#if MACH_ASSERT
if (!from_zone_map(addr, size))
panic("zone_page_collectable");
#endif
i = (natural_t)atop_kernel(addr-zone_map_min_address);
j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
for (zp = zone_page_table + i; i <= j; zp++, i++)
if (zp->collect_count == zp->alloc_count)
return (TRUE);
return (FALSE);
}
void
zone_page_keep(
vm_offset_t addr,
vm_size_t size)
{
struct zone_page_table_entry *zp;
natural_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
#endif
#if MACH_ASSERT
if (!from_zone_map(addr, size))
panic("zone_page_keep");
#endif
i = (natural_t)atop_kernel(addr-zone_map_min_address);
j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
for (zp = zone_page_table + i; i <= j; zp++, i++)
zp->collect_count = 0;
}
void
zone_page_collect(
vm_offset_t addr,
vm_size_t size)
{
struct zone_page_table_entry *zp;
natural_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
#endif
#if MACH_ASSERT
if (!from_zone_map(addr, size))
panic("zone_page_collect");
#endif
i = (natural_t)atop_kernel(addr-zone_map_min_address);
j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
for (zp = zone_page_table + i; i <= j; zp++, i++)
++zp->collect_count;
}
void
zone_page_init(
vm_offset_t addr,
vm_size_t size,
int value)
{
struct zone_page_table_entry *zp;
natural_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
#endif
#if MACH_ASSERT
if (!from_zone_map(addr, size))
panic("zone_page_init");
#endif
i = (natural_t)atop_kernel(addr-zone_map_min_address);
j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
for (zp = zone_page_table + i; i <= j; zp++, i++) {
zp->alloc_count = value;
zp->collect_count = 0;
}
}
void
zone_page_alloc(
vm_offset_t addr,
vm_size_t size)
{
struct zone_page_table_entry *zp;
natural_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
#endif
#if MACH_ASSERT
if (!from_zone_map(addr, size))
panic("zone_page_alloc");
#endif
i = (natural_t)atop_kernel(addr-zone_map_min_address);
j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
for (zp = zone_page_table + i; i <= j; zp++, i++) {
if (zp->alloc_count == ZONE_PAGE_UNUSED)
zp->alloc_count = 1;
else
++zp->alloc_count;
}
}
void
zone_page_free_element(
struct zone_page_table_entry **free_pages,
vm_offset_t addr,
vm_size_t size)
{
struct zone_page_table_entry *zp;
natural_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
#endif
#if MACH_ASSERT
if (!from_zone_map(addr, size))
panic("zone_page_free_element");
#endif
i = (natural_t)atop_kernel(addr-zone_map_min_address);
j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
for (zp = zone_page_table + i; i <= j; zp++, i++) {
if (zp->collect_count > 0)
--zp->collect_count;
if (--zp->alloc_count == 0) {
zp->alloc_count = ZONE_PAGE_UNUSED;
zp->collect_count = 0;
zp->link = *free_pages;
*free_pages = zp;
}
}
}
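/*
 * Garbage-collection helpers for splicing runs of free elements back
 * into a zone's free list.  ADD_ELEMENT maintains the same mirrored
 * link convention as ADD_TO_ZONE when check_freed_element is set.
 */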
struct zone_free_element {
struct zone_free_element * next;
};
#define ADD_LIST_TO_ZONE(zone, base, tail) \
MACRO_BEGIN \
(tail)->next = (void *)((zone)->free_elements); \
if (check_freed_element) { \
if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
(zone)->free_elements; \
} \
(zone)->free_elements = (unsigned long)(base); \
MACRO_END
#define ADD_ELEMENT(zone, prev, elem) \
MACRO_BEGIN \
(prev)->next = (elem); \
if (check_freed_element) { \
if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
(vm_offset_t)(elem); \
} \
MACRO_END
struct {
uint32_t pgs_freed;
uint32_t elems_collected,
elems_freed,
elems_kept;
} zgc_stats;
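/*
 * zone_gc walks the free list of every collectable zone, counting
 * free elements per page, and returns fully free pages to zone_map.
 * Elements on pages still partly in use (and foreign elements) are
 * kept.  The list is processed in chunks of at most 50 elements so
 * that waiters on a busy zone are not starved.
 */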
void
zone_gc(void)
{
unsigned int max_zones;
zone_t z;
unsigned int i;
struct zone_page_table_entry *zp, *zone_free_pages;
lck_mtx_lock(&zone_gc_lock);
simple_lock(&all_zones_lock);
max_zones = num_zones;
z = first_zone;
simple_unlock(&all_zones_lock);
#if MACH_ASSERT
for (i = 0; i < zone_pages; i++)
assert(zone_page_table[i].collect_count == 0);
#endif
zone_free_pages = NULL;
for (i = 0; i < max_zones; i++, z = z->next_zone) {
unsigned int n, m;
vm_size_t elt_size, size_freed;
struct zone_free_element *elt, *base_elt, *base_prev, *prev, *scan, *keep, *tail;
assert(z != ZONE_NULL);
if (!z->collectable)
continue;
lock_zone(z);
elt_size = z->elem_size;
if ((elt_size & PAGE_MASK) &&
(((z->cur_size - z->count * elt_size) <= (2 * z->alloc_size)) ||
((z->cur_size - z->count * elt_size) <= (z->cur_size / 10)))) {
unlock_zone(z);
continue;
}
z->doing_gc = TRUE;
scan = (void *)z->free_elements;
z->free_elements = 0;
unlock_zone(z);
prev = (void *)&scan;
elt = scan;
n = 0; tail = keep = NULL;
while (elt != NULL) {
if (from_zone_map(elt, elt_size)) {
zone_page_collect((vm_offset_t)elt, elt_size);
prev = elt;
elt = elt->next;
++zgc_stats.elems_collected;
}
else {
if (keep == NULL)
keep = tail = elt;
else {
ADD_ELEMENT(z, tail, elt);
tail = elt;
}
ADD_ELEMENT(z, prev, elt->next);
elt = elt->next;
ADD_ELEMENT(z, tail, NULL);
}
if (++n >= 50) {
if (z->waiting == TRUE) {
lock_zone(z);
if (keep != NULL) {
ADD_LIST_TO_ZONE(z, keep, tail);
tail = keep = NULL;
} else {
m = 0;
base_elt = elt;
base_prev = prev;
while ((elt != NULL) && (++m < 50)) {
prev = elt;
elt = elt->next;
}
if (m != 0) {
ADD_LIST_TO_ZONE(z, base_elt, prev);
ADD_ELEMENT(z, base_prev, elt);
prev = base_prev;
}
}
if (z->waiting) {
z->waiting = FALSE;
zone_wakeup(z);
}
unlock_zone(z);
}
n = 0;
}
}
if (keep != NULL) {
lock_zone(z);
ADD_LIST_TO_ZONE(z, keep, tail);
unlock_zone(z);
}
size_freed = 0;
elt = scan;
n = 0; tail = keep = NULL;
while (elt != NULL) {
if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
size_freed += elt_size;
zone_page_free_element(&zone_free_pages,
(vm_offset_t)elt, elt_size);
elt = elt->next;
++zgc_stats.elems_freed;
}
else {
zone_page_keep((vm_offset_t)elt, elt_size);
if (keep == NULL)
keep = tail = elt;
else {
ADD_ELEMENT(z, tail, elt);
tail = elt;
}
elt = elt->next;
ADD_ELEMENT(z, tail, NULL);
++zgc_stats.elems_kept;
}
if (++n >= 50) {
lock_zone(z);
z->cur_size -= size_freed;
size_freed = 0;
if (keep != NULL) {
ADD_LIST_TO_ZONE(z, keep, tail);
}
if (z->waiting) {
z->waiting = FALSE;
zone_wakeup(z);
}
unlock_zone(z);
n = 0; tail = keep = NULL;
}
}
lock_zone(z);
if (size_freed > 0 || keep != NULL) {
z->cur_size -= size_freed;
if (keep != NULL) {
ADD_LIST_TO_ZONE(z, keep, tail);
}
}
z->doing_gc = FALSE;
if (z->waiting) {
z->waiting = FALSE;
zone_wakeup(z);
}
unlock_zone(z);
}
while ((zp = zone_free_pages) != NULL) {
zone_free_pages = zp->link;
#if ZONE_ALIAS_ADDR
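/*
 * Note: this translation operates on the zone pointer 'z', which is
 * not used below; it appears to be a leftover and is likely a no-op
 * here (the freed address is computed from zp instead).
 */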
z = (zone_t)zone_virtual_addr((vm_map_address_t)z);
#endif
kmem_free(zone_map, zone_map_min_address + PAGE_SIZE *
(zp - zone_page_table), PAGE_SIZE);
++zgc_stats.pgs_freed;
}
lck_mtx_unlock(&zone_gc_lock);
}
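/*
 * consider_zone_gc: called when the system is low on memory; runs
 * zone_gc at most roughly once a minute unless forced.
 */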
void
consider_zone_gc(boolean_t force)
{
if (zone_gc_max_rate == 0)
zone_gc_max_rate = (60 << SCHED_TICK_SHIFT) + 1;
if (zone_gc_allowed &&
((sched_tick > (zone_gc_last_tick + zone_gc_max_rate)) ||
zone_gc_forced ||
force)) {
zone_gc_forced = FALSE;
zone_gc_last_tick = sched_tick;
zone_gc();
}
}
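/*
 * "Fake" zones export, via host_zone_info, memory that is managed
 * outside the zone allocator: kernel stacks, page tables, and large
 * kalloc allocations.
 */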
struct fake_zone_info {
const char* name;
void (*func)(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
int *, int *);
};
static struct fake_zone_info fake_zones[] = {
{
.name = "kernel_stacks",
.func = stack_fake_zone_info,
},
#ifdef ppc
{
.name = "save_areas",
.func = save_fake_zone_info,
},
{
.name = "pmap_mappings",
.func = mapping_fake_zone_info,
},
#endif
#if defined(__i386__) || defined (__x86_64__)
{
.name = "page_tables",
.func = pt_fake_zone_info,
},
#endif
{
.name = "kalloc.large",
.func = kalloc_fake_zone_info,
},
};
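/*
 * host_zone_info returns the name and statistics of every zone,
 * including the fake zones; callers must match the kernel's bitness.
 * The arrays are copied out through freshly allocated pageable memory
 * when the caller's buffers are too small.
 */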
kern_return_t
host_zone_info(
host_t host,
zone_name_array_t *namesp,
mach_msg_type_number_t *namesCntp,
zone_info_array_t *infop,
mach_msg_type_number_t *infoCntp)
{
zone_name_t *names;
vm_offset_t names_addr;
vm_size_t names_size;
zone_info_t *info;
vm_offset_t info_addr;
vm_size_t info_size;
unsigned int max_zones, i;
zone_t z;
zone_name_t *zn;
zone_info_t *zi;
kern_return_t kr;
size_t num_fake_zones;
if (host == HOST_NULL)
return KERN_INVALID_HOST;
#if defined(__LP64__)
if (!thread_is_64bit(current_thread()))
return KERN_NOT_SUPPORTED;
#else
if (thread_is_64bit(current_thread()))
return KERN_NOT_SUPPORTED;
#endif
num_fake_zones = sizeof fake_zones / sizeof fake_zones[0];
simple_lock(&all_zones_lock);
max_zones = (unsigned int)(num_zones + num_fake_zones);
z = first_zone;
simple_unlock(&all_zones_lock);
if (max_zones <= *namesCntp) {
names_size = *namesCntp * sizeof *names;
names = *namesp;
} else {
names_size = round_page(max_zones * sizeof *names);
kr = kmem_alloc_pageable(ipc_kernel_map,
&names_addr, names_size);
if (kr != KERN_SUCCESS)
return kr;
names = (zone_name_t *) names_addr;
}
if (max_zones <= *infoCntp) {
info_size = *infoCntp * sizeof *info;
info = *infop;
} else {
info_size = round_page(max_zones * sizeof *info);
kr = kmem_alloc_pageable(ipc_kernel_map,
&info_addr, info_size);
if (kr != KERN_SUCCESS) {
if (names != *namesp)
kmem_free(ipc_kernel_map,
names_addr, names_size);
return kr;
}
info = (zone_info_t *) info_addr;
}
zn = &names[0];
zi = &info[0];
for (i = 0; i < num_zones; i++) {
struct zone zcopy;
assert(z != ZONE_NULL);
lock_zone(z);
zcopy = *z;
unlock_zone(z);
simple_lock(&all_zones_lock);
z = z->next_zone;
simple_unlock(&all_zones_lock);
(void) strncpy(zn->zn_name, zcopy.zone_name,
sizeof zn->zn_name);
zn->zn_name[sizeof zn->zn_name - 1] = '\0';
zi->zi_count = zcopy.count;
zi->zi_cur_size = zcopy.cur_size;
zi->zi_max_size = zcopy.max_size;
zi->zi_elem_size = zcopy.elem_size;
zi->zi_alloc_size = zcopy.alloc_size;
zi->zi_exhaustible = zcopy.exhaustible;
zi->zi_collectable = zcopy.collectable;
zn++;
zi++;
}
for (i = 0; i < num_fake_zones; i++) {
strncpy(zn->zn_name, fake_zones[i].name, sizeof zn->zn_name);
zn->zn_name[sizeof zn->zn_name - 1] = '\0';
fake_zones[i].func(&zi->zi_count, &zi->zi_cur_size,
&zi->zi_max_size, &zi->zi_elem_size,
&zi->zi_alloc_size, &zi->zi_collectable,
&zi->zi_exhaustible);
zn++;
zi++;
}
if (names != *namesp) {
vm_size_t used;
vm_map_copy_t copy;
used = max_zones * sizeof *names;
if (used != names_size)
bzero((char *) (names_addr + used), names_size - used);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
(vm_map_size_t)names_size, TRUE, &copy);
assert(kr == KERN_SUCCESS);
*namesp = (zone_name_t *) copy;
}
*namesCntp = max_zones;
if (info != *infop) {
vm_size_t used;
vm_map_copy_t copy;
used = max_zones * sizeof *info;
if (used != info_size)
bzero((char *) (info_addr + used), info_size - used);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
(vm_map_size_t)info_size, TRUE, &copy);
assert(kr == KERN_SUCCESS);
*infop = (zone_info_t *) copy;
}
*infoCntp = max_zones;
return KERN_SUCCESS;
}
extern unsigned int stack_total;
#if defined(__i386__) || defined (__x86_64__)
extern unsigned int inuse_ptepages_count;
#endif
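/*
 * Print the zones whose current size exceeds 1 MB, along with the
 * major non-zone memory consumers; called after zalloc retries a
 * failed allocation following zone_gc.
 */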
void zone_display_zprint(void)
{
unsigned int i;
zone_t the_zone;
if (first_zone != NULL) {
the_zone = first_zone;
for (i = 0; i < num_zones; i++) {
if (the_zone->cur_size > (1024 * 1024)) {
printf("%.20s:\t%lu\n", the_zone->zone_name, (uintptr_t)the_zone->cur_size);
}
if (the_zone->next_zone == NULL) {
break;
}
the_zone = the_zone->next_zone;
}
}
printf("Kernel Stacks:\t%lu\n", (uintptr_t)(kernel_stack_size * stack_total));
#if defined(__i386__) || defined (__x86_64__)
printf("PageTables:\t%lu\n", (uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif
printf("Kalloc.Large:\t%lu\n", (uintptr_t)kalloc_large_total);
}
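/*
 * Routines for examining zones from the kernel debugger (ddb).
 */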
#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <kern/kern_print.h>
const char *zone_labels =
"ENTRY COUNT TOT_SZ MAX_SZ ELT_SZ ALLOC_SZ NAME";
void db_print_zone(
zone_t addr);
#if ZONE_DEBUG
void db_zone_check_active(
zone_t zone);
void db_zone_print_active(
zone_t zone);
#endif
void db_zone_print_free(
zone_t zone);
void
db_print_zone(
zone_t addr)
{
struct zone zcopy;
zcopy = *addr;
db_printf("%8x %8x %8x %8x %6x %8x %s ",
addr, zcopy.count, zcopy.cur_size,
zcopy.max_size, zcopy.elem_size,
zcopy.alloc_size, zcopy.zone_name);
if (zcopy.exhaustible)
db_printf("H");
if (zcopy.collectable)
db_printf("C");
if (zcopy.expandable)
db_printf("X");
db_printf("\n");
}
void
db_show_one_zone(db_expr_t addr, boolean_t have_addr,
__unused db_expr_t count, __unused char *modif)
{
struct zone *z = (zone_t)((char *)0 + addr);
if (z == ZONE_NULL || !have_addr){
db_error("No Zone\n");
}
db_printf("%s\n", zone_labels);
db_print_zone(z);
}
void
db_show_all_zones(__unused db_expr_t addr, boolean_t have_addr, db_expr_t count,
__unused char *modif)
{
zone_t z;
unsigned total = 0;
have_addr = simple_lock_try(&all_zones_lock);
count = num_zones;
z = first_zone;
if (have_addr) {
simple_unlock(&all_zones_lock);
}
db_printf("%s\n", zone_labels);
for ( ; count > 0; count--) {
if (!z) {
db_error("Mangled Zone List\n");
}
db_print_zone(z);
total += z->cur_size;
have_addr = simple_lock_try(&all_zones_lock);
z = z->next_zone;
if (have_addr) {
simple_unlock(&all_zones_lock);
}
}
db_printf("\nTotal %8x", total);
db_printf("\n\nzone_gc() has reclaimed %d pages\n", zgc_stats.pgs_freed);
}
#if ZONE_DEBUG
void
db_zone_check_active(
zone_t zone)
{
int count = 0;
queue_t tmp_elem;
if (!zone_debug_enabled(zone) || !zone_check)
return;
tmp_elem = queue_first(&zone->active_zones);
while (count < zone->count) {
count++;
if (tmp_elem == 0) {
printf("unexpected zero element, zone=%p, count=%d\n",
zone, count);
assert(FALSE);
break;
}
if (queue_end(tmp_elem, &zone->active_zones)) {
printf("unexpected queue_end, zone=%p, count=%d\n",
zone, count);
assert(FALSE);
break;
}
tmp_elem = queue_next(tmp_elem);
}
if (!queue_end(tmp_elem, &zone->active_zones)) {
printf("not at queue_end, zone=%p, tmp_elem=%p\n",
zone, tmp_elem);
assert(FALSE);
}
}
void
db_zone_print_active(
zone_t zone)
{
int count = 0;
queue_t tmp_elem;
if (!zone_debug_enabled(zone)) {
printf("zone %p debug not enabled\n", zone);
return;
}
if (!zone_check) {
printf("zone_check FALSE\n");
return;
}
printf("zone %p, active elements %d\n", zone, zone->count);
printf("active list:\n");
tmp_elem = queue_first(&zone->active_zones);
while (count < zone->count) {
printf(" %p", tmp_elem);
count++;
if ((count % 6) == 0)
printf("\n");
if (tmp_elem == 0) {
printf("\nunexpected zero element, count=%d\n", count);
break;
}
if (queue_end(tmp_elem, &zone->active_zones)) {
printf("\nunexpected queue_end, count=%d\n", count);
break;
}
tmp_elem = queue_next(tmp_elem);
}
if (!queue_end(tmp_elem, &zone->active_zones))
printf("\nnot at queue_end, tmp_elem=%p\n", tmp_elem);
else
printf("\n");
}
#endif
void
db_zone_print_free(
zone_t zone)
{
int count = 0;
int freecount;
vm_offset_t elem;
freecount = zone_free_count(zone);
printf("zone %p, free elements %d\n", zone, freecount);
printf("free list:\n");
elem = zone->free_elements;
while (count < freecount) {
printf(" 0x%x", elem);
count++;
if ((count % 6) == 0)
printf("\n");
if (elem == 0) {
printf("\nunexpected zero element, count=%d\n", count);
break;
}
elem = *((vm_offset_t *)elem);
}
if (elem != 0)
printf("\nnot at end of free list, elem=0x%x\n", elem);
else
printf("\n");
}
#endif
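/*
 * ZONE_DEBUG support: each element of a debug-enabled zone carries a
 * queue_chain_t header (ZONE_DEBUG_OFFSET bytes) linking it into the
 * zone's active_zones list, so live elements can be walked and frees
 * to the wrong zone detected.
 */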
#if ZONE_DEBUG
#if MACH_KDB
void *
next_element(
zone_t z,
void *prev)
{
char *elt = (char *)prev;
if (!zone_debug_enabled(z))
return(NULL);
elt -= ZONE_DEBUG_OFFSET;
elt = (char *) queue_next((queue_t) elt);
if ((queue_t) elt == &z->active_zones)
return(NULL);
elt += ZONE_DEBUG_OFFSET;
return(elt);
}
void *
first_element(
zone_t z)
{
char *elt;
if (!zone_debug_enabled(z))
return(NULL);
if (queue_empty(&z->active_zones))
return(NULL);
elt = (char *)queue_first(&z->active_zones);
elt += ZONE_DEBUG_OFFSET;
return(elt);
}
int
zone_count(
zone_t z,
int tail)
{
void *elt;
int count = 0;
boolean_t print = (tail != 0);
if (tail < 0)
tail = z->count;
if (z->count < tail)
tail = 0;
tail = z->count - tail;
for (elt = first_element(z); elt; elt = next_element(z, elt)) {
if (print && tail <= count)
db_printf("%8x\n", elt);
count++;
}
assert(count == z->count);
return(count);
}
#endif
#define zone_in_use(z) ( z->count || z->free_elements )
void
zone_debug_enable(
zone_t z)
{
if (zone_debug_enabled(z) || zone_in_use(z) ||
z->alloc_size < (z->elem_size + ZONE_DEBUG_OFFSET))
return;
queue_init(&z->active_zones);
z->elem_size += ZONE_DEBUG_OFFSET;
}
void
zone_debug_disable(
zone_t z)
{
if (!zone_debug_enabled(z) || zone_in_use(z))
return;
z->elem_size -= ZONE_DEBUG_OFFSET;
z->active_zones.next = z->active_zones.prev = NULL;
}
#endif