#include <zone_debug.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>
#ifdef MACH_BSD
/* BSD-facing lookup: map a request size to its backing zone. */
zone_t kalloc_zone(vm_size_t);
#endif
/* Submap of the kernel map that services large (non-zone) kallocs. */
vm_map_t kalloc_map;
/* Size of that submap: 16MB. */
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
/* Largest request the power-of-two zones service; set in kalloc_init(). */
vm_size_t kalloc_max;
/* Requests >= this threshold (kalloc_max/2 + 1) bypass the zones
 * and go to kmem_alloc() in kalloc_map instead. */
vm_size_t kalloc_max_prerounded;
/* Accounting for allocations that bypassed the zones. */
unsigned int kalloc_large_inuse;
vm_size_t kalloc_large_total;
vm_size_t kalloc_large_max;     /* high-water mark of kalloc_large_total */
/* Index of the smallest usable zone (KALLOC_MINSIZE); -1 until init. */
int first_k_zone = -1;
/* One zone per power-of-two size class; k_zone[i] holds 2^i byte elements. */
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
"kalloc.1", "kalloc.2",
"kalloc.4", "kalloc.8",
"kalloc.16", "kalloc.32",
"kalloc.64", "kalloc.128",
"kalloc.256", "kalloc.512",
"kalloc.1024", "kalloc.2048",
"kalloc.4096", "kalloc.8192",
"kalloc.16384", "kalloc.32768"
};
/*
 * Per-zone element caps: zone i may hold at most k_zone_max[i] elements,
 * i.e. k_zone_max[i] * 2^i bytes (see the zinit() call in kalloc_init()).
 * NOTE(review): the values look hand-tuned; confirm before retuning.
 */
unsigned long k_zone_max[16] = {
1024,   /* kalloc.1 (unused below KALLOC_MINSIZE) */
1024,   /* kalloc.2 */
1024,   /* kalloc.4 */
1024,   /* kalloc.8 */
1024,   /* kalloc.16 */
4096,   /* kalloc.32 */
4096,   /* kalloc.64 */
4096,   /* kalloc.128 */
4096,   /* kalloc.256 */
1024,   /* kalloc.512 */
1024,   /* kalloc.1024 */
1024,   /* kalloc.2048 */
1024,   /* kalloc.4096 */
4096,   /* kalloc.8192 */
64,     /* kalloc.16384 */
64,     /* kalloc.32768 */
};
/* Common worker for kalloc()/kalloc_noblock(). */
void * kalloc_canblock(
vm_size_t size,
boolean_t canblock);
/* Registry of all live OSMalloc tags, guarded by OSMalloc_tag_lock. */
static
queue_head_t OSMalloc_tag_list;
decl_simple_lock_data(static,OSMalloc_tag_lock)
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);
/*
 * kalloc_init: set up the kalloc submap and the power-of-two zones.
 * Called once at boot before any kalloc()/OSMalloc() use.
 */
void
kalloc_init(
	void)
{
	kern_return_t kr;
	vm_offset_t map_min;
	vm_size_t elem;
	int idx;

	/* Carve a dedicated submap out of the kernel map for large kallocs. */
	kr = kmem_suballoc(kernel_map, &map_min, kalloc_map_size,
			   FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);
	if (kr != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/* Zones service requests up to at least 16K, or a page if larger. */
	kalloc_max = (PAGE_SIZE < 16 * 1024) ? 16 * 1024 : PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;

	/* Create one zone per power-of-two size class up to kalloc_max. */
	for (idx = 0, elem = 1; elem < kalloc_max; idx++, elem <<= 1) {
		if (elem < KALLOC_MINSIZE) {
			/* Too small to be a real zone; leave the slot empty. */
			k_zone[idx] = 0;
			continue;
		}
		if (elem == KALLOC_MINSIZE)
			first_k_zone = idx;
		k_zone[idx] = zinit(elem, k_zone_max[idx] * elem, elem,
				    k_zone_name[idx]);
	}

	/* Bring up the OSMalloc tag machinery as well. */
	OSMalloc_init();
}
/*
 * kalloc_canblock: core allocator.  Zone-sized requests are rounded up
 * to the next power of two and served from the matching zone; anything
 * >= kalloc_max_prerounded goes straight to kmem_alloc() (blocking only).
 * Returns 0 on failure.
 */
void *
kalloc_canblock(
	vm_size_t size,
	boolean_t canblock)
{
	int zidx;
	vm_size_t rounded;

	/* Large request: bypass the zones entirely. */
	if (size >= kalloc_max_prerounded) {
		void *big;

		/* The kmem path may sleep; refuse if the caller cannot. */
		if (!canblock)
			return(0);
		if (kmem_alloc(kalloc_map, (vm_offset_t *)&big, size) != KERN_SUCCESS)
			big = 0;
		if (big) {
			kalloc_large_inuse++;
			kalloc_large_total += size;
			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(big);
	}

	/* Round up to the next power-of-two zone and allocate from it. */
	for (rounded = KALLOC_MINSIZE, zidx = first_k_zone;
	     rounded < size;
	     rounded <<= 1, zidx++)
		continue;
	assert(rounded < kalloc_max);
	return(zalloc_canblock(k_zone[zidx], canblock));
}
/* kalloc: blocking allocation — may sleep waiting for memory. */
void *
kalloc(
	vm_size_t size)
{
	return kalloc_canblock(size, TRUE);
}
/* kalloc_noblock: non-blocking allocation — returns 0 rather than sleep. */
void *
kalloc_noblock(
	vm_size_t size)
{
	return kalloc_canblock(size, FALSE);
}
/*
 * krealloc: grow the allocation at *addrp from old_size to new_size,
 * updating *addrp in place.  `lock` is the caller's simple lock guarding
 * *addrp; it is held on entry, dropped around blocking allocations, and
 * re-held on exit.  Shrinking is not supported (new_size > old_size).
 */
void
krealloc(
	void **addrp,
	vm_size_t old_size,
	vm_size_t new_size,
	simple_lock_t lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;

	assert(new_size > old_size);

	/* old_size == 0: plain allocation, no data to preserve. */
	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* Old block came from kmem_alloc(): grow it with kmem_realloc(). */
	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {
			if (KERN_SUCCESS != kmem_realloc(kalloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}
			simple_lock(lock);
			/*
			 * BUG FIX: free the OLD range before publishing the
			 * new address.  The previous code stored naddr into
			 * *addrp first and then freed *addrp, releasing the
			 * freshly allocated range (use-after-free) and leaking
			 * the old one.  (Assumes kmem_realloc() does not free
			 * the source range itself — TODO confirm in vm_kern.)
			 */
			kmem_free(kalloc_map, (vm_offset_t)*addrp, old_size);
			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);
			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return;
	}

	/* Find the zone (and rounded size) the old block came from. */
	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* New size still fits in the existing zone element: nothing to do. */
	if (new_size <= allocsize) {
		return;
	}

	/* Need a bigger block: kmem_alloc it if huge, else a bigger zone. */
	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (KERN_SUCCESS != kmem_alloc(kalloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;
		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* Copy the old contents, release the old zone element, publish. */
	bcopy((const char *)*addrp, (char *)naddr, old_size);
	zfree(k_zone[zindex], *addrp);
	*addrp = (void *) naddr;
}
/*
 * kget: non-blocking zone-only allocation (via zget()).
 * Requests too large for the zones are a fatal error.
 */
void *
kget(
	vm_size_t size)
{
	int zidx;
	vm_size_t rounded;

	if (size >= kalloc_max_prerounded)
		panic("kget");

	/* Round up to the next power-of-two zone. */
	for (rounded = KALLOC_MINSIZE, zidx = first_k_zone;
	     rounded < size;
	     rounded <<= 1, zidx++)
		continue;
	assert(rounded < kalloc_max);
	return(zget(k_zone[zidx]));
}
/*
 * kfree: release a kalloc()'d block.  `size` must be the size passed to
 * kalloc() so the block can be routed back to the right zone or, for
 * large blocks, back to the kalloc_map.
 */
void
kfree(
	void *data,
	vm_size_t size)
{
	int zidx;
	vm_size_t rounded;

	/* Large block: it came from kmem_alloc() in kalloc_map. */
	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, (vm_offset_t)data, size);
		kalloc_large_total -= size;
		kalloc_large_inuse--;
		return;
	}

	/* Otherwise locate the power-of-two zone it was drawn from. */
	for (rounded = KALLOC_MINSIZE, zidx = first_k_zone;
	     rounded < size;
	     rounded <<= 1, zidx++)
		continue;
	assert(rounded < kalloc_max);
	zfree(k_zone[zidx], data);
}
#ifdef MACH_BSD
/*
 * kalloc_zone: report which zone would back a request of `size` bytes,
 * or ZONE_NULL when the request would bypass the zones.
 */
zone_t
kalloc_zone(
	vm_size_t size)
{
	int zidx = 0;
	vm_size_t rounded = size;

	if (size <= kalloc_max) {
		/* Round up to the next power-of-two zone. */
		rounded = KALLOC_MINSIZE;
		zidx = first_k_zone;
		while (rounded < size) {
			rounded <<= 1;
			zidx++;
		}
		return (k_zone[zidx]);
	}
	return (ZONE_NULL);
}
#endif
/*
 * kalloc_fake_zone_info: present the large-allocation statistics as if
 * they were a zone, for zone-info reporting.  Element/alloc size are
 * reported as the mean large allocation size.
 */
void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count = kalloc_large_inuse;
	*cur_size = kalloc_large_total;
	*max_size = kalloc_large_max;
	/*
	 * BUG FIX: guard the division — kalloc_large_inuse is zero at boot
	 * and whenever all large blocks have been freed, and dividing by it
	 * was undefined behavior (kernel trap on most architectures).
	 */
	if (kalloc_large_inuse != 0) {
		*elem_size = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size = 0;
		*alloc_size = 0;
	}
	*collectable = 0;
	*exhaustable = 0;
}
/* OSMalloc_init: set up the global tag registry and its guarding lock. */
void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}
/*
 * OSMalloc_Tagalloc: create a new allocation tag named `str`, with one
 * reference held by the caller, and register it on the global tag list.
 * OSMT_PAGEABLE in `flags` marks the tag's allocations as pageable.
 */
OSMallocTag
OSMalloc_Tagalloc(
	const char *str,
	uint32_t flags)
{
	OSMallocTag OSMTag;

	/* NOTE(review): kalloc() can return NULL under memory pressure and
	 * the bzero below would then fault; preserving historical behavior —
	 * confirm whether callers expect a panic here. */
	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));
	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	/*
	 * BUG FIX: strncpy() does not NUL-terminate when the source is
	 * OSMT_MAX_NAME bytes or longer; copy one byte less so the final
	 * (bzero'd) byte always remains a terminator.
	 */
	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME - 1);

	/*
	 * BUG FIX: mark the tag VALID before it becomes visible on the
	 * global list.  Previously the state was set after the unlock, so a
	 * concurrent OSMalloc_Tagref() walking the list could observe a
	 * half-constructed tag (state 0) and panic.
	 */
	OSMTag->OSMT_state = OSMT_VALID;

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);

	return(OSMTag);
}
/* OSMalloc_Tagref: take a reference on a tag; tag must be in VALID state. */
void
OSMalloc_Tagref(
	OSMallocTag tag)
{
	if ((tag->OSMT_state & OSMT_VALID_MASK) != OSMT_VALID)
		panic("OSMalloc_Tagref(): bad state 0x%08X\n",tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}
/*
 * OSMalloc_Tagrele: drop a reference on a tag.  When the last reference
 * goes away and the tag has been released via OSMalloc_Tagfree()
 * (state VALID|RELEASED), unlink it from the global list and free it.
 */
void
OSMalloc_Tagrele(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		/* BUG FIX: message previously named OSMalloc_Tagref(). */
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n",tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		/* Refcount hit zero: only legal if the tag was released. */
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}
/*
 * OSMalloc_Tagfree: release a tag.  Marks it VALID|RELEASED exactly once,
 * then drops the creation reference; the last reference holder destroys it.
 */
void
OSMalloc_Tagfree(
	OSMallocTag tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		/* Last reference: unlink from the registry and free. */
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
}
/*
 * OSMalloc: allocate `size` bytes charged against `tag`.  Page-multiple
 * requests on pageable tags come from pageable kernel memory; everything
 * else goes through kalloc().  Returns NULL on (wired) failure.
 */
void *
OSMalloc(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr=NULL;
	kern_return_t kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	/*
	 * BUG FIX: release the tag reference taken above when the
	 * allocation failed, matching OSMalloc_nowait()/OSMalloc_noblock().
	 * Previously every failed call leaked a reference, so the tag
	 * could never be freed.
	 */
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}
/*
 * OSMalloc_nowait: non-blocking tagged allocation.  Pageable tags are
 * refused outright (their path can block).  Returns NULL on failure.
 */
void *
OSMalloc_nowait(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	/* A failed allocation must not hold a tag reference. */
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}
/*
 * OSMalloc_noblock: non-blocking tagged allocation; same contract as
 * OSMalloc_nowait().  Pageable tags are refused.  Returns NULL on failure.
 */
void *
OSMalloc_noblock(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	/* A failed allocation must not hold a tag reference. */
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}
/*
 * OSFree: release memory obtained from OSMalloc*() with the same size
 * and tag, then drop the tag reference taken at allocation time.
 */
void
OSFree(
	void *addr,
	uint32_t size,
	OSMallocTag tag)
{
	/* Page-multiple pageable allocations came from the kernel map. */
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK))
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}