// nano_malloc_common.c — shared support for the Nano malloc zone (V1 and V2).
#include "internal.h"
#if CONFIG_NANOZONE
// Operating mode for the Nano V2 allocator, selected via the "nanov2_mode"
// boot-arg: inactive (degrade V2 requests to V1), enabled (the default), or
// forced (engage Nano regardless of the usual opt-in).
typedef enum {
NANO_INACTIVE, NANO_ENABLED, NANO_FORCED, } nanov2_mode_t;
// Which Nano implementation (if any) is engaged for this process.
// NOTE(review): nano_version_t/NANO_NONE are presumably declared in
// internal.h — confirm.
nano_version_t _malloc_engaged_nano = NANO_NONE;
// Boot-arg key and its recognized values for selecting the Nano V2 mode.
static const char mode_boot_arg[] = "nanov2_mode";
static const char inactive_mode[] = "inactive"; static const char enabled_mode[] = "enabled"; static const char forced_mode[] = "forced";
// Cap on the number of Nano magazines. Zero means "not configured yet";
// nano_common_configure() resolves it to the physical CPU count.
unsigned int nano_common_max_magazines;
// True when the magazine cap tracks the physical CPU count (see
// nano_common_cpu_number_override_set()).
boolean_t nano_common_max_magazines_is_ncpu;
// Boot-arg key for capping the magazine count.
static const char nano_max_magazines_boot_arg[] = "malloc_nano_max_magazines";
#pragma mark -
#pragma mark Initialization
/*
 * One-time initialization for the Nano allocator family. Decides which Nano
 * implementation (V1, V2, or none) to engage, based on the "nanov2_mode"
 * boot-arg and the MallocNanoZone setting, reads the
 * "malloc_nano_max_magazines" boot-arg, then delegates to the engaged
 * implementation's init function.
 *
 * envp:     user-controlled process environment.
 * apple:    apple[] string vector (NOTE(review): presumably the
 *           kernel/launchd-provided vector — confirm).
 * bootargs: kernel boot-args string.
 */
void
nano_common_init(const char *envp[], const char *apple[], const char *bootargs)
{
	// Decode the "nanov2_mode" boot-arg; default is "enabled" (prefer V2).
	nanov2_mode_t nanov2_mode = NANO_ENABLED;
	const char *p = malloc_common_value_for_key(bootargs, mode_boot_arg);
	if (p) {
		// Prefix comparison: only the first sizeof(...)-1 chars must match.
		if (!strncmp(p, inactive_mode, sizeof(inactive_mode) - 1)) {
			nanov2_mode = NANO_INACTIVE;
		} else if (!strncmp(p, enabled_mode, sizeof(enabled_mode) - 1)) {
			nanov2_mode = NANO_ENABLED;
		} else if (!strncmp(p, forced_mode, sizeof(forced_mode) - 1)) {
			nanov2_mode = NANO_FORCED;
		}
	}
	if (nanov2_mode == NANO_FORCED) {
		// Forced mode: engage Nano unconditionally. The environment may
		// still select V1 explicitly ("V1"/"v1"); anything else gets V2.
		const char *flag = _simple_getenv(envp, "MallocNanoZone");
		if (flag && (flag[0] == 'V' || flag[0] == 'v') && flag[1] == '1') {
			_malloc_engaged_nano = NANO_V1;
		} else {
			_malloc_engaged_nano = NANO_V2;
		}
	} else {
		// Normal path: Nano engages only on request. The apple[] vector is
		// consulted first; the user environment can then override it.
		const char *flag = _simple_getenv(apple, "MallocNanoZone");
		if (flag && flag[0] == '1') {
			// "inactive" mode degrades the request to Nano V1.
			_malloc_engaged_nano = nanov2_mode == NANO_ENABLED ? NANO_V2 : NANO_V1;
		}
		flag = _simple_getenv(envp, "MallocNanoZone");
		if (flag) {
			if (flag[0] == '1') {
				_malloc_engaged_nano = nanov2_mode == NANO_ENABLED ? NANO_V2 : NANO_V1;
			} else if (flag[0] == '0') {
				// Explicit opt-out.
				_malloc_engaged_nano = NANO_NONE;
			} else if (flag[0] == 'V' || flag[0] == 'v') {
				// "V1"/"V2" select a specific implementation, bypassing the
				// nanov2_mode preference. Any other suffix leaves the
				// current value unchanged.
				if (flag[1] == '1') {
					_malloc_engaged_nano = NANO_V1;
				} else if (flag[1] == '2') {
					_malloc_engaged_nano = NANO_V2;
				}
			}
		}
	}
	if (_malloc_engaged_nano) {
		// Optional boot-arg cap on the magazine count. Zero is accepted
		// here and resolved to the CPU count in nano_common_configure().
		char value_buf[256];
		const char *flag = malloc_common_value_for_key_copy(bootargs,
				nano_max_magazines_boot_arg, value_buf, sizeof(value_buf));
		if (flag) {
			const char *endp;
			long value = malloc_common_convert_to_long(flag, &endp);
			// Accept only a fully-parsed, non-negative number.
			if (!*endp && value >= 0) {
				nano_common_max_magazines = (unsigned int)value;
			} else {
				malloc_report(ASL_LEVEL_ERR,
						"malloc_nano_max_magazines must be positive - ignored.\n");
			}
		}
	}
	// Hand off to the implementation-specific initializer, if one is engaged.
	switch (_malloc_engaged_nano) {
	case NANO_V1:
		nano_init(envp, apple, bootargs);
		break;
	case NANO_V2:
		nanov2_init(envp, apple, bootargs);
		break;
	default:
		break;
	}
}
/*
 * Finalizes the Nano magazine count and hands off to the engaged
 * implementation's configure step. The MallocNanoMaxMagazines environment
 * variable overrides the boot-arg value; the result is clamped to the
 * number of physical CPUs.
 */
void
nano_common_configure(void)
{
	// Start from the boot-arg value; zero/unset means one per physical CPU.
	unsigned int mag_count = nano_common_max_magazines;
	if (mag_count == 0) {
		mag_count = phys_ncpus;
	}

	// The environment variable, when present, takes precedence.
	const char *env_value = getenv("MallocNanoMaxMagazines");
	if (env_value) {
		int requested = (int)strtol(env_value, NULL, 0);
		if (requested < 0) {
			malloc_report(ASL_LEVEL_ERR,
					"MallocNanoMaxMagazines must be positive - ignored.\n");
		} else {
			mag_count = requested;
		}
	}

	// Resolve zero (possible via the environment) to the CPU count, and
	// never exceed the number of physical CPUs.
	if (mag_count == 0) {
		mag_count = phys_ncpus;
	} else if (mag_count > phys_ncpus) {
		mag_count = phys_ncpus;
		malloc_report(ASL_LEVEL_ERR,
				"Nano maximum magazines limited to number of physical "
				"CPUs [%d]\n", phys_ncpus);
	}
	nano_common_max_magazines = mag_count;
	if (env_value) {
		malloc_report(ASL_LEVEL_INFO, "Nano maximum magazines set to %d\n",
				nano_common_max_magazines);
	}

	nano_common_cpu_number_override_set();

	// Delegate to whichever implementation is engaged, if any.
	if (_malloc_engaged_nano == NANO_V1) {
		nano_configure();
	} else if (_malloc_engaged_nano == NANO_V2) {
		nanov2_configure();
	}
}
#pragma mark -
#pragma mark VM Helper Functions
/*
 * Allocates at least `size` bytes of page-aligned VM, aligned to
 * 2^align bytes, tagged with `vm_page_label`, at or above `base_addr`
 * (which is only a starting hint — VM_FLAGS_ANYWHERE is used).
 *
 * Returns the allocation address, or NULL on failure (reported through
 * malloc_zone_error() honoring `debug_flags`).
 */
void *
nano_common_allocate_based_pages(size_t size, unsigned char align,
		unsigned debug_flags, int vm_page_label, void *base_addr)
{
	mach_vm_address_t vm_addr;
	mach_vm_size_t allocation_size = round_page(size);
	mach_vm_offset_t allocation_mask = ((mach_vm_offset_t)1 << align) - 1;
	int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(vm_page_label);
	kern_return_t kr;

	if (!allocation_size) {
		// A zero-byte request still gets one page.
		allocation_size = vm_page_size;
	}
	if (allocation_size < size) {
		// round_page() wrapped around: the request is too large to satisfy.
		return NULL;
	}
	// Use the caller's hint as the starting search address, but never 0.
	vm_addr = round_page((mach_vm_address_t)base_addr);
	if (!vm_addr) {
		vm_addr = vm_page_size;
	}
	kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size,
			allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE,
			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr) {
		// Cast to unsigned long: %lu is not guaranteed to match size_t on
		// every platform.
		malloc_zone_error(debug_flags, false, "*** can't allocate pages: "
				"mach_vm_map(size=%lu) failed (error code=%d)\n",
				(unsigned long)size, kr);
		return NULL;
	}
	return (void *)(uintptr_t)vm_addr;
}
/*
 * Reserves `size` bytes of VM at exactly `base` (mask 0 and no
 * VM_FLAGS_ANYWHERE means fixed placement), tagged as Nano memory.
 * Returns TRUE on success; on any failure the region is not left mapped.
 */
boolean_t
nano_common_allocate_vm_space(mach_vm_address_t base, mach_vm_size_t size)
{
	mach_vm_address_t mapped_addr = base;
	kern_return_t kr = mach_vm_map(mach_task_self(), &mapped_addr, size, 0,
			VM_MAKE_TAG(VM_MEMORY_MALLOC_NANO), MEMORY_OBJECT_NULL, 0, FALSE,
			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr == KERN_SUCCESS && mapped_addr == base) {
		return TRUE;
	}
	if (kr == KERN_SUCCESS) {
		// Mapped, but not at the requested address: undo before failing.
		mach_vm_deallocate(mach_task_self(), mapped_addr, size);
	}
	return FALSE;
}
/*
 * Releases pages previously obtained from nano_common_allocate_based_pages().
 * Failure is reported through malloc_zone_error() honoring `debug_flags`.
 */
void
nano_common_deallocate_pages(void *addr, size_t size, unsigned debug_flags)
{
	kern_return_t kr = mach_vm_deallocate(mach_task_self(),
			(mach_vm_address_t)addr, (mach_vm_size_t)size);
	if (kr != KERN_SUCCESS) {
		malloc_zone_error(debug_flags, false, "Can't deallocate_pages at %p\n",
				addr);
	}
}
#pragma mark -
#pragma mark Introspection Helper Functions
/*
 * Default memory reader for same-task introspection: the "remote" address is
 * directly readable in this process, so just hand it back. `task` and `size`
 * are unused in the in-process case.
 */
kern_return_t
nano_common_default_reader(task_t task, vm_address_t address, vm_size_t size,
		void **ptr)
{
	*ptr = (void *)address;
	return KERN_SUCCESS;
}
#pragma mark -
#pragma mark Utility functions
/*
 * Records whether nano_common_max_magazines tracks the physical CPU count:
 * true only when no CPU-number override is active (_os_cpu_number_override
 * is -1) and the configured magazine count equals phys_ncpus.
 *
 * Fix: declare as (void) — in pre-C23 C an empty parameter list leaves the
 * parameters unspecified rather than declaring a prototype.
 */
void
nano_common_cpu_number_override_set(void)
{
	nano_common_max_magazines_is_ncpu = _os_cpu_number_override == -1 &&
			nano_common_max_magazines == phys_ncpus;
}
#endif // CONFIG_NANOZONE