#include "config.h"
#include "FastMalloc.h"
#include "Assertions.h"
#include "CurrentTime.h"
#include <limits>
#if OS(WINDOWS)
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <string.h>
#include <wtf/DataLog.h>
#include <wtf/StdLibExtras.h>
#if OS(DARWIN)
#include <mach/mach_init.h>
#include <malloc/malloc.h>
#endif
#ifndef NO_TCMALLOC_SAMPLES
#ifdef WTF_CHANGES
#define NO_TCMALLOC_SAMPLES
#endif
#endif
#if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
#define FORCE_SYSTEM_MALLOC 0
#else
#define FORCE_SYSTEM_MALLOC 1
#endif
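// The preprocessor logic above selects the allocator: debug builds and ports
// that define USE_SYSTEM_MALLOC fall back to the platform allocator (so leak
// checkers and malloc debugging tools see every allocation), while optimized
// builds use the hardened TCMalloc fork defined later in this file.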
#define ENABLE_TCMALLOC_HARDENING 1
#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1
#ifndef NDEBUG
namespace WTF {
#if OS(WINDOWS)
#ifndef TLS_OUT_OF_INDEXES
#define TLS_OUT_OF_INDEXES 0xffffffff
#endif
static DWORD isForbiddenTlsIndex = TLS_OUT_OF_INDEXES;
static const LPVOID kTlsAllowValue = reinterpret_cast<LPVOID>(0);
static const LPVOID kTlsForbiddenValue = reinterpret_cast<LPVOID>(1);
#if !ASSERT_DISABLED
static bool isForbidden()
{
return (isForbiddenTlsIndex != TLS_OUT_OF_INDEXES) && (TlsGetValue(isForbiddenTlsIndex) == kTlsForbiddenValue);
}
#endif
void fastMallocForbid()
{
if (isForbiddenTlsIndex == TLS_OUT_OF_INDEXES)
    isForbiddenTlsIndex = TlsAlloc(); // A little racy, but close enough for debug-only use.
TlsSetValue(isForbiddenTlsIndex, kTlsForbiddenValue);
}
void fastMallocAllow()
{
if (isForbiddenTlsIndex == TLS_OUT_OF_INDEXES)
    return;
TlsSetValue(isForbiddenTlsIndex, kTlsAllowValue);
}
#else // !OS(WINDOWS)
static pthread_key_t isForbiddenKey;
static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
static void initializeIsForbiddenKey()
{
pthread_key_create(&isForbiddenKey, 0);
}
#if !ASSERT_DISABLED
static bool isForbidden()
{
pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
return !!pthread_getspecific(isForbiddenKey);
}
#endif
void fastMallocForbid()
{
pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
pthread_setspecific(isForbiddenKey, &isForbiddenKey);
}
void fastMallocAllow()
{
pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
pthread_setspecific(isForbiddenKey, 0);
}
#endif // OS(WINDOWS)
} // namespace WTF
#endif // NDEBUG
namespace WTF {
namespace Internal {
#if !ENABLE(WTF_MALLOC_VALIDATION)
WTF_EXPORT_PRIVATE void fastMallocMatchFailed(void*);
#else
COMPILE_ASSERT(((sizeof(ValidationHeader) % sizeof(AllocAlignmentInteger)) == 0), ValidationHeader_must_produce_correct_alignment);
#endif
NO_RETURN_DUE_TO_CRASH void fastMallocMatchFailed(void*)
{
CRASH();
}
} // namespace Internal
void* fastZeroedMalloc(size_t n)
{
void* result = fastMalloc(n);
memset(result, 0, n);
return result;
}
char* fastStrDup(const char* src)
{
size_t len = strlen(src) + 1;
char* dup = static_cast<char*>(fastMalloc(len));
memcpy(dup, src, len);
return dup;
}
TryMallocReturnValue tryFastZeroedMalloc(size_t n)
{
void* result;
if (!tryFastMalloc(n).getValue(result))
return 0;
memset(result, 0, n);
return result;
}
} // namespace WTF
#if FORCE_SYSTEM_MALLOC
#if OS(WINDOWS)
#include <malloc.h>
#endif
namespace WTF {
size_t fastMallocGoodSize(size_t bytes)
{
#if OS(DARWIN)
return malloc_good_size(bytes);
#else
return bytes;
#endif
}
TryMallocReturnValue tryFastMalloc(size_t n)
{
ASSERT(!isForbidden());
#if ENABLE(WTF_MALLOC_VALIDATION)
if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n)
    return 0;
void* result = malloc(n + Internal::ValidationBufferSize);
if (!result)
return 0;
Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
header->m_size = n;
header->m_type = Internal::AllocTypeMalloc;
header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
result = header + 1;
*Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
fastMallocValidate(result);
return result;
#else
return malloc(n);
#endif
}
void* fastMalloc(size_t n)
{
ASSERT(!isForbidden());
#if ENABLE(WTF_MALLOC_VALIDATION)
TryMallocReturnValue returnValue = tryFastMalloc(n);
void* result;
if (!returnValue.getValue(result))
CRASH();
#else
void* result = malloc(n);
#endif
if (!result)
CRASH();
return result;
}
TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
{
ASSERT(!isForbidden());
#if ENABLE(WTF_MALLOC_VALIDATION)
size_t totalBytes = n_elements * element_size;
if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements)
return 0;
TryMallocReturnValue returnValue = tryFastMalloc(totalBytes);
void* result;
if (!returnValue.getValue(result))
return 0;
memset(result, 0, totalBytes);
fastMallocValidate(result);
return result;
#else
return calloc(n_elements, element_size);
#endif
}
void* fastCalloc(size_t n_elements, size_t element_size)
{
ASSERT(!isForbidden());
#if ENABLE(WTF_MALLOC_VALIDATION)
TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
void* result;
if (!returnValue.getValue(result))
CRASH();
#else
void* result = calloc(n_elements, element_size);
#endif
if (!result)
CRASH();
return result;
}
void fastFree(void* p)
{
ASSERT(!isForbidden());
#if ENABLE(WTF_MALLOC_VALIDATION)
if (!p)
return;
fastMallocMatchValidateFree(p, Internal::AllocTypeMalloc);
Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p);
memset(p, 0xCC, header->m_size);
free(header);
#else
free(p);
#endif
}
TryMallocReturnValue tryFastRealloc(void* p, size_t n)
{
ASSERT(!isForbidden());
#if ENABLE(WTF_MALLOC_VALIDATION)
if (p) {
if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n)
    return 0;
fastMallocValidate(p);
Internal::ValidationHeader* result = static_cast<Internal::ValidationHeader*>(realloc(Internal::fastMallocValidationHeader(p), n + Internal::ValidationBufferSize));
if (!result)
return 0;
result->m_size = n;
result = result + 1;
*Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
fastMallocValidate(result);
return result;
} else {
return fastMalloc(n);
}
#else
return realloc(p, n);
#endif
}
void* fastRealloc(void* p, size_t n)
{
ASSERT(!isForbidden());
#if ENABLE(WTF_MALLOC_VALIDATION)
TryMallocReturnValue returnValue = tryFastRealloc(p, n);
void* result;
if (!returnValue.getValue(result))
CRASH();
#else
void* result = realloc(p, n);
#endif
if (!result)
CRASH();
return result;
}
void releaseFastMallocFreeMemory() { }
FastMallocStatistics fastMallocStatistics()
{
FastMallocStatistics statistics = { 0, 0, 0 };
return statistics;
}
size_t fastMallocSize(const void* p)
{
#if ENABLE(WTF_MALLOC_VALIDATION)
return Internal::fastMallocValidationHeader(const_cast<void*>(p))->m_size;
#elif OS(DARWIN)
return malloc_size(p);
#elif OS(WINDOWS)
return _msize(const_cast<void*>(p));
#else
UNUSED_PARAM(p);
return 1;
#endif
}
} // namespace WTF
#if OS(DARWIN)
extern "C" WTF_EXPORT_PRIVATE const int jscore_fastmalloc_introspection = 0;
#endif
#elif defined(USE_BMALLOC) && USE_BMALLOC // FORCE_SYSTEM_MALLOC
#include <bmalloc/bmalloc.h>
namespace WTF {
void* fastMalloc(size_t size)
{
ASSERT(!isForbidden());
return bmalloc::api::malloc(size);
}
void* fastCalloc(size_t numElements, size_t elementSize)
{
    // Guard against numElements * elementSize overflowing, mirroring the
    // overflow check in the system-malloc path above.
    size_t totalBytes = numElements * elementSize;
    if (elementSize && totalBytes / elementSize != numElements)
        CRASH();
    return fastZeroedMalloc(totalBytes);
}
void* fastRealloc(void* object, size_t size)
{
return bmalloc::api::realloc(object, size);
}
void fastFree(void* object)
{
bmalloc::api::free(object);
}
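// fastMallocSize() is a stub in the bmalloc configuration: callers only use
// it for assertions, so a conservative placeholder value of 1 is sufficient.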
size_t fastMallocSize(const void*)
{
return 1;
}
size_t fastMallocGoodSize(size_t size)
{
return size;
}
TryMallocReturnValue tryFastMalloc(size_t size)
{
return fastMalloc(size);
}
TryMallocReturnValue tryFastRealloc(void* p, size_t n)
{
return fastRealloc(p, n);
}
TryMallocReturnValue tryFastCalloc(size_t numElements, size_t elementSize)
{
return fastCalloc(numElements, elementSize);
}
void releaseFastMallocFreeMemory() { }
FastMallocStatistics fastMallocStatistics()
{
FastMallocStatistics statistics = { 0, 0, 0 };
return statistics;
}
} // namespace WTF
#else // FORCE_SYSTEM_MALLOC
#include "TCPackedCache.h"
#include "TCPageMap.h"
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"
#include "ThreadSpecific.h"
#include <algorithm>
#if USE(PTHREADS)
#include <pthread.h>
#endif
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#if HAVE(ERRNO_H)
#include <errno.h>
#endif
#if OS(UNIX)
#include <unistd.h>
#endif
#if OS(WINDOWS)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#endif
#ifdef WTF_CHANGES
#if OS(DARWIN)
#include <wtf/HashSet.h>
#include <wtf/Vector.h>
#endif
#if HAVE(DISPATCH_H)
#include <dispatch/dispatch.h>
#endif
#if OS(DARWIN)
#if defined(__has_include) && __has_include(<System/pthread_machdep.h>)
#include <System/pthread_machdep.h>
#endif
#endif
#if defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0)
#define WTF_USE_PTHREAD_GETSPECIFIC_DIRECT 1
#endif
#ifndef PRIuS
#define PRIuS "zu"
#endif
#if OS(DARWIN)
#if !USE(PTHREAD_GETSPECIFIC_DIRECT)
static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
#else
#define pthread_getspecific(key) _pthread_getspecific_direct(key)
#define pthread_setspecific(key, val) _pthread_setspecific_direct(key, (val))
#endif
#endif
#define DEFINE_VARIABLE(type, name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
type FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
#define DEFINE_int64(name, value, meaning) \
DEFINE_VARIABLE(int64_t, name, value, meaning)
#define DEFINE_double(name, value, meaning) \
DEFINE_VARIABLE(double, name, value, meaning)
namespace WTF {
#define malloc fastMalloc
#define calloc fastCalloc
#define free fastFree
#define realloc fastRealloc
#define MESSAGE LOG_ERROR
#define CHECK_CONDITION ASSERT
#if !OS(DARWIN)
static const char kLLHardeningMask = 0;
#endif
template <unsigned> struct EntropySource;
template <> struct EntropySource<4> {
static uint32_t value()
{
#if OS(DARWIN)
return arc4random();
#else
return static_cast<uint32_t>(static_cast<uintptr_t>(currentTime() * 10000) ^ reinterpret_cast<uintptr_t>(&kLLHardeningMask));
#endif
}
};
template <> struct EntropySource<8> {
static uint64_t value()
{
return EntropySource<4>::value() | (static_cast<uint64_t>(EntropySource<4>::value()) << 32);
}
};
#if ENABLE(TCMALLOC_HARDENING)
enum {
MaskKeyShift = 13
};
static ALWAYS_INLINE uintptr_t internalEntropyValue()
{
static uintptr_t value = EntropySource<sizeof(uintptr_t)>::value() | 1;
ASSERT(value);
return value;
}
#define HARDENING_ENTROPY internalEntropyValue()
#define ROTATE_VALUE(value, amount) (((value) >> (amount)) | ((value) << (sizeof(value) * 8 - (amount))))
#if COMPILER(MSVC)
#define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (reinterpret_cast<decltype(ptr)>(reinterpret_cast<uintptr_t>(ptr)^(ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^entropy)))
#else
#define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (reinterpret_cast<__typeof__(ptr)>(reinterpret_cast<uintptr_t>(ptr)^(ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^entropy)))
#endif
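// Pointer-masking sketch: a pointer field is never stored raw. For a node at
// address `ptr` linking to `next`, the stored word is
//     next ^ (ROTATE_VALUE(ptr, MaskKeyShift) ^ entropy)
// Because XOR is self-inverse, applying the same mask on load recovers `next`,
// while an attacker who overwrites the word with a raw pointer gets a garbage
// (and likely unmapped) address after unmasking.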
static ALWAYS_INLINE uint32_t freedObjectStartPoison()
{
static uint32_t value = EntropySource<sizeof(uint32_t)>::value() | 1;
ASSERT(value);
return value;
}
static ALWAYS_INLINE uint32_t freedObjectEndPoison()
{
static uint32_t value = EntropySource<sizeof(uint32_t)>::value() | 1;
ASSERT(value);
return value;
}
#define PTR_TO_UINT32(ptr) static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr))
#define END_POISON_INDEX(allocationSize) (((allocationSize) - sizeof(uint32_t)) / sizeof(uint32_t))
#define POISON_ALLOCATION(allocation, allocationSize) do { \
ASSERT((allocationSize) >= 2 * sizeof(uint32_t)); \
reinterpret_cast<uint32_t*>(allocation)[0] = 0xbadbeef1; \
reinterpret_cast<uint32_t*>(allocation)[1] = 0xbadbeef3; \
if ((allocationSize) < 4 * sizeof(uint32_t)) \
break; \
reinterpret_cast<uint32_t*>(allocation)[2] = 0xbadbeef5; \
reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] = 0xbadbeef7; \
} while (false)
#define POISON_DEALLOCATION_EXPLICIT(allocation, allocationSize, startPoison, endPoison) do { \
ASSERT((allocationSize) >= 2 * sizeof(uint32_t)); \
reinterpret_cast_ptr<uint32_t*>(allocation)[0] = 0xbadbeef9; \
reinterpret_cast_ptr<uint32_t*>(allocation)[1] = 0xbadbeefb; \
if ((allocationSize) < 4 * sizeof(uint32_t)) \
break; \
reinterpret_cast_ptr<uint32_t*>(allocation)[2] = (startPoison) ^ PTR_TO_UINT32(allocation); \
reinterpret_cast_ptr<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] = (endPoison) ^ PTR_TO_UINT32(allocation); \
} while (false)
#define POISON_DEALLOCATION(allocation, allocationSize) \
POISON_DEALLOCATION_EXPLICIT(allocation, (allocationSize), freedObjectStartPoison(), freedObjectEndPoison())
#define MAY_BE_POISONED(allocation, allocationSize) (((allocationSize) >= 4 * sizeof(uint32_t)) && ( \
(reinterpret_cast<uint32_t*>(allocation)[2] == (freedObjectStartPoison() ^ PTR_TO_UINT32(allocation))) || \
(reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] == (freedObjectEndPoison() ^ PTR_TO_UINT32(allocation))) \
))
#define IS_DEFINITELY_POISONED(allocation, allocationSize) (((allocationSize) < 4 * sizeof(uint32_t)) || ( \
(reinterpret_cast<uint32_t*>(allocation)[2] == (freedObjectStartPoison() ^ PTR_TO_UINT32(allocation))) && \
(reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] == (freedObjectEndPoison() ^ PTR_TO_UINT32(allocation))) \
))
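// Poisoning summary: live allocations are stamped with fixed 0xbadbeefN
// constants, while freed objects get per-boot random poison values XORed with
// the object address. MAY_BE_POISONED errs toward true (either marker
// matches) and guards reuse; IS_DEFINITELY_POISONED errs toward false and
// backs free-list validation.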
#else
#define POISON_ALLOCATION(allocation, allocationSize)
#define POISON_DEALLOCATION(allocation, allocationSize)
#define POISON_DEALLOCATION_EXPLICIT(allocation, allocationSize, startPoison, endPoison)
#define MAY_BE_POISONED(allocation, allocationSize) (false)
#define IS_DEFINITELY_POISONED(allocation, allocationSize) (true)
#define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (((void)entropy), ((void)key), ptr)
#define HARDENING_ENTROPY 0
#endif
typedef uintptr_t Length;
#define K_PAGE_SHIFT_MIN 12
#define K_PAGE_SHIFT_MAX 14
#define K_NUM_CLASSES_MAX 77
static size_t kPageShift = 0;
static size_t kNumClasses = 0;
static size_t kPageSize = 0;
static Length kMaxValidPages = 0;
static const size_t kMaxSize = 32u * 1024;
static const size_t kAlignShift = 3;
static const size_t kAlignment = 1 << kAlignShift;
static const size_t kPageMapBigAllocationThreshold = 128 << 20;
static const size_t kMinSystemAlloc = 1 << (20 - K_PAGE_SHIFT_MAX);
static int num_objects_to_move[K_NUM_CLASSES_MAX];
static const int kMaxFreeListLength = 256;
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 2 << 20;
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
static const size_t kMaxPages = kMinSystemAlloc;
static int primes_list[] = {
32771, 65537, 131101, 262147, 524309, 1048583,
2097169, 4194319, 8388617, 16777259, 33554467 };
#ifdef NO_TCMALLOC_SAMPLES
DEFINE_int64(tcmalloc_sample_parameter, 0,
"Unused: code is compiled with NO_TCMALLOC_SAMPLES");
static size_t sample_period = 0;
#else
DEFINE_int64(tcmalloc_sample_parameter, 262147,
"Twice the approximate gap between sampling actions."
" Must be a prime number. Otherwise will be rounded up to a "
" larger prime number");
static size_t sample_period = 262147;
#endif
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
DEFINE_double(tcmalloc_release_rate, 1,
"Rate at which we release unused memory to the system. "
"Zero means we never release memory back to the system. "
"Increase this flag to return memory faster; decrease it "
"to return memory slower. Reasonable rates are in the "
"range [0,10]");
static const size_t kMaxSmallSize = 1024;
static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128.
static const int add_amount[2] = { 7, 127 + (120 << 7) };
static unsigned char class_array[377];
static inline int ClassIndex(size_t s) {
const int i = (s > kMaxSmallSize);
return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
}
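// A worked example of ClassIndex() above: sizes up to kMaxSmallSize advance
// in 8-byte steps and larger sizes in 128-byte steps, so ClassIndex(8) =
// (8 + 7) >> 3 = 1 and ClassIndex(kMaxSize) = (32768 + 15487) >> 7 = 376,
// which is why class_array has 377 entries.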
static size_t class_to_size[K_NUM_CLASSES_MAX];
static size_t class_to_pages[K_NUM_CLASSES_MAX];
class HardenedSLL {
public:
static ALWAYS_INLINE HardenedSLL create(void* value)
{
HardenedSLL result;
result.m_value = value;
return result;
}
static ALWAYS_INLINE HardenedSLL null()
{
HardenedSLL result;
result.m_value = 0;
return result;
}
ALWAYS_INLINE void setValue(void* value) { m_value = value; }
ALWAYS_INLINE void* value() const { return m_value; }
ALWAYS_INLINE bool operator!() const { return !m_value; }
typedef void* (HardenedSLL::*UnspecifiedBoolType);
ALWAYS_INLINE operator UnspecifiedBoolType() const { return m_value ? &HardenedSLL::m_value : 0; }
bool operator!=(const HardenedSLL& other) const { return m_value != other.m_value; }
bool operator==(const HardenedSLL& other) const { return m_value == other.m_value; }
private:
void* m_value;
};
struct TCEntry {
    HardenedSLL head; // Head of chain of objects.
    HardenedSLL tail; // Tail of chain of objects.
};
#define K_NUM_TRANSFER_ENTRIES_MAX static_cast<int>(K_NUM_CLASSES_MAX)
#define kNumTransferEntries static_cast<int>(kNumClasses)
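// LgFloor() computes floor(log2(n)) by testing shifts of 16, 8, 4, 2 and 1 in
// turn, a five-step binary search over bit positions. For example,
// LgFloor(100) accumulates 4 + 2 = 6, since 2^6 = 64 <= 100 < 128.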
static inline int LgFloor(size_t n) {
int log = 0;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
size_t x = n >> shift;
if (x != 0) {
n = x;
log += shift;
}
}
ASSERT(n == 1);
return log;
}
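// The SLL_* helpers implement the hardened singly-linked free list: each free
// object's first word holds the next pointer, stored masked via
// XOR_MASK_PTR_WITH_KEY so freed memory never contains a usable raw pointer.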
static ALWAYS_INLINE HardenedSLL SLL_Next(HardenedSLL t, uintptr_t entropy) {
void* tValueNext = *(reinterpret_cast<void**>(t.value()));
return HardenedSLL::create(XOR_MASK_PTR_WITH_KEY(tValueNext, t.value(), entropy));
}
static ALWAYS_INLINE void SLL_SetNext(HardenedSLL t, HardenedSLL n, uintptr_t entropy) {
*(reinterpret_cast<void**>(t.value())) = XOR_MASK_PTR_WITH_KEY(n.value(), t.value(), entropy);
}
static ALWAYS_INLINE void SLL_Push(HardenedSLL* list, HardenedSLL element, uintptr_t entropy) {
SLL_SetNext(element, *list, entropy);
*list = element;
}
static ALWAYS_INLINE HardenedSLL SLL_Pop(HardenedSLL *list, uintptr_t entropy) {
HardenedSLL result = *list;
*list = SLL_Next(*list, entropy);
return result;
}
static ALWAYS_INLINE void SLL_PopRange(HardenedSLL* head, int N, HardenedSLL *start, HardenedSLL *end, uintptr_t entropy) {
if (N == 0) {
*start = HardenedSLL::null();
*end = HardenedSLL::null();
return;
}
HardenedSLL tmp = *head;
for (int i = 1; i < N; ++i) {
tmp = SLL_Next(tmp, entropy);
}
*start = *head;
*end = tmp;
*head = SLL_Next(tmp, entropy);
SLL_SetNext(tmp, HardenedSLL::null(), entropy);
}
static ALWAYS_INLINE void SLL_PushRange(HardenedSLL *head, HardenedSLL start, HardenedSLL end, uintptr_t entropy) {
if (!start) return;
SLL_SetNext(end, *head, entropy);
*head = start;
}
static ALWAYS_INLINE size_t SizeClass(size_t size) {
return class_array[ClassIndex(size)];
}
static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
return class_to_size[cl];
}
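// NumMoveSize() picks the batch size used when moving objects between a
// thread cache and the central free list: roughly 64KB worth of objects,
// clamped to the range [2, 32]. For example, 256-byte objects give
// 65536 / 256 = 256, clamped down to 32 per transfer.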
static int NumMoveSize(size_t size) {
if (size == 0) return 0;
int num = static_cast<int>(64.0 * 1024.0 / size);
if (num < 2) num = 2;
if (num > static_cast<int>(0.8 * kMaxFreeListLength))
num = static_cast<int>(0.8 * kMaxFreeListLength);
if (num > 32) num = 32;
return num;
}
static void InitSizeClasses() {
#if OS(DARWIN)
kPageShift = vm_page_shift;
switch (kPageShift) {
case 12:
kNumClasses = 68;
break;
case 14:
kNumClasses = 77;
break;
default:
CRASH();
}
#else
kPageShift = 12;
kNumClasses = 68;
#endif
kPageSize = 1 << kPageShift;
kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
if (ClassIndex(0) < 0) {
MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
CRASH();
}
if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
CRASH();
}
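// Generate the size-class table. The alignment step starts at 8 bytes and
// doubles as sizes grow (capped at 256 bytes) so relative waste stays
// bounded, and each class's span size is grown in whole pages until internal
// fragmentation is at most 1/8 of the span.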
size_t sc = 1; // Next size class to assign.
unsigned char alignshift = kAlignShift;
int last_lg = -1;
for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
int lg = LgFloor(size);
if (lg > last_lg) {
if ((lg >= 7) && (alignshift < 8)) {
alignshift++;
}
last_lg = lg;
}
size_t psize = kPageSize;
while ((psize % size) > (psize >> 3)) {
psize += kPageSize;
}
const size_t my_pages = psize >> kPageShift;
if (sc > 1 && my_pages == class_to_pages[sc-1]) {
const size_t my_objects = (my_pages << kPageShift) / size;
const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
/ class_to_size[sc-1];
if (my_objects == prev_objects) {
class_to_size[sc-1] = size;
continue;
}
}
class_to_pages[sc] = my_pages;
class_to_size[sc] = size;
sc++;
}
if (sc != kNumClasses) {
MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
sc, int(kNumClasses));
CRASH();
}
int next_size = 0;
for (unsigned char c = 1; c < kNumClasses; c++) {
const size_t max_size_in_class = class_to_size[c];
for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
class_array[ClassIndex(s)] = c;
}
next_size = static_cast<int>(max_size_in_class + kAlignment);
}
for (size_t size = 0; size <= kMaxSize; size++) {
const size_t sc = SizeClass(size);
if (sc == 0) {
MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
CRASH();
}
if (sc > 1 && size <= class_to_size[sc-1]) {
MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
"\n", sc, size);
CRASH();
}
if (sc >= kNumClasses) {
MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
CRASH();
}
const size_t s = class_to_size[sc];
if (size > s) {
MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
CRASH();
}
if (s == 0) {
MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
CRASH();
}
}
for (size_t cl = 1; cl < kNumClasses; ++cl) {
num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
}
#ifndef WTF_CHANGES
if (false) {
for (size_t cl = 1; cl < kNumClasses; ++cl) {
const int alloc_size = class_to_pages[cl] << kPageShift;
const int alloc_objs = alloc_size / class_to_size[cl];
const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
const int max_waste = alloc_size - min_used;
MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
int(cl),
int(class_to_size[cl-1] + 1),
int(class_to_size[cl]),
int(class_to_pages[cl] << kPageShift),
max_waste * 100.0 / alloc_size
);
}
}
#endif
}
static uint64_t metadata_system_bytes = 0;
static void* MetaDataAlloc(size_t bytes) {
void* result = TCMalloc_SystemAlloc(bytes, 0);
if (result != NULL) {
metadata_system_bytes += bytes;
}
return result;
}
#if defined(WTF_CHANGES) && OS(DARWIN)
class RemoteMemoryReader;
#endif
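// PageHeapAllocator is a simple arena for allocator metadata (Span records,
// thread caches, stack traces). It carves kAlignedSize chunks out of 32KB
// regions obtained directly from TCMalloc_SystemAlloc, keeps a hardened free
// list of deleted objects, and never returns memory to the system.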
template <class T>
class PageHeapAllocator {
private:
static const size_t kAllocIncrement = 32 << 10;
static const size_t kAlignedSize
= (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
char* free_area_;
size_t free_avail_;
HardenedSLL allocated_regions_;
HardenedSLL free_list_;
int inuse_;
uintptr_t entropy_;
public:
void Init(uintptr_t entropy) {
ASSERT(kAlignedSize <= kAllocIncrement);
inuse_ = 0;
allocated_regions_ = HardenedSLL::null();
free_area_ = NULL;
free_avail_ = 0;
free_list_.setValue(NULL);
entropy_ = entropy;
}
T* New() {
void* result;
if (free_list_) {
result = free_list_.value();
free_list_ = SLL_Next(free_list_, entropy_);
} else {
if (free_avail_ < kAlignedSize) {
char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
if (!new_allocation)
CRASH();
HardenedSLL new_head = HardenedSLL::create(new_allocation);
SLL_SetNext(new_head, allocated_regions_, entropy_);
allocated_regions_ = new_head;
free_area_ = new_allocation + kAlignedSize;
free_avail_ = kAllocIncrement - kAlignedSize;
}
result = free_area_;
free_area_ += kAlignedSize;
free_avail_ -= kAlignedSize;
}
inuse_++;
return reinterpret_cast<T*>(result);
}
void Delete(T* p) {
HardenedSLL new_head = HardenedSLL::create(p);
SLL_SetNext(new_head, free_list_, entropy_);
free_list_ = new_head;
inuse_--;
}
int inuse() const { return inuse_; }
#if defined(WTF_CHANGES) && OS(DARWIN)
template <typename Recorder>
void recordAdministrativeRegions(Recorder&, const RemoteMemoryReader&);
#endif
};
typedef uintptr_t PageID;
static inline Length pages(size_t bytes) {
ASSERT(kPageShift && kNumClasses && kPageSize);
return (bytes >> kPageShift) +
((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}
static size_t AllocationSize(size_t bytes) {
ASSERT(kPageShift && kNumClasses && kPageSize);
if (bytes > kMaxSize) {
ASSERT(bytes <= (kMaxValidPages << kPageShift));
return pages(bytes) << kPageShift;
} else {
return ByteSizeForClass(SizeClass(bytes));
}
}
enum {
kSpanCookieBits = 10,
kSpanCookieMask = (1 << 10) - 1,
kSpanThisShift = 7
};
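// Span cookie scheme: each live Span carries a 10-bit cookie derived from its
// own address bits XORed with a per-boot random value. isValid() recomputes
// the expression and compares it against spanValidationCookie, cheaply
// catching stale or forged Span pointers (see the RELEASE_ASSERT in
// DeleteSpan below).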
static uint32_t spanValidationCookie;
static uint32_t spanInitializerCookie()
{
static uint32_t value = EntropySource<sizeof(uint32_t)>::value() & kSpanCookieMask;
spanValidationCookie = value;
return value;
}
struct Span {
    PageID start;  // Starting page number.
    Length length; // Number of pages in span.
    Span* next(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, this, entropy); }
    Span* remoteNext(const Span* remoteSpanPointer, uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, remoteSpanPointer, entropy); }
    Span* prev(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_prev, this, entropy); }
    void setNext(Span* next, uintptr_t entropy) { m_next = XOR_MASK_PTR_WITH_KEY(next, this, entropy); }
    void setPrev(Span* prev, uintptr_t entropy) { m_prev = XOR_MASK_PTR_WITH_KEY(prev, this, entropy); }
private:
    Span* m_next; // Masked pointer to next span when on a list.
    Span* m_prev; // Masked pointer to previous span when on a list.
public:
    HardenedSLL objects;        // Linked list of free objects.
    unsigned int free : 1;      // Is the span free?
#ifndef NO_TCMALLOC_SAMPLES
    unsigned int sample : 1;    // Sampled object?
#endif
    unsigned int sizeclass : 8; // Size class for small objects (or 0).
    unsigned int refcount : 11; // Number of non-free objects.
    bool decommitted : 1;
void initCookie()
{
m_cookie = ((reinterpret_cast<uintptr_t>(this) >> kSpanThisShift) & kSpanCookieMask) ^ spanInitializerCookie();
}
void clearCookie() { m_cookie = 0; }
bool isValid() const
{
return (((reinterpret_cast<uintptr_t>(this) >> kSpanThisShift) & kSpanCookieMask) ^ m_cookie) == spanValidationCookie;
}
private:
uint32_t m_cookie : kSpanCookieBits;
#undef SPAN_HISTORY
#ifdef SPAN_HISTORY
int nexthistory;
char history[64];
int value[64];
#endif
};
#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
#ifdef SPAN_HISTORY
void Event(Span* span, char op, int v = 0) {
span->history[span->nexthistory] = op;
span->value[span->nexthistory] = v;
span->nexthistory++;
if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
#define Event(s,o,v) ((void) 0)
#endif
static PageHeapAllocator<Span> span_allocator;
static Span* NewSpan(PageID p, Length len) {
Span* result = span_allocator.New();
memset(result, 0, sizeof(*result));
result->start = p;
result->length = len;
result->initCookie();
#ifdef SPAN_HISTORY
result->nexthistory = 0;
#endif
return result;
}
static inline void DeleteSpan(Span* span) {
RELEASE_ASSERT(span->isValid());
#ifndef NDEBUG
memset(span, 0x3f, sizeof(*span));
#endif
span->clearCookie();
span_allocator.Delete(span);
}
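// The DLL_* helpers manage circular doubly-linked span lists threaded through
// Span::m_next/m_prev, which are stored masked (see setNext/setPrev above).
// The list header is a dummy Span, so a list is empty exactly when its next
// pointer refers back to the header itself.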
static inline void DLL_Init(Span* list, uintptr_t entropy) {
list->setNext(list, entropy);
list->setPrev(list, entropy);
}
static inline void DLL_Remove(Span* span, uintptr_t entropy) {
span->prev(entropy)->setNext(span->next(entropy), entropy);
span->next(entropy)->setPrev(span->prev(entropy), entropy);
span->setPrev(NULL, entropy);
span->setNext(NULL, entropy);
}
static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list, uintptr_t entropy) {
return list->next(entropy) == list;
}
static int DLL_Length(const Span* list, uintptr_t entropy) {
int result = 0;
for (Span* s = list->next(entropy); s != list; s = s->next(entropy)) {
result++;
}
return result;
}
#if 0
static void DLL_Print(const char* label, const Span* list) {
MESSAGE("%-10s %p:", label, list);
for (const Span* s = list->next; s != list; s = s->next) {
MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
}
MESSAGE("\n");
}
#endif
static inline void DLL_Prepend(Span* list, Span* span, uintptr_t entropy) {
span->setNext(list->next(entropy), entropy);
span->setPrev(list, entropy);
list->next(entropy)->setPrev(span, entropy);
list->setNext(span, entropy);
}
class TCMalloc_Central_FreeList {
public:
void Init(size_t cl, uintptr_t entropy);
void InsertRange(HardenedSLL start, HardenedSLL end, int N);
void RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N);
size_t length() {
SpinLockHolder h(&lock_);
return counter_;
}
int tc_length() {
SpinLockHolder h(&lock_);
return used_slots_ * num_objects_to_move[size_class_];
}
#ifdef WTF_CHANGES
template <class Finder, class Reader>
void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
{
{
static const ptrdiff_t emptyOffset = reinterpret_cast<const char*>(&empty_) - reinterpret_cast<const char*>(this);
Span* remoteEmpty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + emptyOffset);
Span* remoteSpan = nonempty_.remoteNext(remoteEmpty, entropy_);
for (Span* span = reader(remoteEmpty); span && span != &empty_; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0))
ASSERT(!span->objects);
}
ASSERT(!nonempty_.objects);
static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);
Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
Span* remoteSpan = nonempty_.remoteNext(remoteNonempty, entropy_);
for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0)) {
for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_))) {
finder.visit(nextObject.value());
}
}
for (int slot = 0; slot < used_slots_; ++slot) {
for (HardenedSLL entry = tc_slots_[slot].head; entry; entry.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(entry.value()), entropy_)))
finder.visit(entry.value());
}
}
#endif
uintptr_t entropy() const { return entropy_; }
private:
HardenedSLL FetchFromSpans();
HardenedSLL FetchFromSpansSafe();
void ReleaseListToSpans(HardenedSLL start);
ALWAYS_INLINE void ReleaseToSpans(HardenedSLL object);
ALWAYS_INLINE void Populate();
bool MakeCacheSpace();
static ALWAYS_INLINE bool EvictRandomSizeClass(size_t locked_size_class, bool force);
bool ShrinkCache(int locked_size_class, bool force);
SpinLock lock_;
    size_t size_class_; // My size class.
    Span empty_;        // Dummy header for list of empty spans.
    Span nonempty_;     // Dummy header for list of non-empty spans.
    size_t counter_;    // Number of free objects in cache.
TCEntry tc_slots_[K_NUM_TRANSFER_ENTRIES_MAX];
int32_t used_slots_;
int32_t cache_size_;
uintptr_t entropy_;
};
#if COMPILER(CLANG) && defined(__has_warning)
#pragma clang diagnostic push
#if __has_warning("-Wunused-private-field")
#pragma clang diagnostic ignored "-Wunused-private-field"
#endif
#endif
template <size_t SizeToPad>
class TCMalloc_Central_FreeListPadded_Template : public TCMalloc_Central_FreeList {
private:
char pad[64 - SizeToPad];
};
template <> class TCMalloc_Central_FreeListPadded_Template<0> : public TCMalloc_Central_FreeList {
};
typedef TCMalloc_Central_FreeListPadded_Template<sizeof(TCMalloc_Central_FreeList) % 64> TCMalloc_Central_FreeListPadded;
#if COMPILER(CLANG) && defined(__has_warning)
#pragma clang diagnostic pop
#endif
#if OS(DARWIN)
struct Span;
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
template <typename T> class PageHeapAllocator;
class FastMallocZone {
public:
static void init();
static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
static boolean_t check(malloc_zone_t*) { return true; }
static void print(malloc_zone_t*, boolean_t) { }
static void log(malloc_zone_t*, void*) { }
static void forceLock(malloc_zone_t*) { }
static void forceUnlock(malloc_zone_t*) { }
static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
private:
FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
static size_t size(malloc_zone_t*, const void*);
static void* zoneMalloc(malloc_zone_t*, size_t);
static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
static void zoneFree(malloc_zone_t*, void*);
static void* zoneRealloc(malloc_zone_t*, void*, size_t);
static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
static void zoneDestroy(malloc_zone_t*) { }
malloc_zone_t m_zone;
TCMalloc_PageHeap* m_pageHeap;
TCMalloc_ThreadCache** m_threadHeaps;
TCMalloc_Central_FreeListPadded* m_centralCaches;
PageHeapAllocator<Span>* m_spanAllocator;
PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
};
extern "C" void (*malloc_logger)(uint32_t typeFlags, uintptr_t zone, uintptr_t size, uintptr_t pointer, uintptr_t returnValue, uint32_t numberOfFramesToSkip);
#endif
class MallocHook {
static bool stackLoggingEnabled;
#if OS(DARWIN)
enum StackLoggingType {
StackLoggingTypeAlloc = 2,
StackLoggingTypeDealloc = 4,
};
static void record(uint32_t typeFlags, uintptr_t zone, uintptr_t size, void* pointer, void* returnValue, uint32_t numberOfFramesToSkip)
{
malloc_logger(typeFlags, zone, size, reinterpret_cast<uintptr_t>(pointer), reinterpret_cast<uintptr_t>(returnValue), numberOfFramesToSkip);
}
static NEVER_INLINE void recordAllocation(void* pointer, size_t size)
{
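// Darwin's malloc stack logging expects the newly allocated address in the
// returnValue slot for allocation events.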
record(StackLoggingTypeAlloc, 0, size, 0, pointer, 0);
}
static NEVER_INLINE void recordDeallocation(void* pointer)
{
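// For deallocation events the freed address travels in the size slot.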
record(StackLoggingTypeDealloc, 0, reinterpret_cast<uintptr_t>(pointer), 0, 0, 0);
}
#endif
public:
static void init()
{
#if OS(DARWIN)
stackLoggingEnabled = malloc_logger;
#endif
}
#if OS(DARWIN)
static ALWAYS_INLINE void InvokeNewHook(void* pointer, size_t size)
{
if (UNLIKELY(stackLoggingEnabled))
recordAllocation(pointer, size);
}
static ALWAYS_INLINE void InvokeDeleteHook(void* pointer)
{
if (UNLIKELY(stackLoggingEnabled))
recordDeallocation(pointer);
}
#else
static ALWAYS_INLINE void InvokeNewHook(void*, size_t) { }
static ALWAYS_INLINE void InvokeDeleteHook(void*) { }
#endif
};
bool MallocHook::stackLoggingEnabled = false;
#endif // WTF_CHANGES
#ifndef WTF_CHANGES
#ifdef NO_TCMALLOC_SAMPLES
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <google/stacktrace.h>
#endif
#endif
#if defined(HAVE_TLS)
static bool kernel_supports_tls = false; // Be conservative.
static inline bool KernelSupportsTLS() {
return kernel_supports_tls;
}
# if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
static void CheckIfKernelSupportsTLS() {
kernel_supports_tls = false;
}
# else
# include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
static void CheckIfKernelSupportsTLS() {
    struct utsname buf;
    if (uname(&buf) != 0) { // Should be impossible.
        MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
        kernel_supports_tls = false;
    } else if (strcasecmp(buf.sysname, "linux") == 0) {
        // The Linux case: the first kernel to support TLS was 2.6.0.
        if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
            kernel_supports_tls = false;
        else if (buf.release[0] == '2' && buf.release[1] == '.' &&
                 buf.release[2] >= '0' && buf.release[2] < '6' &&
                 buf.release[3] == '.') // 2.0 - 2.5
            kernel_supports_tls = false;
        else
            kernel_supports_tls = true;
    } else {
        // Some other kernel; be optimistic.
        kernel_supports_tls = true;
    }
}
# endif // HAVE_DECL_UNAME
#endif // HAVE_TLS
#ifndef __THROW // I guess we're not on a glibc system
# define __THROW // __THROW is just an optimization, so ok to make it ""
#endif
static const int kMaxStackDepth = 31;
struct StackTrace {
    uintptr_t size;  // Size of object.
    uintptr_t depth; // Number of PC values stored in array below.
    void* stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
static Span sampled_objects;
template <int BITS> class MapSelector {
public:
typedef TCMalloc_PageMap3<BITS-K_PAGE_SHIFT_MIN> Type;
typedef PackedCache<BITS, uint64_t> CacheType;
};
#if defined(WTF_CHANGES)
#if CPU(X86_64) || CPU(ARM64)
static const size_t kBitsUnusedOn64Bit = 16;
#else
static const size_t kBitsUnusedOn64Bit = 0;
#endif
template <> class MapSelector<64> {
public:
typedef TCMalloc_PageMap3<64 - K_PAGE_SHIFT_MIN - kBitsUnusedOn64Bit> Type;
typedef PackedCache<64, uint64_t> CacheType;
};
#endif
template <> class MapSelector<32> {
public:
typedef TCMalloc_PageMap2<32 - K_PAGE_SHIFT_MIN> Type;
typedef PackedCache<32 - K_PAGE_SHIFT_MIN, uint16_t> CacheType;
};
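// MapSelector picks the PageID -> Span* radix-tree depth from the address
// width: a two-level map for 32-bit, a three-level map for 64-bit (less the
// 16 high bits unused by current x86-64/ARM64 hardware). The PackedCache in
// front of it memoizes PageID -> size-class lookups.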
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
static const int kScavengeDelayInSeconds = 2;
static const float kScavengePercentage = .5f;
static const int kMinSpanListsWithSpans = 32;
static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f+kMinSpanListsWithSpans) / 2.0f);
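// This is the triangular-number estimate of the pages held when each of the
// first kMinSpanListsWithSpans span lists contains one span:
// 32 * (1 + 32) / 2 = 528 pages stay committed before scavenging kicks in.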
#endif
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
class TCMalloc_PageHeap {
public:
void init();
Span* New(Length n);
void Delete(Span* span);
void RegisterSizeClass(Span* span, size_t sc);
Span* Split(Span* span, Length n);
inline Span* GetDescriptor(PageID p) const {
return reinterpret_cast<Span*>(pagemap_.get(p));
}
#ifdef WTF_CHANGES
inline Span* GetDescriptorEnsureSafe(PageID p)
{
pagemap_.Ensure(p, 1);
return GetDescriptor(p);
}
size_t ReturnedBytes() const;
#endif
#ifndef WTF_CHANGES
void Dump(TCMalloc_Printer* out);
#endif
inline uint64_t SystemBytes() const { return system_bytes_; }
uint64_t FreeBytes() const {
ASSERT(kPageShift && kNumClasses && kPageSize);
return (static_cast<uint64_t>(free_pages_) << kPageShift);
}
bool Check();
size_t CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted);
void ReleaseFreePages();
void ReleaseFreeList(Span*, Span*);
size_t GetSizeClassIfCached(PageID p) const {
return pagemap_cache_.GetOrDefault(p, 0);
}
void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
private:
typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
PageMap pagemap_;
mutable PageMapCache pagemap_cache_;
struct SpanList {
Span normal;
Span returned;
};
SpanList large_;
SpanList free_[kMaxPages];
uintptr_t free_pages_;
uintptr_t entropy_;
uint64_t system_bytes_;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
Length free_committed_pages_;
Length min_free_committed_pages_since_last_scavenge_;
#endif
bool GrowHeap(Length n);
void Carve(Span* span, Length n, bool released);
void RecordSpan(Span* span) {
pagemap_.set(span->start, span);
if (span->length > 1) {
pagemap_.set(span->start + span->length - 1, span);
}
}
Span* AllocLarge(Length n);
#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void IncrementalScavenge(Length n);
#endif
int64_t scavenge_counter_;
size_t scavenge_index_;
#if defined(WTF_CHANGES) && OS(DARWIN)
friend class FastMallocZone;
#endif
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void initializeScavenger();
ALWAYS_INLINE void signalScavenger();
void scavenge();
ALWAYS_INLINE bool shouldScavenge() const;
#if HAVE(DISPATCH_H) || OS(WINDOWS)
void periodicScavenge();
ALWAYS_INLINE bool isScavengerSuspended();
ALWAYS_INLINE void scheduleScavenger();
ALWAYS_INLINE void rescheduleScavenger();
ALWAYS_INLINE void suspendScavenger();
#endif
#if HAVE(DISPATCH_H)
dispatch_queue_t m_scavengeQueue;
dispatch_source_t m_scavengeTimer;
bool m_scavengingSuspended;
#elif OS(WINDOWS)
static void CALLBACK scavengerTimerFired(void*, BOOLEAN);
HANDLE m_scavengeQueueTimer;
#else
static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
NO_RETURN void scavengerThread();
bool m_scavengeThreadActive;
pthread_mutex_t m_scavengeMutex;
pthread_cond_t m_scavengeCondition;
#endif
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
};
void TCMalloc_PageHeap::init()
{
ASSERT(kPageShift && kNumClasses && kPageSize);
pagemap_.init(MetaDataAlloc);
pagemap_cache_ = PageMapCache(0);
free_pages_ = 0;
system_bytes_ = 0;
entropy_ = HARDENING_ENTROPY;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
free_committed_pages_ = 0;
min_free_committed_pages_since_last_scavenge_ = 0;
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
scavenge_counter_ = 0;
scavenge_index_ = kMaxPages-1;
ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits));
DLL_Init(&large_.normal, entropy_);
DLL_Init(&large_.returned, entropy_);
for (size_t i = 0; i < kMaxPages; i++) {
DLL_Init(&free_[i].normal, entropy_);
DLL_Init(&free_[i].returned, entropy_);
}
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
initializeScavenger();
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
}
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
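// Three scavenger back-ends follow: a libdispatch timer source on Darwin, a
// timer-queue timer on Windows, and a dedicated pthread elsewhere.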
#if HAVE(DISPATCH_H)
void TCMalloc_PageHeap::initializeScavenger()
{
m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocScavenger", NULL);
m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
uint64_t scavengeDelayInNanoseconds = kScavengeDelayInSeconds * NSEC_PER_SEC;
dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, scavengeDelayInNanoseconds);
dispatch_source_set_timer(m_scavengeTimer, startTime, scavengeDelayInNanoseconds, scavengeDelayInNanoseconds / 10);
dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
m_scavengingSuspended = true;
}
ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
{
ASSERT(pageheap_lock.IsHeld());
return m_scavengingSuspended;
}
ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
{
ASSERT(pageheap_lock.IsHeld());
m_scavengingSuspended = false;
dispatch_resume(m_scavengeTimer);
}
ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
{
}
ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
{
ASSERT(pageheap_lock.IsHeld());
m_scavengingSuspended = true;
dispatch_suspend(m_scavengeTimer);
}
#elif OS(WINDOWS)
void TCMalloc_PageHeap::scavengerTimerFired(void* context, BOOLEAN)
{
static_cast<TCMalloc_PageHeap*>(context)->periodicScavenge();
}
void TCMalloc_PageHeap::initializeScavenger()
{
m_scavengeQueueTimer = 0;
}
ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
{
ASSERT(pageheap_lock.IsHeld());
return !m_scavengeQueueTimer;
}
ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
{
ASSERT(pageheap_lock.IsHeld());
CreateTimerQueueTimer(&m_scavengeQueueTimer, 0, scavengerTimerFired, this, kScavengeDelayInSeconds * 1000, 0, WT_EXECUTEONLYONCE);
}
ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
{
suspendScavenger();
scheduleScavenger();
}
ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
{
ASSERT(pageheap_lock.IsHeld());
HANDLE scavengeQueueTimer = m_scavengeQueueTimer;
m_scavengeQueueTimer = 0;
DeleteTimerQueueTimer(0, scavengeQueueTimer, 0);
}
#else
void TCMalloc_PageHeap::initializeScavenger()
{
#if !defined(PTHREAD_MUTEX_NORMAL) || PTHREAD_MUTEX_NORMAL == PTHREAD_MUTEX_DEFAULT
pthread_mutex_init(&m_scavengeMutex, 0);
#else
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
pthread_mutex_init(&m_scavengeMutex, &attr);
pthread_mutexattr_destroy(&attr);
#endif
pthread_cond_init(&m_scavengeCondition, 0);
m_scavengeThreadActive = true;
pthread_t thread;
pthread_create(&thread, 0, runScavengerThread, this);
}
void* TCMalloc_PageHeap::runScavengerThread(void* context)
{
static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
#if (COMPILER(MSVC) || COMPILER(SUNCC))
return 0;
#endif
}
ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
{
ASSERT(pageheap_lock.IsHeld());
if (!m_scavengeThreadActive && shouldScavenge())
pthread_cond_signal(&m_scavengeCondition);
}
#endif
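// scavenge() decommits up to kScavengePercentage of the minimum number of
// free committed pages observed since the last scavenge, never dropping below
// kMinimumFreeCommittedPageCount. It walks the span lists from largest to
// smallest so big, cold spans are returned to the system first.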
void TCMalloc_PageHeap::scavenge()
{
ASSERT(kPageShift && kNumClasses && kPageSize);
size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
Length lastFreeCommittedPages = free_committed_pages_;
while (free_committed_pages_ > targetPageCount) {
ASSERT(Check());
for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCount; i--) {
SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
size_t length = DLL_Length(&slist->normal, entropy_);
size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal, entropy_) && free_committed_pages_ > targetPageCount; j++) {
Span* s = slist->normal.prev(entropy_);
DLL_Remove(s, entropy_);
ASSERT(!s->decommitted);
if (!s->decommitted) {
TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
static_cast<size_t>(s->length << kPageShift));
ASSERT(free_committed_pages_ >= s->length);
free_committed_pages_ -= s->length;
s->decommitted = true;
}
DLL_Prepend(&slist->returned, s, entropy_);
}
}
if (lastFreeCommittedPages == free_committed_pages_)
break;
lastFreeCommittedPages = free_committed_pages_;
}
min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
}
ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const
{
return free_committed_pages_ > kMinimumFreeCommittedPageCount;
}
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
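// New(n) is the page-heap allocation path: find the first free span of at
// least n pages (exact size class first, then larger ones), fall back to a
// best-fit search of the large lists, and finally grow the heap from the
// system and retry.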
inline Span* TCMalloc_PageHeap::New(Length n) {
ASSERT(Check());
ASSERT(n > 0);
for (Length s = n; s < kMaxPages; s++) {
Span* ll = NULL;
bool released = false;
if (!DLL_IsEmpty(&free_[s].normal, entropy_)) {
ll = &free_[s].normal;
} else if (!DLL_IsEmpty(&free_[s].returned, entropy_)) {
ll = &free_[s].returned;
released = true;
} else {
continue;
}
Span* result = ll->next(entropy_);
Carve(result, n, released);
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(free_committed_pages_ >= n);
free_committed_pages_ -= n;
if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(Check());
free_pages_ -= n;
return result;
}
Span* result = AllocLarge(n);
if (result != NULL) {
ASSERT_SPAN_COMMITTED(result);
return result;
}
if (!GrowHeap(n)) {
ASSERT(Check());
return NULL;
}
return New(n);
}
Span* TCMalloc_PageHeap::AllocLarge(Length n) {
bool from_released = false;
Span *best = NULL;
for (Span* span = large_.normal.next(entropy_);
span != &large_.normal;
span = span->next(entropy_)) {
if (span->length >= n) {
if ((best == NULL)
|| (span->length < best->length)
|| ((span->length == best->length) && (span->start < best->start))) {
best = span;
from_released = false;
}
}
}
for (Span* span = large_.returned.next(entropy_);
span != &large_.returned;
span = span->next(entropy_)) {
if (span->length >= n) {
if ((best == NULL)
|| (span->length < best->length)
|| ((span->length == best->length) && (span->start < best->start))) {
best = span;
from_released = true;
}
}
}
if (best != NULL) {
Carve(best, n, from_released);
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(free_committed_pages_ >= n);
free_committed_pages_ -= n;
if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(Check());
free_pages_ -= n;
return best;
}
return NULL;
}
Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
ASSERT(0 < n);
ASSERT(n < span->length);
ASSERT(!span->free);
ASSERT(span->sizeclass == 0);
Event(span, 'T', n);
const Length extra = span->length - n;
Span* leftover = NewSpan(span->start + n, extra);
Event(leftover, 'U', extra);
RecordSpan(leftover);
pagemap_.set(span->start + n - 1, span);
span->length = n;
return leftover;
}
inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
ASSERT(kPageShift && kNumClasses && kPageSize);
ASSERT(n > 0);
DLL_Remove(span, entropy_);
span->free = 0;
Event(span, 'A', n);
if (released) {
ASSERT(span->decommitted);
TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
span->decommitted = false;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
free_committed_pages_ += span->length;
#endif
}
const int extra = static_cast<int>(span->length - n);
ASSERT(extra >= 0);
if (extra > 0) {
Span* leftover = NewSpan(span->start + n, extra);
leftover->free = 1;
leftover->decommitted = false;
Event(leftover, 'S', extra);
RecordSpan(leftover);
SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
Span* dst = &listpair->normal;
DLL_Prepend(dst, leftover, entropy_);
span->length = n;
pagemap_.set(span->start + n - 1, span);
}
}
static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
{
ASSERT(kPageShift && kNumClasses && kPageSize);
if (destination->decommitted && !other->decommitted) {
TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
static_cast<size_t>(other->length << kPageShift));
} else if (other->decommitted && !destination->decommitted) {
TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
static_cast<size_t>(destination->length << kPageShift));
destination->decommitted = true;
}
}
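// Delete() returns a span to the free lists, coalescing it with free
// neighbors on both sides. mergeDecommittedStates() above keeps the combined
// span's committed state consistent by decommitting the committed half when
// the two halves disagree.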
inline void TCMalloc_PageHeap::Delete(Span* span) {
ASSERT(Check());
ASSERT(!span->free);
ASSERT(span->length > 0);
ASSERT(GetDescriptor(span->start) == span);
ASSERT(GetDescriptor(span->start + span->length - 1) == span);
span->sizeclass = 0;
#ifndef NO_TCMALLOC_SAMPLES
span->sample = 0;
#endif
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
Length neighboringCommittedSpansLength = 0;
#endif
const PageID p = span->start;
const Length n = span->length;
Span* prev = GetDescriptor(p-1);
if (prev != NULL && prev->free) {
ASSERT(prev->start + prev->length == p);
const Length len = prev->length;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
if (!prev->decommitted)
neighboringCommittedSpansLength += len;
#endif
mergeDecommittedStates(span, prev);
DLL_Remove(prev, entropy_);
DeleteSpan(prev);
span->start -= len;
span->length += len;
pagemap_.set(span->start, span);
Event(span, 'L', len);
}
Span* next = GetDescriptor(p+n);
if (next != NULL && next->free) {
ASSERT(next->start == p+n);
const Length len = next->length;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
if (!next->decommitted)
neighboringCommittedSpansLength += len;
#endif
mergeDecommittedStates(span, next);
DLL_Remove(next, entropy_);
DeleteSpan(next);
span->length += len;
pagemap_.set(span->start + span->length - 1, span);
Event(span, 'R', len);
}
Event(span, 'D', span->length);
span->free = 1;
if (span->decommitted) {
if (span->length < kMaxPages)
DLL_Prepend(&free_[span->length].returned, span, entropy_);
else
DLL_Prepend(&large_.returned, span, entropy_);
} else {
if (span->length < kMaxPages)
DLL_Prepend(&free_[span->length].normal, span, entropy_);
else
DLL_Prepend(&large_.normal, span, entropy_);
}
free_pages_ += n;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
if (span->decommitted) {
free_committed_pages_ -= neighboringCommittedSpansLength;
if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
} else {
free_committed_pages_ += n;
}
signalScavenger();
#else
IncrementalScavenge(n);
#endif
ASSERT(Check());
}
#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
ASSERT(kPageShift && kNumClasses && kPageSize);
scavenge_counter_ -= n;
if (scavenge_counter_ >= 0) return;
#if PLATFORM(IOS)
static const size_t kDefaultReleaseDelay = 64;
#else
static const size_t kDefaultReleaseDelay = 1 << 8;
#endif
size_t index = scavenge_index_ + 1;
uintptr_t entropy = entropy_;
for (size_t i = 0; i < kMaxPages+1; i++) {
if (index > kMaxPages) index = 0;
SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
if (!DLL_IsEmpty(&slist->normal, entropy)) {
Span* s = slist->normal.prev(entropy);
DLL_Remove(s, entropy_);
TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
static_cast<size_t>(s->length << kPageShift));
s->decommitted = true;
DLL_Prepend(&slist->returned, s, entropy);
#if PLATFORM(IOS)
scavenge_counter_ = std::max<size_t>(16UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
#else
scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
#endif
if (index == kMaxPages && !DLL_IsEmpty(&slist->normal, entropy))
scavenge_index_ = index - 1;
else
scavenge_index_ = index;
return;
}
index++;
}
scavenge_counter_ = kDefaultReleaseDelay;
}
#endif
void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
ASSERT(!span->free);
ASSERT(GetDescriptor(span->start) == span);
ASSERT(GetDescriptor(span->start+span->length-1) == span);
Event(span, 'C', sc);
span->sizeclass = static_cast<unsigned int>(sc);
for (Length i = 1; i < span->length-1; i++) {
pagemap_.set(span->start+i, span);
}
}
#ifdef WTF_CHANGES
size_t TCMalloc_PageHeap::ReturnedBytes() const {
ASSERT(kPageShift && kNumClasses && kPageSize);
size_t result = 0;
for (unsigned s = 0; s < kMaxPages; s++) {
const int r_length = DLL_Length(&free_[s].returned, entropy_);
unsigned r_pages = s * r_length;
result += r_pages << kPageShift;
}
for (Span* s = large_.returned.next(entropy_); s != &large_.returned; s = s->next(entropy_))
result += s->length << kPageShift;
return result;
}
#endif
#ifndef WTF_CHANGES
static double PagesToMB(uint64_t pages) {
ASSERT(kPageShift && kNumClasses && kPageSize);
return (pages << kPageShift) / 1048576.0;
}
void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
int nonempty_sizes = 0;
for (int s = 0; s < kMaxPages; s++) {
if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
nonempty_sizes++;
}
}
out->printf("------------------------------------------------\n");
out->printf("PageHeap: %d sizes; %6.1f MB free\n",
nonempty_sizes, PagesToMB(free_pages_));
out->printf("------------------------------------------------\n");
uint64_t total_normal = 0;
uint64_t total_returned = 0;
for (int s = 0; s < kMaxPages; s++) {
const int n_length = DLL_Length(&free_[s].normal);
const int r_length = DLL_Length(&free_[s].returned);
if (n_length + r_length > 0) {
uint64_t n_pages = s * n_length;
uint64_t r_pages = s * r_length;
total_normal += n_pages;
total_returned += r_pages;
out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
"; unmapped: %6.1f MB; %6.1f MB cum\n",
s,
(n_length + r_length),
PagesToMB(n_pages + r_pages),
PagesToMB(total_normal + total_returned),
PagesToMB(r_pages),
PagesToMB(total_returned));
}
}
uint64_t n_pages = 0;
uint64_t r_pages = 0;
int n_spans = 0;
int r_spans = 0;
out->printf("Normal large spans:\n");
for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
s->length, PagesToMB(s->length));
n_pages += s->length;
n_spans++;
}
out->printf("Unmapped large spans:\n");
for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
s->length, PagesToMB(s->length));
r_pages += s->length;
r_spans++;
}
total_normal += n_pages;
total_returned += r_pages;
out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
"; unmapped: %6.1f MB; %6.1f MB cum\n",
(n_spans + r_spans),
PagesToMB(n_pages + r_pages),
PagesToMB(total_normal + total_returned),
PagesToMB(r_pages),
PagesToMB(total_returned));
}
#endif
bool TCMalloc_PageHeap::GrowHeap(Length n) {
ASSERT(kPageShift && kNumClasses && kPageSize);
ASSERT(kMaxPages >= kMinSystemAlloc);
if (n > kMaxValidPages) return false;
Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
size_t actual_size;
void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
if (ptr == NULL) {
if (n < ask) {
ask = n;
ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
}
if (ptr == NULL) return false;
}
ask = actual_size >> kPageShift;
uint64_t old_system_bytes = system_bytes_;
system_bytes_ += (ask << kPageShift);
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
ASSERT(p > 0);
if (old_system_bytes < kPageMapBigAllocationThreshold
&& system_bytes_ >= kPageMapBigAllocationThreshold) {
pagemap_.PreallocateMoreMemory();
}
if (pagemap_.Ensure(p-1, ask+2)) {
Span* span = NewSpan(p, ask);
RecordSpan(span);
Delete(span);
ASSERT(Check());
return true;
} else {
return false;
}
}
bool TCMalloc_PageHeap::Check() {
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
size_t totalFreeCommitted = 0;
#endif
ASSERT(free_[0].normal.next(entropy_) == &free_[0].normal);
ASSERT(free_[0].returned.next(entropy_) == &free_[0].returned);
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
totalFreeCommitted = CheckList(&large_.normal, kMaxPages, 1000000000, false);
#else
CheckList(&large_.normal, kMaxPages, 1000000000, false);
#endif
CheckList(&large_.returned, kMaxPages, 1000000000, true);
for (Length s = 1; s < kMaxPages; s++) {
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
totalFreeCommitted += CheckList(&free_[s].normal, s, s, false);
#else
CheckList(&free_[s].normal, s, s, false);
#endif
CheckList(&free_[s].returned, s, s, true);
}
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(totalFreeCommitted == free_committed_pages_);
#endif
return true;
}
#if ASSERT_DISABLED
size_t TCMalloc_PageHeap::CheckList(Span*, Length, Length, bool) {
return 0;
}
#else
size_t TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted) {
size_t freeCount = 0;
for (Span* s = list->next(entropy_); s != list; s = s->next(entropy_)) {
CHECK_CONDITION(s->free);
CHECK_CONDITION(s->length >= min_pages);
CHECK_CONDITION(s->length <= max_pages);
CHECK_CONDITION(GetDescriptor(s->start) == s);
CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
CHECK_CONDITION(s->decommitted == decommitted);
freeCount += s->length;
}
return freeCount;
}
#endif
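// Decommit every span on |list|, moving it to the corresponding |returned|
// list. Spans are popped from the tail of the list, and the committed-page
// accounting is updated in one step at the end.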
void TCMalloc_PageHeap::ReleaseFreeList(Span* list, Span* returned) {
ASSERT(kPageShift && kNumClasses && kPageSize);
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
size_t freePageReduction = 0;
#endif
while (!DLL_IsEmpty(list, entropy_)) {
Span* s = list->prev(entropy_);
DLL_Remove(s, entropy_);
s->decommitted = true;
DLL_Prepend(returned, s, entropy_);
TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
static_cast<size_t>(s->length << kPageShift));
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
freePageReduction += s->length;
#endif
}
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
free_committed_pages_ -= freePageReduction;
if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
#endif
}
void TCMalloc_PageHeap::ReleaseFreePages() {
for (Length s = 0; s < kMaxPages; s++) {
ReleaseFreeList(&free_[s].normal, &free_[s].returned);
}
ReleaseFreeList(&large_.normal, &large_.returned);
ASSERT(Check());
}
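// A per-thread free list for one size class. The list is threaded through
// the free objects themselves; with TCMALLOC_HARDENING the embedded next
// pointers are XOR-masked (via SLL_Next/SLL_SetNext) with a per-heap entropy
// value (roughly: stored = real ^ key), so a stray heap overwrite cannot
// forge a usable link. lowater_ records the minimum list length seen since
// the last Scavenge() pass, which uses it to decide how many objects can
// safely be returned to the central cache.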
class TCMalloc_ThreadCache_FreeList {
private:
HardenedSLL list_; // Linked list of nodes
uint16_t length_; // Current length
uint16_t lowater_; // Low water mark for list length
uintptr_t entropy_; // Entropy used to XOR-mask the list links
public:
void Init(uintptr_t entropy) {
list_.setValue(NULL);
length_ = 0;
lowater_ = 0;
entropy_ = entropy;
#if ENABLE(TCMALLOC_HARDENING)
ASSERT(entropy_);
#endif
}
int length() const {
return length_;
}
bool empty() const {
return !list_;
}
int lowwatermark() const { return lowater_; }
void clear_lowwatermark() { lowater_ = length_; }
ALWAYS_INLINE void Push(HardenedSLL ptr) {
SLL_Push(&list_, ptr, entropy_);
length_++;
}
void PushRange(int N, HardenedSLL start, HardenedSLL end) {
SLL_PushRange(&list_, start, end, entropy_);
length_ = length_ + static_cast<uint16_t>(N);
}
void PopRange(int N, HardenedSLL* start, HardenedSLL* end) {
SLL_PopRange(&list_, N, start, end, entropy_);
ASSERT(length_ >= N);
length_ = length_ - static_cast<uint16_t>(N);
if (length_ < lowater_) lowater_ = length_;
}
ALWAYS_INLINE void* Pop() {
ASSERT(list_);
length_--;
if (length_ < lowater_) lowater_ = length_;
return SLL_Pop(&list_, entropy_).value();
}
NEVER_INLINE void Validate(HardenedSLL missing, size_t size) {
HardenedSLL node = list_;
UNUSED_PARAM(size);
while (node) {
RELEASE_ASSERT(node != missing);
RELEASE_ASSERT(IS_DEFINITELY_POISONED(node.value(), size));
node = SLL_Next(node, entropy_);
}
}
#ifdef WTF_CHANGES
template <class Finder, class Reader>
void enumerateFreeObjects(Finder& finder, const Reader& reader)
{
for (HardenedSLL nextObject = list_; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_)))
finder.visit(nextObject.value());
}
#endif
};
class TCMalloc_ThreadCache {
private:
typedef TCMalloc_ThreadCache_FreeList FreeList;
#if OS(WINDOWS)
typedef DWORD ThreadIdentifier;
#else
typedef pthread_t ThreadIdentifier;
#endif
size_t size_; // Combined size of objects on this cache's free lists
ThreadIdentifier tid_; // Which thread owns this cache
bool in_setspecific_; // Currently calling pthread_setspecific()?
FreeList list_[K_NUM_CLASSES_MAX]; // One free list per size class
uint32_t rnd_; // Cheap pseudo-random number generator state
size_t bytes_until_sample_; // Bytes remaining until the next sampled allocation
uintptr_t entropy_;
static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid, uintptr_t entropy);
static void DestroyThreadCache(void* ptr);
public:
TCMalloc_ThreadCache* next_;
TCMalloc_ThreadCache* prev_;
void Init(ThreadIdentifier tid, uintptr_t entropy);
void Cleanup();
int freelist_length(size_t cl) const { return list_[cl].length(); }
size_t Size() const { return size_; }
ALWAYS_INLINE void* Allocate(size_t size);
void Deallocate(HardenedSLL ptr, size_t size_class);
ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize);
void ReleaseToCentralCache(size_t cl, int N);
void Scavenge();
void Print() const;
bool SampleAllocation(size_t k);
void PickNextSample(size_t k);
static void InitModule();
static void InitTSD();
static TCMalloc_ThreadCache* GetThreadHeap();
static TCMalloc_ThreadCache* GetCache();
static TCMalloc_ThreadCache* GetCacheIfPresent();
static TCMalloc_ThreadCache* CreateCacheIfNecessary();
static void DeleteCache(TCMalloc_ThreadCache* heap);
static void BecomeIdle();
static void RecomputeThreadCacheSize();
#ifdef WTF_CHANGES
template <class Finder, class Reader>
void enumerateFreeObjects(Finder& finder, const Reader& reader)
{
ASSERT(kPageShift && kNumClasses && kPageSize);
for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
list_[sizeClass].enumerateFreeObjects(finder, reader);
}
#endif
};
static TCMalloc_Central_FreeListPadded central_cache[K_NUM_CLASSES_MAX];
static AllocAlignmentInteger pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(AllocAlignmentInteger) - 1) / sizeof(AllocAlignmentInteger)];
static bool phinited = false;
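// The page heap is constructed lazily inside a static, suitably aligned
// buffer so bootstrapping never has to call the allocator itself. The union
// below makes the buffer-to-heap conversion explicit rather than relying on
// a bare cast.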
typedef union {
void* m_memory;
TCMalloc_PageHeap* m_pageHeap;
} PageHeapUnion;
static inline TCMalloc_PageHeap* getPageHeap()
{
PageHeapUnion u = { &pageheap_memory[0] };
return u.m_pageHeap;
}
#define pageheap getPageHeap()
size_t fastMallocGoodSize(size_t bytes)
{
if (!phinited)
TCMalloc_ThreadCache::InitModule();
return AllocationSize(bytes);
}
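// Two scavenger flavours follow. On platforms with libdispatch or Windows
// timers, scavenging runs as a rescheduled callback that suspends itself
// when there is nothing left to release. Elsewhere a dedicated pthread
// blocks on a condition variable and, once signalled, releases free pages
// every kScavengeDelayInSeconds until the heap is quiet again.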
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
#if HAVE(DISPATCH_H) || OS(WINDOWS)
void TCMalloc_PageHeap::periodicScavenge()
{
SpinLockHolder h(&pageheap_lock);
pageheap->scavenge();
if (shouldScavenge()) {
rescheduleScavenger();
return;
}
suspendScavenger();
}
ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
{
ASSERT(pageheap_lock.IsHeld());
if (isScavengerSuspended() && shouldScavenge())
scheduleScavenger();
}
#else
void TCMalloc_PageHeap::scavengerThread()
{
#if HAVE(PTHREAD_SETNAME_NP)
pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
#endif
while (1) {
pageheap_lock.Lock();
if (!shouldScavenge()) {
m_scavengeThreadActive = false;
pageheap_lock.Unlock();
pthread_mutex_lock(&m_scavengeMutex);
pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
pthread_mutex_unlock(&m_scavengeMutex);
m_scavengeThreadActive = true;
} else
pageheap_lock.Unlock();
sleep(kScavengeDelayInSeconds);
{
SpinLockHolder h(&pageheap_lock);
pageheap->scavenge();
}
}
}
#endif
#endif
#ifdef HAVE_TLS
static __thread TCMalloc_ThreadCache *threadlocal_heap;
#endif
static bool tsd_inited = false;
#if USE(PTHREAD_GETSPECIFIC_DIRECT)
static const pthread_key_t heap_key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0;
#else
static ThreadSpecificKey heap_key;
#endif
static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
#if USE(PTHREAD_GETSPECIFIC_DIRECT)
if (pthread_getspecific(heap_key))
CRASH();
#endif
#if OS(DARWIN)
pthread_setspecific(heap_key, heap);
#else
threadSpecificSet(heap_key, heap);
#endif
}
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
void TCMalloc_Central_FreeList::Init(size_t cl, uintptr_t entropy) {
ASSERT(kPageShift && kNumClasses && kPageSize);
lock_.Init();
size_class_ = cl;
entropy_ = entropy;
#if ENABLE(TCMALLOC_HARDENING)
ASSERT(entropy_);
#endif
DLL_Init(&empty_, entropy_);
DLL_Init(&nonempty_, entropy_);
counter_ = 0;
cache_size_ = 1;
used_slots_ = 0;
ASSERT(cache_size_ <= kNumTransferEntries);
}
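// Return a chain of freed objects to the spans that own them. Each object's
// span is found through the pagemap; when a span's refcount drops to zero,
// the whole span is handed back to the page heap (this cache's lock is
// dropped around the page-heap lock to respect the global lock order).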
void TCMalloc_Central_FreeList::ReleaseListToSpans(HardenedSLL start) {
while (start) {
HardenedSLL next = SLL_Next(start, entropy_);
ReleaseToSpans(start);
start = next;
}
}
ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(HardenedSLL object) {
ASSERT(kPageShift && kNumClasses && kPageSize);
const PageID p = reinterpret_cast<uintptr_t>(object.value()) >> kPageShift;
Span* span = pageheap->GetDescriptor(p);
ASSERT(span != NULL);
ASSERT(span->refcount > 0);
if (!span->objects) {
DLL_Remove(span, entropy_);
DLL_Prepend(&nonempty_, span, entropy_);
Event(span, 'N', 0);
}
if (false) {
// Disabled consistency check: verify the object is not already on the
// span's free list and that the span's accounting adds up.
unsigned got = 0;
for (HardenedSLL p = span->objects; p; p = SLL_Next(p, entropy_)) {
ASSERT(p.value() != object.value());
got++;
}
ASSERT(got + span->refcount ==
(span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
}
counter_++;
span->refcount--;
if (span->refcount == 0) {
Event(span, '#', 0);
counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
DLL_Remove(span, entropy_);
lock_.Unlock();
{
SpinLockHolder h(&pageheap_lock);
pageheap->Delete(span);
}
lock_.Lock();
} else {
SLL_SetNext(object, span->objects, entropy_);
span->objects.setValue(object.value());
}
}
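// Steal a transfer-cache slot from some other size class, chosen by a
// round-robin counter that is deliberately updated without a lock; a lost
// update merely changes which class gets shrunk.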
ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
size_t locked_size_class, bool force) {
ASSERT(kPageShift && kNumClasses && kPageSize);
static int race_counter = 0;
int t = race_counter++; // Updated without a lock; races here are harmless.
if (t >= static_cast<int>(kNumClasses)) {
while (t >= static_cast<int>(kNumClasses)) {
t -= kNumClasses;
}
race_counter = t;
}
ASSERT(t >= 0);
ASSERT(t < static_cast<int>(kNumClasses));
if (t == static_cast<int>(locked_size_class)) return false;
return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
}
bool TCMalloc_Central_FreeList::MakeCacheSpace() {
ASSERT(kPageShift && kNumClasses && kPageSize);
if (used_slots_ < cache_size_) return true;
if (cache_size_ == kNumTransferEntries) return false;
if (EvictRandomSizeClass(size_class_, false) ||
EvictRandomSizeClass(size_class_, true)) {
cache_size_++;
return true;
}
return false;
}
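// ShrinkCache must acquire another size class's lock while its caller holds
// this one, which could deadlock against a thread doing the reverse.
// LockInverter releases the held lock before taking the other and restores
// the original state on destruction, so any two locks are only ever held in
// one order.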
namespace {
class LockInverter {
private:
SpinLock *held_, *temp_;
public:
inline explicit LockInverter(SpinLock* held, SpinLock *temp)
: held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
}
bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
if (cache_size_ == 0) return false;
if (force == false && used_slots_ == cache_size_) return false;
LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
ASSERT(used_slots_ <= cache_size_);
ASSERT(0 <= cache_size_);
if (cache_size_ == 0) return false;
if (used_slots_ == cache_size_) {
if (force == false) return false;
cache_size_--;
used_slots_--;
ReleaseListToSpans(tc_slots_[used_slots_].head);
return true;
}
cache_size_--;
return true;
}
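// Transfer cache: batches of exactly num_objects_to_move[size_class_]
// objects are parked in tc_slots_, so a whole batch can move between a
// thread cache and the central list with one lock acquisition and no span
// bookkeeping.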
void TCMalloc_Central_FreeList::InsertRange(HardenedSLL start, HardenedSLL end, int N) {
ASSERT(kPageShift && kNumClasses && kPageSize);
SpinLockHolder h(&lock_);
if (N == num_objects_to_move[size_class_] &&
MakeCacheSpace()) {
int slot = used_slots_++;
ASSERT(slot >= 0);
ASSERT(slot < kNumTransferEntries);
TCEntry *entry = &tc_slots_[slot];
entry->head = start;
entry->tail = end;
return;
}
ReleaseListToSpans(start);
}
ALWAYS_INLINE void TCMalloc_Central_FreeList::RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N) {
int num = *N;
ASSERT(num > 0);
SpinLockHolder h(&lock_);
if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
int slot = --used_slots_;
ASSERT(slot >= 0);
TCEntry *entry = &tc_slots_[slot];
*start = entry->head;
*end = entry->tail;
return;
}
HardenedSLL tail = FetchFromSpansSafe();
if (!tail) {
*start = *end = HardenedSLL::null();
*N = 0;
return;
}
SLL_SetNext(tail, HardenedSLL::null(), entropy_);
HardenedSLL head = tail;
int count = 1;
while (count < num) {
HardenedSLL t = FetchFromSpans();
if (!t) break;
SLL_Push(&head, t, entropy_);
count++;
}
*start = head;
*end = tail;
*N = count;
}
ALWAYS_INLINE HardenedSLL TCMalloc_Central_FreeList::FetchFromSpansSafe() {
HardenedSLL t = FetchFromSpans();
if (!t) {
Populate();
t = FetchFromSpans();
}
return t;
}
HardenedSLL TCMalloc_Central_FreeList::FetchFromSpans() {
if (DLL_IsEmpty(&nonempty_, entropy_)) return HardenedSLL::null();
Span* span = nonempty_.next(entropy_);
ASSERT(span->objects);
ASSERT_SPAN_COMMITTED(span);
span->refcount++;
HardenedSLL result = span->objects;
span->objects = SLL_Next(result, entropy_);
if (!span->objects) {
DLL_Remove(span, entropy_);
DLL_Prepend(&empty_, span, entropy_);
Event(span, 'E', 0);
}
counter_--;
return result;
}
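// Fetch memory from the page heap and carve it into objects of this size
// class. The free list is built back-to-front so it ends up in ascending
// address order, and (when hardening is enabled) each slot is poisoned while
// it is still unreachable.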
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
ASSERT(kPageShift && kNumClasses && kPageSize);
lock_.Unlock();
const size_t npages = class_to_pages[size_class_];
Span* span;
{
SpinLockHolder h(&pageheap_lock);
span = pageheap->New(npages);
if (span) pageheap->RegisterSizeClass(span, size_class_);
}
if (span == NULL) {
#if HAVE(ERRNO_H)
MESSAGE("allocation failed: %d\n", errno);
#elif OS(WINDOWS)
MESSAGE("allocation failed: %d\n", ::GetLastError());
#else
MESSAGE("allocation failed\n");
#endif
lock_.Lock();
return;
}
ASSERT_SPAN_COMMITTED(span);
ASSERT(span->length == npages);
for (size_t i = 0; i < npages; i++) {
pageheap->CacheSizeClass(span->start + i, size_class_);
}
HardenedSLL head = HardenedSLL::null();
char* start = reinterpret_cast<char*>(span->start << kPageShift);
const size_t size = ByteSizeForClass(size_class_);
char* ptr = start + (npages << kPageShift) - ((npages << kPageShift) % size);
int num = 0;
#if ENABLE(TCMALLOC_HARDENING)
uint32_t startPoison = freedObjectStartPoison();
uint32_t endPoison = freedObjectEndPoison();
#endif
while (ptr > start) {
ptr -= size;
HardenedSLL node = HardenedSLL::create(ptr);
POISON_DEALLOCATION_EXPLICIT(ptr, size, startPoison, endPoison);
SLL_SetNext(node, head, entropy_);
head = node;
num++;
}
ASSERT(ptr == start);
ASSERT(ptr == head.value());
#ifndef NDEBUG
{
HardenedSLL node = head;
while (node) {
ASSERT(IS_DEFINITELY_POISONED(node.value(), size));
node = SLL_Next(node, entropy_);
}
}
#endif
span->objects = head;
ASSERT(span->objects.value() == head.value());
span->refcount = 0;
lock_.Lock();
DLL_Prepend(&nonempty_, span, entropy_);
counter_ += num;
}
inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
if (bytes_until_sample_ < k) {
PickNextSample(k);
return true;
} else {
bytes_until_sample_ -= k;
return false;
}
}
void TCMalloc_ThreadCache::Init(ThreadIdentifier tid, uintptr_t entropy) {
ASSERT(kPageShift && kNumClasses && kPageSize);
size_ = 0;
next_ = NULL;
prev_ = NULL;
tid_ = tid;
in_setspecific_ = false;
entropy_ = entropy;
#if ENABLE(TCMALLOC_HARDENING)
ASSERT(entropy_);
#endif
for (size_t cl = 0; cl < kNumClasses; ++cl) {
list_[cl].Init(entropy_);
}
bytes_until_sample_ = 0;
rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
for (int i = 0; i < 100; i++) {
PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
}
}
void TCMalloc_ThreadCache::Cleanup() {
ASSERT(kPageShift && kNumClasses && kPageSize);
for (size_t cl = 0; cl < kNumClasses; ++cl) {
if (list_[cl].length() > 0) {
ReleaseToCentralCache(cl, list_[cl].length());
}
}
}
ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
ASSERT(size <= kMaxSize);
const size_t cl = SizeClass(size);
FreeList* list = &list_[cl];
size_t allocationSize = ByteSizeForClass(cl);
if (list->empty()) {
FetchFromCentralCache(cl, allocationSize);
if (list->empty()) return NULL;
}
size_ -= allocationSize;
void* result = list->Pop();
if (!result)
return 0;
RELEASE_ASSERT(IS_DEFINITELY_POISONED(result, allocationSize));
POISON_ALLOCATION(result, allocationSize);
return result;
}
inline void TCMalloc_ThreadCache::Deallocate(HardenedSLL ptr, size_t cl) {
size_t allocationSize = ByteSizeForClass(cl);
size_ += allocationSize;
FreeList* list = &list_[cl];
if (MAY_BE_POISONED(ptr.value(), allocationSize))
list->Validate(ptr, allocationSize);
POISON_DEALLOCATION(ptr.value(), allocationSize);
list->Push(ptr);
if (list->length() > kMaxFreeListLength) {
ReleaseToCentralCache(cl, num_objects_to_move[cl]);
}
if (size_ >= per_thread_cache_size) Scavenge();
}
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
int fetch_count = num_objects_to_move[cl];
HardenedSLL start, end;
central_cache[cl].RemoveRange(&start, &end, &fetch_count);
list_[cl].PushRange(fetch_count, start, end);
size_ += allocationSize * fetch_count;
}
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
ASSERT(N > 0);
FreeList* src = &list_[cl];
if (N > src->length()) N = src->length();
size_ -= N*ByteSizeForClass(cl);
int batch_size = num_objects_to_move[cl];
while (N > batch_size) {
HardenedSLL tail, head;
src->PopRange(batch_size, &head, &tail);
central_cache[cl].InsertRange(head, tail, batch_size);
N -= batch_size;
}
HardenedSLL tail, head;
src->PopRange(N, &head, &tail);
central_cache[cl].InsertRange(head, tail, N);
}
inline void TCMalloc_ThreadCache::Scavenge() {
ASSERT(kPageShift && kNumClasses && kPageSize);
for (size_t cl = 0; cl < kNumClasses; cl++) {
FreeList* list = &list_[cl];
const int lowmark = list->lowwatermark();
if (lowmark > 0) {
const int drop = (lowmark > 1) ? lowmark/2 : 1;
ReleaseToCentralCache(cl, drop);
}
list->clear_lowwatermark();
}
}
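// Draw the next sampling point. rnd_ advances by one step of a 32-bit
// linear-feedback shift register (kPoly encodes the feedback taps), giving a
// cheap pseudo-random stream; sample_period is snapped to the smallest entry
// of primes_list that is >= the configured flag value. The early return for
// very large k keeps the catch-up loop below from overflowing
// bytes_until_sample_.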
void TCMalloc_ThreadCache::PickNextSample(size_t k) {
static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
uint32_t r = rnd_;
rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
static int last_flag_value = -1;
if (flag_value != last_flag_value) {
SpinLockHolder h(&sample_period_lock);
int i;
for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
if (primes_list[i] >= flag_value) {
break;
}
}
sample_period = primes_list[i];
last_flag_value = flag_value;
}
bytes_until_sample_ += rnd_ % sample_period;
if (k > (static_cast<size_t>(-1) >> 2)) {
return;
}
while (bytes_until_sample_ < k) {
bytes_until_sample_ += (sample_period >> 1);
}
bytes_until_sample_ -= k;
}
void TCMalloc_ThreadCache::InitModule() {
SpinLockHolder h(&pageheap_lock);
if (!phinited) {
uintptr_t entropy = HARDENING_ENTROPY;
#ifdef WTF_CHANGES
InitTSD();
#endif
InitSizeClasses();
threadheap_allocator.Init(entropy);
span_allocator.Init(entropy);
span_allocator.New(); // Reduce cache conflicts
span_allocator.New(); // Reduce cache conflicts
stacktrace_allocator.Init(entropy);
DLL_Init(&sampled_objects, entropy);
for (size_t i = 0; i < kNumClasses; ++i) {
central_cache[i].Init(i, entropy);
}
pageheap->init();
phinited = 1;
#if defined(WTF_CHANGES) && OS(DARWIN)
MallocHook::init();
FastMallocZone::init();
#endif
}
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid, uintptr_t entropy) {
TCMalloc_ThreadCache *heap = threadheap_allocator.New();
heap->Init(tid, entropy);
heap->next_ = thread_heaps;
heap->prev_ = NULL;
if (thread_heaps != NULL) thread_heaps->prev_ = heap;
thread_heaps = heap;
thread_heap_count++;
RecomputeThreadCacheSize();
return heap;
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
#ifdef HAVE_TLS
if (KernelSupportsTLS())
return threadlocal_heap;
#elif OS(DARWIN)
return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
#else
return static_cast<TCMalloc_ThreadCache*>(threadSpecificGet(heap_key));
#endif
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
TCMalloc_ThreadCache* ptr = NULL;
if (!tsd_inited) {
InitModule();
} else {
ptr = GetThreadHeap();
}
if (ptr == NULL) ptr = CreateCacheIfNecessary();
return ptr;
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
if (!tsd_inited) return NULL;
void* const p = GetThreadHeap();
return reinterpret_cast<TCMalloc_ThreadCache*>(p);
}
void TCMalloc_ThreadCache::InitTSD() {
ASSERT(!tsd_inited);
#if USE(PTHREAD_GETSPECIFIC_DIRECT)
pthread_key_init_np(heap_key, DestroyThreadCache);
#else
threadSpecificKeyCreate(&heap_key, DestroyThreadCache);
#endif
tsd_inited = true;
#if !OS(WINDOWS)
pthread_t zero;
memset(&zero, 0, sizeof(zero));
#endif
#ifndef WTF_CHANGES
SpinLockHolder h(&pageheap_lock);
#else
ASSERT(pageheap_lock.IsHeld());
#endif
for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if OS(WINDOWS)
if (h->tid_ == 0) {
h->tid_ = GetCurrentThreadId();
}
#else
if (pthread_equal(h->tid_, zero)) {
h->tid_ = pthread_self();
}
#endif
}
}
TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
TCMalloc_ThreadCache* heap = NULL;
{
SpinLockHolder h(&pageheap_lock);
#if OS(WINDOWS)
DWORD me;
if (!tsd_inited) {
me = 0;
} else {
me = GetCurrentThreadId();
}
#else
pthread_t me;
if (!tsd_inited) {
memset(&me, 0, sizeof(me));
} else {
me = pthread_self();
}
#endif
for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if OS(WINDOWS)
if (h->tid_ == me) {
#else
if (pthread_equal(h->tid_, me)) {
#endif
heap = h;
break;
}
}
if (heap == NULL) heap = NewHeap(me, HARDENING_ENTROPY);
}
if (!heap->in_setspecific_ && tsd_inited) {
heap->in_setspecific_ = true;
setThreadHeap(heap);
}
return heap;
}
void TCMalloc_ThreadCache::BecomeIdle() {
if (!tsd_inited) return;
TCMalloc_ThreadCache* heap = GetThreadHeap();
if (heap == NULL) return;
if (heap->in_setspecific_) return;
heap->in_setspecific_ = true;
setThreadHeap(NULL);
#ifdef HAVE_TLS
threadlocal_heap = NULL;
#endif
heap->in_setspecific_ = false;
if (GetThreadHeap() == heap) {
return;
}
DeleteCache(heap);
}
void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
if (ptr == NULL) return;
#ifdef HAVE_TLS
threadlocal_heap = NULL;
#endif
DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
}
void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
heap->Cleanup();
SpinLockHolder h(&pageheap_lock);
if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
if (thread_heaps == heap) thread_heaps = heap->next_;
thread_heap_count--;
RecomputeThreadCacheSize();
threadheap_allocator.Delete(heap);
}
void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
int n = thread_heap_count > 0 ? thread_heap_count : 1;
size_t space = overall_thread_cache_size / n;
if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
per_thread_cache_size = space;
}
void TCMalloc_ThreadCache::Print() const {
ASSERT(kPageShift && kNumClasses && kPageSize);
for (size_t cl = 0; cl < kNumClasses; ++cl) {
MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
ByteSizeForClass(cl),
list_[cl].length(),
list_[cl].lowwatermark());
}
}
struct TCMallocStats {
uint64_t system_bytes; // Bytes allocated from the system
uint64_t thread_bytes; // Bytes in thread caches
uint64_t central_bytes; // Bytes in central cache
uint64_t transfer_bytes; // Bytes in central transfer cache
uint64_t pageheap_bytes; // Bytes in page heap
uint64_t metadata_bytes; // Bytes allocated for metadata
};
#ifndef WTF_CHANGES
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
ASSERT(kPageShift && kNumClasses && kPageSize);
r->central_bytes = 0;
r->transfer_bytes = 0;
for (int cl = 0; cl < kNumClasses; ++cl) {
const int length = central_cache[cl].length();
const int tc_length = central_cache[cl].tc_length();
r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
r->transfer_bytes +=
static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
if (class_count) class_count[cl] = length + tc_length;
}
r->thread_bytes = 0;
{
SpinLockHolder h(&pageheap_lock);
for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
r->thread_bytes += h->Size();
if (class_count) {
for (size_t cl = 0; cl < kNumClasses; ++cl) {
class_count[cl] += h->freelist_length(cl);
}
}
}
}
{
SpinLockHolder h(&pageheap_lock);
r->system_bytes = pageheap->SystemBytes();
r->metadata_bytes = metadata_system_bytes;
r->pageheap_bytes = pageheap->FreeBytes();
}
}
#endif
#ifndef WTF_CHANGES
static void DumpStats(TCMalloc_Printer* out, int level) {
ASSERT(kPageShift && kNumClasses && kPageSize);
TCMallocStats stats;
uint64_t class_count[kNumClasses];
ExtractStats(&stats, (level >= 2 ? class_count : NULL));
if (level >= 2) {
out->printf("------------------------------------------------\n");
uint64_t cumulative = 0;
for (int cl = 0; cl < kNumClasses; ++cl) {
if (class_count[cl] > 0) {
uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
cumulative += class_bytes;
out->printf("class %3d [ %8" PRIuS " bytes ] : "
"%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
cl, ByteSizeForClass(cl),
class_count[cl],
class_bytes / 1048576.0,
cumulative / 1048576.0);
}
}
SpinLockHolder h(&pageheap_lock);
pageheap->Dump(out);
}
const uint64_t bytes_in_use = stats.system_bytes
- stats.pageheap_bytes
- stats.central_bytes
- stats.transfer_bytes
- stats.thread_bytes;
out->printf("------------------------------------------------\n"
"MALLOC: %12" PRIu64 " Heap size\n"
"MALLOC: %12" PRIu64 " Bytes in use by application\n"
"MALLOC: %12" PRIu64 " Bytes free in page heap\n"
"MALLOC: %12" PRIu64 " Bytes free in central cache\n"
"MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
"MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
"MALLOC: %12" PRIu64 " Spans in use\n"
"MALLOC: %12" PRIu64 " Thread heaps in use\n"
"MALLOC: %12" PRIu64 " Metadata allocated\n"
"------------------------------------------------\n",
stats.system_bytes,
bytes_in_use,
stats.pageheap_bytes,
stats.central_bytes,
stats.transfer_bytes,
stats.thread_bytes,
uint64_t(span_allocator.inuse()),
uint64_t(threadheap_allocator.inuse()),
stats.metadata_bytes);
}
static void PrintStats(int level) {
const int kBufferSize = 16 << 10;
char* buffer = new char[kBufferSize];
TCMalloc_Printer printer(buffer, kBufferSize);
DumpStats(&printer, level);
write(STDERR_FILENO, buffer, strlen(buffer));
delete[] buffer;
}
static void** DumpStackTraces() {
int needed_slots = 0;
{
SpinLockHolder h(&pageheap_lock);
for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
needed_slots += 3 + stack->depth;
}
needed_slots += 100; // Slack for samples recorded while the lock is released
needed_slots += needed_slots/8; // An extra 12.5% slop
}
void** result = new void*[needed_slots];
if (result == NULL) {
MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
needed_slots);
return NULL;
}
SpinLockHolder h(&pageheap_lock);
int used_slots = 0;
for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
ASSERT_WITH_SECURITY_IMPLICATION(used_slots < needed_slots);
StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
if (used_slots + 3 + stack->depth >= needed_slots) {
break;
}
result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
result[used_slots+1] = reinterpret_cast<void*>(stack->size);
result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
for (int d = 0; d < stack->depth; d++) {
result[used_slots+3+d] = stack->stack[d];
}
used_slots += 3 + stack->depth;
}
result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
return result;
}
#endif
#ifndef WTF_CHANGES
class TCMallocImplementation : public MallocExtension {
public:
virtual void GetStats(char* buffer, int buffer_length) {
ASSERT(buffer_length > 0);
TCMalloc_Printer printer(buffer, buffer_length);
if (buffer_length < 10000) {
DumpStats(&printer, 1);
} else {
DumpStats(&printer, 2);
}
}
virtual void** ReadStackTraces() {
return DumpStackTraces();
}
virtual bool GetNumericProperty(const char* name, size_t* value) {
ASSERT(name != NULL);
if (strcmp(name, "generic.current_allocated_bytes") == 0) {
TCMallocStats stats;
ExtractStats(&stats, NULL);
*value = stats.system_bytes
- stats.thread_bytes
- stats.central_bytes
- stats.pageheap_bytes;
return true;
}
if (strcmp(name, "generic.heap_size") == 0) {
TCMallocStats stats;
ExtractStats(&stats, NULL);
*value = stats.system_bytes;
return true;
}
if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
SpinLockHolder l(&pageheap_lock);
*value = pageheap->FreeBytes();
return true;
}
if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
SpinLockHolder l(&pageheap_lock);
*value = overall_thread_cache_size;
return true;
}
if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
TCMallocStats stats;
ExtractStats(&stats, NULL);
*value = stats.thread_bytes;
return true;
}
return false;
}
virtual bool SetNumericProperty(const char* name, size_t value) {
ASSERT(name != NULL);
if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
if (value > (1<<30)) value = (1<<30);
SpinLockHolder l(&pageheap_lock);
overall_thread_cache_size = static_cast<size_t>(value);
TCMalloc_ThreadCache::RecomputeThreadCacheSize();
return true;
}
return false;
}
virtual void MarkThreadIdle() {
TCMalloc_ThreadCache::BecomeIdle();
}
virtual void ReleaseFreeMemory() {
SpinLockHolder h(&pageheap_lock);
pageheap->ReleaseFreePages();
}
};
#endif
class TCMallocGuard {
public:
TCMallocGuard() {
#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
CheckIfKernelSupportsTLS();
#endif
#ifndef WTF_CHANGES
#ifdef WIN32 // patch the windows VirtualAlloc, etc.
PatchWindowsFunctions();
#endif
#endif
free(malloc(1));
TCMalloc_ThreadCache::InitTSD();
free(malloc(1));
#ifndef WTF_CHANGES
MallocExtension::Register(new TCMallocImplementation);
#endif
}
#ifndef WTF_CHANGES
~TCMallocGuard() {
const char* env = getenv("MALLOCSTATS");
if (env != NULL) {
int level = atoi(env);
if (level < 1) level = 1;
PrintStats(level);
}
#ifdef WIN32
UnpatchWindowsFunctions();
#endif
}
#endif
};
#ifndef WTF_CHANGES
static TCMallocGuard module_enter_exit_hook;
#endif
#ifndef WTF_CHANGES
static Span* DoSampledAllocation(size_t size) {
StackTrace tmp;
tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
tmp.size = size;
SpinLockHolder h(&pageheap_lock);
Span *span = pageheap->New(pages(size == 0 ? 1 : size));
if (span == NULL) {
return NULL;
}
StackTrace *stack = stacktrace_allocator.New();
if (stack == NULL) {
return span;
}
*stack = tmp;
span->sample = 1;
span->objects = stack;
DLL_Prepend(&sampled_objects, span);
return span;
}
#endif
#if !ASSERT_DISABLED
static inline bool CheckCachedSizeClass(void *ptr) {
ASSERT(kPageShift && kNumClasses && kPageSize);
PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
size_t cached_value = pageheap->GetSizeClassIfCached(p);
return cached_value == 0 ||
cached_value == pageheap->GetDescriptor(p)->sizeclass;
}
#endif
static inline void* CheckedMallocResult(void *result)
{
ASSERT(result == 0 || CheckCachedSizeClass(result));
return result;
}
static inline void* SpanToMallocResult(Span *span) {
ASSERT(kPageShift && kNumClasses && kPageSize);
ASSERT_SPAN_COMMITTED(span);
pageheap->CacheSizeClass(span->start, 0);
void* result = reinterpret_cast<void*>(span->start << kPageShift);
POISON_ALLOCATION(result, span->length << kPageShift);
return CheckedMallocResult(result);
}
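// Central allocation routine. Three paths: (when sampling is compiled in)
// sampled allocations get a dedicated span plus a recorded stack trace;
// requests above kMaxSize go straight to the page heap; everything else is
// served from the calling thread's cache. Under WTF_CHANGES the template
// parameter decides whether failure crashes (fastMalloc) or returns NULL
// (tryFastMalloc).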
#ifdef WTF_CHANGES
template <bool crashOnFailure>
#endif
static ALWAYS_INLINE void* do_malloc(size_t size) {
void* ret = NULL;
#ifdef WTF_CHANGES
ASSERT(!isForbidden());
#endif
TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
#ifndef WTF_CHANGES
if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
Span* span = DoSampledAllocation(size);
if (span != NULL) {
ret = SpanToMallocResult(span);
}
} else
#endif
if (size > kMaxSize) {
SpinLockHolder h(&pageheap_lock);
Span* span = pageheap->New(pages(size));
if (span != NULL) {
ret = SpanToMallocResult(span);
}
} else {
ret = CheckedMallocResult(heap->Allocate(size));
}
if (!ret) {
#ifdef WTF_CHANGES
if (crashOnFailure) CRASH();
#else
errno = ENOMEM;
#endif
}
return ret;
}
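// Central deallocation routine. The pagemap yields the span (and cached
// size class) for the pointer. Small objects go back to the thread cache
// when one exists, otherwise directly to the central list as a batch of one;
// spans without a size class are whole large allocations and return to the
// page heap.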
static ALWAYS_INLINE void do_free(void* ptr) {
if (ptr == NULL) return;
ASSERT(pageheap != NULL);
ASSERT(kPageShift && kNumClasses && kPageSize);
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
Span* span = pageheap->GetDescriptor(p);
RELEASE_ASSERT(span->isValid());
size_t cl = span->sizeclass;
if (cl) {
size_t byteSizeForClass = ByteSizeForClass(cl);
#if !(CPU(ARM_THUMB2) && !CPU(APPLE_ARMV7S))
RELEASE_ASSERT(!((reinterpret_cast<char*>(ptr) - reinterpret_cast<char*>(span->start << kPageShift)) % byteSizeForClass));
#endif
pageheap->CacheSizeClass(p, cl);
#ifndef NO_TCMALLOC_SAMPLES
ASSERT(!pageheap->GetDescriptor(p)->sample);
#endif
TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
if (heap != NULL) {
heap->Deallocate(HardenedSLL::create(ptr), cl);
} else {
POISON_DEALLOCATION(ptr, byteSizeForClass);
SLL_SetNext(HardenedSLL::create(ptr), HardenedSLL::null(), central_cache[cl].entropy());
central_cache[cl].InsertRange(HardenedSLL::create(ptr), HardenedSLL::create(ptr), 1);
}
} else {
SpinLockHolder h(&pageheap_lock);
ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
ASSERT(span != NULL && span->start == p);
#ifndef NO_TCMALLOC_SAMPLES
if (span->sample) {
DLL_Remove(span);
stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
span->objects = NULL;
}
#endif
RELEASE_ASSERT(reinterpret_cast<void*>(span->start << kPageShift) == ptr);
POISON_DEALLOCATION(ptr, span->length << kPageShift);
pageheap->Delete(span);
}
}
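// Aligned allocation (system-malloc builds only). For small sizes, scan for
// a size class whose object size already satisfies the alignment; otherwise
// over-allocate by |align| bytes worth of pages and use Split() to trim the
// misaligned head and any unused tail back to the page heap.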
#ifndef WTF_CHANGES
static void* do_memalign(size_t align, size_t size) {
ASSERT((align & (align - 1)) == 0);
ASSERT(align > 0);
if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
ASSERT(kPageShift && kNumClasses && kPageSize);
if (size == 0) size = 1;
if (size <= kMaxSize && align < kPageSize) {
size_t cl = SizeClass(size);
while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
cl++;
}
if (cl < kNumClasses) {
TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
}
}
SpinLockHolder h(&pageheap_lock);
if (align <= kPageSize) {
Span* span = pageheap->New(pages(size));
return span == NULL ? NULL : SpanToMallocResult(span);
}
const Length alloc = pages(size + align);
Span* span = pageheap->New(alloc);
if (span == NULL) return NULL;
Length skip = 0;
while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
skip++;
}
ASSERT_WITH_SECURITY_IMPLICATION(skip < alloc);
if (skip > 0) {
Span* rest = pageheap->Split(span, skip);
pageheap->Delete(span);
span = rest;
}
const Length needed = pages(size);
ASSERT(span->length >= needed);
if (span->length > needed) {
Span* trailer = pageheap->Split(span, needed);
pageheap->Delete(trailer);
}
return SpanToMallocResult(span);
}
#endif
#ifndef WTF_CHANGES
static inline void do_malloc_stats() {
PrintStats(1);
}
static inline int do_mallopt(int, int) {
return 1;
}
#endif
#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
static inline struct mallinfo do_mallinfo() {
TCMallocStats stats;
ExtractStats(&stats, NULL);
struct mallinfo info;
memset(&info, 0, sizeof(info));
info.arena = static_cast<int>(stats.system_bytes);
info.fsmblks = static_cast<int>(stats.thread_bytes
+ stats.central_bytes
+ stats.transfer_bytes);
info.fordblks = static_cast<int>(stats.pageheap_bytes);
info.uordblks = static_cast<int>(stats.system_bytes
- stats.thread_bytes
- stats.central_bytes
- stats.transfer_bytes
- stats.pageheap_bytes);
return info;
}
#endif
#ifndef WTF_CHANGES
extern "C"
#else
#define do_malloc do_malloc<crashOnFailure>
template <bool crashOnFailure>
ALWAYS_INLINE void* malloc(size_t);
void* fastMalloc(size_t size)
{
void* result = malloc<true>(size);
#if ENABLE(ALLOCATION_LOGGING)
dataLogF("fastMalloc allocating %lu bytes (fastMalloc): %p.\n", size, result);
#endif
return result;
}
TryMallocReturnValue tryFastMalloc(size_t size)
{
TryMallocReturnValue result = malloc<false>(size);
#if ENABLE(ALLOCATION_LOGGING)
void* pointer;
(void)result.getValue(pointer);
dataLogF("fastMalloc allocating %lu bytes (tryFastMalloc): %p.\n", size, pointer);
#endif
return result;
}
template <bool crashOnFailure>
ALWAYS_INLINE
#endif
void* malloc(size_t size) {
#if ENABLE(WTF_MALLOC_VALIDATION)
if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= size) return 0;
void* result = do_malloc(size + Internal::ValidationBufferSize);
if (!result)
return 0;
Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
header->m_size = size;
header->m_type = Internal::AllocTypeMalloc;
header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
result = header + 1;
*Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
fastMallocValidate(result);
#else
void* result = do_malloc(size);
#endif
MallocHook::InvokeNewHook(result, size);
return result;
}
#ifndef WTF_CHANGES
extern "C"
#endif
void free(void* ptr) {
#if ENABLE(ALLOCATION_LOGGING)
dataLogF("fastFree freeing %p.\n", ptr);
#endif
MallocHook::InvokeDeleteHook(ptr);
#if ENABLE(WTF_MALLOC_VALIDATION)
if (!ptr)
return;
fastMallocValidate(ptr);
Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(ptr);
memset(ptr, 0xCC, header->m_size);
do_free(header);
#else
do_free(ptr);
#endif
}
#ifndef WTF_CHANGES
extern "C"
#else
template <bool crashOnFailure>
ALWAYS_INLINE void* calloc(size_t, size_t);
void* fastCalloc(size_t n, size_t elem_size)
{
void* result = calloc<true>(n, elem_size);
#if ENABLE(WTF_MALLOC_VALIDATION)
fastMallocValidate(result);
#endif
#if ENABLE(ALLOCATION_LOGGING)
dataLogF("fastMalloc contiguously allocating %lu * %lu bytes (fastCalloc): %p.\n", n, elem_size, result);
#endif
return result;
}
TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
{
void* result = calloc<false>(n, elem_size);
#if ENABLE(WTF_MALLOC_VALIDATION)
fastMallocValidate(result);
#endif
#if ENABLE(ALLOCATION_LOGGING)
dataLogF("fastMalloc contiguously allocating %lu * %lu bytes (tryFastCalloc): %p.\n", n, elem_size, result);
#endif
return result;
}
template <bool crashOnFailure>
ALWAYS_INLINE
#endif
void* calloc(size_t n, size_t elem_size) {
size_t totalBytes = n * elem_size;
if (n > 1 && elem_size && (totalBytes / elem_size) != n)
return 0;
#if ENABLE(WTF_MALLOC_VALIDATION)
void* result = malloc<crashOnFailure>(totalBytes);
if (!result)
return 0;
memset(result, 0, totalBytes);
fastMallocValidate(result);
#else
void* result = do_malloc(totalBytes);
if (result != NULL) {
memset(result, 0, totalBytes);
}
#endif
MallocHook::InvokeNewHook(result, totalBytes);
return result;
}
#ifndef WTF_CHANGES
#ifndef WTF_CHANGES
extern "C"
#endif
void cfree(void* ptr) {
#ifndef WTF_CHANGES
MallocHook::InvokeDeleteHook(ptr);
#endif
do_free(ptr);
}
#endif
#ifndef WTF_CHANGES
extern "C"
#else
template <bool crashOnFailure>
ALWAYS_INLINE void* realloc(void*, size_t);
void* fastRealloc(void* old_ptr, size_t new_size)
{
#if ENABLE(WTF_MALLOC_VALIDATION)
fastMallocValidate(old_ptr);
#endif
void* result = realloc<true>(old_ptr, new_size);
#if ENABLE(WTF_MALLOC_VALIDATION)
fastMallocValidate(result);
#endif
#if ENABLE(ALLOCATION_LOGGING)
dataLogF("fastMalloc reallocating %lu bytes (fastRealloc): %p -> %p.\n", new_size, old_ptr, result);
#endif
return result;
}
TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
{
#if ENABLE(WTF_MALLOC_VALIDATION)
fastMallocValidate(old_ptr);
#endif
void* result = realloc<false>(old_ptr, new_size);
#if ENABLE(WTF_MALLOC_VALIDATION)
fastMallocValidate(result);
#endif
#if ENABLE(ALLOCATION_LOGGING)
dataLogF("fastMalloc reallocating %lu bytes (tryFastRealloc): %p -> %p.\n", new_size, old_ptr, result);
#endif
return result;
}
template <bool crashOnFailure>
ALWAYS_INLINE
#endif
void* realloc(void* old_ptr, size_t new_size) {
if (old_ptr == NULL) {
#if ENABLE(WTF_MALLOC_VALIDATION)
void* result = malloc<crashOnFailure>(new_size);
#else
void* result = do_malloc(new_size);
MallocHook::InvokeNewHook(result, new_size);
#endif
return result;
}
if (new_size == 0) {
MallocHook::InvokeDeleteHook(old_ptr);
free(old_ptr);
return NULL;
}
#if ENABLE(WTF_MALLOC_VALIDATION)
if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= new_size) return 0;
Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(old_ptr);
fastMallocValidate(old_ptr);
old_ptr = header;
header->m_size = new_size;
new_size += Internal::ValidationBufferSize;
#endif
ASSERT(pageheap != NULL); ASSERT(kPageShift && kNumClasses && kPageSize);
const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
size_t cl = pageheap->GetSizeClassIfCached(p);
Span *span = NULL;
size_t old_size;
if (cl == 0) {
span = pageheap->GetDescriptor(p);
cl = span->sizeclass;
pageheap->CacheSizeClass(p, cl);
}
if (cl != 0) {
old_size = ByteSizeForClass(cl);
} else {
ASSERT(span != NULL);
old_size = span->length << kPageShift;
}
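// Reallocate only if the request grew, or shrank enough that the allocator
// would actually hand out a smaller block (AllocationSize(new_size) <
// old_size); otherwise keep the existing block and avoid the copy.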
if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
void* new_ptr = do_malloc(new_size);
if (new_ptr == NULL) {
return NULL;
}
MallocHook::InvokeNewHook(new_ptr, new_size);
memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
MallocHook::InvokeDeleteHook(old_ptr);
do_free(old_ptr);
#if ENABLE(WTF_MALLOC_VALIDATION)
new_ptr = static_cast<Internal::ValidationHeader*>(new_ptr) + 1;
*Internal::fastMallocValidationSuffix(new_ptr) = Internal::ValidationSuffix;
#endif
return new_ptr;
} else {
#if ENABLE(WTF_MALLOC_VALIDATION)
old_ptr = static_cast<Internal::ValidationHeader*>(old_ptr) + 1;
*Internal::fastMallocValidationSuffix(old_ptr) = Internal::ValidationSuffix;
#endif
return old_ptr;
}
}
#ifdef WTF_CHANGES
#undef do_malloc
#else
static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
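// Allocation loop used by operator new (system-malloc builds only). On
// failure it consults the installed std::new_handler and retries; the
// set_new_handler(0)-then-restore dance under set_new_handler_lock is how
// the handler is read, since C++98 provides no std::get_new_handler. With
// no handler installed, the loop returns 0 (nothrow) or throws
// std::bad_alloc.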
static inline void* cpp_alloc(size_t size, bool nothrow) {
for (;;) {
void* p = do_malloc(size);
#ifdef PREANSINEW
return p;
#else
if (p == NULL) {
std::new_handler nh;
{
SpinLockHolder h(&set_new_handler_lock);
nh = std::set_new_handler(0);
(void) std::set_new_handler(nh);
}
if (!nh) {
if (nothrow) return 0;
throw std::bad_alloc();
}
try {
(*nh)();
} catch (const std::bad_alloc&) {
if (!nothrow) throw;
return p;
}
} else {
return p;
}
#endif
}
}
extern "C" void* memalign(size_t align, size_t size) __THROW {
void* result = do_memalign(align, size);
MallocHook::InvokeNewHook(result, size);
return result;
}
extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
__THROW {
if (((align % sizeof(void*)) != 0) ||
((align & (align - 1)) != 0) ||
(align == 0)) {
return EINVAL;
}
void* result = do_memalign(align, size);
MallocHook::InvokeNewHook(result, size);
if (result == NULL) {
return ENOMEM;
} else {
*result_ptr = result;
return 0;
}
}
static size_t pagesize = 0;
extern "C" void* valloc(size_t size) __THROW {
if (pagesize == 0) pagesize = getpagesize();
void* result = do_memalign(pagesize, size);
MallocHook::InvokeNewHook(result, size);
return result;
}
extern "C" void* pvalloc(size_t size) __THROW {
if (pagesize == 0) pagesize = getpagesize();
size = (size + pagesize - 1) & ~(pagesize - 1);
void* result = do_memalign(pagesize, size);
MallocHook::InvokeNewHook(result, size);
return result;
}
extern "C" void malloc_stats(void) {
do_malloc_stats();
}
extern "C" int mallopt(int cmd, int value) {
return do_mallopt(cmd, value);
}
#ifdef HAVE_STRUCT_MALLINFO
extern "C" struct mallinfo mallinfo(void) {
return do_mallinfo();
}
#endif
#if defined(__GLIBC__)
extern "C" {
#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
# define ALIAS(x) __attribute__ ((weak, alias (x)))
void* __libc_malloc(size_t size) ALIAS("malloc");
void __libc_free(void* ptr) ALIAS("free");
void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
void __libc_cfree(void* ptr) ALIAS("cfree");
void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
void* __libc_valloc(size_t size) ALIAS("valloc");
void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
# undef ALIAS
# else
void* __libc_malloc(size_t size) { return malloc(size); }
void __libc_free(void* ptr) { free(ptr); }
void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
void __libc_cfree(void* ptr) { cfree(ptr); }
void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
void* __libc_valloc(size_t size) { return valloc(size); }
void* __libc_pvalloc(size_t size) { return pvalloc(size); }
int __posix_memalign(void** r, size_t a, size_t s) {
return posix_memalign(r, a, s);
}
# endif
}
#endif
static void *MemalignOverride(size_t align, size_t size, const void *caller)
__THROW {
void* result = do_memalign(align, size);
MallocHook::InvokeNewHook(result, size);
return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
#endif
#ifdef WTF_CHANGES
void releaseFastMallocFreeMemory()
{
if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent())
threadCache->Cleanup();
SpinLockHolder h(&pageheap_lock);
pageheap->ReleaseFreePages();
}
FastMallocStatistics fastMallocStatistics()
{
ASSERT(kPageShift && kNumClasses && kPageSize);
FastMallocStatistics statistics;
SpinLockHolder lockHolder(&pageheap_lock);
statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes());
statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes();
statistics.freeListBytes = 0;
for (unsigned cl = 0; cl < kNumClasses; ++cl) {
const int length = central_cache[cl].length();
const int tc_length = central_cache[cl].tc_length();
statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length);
}
for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
statistics.freeListBytes += threadCache->Size();
return statistics;
}
size_t fastMallocSize(const void* ptr)
{
if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
ASSERT(kPageShift && kNumClasses && kPageSize);
#if ENABLE(WTF_MALLOC_VALIDATION)
return Internal::fastMallocValidationHeader(const_cast<void*>(ptr))->m_size;
#else
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
Span* span = pageheap->GetDescriptorEnsureSafe(p);
if (!span || span->free)
return 0;
for (HardenedSLL free = span->objects; free; free = SLL_Next(free, HARDENING_ENTROPY)) {
if (ptr == free.value())
return 0;
}
if (size_t cl = span->sizeclass)
return ByteSizeForClass(cl);
return span->length << kPageShift;
#endif
}
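// Darwin malloc-zone support. The enumeration machinery below may run in a
// different process (e.g. under malloc_history or leaks): RemoteMemoryReader
// maps ranges of the target task's memory into this one through the supplied
// memory_reader_t, and every pointer chased out of the target heap,
// including the XOR-masked free-list links, must be translated through it.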
#if OS(DARWIN)
class RemoteMemoryReader {
task_t m_task;
memory_reader_t* m_reader;
public:
RemoteMemoryReader(task_t task, memory_reader_t* reader)
: m_task(task)
, m_reader(reader)
{ }
void* operator()(vm_address_t address, size_t size) const
{
void* output;
kern_return_t err = (*m_reader)(m_task, address, size, static_cast<void**>(&output));
if (err)
output = 0;
return output;
}
template <typename T>
T* operator()(T* address, size_t size = sizeof(T)) const
{
return static_cast<T*>((*this)(reinterpret_cast<vm_address_t>(address), size));
}
template <typename T>
T* nextEntryInHardenedLinkedList(T** remoteAddress, uintptr_t entropy) const
{
T** localAddress = (*this)(remoteAddress);
if (!localAddress)
return 0;
T* hardenedNext = *localAddress;
if (!hardenedNext || hardenedNext == (void*)entropy)
return 0;
return XOR_MASK_PTR_WITH_KEY(hardenedNext, remoteAddress, entropy);
}
};
template <typename T>
template <typename Recorder>
void PageHeapAllocator<T>::recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
{
for (HardenedSLL adminAllocation = allocated_regions_; adminAllocation; adminAllocation.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(adminAllocation.value()), entropy_)))
recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation.value()), kAllocIncrement);
}
class FreeObjectFinder {
const RemoteMemoryReader& m_reader;
HashSet<void*> m_freeObjects;
public:
FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }
void visit(void* ptr) { m_freeObjects.add(ptr); }
bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
size_t freeObjectCount() const { return m_freeObjects.size(); }
void findFreeObjects(TCMalloc_ThreadCache* threadCache)
{
for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
threadCache->enumerateFreeObjects(*this, m_reader);
}
void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
{
for (unsigned i = 0; i < numSizes; i++)
centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
}
};
class PageMapFreeObjectFinder {
const RemoteMemoryReader& m_reader;
FreeObjectFinder& m_freeObjectFinder;
uintptr_t m_entropy;
public:
PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder, uintptr_t entropy)
: m_reader(reader)
, m_freeObjectFinder(freeObjectFinder)
, m_entropy(entropy)
{
#if ENABLE(TCMALLOC_HARDENING)
ASSERT(m_entropy);
#endif
}
int visit(void* ptr) const
{
ASSERT(kPageShift && kNumClasses && kPageSize);
if (!ptr)
return 1;
Span* span = m_reader(reinterpret_cast<Span*>(ptr));
if (!span)
return 1;
if (span->free) {
void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
m_freeObjectFinder.visit(ptr);
} else if (span->sizeclass) {
for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(m_reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), m_entropy)))
m_freeObjectFinder.visit(nextObject.value());
}
return span->length;
}
};
class PageMapMemoryUsageRecorder {
task_t m_task;
void* m_context;
unsigned m_typeMask;
vm_range_recorder_t* m_recorder;
const RemoteMemoryReader& m_reader;
const FreeObjectFinder& m_freeObjectFinder;
HashSet<void*> m_seenPointers;
Vector<Span*> m_coalescedSpans;
public:
PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
: m_task(task)
, m_context(context)
, m_typeMask(typeMask)
, m_recorder(recorder)
, m_reader(reader)
, m_freeObjectFinder(freeObjectFinder)
{ }
~PageMapMemoryUsageRecorder()
{
ASSERT(!m_coalescedSpans.size());
}
void recordPendingRegions()
{
ASSERT(kPageShift && kNumClasses && kPageSize);
bool recordRegionsContainingPointers = m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE;
bool recordAllocations = m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE;
if (!recordRegionsContainingPointers && !recordAllocations) {
m_coalescedSpans.clear();
return;
}
Vector<vm_range_t, 256> pointerRegions;
Vector<vm_range_t, 1024> allocatedPointers;
for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
Span *theSpan = m_coalescedSpans[i];
vm_address_t spanStartAddress = theSpan->start << kPageShift;
vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
if (recordRegionsContainingPointers)
pointerRegions.append((vm_range_t){spanStartAddress, spanSizeInBytes});
if (theSpan->free || !recordAllocations)
continue;
if (!theSpan->sizeclass) {
if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
} else {
const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
if (!m_freeObjectFinder.isFreeObject(object))
allocatedPointers.append((vm_range_t){object, objectSize});
}
}
}
if (recordRegionsContainingPointers)
(*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, pointerRegions.data(), pointerRegions.size());
if (recordAllocations)
(*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
m_coalescedSpans.clear();
}
int visit(void* ptr)
{
ASSERT(kPageShift && kNumClasses && kPageSize);
if (!ptr)
return 1;
Span* span = m_reader(reinterpret_cast<Span*>(ptr));
if (!span || !span->start)
return 1;
if (!m_seenPointers.add(ptr).isNewEntry)
return span->length;
if (!m_coalescedSpans.size()) {
m_coalescedSpans.append(span);
return span->length;
}
Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
vm_address_t spanStartAddress = span->start << kPageShift;
if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
m_coalescedSpans.append(span);
return span->length;
}
recordPendingRegions();
m_coalescedSpans.append(span);
return span->length;
}
};
class AdminRegionRecorder {
task_t m_task;
void* m_context;
unsigned m_typeMask;
vm_range_recorder_t* m_recorder;
Vector<vm_range_t, 1024> m_pendingRegions;
public:
AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder)
: m_task(task)
, m_context(context)
, m_typeMask(typeMask)
, m_recorder(recorder)
{ }
void recordRegion(vm_address_t ptr, size_t size)
{
if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
m_pendingRegions.append((vm_range_t){ ptr, size });
}
void visit(void *ptr, size_t size)
{
recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
}
void recordPendingRegions()
{
if (m_pendingRegions.size()) {
(*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
m_pendingRegions.clear();
}
}
~AdminRegionRecorder()
{
ASSERT(!m_pendingRegions.size());
}
};
kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
{
RemoteMemoryReader memoryReader(task, reader);
InitSizeClasses();
FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);
TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);
FreeObjectFinder finder(memoryReader);
finder.findFreeObjects(threadHeaps);
finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);
TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
PageMapFreeObjectFinder pageMapFinder(memoryReader, finder, pageHeap->entropy_);
pageMap->visitValues(pageMapFinder, memoryReader);
PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
pageMap->visitValues(usageRecorder, memoryReader);
usageRecorder.recordPendingRegions();
AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder);
pageMap->visitAllocations(adminRegionRecorder, memoryReader);
PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
adminRegionRecorder.recordPendingRegions();
return 0;
}
size_t FastMallocZone::size(malloc_zone_t*, const void*)
{
return 0;
}
void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
{
return 0;
}
void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
{
return 0;
}
void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
{
malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
}
void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
{
return 0;
}
#undef malloc
#undef free
#undef realloc
#undef calloc
extern "C" {
malloc_introspection_t jscore_fastmalloc_introspection = {
&FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
&FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics,
0, 0, 0, 0, 0 // Trailing members are only consulted for newer zone versions.
};
}
FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
: m_pageHeap(pageHeap)
, m_threadHeaps(threadHeaps)
, m_centralCaches(centralCaches)
, m_spanAllocator(spanAllocator)
, m_pageHeapAllocator(pageHeapAllocator)
{
memset(&m_zone, 0, sizeof(m_zone));
m_zone.version = 4;
m_zone.zone_name = "JavaScriptCore FastMalloc";
m_zone.size = &FastMallocZone::size;
m_zone.malloc = &FastMallocZone::zoneMalloc;
m_zone.calloc = &FastMallocZone::zoneCalloc;
m_zone.realloc = &FastMallocZone::zoneRealloc;
m_zone.free = &FastMallocZone::zoneFree;
m_zone.valloc = &FastMallocZone::zoneValloc;
m_zone.destroy = &FastMallocZone::zoneDestroy;
m_zone.introspect = &jscore_fastmalloc_introspection;
malloc_zone_register(&m_zone);
}
void FastMallocZone::init()
{
static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
}
#endif // OS(DARWIN)
} // namespace WTF
#endif // WTF_CHANGES
#endif // FORCE_SYSTEM_MALLOC