#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)
#include "tclInt.h"
#ifdef WIN32
#include "tclWinInt.h"
#else
extern Tcl_Mutex *TclpNewAllocMutex(void);
extern void *TclpGetAllocCache(void);
extern void TclpSetAllocCache(void *);
#endif
/*
 * RCHECK enables a one-byte guard written after each allocation and
 * verified on free; it defaults on for debug builds and off when NDEBUG
 * is defined, unless overridden by the build.
 */
#ifndef RCHECK
#ifdef NDEBUG
#define RCHECK 0
#else
#define RCHECK 1
#endif
#endif
/*
 * Tunables: NOBJALLOC Tcl_Obj's are allocated or migrated at a time; a
 * per-thread object cache is trimmed back to the shared cache once it
 * exceeds NOBJHIGH.  NBUCKETS size classes serve blocks up to MAXALLOC
 * bytes; larger requests go straight to the system allocator.
 */
#define NOBJALLOC 800
#define NOBJHIGH 1200
#define NBUCKETS 11
#define MAXALLOC 16284
/*
 * Header prepended to every allocated block.  While a block is on a free
 * list the header holds the 'next' link; while allocated, the same
 * storage holds two magic bytes and the owning bucket index, which are
 * used to validate and route the block when it is freed.
 */
typedef struct Block {
    union {
	struct Block *next;		/* Next block when on a free list. */
	struct {
	    unsigned char magic1;	/* First magic number. */
	    unsigned char bucket;	/* Bucket the block belongs to. */
	    unsigned char unused;	/* Padding. */
	    unsigned char magic2;	/* Second magic number. */
	} b_s;
    } b_u;
    size_t b_reqsize;			/* Size originally requested. */
} Block;
/* Shorthand accessors for the union members above. */
#define b_next b_u.next
#define b_bucket b_u.b_s.bucket
#define b_magic1 b_u.b_s.magic1
#define b_magic2 b_u.b_s.magic2
#define MAGIC 0xef
/*
 * Per-size-class free list plus accounting counters.  The counters are
 * statistics only (reported by Tcl_GetMemoryInfo); those on a per-thread
 * cache are updated without locking.
 */
typedef struct Bucket {
    Block *firstPtr;	/* Head of the free-block list. */
    int nfree;		/* Blocks currently on the free list. */
    int nget;		/* Total allocations served from this bucket. */
    int nput;		/* Total frees returned to this bucket. */
    int nwait;		/* Lock-contention count (see LockBucket). */
    int nlock;		/* Times the bucket lock was taken. */
    int nrequest;	/* Sum of outstanding requested sizes. */
} Bucket;
/*
 * Per-thread allocator state: one Bucket per size class plus a free list
 * of Tcl_Obj's.  All caches are chained via nextPtr (guarded by
 * listLockPtr) so Tcl_GetMemoryInfo can walk them.
 */
typedef struct Cache {
    struct Cache *nextPtr;	/* Next cache on the global list. */
    Tcl_ThreadId owner;		/* Thread that owns this cache. */
    Tcl_Obj *firstObjPtr;	/* Head of the free Tcl_Obj list. */
    int nobjs;			/* Number of free Tcl_Obj's. */
    int nsysalloc;		/* Bytes obtained directly from malloc. */
    Bucket buckets[NBUCKETS];	/* Per-size-class free lists. */
} Cache;
/*
 * Per-bucket configuration: the block size, the per-thread free-list
 * limit (maxblocks) above which blocks migrate to the shared cache, and
 * how many blocks move per migration (nmove).  Sizes double per bucket.
 * NOTE(review): the final blocksize 16284 matches MAXALLOC but looks
 * like a typo for 16384 -- confirm upstream before changing either.
 */
struct binfo {
    size_t blocksize;	/* Bucket block size. */
    int maxblocks;	/* Max free blocks before migrating. */
    int nmove;		/* Blocks to move to/from shared at a time. */
    Tcl_Mutex *lockPtr;	/* Lock guarding the shared bucket's list. */
} binfo[NBUCKETS] = {
    { 16, 1024, 512, NULL},
    { 32, 512, 256, NULL},
    { 64, 256, 128, NULL},
    { 128, 128, 64, NULL},
    { 256, 64, 32, NULL},
    { 512, 32, 16, NULL},
    { 1024, 16, 8, NULL},
    { 2048, 8, 4, NULL},
    { 4096, 4, 2, NULL},
    { 8192, 2, 1, NULL},
    {16284, 1, 1, NULL},
};
/*
 * Prototypes for internal helpers defined below.
 */
static void LockBucket(Cache *cachePtr, int bucket);
static void UnlockBucket(Cache *cachePtr, int bucket);
static void PutBlocks(Cache *cachePtr, int bucket, int nmove);
static int GetBlocks(Cache *cachePtr, int bucket);
static Block *Ptr2Block(char *ptr);
static char *Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize);
static void MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove);
/*
 * Global allocator state: listLockPtr guards the linked list of all
 * caches; objLockPtr guards the shared Tcl_Obj free list.  The static
 * sharedCache collects blocks and objects released by exiting threads.
 */
static Tcl_Mutex *listLockPtr;
static Tcl_Mutex *objLockPtr;
static Cache sharedCache;
static Cache *sharedPtr = &sharedCache;
static Cache *firstCachePtr = &sharedCache;
/*
 * GetCache --
 *
 *	Return the calling thread's private Cache, creating it on demand
 *	and, on the very first call anywhere, creating the allocator's
 *	global mutexes.  Panics if the cache cannot be allocated.
 */
static Cache *
GetCache(void)
{
    Cache *cachePtr;

    /*
     * One-time creation of the global locks, serialized by the master
     * allocator mutex.  NOTE(review): the unlocked listLockPtr pre-check
     * is double-checked locking and assumes aligned pointer loads/stores
     * are atomic -- confirm that is an accepted project-wide assumption.
     */
    if (listLockPtr == NULL) {
	Tcl_Mutex *initLockPtr;
	int i;

	initLockPtr = Tcl_GetAllocMutex();
	Tcl_MutexLock(initLockPtr);
	if (listLockPtr == NULL) {
	    listLockPtr = TclpNewAllocMutex();
	    objLockPtr = TclpNewAllocMutex();
	    for (i = 0; i < NBUCKETS; ++i) {
		binfo[i].lockPtr = TclpNewAllocMutex();
	    }
	}
	Tcl_MutexUnlock(initLockPtr);
    }

    /*
     * Fetch this thread's cache from thread storage; if missing,
     * allocate a zeroed one (calloc) and link it onto the global list.
     */
    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
	cachePtr = calloc(1, sizeof(Cache));
	if (cachePtr == NULL) {
	    panic("alloc: could not allocate new cache");
	}
	Tcl_MutexLock(listLockPtr);
	cachePtr->nextPtr = firstCachePtr;
	firstCachePtr = cachePtr;
	Tcl_MutexUnlock(listLockPtr);
	cachePtr->owner = Tcl_GetCurrentThread();
	TclpSetAllocCache(cachePtr);
    }
    return cachePtr;
}
/*
 * TclFreeAllocCache --
 *
 *	Thread-exit hook: flush all free blocks and Tcl_Obj's from the
 *	dying thread's cache back to the shared cache, unlink the cache
 *	from the global list, and release its storage.
 */
void
TclFreeAllocCache(void *arg)
{
    Cache *cachePtr = arg;
    Cache **nextPtrPtr;
    register int bucket;

    /* Flush each bucket's free blocks to the shared cache. */
    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
	if (cachePtr->buckets[bucket].nfree > 0) {
	    PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].nfree);
	}
    }

    /* Flush the free Tcl_Obj list. */
    if (cachePtr->nobjs > 0) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(cachePtr, sharedPtr, cachePtr->nobjs);
	Tcl_MutexUnlock(objLockPtr);
    }

    /* Unlink this cache from the global cache list. */
    Tcl_MutexLock(listLockPtr);
    nextPtrPtr = &firstCachePtr;
    while (*nextPtrPtr != cachePtr) {
	nextPtrPtr = &(*nextPtrPtr)->nextPtr;
    }
    *nextPtrPtr = cachePtr->nextPtr;
    cachePtr->nextPtr = NULL;
    Tcl_MutexUnlock(listLockPtr);
#ifdef WIN32
    /*
     * NOTE(review): TlsFree() expects a TLS *index*, not a pointer;
     * casting the cache pointer here looks wrong and appears to leak the
     * cache memory -- confirm against the Win32 thread-storage code.
     */
    TlsFree((DWORD) cachePtr);
#else
    free(cachePtr);
#endif
}
/*
 * TclpAlloc --
 *
 *	Allocate reqsize bytes, preferring a block from the calling
 *	thread's cache.  Requests larger than MAXALLOC fall through to
 *	the system malloc().  Returns NULL on failure (including a
 *	request so large that adding the block header overflows size_t).
 */
char *
TclpAlloc(unsigned int reqsize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    register int bucket;
    size_t size;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    blockPtr = NULL;
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;			/* One guard byte after the user data. */
#endif
    if (size < reqsize) {
	/*
	 * Adding the header (and guard byte) wrapped the size_t
	 * computation; a wrapped size would under-allocate, so fail the
	 * request instead.
	 */
	return NULL;
    }
    if (size > MAXALLOC) {
	/* Oversize request: go straight to the system allocator. */
	bucket = NBUCKETS;
	blockPtr = malloc(size);
	if (blockPtr != NULL) {
	    cachePtr->nsysalloc += reqsize;
	}
    } else {
	/* Find the smallest bucket whose blocks fit the request. */
	bucket = 0;
	while (binfo[bucket].blocksize < size) {
	    ++bucket;
	}
	/* Pop a free block, refilling the bucket first if necessary. */
	if (cachePtr->buckets[bucket].nfree || GetBlocks(cachePtr, bucket)) {
	    blockPtr = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr->b_next;
	    --cachePtr->buckets[bucket].nfree;
	    ++cachePtr->buckets[bucket].nget;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	}
    }
    if (blockPtr == NULL) {
	return NULL;
    }
    return Block2Ptr(blockPtr, bucket, reqsize);
}
/*
 * TclpFree --
 *
 *	Release a pointer obtained from TclpAlloc/TclpRealloc.  Oversize
 *	blocks go back to the system allocator; bucketed blocks are
 *	pushed onto the calling thread's free list, overflowing excess
 *	blocks to the shared cache.  A NULL pointer is ignored.
 */
void
TclpFree(char *ptr)
{
    Cache *cachePtr;
    Block *blockPtr;
    int bucket;

    if (ptr == NULL) {
	return;
    }
    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    blockPtr = Ptr2Block(ptr);
    bucket = blockPtr->b_bucket;
    if (bucket == NBUCKETS) {
	/* Oversize block: straight back to the system allocator. */
	cachePtr->nsysalloc -= blockPtr->b_reqsize;
	free(blockPtr);
	return;
    }

    /* Push the block onto this thread's free list for its bucket. */
    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
    blockPtr->b_next = cachePtr->buckets[bucket].firstPtr;
    cachePtr->buckets[bucket].firstPtr = blockPtr;
    ++cachePtr->buckets[bucket].nfree;
    ++cachePtr->buckets[bucket].nput;

    /* Shed surplus blocks to the shared cache once over the limit. */
    if (cachePtr != sharedPtr
	    && cachePtr->buckets[bucket].nfree > binfo[bucket].maxblocks) {
	PutBlocks(cachePtr, bucket, binfo[bucket].nmove);
    }
}
/*
 * TclpRealloc --
 *
 *	Resize an allocation.  If the new size still fits the block's
 *	current bucket (and a smaller bucket would not do), the block is
 *	reused in place; oversize blocks are passed to realloc();
 *	otherwise a new block is allocated, the data copied, and the old
 *	block freed.  Returns NULL on failure, leaving the original
 *	allocation valid.
 */
char *
TclpRealloc(char *ptr, unsigned int reqsize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    void *new;
    size_t size, min;
    int bucket;

    if (ptr == NULL) {
	return TclpAlloc(reqsize);
    }
    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    blockPtr = Ptr2Block(ptr);
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;			/* One guard byte after the user data. */
#endif
    if (size < reqsize) {
	/* Header/guard addition overflowed size_t: refuse the request. */
	return NULL;
    }
    bucket = blockPtr->b_bucket;
    if (bucket != NBUCKETS) {
	/*
	 * Reuse in place when the new size still needs this bucket:
	 * larger than the next-smaller bucket, no bigger than this one.
	 */
	if (bucket > 0) {
	    min = binfo[bucket-1].blocksize;
	} else {
	    min = 0;
	}
	if (size > min && size <= binfo[bucket].blocksize) {
	    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	    return Block2Ptr(blockPtr, bucket, reqsize);
	}
    } else if (size > MAXALLOC) {
	/*
	 * System block staying a system block: realloc() it.  Adjust
	 * nsysalloc only after success so a failed realloc (which leaves
	 * the original block intact) does not skew the accounting; the
	 * old b_reqsize survives the realloc copy.
	 */
	blockPtr = realloc(blockPtr, size);
	if (blockPtr == NULL) {
	    return NULL;
	}
	cachePtr->nsysalloc -= blockPtr->b_reqsize;
	cachePtr->nsysalloc += reqsize;
	return Block2Ptr(blockPtr, NBUCKETS, reqsize);
    }

    /* Bucket change required: allocate, copy the surviving bytes, free. */
    new = TclpAlloc(reqsize);
    if (new != NULL) {
	if (reqsize > blockPtr->b_reqsize) {
	    reqsize = blockPtr->b_reqsize;
	}
	memcpy(new, ptr, reqsize);
	TclpFree(ptr);
    }
    return new;
}
/*
 * TclThreadAllocObj --
 *
 *	Return a Tcl_Obj from the per-thread free list, refilling the
 *	list first from the shared cache or, failing that, by mallocing
 *	a batch of NOBJALLOC objects.  Panics on malloc failure.  Free
 *	objects are chained through internalRep.otherValuePtr.
 */
Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register int nmove;
    register Tcl_Obj *objPtr;
    Tcl_Obj *newObjsPtr;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    if (cachePtr->nobjs == 0) {
	/* First try to grab a batch from the shared cache. */
	Tcl_MutexLock(objLockPtr);
	nmove = sharedPtr->nobjs;
	if (nmove > 0) {
	    if (nmove > NOBJALLOC) {
		nmove = NOBJALLOC;
	    }
	    MoveObjs(sharedPtr, cachePtr, nmove);
	}
	Tcl_MutexUnlock(objLockPtr);
	/*
	 * Shared cache was empty: malloc a fresh batch and chain the
	 * objects onto this thread's free list (nobjs is set before the
	 * malloc; on failure panic() aborts, so the count never lies).
	 */
	if (cachePtr->nobjs == 0) {
	    cachePtr->nobjs = nmove = NOBJALLOC;
	    newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
	    if (newObjsPtr == NULL) {
		panic("alloc: could not allocate %d new objects", nmove);
	    }
	    while (--nmove >= 0) {
		objPtr = &newObjsPtr[nmove];
		objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
		cachePtr->firstObjPtr = objPtr;
	    }
	}
    }

    /* Pop the head of the free list. */
    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->nobjs;
    return objPtr;
}
/*
 * TclThreadFreeObj --
 *
 *	Return a Tcl_Obj to the calling thread's free list, shedding a
 *	batch back to the shared cache when the list grows past the
 *	NOBJHIGH high-water mark.
 */
void
TclThreadFreeObj(Tcl_Obj *objPtr)
{
    Cache *myCachePtr = TclpGetAllocCache();

    if (myCachePtr == NULL) {
	myCachePtr = GetCache();
    }

    /* Push the object onto this thread's free list. */
    objPtr->internalRep.otherValuePtr = myCachePtr->firstObjPtr;
    myCachePtr->firstObjPtr = objPtr;
    myCachePtr->nobjs += 1;

    /* Over the high-water mark: hand a batch to the shared cache. */
    if (myCachePtr->nobjs > NOBJHIGH) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(myCachePtr, sharedPtr, NOBJALLOC);
	Tcl_MutexUnlock(objLockPtr);
    }
}
/*
 * Tcl_GetMemoryInfo --
 *
 *	Append allocator statistics to the given DString: one sublist per
 *	cache, labeled "shared" or by the owning thread id, followed by
 *	seven counters for each bucket (blocksize, nfree, nget, nput,
 *	nrequest, nlock, nwait).
 */
void
Tcl_GetMemoryInfo(Tcl_DString *dsPtr)
{
    Cache *cachePtr;
    char buf[200];
    int n;

    Tcl_MutexLock(listLockPtr);
    cachePtr = firstCachePtr;
    while (cachePtr != NULL) {
	Tcl_DStringStartSublist(dsPtr);
	if (cachePtr == sharedPtr) {
	    Tcl_DStringAppendElement(dsPtr, "shared");
	} else {
	    /*
	     * Tcl_ThreadId is pointer-sized; the old "(int)" cast
	     * truncated it on LP64 platforms, so format it with %p.
	     */
	    sprintf(buf, "thread%p", (void *) cachePtr->owner);
	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	for (n = 0; n < NBUCKETS; ++n) {
	    sprintf(buf, "%d %d %d %d %d %d %d",
		    (int) binfo[n].blocksize,
		    cachePtr->buckets[n].nfree,
		    cachePtr->buckets[n].nget,
		    cachePtr->buckets[n].nput,
		    cachePtr->buckets[n].nrequest,
		    cachePtr->buckets[n].nlock,
		    cachePtr->buckets[n].nwait);
	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	Tcl_DStringEndSublist(dsPtr);
	cachePtr = cachePtr->nextPtr;
    }
    Tcl_MutexUnlock(listLockPtr);
}
/*
 * MoveObjs --
 *
 *	Move nmove free Tcl_Obj's from fromPtr's list onto the front of
 *	toPtr's list.  Caller holds objLockPtr when either cache is the
 *	shared one.  Precondition (unchecked): fromPtr holds at least
 *	nmove (>= 1) objects; the walk below would dereference NULL
 *	otherwise.
 */
static void
MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->nobjs += nmove;
    fromPtr->nobjs -= nmove;

    /* Walk to the last object being moved; the one after it becomes the
     * new head of the 'from' list. */
    while (--nmove) {
	objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /* Splice the moved run onto the front of the 'to' list. */
    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}
/*
 * Block2Ptr --
 *
 *	Stamp a block's header (magic bytes, bucket, requested size),
 *	write the trailing guard byte when RCHECK is on, and return the
 *	client pointer just past the header.
 */
static char *
Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
{
    register void *clientPtr;

    blockPtr->b_magic1 = MAGIC;
    blockPtr->b_magic2 = MAGIC;
    blockPtr->b_bucket = bucket;
    blockPtr->b_reqsize = reqsize;
    clientPtr = (void *) (blockPtr + 1);
#if RCHECK
    /* Guard byte immediately after the user data, checked on free. */
    ((unsigned char *) clientPtr)[reqsize] = MAGIC;
#endif
    return (char *) clientPtr;
}
/*
 * Ptr2Block --
 *
 *	Map a client pointer back to its Block header, panicking if the
 *	magic bytes (and, with RCHECK, the trailing guard byte) have been
 *	corrupted.
 */
static Block *
Ptr2Block(char *ptr)
{
    register Block *blockPtr;

    blockPtr = (((Block *) ptr) - 1);
    if (blockPtr->b_magic1 != MAGIC
#if RCHECK
	    || ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
#endif
	    || blockPtr->b_magic2 != MAGIC) {
	panic("alloc: invalid block: %p: %x %x %x\n",
		blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
		((unsigned char *) ptr)[blockPtr->b_reqsize]);
    }
    return blockPtr;
}
/*
 * LockBucket --
 *
 *	Acquire the shared-cache lock for a bucket and bump the lock
 *	statistics on both the caller's cache and the shared cache.
 */
static void
LockBucket(Cache *cachePtr, int bucket)
{
#if 0
    /*
     * Disabled contention accounting: try-lock first and count a wait
     * when the lock was already held.  Compiled out -- presumably
     * because Tcl_MutexTryLock is not generally available; confirm
     * before enabling.
     */
    if (Tcl_MutexTryLock(binfo[bucket].lockPtr) != TCL_OK) {
	Tcl_MutexLock(binfo[bucket].lockPtr);
	++cachePtr->buckets[bucket].nwait;
	++sharedPtr->buckets[bucket].nwait;
    }
#else
    Tcl_MutexLock(binfo[bucket].lockPtr);
#endif
    ++cachePtr->buckets[bucket].nlock;
    ++sharedPtr->buckets[bucket].nlock;
}
/*
 * UnlockBucket --
 *
 *	Release the shared-cache lock for a bucket.  cachePtr is kept
 *	only for symmetry with LockBucket and is unused here.
 */
static void
UnlockBucket(Cache *cachePtr, int bucket)
{
    Tcl_MutexUnlock(binfo[bucket].lockPtr);
}
/*
 * PutBlocks --
 *
 *	Return nmove blocks from cachePtr's bucket free list to the
 *	shared cache.  Precondition (unchecked): the bucket holds at
 *	least nmove blocks.  The caller's list is split without the
 *	bucket lock (it is thread-private); only the splice onto the
 *	shared list is performed under the lock.
 */
static void
PutBlocks(Cache *cachePtr, int bucket, int nmove)
{
    register Block *lastPtr, *firstPtr;
    register int n = nmove;

    /* Walk to the last block of the run being handed back. */
    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
	lastPtr = lastPtr->b_next;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
    cachePtr->buckets[bucket].nfree -= nmove;

    /* Splice the run onto the shared bucket under its lock. */
    LockBucket(cachePtr, bucket);
    lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].nfree += nmove;
    UnlockBucket(cachePtr, bucket);
}
/*
 * GetBlocks --
 *
 *	Refill cachePtr's free list for the given bucket: first by moving
 *	blocks from the shared cache, then by carving up either a larger
 *	free block from a higher bucket or a fresh MAXALLOC-byte malloc.
 *	Returns 1 if at least one block is available afterwards, else 0.
 */
static int
GetBlocks(Cache *cachePtr, int bucket)
{
    register Block *blockPtr;
    register int n;
    register size_t size;

    /*
     * Try the shared cache first.  The unlocked nfree pre-check avoids
     * taking the bucket lock when the shared cache looks empty; the
     * count is re-checked under the lock before anything is moved.
     */
    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
	LockBucket(cachePtr, bucket);
	if (sharedPtr->buckets[bucket].nfree > 0) {
	    n = binfo[bucket].nmove;
	    if (n >= sharedPtr->buckets[bucket].nfree) {
		/* Take the shared bucket's entire list. */
		cachePtr->buckets[bucket].firstPtr =
			sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].nfree =
			sharedPtr->buckets[bucket].nfree;
		sharedPtr->buckets[bucket].firstPtr = NULL;
		sharedPtr->buckets[bucket].nfree = 0;
	    } else {
		/* Detach the first n blocks of the shared list. */
		blockPtr = sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].firstPtr = blockPtr;
		sharedPtr->buckets[bucket].nfree -= n;
		cachePtr->buckets[bucket].nfree = n;
		while (--n > 0) {
		    blockPtr = blockPtr->b_next;
		}
		sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
		blockPtr->b_next = NULL;
	    }
	}
	UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].nfree == 0) {
	/*
	 * Nothing came from the shared cache: look for a larger free
	 * block in this cache to split up.
	 */
	blockPtr = NULL;
	n = NBUCKETS;
	size = 0;
	while (--n > bucket) {
	    if (cachePtr->buckets[n].nfree > 0) {
		size = binfo[n].blocksize;
		blockPtr = cachePtr->buckets[n].firstPtr;
		cachePtr->buckets[n].firstPtr = blockPtr->b_next;
		--cachePtr->buckets[n].nfree;
		break;
	    }
	}

	/* Otherwise allocate a big new block directly. */
	if (blockPtr == NULL) {
	    size = MAXALLOC;
	    blockPtr = malloc(size);
	    if (blockPtr == NULL) {
		return 0;
	    }
	}

	/* Carve the block into bucket-sized pieces chained as a list. */
	n = size / binfo[bucket].blocksize;
	cachePtr->buckets[bucket].nfree = n;
	cachePtr->buckets[bucket].firstPtr = blockPtr;
	while (--n > 0) {
	    blockPtr->b_next = (Block *)
		    ((char *) blockPtr + binfo[bucket].blocksize);
	    blockPtr = blockPtr->b_next;
	}
	blockPtr->b_next = NULL;
    }
    return 1;
}
#endif