EntityManager.cpp: safety improvements

mmgr: consistent prefix naming
lockfree: split out lock-free allocator

This was SVN commit r2260.
janwas 2005-05-09 04:41:35 +00:00
parent 229a6b7a16
commit 34352b77af
5 changed files with 606 additions and 575 deletions

source/lib/lf_alloc.cpp (new file, +582 lines)

@ -0,0 +1,582 @@
// lock-free memory allocation
//
// Copyright (c) 2005 Jan Wassenberg
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// Contact info:
// Jan.Wassenberg@stud.uni-karlsruhe.de
// http://www.stud.uni-karlsruhe.de/~urkt/
#include "precompiled.h"
#include <algorithm>
#include <limits.h>
#include "lib.h"
#include "posix.h"
#include "sysdep/cpu.h"
#include "lockfree.h"
#include "timer.h"
// superblock descriptor structure
// one machine word
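// field roles, as used by the code below:
//   avail - index of the first free block in this superblock's freelist
//   count - free blocks not currently handed out as Active credits
//   tag   - version counter, bumped on every pop to avoid ABA problems
//   state - one of the State values below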
struct Anchor
{
uint avail : 10;
uint count : 10;
uint tag : 10;
uint state : 2;
// convert to uintptr_t for CAS
operator uintptr_t() const
{
return *(uintptr_t*)this;
}
};
cassert(sizeof(Anchor) == sizeof(uintptr_t));
enum State
{
ACTIVE = 0,
FULL = 1,
PARTIAL = 2,
EMPTY = 3
};
typedef void* DescList;
struct SizeClass
{
DescList partial; // initially empty
size_t sz; // block size
size_t sb_size; // superblock's size
};
struct Descriptor;
static const uint PTR_BITS = sizeof(void*) * CHAR_BIT;
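// Active packs a Descriptor* and a small credit count into one machine word:
// descriptors are MAX_CREDITS-aligned, so the pointer's low 6 bits are always
// zero and can store how many blocks are pre-reserved for lock-free handout.
// both fields can therefore be updated together with a single CAS.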
struct Active
{
uint pdesc : PTR_BITS-6;
uint credits : 6;
Active()
{
}
// convert to uintptr_t for CAS
operator uintptr_t() const
{
return *(uintptr_t*)this;
}
//
// allow Active to be used as Descriptor*
//
Active& operator=(Descriptor* desc)
{
*(Descriptor**)this = desc;
assert(credits == 0); // make sure ptr is aligned
return *this;
}
Active(Descriptor* desc)
{
*this = desc;
}
// disambiguate (could otherwise be either uintptr_t or Descriptor*)
bool operator!() const
{
return (uintptr_t)*this == 0;
}
operator Descriptor*() const
{
return *(Descriptor**)this;
}
};
static const uint MAX_CREDITS = 64; // = 2 ** num_credit_bits
struct ProcHeap
{
Active active; // initially 0; points to Descriptor
Descriptor* partial; // initially 0
SizeClass* sc; // parent
};
// POD; must be MAX_CREDITS-aligned!
struct Descriptor
{
Anchor anchor;
Descriptor* next;
u8* sb; // superblock
ProcHeap* heap; // -> owner procheap
size_t sz; // block size
uint maxcount; // superblock size/sz
};
static u8* AllocNewSB(size_t sb_size)
{
return 0;
}
static void FreeSB(u8* sb)
{
}
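// note: AllocNewSB/FreeSB are still placeholders in this commit. a minimal
// sketch (hypothetical; assumes posix_memalign is available via posix.h):
//   static u8* AllocNewSB(size_t sb_size)
//   {
//       void* p = 0;
//       // 64-byte (MAX_CREDITS) alignment keeps the low pointer bits free
//       if(posix_memalign(&p, 64, sb_size) != 0)
//           return 0;
//       return (u8*)p;
//   }
//   static void FreeSB(u8* sb) { free(sb); }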
static Descriptor* DescAvail = 0;
static const size_t DESCSBSIZE = 128;
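// DescAlloc pops a descriptor from the global DescAvail freelist; if the list
// is empty, it allocates a fresh superblock of descriptors and tries to
// publish it via CAS. (the step that carves that superblock into next-linked
// descriptors is still missing below - only the comment marks its place.)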
static Descriptor* DescAlloc()
{
Descriptor* desc;
for(;;)
{
desc = DescAvail;
if(desc)
{
Descriptor* next = desc->next;
if(CAS(&DescAvail, desc, next))
break;
}
else
{
desc = (Descriptor*)AllocNewSB(DESCSBSIZE);
// organize descriptors in a linked list
mfence();
if(CAS(&DescAvail, 0, desc->next))
break;
FreeSB((u8*)desc);
}
}
return desc;
}
static void DescRetire(Descriptor* desc)
{
Descriptor* old_head;
do
{
old_head = DescAvail;
desc->next = old_head;
mfence();
}
while(!CAS(&DescAvail, old_head, desc));
}
static Descriptor* ListGetPartial(SizeClass* sc)
{
return 0;
}
static void ListPutPartial(Descriptor* desc)
{
}
static void ListRemoveEmptyDesc(SizeClass* sc)
{
}
static ProcHeap* find_heap(SizeClass* sc)
{
return 0;
}
static Descriptor* HeapGetPartial(ProcHeap* heap)
{
Descriptor* desc;
do
{
desc = heap->partial;
if(!desc)
return ListGetPartial(heap->sc);
}
while(!CAS(&heap->partial, desc, 0));
return desc;
}
static void HeapPutPartial(Descriptor* desc)
{
Descriptor* prev;
do
prev = desc->heap->partial;
while(!CAS(&desc->heap->partial, prev, desc));
if(prev)
ListPutPartial(prev);
}
static void UpdateActive(ProcHeap* heap, Descriptor* desc, uint more_credits)
{
Active new_active = desc;
new_active.credits = more_credits-1;
if(CAS(&heap->active, 0, new_active))
return;
// someone installed another active sb
// return credits to sb and make it partial
Anchor old_anchor, new_anchor;
do
{
new_anchor = old_anchor = desc->anchor;
new_anchor.count += more_credits;
new_anchor.state = PARTIAL;
}
while(!CAS(&desc->anchor, old_anchor, new_anchor));
HeapPutPartial(desc);
}
static void RemoveEmptyDesc(ProcHeap* heap, Descriptor* desc)
{
if(CAS(&heap->partial, desc, 0))
DescRetire(desc);
else
ListRemoveEmptyDesc(heap->sc);
}
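// allocation tries three stages in turn (see lf_malloc below): the heap's
// Active superblock, then a Partial superblock, then a brand-new superblock.
// MallocFromActive first reserves a block by decrementing the credit count
// with one CAS, then pops the actual block off the anchor with a second CAS.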
static void* MallocFromActive(ProcHeap* heap)
{
// reserve block
Active old_active, new_active;
do
{
new_active = old_active = heap->active;
// no active superblock - will try Partial and then NewSB
if(!old_active)
return 0;
// none left - mark as no longer active
if(old_active.credits == 0)
new_active = 0;
// expected case - reserve
else
new_active.credits--;
}
while(!CAS(&heap->active, old_active, new_active));
u8* p;
// pop block
Anchor old_anchor, new_anchor;
Descriptor* desc = old_active;
uint more_credits;
do
{
new_anchor = old_anchor = desc->anchor;
p = desc->sb + old_anchor.avail*desc->sz;
new_anchor.avail = *(uint*)p;
new_anchor.tag++;
if(old_active.credits == 0)
{
// state must be ACTIVE
if(old_anchor.count == 0)
new_anchor.state = FULL;
else
{
more_credits = MIN(old_anchor.count, MAX_CREDITS);
new_anchor.count -= more_credits;
}
}
}
while(!CAS(&desc->anchor, old_anchor, new_anchor));
if(old_active.credits == 0 && old_anchor.count > 0)
UpdateActive(heap, desc, more_credits);
*(Descriptor**)p = desc;
return p+sizeof(void*);
}
static void* MallocFromPartial(ProcHeap* heap)
{
retry:
Descriptor* desc = HeapGetPartial(heap);
if(!desc)
return 0;
desc->heap = heap;
// reserve blocks
uint more_credits;
Anchor old_anchor, new_anchor;
do
{
new_anchor = old_anchor = desc->anchor;
if(old_anchor.state == EMPTY)
{
DescRetire(desc);
goto retry;
}
// old_anchor state must be PARTIAL
// old_anchor count must be > 0
more_credits = MIN(old_anchor.count-1, MAX_CREDITS);
new_anchor.count -= more_credits+1;
new_anchor.state = (more_credits > 0)? ACTIVE : FULL;
}
while(!CAS(&desc->anchor, old_anchor, new_anchor));
u8* p;
// pop reserved block
do
{
new_anchor = old_anchor = desc->anchor;
p = desc->sb + old_anchor.avail*desc->sz;
new_anchor.avail = *(uint*)p;
new_anchor.tag++;
}
while(!CAS(&desc->anchor, old_anchor, new_anchor));
if(more_credits > 0)
UpdateActive(heap, desc, more_credits);
*(Descriptor**)p = desc;
return p+sizeof(void*);
}
static void* MallocFromNewSB(ProcHeap* heap)
{
Descriptor* desc = DescAlloc();
desc->sb = AllocNewSB(heap->sc->sb_size);
//organize blocks in a linked list starting with index 0
desc->heap = heap;
desc->anchor.avail = 1;
desc->sz = heap->sc->sz;
desc->maxcount = (uint)(heap->sc->sb_size/desc->sz);
Active new_active = (Active)desc;
new_active.credits = MIN(desc->maxcount-1, MAX_CREDITS)-1;
desc->anchor.count = (desc->maxcount-1)-(new_active.credits+1);
desc->anchor.state = ACTIVE;
mfence();
if(!CAS(&heap->active, 0, new_active))
{
FreeSB(desc->sb);
return 0;
}
u8* p = desc->sb;
*(Descriptor**)p = desc;
return p+sizeof(void*);
}
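// block header convention: the word immediately preceding each returned
// pointer holds either the owning Descriptor* (small blocks; LSB clear since
// descriptors are aligned) or the request size with its LSB set (large blocks
// passed straight to malloc). lf_free uses bit 0 to tell the cases apart.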
void* lf_malloc(size_t sz)
{
void* p;
// use sz and thread id to find heap
ProcHeap* heap = find_heap(0); // TODO: pass SizeClass
// large block - allocate directly, with a header word prepended
if(!heap)
{
p = malloc(sz+sizeof(void*));
if(!p)
return 0;
// mark as large block: size with LSB set (descriptor pointers are
// aligned, so their LSB is always 0)
*(size_t*)p = sz|1;
return (u8*)p+sizeof(void*);
}
retry:
p = MallocFromActive(heap);
if(p)
return p;
p = MallocFromPartial(heap);
if(p)
return p;
p = MallocFromNewSB(heap);
if(p)
return p;
goto retry;
}
void lf_free(void* p_)
{
if(!p_)
return;
u8* p = (u8*)p_;
// get block header
p -= sizeof(void*);
uintptr_t hdr = *(uintptr_t*)p;
// large block - free directly
if(hdr & 1)
{
free(p);
return;
}
Descriptor* desc = (Descriptor*)hdr;
u8* sb = desc->sb;
Anchor old_anchor, new_anchor;
ProcHeap* heap;
do
{
new_anchor = old_anchor = desc->anchor;
*(size_t*)p = old_anchor.avail;
new_anchor.avail = (uint)((p-sb) / desc->sz);
if(old_anchor.state == FULL)
new_anchor.state = PARTIAL;
if(old_anchor.count == desc->maxcount-1)
{
heap = desc->heap;
serialize();
new_anchor.state = EMPTY;
}
else
new_anchor.count++;
mfence();
}
while(!CAS(&desc->anchor, old_anchor, new_anchor));
if(new_anchor.state == EMPTY)
{
FreeSB(sb);
RemoveEmptyDesc(heap, desc);
}
else if(old_anchor.state == FULL)
HeapPutPartial(desc);
}
/*
static const int MAX_POOLS = 8;
// split out of pools[] for more efficient lookup
static size_t pool_element_sizes[MAX_POOLS];
struct Pool
{
u8* bucket_pos;
u8* freelist;
}
pools[MAX_POOLS];
static const int num_pools = 0;
const size_t BUCKET_SIZE = 8*KiB;
static u8* bucket_pos;
// return the pool responsible for <size>, or 0 if not yet set up and
// there are already too many pools.
static Pool* responsible_pool(size_t size)
{
Pool* pool = pools;
for(int i = 0; i < MAX_POOLS; i++, pool++)
if(pool->element_size == size)
return pool;
// need to set up a new pool
// .. but there are too many
assert2(0 <= num_pools && num_pools <= MAX_POOLS);
if(num_pools >= MAX_POOLS)
{
debug_warn("increase MAX_POOLS");
return 0;
}
pool = &pools[num_pools++];
pool->element_size = size;
return pool;
}
void* sbh_alloc(size_t size)
{
// when this allocation is freed, there must be enough room for
// our freelist pointer. also ensures alignment.
size = round_up(size, 8);
// would overflow a bucket
if(size > BUCKET_SIZE-sizeof(u8*))
{
debug_warn("sbh_alloc: size doesn't fit in a bucket");
return 0;
}
//
//
}
TNode* node_alloc(size_t size)
{
// would overflow a bucket
if(size > BUCKET_SIZE-sizeof(u8*))
{
debug_warn("node_alloc: size doesn't fit in a bucket");
return 0;
}
size = round_up(size, 8);
// ensure alignment, since size includes a string
const uintptr_t addr = (uintptr_t)bucket_pos;
const size_t bytes_used = addr % BUCKET_SIZE;
// addr = 0 on first call (no bucket yet allocated)
// bytes_used == 0 if a node fit exactly into a bucket
if(addr == 0 || bytes_used == 0 || bytes_used+size > BUCKET_SIZE)
{
u8* const prev_bucket = (u8*)addr - bytes_used;
u8* bucket = (u8*)mem_alloc(BUCKET_SIZE, BUCKET_SIZE);
if(!bucket)
return 0;
*(u8**)bucket = prev_bucket;
bucket_pos = bucket+round_up(sizeof(u8*), 8);
}
TNode* node = (TNode*)bucket_pos;
bucket_pos = (u8*)node+size;
return node;
}
static void node_free_all()
{
const uintptr_t addr = (uintptr_t)bucket_pos;
u8* bucket = bucket_pos - (addr % BUCKET_SIZE);
// covers bucket_pos == 0 case
while(bucket)
{
u8* prev_bucket = *(u8**)bucket;
mem_free(bucket);
bucket = prev_bucket;
}
}
*/
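// usage sketch (hypothetical; since find_heap and the size-class setup are
// still stubs, every request currently takes the large-block path):
//   void* p = lf_malloc(100);
//   if(p)
//       lf_free(p);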

source/lib/lockfree.cpp

@ -1,4 +1,4 @@
// lock-free primitives and algorithms
// lock-free data structures
//
// Copyright (c) 2005 Jan Wassenberg
//
@ -103,568 +103,6 @@ static inline void* node_user_data(Node* n)
}
[562 lines removed: the lock-free allocator implementation (Anchor/Active/Descriptor, ProcHeap, the MallocFrom* routines, lf_malloc/lf_free and the commented-out pool allocator), moved with minor cleanups into source/lib/lf_alloc.cpp above]
//////////////////////////////////////////////////////////////////////////////
//
// thread-local storage for SMR

source/lib/lockfree.h

@ -1,4 +1,4 @@
// lock-free primitives and algorithms
// lock-free data structures
//
// Copyright (c) 2005 Jan Wassenberg
//

source/lib/mmgr.cpp

@ -104,7 +104,7 @@ static const size_t padding_size = 256 * sizeof(ulong);
// normal settings
#else
static uint options = 0;
static bool random_fill = false;
static bool random_fill = true;
static const size_t padding_size = 1 * sizeof(ulong);
#endif
@ -377,10 +377,10 @@ static void allocs_foreach(void(*cb)(const Alloc*, void*), void* arg)
// padding: make sure the user hasn't over/underrun their buffer.
//////////////////////////////////////////////////////////////////////////////
static const ulong prefixPattern = 0xbaadf00d;
static const ulong postfixPattern = 0xdeadc0de;
static const ulong unusedPattern = 0xfeedface; // newly allocated
static const ulong releasedPattern = 0xdeadbeef;
static const ulong pattern_before = 0xbaadf00d;
static const ulong pattern_after = 0xdeadc0de;
static const ulong pattern_unused = 0xfeedface;
static const ulong pattern_freed = 0xdeadbeef;
static void pattern_set(const Alloc* a, ulong pattern)
{
@ -434,8 +434,8 @@ static void pattern_set(const Alloc* a, ulong pattern)
ulong* post = (ulong*)( (char*)a->p + a->size - padding_size );
for(uint i = 0; i < padding_size / sizeof(ulong); i++)
{
*pre++ = prefixPattern;
*post++ = postfixPattern;
*pre++ = pattern_before;
*post++ = pattern_after;
// note: doesn't need to be split into 2 loops; cache is A2
}
}
@ -447,7 +447,7 @@ static bool padding_is_intact(const Alloc* a)
ulong* post = (ulong*)( (char*)a->p + a->size - padding_size );
for(uint i = 0; i < padding_size / sizeof(ulong); i++)
if(*pre++ != prefixPattern || *post++ != postfixPattern)
if(*pre++ != pattern_before || *post++ != pattern_after)
return false;
return true;
@ -479,7 +479,7 @@ static size_t calc_unused(const Alloc* a)
size_t total = 0;
const ulong* p = (const ulong*)a->user_p();
for(uint i = 0; i < a->user_size(); i += sizeof(ulong))
if(*p++ == unusedPattern)
if(*p++ == pattern_unused)
total += sizeof(long);
return total;
@ -1045,7 +1045,7 @@ void* alloc_dbg(size_t user_size, AllocType type, const char* file, int line, co
allocs_add(a);
stats_add(a);
pattern_set(a, unusedPattern);
pattern_set(a, pattern_unused);
// calloc() must zero the memory
if(type == AT_CALLOC)
@ -1117,7 +1117,7 @@ void free_dbg(const void* user_p, AllocType type, const char* file, int line, co
// "poison" the allocation's memory, to catch use-after-free bugs.
// the VC7 debug heap does this also (in free), so we're wasting time
// in that case. oh well, better to be safe/consistent.
pattern_set(a, releasedPattern);
pattern_set(a, pattern_freed);
free(a->p);

source/simulation/EntityManager.cpp

@ -9,9 +9,12 @@ int SELECTION_BOX_POINTS;
int SELECTION_SMOOTHNESS_UNIFIED = 9;
CEntityManager::CEntityManager()
: m_entities() // janwas: default-initialize entire array;
// CHandle ctor sets m_entity and m_refcount to 0
{
m_nextalloc = 0;
m_extant = true;
// Also load a couple of global entity settings
CConfigValue* cfg = g_ConfigDB.GetValue( CFG_USER, "selection.outline.quality" );
if( cfg ) cfg->GetInt( SELECTION_SMOOTHNESS_UNIFIED );
@ -23,9 +26,14 @@ CEntityManager::CEntityManager()
CEntityManager::~CEntityManager()
{
m_extant = false;
for( int i = 0; i < MAX_HANDLES; i++ )
if( m_entities[i].m_refcount )
{
delete( m_entities[i].m_entity );
m_entities[i].m_entity = 0;
m_entities[i].m_refcount = 0;
}
}
void CEntityManager::deleteAll()
@ -48,7 +56,10 @@ HEntity CEntityManager::create( CBaseEntity* base, CVector3D position, float ori
return( HEntity() );
while( m_entities[m_nextalloc].m_refcount )
{
m_nextalloc++;
assert(m_nextalloc < MAX_HANDLES);
}
m_entities[m_nextalloc].m_entity = new CEntity( base, position, orientation );
m_entities[m_nextalloc].m_entity->me = HEntity( m_nextalloc );
return( HEntity( m_nextalloc++ ) );