incorporate allocators/ into build

the old lib/allocators.h is now a bridge to the new headers (temporary)
make slight interface changes in lib/res/file to match the new allocators interface

This was SVN commit r5445.
janwas 2007-11-10 13:55:32 +00:00
parent 11081d8e36
commit 23a1c6196f
8 changed files with 30 additions and 1338 deletions


@ -399,6 +399,7 @@ function setup_all_libs ()
source_dirs = {
"lib",
"lib/allocators",
"lib/posix",
"lib/sysdep",
"lib/sysdep/ia32",


@ -1,626 +0,0 @@
/**
* =========================================================================
* File : allocators.cpp
* Project : 0 A.D.
* Description : memory suballocators.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "allocators.h"
#include "lib/posix/posix_mman.h" // PROT_* constants for da_set_prot
#include "lib/posix/posix.h" // sysconf
#include "lib/sysdep/cpu.h" // cpu_CAS
#include "byte_order.h"
#include "bits.h"
//-----------------------------------------------------------------------------
// helper routines
//-----------------------------------------------------------------------------
// latch page size in case we are called from static ctors (it's possible
// that they are called before our static initializers).
// pool_create is therefore now safe to call before main().
static size_t get_page_size()
{
static const size_t page_size = cpu_PageSize();
return page_size;
}
static inline bool is_page_multiple(uintptr_t x)
{
return (x % get_page_size()) == 0;
}
static inline size_t round_up_to_page(size_t size)
{
return round_up(size, get_page_size());
}
// very thin wrapper on top of sys/mman.h that makes the intent more obvious:
// (its commit/decommit semantics are difficult to tell apart)
static inline LibError LibError_from_mmap(void* ret, bool warn_if_failed = true)
{
if(ret != MAP_FAILED)
return INFO::OK;
return LibError_from_errno(warn_if_failed);
}
// "anonymous" effectively means mapping /dev/zero, but is more efficient.
// MAP_ANONYMOUS is not in SUSv3, but is a very common extension.
// unfortunately, MacOS X only defines MAP_ANON, which Solaris says is
// deprecated. workaround there: define MAP_ANONYMOUS in terms of MAP_ANON.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif
static const int mmap_flags = MAP_PRIVATE|MAP_ANONYMOUS;
static LibError mem_reserve(size_t size, u8** pp)
{
errno = 0;
void* ret = mmap(0, size, PROT_NONE, mmap_flags|MAP_NORESERVE, -1, 0);
*pp = (u8*)ret;
return LibError_from_mmap(ret);
}
static LibError mem_release(u8* p, size_t size)
{
errno = 0;
int ret = munmap(p, size);
return LibError_from_posix(ret);
}
static LibError mem_commit(u8* p, size_t size, int prot)
{
if(prot == PROT_NONE)
// not allowed - it would be misinterpreted by mmap.
WARN_RETURN(ERR::INVALID_PARAM);
errno = 0;
void* ret = mmap(p, size, prot, mmap_flags|MAP_FIXED, -1, 0);
return LibError_from_mmap(ret);
}
static LibError mem_decommit(u8* p, size_t size)
{
errno = 0;
void* ret = mmap(p, size, PROT_NONE, mmap_flags|MAP_NORESERVE|MAP_FIXED, -1, 0);
return LibError_from_mmap(ret);
}
static LibError mem_protect(u8* p, size_t size, int prot)
{
errno = 0;
int ret = mprotect(p, size, prot);
return LibError_from_posix(ret);
}
//-----------------------------------------------------------------------------
// page aligned allocator
//-----------------------------------------------------------------------------
void* page_aligned_alloc(size_t unaligned_size)
{
const size_t size_pa = round_up_to_page(unaligned_size);
u8* p = 0;
RETURN0_IF_ERR(mem_reserve(size_pa, &p));
RETURN0_IF_ERR(mem_commit(p, size_pa, PROT_READ|PROT_WRITE));
return p;
}
void page_aligned_free(void* p, size_t unaligned_size)
{
if(!p)
return;
debug_assert(is_page_multiple((uintptr_t)p));
const size_t size_pa = round_up_to_page(unaligned_size);
(void)mem_release((u8*)p, size_pa);
}
//-----------------------------------------------------------------------------
// dynamic (expandable) array
//-----------------------------------------------------------------------------
// indicates that this DynArray must not be resized or freed
// (e.g. because it merely wraps an existing memory range).
// stored in da->prot to reduce size; doesn't conflict with any PROT_* flags.
const int DA_NOT_OUR_MEM = 0x40000000;
static LibError validate_da(DynArray* da)
{
if(!da)
WARN_RETURN(ERR::INVALID_PARAM);
u8* const base = da->base;
const size_t max_size_pa = da->max_size_pa;
const size_t cur_size = da->cur_size;
const size_t pos = da->pos;
const int prot = da->prot;
if(debug_is_pointer_bogus(base))
WARN_RETURN(ERR::_1);
// note: don't check if base is page-aligned -
// might not be true for 'wrapped' mem regions.
// if(!is_page_multiple((uintptr_t)base))
// WARN_RETURN(ERR::_2);
if(!is_page_multiple(max_size_pa))
WARN_RETURN(ERR::_3);
if(cur_size > max_size_pa)
WARN_RETURN(ERR::_4);
if(pos > cur_size || pos > max_size_pa)
WARN_RETURN(ERR::_5);
if(prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC|DA_NOT_OUR_MEM))
WARN_RETURN(ERR::_6);
return INFO::OK;
}
#define CHECK_DA(da) RETURN_ERR(validate_da(da))
LibError da_alloc(DynArray* da, size_t max_size)
{
const size_t max_size_pa = round_up_to_page(max_size);
u8* p;
RETURN_ERR(mem_reserve(max_size_pa, &p));
da->base = p;
da->max_size_pa = max_size_pa;
da->cur_size = 0;
da->cur_size_pa = 0;
da->prot = PROT_READ|PROT_WRITE;
da->pos = 0;
CHECK_DA(da);
return INFO::OK;
}
LibError da_free(DynArray* da)
{
CHECK_DA(da);
u8* p = da->base;
size_t size_pa = da->max_size_pa;
bool was_wrapped = (da->prot & DA_NOT_OUR_MEM) != 0;
// wipe out the DynArray for safety
// (must be done here because mem_release may fail)
memset(da, 0, sizeof(*da));
// skip mem_release if <da> was allocated via da_wrap_fixed
// (i.e. it doesn't actually own any memory). don't complain;
// da_free is supposed to be called even in the above case.
if(!was_wrapped)
RETURN_ERR(mem_release(p, size_pa));
return INFO::OK;
}
LibError da_set_size(DynArray* da, size_t new_size)
{
CHECK_DA(da);
if(da->prot & DA_NOT_OUR_MEM)
WARN_RETURN(ERR::LOGIC);
// determine how much to add/remove
const size_t cur_size_pa = round_up_to_page(da->cur_size);
const size_t new_size_pa = round_up_to_page(new_size);
const ssize_t size_delta_pa = (ssize_t)new_size_pa - (ssize_t)cur_size_pa;
// not enough memory to satisfy this expand request: abort.
// note: do not complain - some allocators (e.g. file_cache)
// legitimately use up all available space.
if(new_size_pa > da->max_size_pa)
return ERR::LIMIT; // NOWARN
u8* end = da->base + cur_size_pa;
// expanding
if(size_delta_pa > 0)
RETURN_ERR(mem_commit(end, size_delta_pa, da->prot));
// shrinking
else if(size_delta_pa < 0)
RETURN_ERR(mem_decommit(end+size_delta_pa, -size_delta_pa));
// else: no change in page count, e.g. if going from size=1 to 2
// (we don't want mem_* to have to handle size=0)
da->cur_size = new_size;
da->cur_size_pa = new_size_pa;
CHECK_DA(da);
return INFO::OK;
}
LibError da_reserve(DynArray* da, size_t size)
{
if(da->pos+size > da->cur_size_pa)
RETURN_ERR(da_set_size(da, da->cur_size_pa+size));
da->cur_size = std::max(da->cur_size, da->pos+size);
return INFO::OK;
}
LibError da_set_prot(DynArray* da, int prot)
{
CHECK_DA(da);
// somewhat more subtle: POSIX mprotect requires the memory have been
// mmap-ed, which it probably wasn't here.
if(da->prot & DA_NOT_OUR_MEM)
WARN_RETURN(ERR::LOGIC);
da->prot = prot;
RETURN_ERR(mem_protect(da->base, da->cur_size_pa, prot));
CHECK_DA(da);
return INFO::OK;
}
LibError da_wrap_fixed(DynArray* da, u8* p, size_t size)
{
da->base = p;
da->max_size_pa = round_up_to_page(size);
da->cur_size = size;
da->cur_size_pa = da->max_size_pa;
da->prot = PROT_READ|PROT_WRITE|DA_NOT_OUR_MEM;
da->pos = 0;
CHECK_DA(da);
return INFO::OK;
}
LibError da_read(DynArray* da, void* data, size_t size)
{
// make sure we have enough data to read
if(da->pos+size > da->cur_size)
WARN_RETURN(ERR::FAIL);
cpu_memcpy(data, da->base+da->pos, size);
da->pos += size;
return INFO::OK;
}
LibError da_append(DynArray* da, const void* data, size_t size)
{
RETURN_ERR(da_reserve(da, size));
cpu_memcpy(da->base+da->pos, data, size);
da->pos += size;
return INFO::OK;
}
//-----------------------------------------------------------------------------
// pool allocator
//-----------------------------------------------------------------------------
// "freelist" is a pointer to the first unused element (0 if there are none);
// its memory holds a pointer to the next free one in list.
static void freelist_push(void** pfreelist, void* el)
{
debug_assert(el != 0);
void* prev_el = *pfreelist;
*pfreelist = el;
*(void**)el = prev_el;
}
static void* freelist_pop(void** pfreelist)
{
void* el = *pfreelist;
// nothing in list
if(!el)
return 0;
*pfreelist = *(void**)el;
return el;
}
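// usage sketch (illustrative; block_a/block_b stand for any free elements of
// at least sizeof(void*) bytes, since the link is stored in the block itself):
//   void* freelist = 0;
//   freelist_push(&freelist, block_a);   // list: a
//   freelist_push(&freelist, block_b);   // list: b -> a
//   void* el = freelist_pop(&freelist);  // returns block_b (LIFO order)
//   el = freelist_pop(&freelist);        // returns block_a
//   el = freelist_pop(&freelist);        // returns 0 - list is empty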
// elements returned are aligned to this many bytes:
static const size_t ALIGN = 8;
LibError pool_create(Pool* p, size_t max_size, size_t el_size)
{
if(el_size == POOL_VARIABLE_ALLOCS)
p->el_size = 0;
else
p->el_size = round_up(el_size, ALIGN);
p->freelist = 0;
RETURN_ERR(da_alloc(&p->da, max_size));
return INFO::OK;
}
LibError pool_destroy(Pool* p)
{
// don't be picky and complain if the freelist isn't empty;
// we don't care since it's all part of the da anyway.
// however, zero it to prevent further allocs from succeeding.
p->freelist = 0;
return da_free(&p->da);
}
bool pool_contains(Pool* p, void* el)
{
// outside of our range
if(!(p->da.base <= el && el < p->da.base+p->da.pos))
return false;
// sanity check: it should be aligned (if pool has fixed-size elements)
if(p->el_size)
debug_assert((uintptr_t)((u8*)el - p->da.base) % p->el_size == 0);
return true;
}
void* pool_alloc(Pool* p, size_t size)
{
// if pool allows variable sizes, go with the size parameter,
// otherwise the pool el_size setting.
const size_t el_size = p->el_size? p->el_size : round_up(size, ALIGN);
// note: this can never happen in pools with variable-sized elements
// because they disallow pool_free.
void* el = freelist_pop(&p->freelist);
if(el)
goto have_el;
// alloc a new entry
{
// expand, if necessary
if(da_reserve(&p->da, el_size) < 0)
return 0;
el = p->da.base + p->da.pos;
p->da.pos += el_size;
}
have_el:
debug_assert(pool_contains(p, el)); // paranoia
return el;
}
void pool_free(Pool* p, void* el)
{
// only allowed to free items if we were initialized with
// fixed el_size. (this avoids having to pass el_size here and
// check if requested_size matches that when allocating)
if(p->el_size == 0)
{
debug_warn("cannot free variable-size items");
return;
}
if(pool_contains(p, el))
freelist_push(&p->freelist, el);
else
debug_warn("invalid pointer (not in pool)");
}
void pool_free_all(Pool* p)
{
p->freelist = 0;
// must be reset before da_set_size or CHECK_DA will complain.
p->da.pos = 0;
da_set_size(&p->da, 0);
}
//-----------------------------------------------------------------------------
// bucket allocator
//-----------------------------------------------------------------------------
// power-of-2 isn't required; value is arbitrary.
const size_t BUCKET_SIZE = 4000;
LibError bucket_create(Bucket* b, size_t el_size)
{
b->freelist = 0;
b->el_size = round_up(el_size, ALIGN);
// note: allocating here avoids the is-this-the-first-time check
// in bucket_alloc, which speeds things up.
b->bucket = (u8*)malloc(BUCKET_SIZE);
if(!b->bucket)
{
// cause next bucket_alloc to retry the allocation
b->pos = BUCKET_SIZE;
b->num_buckets = 0;
WARN_RETURN(ERR::NO_MEM);
}
*(u8**)b->bucket = 0; // terminate list
b->pos = round_up(sizeof(u8*), ALIGN);
b->num_buckets = 1;
return INFO::OK;
}
void bucket_destroy(Bucket* b)
{
while(b->bucket)
{
u8* prev_bucket = *(u8**)b->bucket;
free(b->bucket);
b->bucket = prev_bucket;
b->num_buckets--;
}
debug_assert(b->num_buckets == 0);
// poison pill: cause subsequent alloc and free to fail
b->freelist = 0;
b->el_size = BUCKET_SIZE;
}
void* bucket_alloc(Bucket* b, size_t size)
{
size_t el_size = b->el_size? b->el_size : round_up(size, ALIGN);
// must fit in a bucket
debug_assert(el_size <= BUCKET_SIZE-sizeof(u8*));
// try to satisfy alloc from freelist
void* el = freelist_pop(&b->freelist);
if(el)
return el;
// if there's not enough space left, close current bucket and
// allocate another.
if(b->pos+el_size > BUCKET_SIZE)
{
u8* bucket = (u8*)malloc(BUCKET_SIZE);
if(!bucket)
return 0;
*(u8**)bucket = b->bucket;
b->bucket = bucket;
// skip bucket list field and align (note: malloc already
// aligns to at least 8 bytes, so don't take b->bucket into account)
b->pos = round_up(sizeof(u8*), ALIGN);
b->num_buckets++;
}
void* ret = b->bucket+b->pos;
b->pos += el_size;
return ret;
}
void bucket_free(Bucket* b, void* el)
{
if(b->el_size == 0)
{
debug_warn("cannot free variable-size items");
return;
}
freelist_push(&b->freelist, el);
// note: checking if <el> was actually allocated from <b> is difficult:
// it may not be in the currently open bucket, so we'd have to
// iterate over the list - too much work.
}
//-----------------------------------------------------------------------------
// matrix allocator
//-----------------------------------------------------------------------------
void** matrix_alloc(uint cols, uint rows, size_t el_size)
{
const size_t initial_align = 64;
// note: no provision for padding rows. this is a bit more work and
// if el_size isn't a power-of-2, performance is going to suck anyway.
// otherwise, the initial alignment will take care of it.
const size_t ptr_array_size = cols*sizeof(void*);
const size_t row_size = cols*el_size;
const size_t data_size = rows*row_size;
const size_t total_size = ptr_array_size + initial_align + data_size;
void* p = malloc(total_size);
if(!p)
return 0;
uintptr_t data_addr = (uintptr_t)p + ptr_array_size + initial_align;
data_addr -= data_addr % initial_align;
// ensure the alignment fixup didn't move the data address before the end of the pointer array
debug_assert(data_addr >= (uintptr_t)p+ptr_array_size);
void** ptr_array = (void**)p;
for(uint i = 0; i < cols; i++)
{
ptr_array[i] = (void*)data_addr;
data_addr += row_size;
}
// didn't overrun total allocation
debug_assert(data_addr <= (uintptr_t)p+total_size);
return ptr_array;
}
void matrix_free(void** matrix)
{
free(matrix);
}
//-----------------------------------------------------------------------------
// allocator optimized for single instances
//-----------------------------------------------------------------------------
void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size)
{
// sanity check
debug_assert(*in_use_flag == 0 || *in_use_flag == 1);
void* p;
// successfully reserved the single instance
if(cpu_CAS(in_use_flag, 0, 1))
p = storage;
// already in use (rare) - allocate from heap
else
p = new u8[size];
memset(p, 0, size);
return p;
}
void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p)
{
// sanity check
debug_assert(*in_use_flag == 0 || *in_use_flag == 1);
if(p == storage)
{
if(cpu_CAS(in_use_flag, 1, 0))
{
// ok, flag has been reset to 0
}
else
debug_warn("in_use_flag out of sync (double free?)");
}
// was allocated from heap
else
{
// single instance may have been freed by now - cannot assume
// anything about in_use_flag.
delete[] (u8*)p;
}
}
//-----------------------------------------------------------------------------
// static allocator
//-----------------------------------------------------------------------------
void* static_calloc(StaticStorage* ss, size_t size)
{
void* p = (void*)round_up((uintptr_t)ss->pos, 16);
ss->pos = (u8*)p+size;
debug_assert(ss->pos <= ss->end);
return p;
}


@ -1,695 +1,10 @@
/**
* =========================================================================
* File : allocators.h
* Project : 0 A.D.
* Description : memory suballocators.
* =========================================================================
*/
// temporary "bridge" header to new lib/allocators location until
// all source files are adapted to match the new headers.
// license: GPL; see lib/license.txt
#include "lib/allocators/allocators.h"
#include "lib/allocators/bucket.h"
#include "lib/allocators/dynarray.h"
#include "lib/allocators/headerless.h"
#include "lib/allocators/mem_util.h"
#include "lib/allocators/pool.h"
#ifndef INCLUDED_ALLOCATORS
#define INCLUDED_ALLOCATORS
#include <map>
#include "lib/posix/posix_mman.h" // PROT_*
#include "lib/sysdep/cpu.h" // cpu_CAS
//
// page aligned allocator
//
/**
* allocate memory aligned to the system page size.
*
* this is useful for file_buf_alloc, which uses this allocator to
* get sector-aligned (hopefully; see file_sector_size) IO buffers.
*
* note that this allocator is stateless and very little error checking
* can be performed.
*
* the memory is initially writable and you can use mprotect to set other
* access permissions if desired.
*
* @param unaligned_size minimum size [bytes] to allocate.
* @return page-aligned and -padded memory or 0 on error / out of memory.
**/
extern void* page_aligned_alloc(size_t unaligned_size);
/**
* free a previously allocated page-aligned region.
*
* @param p exact value returned from page_aligned_alloc
* @param size exact value passed to page_aligned_alloc
**/
extern void page_aligned_free(void* p, size_t unaligned_size);
// adapter that allows calling page_aligned_free as a boost::shared_ptr deleter.
class PageAlignedDeleter
{
public:
PageAlignedDeleter(size_t size)
: m_size(size)
{
debug_assert(m_size != 0);
}
void operator()(u8* p)
{
debug_assert(m_size != 0);
page_aligned_free(p, m_size);
m_size = 0;
}
private:
size_t m_size;
};
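/*
usage sketch: bind the deleter to a boost::shared_ptr so the buffer is
released automatically. AllocateIoBuffer is a hypothetical helper; the
boost::shared_ptr constructor taking a deleter is assumed available.
boost::shared_ptr<u8> AllocateIoBuffer(size_t size)
{
	u8* p = (u8*)page_aligned_alloc(size);
	return boost::shared_ptr<u8>(p, PageAlignedDeleter(size));
}
*/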
//
// dynamic (expandable) array
//
/**
* provides a memory range that can be expanded but doesn't waste
* physical memory or relocate itself.
*
* works by preallocating address space and committing as needed.
* used as a building block for other allocators.
**/
struct DynArray
{
u8* base;
size_t max_size_pa; /// reserved
size_t cur_size; /// committed
size_t cur_size_pa;
/**
* mprotect flags applied to newly committed pages
**/
int prot;
size_t pos;
};
/**
* ready the DynArray object for use.
*
* no virtual memory is actually committed until calls to da_set_size.
*
* @param da DynArray.
* @param max_size size [bytes] of address space to reserve (*);
* the DynArray can never expand beyond this.
* (* rounded up to next page size multiple)
* @return LibError.
**/
extern LibError da_alloc(DynArray* da, size_t max_size);
/**
* free all memory (address space + physical) that constitutes the
* given array.
*
* use-after-free is impossible because the memory is unmapped.
*
* @param DynArray* da; zeroed afterwards.
* @return LibError
**/
extern LibError da_free(DynArray* da);
/**
* expand or shrink the array: changes the amount of currently committed
* (i.e. usable) memory pages.
*
* @param da DynArray.
* @param new_size target size (rounded up to next page multiple).
* pages are added/removed until this is met.
* @return LibError.
**/
extern LibError da_set_size(DynArray* da, size_t new_size);
/**
* Make sure at least <size> bytes starting at da->pos are committed and
* ready for use.
*
* @param DynArray*
* @param size Minimum amount to guarantee [bytes]
* @return LibError
**/
extern LibError da_reserve(DynArray* da, size_t size);
/**
* change access rights of the array memory.
*
* used to implement write-protection. affects the currently committed
* pages as well as all subsequently added pages.
*
* @param da DynArray.
* @param prot a combination of the PROT_* values used with mprotect.
* @return LibError.
**/
extern LibError da_set_prot(DynArray* da, int prot);
/**
* "wrap" (i.e. store information about) the given buffer in a DynArray.
*
* this is used to allow calling da_read or da_append on normal buffers.
* da_free should be called when the DynArray is no longer needed,
* even though it doesn't free this memory (but does zero the DynArray).
*
* @param da DynArray. Note: any future operations on it that would
* change the underlying memory (e.g. da_set_size) will fail.
* @param p target memory (no alignment/padding requirements)
* @param size maximum size (no alignment requirements)
* @return LibError.
**/
extern LibError da_wrap_fixed(DynArray* da, u8* p, size_t size);
/**
* "read" from array, i.e. copy into the given buffer.
*
* starts at offset DynArray.pos and advances this.
*
* @param da DynArray.
* @param data_dst destination memory
* @param size [bytes] to copy
* @return LibError.
**/
extern LibError da_read(DynArray* da, void* data_dst, size_t size);
/**
* "write" to array, i.e. copy from the given buffer.
*
* starts at offset DynArray.pos and advances this.
*
* @param da DynArray.
* @param data_src source memory
* @param size [bytes] to copy
* @return LibError.
**/
extern LibError da_append(DynArray* da, const void* data_src, size_t size);
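/*
usage sketch (illustrative; Header is a hypothetical POD struct, file_buf and
file_size describe an existing buffer, and RETURN_ERR assumes the surrounding
function returns LibError):

// build up an expandable buffer:
Header header;
DynArray da;
RETURN_ERR(da_alloc(&da, 0x10000));                  // reserve address space only
RETURN_ERR(da_append(&da, &header, sizeof(header))); // commits pages on demand
(void)da_free(&da);

// read from an existing buffer via the same interface:
DynArray wrapper;
RETURN_ERR(da_wrap_fixed(&wrapper, file_buf, file_size));
RETURN_ERR(da_read(&wrapper, &header, sizeof(header)));
(void)da_free(&wrapper);   // doesn't release file_buf; only zeroes the DynArray
*/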
//
// pool allocator
//
/**
* allocator design parameters:
* - O(1) alloc and free;
* - either fixed- or variable-sized blocks;
* - doesn't preallocate the entire pool;
* - returns sequential addresses.
*
* opaque! do not read/write any fields!
**/
struct Pool
{
DynArray da;
/**
* size of elements. = 0 if pool set up for variable-sized
* elements, otherwise rounded up to pool alignment.
**/
size_t el_size;
/**
* pointer to freelist (opaque); see freelist_*.
* never used (remains 0) if elements are of variable size.
**/
void* freelist;
};
/**
* pass as pool_create's <el_size> param to indicate variable-sized allocs
* are required (see below).
**/
const size_t POOL_VARIABLE_ALLOCS = ~0u;
/**
* Ready Pool for use.
*
* @param Pool*
* @param max_size Max size [bytes] of the Pool; this much
* (rounded up to next page multiple) virtual address space is reserved.
* no virtual memory is actually committed until calls to pool_alloc.
* @param el_size Number of bytes that will be returned by each
* pool_alloc (whose size parameter is then ignored). Can be 0 to
* allow variable-sized allocations, but pool_free is then unusable.
* @return LibError
**/
extern LibError pool_create(Pool* p, size_t max_size, size_t el_size);
/**
* free all memory (address space + physical) that constitutes the
* given Pool.
*
* future alloc and free calls on this pool will fail.
* continued use of the allocated memory (*) is
* impossible because it is marked not-present via MMU.
* (* no matter if in freelist or unused or "allocated" to user)
*
* @param Pool*
* @return LibError.
**/
extern LibError pool_destroy(Pool* p);
/**
* indicate whether a pointer was allocated from the given pool.
*
* this is useful for callers that use several types of allocators.
*
* @param Pool*
* @return bool.
**/
extern bool pool_contains(Pool* p, void* el);
/**
* Dole out memory from the pool.
* exhausts the freelist before returning new entries to improve locality.
*
* @param Pool*
* @param size bytes to allocate; ignored if pool_create's el_size was not 0.
* @return allocated memory, or 0 if the Pool would have to be expanded and
* there isn't enough memory to do so.
**/
extern void* pool_alloc(Pool* p, size_t size);
/**
* Make a fixed-size element available for reuse in the given Pool.
*
* this is not allowed if the Pool was created for variable-size elements.
* rationale: avoids having to pass el_size here and compare with size when
* allocating; also prevents fragmentation and leaking memory.
*
* @param Pool*
* @param el Element returned by pool_alloc.
**/
extern void pool_free(Pool* p, void* el);
/**
* "free" all user allocations that ensued from the given Pool.
*
* this resets it as if freshly pool_create-d, but doesn't release the
* underlying reserved virtual memory.
*
* @param Pool*
**/
extern void pool_free_all(Pool* p);
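/*
usage sketch (illustrative; Node is a hypothetical fixed-size element type and
the calls assume a function returning LibError):
Pool node_pool;
RETURN_ERR(pool_create(&node_pool, 1*MiB, sizeof(Node)));
Node* n = (Node*)pool_alloc(&node_pool, 0);   // size ignored: el_size is fixed
if(!n)
	WARN_RETURN(ERR::NO_MEM);
pool_free(&node_pool, n);      // returns the element to the freelist
pool_free_all(&node_pool);     // or: reset the whole pool at once
(void)pool_destroy(&node_pool);
*/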
//
// bucket allocator
//
/**
* allocator design goals:
* - either fixed- or variable-sized blocks;
* - allow freeing individual blocks if they are all fixed-size;
* - never relocates;
* - no fixed limit.
*
* note: this type of allocator is called "region-based" in the literature.
* see "Reconsidering Custom Memory Allocation" (Berger, Zorn, McKinley).
* if individual variable-size elements must be freeable, consider "reaps":
* basically a combination of region and heap, where frees go to the heap and
* allocs exhaust that memory first and otherwise use the region.
*
* opaque! do not read/write any fields!
**/
struct Bucket
{
/**
* currently open bucket.
**/
u8* bucket;
/**
* offset of free space at end of current bucket (i.e. # bytes in use).
**/
size_t pos;
void* freelist;
size_t el_size : 16;
/**
* records # buckets allocated; verifies the list of buckets is correct.
**/
uint num_buckets : 16;
};
/**
* ready the Bucket object for use.
*
* @param Bucket*
* @param el_size 0 to allow variable-sized allocations (which cannot be
* freed individually); otherwise, it specifies the number of bytes that
* will be returned by bucket_alloc (whose size parameter is then ignored).
* @return LibError.
**/
extern LibError bucket_create(Bucket* b, size_t el_size);
/**
* free all memory that ensued from <b>.
*
* future alloc and free calls on this Bucket will fail.
*
* @param Bucket*
**/
extern void bucket_destroy(Bucket* b);
/**
* Dole out memory from the Bucket.
* exhausts the freelist before returning new entries to improve locality.
*
* @param Bucket*
* @param size bytes to allocate; ignored if bucket_create's el_size was not 0.
* @return allocated memory, or 0 if the Bucket would have to be expanded and
* there isn't enough memory to do so.
**/
extern void* bucket_alloc(Bucket* b, size_t size);
/**
* make an entry available for reuse in the given Bucket.
*
* this is not allowed if created for variable-size elements.
* rationale: avoids having to pass el_size here and compare with size when
* allocating; also prevents fragmentation and leaking memory.
*
* @param Bucket*
* @param el entry allocated via bucket_alloc.
**/
extern void bucket_free(Bucket* b, void* el);
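/*
usage sketch (illustrative; Record is a hypothetical fixed-size element type
and the calls assume a function returning LibError):
Bucket b;
RETURN_ERR(bucket_create(&b, sizeof(Record)));
Record* r = (Record*)bucket_alloc(&b, 0);   // size ignored: el_size is fixed
if(!r)
	WARN_RETURN(ERR::NO_MEM);
bucket_free(&b, r);      // only allowed because el_size was fixed
bucket_destroy(&b);
*/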
//
// matrix allocator
//
/**
* allocate a 2D matrix accessible as matrix[col][row].
*
* takes care of the dirty work of allocating 2D matrices:
* - aligns data
* - only allocates one memory block, which is more efficient than
* malloc/new for each row.
*
* @param cols, rows: dimension (cols x rows)
* @param el_size size [bytes] of a matrix cell
* @return 0 if out of memory, otherwise matrix that should be cast to
* type** (sizeof(type) == el_size). must be freed via matrix_free.
**/
extern void** matrix_alloc(uint cols, uint rows, size_t el_size);
/**
* free the given matrix.
*
* @param matrix allocated by matrix_alloc; no-op if 0.
* callers will likely want to pass variables of a different type
* (e.g. int**); they must be cast to void**.
**/
extern void matrix_free(void** matrix);
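/*
usage sketch (illustrative dimensions; note the matrix[col][row] indexing
described above):
int** m = (int**)matrix_alloc(3, 3, sizeof(int));
if(m)
{
	m[2][1] = 42;          // column 2, row 1
	matrix_free((void**)m);
}
*/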
//
// allocator optimized for single instances
//
/**
* Allocate <size> bytes of zeroed memory.
*
* intended for applications that frequently alloc/free a single
* fixed-size object. caller provides static storage and an in-use flag;
* we use that memory if available and otherwise fall back to the heap.
* if the application only has one object in use at a time, malloc is
* avoided; this is faster and avoids heap fragmentation.
*
* note: thread-safe despite use of shared static data.
*
* @param storage Caller-allocated memory of at least <size> bytes
* (typically a static array of bytes)
* @param in_use_flag Pointer to a flag we set when <storage> is in-use.
* @param size [bytes] to allocate
* @return allocated memory (typically = <storage>, but falls back to
* malloc if that's in-use), or 0 (with warning) if out of memory.
**/
extern void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size);
/**
* Free a memory block that had been allocated by single_calloc.
*
* @param storage Exact value passed to single_calloc.
* @param in_use_flag Exact value passed to single_calloc.
* @param p Exact value returned by single_calloc.
**/
extern void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p);
#ifdef __cplusplus
/**
* C++ wrapper on top of single_calloc that's slightly easier to use.
*
* T must be POD (Plain Old Data) because it is memset to 0!
**/
template<class T> class SingleAllocator
{
// evil but necessary hack: we don't want to instantiate a T directly
// because it may not have a default ctor. an array of uninitialized
// storage is used instead. single_calloc doesn't know about alignment,
// so we fix this by asking for an array of doubles.
double storage[(sizeof(T)+sizeof(double)-1)/sizeof(double)];
volatile uintptr_t is_in_use;
public:
typedef T value_type;
SingleAllocator()
{
is_in_use = 0;
}
T* alloc()
{
return (T*)single_calloc(&storage, &is_in_use, sizeof(storage));
}
void release(T* p)
{
single_free(&storage, &is_in_use, p);
}
};
#endif // #ifdef __cplusplus
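/*
usage sketch (illustrative; IoSlot is a hypothetical POD type):
struct IoSlot { u8 buf[512]; };
static SingleAllocator<IoSlot> slot_allocator;
IoSlot* slot = slot_allocator.alloc();   // usually returns the static storage
// ... use slot ...
slot_allocator.release(slot);            // heap fallback is delete[]-d instead
*/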
//
// static allocator
//
// dole out chunks of memory from storage reserved in the BSS.
// freeing isn't necessary.
/**
* opaque; initialized by STATIC_STORAGE and used by static_calloc
**/
struct StaticStorage
{
void* pos;
void* end;
};
// define <size> bytes of storage and prepare <name> for use with
// static_calloc.
// must be invoked from file or function scope.
#define STATIC_STORAGE(name, size)\
static u8 storage[(size)];\
static StaticStorage name = { storage, storage+(size) }
/*
usage example:
static Object* pObject;
void InitObject()
{
STATIC_STORAGE(ss, 100); // includes padding
void* addr = static_calloc(ss, sizeof(Object));
pObject = new(addr) Object;
}
*/
/**
* dole out memory from static storage reserved in BSS.
*
* this is useful for static objects that are used before _cinit - callers
* define static storage for one or several objects, use this function to
* retrieve an aligned pointer, then construct there via placement new.
*
* @param ss - initialized via STATIC_STORAGE
* @param size [bytes] to allocate
* @return aligned (suitable for any type) pointer
*
* raises a warning if there's not enough room (indicates incorrect usage)
**/
extern void* static_calloc(StaticStorage* ss, size_t size);
// (no need to free static_calloc-ed memory since it's in the BSS)
//
// overrun protection
//
/**
OverrunProtector wraps an arbitrary object in DynArray memory and can detect
inadvertent writes to it. this is useful for tracking down memory overruns.
the basic idea is to require users to request access to the object and
notify us when done; memory access permission is temporarily granted.
(similar in principle to Software Transactional Memory).
since this is quite slow, the protection is disabled unless
CONFIG_OVERRUN_PROTECTION == 1; this avoids having to remove the
wrapper code in release builds and re-write when looking for overruns.
example usage:
OverrunProtector<your_class> your_class_wrapper;
..
your_class* yc = your_class_wrapper.get(); // unlock, make ready for use
if(!yc) // your_class_wrapper's one-time alloc of a your_class-
abort(); // instance had failed - can't continue.
doSomethingWith(yc); // read/write access
your_class_wrapper.lock(); // disallow further access until next .get()
..
**/
#ifdef REDEFINED_NEW
# include "lib/nommgr.h"
#endif
template<class T> class OverrunProtector
{
DynArray da;
T* cached_ptr;
uintptr_t initialized;
public:
OverrunProtector()
{
memset(&da, 0, sizeof(da));
cached_ptr = 0;
initialized = 0;
}
~OverrunProtector()
{
shutdown();
}
void lock()
{
#if CONFIG_OVERRUN_PROTECTION
da_set_prot(&da, PROT_NONE);
#endif
}
private:
void unlock()
{
#if CONFIG_OVERRUN_PROTECTION
da_set_prot(&da, PROT_READ|PROT_WRITE);
#endif
}
void init()
{
if(da_alloc(&da, sizeof(T)) < 0)
{
fail:
WARN_ERR(ERR::NO_MEM);
return;
}
if(da_set_size(&da, sizeof(T)) < 0)
goto fail;
cached_ptr = new(da.base) T();
lock();
}
void shutdown()
{
if(!cpu_CAS(&initialized, 1, 2))
return; // never initialized or already shut down - abort
unlock();
cached_ptr->~T(); // call dtor (since we used placement new)
cached_ptr = 0;
(void)da_free(&da);
}
public:
T* get()
{
// this could theoretically be done in the ctor, but we try to
// minimize non-trivial code at NLSO ctor time
// (avoids init order problems).
if(cpu_CAS(&initialized, 0, 1))
init();
debug_assert(initialized != 2 && "OverrunProtector: used after dtor called:");
unlock();
return cached_ptr;
}
};
#ifdef REDEFINED_NEW
# include "lib/mmgr.h"
#endif
//
// allocator test rig
//
/**
* allocator test rig.
* call from each allocator operation to sanity-check them.
* should only be used during debug mode due to serious overhead.
**/
class AllocatorChecker
{
public:
void notify_alloc(void* p, size_t size)
{
const Allocs::value_type item = std::make_pair(p, size);
std::pair<Allocs::iterator, bool> ret = allocs.insert(item);
debug_assert(ret.second == true); // wasn't already in map
}
void notify_free(void* p, size_t size)
{
Allocs::iterator it = allocs.find(p);
if(it == allocs.end())
debug_warn("AllocatorChecker: freeing invalid pointer");
else
{
// size must match what was passed to notify_alloc
const size_t allocated_size = it->second;
debug_assert(size == allocated_size);
allocs.erase(it);
}
}
/**
* allocator is resetting itself, i.e. wiping out all allocs.
**/
void notify_clear()
{
allocs.clear();
}
private:
typedef std::map<void*, size_t> Allocs;
Allocs allocs;
};
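/*
integration sketch (illustrative; shows one way an allocator could report to
the checker, here wrapping the Bucket API; the NDEBUG guard stands in for
whatever debug-only switch the project uses):
static Bucket s_bucket;
#ifndef NDEBUG
static AllocatorChecker s_checker;
#endif
void* CheckedAlloc(size_t size)
{
	void* p = bucket_alloc(&s_bucket, size);
#ifndef NDEBUG
	if(p)
		s_checker.notify_alloc(p, size);
#endif
	return p;
}
void CheckedFree(void* p, size_t size)
{
#ifndef NDEBUG
	s_checker.notify_free(p, size);
#endif
	bucket_free(&s_bucket, p);
}
*/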
#endif // #ifndef INCLUDED_ALLOCATORS


@ -2,6 +2,8 @@
#include "lib/allocators/headerless.h"
void* const null = 0;
class TestHeaderless: public CxxTest::TestSuite
{
public:
@ -10,16 +12,16 @@ public:
HeaderlessAllocator a(8192);
// can't Allocate unaligned sizes
TS_ASSERT_EQUALS(a.Allocate(1), 0);
TS_ASSERT_EQUALS(a.Allocate(1), null);
// can't Allocate too small amounts
TS_ASSERT_EQUALS(a.Allocate(16), 0);
TS_ASSERT_EQUALS(a.Allocate(16), null);
// can Allocate the entire pool
char* p1 = (char*)a.Allocate(4096);
char* p2 = (char*)a.Allocate(4096);
TS_ASSERT_DIFFERS(p1, 0);
TS_ASSERT_DIFFERS(p2, 0);
TS_ASSERT_DIFFERS(p1, null);
TS_ASSERT_DIFFERS(p2, null);
// back-to-back (non-freelist) allocations should be contiguous
TS_ASSERT_EQUALS(p1+4096, p2);
@ -47,16 +49,16 @@ public:
void* p1 = a.Allocate(0x5670);
void* p2 = a.Allocate(0x7890);
void* p3 = a.Allocate(0x1230);
TS_ASSERT_DIFFERS(p1, 0);
TS_ASSERT_DIFFERS(p2, 0);
TS_ASSERT_DIFFERS(p3, 0);
TS_ASSERT_DIFFERS(p1, null);
TS_ASSERT_DIFFERS(p2, null);
TS_ASSERT_DIFFERS(p3, null);
// must be able to allocate the entire range after freeing the items
a.Deallocate(p1, 0x5670);
a.Deallocate(p2, 0x7890);
a.Deallocate(p3, 0x1230);
void* p4 = a.Allocate(0x10000);
TS_ASSERT_DIFFERS(p4, 0);
TS_ASSERT_DIFFERS(p4, null);
}
void test_Reset()
@ -100,7 +102,7 @@ public:
continue;
// find random allocation to deallocate
AllocMap::iterator it = allocs.begin();
const int numToSkip = rand() % allocs.size();
const int numToSkip = rand() % (int)allocs.size();
for(int skip = 0; skip < numToSkip; skip++)
++it;
void* p = (*it).first;


@ -410,7 +410,7 @@ LibError afile_io_issue(File* f, off_t user_ofs, size_t max_output_size, u8* use
H_DEREF(af->ha, Archive, a);
ArchiveFileIo* aio = (ArchiveFileIo*)io->opaque;
aio->io = io_allocator.alloc();
aio->io = io_allocator.Allocate();
if(!aio->io)
WARN_RETURN(ERR::NO_MEM);
@ -495,7 +495,7 @@ LibError afile_io_discard(FileIo* io)
{
ArchiveFileIo* aio = (ArchiveFileIo*)io->opaque;
LibError ret = file_io_discard(aio->io);
io_allocator.release(aio->io);
io_allocator.Deallocate(aio->io);
return ret;
}


@ -346,13 +346,13 @@ public:
void Destroy(ICodec* codec)
{
codec->~ICodec();
m_allocator.release((Allocator::value_type*)codec);
m_allocator.Deallocate((Allocator::value_type*)codec);
}
private:
void* AllocateMemory()
{
void* mem = m_allocator.alloc();
void* mem = m_allocator.Allocate();
if(!mem)
throw std::bad_alloc();
return mem;
@ -611,7 +611,7 @@ class StreamFactory
public:
Stream* Create(ContextType type, CompressionMethod method)
{
void* mem = m_allocator.alloc();
void* mem = m_allocator.Allocate();
if(!mem)
throw std::bad_alloc();
return new(mem) Stream(type, method);
@ -620,7 +620,7 @@ public:
void Destroy(Stream* stream)
{
stream->~Stream();
m_allocator.release(stream);
m_allocator.Deallocate(stream);
}
private:


@ -567,7 +567,7 @@ LibError zip_archive_create(const char* zip_filename, ZipArchive** pza)
RETURN_ERR(file_open(zip_filename, FILE_WRITE|FILE_NO_AIO, &za_copy.f));
RETURN_ERR(pool_create(&za_copy.cdfhs, 10*MiB, 0));
ZipArchive* za = za_mgr.alloc();
ZipArchive* za = za_mgr.Allocate();
if(!za)
WARN_RETURN(ERR::NO_MEM);
*za = za_copy;
@ -635,6 +635,6 @@ LibError zip_archive_finish(ZipArchive* za)
(void)file_close(&za->f);
(void)pool_destroy(&za->cdfhs);
za_mgr.release(za);
za_mgr.Deallocate(za);
return INFO::OK;
}


@ -85,7 +85,7 @@ LibError dir_open(const char* P_path, DirIterator* di)
char n_path[PATH_MAX];
RETURN_ERR(file_make_full_native_path(P_path, n_path));
pdi->pp = pp_allocator.alloc();
pdi->pp = pp_allocator.Allocate();
if(!pdi->pp)
WARN_RETURN(ERR::NO_MEM);
@ -161,7 +161,7 @@ get_another_entry:
LibError dir_close(DirIterator* di)
{
PosixDirIterator* pdi = (PosixDirIterator*)di->opaque;
pp_allocator.release(pdi->pp);
pp_allocator.Deallocate(pdi->pp);
errno = 0;
if(closedir(pdi->os_dir) < 0)