1
0
forked from 0ad/0ad

(actually remove)

This was SVN commit r10050.
This commit is contained in:
janwas 2011-08-21 10:31:23 +00:00
parent c26a369762
commit 3eb1ed0139
2 changed files with 0 additions and 278 deletions

View File

@ -1,176 +0,0 @@
/* Copyright (c) 2010 Wildfire Games
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* bucket allocator
*/
#include "precompiled.h"
#include "lib/allocators/bucket.h"
#include "lib/alignment.h"
#include "lib/allocators/freelist.h"
// size in bytes of each bucket, including the pointer-sized chain link
// stored at its start. power-of-2 isn't required; value is arbitrary.
const size_t bucketSize = 4000;
/**
 * fixed-size object pool: hands out objects of type T from a growable
 * Storage and recycles freed objects via an intrusive freelist.
 *
 * NOTE(review): the bucketSize template parameter shadows the file-scope
 * `bucketSize` constant and (like Allocator) is currently unused within
 * the class body — presumably reserved for future use; confirm.
 */
template<typename T, class Allocator = Allocator_Heap, size_t bucketSize = pageSize>
class Buckets
{
public:
	// bytes reserved per object within storage.
	// (must round up because freelist stores pointers inside objects)
	static const size_t objectSize = ROUND_UP(sizeof(T), sizeof(intptr_t));

	/**
	 * @param maxObjects upper bound on the number of objects that can
	 *   ever be live at once; determines the storage reservation.
	 */
	Buckets(size_t maxObjects)
		: storage(maxObjects*objectSize)
		, end(0)	// BUGFIX: was never initialized (garbage offset)
		, freelist(mem_freelist_Sentinel())	// BUGFIX: was never initialized
	{
	}

	/**
	 * @return number of objects still allocatable from fresh storage
	 *   (does not count entries waiting on the freelist).
	 */
	size_t RemainingObjects()
	{
		return (storage.MaxCapacity() - end) / objectSize;
	}

	/**
	 * @return storage for one T, or 0 if exhausted. recycled entries
	 *   from the freelist are preferred (improves locality).
	 */
	T* Allocate()
	{
		void* p = mem_freelist_Detach(freelist);
		if(p)
		{
			ASSERT(Contains(p));
			return (T*)p;
		}
		return (T*)StorageAppend(storage, end, objectSize);
	}

	/**
	 * return an object's storage to the freelist for later reuse.
	 */
	void Deallocate(T* p)
	{
		ASSERT(Contains(p));
		mem_freelist_AddToFront(freelist, p);
	}

private:
	Storage storage;
	size_t end;	// offset of first unused byte within storage
	void* freelist;	// intrusive LIFO list of deallocated objects
};
// ready the Bucket for use; see header for the full contract.
Status bucket_create(Bucket* b, size_t el_size)
{
	b->freelist = mem_freelist_Sentinel();
	b->el_size = Align<allocationAlignment>(el_size);

	// eagerly grab the first bucket so bucket_alloc can skip an
	// is-this-the-first-time check on its fast path.
	u8* const firstBucket = (u8*)malloc(bucketSize);
	b->bucket = firstBucket;
	if(!firstBucket)
	{
		// arrange for the next bucket_alloc to retry the allocation
		b->pos = bucketSize;
		b->num_buckets = 0;
		WARN_RETURN(ERR::NO_MEM);
	}

	*(u8**)firstBucket = 0;	// terminate the bucket chain
	b->pos = Align<allocationAlignment>(sizeof(u8*));	// skip the chain link
	b->num_buckets = 1;
	return INFO::OK;
}
// free all memory owned by the Bucket; subsequent alloc/free will fail.
void bucket_destroy(Bucket* b)
{
	// walk the singly-linked chain of buckets, releasing each one.
	u8* cur = b->bucket;
	while(cur)
	{
		u8* const next = *(u8**)cur;
		free(cur);
		cur = next;
		b->num_buckets--;
	}
	b->bucket = 0;	// chain fully released
	ENSURE(b->num_buckets == 0);

	// poison pill: cause subsequent alloc and free to fail
	b->freelist = 0;
	b->el_size = bucketSize;
}
// dole out memory from the Bucket; see header for the full contract.
void* bucket_alloc(Bucket* b, size_t size)
{
	const size_t el_size = b->el_size? b->el_size : Align<allocationAlignment>(size);
	ENSURE(el_size <= bucketSize-sizeof(u8*));	// must fit in one bucket

	// exhaust the freelist before touching fresh space (locality).
	if(void* const recycled = mem_freelist_Detach(b->freelist))
		return recycled;

	// current bucket can't hold the element: open a new one and
	// chain it onto the list.
	if(b->pos+el_size > bucketSize)
	{
		u8* const newBucket = (u8*)malloc(bucketSize);
		if(!newBucket)
			return 0;
		*(u8**)newBucket = b->bucket;	// link to previous bucket
		b->bucket = newBucket;
		// skip the chain link and align (note: malloc already
		// aligns to at least 8 bytes, so b->bucket needn't be considered)
		b->pos = Align<allocationAlignment>(sizeof(u8*));
		b->num_buckets++;
	}

	void* const ret = b->bucket+b->pos;
	b->pos += el_size;
	return ret;
}
/**
 * fixed-size fast path of bucket_alloc: uses b->el_size directly and
 * only pointer-size alignment for the chain link.
 *
 * @param b Bucket*
 * @return allocated memory, or 0 if a new bucket is needed and there
 *   isn't enough memory to allocate one.
 */
void* bucket_fast_alloc(Bucket* b)
{
	// try to satisfy alloc from freelist (improves locality)
	void* el = mem_freelist_Detach(b->freelist);
	if(el)
		return el;

	// if there's not enough space left, close current bucket and
	// allocate another.
	if(b->pos+b->el_size > bucketSize)
	{
		u8* bucket = (u8*)malloc(bucketSize);
		// BUGFIX: the result was previously dereferenced unchecked;
		// fail gracefully on OOM (as bucket_alloc does) instead of crashing.
		if(!bucket)
			return 0;
		*(u8**)bucket = b->bucket;
		b->bucket = bucket;
		// skip bucket list field (alignment is only pointer-size)
		b->pos = sizeof(u8*);
		b->num_buckets++;
	}

	void* ret = b->bucket+b->pos;
	b->pos += b->el_size;
	return ret;
}

View File

@ -1,102 +0,0 @@
/* Copyright (c) 2010 Wildfire Games
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* bucket allocator
*/
#ifndef INCLUDED_ALLOCATORS_BUCKET
#define INCLUDED_ALLOCATORS_BUCKET
/**
* allocator design goals:
* - either fixed- or variable-sized blocks;
* - allow freeing individual blocks if they are all fixed-size;
* - never relocates;
* - no fixed limit.
*
* note: this type of allocator is called "region-based" in the literature
* and is also known as "obstack"; see "Reconsidering Custom Memory
* Allocation" (Berger, Zorn, McKinley).
* if individual variable-size elements must be freeable, consider "reaps":
* basically a combination of region and heap, where frees go to the heap and
* allocs exhaust that memory first and otherwise use the region.
*
* opaque! do not read/write any fields!
**/
struct Bucket
{
/**
 * currently open bucket; start of a singly-linked chain (each bucket
 * begins with a pointer to the previous one, 0-terminated).
 **/
u8* bucket;
/**
 * offset of free space at end of current bucket (i.e. # bytes in use).
 **/
size_t pos;
/**
 * intrusive freelist of entries returned via bucket_free-style reuse;
 * exhausted before fresh space is doled out.
 **/
void* freelist;
/**
 * fixed element size in bytes, or 0 for variable-sized allocations
 * (set by bucket_create).
 **/
size_t el_size;
/**
 * records # buckets allocated; verifies the list of buckets is correct.
 **/
size_t num_buckets;
};
/**
 * ready the Bucket object for use.
 *
 * @param b Bucket*
 * @param el_size 0 to allow variable-sized allocations (which cannot be
 * freed individually); otherwise, it specifies the number of bytes that
 * will be returned by bucket_alloc (whose size parameter is then ignored).
 * @return Status.
 **/
LIB_API Status bucket_create(Bucket* b, size_t el_size);
/**
 * free all memory that ensued from \<b\>.
 *
 * future alloc and free calls on this Bucket will fail.
 *
 * @param b Bucket*
 **/
LIB_API void bucket_destroy(Bucket* b);
/**
 * Dole out memory from the Bucket.
 * exhausts the freelist before returning new entries to improve locality.
 *
 * @param b Bucket*
 * @param size bytes to allocate; ignored if bucket_create's el_size was not 0.
 * @return allocated memory, or 0 if the Bucket would have to be expanded and
 * there isn't enough memory to do so.
 **/
LIB_API void* bucket_alloc(Bucket* b, size_t size);
/**
 * fixed-size variant of bucket_alloc: always uses the el_size given to
 * bucket_create and skips the size check (alignment is only pointer-size).
 *
 * @param b Bucket*
 * @return allocated memory; see bucket_alloc.
 **/
LIB_API void* bucket_fast_alloc(Bucket* b);

#endif	// #ifndef INCLUDED_ALLOCATORS_BUCKET