2006-04-24 01:14:18 +02:00
|
|
|
/**
|
|
|
|
* =========================================================================
|
|
|
|
* File : allocators.h
|
|
|
|
* Project : 0 A.D.
|
|
|
|
* Description : memory suballocators.
|
|
|
|
*
|
|
|
|
* @author Jan.Wassenberg@stud.uni-karlsruhe.de
|
|
|
|
* =========================================================================
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
2007-01-01 22:25:47 +01:00
|
|
|
* Copyright (c) 2005-2006 Jan Wassenberg
|
2006-04-24 01:14:18 +02:00
|
|
|
*
|
|
|
|
* Redistribution and/or modification are also permitted under the
|
|
|
|
* terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation (version 2 or later, at your option).
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef ALLOCATORS_H__
|
|
|
|
#define ALLOCATORS_H__
|
|
|
|
|
|
|
|
#include <map>
|
|
|
|
|
2007-01-01 22:25:47 +01:00
|
|
|
#include "lib/posix/posix_mman.h" // PROT_*
|
2006-06-22 20:26:08 +02:00
|
|
|
#include "lib/sysdep/cpu.h" // CAS
|
2007-01-01 22:25:47 +01:00
|
|
|
|
2006-04-24 01:14:18 +02:00
|
|
|
|
|
|
|
//
|
|
|
|
// page aligned allocator
|
|
|
|
//
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocate memory aligned to the system page size.
|
|
|
|
*
|
|
|
|
* this is useful for file_buf_alloc, which uses this allocator to
|
|
|
|
* get sector-aligned (hopefully; see file_sector_size) IO buffers.
|
|
|
|
*
|
|
|
|
 * note that this allocator is stateless and very little error checking
|
|
|
|
* can be performed.
|
|
|
|
*
|
|
|
|
* @param unaligned_size minimum size [bytes] to allocate.
|
|
|
|
* @return writable, page-aligned and -padded memory; you can use
|
|
|
|
* mprotect to set other access permissions if desired.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void* page_aligned_alloc(size_t unaligned_size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* free a previously allocated page-aligned region.
|
|
|
|
*
|
|
|
|
* @param p exact value returned from page_aligned_alloc
|
|
|
|
* @param size exact value passed to page_aligned_alloc
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void page_aligned_free(void* p, size_t unaligned_size);
|
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// dynamic (expandable) array
|
|
|
|
//
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* provides a memory range that can be expanded but doesn't waste
|
|
|
|
* physical memory or relocate itself.
|
|
|
|
*
|
|
|
|
* works by preallocating address space and committing as needed.
|
|
|
|
* used as a building block for other allocators.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
struct DynArray
{
	// start of the reserved address range.
	u8* base;

	size_t max_size_pa; /// total reserved size [bytes] (pa presumably = "page-aligned"; da_alloc rounds up to page multiple - confirm)
	size_t cur_size;    /// currently committed (usable) size [bytes]; changed via da_set_size

	/**
	 * mprotect flags applied to newly committed pages
	 **/
	int prot;

	// current offset [bytes] into the array; da_read and da_append
	// start at this position and advance it.
	size_t pos;
};
|
|
|
|
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* ready the DynArray object for use.
|
|
|
|
*
|
|
|
|
* no virtual memory is actually committed until calls to da_set_size.
|
|
|
|
*
|
|
|
|
* @param da DynArray.
|
|
|
|
* @param max_size size [bytes] of address space to reserve (*);
|
|
|
|
* the DynArray can never expand beyond this.
|
|
|
|
* (* rounded up to next page size multiple)
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError da_alloc(DynArray* da, size_t max_size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* free all memory (address space + physical) that constitutes the
|
|
|
|
* given DynArray.
|
|
|
|
*
|
|
|
|
* @param da DynArray. zeroed afterwards; continued use of the allocated
|
|
|
|
* memory is impossible because it is marked not-present via MMU.
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError da_free(DynArray* da);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* expand or shrink the array: changes the amount of currently committed
|
|
|
|
* (i.e. usable) memory pages.
|
|
|
|
*
|
|
|
|
* @param da DynArray.
|
|
|
|
* @param new_size target size (rounded up to next page multiple).
|
|
|
|
* pages are added/removed until this is met.
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError da_set_size(DynArray* da, size_t new_size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* make sure a given number of bytes starting from the current position
|
|
|
|
* (DynArray.pos) are committed and ready for use.
|
|
|
|
*
|
|
|
|
* @param da DynArray.
|
|
|
|
* @param size minimum size [bytes].
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError da_reserve(DynArray* da, size_t size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* change access rights of the array memory.
|
|
|
|
*
|
|
|
|
* used to implement write-protection. affects the currently committed
|
|
|
|
* pages as well as all subsequently added pages.
|
|
|
|
*
|
|
|
|
* @param da DynArray.
|
|
|
|
* @param prot a combination of the PROT_* values used with mprotect.
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError da_set_prot(DynArray* da, int prot);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* "wrap" (i.e. store information about) the given buffer in a DynArray.
|
|
|
|
*
|
|
|
|
* this is used to allow calling da_read or da_append on normal buffers.
|
|
|
|
* da_free should be called when the DynArray is no longer needed,
|
|
|
|
* even though it doesn't free this memory (but does zero the DynArray).
|
|
|
|
*
|
|
|
|
* @param da DynArray.
|
|
|
|
* @param p target memory (no alignment/padding requirements)
|
|
|
|
* @param size maximum size (no alignment requirements)
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError da_wrap_fixed(DynArray* da, u8* p, size_t size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* "read" from array, i.e. copy into the given buffer.
|
|
|
|
*
|
|
|
|
* starts at offset DynArray.pos and advances this.
|
|
|
|
*
|
|
|
|
* @param da DynArray.
|
|
|
|
* @param data_dst destination memory
|
|
|
|
* @param size [bytes] to copy
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError da_read(DynArray* da, void* data_dst, size_t size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* "write" to array, i.e. copy from the given buffer.
|
|
|
|
*
|
|
|
|
* starts at offset DynArray.pos and advances this.
|
|
|
|
*
|
|
|
|
* @param da DynArray.
|
|
|
|
* @param data_src source memory
|
|
|
|
* @param size [bytes] to copy
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError da_append(DynArray* da, const void* data_src, size_t size);
|
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// pool allocator
|
|
|
|
//
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocator design parameters:
|
|
|
|
* - O(1) alloc and free;
|
|
|
|
* - either fixed- or variable-sized blocks;
|
|
|
|
* - doesn't preallocate the entire pool;
|
|
|
|
* - returns sequential addresses.
|
|
|
|
*
|
|
|
|
* opaque! do not read/write any fields!
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
struct Pool
{
	// backing store: reserved address space that is committed on demand.
	DynArray da;

	/**
	 * size of elements. = 0 if pool set up for variable-sized
	 * elements, otherwise rounded up to pool alignment.
	 **/
	size_t el_size;

	/**
	 * pointer to freelist (opaque); see freelist_*.
	 * never used (remains 0) if elements are of variable size.
	 **/
	void* freelist;
};
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* pass as pool_create's <el_size> param to indicate variable-sized allocs
|
|
|
|
* are required (see below).
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
// note: ~(size_t)0 rather than ~0u - the latter is unsigned int and
// would not equal (size_t)-1 on LP64 platforms, making the sentinel
// impossible to match reliably against a size_t el_size.
const size_t POOL_VARIABLE_ALLOCS = ~(size_t)0;
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* ready the Pool object for use.
|
|
|
|
*
|
|
|
|
* @param p Pool.
|
|
|
|
* @param max_size size [bytes] of address space to reserve (*);
|
|
|
|
* the Pool can never expand beyond this.
|
|
|
|
* (* rounded up to next page size multiple)
|
|
|
|
 * @param el_size POOL_VARIABLE_ALLOCS to allow variable-sized allocations (which cannot be
|
|
|
|
* freed individually); otherwise, it specifies the number of bytes that
|
|
|
|
* will be returned by pool_alloc (whose size parameter is then ignored).
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError pool_create(Pool* p, size_t max_size, size_t el_size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* free all memory (address space + physical) that constitutes the
|
|
|
|
* given Pool.
|
|
|
|
*
|
|
|
|
* @param p Pool. continued use of the allocated memory (*) is
|
|
|
|
* impossible because it is marked not-present via MMU.
|
|
|
|
* (* no matter if in freelist or unused or "allocated" to user)
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError pool_destroy(Pool* p);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* indicate whether a pointer was allocated from the given pool.
|
|
|
|
*
|
|
|
|
* this is useful for callers that use several types of allocators.
|
|
|
|
*
|
|
|
|
* @param p Pool.
|
|
|
|
* @return bool.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern bool pool_contains(Pool* p, void* el);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocate memory from the pool.
|
|
|
|
*
|
|
|
|
* exhausts the freelist before returning new entries to improve locality.
|
|
|
|
*
|
|
|
|
* @param p Pool.
|
|
|
|
 * @param size [bytes] to allocate. ignored if pool was set up with
|
|
|
|
* fixed-size elements.
|
|
|
|
* @return 0 if the Pool would have to be expanded and there isn't enough
|
|
|
|
* memory to do so, otherwise the allocated memory.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void* pool_alloc(Pool* p, size_t size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* make an entry available for reuse in the given Pool.
|
|
|
|
*
|
|
|
|
* this is not allowed if created for variable-size elements.
|
|
|
|
* rationale: avoids having to pass el_size here and compare with size when
|
|
|
|
* allocating; also prevents fragmentation and leaking memory.
|
|
|
|
*
|
|
|
|
* @param p Pool.
|
|
|
|
* @param el entry allocated via pool_alloc.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void pool_free(Pool* p, void* el);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* "free" all user allocations that ensued from the given Pool.
|
|
|
|
*
|
|
|
|
* this resets it as if freshly pool_create-d, but doesn't release the
|
|
|
|
* underlying memory.
|
|
|
|
*
|
|
|
|
* @param p Pool.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void pool_free_all(Pool* p);
|
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// bucket allocator
|
|
|
|
//
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocator design goals:
|
|
|
|
* - either fixed- or variable-sized blocks;
|
|
|
|
* - allow freeing individual blocks if they are all fixed-size;
|
|
|
|
* - never relocates;
|
|
|
|
* - no fixed limit.
|
|
|
|
*
|
|
|
|
* note: this type of allocator is called "region-based" in the literature.
|
|
|
|
* see "Reconsidering Custom Memory Allocation" (Berger, Zorn, McKinley).
|
|
|
|
* if individual variable-size elements must be freeable, consider "reaps":
|
|
|
|
* basically a combination of region and heap, where frees go to the heap and
|
|
|
|
* allocs exhaust that memory first and otherwise use the region.
|
|
|
|
*
|
|
|
|
* opaque! do not read/write any fields!
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
struct Bucket
{
	/**
	 * currently open bucket.
	 **/
	u8* bucket;

	/**
	 * offset of free space at end of current bucket (i.e. # bytes in use).
	 **/
	size_t pos;

	// chain of entries returned via bucket_free (opaque).
	// presumably remains 0 when set up for variable-size elements,
	// since those cannot be freed individually - confirm in .cpp.
	void* freelist;

	// size [bytes] of each fixed-size element; 0 when set up for
	// variable-sized allocations (see bucket_create).
	// NOTE(review): bit-field on size_t is implementation-defined;
	// confirm 16 bits suffice and the layout is as intended.
	size_t el_size : 16;

	/**
	 * records # buckets allocated; verifies the list of buckets is correct.
	 **/
	uint num_buckets : 16;
};
|
|
|
|
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* ready the Bucket object for use.
|
|
|
|
*
|
|
|
|
* @param b Bucket.
|
|
|
|
* @param el_size 0 to allow variable-sized allocations (which cannot be
|
|
|
|
* freed individually); otherwise, it specifies the number of bytes that
|
|
|
|
* will be returned by bucket_alloc (whose size parameter is then ignored).
|
|
|
|
* @return LibError.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern LibError bucket_create(Bucket* b, size_t el_size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* free all memory that ensued from <b>.
|
|
|
|
*
|
|
|
|
* future alloc and free calls on this Bucket will fail.
|
|
|
|
*
|
|
|
|
* @param b Bucket.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void bucket_destroy(Bucket* b);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocate memory from the bucket.
|
|
|
|
*
|
|
|
|
* exhausts the freelist before returning new entries to improve locality.
|
|
|
|
*
|
|
|
|
* @param b Bucket.
|
|
|
|
 * @param size [bytes] to allocate. ignored if bucket was set up with
|
|
|
|
* fixed-size elements.
|
|
|
|
* @return 0 if the Bucket would have to be expanded and there isn't enough
|
|
|
|
* memory to do so, otherwise the allocated memory.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void* bucket_alloc(Bucket* b, size_t size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* make an entry available for reuse in the given Bucket.
|
|
|
|
*
|
|
|
|
* this is not allowed if created for variable-size elements.
|
|
|
|
* rationale: avoids having to pass el_size here and compare with size when
|
|
|
|
* allocating; also prevents fragmentation and leaking memory.
|
|
|
|
*
|
|
|
|
* @param b Bucket.
|
|
|
|
* @param el entry allocated via bucket_alloc.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void bucket_free(Bucket* b, void* el);
|
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// matrix allocator
|
|
|
|
//
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocate a 2D matrix accessible as matrix[col][row].
|
|
|
|
*
|
|
|
|
* takes care of the dirty work of allocating 2D matrices:
|
|
|
|
* - aligns data
|
|
|
|
* - only allocates one memory block, which is more efficient than
|
|
|
|
* malloc/new for each row.
|
|
|
|
*
|
|
|
|
* @param cols, rows: dimension (cols x rows)
|
|
|
|
* @param el_size size [bytes] of a matrix cell
|
|
|
|
* @return 0 if out of memory, otherwise matrix that should be cast to
|
|
|
|
* type** (sizeof(type) == el_size). must be freed via matrix_free.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void** matrix_alloc(uint cols, uint rows, size_t el_size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* free the given matrix.
|
|
|
|
*
|
|
|
|
* @param matrix allocated by matrix_alloc; no-op if 0.
|
|
|
|
* callers will likely want to pass variables of a different type
|
|
|
|
* (e.g. int**); they must be cast to void**.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void matrix_free(void** matrix);
|
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// allocator optimized for single instances
|
|
|
|
//
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocator for applications that frequently alloc/free a single
|
|
|
|
* fixed-size object.
|
|
|
|
*
|
|
|
|
* if there is only one object in use at a time, malloc is avoided;
|
|
|
|
* this is faster and avoids heap fragmentation.
|
|
|
|
*
|
|
|
|
* @param storage static storage; enough to fit one item.
|
|
|
|
* @param in_use_flag: indicates if storage is in use. manipulated via CAS,
|
|
|
|
* so this is thread-safe.
|
|
|
|
* @param size [bytes] of storage (we need to know this if falling back to
|
|
|
|
* heap allocation).
|
|
|
|
* @return pointer to storage if available, otherwise a heap allocation.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size);
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* free memory allocated via single_calloc.
|
|
|
|
*
|
|
|
|
* see description there.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
extern void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p);
|
|
|
|
|
|
|
|
#ifdef __cplusplus
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* C++ wrapper on top of single_calloc that's slightly easier to use.
|
|
|
|
*
|
|
|
|
* T must be POD (Plain Old Data) because it is memset to 0!
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
template<class T> class SingleAllocator
|
|
|
|
{
|
|
|
|
T storage;
|
|
|
|
volatile uintptr_t is_in_use;
|
|
|
|
|
|
|
|
public:
|
|
|
|
SingleAllocator()
|
|
|
|
{
|
|
|
|
is_in_use = 0;
|
|
|
|
}
|
|
|
|
|
2006-04-27 00:38:25 +02:00
|
|
|
T* alloc()
|
2006-04-24 01:14:18 +02:00
|
|
|
{
|
2006-04-27 00:38:25 +02:00
|
|
|
return (T*)single_calloc(&storage, &is_in_use, sizeof(storage));
|
2006-04-24 01:14:18 +02:00
|
|
|
}
|
|
|
|
|
2006-04-27 00:38:25 +02:00
|
|
|
void release(T* p)
|
2006-04-24 01:14:18 +02:00
|
|
|
{
|
|
|
|
single_free(&storage, &is_in_use, p);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
#endif // #ifdef __cplusplus
|
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// overrun protection
|
|
|
|
//
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
2006-04-24 01:14:18 +02:00
|
|
|
OverrunProtector wraps an arbitrary object in DynArray memory and can detect
|
|
|
|
inadvertent writes to it. this is useful for tracking down memory overruns.
|
|
|
|
|
|
|
|
the basic idea is to require users to request access to the object and
|
|
|
|
notify us when done; memory access permission is temporarily granted.
|
|
|
|
(similar in principle to Software Transaction Memory).
|
|
|
|
|
|
|
|
since this is quite slow, the protection is disabled unless
|
|
|
|
CONFIG_OVERRUN_PROTECTION == 1; this avoids having to remove the
|
|
|
|
wrapper code in release builds and re-write when looking for overruns.
|
|
|
|
|
|
|
|
example usage:
|
|
|
|
OverrunProtector<your_class> your_class_wrapper;
|
|
|
|
..
|
|
|
|
your_class* yc = your_class_wrapper.get(); // unlock, make ready for use
|
|
|
|
if(!yc) // your_class_wrapper's one-time alloc of a your_class-
|
|
|
|
abort(); // instance had failed - can't continue.
|
|
|
|
doSomethingWith(yc); // read/write access
|
|
|
|
your_class_wrapper.lock(); // disallow further access until next .get()
|
|
|
|
..
|
2006-06-25 17:31:58 +02:00
|
|
|
**/
|
2007-01-01 22:25:47 +01:00
|
|
|
#ifdef REDEFINED_NEW
|
|
|
|
# include "lib/nommgr.h"
|
|
|
|
#endif
|
2006-04-24 01:14:18 +02:00
|
|
|
template<class T> class OverrunProtector
{
	// page-granular storage wrapping the T instance; page protection is
	// toggled via da_set_prot to detect stray writes.
	DynArray da;
	// the wrapped object, placement-new'ed into da.base (0 until init).
	T* cached_ptr;
	// lifecycle state advanced by CAS: 0 = not yet initialized,
	// 1 = initialized, 2 = shut down.
	uintptr_t initialized;

public:
	OverrunProtector()
	{
		memset(&da, 0, sizeof(da));
		cached_ptr = 0;
		initialized = 0;
	}

	~OverrunProtector()
	{
		shutdown();
	}

	// revoke all access to the wrapped object until the next get().
	void lock()
	{
#if CONFIG_OVERRUN_PROTECTION
		da_set_prot(&da, PROT_NONE);
#endif
	}

private:
	// re-grant read/write access (counterpart to lock).
	void unlock()
	{
#if CONFIG_OVERRUN_PROTECTION
		da_set_prot(&da, PROT_READ|PROT_WRITE);
#endif
	}

	// one-time allocation and construction of the T instance.
	// on failure, cached_ptr remains 0 (callers of get() must check).
	void init()
	{
		if(da_alloc(&da, sizeof(T)) < 0)
		{
		// note: label inside the if-block; the goto below jumps
		// backward into it (legal - no initializations are skipped).
fail:
			WARN_ERR(ERR::NO_MEM);
			return;
		}
		if(da_set_size(&da, sizeof(T)) < 0)
			goto fail;

		cached_ptr = new(da.base) T();
		lock();
	}

	// destroy the wrapped object and release its memory.
	// the CAS ensures this runs at most once, and only after init.
	void shutdown()
	{
		if(!CAS(&initialized, 1, 2))
			return;	// never initialized or already shut down - abort
		unlock();
		cached_ptr->~T();	// call dtor (since we used placement new)
		cached_ptr = 0;
		(void)da_free(&da);
	}

public:
	// unlock and return the wrapped object (0 if allocation failed).
	// call lock() when done with it.
	T* get()
	{
		// this could theoretically be done in the ctor, but we try to
		// minimize non-trivial code at NLSO ctor time
		// (avoids init order problems).
		if(CAS(&initialized, 0, 1))
			init();
		debug_assert(initialized != 2 && "OverrunProtector: used after dtor called:");
		unlock();
		return cached_ptr;
	}
};
|
2007-01-01 22:25:47 +01:00
|
|
|
#ifdef REDEFINED_NEW
|
|
|
|
# include "lib/mmgr.h"
|
|
|
|
#endif
|
2006-04-24 01:14:18 +02:00
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// allocator test rig
|
|
|
|
//
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocator test rig.
|
|
|
|
* call from each allocator operation to sanity-check them.
|
|
|
|
* should only be used during debug mode due to serious overhead.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
class AllocatorChecker
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
void notify_alloc(void* p, size_t size)
|
|
|
|
{
|
|
|
|
const Allocs::value_type item = std::make_pair(p, size);
|
|
|
|
std::pair<Allocs::iterator, bool> ret = allocs.insert(item);
|
2006-06-08 21:03:43 +02:00
|
|
|
debug_assert(ret.second == true); // wasn't already in map
|
2006-04-24 01:14:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void notify_free(void* p, size_t size)
|
|
|
|
{
|
|
|
|
Allocs::iterator it = allocs.find(p);
|
|
|
|
if(it == allocs.end())
|
|
|
|
debug_warn("AllocatorChecker: freeing invalid pointer");
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// size must match what was passed to notify_alloc
|
|
|
|
const size_t allocated_size = it->second;
|
2006-06-08 21:03:43 +02:00
|
|
|
debug_assert(size == allocated_size);
|
2006-04-24 01:14:18 +02:00
|
|
|
|
|
|
|
allocs.erase(it);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-06-25 17:31:58 +02:00
|
|
|
/**
|
|
|
|
* allocator is resetting itself, i.e. wiping out all allocs.
|
|
|
|
**/
|
2006-04-24 01:14:18 +02:00
|
|
|
void notify_clear()
|
|
|
|
{
|
|
|
|
allocs.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
typedef std::map<void*, size_t> Allocs;
|
|
|
|
Allocs allocs;
|
|
|
|
};
|
|
|
|
|
|
|
|
#endif // #ifndef ALLOCATORS_H__
|