janwas
2e5d9452aa
move all except user-specified config choices out of config.h and into appropriate headers CPU_IA32 -> ARCH_IA32 wsdl: disable use of ddraw (will soon be replaced by WMI) use shared_ptr without namespace qualifier (it's in tr1) debug_warn -> debug_assert(0) LIB_API to allow building as DLL smart pointers: reduce use of .get() cache_adt: use map instead of hash_map (avoids needing a hashCompare class). also remove spurious warning code_annotation.h: better cassert implementation move FPS measuring portion of timer.cpp into frequency_filter move include of memory headers into mmgr.h (to avoid errors, we must ensure they are included if mmgr is used) posix_filesystem.h: move definition of mkdir to wfilesystem stl: disable iterator checks in release mode wmi: fix COM init bug, use smart pointers wutil: add code to get DLL module handle (if compiled as such), add WinScopedLock timer: fix handling of raw ticks This was SVN commit r5517.
149 lines
3.7 KiB
C++
149 lines
3.7 KiB
C++
/**
|
|
* =========================================================================
|
|
* File : allocators.cpp
|
|
* Project : 0 A.D.
|
|
* Description : memory suballocators.
|
|
* =========================================================================
|
|
*/
|
|
|
|
// license: GPL; see lib/license.txt
|
|
|
|
#include "precompiled.h"
|
|
#include "allocators.h"
|
|
|
|
#include "lib/sysdep/cpu.h" // cpu_CAS
|
|
#include "lib/bits.h"
|
|
|
|
#include "mem_util.h"
|
|
|
|
|
|
//-----------------------------------------------------------------------------
|
|
// page aligned allocator
|
|
//-----------------------------------------------------------------------------
|
|
|
|
/**
 * allocate memory aligned to the system page size.
 * @param unaligned_size desired size in bytes; rounded up to page size.
 * @return pointer to read/write-committed pages, or 0 on failure.
 * must be freed via page_aligned_free with the same size.
 */
void* page_aligned_alloc(size_t unaligned_size)
{
	const size_t size_pa = mem_RoundUpToPage(unaligned_size);
	u8* p = 0;
	RETURN0_IF_ERR(mem_Reserve(size_pa, &p));
	// if committing fails, we must undo the reservation - otherwise
	// the address space region would leak (the caller only sees 0 and
	// cannot release it).
	if(mem_Commit(p, size_pa, PROT_READ|PROT_WRITE) < 0)
	{
		(void)mem_Release(p, size_pa);
		return 0;
	}
	return p;
}
|
|
|
|
|
|
void page_aligned_free(void* p, size_t unaligned_size)
|
|
{
|
|
if(!p)
|
|
return;
|
|
debug_assert(mem_IsPageMultiple((uintptr_t)p));
|
|
const size_t size_pa = mem_RoundUpToPage(unaligned_size);
|
|
(void)mem_Release((u8*)p, size_pa);
|
|
}
|
|
|
|
|
|
//-----------------------------------------------------------------------------
|
|
// matrix allocator
|
|
//-----------------------------------------------------------------------------
|
|
|
|
void** matrix_alloc(uint cols, uint rows, size_t el_size)
|
|
{
|
|
const size_t initial_align = 64;
|
|
// note: no provision for padding rows. this is a bit more work and
|
|
// if el_size isn't a power-of-2, performance is going to suck anyway.
|
|
// otherwise, the initial alignment will take care of it.
|
|
|
|
const size_t ptr_array_size = cols*sizeof(void*);
|
|
const size_t row_size = cols*el_size;
|
|
const size_t data_size = rows*row_size;
|
|
const size_t total_size = ptr_array_size + initial_align + data_size;
|
|
|
|
void* p = malloc(total_size);
|
|
if(!p)
|
|
return 0;
|
|
|
|
uintptr_t data_addr = (uintptr_t)p + ptr_array_size + initial_align;
|
|
data_addr -= data_addr % initial_align;
|
|
|
|
// alignment check didn't set address to before allocation
|
|
debug_assert(data_addr >= (uintptr_t)p+ptr_array_size);
|
|
|
|
void** ptr_array = (void**)p;
|
|
for(uint i = 0; i < cols; i++)
|
|
{
|
|
ptr_array[i] = (void*)data_addr;
|
|
data_addr += row_size;
|
|
}
|
|
|
|
// didn't overrun total allocation
|
|
debug_assert(data_addr <= (uintptr_t)p+total_size);
|
|
|
|
return ptr_array;
|
|
}
|
|
|
|
|
|
/**
 * free a matrix allocated by matrix_alloc.
 * the pointer array and element storage live in one allocation
 * (see matrix_alloc), so freeing the pointer array releases everything.
 * passing 0 is safe (free is a no-op for null pointers).
 */
void matrix_free(void** matrix)
{
	free(matrix);
}
|
|
|
|
|
|
//-----------------------------------------------------------------------------
|
|
// allocator optimized for single instances
|
|
//-----------------------------------------------------------------------------
|
|
|
|
void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size)
|
|
{
|
|
// sanity check
|
|
debug_assert(*in_use_flag == 0 || *in_use_flag == 1);
|
|
|
|
void* p;
|
|
|
|
// successfully reserved the single instance
|
|
if(cpu_CAS(in_use_flag, 0, 1))
|
|
p = storage;
|
|
// already in use (rare) - allocate from heap
|
|
else
|
|
p = new u8[size];
|
|
|
|
memset(p, 0, size);
|
|
return p;
|
|
}
|
|
|
|
|
|
void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p)
|
|
{
|
|
// sanity check
|
|
debug_assert(*in_use_flag == 0 || *in_use_flag == 1);
|
|
|
|
if(p == storage)
|
|
{
|
|
if(cpu_CAS(in_use_flag, 1, 0))
|
|
{
|
|
// ok, flag has been reset to 0
|
|
}
|
|
else
|
|
debug_assert(0); // in_use_flag out of sync (double free?)
|
|
}
|
|
// was allocated from heap
|
|
else
|
|
{
|
|
// single instance may have been freed by now - cannot assume
|
|
// anything about in_use_flag.
|
|
|
|
delete[] (u8*)p;
|
|
}
|
|
}
|
|
|
|
|
|
//-----------------------------------------------------------------------------
|
|
// static allocator
|
|
//-----------------------------------------------------------------------------
|
|
|
|
/**
 * bump-pointer allocation from the given static storage.
 * returns a pointer aligned to 16 bytes. despite the "calloc" name,
 * this function does not clear the memory itself.
 * NOTE(review): presumably the storage is zero-initialized (e.g. a
 * static/BSS buffer), which would make clearing unnecessary - confirm
 * against callers.
 * exhaustion of the storage is only detected via debug_assert, i.e.
 * not at all in release builds.
 */
void* static_calloc(StaticStorage* ss, size_t size)
{
	// round the current position up to a 16-byte boundary.
	void* p = (void*)round_up((uintptr_t)ss->pos, 16);
	// advance past this allocation (there is no way to free).
	ss->pos = (u8*)p+size;
	debug_assert(ss->pos <= ss->end);
	return p;
}
|