0ad/source/lib/allocators/allocators.cpp
janwas (c0ed950657): had to remove uint and ulong from lib/types.h due to a conflict with another library.
this snowballed into a massive search+destroy of the hodgepodge of
mostly equivalent types we had in use (int, uint, unsigned, unsigned
int, i32, u32, ulong, uintN).

it is more efficient to use 64-bit types in 64-bit mode, so the
preferred default is size_t (for anything remotely resembling a size or
index). tile coordinates are ssize_t to allow more efficient conversion
to/from floating point. flags are int because we almost never need more
than 15 distinct bits, bit test/set is no slower, and int is the fastest
to type. finally, some data that is passed more or less directly to
OpenGL is now typed accordingly.

after several hours, the code now requires fewer casts and less
guesswork.

other changes:
- unit and player IDs now have an "invalid id" constant in their
respective classes, avoiding casts and magic -1 values (first sketch
below)
- fix some endian/64-bit bugs in the map (un)packing. added a
convenience function to write/read a size_t (second sketch below).
- ia32: change the CPUID interface to allow passing in ecx (required
for cache topology detection, which I need at work). remove some
unneeded functions from the asm and replace them with intrinsics where
possible (third sketch below).

This was SVN commit r5942.
2008-05-11 18:48:32 +00:00
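
The "invalid id" convention from the first point might look like the
following sketch (the class and member names are made up for
illustration, not the project's actual declarations):

    #include <cstddef>

    class CPlayer
    {
    public:
        // sentinel that replaces scattered (size_t)-1 casts at call sites
        static const size_t invalidId = ~size_t(0);

        explicit CPlayer(size_t id = invalidId) : m_id(id) {}
        bool IsValid() const { return m_id != invalidId; }

    private:
        size_t m_id;
    };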
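
The size_t read/write convenience function from the second point could
be sketched as follows (the names and the fixed 32-bit on-disk width are
assumptions; the point is that writing bytes in a fixed order makes the
format independent of host endianness and word size):

    #include <cstddef>
    #include <cstdint>

    // write a size_t as a fixed-width little-endian u32 (hypothetical
    // helper; the caller must ensure the value fits in 32 bits)
    static void PackSize(uint8_t* dst, size_t x)
    {
        const uint32_t v = (uint32_t)x;
        dst[0] = (uint8_t)(v      );
        dst[1] = (uint8_t)(v >>  8);
        dst[2] = (uint8_t)(v >> 16);
        dst[3] = (uint8_t)(v >> 24);
    }

    // read it back, regardless of host endianness or sizeof(size_t)
    static size_t UnpackSize(const uint8_t* src)
    {
        return (size_t)((uint32_t)src[0]
                      | ((uint32_t)src[1] << 8)
                      | ((uint32_t)src[2] << 16)
                      | ((uint32_t)src[3] << 24));
    }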
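
The CPUID change from the third point amounts to accepting ecx (the
subleaf) as an input, which leaves such as the deterministic cache
parameters leaf (eax=4) require for cache topology detection. A minimal
sketch with a hypothetical name, using compiler intrinsics/inline asm
rather than the project's ia32 layer:

    #include <cstdint>
    #if defined(_MSC_VER)
    # include <intrin.h>
    #endif

    // regs receives eax, ebx, ecx, edx
    static void CpuidEx(uint32_t leaf, uint32_t subleaf, uint32_t regs[4])
    {
    #if defined(_MSC_VER)
        __cpuidex((int*)regs, (int)leaf, (int)subleaf);
    #else   // GCC/Clang on x86/x64
        __asm__ __volatile__("cpuid"
            : "=a"(regs[0]), "=b"(regs[1]), "=c"(regs[2]), "=d"(regs[3])
            : "a"(leaf), "c"(subleaf));
    #endif
    }

Calling it with leaf 4 and subleaf 0, 1, 2, ... enumerates the cache
levels until the returned cache type (eax bits 0-4) reads 0.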


/**
* =========================================================================
* File : allocators.cpp
* Project : 0 A.D.
* Description : memory suballocators.
* =========================================================================
*/
// license: GPL; see lib/license.txt

#include "precompiled.h"
#include "allocators.h"

#include "lib/sysdep/cpu.h"  // cpu_CAS
#include "lib/bits.h"
#include "mem_util.h"

//-----------------------------------------------------------------------------
// page aligned allocator
//-----------------------------------------------------------------------------
void* page_aligned_alloc(size_t unaligned_size)
{
    const size_t size_pa = mem_RoundUpToPage(unaligned_size);
    u8* p = 0;
    RETURN0_IF_ERR(mem_Reserve(size_pa, &p));
    RETURN0_IF_ERR(mem_Commit(p, size_pa, PROT_READ|PROT_WRITE));
    return p;
}

void page_aligned_free(void* p, size_t unaligned_size)
{
    if(!p)
        return;
    debug_assert(mem_IsPageMultiple((uintptr_t)p));
    const size_t size_pa = mem_RoundUpToPage(unaligned_size);
    (void)mem_Release((u8*)p, size_pa);
}
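
// Usage sketch (illustrative addition, not part of the original file):
// the requested size need not be page-aligned - it is rounded up
// internally - and the same size must be passed to page_aligned_free.
static void ExamplePageAlignedUsage()
{
    const size_t size = 4000;   // rounded up to the page size internally
    void* buf = page_aligned_alloc(size);
    if(buf)
    {
        memset(buf, 0xFF, size);
        page_aligned_free(buf, size);   // must pass the original size
    }
}
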
//-----------------------------------------------------------------------------
// matrix allocator
//-----------------------------------------------------------------------------
void** matrix_alloc(size_t cols, size_t rows, size_t el_size)
{
    const size_t initial_align = 64;
    // note: no provision for padding rows. this is a bit more work and
    // if el_size isn't a power-of-2, performance is going to suck anyway.
    // otherwise, the initial alignment will take care of it.

    const size_t ptr_array_size = cols*sizeof(void*);
    const size_t row_size = cols*el_size;
    const size_t data_size = rows*row_size;
    const size_t total_size = ptr_array_size + initial_align + data_size;

    void* p = malloc(total_size);
    if(!p)
        return 0;

    uintptr_t data_addr = (uintptr_t)p + ptr_array_size + initial_align;
    data_addr -= data_addr % initial_align;

    // alignment check didn't set address to before allocation
    debug_assert(data_addr >= (uintptr_t)p+ptr_array_size);

    void** ptr_array = (void**)p;
    for(size_t i = 0; i < cols; i++)
    {
        ptr_array[i] = (void*)data_addr;
        data_addr += row_size;
    }

    // didn't overrun total allocation
    debug_assert(data_addr <= (uintptr_t)p+total_size);

    return ptr_array;
}

void matrix_free(void** matrix)
{
    free(matrix);
}
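
// Usage sketch (illustrative addition): a square matrix of ints. The
// returned pointer array makes m[i][j] indexing work; note that the
// pointer array has `cols` entries, so the full data area is only
// reachable through it when cols == rows.
static void ExampleMatrixUsage()
{
    const size_t n = 4;
    int** m = (int**)matrix_alloc(n, n, sizeof(int));
    if(m)
    {
        m[2][3] = 42;   // row 2, column 3
        matrix_free((void**)m);
    }
}
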
//-----------------------------------------------------------------------------
// allocator optimized for single instances
//-----------------------------------------------------------------------------
void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size)
{
    // sanity check
    debug_assert(*in_use_flag == 0 || *in_use_flag == 1);

    void* p;

    // successfully reserved the single instance
    if(cpu_CAS(in_use_flag, 0, 1))
        p = storage;
    // already in use (rare) - allocate from heap
    else
        p = new u8[size];

    memset(p, 0, size);
    return p;
}

void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p)
{
    // sanity check
    debug_assert(*in_use_flag == 0 || *in_use_flag == 1);

    if(p == storage)
    {
        if(cpu_CAS(in_use_flag, 1, 0))
        {
            // ok, flag has been reset to 0
        }
        else
            debug_assert(0);    // in_use_flag out of sync (double free?)
    }
    // was allocated from heap
    else
    {
        // single instance may have been freed by now - cannot assume
        // anything about in_use_flag.
        delete[] (u8*)p;
    }
}
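
// Usage sketch (illustrative addition): pair a static buffer with an
// in-use flag. The first caller receives the static storage; concurrent
// callers fall back to the heap, and single_free routes each pointer to
// the correct deallocation path.
static u8 example_storage[64];
static volatile uintptr_t example_in_use;

static void* ExampleSingleAlloc()
{
    return single_calloc(example_storage, &example_in_use, sizeof(example_storage));
}

static void ExampleSingleFree(void* p)
{
    single_free(example_storage, &example_in_use, p);
}
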
//-----------------------------------------------------------------------------
// static allocator
//-----------------------------------------------------------------------------
void* static_calloc(StaticStorage* ss, size_t size)
{
    // "calloc" because the returned memory is already zeroed: static
    // storage is zero-initialized, and addresses are never reused (this
    // bump allocator has no matching free function).
    void* p = (void*)round_up((uintptr_t)ss->pos, (uintptr_t)16u);
    ss->pos = (u8*)p+size;
    debug_assert(ss->pos <= ss->end);
    return p;
}
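
// Usage sketch (illustrative addition; assumes StaticStorage is an
// aggregate whose pos/end members are used as above): carve
// 16-byte-aligned, already-zeroed chunks out of a static buffer.
static u8 example_ss_mem[1024];
static StaticStorage example_ss = { example_ss_mem, example_ss_mem+sizeof(example_ss_mem) };

static void* ExampleStaticAlloc(size_t size)
{
    return static_calloc(&example_ss, size);
}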