Fix renderer crash with GCC 7.5.

The 'arena' allocator does not take memory alignment into account, which
can result in crashes at -O3 with GCC 7.5 (presumably because the compiler
emits aligned SSE instructions for data that lacks the required 16-byte
alignment).

The new DynamicArena allocator accounts for alignment, fixing the crash.
Also do various cleanups in lib/allocators.
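
In outline, the fix rounds the arena's running offset up to the requested
alignment before each pointer is handed out. A minimal sketch of that
approach (a simplified illustration, not the committed code; the names are
made up):

#include <cstddef>
#include <cstdint>

// Round 'value' up to the next multiple of 'alignment' (a power of two).
static std::size_t RoundUpTo(std::size_t value, std::size_t alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

struct BumpBlock
{
    alignas(16) std::uint8_t storage[4096];
    std::size_t offset = 0;

    // Aligning the offset first is the step the old arena skipped; without
    // it, a 16-byte SSE store through the returned pointer can fault.
    // (Bounds checks are omitted for brevity.)
    void* Allocate(std::size_t n, std::size_t alignment)
    {
        offset = RoundUpTo(offset, alignment);
        void* p = &storage[offset];
        offset += n;
        return p;
    }
};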

Reported by: Bellaz89, Freagarach
Comments by: vladislavbelov
Differential Revision: https://code.wildfiregames.com/D3181
This was SVN commit r24517.
wraitii 2021-01-06 14:56:17 +00:00
parent 58d98c9a5f
commit 8ae3c09dc0
16 changed files with 429 additions and 1552 deletions


@@ -0,0 +1,116 @@
/* Copyright (C) 2021 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_ALLOCATORS_DYNAMIC_ARENA
#define INCLUDED_ALLOCATORS_DYNAMIC_ARENA
#include "lib/bits.h"
#include <new> // bad_alloc
#include <memory> // unique_ptr
#include <vector>
namespace Allocators {
/**
* 'Blind' memory allocator. Characteristics:
* - grows dynamically, and linearly, by BLOCK_SIZE
* - no reallocations, pointers remain valid
* - can allocate objects up to BLOCK_SIZE in size
* - can handle any alignment (up to BLOCK_SIZE)
* - doesn't deallocate unless cleared or destroyed.
*/
template<size_t BLOCK_SIZE>
class DynamicArena
{
protected:
struct Block
{
bool Available(size_t n, size_t alignment) const
{
return n + ROUND_UP(size, alignment) <= BLOCK_SIZE;
}
uint8_t* Allocate(size_t n, size_t alignment)
{
size = ROUND_UP(size, alignment);
uint8_t* ptr = &block[size];
size += n;
return ptr;
}
alignas(8) uint8_t block[BLOCK_SIZE];
size_t size = 0;
};
NONCOPYABLE(DynamicArena);
public:
DynamicArena()
{
m_Blocks.reserve(16);
AllocateNewBlock();
};
void AllocateNewBlock()
{
m_Blocks.emplace_back(std::make_unique<Block>());
m_NewestBlock = m_Blocks.back().get();
}
void* allocate(size_t n, const void*, size_t alignment)
{
// Safely handle zero-sized allocations (happens with GCC STL - see ticket #909).
if (n == 0)
n = 1;
if (n > BLOCK_SIZE)
{
debug_warn("DynamicArena cannot allocate more than chunk size");
throw std::bad_alloc();
}
if (!m_NewestBlock->Available(n, alignment))
AllocateNewBlock();
return reinterpret_cast<void*>(m_NewestBlock->Allocate(n, alignment));
}
void deallocate(void* UNUSED(p), size_t UNUSED(n))
{
// ignored
}
void clear()
{
m_NewestBlock = nullptr;
m_Blocks.clear();
AllocateNewBlock();
}
protected:
std::vector<std::unique_ptr<Block>> m_Blocks;
Block* m_NewestBlock = nullptr;
};
} // namespace Allocators
#endif // INCLUDED_ALLOCATORS_DYNAMIC_ARENA
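
For illustration, a hypothetical direct use of the allocator (block size,
allocation sizes and alignments chosen arbitrarily):

#include "lib/allocators/DynamicArena.h"

void ExampleUse()
{
    Allocators::DynamicArena<4096> arena;
    void* a = arena.allocate(24, nullptr, alignof(double)); // 8-byte aligned
    void* b = arena.allocate(64, nullptr, 16);              // e.g. SSE-friendly data
    arena.deallocate(a, 24); // no-op; memory is only reclaimed by clear()
    arena.clear();           // drops all blocks and starts a fresh one
    (void)b;
}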


@@ -0,0 +1,116 @@
/* Copyright (C) 2021 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_STL_ALLOCATORS
#define INCLUDED_STL_ALLOCATORS
#include <memory>
#include <type_traits>
/**
* Adapt a 0 A.D.-style allocator for use in STL containers.
* Use 'Backend' as an underlying allocator.
*/
template<typename T, class Backend>
class STLAllocator
{
template<typename A, class B>
friend class STLAllocator;
public:
using value_type = T;
using pointer = T*;
using is_always_equal = std::false_type;
STLAllocator() : allocator(std::make_shared<Backend>())
{
}
template<typename V>
STLAllocator(const STLAllocator<V, Backend>& proxy) : allocator(proxy.allocator)
{
}
template<typename V>
struct rebind
{
using other = STLAllocator<V, Backend>;
};
T* allocate(size_t n)
{
return static_cast<T*>(allocator->allocate(n * sizeof(T), nullptr, alignof(T)));
}
void deallocate(T* ptr, const size_t n)
{
return allocator->deallocate(static_cast<void*>(ptr), n * sizeof(T));
}
private:
std::shared_ptr<Backend> allocator;
};
/**
* Proxies allocation to another allocator.
* This allows a single allocator to serve multiple STL containers.
*/
template<typename T, class Backend>
class ProxyAllocator
{
template<typename A, class B>
friend class ProxyAllocator;
public:
using value_type = T;
using pointer = T*;
using is_always_equal = std::false_type;
ProxyAllocator(Backend& alloc) : allocator(alloc)
{
}
template<typename V>
ProxyAllocator(const ProxyAllocator<V, Backend>& proxy) : allocator(proxy.allocator)
{
}
template<typename V>
struct rebind
{
using other = ProxyAllocator<V, Backend>;
};
T* allocate(size_t n)
{
return static_cast<T*>(allocator.allocate(n * sizeof(T), nullptr, alignof(T)));
}
void deallocate(T* ptr, const size_t n)
{
return allocator.deallocate(static_cast<void*>(ptr), n * sizeof(T));
}
private:
Backend& allocator;
};
#endif // INCLUDED_STL_ALLOCATORS
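
A brief usage sketch of the two adapters (hypothetical code, assuming the
DynamicArena backend above): STLAllocator owns its backend through a
shared_ptr, so copies of the container share one arena, while ProxyAllocator
only references an arena owned by the caller, which must outlive the
container.

#include "lib/allocators/DynamicArena.h"
#include "lib/allocators/STLAllocators.h"

#include <vector>

void ExampleAdapters()
{
    using Arena = Allocators::DynamicArena<4096>;

    // Owns its arena: default-constructed, shared between allocator copies.
    std::vector<int, STLAllocator<int, Arena>> owning;
    owning.push_back(1);

    // Borrows an arena owned by the caller.
    Arena arena;
    ProxyAllocator<int, Arena> proxy(arena);
    std::vector<int, ProxyAllocator<int, Arena>> borrowing(proxy);
    borrowing.push_back(2);
}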


@@ -1,148 +0,0 @@
/* Copyright (C) 2010 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* STL allocator for aligned memory
*/
#ifndef ALIGNED_ALLOCATOR
#define ALIGNED_ALLOCATOR
#include "lib/bits.h" // round_up
#include "lib/sysdep/arch/x86_x64/cache.h"
#include "lib/sysdep/rtl.h" // rtl_AllocateAligned
/**
* stateless STL allocator that aligns elements to the L1 cache line size.
*
* note: the alignment is hard-coded to avoid any allocator state.
* this avoids portability problems, which is important since allocators
* are rather poorly specified.
*
* references:
* http://www.tantalon.com/pete/customallocators.ppt
* http://www.flipcode.com/archives/Aligned_Block_Allocation.shtml
* http://www.josuttis.com/cppcode/allocator.html
*
* derived from code that bears the following copyright notice:
* (C) Copyright Nicolai M. Josuttis 1999.
* Permission to copy, use, modify, sell and distribute this software
* is granted provided this copyright notice appears in all copies.
* This software is provided "as is" without express or implied
* warranty, and with no claim as to its suitability for any purpose.
**/
template<class T>
class AlignedAllocator
{
public:
// type definitions
typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
// rebind allocator to type U
template <class U>
struct rebind
{
typedef AlignedAllocator<U> other;
};
pointer address(reference value) const
{
return &value;
}
const_pointer address(const_reference value) const
{
return &value;
}
NOTHROW_DEFINE AlignedAllocator()
{
}
NOTHROW_DEFINE AlignedAllocator(const AlignedAllocator&)
{
}
template <class U>
NOTHROW_DEFINE AlignedAllocator (const AlignedAllocator<U>&)
{
}
NOTHROW_DEFINE ~AlignedAllocator()
{
}
NOTHROW_DEFINE size_type max_size() const
{
// maximum number of *elements* that can be allocated
return std::numeric_limits<std::size_t>::max() / sizeof(T);
}
// allocate uninitialized storage
pointer allocate(size_type numElements)
{
const size_type alignment = x86_x64::Caches(x86_x64::L1D)->entrySize;
const size_type elementSize = round_up(sizeof(T), alignment);
const size_type size = numElements * elementSize;
pointer p = (pointer)rtl_AllocateAligned(size, alignment);
return p;
}
// deallocate storage of elements that have been destroyed
void deallocate(pointer p, size_type UNUSED(num))
{
rtl_FreeAligned((void*)p);
}
void construct(pointer p, const T& value)
{
new((void*)p) T(value);
}
void destroy(pointer p)
{
p->~T();
UNUSED2(p); // otherwise, warning is raised for reasons unknown
}
};
// indicate that all specializations of this allocator are interchangeable
template <class T1, class T2>
NOTHROW_DEFINE bool operator==(const AlignedAllocator<T1>&, const AlignedAllocator<T2>&)
{
return true;
}
template <class T1, class T2>
NOTHROW_DEFINE bool operator!=(const AlignedAllocator<T1>&, const AlignedAllocator<T2>&)
{
return false;
}
#endif // #ifndef ALIGNED_ALLOCATOR


@@ -28,7 +28,7 @@
 #define ALLOCATOR_POLICIES
 #include "lib/alignment.h" // pageSize
-#include "lib/allocators/allocator_adapters.h"
+#include "lib/allocators/stateless_allocators.h"
 #include "lib/allocators/freelist.h"
 #include <cstring>


@@ -1,75 +0,0 @@
/* Copyright (C) 2011 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* arena allocator (variable-size blocks, no deallocation).
*/
#include "precompiled.h"
#include "lib/allocators/arena.h"
namespace Allocators {
template<class Storage>
struct BasicArenaTest
{
void operator()() const
{
Arena<Storage> a(100);
const size_t initialSpace = a.RemainingBytes();
void* p = a.allocate(100);
ENSURE(p != 0);
ENSURE(a.Contains(uintptr_t(p)));
ENSURE(a.RemainingBytes() == initialSpace-100);
ENSURE(a.Contains(uintptr_t(p)+1));
ENSURE(a.Contains(uintptr_t(p)+99));
ENSURE(!a.Contains(uintptr_t(p)-1));
ENSURE(!a.Contains(uintptr_t(p)+100));
if(a.RemainingBytes() == 0)
ENSURE(a.allocate(1) == 0); // full
else
ENSURE(a.allocate(1) != 0); // can still expand
a.DeallocateAll();
ENSURE(!a.Contains(uintptr_t(p)));
p = a.allocate(36);
ENSURE(p != 0);
ENSURE(a.Contains(uintptr_t(p)));
ENSURE(a.RemainingBytes() == initialSpace-36);
void* p2 = a.allocate(64);
ENSURE(p2 != 0);
ENSURE(a.Contains(uintptr_t(p2)));
ENSURE(a.RemainingBytes() == initialSpace-36-64);
ENSURE(p2 == (void*)(uintptr_t(p)+36));
if(a.RemainingBytes() == 0)
ENSURE(a.allocate(1) == 0); // full
else
ENSURE(a.allocate(1) != 0); // can still expand
}
};
void TestArena()
{
ForEachStorage<BasicArenaTest>();
}
} // namespace Allocators


@@ -1,173 +0,0 @@
/* Copyright (C) 2013 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* arena allocator (variable-size blocks, no deallocation).
*/
#ifndef INCLUDED_ALLOCATORS_ARENA
#define INCLUDED_ALLOCATORS_ARENA
#include "lib/allocators/allocator_policies.h"
namespace Allocators {
/**
* allocator design parameters:
* - O(1) allocation;
* - variable-size blocks;
* - support for deallocating all objects;
* - consecutive allocations are back-to-back;
* - no extra alignment nor padding.
**/
template<class Storage = Storage_Fixed<> >
class Arena
{
NONCOPYABLE(Arena);
public:
Arena(size_t maxSize)
: storage(maxSize)
{
DeallocateAll();
}
size_t RemainingBytes() const
{
return storage.MaxCapacity() - end;
}
void* allocate(size_t size)
{
return (void*)StorageAppend(storage, end, size);
}
void deallocate(void* UNUSED(p), size_t UNUSED(size))
{
// ignored
}
void DeallocateAll()
{
end = 0;
}
// @return whether the address lies within the previously allocated range.
bool Contains(uintptr_t address) const
{
return (address - storage.Address()) < end;
}
private:
Storage storage;
size_t end;
};
LIB_API void TestArena();
/**
* allocator design parameters:
* - grow dynamically with a fixed chunkSize
* - for frequent allocations of size << chunkSize
* - no reallocations, pointers remain valid
**/
class DynamicArena
{
struct ArenaChunk
{
bool Available(size_t size) const
{
return size <= (capacity - end);
}
// Must check Available first or this may return an invalid address
uintptr_t Allocate(size_t size)
{
uintptr_t ptr = storage + end;
end += size;
return ptr;
}
uintptr_t storage;
size_t end;
size_t capacity;
ArenaChunk* next;
};
NONCOPYABLE(DynamicArena);
public:
DynamicArena(size_t chunkSize) : chunkSize(chunkSize), head(NULL)
{
AllocateNewChunk();
}
~DynamicArena()
{
ArenaChunk* chunk = head;
while (chunk != NULL)
{
ArenaChunk* next = chunk->next;
free(chunk);
chunk = next;
}
}
void AllocateNewChunk()
{
// For efficiency, do a single allocation with the ArenaChunk and its storage
ArenaChunk* next = head;
head = (ArenaChunk*)malloc(sizeof(ArenaChunk) + chunkSize);
ENSURE(head);
head->storage = sizeof(ArenaChunk) + uintptr_t(head);
head->end = 0;
head->capacity = chunkSize;
head->next = next;
}
void* allocate(size_t size)
{
if (size > chunkSize)
{
debug_warn(L"DynamicArena cannot allocate more than chunk size");
throw std::bad_alloc();
}
else if (!head->Available(size))
AllocateNewChunk();
return (void*)head->Allocate(size);
}
void deallocate(void* UNUSED(p), size_t UNUSED(size))
{
// ignored
}
private:
const size_t chunkSize;
ArenaChunk *head;
};
} // namespace Allocators
#endif // #ifndef INCLUDED_ALLOCATORS_ARENA
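
The Allocate path above illustrates the bug described in the commit message:
the chunk offset advances by the raw size with no rounding, so one odd-sized
allocation misaligns everything that follows. A simplified sketch of the
failure mode (illustrative only, not the project's code):

#include <cstddef>
#include <cstdint>

struct OldStyleChunk
{
    std::uint8_t storage[256];
    std::size_t end = 0;

    void* Allocate(std::size_t size)
    {
        void* p = &storage[end]; // no rounding up to the required alignment
        end += size;
        return p;
    }
};

void FailureMode()
{
    OldStyleChunk chunk;
    chunk.Allocate(4);            // leaves 'end' at 4
    void* v = chunk.Allocate(16); // only 4-byte aligned
    // Storing a 16-byte-aligned vector type through 'v' (e.g. via an SSE
    // store emitted by GCC at -O3) is undefined behaviour and can crash.
    (void)v;
}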


@@ -1,773 +0,0 @@
/* Copyright (C) 2010 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* (header-less) pool-based heap allocator
*/
#include "precompiled.h"
#include "lib/allocators/headerless.h"
#include "lib/bits.h"
#include "lib/allocators/pool.h"
static const bool performSanityChecks = true;
// shared by Impl::Allocate and FreedBlock::Validate
static bool IsValidSize(size_t size);
//-----------------------------------------------------------------------------
// this combines the boundary tags and link fields into one structure,
// which is safer than direct pointer arithmetic.
//
// it is written to freed memory, which is fine because IsValidSize ensures
// the allocations are large enough.
class FreedBlock
{
friend class RangeList; // manipulates link fields directly
public:
// (required for RangeList::m_sentinel)
FreedBlock()
{
}
void Setup(uintptr_t id, size_t size)
{
m_magic = s_magic;
m_size = size;
m_id = id;
}
void Reset()
{
// clear all fields to prevent accidental reuse
prev = next = 0;
m_id = 0;
m_size = ~size_t(0);
m_magic = 0;
}
size_t Size() const
{
return m_size;
}
/**
* @return whether this appears to be a FreedBlock instance with the
* desired ID. for additional safety, also call Validate().
**/
bool IsFreedBlock(uintptr_t id) const
{
if(m_id != id)
return false;
if(m_magic != s_magic)
return false;
return true;
}
/**
* warn if any invariant doesn't hold.
**/
void Validate(uintptr_t id) const
{
if(!performSanityChecks) return;
// note: RangeList::Validate implicitly checks the prev and next
// fields by iterating over the list.
// note: we can't check for prev != next because we're called for
// footers as well, and they don't have valid pointers.
ENSURE(IsValidSize(m_size));
ENSURE(IsFreedBlock(id));
}
private:
// note: the magic and ID fields are stored at both ends of this
// class to increase the chance of detecting memory corruption.
static const u64 s_magic = 0xFF55AA00BBCCDDEEull;
u64 m_magic;
FreedBlock* prev;
FreedBlock* next;
// size [bytes] of the entire memory block, including header and footer
size_t m_size;
// this differentiates between headers and footers.
uintptr_t m_id;
};
static bool IsValidSize(size_t size)
{
// note: we disallow the questionable practice of zero-byte allocations
// because they may be indicative of bugs.
if(size < HeaderlessAllocator::minAllocationSize)
return false;
if(size % HeaderlessAllocator::allocationAlignment)
return false;
return true;
}
//-----------------------------------------------------------------------------
// freelists
//-----------------------------------------------------------------------------
// policy: address-ordered good fit
// mechanism: segregated range lists of power-of-two size classes
struct AddressOrder
{
static bool ShouldInsertBefore(FreedBlock* current, FreedBlock* successor)
{
return current < successor;
}
};
// "range list" is a freelist of similarly-sized blocks.
class RangeList
{
public:
RangeList()
{
Reset();
}
void Reset()
{
m_sentinel.prev = &m_sentinel;
m_sentinel.next = &m_sentinel;
m_freeBlocks = 0;
m_freeBytes = 0;
}
template<class InsertPolicy>
void Insert(FreedBlock* freedBlock)
{
// find freedBlock before which to insert
FreedBlock* successor;
for(successor = m_sentinel.next; successor != &m_sentinel; successor = successor->next)
{
if(InsertPolicy::ShouldInsertBefore(freedBlock, successor))
break;
}
freedBlock->prev = successor->prev;
freedBlock->next = successor;
successor->prev->next = freedBlock;
successor->prev = freedBlock;
m_freeBlocks++;
m_freeBytes += freedBlock->Size();
}
/**
* @return the first freed block of size >= minSize or 0 if none exists.
**/
FreedBlock* Find(size_t minSize)
{
for(FreedBlock* freedBlock = m_sentinel.next; freedBlock != &m_sentinel; freedBlock = freedBlock->next)
{
if(freedBlock->Size() >= minSize)
return freedBlock;
}
// none found, so average block size is less than the desired size
ENSURE(m_freeBytes/m_freeBlocks < minSize);
return 0;
}
void Remove(FreedBlock* freedBlock)
{
freedBlock->next->prev = freedBlock->prev;
freedBlock->prev->next = freedBlock->next;
ENSURE(m_freeBlocks != 0);
ENSURE(m_freeBytes >= freedBlock->Size());
m_freeBlocks--;
m_freeBytes -= freedBlock->Size();
}
void Validate(uintptr_t id) const
{
if(!performSanityChecks) return;
size_t freeBlocks = 0, freeBytes = 0;
for(FreedBlock* freedBlock = m_sentinel.next; freedBlock != &m_sentinel; freedBlock = freedBlock->next)
{
freedBlock->Validate(id);
freeBlocks++;
freeBytes += freedBlock->Size();
}
for(FreedBlock* freedBlock = m_sentinel.prev; freedBlock != &m_sentinel; freedBlock = freedBlock->prev)
{
freedBlock->Validate(id);
freeBlocks++;
freeBytes += freedBlock->Size();
}
// our idea of the number and size of free blocks is correct
ENSURE(freeBlocks == m_freeBlocks*2 && freeBytes == m_freeBytes*2);
// if empty, state must be as established by Reset
ENSURE(!IsEmpty() || (m_sentinel.next == &m_sentinel && m_sentinel.prev == &m_sentinel));
}
bool IsEmpty() const
{
return (m_freeBlocks == 0);
}
size_t FreeBlocks() const
{
return m_freeBlocks;
}
size_t FreeBytes() const
{
return m_freeBytes;
}
private:
// a sentinel simplifies Insert and Remove. we store it here instead of
// in a separate array to improve locality (it is actually accessed).
mutable FreedBlock m_sentinel;
size_t m_freeBlocks;
size_t m_freeBytes;
};
//-----------------------------------------------------------------------------
class SegregatedRangeLists
{
public:
SegregatedRangeLists()
{
Reset();
}
void Reset()
{
m_bitmap = 0;
for(size_t i = 0; i < numRangeLists; i++)
m_rangeLists[i].Reset();
}
void Insert(FreedBlock* freedBlock)
{
const size_t sizeClass = SizeClass(freedBlock->Size());
m_rangeLists[sizeClass].Insert<AddressOrder>(freedBlock);
m_bitmap |= Bit<uintptr_t>(sizeClass);
}
/**
* @return the first freed block of size >= minSize or 0 if none exists.
**/
FreedBlock* Find(size_t minSize)
{
// iterate over all large enough, non-empty size classes
// (zero overhead for empty size classes)
const size_t minSizeClass = SizeClass(minSize);
size_t sizeClassBits = m_bitmap & ((~size_t(0)) << minSizeClass);
while(sizeClassBits)
{
const size_t size = ValueOfLeastSignificantOneBit(sizeClassBits);
sizeClassBits &= ~size; // remove from sizeClassBits
const size_t sizeClass = SizeClass(size);
FreedBlock* freedBlock = m_rangeLists[sizeClass].Find(minSize);
if(freedBlock)
return freedBlock;
}
// apparently all classes above minSizeClass are empty,
// or the above would have succeeded.
ENSURE(m_bitmap < Bit<uintptr_t>(minSizeClass+1));
return 0;
}
void Remove(FreedBlock* freedBlock)
{
const size_t sizeClass = SizeClass(freedBlock->Size());
m_rangeLists[sizeClass].Remove(freedBlock);
// (masking with !IsEmpty() << sizeClass would probably be faster)
if(m_rangeLists[sizeClass].IsEmpty())
m_bitmap &= ~Bit<uintptr_t>(sizeClass);
}
void Validate(uintptr_t id) const
{
for(size_t i = 0; i < numRangeLists; i++)
{
m_rangeLists[i].Validate(id);
// both bitmap and list must agree on whether they are empty
ENSURE(((m_bitmap & Bit<uintptr_t>(i)) == 0) == m_rangeLists[i].IsEmpty());
}
}
size_t FreeBlocks() const
{
size_t freeBlocks = 0;
for(size_t i = 0; i < numRangeLists; i++)
freeBlocks += m_rangeLists[i].FreeBlocks();
return freeBlocks;
}
size_t FreeBytes() const
{
size_t freeBytes = 0;
for(size_t i = 0; i < numRangeLists; i++)
freeBytes += m_rangeLists[i].FreeBytes();
return freeBytes;
}
private:
/**
* @return "size class" of a given size.
* class i > 0 contains blocks of size (2**(i-1), 2**i].
**/
static size_t SizeClass(size_t size)
{
return ceil_log2(size);
}
static uintptr_t ValueOfLeastSignificantOneBit(uintptr_t x)
{
return (x & -(intptr_t)x);
}
// segregated, i.e. one list per size class.
static const size_t numRangeLists = sizeof(uintptr_t)*CHAR_BIT;
RangeList m_rangeLists[numRangeLists];
// bit i set <==> size class i's freelist is not empty.
// this allows finding a non-empty list in O(1).
uintptr_t m_bitmap;
};
//-----------------------------------------------------------------------------
// coalescing
//-----------------------------------------------------------------------------
// policy: immediately coalesce
// mechanism: boundary tags
// note: the id and magic values are all that differentiates tags from
// user data. this isn't 100% reliable, but as with headers, we don't want
// to insert extra boundary tags into the allocated memory.
// note: footers consist of Tag{magic, ID, size}, while headers also
// need prev/next pointers. this could comfortably fit in 64 bytes,
// but we don't want to inherit headers from a base class because its
// prev/next pointers should reside between the magic and ID fields.
// maintaining separate FreedBlock and Footer classes is also undesirable;
// we prefer to use FreedBlock for both, which increases the minimum
// allocation size to 64 + allocationAlignment, e.g. 128.
// that's not a problem because the allocator is designed for
// returning pages or IO buffers (4..256 KB).
cassert(HeaderlessAllocator::minAllocationSize >= 2*sizeof(FreedBlock));
class BoundaryTagManager
{
public:
BoundaryTagManager()
: m_freeBlocks(0), m_freeBytes(0)
{
}
FreedBlock* WriteTags(u8* p, size_t size)
{
FreedBlock* freedBlock = (FreedBlock*)p;
freedBlock->Setup(s_headerId, size);
Footer(freedBlock)->Setup(s_footerId, size);
m_freeBlocks++;
m_freeBytes += size;
Validate(freedBlock);
return freedBlock;
}
void RemoveTags(FreedBlock* freedBlock)
{
Validate(freedBlock);
ENSURE(m_freeBlocks != 0);
ENSURE(m_freeBytes >= freedBlock->Size());
m_freeBlocks--;
m_freeBytes -= freedBlock->Size();
FreedBlock* footer = Footer(freedBlock);
freedBlock->Reset();
footer->Reset();
}
FreedBlock* PrecedingBlock(u8* p, u8* beginningOfPool) const
{
if(p == beginningOfPool) // avoid accessing invalid memory
return 0;
FreedBlock* precedingBlock;
{
FreedBlock* const footer = (FreedBlock*)(p - sizeof(FreedBlock));
if(!footer->IsFreedBlock(s_footerId))
return 0;
footer->Validate(s_footerId);
precedingBlock = (FreedBlock*)(p - footer->Size());
}
Validate(precedingBlock);
return precedingBlock;
}
FreedBlock* FollowingBlock(u8* p, size_t size, u8* endOfPool) const
{
if(p+size == endOfPool) // avoid accessing invalid memory
return 0;
FreedBlock* const followingBlock = (FreedBlock*)(p + size);
if(!followingBlock->IsFreedBlock(s_headerId))
return 0;
Validate(followingBlock);
return followingBlock;
}
size_t FreeBlocks() const
{
return m_freeBlocks;
}
size_t FreeBytes() const
{
return m_freeBytes;
}
// (generated via GUID)
static const uintptr_t s_headerId = 0x111E8E6Fu;
static const uintptr_t s_footerId = 0x4D745342u;
private:
void Validate(FreedBlock* freedBlock) const
{
if(!performSanityChecks) return;
// the existence of freedBlock means our bookkeeping better have
// records of at least that much memory.
ENSURE(m_freeBlocks != 0);
ENSURE(m_freeBytes >= freedBlock->Size());
freedBlock->Validate(s_headerId);
Footer(freedBlock)->Validate(s_footerId);
}
static FreedBlock* Footer(FreedBlock* freedBlock)
{
u8* const p = (u8*)freedBlock;
return (FreedBlock*)(p + freedBlock->Size() - sizeof(FreedBlock));
}
size_t m_freeBlocks;
size_t m_freeBytes;
};
//-----------------------------------------------------------------------------
// stats
//-----------------------------------------------------------------------------
class Stats
{
public:
void OnReset()
{
if(!performSanityChecks) return;
m_totalAllocatedBlocks = m_totalAllocatedBytes = 0;
m_totalDeallocatedBlocks = m_totalDeallocatedBytes = 0;
m_currentExtantBlocks = m_currentExtantBytes = 0;
m_currentFreeBlocks = m_currentFreeBytes = 0;
}
void OnAllocate(size_t size)
{
if(!performSanityChecks) return;
m_totalAllocatedBlocks++;
m_totalAllocatedBytes += size;
m_currentExtantBlocks++;
m_currentExtantBytes += size;
}
void OnDeallocate(size_t size)
{
if(!performSanityChecks) return;
m_totalDeallocatedBlocks++;
m_totalDeallocatedBytes += size;
ENSURE(m_totalDeallocatedBlocks <= m_totalAllocatedBlocks);
ENSURE(m_totalDeallocatedBytes <= m_totalAllocatedBytes);
ENSURE(m_currentExtantBlocks != 0);
ENSURE(m_currentExtantBytes >= size);
m_currentExtantBlocks--;
m_currentExtantBytes -= size;
}
void OnAddToFreelist(size_t size)
{
m_currentFreeBlocks++;
m_currentFreeBytes += size;
}
void OnRemoveFromFreelist(size_t size)
{
if(!performSanityChecks) return;
ENSURE(m_currentFreeBlocks != 0);
ENSURE(m_currentFreeBytes >= size);
m_currentFreeBlocks--;
m_currentFreeBytes -= size;
}
void Validate() const
{
if(!performSanityChecks) return;
ENSURE(m_totalDeallocatedBlocks <= m_totalAllocatedBlocks);
ENSURE(m_totalDeallocatedBytes <= m_totalAllocatedBytes);
ENSURE(m_currentExtantBlocks == m_totalAllocatedBlocks-m_totalDeallocatedBlocks);
ENSURE(m_currentExtantBytes == m_totalAllocatedBytes-m_totalDeallocatedBytes);
}
size_t FreeBlocks() const
{
return m_currentFreeBlocks;
}
size_t FreeBytes() const
{
return m_currentFreeBytes;
}
private:
u64 m_totalAllocatedBlocks, m_totalAllocatedBytes;
u64 m_totalDeallocatedBlocks, m_totalDeallocatedBytes;
u64 m_currentExtantBlocks, m_currentExtantBytes;
u64 m_currentFreeBlocks, m_currentFreeBytes;
};
//-----------------------------------------------------------------------------
// HeaderlessAllocator::Impl
//-----------------------------------------------------------------------------
static void AssertEqual(size_t x1, size_t x2, size_t x3)
{
ENSURE(x1 == x2 && x2 == x3);
}
class HeaderlessAllocator::Impl
{
public:
Impl(size_t poolSize)
{
(void)pool_create(&m_pool, poolSize, 0);
Reset();
}
~Impl()
{
Validate();
(void)pool_destroy(&m_pool);
}
void Reset()
{
pool_free_all(&m_pool);
m_segregatedRangeLists.Reset();
m_stats.OnReset();
Validate();
}
NOTHROW_DEFINE void* Allocate(size_t size)
{
ENSURE(IsValidSize(size));
Validate();
void* p = TakeAndSplitFreeBlock(size);
if(!p)
{
p = pool_alloc(&m_pool, size);
if(!p) // both failed; don't throw bad_alloc because
return 0; // this often happens with the file cache.
}
// (NB: we must not update the statistics if allocation failed)
m_stats.OnAllocate(size);
Validate();
ENSURE((uintptr_t)p % allocationAlignment == 0);
return p;
}
void Deallocate(u8* p, size_t size)
{
ENSURE((uintptr_t)p % allocationAlignment == 0);
ENSURE(IsValidSize(size));
ENSURE(pool_contains(&m_pool, p));
ENSURE(pool_contains(&m_pool, p+size-1));
Validate();
m_stats.OnDeallocate(size);
Coalesce(p, size);
AddToFreelist(p, size);
Validate();
}
void Validate() const
{
if(!performSanityChecks) return;
m_segregatedRangeLists.Validate(BoundaryTagManager::s_headerId);
m_stats.Validate();
AssertEqual(m_stats.FreeBlocks(), m_segregatedRangeLists.FreeBlocks(), m_boundaryTagManager.FreeBlocks());
AssertEqual(m_stats.FreeBytes(), m_segregatedRangeLists.FreeBytes(), m_boundaryTagManager.FreeBytes());
}
private:
void AddToFreelist(u8* p, size_t size)
{
FreedBlock* freedBlock = m_boundaryTagManager.WriteTags(p, size);
m_segregatedRangeLists.Insert(freedBlock);
m_stats.OnAddToFreelist(size);
}
void RemoveFromFreelist(FreedBlock* freedBlock)
{
m_stats.OnRemoveFromFreelist(freedBlock->Size());
m_segregatedRangeLists.Remove(freedBlock);
m_boundaryTagManager.RemoveTags(freedBlock);
}
/**
* expand a block by coalescing it with its free neighbor(s).
**/
void Coalesce(u8*& p, size_t& size)
{
{
FreedBlock* precedingBlock = m_boundaryTagManager.PrecedingBlock(p, m_pool.da.base);
if(precedingBlock)
{
p -= precedingBlock->Size();
size += precedingBlock->Size();
RemoveFromFreelist(precedingBlock);
}
}
{
FreedBlock* followingBlock = m_boundaryTagManager.FollowingBlock(p, size, m_pool.da.base+m_pool.da.pos);
if(followingBlock)
{
size += followingBlock->Size();
RemoveFromFreelist(followingBlock);
}
}
}
void* TakeAndSplitFreeBlock(size_t size)
{
u8* p;
size_t leftoverSize = 0;
{
FreedBlock* freedBlock = m_segregatedRangeLists.Find(size);
if(!freedBlock)
return 0;
p = (u8*)freedBlock;
leftoverSize = freedBlock->Size() - size;
RemoveFromFreelist(freedBlock);
}
if(IsValidSize(leftoverSize))
AddToFreelist(p+size, leftoverSize);
return p;
}
Pool m_pool;
SegregatedRangeLists m_segregatedRangeLists;
BoundaryTagManager m_boundaryTagManager;
Stats m_stats;
};
//-----------------------------------------------------------------------------
HeaderlessAllocator::HeaderlessAllocator(size_t poolSize)
: impl(new Impl(poolSize))
{
}
void HeaderlessAllocator::Reset()
{
return impl->Reset();
}
NOTHROW_DEFINE void* HeaderlessAllocator::Allocate(size_t size)
{
return impl->Allocate(size);
}
void HeaderlessAllocator::Deallocate(void* p, size_t size)
{
return impl->Deallocate((u8*)p, size);
}
void HeaderlessAllocator::Validate() const
{
return impl->Validate();
}


@@ -1,99 +0,0 @@
/* Copyright (C) 2010 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* (header-less) pool-based heap allocator
*/
#ifndef INCLUDED_ALLOCATORS_HEADERLESS
#define INCLUDED_ALLOCATORS_HEADERLESS
/**
* (header-less) pool-based heap allocator
* provides Allocate and Deallocate without requiring in-band headers;
* this is useful when allocating page-aligned I/O buffers
* (headers would waste an entire page per buffer)
*
* policy:
* - allocation: first exhaust the freelist, then allocate more
* - freelist: address-ordered good fit, always split blocks
* - coalescing: immediate
* mechanism:
* - coalescing: boundary tags in freed memory with distinct bit patterns
* - freelist: segregated range lists of power-of-two size classes
*
* note: this module basically implements a (rather complex) freelist and
* could be made independent of the Pool allocation scheme. however, reading
* neighboring boundary tags may cause segmentation violations; knowing the
* bounds of valid committed memory (i.e. Pool extents) avoids this.
**/
class HeaderlessAllocator
{
public:
// allocators must 'naturally' align pointers, i.e. ensure they are
// multiples of the largest native type (currently __m128).
// since there are no headers, we can guarantee alignment by
// requiring sizes to be multiples of allocationAlignment.
static const size_t allocationAlignment = 16;
// allocations must be large enough to hold our boundary tags
// when freed. (see rationale above BoundaryTagManager)
static const size_t minAllocationSize = 128;
/**
* @param poolSize maximum amount of memory that can be allocated.
* this much virtual address space is reserved up-front (see Pool).
**/
HeaderlessAllocator(size_t poolSize);
/**
* restore the original state (as if newly constructed).
* this includes reclaiming all extant allocations.
**/
void Reset();
/**
* @param size [bytes] (= minAllocationSize + i*allocationAlignment).
* (this allocator is designed for requests on the order of several KiB)
* @return allocated memory or 0 if the pool is too fragmented or full.
**/
NOTHROW_DECLARE void* Allocate(size_t size);
/**
* deallocate memory.
* @param p must be exactly as returned by Allocate (in particular,
* evenly divisible by allocationAlignment)
* @param size must be exactly as specified to Allocate.
**/
void Deallocate(void* p, size_t size);
/**
* perform sanity checks; ensure allocator state is consistent.
**/
void Validate() const;
private:
class Impl;
shared_ptr<Impl> impl;
};
#endif // #ifndef INCLUDED_ALLOCATORS_HEADERLESS


@@ -29,7 +29,6 @@
 #include "lib/alignment.h"
 #include "lib/allocators/freelist.h"
-#include "lib/allocators/allocator_adapters.h"
 #include "lib/timer.h"

View File

@@ -1,4 +1,4 @@
-/* Copyright (C) 2019 Wildfire Games.
+/* Copyright (C) 2021 Wildfire Games.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
@@ -20,13 +20,8 @@
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
-/*
- * adapters for allocators; provides a minimal subset of the
- * STL allocator interface.
- */
-#ifndef ALLOCATOR_ADAPTERS
-#define ALLOCATOR_ADAPTERS
+#ifndef INCLUDED_STATELESS_ALLOCATORS
+#define INCLUDED_STATELESS_ALLOCATORS
 #include "lib/sysdep/rtl.h"
 #include "lib/sysdep/vm.h"
@@ -92,96 +87,4 @@ struct Allocator_AddressSpace
 }
 };
+#endif // INCLUDED_STATELESS_ALLOCATORS
/**
* fully STL-compatible allocator that simply draws upon another Allocator.
* this allows a single allocator to serve multiple STL containers.
*/
template<typename T, class Allocator>
class ProxyAllocator
{
public:
typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
template<class U>
struct rebind
{
typedef ProxyAllocator<U, Allocator> other;
};
explicit NOTHROW_DEFINE ProxyAllocator(Allocator& allocator)
: allocator(&allocator)
{
}
template<typename U, class A>
NOTHROW_DEFINE ProxyAllocator(const ProxyAllocator<U,A>& rhs)
: allocator(rhs.allocator)
{
}
// (required by VC2010 std::vector)
bool operator==(const ProxyAllocator& rhs) const
{
return allocator == rhs.allocator;
}
bool operator!=(const ProxyAllocator& rhs) const
{
return !operator==(rhs);
}
pointer address(reference r)
{
return &r;
}
const_pointer address(const_reference s)
{
return &s;
}
size_type max_size() const throw ()
{
return std::numeric_limits<std::size_t>::max() / sizeof(T);
}
void construct(const pointer ptr, const value_type& t)
{
new(ptr) T(t);
}
void destroy(pointer ptr)
{
ptr->~T();
UNUSED2(ptr); // silence MSVC warnings
}
pointer allocate(size_type n)
{
// safely handle zero-sized allocations (happens with GCC STL - see ticket #909).
if(n == 0)
n = 1;
return (pointer)allocator->allocate(n*sizeof(T));
}
pointer allocate(size_type n, const void* const)
{
return allocate(n);
}
void deallocate(const pointer ptr, const size_type n)
{
return allocator->deallocate(ptr, n*sizeof(T));
}
//private: // otherwise copy ctor cannot access it
Allocator* allocator;
};
#endif // #ifndef ALLOCATOR_ADAPTERS


@@ -0,0 +1,75 @@
/* Copyright (C) 2021 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "lib/self_test.h"
#include "lib/allocators/DynamicArena.h"
class TestDynamicArena : public CxxTest::TestSuite
{
public:
void test_allocate()
{
Allocators::DynamicArena<100> testArena;
u8* p = static_cast<u8*>(testArena.allocate(10, nullptr, 1));
TS_ASSERT(p != nullptr);
void* p2 = testArena.allocate(10, nullptr, 1);
TS_ASSERT(p + 10 == p2);
void* p3 = testArena.allocate(80, nullptr, 1);
TS_ASSERT(p + 20 == p3);
void* p4 = testArena.allocate(100, nullptr, 1);
TS_ASSERT(p4 != nullptr);
void* p5 = testArena.allocate(1, nullptr, 1);
TS_ASSERT(p5 != nullptr);
void* p6 = testArena.allocate(100, nullptr, 1);
TS_ASSERT(p6 != nullptr);
void* p7 = testArena.allocate(0, nullptr, 1);
TS_ASSERT(p7 != nullptr);
}
void test_alignment()
{
Allocators::DynamicArena<100> testArena;
u8* p = static_cast<u8*>(testArena.allocate(4, nullptr, 1));
TS_ASSERT(p != nullptr);
u8* p2 = static_cast<u8*>(testArena.allocate(1, nullptr, 8));
TS_ASSERT_EQUALS(p + 8, p2);
p2 = static_cast<u8*>(testArena.allocate(1, nullptr, 8));
TS_ASSERT_EQUALS(p + 16, p2);
p2 = static_cast<u8*>(testArena.allocate(1, nullptr, 8));
TS_ASSERT_EQUALS(p + 24, p2);
p2 = static_cast<u8*>(testArena.allocate(1, nullptr, 2));
TS_ASSERT_EQUALS(p + 26, p2);
p2 = static_cast<u8*>(testArena.allocate(1, nullptr, 8));
TS_ASSERT_EQUALS(p + 32, p2);
}
};
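
For reference, the expected offsets in test_alignment follow from rounding
the block offset up to the requested alignment before each allocation: the
initial 4-byte allocation leaves the offset at 4; rounding up to 8 gives
p + 8 (offset 9); rounding 9 up to 16 gives p + 16 (offset 17); rounding 17
up to 24 gives p + 24 (offset 25); rounding 25 up to the next multiple of 2
gives p + 26 (offset 27); and rounding 27 up to 32 gives p + 32.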


@@ -0,0 +1,81 @@
/* Copyright (C) 2021 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "lib/self_test.h"
#include "lib/allocators/DynamicArena.h"
#include "lib/allocators/STLAllocators.h"
#include <functional>
#include <map>
#include <unordered_map>
#include <vector>
class TestSTLAllocators : public CxxTest::TestSuite
{
public:
struct SomeData
{
SomeData(int) {};
int padding[10];
};
template<template<typename... Args> typename Alloc, typename D>
using Allocator = Alloc<D, Allocators::DynamicArena<4096>>;
template<typename K, typename V, template<typename... Args> typename Alloc>
using Map = std::map<K, V, std::less<K>, Allocator<Alloc, typename std::map<K, V>::value_type>>;
template<typename K, typename V, template<typename... Args> typename Alloc>
using UMap = std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, Allocator<Alloc, typename std::unordered_map<K, V>::value_type>>;
void test_CustomAllocator()
{
Map<int, SomeData, STLAllocator> map;
UMap<int, SomeData, STLAllocator> umap;
std::vector<SomeData, STLAllocator<SomeData, Allocators::DynamicArena<4096>>> vec;
map.emplace(4, 5);
umap.emplace(4, 5);
vec.emplace_back(5);
map.clear();
umap.clear();
vec.clear();
}
void test_ProxyAllocator()
{
Allocators::DynamicArena<4096> arena;
Map<int, SomeData, ProxyAllocator> map(arena);
UMap<int, SomeData, ProxyAllocator> umap(arena);
std::vector<SomeData, Allocator<ProxyAllocator, SomeData>> vec(arena);
map.emplace(4, 5);
umap.emplace(4, 5);
vec.emplace_back(5);
map.clear();
umap.clear();
vec.clear();
}
};


@@ -1,148 +0,0 @@
/* Copyright (C) 2010 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "lib/self_test.h"
#include "lib/bits.h" // round_down
#include "lib/allocators/headerless.h"
void* const null = 0;
class TestHeaderless: public CxxTest::TestSuite
{
public:
void test_Basic()
{
HeaderlessAllocator a(8192);
// (these are disabled because they raise an assert)
#if 0
// can't Allocate unaligned sizes
TS_ASSERT_EQUALS(a.Allocate(HeaderlessAllocator::minAllocationSize+1), null);
// can't Allocate too small amounts
TS_ASSERT_EQUALS(a.Allocate(HeaderlessAllocator::minAllocationSize/2), null);
#endif
// can Allocate the entire pool
char* p1 = (char*)a.Allocate(4096);
char* p2 = (char*)a.Allocate(4096);
TS_ASSERT_DIFFERS(p1, null);
TS_ASSERT_DIFFERS(p2, null);
// back-to-back (non-freelist) allocations should be contiguous
TS_ASSERT_EQUALS(p1+4096, p2);
// allocations are writable
p1[0] = 11;
p1[4095] = 12;
}
void test_Free()
{
// Deallocate allows immediate reuse of the freed pointer
HeaderlessAllocator a(4096);
void* p1 = a.Allocate(1024);
a.Deallocate(p1, 1024);
void* p2 = a.Allocate(1024);
TS_ASSERT_EQUALS(p1, p2);
}
void test_Coalesce()
{
HeaderlessAllocator a(0x10000);
// can Allocate non-power-of-two sizes (note: the current
// implementation only allows sizes that are multiples of 0x10)
void* p1 = a.Allocate(0x5680);
void* p2 = a.Allocate(0x78A0);
void* p3 = a.Allocate(0x1240);
TS_ASSERT_DIFFERS(p1, null);
TS_ASSERT_DIFFERS(p2, null);
TS_ASSERT_DIFFERS(p3, null);
// after freeing, must be able to allocate the total amount
// note: we don't insist on being able to fill the entire
// memory range. in this case, the problem is that the pool has some
// virtual address space left, but the allocator doesn't grab that
// and add it to the freelist. that feature is currently not
// implemented.
a.Deallocate(p1, 0x5680);
a.Deallocate(p2, 0x78A0);
a.Deallocate(p3, 0x1240);
void* p4 = a.Allocate(0xE140);
TS_ASSERT_DIFFERS(p4, null);
}
void test_Reset()
{
// after Reset, must return the same pointer as a freshly constructed instance
HeaderlessAllocator a(4096);
void* p1 = a.Allocate(128);
a.Reset();
void* p2 = a.Allocate(128);
TS_ASSERT_EQUALS(p1, p2);
}
// will the allocator survive a series of random but valid Allocate/Deallocate?
void test_Randomized()
{
const size_t poolSize = 1024*1024;
HeaderlessAllocator a(poolSize);
typedef std::map<void*, size_t> AllocMap;
AllocMap allocs;
srand(1);
for(int i = 0; i < 1000; i++)
{
// allocate
if(rand() >= RAND_MAX/2)
{
const size_t maxSize = (size_t)((rand() / (float)RAND_MAX) * poolSize);
const size_t size = std::max((size_t)HeaderlessAllocator::minAllocationSize, round_down(maxSize, HeaderlessAllocator::allocationAlignment));
// (the size_t cast on minAllocationSize prevents max taking a reference to the non-defined variable)
void* p = a.Allocate(size);
if(!p)
continue;
TS_ASSERT(allocs.find(p) == allocs.end());
allocs[p] = size;
}
// free
else
{
if(allocs.empty())
continue;
// find random allocation to deallocate
AllocMap::iterator it = allocs.begin();
const int numToSkip = rand() % (int)allocs.size();
for(int skip = 0; skip < numToSkip; skip++)
++it;
void* p = (*it).first;
size_t size = (*it).second;
allocs.erase(it);
a.Deallocate(p, size);
}
}
}
};


@@ -1,4 +1,4 @@
-/* Copyright (C) 2020 Wildfire Games.
+/* Copyright (C) 2021 Wildfire Games.
 * This file is part of 0 A.D.
 *
 * 0 A.D. is free software: you can redistribute it and/or modify
@@ -24,8 +24,8 @@
 #include "graphics/ModelDef.h"
 #include "graphics/ShaderManager.h"
 #include "graphics/TextureManager.h"
-#include "lib/allocators/allocator_adapters.h"
-#include "lib/allocators/arena.h"
+#include "lib/allocators/DynamicArena.h"
+#include "lib/allocators/STLAllocators.h"
 #include "lib/hash.h"
 #include "lib/ogl.h"
 #include "maths/Vector3D.h"
@@ -403,8 +403,10 @@ void ShaderModelRenderer::Render(const RenderModifierPtr& modifier, const CShade
 * list in each, rebinding the GL state whenever it changes.
 */
-Allocators::DynamicArena arena(256 * KiB);
-using ModelListAllocator = ProxyAllocator<CModel*, Allocators::DynamicArena>;
+using Arena = Allocators::DynamicArena<256 * KiB>;
+Arena arena;
+using ModelListAllocator = ProxyAllocator<CModel*, Arena>;
 using ModelList_t = std::vector<CModel*, ModelListAllocator>;
 using MaterialBuckets_t = std::unordered_map<
 SMRMaterialBucketKey,
@@ -413,7 +415,7 @@ void ShaderModelRenderer::Render(const RenderModifierPtr& modifier, const CShade
 std::equal_to<SMRMaterialBucketKey>,
 ProxyAllocator<
 std::pair<const SMRMaterialBucketKey, ModelList_t>,
-Allocators::DynamicArena> >;
+Arena> >;
 MaterialBuckets_t materialBuckets((MaterialBuckets_t::allocator_type(arena)));
@@ -467,10 +469,10 @@ void ShaderModelRenderer::Render(const RenderModifierPtr& modifier, const CShade
 }
 }
-typedef ProxyAllocator<SMRSortByDistItem, Allocators::DynamicArena> SortByDistItemsAllocator;
+using SortByDistItemsAllocator = ProxyAllocator<SMRSortByDistItem, Arena>;
 std::vector<SMRSortByDistItem, SortByDistItemsAllocator> sortByDistItems((SortByDistItemsAllocator(arena)));
-typedef ProxyAllocator<CShaderTechniquePtr, Allocators::DynamicArena> SortByTechItemsAllocator;
+using SortByTechItemsAllocator = ProxyAllocator<CShaderTechniquePtr, Arena>;
 std::vector<CShaderTechniquePtr, SortByTechItemsAllocator> sortByDistTechs((SortByTechItemsAllocator(arena)));
 // indexed by sortByDistItems[i].techIdx
 // (which stores indexes instead of CShaderTechniquePtr directly
@@ -478,7 +480,7 @@ void ShaderModelRenderer::Render(const RenderModifierPtr& modifier, const CShade
 // if we just stored raw CShaderTechnique* and assumed the shader manager
 // will keep it alive long enough)
-typedef ProxyAllocator<SMRTechBucket, Allocators::DynamicArena> TechBucketsAllocator;
+using TechBucketsAllocator = ProxyAllocator<SMRTechBucket, Arena>;
 std::vector<SMRTechBucket, TechBucketsAllocator> techBuckets((TechBucketsAllocator(arena)));
 {
@@ -588,18 +590,18 @@ void ShaderModelRenderer::Render(const RenderModifierPtr& modifier, const CShade
 // This vector keeps track of texture changes during rendering. It is kept outside the
 // loops to avoid excessive reallocations. The token allocation of 64 elements
 // should be plenty, though it is reallocated below (at a cost) if necessary.
-typedef ProxyAllocator<CTexture*, Allocators::DynamicArena> TextureListAllocator;
+using TextureListAllocator = ProxyAllocator<CTexture*, Arena>;
 std::vector<CTexture*, TextureListAllocator> currentTexs((TextureListAllocator(arena)));
 currentTexs.reserve(64);
 // texBindings holds the identifier bindings in the shader, which can no longer be defined
 // statically in the ShaderRenderModifier class. texBindingNames uses interned strings to
 // keep track of when bindings need to be reevaluated.
-typedef ProxyAllocator<CShaderProgram::Binding, Allocators::DynamicArena> BindingListAllocator;
+using BindingListAllocator = ProxyAllocator<CShaderProgram::Binding, Arena>;
 std::vector<CShaderProgram::Binding, BindingListAllocator> texBindings((BindingListAllocator(arena)));
 texBindings.reserve(64);
-typedef ProxyAllocator<CStrIntern, Allocators::DynamicArena> BindingNamesListAllocator;
+using BindingNamesListAllocator = ProxyAllocator<CStrIntern, Arena>;
 std::vector<CStrIntern, BindingNamesListAllocator> texBindingNames((BindingNamesListAllocator(arena)));
 texBindingNames.reserve(64);


@ -1,4 +1,4 @@
/* Copyright (C) 2020 Wildfire Games. /* Copyright (C) 2021 Wildfire Games.
* This file is part of 0 A.D. * This file is part of 0 A.D.
* *
* 0 A.D. is free software: you can redistribute it and/or modify * 0 A.D. is free software: you can redistribute it and/or modify
@ -27,8 +27,8 @@
#include "graphics/Terrain.h" #include "graphics/Terrain.h"
#include "graphics/TerrainTextureEntry.h" #include "graphics/TerrainTextureEntry.h"
#include "graphics/TextRenderer.h" #include "graphics/TextRenderer.h"
#include "lib/alignment.h" #include "lib/allocators/DynamicArena.h"
#include "lib/allocators/arena.h" #include "lib/allocators/STLAllocators.h"
#include "maths/MathUtil.h" #include "maths/MathUtil.h"
#include "ps/CLogger.h" #include "ps/CLogger.h"
#include "ps/Game.h" #include "ps/Game.h"
@ -677,14 +677,16 @@ void CPatchRData::Update(CSimulation2* simulation)
// batches uses an arena allocator. (All allocations are short-lived so we can
// just throw away the whole arena at the end of each frame.)
+using Arena = Allocators::DynamicArena<1 * MiB>;
// std::map types with appropriate arena allocators and default comparison operator
-#define POOLED_BATCH_MAP(Key, Value) \
-std::map<Key, Value, std::less<Key>, ProxyAllocator<std::pair<Key const, Value>, Allocators::DynamicArena > >
+template<class Key, class Value>
+using PooledBatchMap = std::map<Key, Value, std::less<Key>, ProxyAllocator<std::pair<Key const, Value>, Arena>>;
// Equivalent to "m[k]", when it returns an arena-allocated std::map (since we can't
// use the default constructor in that case)
template<typename M>
-typename M::mapped_type& PooledMapGet(M& m, const typename M::key_type& k, Allocators::DynamicArena& arena)
+typename M::mapped_type& PooledMapGet(M& m, const typename M::key_type& k, Arena& arena)
{
return m.insert(std::make_pair(k,
typename M::mapped_type(typename M::mapped_type::key_compare(), typename M::mapped_type::allocator_type(arena))
@ -693,7 +695,7 @@ typename M::mapped_type& PooledMapGet(M& m, const typename M::key_type& k, Alloc
// Equivalent to "m[k]", when it returns a std::pair of arena-allocated std::vectors // Equivalent to "m[k]", when it returns a std::pair of arena-allocated std::vectors
template<typename M> template<typename M>
typename M::mapped_type& PooledPairGet(M& m, const typename M::key_type& k, Allocators::DynamicArena& arena) typename M::mapped_type& PooledPairGet(M& m, const typename M::key_type& k, Arena& arena)
{ {
return m.insert(std::make_pair(k, std::make_pair( return m.insert(std::make_pair(k, std::make_pair(
typename M::mapped_type::first_type(typename M::mapped_type::first_type::allocator_type(arena)), typename M::mapped_type::first_type(typename M::mapped_type::first_type::allocator_type(arena)),
@ -702,23 +704,23 @@ typename M::mapped_type& PooledPairGet(M& m, const typename M::key_type& k, Allo
}
// Each multidraw batch has a list of index counts, and a list of pointers-to-first-indexes
-typedef std::pair<std::vector<GLint, ProxyAllocator<GLint, Allocators::DynamicArena > >, std::vector<void*, ProxyAllocator<void*, Allocators::DynamicArena > > > BatchElements;
+using BatchElements = std::pair<std::vector<GLint, ProxyAllocator<GLint, Arena>>, std::vector<void*, ProxyAllocator<void*, Arena>>>;
// Group batches by index buffer
-typedef POOLED_BATCH_MAP(CVertexBuffer*, BatchElements) IndexBufferBatches;
+using IndexBufferBatches = PooledBatchMap<CVertexBuffer*, BatchElements>;
// Group batches by vertex buffer
-typedef POOLED_BATCH_MAP(CVertexBuffer*, IndexBufferBatches) VertexBufferBatches;
+using VertexBufferBatches = PooledBatchMap<CVertexBuffer*, IndexBufferBatches>;
// Group batches by texture
-typedef POOLED_BATCH_MAP(CTerrainTextureEntry*, VertexBufferBatches) TextureBatches;
+using TextureBatches = PooledBatchMap<CTerrainTextureEntry*, VertexBufferBatches>;
void CPatchRData::RenderBases(const std::vector<CPatchRData*>& patches, const CShaderDefines& context,
ShadowMap* shadow, bool isDummyShader, const CShaderProgramPtr& dummy)
{
-Allocators::DynamicArena arena(1 * MiB);
+Arena arena;
-TextureBatches batches (TextureBatches::key_compare(), (TextureBatches::allocator_type(arena)));
+TextureBatches batches(TextureBatches::key_compare(), (TextureBatches::allocator_type(arena)));
PROFILE_START("compute batches");
@ -863,7 +865,7 @@ void CPatchRData::RenderBases(const std::vector<CPatchRData*>& patches, const CS
*/
struct SBlendBatch
{
-SBlendBatch(Allocators::DynamicArena& arena) :
+SBlendBatch(Arena& arena) :
m_Batches(VertexBufferBatches::key_compare(), VertexBufferBatches::allocator_type(arena))
{
}
@ -878,12 +880,12 @@ struct SBlendBatch
struct SBlendStackItem
{
SBlendStackItem(CVertexBuffer::VBChunk* v, CVertexBuffer::VBChunk* i,
-const std::vector<CPatchRData::SSplat>& s, Allocators::DynamicArena& arena) :
+const std::vector<CPatchRData::SSplat>& s, Arena& arena) :
vertices(v), indices(i), splats(s.begin(), s.end(), SplatStack::allocator_type(arena))
{
}
-typedef std::vector<CPatchRData::SSplat, ProxyAllocator<CPatchRData::SSplat, Allocators::DynamicArena > > SplatStack;
+using SplatStack = std::vector<CPatchRData::SSplat, ProxyAllocator<CPatchRData::SSplat, Arena>>;
CVertexBuffer::VBChunk* vertices;
CVertexBuffer::VBChunk* indices;
SplatStack splats;
@ -892,9 +894,9 @@ struct SBlendStackItem
void CPatchRData::RenderBlends(const std::vector<CPatchRData*>& patches, const CShaderDefines& context,
ShadowMap* shadow, bool isDummyShader, const CShaderProgramPtr& dummy)
{
-Allocators::DynamicArena arena(1 * MiB);
+Arena arena;
-typedef std::vector<SBlendBatch, ProxyAllocator<SBlendBatch, Allocators::DynamicArena > > BatchesStack;
+using BatchesStack = std::vector<SBlendBatch, ProxyAllocator<SBlendBatch, Arena>>;
BatchesStack batches((BatchesStack::allocator_type(arena)));
CShaderDefines contextBlend = context;
@ -906,7 +908,7 @@ void CPatchRData::RenderBlends(const std::vector<CPatchRData*>& patches, const C
// to avoid heavy reallocations
batches.reserve(256);
-typedef std::vector<SBlendStackItem, ProxyAllocator<SBlendStackItem, Allocators::DynamicArena > > BlendStacks;
+using BlendStacks = std::vector<SBlendStackItem, ProxyAllocator<SBlendStackItem, Arena>>;
BlendStacks blendStacks((BlendStacks::allocator_type(arena)));
blendStacks.reserve(patches.size());
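
A minimal sketch (not part of the commit) of how the new Arena alias and PooledBatchMap alias template replace the old sized DynamicArena constructor and the POOLED_BATCH_MAP macro; the types are taken from the diff above, the function name is invented for the example.

// Hypothetical helper, for illustration.
void ExampleBuildBatches()
{
	// Arena is DynamicArena<1 * MiB>; all of its blocks are dropped at once
	// when the arena goes out of scope at the end of the frame.
	Arena arena;

	// The map cannot be default-constructed because its allocator needs the
	// arena, so the comparator and allocator are passed explicitly.
	TextureBatches batches(TextureBatches::key_compare(),
		(TextureBatches::allocator_type(arena)));

	// Nested arena-backed containers are obtained via PooledMapGet/PooledPairGet
	// so that their allocators are wired to the same arena.
}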

View File

@ -1,4 +1,4 @@
-/* Copyright (C) 2020 Wildfire Games.
+/* Copyright (C) 2021 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -21,7 +21,6 @@
#include "ISerializer.h" #include "ISerializer.h"
#include "lib/byte_order.h" #include "lib/byte_order.h"
#include "lib/allocators/arena.h"
#include <iostream> #include <iostream>
#include <map> #include <map>