1
0
forked from 0ad/0ad

Separates allocated vertex buffers into groups for data locality.

Tested By: Langbart, OptimusShepard, Stan
Differential Revision: https://code.wildfiregames.com/D3522
This was SVN commit r24860.
This commit is contained in:
Vladislav Belov 2021-02-08 22:34:10 +00:00
parent f1e812bc32
commit 6d4fd01a59
7 changed files with 214 additions and 140 deletions

View File

@ -78,15 +78,15 @@ CPatchRData::CPatchRData(CPatch* patch, CSimulation2* simulation) :
CPatchRData::~CPatchRData()
{
// release vertex buffer chunks
if (m_VBSides) g_VBMan.Release(m_VBSides);
if (m_VBBase) g_VBMan.Release(m_VBBase);
if (m_VBBaseIndices) g_VBMan.Release(m_VBBaseIndices);
if (m_VBBlends) g_VBMan.Release(m_VBBlends);
if (m_VBBlendIndices) g_VBMan.Release(m_VBBlendIndices);
if (m_VBWater) g_VBMan.Release(m_VBWater);
if (m_VBWaterIndices) g_VBMan.Release(m_VBWaterIndices);
if (m_VBWaterShore) g_VBMan.Release(m_VBWaterShore);
if (m_VBWaterIndicesShore) g_VBMan.Release(m_VBWaterIndicesShore);
m_VBSides.Reset();
m_VBBase.Reset();
m_VBBaseIndices.Reset();
m_VBBlends.Reset();
m_VBBlendIndices.Reset();
m_VBWater.Reset();
m_VBWaterIndices.Reset();
m_VBWaterShore.Reset();
m_VBWaterIndicesShore.Reset();
}
/**
@ -290,31 +290,22 @@ void CPatchRData::BuildBlends()
}
// Release existing vertex buffer chunks
if (m_VBBlends)
{
g_VBMan.Release(m_VBBlends);
m_VBBlends = 0;
}
if (m_VBBlendIndices)
{
g_VBMan.Release(m_VBBlendIndices);
m_VBBlendIndices = 0;
}
m_VBBlends.Reset();
m_VBBlendIndices.Reset();
if (blendVertices.size())
{
// Construct vertex buffer
m_VBBlends = g_VBMan.Allocate(sizeof(SBlendVertex), blendVertices.size(), GL_STATIC_DRAW, GL_ARRAY_BUFFER);
m_VBBlends->m_Owner->UpdateChunkVertices(m_VBBlends, &blendVertices[0]);
m_VBBlends = g_VBMan.AllocateChunk(sizeof(SBlendVertex), blendVertices.size(), GL_STATIC_DRAW, GL_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::TERRAIN);
m_VBBlends->m_Owner->UpdateChunkVertices(m_VBBlends.Get(), &blendVertices[0]);
// Update the indices to include the base offset of the vertex data
for (size_t k = 0; k < blendIndices.size(); ++k)
blendIndices[k] += static_cast<u16>(m_VBBlends->m_Index);
m_VBBlendIndices = g_VBMan.Allocate(sizeof(u16), blendIndices.size(), GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER);
m_VBBlendIndices->m_Owner->UpdateChunkVertices(m_VBBlendIndices, &blendIndices[0]);
m_VBBlendIndices = g_VBMan.AllocateChunk(sizeof(u16), blendIndices.size(), GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::TERRAIN);
m_VBBlendIndices->m_Owner->UpdateChunkVertices(m_VBBlendIndices.Get(), &blendIndices[0]);
}
}
@ -509,17 +500,13 @@ void CPatchRData::BuildIndices()
}
// Release existing vertex buffer chunk
if (m_VBBaseIndices)
{
g_VBMan.Release(m_VBBaseIndices);
m_VBBaseIndices = 0;
}
m_VBBaseIndices.Reset();
ENSURE(indices.size());
// Construct vertex buffer
m_VBBaseIndices = g_VBMan.Allocate(sizeof(u16), indices.size(), GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER);
m_VBBaseIndices->m_Owner->UpdateChunkVertices(m_VBBaseIndices, &indices[0]);
m_VBBaseIndices = g_VBMan.AllocateChunk(sizeof(u16), indices.size(), GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::TERRAIN);
m_VBBaseIndices->m_Owner->UpdateChunkVertices(m_VBBaseIndices.Get(), &indices[0]);
}
@ -561,9 +548,9 @@ void CPatchRData::BuildVertices()
// upload to vertex buffer
if (!m_VBBase)
m_VBBase = g_VBMan.Allocate(sizeof(SBaseVertex), vsize * vsize, GL_STATIC_DRAW, GL_ARRAY_BUFFER);
m_VBBase = g_VBMan.AllocateChunk(sizeof(SBaseVertex), vsize * vsize, GL_STATIC_DRAW, GL_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::TERRAIN);
m_VBBase->m_Owner->UpdateChunkVertices(m_VBBase, &vertices[0]);
m_VBBase->m_Owner->UpdateChunkVertices(m_VBBase.Get(), &vertices[0]);
}
void CPatchRData::BuildSide(std::vector<SSideVertex>& vertices, CPatchSideFlags side)
@ -643,8 +630,8 @@ void CPatchRData::BuildSides()
return;
if (!m_VBSides)
m_VBSides = g_VBMan.Allocate(sizeof(SSideVertex), sideVertices.size(), GL_STATIC_DRAW, GL_ARRAY_BUFFER);
m_VBSides->m_Owner->UpdateChunkVertices(m_VBSides, &sideVertices[0]);
m_VBSides = g_VBMan.AllocateChunk(sizeof(SSideVertex), sideVertices.size(), GL_STATIC_DRAW, GL_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::DEFAULT);
m_VBSides->m_Owner->UpdateChunkVertices(m_VBSides.Get(), &sideVertices[0]);
}
void CPatchRData::Build()
@ -723,6 +710,8 @@ using ShaderTechniqueBatches = PooledBatchMap<CShaderTechniquePtr, TextureBatche
void CPatchRData::RenderBases(
const std::vector<CPatchRData*>& patches, const CShaderDefines& context, ShadowMap* shadow)
{
PROFILE3("render terrain bases");
Arena arena;
ShaderTechniqueBatches batches(ShaderTechniqueBatches::key_compare(), (ShaderTechniqueBatches::allocator_type(arena)));
@ -868,6 +857,8 @@ struct SBlendStackItem
void CPatchRData::RenderBlends(
const std::vector<CPatchRData*>& patches, const CShaderDefines& context, ShadowMap* shadow)
{
PROFILE3("render terrain blends");
Arena arena;
using BatchesStack = std::vector<SBlendBatch, ProxyAllocator<SBlendBatch, Arena>>;
@ -893,7 +884,7 @@ void CPatchRData::RenderBlends(
if (!patch->m_BlendSplats.empty())
{
blendStacks.push_back(SBlendStackItem(patch->m_VBBlends, patch->m_VBBlendIndices, patch->m_BlendSplats, arena));
blendStacks.push_back(SBlendStackItem(patch->m_VBBlends.Get(), patch->m_VBBlendIndices.Get(), patch->m_BlendSplats, arena));
// Reverse the splats so the first to be rendered is at the back of the list
std::reverse(blendStacks.back().splats.begin(), blendStacks.back().splats.end());
}
@ -1053,6 +1044,8 @@ void CPatchRData::RenderBlends(
void CPatchRData::RenderStreams(const std::vector<CPatchRData*>& patches, const CShaderProgramPtr& shader, int streamflags)
{
PROFILE3("render terrain streams");
// Each batch has a list of index counts, and a list of pointers-to-first-indexes
using StreamBatchElements = std::pair<std::vector<GLint>, std::vector<void*> > ;
@ -1152,29 +1145,35 @@ void CPatchRData::RenderOutline()
#endif
}
void CPatchRData::RenderSides(CShaderProgramPtr& shader)
void CPatchRData::RenderSides(const std::vector<CPatchRData*>& patches, const CShaderProgramPtr& shader)
{
ENSURE(m_UpdateFlags==0);
if (!m_VBSides)
return;
glDisable(GL_CULL_FACE);
SSideVertex *base = (SSideVertex *)m_VBSides->m_Owner->Bind();
CVertexBuffer* lastVB = nullptr;
for (CPatchRData* patch : patches)
{
ENSURE(patch->m_UpdateFlags == 0);
if (!patch->m_VBSides)
continue;
if (lastVB != patch->m_VBSides->m_Owner)
{
lastVB = patch->m_VBSides->m_Owner;
SSideVertex *base = (SSideVertex*)patch->m_VBSides->m_Owner->Bind();
// setup data pointers
GLsizei stride = sizeof(SSideVertex);
shader->VertexPointer(3, GL_FLOAT, stride, &base->m_Position);
// setup data pointers
GLsizei stride = sizeof(SSideVertex);
shader->VertexPointer(3, GL_FLOAT, stride, &base->m_Position);
}
shader->AssertPointersBound();
shader->AssertPointersBound();
if (!g_Renderer.m_SkipSubmit)
glDrawArrays(GL_TRIANGLE_STRIP, m_VBSides->m_Index, (GLsizei)m_VBSides->m_Count);
if (!g_Renderer.m_SkipSubmit)
glDrawArrays(GL_TRIANGLE_STRIP, patch->m_VBSides->m_Index, (GLsizei)patch->m_VBSides->m_Count);
// bump stats
g_Renderer.m_Stats.m_DrawCalls++;
g_Renderer.m_Stats.m_TerrainTris += m_VBSides->m_Count - 2;
// bump stats
g_Renderer.m_Stats.m_DrawCalls++;
g_Renderer.m_Stats.m_TerrainTris += patch->m_VBSides->m_Count - 2;
}
CVertexBuffer::Unbind();
@ -1220,26 +1219,11 @@ void CPatchRData::BuildWater()
// Number of vertices in each direction in each patch
ENSURE(PATCH_SIZE % water_cell_size == 0);
if (m_VBWater)
{
g_VBMan.Release(m_VBWater);
m_VBWater = nullptr;
}
if (m_VBWaterIndices)
{
g_VBMan.Release(m_VBWaterIndices);
m_VBWaterIndices = nullptr;
}
if (m_VBWaterShore)
{
g_VBMan.Release(m_VBWaterShore);
m_VBWaterShore = nullptr;
}
if (m_VBWaterIndicesShore)
{
g_VBMan.Release(m_VBWaterIndicesShore);
m_VBWaterIndicesShore = nullptr;
}
m_VBWater.Reset();
m_VBWaterIndices.Reset();
m_VBWaterShore.Reset();
m_VBWaterIndicesShore.Reset();
m_WaterBounds.SetEmpty();
// We need to use this to access the water manager or we may not have the
@ -1393,21 +1377,21 @@ void CPatchRData::BuildWater()
// No vertex buffers if no data generated
if (!water_indices.empty())
{
m_VBWater = g_VBMan.Allocate(sizeof(SWaterVertex), water_vertex_data.size(), GL_STATIC_DRAW, GL_ARRAY_BUFFER);
m_VBWater->m_Owner->UpdateChunkVertices(m_VBWater, &water_vertex_data[0]);
m_VBWater = g_VBMan.AllocateChunk(sizeof(SWaterVertex), water_vertex_data.size(), GL_STATIC_DRAW, GL_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::WATER);
m_VBWater->m_Owner->UpdateChunkVertices(m_VBWater.Get(), &water_vertex_data[0]);
m_VBWaterIndices = g_VBMan.Allocate(sizeof(GLushort), water_indices.size(), GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER);
m_VBWaterIndices->m_Owner->UpdateChunkVertices(m_VBWaterIndices, &water_indices[0]);
m_VBWaterIndices = g_VBMan.AllocateChunk(sizeof(GLushort), water_indices.size(), GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::WATER);
m_VBWaterIndices->m_Owner->UpdateChunkVertices(m_VBWaterIndices.Get(), &water_indices[0]);
}
if (!water_indices_shore.empty())
{
m_VBWaterShore = g_VBMan.Allocate(sizeof(SWaterVertex), water_vertex_data_shore.size(), GL_STATIC_DRAW, GL_ARRAY_BUFFER);
m_VBWaterShore->m_Owner->UpdateChunkVertices(m_VBWaterShore, &water_vertex_data_shore[0]);
m_VBWaterShore = g_VBMan.AllocateChunk(sizeof(SWaterVertex), water_vertex_data_shore.size(), GL_STATIC_DRAW, GL_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::WATER);
m_VBWaterShore->m_Owner->UpdateChunkVertices(m_VBWaterShore.Get(), &water_vertex_data_shore[0]);
// Construct indices buffer
m_VBWaterIndicesShore = g_VBMan.Allocate(sizeof(GLushort), water_indices_shore.size(), GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER);
m_VBWaterIndicesShore->m_Owner->UpdateChunkVertices(m_VBWaterIndicesShore, &water_indices_shore[0]);
m_VBWaterIndicesShore = g_VBMan.AllocateChunk(sizeof(GLushort), water_indices_shore.size(), GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER, nullptr, CVertexBufferManager::Group::WATER);
m_VBWaterIndicesShore->m_Owner->UpdateChunkVertices(m_VBWaterIndicesShore.Get(), &water_indices_shore[0]);
}
}
@ -1423,7 +1407,7 @@ void CPatchRData::RenderWater(CShaderProgramPtr& shader, bool onlyShore, bool fi
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
#endif
if (m_VBWater != 0x0 && !onlyShore)
if (m_VBWater && !onlyShore)
{
SWaterVertex *base=(SWaterVertex *)m_VBWater->m_Owner->Bind();
@ -1443,7 +1427,7 @@ void CPatchRData::RenderWater(CShaderProgramPtr& shader, bool onlyShore, bool fi
g_Renderer.m_Stats.m_WaterTris += m_VBWaterIndices->m_Count / 3;
}
if (m_VBWaterShore != 0x0 &&
if (m_VBWaterShore &&
g_Renderer.GetWaterManager()->m_WaterEffects &&
g_Renderer.GetWaterManager()->m_WaterFancyEffects)
{

View File

@ -46,7 +46,6 @@ public:
void Update(CSimulation2* simulation);
void RenderOutline();
void RenderSides(CShaderProgramPtr& shader);
void RenderPriorities(CTextRenderer& textRenderer);
void RenderWater(CShaderProgramPtr& shader, bool onlyShore = false, bool fixedPipeline = false);
@ -60,6 +59,7 @@ public:
static void RenderBlends(
const std::vector<CPatchRData*>& patches, const CShaderDefines& context, ShadowMap* shadow);
static void RenderStreams(const std::vector<CPatchRData*>& patches, const CShaderProgramPtr& shader, int streamflags);
static void RenderSides(const std::vector<CPatchRData*>& patches, const CShaderProgramPtr& shader);
static void PrepareShader(const CShaderProgramPtr& shader, ShadowMap* shadow);
@ -135,19 +135,19 @@ private:
CPatch* m_Patch;
// vertex buffer handle for side vertices
CVertexBuffer::VBChunk* m_VBSides;
CVertexBufferManager::Handle m_VBSides;
// vertex buffer handle for base vertices
CVertexBuffer::VBChunk* m_VBBase;
CVertexBufferManager::Handle m_VBBase;
// vertex buffer handle for base vertex indices
CVertexBuffer::VBChunk* m_VBBaseIndices;
CVertexBufferManager::Handle m_VBBaseIndices;
// vertex buffer handle for blend vertices
CVertexBuffer::VBChunk* m_VBBlends;
CVertexBufferManager::Handle m_VBBlends;
// vertex buffer handle for blend vertex indices
CVertexBuffer::VBChunk* m_VBBlendIndices;
CVertexBufferManager::Handle m_VBBlendIndices;
// list of base splats to apply to this patch
std::vector<SSplat> m_Splats;
@ -159,12 +159,12 @@ private:
CBoundingBoxAligned m_WaterBounds;
// Water vertex buffer
CVertexBuffer::VBChunk* m_VBWater;
CVertexBuffer::VBChunk* m_VBWaterShore;
CVertexBufferManager::Handle m_VBWater;
CVertexBufferManager::Handle m_VBWaterShore;
// Water indices buffer
CVertexBuffer::VBChunk* m_VBWaterIndices;
CVertexBuffer::VBChunk* m_VBWaterIndicesShore;
CVertexBufferManager::Handle m_VBWaterIndices;
CVertexBufferManager::Handle m_VBWaterIndicesShore;
CSimulation2* m_Simulation;

View File

@ -292,8 +292,7 @@ void TerrainRenderer::RenderTerrainShader(const CShaderDefines& context, int cul
shaderSolid->Uniform(str_color, 0.0f, 0.0f, 0.0f, 1.0f);
PROFILE_START("render terrain sides");
for (size_t i = 0; i < visiblePatches.size(); ++i)
visiblePatches[i]->RenderSides(shaderSolid);
CPatchRData::RenderSides(visiblePatches, shaderSolid);
PROFILE_END("render terrain sides");
techSolid->EndPass();

View File

@ -32,12 +32,17 @@
// Make it large enough for the maximum feasible mesh size (64K vertexes,
// 64 bytes per vertex in InstancingModelRenderer).
// TODO: measure what influence this has on performance
#define MAX_VB_SIZE_BYTES (4*1024*1024)
constexpr std::size_t MAX_VB_SIZE_BYTES = 4 * 1024 * 1024;
// Delegating constructor: create a vertex buffer using the default
// maximum buffer size (MAX_VB_SIZE_BYTES).
CVertexBuffer::CVertexBuffer(size_t vertexSize, GLenum usage, GLenum target)
	: CVertexBuffer(vertexSize, usage, target, MAX_VB_SIZE_BYTES)
{
}
CVertexBuffer::CVertexBuffer(size_t vertexSize, GLenum usage, GLenum target, size_t maximumBufferSize)
: m_VertexSize(vertexSize), m_Handle(0), m_SysMem(0), m_Usage(usage), m_Target(target), m_HasNeededChunks(false)
{
size_t size = MAX_VB_SIZE_BYTES;
size_t size = maximumBufferSize;
if (target == GL_ARRAY_BUFFER) // vertex data buffer
{
@ -348,7 +353,7 @@ size_t CVertexBuffer::GetBytesAllocated() const
return (m_MaxVertices - m_FreeVertices) * m_VertexSize;
}
void CVertexBuffer::DumpStatus()
void CVertexBuffer::DumpStatus() const
{
debug_printf("freeverts = %d\n", static_cast<int>(m_FreeVertices));

View File

@ -88,6 +88,7 @@ public:
public:
// constructor, destructor
CVertexBuffer(size_t vertexSize, GLenum usage, GLenum target);
CVertexBuffer(size_t vertexSize, GLenum usage, GLenum target, size_t maximumBufferSize);
~CVertexBuffer();
/// Bind to this buffer; return pointer to address required as parameter
@ -113,7 +114,7 @@ public:
/// Returns true if this vertex buffer is compatible with the specified vertex type and intended usage.
bool CompatibleVertexType(size_t vertexSize, GLenum usage, GLenum target) const;
void DumpStatus();
void DumpStatus() const;
/**
* Given the usage flags of a buffer that has been (or will be) allocated:

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2017 Wildfire Games.
/* Copyright (C) 2021 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -30,26 +30,68 @@
CVertexBufferManager g_VBMan;
///////////////////////////////////////////////////////////////////////////////
// Move constructor: take ownership of the chunk held by `other`,
// leaving `other` as an empty handle.
CVertexBufferManager::Handle::Handle(Handle&& other)
{
	// m_Chunk is default-initialized to nullptr; swapping transfers
	// other's chunk here and empties other in one step.
	swap(*this, other);
}
// Move assignment: release the currently owned chunk (if any), then
// take ownership of the chunk held by `other`, leaving it empty.
// Self-assignment is a no-op.
CVertexBufferManager::Handle& CVertexBufferManager::Handle::operator=(Handle&& other)
{
	if (this != &other)
	{
		// Reset() is a no-op on an empty handle, so no validity
		// check is needed here.
		Reset();
		m_Chunk = other.m_Chunk;
		other.m_Chunk = nullptr;
	}
	return *this;
}
// Takes ownership of an already-allocated chunk (may be null); the
// chunk is released back to g_VBMan on Reset() or destruction.
CVertexBufferManager::Handle::Handle(CVertexBuffer::VBChunk* chunk)
	: m_Chunk(chunk)
{
}
// Returns the owned chunk (if any) to the global vertex buffer manager
// and leaves this handle empty.
void CVertexBufferManager::Handle::Reset()
{
	// Nothing to release for an empty handle.
	if (!IsValid())
		return;
	g_VBMan.Release(m_Chunk);
	m_Chunk = nullptr;
}
// Explicit shutdown of the vertex buffer subsystem.
// This avoids the ordering issues that arise when using destructors of
// global instances.
void CVertexBufferManager::Shutdown()
{
typedef std::list<CVertexBuffer*>::iterator Iter;
for (Iter iter = m_Buffers.begin(); iter != m_Buffers.end(); ++iter)
delete *iter;
m_Buffers.clear();
for (int group = static_cast<int>(Group::DEFAULT); group < static_cast<int>(Group::COUNT); ++group)
m_Buffers[group].clear();
}
// Legacy allocation entry point: allocates from the DEFAULT group and
// returns a raw chunk pointer that the caller must Release() manually.
// Prefer AllocateChunk(), which returns an RAII Handle.
CVertexBuffer::VBChunk* CVertexBufferManager::Allocate(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore)
{
	return AllocateImpl(vertexSize, numVertices, usage, target, backingStore, Group::DEFAULT);
}
// RAII variant of Allocate: wraps the allocated chunk in a Handle so it
// is released automatically. Returns an empty Handle if the allocation
// failed.
CVertexBufferManager::Handle CVertexBufferManager::AllocateChunk(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore, Group group)
{
	CVertexBuffer::VBChunk* const allocated = AllocateImpl(vertexSize, numVertices, usage, target, backingStore, group);
	return allocated ? Handle(allocated) : Handle();
}
///////////////////////////////////////////////////////////////////////////////
// Allocate: try to allocate a buffer of given number of vertices (each of
// given size), with the given type, and using the given texture - return null
// if no free chunks available
CVertexBuffer::VBChunk* CVertexBufferManager::Allocate(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore)
CVertexBuffer::VBChunk* CVertexBufferManager::AllocateImpl(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore, Group group)
{
CVertexBuffer::VBChunk* result=0;
CVertexBuffer::VBChunk* result = nullptr;
ENSURE(usage == GL_STREAM_DRAW || usage == GL_STATIC_DRAW || usage == GL_DYNAMIC_DRAW);
@ -60,15 +102,15 @@ CVertexBuffer::VBChunk* CVertexBufferManager::Allocate(size_t vertexSize, size_t
// TODO, RC - run some sanity checks on allocation request
typedef std::list<CVertexBuffer*>::iterator Iter;
std::list<std::unique_ptr<CVertexBuffer>>& buffers = m_Buffers[static_cast<int>(group)];
#if DUMP_VB_STATS
debug_printf("\n============================\n# allocate vsize=%zu nverts=%zu\n\n", vertexSize, numVertices);
for (Iter iter = m_Buffers.begin(); iter != m_Buffers.end(); ++iter) {
CVertexBuffer* buffer = *iter;
for (const std::unique_ptr<CVertexBuffer>& buffer : buffers)
{
if (buffer->CompatibleVertexType(vertexSize, usage, target))
{
debug_printf("%p\n", buffer);
debug_printf("%p\n", buffer.get());
buffer->DumpStatus();
}
}
@ -76,17 +118,20 @@ CVertexBuffer::VBChunk* CVertexBufferManager::Allocate(size_t vertexSize, size_t
// iterate through all existing buffers testing for one that'll
// satisfy the allocation
for (Iter iter = m_Buffers.begin(); iter != m_Buffers.end(); ++iter) {
CVertexBuffer* buffer = *iter;
for (const std::unique_ptr<CVertexBuffer>& buffer : buffers)
{
result = buffer->Allocate(vertexSize, numVertices, usage, target, backingStore);
if (result)
return result;
}
// got this far; need to allocate a new buffer
CVertexBuffer* buffer = new CVertexBuffer(vertexSize, usage, target);
m_Buffers.push_front(buffer);
result = buffer->Allocate(vertexSize, numVertices, usage, target, backingStore);
buffers.emplace_back(
group == Group::DEFAULT
? std::make_unique<CVertexBuffer>(vertexSize, usage, target)
// Use a smaller buffer size for the less frequently used groups.
: std::make_unique<CVertexBuffer>(vertexSize, usage, target, 1024 * 1024));
result = buffers.back()->Allocate(vertexSize, numVertices, usage, target, backingStore);
if (!result)
{
@ -96,8 +141,6 @@ CVertexBuffer::VBChunk* CVertexBufferManager::Allocate(size_t vertexSize, size_t
return result;
}
///////////////////////////////////////////////////////////////////////////////
// Release: return given chunk to its owner
void CVertexBufferManager::Release(CVertexBuffer::VBChunk* chunk)
{
ENSURE(chunk);
@ -107,25 +150,20 @@ void CVertexBufferManager::Release(CVertexBuffer::VBChunk* chunk)
chunk->m_Owner->Release(chunk);
}
size_t CVertexBufferManager::GetBytesReserved()
size_t CVertexBufferManager::GetBytesReserved() const
{
size_t total = 0;
typedef std::list<CVertexBuffer*>::iterator Iter;
for (Iter iter = m_Buffers.begin(); iter != m_Buffers.end(); ++iter)
total += (*iter)->GetBytesReserved();
for (int group = static_cast<int>(Group::DEFAULT); group < static_cast<int>(Group::COUNT); ++group)
for (const std::unique_ptr<CVertexBuffer>& buffer : m_Buffers[static_cast<int>(group)])
total += buffer->GetBytesReserved();
return total;
}
size_t CVertexBufferManager::GetBytesAllocated()
size_t CVertexBufferManager::GetBytesAllocated() const
{
size_t total = 0;
typedef std::list<CVertexBuffer*>::iterator Iter;
for (Iter iter = m_Buffers.begin(); iter != m_Buffers.end(); ++iter)
total += (*iter)->GetBytesAllocated();
for (int group = static_cast<int>(Group::DEFAULT); group < static_cast<int>(Group::COUNT); ++group)
for (const std::unique_ptr<CVertexBuffer>& buffer : m_Buffers[static_cast<int>(group)])
total += buffer->GetBytesAllocated();
return total;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2012 Wildfire Games.
/* Copyright (C) 2021 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -22,14 +22,58 @@
#ifndef INCLUDED_VERTEXBUFFERMANAGER
#define INCLUDED_VERTEXBUFFERMANAGER
#include "VertexBuffer.h"
#include "lib/types.h"
#include "renderer/VertexBuffer.h"
#include <memory>
#include <utility>
///////////////////////////////////////////////////////////////////////////////
// CVertexBufferManager: owner object for CVertexBuffer objects; acts as
// 'front end' for their allocation and destruction
class CVertexBufferManager
{
public:
// Buffer pools are kept per group so that chunks with similar usage
// (terrain, water, everything else) end up in the same underlying
// buffers, improving data locality.
enum class Group : u32
{
	DEFAULT,
	TERRAIN,
	WATER,
	// Number of groups; used to size the per-group buffer array.
	COUNT
};
// Helper for automatic VBChunk lifetime management.
class Handle
{
public:
	// Constructs an empty (invalid) handle.
	Handle() = default;
	// Non-copyable: each chunk has exactly one owning handle.
	Handle(const Handle&) = delete;
	Handle& operator=(const Handle&) = delete;

	// Takes ownership of the given chunk (may be null).
	explicit Handle(CVertexBuffer::VBChunk* chunk);
	// Movable: ownership is transferred and the source becomes empty.
	Handle(Handle&& other);
	Handle& operator=(Handle&& other);

	// Releases the owned chunk, if any.
	~Handle() { Reset(); }

	// True if this handle currently owns a chunk.
	bool IsValid() const { return m_Chunk != nullptr; }
	explicit operator bool() const { return IsValid(); }
	bool operator!() const { return !static_cast<bool>(*this); }

	// Returns the owned chunk (if any) to the manager and empties
	// this handle.
	void Reset();

	friend void swap(Handle& lhs, Handle& rhs)
	{
		using std::swap;
		swap(lhs.m_Chunk, rhs.m_Chunk);
	}

	// Pointer-like access to the owned chunk. Dereferencing an empty
	// handle is undefined (m_Chunk is nullptr).
	CVertexBuffer::VBChunk& operator*() const { return *m_Chunk; }
	CVertexBuffer::VBChunk* operator->() const { return m_Chunk; }
	CVertexBuffer::VBChunk* Get() const { return m_Chunk; }

private:
	CVertexBuffer::VBChunk* m_Chunk = nullptr;
};
/**
* Try to allocate a vertex buffer of the given size and type.
@ -43,23 +87,26 @@ public:
* lifetime of the VBChunk
* @return chunk, or NULL if no free chunks available
*/
CVertexBuffer::VBChunk* Allocate(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore = NULL);
CVertexBuffer::VBChunk* Allocate(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore = nullptr);
/// Returns the given @p chunk to its owning buffer
void Release(CVertexBuffer::VBChunk* chunk);
/// Returns a list of all buffers
const std::list<CVertexBuffer*>& GetBufferList() const { return m_Buffers; }
// Same as the Allocate function, but returns a Handle. Should be used
// instead of Allocate.
Handle AllocateChunk(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore = nullptr, Group group = Group::DEFAULT);
size_t GetBytesReserved();
size_t GetBytesAllocated();
size_t GetBytesReserved() const;
size_t GetBytesAllocated() const;
/// Explicit shutdown of the vertex buffer subsystem; releases all currently-allocated buffers.
void Shutdown();
private:
CVertexBuffer::VBChunk* AllocateImpl(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore = nullptr, Group group = Group::DEFAULT);
/// List of all known vertex buffers
std::list<CVertexBuffer*> m_Buffers;
std::list<std::unique_ptr<CVertexBuffer>> m_Buffers[static_cast<std::size_t>(Group::COUNT)];
};
extern CVertexBufferManager g_VBMan;