2021-02-05 00:26:37 +01:00
|
|
|
/* Copyright (C) 2021 Wildfire Games.
|
2009-04-18 19:00:33 +02:00
|
|
|
* This file is part of 0 A.D.
|
|
|
|
*
|
|
|
|
* 0 A.D. is free software: you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation, either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* 0 A.D. is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
2004-06-11 00:24:03 +02:00
|
|
|
#include "precompiled.h"
|
2021-02-05 00:26:37 +01:00
|
|
|
|
|
|
|
#include "VertexBuffer.h"
|
|
|
|
|
2006-06-02 04:10:27 +02:00
|
|
|
#include "lib/ogl.h"
|
2010-01-09 20:20:14 +01:00
|
|
|
#include "lib/sysdep/cpu.h"
|
2004-06-11 00:24:03 +02:00
|
|
|
#include "Renderer.h"
|
2004-07-21 13:50:23 +02:00
|
|
|
#include "ps/CLogger.h"
|
2021-02-05 00:26:37 +01:00
|
|
|
#include "ps/Errors.h"
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
#include <iterator>
|
2004-07-21 13:50:23 +02:00
|
|
|
|
2015-01-28 01:48:00 +01:00
|
|
|
// Absolute maximum (bytewise) size of each GL vertex buffer object.
// Make it large enough for the maximum feasible mesh size (64K vertexes,
// 64 bytes per vertex in InstancingModelRenderer).
// Note: this is only the default; the CVertexBuffer constructor taking an
// explicit maximumBufferSize may create smaller buffers, and GL_ARRAY_BUFFER
// buffers are additionally clamped to vertexSize * 64K.
// TODO: measure what influence this has on performance
constexpr std::size_t MAX_VB_SIZE_BYTES = 4 * 1024 * 1024;
|
2015-01-28 01:48:00 +01:00
|
|
|
|
2011-03-13 19:58:09 +01:00
|
|
|
// Convenience constructor: creates a buffer with the default maximum size
// (MAX_VB_SIZE_BYTES), delegating to the full constructor below.
CVertexBuffer::CVertexBuffer(size_t vertexSize, GLenum usage, GLenum target)
	: CVertexBuffer(vertexSize, usage, target, MAX_VB_SIZE_BYTES)
{
}
|
|
|
|
|
|
|
|
// Full constructor: allocates a single GL buffer object of (at most)
// maximumBufferSize bytes and seeds the free list with one chunk spanning
// the whole buffer.
CVertexBuffer::CVertexBuffer(size_t vertexSize, GLenum usage, GLenum target, size_t maximumBufferSize)
	: m_VertexSize(vertexSize), m_Handle(0), m_Usage(usage), m_Target(target), m_HasNeededChunks(false)
{
	// Vertex data buffers are addressed with 16-bit indices, so never make
	// them larger than vertexSize * 64K bytes - we could not index past the
	// first 64K vertices anyway.
	size_t bufferSizeInBytes = maximumBufferSize;
	if (target == GL_ARRAY_BUFFER)
		bufferSizeInBytes = std::min(bufferSizeInBytes, vertexSize * 65536);

	// Derive the vertex capacity; initially the whole buffer is free.
	m_MaxVertices = m_FreeVertices = bufferSizeInBytes / vertexSize;

	// Create the GL buffer object and reserve its storage (contents undefined).
	pglGenBuffersARB(1, &m_Handle);
	pglBindBufferARB(m_Target, m_Handle);
	pglBufferDataARB(m_Target, m_MaxVertices * m_VertexSize, 0, m_Usage);
	pglBindBufferARB(m_Target, 0);

	// The free list starts out as one chunk covering the entire buffer.
	VBChunk* initialChunk = new VBChunk;
	initialChunk->m_Owner = this;
	initialChunk->m_Index = 0;
	initialChunk->m_Count = m_FreeVertices;
	m_FreeList.emplace_back(initialChunk);
}
|
|
|
|
|
|
|
|
// Destructor: frees the GL buffer object and the free-list bookkeeping.
// All allocated chunks must have been Release()d by their owners first.
CVertexBuffer::~CVertexBuffer()
{
	// Catch leaked chunks: destroying the buffer while allocations are
	// outstanding would leave dangling VBChunk pointers in callers.
	ENSURE(m_AllocList.empty());

	// Delete the remaining free-list chunks (these are owned by us).
	for (VBChunk* const& freeChunk : m_FreeList)
		delete freeChunk;

	// Release the GL buffer object, if it was successfully created.
	if (m_Handle != 0)
		pglDeleteBuffersARB(1, &m_Handle);
}
|
|
|
|
|
2011-07-30 02:56:45 +02:00
|
|
|
|
2021-02-05 00:48:30 +01:00
|
|
|
bool CVertexBuffer::CompatibleVertexType(size_t vertexSize, GLenum usage, GLenum target) const
|
2011-07-30 02:56:45 +02:00
|
|
|
{
|
2021-02-05 00:26:37 +01:00
|
|
|
return usage == m_Usage && target == m_Target && vertexSize == m_VertexSize;
|
2011-07-30 02:56:45 +02:00
|
|
|
}
|
|
|
|
|
2004-06-11 00:24:03 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
2016-11-23 14:02:58 +01:00
|
|
|
// Allocate: try to allocate a buffer of given number of vertices (each of
|
|
|
|
// given size), with the given type, and using the given texture - return null
|
2004-06-11 00:24:03 +02:00
|
|
|
// if no free chunks available
|
2015-01-28 01:48:00 +01:00
|
|
|
// Allocate numVertices vertices of the given size/usage/target from this
// buffer, splitting a free-list chunk as needed.
// Returns nullptr if this buffer is incompatible or lacks a large enough
// contiguous free chunk.
// backingStore: caller-owned shadow copy of the vertex data; required
// (non-null) for streaming buffers, which re-upload from it in Bind().
CVertexBuffer::VBChunk* CVertexBuffer::Allocate(size_t vertexSize, size_t numVertices, GLenum usage, GLenum target, void* backingStore)
{
	// check this is the right kind of buffer
	if (!CompatibleVertexType(vertexSize, usage, target))
		return nullptr;

	// Streaming buffers are re-uploaded from the backing store each frame,
	// so it must exist.
	if (UseStreaming(usage))
		ENSURE(backingStore != nullptr);

	// quick check there's enough vertices spare to allocate
	if (numVertices > m_FreeVertices)
		return nullptr;

	// Trawl the free list for a suitable chunk: prefer an exact size match
	// (no split needed); otherwise take the *largest* chunk that fits, to
	// keep the leftover piece big and limit fragmentation.
	std::vector<VBChunk*>::iterator best_iter = m_FreeList.end();
	for (std::vector<VBChunk*>::iterator iter = m_FreeList.begin(); iter != m_FreeList.end(); ++iter)
	{
		if (numVertices == (*iter)->m_Count)
		{
			// Exact fit - stop searching immediately.
			best_iter = iter;
			break;
		}
		else if (numVertices < (*iter)->m_Count && (best_iter == m_FreeList.end() || (*best_iter)->m_Count < (*iter)->m_Count))
			best_iter = iter;
	}

	// We could not find a large enough chunk.
	if (best_iter == m_FreeList.end())
		return nullptr;

	// Claim the chunk: remove from the free list and account for its vertices.
	VBChunk* chunk = *best_iter;
	m_FreeList.erase(best_iter);
	m_FreeVertices -= chunk->m_Count;

	// Reset per-allocation streaming state: clean (in sync with VBO) and
	// not yet needed for rendering this frame.
	chunk->m_BackingStore = backingStore;
	chunk->m_Dirty = false;
	chunk->m_Needed = false;

	// split chunk into two; - allocate a new chunk using all unused vertices in the
	// found chunk, and add it to the free list
	if (chunk->m_Count > numVertices)
	{
		VBChunk* newchunk = new VBChunk;
		newchunk->m_Owner = this;
		newchunk->m_Count = chunk->m_Count - numVertices;
		newchunk->m_Index = chunk->m_Index + numVertices;
		m_FreeList.emplace_back(newchunk);
		m_FreeVertices += newchunk->m_Count;

		// resize given chunk
		chunk->m_Count = numVertices;
	}

	// return found chunk
	m_AllocList.push_back(chunk);
	return chunk;
}
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
// Release: return given chunk to this buffer
|
|
|
|
// Release: return given chunk to this buffer's free list, coalescing it with
// any adjacent free chunks to fight fragmentation.
void CVertexBuffer::Release(VBChunk* chunk)
{
	// Update total free count before potentially modifying this chunk's count
	m_FreeVertices += chunk->m_Count;

	// The chunk must currently be allocated from this buffer.
	m_AllocList.erase(std::find(m_AllocList.begin(), m_AllocList.end(), chunk));

	// Keep the free list ordered by buffer offset so that neighbours of the
	// released chunk appear consecutively during the merge scan below.
	// Sorting O(nlogn) shouldn't be too far from O(n) by performance, because
	// the container is partly sorted already.
	std::sort(
		m_FreeList.begin(), m_FreeList.end(),
		[](const VBChunk* chunk1, const VBChunk* chunk2) -> bool
		{
			return chunk1->m_Index < chunk2->m_Index;
		});

	// Coalesce with any free-list items that are adjacent to this chunk;
	// merge the found chunk with the new one, and remove the old one
	// from the list.
	for (std::vector<VBChunk*>::iterator iter = m_FreeList.begin(); iter != m_FreeList.end();)
	{
		// Adjacent either immediately after (*iter starts where chunk ends)
		// or immediately before (*iter ends where chunk starts).
		if ((*iter)->m_Index == chunk->m_Index + chunk->m_Count
			|| (*iter)->m_Index + (*iter)->m_Count == chunk->m_Index)
		{
			// Grow the released chunk to cover both, then drop the neighbour.
			chunk->m_Index = std::min(chunk->m_Index, (*iter)->m_Index);
			chunk->m_Count += (*iter)->m_Count;
			delete *iter;
			iter = m_FreeList.erase(iter);
			// Step back one entry: after growing, the chunk may now also be
			// adjacent to the entry preceding the erased one.
			if (!m_FreeList.empty() && iter != m_FreeList.begin())
				iter = std::prev(iter);
		}
		else
		{
			++iter;
		}
	}

	// Finally put the (possibly merged) chunk back on the free list.
	m_FreeList.emplace_back(chunk);
}
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
// UpdateChunkVertices: update vertex data for given chunk
|
2012-04-22 06:04:02 +02:00
|
|
|
// UpdateChunkVertices: update vertex data for given chunk.
// Non-streaming buffers are updated on the GPU immediately; streaming
// buffers are merely flagged dirty and re-uploaded from the backing store
// during Bind().
void CVertexBuffer::UpdateChunkVertices(VBChunk* chunk, void* data)
{
	ENSURE(m_Handle);

	if (!UseStreaming(m_Usage))
	{
		// Push the new data straight into the chunk's region of the VBO.
		pglBindBufferARB(m_Target, m_Handle);
		pglBufferSubDataARB(m_Target, chunk->m_Index * m_VertexSize, chunk->m_Count * m_VertexSize, data);
		pglBindBufferARB(m_Target, 0);
	}
	else
	{
		// The VBO is now out of sync with the backing store; Bind() will
		// upload it when the chunk is next needed.
		chunk->m_Dirty = true;

		// Sanity check: Make sure the caller hasn't tried to reallocate
		// their backing store
		ENSURE(data == chunk->m_BackingStore);
	}
}
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
// Bind: bind to this buffer; return pointer to address required as parameter
|
|
|
|
// to glVertexPointer ( + etc) calls
|
|
|
|
// Bind: bind to this buffer; return pointer to address required as parameter
// to glVertexPointer ( + etc) calls.
// For streaming buffers this is also where dirty, needed chunks are uploaded:
// the VBO storage is orphaned and the needed chunks are copied back in from
// their backing stores via glMapBuffer.
u8* CVertexBuffer::Bind()
{
	pglBindBufferARB(m_Target, m_Handle);

	if (UseStreaming(m_Usage))
	{
		// Nothing was flagged via PrepareForRendering() since the last Bind,
		// so there is nothing to upload.
		if (!m_HasNeededChunks)
			return nullptr;

		// If any chunks are out of sync with the current VBO, and are
		// needed for rendering this frame, we'll need to re-upload the VBO
		bool needUpload = false;
		for (VBChunk* const& chunk : m_AllocList)
		{
			if (chunk->m_Dirty && chunk->m_Needed)
			{
				needUpload = true;
				break;
			}
		}

		if (needUpload)
		{
			// Tell the driver that it can reallocate the whole VBO
			pglBufferDataARB(m_Target, m_MaxVertices * m_VertexSize, NULL, m_Usage);

			// (In theory, glMapBufferRange with GL_MAP_INVALIDATE_BUFFER_BIT could be used
			// here instead of glBufferData(..., NULL, ...) plus glMapBuffer(), but with
			// current Intel Windows GPU drivers (as of 2015-01) it's much faster if you do
			// the explicit glBufferData.)

			while (true)
			{
				void* p = pglMapBufferARB(m_Target, GL_WRITE_ONLY);
				if (p == NULL)
				{
					// This shouldn't happen unless we run out of virtual address space
					LOGERROR("glMapBuffer failed");
					break;
				}

#ifndef NDEBUG
				// To help detect bugs where PrepareForRendering() was not called,
				// force all not-needed data to 0, so things won't get rendered
				// with undefined (but possibly still correct-looking) data.
				memset(p, 0, m_MaxVertices * m_VertexSize);
#endif

				// Copy only the chunks we need. (This condition is helpful when
				// the VBO contains data for every unit in the world, but only a
				// handful are visible on screen and we don't need to bother copying
				// the rest.)
				for (VBChunk* const& chunk : m_AllocList)
					if (chunk->m_Needed)
						memcpy((u8 *)p + chunk->m_Index * m_VertexSize, chunk->m_BackingStore, chunk->m_Count * m_VertexSize);

				if (pglUnmapBufferARB(m_Target) == GL_TRUE)
					break;

				// Unmap might fail on e.g. resolution switches, so just try again
				// and hope it will eventually succeed
				debug_printf("glUnmapBuffer failed, trying again...\n");
			}

			// Anything we just uploaded is clean; anything else is dirty
			// since the rest of the VBO content is now undefined
			for (VBChunk* const& chunk : m_AllocList)
			{
				if (chunk->m_Needed)
				{
					chunk->m_Dirty = false;
					chunk->m_Needed = false;
				}
				else
					chunk->m_Dirty = true;
			}
		}
		else
		{
			// Reset the flags for the next phase.
			for (VBChunk* const& chunk : m_AllocList)
				chunk->m_Needed = false;
		}

		m_HasNeededChunks = false;
	}

	// Chunk offsets are expressed relative to the start of the bound buffer,
	// so the base pointer passed to glVertexPointer etc. is null.
	return nullptr;
}
|
2010-05-30 15:42:56 +02:00
|
|
|
|
|
|
|
void CVertexBuffer::Unbind()
|
|
|
|
{
|
2021-06-04 00:06:59 +02:00
|
|
|
pglBindBufferARB(GL_ARRAY_BUFFER, 0);
|
|
|
|
pglBindBufferARB(GL_ELEMENT_ARRAY_BUFFER, 0);
|
2010-05-30 15:42:56 +02:00
|
|
|
}
|
2011-03-13 19:58:09 +01:00
|
|
|
|
|
|
|
size_t CVertexBuffer::GetBytesReserved() const
|
|
|
|
{
|
|
|
|
return MAX_VB_SIZE_BYTES;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t CVertexBuffer::GetBytesAllocated() const
|
|
|
|
{
|
|
|
|
return (m_MaxVertices - m_FreeVertices) * m_VertexSize;
|
|
|
|
}
|
2011-07-30 02:56:45 +02:00
|
|
|
|
2021-02-08 23:34:10 +01:00
|
|
|
void CVertexBuffer::DumpStatus() const
|
2011-07-30 02:56:45 +02:00
|
|
|
{
|
2021-02-05 00:26:37 +01:00
|
|
|
debug_printf("freeverts = %d\n", static_cast<int>(m_FreeVertices));
|
2011-07-30 02:56:45 +02:00
|
|
|
|
|
|
|
size_t maxSize = 0;
|
2021-02-05 00:26:37 +01:00
|
|
|
for (VBChunk* const& chunk : m_FreeList)
|
2011-07-30 02:56:45 +02:00
|
|
|
{
|
2021-02-05 00:26:37 +01:00
|
|
|
debug_printf("free chunk %p: size=%d\n", static_cast<void *>(chunk), static_cast<int>(chunk->m_Count));
|
|
|
|
maxSize = std::max(chunk->m_Count, maxSize);
|
2011-07-30 02:56:45 +02:00
|
|
|
}
|
2021-02-05 00:26:37 +01:00
|
|
|
debug_printf("max size = %d\n", static_cast<int>(maxSize));
|
2011-07-30 02:56:45 +02:00
|
|
|
}
|
2015-01-28 01:48:00 +01:00
|
|
|
|
|
|
|
// Returns whether buffers with the given usage hint take the streaming path:
// updates go to a CPU-side backing store and are re-uploaded in Bind(),
// instead of being pushed to the GPU immediately.
bool CVertexBuffer::UseStreaming(GLenum usage)
{
	switch (usage)
	{
	case GL_DYNAMIC_DRAW:
	case GL_STREAM_DRAW:
		return true;
	default:
		return false;
	}
}
|
2021-02-05 00:26:37 +01:00
|
|
|
|
|
|
|
// Mark a chunk as needed for rendering this frame, so the next Bind() will
// upload it (if dirty) rather than skipping it.
void CVertexBuffer::PrepareForRendering(VBChunk* chunk)
{
	m_HasNeededChunks = true;
	chunk->m_Needed = true;
}
|