replace the old pool_allocator/RawPoolAllocator with a ProxyAllocator that draws upon the new-style Arena (more efficient, avoids slow VirtualAlloc)
This was SVN commit r10410.
parent 1c081135ad
commit 8af8326563
@@ -92,4 +92,96 @@ struct Allocator_AddressSpace
}
};

+
+/**
+ * fully STL-compatible allocator that simply draws upon another Allocator.
+ * this allows a single allocator to serve multiple STL containers.
+ */
+template<typename T, class Allocator>
+class ProxyAllocator
+{
+public:
+	typedef T value_type;
+	typedef T* pointer;
+	typedef const T* const_pointer;
+	typedef T& reference;
+	typedef const T& const_reference;
+	typedef std::size_t size_type;
+	typedef std::ptrdiff_t difference_type;
+
+	template<class U>
+	struct rebind
+	{
+		typedef ProxyAllocator<U, Allocator> other;
+	};
+
+	explicit NOTHROW_DEFINE ProxyAllocator(Allocator& allocator)
+		: allocator(&allocator)
+	{
+	}
+
+	template<typename U, class A>
+	NOTHROW_DEFINE ProxyAllocator(const ProxyAllocator<U,A>& rhs)
+		: allocator(rhs.allocator)
+	{
+	}
+
+	// (required by VC2010 std::vector)
+	bool operator==(const ProxyAllocator& rhs) const
+	{
+		return allocator == rhs.allocator;
+	}
+	bool operator!=(const ProxyAllocator& rhs) const
+	{
+		return !operator==(rhs);
+	}
+
+	pointer address(reference r)
+	{
+		return &r;
+	}
+
+	const_pointer address(const_reference s)
+	{
+		return &s;
+	}
+
+	size_type max_size() const throw ()
+	{
+		return std::numeric_limits<std::size_t>::max() / sizeof(T);
+	}
+
+	void construct(const pointer ptr, const value_type& t)
+	{
+		new(ptr) T(t);
+	}
+
+	void destroy(pointer ptr)
+	{
+		ptr->~T();
+		UNUSED2(ptr); // silence MSVC warnings
+	}
+
+	pointer allocate(size_type n)
+	{
+		// safely handle zero-sized allocations (happens with GCC STL - see ticket #909).
+		if(n == 0)
+			n = 1;
+		return (pointer)allocator->allocate(n*sizeof(T));
+	}
+
+	pointer allocate(size_type n, const void* const)
+	{
+		return allocate(n);
+	}
+
+	void deallocate(const pointer ptr, const size_type n)
+	{
+		return allocator->deallocate(ptr, n*sizeof(T));
+	}
+
+	//private: // otherwise copy ctor cannot access it
+	Allocator* allocator;
+};
+
#endif // #ifndef ALLOCATOR_ADAPTERS
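For orientation, here is a minimal usage sketch of the new allocator (not part of the commit): one Arena backs several STL containers through ProxyAllocator. The function name, container choices and the 1*MiB capacity are illustrative assumptions, and the relevant headers are assumed to be included; Arena's allocate/deallocate/DeallocateAll come from the hunks below.

#include <map>
#include <vector>

void ProxyAllocatorExample()	// hypothetical helper, for illustration only
{
	Allocators::Arena<> arena(1*MiB);	// one arena serving several containers

	typedef ProxyAllocator<int, Allocators::Arena<> > IntAlloc;
	std::vector<int, IntAlloc> numbers((IntAlloc(arena)));	// extra parens avoid the most-vexing parse

	typedef std::pair<const int, float> MapValue;
	typedef ProxyAllocator<MapValue, Allocators::Arena<> > MapAlloc;
	std::map<int, float, std::less<int>, MapAlloc> lookup(std::less<int>(), (MapAlloc(arena)));

	numbers.push_back(42);
	lookup.insert(std::make_pair(1, 2.0f));

	// nothing is freed per element: Arena::deallocate is a no-op, and all memory
	// is reclaimed at once when the arena goes out of scope (or via DeallocateAll)
}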
@@ -36,7 +36,7 @@ struct BasicArenaTest
	{
		Arena<Storage> a(100);
		const size_t initialSpace = a.RemainingBytes();
-		void* p = a.Allocate(100);
+		void* p = a.allocate(100);
		ENSURE(p != 0);
		ENSURE(a.Contains(uintptr_t(p)));
		ENSURE(a.RemainingBytes() == initialSpace-100);
@@ -45,25 +45,25 @@ struct BasicArenaTest
		ENSURE(!a.Contains(uintptr_t(p)-1));
		ENSURE(!a.Contains(uintptr_t(p)+100));
		if(a.RemainingBytes() == 0)
-			ENSURE(a.Allocate(1) == 0); // full
+			ENSURE(a.allocate(1) == 0); // full
		else
-			ENSURE(a.Allocate(1) != 0); // can still expand
+			ENSURE(a.allocate(1) != 0); // can still expand
		a.DeallocateAll();
		ENSURE(!a.Contains(uintptr_t(p)));

-		p = a.Allocate(36);
+		p = a.allocate(36);
		ENSURE(p != 0);
		ENSURE(a.Contains(uintptr_t(p)));
		ENSURE(a.RemainingBytes() == initialSpace-36);
-		void* p2 = a.Allocate(64);
+		void* p2 = a.allocate(64);
		ENSURE(p2 != 0);
		ENSURE(a.Contains(uintptr_t(p2)));
		ENSURE(a.RemainingBytes() == initialSpace-36-64);
		ENSURE(p2 == (void*)(uintptr_t(p)+36));
		if(a.RemainingBytes() == 0)
-			ENSURE(a.Allocate(1) == 0); // full
+			ENSURE(a.allocate(1) == 0); // full
		else
-			ENSURE(a.Allocate(1) != 0); // can still expand
+			ENSURE(a.allocate(1) != 0); // can still expand
	}
};

@@ -39,7 +39,7 @@ namespace Allocators {
 * - consecutive allocations are back-to-back;
 * - no extra alignment nor padding.
 **/
-template<class Storage>
+template<class Storage = Storage_Fixed<> >
class Arena
{
	NONCOPYABLE(Arena);
@@ -55,11 +55,16 @@ public:
		return storage.MaxCapacity() - end;
	}

-	void* Allocate(size_t size)
+	void* allocate(size_t size)
	{
		return (void*)StorageAppend(storage, end, size);
	}

+	void deallocate(void* UNUSED(p), size_t UNUSED(size))
+	{
+		// ignored
+	}
+
	void DeallocateAll()
	{
		end = 0;
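StorageAppend and the Storage policy classes are not part of this diff. The following bump-pointer sketch (an assumption for illustration, not the project's implementation; Address() and MaxCapacity() are assumed accessors) shows the behaviour the Arena and its tests rely on: consecutive, back-to-back allocations that fail by returning 0 once capacity is exhausted.

// assumed interface: storage.Address() returns the base address,
// storage.MaxCapacity() the total capacity in bytes
template<class Storage>
static uintptr_t StorageAppendSketch(Storage& storage, size_t& end, size_t size)
{
	if(end + size > storage.MaxCapacity())
		return 0;	// full (a growable Storage could commit more memory here instead)
	const uintptr_t address = storage.Address() + end;	// back-to-back, no padding
	end += size;
	return address;
}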
@@ -217,150 +217,4 @@ LIB_API void pool_free_all(Pool* p);
**/
LIB_API size_t pool_committed(Pool* p);

-
-/**
- * C++ wrapper on top of pool_alloc for variable-sized allocations.
- * Memory is returned uninitialised.
- */
-class RawPoolAllocator
-{
-public:
-	/**
-	 * @param maxSize maximum size of pool in bytes
-	 */
-	explicit RawPoolAllocator(size_t maxSize)
-	{
-		const size_t el_size = 0; // sizes will be passed to each pool_alloc
-		(void)pool_create(&m_pool, maxSize, el_size);
-	}
-
-	~RawPoolAllocator()
-	{
-		(void)pool_destroy(&m_pool);
-	}
-
-	/**
-	 * @param count number of elements of type T to allocate space for
-	 */
-	template<typename T>
-	T* AllocateMemory(size_t count)
-	{
-		T* t = (T*)pool_alloc(&m_pool, count*sizeof(T));
-		if(!t)
-		{
-			debug_break();
-			throw std::bad_alloc();
-		}
-		return t;
-	}
-
-	size_t GetCommittedSize()
-	{
-		return pool_committed(&m_pool);
-	}
-
-private:
-	Pool m_pool;
-};
-
-/**
- * STL-compatible allocator based on a RawPoolAllocator.
- * (Allocated memory is never freed, until the RawPoolAllocator is destroyed.)
- */
-template<typename T>
-class pool_allocator
-{
-private:
-	// No default constructor
-	pool_allocator() throw ();
-
-public:
-	RawPoolAllocator& p;
-
-	typedef T value_type;
-	typedef T* pointer;
-	typedef const T* const_pointer;
-	typedef T& reference;
-	typedef const T& const_reference;
-	typedef std::size_t size_type;
-	typedef std::ptrdiff_t difference_type;
-
-	template<class U>
-	struct rebind
-	{
-		typedef pool_allocator<U> other;
-	};
-
-	explicit pool_allocator(RawPoolAllocator& pool) throw () :
-		p(pool)
-	{
-	}
-
-	template<typename U>
-	pool_allocator(const pool_allocator<U>& alloc) throw () :
-		p(alloc.p)
-	{
-	}
-
-	pool_allocator& operator=(const pool_allocator&) throw ()
-	{
-	}
-
-	pointer address(reference r)
-	{
-		return &r;
-	}
-
-	const_pointer address(const_reference s)
-	{
-		return &s;
-	}
-
-	size_type max_size() const throw ()
-	{
-		return std::numeric_limits<std::size_t>::max() / sizeof(T);
-	}
-
-	void construct(const pointer ptr, const value_type& t)
-	{
-		new (ptr) T(t);
-	}
-
-	void destroy(pointer ptr)
-	{
-		ptr->~T();
-		UNUSED2(ptr); // silence MSVC warnings
-	}
-
-	pointer allocate(size_type n)
-	{
-		// safely handle zero-sized allocations (happens with GCC STL - see ticket #909).
-		if(n == 0)
-			n = 1;
-		return p.AllocateMemory<value_type> (n);
-	}
-
-	pointer allocate(size_type n, const void* const)
-	{
-		return allocate(n);
-	}
-
-	void deallocate(const pointer UNUSED(ptr), const size_type UNUSED(n))
-	{
-		// ignore deallocations
-	}
-};
-
-template<class T1, class T2>
-bool operator==(const pool_allocator<T1>&, const pool_allocator<T2>&) throw ()
-{
-	return true;
-}
-
-template<class T1, class T2>
-bool operator!=(const pool_allocator<T1>&, const pool_allocator<T2>&) throw ()
-{
-	return false;
-}
-

#endif // #ifndef INCLUDED_ALLOCATORS_POOL
@@ -421,7 +421,6 @@ void ReleaseAddressSpace(void* p, size_t UNUSED(size))
//-----------------------------------------------------------------------------
// commit/decommit, allocate/free, protect

// [23 page faults for an 8 MPixel image total 789 kc, i.e. < 1 ms]
TIMER_ADD_CLIENT(tc_commit);

bool Commit(uintptr_t address, size_t size, PageType pageType, int prot)
@@ -26,7 +26,7 @@
#include "graphics/Patch.h"
#include "graphics/Terrain.h"
#include "lib/alignment.h"
-#include "lib/allocators/pool.h"
+#include "lib/allocators/arena.h"
#include "lib/res/graphics/unifont.h"
#include "maths/MathUtil.h"
#include "ps/CLogger.h"
@@ -672,37 +672,37 @@ void CPatchRData::Update()
// Types used for glMultiDrawElements batching:

// To minimise the cost of memory allocations, everything used for computing
-// batches uses a pool allocator. (All allocations are short-lived so we can
-// just throw away the whole pool at the end of each frame.)
+// batches uses a arena allocator. (All allocations are short-lived so we can
+// just throw away the whole arena at the end of each frame.)

-// std::map types with appropriate pool allocators and default comparison operator
+// std::map types with appropriate arena allocators and default comparison operator
#define POOLED_BATCH_MAP(Key, Value) \
-	std::map<Key, Value, std::less<Key>, pool_allocator<std::pair<Key const, Value> > >
+	std::map<Key, Value, std::less<Key>, ProxyAllocator<std::pair<Key const, Value>, Allocators::Arena<> > >

-// Equivalent to "m[k]", when it returns a pool-allocated std::map (since we can't
+// Equivalent to "m[k]", when it returns a arena-allocated std::map (since we can't
// use the default constructor in that case)
template<typename M>
-typename M::mapped_type& PooledMapGet(M& m, const typename M::key_type& k, RawPoolAllocator& pool)
+typename M::mapped_type& PooledMapGet(M& m, const typename M::key_type& k, Allocators::Arena<>& arena)
{
	return m.insert(std::make_pair(k,
-		typename M::mapped_type(typename M::mapped_type::key_compare(), typename M::mapped_type::allocator_type(pool))
+		typename M::mapped_type(typename M::mapped_type::key_compare(), typename M::mapped_type::allocator_type(arena))
	)).first->second;
}

-// Equivalent to "m[k]", when it returns a std::pair of pool-allocated std::vectors
+// Equivalent to "m[k]", when it returns a std::pair of arena-allocated std::vectors
template<typename M>
-typename M::mapped_type& PooledPairGet(M& m, const typename M::key_type& k, RawPoolAllocator& pool)
+typename M::mapped_type& PooledPairGet(M& m, const typename M::key_type& k, Allocators::Arena<>& arena)
{
	return m.insert(std::make_pair(k, std::make_pair(
-		typename M::mapped_type::first_type(typename M::mapped_type::first_type::allocator_type(pool)),
-		typename M::mapped_type::second_type(typename M::mapped_type::second_type::allocator_type(pool))
+		typename M::mapped_type::first_type(typename M::mapped_type::first_type::allocator_type(arena)),
+		typename M::mapped_type::second_type(typename M::mapped_type::second_type::allocator_type(arena))
	))).first->second;
}

-static const size_t POOL_SIZE = 4*MiB; // this should be enough for fairly huge maps
+static const size_t ARENA_SIZE = 4*MiB; // this should be enough for fairly huge maps

// Each multidraw batch has a list of index counts, and a list of pointers-to-first-indexes
-typedef std::pair<std::vector<GLint, pool_allocator<GLint> >, std::vector<void*, pool_allocator<void*> > > BatchElements;
+typedef std::pair<std::vector<GLint, ProxyAllocator<GLint, Allocators::Arena<> > >, std::vector<void*, ProxyAllocator<void*, Allocators::Arena<> > > > BatchElements;

// Group batches by index buffer
typedef POOLED_BATCH_MAP(CVertexBuffer*, BatchElements) IndexBufferBatches;
@@ -715,9 +715,9 @@ typedef POOLED_BATCH_MAP(CTerrainTextureEntry*, VertexBufferBatches) TextureBatc

void CPatchRData::RenderBases(const std::vector<CPatchRData*>& patches)
{
-	RawPoolAllocator pool(POOL_SIZE);
+	Allocators::Arena<> arena(ARENA_SIZE);

-	TextureBatches batches (TextureBatches::key_compare(), (TextureBatches::allocator_type(pool)));
+	TextureBatches batches (TextureBatches::key_compare(), (TextureBatches::allocator_type(arena)));

	PROFILE_START("compute batches");

@@ -731,10 +731,10 @@ void CPatchRData::RenderBases(const std::vector<CPatchRData*>& patches)

	BatchElements& batch = PooledPairGet(
		PooledMapGet(
-			PooledMapGet(batches, splat.m_Texture, pool),
-			patch->m_VBBase->m_Owner, pool
+			PooledMapGet(batches, splat.m_Texture, arena),
+			patch->m_VBBase->m_Owner, arena
		),
-		patch->m_VBBaseIndices->m_Owner, pool
+		patch->m_VBBaseIndices->m_Owner, arena
	);

	batch.first.push_back(splat.m_IndexCount);
@@ -791,8 +791,8 @@ void CPatchRData::RenderBases(const std::vector<CPatchRData*>& patches)
 */
struct SBlendBatch
{
-	SBlendBatch(RawPoolAllocator& pool) :
-		m_Batches(VertexBufferBatches::key_compare(), VertexBufferBatches::allocator_type(pool))
+	SBlendBatch(Allocators::Arena<>& arena) :
+		m_Batches(VertexBufferBatches::key_compare(), VertexBufferBatches::allocator_type(arena))
	{
	}

@@ -806,12 +806,12 @@ struct SBlendBatch
struct SBlendStackItem
{
	SBlendStackItem(CVertexBuffer::VBChunk* v, CVertexBuffer::VBChunk* i,
-		const std::vector<CPatchRData::SSplat>& s, RawPoolAllocator& pool) :
-		vertices(v), indices(i), splats(s.begin(), s.end(), SplatStack::allocator_type(pool))
+		const std::vector<CPatchRData::SSplat>& s, Allocators::Arena<>& arena) :
+		vertices(v), indices(i), splats(s.begin(), s.end(), SplatStack::allocator_type(arena))
	{
	}

-	typedef std::vector<CPatchRData::SSplat, pool_allocator<CPatchRData::SSplat*> > SplatStack;
+	typedef std::vector<CPatchRData::SSplat, ProxyAllocator<CPatchRData::SSplat*, Allocators::Arena<> > > SplatStack;
	CVertexBuffer::VBChunk* vertices;
	CVertexBuffer::VBChunk* indices;
	SplatStack splats;
@@ -819,10 +819,10 @@ struct SBlendStackItem

void CPatchRData::RenderBlends(const std::vector<CPatchRData*>& patches)
{
-	RawPoolAllocator pool(POOL_SIZE);
+	Allocators::Arena<> arena(ARENA_SIZE);

-	typedef std::vector<SBlendBatch, pool_allocator<SBlendBatch*> > BatchesStack;
-	BatchesStack batches((BatchesStack::allocator_type(pool)));
+	typedef std::vector<SBlendBatch, ProxyAllocator<SBlendBatch*, Allocators::Arena<> > > BatchesStack;
+	BatchesStack batches((BatchesStack::allocator_type(arena)));

	PROFILE_START("compute batches");

@@ -830,8 +830,8 @@ void CPatchRData::RenderBlends(const std::vector<CPatchRData*>& patches)
	// to avoid heavy reallocations
	batches.reserve(256);

-	typedef std::vector<SBlendStackItem, pool_allocator<SBlendStackItem*> > BlendStacks;
-	BlendStacks blendStacks((BlendStacks::allocator_type(pool)));
+	typedef std::vector<SBlendStackItem, ProxyAllocator<SBlendStackItem*, Allocators::Arena<> > > BlendStacks;
+	BlendStacks blendStacks((BlendStacks::allocator_type(arena)));
	blendStacks.reserve(patches.size());

	// Extract all the blend splats from each patch
@@ -841,7 +841,7 @@ void CPatchRData::RenderBlends(const std::vector<CPatchRData*>& patches)
	if (!patch->m_BlendSplats.empty())
	{

-		blendStacks.push_back(SBlendStackItem(patch->m_VBBlends, patch->m_VBBlendIndices, patch->m_BlendSplats, pool));
+		blendStacks.push_back(SBlendStackItem(patch->m_VBBlends, patch->m_VBBlendIndices, patch->m_BlendSplats, arena));
		// Reverse the splats so the first to be rendered is at the back of the list
		std::reverse(blendStacks.back().splats.begin(), blendStacks.back().splats.end());
	}
@@ -866,7 +866,7 @@ void CPatchRData::RenderBlends(const std::vector<CPatchRData*>& patches)
	CVertexBuffer::VBChunk* vertices = blendStacks[k].vertices;
	CVertexBuffer::VBChunk* indices = blendStacks[k].indices;

-	BatchElements& batch = PooledPairGet(PooledMapGet(batches.back().m_Batches, vertices->m_Owner, pool), indices->m_Owner, pool);
+	BatchElements& batch = PooledPairGet(PooledMapGet(batches.back().m_Batches, vertices->m_Owner, arena), indices->m_Owner, arena);
	batch.first.push_back(splats.back().m_IndexCount);

	u8* indexBase = indices->m_Owner->GetBindAddress();
@@ -893,7 +893,7 @@ void CPatchRData::RenderBlends(const std::vector<CPatchRData*>& patches)
	if (bestStackSize == 0)
		break;

-	SBlendBatch layer(pool);
+	SBlendBatch layer(arena);
	layer.m_Texture = bestTex;
	batches.push_back(layer);
	}
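Distilled from the hunks above, the per-frame pattern is roughly the following (a sketch, not code from the commit; the function name and the elided batching logic are placeholders):

void RenderFrameSketch()
{
	Allocators::Arena<> arena(ARENA_SIZE);	// one arena per frame

	typedef POOLED_BATCH_MAP(CVertexBuffer*, BatchElements) Batches;
	Batches batches(Batches::key_compare(), (Batches::allocator_type(arena)));

	// ... fill 'batches' via PooledMapGet/PooledPairGet(..., arena) and issue the draw calls ...

	// no per-element cleanup: everything proxied into 'arena' is abandoned
	// wholesale when it goes out of scope at the end of the frame
}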
@@ -29,7 +29,7 @@

CBinarySerializerScriptImpl::CBinarySerializerScriptImpl(ScriptInterface& scriptInterface, ISerializer& serializer) :
	m_ScriptInterface(scriptInterface), m_Serializer(serializer), m_Rooter(m_ScriptInterface),
-	m_ScriptBackrefsPool(8*MiB), m_ScriptBackrefs(backrefs_t::key_compare(), ScriptBackrefsAlloc(m_ScriptBackrefsPool)), m_ScriptBackrefsNext(1)
+	m_ScriptBackrefsArena(8*MiB), m_ScriptBackrefs(backrefs_t::key_compare(), ScriptBackrefsAlloc(m_ScriptBackrefsArena)), m_ScriptBackrefsNext(1)
{
}

@@ -23,7 +23,7 @@
#include "scriptinterface/AutoRooters.h"

#include "lib/byte_order.h"
-#include "lib/allocators/pool.h"
+#include "lib/allocators/arena.h"

#include <map>

@@ -64,10 +64,10 @@ private:
	ISerializer& m_Serializer;

	// Pooling helps since we do a lot of short-lived allocations
-	typedef pool_allocator<std::pair<JSObject* const, u32> > ScriptBackrefsAlloc;
+	typedef ProxyAllocator<std::pair<JSObject* const, u32>, Allocators::Arena<> > ScriptBackrefsAlloc;
	typedef std::map<JSObject*, u32, std::less<JSObject*>, ScriptBackrefsAlloc> backrefs_t;

-	RawPoolAllocator m_ScriptBackrefsPool;
+	Allocators::Arena<> m_ScriptBackrefsArena;
	backrefs_t m_ScriptBackrefs;
	u32 m_ScriptBackrefsNext;
	u32 GetScriptBackrefTag(JSObject* obj);