Remove UniqueRange custom code in favour of unique_ptr

UniqueRange was a unique_ptr replacement with a few tweaks (such as a
compulsory custom deleter). Its extra features see little actual use, it
relies on outdated atomic calls, and it blocks D1511, so it is removed
(a sketch of the replacement pattern follows the change summary below).

Refs #5165

Differential Revision: https://code.wildfiregames.com/D613
This was SVN commit r24234.
wraitii 2020-11-22 13:43:19 +00:00
parent 3391542fec
commit 2288de4aa6
5 changed files with 32 additions and 346 deletions
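The commit replaces UniqueRange with a plain std::unique_ptr carrying a stateless deleter functor, as introduced in io.h below. A minimal sketch of that pattern, assuming C++17; the names mirror the new io::BufferPtr, while std::aligned_alloc stands in for the engine's rtl_AllocateAligned and is an assumption of this sketch:

#include <cstdlib>
#include <memory>

using u8 = unsigned char;

// Stateless deleter functor: thanks to the empty base optimization,
// sizeof(BufferPtr) == sizeof(void*), whereas UniqueRange carried a
// tagged pointer plus a separate size member.
struct FreeAligned
{
	void operator()(void* p) const { std::free(p); } // rtl_FreeAligned in the engine
};

using BufferPtr = std::unique_ptr<u8, FreeAligned>;

BufferPtr Allocate(std::size_t size, std::size_t alignment = 64)
{
	// std::aligned_alloc (C++17) requires size to be a multiple of
	// alignment; round up (alignment must be a power of two).
	const std::size_t alignedSize = (size + alignment - 1) & ~(alignment - 1);
	return BufferPtr(static_cast<u8*>(std::aligned_alloc(alignment, alignedSize)));
}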

lib/allocators/unique_range.cpp · View File

@@ -1,104 +0,0 @@
/* Copyright (C) 2017 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "precompiled.h"
#include "lib/allocators/unique_range.h"
#include "lib/bits.h" // is_pow2, round_up
#include "lib/sysdep/cpu.h" // cpu_AtomicAdd
#include "lib/sysdep/rtl.h" // rtl_FreeAligned
static void FreeNone(void* UNUSED(pointer), size_t UNUSED(size))
{
// (providing a deleter function for idxDeleterNone avoids
// having to check whether deleters[idxDeleter] == 0)
}
static void FreeAligned(void* pointer, size_t UNUSED(size))
{
return rtl_FreeAligned(pointer);
}
static UniqueRangeDeleter deleters[allocationAlignment] = { FreeNone, FreeAligned };
static IdxDeleter numDeleters = 2;
// NB: callers should skip this if *idxDeleterOut != 0 (avoids the overhead
// of an unnecessary indirect function call)
void RegisterUniqueRangeDeleter(UniqueRangeDeleter deleter, volatile IdxDeleter* idxDeleterOut)
{
ENSURE(deleter);
if(!cpu_CAS(idxDeleterOut, idxDeleterNone, -1)) // not the first call for this deleter
{
// wait until an index has been assigned
while(*idxDeleterOut <= 0)
cpu_Pause();
return;
}
const IdxDeleter idxDeleter = cpu_AtomicAdd(&numDeleters, 1);
ENSURE(idxDeleter < (IdxDeleter)ARRAY_SIZE(deleters));
deleters[idxDeleter] = deleter;
COMPILER_FENCE;
*idxDeleterOut = idxDeleter;
}
NOTHROW_DEFINE void CallUniqueRangeDeleter(void* pointer, size_t size, IdxDeleter idxDeleter)
{
ASSERT(idxDeleter < numDeleters);
// (some deleters do not tolerate null pointers)
if(pointer)
deleters[idxDeleter](pointer, size);
}
UniqueRange AllocateAligned(size_t size, size_t alignment)
{
ENSURE(is_pow2(alignment));
alignment = std::max(alignment, allocationAlignment);
const size_t alignedSize = round_up(size, alignment);
const UniqueRange::pointer p = rtl_AllocateAligned(alignedSize, alignment);
static volatile IdxDeleter idxDeleterAligned;
if(idxDeleterAligned == 0) // (optional optimization)
RegisterUniqueRangeDeleter(FreeAligned, &idxDeleterAligned);
return UniqueRange(p, size, idxDeleterAligned);
}
UniqueRange AllocateVM(size_t size, vm::PageType pageType, int prot)
{
const UniqueRange::pointer p = vm::Allocate(size, pageType, prot);
static volatile IdxDeleter idxDeleter;
if(idxDeleter == 0) // (optional optimization)
RegisterUniqueRangeDeleter(vm::Free, &idxDeleter);
return UniqueRange(p, size, idxDeleter);
}
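For reference (not part of this commit): cpu_CAS, cpu_AtomicAdd and COMPILER_FENCE above are the "outdated atomic calls" the commit message mentions. The same once-only registration could be written with std::atomic roughly as follows; this sketch keeps the removed file's deleters table, idxDeleterNone and ENSURE, but changes the out-parameter's type:

#include <atomic>
#include <thread>

static std::atomic<IdxDeleter> numDeleters{2};

void RegisterUniqueRangeDeleter(UniqueRangeDeleter deleter, std::atomic<IdxDeleter>* idxDeleterOut)
{
	IdxDeleter expected = idxDeleterNone;
	// only the first caller claims the slot (-1 marks "registration in progress")
	if(!idxDeleterOut->compare_exchange_strong(expected, IdxDeleter(-1)))
	{
		// another thread is registering; wait until it publishes the index
		while(idxDeleterOut->load(std::memory_order_acquire) <= 0)
			std::this_thread::yield();
		return;
	}
	const IdxDeleter idxDeleter = numDeleters.fetch_add(1); // returns the old count
	ENSURE(idxDeleter < (IdxDeleter)ARRAY_SIZE(deleters));
	deleters[idxDeleter] = deleter;
	// the release store replaces COMPILER_FENCE: any thread that observes
	// the index also observes the table entry written above
	idxDeleterOut->store(idxDeleter, std::memory_order_release);
}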

lib/allocators/unique_range.h · View File

@@ -1,220 +0,0 @@
/* Copyright (C) 2015 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_ALLOCATORS_UNIQUE_RANGE
#define INCLUDED_ALLOCATORS_UNIQUE_RANGE
#include "lib/lib_api.h"
#include "lib/alignment.h" // allocationAlignment
#include "lib/sysdep/vm.h"
// we usually don't hold multiple references to allocations, so unique_ptr
// can be used instead of the more complex (ICC generated incorrect code on
// 2 occasions) and expensive shared_ptr.
// a custom deleter is required because allocators such as ReserveAddressSpace need to
// pass the size to their deleter. we want to mix pointers from various allocators, but
// unique_ptr's deleter is fixed at compile-time, so it would need to be general enough
// to handle all allocators.
// storing the size and a function pointer would be one such solution, with the added
// bonus of no longer requiring a complete type at the invocation of ~unique_ptr.
// however, this inflates the pointer size to 3 words. if only a few allocator types
// are needed, we can replace the function pointer with an index stashed into the
// lower bits of the pointer (safe because all allocations' addresses are multiples
// of allocationAlignment).
typedef intptr_t IdxDeleter;
// no-op deleter (use when returning part of an existing allocation)
static const IdxDeleter idxDeleterNone = 0;
typedef void (*UniqueRangeDeleter)(void* pointer, size_t size);
/**
* register a deleter, returning its index within the table.
*
* @param deleter function pointer. must be uniquely associated with
* the idxDeleter storage location.
* @param idxDeleter location where to store the next available index.
* if it is already non-zero, skip the call to this function to
* avoid overhead.
*
* thread-safe. idxDeleter is used for mutual exclusion between
* multiple callers for the same deleter. concurrent registration of
* different deleters is also safe due to atomic increments.
*
* halts the program if more than allocationAlignment deleters are
* to be registered.
**/
LIB_API void RegisterUniqueRangeDeleter(UniqueRangeDeleter deleter, volatile IdxDeleter* idxDeleter);
LIB_API NOTHROW_DECLARE void CallUniqueRangeDeleter(void* pointer, size_t size, IdxDeleter idxDeleter);
// unfortunately, unique_ptr allows constructing without a custom deleter. to ensure callers can
// rely upon pointers being associated with a size, we introduce a `UniqueRange' replacement.
// its interface is identical to unique_ptr except for the constructors, the addition of
// size() and the removal of operator bool (which avoids implicit casts to int).
class UniqueRange
{
public:
typedef void* pointer;
typedef void element_type;
UniqueRange()
{
Clear();
}
UniqueRange(pointer p, size_t size, IdxDeleter deleter)
{
Set(p, size, deleter);
}
UniqueRange(UniqueRange&& rvalue)
{
Pilfer(rvalue);
}
UniqueRange& operator=(UniqueRange&& rvalue)
{
UniqueRange& lvalue = rvalue;
if(this != &lvalue)
{
Delete();
Pilfer(lvalue);
}
return *this;
}
~UniqueRange()
{
Delete();
}
pointer get() const
{
return pointer(address_ & ~(allocationAlignment-1));
}
IdxDeleter get_deleter() const
{
return IdxDeleter(address_ % allocationAlignment);
}
size_t size() const
{
return size_;
}
// side effect: subsequent get_deleter will return idxDeleterNone
pointer release() // relinquish ownership
{
pointer ret = get();
Clear();
return ret;
}
void reset()
{
Delete();
Clear();
}
void reset(pointer p, size_t size, IdxDeleter deleter)
{
Delete();
Set(p, size, deleter);
}
void swap(UniqueRange& rhs)
{
std::swap(address_, rhs.address_);
std::swap(size_, rhs.size_);
}
// don't define construction and assignment from lvalue,
// but the declarations must be accessible
UniqueRange(const UniqueRange&);
UniqueRange& operator=(const UniqueRange&);
private:
void Set(pointer p, size_t size, IdxDeleter deleter)
{
ASSERT((uintptr_t(p) % allocationAlignment) == 0);
ASSERT(size_t(deleter) < allocationAlignment);
address_ = uintptr_t(p) | deleter;
size_ = size;
ASSERT(get() == p);
ASSERT(get_deleter() == deleter);
ASSERT(this->size() == size);
}
void Clear()
{
Set(0, 0, idxDeleterNone);
}
void Pilfer(UniqueRange& victim)
{
const size_t size = victim.size();
const IdxDeleter idxDeleter = victim.get_deleter();
pointer p = victim.release();
Set(p, size, idxDeleter);
victim.Clear();
}
void Delete()
{
CallUniqueRangeDeleter(get(), size(), get_deleter());
}
// (IdxDeleter is stored in the lower bits of address since size might not even be a multiple of 4.)
uintptr_t address_;
size_t size_;
};
namespace std {
static inline void swap(UniqueRange& p1, UniqueRange& p2)
{
p1.swap(p2);
}
static inline void swap(UniqueRange&& p1, UniqueRange& p2)
{
p2.swap(p1);
}
static inline void swap(UniqueRange& p1, UniqueRange&& p2)
{
p1.swap(p2);
}
}
LIB_API UniqueRange AllocateAligned(size_t size, size_t alignment);
LIB_API UniqueRange AllocateVM(size_t size, vm::PageType pageSize = vm::kDefault, int prot = PROT_READ|PROT_WRITE);
#endif // #ifndef INCLUDED_ALLOCATORS_UNIQUE_RANGE
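The comment at the top of this header describes stashing the deleter index in the pointer's otherwise-zero low bits. In isolation, that tagging scheme looks like this (a standalone sketch with a hypothetical 16-byte allocation alignment):

#include <cassert>
#include <cstdint>

constexpr std::uintptr_t kAlignment = 16; // every allocation address is a multiple of this

// the low log2(kAlignment) bits of an aligned address are always zero,
// so a small integer fits there without widening the representation
inline std::uintptr_t Tag(void* p, std::uintptr_t idx)
{
	assert(reinterpret_cast<std::uintptr_t>(p) % kAlignment == 0);
	assert(idx < kAlignment);
	return reinterpret_cast<std::uintptr_t>(p) | idx;
}

inline void* Pointer(std::uintptr_t tagged)
{
	return reinterpret_cast<void*>(tagged & ~(kAlignment - 1));
}

inline std::uintptr_t Index(std::uintptr_t tagged)
{
	return tagged & (kAlignment - 1);
}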

lib/file/archive/archive_zip.cpp · View File

@@ -1,4 +1,4 @@
/* Copyright (C) 2017 Wildfire Games.
/* Copyright (C) 2020 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@@ -448,17 +448,17 @@ public:
size_t cd_numEntries = 0;
size_t cd_size = 0;
RETURN_STATUS_IF_ERR(LocateCentralDirectory(m_file, m_fileSize, cd_ofs, cd_numEntries, cd_size));
UniqueRange buf(io::Allocate(cd_size));
io::BufferPtr buf(io::Allocate(cd_size));
io::Operation op(*m_file.get(), buf.get(), cd_size, cd_ofs);
RETURN_STATUS_IF_ERR(io::Run(op));
// iterate over Central Directory
const u8* pos = (const u8*)buf.get();
const u8* pos = buf.get();
for(size_t i = 0; i < cd_numEntries; i++)
{
// scan for next CDFH
CDFH* cdfh = (CDFH*)FindRecord((const u8*)buf.get(), cd_size, pos, cdfh_magic, sizeof(CDFH));
CDFH* cdfh = (CDFH*)FindRecord(buf.get(), cd_size, pos, cdfh_magic, sizeof(CDFH));
if(!cdfh)
WARN_RETURN(ERR::CORRUPTED);
@@ -533,14 +533,14 @@ private:
static Status LocateCentralDirectory(const PFile& file, off_t fileSize, off_t& cd_ofs, size_t& cd_numEntries, size_t& cd_size)
{
const size_t maxScanSize = 66000u; // see below
UniqueRange buf(io::Allocate(maxScanSize));
io::BufferPtr buf(io::Allocate(maxScanSize));
// expected case: ECDR at EOF; no file comment
Status ret = ScanForEcdr(file, fileSize, (u8*)buf.get(), sizeof(ECDR), cd_numEntries, cd_ofs, cd_size);
Status ret = ScanForEcdr(file, fileSize, buf.get(), sizeof(ECDR), cd_numEntries, cd_ofs, cd_size);
if(ret == INFO::OK)
return INFO::OK;
// worst case: ECDR precedes 64 KiB of file comment
ret = ScanForEcdr(file, fileSize, (u8*)buf.get(), maxScanSize, cd_numEntries, cd_ofs, cd_size);
ret = ScanForEcdr(file, fileSize, buf.get(), maxScanSize, cd_numEntries, cd_ofs, cd_size);
if(ret == INFO::OK)
return INFO::OK;
@@ -555,7 +555,7 @@ private:
// because it'd be slow.
// - do not warn - the corrupt archive will be deleted on next
// successful archive builder run anyway.
if(FindRecord((const u8*)buf.get(), sizeof(LFH), (const u8*)buf.get(), lfh_magic, sizeof(LFH)))
if(FindRecord(buf.get(), sizeof(LFH), buf.get(), lfh_magic, sizeof(LFH)))
return ERR::CORRUPTED; // NOWARN
// totally bogus
else
@@ -662,12 +662,12 @@ public:
// allocate memory
const size_t csizeMax = codec->MaxOutputSize(size_t(usize));
UniqueRange buf(io::Allocate(sizeof(LFH) + pathnameLength + csizeMax));
io::BufferPtr buf(io::Allocate(sizeof(LFH) + pathnameLength + csizeMax));
// read and compress file contents
size_t csize; u32 checksum;
{
u8* cdata = (u8*)buf.get() + sizeof(LFH) + pathnameLength;
u8* cdata = buf.get() + sizeof(LFH) + pathnameLength;
Stream stream(codec);
stream.SetOutputBuffer(cdata, csizeMax);
StreamFeeder streamFeeder(stream);
@@ -687,7 +687,7 @@ public:
// build LFH
{
LFH* lfh = (LFH*)buf.get();
LFH* lfh = reinterpret_cast<LFH*>(buf.get());
lfh->Init(fileInfo, (off_t)csize, method, checksum, pathnameInArchive);
}

lib/file/io/io.h · View File

@@ -1,4 +1,4 @@
/* Copyright (C) 2018 Wildfire Games.
/* Copyright (C) 2020 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@@ -33,11 +33,10 @@
#include "lib/bits.h"
#include "lib/timer.h"
#include "lib/file/file.h"
#include "lib/sysdep/rtl.h"
#include "lib/sysdep/filesystem.h" // wtruncate
#include "lib/posix/posix_aio.h" // LIO_READ, LIO_WRITE
#include "lib/allocators/unique_range.h"
namespace ERR
{
const Status IO = -110301;
@@ -45,16 +44,27 @@ namespace ERR
namespace io {
struct FreeAligned
{
void operator()(void* pointer) { rtl_FreeAligned(pointer); }
};
using BufferPtr = std::unique_ptr<u8, FreeAligned>;
// @return memory suitable for use as an I/O buffer (address is a
// multiple of alignment, size is rounded up to a multiple of alignment)
// @param alignment is automatically increased if smaller than the
// UniqueRange requirement.
// @param alignment is automatically increased if required.
//
// use this instead of the file cache for write buffers that are
// never reused (avoids displacing other items).
static inline UniqueRange Allocate(size_t size, size_t alignment = maxSectorSize)
static inline io::BufferPtr Allocate(size_t size, size_t alignment = maxSectorSize)
{
return AllocateAligned(size, alignment);
ENSURE(is_pow2(alignment));
alignment = std::max(alignment, allocationAlignment);
u8* p = static_cast<u8*>(rtl_AllocateAligned(round_up(size, alignment), alignment));
return {p, FreeAligned{}};
}
@@ -209,7 +219,7 @@ public:
}
private:
UniqueRange buffers;
io::BufferPtr buffers;
aiocb controlBlocks[Parameters::maxQueueDepth];
};
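Call sites become slightly simpler, since BufferPtr is typed as u8* and frees itself; roughly, following the central-directory read in the archive diff above:

// sketch of the new call-site pattern
io::BufferPtr buf = io::Allocate(cd_size); // sector-aligned, sized for the central directory
const u8* pos = buf.get();                 // no (const u8*) cast needed any more
// ... read and parse ...
// rtl_FreeAligned runs automatically when buf goes out of scope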

lib/sysdep/vm.h · View File

@@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2020 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@@ -70,7 +70,7 @@ LIB_API void* ReserveAddressSpace(size_t size, size_t commitSize = g_LargePageSi
*
* @param p a pointer previously returned by ReserveAddressSpace.
* @param size is required by the POSIX implementation and
* ignored on Windows. it also ensures compatibility with UniqueRange.
* ignored on Windows.
**/
LIB_API void ReleaseAddressSpace(void* p, size_t size = 0);
@@ -126,7 +126,7 @@ LIB_API void* Allocate(size_t size, PageType pageType = kDefault, int prot = PRO
*
* @param p a pointer previously returned by Allocate.
* @param size is required by the POSIX implementation and
* ignored on Windows. it also ensures compatibility with UniqueRange.
* ignored on Windows.
*
* (this differs from ReleaseAddressSpace, which must account for
* extra padding/alignment to largePageSize.)
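With UniqueRange gone, a caller that wants RAII around these functions can pair std::unique_ptr with a small stateful deleter that remembers the size vm::Free needs on POSIX. A hypothetical sketch (not part of the commit), mirroring the removed AllocateVM:

#include <memory>

struct VmFree
{
	size_t size; // vm::Free requires the size on POSIX
	void operator()(void* p) const { vm::Free(p, size); }
};

// unique_ptr<void, D> is valid as long as the pointer is never dereferenced
using VmPtr = std::unique_ptr<void, VmFree>;

inline VmPtr AllocateVM(size_t size, vm::PageType pageType = vm::kDefault, int prot = PROT_READ|PROT_WRITE)
{
	return VmPtr(vm::Allocate(size, pageType, prot), VmFree{size});
}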