1
0
forked from 0ad/0ad
- callbacks now have a uintptr_t "cbData" parameter (instead of
haphazard void*/uintptr_t, cb/ctx/data)
- resource loading code now more uniformly deals with u8* pointers
instead of void*

allocators: add support for page_aligned_alloc via boost::shared_ptr.
add evil hack to avoid the need for default ctor and ensure alignment in
SingleAllocator
archive: improve Decompressor
compression:
. near complete rewrite (previous code was a poorly factored mess)
. fix bug related to buffer allocation
. no longer provide get_output API (prone to abuse)
. add call to get max. size of output buffer (for preallocation)

This was SVN commit r5370.
This commit is contained in:
janwas 2007-09-25 09:39:20 +00:00
parent 11eafe2401
commit f4adce44bf
39 changed files with 784 additions and 602 deletions

View File

@ -150,9 +150,9 @@ void CObjectManager::UnloadObjects()
static void GetObjectName_ThunkCb(const char* path, const DirEnt* UNUSED(ent), void* context)
static void GetObjectName_ThunkCb(const char* path, const DirEnt* UNUSED(ent), uintptr_t cbData)
{
std::vector<CStr>* names = (std::vector<CStr>*)context;
std::vector<CStr>* names = (std::vector<CStr>*)cbData;
CStr name (path);
names->push_back(name.AfterFirst("actors/"));
}
@ -160,11 +160,11 @@ static void GetObjectName_ThunkCb(const char* path, const DirEnt* UNUSED(ent), v
void CObjectManager::GetAllObjectNames(std::vector<CStr>& names)
{
vfs_dir_enum("art/actors/", VFS_DIR_RECURSIVE, "*.xml",
GetObjectName_ThunkCb, &names);
GetObjectName_ThunkCb, (uintptr_t)&names);
}
void CObjectManager::GetPropObjectNames(std::vector<CStr>& names)
{
vfs_dir_enum("art/actors/props/", VFS_DIR_RECURSIVE, "*.xml",
GetObjectName_ThunkCb, &names);
GetObjectName_ThunkCb, (uintptr_t)&names);
}

View File

@ -94,7 +94,7 @@ class TestMeshManager : public CxxTest::TestSuite
{
// Create a junk trace file first, because vfs_opt_auto_build requires one
std::string trace = "000.000000: L \"-\" 0 0000\n";
vfs_store("trace.txt", trace.c_str(), trace.size(), FILE_NO_AIO);
vfs_store("trace.txt", (const u8*)trace.c_str(), trace.size(), FILE_NO_AIO);
// then make the archive
TS_ASSERT_OK(vfs_opt_rebuild_main_archive(MOD_PATH"/trace.txt", MOD_PATH"/test%02d.zip"));
@ -127,7 +127,7 @@ public:
// Have to specify FILE_WRITE_TO_TARGET in order to overwrite existing
// files when they might have been archived
vfs_store(testDAE, "Test", 4, FILE_NO_AIO | FILE_WRITE_TO_TARGET);
vfs_store(testDAE, (const u8*)"Test", 4, FILE_NO_AIO | FILE_WRITE_TO_TARGET);
// We can't overwrite cache files because FILE_WRITE_TO_TARGET won't
// write into cache/ - it might be nice to fix that. For now we just
@ -197,7 +197,7 @@ public:
copyFile(srcDAE, testDAE);
const char text[] = "Not valid XML";
vfs_store(testSkeletonDefs, text, strlen(text), FILE_NO_AIO);
vfs_store(testSkeletonDefs, (const u8*)text, strlen(text), FILE_NO_AIO);
CModelDefPtr modeldef = meshManager->GetMesh(testDAE);
TS_ASSERT(! modeldef);
@ -210,7 +210,7 @@ public:
copyFile(srcSkeletonDefs, testSkeletonDefs);
const char text[] = "Not valid XML";
vfs_store(testDAE, text, strlen(text), FILE_NO_AIO);
vfs_store(testDAE, (const u8*)text, strlen(text), FILE_NO_AIO);
CModelDefPtr modeldef = meshManager->GetMesh(testDAE);
TS_ASSERT(! modeldef);

View File

@ -581,14 +581,7 @@ void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size)
p = storage;
// already in use (rare) - allocate from heap
else
{
p = malloc(size);
if(!p)
{
WARN_ERR(ERR::NO_MEM);
return 0;
}
}
p = new u8[size];
memset(p, 0, size);
return p;
@ -615,7 +608,7 @@ void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p)
// single instance may have been freed by now - cannot assume
// anything about in_use_flag.
free(p);
delete[] (u8*)p;
}
}

View File

@ -47,6 +47,28 @@ extern void* page_aligned_alloc(size_t unaligned_size);
extern void page_aligned_free(void* p, size_t unaligned_size);
// adapter that allows calling page_aligned_free as a boost::shared_ptr deleter.
class PageAlignedDeleter
{
public:
PageAlignedDeleter(size_t size)
: m_size(size)
{
debug_assert(m_size != 0);
}
void operator()(u8* p)
{
debug_assert(m_size != 0);
page_aligned_free(p, m_size);
m_size = 0;
}
private:
size_t m_size;
};
//
// dynamic (expandable) array
//
@ -436,10 +458,16 @@ extern void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p)
**/
template<class T> class SingleAllocator
{
T storage;
// evil but necessary hack: we don't want to instantiate a T directly
// because it may not have a default ctor. an array of uninitialized
// storage is used instead. single_calloc doesn't know about alignment,
// so we fix this by asking for an array of doubles.
double storage[(sizeof(T)+sizeof(double)-1)/sizeof(double)];
volatile uintptr_t is_in_use;
public:
typedef T value_type;
SingleAllocator()
{
is_in_use = 0;

View File

@ -294,7 +294,7 @@ const char* path_extension(const char* fn)
// call <cb> with <ctx> for each component in <path>.
LibError path_foreach_component(const char* path_org, PathComponentCb cb, void* ctx)
LibError path_foreach_component(const char* path_org, PathComponentCb cb, uintptr_t cbData)
{
CHECK_PATH(path_org);
@ -328,7 +328,7 @@ LibError path_foreach_component(const char* path_org, PathComponentCb cb, void*
else
*slash = '\0'; // 0-terminate cur_component
LibError ret = cb(cur_component, is_dir, ctx);
LibError ret = cb(cur_component, is_dir, cbData);
// callback wants to abort - return its value.
if(ret != INFO::CB_CONTINUE)
return ret;

View File

@ -182,13 +182,13 @@ extern const char* path_extension(const char* fn);
* return that. no need to 'abort' (e.g. return INFO::OK) after a filename is
* encountered - that's taken care of automatically.
**/
typedef LibError (*PathComponentCb)(const char* component, bool is_dir, void* ctx);
typedef LibError (*PathComponentCb)(const char* component, bool is_dir, uintptr_t cbData);
/**
* call <cb> with <ctx> for each component in <path>.
* @return LibError
**/
extern LibError path_foreach_component(const char* path, PathComponentCb cb, void* ctx);
extern LibError path_foreach_component(const char* path, PathComponentCb cb, uintptr_t cbData);
//-----------------------------------------------------------------------------

View File

@ -17,6 +17,8 @@
#include "lib/res/res.h"
#include "file_internal.h"
#include <boost/shared_ptr.hpp>
// components:
// - za_*: Zip archive handling
@ -386,7 +388,7 @@ struct ArchiveFileIo
uintptr_t ctx;
size_t max_output_size;
void* user_buf;
u8* user_buf;
};
cassert(sizeof(ArchiveFileIo) <= FILE_IO_OPAQUE_SIZE);
@ -396,7 +398,7 @@ static SingleAllocator<FileIo> io_allocator;
// begin transferring <size> bytes, starting at <ofs>. get result
// with afile_io_wait; when no longer needed, free via afile_io_discard.
LibError afile_io_issue(File* f, off_t user_ofs, size_t max_output_size, void* user_buf, FileIo* io)
LibError afile_io_issue(File* f, off_t user_ofs, size_t max_output_size, u8* user_buf, FileIo* io)
{
// zero output param in case we fail below.
memset(io, 0, sizeof(FileIo));
@ -432,7 +434,7 @@ LibError afile_io_issue(File* f, off_t user_ofs, size_t max_output_size, void* u
const ssize_t left_in_file = af->csize - cofs;
const size_t csize = std::min(left_in_chunk, left_in_file);
void* cbuf = mem_alloc(csize, 4*KiB);
u8* cbuf = (u8*)mem_alloc(csize, 4*KiB);
if(!cbuf)
WARN_RETURN(ERR::NO_MEM);
@ -454,21 +456,21 @@ int afile_io_has_completed(FileIo* io)
// wait until the transfer <io> completes, and return its buffer.
// output parameters are zeroed on error.
LibError afile_io_wait(FileIo* io, void*& buf, size_t& size)
LibError afile_io_wait(FileIo* io, u8*& buf, size_t& size)
{
buf = 0;
size = 0;
ArchiveFileIo* aio = (ArchiveFileIo*)io->opaque;
void* raw_buf;
u8* raw_buf;
size_t raw_size;
RETURN_ERR(file_io_wait(aio->io, raw_buf, raw_size));
// file is compressed and we need to decompress
if(aio->ctx)
{
comp_set_output(aio->ctx, (void*)aio->user_buf, aio->max_output_size);
comp_set_output(aio->ctx, aio->user_buf, aio->max_output_size);
ssize_t ucbytes_output = comp_feed(aio->ctx, raw_buf, raw_size);
free(raw_buf);
RETURN_ERR(ucbytes_output);
@ -512,61 +514,72 @@ LibError afile_io_validate(const FileIo* io)
class Decompressor
{
public:
Decompressor(uintptr_t comp_ctx_, size_t ucsize_max, bool use_temp_buf_, FileIOCB cb, uintptr_t cb_ctx)
Decompressor(uintptr_t ctx, FileIOBuf* pbuf, size_t usizeMax, FileIOCB cb, uintptr_t cbData)
: m_ctx(ctx)
, m_udataSize(usizeMax), m_csizeTotal(0), m_usizeTotal(0)
, m_cb(cb), m_cbData(cbData)
{
comp_ctx = comp_ctx_;
debug_assert(m_ctx != 0);
csize_total = 0;
ucsize_left = ucsize_max;
use_temp_buf = use_temp_buf_;
user_cb = cb;
user_cb_ctx = cb_ctx;
if(pbuf == FILE_BUF_TEMP)
{
m_tmpBuf.reset((u8*)page_aligned_alloc(m_udataSize), PageAlignedDeleter(m_udataSize));
m_udata = m_tmpBuf.get();
}
else
m_udata = (u8*)*pbuf; // WARNING: FileIOBuf is nominally const; if that's ever enforced, this may need to change.
}
LibError feed(const void* cblock, size_t csize, size_t* bytes_processed)
LibError operator()(const u8* cblock, size_t cblockSize, size_t* bytes_processed)
{
if(use_temp_buf)
RETURN_ERR(comp_alloc_output(comp_ctx, csize));
// when decompressing into the temp buffer, always start at ofs=0.
const size_t ofs = m_tmpBuf.get()? 0 : m_usizeTotal;
u8* const ublock = m_udata + ofs;
comp_set_output(m_ctx, ublock, m_udataSize-ofs);
void* ucblock = comp_get_output(comp_ctx);
const size_t ublockSize = comp_feed(m_ctx, cblock, cblockSize);
const size_t ucsize = comp_feed(comp_ctx, cblock, csize);
*bytes_processed = ucsize;
debug_assert(ucsize <= ucsize_left);
ucsize_left -= ucsize;
m_csizeTotal += cblockSize;
m_usizeTotal += ublockSize;
debug_assert(m_usizeTotal <= m_udataSize);
*bytes_processed = ublockSize;
LibError ret = INFO::CB_CONTINUE;
if(user_cb)
ret = user_cb(user_cb_ctx, ucblock, ucsize, bytes_processed);
if(ucsize_left == 0)
if(m_cb)
ret = m_cb(m_cbData, ublock, ublockSize, bytes_processed);
if(m_usizeTotal == m_udataSize)
ret = INFO::OK;
return ret;
}
size_t total_csize_fed() const { return csize_total; }
size_t NumCompressedBytesProcessed() const
{
return m_csizeTotal;
}
private:
uintptr_t comp_ctx;
uintptr_t m_ctx;
size_t csize_total;
size_t ucsize_left;
size_t m_csizeTotal;
size_t m_usizeTotal;
bool use_temp_buf;
u8* m_udata;
size_t m_udataSize;
boost::shared_ptr<u8> m_tmpBuf;
// allow user-specified callbacks: "chain" them, because file_io's
// callback mechanism is already used to return blocks.
FileIOCB user_cb;
uintptr_t user_cb_ctx;
FileIOCB m_cb;
uintptr_t m_cbData;
};
static LibError decompressor_feed_cb(uintptr_t cb_ctx,
const void* cblock, size_t csize, size_t* bytes_processed)
static LibError decompressor_feed_cb(uintptr_t cbData,
const u8* cblock, size_t cblockSize, size_t* bytes_processed)
{
Decompressor* d = (Decompressor*)cb_ctx;
return d->feed(cblock, csize, bytes_processed);
Decompressor& decompressor = *(Decompressor*)cbData;
return decompressor(cblock, cblockSize, bytes_processed);
}
@ -614,22 +627,17 @@ ssize_t afile_read(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb
return bytes_read;
}
debug_assert(af->ctx != 0);
RETURN_ERR(file_io_get_buf(pbuf, size, f->atom_fn, f->flags, cb));
const bool use_temp_buf = (pbuf == FILE_BUF_TEMP);
if(!use_temp_buf)
comp_set_output(af->ctx, (void*)*pbuf, size);
const off_t cofs = af->ofs+af->last_cofs;
// remaining bytes in file. callback will cause IOs to stop when
// enough ucdata has been produced.
// enough udata has been produced.
const size_t csize_max = af->csize - af->last_cofs;
Decompressor d(af->ctx, size, use_temp_buf, cb, cb_ctx);
Decompressor d(af->ctx, pbuf, size, cb, cb_ctx);
ssize_t uc_transferred = file_io(&a->f, cofs, csize_max, FILE_BUF_TEMP, decompressor_feed_cb, (uintptr_t)&d);
af->last_cofs += (off_t)d.total_csize_fed();
af->last_cofs += (off_t)d.NumCompressedBytesProcessed();
return uc_transferred;
}
@ -649,7 +657,7 @@ ssize_t afile_read(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb
// the mapping will be removed (if still open) when its file is closed.
// however, map/unmap calls should still be paired so that the mapping
// may be removed when no longer needed.
LibError afile_map(File* f, void*& p, size_t& size)
LibError afile_map(File* f, u8*& p, size_t& size)
{
p = 0;
size = 0;
@ -666,11 +674,10 @@ LibError afile_map(File* f, void*& p, size_t& size)
// in the meantime to save memory in case it wasn't going to be mapped.
// now we do so again; it's unmapped in afile_unmap (refcounted).
H_DEREF(af->ha, Archive, a);
void* archive_p;
size_t archive_size;
u8* archive_p; size_t archive_size;
RETURN_ERR(file_map(&a->f, archive_p, archive_size));
p = (char*)archive_p + af->ofs;
p = archive_p + af->ofs;
size = f->size;
af->is_mapped = 1;

View File

@ -68,7 +68,7 @@ extern LibError afile_open_vfs(const char* fn, uint flags, File* f, TFile* tf);
// begin transferring <size> bytes, starting at <ofs>. get result
// with afile_io_wait; when no longer needed, free via afile_io_discard.
extern LibError afile_io_issue(File* f, off_t ofs, size_t size, void* buf, FileIo* io);
extern LibError afile_io_issue(File* f, off_t ofs, size_t size, u8* buf, FileIo* io);
// indicates if the IO referenced by <io> has completed.
// return value: 0 if pending, 1 if complete, < 0 on error.
@ -76,7 +76,7 @@ extern int afile_io_has_completed(FileIo* io);
// wait until the transfer <io> completes, and return its buffer.
// output parameters are zeroed on error.
extern LibError afile_io_wait(FileIo* io, void*& p, size_t& size);
extern LibError afile_io_wait(FileIo* io, u8*& p, size_t& size);
// finished with transfer <io> - free its buffer (returned by afile_io_wait)
extern LibError afile_io_discard(FileIo* io);
@ -121,7 +121,7 @@ extern ssize_t afile_read(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, File
// the mapping will be removed (if still open) when its archive is closed.
// however, map/unmap calls should still be paired so that the archive mapping
// may be removed when no longer needed.
extern LibError afile_map(File* f, void*& p, size_t& size);
extern LibError afile_map(File* f, u8*& p, size_t& size);
// remove the mapping of file <zf>; fail if not mapped.
//

View File

@ -52,7 +52,7 @@ struct CompressParams
u32 crc;
};
static LibError compress_cb(uintptr_t cb_ctx, const void* block, size_t size, size_t* bytes_processed)
static LibError compress_cb(uintptr_t cb_ctx, const u8* block, size_t size, size_t* bytes_processed)
{
CompressParams* p = (CompressParams*)cb_ctx;
@ -64,14 +64,19 @@ static LibError compress_cb(uintptr_t cb_ctx, const void* block, size_t size, si
p->crc = crc32(p->crc, (const Bytef*)block, (uInt)size);
if(p->attempt_compress)
{
// note: we don't need the return value because comp_finish returns
// the size of the compressed data.
(void)comp_feed(p->ctx, block, size);
}
return INFO::CB_CONTINUE;
}
// final decision on whether to store the file as compressed,
// given the observed compressed/uncompressed sizes.
static bool should_store_compressed(size_t ucsize, size_t csize)
static bool ShouldCompress(size_t ucsize, size_t csize)
{
const float ratio = (float)ucsize / csize;
const ssize_t bytes_saved = (ssize_t)ucsize - (ssize_t)csize;
@ -98,7 +103,7 @@ static bool should_store_compressed(size_t ucsize, size_t csize)
}
static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
ArchiveEntry& ent, void*& file_contents, FileIOBuf& buf) // out
ArchiveEntry& ent, const u8*& file_contents, FileIOBuf& buf) // out
{
struct stat s;
RETURN_ERR(vfs_stat(atom_fn, &s));
@ -117,8 +122,9 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
const bool attempt_compress = !file_type_is_uncompressible(atom_fn);
if(attempt_compress)
{
RETURN_ERR(comp_reset(ctx));
RETURN_ERR(comp_alloc_output(ctx, ucsize));
comp_reset(ctx);
const size_t csizeBound = comp_max_output_size(ctx, ucsize);
RETURN_ERR(comp_alloc_output(ctx, csizeBound));
}
// read file into newly allocated buffer. if attempt_compress, also
@ -131,18 +137,19 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
// if we compressed the file trial-wise, check results and
// decide whether to store as such or not (based on compression ratio)
bool store_compressed = false;
void* cdata = 0; size_t csize = 0;
bool shouldCompress = false;
u8* cdata = 0; size_t csize = 0;
if(attempt_compress)
{
LibError ret = comp_finish(ctx, &cdata, &csize);
u32 checksum; // TODO: use instead of crc
LibError ret = comp_finish(ctx, &cdata, &csize, &checksum);
if(ret < 0)
{
file_buf_free(buf);
return ret;
}
store_compressed = should_store_compressed(ucsize, csize);
shouldCompress = ShouldCompress(ucsize, csize);
}
// store file info
@ -152,7 +159,7 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
ent.flags = 0;
ent.atom_fn = atom_fn;
ent.crc = params.crc;
if(store_compressed)
if(shouldCompress)
{
ent.method = CM_DEFLATE;
ent.csize = (off_t)csize;
@ -162,7 +169,7 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
{
ent.method = CM_NONE;
ent.csize = (off_t)ucsize;
file_contents = (void*)buf;
file_contents = buf;
}
// note: no need to free cdata - it is owned by the
@ -173,8 +180,7 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
LibError archive_build_init(const char* P_archive_filename, Filenames V_fns,
ArchiveBuildState* ab)
LibError archive_build_init(const char* P_archive_filename, Filenames V_fns, ArchiveBuildState* ab)
{
RETURN_ERR(zip_archive_create(P_archive_filename, &ab->za));
ab->ctx = comp_alloc(CT_COMPRESSION, CM_DEFLATE);
@ -198,7 +204,7 @@ int archive_build_continue(ArchiveBuildState* ab)
if(!V_fn)
break;
ArchiveEntry ent; void* file_contents; FileIOBuf buf;
ArchiveEntry ent; const u8* file_contents; FileIOBuf buf;
if(read_and_compress_file(V_fn, ab->ctx, ent, file_contents, buf) == INFO::OK)
{
(void)zip_archive_add_file(ab->za, &ent, file_contents);

File diff suppressed because it is too large Load Diff

View File

@ -37,12 +37,18 @@ enum CompressionMethod
extern uintptr_t comp_alloc(ContextType type, CompressionMethod method);
/**
* @return an upper bound on the output size for the given amount of input.
* this is used when allocating a single buffer for the whole operation.
**/
extern size_t comp_max_output_size(uintptr_t ctx, size_t inSize);
// set output buffer. all subsequent comp_feed() calls will write into it.
// should only be called once (*) due to the comp_finish() interface - since
// that allows querying the output buffer, it must not be fragmented.
// * the previous output buffer is wiped out by comp_reset, so
// setting it again (once!) after that is allowed and required.
extern void comp_set_output(uintptr_t ctx, void* out, size_t out_size);
extern void comp_set_output(uintptr_t ctx, u8* out, size_t out_size);
// [compression contexts only:] allocate an output buffer big enough to
// hold worst_case_compression_ratio*in_size bytes.
@ -56,10 +62,6 @@ extern void comp_set_output(uintptr_t ctx, void* out, size_t out_size);
// comp_reset. this reduces malloc/free calls.
extern LibError comp_alloc_output(uintptr_t ctx, size_t in_size);
// get current position in output buffer.
// precondition: valid calls to EITHER comp_alloc_output OR comp_set_output.
extern void* comp_get_output(uintptr_t ctx);
// 'feed' the given buffer to the compressor/decompressor.
// returns number of output bytes produced (*), or a negative LibError code.
// * 0 is a legitimate return value - this happens if the input buffer is
@ -67,17 +69,17 @@ extern void* comp_get_output(uintptr_t ctx);
// note: the buffer may be overwritten or freed immediately after - we take
// care of copying and queuing any data that remains (e.g. due to
// lack of output buffer space).
extern ssize_t comp_feed(uintptr_t ctx, const void* in, size_t in_size);
extern ssize_t comp_feed(uintptr_t ctx, const u8* in, size_t in_size);
// feed any remaining queued input data, finish the compress/decompress and
// pass back the output buffer.
extern LibError comp_finish(uintptr_t ctx, void** out, size_t* out_size);
extern LibError comp_finish(uintptr_t ctx, u8** out, size_t* out_size, u32* checksum);
// prepare this context for reuse. the effect is similar to freeing this
// context and creating another.
// rationale: this API avoids reallocating a considerable amount of
// memory (ballpark 200KB LZ window plus output buffer).
extern LibError comp_reset(uintptr_t ctx);
extern void comp_reset(uintptr_t ctx);
// free this context and all associated memory.
extern void comp_free(uintptr_t ctx);

View File

@ -344,7 +344,7 @@ struct PosixFile
int fd;
// for reference counted memory-mapping
void* mapping;
u8* mapping;
uint map_refs;
};
cassert(sizeof(PosixFile) < FILE_OPAQUE_SIZE);
@ -498,7 +498,7 @@ static const uint MAX_MAP_REFS = 255;
// rationale: reference counting is required for zip_map: several
// Zip "mappings" each reference one ZArchive's actual file mapping.
// implement it here so that we also get refcounting for normal files.
LibError file_map(File* f, void*& p, size_t& size)
LibError file_map(File* f, u8*& p, size_t& size)
{
p = 0;
size = 0;
@ -526,7 +526,7 @@ LibError file_map(File* f, void*& p, size_t& size)
return ERR::FAIL; // NOWARN
errno = 0;
pf->mapping = mmap(0, f->size, prot, MAP_PRIVATE, pf->fd, (off_t)0);
pf->mapping = (u8*)mmap(0, f->size, prot, MAP_PRIVATE, pf->fd, (off_t)0);
if(pf->mapping == MAP_FAILED)
return LibError_from_errno();
@ -559,7 +559,7 @@ LibError file_unmap(File* f)
return INFO::OK;
// no more references: remove the mapping
void* p = pf->mapping;
u8* p = pf->mapping;
pf->mapping = 0;
// don't clear f->size - the file is still open.

View File

@ -341,7 +341,7 @@ extern LibError file_cache_invalidate(const char* fn);
// rationale: reference counting is required for zip_map: several
// Zip "mappings" each reference one ZArchive's actual file mapping.
// implement it here so that we also get refcounting for normal files.
extern LibError file_map(File* f, void*& p, size_t& size);
extern LibError file_map(File* f, u8*& p, size_t& size);
// decrement the reference count for the mapping belonging to file <f>.
// fail if there are no references; remove the mapping if the count reaches 0.

View File

@ -341,7 +341,7 @@ success:
// rationale: don't call this "free" because that would run afoul of the
// memory tracker's redirection macro and require #include "lib/nommgr.h".
void dealloc(u8* p, size_t size)
void dealloc(void* p, size_t size)
{
#ifndef NDEBUG
alloc_checker.notify_free(p, size);
@ -349,7 +349,7 @@ success:
const size_t size_pa = round_up(size, BUF_ALIGN);
// make sure entire (aligned!) range is within pool.
if(!pool_contains(&pool, p) || !pool_contains(&pool, p+size_pa-1))
if(!pool_contains(&pool, p) || !pool_contains(&pool, (u8*)p+size_pa-1))
{
debug_warn("invalid pointer");
return;
@ -371,7 +371,7 @@ success:
// write access is restored when buffer is freed.
//
// p and size are the exact (non-padded) values as in dealloc.
void make_read_only(u8* p, size_t size)
void make_read_only(void* p, size_t size)
{
// bail to avoid mprotect failing
if(!size)
@ -449,7 +449,7 @@ private:
// notes:
// - correctly deals with p lying at start/end of pool.
// - p and size_pa are trusted: [p, p+size_pa) lies within the pool.
void coalesce_and_free(u8* p, size_t size_pa)
void coalesce_and_free(void* p, size_t size_pa)
{
// CAVEAT: Header and Footer are wiped out by freelist_remove -
// must use them before that.
@ -458,10 +458,10 @@ private:
// (unless p is at start of pool region)
if(p != pool.da.base)
{
const Footer* footer = (const Footer*)(p-sizeof(Footer));
const Footer* footer = (const Footer*)((u8*)p-sizeof(Footer));
if(is_valid_tag(FOOTER_ID, footer->id, footer->magic, footer->size_pa))
{
p -= footer->size_pa;
(u8*&)p -= footer->size_pa;
size_pa += footer->size_pa;
Header* header = (Header*)p;
freelist_remove(header);
@ -471,7 +471,7 @@ private:
// expand size_pa to include following memory if it was allocated
// and is currently free.
// (unless it starts beyond end of currently committed region)
Header* header = (Header*)(p+size_pa);
Header* header = (Header*)((u8*)p+size_pa);
if((u8*)header < pool.da.base+pool.da.cur_size)
{
if(is_valid_tag(HEADER_ID, header->id, header->magic, header->size_pa))
@ -509,7 +509,7 @@ private:
return (x & -(int)x);
}
void freelist_add(u8* p, size_t size_pa)
void freelist_add(void* p, size_t size_pa)
{
debug_assert((uintptr_t)p % BUF_ALIGN == 0);
debug_assert(size_pa % BUF_ALIGN == 0);
@ -521,7 +521,7 @@ private:
header->id = HEADER_ID;
header->magic = MAGIC;
header->size_pa = size_pa;
Footer* footer = (Footer*)(p+size_pa-sizeof(Footer));
Footer* footer = (Footer*)((u8*)p+size_pa-sizeof(Footer));
footer->id = FOOTER_ID;
footer->magic = MAGIC;
footer->size_pa = size_pa;
@ -571,13 +571,13 @@ private:
{
if(cur->size_pa >= size_pa)
{
u8* p = (u8*)cur;
void* p = cur;
const size_t remnant_pa = cur->size_pa - size_pa;
freelist_remove(cur);
if(remnant_pa)
freelist_add(p+size_pa, remnant_pa);
freelist_add((u8*)p+size_pa, remnant_pa);
return p;
}
@ -865,7 +865,7 @@ private:
// cannot be used.
bool matches(const ExtantBuf& eb, FileIOBuf buf) const
{
return (eb.buf <= buf && buf < (u8*)eb.buf+eb.size);
return (eb.buf <= buf && buf < eb.buf+eb.size);
}
uint epoch;
@ -966,7 +966,7 @@ static void free_padded_buf(FileIOBuf padded_buf, size_t size, bool from_heap =
static void cache_free(FileIOBuf exact_buf, size_t exact_size)
{
cache_allocator.dealloc((u8*)exact_buf, exact_size);
cache_allocator.dealloc((void*)exact_buf, exact_size);
}
static FileIOBuf cache_alloc(size_t size)
@ -1112,7 +1112,7 @@ free_immediately:
void file_buf_add_padding(FileIOBuf exact_buf, size_t exact_size, size_t padding)
{
debug_assert(padding < FILE_BLOCK_SIZE);
FileIOBuf padded_buf = (FileIOBuf)((u8*)exact_buf + padding);
FileIOBuf padded_buf = exact_buf + padding;
exact_buf_oracle.add(exact_buf, exact_size, padded_buf);
}
@ -1169,7 +1169,7 @@ LibError file_cache_add(FileIOBuf buf, size_t size, const char* atom_fn,
ExactBufOracle::BufAndSize bas = exact_buf_oracle.get(buf, size);
FileIOBuf exact_buf = bas.first; size_t exact_size = bas.second;
cache_allocator.make_read_only((u8*)exact_buf, exact_size);
cache_allocator.make_read_only((void*)exact_buf, exact_size);
file_cache.add(atom_fn, buf, size, cost);
@ -1285,7 +1285,7 @@ void* file_cache_allocator_alloc(size_t size)
{
return cache_allocator.alloc(size);
}
void file_cache_allocator_free(u8* p, size_t size)
void file_cache_allocator_free(void* p, size_t size)
{
return cache_allocator.dealloc(p, size);
}

View File

@ -101,7 +101,7 @@ extern void file_cache_shutdown();
// test access mechanism
extern void* file_cache_allocator_alloc(size_t size);
extern void file_cache_allocator_free(u8* p, size_t size);
extern void file_cache_allocator_free(void* p, size_t size);
extern void file_cache_allocator_reset();
#endif // #ifndef INCLUDED_FILE_CACHE

View File

@ -48,7 +48,7 @@ const size_t FILE_BLOCK_SIZE = 32*KiB;
// bytes_processed is 0 if return value != { INFO::OK, INFO::CB_CONTINUE }
// note: don't abort if = 0: zip callback may not actually
// output anything if passed very little data.
extern LibError file_io_call_back(const void* block, size_t size,
extern LibError file_io_call_back(const u8* block, size_t size,
FileIOCB cb, uintptr_t ctx, size_t& bytes_processed);

View File

@ -89,7 +89,7 @@ static AiocbAllocator aiocb_allocator;
// starts transferring to/from the given buffer.
// no attempt is made at aligning or padding the transfer.
LibError file_io_issue(File* f, off_t ofs, size_t size, void* p, FileIo* io)
LibError file_io_issue(File* f, off_t ofs, size_t size, u8* p, FileIo* io)
{
debug_printf("FILE| issue ofs=0x%X size=0x%X\n", ofs, size);
@ -163,7 +163,7 @@ int file_io_has_completed(FileIo* io)
}
LibError file_io_wait(FileIo* io, void*& p, size_t& size)
LibError file_io_wait(FileIo* io, u8*& p, size_t& size)
{
PosixFileIo* pio = (PosixFileIo*)io;
// debug_printf("FILE| wait io=%p\n", io);
@ -188,7 +188,7 @@ LibError file_io_wait(FileIo* io, void*& p, size_t& size)
// (see explanation in file_io_issue).
debug_assert(bytes_transferred >= (ssize_t)(cb->aio_nbytes-AIO_SECTOR_SIZE));
p = (void*)cb->aio_buf; // cast from volatile void*
p = (u8*)cb->aio_buf; // cast from volatile void*
size = bytes_transferred;
return INFO::OK;
}
@ -249,7 +249,7 @@ size_t file_sector_size;
// bytes_processed is 0 if return value != { INFO::OK, INFO::CB_CONTINUE }
// note: don't abort if = 0: zip callback may not actually
// output anything if passed very little data.
LibError file_io_call_back(const void* block, size_t size,
LibError file_io_call_back(const u8* block, size_t size,
FileIOCB cb, uintptr_t ctx, size_t& bytes_processed)
{
if(cb)
@ -376,17 +376,17 @@ class IOManager
lseek(fd, start_ofs, SEEK_SET);
// emulate temp buffers - we take care of allocating and freeing.
void* dst;
void* dst_mem = 0;
u8* dst;
u8* dst_mem = 0;
if(pbuf == FILE_BUF_TEMP)
{
dst_mem = malloc(size);
dst_mem = (u8*)malloc(size);
if(!dst_mem)
WARN_RETURN(ERR::NO_MEM);
dst = dst_mem;
}
else
dst = (void*)*pbuf;
dst = (u8*)*pbuf; // WARNING: FileIOBuf is nominally const; if that's ever enforced, this may need to change.
ssize_t total_transferred;
if(is_write)
@ -464,7 +464,7 @@ class IOManager
else
buf = (char*)*pbuf + total_issued;
LibError ret = file_io_issue(f, ofs, issue_size, buf, &slot.io);
LibError ret = file_io_issue(f, ofs, issue_size, (u8*)buf, &slot.io);
// transfer failed - loop will now terminate after
// waiting for all pending transfers to complete.
if(ret != INFO::OK)
@ -474,7 +474,7 @@ class IOManager
total_issued += issue_size;
}
void wait(IOSlot& slot, void*& block, size_t& block_size)
void wait(IOSlot& slot, u8*& block, size_t& block_size)
{
// get completed block address/size
if(slot.cached_block)
@ -520,7 +520,7 @@ class IOManager
total_transferred += block_size;
}
void process(IOSlot& slot, void* block, size_t block_size, FileIOCB cb, uintptr_t ctx)
void process(IOSlot& slot, u8* block, size_t block_size, FileIOCB cb, uintptr_t ctx)
{
if(err == INFO::CB_CONTINUE)
{
@ -563,7 +563,7 @@ again:
if(!queue.empty())
{
IOSlot& slot = queue.front();
void* block; size_t block_size;
u8* block; size_t block_size;
wait(slot, block, block_size);
process(slot, block, block_size, cb, cb_ctx);
queue.pop_front();

View File

@ -47,14 +47,14 @@ struct FileIo
//
// rationale: this interface is more convenient than implicitly advancing a
// file pointer because archive.cpp often accesses random offsets.
extern LibError file_io_issue(File* f, off_t ofs, size_t size, void* buf, FileIo* io);
extern LibError file_io_issue(File* f, off_t ofs, size_t size, u8* buf, FileIo* io);
// indicates if the given IO has completed.
// return value: 0 if pending, 1 if complete, < 0 on error.
extern int file_io_has_completed(FileIo* io);
// wait for the given IO to complete. passes back its buffer and size.
extern LibError file_io_wait(FileIo* io, void*& p, size_t& size);
extern LibError file_io_wait(FileIo* io, u8*& p, size_t& size);
// indicates the IO's buffer is no longer needed and frees that memory.
extern LibError file_io_discard(FileIo* io);
@ -79,7 +79,7 @@ extern size_t file_sector_size;
// for the entire IO. we do not split into fake blocks because it is
// advantageous (e.g. for decompressors) to have all data at once, if available
// anyway.
typedef LibError (*FileIOCB)(uintptr_t ctx, const void* block, size_t size, size_t* bytes_processed);
typedef LibError (*FileIOCB)(uintptr_t ctx, const u8* block, size_t size, size_t* bytes_processed);
typedef const u8* FileIOBuf;

View File

@ -176,7 +176,7 @@ LibError dir_filtered_next_ent(DirIterator* di, DirEnt* ent, const char* filter)
//
// note: EnumDirEntsCB path and ent are only valid during the callback.
LibError vfs_dir_enum(const char* start_path, uint flags, const char* user_filter,
DirEnumCB cb, void* context)
DirEnumCB cb, uintptr_t cbData)
{
debug_assert((flags & ~(VFS_DIR_RECURSIVE)) == 0);
const bool recursive = (flags & VFS_DIR_RECURSIVE) != 0;
@ -236,10 +236,10 @@ LibError vfs_dir_enum(const char* start_path, uint flags, const char* user_filte
dir_queue.push(atom_path);
if(user_filter_wants_dirs)
cb(atom_path, &ent, context);
cb(atom_path, &ent, cbData);
}
else
cb(atom_path, &ent, context);
cb(atom_path, &ent, cbData);
}
vfs_dir_close(hdir);

View File

@ -15,17 +15,19 @@ public:
for(size_t i = 0; i < data_size; i++)
data[i] = rand() & 0x07;
void* cdata; size_t csize;
u8* cdata; size_t csize;
u8 ucdata[data_size];
// compress
uintptr_t c = comp_alloc(CT_COMPRESSION, CM_DEFLATE);
{
TS_ASSERT(c != 0);
TS_ASSERT_OK(comp_alloc_output(c, data_size));
const size_t csizeBound = comp_max_output_size(c, data_size);
TS_ASSERT_OK(comp_alloc_output(c, csizeBound));
const ssize_t cdata_produced = comp_feed(c, data, data_size);
TS_ASSERT(cdata_produced >= 0);
TS_ASSERT_OK(comp_finish(c, &cdata, &csize));
u32 checksum;
TS_ASSERT_OK(comp_finish(c, &cdata, &csize, &checksum));
TS_ASSERT(cdata_produced <= (ssize_t)csize); // can't have produced more than total
}
@ -36,8 +38,8 @@ public:
comp_set_output(d, ucdata, data_size);
const ssize_t ucdata_produced = comp_feed(d, cdata, csize);
TS_ASSERT(ucdata_produced >= 0);
void* ucdata_final; size_t ucsize_final;
TS_ASSERT_OK(comp_finish(d, &ucdata_final, &ucsize_final));
u8* ucdata_final; size_t ucsize_final; u32 checksum;
TS_ASSERT_OK(comp_finish(d, &ucdata_final, &ucsize_final, &checksum));
TS_ASSERT(ucdata_produced <= (ssize_t)ucsize_final); // can't have produced more than total
TS_ASSERT_EQUALS(ucdata_final, ucdata); // output buffer address is same
TS_ASSERT_EQUALS(ucsize_final, data_size); // correct amount of output

View File

@ -34,7 +34,7 @@ public:
AllocMap::iterator it = allocations.begin();
for(; chosen_idx != 0; chosen_idx--)
++it;
file_cache_allocator_free((u8*)it->first, it->second);
file_cache_allocator_free(it->first, it->second);
allocations.erase(it);
}

View File

@ -443,7 +443,7 @@ LibError vfs_load(const char* V_fn, FileIOBuf& buf, size_t& size,
// caveat: pads file to next max(4kb, sector_size) boundary
// (due to limitation of Win32 FILE_FLAG_NO_BUFFERING I/O).
// if that's a problem, specify FILE_NO_AIO when opening.
ssize_t vfs_store(const char* V_fn, const void* p, const size_t size, uint flags /* default 0 */)
ssize_t vfs_store(const char* V_fn, const u8* p, const size_t size, uint flags /* default 0 */)
{
Handle hf = vfs_open(V_fn, flags|FILE_WRITE);
H_DEREF(hf, VFile, vf);
@ -466,7 +466,7 @@ struct VIo
{
Handle hf;
size_t size;
void* buf;
u8* buf;
FileIo io;
};
@ -477,7 +477,7 @@ static void VIo_init(VIo* vio, va_list args)
{
vio->hf = va_arg(args, Handle);
vio->size = va_arg(args, size_t);
vio->buf = va_arg(args, void*);
vio->buf = va_arg(args, u8*);
}
static void VIo_dtor(VIo* vio)
@ -495,7 +495,7 @@ static void VIo_dtor(VIo* vio)
static LibError VIo_reload(VIo* vio, const char* UNUSED(fn), Handle UNUSED(h))
{
size_t size = vio->size;
void* buf = vio->buf;
u8* buf = vio->buf;
H_DEREF(vio->hf, VFile, vf);
off_t ofs = vf->ofs;
@ -524,7 +524,7 @@ static LibError VIo_to_string(const VIo* vio, char* buf)
// begin transferring <size> bytes, starting at <ofs>. get result
// with vfs_io_wait; when no longer needed, free via vfs_io_discard.
Handle vfs_io_issue(Handle hf, size_t size, void* buf)
Handle vfs_io_issue(Handle hf, size_t size, u8* buf)
{
const char* fn = 0;
uint flags = 0;
@ -550,7 +550,7 @@ int vfs_io_has_completed(Handle hio)
// wait until the transfer <hio> completes, and return its buffer.
// output parameters are zeroed on error.
LibError vfs_io_wait(Handle hio, void*& p, size_t& size)
LibError vfs_io_wait(Handle hio, u8*& p, size_t& size)
{
H_DEREF(hio, VIo, vio);
return xfile_io_wait(&vio->io, p, size);
@ -571,7 +571,7 @@ LibError vfs_io_wait(Handle hio, void*& p, size_t& size)
// the mapping will be removed (if still open) when its file is closed.
// however, map/unmap calls should still be paired so that the mapping
// may be removed when no longer needed.
LibError vfs_map(const Handle hf, const uint UNUSED(flags), void*& p, size_t& size)
LibError vfs_map(const Handle hf, const uint UNUSED(flags), u8*& p, size_t& size)
{
p = 0;
size = 0;

View File

@ -314,7 +314,7 @@ extern LibError vfs_dir_next_ent(Handle hd, DirEnt* ent, const char* filter = 0)
// its subdirectories as well), passing their complete path+name, the info
// that would be returned by vfs_next_dirent, and user-specified context.
// note: path and ent parameters are only valid during the callback.
typedef void (*DirEnumCB)(const char* path, const DirEnt* ent, void* context);
typedef void (*DirEnumCB)(const char* path, const DirEnt* ent, uintptr_t cbData);
enum DirEnumFlags
{
@ -325,7 +325,7 @@ enum DirEnumFlags
// directory <path>; if flags & VFS_DIR_RECURSIVE, entries in
// subdirectories are also returned.
extern LibError vfs_dir_enum(const char* path, uint enum_flags, const char* filter,
DirEnumCB cb, void* context);
DirEnumCB cb, uintptr_t cbData);
//
@ -363,7 +363,7 @@ extern LibError vfs_close(Handle& h);
// begin transferring <size> bytes, starting at <ofs>. get result
// with vfs_wait_read; when no longer needed, free via vfs_io_discard.
extern Handle vfs_io_issue(Handle hf, size_t size, void* buf);
extern Handle vfs_io_issue(Handle hf, size_t size, u8* buf);
// indicates if the given IO has completed.
// return value: 0 if pending, 1 if complete, < 0 on error.
@ -371,7 +371,7 @@ extern int vfs_io_has_completed(Handle hio);
// wait until the transfer <hio> completes, and return its buffer.
// output parameters are zeroed on error.
extern LibError vfs_io_wait(Handle hio, void*& p, size_t& size);
extern LibError vfs_io_wait(Handle hio, u8*& p, size_t& size);
// finished with transfer <hio> - free its buffer (returned by vfs_wait_read).
extern LibError vfs_io_discard(Handle& hio);
@ -414,7 +414,7 @@ extern LibError vfs_load(const char* V_fn, FileIOBuf& buf, size_t& size,
uint flags = 0, FileIOCB cb = 0, uintptr_t cb_ctx = 0);
extern ssize_t vfs_store(const char* fn, const void* p, size_t size, uint flags = 0);
extern ssize_t vfs_store(const char* fn, const u8* p, size_t size, uint flags = 0);
//
@ -435,7 +435,7 @@ extern ssize_t vfs_store(const char* fn, const void* p, size_t size, uint flags
// the mapping will be removed (if still open) when its file is closed.
// however, map/unmap calls should still be paired so that the mapping
// may be removed when no longer needed.
extern LibError vfs_map(Handle hf, uint flags, void*& p, size_t& size);
extern LibError vfs_map(Handle hf, uint flags, u8*& p, size_t& size);
// decrement the reference count for the mapping belonging to file <f>.
// fail if there are no references; remove the mapping if the count reaches 0.

View File

@ -125,9 +125,9 @@ static IdMgr id_mgr;
// optimizations like reading from vfs_tree container directly.
class FileGatherer
{
static void EntCb(const char* path, const DirEnt* ent, void* context)
static void EntCb(const char* path, const DirEnt* ent, uintptr_t cbData)
{
FileNodes* file_nodes = (FileNodes*)context;
FileNodes* file_nodes = (FileNodes*)cbData;
// we only want files
if(DIRENT_IS_DIR(ent))
@ -148,7 +148,7 @@ public:
// TODO: only add entries from mount points that have
// VFS_MOUNT_ARCHIVE flag set (avoids adding screenshots etc.)
vfs_dir_enum("", VFS_DIR_RECURSIVE, 0, EntCb, &file_nodes);
vfs_dir_enum("", VFS_DIR_RECURSIVE, 0, EntCb, (uintptr_t)&file_nodes);
// MAX_IDS is a rather large limit on number of files, but must not
// be exceeded (otherwise FileId overflows).

View File

@ -180,7 +180,7 @@ LibError xfile_validate(const File* f)
// IO
//
LibError xfile_io_issue(File* f, off_t ofs, size_t size, void* buf, FileIo* io)
LibError xfile_io_issue(File* f, off_t ofs, size_t size, u8* buf, FileIo* io)
{
io->type = f->type;
CHECK_VTBL(io->type);
@ -193,7 +193,7 @@ int xfile_io_has_completed(FileIo* io)
return io->type->io_has_completed(io);
}
LibError xfile_io_wait(FileIo* io, void*& p, size_t& size)
LibError xfile_io_wait(FileIo* io, u8*& p, size_t& size)
{
CHECK_VTBL(io->type);
return io->type->io_wait(io, p, size);
@ -226,7 +226,7 @@ ssize_t xfile_io(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb,
// file mapping
//
LibError xfile_map(File* f, void*& p, size_t& size)
LibError xfile_map(File* f, u8*& p, size_t& size)
{
CHECK_VTBL(f->type);
return f->type->map(f, p, size);

View File

@ -37,15 +37,15 @@ struct FileProvider_VTbl
LibError (*file_validate)(const File* f);
// IO
LibError (*io_issue)(File* f, off_t ofs, size_t size, void* buf, FileIo* io);
LibError (*io_issue)(File* f, off_t ofs, size_t size, u8* buf, FileIo* io);
int (*io_has_completed)(FileIo* io);
LibError (*io_wait)(FileIo* io, void*& p, size_t& size);
LibError (*io_wait)(FileIo* io, u8*& p, size_t& size);
LibError (*io_discard)(FileIo* io);
LibError (*io_validate)(const FileIo* io);
ssize_t (*io)(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb, uintptr_t ctx);
// file mapping
LibError (*map)(File* f, void*& p, size_t& size);
LibError (*map)(File* f, u8*& p, size_t& size);
LibError (*unmap)(File* f);
};
@ -59,14 +59,14 @@ extern LibError xfile_open(const char* V_path, uint flags, TFile* tf, File* f);
extern LibError xfile_close(File* f);
extern LibError xfile_validate(const File* f);
extern LibError xfile_io_issue(File* f, off_t ofs, size_t size, void* buf, FileIo* io);
extern LibError xfile_io_issue(File* f, off_t ofs, size_t size, u8* buf, FileIo* io);
extern int xfile_io_has_completed(FileIo* io);
extern LibError xfile_io_wait(FileIo* io, void*& p, size_t& size);
extern LibError xfile_io_wait(FileIo* io, u8*& p, size_t& size);
extern LibError xfile_io_discard(FileIo* io);
extern LibError xfile_io_validate(const FileIo* io);
extern ssize_t xfile_io(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb, uintptr_t ctx);
extern LibError xfile_map(File* f, void*& p, size_t& size);
extern LibError xfile_map(File* f, u8*& p, size_t& size);
extern LibError xfile_unmap(File* f);
#endif // #ifndef INCLUDED_VFS_REDIRECTOR

View File

@ -392,9 +392,9 @@ struct LookupCbParams : boost::noncopyable
}
};
static LibError lookup_cb(const char* component, bool is_dir, void* ctx)
static LibError lookup_cb(const char* component, bool is_dir, uintptr_t cbData)
{
LookupCbParams* p = (LookupCbParams*)ctx;
LookupCbParams* p = (LookupCbParams*)cbData;
const TNodeType type = is_dir? NT_DIR : NT_FILE;
p->td->populate();
@ -424,7 +424,7 @@ static LibError lookup(TDir* td, const char* path, uint flags, TNode** pnode)
debug_assert( (flags & ~(LF_CREATE_MISSING|LF_START_DIR)) == 0 );
LookupCbParams p(flags, td);
RETURN_ERR(path_foreach_component(path, lookup_cb, &p));
RETURN_ERR(path_foreach_component(path, lookup_cb, (uintptr_t)&p));
// success.
*pnode = p.node;
@ -601,9 +601,9 @@ struct AddPathCbParams : boost::noncopyable
: m(m_), td(tree_root) {}
};
static LibError add_path_cb(const char* component, bool is_dir, void* ctx)
static LibError add_path_cb(const char* component, bool is_dir, uintptr_t cbData)
{
AddPathCbParams* p = (AddPathCbParams*)ctx;
AddPathCbParams* p = (AddPathCbParams*)cbData;
// should only be called for directory paths, so complain if not dir.
if(!is_dir)
@ -626,7 +626,7 @@ LibError tree_add_path(const char* V_dir_path, const Mount* m, TDir** ptd)
debug_assert(VFS_PATH_IS_DIR(V_dir_path));
AddPathCbParams p(m);
RETURN_ERR(path_foreach_component(V_dir_path, add_path_cb, &p));
RETURN_ERR(path_foreach_component(V_dir_path, add_path_cb, (uintptr_t)&p));
*ptd = p.td;
return INFO::OK;
}

View File

@ -297,7 +297,7 @@ static void ecdr_assemble(ECDR* dst_ecdr_le, uint cd_entries, off_t cd_ofs, size
// the given ID (fourcc). <record_size> includes ID field) bytes must
// remain before EOF - this makes sure the record is completely in the file.
// used by z_find_ecdr and z_extract_cdfh.
static const u8* za_find_id(const u8* buf, size_t size, const void* start, u32 magic, size_t record_size)
static const u8* za_find_id(const u8* buf, size_t size, const u8* start, u32 magic, size_t record_size)
{
ssize_t bytes_left = (ssize_t)((buf+size) - (u8*)start - record_size);
@ -486,7 +486,7 @@ struct LFH_Copier
size_t lfh_bytes_remaining;
};
static LibError lfh_copier_cb(uintptr_t ctx, const void* block, size_t size, size_t* bytes_processed)
static LibError lfh_copier_cb(uintptr_t ctx, const u8* block, size_t size, size_t* bytes_processed)
{
LFH_Copier* p = (LFH_Copier*)ctx;
@ -580,7 +580,7 @@ LibError zip_archive_create(const char* zip_filename, ZipArchive** pza)
// can be CM_NONE.
// IO cost: writes out <file_contents> to disk (we don't currently attempt
// any sort of write-buffering).
LibError zip_archive_add_file(ZipArchive* za, const ArchiveEntry* ae, void* file_contents)
LibError zip_archive_add_file(ZipArchive* za, const ArchiveEntry* ae, const u8* file_contents)
{
const size_t fn_len = strlen(ae->atom_fn);

View File

@ -51,7 +51,7 @@ extern LibError zip_archive_create(const char* zip_filename, ZipArchive** pza);
// can be CM_NONE.
// IO cost: writes out <file_contents> to disk (we don't currently attempt
// any sort of write-buffering).
extern LibError zip_archive_add_file(ZipArchive* za, const ArchiveEntry* ae, void* file_contents);
extern LibError zip_archive_add_file(ZipArchive* za, const ArchiveEntry* ae, const u8* file_contents);
// write out the archive to disk; only hereafter is it valid.
// frees the ZipArchive instance.

View File

@ -719,21 +719,21 @@ static const int TOTAL_IOS = MAX_STREAMS * MAX_IOS;
static const size_t TOTAL_BUF_SIZE = TOTAL_IOS * STREAM_BUF_SIZE;
// one large allocation for all buffers
static void* io_bufs;
static u8* io_bufs;
// list of free buffers. start of buffer holds pointer to next in list.
static void* io_buf_freelist;
static u8* io_buf_freelist;
/**
* Free an IO buffer.
*
* @param void* IO buffer
* @param p IO buffer
*/
static void io_buf_free(void* p)
static void io_buf_free(u8* p)
{
debug_assert(io_bufs <= p && p <= (char*)io_bufs+TOTAL_BUF_SIZE);
*(void**)p = io_buf_freelist;
debug_assert(io_bufs <= p && p <= io_bufs+TOTAL_BUF_SIZE);
*(u8**)p = io_buf_freelist;
io_buf_freelist = p;
}
@ -745,13 +745,13 @@ static void io_buf_free(void* p)
static void io_buf_init()
{
// allocate 1 big aligned block for all buffers.
io_bufs = mem_alloc(TOTAL_BUF_SIZE, 4*KiB);
io_bufs = (u8*)mem_alloc(TOTAL_BUF_SIZE, 4*KiB);
// .. failed; io_buf_alloc calls will return 0
if(!io_bufs)
return;
// build freelist.
char * p = (char*)io_bufs;
u8* p = io_bufs;
for(int i = 0; i < TOTAL_IOS; i++)
{
io_buf_free(p);
@ -763,13 +763,13 @@ static void io_buf_init()
/**
* Allocate a fixed-size IO buffer.
*
* @return void* buffer, or 0 (and warning) if not enough memory.
* @return buffer, or 0 (and warning) if not enough memory.
*/
static void* io_buf_alloc()
static u8* io_buf_alloc()
{
ONCE(io_buf_init());
void* buf = io_buf_freelist;
u8* buf = io_buf_freelist;
// note: we have to bail now; can't update io_buf_freelist.
if(!buf)
{
@ -784,7 +784,7 @@ static void* io_buf_alloc()
return 0;
}
io_buf_freelist = *(void**)io_buf_freelist;
io_buf_freelist = *(u8**)io_buf_freelist;
return buf;
}
@ -817,8 +817,7 @@ struct Stream
Handle ios[MAX_IOS];
uint active_ios;
/// set by stream_buf_get, used by stream_buf_discard to free buf.
void* last_buf;
u8* last_buf;
};
/**
@ -833,7 +832,7 @@ static LibError stream_issue(Stream * s)
if(s->active_ios >= MAX_IOS)
return INFO::OK;
void* buf = io_buf_alloc();
u8* buf = io_buf_alloc();
if(!buf)
WARN_RETURN(ERR::NO_MEM);
@ -853,7 +852,7 @@ static LibError stream_issue(Stream * s)
* @return LibError; if the first pending IO hasn't completed,
* ERR::AGAIN (not an error).
*/
static LibError stream_buf_get(Stream * s, void*& data, size_t& size)
static LibError stream_buf_get(Stream * s, u8*& data, size_t& size)
{
if(s->active_ios == 0)
WARN_RETURN(ERR::IO_EOF);
@ -947,7 +946,7 @@ static LibError stream_close(Stream * s)
for(uint i = 0; i < s->active_ios; i++)
{
// .. wait until complete,
void* data; size_t size; // unused
u8* data; size_t size; // unused
do
err = stream_buf_get(s, data, size);
while(err == ERR::AGAIN);
@ -1322,8 +1321,7 @@ static LibError snd_data_buf_get(Handle hsd, ALuint& al_buf)
// stream:
// .. check if IO finished.
void* data;
size_t size;
u8* data; size_t size;
err = stream_buf_get(&sd->s, data, size);
if(err == ERR::AGAIN)
return ERR::AGAIN; // NOWARN

View File

@ -752,7 +752,7 @@ void CConsole::SaveHistory()
break;
buffer = CStrW(*it).ToUTF8() + "\n" + buffer;
}
vfs_store(m_sHistoryFile, (const void*)buffer.c_str(), buffer.length(), FILE_NO_AIO);
vfs_store(m_sHistoryFile, (const u8*)buffer.c_str(), buffer.length(), FILE_NO_AIO);
}
void CConsole::SendChatMessage(const wchar_t *szMessage)

View File

@ -35,7 +35,7 @@ PSRETURN CVFSFile::Load(const char* filename, uint flags /* = 0 */)
return PSRETURN_OK;
}
const void* CVFSFile::GetBuffer() const
const u8* CVFSFile::GetBuffer() const
{
// Die in a very obvious way, to avoid subtle problems caused by
// accidentally forgetting to check that the open succeeded

View File

@ -23,7 +23,7 @@ public:
PSRETURN Load(const char* filename, uint flags = 0);
// These die if called when no file has been successfully loaded.
const void* GetBuffer() const;
const u8* GetBuffer() const;
size_t GetBufferSize() const;
CStr GetAsString() const;

View File

@ -954,9 +954,9 @@ void CMouseoverEntities::RenderRallyPoints()
glDisable( GL_BLEND );
}
// Helper function for CSelectedEntities::LoadUnitUiTextures
static void LoadUnitUIThunk( const char* path, const DirEnt* UNUSED(ent), void* context )
static void LoadUnitUIThunk( const char* path, const DirEnt* UNUSED(ent), uintptr_t cbData )
{
std::map<CStr, Handle>* textures = (std::map<CStr, Handle>*) context;
std::map<CStr, Handle>* textures = (std::map<CStr, Handle>*)cbData;
CStr name(path);
if ( !tex_is_known_extension(path) )
@ -974,7 +974,7 @@ static void LoadUnitUIThunk( const char* path, const DirEnt* UNUSED(ent), void*
int CSelectedEntities::LoadUnitUiTextures()
{
THROW_ERR( vfs_dir_enum( "art/textures/ui/session/icons/", VFS_DIR_RECURSIVE,
NULL, LoadUnitUIThunk, &m_unitUITextures ) );
NULL, LoadUnitUIThunk, (uintptr_t)&m_unitUITextures ) );
return 0;
}
void CSelectedEntities::DestroyUnitUiTextures()

View File

@ -327,7 +327,7 @@ PSRETURN CXeromyces::Load(const char* filename)
handler.CreateXMB();
// Save the file to disk, so it can be loaded quickly next time
vfs_store(xmbPath, handler.buffer.buffer, handler.buffer.length, FILE_NO_AIO);
vfs_store(xmbPath, (const u8*)handler.buffer.buffer, handler.buffer.length, FILE_NO_AIO);
// Store the buffer so it can be freed later
XMBBuffer = handler.buffer.steal_buffer();
@ -348,7 +348,7 @@ bool CXeromyces::ReadXMBFile(const char* filename)
if (file->Load(filename, FILE_LONG_LIVED) != PSRETURN_OK)
return false;
const void* buffer = file->GetBuffer();
const u8* buffer = file->GetBuffer();
debug_assert(file->GetBufferSize() >= 42 && "Invalid XMB file"); // 42 bytes is the smallest possible XMB. (Well, maybe not quite, but it's a nice number.)
debug_assert(*(u32*)buffer == HeaderMagic && "Invalid XMB file header");

View File

@ -50,9 +50,9 @@ struct BuildDirEntListState
};
// called for each matching directory entry; add its full pathname to array.
static void BuildDirEntListCB(const char* path, const DirEnt* UNUSED(ent), void* context)
static void BuildDirEntListCB(const char* path, const DirEnt* UNUSED(ent), uintptr_t cbData)
{
BuildDirEntListState* s = (BuildDirEntListState*)context;
BuildDirEntListState* s = (BuildDirEntListState*)cbData;
jsval val = ToJSVal( CStr ( path ) );
// note: <path> is already directory + name!
@ -105,7 +105,7 @@ JSBool JSI_VFS::BuildDirEntList( JSContext* cx, JSObject* UNUSED(obj), uintN arg
// build array in the callback function
BuildDirEntListState state(cx);
vfs_dir_enum( path, flags, filter, BuildDirEntListCB, &state );
vfs_dir_enum( path, flags, filter, BuildDirEntListCB, (uintptr_t)&state );
*rval = OBJECT_TO_JSVAL( state.filename_array );
return( JS_TRUE );

View File

@ -23,9 +23,9 @@ void CEntityTemplateCollection::LoadFile( const char* path )
m_templateFilenames[tag] = path;
}
static void LoadFileThunk( const char* path, const DirEnt* UNUSED(ent), void* context )
static void LoadFileThunk( const char* path, const DirEnt* UNUSED(ent), uintptr_t cbData )
{
CEntityTemplateCollection* this_ = (CEntityTemplateCollection*)context;
CEntityTemplateCollection* this_ = (CEntityTemplateCollection*)cbData;
this_->LoadFile(path);
}
@ -33,7 +33,7 @@ int CEntityTemplateCollection::LoadTemplates()
{
// List all files in entities/ and its subdirectories.
THROW_ERR( vfs_dir_enum( "entities/", VFS_DIR_RECURSIVE, "*.xml",
LoadFileThunk, this ) );
LoadFileThunk, (uintptr_t)this ) );
/*// Load all the templates; this is necessary so that we can apply techs to them
// (otherwise a tech can't affect the template of a unit that doesn't yet exist)

View File

@ -21,9 +21,9 @@ void CFormationCollection::LoadFile( const char* path )
m_templateFilenames[tag] = path;
}
static void LoadFormationThunk( const char* path, const DirEnt* UNUSED(ent), void* context )
static void LoadFormationThunk( const char* path, const DirEnt* UNUSED(ent), uintptr_t cbData )
{
CFormationCollection* this_ = (CFormationCollection*)context;
CFormationCollection* this_ = (CFormationCollection*)cbData;
this_->LoadFile(path);
}
@ -31,7 +31,7 @@ int CFormationCollection::LoadTemplates()
{
// Load all files in formations and subdirectories.
THROW_ERR( vfs_dir_enum( "formations", VFS_DIR_RECURSIVE, "*.xml",
LoadFormationThunk, this ) );
LoadFormationThunk, (uintptr_t)this ) );
return 0;
}

View File

@ -14,9 +14,9 @@ void CTechnologyCollection::LoadFile( const char* path )
m_techFilenames[tag] = path;
}
static void LoadTechThunk( const char* path, const DirEnt* UNUSED(ent), void* context )
static void LoadTechThunk( const char* path, const DirEnt* UNUSED(ent), uintptr_t cbData )
{
CTechnologyCollection* this_ = (CTechnologyCollection*)context;
CTechnologyCollection* this_ = (CTechnologyCollection*)cbData;
this_->LoadFile(path);
}
@ -24,7 +24,7 @@ int CTechnologyCollection::LoadTechnologies()
{
// Load all files in techs/ and subdirectories.
THROW_ERR( vfs_dir_enum( "technologies/", VFS_DIR_RECURSIVE, "*.xml",
LoadTechThunk, this ) );
LoadTechThunk, (uintptr_t)this ) );
return 0;
}