
archive: make decompressor more robust.

archive_builder: move logic into a Compressor functor. avoid direct
dependency on ZLib
compression: add provision for calculating checksum

Xeromyces: remove no longer needed zlib.h

This was SVN commit r5388.
janwas 2007-10-03 11:57:11 +00:00
parent 7421fa79ba
commit 62568fda28
7 changed files with 259 additions and 155 deletions

archive.cpp

@@ -247,6 +247,7 @@ struct ArchiveFile
off_t ofs; // in archive
off_t csize;
CompressionMethod method;
u32 checksum;
off_t last_cofs; // in compressed file
@@ -350,6 +351,7 @@ LibError afile_open(const Handle ha, const char* fn, uintptr_t memento, uint fla
af->ofs = ent->ofs;
af->csize = ent->csize;
af->method = ent->method;
af->checksum = ent->checksum;
af->ha = ha;
af->ctx = ctx;
af->is_mapped = 0;
@@ -515,9 +517,9 @@ class Decompressor
{
public:
Decompressor(uintptr_t ctx, FileIOBuf* pbuf, size_t usizeMax, FileIOCB cb, uintptr_t cbData)
: m_ctx(ctx)
, m_udataSize(usizeMax), m_csizeTotal(0), m_usizeTotal(0)
, m_cb(cb), m_cbData(cbData)
: m_ctx(ctx)
, m_udataSize(usizeMax), m_csizeTotal(0), m_usizeTotal(0)
, m_cb(cb), m_cbData(cbData)
{
debug_assert(m_ctx != 0);
@@ -530,7 +532,7 @@ public:
m_udata = (u8*)*pbuf; // WARNING: FileIOBuf is nominally const; if that's ever enforced, this may need to change.
}
LibError operator()(const u8* cblock, size_t cblockSize, size_t* bytes_processed)
LibError Feed(const u8* cblock, size_t cblockSize, size_t* bytes_processed)
{
// when decompressing into the temp buffer, always start at ofs=0.
const size_t ofs = m_tmpBuf.get()? 0 : m_usizeTotal;
@@ -552,6 +554,12 @@ public:
return ret;
}
LibError Finish(u32& checksum)
{
u8* out; size_t outSize; // unused
return comp_finish(m_ctx, &out, &outSize, &checksum);
}
size_t NumCompressedBytesProcessed() const
{
return m_csizeTotal;
@@ -579,7 +587,7 @@ static LibError decompressor_feed_cb(uintptr_t cbData,
const u8* cblock, size_t cblockSize, size_t* bytes_processed)
{
Decompressor& decompressor = *(Decompressor*)cbData;
return decompressor(cblock, cblockSize, bytes_processed);
return decompressor.Feed(cblock, cblockSize, bytes_processed);
}
@@ -595,7 +603,7 @@ static LibError decompressor_feed_cb(uintptr_t cbData,
// (quasi-parallel, without the complexity of threads).
//
// return bytes read, or a negative error code.
ssize_t afile_read(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb, uintptr_t cb_ctx)
ssize_t afile_read(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb, uintptr_t cbData)
{
CHECK_AFILE(f);
ArchiveFile* af = (ArchiveFile*)f->opaque;
@@ -620,7 +628,7 @@ ssize_t afile_read(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb
bool we_allocated = (pbuf != FILE_BUF_TEMP) && (*pbuf == FILE_BUF_ALLOC);
// no need to set last_cofs - only checked if compressed.
ssize_t bytes_read = file_io(&a->f, af->ofs+ofs, size, pbuf, cb, cb_ctx);
ssize_t bytes_read = file_io(&a->f, af->ofs+ofs, size, pbuf, cb, cbData);
RETURN_ERR(bytes_read);
if(we_allocated)
(void)file_buf_set_real_fn(*pbuf, f->atom_fn);
@@ -634,10 +642,13 @@ ssize_t afile_read(File* f, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB cb
// enough udata has been produced.
const size_t csize_max = af->csize - af->last_cofs;
Decompressor d(af->ctx, pbuf, size, cb, cb_ctx);
const ssize_t usize_read = file_io(&a->f, cofs, csize_max, FILE_BUF_TEMP, decompressor_feed_cb, (uintptr_t)&d);
Decompressor decompressor(af->ctx, pbuf, size, cb, cbData);
const ssize_t usize_read = file_io(&a->f, cofs, csize_max, FILE_BUF_TEMP, decompressor_feed_cb, (uintptr_t)&decompressor);
u32 checksum;
RETURN_ERR(decompressor.Finish(checksum));
//debug_assert(checksum == af->checksum);
af->last_cofs += (off_t)d.NumCompressedBytesProcessed();
af->last_cofs += (off_t)decompressor.NumCompressedBytesProcessed();
return usize_read;
}
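
The comment above names the key pattern: file_io hands each compressed block to decompressor_feed_cb as soon as it arrives, so decompression overlaps the next read (quasi-parallel, without threads). Below is a minimal self-contained sketch of that block-callback loop; all names are hypothetical, and a plain FILE* plus fread stand in for the real file_io machinery.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// simplified stand-in for file_io's block-callback loop: each chunk is
// passed to the callback as soon as it has been read, so a decompressor
// (or any other consumer) runs while the next chunk is being fetched.
typedef int (*BlockCallback)(uintptr_t cbData, const uint8_t* block,
    size_t blockSize, size_t* bytesProcessed);

ptrdiff_t io_in_blocks(FILE* f, size_t total, BlockCallback cb, uintptr_t cbData)
{
    static uint8_t block[64*1024];   // assumed chunk size
    size_t remaining = total;
    size_t totalProcessed = 0;
    while(remaining != 0)
    {
        const size_t want = (remaining < sizeof(block))? remaining : sizeof(block);
        const size_t got = fread(block, 1, want, f);
        if(got == 0)
            return -1;               // premature EOF or read error
        size_t processed = 0;
        if(cb(cbData, block, got, &processed) < 0)
            return -1;               // callback signaled abort
        totalProcessed += processed;
        remaining -= got;
    }
    return (ptrdiff_t)totalProcessed;
}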

archive.h

@@ -170,7 +170,7 @@ struct ArchiveEntry
off_t ofs;
off_t csize;
CompressionMethod method;
u32 crc;
u32 checksum;
uint flags; // ArchiveFileFlags

archive_builder.cpp

@@ -16,92 +16,148 @@
// un-nice dependencies:
#include "ps/Loader.h"
#include <zlib.h>
static inline bool file_type_is_uncompressible(const char* fn)
// vfs_load callback that compresses the data in parallel with IO
// (for incompressible files, we just calculate the checksum)
class Compressor
{
const char* ext = path_extension(fn);
// this is a selection of file types that are certainly not
// further compressible. we need not include every type under the sun -
// this is only a slight optimization that avoids wasting time
// compressing files. the real decision as to cmethod is made based
// on attained compression ratio.
static const char* uncompressible_exts[] =
public:
Compressor(uintptr_t ctx, const char* atom_fn, size_t usize)
: m_ctx(ctx)
, m_usize(usize)
, m_skipCompression(IsFileTypeIncompressible(atom_fn))
, m_cdata(0), m_csize(0), m_checksum(0)
{
"zip", "rar",
"jpg", "jpeg", "png",
"ogg", "mp3"
};
for(uint i = 0; i < ARRAY_SIZE(uncompressible_exts); i++)
{
if(!strcasecmp(ext+1, uncompressible_exts[i]))
return true;
comp_reset(m_ctx);
m_csizeBound = comp_max_output_size(m_ctx, usize);
THROW_ERR(comp_alloc_output(m_ctx, m_csizeBound));
}
return false;
}
LibError Feed(const u8* ublock, size_t ublockSize, size_t* bytes_processed)
{
// comp_feed already makes note of total #bytes fed, and we need
// vfs_io to return the usize (to check if all data was read).
*bytes_processed = ublockSize;
if(m_skipCompression)
{
// (since comp_finish returns the checksum, we only need to update this
// when not compressing.)
m_checksum = comp_update_checksum(m_ctx, m_checksum, ublock, ublockSize);
}
else
{
// note: we don't need the return value because comp_finish
// will tell us the total csize.
(void)comp_feed(m_ctx, ublock, ublockSize);
}
return INFO::CB_CONTINUE;
}
LibError Finish()
{
if(m_skipCompression)
return INFO::OK;
RETURN_ERR(comp_finish(m_ctx, &m_cdata, &m_csize, &m_checksum));
debug_assert(m_csize <= m_csizeBound);
return INFO::OK;
}
u32 Checksum() const
{
return m_checksum;
}
// final decision on whether to store the file as compressed,
// given the observed compressed/uncompressed sizes.
bool IsCompressionProfitable() const
{
// file is definitely incompressible.
if(m_skipCompression)
return false;
const float ratio = (float)m_usize / m_csize;
const ssize_t bytes_saved = (ssize_t)m_usize - (ssize_t)m_csize;
UNUSED2(bytes_saved);
// tiny - store compressed regardless of savings.
// rationale:
// - CPU cost is negligible and overlapped with IO anyway;
// - reading from compressed files uses less memory because we
// don't need to allocate space for padding in the final buffer.
if(m_usize < 512)
return true;
// large high-entropy file - store uncompressed.
// rationale:
// - any bigger than this and CPU time becomes a problem: it isn't
// necessarily hidden by IO time anymore.
if(m_usize >= 32*KiB && ratio < 1.02f)
return false;
// we currently store everything else compressed.
return true;
}
void GetOutput(const u8*& cdata, size_t& csize) const
{
debug_assert(!m_skipCompression);
debug_assert(m_cdata && m_csize);
cdata = m_cdata;
csize = m_csize;
// note: no need to free cdata - it is owned by the
// compression context and can be reused.
}
private:
static bool IsFileTypeIncompressible(const char* fn)
{
const char* ext = path_extension(fn);
// this is a selection of file types that are certainly not
// further compressible. we need not include every type under the sun -
// this is only a slight optimization that avoids wasting time
// compressing files. the real decision as to cmethod is made based
// on attained compression ratio.
static const char* incompressible_exts[] =
{
"zip", "rar",
"jpg", "jpeg", "png",
"ogg", "mp3"
};
for(uint i = 0; i < ARRAY_SIZE(incompressible_exts); i++)
{
if(!strcasecmp(ext+1, incompressible_exts[i]))
return true;
}
return false;
}
struct CompressParams
{
bool attempt_compress;
uintptr_t ctx;
u32 crc;
uintptr_t m_ctx;
size_t m_usize;
size_t m_csizeBound;
bool m_skipCompression;
u8* m_cdata;
size_t m_csize;
u32 m_checksum;
};
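
To make the thresholds in IsCompressionProfitable concrete, here is the same decision replayed on a few representative sizes. This is a standalone sketch, not part of the commit; it reorders the ratio computation after the tiny-file check but is otherwise the heuristic shown above.

#include <cstddef>
#include <cstdio>

// standalone replay of the profitability thresholds shown above
static bool IsProfitable(size_t usize, size_t csize)
{
    const size_t KiB = 1024;
    if(usize < 512)                        // tiny: always store compressed
        return true;
    const float ratio = (float)usize / csize;
    if(usize >= 32*KiB && ratio < 1.02f)   // large and near-incompressible
        return false;
    return true;                           // everything else: compressed
}

int main()
{
    printf("%d\n", IsProfitable(300, 295));      // 1: tiny, stored compressed anyway
    printf("%d\n", IsProfitable(40960, 40448));  // 0: 40 KiB, ratio 1.013 < 1.02
    printf("%d\n", IsProfitable(40960, 10240));  // 1: 40 KiB, ratio 4.0
    return 0;
}
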
static LibError compress_cb(uintptr_t cb_ctx, const u8* block, size_t size, size_t* bytes_processed)
static LibError compressor_feed_cb(uintptr_t cbData,
const u8* ublock, size_t ublockSize, size_t* bytes_processed)
{
CompressParams* p = (CompressParams*)cb_ctx;
// comp_feed already makes note of total #bytes fed, and we need
// vfs_io to return the uc size (to check if all data was read).
*bytes_processed = size;
// update checksum
p->crc = crc32(p->crc, (const Bytef*)block, (uInt)size);
if(p->attempt_compress)
{
// note: we don't need the return value because comp_finish returns
// the size of the compressed data.
(void)comp_feed(p->ctx, block, size);
}
return INFO::CB_CONTINUE;
Compressor& compressor = *(Compressor*)cbData;
return compressor.Feed(ublock, ublockSize, bytes_processed);
}
// final decision on whether to store the file as compressed,
// given the observed compressed/uncompressed sizes.
static bool ShouldCompress(size_t usize, size_t csize)
{
const float ratio = (float)usize / csize;
const ssize_t bytes_saved = (ssize_t)usize - (ssize_t)csize;
UNUSED2(bytes_saved);
// tiny - store compressed regardless of savings.
// rationale:
// - CPU cost is negligible and overlapped with IO anyway;
// - reading from compressed files uses less memory because we
// don't need to allocate space for padding in the final buffer.
if(usize < 512)
return true;
// large high-entropy file - store uncompressed.
// rationale:
// - any bigger than this and CPU time becomes a problem: it isn't
// necessarily hidden by IO time anymore.
if(usize >= 32*KiB && ratio < 1.02f)
return false;
// TODO: any other cases?
// we currently store everything else compressed.
return true;
}
static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
ArchiveEntry& ent, const u8*& file_contents, FileIOBuf& buf) // out
{
@@ -115,55 +171,38 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
// it looks like checking for usize=csize=0 is the safest way -
// relying on file attributes (which are system-dependent!) is
// even less safe.
// we thus skip 0-length files to avoid confusing them with dirs.
// we thus skip 0-length files to avoid confusing them with directories.
if(!usize)
return INFO::SKIPPED;
const bool attempt_compress = !file_type_is_uncompressible(atom_fn);
if(attempt_compress)
{
comp_reset(ctx);
const size_t csizeBound = comp_max_output_size(ctx, usize);
RETURN_ERR(comp_alloc_output(ctx, csizeBound));
}
Compressor compressor(ctx, atom_fn, usize);
// read file into newly allocated buffer. if attempt_compress, also
// compress the file into another buffer while waiting for IOs.
// read file into newly allocated buffer and run compressor.
size_t usize_read;
const uint flags = 0;
CompressParams params = { attempt_compress, ctx, 0 };
RETURN_ERR(vfs_load(atom_fn, buf, usize_read, flags, compress_cb, (uintptr_t)&params));
RETURN_ERR(vfs_load(atom_fn, buf, usize_read, flags, compressor_feed_cb, (uintptr_t)&compressor));
debug_assert(usize_read == usize);
// if we compressed the file trial-wise, check results and
// decide whether to store as such or not (based on compression ratio)
bool shouldCompress = false;
u8* cdata = 0; size_t csize = 0;
if(attempt_compress)
LibError ret = compressor.Finish();
if(ret < 0)
{
u32 checksum; // TODO: use instead of crc
LibError ret = comp_finish(ctx, &cdata, &csize, &checksum);
if(ret < 0)
{
file_buf_free(buf);
return ret;
}
shouldCompress = ShouldCompress(usize, csize);
file_buf_free(buf);
return ret;
}
// store file info
ent.usize = (off_t)usize;
ent.mtime = s.st_mtime;
ent.usize = (off_t)usize;
ent.mtime = s.st_mtime;
// .. ent.ofs is set by zip_archive_add_file
ent.flags = 0;
ent.flags = 0;
ent.atom_fn = atom_fn;
ent.crc = params.crc;
if(shouldCompress)
ent.checksum = compressor.Checksum();
if(compressor.IsCompressionProfitable())
{
ent.method = CM_DEFLATE;
ent.csize = (off_t)csize;
file_contents = cdata;
size_t csize;
compressor.GetOutput(file_contents, csize);
ent.csize = (off_t)csize;
}
else
{
@@ -172,13 +211,11 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
file_contents = buf;
}
// note: no need to free cdata - it is owned by the
// compression context and can be reused.
return INFO::OK;
}
//-----------------------------------------------------------------------------
LibError archive_build_init(const char* P_archive_filename, Filenames V_fns, ArchiveBuildState* ab)
{

compression.cpp

@@ -93,6 +93,14 @@ public:
* @return error status for the entire operation.
**/
virtual LibError Finish(u8*& out, size_t& outSize, u32& checksum) = 0;
/**
* update a checksum to reflect the contents of a buffer.
*
* @param checksum the initial value (must be 0 on first call)
* @return the new checksum.
**/
virtual u32 UpdateChecksum(u32 checksum, const u8* in, size_t inSize) const = 0;
};
@@ -103,6 +111,17 @@ public:
class ZLibCodec : public ICodec
{
protected:
ZLibCodec()
{
memset(&m_zs, 0, sizeof(m_zs));
InitializeChecksum();
}
void InitializeChecksum()
{
m_checksum = crc32(0, 0, 0);
}
typedef int ZEXPORT (*ZLibFunc)(z_streamp strm, int flush);
static LibError LibError_from_zlib(int zlib_err, bool warn_if_failed = true)
@@ -157,7 +176,19 @@ protected:
return LibError_from_zlib(ret);
}
virtual u32 UpdateChecksum(u32 checksum, const u8* in, size_t inSize) const
{
return (u32)crc32(checksum, in, (uInt)inSize);
}
mutable z_stream m_zs;
// note: z_stream does contain an 'adler' checksum field, but that's
// not updated in streams lacking a gzip header, so we'll have to
// calculate a checksum ourselves.
// adler32 is somewhat weaker than CRC32, but a more important argument
// is that we should use the latter for compatibility with Zip archives.
mutable u32 m_checksum;
};
class ZLibCompressor : public ZLibCodec
@@ -165,8 +196,6 @@ class ZLibCompressor : public ZLibCodec
public:
ZLibCompressor()
{
memset(&m_zs, 0, sizeof(m_zs));
// note: with Z_BEST_COMPRESSION, 78% percent of
// archive builder CPU time is spent in ZLib, even though
// that is interleaved with IO; everything else is negligible.
@@ -198,12 +227,14 @@ public:
virtual LibError Reset()
{
ZLibCodec::InitializeChecksum();
const int ret = deflateReset(&m_zs);
return LibError_from_zlib(ret);
}
virtual LibError Process(const u8* in, size_t inSize, u8* out, size_t outSize, size_t& inConsumed, size_t& outConsumed)
{
m_checksum = UpdateChecksum(m_checksum, in, inSize);
return ZLibCodec::Process(deflate, 0, in, inSize, out, outSize, inConsumed, outConsumed);
}
@@ -218,7 +249,7 @@ public:
out = m_zs.next_out - m_zs.total_out;
outSize = m_zs.total_out;
checksum = m_zs.adler;
checksum = m_checksum;
return INFO::OK;
}
};
@@ -229,8 +260,6 @@ class ZLibDecompressor : public ZLibCodec
public:
ZLibDecompressor()
{
memset(&m_zs, 0, sizeof(m_zs));
const int windowBits = -MAX_WBITS; // max window size; omit ZLib header
const int ret = inflateInit2(&m_zs, windowBits);
debug_assert(ret == Z_OK);
@@ -255,13 +284,16 @@ public:
virtual LibError Reset()
{
ZLibCodec::InitializeChecksum();
const int ret = inflateReset(&m_zs);
return LibError_from_zlib(ret);
}
virtual LibError Process(const u8* in, size_t inSize, u8* out, size_t outSize, size_t& inConsumed, size_t& outConsumed)
{
return ZLibCodec::Process(inflate, Z_SYNC_FLUSH, in, inSize, out, outSize, inConsumed, outConsumed);
const LibError ret = ZLibCodec::Process(inflate, Z_SYNC_FLUSH, in, inSize, out, outSize, inConsumed, outConsumed);
m_checksum = UpdateChecksum(m_checksum, in, inSize);
return ret;
}
virtual LibError Finish(u8*& out, size_t& outSize, u32& checksum)
@@ -270,7 +302,7 @@ public:
out = m_zs.next_out - m_zs.total_out;
outSize = m_zs.total_out;
checksum = m_zs.adler;
checksum = m_checksum;
return INFO::OK;
}
};
@@ -527,6 +559,11 @@ public:
return m_codec->Finish(out, outSize, checksum);
}
u32 UpdateChecksum(u32 checksum, const u8* in, size_t inSize) const
{
return m_codec->UpdateChecksum(checksum, in, inSize);
}
private:
// ICodec::Finish is allowed to assume that output buffers were identical
// or contiguous; we verify this here.
@@ -603,10 +640,14 @@ uintptr_t comp_alloc(ContextType type, CompressionMethod method)
return (uintptr_t)stream;
}
size_t comp_max_output_size(uintptr_t ctx, size_t inSize)
void comp_free(uintptr_t ctx)
{
// no-op if context is 0 (i.e. was never allocated)
if(!ctx)
return;
Stream* stream = (Stream*)ctx;
return stream->MaxOutputSize(inSize);
streamFactory.Destroy(stream);
}
void comp_reset(uintptr_t ctx)
@@ -615,6 +656,12 @@ void comp_reset(uintptr_t ctx)
stream->Reset();
}
size_t comp_max_output_size(uintptr_t ctx, size_t inSize)
{
Stream* stream = (Stream*)ctx;
return stream->MaxOutputSize(inSize);
}
void comp_set_output(uintptr_t ctx, u8* out, size_t outSize)
{
Stream* stream = (Stream*)ctx;
@@ -639,12 +686,8 @@ LibError comp_finish(uintptr_t ctx, u8** out, size_t* outSize, u32* checksum)
return stream->Finish(*out, *outSize, *checksum);
}
void comp_free(uintptr_t ctx)
u32 comp_update_checksum(uintptr_t ctx, u32 checksum, const u8* in, size_t inSize)
{
// no-op if context is 0 (i.e. was never allocated)
if(!ctx)
return;
Stream* stream = (Stream*)ctx;
streamFactory.Destroy(stream);
return stream->UpdateChecksum(checksum, in, inSize);
}

compression.h

@@ -28,14 +28,31 @@ enum CompressionMethod
{
CM_NONE,
// zlib "deflate" (RFC 1750, 1751) and Adler32 checksum
// zlib "deflate" (RFC 1750, 1751) and CRC32
CM_DEFLATE,
CM_UNSUPPORTED
};
/**
* allocate a new compression/decompression context.
**/
extern uintptr_t comp_alloc(ContextType type, CompressionMethod method);
/**
* free this context and all associated memory.
**/
extern void comp_free(uintptr_t ctx);
/**
* clear all previous state and prepare for reuse.
*
* this is as if the object were destroyed and re-created, but more
* efficient since it avoids reallocating a considerable amount of memory
* (about 200KB for LZ).
**/
extern void comp_reset(uintptr_t ctx);
/**
* @return an upper bound on the output size for the given amount of input.
* this is used when allocating a single buffer for the whole operation.
@@ -87,17 +104,15 @@ extern ssize_t comp_feed(uintptr_t ctx, const u8* in, size_t inSize);
extern LibError comp_finish(uintptr_t ctx, u8** out, size_t* out_size, u32* checksum);
/**
* clear all previous state and prepare for reuse.
* update a checksum to reflect the contents of a buffer.
*
* this is as if the object were destroyed and re-created, but more
* efficient since it avoids reallocating a considerable amount of memory
* (about 200KB for LZ).
* @param checksum the initial value (must be 0 on first call)
* @return the new checksum.
*
* note: this routine is stateless but still requires a context to establish
* the type of checksum to calculate. the results are the same as yielded by
* comp_finish after comp_feed-ing all input buffers.
**/
extern void comp_reset(uintptr_t ctx);
/**
* free this context and all associated memory.
**/
extern void comp_free(uintptr_t ctx);
extern u32 comp_update_checksum(uintptr_t ctx, u32 checksum, const u8* in, size_t inSize);
#endif // #ifndef INCLUDED_COMPRESSION
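
Taken together, these declarations imply the following caller lifecycle. A minimal sketch, not from the commit: CT_COMPRESSION is a placeholder for a ContextType value (the enum is not shown in this excerpt), and comp_alloc_output is the allocation call used by archive_builder.cpp above.

// hypothetical end-to-end use of the comp_* API declared above; the caller
// owns ctx and must comp_free it only after consuming cdata, which remains
// owned by the context.
static LibError CompressBuffer(uintptr_t ctx,
    const u8* udata, size_t usize,               // in
    u8*& cdata, size_t& csize, u32& checksum)    // out
{
    comp_reset(ctx);                             // allow context reuse
    RETURN_ERR(comp_alloc_output(ctx, comp_max_output_size(ctx, usize)));
    (void)comp_feed(ctx, udata, usize);          // may be called once per block
    return comp_finish(ctx, &cdata, &csize, &checksum);
}

// uintptr_t ctx = comp_alloc(CT_COMPRESSION, CM_DEFLATE);
// ... CompressBuffer(ctx, data, size, cdata, csize, checksum) ...
// comp_free(ctx);   // also releases cdata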

zip.cpp

@@ -215,20 +215,21 @@ static void lfh_assemble(LFH* lfh_le,
lfh_le->fat_mtime = to_le32(fat_mtime);
lfh_le->crc = to_le32(crc);
lfh_le->csize = to_le32(u32_from_larger(csize));
lfh_le->usize = to_le32(u32_from_larger(usize));
lfh_le->usize = to_le32(u32_from_larger(usize));
lfh_le->fn_len = to_le16(u16_from_larger(fn_len));
lfh_le->e_len = to_le16(0);
}
static void cdfh_decompose(const CDFH* cdfh_le,
CompressionMethod& method, time_t& mtime, off_t& csize, off_t& usize,
CompressionMethod& method, time_t& mtime, u32& crc, off_t& csize, off_t& usize,
const char*& fn, off_t& lfh_ofs, size_t& total_size)
{
const u16 zip_method = read_le16(&cdfh_le->method);
const u32 fat_mtime = read_le32(&cdfh_le->fat_mtime);
crc = read_le32(&cdfh_le->crc);
csize = (off_t)read_le32(&cdfh_le->csize);
usize = (off_t)read_le32(&cdfh_le->usize);
usize = (off_t)read_le32(&cdfh_le->usize);
const u16 fn_len = read_le16(&cdfh_le->fn_len);
const u16 e_len = read_le16(&cdfh_le->e_len);
const u16 c_len = read_le16(&cdfh_le->c_len);
@@ -261,7 +262,7 @@ static void cdfh_assemble(CDFH* dst_cdfh_le,
dst_cdfh_le->fat_mtime = to_le32(fat_mtime);
dst_cdfh_le->crc = to_le32(crc);
dst_cdfh_le->csize = to_le32(u32_from_larger(csize));
dst_cdfh_le->usize = to_le32(u32_from_larger(usize));
dst_cdfh_le->usize = to_le32(u32_from_larger(usize));
dst_cdfh_le->fn_len = to_le16(u16_from_larger(fn_len));
dst_cdfh_le->e_len = to_le16(0);
dst_cdfh_le->c_len = to_le16(u16_from_larger(slack));
@@ -453,7 +454,7 @@ LibError zip_populate_archive(File* f, Archive* a)
// copy translated fields from CDFH into ArchiveEntry.
ArchiveEntry ae;
cdfh_decompose(cdfh, ae.method, ae.mtime, ae.csize, ae.usize, ae.atom_fn, ae.ofs, ofs_to_next_cdfh);
cdfh_decompose(cdfh, ae.method, ae.mtime, ae.checksum, ae.csize, ae.usize, ae.atom_fn, ae.ofs, ofs_to_next_cdfh);
ae.flags = ZIP_LFH_FIXUP_NEEDED;
// if file (we don't care about directories):
@@ -587,7 +588,7 @@ LibError zip_archive_add_file(ZipArchive* za, const ArchiveEntry* ae, const u8*
// write (LFH, filename, file contents) to archive
// .. put LFH and filename into one 'package'
LFH_Package header;
lfh_assemble(&header.lfh, ae->method, ae->mtime, ae->crc, ae->csize, ae->usize, fn_len);
lfh_assemble(&header.lfh, ae->method, ae->mtime, ae->checksum, ae->csize, ae->usize, fn_len);
strcpy_s(header.fn, ARRAY_SIZE(header.fn), ae->atom_fn);
// .. write that out in 1 IO
const off_t lfh_ofs = za->cur_file_size;
@@ -606,7 +607,7 @@ LibError zip_archive_add_file(ZipArchive* za, const ArchiveEntry* ae, const u8*
if(!p)
WARN_RETURN(ERR::NO_MEM);
const size_t slack = za->cdfhs.da.pos-prev_pos - (CDFH_SIZE+fn_len);
cdfh_assemble(&p->cdfh, ae->method, ae->mtime, ae->crc, ae->csize, ae->usize, fn_len, slack, lfh_ofs);
cdfh_assemble(&p->cdfh, ae->method, ae->mtime, ae->checksum, ae->csize, ae->usize, fn_len, slack, lfh_ofs);
cpu_memcpy(p->fn, ae->atom_fn, fn_len);
za->cd_entries++;
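
For reference, lfh_assemble above fills the standard PKZip local file header, which is why the entry checksum must be a CRC32. Its 30-byte on-disk layout, per the PKWARE APPNOTE; field names are chosen to mirror lfh_assemble's parameters and are not taken from this commit:

#pragma pack(push, 1)
// Zip local file header as defined by the Zip specification
struct LFH
{
    u32 magic;       // 0x04034b50, "PK\3\4"
    u16 x1;          // version needed to extract
    u16 flags;       // general-purpose bit flags
    u16 method;      // 0 = stored, 8 = deflate (CM_DEFLATE)
    u32 fat_mtime;   // DOS time in the low word, DOS date in the high word
    u32 crc;         // CRC32 of the uncompressed data (ae->checksum)
    u32 csize;       // compressed size
    u32 usize;       // uncompressed size
    u16 fn_len;      // filename length; the name follows the fixed fields
    u16 e_len;       // "extra field" length
};
#pragma pack(pop)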

Xeromyces.cpp

@@ -10,9 +10,6 @@
#include "lib/res/file/vfs.h"
#include "Xeromyces.h"
#define ZLIB_DLL
#include <zlib.h> // for crc32
#define LOG_CATEGORY "xml"
#include "XML.h"