forked from 0ad/0ad

adts: add LL_OPT_* defines that enable optimizations (some only pay off when there are tons of files, and each must be individually disableable for the thesis measurements). realized this would be a perfect application of policy template classes, which will eventually replace these defines.
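A minimal sketch of the policy-class idea mentioned above (illustration only; the policy names are made up and not part of this commit):

// hypothetical sketch: each LL_OPT_* #define becomes a policy class passed as
// a template parameter, so an optimization can be toggled per instantiation
// (e.g. for thesis measurements) instead of per build.
#include <cfloat>
struct McdTracking          // stands in for LL_OPT_MINCREDIT
{
	float min_credit_density;
	McdTracking() : min_credit_density(FLT_MAX) {}
	void on_credit_reduced(float density)
	{
		if(density < min_credit_density)
			min_credit_density = density;
	}
};
struct NoMcdTracking        // optimization disabled
{
	void on_credit_reduced(float) {}
};
template<typename Key, typename T, class McdPolicy = McdTracking>
class PolicyCache
{
	McdPolicy mcd;
	// add/retrieve/remove_least_valuable would call mcd.on_credit_reduced(...)
	// instead of being wrapped in #ifdef LL_OPT_MINCREDIT blocks.
};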

fix crashdumps: they were not being written out to file (underlying cause:
the current directory is no longer being set)
app_hooks: add get_log_dir; used by debug and wdbg_sym

minor improvements/documentation in archive+compression+file_cache+zip

main: remove dead ScEd code

This was SVN commit r3498.
janwas 2006-02-11 22:49:09 +00:00
parent ccd43b4b49
commit c4e3037e60
15 changed files with 370 additions and 173 deletions

View File

@ -220,11 +220,19 @@ public:
// Cache for items of variable size and value/"cost".
// currently uses Landlord algorithm.
#define LL_OPT_MINCREDIT
#define LL_OPT_RECIP
#define LL_OPT_DELAYCHARGE
template<typename Key, typename T> class Cache
{
public:
Cache()
: min_credit_density(FLT_MAX) {}
{
#ifdef LL_OPT_MINCREDIT
min_credit_density = FLT_MAX;
#endif
}
void add(Key key, T item, size_t size, uint cost)
{
@ -233,9 +241,11 @@ public:
PairIB ret = map.insert(val);
debug_assert(ret.second); // must not already be in map
#ifdef LL_OPT_MINCREDIT
// adding new item - min_credit_density may decrease
const CacheEntry& new_entry = ret.first->second;
notify_credit_reduced(new_entry);
#endif
}
// remove the entry identified by <key>. expected usage is to check
@ -250,15 +260,17 @@ public:
debug_warn("Cache: item to be removed not found");
return;
}
#include <queue>
#ifdef LL_OPT_MINCREDIT
// we're removing. if this one had the smallest
// density, recalculate.
const bool need_recalc = is_min_entry(it->second);
#endif
map.erase(it);
#ifdef LL_OPT_MINCREDIT
if(need_recalc)
recalc_min_density();
recalc_min_credit_density();
#endif
}
// if there is no entry for <key> in the cache, return 0 with
@ -275,17 +287,21 @@ public:
if(refill_credit)
{
#ifdef LL_OPT_MINCREDIT
// we're increasing credit. if this one had the smallest
// density, recalculate.
const bool need_recalc = is_min_entry(entry);
#endif
// Landlord algorithm calls for credit to be reset to anything
// between its current value and the cost.
const float gain = 0.75f; // restore most credit
entry.credit = gain*entry.cost + (1.0f-gain)*entry.credit;
#ifdef LL_OPT_MINCREDIT
if(need_recalc)
recalc_min_density();
recalc_min_credit_density();
#endif
}
return entry.item;
@ -300,22 +316,35 @@ public:
if(map.empty())
return 0;
#ifdef LL_OPT_DELAYCHARGE
// determine who has least density via priqueue
// remove it
// add its delta to accumulator
#endif
#ifndef LL_OPT_MINCREDIT
// not implicitly updated: we need to calculate min_credit_density now.
recalc_min_credit_density();
#endif
// latch current delta value to avoid it changing during the loop
// (due to notify_* calls). this ensures fairness.
const float delta = min_credit_density;
// one iteration ought to suffice to evict someone due to
// definition of min_density, but we provide for repeating
// in case of floating-point imprecision.
// definition of min_credit_density, but we provide for
// repeating in case of floating-point imprecision.
// (goto vs. loop avoids nesting and emphasizes rarity)
again:
// charge everyone rent (proportional to min_credit_density and size)
// .. latch current delta value to avoid it changing during the loop
// (due to notify_* calls). this ensures fairness.
const float delta = min_credit_density;
// charge everyone rent (proportional to delta and size)
for(CacheMapIt it = map.begin(); it != map.end(); ++it)
{
CacheEntry& entry = it->second;
entry.credit -= delta * entry.size;
#ifdef LL_OPT_MINCREDIT
// reducing credit - min_credit_density may decrease
notify_credit_reduced(entry);
#endif
// evict immediately if credit is exhausted
// (note: Landlord algorithm calls for 'any subset' of
@ -325,15 +354,17 @@ again:
// this means every call will end up charging more than
// intended, but we compensate by resetting credit
// fairly high upon cache hit.
if(entry.credit <= 0.0f)
if(entry.credit <= 0.01f) // a bit of tolerance
{
T item = entry.item;
if(psize)
*psize = entry.size;
map.erase(it);
#ifdef LL_OPT_MINCREDIT
// this item had the least density, else it wouldn't
// have been removed. recalculate.
recalc_min_density();
recalc_min_credit_density();
#endif
return item;
}
}
@ -342,12 +373,19 @@ again:
goto again;
}
bool empty()
{
return map.empty();
}
private:
struct CacheEntry
{
T item;
size_t size;
#ifdef LL_OPT_RECIP
float size_reciprocal;
#endif
uint cost;
float credit;
@ -355,7 +393,9 @@ private:
: item(item_)
{
size = size_;
#ifdef LL_OPT_RECIP
size_reciprocal = 1.0f / size;
#endif
cost = cost_;
credit = cost;
@ -373,8 +413,19 @@ private:
float min_credit_density;
float credit_density(const CacheEntry& entry)
{
#ifdef LL_OPT_RECIP
return entry.credit * entry.size_reciprocal;
#else
return entry.credit / entry.size;
#endif
}
void recalc_min_credit_density()
{
min_credit_density = FLT_MAX;
for(CacheMapIt it = map.begin(); it != map.end(); ++it)
min_credit_density = MIN(min_credit_density, credit_density(it->second));
}
#ifdef LL_OPT_MINCREDIT
void notify_credit_reduced(const CacheEntry& entry)
{
min_credit_density = MIN(min_credit_density, credit_density(entry));
@ -383,12 +434,7 @@ private:
{
return feq(min_credit_density, credit_density(entry));
}
void recalc_min_density()
{
min_credit_density = FLT_MAX;
for(CacheMapIt it = map.begin(); it != map.end(); ++it)
min_credit_density = MIN(min_credit_density, credit_density(it->second));
}
#endif
};
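For reference, a self-contained toy walk-through of the rent-charging step implemented above (sizes and credits are made up; this is independent of the Cache class):

#include <cstdio>
#include <cfloat>
#include <algorithm>
struct ToyEntry { const char* name; float size; float credit; };
int main()
{
	// credit densities: a = 50/100 = 0.5, b = 20/10 = 2.0, c = 100/400 = 0.25
	ToyEntry e[] = { {"a.dds", 100.0f, 50.0f}, {"b.xml", 10.0f, 20.0f}, {"c.ogg", 400.0f, 100.0f} };
	float delta = FLT_MAX;      // plays the role of min_credit_density
	for(int i = 0; i < 3; i++)
		delta = std::min(delta, e[i].credit / e[i].size);
	// charge rent proportional to delta and size; the minimum-density entry
	// (c.ogg) drops to 0 credit and would be evicted by remove_least_valuable.
	for(int i = 0; i < 3; i++)
	{
		e[i].credit -= delta * e[i].size;
		printf("%s: credit %.1f\n", e[i].name, e[i].credit);  // a: 25.0, b: 17.5, c: 0.0
	}
	return 0;
}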

View File

@ -23,6 +23,7 @@
#include "detect.h"
#include "res/graphics/ogl_tex.h"
#include "res/file/file.h"
#include "res/file/vfs.h"
#include "app_hooks.h"
@ -44,6 +45,21 @@ static void override_gl_upload_caps()
}
static const char* get_log_dir()
{
static char N_log_dir[PATH_MAX];
ONCE(\
char N_exe_name[PATH_MAX];\
(void)sys_get_executable_name(N_exe_name, ARRAY_SIZE(N_exe_name));\
/* strip app name (we only need path) */\
char* slash = strrchr(N_exe_name, DIR_SEP);\
if(slash) *slash = '\0';\
(void)vfs_path_append(N_log_dir, N_exe_name, "../logs/");
);
return N_log_dir;
}
// convert contents of file <in_filename> from char to wchar_t and
// append to <out> file.
static void cat_atow(FILE* out, const char* in_filename)

View File

@ -77,6 +77,15 @@ and call set_app_hooks.
// not expandable.
FUNC(void, override_gl_upload_caps, (void), (), (void))
// return full native path of the directory into which crashdumps should be
// written. must end with directory separator (e.g. '/').
// if implementing via static storage, be sure to guarantee reentrancy
// (e.g. by only filling the string once).
// must be callable at any time - in particular, before VFS init.
// this means file_make_full_native_path cannot be used; it is best
// to specify a path relative to sys_get_executable_name.
FUNC(const char*, get_log_dir, (void), (), return)
// gather all app-related logs/information and write it into <f>.
// used when writing a crashlog so that all relevant info is in one file.
//
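Regarding the get_log_dir hook documented above: a hypothetical app-side implementation satisfying those requirements might look as follows (registration via set_app_hooks is omitted; only primitives already used elsewhere in this commit are assumed):

static const char* app_get_log_dir()
{
	// static storage, filled only on the first call (the default
	// implementation uses ONCE for the same purpose).
	static char N_log_dir[PATH_MAX];
	if(N_log_dir[0] == '\0')
	{
		char N_exe_name[PATH_MAX];
		(void)sys_get_executable_name(N_exe_name, ARRAY_SIZE(N_exe_name));
		// keep the trailing directory separator, drop the app name
		char* slash = strrchr(N_exe_name, DIR_SEP);
		if(slash)
			slash[1] = '\0';
		strcpy_s(N_log_dir, ARRAY_SIZE(N_log_dir), N_exe_name);
		strcat_s(N_log_dir, ARRAY_SIZE(N_log_dir), "logs/");	// ends with '/'
	}
	return N_log_dir;
}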

View File

@ -210,7 +210,12 @@ void debug_wprintf(const wchar_t* fmt, ...)
LibError debug_write_crashlog(const wchar_t* text)
{
FILE* f = fopen("crashlog.txt", "w");
// note: we go through some gyrations here (strcpy+strcat) to avoid
// dependency on file code (vfs_path_append).
char N_path[PATH_MAX];
strcpy_s(N_path, ARRAY_SIZE(N_path), ah_get_log_dir());
strcat_s(N_path, ARRAY_SIZE(N_path), "crashlog.txt");
FILE* f = fopen(N_path, "w");
if(!f)
{
DISPLAY_ERROR(L"debug_write_crashlog: unable to open file");

View File

@ -263,6 +263,10 @@ extern void debug_filter_clear();
// used for "last activity" reporting in the crashlog.
extern void debug_wprintf_mem(const wchar_t* fmt, ...);
// set directory into which crashlog (and system-specific dumps) will be
// written. if not called, the current directory is used.
extern void debug_set_crashlog_dir(const char* P_path);
// write all logs and <text> out to crashlog.txt (unicode format).
extern LibError debug_write_crashlog(const wchar_t* text);

View File

@ -91,7 +91,7 @@ static LibError Archive_reload(Archive* a, const char* fn, Handle)
RETURN_ERR(file_open(fn, FILE_CACHE_BLOCK, &a->f));
a->is_open = 1;
RETURN_ERR(zip_populate_archive(a, &a->f));
RETURN_ERR(zip_populate_archive(&a->f, a));
a->is_loaded = 1;
return ERR_OK;
@ -736,15 +736,10 @@ LibError archive_build(const char* P_archive_filename, Filenames V_fl)
RETURN_ERR(zip_archive_create(P_archive_filename, &za));
uintptr_t ctx = comp_alloc(CT_COMPRESSION, CM_DEFLATE);
const char* fn; // declare outside loop for easier debugging
for(size_t i = 0; ; i++)
for(size_t i = 0; V_fl[i]; i++)
{
fn = V_fl[i];
if(!fn)
break;
ArchiveEntry ent; void* file_contents; FileIOBuf buf;
if(read_and_compress_file(fn, ctx, ent, file_contents, buf) == ERR_OK)
if(read_and_compress_file(V_fl[i], ctx, ent, file_contents, buf) == ERR_OK)
{
(void)zip_archive_add_file(za, &ent, file_contents);
(void)file_buf_free(buf);
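For clarity, a usage sketch of the NULL-terminated Filenames convention this loop now relies on (the paths and archive name are made up):

static void build_example_archive()
{
	static const char* files[] =
	{
		"art/textures/ui/logo.dds",
		"audio/music/menu.ogg",
		0	// terminator checked by archive_build's loop
	};
	(void)archive_build("example.zip", files);
}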

View File

@ -167,18 +167,40 @@ extern LibError afile_unmap(AFile* af);
// NULL entry.
typedef const char** Filenames;
// create an archive (overwriting previous file) and fill it with the given
// files. compression method is chosen intelligently based on extension and
// file entropy / achieved compression ratio.
extern LibError archive_build(const char* P_archive_filename, Filenames V_fl);
//
// interface for backends
//
// the archive-specific backends call back here for each file;
// this module provides storage for the file table.
enum ArchiveFileFlags
{
// indicates ArchiveEntry.ofs points to a "local file header"
// instead of the file data. a fixup routine is called upon
// file open; it skips past LFH and clears this flag.
// this is somewhat of a hack, but vital to archive open
// performance. without it, we'd have to scan through the
// entire Zip file, which can take *seconds*.
// (we cannot use the information in CDFH, because its 'extra' field
// has been observed to differ from that of the LFH)
// by reading LFH when a file in archive is opened, the block cache
// absorbs the IO cost because the file will likely be read anyway.
ZIP_LFH_FIXUP_NEEDED = 1
};
// convenience container for location / size of file in archive.
// separate from AFile to minimize size of file table.
// holds all per-file information extracted from the header.
// this is intended to work for all archive types.
//
// note: AFile (state of a currently open file) is separate because
// some of its fields need not be stored here; we'd like to minimize
// size of the file table.
struct ArchiveEntry
{
// these are returned by afile_stat:
@ -191,7 +213,7 @@ struct ArchiveEntry
CompressionMethod method;
u32 crc32;
uint flags;
uint flags; // ArchiveFileFlags
const char* atom_fn;

View File

@ -98,7 +98,12 @@ public:
virtual LibError init() = 0;
virtual LibError reset() = 0;
virtual LibError reset()
{
next_out = 0;
avail_out = 0;
return ERR_OK;
}
virtual LibError alloc_output(size_t in_size) = 0;
@ -117,11 +122,15 @@ public:
void set_output(void* out, size_t out_size)
{
// must only be set once, because finish() returns the
// output buffer set as: next_out - total_out.
debug_assert(next_out == 0 && avail_out == 0);
next_out = out;
avail_out = out_size;
}
void* get_output()
{
debug_assert(next_out != 0);
return next_out;
}
ssize_t feed(const void* in, size_t in_size)
@ -184,13 +193,14 @@ protected:
ContextType type;
CompressionMethod method;
// output buffer - assigned once by set_output
void* next_out;
size_t avail_out;
// output memory allocated by alloc_output_impl
void* out_mem;
size_t out_mem_size;
// may be several IOs in flight => list needed
struct Buf
{
const u8* cdata;
@ -203,6 +213,10 @@ protected:
mem_to_free = mem_to_free_;
}
};
// note: a 'list' (deque is more efficient) is necessary.
// lack of output space can result in leftover input data;
// since we do not want feed() to always have to check for and
// use up any previous remnants, we allow queuing them.
std::deque<Buf> pending_bufs;
LibError alloc_output_impl(size_t required_out_size)
@ -235,6 +249,10 @@ protected:
out_mem_size = alloc_size;
have_out_mem:
// must only be set once, because finish() returns the
// output buffer set as: next_out - total_out.
debug_assert(next_out == 0 && avail_out == 0);
next_out = out_mem;
avail_out = out_mem_size;
@ -279,6 +297,7 @@ public:
virtual LibError reset()
{
Compressor::reset();
int ret;
if(type == CT_COMPRESSION)
ret = deflateReset(&zs);

View File

@ -17,16 +17,49 @@ enum CompressionMethod
extern uintptr_t comp_alloc(ContextType type, CompressionMethod method);
// set output buffer. all subsequent comp_feed() calls will write into it.
// should only be called once (*) due to the comp_finish() interface - since
// that allows querying the output buffer, it must not be fragmented.
// * the previous output buffer is wiped out by comp_reset, so
// setting it again (once!) after that is allowed and required.
extern void comp_set_output(uintptr_t ctx, void* out, size_t out_size);
extern LibError comp_alloc_output(uintptr_t c_, size_t in_size);
extern void* comp_get_output(uintptr_t ctx_);
// [compression contexts only:] allocate an output buffer big enough to
// hold worst_case_compression_ratio*in_size bytes.
// rationale: this interface is useful because callers cannot
// reliably estimate how much output space is needed.
// raises a warning for decompression contexts because this operation
// does not make sense there:
// - decompression ratio is quite large - ballpark 1000x;
// - exact uncompressed size is known to caller (via archive file header).
// note: buffer is held until comp_free; it can be reused after a
// comp_reset. this reduces malloc/free calls.
extern LibError comp_alloc_output(uintptr_t ctx, size_t in_size);
// get current position in output buffer.
// precondition: valid calls to EITHER comp_alloc_output OR comp_set_output.
extern void* comp_get_output(uintptr_t ctx);
// 'feed' the given buffer to the compressor/decompressor.
// returns number of output bytes produced (*), or a negative LibError code.
// * 0 is a legitimate return value - this happens if the input buffer is
// small and the codec hasn't produced any output.
// note: the buffer may be overwritten or freed immediately after - we take
// care of copying and queuing any data that remains (e.g. due to
// lack of output buffer space).
extern ssize_t comp_feed(uintptr_t ctx, const void* in, size_t in_size);
// feed any remaining queued input data, finish the compress/decompress and
// pass back the output buffer.
extern LibError comp_finish(uintptr_t ctx, void** out, size_t* out_size);
// prepare this context for reuse. the effect is similar to freeing this
// context and creating another.
// rationale: this API avoids reallocating a considerable amount of
// memory (ballpark 200KB LZ window plus output buffer).
extern LibError comp_reset(uintptr_t ctx);
// free this context and all associated memory.
extern void comp_free(uintptr_t ctx);
#endif // #ifndef COMPRESSION_H__
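A usage sketch of the interface above for a compression context (cleanup on the error paths is omitted for brevity; the assumption that comp_alloc returns 0 on failure is not stated in this header, and ERR_NO_MEM stands in for whatever code is appropriate):

static LibError compress_into(const void* in, size_t in_size,
	void* dst, size_t dst_max, size_t* dst_size)
{
	uintptr_t ctx = comp_alloc(CT_COMPRESSION, CM_DEFLATE);
	if(!ctx)	// assumption: 0 denotes failure
		return ERR_NO_MEM;
	// compression output size cannot be predicted, so let the codec
	// allocate worst_case_compression_ratio*in_size bytes itself.
	RETURN_ERR(comp_alloc_output(ctx, in_size));
	ssize_t produced = comp_feed(ctx, in, in_size);	// 0 is legitimate
	RETURN_ERR(produced);
	void* out; size_t out_size;
	RETURN_ERR(comp_finish(ctx, &out, &out_size));
	// <out> is owned by the context (held until comp_free, reused after
	// comp_reset), so copy it out before releasing the context.
	debug_assert(out_size <= dst_max);
	memcpy2(dst, out, out_size);
	*dst_size = out_size;
	comp_free(ctx);
	return ERR_OK;
}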

View File

@ -684,8 +684,12 @@ static ExtantBufMgr extant_bufs;
//-----------------------------------------------------------------------------
static Cache<const char*, FileIOBuf> file_cache;
// HACK: key type is really const char*, but the file_cache's STL (hash_)map
// stupidly assumes that is a "string". (comparison can be done via
// pointer compare, due to atom_fn mechanism) we define as void* to avoid
// this behavior - it breaks the (const char*)1 self-test hack and is
// inefficient.
static Cache<const void*, FileIOBuf> file_cache;
FileIOBuf file_buf_alloc(size_t size, const char* atom_fn, bool long_lived)
@ -923,6 +927,7 @@ static void test_cache_allocator()
// put allocator through its paces by allocating several times
// its capacity (this ensures memory is reused)
srand(1);
size_t total_size_used = 0;
while(total_size_used < 4*MAX_CACHE_SIZE)
{
@ -952,12 +957,32 @@ static void test_cache_allocator()
allocations[p] = size;
}
// reset to virginal state
cache_allocator.reset();
}
static void test_file_cache()
{
// we need a unique address for file_cache_add, but don't want to
// actually put it in the atom_fn storage (permanently clutters it).
// just increment this pointer (evil but works since it's not used).
// const char* atom_fn = (const char*)1;
// give to file_cache
// file_cache_add((FileIOBuf)p, size, atom_fn++);
file_cache_flush();
TEST(file_cache.empty());
// (even though everything has now been freed,
// the freelists may be a bit scattered already).
}
static void self_test()
{
test_cache_allocator();
test_file_cache();
}
SELF_TEST_RUN;

View File

@ -1,6 +1,6 @@
// Zip archiving on top of ZLib.
// archive backend for Zip files
//
// Copyright (c) 2003 Jan Wassenberg
// Copyright (c) 2003-2006 Jan Wassenberg
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
@ -16,7 +16,6 @@
// Jan.Wassenberg@stud.uni-karlsruhe.de
// http://www.stud.uni-karlsruhe.de/~urkt/
#include "precompiled.h"
#include <time.h>
@ -56,8 +55,8 @@ struct LFH
u16 e_len;
};
const size_t LFH_SIZE = 30;
cassert(sizeof(LFH) == LFH_SIZE);
const size_t LFH_SIZE = sizeof(LFH);
cassert(LFH_SIZE == 30);
struct CDFH
@ -78,8 +77,8 @@ struct CDFH
u32 lfh_ofs;
};
const size_t CDFH_SIZE = 46;
cassert(sizeof(CDFH) == CDFH_SIZE);
const size_t CDFH_SIZE = sizeof(CDFH);
cassert(CDFH_SIZE == 46);
struct ECDR
@ -92,8 +91,8 @@ struct ECDR
u16 comment_len;
};
const size_t ECDR_SIZE = 22;
cassert(sizeof(ECDR) == ECDR_SIZE);
const size_t ECDR_SIZE = sizeof(ECDR);
cassert(ECDR_SIZE == 22);
#pragma pack(pop)
@ -147,17 +146,11 @@ static u32 FAT_from_time_t(time_t time)
}
///////////////////////////////////////////////////////////////////////////////
//
// za_*: Zip archive handling
// passes the list of files in an archive to lookup.
//
///////////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
// scan for and return a pointer to a Zip record, or 0 if not found.
// <start> is the expected position; we scan from there until EOF for
// the given ID (fourcc). <record_size> (includes ID field) bytes must
// remain before EOF - this makes sure the record is completely in the file.
// used by z_find_ecdr and z_extract_cdfh.
static const u8* za_find_id(const u8* buf, size_t size, const void* start, u32 magic, size_t record_size)
@ -192,29 +185,31 @@ static const u8* za_find_id(const u8* buf, size_t size, const void* start, u32 m
}
// search for ECDR in the last <max_scan_amount> bytes of the file.
// if found, fill <dst_ecdr> with an (unprocessed) copy of the record and
// return ERR_OK, otherwise IO error or ERR_CORRUPTED.
static LibError za_find_ecdr_impl(File* f, size_t max_scan_amount, ECDR* dst_ecdr)
{
// don't scan more than the entire file
const size_t file_size = f->fc.size;
const size_t scan_amount = MIN(max_scan_amount, file_size);
// scan the last 66000 bytes of file for ecdr_id signature
// (the Zip archive comment field - up to 64k - may follow ECDR).
// if the zip file is < 66000 bytes, scan the whole file.
size_t scan_amount = MIN(max_scan_amount, file_size);
// read desired chunk of file into memory
const off_t ofs = (off_t)(file_size - scan_amount);
FileIOBuf buf = FILE_BUF_ALLOC;
RETURN_ERR(file_io(f, ofs, scan_amount, &buf));
ssize_t bytes_read = file_io(f, ofs, scan_amount, &buf);
RETURN_ERR(bytes_read);
debug_assert(bytes_read == (ssize_t)scan_amount);
LibError ret;
// look for ECDR in buffer
LibError ret = ERR_CORRUPTED;
const u8* start = (const u8*)buf;
const ECDR* ecdr = (const ECDR*)za_find_id(start, scan_amount, start, ecdr_magic, ECDR_SIZE);
const ECDR* ecdr = (const ECDR*)za_find_id(start, bytes_read, start, ecdr_magic, ECDR_SIZE);
if(ecdr)
{
*dst_ecdr = *ecdr;
ret = ERR_OK;
}
else
ret = ERR_CORRUPTED;
file_buf_free(buf);
return ret;
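For reference, a quick sanity check of the 66000-byte scan window mentioned above (illustration only; the constant itself is supplied by the caller via <max_scan_amount>):

// the ECDR record is 22 bytes and the archive comment that may follow it is
// at most 65535 bytes (comment_len is a u16), so the ECDR signature starts
// no more than 22 + 65535 = 65557 bytes before EOF; 66000 leaves some slack.
cassert(ECDR_SIZE + 0xFFFFu <= 66000);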
@ -271,66 +266,14 @@ static LibError za_extract_cdfh(const CDFH* cdfh,
}
// this code grabs an LFH struct from file block(s) that are
// passed to the callback. usually, one call copies the whole thing,
// but the LFH may straddle a block boundary.
// analyse an opened Zip file; call back into archive.cpp to
// populate the Archive object with a list of the files it contains.
// returns ERR_OK on success, ERR_UNKNOWN_FORMAT if not a Zip file
// (see below) or another negative LibError code.
//
// rationale: this allows using temp buffers for zip_fixup_lfh,
// which avoids involving the file buffer manager and thus
// unclutters the trace and cache contents.
struct LFH_Copier
{
u8* lfh_dst;
size_t lfh_bytes_remaining;
};
static LibError lfh_copier_cb(uintptr_t ctx, const void* block, size_t size, size_t* bytes_processed)
{
LFH_Copier* p = (LFH_Copier*)ctx;
debug_assert(size <= p->lfh_bytes_remaining);
memcpy2(p->lfh_dst, block, size);
p->lfh_dst += size;
p->lfh_bytes_remaining -= size;
*bytes_processed = size;
return INFO_CB_CONTINUE;
}
// ensures <ent.ofs> points to the actual file contents; it is initially
// the offset of the LFH. we cannot use CDFH filename and extra field
// lengths to skip past LFH since that may not mirror CDFH (has happened).
//
// this is called at file-open time instead of while mounting to
// reduce seeks: since reading the file will typically follow, the
// block cache entirely absorbs the IO cost.
void zip_fixup_lfh(File* f, ArchiveEntry* ent)
{
// already fixed up - done.
if(!(ent->flags & ZIP_LFH_FIXUP_NEEDED))
return;
// performance note: this ends up reading one file block, which is
// only in the block cache if the file starts in the same block as a
// previously read file (i.e. both are small).
LFH lfh;
LFH_Copier params = { (u8*)&lfh, sizeof(LFH) };
ssize_t ret = file_io(f, ent->ofs, LFH_SIZE, FILE_BUF_TEMP, lfh_copier_cb, (uintptr_t)&params);
debug_assert(ret == sizeof(LFH));
debug_assert(lfh.magic == lfh_magic);
const size_t fn_len = read_le16(&lfh.fn_len);
const size_t e_len = read_le16(&lfh.e_len);
ent->ofs += (off_t)(LFH_SIZE + fn_len + e_len);
// LFH doesn't have a comment field!
ent->flags &= ~ZIP_LFH_FIXUP_NEEDED;
}
LibError zip_populate_archive(Archive* a, File* f)
// fairly slow - must read Central Directory from disk
// (size ~= 60 bytes*num_files); observed time ~= 80ms.
LibError zip_populate_archive(File* f, Archive* a)
{
LibError ret;
@ -406,35 +349,81 @@ completely_bogus:
}
//-----------------------------------------------------------------------------
// this code grabs an LFH struct from file block(s) that are
// passed to the callback. usually, one call copies the whole thing,
// but the LFH may straddle a block boundary.
//
// rationale: this allows using temp buffers for zip_fixup_lfh,
// which avoids involving the file buffer manager and thus
// unclutters the trace and cache contents.
struct LFH_Copier
{
u8* lfh_dst;
size_t lfh_bytes_remaining;
};
static LibError lfh_copier_cb(uintptr_t ctx, const void* block, size_t size, size_t* bytes_processed)
{
LFH_Copier* p = (LFH_Copier*)ctx;
debug_assert(size <= p->lfh_bytes_remaining);
memcpy2(p->lfh_dst, block, size);
p->lfh_dst += size;
p->lfh_bytes_remaining -= size;
*bytes_processed = size;
return INFO_CB_CONTINUE;
}
// ensures <ent.ofs> points to the actual file contents; it is initially
// the offset of the LFH. we cannot use CDFH filename and extra field
// lengths to skip past LFH since that may not mirror CDFH (has happened).
//
// this is called at file-open time instead of while mounting to
// reduce seeks: since reading the file will typically follow, the
// block cache entirely absorbs the IO cost.
void zip_fixup_lfh(File* f, ArchiveEntry* ent)
{
// already fixed up - done.
if(!(ent->flags & ZIP_LFH_FIXUP_NEEDED))
return;
// performance note: this ends up reading one file block, which is
// only in the block cache if the file starts in the same block as a
// previously read file (i.e. both are small).
LFH lfh;
LFH_Copier params = { (u8*)&lfh, sizeof(LFH) };
ssize_t ret = file_io(f, ent->ofs, LFH_SIZE, FILE_BUF_TEMP, lfh_copier_cb, (uintptr_t)&params);
debug_assert(ret == sizeof(LFH));
debug_assert(lfh.magic == lfh_magic);
const size_t fn_len = read_le16(&lfh.fn_len);
const size_t e_len = read_le16(&lfh.e_len);
ent->ofs += (off_t)(LFH_SIZE + fn_len + e_len);
// LFH doesn't have a comment field!
ent->flags &= ~ZIP_LFH_FIXUP_NEEDED;
}
//-----------------------------------------------------------------------------
// archive builder backend
//-----------------------------------------------------------------------------
// rationale: don't support partial adding, i.e. updating archive with
// only one file. this would require overwriting parts of the Zip archive,
// which is annoying and slow. also, archives are usually built in
// seek-optimal order, which would break if we start inserting files.
// while testing, loose files can be used, so there's no loss.
/*
dont support partial adding, i.e. updating archive with only one file. only build archive from ground up
our archive builder always has to arrange everything for optimal performance
while testing, can use loose files, so no inconvenience
*/
// we don't want to expose ZipArchive to callers,
// (would require defining File, Pool and CDFH)
// so allocate the storage here and return opaque pointer.
struct ZipArchive
{
File f;
@ -445,8 +434,6 @@ struct ZipArchive
CDFH* prev_cdfh;
};
// we don't want to expose ZipArchive to callers, so
// allocate the storage here and return opaque pointer.
static SingleAllocator<ZipArchive> za_mgr;

View File

@ -1,17 +1,59 @@
// archive backend for Zip files
//
// Copyright (c) 2003-2006 Jan Wassenberg
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// Contact info:
// Jan.Wassenberg@stud.uni-karlsruhe.de
// http://www.stud.uni-karlsruhe.de/~urkt/
#ifndef ZIP_H__
#define ZIP_H__
#include "archive.h"
#include "file.h"
struct File;
struct Archive;
struct ArchiveEntry;
extern LibError zip_populate_archive(Archive* a, File* f);
// analyse an opened file: if it is a Zip archive, call back into
// archive.cpp to populate the Archive object with a list of the
// files it contains.
// returns ERR_OK on success, ERR_UNKNOWN_FORMAT if not a Zip archive
// (see below) or another negative LibError code.
//
// fairly slow - must read Central Directory from disk
// (size ~= 60 bytes*num_files); observed time ~= 80ms.
extern LibError zip_populate_archive(File* f, Archive* a);
// ensures <ent.ofs> points to the actual file contents; it is initially
// the offset of the LFH. we cannot use CDFH filename and extra field
// lengths to skip past LFH since that may not mirror CDFH (has happened).
//
// this is called at file-open time instead of while mounting to
// reduce seeks: since reading the file will typically follow, the
// block cache entirely absorbs the IO cost.
extern void zip_fixup_lfh(File* f, ArchiveEntry* ent);
struct ZipArchive;
//
// archive builder backend
//
struct ZipArchive; // opaque
// create a new Zip archive
extern LibError zip_archive_create(const char* zip_filename, ZipArchive** pza);
extern LibError zip_archive_add_file(ZipArchive* za, const ArchiveEntry* ze, void* file_contents);
extern LibError zip_archive_add_file(ZipArchive* za, const ArchiveEntry* ae, void* file_contents);
extern LibError zip_archive_finish(ZipArchive* za);
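A usage sketch of the builder interface above, mirroring how archive_build drives it (single-file case; assumes the ArchiveEntry and compressed contents were prepared by the caller):

static LibError write_single_file_archive(const char* zip_filename,
	const ArchiveEntry* ae, void* file_contents)
{
	ZipArchive* za;
	RETURN_ERR(zip_archive_create(zip_filename, &za));
	RETURN_ERR(zip_archive_add_file(za, ae, file_contents));
	// presumably writes the central directory and closes the archive file
	return zip_archive_finish(za);
}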

View File

@ -702,6 +702,10 @@ LONG WINAPI wdbg_exception_filter(EXCEPTION_POINTERS* ep)
(void)swscanf(locus, fmt, func_name, file, &line);
// don't care whether all 3 fields were filled (they default to "?")
// this must happen before the error dialog because user could choose to
// exit immediately there.
wdbg_write_minidump(ep);
wchar_t buf[500];
const wchar_t* msg_fmt =
L"Much to our regret we must report the program has encountered an error.\r\n"
@ -716,8 +720,6 @@ LONG WINAPI wdbg_exception_filter(EXCEPTION_POINTERS* ep)
ErrorReaction er = debug_display_error(buf, flags, 1,ep->ContextRecord, file,line);
debug_assert(er > 0);
wdbg_write_minidump(ep);
// invoke the Win32 default handler - it calls ExitProcess for
// most exception types.
return EXCEPTION_CONTINUE_SEARCH;

View File

@ -29,6 +29,7 @@
#include "sysdep/cpu.h"
#include "wdbg.h"
#include "debug_stl.h"
#include "app_hooks.h"
#if CPU_IA32
# include "lib/sysdep/ia32.h"
#endif
@ -1908,7 +1909,12 @@ void wdbg_write_minidump(EXCEPTION_POINTERS* exception_pointers)
{
lock();
HANDLE hFile = CreateFile("crashlog.dmp", GENERIC_WRITE, FILE_SHARE_WRITE, 0, CREATE_ALWAYS, 0, 0);
// note: we go through some gyrations here (strcpy+strcat) to avoid
// dependency on file code (vfs_path_append).
char N_path[PATH_MAX];
strcpy_s(N_path, ARRAY_SIZE(N_path), ah_get_log_dir());
strcat_s(N_path, ARRAY_SIZE(N_path), "crashlog.dmp");
HANDLE hFile = CreateFile(N_path, GENERIC_WRITE, FILE_SHARE_WRITE, 0, CREATE_ALWAYS, 0, 0);
if(hFile == INVALID_HANDLE_VALUE)
goto fail;
@ -1977,8 +1983,11 @@ static void test_array()
int ints[] = { 1,2,3,4,5 }; UNUSED2(ints);
wchar_t chars[] = { 'w','c','h','a','r','s',0 }; UNUSED2(chars);
//DISPLAY_ERROR(L"wdbg_sym self test: check if stack trace below is ok.");
RaiseException(0xf001,0,0,0);
// note: prefer simple error (which also generates stack trace) to
// exception, because it is guaranteed to work (no issues with the
// debugger swallowing exceptions).
DISPLAY_ERROR(L"wdbg_sym self test: check if stack trace below is ok.");
//RaiseException(0xf001,0,0,0);
}
// also used by test_stl as an element type

View File

@ -305,8 +305,6 @@ void kill_mainloop()
}
#ifndef SCED
int main(int argc, char* argv[])
{
debug_printf("MAIN &argc=%p &argv=%p\n", &argc, &argv);
@ -326,18 +324,3 @@ int main(int argc, char* argv[])
exit(0);
}
#else // SCED:
void ScEd_Init()
{
g_Quickstart = true;
Init(0, NULL, INIT_HAVE_VMODE|INIT_NO_GUI);
}
void ScEd_Shutdown()
{
Shutdown();
}
#endif // SCED