
archive: new rule for choosing whether to store a file compressed or uncompressed: it leans heavily toward compressing, because compressed files use less memory when loading (less loss to internal fragmentation)
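To make the new rule concrete, here is a small standalone sketch: the function follows the same thresholds as the should_store_compressed() added in the archive code below, and the sizes checked in main() are illustrative values (not part of this commit) chosen to hit each branch.

#include <cassert>
#include <cstddef>

static const size_t KiB = 1024;

// same decision logic as the should_store_compressed() added below
static bool should_store_compressed(size_t ucsize, size_t csize)
{
    const float ratio = (float)ucsize / csize;
    // tiny: compress regardless of savings - CPU cost is negligible and the
    // final buffer needs no padding allocation.
    if(ucsize < 512)
        return true;
    // large and nearly incompressible: store raw; the CPU time would no
    // longer be hidden by IO time.
    if(ucsize >= 32*KiB && ratio < 1.02f)
        return false;
    // everything else: lean toward compressing (less memory when loading).
    return true;
}

int main()
{
    assert( should_store_compressed(400, 400));       // tiny, zero savings
    assert(!should_store_compressed(64*KiB, 63*KiB)); // large, under 2% savings
    assert( should_store_compressed(64*KiB, 32*KiB)); // compresses well
    return 0;
}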

everything else: bugfix: buffers added to the extant list via
file_cache_retrieve (i.e. moved from the cache to the extant list) didn't
have their long_lived flag carried over; this caused a spurious
"not freed immediately" warning.

This was SVN commit r3580.
janwas 2006-03-02 02:21:55 +00:00
parent 0fd85b76db
commit 04b72b29b4
5 changed files with 42 additions and 18 deletions

View File

@@ -682,6 +682,33 @@ static LibError compress_cb(uintptr_t cb_ctx, const void* block, size_t size, si
 }
+// final decision on whether to store the file as compressed,
+// given the observed compressed/uncompressed sizes.
+static bool should_store_compressed(size_t ucsize, size_t csize)
+{
+    const float ratio = (float)ucsize / csize;
+    const ssize_t bytes_saved = (ssize_t)ucsize - (ssize_t)csize;
+    // tiny - store compressed regardless of savings.
+    // rationale:
+    // - CPU cost is negligible and overlapped with IO anyway;
+    // - reading from compressed files uses less memory because we
+    //   don't need to allocate space for padding in the final buffer.
+    if(ucsize < 512)
+        return true;
+    // large high-entropy file - store uncompressed.
+    // rationale:
+    // - any bigger than this and CPU time becomes a problem: it isn't
+    //   necessarily hidden by IO time anymore.
+    if(ucsize >= 32*KiB && ratio < 1.02f)
+        return false;
+    // TODO: any other cases?
+    // we currently store everything else compressed.
+    return true;
+}
 static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
     ArchiveEntry& ent, void*& file_contents, FileIOBuf& buf)    // out
 {
@@ -727,10 +754,7 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
             return ret;
         }
-        const float ratio = (float)ucsize / csize;
-        const ssize_t bytes_saved = (ssize_t)ucsize - (ssize_t)csize;
-        if(ratio > 1.05f && bytes_saved > 50)
-            store_compressed = true;
+        store_compressed = should_store_compressed(ucsize, csize);
     }
     // store file info

View File

@@ -260,7 +260,7 @@ mechanism:
 - coalesce: boundary tags in freed memory with magic value
 - freelist: 2**n segregated doubly-linked, address-ordered
 */
-static const size_t MAX_CACHE_SIZE = 64*MiB;
+static const size_t MAX_CACHE_SIZE = 96*MiB;
 class CacheAllocator
 {
 public:
@@ -674,13 +674,13 @@ public:
         extant_bufs.push_back(ExtantBuf(buf, size, atom_fn, this_epoch));
     }
-    void add_ref(FileIOBuf buf, size_t size, const char* atom_fn)
+    void add_ref(FileIOBuf buf, size_t size, const char* atom_fn, bool long_lived)
     {
         ssize_t idx = find(buf);
         if(idx != -1)
             extant_bufs[idx].refs++;
         else
-            add(buf, size, atom_fn, false);
+            add(buf, size, atom_fn, long_lived);
     }
     const char* get_owner_filename(FileIOBuf buf)
@@ -826,9 +826,6 @@ static void free_padded_buf(FileIOBuf padded_buf, size_t size)
 FileIOBuf file_buf_alloc(size_t size, const char* atom_fn, bool long_lived)
 {
-if(!stricmp(atom_fn, "entities/template_structure_43fd26460000028eA.xmb"))
-debug_break();
     FileIOBuf buf;
     uint attempts = 0;
     for(;;)
@@ -846,11 +843,11 @@ debug_break();
             free_padded_buf(discarded_buf, size);
-        // note: 200 may seem hefty, but 50 is known to be reached.
+        // note: this may seem hefty, but 200 is known to be reached.
         // (after building archive, file cache is full; attempting to
         // allocate 2MB while only freeing 100KB blocks scattered over
         // the entire cache can take a while)
-        if(attempts++ > 200)
+        if(attempts++ > 300)
             debug_warn("possible infinite loop: failed to make room in cache");
     }
@@ -979,7 +976,7 @@ FileIOBuf file_cache_find(const char* atom_fn, size_t* psize)
 }
-FileIOBuf file_cache_retrieve(const char* atom_fn, size_t* psize)
+FileIOBuf file_cache_retrieve(const char* atom_fn, size_t* psize, bool long_lived)
 {
     // note: do not query extant_bufs - reusing that doesn't make sense
     // (why would someone issue a second IO for the entire file while
@@ -988,7 +985,7 @@ FileIOBuf file_cache_retrieve(const char* atom_fn, size_t* psize)
     FileIOBuf buf = file_cache_find(atom_fn, psize);
     if(buf)
     {
-        extant_bufs.add_ref(buf, *psize, atom_fn);
+        extant_bufs.add_ref(buf, *psize, atom_fn, long_lived);
         stats_buf_ref();
     }

View File

@@ -30,7 +30,7 @@ extern LibError file_buf_set_real_fn(FileIOBuf buf, const char* atom_fn);
 extern FileIOBuf file_cache_find(const char* atom_fn, size_t* size);
-extern FileIOBuf file_cache_retrieve(const char* atom_fn, size_t* size);
+extern FileIOBuf file_cache_retrieve(const char* atom_fn, size_t* size, bool long_lived);
 extern LibError file_cache_add(FileIOBuf buf, size_t size, const char* atom_fn);
 extern LibError file_cache_invalidate(const char* fn);

View File

@@ -445,7 +445,8 @@ LibError vfs_load(const char* V_fn, FileIOBuf& buf, size_t& size,
     debug_printf("VFS| load: V_fn=%s\n", V_fn);
     const char* atom_fn = file_make_unique_fn_copy(V_fn);
-    buf = file_cache_retrieve(atom_fn, &size);
+    bool long_lived = (flags & FILE_LONG_LIVED) != 0;
+    buf = file_cache_retrieve(atom_fn, &size, long_lived);
     if(buf)
     {
         // we want to skip the below code (especially vfs_open) for

View File

@@ -304,17 +304,19 @@ class FileAccessGatherer
         switch(ent->op)
         {
         case TO_LOAD:
-            buf = file_cache_retrieve(atom_fn, &size);
+        {
+            bool long_lived = (ent->flags & FILE_LONG_LIVED) != 0;
+            buf = file_cache_retrieve(atom_fn, &size, long_lived);
             // would not be in cache: add to list of real IOs
             if(!buf)
             {
-                bool long_lived = (ent->flags & FILE_LONG_LIVED) != 0;
                 buf = file_buf_alloc(size, atom_fn, long_lived);
                 (void)file_cache_add(buf, size, atom_fn);
                 file_accesses.push_back(atom_fn);
             }
             break;
+        }
         case TO_FREE:
             buf = file_cache_find(atom_fn, &size);
             (void)file_buf_free(buf);