file code v2 is now feature-complete and works.

- adts: add basic benchmark to self-test; use fminf instead of MIN; fix
overly pessimistic invalidation in Landlord_Cached
- allocators: no longer complain if DynArray is full
- lib: warn if rand() min=max
- archive: bugfixes: plug leak, correctly handle 0-length files, carry
over flags when reading uncompressed file from archive
- file: expose file_get_sorted_dirents and implement file_enum in terms
of that
- file_cache: many bugfixes (especially related to CacheAllocator
freelist)
- vfs_optimizer: finish implementation of archive builder
- zip: correctly handle 0-length files
- tex: add tex_is_known_extension
- sysdep: add efficient fminf implementation (assumes P6+)

- config: update archive builder call site
- gamesetup: add call for delayed self tests
- entity: return JSVAL_VOID as per Philip's suggestion

This was SVN commit r3553.
janwas 2006-02-23 07:55:32 +00:00
parent a52365f90a
commit bc07b9ff0a
19 changed files with 533 additions and 290 deletions

View File

@ -1,8 +1,11 @@
#include "precompiled.h"
#include "adts.h"
#include <deque>
#include "adts.h"
#include "posix.h"
#include "lib/timer.h"
//-----------------------------------------------------------------------------
// built-in self test
//-----------------------------------------------------------------------------
@ -82,12 +85,64 @@ static void test_ringbuf()
}
// ensures all 3 variants of Landlord<> behave the same
static void test_cache_removal()
{
Cache<int, int, Landlord_Naive> c1;
Cache<int, int, Landlord_Naive, Divider_Recip> c1r;
Cache<int, int, Landlord_Cached> c2;
Cache<int, int, Landlord_Cached, Divider_Recip> c2r;
Cache<int, int, Landlord_Lazy> c3;
Cache<int, int, Landlord_Lazy, Divider_Recip> c3r;
#if 0
// set max priority, to reduce interference while measuring.
int old_policy; static sched_param old_param; // (static => 0-init)
pthread_getschedparam(pthread_self(), &old_policy, &old_param);
static sched_param max_param;
max_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
pthread_setschedparam(pthread_self(), SCHED_FIFO, &max_param);
#define MEASURE(c, desc)\
{\
srand(1);\
int cnt = 1;\
TIMER_BEGIN(desc);\
for(int i = 0; i < 30000; i++)\
{\
/* 70% add (random objects) */\
bool add = rand(1,10) < 7;\
if(add)\
{\
int key = cnt++;\
int val = cnt++;\
size_t size = (size_t)rand(1,100);\
uint cost = (uint)rand(1,100);\
c.add(key, val, size, cost);\
}\
else\
{\
size_t size;\
int value;\
c.remove_least_valuable(&value, &size);\
}\
}\
TIMER_END(desc);\
}
MEASURE(c1, "naive")
MEASURE(c1r, "naiverecip")
MEASURE(c2, "cached")
MEASURE(c2r, "cachedrecip")
MEASURE(c3, "lazy")
MEASURE(c3r, "lazyrecip")
// restore previous policy and priority.
pthread_setschedparam(pthread_self(), old_policy, &old_param);
exit(1134);
#endif
srand(1);
int cnt = 1;
for(int i = 0; i < 1000; i++)
@ -119,6 +174,7 @@ static void test_cache_removal()
TEST(removed2 == removed3);
TEST(size2 == size3);
TEST(value2 == value3);
} // else
} // for i
}
@ -130,7 +186,7 @@ static void self_test()
test_cache_removal();
}
SELF_TEST_RUN;
SELF_TEST_REGISTER;
} // namespace test
#endif // #if SELF_TEST_ENABLED

View File

@ -300,10 +300,15 @@ template<class Entries> float ll_calc_min_credit_density(const Entries& entries)
{
float min_credit_density = FLT_MAX;
for(typename Entries::const_iterator it = entries.begin(); it != entries.end(); ++it)
min_credit_density = MIN(min_credit_density, Entries::entry_from_it(it).credit_density());
min_credit_density = fminf(min_credit_density, Entries::entry_from_it(it).credit_density());
return min_credit_density;
}
// note: no warning is given that the MCD entry is being removed!
// (reduces overhead in remove_least_valuable)
// these functors must account for that themselves (e.g. by resetting
// their state directly after returning MCD).
// determine MCD by scanning over all entries.
// tradeoff: O(N) time complexity, but all notify* calls are no-ops.
template<class Entry, class Entries>
@ -361,12 +366,24 @@ public:
float operator()(const Entries& entries)
{
if(!min_valid)
if(min_valid)
{
min_credit_density = ll_calc_min_credit_density(entries);
min_valid = true;
// the entry that has MCD will be removed anyway by caller;
// we need to invalidate here because they don't call
// notify_increased_or_removed.
min_valid = false;
return min_credit_density;
}
return min_credit_density;
// this is somewhat counterintuitive. since we're calculating
// MCD directly, why not mark our cached version of it valid
// afterwards? reason is that our caller will remove the entry with
// MCD, so it'll be invalidated anyway.
// instead, our intent is to calculate MCD for the *next time*.
const float ret = ll_calc_min_credit_density(entries);
min_valid = true;
min_credit_density = FLT_MAX;
return ret;
}
private:
@ -445,11 +462,9 @@ again:
{
Entry& entry = it->second;
mcd_calc.notify_impending_increase_or_remove(entry);
entry.credit -= min_credit_density * entry.size;
if(should_evict(entry))
{
mcd_calc.notify_increased_or_removed(entry);
entry_list.push_back(entry);
// annoying: we have to increment <it> before erasing

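The hunk above makes McdCalc_Cached hand out its cached minimum-credit-density exactly once, because the caller removes the entry that holds it and never notifies the calculator about that removal. A minimal standalone sketch of this consume-once cached-minimum pattern follows; class and function names are illustrative, not the engine's:

// Sketch of a consume-once cached minimum. Contract: after the cache is
// invalidated, it only becomes meaningful again once notify_decreased() has
// been called for every entry (the Landlord charge loop does exactly that).
#include <cfloat>
#include <cstddef>
#include <vector>

class CachedMin
{
public:
    CachedMin() : min_valid(false), cached_min(FLT_MAX) {}

    // an entry's value decreased: fold it into the (possibly fresh) minimum
    void notify_decreased(float v)
    {
        if(!min_valid)          // cache was invalidated: start from scratch
            cached_min = FLT_MAX;
        if(v < cached_min)
            cached_min = v;
        min_valid = true;
    }

    // an entry grew or was removed: the cached minimum may now be stale
    void notify_increased_or_removed() { min_valid = false; }

    float min_of(const std::vector<float>& values)
    {
        if(min_valid)
        {
            // the caller removes the entry holding this value right after we
            // return and does not notify us, so invalidate the cache here
            // (the hunk above adds exactly this invalidation).
            min_valid = false;
            return cached_min;
        }
        // cache miss: recompute by scanning all entries (O(N))
        float ret = FLT_MAX;
        for(size_t i = 0; i < values.size(); i++)
            if(values[i] < ret)
                ret = values[i];
        return ret;
    }

private:
    bool min_valid;
    float cached_min;
};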
View File

@ -278,10 +278,14 @@ LibError da_set_size(DynArray* da, size_t new_size)
// determine how much to add/remove
const size_t cur_size_pa = round_up_to_page(da->cur_size);
const size_t new_size_pa = round_up_to_page(new_size);
if(new_size_pa > da->max_size_pa)
WARN_RETURN(ERR_LIMIT);
const ssize_t size_delta_pa = (ssize_t)new_size_pa - (ssize_t)cur_size_pa;
// not enough memory to satisfy this expand request: abort.
// note: do not complain - some allocators (e.g. file_cache)
// legitimately use up all available space.
if(new_size_pa > da->max_size_pa)
return ERR_LIMIT;
u8* end = da->base + cur_size_pa;
// expanding
if(size_delta_pa > 0)

View File

@ -539,8 +539,8 @@ uint fullrand()
uint rand(uint min, uint max)
{
const uint range = (max-min);
// huge interval or min > max
if(range > XRAND_MAX)
// huge interval or min >= max
if(range == 0 || range > XRAND_MAX)
{
WARN_ERR(ERR_INVALID_PARAM);
return 0;

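A standalone sketch of the tightened interval check follows; RANGE_MAX_SKETCH and the use of the C library rand() are illustrative stand-ins for the engine's XRAND_MAX and its own generator:

// Returns a value in [min, max); 0 on error, as in the hunk above.
#include <cstdio>
#include <cstdlib>

static const unsigned RANGE_MAX_SKETCH = 0xFFFFu;  // stand-in for XRAND_MAX

unsigned ranged_rand(unsigned min, unsigned max)
{
    const unsigned range = max - min;
    // min > max wraps around to a huge unsigned value and trips the size
    // check; min == max gives range == 0, which is now rejected explicitly.
    if(range == 0 || range > RANGE_MAX_SKETCH)
    {
        fprintf(stderr, "ranged_rand: invalid interval [%u, %u)\n", min, max);
        return 0;
    }
    return min + (unsigned)rand() % range;
}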
View File

@ -514,6 +514,20 @@ ssize_t afile_read(AFile* af, off_t ofs, size_t size, FileIOBuf* pbuf, FileIOCB
if(!is_compressed(af))
{
// HACK
// background: file_io will operate according to the
// *archive* file's flags, but the AFile may contain some overrides
// set via vfs_open. one example is FILE_LONG_LIVED -
// that must be copied over (temporarily) into a->f flags.
//
// we currently copy all flags - this may mean that setting
// global policy (e.g. "don't cache") for all archive files is
// difficult, but that can be worked around by forcing
// flag to be set in afile_open.
// this is better than the alternative of copying individual
// flags because it'd need to be updated as new flags are added.
a->f.fc.flags = af->fc.flags;
bool we_allocated = (pbuf != FILE_BUF_TEMP) && (*pbuf == FILE_BUF_ALLOC);
// no need to set last_cofs - only checked if compressed.
ssize_t bytes_read = file_io(&a->f, af->ofs+ofs, size, pbuf, cb, cb_ctx);
@ -625,8 +639,8 @@ static inline bool file_type_is_uncompressible(const char* fn)
static const char* uncompressible_exts[] =
{
"zip", "rar",
"jpg", "jpeg", "png",
"ogg", "mp3"
"jpg", "jpeg", "png",
"ogg", "mp3"
};
for(uint i = 0; i < ARRAY_SIZE(uncompressible_exts); i++)
@ -671,6 +685,16 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
struct stat s;
RETURN_ERR(vfs_stat(atom_fn, &s));
const size_t ucsize = s.st_size;
// skip 0-length files.
// rationale: zip.cpp needs to determine whether a CDFH entry is
// a file or directory (the latter are written by some programs but
// not needed - they'd only pollute the file table).
// it looks like checking for ucsize=csize=0 is the safest way -
// relying on file attributes (which are system-dependent!) is
// even less safe.
// we thus skip 0-length files to avoid confusing them with dirs.
if(!ucsize)
return INFO_SKIPPED;
const bool attempt_compress = !file_type_is_uncompressible(atom_fn);
if(attempt_compress)
@ -695,7 +719,12 @@ static LibError read_and_compress_file(const char* atom_fn, uintptr_t ctx,
void* cdata = 0; size_t csize = 0;
if(attempt_compress)
{
RETURN_ERR(comp_finish(ctx, &cdata, &csize));
LibError ret = comp_finish(ctx, &cdata, &csize);
if(ret < 0)
{
file_buf_free(buf);
return ret;
}
const float ratio = (float)ucsize / csize;
const ssize_t bytes_saved = (ssize_t)ucsize - (ssize_t)csize;

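The ratio and bytes_saved values computed above feed a store-vs-deflate decision that this hunk cuts off. A hedged sketch of such a policy follows; the thresholds are illustrative, not the engine's:

#include <cstddef>

// Only keep the compressed form if it actually pays: require both a
// noticeable ratio and an absolute saving, so tiny files and nearly
// incompressible data end up stored uncompressed.
static bool is_compression_worthwhile(size_t ucsize, size_t csize)
{
    if(csize == 0 || csize >= ucsize)
        return false;                     // no gain (or compression grew it)
    const float ratio = (float)ucsize / (float)csize;
    const long bytes_saved = (long)ucsize - (long)csize;
    return ratio > 1.05f && bytes_saved > 512;   // illustrative thresholds
}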
View File

@ -415,9 +415,37 @@ LibError dir_close(DirIterator* d_)
}
static bool dirent_less(const DirEnt* d1, const DirEnt* d2)
static bool dirent_less(const DirEnt& d1, const DirEnt& d2)
{
return strcmp(d1->name, d2->name) < 0;
return strcmp(d1.name, d2.name) < 0;
}
// enumerate all directory entries in <P_path>; add to container and
// then sort it by filename.
LibError file_get_sorted_dirents(const char* P_path, DirEnts& dirents)
{
DirIterator d;
RETURN_ERR(dir_open(P_path, &d));
dirents.reserve(125); // preallocate for efficiency
DirEnt ent;
for(;;)
{
LibError ret = dir_next_ent(&d, &ent);
if(ret == ERR_DIR_END)
break;
RETURN_ERR(ret);
ent.name = file_make_unique_fn_copy(ent.name);
dirents.push_back(ent);
}
std::sort(dirents.begin(), dirents.end(), dirent_less);
(void)dir_close(&d);
return ERR_OK;
}
@ -439,72 +467,32 @@ static bool dirent_less(const DirEnt* d1, const DirEnt* d2)
// of converting from/to native path (we just give 'em the dirent name).
LibError file_enum(const char* P_path, const FileCB cb, const uintptr_t user)
{
// pointer to DirEnt: faster sorting, but more allocs.
typedef std::vector<const DirEnt*> DirEnts;
typedef DirEnts::const_iterator DirEntCIt;
typedef DirEnts::reverse_iterator DirEntRIt;
// all entries are enumerated (adding to this container),
// std::sort-ed, then all passed to cb.
DirEnts dirents;
dirents.reserve(125); // preallocate for efficiency
LibError stat_err = ERR_OK; // first error encountered by stat()
LibError cb_err = ERR_OK; // first error returned by cb
DirIterator d;
CHECK_ERR(dir_open(P_path, &d));
DirEnts dirents;
RETURN_ERR(file_get_sorted_dirents(P_path, dirents));
DirEnt ent;
for(;;) // instead of while() to avoid warnings
{
LibError ret = dir_next_ent(&d, &ent);
if(ret == ERR_DIR_END)
break;
if(!stat_err)
stat_err = ret;
const size_t size = sizeof(DirEnt)+strlen(ent.name)+1;
DirEnt* p_ent = (DirEnt*)malloc(size);
if(!p_ent)
{
stat_err = ERR_NO_MEM;
goto fail;
}
p_ent->size = ent.size;
p_ent->mtime = ent.mtime;
p_ent->name = (const char*)p_ent + sizeof(DirEnt);
strcpy((char*)p_ent->name, ent.name); // safe
dirents.push_back(p_ent);
}
std::sort(dirents.begin(), dirents.end(), dirent_less);
// call back for each entry (now sorted)
{
// call back for each entry (now sorted);
// first, expand each DirEnt to full struct stat (we store as such to
// reduce memory use and therefore speed up sorting)
struct stat s;
memset(&s, 0, sizeof(s));
const uintptr_t memento = 0; // there is nothing we
// .. not needed for plain files (OS opens them; memento doesn't help)
const uintptr_t memento = 0;
for(DirEntCIt it = dirents.begin(); it != dirents.end(); ++it)
{
const DirEnt* ent = *it;
s.st_mode = (ent->size == -1)? S_IFDIR : S_IFREG;
s.st_size = ent->size;
s.st_mtime = ent->mtime;
LibError ret = cb(ent->name, &s, memento, user);
const DirEnt& dirent = *it;
s.st_mode = (dirent.size == -1)? S_IFDIR : S_IFREG;
s.st_size = dirent.size;
s.st_mtime = dirent.mtime;
LibError ret = cb(dirent.name, &s, memento, user);
if(ret != INFO_CB_CONTINUE)
{
cb_err = ret; // first error (since we now abort)
break;
}
}
}
fail:
WARN_ERR(dir_close(&d));
// free all memory (can't do in loop above because it may be aborted).
for(DirEntRIt rit = dirents.rbegin(); rit != dirents.rend(); ++rit)
free((void*)(*rit));
if(cb_err != ERR_OK)
return cb_err;

View File

@ -155,6 +155,19 @@ extern LibError dir_next_ent(DirIterator* d, DirEnt* ent);
extern LibError dir_close(DirIterator* d);
#ifdef __cplusplus
typedef std::vector<DirEnt> DirEnts;
typedef DirEnts::const_iterator DirEntCIt;
typedef DirEnts::reverse_iterator DirEntRIt;
// enumerate all directory entries in <P_path>; add to container and
// then sort it by filename.
extern LibError file_get_sorted_dirents(const char* P_path, DirEnts& dirents);
#endif // #ifdef __cplusplus
// called by file_enum for each entry in the directory.
// name doesn't include path!
// return INFO_CB_CONTINUE to continue calling; anything else will cause

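A sketch of how a caller might use the newly exported file_get_sorted_dirents follows; it assumes the DirEnt fields shown elsewhere in this commit (name, mtime, size with -1 marking directories) and uses the engine's debug_printf for output:

// Enumerate a directory and print its entries in filename order.
void print_sorted_listing(const char* P_path)
{
    DirEnts dirents;
    if(file_get_sorted_dirents(P_path, dirents) < 0)
        return;  // enumeration failed; nothing to print

    for(DirEntCIt it = dirents.begin(); it != dirents.end(); ++it)
    {
        const DirEnt& ent = *it;
        const bool is_dir = (ent.size == -1);
        debug_printf("%s%s\n", ent.name, is_dir? "/" : "");
    }
}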
View File

@ -278,6 +278,11 @@ public:
void* alloc(size_t size)
{
// safely handle 0 byte allocations. according to C/C++ tradition,
// we allocate a unique address, which ends up wasting 1 page.
if(!size)
size = 1;
const size_t size_pa = round_up(size, BUF_ALIGN);
void* p;
@ -307,31 +312,29 @@ public:
void make_read_only(u8* p, size_t size)
{
const size_t size_pa = round_up(size, BUF_ALIGN);
(void)mprotect(p, size_pa, PROT_READ);
/*/* (void)mprotect(p, size_pa, PROT_READ);*/
}
#include "nommgr.h"
void free(u8* p, size_t size)
#include "mmgr.h"
{
// make sure entire range is within pool.
if(!pool_contains(&pool, p) || !pool_contains(&pool, p+size-1))
size_t size_pa = round_up(size, BUF_ALIGN);
// make sure entire (aligned!) range is within pool.
if(!pool_contains(&pool, p) || !pool_contains(&pool, p+size_pa-1))
{
debug_warn("invalid pointer");
return;
}
size_t size_pa = round_up(size, BUF_ALIGN);
// (re)allow writes
//
// note: unfortunately we cannot unmap this buffer's memory
// (to make sure it is not used) because we write a header/footer
// into it to support coalescing.
(void)mprotect(p, size_pa, PROT_READ|PROT_WRITE);
/*/* (void)mprotect(p, size_pa, PROT_READ|PROT_WRITE);*/
coalesce(p, size_pa);
freelist_add(p, size_pa);
coalesce_and_free(p, size_pa);
}
// free all allocations and reset state to how it was just after
@ -346,146 +349,155 @@ public:
private:
Pool pool;
uint size_class_of(size_t size_pa)
uint size_class_of(size_t size_pa) const
{
return log2((uint)size_pa);
}
//-------------------------------------------------------------------------
// boundary tags for coalescing
static const u32 MAGIC1 = FOURCC('C','M','E','M');
static const u32 MAGIC2 = FOURCC('\x00','\xFF','\x55','\xAA');
struct FreePage
static const u32 HEADER_ID = FOURCC('C','M','A','H');
static const u32 FOOTER_ID = FOURCC('C','M','A','F');
static const u32 MAGIC = FOURCC('\xFF','\x55','\xAA','\x01');
struct Header
{
FreePage* prev;
FreePage* next;
Header* prev;
Header* next;
size_t size_pa;
u32 magic1;
u32 magic2;
u32 id;
u32 magic;
};
// must be enough room to stash 2 FreePage instances in the freed page.
cassert(BUF_ALIGN >= 2*sizeof(FreePage));
// check if there is a free allocation before/after <p>.
// return 0 if not, otherwise a pointer to its FreePage header/footer.
// if ofs = 0, check before; otherwise, it gives the size of the
// current allocation, and we check behind that.
// notes:
// - p and ofs are trusted: [p, p+ofs) lies within the pool.
// - correctly deals with p lying at start/end of pool.
FreePage* freed_page_at(u8* p, size_t ofs)
// we could use struct Header for Footer as well, but keeping them
// separate and different can avoid coding errors (e.g. mustn't pass a
// Footer to freelist_remove!)
struct Footer
{
// checking the footer of the memory before p.
if(!ofs)
{
// .. but p is at front of pool - bail.
if(p == pool.da.base)
return 0;
p -= sizeof(FreePage);
}
// checking header of memory after p+ofs.
else
{
p += ofs;
// .. but it's at end of the currently committed region - bail.
if(p >= pool.da.base+pool.da.cur_size)
return 0;
}
// note: deliberately reordered fields for safety
u32 magic;
u32 id;
size_t size_pa;
};
// must be enough room to stash Header+Footer within the freed allocation.
cassert(BUF_ALIGN >= sizeof(Header)+sizeof(Footer));
// check if there is a valid FreePage header/footer at p.
// we use magic values to differentiate the header from user data
// (this isn't 100% reliable, but we can't insert extra boundary
// tags because the memory must remain aligned).
FreePage* page = (FreePage*)p;
if(page->magic1 != MAGIC1 || page->magic2 != MAGIC2)
return 0;
debug_assert(page->size_pa % BUF_ALIGN == 0);
return page;
// expected_id identifies the tag type (either HEADER_ID or
// FOOTER_ID). returns whether the given id, magic and size_pa
// values are consistent with such a tag.
//
// note: these magic values are all that differentiates tags from
// user data. this isn't 100% reliable, but we can't insert extra
// boundary tags because the memory must remain aligned.
bool is_valid_tag(u32 expected_id, u32 id, u32 magic, size_t size_pa) const
{
if(id != expected_id || magic != MAGIC)
return false;
TEST(size_pa % BUF_ALIGN == 0);
TEST(size_pa <= MAX_CACHE_SIZE);
return true;
}
// check if p's neighbors are free; if so, merges them all into
// one big region and updates freelists accordingly.
// p and size_pa are trusted: [p, p+size_pa) lies within the pool.
void coalesce(u8*& p, size_t& size_pa)
// add p to freelist; if its neighbor(s) are free, merges them all into
// one big region and frees that.
// notes:
// - correctly deals with p lying at start/end of pool.
// - p and size_pa are trusted: [p, p+size_pa) lies within the pool.
void coalesce_and_free(u8* p, size_t size_pa)
{
FreePage* prev = freed_page_at(p, 0);
if(prev)
// CAVEAT: Header and Footer are wiped out by freelist_remove -
// must use them before that.
// expand (p, size_pa) to include previous allocation if it's free.
// (unless p is at start of pool region)
if(p != pool.da.base)
{
freelist_remove(prev);
p -= prev->size_pa;
size_pa += prev->size_pa;
const Footer* footer = (const Footer*)(p-sizeof(Footer));
if(is_valid_tag(FOOTER_ID, footer->id, footer->magic, footer->size_pa))
{
p -= footer->size_pa;
size_pa += footer->size_pa;
Header* header = (Header*)p;
freelist_remove(header);
}
}
FreePage* next = freed_page_at(p, size_pa);
if(next)
// expand size_pa to include following memory if it was allocated
// and is currently free.
// (unless it starts beyond end of currently committed region)
Header* header = (Header*)(p+size_pa);
if((u8*)header < pool.da.base+pool.da.cur_size)
{
freelist_remove(next);
size_pa += next->size_pa;
if(is_valid_tag(HEADER_ID, header->id, header->magic, header->size_pa))
{
size_pa += header->size_pa;
freelist_remove(header);
}
}
freelist_add(p, size_pa);
}
//-------------------------------------------------------------------------
// freelist
uintptr_t bitmap;
FreePage* freelists[sizeof(uintptr_t)*CHAR_BIT];
// note: we store Header nodes instead of just a pointer to head of
// list - this wastes a bit of mem but greatly simplifies list insertion.
Header freelists[sizeof(uintptr_t)*CHAR_BIT];
void freelist_add(u8* p, size_t size_pa)
{
TEST(size_pa % BUF_ALIGN == 0);
const uint size_class = size_class_of(size_pa);
// write header and footer into the freed mem
// (its prev and next link fields will be set below)
FreePage* header = (FreePage*)p;
header->prev = header->next = 0;
Header* header = (Header*)p;
header->id = HEADER_ID;
header->magic = MAGIC;
header->size_pa = size_pa;
header->magic1 = MAGIC1; header->magic2 = MAGIC2;
FreePage* footer = (FreePage*)(p+size_pa-sizeof(FreePage));
*footer = *header;
Footer* footer = (Footer*)(p+size_pa-sizeof(Footer));
footer->id = FOOTER_ID;
footer->magic = MAGIC;
footer->size_pa = size_pa;
// insert the header into freelist
// .. list was empty: link to head
if(!freelists[size_class])
{
freelists[size_class] = header;
bitmap |= BIT(size_class);
}
// .. not empty: link to node (address order)
else
{
FreePage* prev = freelists[size_class];
// find node to insert after
while(prev->next && header <= prev->next)
prev = prev->next;
header->next = prev->next;
header->prev = prev;
}
Header* prev = &freelists[size_class];
// find node after which to insert (address ordered freelist)
while(prev->next && header <= prev->next)
prev = prev->next;
header->next = prev->next;
header->prev = prev;
if(prev->next)
prev->next->prev = header;
prev->next = header;
bitmap |= BIT(size_class);
}
void freelist_remove(FreePage* page)
void freelist_remove(Header* header)
{
const uint size_class = size_class_of(page->size_pa);
Footer* footer = (Footer*)((u8*)header+header->size_pa-sizeof(Footer));
TEST(is_valid_tag(HEADER_ID, header->id, header->magic, header->size_pa));
TEST(is_valid_tag(FOOTER_ID, footer->id, footer->magic, footer->size_pa));
TEST(header->size_pa == footer->size_pa);
const uint size_class = size_class_of(header->size_pa);
// in middle of list: unlink from prev node
if(page->prev)
page->prev->next = page->next;
// was at front of list: unlink from head
else
{
freelists[size_class] = page->next;
// freelist is now empty - update bitmap.
if(!page->next)
bitmap &= ~BIT(size_class);
}
header->prev->next = header->next;
if(header->next)
header->next->prev = header->prev;
// not at end of list: unlink from next node
if(page->next)
page->next->prev = page->prev;
// if freelist is now empty, clear bit in bitmap.
if(!freelists[size_class].next)
bitmap &= ~BIT(size_class);
// wipe out header and footer to prevent accidental reuse
memset(header, 0xEE, sizeof(Header));
memset(footer, 0xEE, sizeof(Footer));
}
void* alloc_from_class(uint size_class, size_t size_pa)
{
// return first suitable entry in (address-ordered) list
FreePage* cur = freelists[size_class];
while(cur)
for(Header* cur = freelists[size_class].next; cur; cur = cur->next)
{
if(cur->size_pa >= size_pa)
{
@ -499,7 +511,6 @@ private:
return p;
}
cur = cur->next;
}
return 0;
@ -523,7 +534,7 @@ private:
// apparently all classes above start_size_class are empty,
// or the above would have succeeded.
debug_assert(bitmap < BIT(start_size_class+1));
TEST(bitmap < BIT(start_size_class+1));
return 0;
}
}; // CacheAllocator
@ -565,6 +576,11 @@ public:
void add(FileIOBuf buf, size_t size, const char* atom_fn, bool long_lived)
{
// cache_allocator also does this; we need to follow suit so that
// matches() won't fail due to zero-length size.
if(!size)
size = 1;
// don't do was-immediately-freed check for long_lived buffers.
const uint this_epoch = long_lived? 0 : epoch++;
@ -600,7 +616,7 @@ public:
}
}
add(buf, size, atom_fn, 0);
add(buf, size, atom_fn, false);
}
const char* get_owner_filename(FileIOBuf buf)
@ -615,33 +631,40 @@ public:
return 0;
}
void find_and_remove(FileIOBuf buf, size_t* size, const char** atom_fn)
bool find_and_remove(FileIOBuf buf, FileIOBuf& exact_buf, size_t& size, const char*& atom_fn)
{
bool actually_removed = false;
debug_assert(buf != 0);
for(size_t i = 0; i < extant_bufs.size(); i++)
{
ExtantBuf& eb = extant_bufs[i];
if(matches(eb, buf))
{
*size = eb.size;
*atom_fn = eb.atom_fn;
exact_buf = eb.buf;
size = eb.size;
atom_fn = eb.atom_fn;
// no more references
if(--eb.refs == 0)
{
// mark as reusable
// mark slot in extant_bufs[] as reusable
eb.buf = 0;
eb.size = 0;
eb.atom_fn = 0;
actually_removed = true;
}
if(eb.epoch != 0 && eb.epoch != epoch-1)
debug_warn("buf not released immediately");
epoch++;
return;
return actually_removed;
}
}
debug_warn("buf is not on extant list! double free?");
return false;
}
void replace_owner(FileIOBuf buf, const char* atom_fn)
@ -703,17 +726,15 @@ FileIOBuf file_buf_alloc(size_t size, const char* atom_fn, bool long_lived)
if(buf)
break;
// tell file_cache to remove some items. this may shake loose
// several, all of which must be returned to the cache_allocator.
for(;;)
{
FileIOBuf discarded_buf; size_t size;
if(!file_cache.remove_least_valuable(&discarded_buf, &size))
break;
// remove least valuable entry from cache and free its buffer.
FileIOBuf discarded_buf; size_t size;
bool removed = file_cache.remove_least_valuable(&discarded_buf, &size);
// only false if cache is empty, which can't be the case because
// allocation failed.
TEST(removed);
#include "nommgr.h"
cache_allocator.free((u8*)discarded_buf, size);
cache_allocator.free((u8*)discarded_buf, size);
#include "mmgr.h"
}
if(attempts++ > 50)
debug_warn("possible infinite loop: failed to make room in cache");
@ -763,11 +784,29 @@ LibError file_buf_free(FileIOBuf buf)
if(!buf)
return ERR_OK;
size_t size; const char* atom_fn;
extant_bufs.find_and_remove(buf, &size, &atom_fn);
FileIOBuf exact_buf; size_t actual_size; const char* atom_fn;
bool actually_removed = extant_bufs.find_and_remove(buf, exact_buf, actual_size, atom_fn);
if(actually_removed)
{
FileIOBuf buf_in_cache;
if(file_cache.retrieve(atom_fn, buf_in_cache, 0, false))
{
// sanity checks: what's in cache must match what we have.
// note: don't compare actual_size with cached size - they are
// usually different.
debug_assert(buf_in_cache == buf);
}
// buf is not in cache - needs to be freed immediately.
else
{
#include "nommgr.h"
cache_allocator.free((u8*)exact_buf, actual_size);
#include "mmgr.h"
}
}
stats_buf_free();
trace_notify_free(atom_fn, size);
trace_notify_free(atom_fn, actual_size);
return ERR_OK;
}
@ -938,6 +977,8 @@ static void test_cache_allocator()
{
size_t size = rand(1, 10*MiB);
total_size_used += size;
if(total_size_used == 298580898)
debug_break();
void* p;
// until successful alloc:
for(;;)

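The CacheAllocator rework above replaces the single FreePage tag with separate Header/Footer boundary tags so that a freed block can recognize and absorb free neighbors. A compressed sketch of the boundary-tag idea follows; the tag layout, magic value and pool globals are illustrative, not the engine's:

// Every free block carries a small tag at its start (header) and end
// (footer) recording its size plus a magic value. When a block is freed,
// the footer just before it and the header just after it reveal whether the
// neighbors are free, so all regions can be merged before reinsertion.
#include <cstddef>
#include <cstdint>
#include <cstring>

struct Tag { uint32_t magic; size_t size; };     // shared header/footer layout
static const uint32_t FREE_MAGIC = 0xF4EEB10Cu;  // illustrative magic value

extern uint8_t* pool_base;   // start of committed pool memory (assumed global)
extern uint8_t* pool_end;    // one past the end of committed pool memory

static void read_tag(const uint8_t* at, Tag& t)  { memcpy(&t, at, sizeof(Tag)); }
static void write_tag(uint8_t* at, const Tag& t) { memcpy(at, &t, sizeof(Tag)); }

static void mark_free(uint8_t* block, size_t size)
{
    const Tag t = { FREE_MAGIC, size };
    write_tag(block, t);                          // header
    write_tag(block + size - sizeof(Tag), t);     // footer
}

void coalesce_and_free(uint8_t* p, size_t size)
{
    Tag t;
    // merge with the preceding block if its footer marks it free
    if(p != pool_base)
    {
        read_tag(p - sizeof(Tag), t);
        if(t.magic == FREE_MAGIC)
        {
            p -= t.size;
            size += t.size;
            // the real allocator also unlinks this neighbor from its freelist
        }
    }
    // merge with the following block if its header marks it free
    if(p + size < pool_end)
    {
        read_tag(p + size, t);
        if(t.magic == FREE_MAGIC)
        {
            size += t.size;
            // ditto: unlink from freelist
        }
    }
    mark_free(p, size);
    // the real allocator now inserts [p, p+size) into its size-class freelist
}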
View File

@ -429,7 +429,9 @@ ssize_t vfs_io(const Handle hf, const size_t size, FileIOBuf* pbuf,
off_t ofs = vf->ofs;
vf->ofs += (off_t)size;
return x_io(&vf->xf, ofs, size, pbuf, cb, cb_ctx);
ssize_t nbytes = x_io(&vf->xf, ofs, size, pbuf, cb, cb_ctx);
RETURN_ERR(nbytes);
return nbytes;
}

View File

@ -4,6 +4,8 @@
#include "lib/timer.h"
#include "file_internal.h"
# include "ps/VFSUtil.h"
static uintptr_t trace_initialized; // set via CAS
static Pool trace_pool;
@ -251,10 +253,10 @@ struct FileAccess
typedef std::vector<FileAccess> FileAccesses;
class FileGatherer
class FileAccessGatherer
{
// put all entries in one trace file: easier to handle; obviates FS enum code
// rationale: don't go through file in order; instead, process most recent
// rationale: don't go through trace in order; instead, process most recent
// run first, to give more weight to it (TSP code should go with first entry
// when #occurrences are equal)
struct Run
@ -350,7 +352,7 @@ class FileGatherer
}
public:
FileGatherer(const char* trace_filename, Filenames required_fns,
FileAccessGatherer(const char* trace_filename, Filenames required_fns,
FileAccesses& file_accesses_)
: file_accesses(file_accesses_)
{
@ -374,8 +376,8 @@ public:
// should never be copied; this also squelches warning
private:
FileGatherer(const FileGatherer& rhs);
FileGatherer& operator=(const FileGatherer& rhs);
FileAccessGatherer(const FileAccessGatherer& rhs);
FileAccessGatherer& operator=(const FileAccessGatherer& rhs);
};
@ -387,11 +389,11 @@ class TourBuilder
{
return u32_from_u16(prev, next);
}
FileId cid_prev(ConnectionId id)
FileId cid_first(ConnectionId id)
{
return u32_hi(id);
}
FileId cid_next(ConnectionId id)
FileId cid_second(ConnectionId id)
{
return u32_lo(id);
}
@ -408,7 +410,8 @@ class TourBuilder
: id(id_), occurrences(1) {}
};
struct decreasing_occurrences: public std::binary_function<const Connection&, const Connection&, bool>
// sort by decreasing occurrence
struct Occurrence_greater: public std::binary_function<const Connection&, const Connection&, bool>
{
bool operator()(const Connection& c1, const Connection& c2) const
{
@ -476,41 +479,75 @@ class TourBuilder
return has_cycle;
}
void add_edge(const Connection& c)
void try_add_edge(const Connection& c)
{
FileId prev_id = cid_prev(c.id);
FileId next_id = cid_next(c.id);
FileId first_id = cid_first(c.id);
FileId second_id = cid_second(c.id);
FileAccess& prev = file_accesses[prev_id];
FileAccess& next = file_accesses[next_id];
if(prev.next != NULL_ID || next.prev != NULL_ID)
FileAccess& first = file_accesses[first_id];
FileAccess& second = file_accesses[second_id];
if(first.next != NULL_ID || second.prev != NULL_ID)
return;
prev.next = next_id;
next.prev = prev_id;
first.next = second_id;
second.prev = first_id;
bool introduced_cycle = is_cycle_at(next_id);
debug_assert(introduced_cycle == is_cycle_at(prev_id));
bool introduced_cycle = is_cycle_at(second_id);
debug_assert(introduced_cycle == is_cycle_at(first_id));
if(introduced_cycle)
{
debug_printf("try: undo (due to cycle)\n");
// undo
prev.next = next.prev = NULL_ID;
first.next = second.prev = NULL_ID;
return;
}
}
// pointer to this is returned by TourBuilder()!
std::vector<const char*> fn_vector;
void output_chain(const Connection& c)
{
FileAccess* start = &file_accesses[cid_first(c.id)];
// early out: if this access was already visited, the entire chain
// it belongs to must have been visited as well. bail to save lots of time.
if(start->visited)
return;
// follow prev links starting with c until no more are left;
// start ends up the beginning of the chain including <c>.
while(start->prev != NULL_ID)
start = &file_accesses[start->prev];
// iterate over the chain - add to Filenames list and mark as visited
FileAccess* cur = start;
do
{
if(!cur->visited)
{
fn_vector.push_back(cur->atom_fn);
cur->visited = true;
}
cur = &file_accesses[cur->next];
}
while(cur->next != NULL_ID);
}
public:
TourBuilder(FileAccesses& file_accesses_)
TourBuilder(FileAccesses& file_accesses_, Filenames& fns)
: file_accesses(file_accesses_)
{
build_connections();
std::sort(connections.begin(), connections.end(), decreasing_occurrences());
std::sort(connections.begin(), connections.end(), Occurrence_greater());
for(Connections::iterator it = connections.begin(); it != connections.end(); ++it)
add_edge(*it);
try_add_edge(*it);
for(Connections::iterator it = connections.begin(); it != connections.end(); ++it)
output_chain(*it);
// walk tour; make sure all nodes are covered
// add each one to FileList
fn_vector.push_back(0); // 0-terminate for use as Filenames array
fns = &fn_vector[0];
}
// should never be copied; this also squelches warning
@ -520,44 +557,6 @@ private:
};
/*
static LibError determine_optimal_ordering(const char* trace_filename, Filenames* pfns)
{
*pfl = 0;
// count # files
uint num_files = 0;
for(size_t i = 0; i < t.num_ents; i++)
if(t.ents[i].op == TO_LOAD)
num_files++;
if(!num_files)
return ERR_DIR_END;
Filenames fns = (Filenames)malloc((num_files+1)*sizeof(const char*));
if(!fns)
return ERR_NO_MEM;
size_t ti = 0;
for(size_t i = 0; i < num_files; i++)
{
// find next trace entry that is a load (must exist)
while(t.ents[ti].op != TO_LOAD)
ti++;
fns[i] = t.ents[ti].atom_fn;
ti++;
}
trace_clear();
*pfl = fl;
return ERR_OK;
}
*/
//-----------------------------------------------------------------------------
typedef std::vector<const char*> FnVector;
@ -573,30 +572,46 @@ void vfs_opt_notify_loose_file(const char* atom_fn)
}
LibError vfs_opt_rebuild_main_archive(const char* P_archive_dst_dir)
struct EntCbParams
{
debug_warn("currently non-functional");
std::vector<const char*> files;
};
// for each mount point (with VFS_MOUNT_ARCHIVE flag set):
static void EntCb(const char* path, const DirEnt* ent, void* context)
{
EntCbParams* params = (EntCbParams*)context;
if(!DIRENT_IS_DIR(ent))
params->files.push_back(file_make_unique_fn_copy(path));
}
LibError vfs_opt_rebuild_main_archive(const char* P_archive_path, const char* trace_filename)
{
// get list of all files
Filenames required_fns = 0;
// TODO: for each mount point (with VFS_MOUNT_ARCHIVE flag set):
EntCbParams params;
RETURN_ERR(VFSUtil::EnumDirEnts("", VFSUtil::RECURSIVE, 0, EntCb, &params));
params.files.push_back(0);
Filenames required_fns = &params.files[0];
FileAccesses file_accesses;
FileGatherer gatherer("../logs/trace.txt", required_fns, file_accesses);
FileAccessGatherer gatherer(trace_filename, required_fns, file_accesses);
TourBuilder builder(file_accesses);
// builder.store_list(pfns);
Filenames fns = 0;
// (Filenames)malloc((num_files+1)*sizeof(const char*));
// if(!fns)
// return ERR_NO_MEM;
Filenames fns;
TourBuilder builder(file_accesses, fns);
LibError ret = archive_build(P_archive_path, fns);
// do NOT delete source files or archives! some apps might want to
// keep them (e.g. for source control), or name them differently.
// rebuild is required to make sure the new archive is used. this is
// already taken care of by VFS dir watch, unless it's disabled..
#ifdef NO_DIR_WATCH
(void)mount_rebuild();
#endif
char P_path[VFS_MAX_PATH];
RETURN_ERR(vfs_path_append(P_path, P_archive_dst_dir, "main.zip"));
LibError ret = archive_build("main.zip", fns);
// delete all loose files in list
free(fns);
// delete all archives in P_dst_path
return ret;
}
@ -631,16 +646,20 @@ static bool should_build_mini_archive()
}
LibError vfs_opt_auto_build_archive(const char* P_dst_path)
LibError vfs_opt_auto_build_archive(const char* P_dst_path,
const char* main_archive_name, const char* trace_filename)
{
char P_archive_path[PATH_MAX];
if(should_rebuild_main_archive())
return vfs_opt_rebuild_main_archive(P_dst_path);
{
RETURN_ERR(vfs_path_append(P_archive_path, P_dst_path, main_archive_name));
return vfs_opt_rebuild_main_archive(P_archive_path, trace_filename);
}
else if(should_build_mini_archive())
{
loose_files.push_back(0);
// get new unused mini archive name
const char* archive_filename = 0;
RETURN_ERR(archive_build(archive_filename, &loose_files[0]));
// get new unused mini archive name at P_dst_path
RETURN_ERR(archive_build(P_archive_path, &loose_files[0]));
// delete all newly added loose files
}

View File

@ -45,7 +45,10 @@ extern void trace_get(Trace* t);
extern LibError trace_write_to_file(const char* trace_filename);
extern LibError trace_read_from_file(const char* trace_filename, Trace* t);
extern LibError vfs_opt_rebuild_main_archive(const char* P_dst_path);
extern LibError vfs_opt_rebuild_main_archive(const char* P_archive_path, const char* trace_filename);
extern LibError vfs_opt_auto_build_archive(const char* P_dst_path,
const char* main_archive_name, const char* trace_filename);
extern void vfs_opt_notify_loose_file(const char* atom_fn);

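A sketch of how the new entry points are meant to be called; the rebuild arguments mirror the config.cpp hunk later in this commit, while the auto-build arguments are placeholders chosen for the example:

void example_archive_builds()
{
    // explicit rebuild of the main archive, guided by the given trace
    (void)vfs_opt_rebuild_main_archive("mods/official/official1.zip",
                                       "../logs/trace.txt");

    // automatic mode: decides between rebuilding the main archive and
    // packing recently added loose files into a mini archive
    (void)vfs_opt_auto_build_archive("mods/official",
                                     "main.zip", "../logs/trace.txt");
}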
View File

@ -233,6 +233,10 @@ static LibError za_extract_cdfh(const CDFH* cdfh,
const u32 lfh_ofs = read_le32(&cdfh->lfh_ofs);
const char* fn_tmp = (const char*)cdfh+CDFH_SIZE; // not 0-terminated!
// offset to where next CDFH should be (caller will scan for it)
// (must be set before early-out below).
ofs_to_next_cdfh = CDFH_SIZE + fn_len + e_len + c_len;
CompressionMethod method;
switch(zip_method)
{
@ -240,8 +244,8 @@ static LibError za_extract_cdfh(const CDFH* cdfh,
case ZIP_CM_DEFLATE: method = CM_DEFLATE; break;
default: WARN_RETURN(ERR_UNKNOWN_CMETHOD);
}
// .. it's a directory entry (we only want files)
// it's a directory entry (we only want files)
if(!csize && !ucsize)
return ERR_NOT_FILE; // don't warn - we just ignore these
@ -259,9 +263,6 @@ static LibError za_extract_cdfh(const CDFH* cdfh,
ent->method = method;
ent->flags = ZIP_LFH_FIXUP_NEEDED;
// offset to where next CDFH should be (caller will scan for it)
ofs_to_next_cdfh = CDFH_SIZE + fn_len + e_len + c_len;
return ERR_OK;
}

View File

@ -419,6 +419,26 @@ void tex_util_foreach_mipmap(uint w, uint h, uint bpp, const u8* restrict data,
// API
//-----------------------------------------------------------------------------
// indicate if <filename>'s extension is that of a texture format
// supported by tex_load. case-insensitive.
//
// rationale: tex_load complains if the given file is of an
// unsupported type. this API allows users to preempt that warning
// (by checking the filename themselves), and also provides for e.g.
// enumerating only images in a file picker.
// an alternative might be a flag to suppress warning about invalid files,
// but this is open to misuse.
bool tex_is_known_extension(const char* filename)
{
const TexCodecVTbl* dummy;
// found codec for it => known extension
if(tex_codec_for_filename(filename, &dummy) == ERR_OK)
return true;
return false;
}
// split out of tex_load to ease resource cleanup
static LibError tex_load_impl(FileIOBuf file_, size_t file_size, Tex* t)
{

View File

@ -183,6 +183,17 @@ extern void tex_set_global_orientation(int orientation);
// open/close
//
// indicate if <filename>'s extension is that of a texture format
// supported by tex_load. case-insensitive.
//
// rationale: tex_load complains if the given file is of an
// unsupported type. this API allows users to preempt that warning
// (by checking the filename themselves), and also provides for e.g.
// enumerating only images in a file picker.
// an alternative might be a flag to suppress warning about invalid files,
// but this is open to misuse.
extern bool tex_is_known_extension(const char* filename);
// load the specified image from file into the given Tex object.
// currently supports BMP, TGA, JPG, JP2, PNG, DDS.
extern LibError tex_load(const char* fn, Tex* t);

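An illustrative caller of the new tex_is_known_extension, using it to skip unsupported files before tex_load can warn about them; try_load_texture is a hypothetical helper written for this example:

// Checking the extension first avoids tex_load's warning when scanning a
// mixed directory (e.g. from a file picker).
bool try_load_texture(const char* fn, Tex* t)
{
    if(!tex_is_known_extension(fn))
        return false;   // not a supported image format; skip silently
    return tex_load(fn, t) == ERR_OK;
}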
View File

@ -32,6 +32,7 @@ struct TexCodecVTbl
// precondition: ext is valid string
// ext doesn't include '.'; just compare against e.g. "png"
// must compare case-insensitive!
bool (*is_ext)(const char* ext);
size_t (*hdr_size)(const u8* file);

View File

@ -16,18 +16,53 @@
#if !HAVE_C99
// note: stupid VC7 gets arguments wrong when using __declspec(naked);
// we need to use DWORD PTR and esp-relative addressing.
#if HAVE_MS_ASM
__declspec(naked) float fminf(float, float)
{
__asm
{
fld DWORD PTR [esp+4]
fld DWORD PTR [esp+8]
fcomi st(0), st(1)
fcmovnb st(0), st(1)
fxch
fstp st(0)
ret
}
}
#else
float fminf(float a, float b)
{
return (a < b)? a : b;
}
#endif
#if HAVE_MS_ASM
__declspec(naked) float fmaxf(float, float)
{
__asm
{
fld DWORD PTR [esp+4]
fld DWORD PTR [esp+8]
fcomi st(0), st(1)
fcmovb st(0), st(1)
fxch
fstp st(0)
ret
}
}
#else
float fmaxf(float a, float b)
{
return (a > b)? a : b;
}
#endif
#endif // #if !HAVE_C99
// no C99, and not running on IA-32 (where this is defined to ia32_rint)
// => need to implement our fallback version.

View File

@ -105,7 +105,7 @@ static void ParseCommandLineArgs(int argc, char* argv[])
// note: VFS init is sure to have been completed by now
// (since CONFIG_Init reads from file); therefore,
// it is safe to call this from here directly.
vfs_opt_rebuild_main_archive("mods/official");
vfs_opt_rebuild_main_archive("mods/official/official1.zip", "../logs/trace.txt");
break;
case 'c':
if(strcmp(name, "conf") == 0)

View File

@ -916,6 +916,11 @@ void Init(int argc, char* argv[], uint flags)
// required by ogl_tex to detect broken gfx card/driver combos
get_gfx_info();
//-------------------------------------------------------------------------
// all lib init is now complete. self-tests are now run.
self_test_run_all();
oglCheck();
if(!g_Quickstart)

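The self_test_run_all() call above (together with the SELF_TEST_RUN to SELF_TEST_REGISTER switch in adts.cpp) implies a deferred self-test mechanism: tests register themselves at static-init time and run once library init is complete. A minimal sketch of such a mechanism follows; all names are illustrative, not the engine's actual self_test.h:

typedef void (*SelfTestFunc)();

static SelfTestFunc registered_tests[64];
static int num_registered_tests;

struct SelfTestRegistrar
{
    explicit SelfTestRegistrar(SelfTestFunc f)
    {
        registered_tests[num_registered_tests++] = f;
    }
};

// placed at file scope next to the test; the ctor runs before main()
#define REGISTER_SELF_TEST(func) \
    static SelfTestRegistrar register_self_test_##func(func)

// called once all libraries are initialized (cf. the Init() hunk above)
void run_all_registered_self_tests()
{
    for(int i = 0; i < num_registered_tests; i++)
        registered_tests[i]();
}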
View File

@ -325,7 +325,7 @@ public:
m_orderQueue.clear();
else
m_orderQueue.pop_front();
return JS_TRUE;
return JSVAL_VOID;
}
static void ScriptingInit();