# add instrumentation to file_cache; bugfixes
adts: whip together an inefficient LRU cache manager (mostly for comparing against Landlord). file_cache: fix self-test (now correctly deals with small cache sizes). file_stats: more stats for the file cache. trace, vfs: bugfix: ignore writes recorded by the trace. This was SVN commit r3684.
This commit is contained in:
parent
dadd15ef00
commit
d6abc57868
@ -719,6 +719,80 @@ public:
|
||||
// TODO: use SSE/3DNow RCP instruction? not yet, because not all systems
|
||||
// support it and overhead of detecting this support eats into any gains.
|
||||
|
||||
// initial implementation for testing purposes; quite inefficient.
|
||||
template<typename Key, typename Entry>
|
||||
class LRU
|
||||
{
|
||||
public:
|
||||
bool empty() const
|
||||
{
|
||||
return lru.empty();
|
||||
}
|
||||
|
||||
void add(Key key, const Entry& entry)
|
||||
{
|
||||
lru.push_back(KeyAndEntry(key, entry));
|
||||
}
|
||||
|
||||
bool find(Key key, const Entry** pentry) const
|
||||
{
|
||||
CIt it = std::find_if(lru.begin(), lru.end(), KeyEq(key));
|
||||
if(it == lru.end())
|
||||
return false;
|
||||
*pentry = &it->entry;
|
||||
return true;
|
||||
}
|
||||
|
||||
void remove(Key key)
|
||||
{
|
||||
std::remove_if(lru.begin(), lru.end(), KeyEq(key));
|
||||
}
|
||||
|
||||
void on_access(Entry& entry)
|
||||
{
|
||||
for(It it = lru.begin(); it != lru.end(); ++it)
|
||||
{
|
||||
if(&entry == &it->entry)
|
||||
{
|
||||
add(it->key, it->entry);
|
||||
lru.erase(it);
|
||||
return;
|
||||
}
|
||||
}
|
||||
debug_warn("entry not found in list");
|
||||
}
|
||||
|
||||
void remove_least_valuable(std::list<Entry>& entry_list)
|
||||
{
|
||||
entry_list.push_back(lru.front().entry);
|
||||
lru.pop_front();
|
||||
}
|
||||
|
||||
private:
|
||||
struct KeyAndEntry
|
||||
{
|
||||
Key key;
|
||||
Entry entry;
|
||||
KeyAndEntry(Key key_, const Entry& entry_)
|
||||
: key(key_), entry(entry_) {}
|
||||
};
|
||||
class KeyEq
|
||||
{
|
||||
Key key;
|
||||
public:
|
||||
KeyEq(Key key_) : key(key_) {}
|
||||
bool operator()(const KeyAndEntry& ke) const
|
||||
{
|
||||
return ke.key == key;
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::list<KeyAndEntry> List;
|
||||
typedef typename List::iterator It;
|
||||
typedef typename List::const_iterator CIt;
|
||||
List lru;
|
||||
};
|
||||
|
||||
|
||||
//
|
||||
// Cache
|
||||
|
@ -1297,7 +1297,7 @@ static void test_cache_allocator()
|
||||
size_t total_size_used = 0;
|
||||
while(total_size_used < 4*MAX_CACHE_SIZE)
|
||||
{
|
||||
size_t size = rand(1, 10*MiB);
|
||||
size_t size = rand(1, MAX_CACHE_SIZE/4);
|
||||
total_size_used += size;
|
||||
void* p;
|
||||
// until successful alloc:
|
||||
|
@ -317,11 +317,11 @@ void stats_dump()
|
||||
|
||||
debug_printf(
|
||||
"\nfile_cache:\n"
|
||||
"Hits: %u (%g MB); misses %u (%g MB)\n"
|
||||
"Hit ratio: %u%%; conflict misses: %u%%\n"
|
||||
"Hits: %u (%g MB); misses %u (%g MB); ratio: %u%%\n"
|
||||
"Percent of requested bytes satisfied by cache: %u%%; non-compulsory misses: %u (%u%% of misses)\n"
|
||||
"Block hits: %u; misses: %u; ratio: %u%%\n",
|
||||
cache_count[CR_HIT], cache_size_total[CR_HIT]/MB, cache_count[CR_MISS], cache_size_total[CR_MISS]/MB,
|
||||
percent(cache_count[CR_HIT], cache_count[CR_MISS]), percent(conflict_misses, cache_count[CR_MISS]),
|
||||
cache_count[CR_HIT], cache_size_total[CR_HIT]/MB, cache_count[CR_MISS], cache_size_total[CR_MISS]/MB, percent(cache_count[CR_HIT], cache_count[CR_HIT]+cache_count[CR_MISS]),
|
||||
percent(cache_size_total[CR_HIT], cache_size_total[CR_HIT]+cache_size_total[CR_MISS]), conflict_misses, percent(conflict_misses, cache_count[CR_MISS]),
|
||||
block_cache_count[CR_HIT], block_cache_count[CR_MISS], percent(block_cache_count[CR_HIT], block_cache_count[CR_HIT]+block_cache_count[CR_MISS])
|
||||
);
|
||||
|
||||
|
@ -61,9 +61,9 @@ static void trace_add(TraceOp op, const char* P_fn, size_t size, uint flags = 0,
|
||||
}
|
||||
|
||||
|
||||
void trace_notify_load(const char* P_fn, size_t size, uint flags)
|
||||
void trace_notify_io(const char* P_fn, size_t size, uint flags)
|
||||
{
|
||||
trace_add(TO_LOAD, P_fn, size, flags);
|
||||
trace_add(TO_IO, P_fn, size, flags);
|
||||
}
|
||||
|
||||
void trace_notify_free(const char* P_fn, size_t size)
|
||||
@ -103,12 +103,12 @@ LibError trace_write_to_file(const char* trace_filename)
|
||||
char opcode = '?';
|
||||
switch(ent->op)
|
||||
{
|
||||
case TO_LOAD: opcode = 'L'; break;
|
||||
case TO_IO: opcode = 'L'; break;
|
||||
case TO_FREE: opcode = 'F'; break;
|
||||
default: debug_warn("invalid TraceOp");
|
||||
}
|
||||
|
||||
debug_assert(ent->op == TO_LOAD || ent->op == TO_FREE);
|
||||
debug_assert(ent->op == TO_IO || ent->op == TO_FREE);
|
||||
fprintf(f, "%#010f: %c \"%s\" %d %04x\n", ent->timestamp, opcode, ent->atom_fn, ent->size, ent->flags);
|
||||
}
|
||||
|
||||
@ -144,10 +144,10 @@ LibError trace_read_from_file(const char* trace_filename, Trace* t)
|
||||
break;
|
||||
debug_assert(ret == 5);
|
||||
|
||||
TraceOp op = TO_LOAD; // default in case file is garbled
|
||||
TraceOp op = TO_IO; // default in case file is garbled
|
||||
switch(opcode)
|
||||
{
|
||||
case 'L': op = TO_LOAD; break;
|
||||
case 'L': op = TO_IO; break;
|
||||
case 'F': op = TO_FREE; break;
|
||||
default: debug_warn("invalid TraceOp");
|
||||
}
|
||||
@ -194,7 +194,7 @@ void trace_gen_random(size_t num_entries)
|
||||
}
|
||||
}
|
||||
|
||||
trace_add(TO_LOAD, atom_fn, size);
|
||||
trace_add(TO_IO, atom_fn, size);
|
||||
trace_add(TO_FREE, atom_fn, size);
|
||||
}
|
||||
}
|
||||
@ -215,8 +215,11 @@ bool trace_entry_causes_io(const TraceEntry* ent)
|
||||
const char* atom_fn = ent->atom_fn;
|
||||
switch(ent->op)
|
||||
{
|
||||
case TO_LOAD:
|
||||
case TO_IO:
|
||||
{
|
||||
// we're not interested in writes
|
||||
if(ent->flags & FILE_WRITE)
|
||||
return false;
|
||||
buf = file_cache_retrieve(atom_fn, &size, fc_flags);
|
||||
// would not be in cache: add to list of real IOs
|
||||
if(!buf)
|
||||
@ -270,7 +273,10 @@ LibError trace_run(const char* trace_filename, uint flags)
|
||||
FileIOBuf buf; size_t size;
|
||||
switch(ent->op)
|
||||
{
|
||||
case TO_LOAD:
|
||||
case TO_IO:
|
||||
// do not 'run' writes - we'd destroy the existing data.
|
||||
if(ent->flags & FILE_WRITE)
|
||||
continue;
|
||||
(void)vfs_load(ent->atom_fn, buf, size, ent->flags);
|
||||
break;
|
||||
case TO_FREE:
|
||||
|
@ -4,7 +4,7 @@
|
||||
extern void trace_enable(bool want_enabled);
|
||||
extern void trace_shutdown();
|
||||
|
||||
extern void trace_notify_load(const char* P_fn, size_t size, uint flags);
|
||||
extern void trace_notify_io(const char* P_fn, size_t size, uint flags);
|
||||
extern void trace_notify_free(const char* P_fn, size_t size);
|
||||
|
||||
// TraceEntry operation type.
|
||||
@ -14,7 +14,7 @@ extern void trace_notify_free(const char* P_fn, size_t size);
|
||||
// yield the same results.
|
||||
enum TraceOp
{
	TO_IO,		// a file IO was performed (replaces the old TO_LOAD,
				// since writes are now recorded as well)
	TO_FREE		// a file buffer was freed
};
|
||||
|
||||
|
@ -424,7 +424,7 @@ ssize_t vfs_io(const Handle hf, const size_t size, FileIOBuf* pbuf,
|
||||
FileCommon* fc = &vf->xf.u.fc;
|
||||
|
||||
stats_io_user_request(size);
|
||||
trace_notify_load(fc->atom_fn, size, fc->flags);
|
||||
trace_notify_io(fc->atom_fn, size, fc->flags);
|
||||
|
||||
off_t ofs = vf->ofs;
|
||||
vf->ofs += (off_t)size;
|
||||
@ -454,7 +454,7 @@ LibError vfs_load(const char* V_fn, FileIOBuf& buf, size_t& size,
|
||||
// so duplicate that here:
|
||||
stats_cache(CR_HIT, size, atom_fn);
|
||||
stats_io_user_request(size);
|
||||
trace_notify_load(atom_fn, size, flags);
|
||||
trace_notify_io(atom_fn, size, flags);
|
||||
|
||||
size_t actual_size;
|
||||
LibError ret = file_io_call_back(buf, size, cb, cb_ctx, actual_size);
|
||||
|
Loading…
Reference in New Issue
Block a user