1
0
forked from 0ad/0ad

cache_adt.h: add peek() routine, move CacheEntry to global scope.

config: add read-only cache option
debug: fix error code
input: remove dependency on file header
lockfree: add LF_ReferenceCounter

This was SVN commit r5448.
This commit is contained in:
janwas 2007-11-10 17:47:25 +00:00
parent 0095685b71
commit 084ba8fcb0
5 changed files with 128 additions and 36 deletions

View File

@ -592,6 +592,41 @@ private:
};
// this is applicable to all cache management policies and stores all
// required information. a Divider functor is used to implement
// division for credit_density.
template<class Item, class Divider> struct CacheEntry
{
Item item;
size_t size;
uint cost;
float credit;
Divider divider;
// needed for mgr.remove_least_valuable's entry_copy
CacheEntry()
{
}
CacheEntry(Item item_, size_t size_, uint cost_)
: item(item_), divider((float)size_)
{
size = size_;
cost = cost_;
credit = cost;
// else divider will fail
debug_assert(size != 0);
}
float credit_density() const
{
return divider(credit, (float)size);
}
};
//
// Cache
//
@ -645,6 +680,11 @@ public:
return true;
}
// look up key without altering the cache-management state
// (presumably the trailing 'false' tells retrieve to skip the
// credit/accounting update — confirm against retrieve's definition).
bool peek(Key key, Item& item, size_t* psize = 0)
{
	const bool updateAccounting = false;
	return retrieve(key, item, psize, updateAccounting);
}
// toss out the least valuable entry. return false if cache is empty,
// otherwise true and (optionally) pass back its item and size.
bool remove_least_valuable(Item* pItem = 0, size_t* pSize = 0)
@ -678,38 +718,7 @@ public:
}
private:
// this is applicable to all cache management policies and stores all
// required information. a Divider functor is used to implement
// division for credit_density.
template<class InnerDivider> struct CacheEntry
{
Item item;
size_t size;
uint cost;
float credit;
InnerDivider divider;
// needed for mgr.remove_least_valuable's entry_copy
CacheEntry() {}
CacheEntry(Item item_, size_t size_, uint cost_)
: item(item_), divider((float)size_)
{
size = size_;
cost = cost_;
credit = cost;
// else divider will fail
debug_assert(size != 0);
}
float credit_density() const
{
return divider(credit, (float)size);
}
};
typedef CacheEntry<Divider> Entry;
typedef CacheEntry<Item, Divider> Entry;
// see note in remove_least_valuable().
std::list<Entry> entries_awaiting_eviction;

View File

@ -63,6 +63,14 @@
# define CONFIG_OVERRUN_PROTECTION 0
#endif
// zero-copy IO means all clients share the cached buffer; changing their
// contents is forbidden. this flag causes the buffers to be marked as
// read-only via MMU (writes would cause an exception), which takes a
// bit of extra time.
#ifndef CONFIG_READ_ONLY_CACHE
#define CONFIG_READ_ONLY_CACHE 1
#endif
// enable memory tracking (slow). see mmgr.cpp.
#ifndef CONFIG_USE_MMGR
# define CONFIG_USE_MMGR 0

View File

@ -516,7 +516,7 @@ namespace INFO
// one of the dump_sym* functions decided not to output anything at
// all (e.g. for member functions in UDTs - we don't want those).
// therefore, skip any post-symbol formatting (e.g. ) as well.
const LibError SYM_SUPPRESS_OUTPUT = +100809;
const LibError SYM_SUPPRESS_OUTPUT = +100409;
}

View File

@ -16,7 +16,6 @@
#include <stdlib.h>
#include "lib/external_libraries/sdl.h"
#include "lib/res/file/file.h"
const uint MAX_HANDLERS = 8;
static InHandler handler_stack[MAX_HANDLERS];
@ -93,7 +92,7 @@ LibError in_record(const char* fn)
f = fopen(fn, "wb");
if(!f)
WARN_RETURN(ERR::FILE_ACCESS);
WARN_RETURN(ERR::FAIL);
fwrite(&game_ticks, sizeof(u32), 1, f);
@ -112,7 +111,7 @@ LibError in_playback(const char* fn)
f = fopen(fn, "rb");
if(!f)
WARN_RETURN(ERR::FILE_ACCESS);
WARN_RETURN(ERR::FAIL);
u32 rec_start_time;
fread(&rec_start_time, sizeof(u32), 1, f);

View File

@ -12,6 +12,7 @@
#define INCLUDED_LOCKFREE
#include "posix/posix_types.h" // uintptr_t
#include "lib/sysdep/cpu.h" // cpu_CAS
/*
@ -155,4 +156,79 @@ extern void* lfh_insert(LFHash* hash, uintptr_t key, size_t additional_bytes, in
extern LibError lfh_erase(LFHash* hash, uintptr_t key);
/**
 * thread-safe (lock-free) reference counter with an extra 'exclusive' state.
 *
 * m_status layout: low bits hold the reference count (S_REFCNT mask);
 * the top bit (S_EXCLUSIVE) marks exclusive access, which also causes
 * every CAS against a masked count to fail.
 *
 * NOTE(review): AddReference/Release do not retry their CAS; a concurrent
 * modification between the read and the CAS makes them fail (returning
 * false resp. tripping debug_assert) rather than loop — confirm callers
 * tolerate this.
 **/
class LF_ReferenceCounter
{
public:
	LF_ReferenceCounter()
		: m_status(0)
	{
	}

	/**
	 * attempt to transition from "no references" to exclusive access.
	 *
	 * @return true if successful or false if exclusive access has already
	 * been granted or reference count is non-zero.
	 **/
	bool AcquireExclusiveAccess()
	{
		return cpu_CAS(&m_status, 0, S_EXCLUSIVE);
	}

	/**
	 * re-enables adding references.
	 **/
	void RelinquishExclusiveAccess()
	{
		const bool ok = cpu_CAS(&m_status, S_EXCLUSIVE, 0);
		debug_assert(ok);	// fires if we didn't actually hold exclusive access
	}

	/**
	 * increase the reference count (bounds-checked).
	 *
	 * @return true if successful or false if the item is currently locked.
	 **/
	bool AddReference()
	{
		const uintptr_t oldRefCnt = ReferenceCount();
		debug_assert(oldRefCnt < S_REFCNT);	// counter about to overflow into S_EXCLUSIVE
		// (returns false if S_EXCLUSIVE is set, because m_status then
		// differs from the masked oldRefCnt)
		return cpu_CAS(&m_status, oldRefCnt, oldRefCnt+1);
	}

	/**
	 * decrease the reference count (bounds-checked).
	 **/
	void Release()
	{
		const uintptr_t oldRefCnt = ReferenceCount();
		debug_assert(oldRefCnt != 0);	// Release without matching AddReference
		// (fails if S_EXCLUSIVE is set)
		// BUGFIX: previously stored oldRefCnt+1, i.e. *incremented* the
		// count despite this being the release path; must decrement.
		const bool ok = cpu_CAS(&m_status, oldRefCnt, oldRefCnt-1);
		debug_assert(ok);
	}

	/// current reference count (S_EXCLUSIVE bit masked off).
	uintptr_t ReferenceCount() const
	{
		return m_status & S_REFCNT;
	}

private:
	static const uintptr_t S_REFCNT = (~0u) >> 1;		// 0x7F..F
	static const uintptr_t S_EXCLUSIVE = S_REFCNT+1u;	// 0x80..0

	volatile uintptr_t m_status;
};
// a memory range (pointer + length) bundled with a lock-free reference
// counter; see LF_ReferenceCounter above. ownership/allocation of 'mem'
// is not expressed here — presumably managed by the code that creates
// this record (confirm at use sites).
struct LF_RefCountedMemRange
{
void* mem;	// start of the range
size_t size;	// length of the range in bytes
LF_ReferenceCounter refs;	// tracks concurrent users of [mem, mem+size)
};
#endif // #ifndef INCLUDED_LOCKFREE