adts: add RingBuf self-test. RingBuf: make entry#0 the 'head' and pop_front a no-op if empty.

allocators: allow pool_create at NLSO ctor time
lib: add rand() function based on libc rand and self-test. (this allows
easily returning a random number within a range, and avoids a number of
pitfalls - the naive rand()%range approach is baaad)
precompiled: add self_test.h, so that self tests won't be accidentally
disabled by forgetting to include the header

file_cache: add self-test; fixed 2 bugs that it exposed (it was
incorrectly splitting large blocks and not setting the class_size bit
properly). fix a weird corner case in BlockMgr (attempting to free the
LRU block while it is locked).

file_io, wdbg_sym: tiny fixes

This was SVN commit r3461.
This commit is contained in:
janwas 2006-02-03 04:02:53 +00:00
parent 3e2017147d
commit 1227cb2f6d
10 changed files with 351 additions and 72 deletions

View File

@ -1,3 +1,91 @@
#include "precompiled.h"
#include "adts.h"
//-----------------------------------------------------------------------------
// built-in self test
//-----------------------------------------------------------------------------
#if SELF_TEST_ENABLED
namespace test {
// exercise RingBuf<int, N>: immediate insert/remove, wraparound
// overwrite of old items, and randomized equivalence with std::deque.
static void test_ringbuf()
{
	const size_t N = 49;	// RingBuf capacity
	const int S = 100;	// number of test items (int: used in signed loops)

	// insert and remove immediately
	{
		RingBuf<int, N> buf;
		for(int i = 1; i < S; i++)
		{
			buf.push_back(i);
			TEST(buf.front() == i);
			buf.pop_front();
		}
		TEST(buf.size() == 0 && buf.empty());
	}

	// fill buffer and overwrite old items
	{
		RingBuf<int, N> buf;
		for(int i = 1; i < S; i++)
			buf.push_back(i);
		TEST(buf.size() == N);
		// only the last N items remain; the oldest of them is S-1-N+1.
		int first = buf.front();
		TEST(first == S-1 - (int)N + 1);
		for(size_t i = 0; i < N; i++)
		{
			TEST(buf.front() == first);
			first++;
			buf.pop_front();
		}
		TEST(buf.size() == 0 && buf.empty());
	}

	// randomized insert/remove; must behave as does std::deque
	{
		srand(1);	// deterministic sequence for reproducibility
		RingBuf<int, N> buf;
		std::deque<int> deq;
		for(uint rep = 0; rep < 1000; rep++)
		{
			uint rnd_op = rand(0, 10);
			// 70% - insert
			if(rnd_op >= 3)
			{
				int item = rand();
				buf.push_back(item);
				deq.push_back(item);

				// RingBuf overwrites its oldest items on overflow;
				// mimic that in the deque. cast both operands to int
				// BEFORE subtracting, otherwise the subtraction is
				// done in unsigned math and wraps when size < N.
				int excess_items = (int)deq.size() - (int)N;
				for(int i = 0; i < excess_items; i++)
					deq.pop_front();
			}
			// 30% - pop front (only if not empty)
			else if(!deq.empty())
			{
				buf.pop_front();
				deq.pop_front();
			}
		}
		TEST(buf.size() == deq.size());
		TEST(equal(buf.begin(), buf.end(), deq.begin()));
	}
}
// entry point for this file's self tests (invoked via SELF_TEST_RUN below).
static void self_test()
{
	test_ringbuf();
}
SELF_TEST_RUN;
} // namespace test
#endif // #if SELF_TEST_ENABLED

View File

@ -432,13 +432,13 @@ struct BitBuf
template<class T, size_t n> class RingBuf
{
size_t size_; // # of entries in buffer
size_t head; // index of first item
size_t tail; // index of last item
size_t head; // index of oldest item
size_t tail; // index of newest item
T data[n];
public:
RingBuf() { clear(); }
void clear() { size_ = 0; head = 1; tail = 0; }
RingBuf() : data() { clear(); }
void clear() { size_ = 0; head = 0; tail = n-1; }
size_t size() { return size_; }
bool empty() { return size_ == 0; }
@ -449,6 +449,12 @@ public:
size_t idx = (size_t)(head + ofs);
return data[idx % n];
}
T& operator[](int ofs)
{
debug_assert(!empty());
size_t idx = (size_t)(head + ofs);
return data[idx % n];
}
T& front()
{
@ -486,12 +492,13 @@ public:
void pop_front()
{
if(size_ > 0)
if(size_ != 0)
{
size_--;
head = (head + 1) % n;
}
else
debug_warn("underflow");
head = (head + 1) % n;
}
@ -569,19 +576,19 @@ public:
iterator begin()
{
return iterator(data, (size_ < n)? 1 : head);
return iterator(data, (size_ < n)? 0 : head);
}
const_iterator begin() const
{
return const_iterator(data, (size_ < n)? 1 : head);
return const_iterator(data, (size_ < n)? 0 : head);
}
iterator end()
{
return iterator(data, (size_ < n)? size_+1 : head+n);
return iterator(data, (size_ < n)? size_ : head+n);
}
const_iterator end() const
{
return const_iterator(data, (size_ < n)? size_+1 : head+n);
return const_iterator(data, (size_ < n)? size_ : head+n);
}
};

View File

@ -19,6 +19,7 @@
#include "posix.h"
#include "sysdep/cpu.h" // CAS
#include "byte_order.h"
#include "allocators.h"
@ -90,16 +91,23 @@ void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p)
// dynamic (expandable) array
//-----------------------------------------------------------------------------
static const size_t page_size = sysconf(_SC_PAGE_SIZE);
// helper routine that makes sure page_size has been initialized by the
// time it is needed (otherwise, we are open to NLSO ctor order issues).
// pool_create is therefore now safe to call before main().
// lazily query and cache the system page size. using a function-local
// static (initialized on first call) side-steps NLSO ctor-order issues,
// so callers such as pool_create are safe to run before main().
static size_t get_page_size()
{
	static const size_t cached_page_size = (size_t)sysconf(_SC_PAGE_SIZE);
	return cached_page_size;
}
static bool is_page_multiple(uintptr_t x)
{
return (x % page_size) == 0;
return (x % get_page_size()) == 0;
}
static size_t round_up_to_page(size_t size)
{
return round_up(size, page_size);
return round_up(size, get_page_size());
}
// indicates that this DynArray must not be resized or freed
@ -704,7 +712,7 @@ static void test_da()
TEST(da_free(&da) == 0);
// test wrapping existing mem blocks for use with da_read
const u8 data[4] = { 0x12, 0x34, 0x56, 0x78 };
u8 data[4] = { 0x12, 0x34, 0x56, 0x78 };
TEST(da_wrap_fixed(&da, data, sizeof(data)) == 0);
u8 buf[4];
TEST(da_read(&da, buf, 4) == 0); // success

View File

@ -512,28 +512,83 @@ int match_wildcardw(const wchar_t* s, const wchar_t* w)
}
//////////////////////////////////////////////////////////////////////////////
//
// return random integer in [min, max).
// avoids several common pitfalls; see discussion at
// http://www.azillionmonkeys.com/qed/random.html
// on failure (invalid interval), raises a warning and returns 0.
uint rand(uint min, uint max)
{
	// two libc rand() values are combined to cover large ranges.
	// all intermediate math is done in 64 bits because
	// (RAND_MAX+1)^2 overflows 32-bit math - and RAND_MAX+1 alone
	// already overflows int where RAND_MAX == INT_MAX (e.g. glibc).
	const unsigned long long rand_max1 = (unsigned long long)RAND_MAX + 1;
	const unsigned long long xrand_max = rand_max1*rand_max1 - 1;

	const uint range = (max-min);
	// empty interval (min >= max: would otherwise divide by zero
	// below and has no valid return value anyway) or huge interval.
	if(min >= max || (unsigned long long)range > xrand_max)
	{
		WARN_ERR(ERR_INVALID_PARAM);
		return 0;
	}

	const unsigned long long inv_range = xrand_max / range;

	// generate random number in [0, range).
	// rejecting values >= range*inv_range avoids the skewed
	// distribution that plain modulo would produce.
	unsigned long long x;
	do
		x = (unsigned long long)rand()*rand_max1 + (unsigned long long)rand();
	while(x >= (unsigned long long)range * inv_range);
	x /= inv_range;

	const uint ret = min + (uint)x;
	debug_assert(ret < max);	// paranoia: result must lie in [min, max)
	return ret;
}
//-----------------------------------------------------------------------------
// built-in self test
//
//////////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
#if SELF_TEST_ENABLED
namespace test {
// verify ilog2 rejects non-powers-of-two (returns -1) and returns the
// correct exponent for valid inputs across the full 32-bit range.
// note: the scraped diff had left the pre-change (unsuffixed) assertion
// lines interleaved with the post-change ones; only the latter remain.
static void test_log2()
{
	TEST(ilog2(0u) == -1);
	TEST(ilog2(3u) == -1);
	TEST(ilog2(0xffffffffu) == -1);
	TEST(ilog2(1u) == 0);
	TEST(ilog2(256u) == 8);
	TEST(ilog2(0x80000000u) == 31);
}
// exercise rand(min, max): parameter validation, in-range guarantee,
// and a rough distribution check on a two-value interval.
static void test_rand()
{
	// complain if huge interval or min > max
	TEST(rand(1, 0) == 0);
	TEST(rand(2, ~0u) == 0);

	// returned number must be in [min, max)
	for(int i = 0; i < 100; i++)
	{
		// +1 guarantees max > min even when rand() returns 0
		// (otherwise rand(min, max) would rightly fail and return 0,
		// breaking the in-range TEST below). no wraparound possible:
		// both terms are <= RAND_MAX, so the sum fits in 32 bits.
		uint min = rand(), max = min+rand()+1;
		uint x = rand(min, max);
		TEST(min <= x && x < max);
	}

	// make sure both possible values are hit
	uint ones = 0, twos = 0;
	for(int i = 0; i < 100; i++)
	{
		uint x = rand(1, 3);
		// paranoia: don't use array (x might not be 1 or 2 - checked below)
		if(x == 1) ones++; if(x == 2) twos++;
	}
	TEST(ones+twos == 100);
	TEST(ones > 10 && twos > 10);
}
// entry point for this file's self tests (invoked via SELF_TEST_RUN below).
static void self_test()
{
	test_log2();
	test_rand();
}
SELF_TEST_RUN;

View File

@ -318,4 +318,9 @@ extern int match_wildcardw(const wchar_t* s, const wchar_t* w);
// promises it is safe.
#define SAFE_STRCPY strcpy
// return random integer in [min, max).
// avoids several common pitfalls; see discussion at
// http://www.azillionmonkeys.com/qed/random.html
extern uint rand(uint min, uint max);
#endif // #ifndef LIB_H__

View File

@ -27,8 +27,9 @@
#include "lockfree.h"
#include "timer.h"
#define SELF_TEST_ENABLED 0 // known to fail on P4 due to mem reordering and lack of membars.
#include "self_test.h"
// known to fail on P4 due to mem reordering and lack of membars.
#undef SELF_TEST_ENABLED
#define SELF_TEST_ENABLED 0
/*
liberties taken:

View File

@ -34,8 +34,10 @@
// headers made available everywhere for convenience
//
// note: must not include
#include "lib/types.h"
#include "lib/string_s.h" // CRT secure string
#include "lib/self_test.h"
#include "lib/debug.h"
#include "ps/Pyrogenesis.h" // MICROLOG and old error system
#include <assert.h> // assert()

View File

@ -23,6 +23,8 @@
// to remain valid.
//
static uint block_epoch;
class BlockMgr
{
static const size_t MAX_BLOCKS = 32;
@ -35,25 +37,33 @@ class BlockMgr
// one cached file block. the scrape interleaved the old two-ctor version
// with the new default ctor; this is the post-change form.
struct Block
{
	BlockId id;
	// initialized in BlockMgr ctor and remains valid
	void* mem;
	BlockStatus status;
	// reference count; the block may only be reused once it drops to 0
	// (see BlockMgr::alloc) and is incremented by BlockMgr::find.
	int refs;

	Block()
		: id(block_cache_make_id(0, 0)), status(BS_INVALID), refs(0) {}
};
RingBuf<Block, MAX_BLOCKS> blocks;
typedef RingBuf<Block, MAX_BLOCKS>::iterator BlockIt;
// access pattern is usually ring buffer, but in rare cases we
// need to skip over locked items, even though they are the oldest.
Block blocks[MAX_BLOCKS];
uint oldest_block;
// use Pool to allocate mem for all blocks because it guarantees
// page alignment (required for IO) and obviates manually aligning.
Pool pool;
public:
void init()
BlockMgr()
: blocks(), oldest_block(0)
{
(void)pool_create(&pool, MAX_BLOCKS*FILE_BLOCK_SIZE, FILE_BLOCK_SIZE);
for(Block* b = blocks; b < blocks+MAX_BLOCKS; b++)
{
b->mem = pool_alloc(&pool, 0);
debug_assert(b->mem); // shouldn't ever fail
}
}
void shutdown()
@ -63,38 +73,77 @@ public:
// reserve a block for <id> and return its memory, or 0 if all blocks
// are currently locked (rare; see comment below). the scrape interleaved
// the old RingBuf-based code; this is the post-change scan.
void* alloc(BlockId id)
{
	// sanity check: the id must not already be in the list.
	Block* b;
	for(b = blocks; b < blocks+MAX_BLOCKS; b++)
	{
		if(block_eq(b->id, id))
			debug_warn("allocating block that is already in list");
	}

	for(size_t i = 0; i < MAX_BLOCKS; i++)
	{
		b = &blocks[oldest_block];
		oldest_block = (oldest_block+1)%MAX_BLOCKS;

		// normal case: oldest item can be reused
		if(b->status != BS_PENDING && b->refs == 0)
			goto have_block;

		// wacky special case: oldest item is currently locked.
		// skip it and reuse the next.
		//
		// to see when this can happen, consider IO depth = 4.
		// let the Block at blocks[oldest_block] contain data that
		// an IO wants. the 2nd and 3rd blocks are not in cache and
		// happen to be taken from near the end of blocks[].
		// attempting to issue block #4 fails because its buffer would
		// want the first slot (which is locked since its IO
		// is still pending).
		if(b->status == BS_COMPLETE && b->refs > 0)
			continue;

		debug_warn("status and/or refs have unexpected values");
	}

	debug_warn("all blocks are locked");
	return 0;
have_block:
	b->id = id;
	b->status = BS_PENDING;
	return b->mem;
}
// flag the block identified by <id> as having finished its IO.
// the scrape interleaved the old iterator-based loop; this is the
// post-change pointer scan with the added status assertion.
void mark_completed(BlockId id)
{
	for(Block* b = blocks; b < blocks+MAX_BLOCKS; b++)
	{
		if(block_eq(b->id, id))
		{
			debug_assert(b->status == BS_PENDING);
			b->status = BS_COMPLETE;
			return;
		}
	}
	debug_warn("mark_completed: block not found, but ought still to be in cache");
}
// look up a completed block by id; on success, locks it (refs++) and
// returns its memory. returns 0 if not found or its IO is still pending.
// (reconstructed from interleaved diff lines; the closing brace was
// elided by the hunk boundary.)
void* find(BlockId id)
{
	// linear search is ok, since we only keep a few blocks.
	for(Block* b = blocks; b < blocks+MAX_BLOCKS; b++)
	{
		if(block_eq(b->id, id))
		{
			if(b->status == BS_COMPLETE)
			{
				debug_assert(b->refs >= 0);
				b->refs++;
				return b->mem;
			}
			debug_warn("block referenced while still in progress");
			return 0;
		}
	}
	return 0;	// not found
}
@ -102,28 +151,27 @@ public:
// unlock a block previously returned by find (refs--). the scrape
// interleaved the old iterator-based loop; this is the pointer scan.
void release(BlockId id)
{
	for(Block* b = blocks; b < blocks+MAX_BLOCKS; b++)
	{
		if(block_eq(b->id, id))
		{
			b->refs--;
			debug_assert(b->refs >= 0);	// catch unbalanced release
			return;
		}
	}
	debug_warn("release: block not found, but ought still to be in cache");
}
// mark all blocks belonging to <atom_fn> invalid, e.g. after the file
// changed on disk. warns if a block is still locked by a user.
// (reconstructed from interleaved pre-/post-change diff lines.)
void invalidate(const char* atom_fn)
{
	for(Block* b = blocks; b < blocks+MAX_BLOCKS; b++)
	{
		if(b->id.atom_fn == atom_fn)
		{
			if(b->refs)
				debug_warn("invalidating block that is currently in-use");
			b->status = BS_INVALID;
		}
	}
}
@ -214,15 +262,14 @@ mechanism:
- coalesce: boundary tags in freed memory with magic value
- freelist: 2**n segregated doubly-linked, address-ordered
*/
static const size_t MAX_CACHE_SIZE = 64*MiB;
class CacheAllocator
{
static const size_t MAX_CACHE_SIZE = 64*MiB;
public:
void init()
CacheAllocator()
: bitmap(0), freelists()
{
// note: do not call from ctor; pool_create currently (2006-20-01)
// breaks if called at NLSO init time.
// (safe to call this from ctor as of 2006-02-02)
(void)pool_create(&pool, MAX_CACHE_SIZE, 0);
}
@ -288,6 +335,15 @@ public:
freelist_add(p, size_pa);
}
// free all allocations and reset state to how it was just after
// construction. (comment updated: the former init() was folded
// into the ctor, so "init() call" no longer exists.)
void reset()
{
	pool_free_all(&pool);
	bitmap = 0;
	memset(freelists, 0, sizeof(freelists));
}
private:
Pool pool;
@ -414,7 +470,7 @@ private:
freelist_remove(cur);
if(remnant_pa)
freelist_add(p+remnant_pa, remnant_pa);
freelist_add(p+size_pa, remnant_pa);
return p;
}
@ -433,7 +489,7 @@ private:
{
#define LS1(x) (x & -(int)x) // value of LSB 1-bit
const uint class_size = LS1(classes_left);
classes_left &= ~BIT(class_size); // remove from classes_left
classes_left &= ~class_size; // remove from classes_left
const uint size_class = size_class_of(class_size);
void* p = alloc_from_class(size_class, size_pa);
if(p)
@ -767,8 +823,6 @@ LibError file_cache_invalidate(const char* P_fn)
// no-op: BlockMgr and CacheAllocator now initialize themselves in their
// ctors (pool_create is safe before main as of 2006-02-02). kept so
// callers need not change. the scrape still showed the removed
// block_mgr.init()/cache_allocator.init() lines; they are gone.
void file_cache_init()
{
}
@ -778,3 +832,61 @@ void file_cache_shutdown()
cache_allocator.shutdown();
block_mgr.shutdown();
}
//-----------------------------------------------------------------------------
// built-in self test
//-----------------------------------------------------------------------------
#if SELF_TEST_ENABLED
namespace test {
// stress-test CacheAllocator: allocate random sizes until several times
// the cache capacity has been cycled (forcing memory reuse), freeing a
// randomly chosen prior allocation whenever alloc fails. verifies each
// returned address is not currently live.
static void test_cache_allocator()
{
	// allocated address -> its size
	typedef std::map<void*, size_t> AllocMap;
	AllocMap allocations;

	// put allocator through its paces by allocating several times
	// its capacity (this ensures memory is reused)
	size_t total_size_used = 0;
	while(total_size_used < 4*MAX_CACHE_SIZE)
	{
		size_t size = rand(1, 10*MiB);
		total_size_used += size;
		void* p;
		// until successful alloc:
		for(;;)
		{
			p = cache_allocator.alloc(size);
			if(p)
				break;
			// out of room - remove a previous allocation
			// .. choose one at random
			size_t chosen_idx = (size_t)rand(0, (uint)allocations.size());
			AllocMap::iterator it = allocations.begin();
			for(; chosen_idx != 0; chosen_idx--)
				++it;
			// NOTE(review): presumably nommgr.h/mmgr.h toggle the memory
			// tracker's free() macro hooks around this call - confirm.
#include "nommgr.h"
			cache_allocator.free((u8*)it->first, it->second);
#include "mmgr.h"
			allocations.erase(it);
		}

		// must not already have been allocated
		TEST(allocations.find(p) == allocations.end());
		allocations[p] = size;
	}

	cache_allocator.reset();
}
// entry point for this file's self tests (invoked via SELF_TEST_RUN below).
static void self_test()
{
	test_cache_allocator();
}
SELF_TEST_RUN;
} // namespace test
#endif // #if SELF_TEST_ENABLED

View File

@ -263,9 +263,9 @@ class IOManager
// return all members to their initial state so this IOManager slot can
// be reused. the scrape interleaved the old member order with the new
// one; this is the post-change order (cached_block before temp_buf).
void reset()
{
	memset(&io, 0, sizeof(io));
	cached_block = 0;
	memset(&block_id, 0, sizeof(block_id));
	temp_buf = 0;
	start_time = 0.0;	// required for stats
}
};

View File

@ -33,8 +33,9 @@
# include "lib/sysdep/ia32.h"
#endif
#define SELF_TEST_ENABLED 0 // raises an an annoying exception
#include "self_test.h"
// raises an annoying exception, so disable unless needed
#undef SELF_TEST_ENABLED
#define SELF_TEST_ENABLED 0
#if MSC_VERSION