1
0
forked from 0ad/0ad

adts.cpp: remove DynArray, which was an array of pages. (obsoleted by DynArray/Pool allocators)

adts.h: add Cache (landlord algorithm), move DynHashTbl here from
vfs_tree; expand RingBuf and fix its iterator
allocators: minor tweaks+bugfixes; add SingleAllocator C++ wrapper
config, types: avoid stupid ICC warnings
lib: add round_down, u32_from_u16, SAFE_FREE

This was SVN commit r3414.
This commit is contained in:
janwas 2006-01-23 07:59:20 +00:00
parent c4aea4eb62
commit f06f0c8240
9 changed files with 462 additions and 73 deletions

View File

@ -1,30 +1,3 @@
#include "precompiled.h" #include "precompiled.h"
#include "adts.h" #include "adts.h"
// simple sparse array: room for <max_items> items of type T, backed by
// fixed-size pages that are allocated on demand.
// note: T must fit within one page (sizeof(T) <= PAGE_SIZE), otherwise
// items_per_page would be 0.
template<class T, int max_items> class DynArray
{
public:
	DynArray()
	{
		// zero the page table so operator[] can detect not-yet-allocated
		// pages. (fix: was left uninitialized, so operator[] could
		// dereference a garbage pointer.)
		for(size_t i = 0; i < num_pages; i++)
			pages[i] = 0;
	}

	~DynArray()
	{
		// release all pages that were allocated on demand.
		// (fix: previously leaked - the empty dtor never freed them.)
		for(size_t i = 0; i < num_pages; i++)
			free(pages[i]);	// free(0) is a no-op
	}

	// return pointer to the n-th item, allocating (and zeroing, via
	// calloc) its page if not yet present. returns 0 if out of memory.
	T* operator[](size_t n)
	{
		T*& page = pages[n / items_per_page];
		if(!page)
		{
			page = (T*)calloc(PAGE_SIZE, 1);
			if(!page)
				return 0;
		}
		return page + (n % items_per_page);
	}

private:
	enum { PAGE_SIZE = 4096 };
	// number of items per page.
	// (fix: was written as sizeof(T)/PAGE_SIZE, which is 0 for any T
	// smaller than a page and caused division by zero.)
	static const size_t items_per_page = PAGE_SIZE / sizeof(T);
	static const size_t num_pages = (max_items + items_per_page-1) / items_per_page;
	T* pages[num_pages];
};

View File

@ -9,6 +9,293 @@
#include <map> #include <map>
// traits policy for DynHashTbl: supplies hashing, key equality and
// key extraction for entry type T. the primary template only declares
// the required interface - specialize it for your Key/T combination
// (see the const char* specialization below).
template<typename Key, typename T> class DHT_Traits
{
public:
// initial table size. DynHashTbl asserts this is a power of two
// (it halves this value and then doubles it on first expand).
static const size_t initial_entries = 16;
// return hash value of <key>.
size_t hash(Key key) const;
// return whether the two keys are considered equal.
bool equal(Key k1, Key k2) const;
// extract the key from an entry.
Key get_key(T t) const;
};
// traits specialization for C-string keys where the entry is its own
// key. starts with a larger table (512) than the generic default.
template<> class DHT_Traits<const char*, const char*>
{
public:
	static const size_t initial_entries = 512;

	// hash the string via fnv_lc_hash ("lc" suggests a
	// lowercase/case-insensitive FNV variant - confirm against lib).
	size_t hash(const char* key) const
	{
		return (size_t)fnv_lc_hash(key);
	}

	// keys match iff their characters compare equal.
	bool equal(const char* k1, const char* k2) const
	{
		return strcmp(k1, k2) == 0;
	}

	// entries are their own key.
	const char* get_key(const char* t) const
	{
		return t;
	}
};
// dynamic (grow-on-demand) open-addressing hash table with linear
// probing. intended for pointer types: an empty slot is denoted by
// a zero value, so T must be comparable to 0 and testable as bool.
// hashing/equality/key extraction are customized via Traits
// (see DHT_Traits).
template<typename Key, typename T, class Traits=DHT_Traits<Key,T> > class DynHashTbl
{
	T* tbl;
	u16 num_entries;
	u16 max_entries;	// when initialized, = 2**n for faster modulo
	// note: u16 counters cap the table at 2**15 slots - presumably
	// sufficient for the intended use; TODO confirm.

	Traits tr;

	// return a reference to the slot for <key>: either the slot holding
	// the entry with that key, or (if absent) the empty slot where it
	// would be inserted. uses linear probing; terminates because the
	// table is never more than 75% full (see insert).
	T& get_slot(Key key) const
	{
		size_t hash = tr.hash(key);
		debug_assert(max_entries != 0);	// otherwise, mask will be incorrect
		const uint mask = max_entries-1;
		for(;;)
		{
			T& t = tbl[hash & mask];
			// empty slot encountered => not found
			if(!t)
				return t;
			// keys are actually equal => found it
			if(tr.equal(key, tr.get_key(t)))
				return t;
			// keep going (linear probing)
			hash++;
		}
	}

	// double the table size and re-hash all entries into the new table.
	// throws std::bad_alloc if allocation fails; the previous table
	// remains intact in that case.
	void expand_tbl()
	{
		// alloc a new table (but don't assign it to <tbl> unless successful)
		T* old_tbl = tbl;
		tbl = (T*)calloc(max_entries*2, sizeof(T));
		if(!tbl)
		{
			tbl = old_tbl;
			throw std::bad_alloc();
		}

		// must be set before get_slot (it derives the probe mask from it)
		max_entries += max_entries;

		// newly initialized, nothing to copy - done
		if(!old_tbl)
			return;

		// re-hash from old table into the new one
		for(size_t i = 0; i < max_entries/2u; i++)
		{
			T t = old_tbl[i];
			if(t)
				get_slot(tr.get_key(t)) = t;
		}

		free(old_tbl);
	}

public:
	DynHashTbl()
	{
		tbl = 0;
		num_entries = 0;
		// will be doubled in expand_tbl.
		// (fix: was "Traits.initial_entries", which does not compile -
		// Traits is a type, so its static member must be accessed via
		// Traits::.)
		max_entries = Traits::initial_entries/2;
		debug_assert(is_pow2(max_entries));
		expand_tbl();
	}

	~DynHashTbl()
	{
		// note: deliberately not clear() - that re-allocates the table,
		// which would then leak.
		free(tbl);
	}

	// remove all entries and shrink back to the initial size.
	void clear()
	{
		free(tbl);
		tbl = 0;
		num_entries = 0;
		// rationale: must not set to 0 because expand_tbl only doubles the size.
		// don't keep the previous size because it may have become huge and
		// there is no provision for shrinking.
		max_entries = Traits::initial_entries/2;	// will be doubled in expand_tbl
		// (fix: re-allocate immediately. previously <tbl> remained 0,
		// and the next find/insert dereferenced a null pointer - the
		// insert-path expand check does not fire on an empty table.)
		expand_tbl();
	}

	// insert <t> under <key>. the key must not already be present.
	void insert(const Key key, const T t)
	{
		// more than 75% full - increase table size.
		// do so before determining slot; this will invalidate previous pnodes.
		if(num_entries*4 >= max_entries*3)
			expand_tbl();

		T& slot = get_slot(key);
		debug_assert(slot == 0);	// not already present
		slot = t;
		num_entries++;
	}

	// return the entry matching <key>, or 0 (the empty-slot value)
	// if not present.
	T find(Key key) const
	{
		return get_slot(key);
	}

	size_t size() const
	{
		return num_entries;
	}


	// forward iterator over all occupied slots, in table order.
	class iterator
	{
	public:
		typedef std::forward_iterator_tag iterator_category;
		typedef T value_type;
		typedef ptrdiff_t difference_type;
		typedef const T* pointer;
		typedef const T& reference;

		iterator()
		{
		}
		iterator(T* pos_, T* end_) : pos(pos_), end(end_)
		{
		}
		T& operator*() const
		{
			return *pos;
		}
		iterator& operator++()	// pre
		{
			// skip empty slots
			do
				pos++;
			while(pos != end && *pos == 0);
			return (*this);
		}
		bool operator==(const iterator& rhs) const
		{
			return pos == rhs.pos;
		}
		bool operator<(const iterator& rhs) const
		{
			return (pos < rhs.pos);
		}

		// derived
		const T* operator->() const
		{
			return &**this;
		}
		bool operator!=(const iterator& rhs) const
		{
			return !(*this == rhs);
		}
		iterator operator++(int)	// post
		{
			iterator tmp = *this; ++*this; return tmp;
		}

	protected:
		T* pos;
		T* end;
		// only used when incrementing (avoid going beyond end of table)
	};

	iterator begin() const
	{
		// advance to the first occupied slot (or end, if table empty)
		T* pos = tbl;
		while(pos != tbl+max_entries && *pos == 0)
			pos++;
		return iterator(pos, tbl+max_entries);
	}
	iterator end() const
	{
		// (fix: previously passed 0 as the iterator's end pointer;
		// harmless unless the end iterator is incremented, but pass
		// the real end for safety.)
		return iterator(tbl+max_entries, tbl+max_entries);
	}
};
// Cache for items of variable size and value/"cost".
// uses the Landlord algorithm: each entry is granted "credit" equal to
// its cost; eviction charges every entry "rent" proportional to its
// size and removes the entry whose credit runs out first (i.e. the one
// with the least credit per byte).
template<typename Key, typename T> class Cache
{
public:
	// add a new entry. <size> [bytes] and <cost> (an arbitrary measure
	// of the item's value) determine how long it survives eviction.
	// the key must not already be present.
	void add(Key key, T item, size_t size, uint cost)
	{
		typedef std::pair<CacheMapIt, bool> PairIB;
		// (fix: added "typename" - value_type is a dependent type here.)
		typename CacheMap::value_type val = std::make_pair(key, CacheEntry(item, size, cost));
		PairIB ret = map.insert(val);
		debug_assert(ret.second);	// must not already be in map
	}

	// return the item identified by <key>, or 0 if not in cache.
	// optionally returns its size via <psize>.
	T retrieve(Key key, size_t* psize = 0)
	{
		CacheMapIt it = map.find(key);
		if(it == map.end())
			return 0;
		CacheEntry& entry = it->second;
		if(psize)
			*psize = entry.size;
		// increase credit: a hit means the item is still useful, so
		// restore its credit to the full cost (Landlord refresh-on-access).
		// (fix: the comment previously had no code behind it.)
		entry.credit = (float)entry.cost;
		return entry.item;
	}

	// evict the least valuable entry (lowest credit per byte) and
	// return its item; optionally returns its size via <psize>.
	// returns 0 if the cache is empty. all remaining entries are
	// charged rent proportional to their size (Landlord algorithm).
	T remove_least_valuable(size_t* psize = 0)
	{
		// (fix: previously "goto again" retried forever because rent was
		// never actually charged; an empty map also looped forever.)
		if(map.empty())
			return 0;

		// find the entry with least credit density - it will be evicted.
		CacheMapIt least = map.begin();
		float min_density = density(least->second);
		for(CacheMapIt it = map.begin(); it != map.end(); ++it)
		{
			const float d = density(it->second);
			if(d < min_density)
			{
				min_density = d;
				least = it;
			}
		}

		// charge every entry rent proportional to its size.
		// (the evictee's credit reaches 0; it is removed regardless of
		// float rounding.)
		for(CacheMapIt it = map.begin(); it != map.end(); ++it)
			it->second.credit -= min_density * (float)it->second.size;

		CacheEntry& entry = least->second;
		T item = entry.item;
		if(psize)
			*psize = entry.size;
		map.erase(least);
		return item;
	}

private:
	class CacheEntry
	{
		friend class Cache;

		CacheEntry(T item_, size_t size_, uint cost_)
		{
			item = item_;
			size = size_;
			cost = cost_;
			credit = cost;
		}

		T item;
		size_t size;
		uint cost;
		float credit;
	};

	// credit per byte ("density"); zero-sized entries are treated as
	// one byte to avoid division by zero.
	static float density(const CacheEntry& entry)
	{
		const size_t size = entry.size? entry.size : 1;
		return entry.credit / (float)size;
	}

	typedef std::map<Key, CacheEntry> CacheMap;
	typedef typename CacheMap::iterator CacheMapIt;
	CacheMap map;
};
// //
// FIFO bit queue // FIFO bit queue
// //
@ -50,29 +337,69 @@ struct BitBuf
// ring buffer - static array, accessible modulo n // ring buffer - static array, accessible modulo n
// //
template<class T, size_t n> struct RingBuf template<class T, size_t n> class RingBuf
{ {
size_t size_; // # of entries in buffer size_t size_; // # of entries in buffer
size_t pos; // index of oldest data size_t head; // index of first item
size_t tail; // index of last item
T data[n]; T data[n];
public:
RingBuf() { clear(); } RingBuf() { clear(); }
void clear() { size_ = 0; pos = 0; } void clear() { size_ = 0; head = 1; tail = 0; }
size_t size() { return size_; } size_t size() { return size_; }
bool empty() { return size_ == 0; }
const T& operator[](int ofs) const const T& operator[](int ofs) const
{ {
size_t idx = (size_t)(pos + ofs); debug_assert(!empty());
size_t idx = (size_t)(head + ofs);
return data[idx % n]; return data[idx % n];
} }
T& front()
{
debug_assert(!empty());
return data[head];
}
const T& front() const
{
debug_assert(!empty());
return data[head];
}
T& back()
{
debug_assert(!empty());
return data[tail];
}
const T& back() const
{
debug_assert(!empty());
return data[tail];
}
void push_back(const T& item) void push_back(const T& item)
{ {
if(size_ < n) if(size_ < n)
size_++; size_++;
// do not complain - overwriting old values is legit
// (e.g. sliding window).
else
head = (head + 1) % n;
data[pos] = item; tail = (tail + 1) % n;
pos = (pos + 1) % n; data[tail] = item;
}
void pop_front()
{
if(size_ > 0)
size_--;
else
debug_warn("underflow");
head = (head + 1) % n;
} }
class const_iterator class const_iterator
@ -113,12 +440,12 @@ template<class T, size_t n> struct RingBuf
const_iterator begin() const const_iterator begin() const
{ {
return const_iterator(data, (size_ < n)? 0 : pos); return const_iterator(data, (size_ < n)? 1 : head);
} }
const_iterator end() const const_iterator end() const
{ {
return const_iterator(data, (size_ < n)? size_ : pos+n); return const_iterator(data, (size_ < n)? size_+1 : head+n);
} }
}; };
@ -133,7 +460,7 @@ template<class T, size_t n> struct RingBuf
// typical use: add all available resources to the cache via grow(); // typical use: add all available resources to the cache via grow();
// assign() ids to the resources, and update the resource data if necessary; // assign() ids to the resources, and update the resource data if necessary;
// retrieve() the resource, given id. // retrieve() the resource, given id.
template<class Entry> class Cache template<class Entry> class LRUCache
{ {
public: public:
// 'give' Entry to the cache. // 'give' Entry to the cache.

View File

@ -47,7 +47,14 @@ void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size)
p = storage; p = storage;
// already in use (rare) - allocate from heap // already in use (rare) - allocate from heap
else else
{
p = malloc(size); p = malloc(size);
if(!p)
{
debug_warn("out of memory");
return 0;
}
}
memset(p, 0, size); memset(p, 0, size);
return p; return p;
@ -112,8 +119,10 @@ static LibError validate_da(DynArray* da)
if(debug_is_pointer_bogus(base)) if(debug_is_pointer_bogus(base))
return ERR_1; return ERR_1;
if(!is_page_multiple((uintptr_t)base)) // note: don't check if base is page-aligned -
return ERR_2; // might not be true for 'wrapped' mem regions.
// if(!is_page_multiple((uintptr_t)base))
// return ERR_2;
if(!is_page_multiple(max_size_pa)) if(!is_page_multiple(max_size_pa))
return ERR_3; return ERR_3;
if(cur_size > max_size_pa) if(cur_size > max_size_pa)
@ -263,7 +272,7 @@ LibError da_set_size(DynArray* da, size_t new_size)
const size_t cur_size_pa = round_up_to_page(da->cur_size); const size_t cur_size_pa = round_up_to_page(da->cur_size);
const size_t new_size_pa = round_up_to_page(new_size); const size_t new_size_pa = round_up_to_page(new_size);
if(new_size_pa > da->max_size_pa) if(new_size_pa > da->max_size_pa)
CHECK_ERR(ERR_LIMIT); WARN_RETURN(ERR_LIMIT);
const ssize_t size_delta_pa = (ssize_t)new_size_pa - (ssize_t)cur_size_pa; const ssize_t size_delta_pa = (ssize_t)new_size_pa - (ssize_t)cur_size_pa;
u8* end = da->base + cur_size_pa; u8* end = da->base + cur_size_pa;
@ -288,7 +297,7 @@ LibError da_reserve(DynArray* da, size_t size)
{ {
// default to page size (the OS won't commit less anyway); // default to page size (the OS won't commit less anyway);
// grab more if request requires it. // grab more if request requires it.
const size_t expand_amount = MIN(4*KiB, size); const size_t expand_amount = MAX(4*KiB, size);
if(da->pos + size > da->cur_size) if(da->pos + size > da->cur_size)
return da_set_size(da, da->cur_size + expand_amount); return da_set_size(da, da->cur_size + expand_amount);
@ -351,7 +360,7 @@ LibError da_append(DynArray* da, const void* data, size_t size)
// design parameters: // design parameters:
// - O(1) alloc and free; // - O(1) alloc and free;
// - fixed XOR variable size blocks; // - fixed- XOR variable-sized blocks;
// - doesn't preallocate the entire pool; // - doesn't preallocate the entire pool;
// - returns sequential addresses. // - returns sequential addresses.
@ -392,10 +401,9 @@ static const size_t POOL_CHUNK = 4*KiB;
LibError pool_create(Pool* p, size_t max_size, size_t el_size) LibError pool_create(Pool* p, size_t max_size, size_t el_size)
{ {
if(el_size != 0 && el_size < sizeof(void*)) if(el_size != 0 && el_size < sizeof(void*))
CHECK_ERR(ERR_INVALID_PARAM); WARN_RETURN(ERR_INVALID_PARAM);
RETURN_ERR(da_alloc(&p->da, max_size)); RETURN_ERR(da_alloc(&p->da, max_size));
p->pos = 0;
p->el_size = el_size; p->el_size = el_size;
return ERR_OK; return ERR_OK;
} }
@ -419,10 +427,11 @@ LibError pool_destroy(Pool* p)
bool pool_contains(Pool* p, void* el) bool pool_contains(Pool* p, void* el)
{ {
// outside of our range // outside of our range
if(!(p->da.base <= el && el < p->da.base+p->pos)) if(!(p->da.base <= el && el < p->da.base+p->da.pos))
return false; return false;
// sanity check: it should be aligned // sanity check: it should be aligned (if pool has fixed-size elements)
debug_assert((uintptr_t)((u8*)el - p->da.base) % p->el_size == 0); if(p->el_size)
debug_assert((uintptr_t)((u8*)el - p->da.base) % p->el_size == 0);
return true; return true;
} }
@ -451,8 +460,8 @@ void* pool_alloc(Pool* p, size_t size)
if(da_reserve(&p->da, el_size) < 0) if(da_reserve(&p->da, el_size) < 0)
return 0; return 0;
el = p->da.base + p->pos; el = p->da.base + p->da.pos;
p->pos += el_size; p->da.pos += el_size;
} }
have_el: have_el:
@ -487,7 +496,7 @@ void pool_free(Pool* p, void* el)
// underlying memory. // underlying memory.
void pool_free_all(Pool* p) void pool_free_all(Pool* p)
{ {
p->pos = 0; p->da.pos = 0;
p->freelist = 0; p->freelist = 0;
} }
@ -497,12 +506,18 @@ void pool_free_all(Pool* p)
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// design goals: // design goals:
// - variable-size allocations; // - variable-sized allocations;
// - no reuse of allocations, can only free all at once; // - no reuse of allocations, can only free all at once;
// - no init necessary; // - no init necessary;
// - never relocates; // - never relocates;
// - no fixed limit. // - no fixed limit.
// note: this type of allocator is called "region-based" in the literature.
// see "Reconsidering Custom Memory Allocation" (Berger, Zorn, McKinley).
// if individual elements must be freeable, consider "reaps":
// basically a combination of region and heap, where frees go to the heap and
// allocs exhaust that memory first and otherwise use the region.
// must be constant and power-of-2 to allow fast modulo. // must be constant and power-of-2 to allow fast modulo.
const size_t BUCKET_SIZE = 4*KiB; const size_t BUCKET_SIZE = 4*KiB;

View File

@ -38,6 +38,36 @@ extern void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_
extern void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p); extern void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p);
// C++ wrapper
#ifdef __cplusplus
#include "nommgr.h"
// T must be POD (Plain Old Data) because it is memset to 0!
// manages one in-place instance of T, falling back to the heap when it
// is already handed out. thin C++ wrapper over single_calloc/single_free;
// the returned memory is zeroed by single_calloc.
template<class T> class SingleAllocator
{
// the in-place storage returned by the first (uncontended) alloc().
T storage;
// 0 = storage available; nonzero = storage currently handed out.
// (volatile because it is shared with single_calloc/single_free -
// see their implementation for the exact synchronization contract.)
volatile uintptr_t is_in_use;
public:
SingleAllocator()
{
is_in_use = 0;
}
// return zeroed memory for one T: the in-place storage if available,
// otherwise heap memory. returns 0 if out of memory (see single_calloc).
void* alloc()
{
return single_calloc(&storage, &is_in_use, sizeof(storage));
}
// release memory previously returned by alloc(); marks the in-place
// storage free or frees the heap block, as appropriate (see single_free).
void free(void* p)
{
single_free(&storage, &is_in_use, p);
}
};
#include "mmgr.h"
#endif // #ifdef __cplusplus
// //
// dynamic (expandable) array // dynamic (expandable) array
@ -106,7 +136,7 @@ extern LibError da_append(DynArray* da, const void* data_src, size_t size);
// design parameters: // design parameters:
// - O(1) alloc and free; // - O(1) alloc and free;
// - fixed XOR variable size blocks; // - fixed- XOR variable-sized blocks;
// - doesn't preallocate the entire pool; // - doesn't preallocate the entire pool;
// - returns sequential addresses. // - returns sequential addresses.
@ -118,14 +148,15 @@ struct Pool
// size of elements; see pool_create. // size of elements; see pool_create.
size_t el_size; size_t el_size;
// all bytes in da up to this mark are in circulation or freelist.
size_t pos;
// pointer to freelist (opaque); see freelist_*. // pointer to freelist (opaque); see freelist_*.
// never used (remains 0) if elements are of variable size. // never used (remains 0) if elements are of variable size.
void* freelist; void* freelist;
}; };
// pass as pool_create's <el_size> param to indicate variable-sized allocs
// are required (see below).
const size_t POOL_VARIABLE_ALLOCS = 0;
// ready <p> for use. <max_size> is the upper limit [bytes] on // ready <p> for use. <max_size> is the upper limit [bytes] on
// pool size (this is how much address space is reserved). // pool size (this is how much address space is reserved).
// //
@ -173,12 +204,18 @@ extern void pool_free_all(Pool* p);
// //
// design goals: // design goals:
// - variable-size allocations; // - variable-sized allocations;
// - no reuse of allocations, can only free all at once; // - no reuse of allocations, can only free all at once;
// - no init necessary; // - no init necessary;
// - never relocates; // - never relocates;
// - no fixed limit. // - no fixed limit.
// note: this type of allocator is called "region-based" in the literature.
// see "Reconsidering Custom Memory Allocation" (Berger, Zorn, McKinley).
// if individual elements must be freeable, consider "reaps":
// basically a combination of region and heap, where frees go to the heap and
// allocs exhaust that memory first and otherwise use the region.
// opaque! do not read/write any fields! // opaque! do not read/write any fields!
struct Bucket struct Bucket
{ {

View File

@ -250,10 +250,12 @@
// compiler support for C99 // compiler support for C99
// (this is more convenient than testing __STDC_VERSION__ directly) // (this is more convenient than testing __STDC_VERSION__ directly)
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) #define HAVE_C99 0
# define HAVE_C99 1 #ifdef __STDC_VERSION__
#else # if __STDC_VERSION__ >= 199901L
# define HAVE_C99 0 # undef HAVE_C99
# define HAVE_C99 1
# endif
#endif #endif
// gettimeofday() // gettimeofday()
@ -288,7 +290,7 @@
#endif #endif
// GNU-style __asm() blocks (AT&T syntax) // GNU-style __asm() blocks (AT&T syntax)
#if __GNUC__ #if GCC_VERSION
# define HAVE_GNU_ASM 1 # define HAVE_GNU_ASM 1
#else #else
# define HAVE_GNU_ASM 0 # define HAVE_GNU_ASM 0

View File

@ -232,12 +232,21 @@ int ilog2(const float x)
// multiple must be a power of two. // multiple must be a power of two.
uintptr_t round_up(const uintptr_t n, const uintptr_t multiple) uintptr_t round_up(const uintptr_t n, const uintptr_t multiple)
{ {
debug_assert(is_pow2((long)multiple)); // also catches divide-by-zero debug_assert(is_pow2((long)multiple));
const uintptr_t result = (n + multiple-1) & ~(multiple-1); const uintptr_t result = (n + multiple-1) & ~(multiple-1);
debug_assert(n <= result && result < n+multiple); debug_assert(n <= result && result < n+multiple);
return result; return result;
} }
// round <n> down to the nearest multiple of <multiple>.
// multiple must be a power of two.
uintptr_t round_down(const uintptr_t n, const uintptr_t multiple)
{
	debug_assert(is_pow2((long)multiple));	// required for the mask trick below
	// clear the low bits, i.e. subtract the remainder
	const uintptr_t remainder = n & (multiple-1);
	const uintptr_t result = n - remainder;
	debug_assert(result <= n && n < result+multiple);	// sanity check
	return result;
}
u16 addusw(u16 x, u16 y) u16 addusw(u16 x, u16 y)
{ {
@ -317,6 +326,15 @@ u64 u64_from_u32(u32 hi, u32 lo)
return x; return x;
} }
// return the u32 formed by concatenating <hi> (upper 16 bits)
// and <lo> (lower 16 bits).
u32 u32_from_u16(u16 hi, u16 lo)
{
	const u32 upper = (u32)hi << 16;
	return upper | (u32)lo;
}
// input in [0, 1); convert to u8 range // input in [0, 1); convert to u8 range

View File

@ -130,6 +130,13 @@ STMT(\
) )
#define SAFE_FREE(p)\
STMT(\
free(p); /* if p == 0, free is a no-op */ \
(p) = 0;\
)
#ifndef MIN #ifndef MIN
#define MIN(a, b) (((a) < (b))? (a) : (b)) #define MIN(a, b) (((a) < (b))? (a) : (b))
@ -269,7 +276,8 @@ extern uint round_up_to_pow2(uint x);
// multiple must be a power of two. // multiple must be a power of two.
extern uintptr_t round_up(uintptr_t val, uintptr_t multiple); extern uintptr_t round_up (uintptr_t n, uintptr_t multiple);
extern uintptr_t round_down(uintptr_t n, uintptr_t multiple);
// these avoid a common mistake in using >> (ANSI requires shift count be // these avoid a common mistake in using >> (ANSI requires shift count be
// less than the bit width of the type). // less than the bit width of the type).
@ -277,6 +285,7 @@ extern u32 u64_hi(u64 x);
extern u32 u64_lo(u64 x); extern u32 u64_lo(u64 x);
extern u64 u64_from_u32(u32 hi, u32 lo); extern u64 u64_from_u32(u32 hi, u32 lo);
extern u32 u32_from_u16(u16 hi, u16 lo);
inline bool feq(float f1, float f2) inline bool feq(float f1, float f2)
@ -305,4 +314,8 @@ extern void base32(const int len, const u8* in, u8* out);
extern int match_wildcard(const char* s, const char* w); extern int match_wildcard(const char* s, const char* w);
extern int match_wildcardw(const wchar_t* s, const wchar_t* w); extern int match_wildcardw(const wchar_t* s, const wchar_t* w);
// this is strcpy, but indicates that the programmer checked usage and
// promises it is safe.
#define SAFE_STRCPY strcpy
#endif // #ifndef LIB_H__ #endif // #ifndef LIB_H__

View File

@ -208,13 +208,13 @@ STMT(\
ERR(0, ERR_OK, "(but return value was 0 which indicates success)") ERR(0, ERR_OK, "(but return value was 0 which indicates success)")
ERR(-1, ERR_FAIL, "Function failed (no details available)") ERR(-1, ERR_FAIL, "Function failed (no details available)")
ERR(1, INFO_CB_CONTINUE , "1 (not an error)") ERR(1, INFO_CB_CONTINUE, "1 (not an error)")
// these are all basically the same thing // these are all basically the same thing
ERR(2, INFO_CANNOT_HANDLE, "2 (not an error)") ERR(2, INFO_CANNOT_HANDLE, "2 (not an error)")
ERR(3, INFO_NO_REPLACE , "3 (not an error)") ERR(3, INFO_NO_REPLACE, "3 (not an error)")
ERR(4, INFO_SKIPPED , "4 (not an error)") ERR(4, INFO_SKIPPED, "4 (not an error)")
// ERR(5, INFO_ALL_COMPLETE, "5 (not an error)")
ERR(5, INFO_ALL_COMPLETE, "5 (not an error)") ERR(6, INFO_ALREADY_PRESENT, "6 (not an error)")
ERR(-100000, ERR_LOGIC, "Logic error in code") ERR(-100000, ERR_LOGIC, "Logic error in code")
ERR(-100060, ERR_TIMED_OUT, "Timed out") ERR(-100060, ERR_TIMED_OUT, "Timed out")
@ -280,9 +280,11 @@ ERR(-100321, ERR_NOT_DIR, "Not a directory")
ERR(-100330, ERR_FILE_ACCESS, "Insufficient access rights to open file") ERR(-100330, ERR_FILE_ACCESS, "Insufficient access rights to open file")
ERR(-100331, ERR_IO, "Error during IO") ERR(-100331, ERR_IO, "Error during IO")
ERR(-100332, ERR_EOF, "Reading beyond end of file") ERR(-100332, ERR_EOF, "Reading beyond end of file")
ERR(-100333, ERR_IS_COMPRESSED, "Invalid operation for a compressed file") ERR(-100340, ERR_UNKNOWN_CMETHOD, "Unknown/unsupported compression method")
ERR(-100334, ERR_ALREADY_MOUNTED, "Directory (tree) already mounted") ERR(-100341, ERR_IS_COMPRESSED, "Invalid operation for a compressed file")
ERR(-100335, ERR_INVALID_MOUNT_TYPE, "Invalid mount type (memory corruption?)") ERR(-100350, ERR_ALREADY_MOUNTED, "Directory (tree) already mounted")
ERR(-100351, ERR_INVALID_MOUNT_TYPE, "Invalid mount type (memory corruption?)")
ERR(-100360, ERR_NOT_IN_CACHE, "[Internal] Entry not found in cache")
// file format // file format
ERR(-100400, ERR_UNKNOWN_FORMAT, "Unknown file format") ERR(-100400, ERR_UNKNOWN_FORMAT, "Unknown file format")

View File

@ -28,8 +28,10 @@ typedef unsigned int PS_uint;
// the standard only guarantees 16 bits. // the standard only guarantees 16 bits.
// we use this for memory offsets and ranges, so it better be big enough. // we use this for memory offsets and ranges, so it better be big enough.
#if defined(SIZE_MAX) && SIZE_MAX < 0xFFFFFFFF #ifdef SIZE_MAX
# error "check size_t and SIZE_MAX - too small?" # if SIZE_MAX < 0xFFFFFFFF
# error "check size_t and SIZE_MAX - too small?"
# endif
#endif #endif
#endif // #ifndef __TYPES_H__ #endif // #ifndef __TYPES_H__