forked from 0ad/0ad

Set svn:eol-style=native

This was SVN commit r6828.
Ykkrosh 2009-04-18 16:14:48 +00:00
parent ab4b930042
commit 7a4dd7b473
215 changed files with 22645 additions and 22645 deletions

@@ -1,12 +1,12 @@
/**
* =========================================================================
* File : aligned_allocator.cpp
* Project : 0 A.D.
* Description : STL allocator for aligned memory
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "aligned_allocator.h"

@@ -1,131 +1,131 @@
/**
* =========================================================================
* File : aligned_allocator.h
* Project : 0 A.D.
* Description : STL allocator for aligned memory
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef ALIGNED_ALLOCATOR
#define ALIGNED_ALLOCATOR
#include "lib/bits.h" // round_up
#include "lib/sysdep/arch/x86_x64/x86_x64.h" // x86_x64_L1CacheLineSize
#include "lib/sysdep/rtl.h" // rtl_AllocateAligned
/**
* stateless STL allocator that aligns elements to the L1 cache line size.
*
* note: the alignment is hard-coded to avoid any allocator state.
* this avoids portability problems, which is important since allocators
* are rather poorly specified.
*
* references:
* http://www.tantalon.com/pete/customallocators.ppt
* http://www.flipcode.com/archives/Aligned_Block_Allocation.shtml
* http://www.josuttis.com/cppcode/allocator.html
*
* derived from code that bears the following copyright notice:
* (C) Copyright Nicolai M. Josuttis 1999.
* Permission to copy, use, modify, sell and distribute this software
* is granted provided this copyright notice appears in all copies.
* This software is provided "as is" without express or implied
* warranty, and with no claim as to its suitability for any purpose.
**/
template<class T>
class AlignedAllocator
{
public:
// type definitions
typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
// rebind allocator to type U
template <class U>
struct rebind
{
typedef AlignedAllocator<U> other;
};
pointer address(reference value) const
{
return &value;
}
const_pointer address(const_reference value) const
{
return &value;
}
AlignedAllocator() throw()
{
}
AlignedAllocator(const AlignedAllocator&) throw()
{
}
template <class U>
AlignedAllocator (const AlignedAllocator<U>&) throw()
{
}
~AlignedAllocator() throw()
{
}
size_type max_size() const throw()
{
// maximum number of *elements* that can be allocated
return std::numeric_limits<std::size_t>::max() / sizeof(T);
}
// allocate uninitialized storage
pointer allocate(size_type numElements, const void* hint = 0)
{
const size_type alignment = x86_x64_L1CacheLineSize();
const size_type elementSize = round_up(sizeof(T), alignment);
const size_type size = numElements * elementSize;
pointer p = (pointer)rtl_AllocateAligned(size, alignment);
return p;
}
// deallocate storage of elements that have been destroyed
void deallocate(pointer p, size_type num)
{
rtl_FreeAligned((void*)p);
}
void construct(pointer p, const T& value)
{
new((void*)p) T(value);
}
void destroy(pointer p)
{
p->~T();
}
};
// indicate that all specializations of this allocator are interchangeable
template <class T1, class T2>
bool operator==(const AlignedAllocator<T1>&, const AlignedAllocator<T2>&) throw()
{
return true;
}
template <class T1, class T2>
bool operator!=(const AlignedAllocator<T1>&, const AlignedAllocator<T2>&) throw()
{
return false;
}
#endif // #ifndef ALIGNED_ALLOCATOR
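
For reference, a minimal usage sketch (the Example function and include form are illustrative, assuming the surrounding lib build environment): any standard container accepts the allocator via its allocator template parameter, so the backing storage comes from rtl_AllocateAligned and is aligned to the L1 cache line size.

#include <vector>
#include "aligned_allocator.h"

typedef std::vector<float, AlignedAllocator<float> > AlignedFloats;

void Example()
{
	AlignedFloats samples;
	samples.resize(1000);	// backing storage comes from AlignedAllocator::allocate,
				// i.e. rtl_AllocateAligned with L1-cache-line alignment
}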

@@ -1,133 +1,133 @@
/**
* =========================================================================
* File : bucket.cpp
* Project : 0 A.D.
* Description : bucket allocator
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "bucket.h"
#include "lib/bits.h"
#include "mem_util.h"
// power-of-2 isn't required; value is arbitrary.
const size_t bucketSize = 4000;
LibError bucket_create(Bucket* b, size_t el_size)
{
b->freelist = 0;
b->el_size = mem_RoundUpToAlignment(el_size);
// note: allocating here avoids the is-this-the-first-time check
// in bucket_alloc, which speeds things up.
b->bucket = (u8*)malloc(bucketSize);
if(!b->bucket)
{
// cause next bucket_alloc to retry the allocation
b->pos = bucketSize;
b->num_buckets = 0;
WARN_RETURN(ERR::NO_MEM);
}
*(u8**)b->bucket = 0; // terminate list
b->pos = mem_RoundUpToAlignment(sizeof(u8*));
b->num_buckets = 1;
return INFO::OK;
}
void bucket_destroy(Bucket* b)
{
while(b->bucket)
{
u8* prev_bucket = *(u8**)b->bucket;
free(b->bucket);
b->bucket = prev_bucket;
b->num_buckets--;
}
debug_assert(b->num_buckets == 0);
// poison pill: cause subsequent alloc and free to fail
b->freelist = 0;
b->el_size = bucketSize;
}
void* bucket_alloc(Bucket* b, size_t size)
{
size_t el_size = b->el_size? b->el_size : mem_RoundUpToAlignment(size);
// must fit in a bucket
debug_assert(el_size <= bucketSize-sizeof(u8*));
// try to satisfy alloc from freelist
void* el = mem_freelist_Detach(b->freelist);
if(el)
return el;
// if there's not enough space left, close current bucket and
// allocate another.
if(b->pos+el_size > bucketSize)
{
u8* bucket = (u8*)malloc(bucketSize);
if(!bucket)
return 0;
*(u8**)bucket = b->bucket;
b->bucket = bucket;
// skip bucket list field and align (note: malloc already
// aligns to at least 8 bytes, so don't take b->bucket into account)
b->pos = mem_RoundUpToAlignment(sizeof(u8*));
b->num_buckets++;
}
void* ret = b->bucket+b->pos;
b->pos += el_size;
return ret;
}
void* bucket_fast_alloc(Bucket* b)
{
// try to satisfy alloc from freelist
void* el = mem_freelist_Detach(b->freelist);
if(el)
return el;
// if there's not enough space left, close current bucket and
// allocate another.
if(b->pos+b->el_size > bucketSize)
{
u8* bucket = (u8*)malloc(bucketSize);
*(u8**)bucket = b->bucket;
b->bucket = bucket;
// skip bucket list field (alignment is only pointer-size)
b->pos = sizeof(u8*);
b->num_buckets++;
}
void* ret = b->bucket+b->pos;
b->pos += b->el_size;
return ret;
}
void bucket_free(Bucket* b, void* el)
{
if(b->el_size == 0)
{
DEBUG_WARN_ERR(ERR::LOGIC); // cannot free variable-size items
return;
}
mem_freelist_AddToFront(b->freelist, el);
// note: checking if <el> was actually allocated from <b> is difficult:
// it may not be in the currently open bucket, so we'd have to
// iterate over the list - too much work.
}

@@ -1,98 +1,98 @@
/**
* =========================================================================
* File : bucket.h
* Project : 0 A.D.
* Description : bucket allocator
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_BUCKET
#define INCLUDED_BUCKET
/**
* allocator design goals:
* - either fixed- or variable-sized blocks;
* - allow freeing individual blocks if they are all fixed-size;
* - never relocates;
* - no fixed limit.
*
* note: this type of allocator is called "region-based" in the literature
* and is also known as "obstack"; see "Reconsidering Custom Memory
* Allocation" (Berger, Zorn, McKinley).
* if individual variable-size elements must be freeable, consider "reaps":
* basically a combination of region and heap, where frees go to the heap and
* allocs exhaust that memory first and otherwise use the region.
*
* opaque! do not read/write any fields!
**/
struct Bucket
{
/**
* currently open bucket.
**/
u8* bucket;
/**
* offset of free space at end of current bucket (i.e. # bytes in use).
**/
size_t pos;
void* freelist;
size_t el_size;
/**
* records # buckets allocated; verifies the list of buckets is correct.
**/
size_t num_buckets;
};
/**
* ready the Bucket object for use.
*
* @param Bucket*
* @param el_size 0 to allow variable-sized allocations (which cannot be
* freed individually); otherwise, it specifies the number of bytes that
* will be returned by bucket_alloc (whose size parameter is then ignored).
* @return LibError.
**/
LIB_API LibError bucket_create(Bucket* b, size_t el_size);
/**
* free all memory that ensued from <b>.
*
* future alloc and free calls on this Bucket will fail.
*
* @param Bucket*
**/
LIB_API void bucket_destroy(Bucket* b);
/**
* Dole out memory from the Bucket.
* exhausts the freelist before returning new entries to improve locality.
*
* @param Bucket*
* @param size bytes to allocate; ignored if bucket_create's el_size was not 0.
* @return allocated memory, or 0 if the Bucket would have to be expanded and
* there isn't enough memory to do so.
**/
LIB_API void* bucket_alloc(Bucket* b, size_t size);
LIB_API void* bucket_fast_alloc(Bucket* b);
/**
* make an entry available for reuse in the given Bucket.
*
* this is not allowed if created for variable-size elements.
* rationale: avoids having to pass el_size here and compare with size when
* allocating; also prevents fragmentation and leaking memory.
*
* @param Bucket*
* @param el entry allocated via bucket_alloc.
**/
LIB_API void bucket_free(Bucket* b, void* el);
#endif // #ifndef INCLUDED_BUCKET
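
A short fixed-size usage sketch based only on the declarations above (Node and Example are illustrative; error handling abbreviated):

#include "bucket.h"

struct Node { Node* next; int value; };

void Example()
{
	Bucket b;
	if(bucket_create(&b, sizeof(Node)) != INFO::OK)
		return;
	Node* n = (Node*)bucket_fast_alloc(&b);	// el_size was fixed by bucket_create
	n->value = 1;
	bucket_free(&b, n);	// entry goes onto the freelist for reuse
	bucket_destroy(&b);	// releases every bucket in one go
}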

@@ -1,188 +1,188 @@
/**
* =========================================================================
* File : dynarray.cpp
* Project : 0 A.D.
* Description : dynamic (expandable) array
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "dynarray.h"
#include "lib/posix/posix_mman.h" // PROT_* constants for da_set_prot
#include "lib/sysdep/cpu.h"
#include "mem_util.h"
// indicates that this DynArray must not be resized or freed
// (e.g. because it merely wraps an existing memory range).
// stored in da->prot to reduce size; doesn't conflict with any PROT_* flags.
const int DA_NOT_OUR_MEM = 0x40000000;
static LibError validate_da(DynArray* da)
{
if(!da)
WARN_RETURN(ERR::INVALID_PARAM);
// u8* const base = da->base;
const size_t max_size_pa = da->max_size_pa;
const size_t cur_size = da->cur_size;
const size_t pos = da->pos;
const int prot = da->prot;
// note: this happens if max_size == 0
// if(debug_IsPointerBogus(base))
// WARN_RETURN(ERR::_1);
// note: don't check if base is page-aligned -
// might not be true for 'wrapped' mem regions.
// if(!mem_IsPageMultiple((uintptr_t)base))
// WARN_RETURN(ERR::_2);
if(!mem_IsPageMultiple(max_size_pa))
WARN_RETURN(ERR::_3);
if(cur_size > max_size_pa)
WARN_RETURN(ERR::_4);
if(pos > cur_size || pos > max_size_pa)
WARN_RETURN(ERR::_5);
if(prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC|DA_NOT_OUR_MEM))
WARN_RETURN(ERR::_6);
return INFO::OK;
}
#define CHECK_DA(da) RETURN_ERR(validate_da(da))
LibError da_alloc(DynArray* da, size_t max_size)
{
const size_t max_size_pa = mem_RoundUpToPage(max_size);
u8* p = 0;
if(max_size_pa) // (avoid mmap failure)
RETURN_ERR(mem_Reserve(max_size_pa, &p));
da->base = p;
da->max_size_pa = max_size_pa;
da->cur_size = 0;
da->cur_size_pa = 0;
da->prot = PROT_READ|PROT_WRITE;
da->pos = 0;
CHECK_DA(da);
return INFO::OK;
}
LibError da_free(DynArray* da)
{
CHECK_DA(da);
u8* p = da->base;
size_t size_pa = da->max_size_pa;
bool was_wrapped = (da->prot & DA_NOT_OUR_MEM) != 0;
// wipe out the DynArray for safety
// (must be done here because mem_Release may fail)
memset(da, 0, sizeof(*da));
// skip mem_Release if <da> was allocated via da_wrap_fixed
// (i.e. it doesn't actually own any memory). don't complain;
// da_free is supposed to be called even in the above case.
if(!was_wrapped && size_pa)
RETURN_ERR(mem_Release(p, size_pa));
return INFO::OK;
}
LibError da_set_size(DynArray* da, size_t new_size)
{
CHECK_DA(da);
if(da->prot & DA_NOT_OUR_MEM)
WARN_RETURN(ERR::LOGIC);
// determine how much to add/remove
const size_t cur_size_pa = mem_RoundUpToPage(da->cur_size);
const size_t new_size_pa = mem_RoundUpToPage(new_size);
const ssize_t size_delta_pa = (ssize_t)new_size_pa - (ssize_t)cur_size_pa;
// not enough memory to satisfy this expand request: abort.
// note: do not complain - some allocators (e.g. file_cache)
// legitimately use up all available space.
if(new_size_pa > da->max_size_pa)
return ERR::LIMIT; // NOWARN
u8* end = da->base + cur_size_pa;
// expanding
if(size_delta_pa > 0)
RETURN_ERR(mem_Commit(end, size_delta_pa, da->prot));
// shrinking
else if(size_delta_pa < 0)
RETURN_ERR(mem_Decommit(end+size_delta_pa, -size_delta_pa));
// else: no change in page count, e.g. if going from size=1 to 2
// (we don't want mem_* to have to handle size=0)
da->cur_size = new_size;
da->cur_size_pa = new_size_pa;
CHECK_DA(da);
return INFO::OK;
}
LibError da_reserve(DynArray* da, size_t size)
{
if(da->pos+size > da->cur_size_pa)
RETURN_ERR(da_set_size(da, da->cur_size_pa+size));
da->cur_size = std::max(da->cur_size, da->pos+size);
return INFO::OK;
}
LibError da_set_prot(DynArray* da, int prot)
{
CHECK_DA(da);
// somewhat more subtle: POSIX mprotect requires the memory have been
// mmap-ed, which it probably wasn't here.
if(da->prot & DA_NOT_OUR_MEM)
WARN_RETURN(ERR::LOGIC);
da->prot = prot;
RETURN_ERR(mem_Protect(da->base, da->cur_size_pa, prot));
CHECK_DA(da);
return INFO::OK;
}
LibError da_wrap_fixed(DynArray* da, u8* p, size_t size)
{
da->base = p;
da->max_size_pa = mem_RoundUpToPage(size);
da->cur_size = size;
da->cur_size_pa = da->max_size_pa;
da->prot = PROT_READ|PROT_WRITE|DA_NOT_OUR_MEM;
da->pos = 0;
CHECK_DA(da);
return INFO::OK;
}
LibError da_read(DynArray* da, void* data, size_t size)
{
// make sure we have enough data to read
if(da->pos+size > da->cur_size)
WARN_RETURN(ERR::FAIL);
cpu_memcpy(data, da->base+da->pos, size);
da->pos += size;
return INFO::OK;
}
LibError da_append(DynArray* da, const void* data, size_t size)
{
RETURN_ERR(da_reserve(da, size));
cpu_memcpy(da->base+da->pos, data, size);
da->pos += size;
return INFO::OK;
}
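
As a sketch of the wrapping path implemented above (Example and the header field are illustrative, assuming the project's u8/u32 typedefs): da_wrap_fixed lets an ordinary buffer be consumed via da_read without any mmap involvement.

void Example(u8* buf, size_t size)
{
	DynArray da;
	if(da_wrap_fixed(&da, buf, size) != INFO::OK)
		return;
	u32 header;
	if(da_read(&da, &header, sizeof(header)) == INFO::OK)
	{
		// da.pos has advanced past the header; subsequent da_read calls continue there
	}
	(void)da_free(&da);	// zeroes the DynArray but doesn't release <buf> (DA_NOT_OUR_MEM)
}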

@@ -1,133 +1,133 @@
/**
* =========================================================================
* File : dynarray.h
* Project : 0 A.D.
* Description : dynamic (expandable) array
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_DYNARRAY
#define INCLUDED_DYNARRAY
/**
* provides a memory range that can be expanded but doesn't waste
* physical memory or relocate itself.
*
* works by preallocating address space and committing as needed.
* used as a building block for other allocators.
**/
struct DynArray
{
u8* base;
size_t max_size_pa; /// reserved
size_t cur_size; /// committed
size_t cur_size_pa;
/**
* mprotect flags applied to newly committed pages
**/
int prot;
size_t pos;
};
/**
* ready the DynArray object for use.
*
* no virtual memory is actually committed until calls to da_set_size.
*
* @param da DynArray.
* @param max_size size [bytes] of address space to reserve (*);
* the DynArray can never expand beyond this.
* (* rounded up to next page size multiple)
* @return LibError.
**/
LIB_API LibError da_alloc(DynArray* da, size_t max_size);
/**
* free all memory (address space + physical) that constitutes the
* given array.
*
* use-after-free is impossible because the memory is unmapped.
*
* @param DynArray* da; zeroed afterwards.
* @return LibError
**/
LIB_API LibError da_free(DynArray* da);
/**
* expand or shrink the array: changes the amount of currently committed
* (i.e. usable) memory pages.
*
* @param da DynArray.
* @param new_size target size (rounded up to next page multiple).
* pages are added/removed until this is met.
* @return LibError.
**/
LIB_API LibError da_set_size(DynArray* da, size_t new_size);
/**
* Make sure at least <size> bytes starting at da->pos are committed and
* ready for use.
*
* @param DynArray*
* @param size Minimum amount to guarantee [bytes]
* @return LibError
**/
LIB_API LibError da_reserve(DynArray* da, size_t size);
/**
* change access rights of the array memory.
*
* used to implement write-protection. affects the currently committed
* pages as well as all subsequently added pages.
*
* @param da DynArray.
* @param prot a combination of the PROT_* values used with mprotect.
* @return LibError.
**/
LIB_API LibError da_set_prot(DynArray* da, int prot);
/**
* "wrap" (i.e. store information about) the given buffer in a DynArray.
*
* this is used to allow calling da_read or da_append on normal buffers.
* da_free should be called when the DynArray is no longer needed,
* even though it doesn't free this memory (but does zero the DynArray).
*
* @param da DynArray. Note: any future operations on it that would
* change the underlying memory (e.g. da_set_size) will fail.
* @param p target memory (no alignment/padding requirements)
* @param size maximum size (no alignment requirements)
* @return LibError.
**/
LIB_API LibError da_wrap_fixed(DynArray* da, u8* p, size_t size);
/**
* "read" from array, i.e. copy into the given buffer.
*
* starts at offset DynArray.pos and advances this.
*
* @param da DynArray.
* @param data_dst destination memory
* @param size [bytes] to copy
* @return LibError.
**/
LIB_API LibError da_read(DynArray* da, void* data_dst, size_t size);
/**
* "write" to array, i.e. copy from the given buffer.
*
* starts at offset DynArray.pos and advances this.
*
* @param da DynArray.
* @param data_src source memory
* @param size [bytes] to copy
* @return LibError.
**/
LIB_API LibError da_append(DynArray* da, const void* data_src, size_t size);
#endif // #ifndef INCLUDED_DYNARRAY
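
A minimal lifecycle sketch using only the functions declared above (sizes and Example are illustrative): address space is reserved once, pages are committed as data is appended, and everything is released at the end.

#include "dynarray.h"

void Example()
{
	DynArray da;
	if(da_alloc(&da, 16*1024*1024) != INFO::OK)	// reserve 16 MiB of address space
		return;
	const char msg[] = "hello";
	(void)da_append(&da, msg, sizeof(msg));	// da_reserve commits pages on demand
	(void)da_free(&da);	// decommits and releases; da is zeroed
}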

File diff suppressed because it is too large.

@@ -1,72 +1,72 @@
/**
* =========================================================================
* File : headerless.h
* Project : 0 A.D.
* Description : (header-less) pool-based heap allocator
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_HEADERLESS
#define INCLUDED_HEADERLESS
/**
* (header-less) pool-based heap allocator
* provides Allocate and Deallocate without requiring in-band headers;
* this is useful when allocating page-aligned I/O buffers
* (headers would waste an entire page per buffer)
*
* policy:
* - allocation: first exhaust the freelist, then allocate more
* - freelist: address-ordered good fit, always split blocks
* - coalescing: immediate
* mechanism:
* - coalescing: boundary tags in freed memory with distinct bit patterns
* - freelist: segregated range lists of power-of-two size classes
*
* note: this module basically implements a (rather complex) freelist and
* could be made independent of the Pool allocation scheme. however, reading
* neighboring boundary tags may cause segmentation violations; knowing the
* bounds of valid committed memory (i.e. Pool extents) avoids this.
**/
class HeaderlessAllocator
{
public:
/**
* @param poolSize maximum amount of memory that can be allocated.
* this much virtual address space is reserved up-front (see Pool).
**/
HeaderlessAllocator(size_t poolSize);
/**
* restore the original state (as if newly constructed).
* this includes reclaiming all extant allocations.
**/
void Reset();
/**
* @param size [bytes] must be a multiple of the minimum alignment and
* enough to store a block header. (this allocator is designed for
* page-aligned requests but can handle smaller amounts.)
* @return allocated memory or 0 if the pool is too fragmented or full.
**/
void* Allocate(size_t size) throw();
/**
* deallocate memory.
* @param size must be exactly as specified to Allocate.
**/
void Deallocate(void* p, size_t size);
/**
* perform sanity checks; ensure allocator state is consistent.
**/
void Validate() const;
private:
class Impl;
shared_ptr<Impl> impl;
};
#endif // #ifndef INCLUDED_HEADERLESS
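
A brief usage sketch (pool and buffer sizes and Example are illustrative): the allocator hands out buffers without per-allocation headers, and Deallocate must be told the original size.

#include "headerless.h"

void Example()
{
	HeaderlessAllocator allocator(64*1024*1024);	// reserves this much address space up-front
	const size_t size = 64*1024;	// e.g. a page-aligned I/O buffer
	void* buf = allocator.Allocate(size);
	if(buf)
		allocator.Deallocate(buf, size);	// size must match the Allocate call exactly
}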

@@ -1,120 +1,120 @@
/**
* =========================================================================
* File : mem_util.cpp
* Project : 0 A.D.
* Description : memory allocator helper routines.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "mem_util.h"
#include "lib/bits.h" // round_up
#include "lib/posix/posix_mman.h"
#include "lib/sysdep/os_cpu.h" // os_cpu_PageSize
bool mem_IsPageMultiple(uintptr_t x)
{
return (x & (os_cpu_PageSize()-1)) == 0;
}
size_t mem_RoundUpToPage(size_t size)
{
return round_up(size, os_cpu_PageSize());
}
size_t mem_RoundUpToAlignment(size_t size)
{
// all allocators should align to at least this many bytes:
const size_t alignment = 8;
return round_up(size, alignment);
}
//-----------------------------------------------------------------------------
static inline LibError LibError_from_mmap(void* ret, bool warn_if_failed = true)
{
if(ret != MAP_FAILED)
return INFO::OK;
return LibError_from_errno(warn_if_failed);
}
// "anonymous" effectively means mapping /dev/zero, but is more efficient.
// MAP_ANONYMOUS is not in SUSv3, but is a very common extension.
// unfortunately, MacOS X only defines MAP_ANON, which Solaris says is
// deprecated. workaround there: define MAP_ANONYMOUS in terms of MAP_ANON.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif
static const int mmap_flags = MAP_PRIVATE|MAP_ANONYMOUS;
LibError mem_Reserve(size_t size, u8** pp)
{
errno = 0;
void* ret = mmap(0, size, PROT_NONE, mmap_flags|MAP_NORESERVE, -1, 0);
*pp = (u8*)ret;
return LibError_from_mmap(ret);
}
LibError mem_Release(u8* p, size_t size)
{
errno = 0;
int ret = munmap(p, size);
return LibError_from_posix(ret);
}
LibError mem_Commit(u8* p, size_t size, int prot)
{
// avoid misinterpretation by mmap.
if(prot == PROT_NONE)
WARN_RETURN(ERR::INVALID_PARAM);
errno = 0;
void* ret = mmap(p, size, prot, mmap_flags|MAP_FIXED, -1, 0);
return LibError_from_mmap(ret);
}
LibError mem_Decommit(u8* p, size_t size)
{
errno = 0;
void* ret = mmap(p, size, PROT_NONE, mmap_flags|MAP_NORESERVE|MAP_FIXED, -1, 0);
return LibError_from_mmap(ret);
}
LibError mem_Protect(u8* p, size_t size, int prot)
{
errno = 0;
int ret = mprotect(p, size, prot);
return LibError_from_posix(ret);
}
//-----------------------------------------------------------------------------
// "freelist" is a pointer to the first unused element (0 if there are none);
// its memory holds a pointer to the next free one in list.
void mem_freelist_AddToFront(void*& freelist, void* el)
{
debug_assert(el != 0);
void* prev_el = freelist;
freelist = el;
*(void**)el = prev_el;
}
void* mem_freelist_Detach(void*& freelist)
{
void* el = freelist;
// nothing in list
if(!el)
return 0;
freelist = *(void**)el;
return el;
}
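
To illustrate the intrusive freelist above (slots and FreelistExample are illustrative): free elements store the link to the next free element in their own first bytes, so the list costs no extra memory.

void FreelistExample()
{
	void* freelist = 0;
	void* slots[2][2];	// stand-ins for two freed allocator entries
	mem_freelist_AddToFront(freelist, slots[0]);
	mem_freelist_AddToFront(freelist, slots[1]);
	void* el = mem_freelist_Detach(freelist);	// LIFO: yields slots[1], then slots[0], then 0
	(void)el;
}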

@@ -1,36 +1,36 @@
/**
* =========================================================================
* File : mem_util.h
* Project : 0 A.D.
* Description : memory allocator helper routines.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_MEM_UTIL
#define INCLUDED_MEM_UTIL
LIB_API bool mem_IsPageMultiple(uintptr_t x);
LIB_API size_t mem_RoundUpToPage(size_t size);
LIB_API size_t mem_RoundUpToAlignment(size_t size);
// very thin wrapper on top of sys/mman.h that makes the intent more obvious
// (its commit/decommit semantics are difficult to tell apart)
LIB_API LibError mem_Reserve(size_t size, u8** pp);
LIB_API LibError mem_Release(u8* p, size_t size);
LIB_API LibError mem_Commit(u8* p, size_t size, int prot);
LIB_API LibError mem_Decommit(u8* p, size_t size);
LIB_API LibError mem_Protect(u8* p, size_t size, int prot);
// note: element memory is used to store a pointer to the next free element.
// rationale for the function-based interface: a class encapsulating the
// freelist pointer would force each header to include mem_util.h;
// instead, implementations need only declare a void* pointer.
LIB_API void mem_freelist_AddToFront(void*& freelist, void* el);
LIB_API void* mem_freelist_Detach(void*& freelist);
#endif // #ifndef INCLUDED_MEM_UTIL
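
A sketch of the reserve/commit split these wrappers expose (sizes and the function name are illustrative; PROT_* come from lib/posix/posix_mman.h as in mem_util.cpp). This is the pattern DynArray builds on.

void ReserveCommitExample()
{
	const size_t size = mem_RoundUpToPage(1024*1024);
	u8* p;
	if(mem_Reserve(size, &p) != INFO::OK)	// address space only; no physical memory yet
		return;
	if(mem_Commit(p, mem_RoundUpToPage(1), PROT_READ|PROT_WRITE) == INFO::OK)
		p[0] = 1;	// the first page is now committed and writable
	(void)mem_Release(p, size);	// unmaps the whole reservation
}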

@@ -1,105 +1,105 @@
/**
* =========================================================================
* File : pool.cpp
* Project : 0 A.D.
* Description : pool allocator
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "pool.h"
#include "mem_util.h"
LibError pool_create(Pool* p, size_t max_size, size_t el_size)
{
if(el_size == POOL_VARIABLE_ALLOCS)
p->el_size = 0;
else
p->el_size = mem_RoundUpToAlignment(el_size);
p->freelist = 0;
RETURN_ERR(da_alloc(&p->da, max_size));
return INFO::OK;
}
LibError pool_destroy(Pool* p)
{
// don't be picky and complain if the freelist isn't empty;
// we don't care since it's all part of the da anyway.
// however, zero it to prevent further allocs from succeeding.
p->freelist = 0;
return da_free(&p->da);
}
bool pool_contains(const Pool* p, void* el)
{
// outside of our range
if(!(p->da.base <= el && el < p->da.base+p->da.pos))
return false;
// sanity check: it should be aligned (if pool has fixed-size elements)
if(p->el_size)
debug_assert((uintptr_t)((u8*)el - p->da.base) % p->el_size == 0);
return true;
}
void* pool_alloc(Pool* p, size_t size)
{
// if pool allows variable sizes, go with the size parameter,
// otherwise the pool el_size setting.
const size_t el_size = p->el_size? p->el_size : mem_RoundUpToAlignment(size);
// note: this can never happen in pools with variable-sized elements
// because they disallow pool_free.
void* el = mem_freelist_Detach(p->freelist);
if(el)
goto have_el;
// alloc a new entry
{
// expand, if necessary
if(da_reserve(&p->da, el_size) < 0)
return 0;
el = p->da.base + p->da.pos;
p->da.pos += el_size;
}
have_el:
debug_assert(pool_contains(p, el)); // paranoia
return el;
}
void pool_free(Pool* p, void* el)
{
// only allowed to free items if we were initialized with
// fixed el_size. (this avoids having to pass el_size here and
// check if requested_size matches that when allocating)
if(p->el_size == 0)
{
debug_assert(0); // cannot free variable-size items
return;
}
if(pool_contains(p, el))
mem_freelist_AddToFront(p->freelist, el);
else
debug_assert(0); // invalid pointer (not in pool)
}
void pool_free_all(Pool* p)
{
p->freelist = 0;
// must be reset before da_set_size or CHECK_DA will complain.
p->da.pos = 0;
da_set_size(&p->da, 0);
}

@@ -1,161 +1,161 @@
/**
* =========================================================================
* File : pool.h
* Project : 0 A.D.
* Description : pool allocator
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_POOL
#define INCLUDED_POOL
#include "dynarray.h"
/**
* allocator design parameters:
* - O(1) alloc and free;
* - either fixed- or variable-sized blocks;
* - doesn't preallocate the entire pool;
* - returns sequential addresses.
*
* opaque! do not read/write any fields!
**/
struct Pool
{
DynArray da;
/**
* size of elements. = 0 if pool set up for variable-sized
* elements, otherwise rounded up to pool alignment.
**/
size_t el_size;
/**
* pointer to freelist (opaque); see freelist_*.
* never used (remains 0) if elements are of variable size.
**/
void* freelist;
};
/**
* pass as pool_create's <el_size> param to indicate variable-sized allocs
* are required (see below).
**/
const size_t POOL_VARIABLE_ALLOCS = ~(size_t)0u;
/**
* Ready Pool for use.
*
* @param Pool*
* @param max_size Max size [bytes] of the Pool; this much
* (rounded up to next page multiple) virtual address space is reserved.
* no virtual memory is actually committed until calls to pool_alloc.
* @param el_size Number of bytes that will be returned by each
* pool_alloc (whose size parameter is then ignored). Can be 0 to
* allow variable-sized allocations, but pool_free is then unusable.
* @return LibError
**/
LIB_API LibError pool_create(Pool* p, size_t max_size, size_t el_size);
/**
* free all memory (address space + physical) that constitutes the
* given Pool.
*
* future alloc and free calls on this pool will fail.
* continued use of the allocated memory (*) is
* impossible because it is marked not-present via MMU.
* (* no matter if in freelist or unused or "allocated" to user)
*
* @param Pool*
* @return LibError.
**/
LIB_API LibError pool_destroy(Pool* p);
/**
* indicate whether a pointer was allocated from the given pool.
*
* this is useful for callers that use several types of allocators.
*
* @param Pool*
* @return bool.
**/
LIB_API bool pool_contains(const Pool* p, void* el);
/**
* Dole out memory from the pool.
* exhausts the freelist before returning new entries to improve locality.
*
* @param Pool*
* @param size bytes to allocate; ignored if pool_create's el_size was not 0.
* @return allocated memory, or 0 if the Pool would have to be expanded and
* there isn't enough memory to do so.
**/
LIB_API void* pool_alloc(Pool* p, size_t size);
/**
* Make a fixed-size element available for reuse in the given Pool.
*
* this is not allowed if the Pool was created for variable-size elements.
* rationale: avoids having to pass el_size here and compare with size when
* allocating; also prevents fragmentation and leaking memory.
*
* @param Pool*
* @param el Element returned by pool_alloc.
**/
LIB_API void pool_free(Pool* p, void* el);
/**
* "free" all user allocations that ensued from the given Pool.
*
* this resets it as if freshly pool_create-d, but doesn't release the
* underlying reserved virtual memory.
*
* @param Pool*
**/
LIB_API void pool_free_all(Pool* p);
#ifdef __cplusplus
/**
* C++ wrapper on top of pool_alloc that's slightly easier to use.
*
* T must be POD (Plain Old Data) because it is memset to 0!
**/
template<class T>
class PoolAllocator
{
public:
explicit PoolAllocator(size_t maxElements)
{
(void)pool_create(&m_pool, maxElements*sizeof(T), sizeof(T));
}
~PoolAllocator()
{
(void)pool_destroy(&m_pool);
}
T* AllocateZeroedMemory()
{
T* t = (T*)pool_alloc(&m_pool, 0);
if(!t)
throw std::bad_alloc();
memset(t, 0, sizeof(T));
return t;
}
void Free(T* t)
{
pool_free(&m_pool, t);
}
private:
Pool m_pool;
};
#endif
#endif // #ifndef INCLUDED_POOL
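
Two usage sketches based on the declarations above (Particle and Example are illustrative): the raw C interface with fixed-size elements, and the PoolAllocator wrapper that additionally zeroes each element.

#include "pool.h"

struct Particle { float x, y, z; };

void Example()
{
	Pool p;
	if(pool_create(&p, 4096*sizeof(Particle), sizeof(Particle)) != INFO::OK)
		return;
	Particle* particle = (Particle*)pool_alloc(&p, 0);	// size ignored since el_size != 0
	if(particle)
		pool_free(&p, particle);	// back onto the freelist
	(void)pool_destroy(&p);

	PoolAllocator<Particle> allocator(4096);	// wrapper: throws std::bad_alloc on failure
	Particle* zeroed = allocator.AllocateZeroedMemory();
	allocator.Free(zeroed);
}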

View File

@ -1,43 +1,43 @@
#include "precompiled.h"
#include "shared_ptr.h"
#include "allocators.h" // AllocatorChecker
#ifndef NDEBUG
static AllocatorChecker s_allocatorChecker;
#endif
class CheckedArrayDeleter
{
public:
CheckedArrayDeleter(size_t size)
: m_size(size)
{
}
void operator()(u8* p)
{
debug_assert(m_size != 0);
#ifndef NDEBUG
s_allocatorChecker.OnDeallocate(p, m_size);
#endif
delete[] p;
m_size = 0;
}
private:
size_t m_size;
};
shared_ptr<u8> Allocate(size_t size)
{
debug_assert(size != 0);
u8* p = new u8[size];
#ifndef NDEBUG
s_allocatorChecker.OnAllocate(p, size);
#endif
return shared_ptr<u8>(p, CheckedArrayDeleter(size));
}
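A short sketch of how this checked wrapper might be used; the include path and the buffer size are assumptions:

#include "lib/allocators/shared_ptr.h"	// assumed path of the header declaring Allocate()

static void AllocateExample()
{
	shared_ptr<u8> buf = Allocate(4096);	// debug builds assert if the size is 0
	buf.get()[0] = 0xAB;
	// when the last shared_ptr copy is destroyed, CheckedArrayDeleter notifies
	// the AllocatorChecker (debug builds only) and delete[]s the storage.
}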
#include "precompiled.h"
#include "shared_ptr.h"
#include "allocators.h" // AllocatorChecker
#ifndef NDEBUG
static AllocatorChecker s_allocatorChecker;
#endif
class CheckedArrayDeleter
{
public:
CheckedArrayDeleter(size_t size)
: m_size(size)
{
}
void operator()(u8* p)
{
debug_assert(m_size != 0);
#ifndef NDEBUG
s_allocatorChecker.OnDeallocate(p, m_size);
#endif
delete[] p;
m_size = 0;
}
private:
size_t m_size;
};
shared_ptr<u8> Allocate(size_t size)
{
debug_assert(size != 0);
u8* p = new u8[size];
#ifndef NDEBUG
s_allocatorChecker.OnAllocate(p, size);
#endif
return shared_ptr<u8>(p, CheckedArrayDeleter(size));
}

View File

@ -1,57 +1,57 @@
#ifndef INCLUDED_SHARED_PTR
#define INCLUDED_SHARED_PTR
#include "lib/sysdep/arch/x86_x64/x86_x64.h"
#include "lib/sysdep/rtl.h" // rtl_AllocateAligned
struct DummyDeleter
{
template<class T>
void operator()(T*)
{
}
};
template<class T>
shared_ptr<T> DummySharedPtr(T* ptr)
{
return shared_ptr<T>(ptr, DummyDeleter());
}
struct ArrayDeleter
{
template<class T>
void operator()(T* p)
{
delete[] p;
}
};
struct FreeDeleter
{
template<class T>
void operator()(T* p)
{
free(p);
}
};
// (note: uses CheckedArrayDeleter)
LIB_API shared_ptr<u8> Allocate(size_t size);
struct AlignedDeleter
{
template<class T>
void operator()(T* t)
{
_mm_free(t);
}
};
template<class T>
shared_ptr<T> AllocateAligned(size_t size)
{
return shared_ptr<T>((T*)rtl_AllocateAligned(size, x86_x64_L1CacheLineSize()), AlignedDeleter());
}
#endif // #ifndef INCLUDED_SHARED_PTR
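For illustration, a hedged sketch of how these deleters combine with shared_ptr; the element counts and the static variable are invented:

static void DeleterExamples()
{
	// share a pointer to an object nobody owns (never freed):
	static int s_config;
	shared_ptr<int> cfg = DummySharedPtr(&s_config);

	// storage from new[], released with delete[]:
	shared_ptr<int> table(new int[64], ArrayDeleter());

	// cache-line-aligned storage; note that the parameter is a byte count:
	shared_ptr<double> samples = AllocateAligned<double>(256*sizeof(double));
}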

@ -1,94 +1,94 @@
/**
* =========================================================================
* File : string_pool.cpp
* Project : 0 A.D.
* Description : shared storage for strings
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "string_pool.h"
#include "lib/rand.h"
#include "lib/sysdep/cpu.h" // cpu_memcpy
StringPool::StringPool(size_t maxSize)
{
pool_create(&m_pool, maxSize, POOL_VARIABLE_ALLOCS);
}
StringPool::~StringPool()
{
m_map.clear();
(void)pool_destroy(&m_pool);
}
const char* StringPool::UniqueCopy(const char* string)
{
// early out: check if it already lies in the pool
if(Contains(string))
return string;
// check if equivalent to an existing string.
//
// rationale: the entire storage could be done via container,
// rather than simply using it as a lookup mapping.
// however, DynHashTbl together with Pool (see above) is more efficient.
const char* existingString = m_map.find(string);
if(existingString)
return existingString;
const size_t length = strlen(string);
const char* uniqueCopy = (const char*)pool_alloc(&m_pool, length+1);
if(!uniqueCopy)
throw std::bad_alloc();
cpu_memcpy((void*)uniqueCopy, string, length);
((char*)uniqueCopy)[length] = '\0';
m_map.insert(uniqueCopy, uniqueCopy);
return uniqueCopy;
}
bool StringPool::Contains(const char* string) const
{
return pool_contains(&m_pool, (void*)string);
}
const char* StringPool::RandomString() const
{
// there had better be names in m_pool, else this will fail.
debug_assert(m_pool.da.pos != 0);
again:
const size_t start_ofs = (size_t)rand(0, (size_t)m_pool.da.pos);
// scan back to start of string (don't scan ahead; this must
// work even if m_pool only contains one entry).
const char* start = (const char*)m_pool.da.base + start_ofs;
for(size_t i = 0; i < start_ofs; i++)
{
if(*start == '\0')
break;
start--;
}
// skip past the '\0' we found. loop is needed because there may be
// several if we land in padding (due to pool alignment).
size_t chars_left = m_pool.da.pos - start_ofs;
for(; *start == '\0'; start++)
{
// we had landed in padding at the end of the buffer.
if(chars_left-- == 0)
goto again;
}
return start;
}

@ -1,46 +1,46 @@
/**
* =========================================================================
* File : string_pool.h
* Project : 0 A.D.
* Description : shared storage for strings
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_STRING_POOL
#define INCLUDED_STRING_POOL
#include "lib/adts.h" // DynHashTbl
#include "pool.h"
class StringPool
{
public:
StringPool(size_t maxSize);
~StringPool();
/**
* allocate a copy of the string.
*
* @return a unique pointer for the string (addresses are equal iff
* the string contents match). can return 0, but would raise a
* warning first.
**/
const char* UniqueCopy(const char* string);
bool Contains(const char* string) const;
const char* RandomString() const;
private:
// rationale: we want an O(1) Contains() so that redundant UniqueCopy
// calls are cheap. that requires allocating from one contiguous arena,
// which is also more memory-efficient than the heap (no headers).
Pool m_pool;
typedef DynHashTbl<const char*, const char*> Map;
Map m_map;
};
#endif // #ifndef INCLUDED_STRING_POOL
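A small usage sketch of the class; the pool size and the strings are arbitrary:

static void StringPoolExample()
{
	StringPool names(64*1024);		// reserve 64 KiB of address space
	const char* a = names.UniqueCopy("hoplite");
	const char* b = names.UniqueCopy("hoplite");
	debug_assert(a == b);			// equal contents => identical pointer
	debug_assert(names.Contains(a));	// and the copy lies within the pool
}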

@ -1,45 +1,45 @@
#include "lib/self_test.h"
#include "lib/allocators/allocators.h"
#include "lib/allocators/dynarray.h"
#include "lib/byte_order.h"
class TestAllocators : public CxxTest::TestSuite
{
public:
void test_da()
{
DynArray da;
// basic test of functionality (not really meaningful)
TS_ASSERT_OK(da_alloc(&da, 1000));
TS_ASSERT_OK(da_set_size(&da, 1000));
TS_ASSERT_OK(da_set_prot(&da, PROT_NONE));
TS_ASSERT_OK(da_free(&da));
// test wrapping existing mem blocks for use with da_read
u8 data[4] = { 0x12, 0x34, 0x56, 0x78 };
TS_ASSERT_OK(da_wrap_fixed(&da, data, sizeof(data)));
u8 buf[4];
TS_ASSERT_OK(da_read(&da, buf, 4));
TS_ASSERT_EQUALS(read_le32(buf), 0x78563412); // read correct value
debug_SkipNextError(ERR::FAIL);
TS_ASSERT(da_read(&da, buf, 1) < 0); // no more data left
TS_ASSERT_OK(da_free(&da));
}
void test_matrix()
{
// not much we can do here; allocate a matrix, write to it and
// make sure it can be freed.
// (note: can't check memory layout because "matrix" is int** -
// array of pointers. the matrix interface doesn't guarantee
// that data comes in row-major order after the row pointers)
int** m = (int**)matrix_alloc(3, 3, sizeof(int));
m[0][0] = 1;
m[0][1] = 2;
m[1][0] = 3;
m[2][2] = 4;
matrix_free((void**)m);
}
};
#include "lib/self_test.h"
#include "lib/allocators/allocators.h"
#include "lib/allocators/dynarray.h"
#include "lib/byte_order.h"
class TestAllocators : public CxxTest::TestSuite
{
public:
void test_da()
{
DynArray da;
// basic test of functionality (not really meaningful)
TS_ASSERT_OK(da_alloc(&da, 1000));
TS_ASSERT_OK(da_set_size(&da, 1000));
TS_ASSERT_OK(da_set_prot(&da, PROT_NONE));
TS_ASSERT_OK(da_free(&da));
// test wrapping existing mem blocks for use with da_read
u8 data[4] = { 0x12, 0x34, 0x56, 0x78 };
TS_ASSERT_OK(da_wrap_fixed(&da, data, sizeof(data)));
u8 buf[4];
TS_ASSERT_OK(da_read(&da, buf, 4));
TS_ASSERT_EQUALS(read_le32(buf), 0x78563412); // read correct value
debug_SkipNextError(ERR::FAIL);
TS_ASSERT(da_read(&da, buf, 1) < 0); // no more data left
TS_ASSERT_OK(da_free(&da));
}
void test_matrix()
{
// not much we can do here; allocate a matrix, write to it and
// make sure it can be freed.
// (note: can't check memory layout because "matrix" is int** -
// array of pointers. the matrix interface doesn't guarantee
// that data comes in row-major order after the row pointers)
int** m = (int**)matrix_alloc(3, 3, sizeof(int));
m[0][0] = 1;
m[0][1] = 2;
m[1][0] = 3;
m[2][2] = 4;
matrix_free((void**)m);
}
};

View File

@ -1,123 +1,123 @@
#include "lib/self_test.h"
#include "lib/allocators/headerless.h"
void* const null = 0;
class TestHeaderless: public CxxTest::TestSuite
{
public:
void test_Basic()
{
HeaderlessAllocator a(8192);
// (these are disabled because they raise an assert)
#if 0
// can't Allocate unaligned sizes
TS_ASSERT_EQUALS(a.Allocate(1), null);
// can't Allocate too small amounts
TS_ASSERT_EQUALS(a.Allocate(16), null);
#endif
// can Allocate the entire pool
char* p1 = (char*)a.Allocate(4096);
char* p2 = (char*)a.Allocate(4096);
TS_ASSERT_DIFFERS(p1, null);
TS_ASSERT_DIFFERS(p2, null);
// back-to-back (non-freelist) allocations should be contiguous
TS_ASSERT_EQUALS(p1+4096, p2);
// allocations are writable
p1[0] = 11;
p1[4095] = 12;
}
void test_Free()
{
// Deallocate allows immediate reuse of the freed pointer
HeaderlessAllocator a(4096);
void* p1 = a.Allocate(1024);
a.Deallocate(p1, 1024);
void* p2 = a.Allocate(1024);
TS_ASSERT_EQUALS(p1, p2);
}
void test_Coalesce()
{
HeaderlessAllocator a(0x10000);
// can Allocate non-power-of-two sizes
void* p1 = a.Allocate(0x5670);
void* p2 = a.Allocate(0x7890);
void* p3 = a.Allocate(0x1230);
TS_ASSERT_DIFFERS(p1, null);
TS_ASSERT_DIFFERS(p2, null);
TS_ASSERT_DIFFERS(p3, null);
// after freeing, must be able to allocate the total amount
// note: we don't insist on being able to fill the entire
// memory range. in this case, the problem is that the pool has some
// virtual address space left, but the allocator doesn't grab that
// and add it to the freelist. that feature is currently not
// implemented.
a.Deallocate(p1, 0x5670);
a.Deallocate(p2, 0x7890);
a.Deallocate(p3, 0x1230);
void* p4 = a.Allocate(0xE130);
TS_ASSERT_DIFFERS(p4, null);
}
void test_Reset()
{
// after Reset, must return the same pointer as a freshly constructed instance
HeaderlessAllocator a(4096);
void* p1 = a.Allocate(128);
a.Reset();
void* p2 = a.Allocate(128);
TS_ASSERT_EQUALS(p1, p2);
}
// will the allocator survive a series of random but valid Allocate/Deallocate?
void test_Randomized()
{
const size_t poolSize = 1024*1024;
HeaderlessAllocator a(poolSize);
typedef std::map<void*, size_t> AllocMap;
AllocMap allocs;
srand(1);
for(int i = 0; i < 1000; i++)
{
// allocate
if(rand() >= RAND_MAX/2)
{
const size_t maxSize = (size_t)((rand() / (float)RAND_MAX) * poolSize);
const size_t size = maxSize & ~0xFu;
void* p = a.Allocate(size);
if(!p)
continue;
TS_ASSERT(allocs.find(p) == allocs.end());
allocs[p] = size;
}
// free
else
{
if(allocs.empty())
continue;
// find random allocation to deallocate
AllocMap::iterator it = allocs.begin();
const int numToSkip = rand() % (int)allocs.size();
for(int skip = 0; skip < numToSkip; skip++)
++it;
void* p = (*it).first;
size_t size = (*it).second;
allocs.erase(it);
a.Deallocate(p, size);
}
}
}
};
#include "lib/self_test.h"
#include "lib/allocators/headerless.h"
void* const null = 0;
class TestHeaderless: public CxxTest::TestSuite
{
public:
void test_Basic()
{
HeaderlessAllocator a(8192);
// (these are disabled because they raise an assert)
#if 0
// can't Allocate unaligned sizes
TS_ASSERT_EQUALS(a.Allocate(1), null);
// can't Allocate too small amounts
TS_ASSERT_EQUALS(a.Allocate(16), null);
#endif
// can Allocate the entire pool
char* p1 = (char*)a.Allocate(4096);
char* p2 = (char*)a.Allocate(4096);
TS_ASSERT_DIFFERS(p1, null);
TS_ASSERT_DIFFERS(p2, null);
// back-to-back (non-freelist) allocations should be contiguous
TS_ASSERT_EQUALS(p1+4096, p2);
// allocations are writable
p1[0] = 11;
p1[4095] = 12;
}
void test_Free()
{
// Deallocate allows immediate reuse of the freed pointer
HeaderlessAllocator a(4096);
void* p1 = a.Allocate(1024);
a.Deallocate(p1, 1024);
void* p2 = a.Allocate(1024);
TS_ASSERT_EQUALS(p1, p2);
}
void test_Coalesce()
{
HeaderlessAllocator a(0x10000);
// can Allocate non-power-of-two sizes
void* p1 = a.Allocate(0x5670);
void* p2 = a.Allocate(0x7890);
void* p3 = a.Allocate(0x1230);
TS_ASSERT_DIFFERS(p1, null);
TS_ASSERT_DIFFERS(p2, null);
TS_ASSERT_DIFFERS(p3, null);
// after freeing, must be able to allocate the total amount
// note: we don't insist on being able to fill the entire
// memory range. in this case, the problem is that the pool has some
// virtual address space left, but the allocator doesn't grab that
// and add it to the freelist. that feature is currently not
// implemented.
a.Deallocate(p1, 0x5670);
a.Deallocate(p2, 0x7890);
a.Deallocate(p3, 0x1230);
void* p4 = a.Allocate(0xE130);
TS_ASSERT_DIFFERS(p4, null);
}
void test_Reset()
{
// after Reset, must return the same pointer as a freshly constructed instance
HeaderlessAllocator a(4096);
void* p1 = a.Allocate(128);
a.Reset();
void* p2 = a.Allocate(128);
TS_ASSERT_EQUALS(p1, p2);
}
// will the allocator survive a series of random but valid Allocate/Deallocate?
void test_Randomized()
{
const size_t poolSize = 1024*1024;
HeaderlessAllocator a(poolSize);
typedef std::map<void*, size_t> AllocMap;
AllocMap allocs;
srand(1);
for(int i = 0; i < 1000; i++)
{
// allocate
if(rand() >= RAND_MAX/2)
{
const size_t maxSize = (size_t)((rand() / (float)RAND_MAX) * poolSize);
const size_t size = maxSize & ~0xFu;
void* p = a.Allocate(size);
if(!p)
continue;
TS_ASSERT(allocs.find(p) == allocs.end());
allocs[p] = size;
}
// free
else
{
if(allocs.empty())
continue;
// find random allocation to deallocate
AllocMap::iterator it = allocs.begin();
const int numToSkip = rand() % (int)allocs.size();
for(int skip = 0; skip < numToSkip; skip++)
++it;
void* p = (*it).first;
size_t size = (*it).second;
allocs.erase(it);
a.Deallocate(p, size);
}
}
}
};

View File

@ -1,40 +1,40 @@
/**
* =========================================================================
* File : base32.cpp
* Project : 0 A.D.
* Description : base32 conversion
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
// big endian!
void base32(const size_t in_len, const u8* in, u8* out)
{
u32 pool = 0; // of bits from buffer
size_t pool_bits = 0; // # bits currently in buffer
static const u8 tbl[33] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";
size_t in_bytes_left = in_len; // to avoid overrunning input buffer
const size_t out_chars = (in_len*8 + 4) / 5; // = ceil(# 5-bit blocks)
for(size_t i = 0; i < out_chars; i++)
{
if(pool_bits < 5 && in_bytes_left)
{
pool <<= 8;
pool |= *in++;
pool_bits += 8;
in_bytes_left--;
}
pool_bits -= 5;
const size_t c = (pool >> pool_bits) & 31;
*out++ = tbl[c];
}
*out++ = '\0';
}

@ -1,24 +1,24 @@
/**
* =========================================================================
* File : base32.h
* Project : 0 A.D.
* Description : base32 conversion
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_BASE32
#define INCLUDED_BASE32
/**
* generate the base32 textual representation of a buffer.
*
* @param len size [bytes] of input
 * @param in big-endian input data (assumed to be an integral number of bytes)
 * @param out output string; zero-terminated. must be big enough
* (i.e. at least ceil(len*CHAR_BIT/5) + 1 chars)
**/
extern void base32(const size_t len, const u8* in, u8* out);
#endif // #ifndef INCLUDED_BASE32
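To make the buffer-size requirement concrete, a sketch that encodes a 20-byte digest: 20*8 = 160 bits is exactly 32 base32 characters, plus one byte for the terminator (the digest contents are assumed to come from elsewhere):

#include <cstdio>

static void Base32Example(const u8 digest[20])
{
	u8 text[33];	// ceil(20*8/5) = 32 chars + '\0'
	base32(20, digest, text);
	printf("%s\n", (const char*)text);
}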

@ -1,26 +1,26 @@
/**
* =========================================================================
* File : bits.cpp
* Project : 0 A.D.
* Description : bit-twiddling.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "bits.h"
static inline u32 get_float_bits(const float x)
{
u32 ret;
memcpy(&ret, &x, 4);
return ret;
}
int floor_log2(const float x)
{
const u32 i = get_float_bits(x);
const u32 biased_exp = (i >> 23) & 0xFF;
return (int)biased_exp - 127;
}
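The function simply reads back the IEEE-754 exponent: 10.0f is stored as 1.25 * 2^3 with biased exponent 130 (0x82), so floor_log2 returns 130 - 127 = 3. A few spot checks, using the standard assert for brevity:

#include <cassert>

static void TestFloorLog2()
{
	assert(floor_log2(10.0f) == 3);	// 10.0 = 1.25 * 2^3
	assert(floor_log2(8.0f) == 3);	// exact power of two
	assert(floor_log2(0.5f) == -1);	// biased exponent 126
}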

@ -1,193 +1,193 @@
/**
* =========================================================================
* File : bits.h
* Project : 0 A.D.
* Description : bit-twiddling.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_BITS
#define INCLUDED_BITS
/**
* value of bit number <n>.
*
* @param n bit index.
*
* requirements:
* - T should be an unsigned type
* - n must be in [0, CHAR_BIT*sizeof(T)), else the result is undefined!
**/
template<typename T>
T Bit(size_t n)
{
const T one = T(1);
return (one << n);
}
/**
* pretty much the same as Bit<unsigned>.
* this is intended for the initialization of enum values, where a
* compile-time constant is required.
**/
#define BIT(n) (1u << (n))
template<typename T>
bool IsBitSet(T value, size_t index)
{
const T bit = Bit<T>(index);
return (value & bit) != 0;
}
// these are declared in the header and inlined to aid compiler optimizations
// (they can easily end up being time-critical).
// note: GCC can't inline extern functions, while VC's "Whole Program
// Optimization" can.
/**
* a mask that includes the lowest N bits
*
 * @param numBits number of bits in mask
**/
template<typename T>
T bit_mask(size_t numBits)
{
if(numBits == 0) // prevent shift count == bitsInT, which would be undefined.
return 0;
// notes:
// - the perhaps more intuitive (1 << numBits)-1 cannot
// handle numBits == bitsInT, but this implementation does.
// - though bulky, the below statements avoid sign-conversion warnings.
const T bitsInT = sizeof(T)*CHAR_BIT;
T mask(0);
mask = ~mask;
mask >>= T(bitsInT-numBits);
return mask;
}
/**
* extract the value of bits hi_idx:lo_idx within num
*
* example: bits(0x69, 2, 5) == 0x0A
*
* @param num number whose bits are to be extracted
* @param lo_idx bit index of lowest bit to include
* @param hi_idx bit index of highest bit to include
* @return value of extracted bits.
**/
template<typename T>
inline T bits(T num, size_t lo_idx, size_t hi_idx)
{
const size_t count = (hi_idx - lo_idx)+1; // # bits to return
T result = num >> T(lo_idx);
result &= bit_mask<T>(count);
return result;
}
/**
* @return number of 1-bits in mask
**/
template<typename T>
size_t PopulationCount(T mask)
{
// note: a more complex but probably faster method is given at
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
size_t num1Bits = 0;
while(mask)
{
mask &= mask-1; // clear least significant 1-bit
num1Bits++;
}
return num1Bits;
}
/**
* @return whether the given number is a power of two.
**/
template<typename T>
bool is_pow2(T n)
{
// 0 would pass the test below but isn't a POT.
if(n == 0)
return false;
return (n & (n-1)) == 0;
}
/**
* ceil(log2(x))
*
* @param x (unsigned integer)
* @return ceiling of the base-2 logarithm (i.e. rounded up) or
* zero if the input is zero.
**/
template<typename T>
size_t ceil_log2(T x)
{
T bit = 1;
size_t log = 0;
while(bit < x && bit != 0) // must detect overflow
{
log++;
bit *= 2;
}
return log;
}
/**
* floor(log2(f))
* fast, uses the FPU normalization hardware.
*
 * @param x (float) input; MUST be > 0, else results are undefined.
* @return floor of the base-2 logarithm (i.e. rounded down).
**/
extern int floor_log2(const float x);
/**
* round up to next larger power of two.
**/
template<typename T>
T round_up_to_pow2(T x)
{
return T(1) << ceil_log2(x);
}
/**
* round number up/down to the next given multiple.
*
* @param multiple: must be a power of two.
**/
template<typename T>
T round_up(T n, T multiple)
{
debug_assert(is_pow2(multiple));
const T result = (n + multiple-1) & ~(multiple-1);
debug_assert(n <= result && result < n+multiple);
return result;
}
template<typename T>
T round_down(T n, T multiple)
{
debug_assert(is_pow2(multiple));
const T result = n & ~(multiple-1);
debug_assert(result <= n && n < result+multiple);
return result;
}
template<typename T>
bool IsAligned(T t, uintptr_t multiple)
{
return ((uintptr_t)t % multiple) == 0;
}
#endif // #ifndef INCLUDED_BITS
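A few spot checks of the helpers above (standard assert used for brevity; u32 is the project's 32-bit unsigned type):

#include <cassert>

static void TestBitHelpers()
{
	assert(bit_mask<u32>(4) == 0x0Fu);		// lowest four bits
	assert(bit_mask<u32>(32) == 0xFFFFFFFFu);	// full-width mask is handled, too
	assert(bits(0x69u, 2, 5) == 0x0Au);		// the example from the comment above
	assert(PopulationCount(0xF0u) == 4);
	assert(ceil_log2(1000u) == 10);			// 2^10 = 1024 >= 1000
	assert(round_up(13u, 8u) == 16u);
	assert(round_down(13u, 8u) == 8u);
	assert(is_pow2(64u) && !is_pow2(0u));
}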

File diff suppressed because it is too large.

@ -1,186 +1,186 @@
/**
* =========================================================================
* File : code_annotation.h
* Project : 0 A.D.
* Description : macros for code annotation.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_CODE_ANNOTATION
#define INCLUDED_CODE_ANNOTATION
/**
* mark a function local variable or parameter as unused and avoid
* the corresponding compiler warning.
* use inside the function body, e.g. void f(int x) { UNUSED2(x); }
**/
#define UNUSED2(param) (void)param;
/**
* mark a function parameter as unused and avoid
* the corresponding compiler warning.
* wrap around the parameter name, e.g. void f(int UNUSED(x))
**/
#define UNUSED(param)
/**
"unreachable code" helpers
unreachable lines of code are often the source or symptom of subtle bugs.
they are flagged by compiler warnings; however, the opposite problem -
erroneously reaching certain spots (e.g. due to missing return statement)
is worse and not detected automatically.
to defend against this, the programmer can annotate their code to
indicate to humans that a particular spot should never be reached.
however, that isn't much help; better is a sentinel that raises an
error if it is actually reached. hence, the UNREACHABLE macro.
ironically, if the code guarded by UNREACHABLE works as it should,
compilers may flag the macro's code as unreachable. this would
distract from genuine warnings, which is unacceptable.
even worse, compilers differ in their code checking: GCC only complains if
non-void functions end without returning a value (i.e. missing return
statement), while VC checks if lines are unreachable (e.g. if they are
preceded by a return on all paths).
our implementation of UNREACHABLE solves this dilemma as follows:
- on GCC: call abort(); since it has the noreturn attribute, the
"non-void" warning disappears.
- on VC: avoid generating any code. we allow the compiler to assume the
spot is actually unreachable, which incidentally helps optimization.
if reached after all, a crash usually results. in that case, compile with
CONFIG_PARANOIA, which will cause an error message to be displayed.
this approach still allows for the possibility of automated
checking, but does not cause any compiler warnings.
**/
#define UNREACHABLE // actually defined below... this is for
# undef UNREACHABLE // CppDoc's benefit only.
// 1) final build: optimize assuming this location cannot be reached.
// may crash if that turns out to be untrue, but removes checking overhead.
#if CONFIG_FINAL
# define UNREACHABLE ASSUME_UNREACHABLE
// 2) normal build:
#else
// a) normal implementation: includes "abort", which is declared with
// noreturn attribute and therefore avoids GCC's "execution reaches
// end of non-void function" warning.
# if !MSC_VERSION || ICC_VERSION || CONFIG_PARANOIA
# define UNREACHABLE\
STMT(\
debug_assert(0); /* hit supposedly unreachable code */\
abort();\
)
// b) VC only: don't generate any code; squelch the warning and optimize.
# else
# define UNREACHABLE ASSUME_UNREACHABLE
# endif
#endif
/**
convenient specialization of UNREACHABLE for switch statements whose
default can never be reached. example usage:
int x;
switch(x % 2)
{
case 0: break;
case 1: break;
NODEFAULT;
}
**/
#define NODEFAULT default: UNREACHABLE
/**
* equivalent to strcpy, but indicates that the programmer checked usage and
* promises it is safe.
*
* (this macro prevents actually-safe instances of the function from
* showing up in searches)
**/
#define SAFE_STRCPY str##cpy
#define SAFE_WCSCPY wcs##cpy
// generate a symbol containing the line number of the macro invocation.
// used to give a unique name (per file) to types made by cassert.
// we can't prepend __FILE__ to make it globally unique - the filename
// may be enclosed in quotes. PASTE3_HIDDEN__ is needed to make sure
// __LINE__ is expanded correctly.
#define PASTE3_HIDDEN__(a, b, c) a ## b ## c
#define PASTE3__(a, b, c) PASTE3_HIDDEN__(a, b, c)
#define UID__ PASTE3__(LINE_, __LINE__, _)
#define UID2__ PASTE3__(LINE_, __LINE__, _2)
/**
* compile-time debug_assert. causes a compile error if the expression
* evaluates to zero/false.
*
* no runtime overhead; may be used anywhere, including file scope.
* especially useful for testing sizeof types.
*
 * @param expr expression that is expected to evaluate to non-zero at compile-time.
**/
#define cassert(expr) typedef detail::static_assert_<(expr)>::type UID__
namespace detail
{
template<bool> struct static_assert_;
template<> struct static_assert_<true>
{
typedef int type;
};
}
/**
* compile-time debug_assert. causes a compile error if the expression
* evaluates to zero/false.
*
* no runtime overhead; may be used anywhere, including file scope.
* especially useful for testing sizeof types.
*
* this version has a less helpful error message, but redefinition doesn't
* trigger warnings.
*
 * @param expr expression that is expected to evaluate to non-zero at compile-time.
**/
#define cassert2(expr) extern char CASSERT_FAILURE[1][(expr)]
// copied from boost::noncopyable; this definition avoids warnings when
// an exported class derives from noncopyable.
namespace noncopyable_ // protection from unintended ADL
{
class noncopyable
{
protected:
noncopyable() {}
~noncopyable() {}
private: // emphasize the following members are private
noncopyable(const noncopyable&);
const noncopyable& operator=(const noncopyable&);
};
}
typedef noncopyable_::noncopyable noncopyable;
// this form avoids ICC 11 W4 warnings about non-virtual dtors and
// suppression of the copy assignment operator.
#define NONCOPYABLE(className)\
private:\
className(const className&);\
const className& operator=(const className&)
#if ICC_VERSION
# define ASSUME_ALIGNED(ptr, multiple) __assume_aligned(ptr, multiple)
#else
# define ASSUME_ALIGNED(ptr, multiple)
#endif
#endif // #ifndef INCLUDED_CODE_ANNOTATION
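A hedged sketch of the annotation macros in use; the class and function are invented for illustration (u32 is the project's 32-bit unsigned type):

cassert(sizeof(u32) == 4);	// compile-time check, legal at file scope

class Resource
{
	NONCOPYABLE(Resource);	// copy ctor/assignment declared private, never defined
public:
	Resource() {}
};

static char ParityLetter(unsigned value, int UNUSED(flags))	// 'flags' reserved for later
{
	switch(value % 2)
	{
	case 0: return 'E';
	case 1: return 'O';
	NODEFAULT;	// raises an error (or is optimized away) if ever reached
	}
}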

@ -1,57 +1,57 @@
/**
* =========================================================================
* File : config2.h
* Project : 0 A.D.
* Description : compile-time configuration for isolated spots
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_CONFIG2
#define INCLUDED_CONFIG2
// rationale: a centralized header makes it much easier to see what all
// can be changed. it is assumed that only a few modules will need
// configuration choices, so rebuilding them all is acceptable.
// use config.h when settings must apply to the entire project.
// allow use of RDTSC for raw tick counts (otherwise, the slower
// wall-clock, which is more reliable on MP systems, will be used).
#ifndef CONFIG2_TIMER_ALLOW_RDTSC
# define CONFIG2_TIMER_ALLOW_RDTSC 1
#endif
// this enables/disables the actual checking done by OverrunProtector
// (quite slow, entailing mprotect() before/after each access).
// define to 1 here or in the relevant module if you suspect mem corruption.
// we provide this option because OverrunProtector requires some changes to
// the object being wrapped, and we want to leave those intact but not
// significantly slow things down except when needed.
#ifndef CONFIG2_ALLOCATORS_OVERRUN_PROTECTION
# define CONFIG2_ALLOCATORS_OVERRUN_PROTECTION 0
#endif
// zero-copy IO means all clients share the cached buffer; changing their
// contents is forbidden. this flag causes the buffers to be marked as
// read-only via MMU (writes would cause an exception), which takes a
// bit of extra time.
#ifndef CONFIG2_CACHE_READ_ONLY
#define CONFIG2_CACHE_READ_ONLY 1
#endif
// enable the wsdl emulator in Windows builds.
//
// NOTE: the official SDL distribution has two problems on Windows:
// - it specifies "/defaultlib:msvcrt.lib". this is troublesome because
// multiple heaps are active; errors result when allocated blocks are
// (for reasons unknown) passed to a different heap to be freed.
// one workaround is to add "/nodefaultlib:msvcrt.lib" to the linker
// command line in debug configurations.
// - it doesn't support color hardware mouse cursors and clashes with
// cursor.cpp's efforts by resetting the mouse cursor after movement.
#ifndef CONFIG2_WSDL
# define CONFIG2_WSDL 1
#endif
#endif // #ifndef INCLUDED_CONFIG2
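Because every setting is wrapped in #ifndef, it can be overridden per translation unit before the header is included, or project-wide via the compiler command line; a hedged sketch (the include path is an assumption):

// in a single .cpp that is suspected of memory corruption:
#define CONFIG2_ALLOCATORS_OVERRUN_PROTECTION 1	// must come before the include
#include "lib/config2.h"	// assumed include path

// or for the whole project, e.g. on the compiler command line (illustrative):
//   g++ -DCONFIG2_WSDL=0 ...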

@ -1,26 +1,26 @@
/**
* =========================================================================
* File : boost_filesystem.h
* Project : 0 A.D.
* Description : bring in Boost filesystem library
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_BOOST_FILESYSTEM
#define INCLUDED_BOOST_FILESYSTEM
// not W4-clean
#if MSC_VERSION
# pragma warning(push, 3)
#endif
#include "boost/filesystem.hpp"
namespace fs = boost::filesystem;
#if MSC_VERSION
# pragma warning(pop)
#endif
#endif // #ifndef INCLUDED_BOOST_FILESYSTEM
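Once the alias is in place, callers simply use the fs:: namespace; a minimal sketch, with directory_iterator usage as in Boost.Filesystem of that era and an assumed include path:

#include <cstdio>
#include "lib/external_libraries/boost_filesystem.h"	// assumed include path

static void ListDirectory(const fs::path& dir)
{
	// default-constructed iterator acts as the end marker
	for(fs::directory_iterator it(dir), end; it != end; ++it)
		printf("%s\n", it->path().string().c_str());
}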

@ -1,25 +1,25 @@
/**
* =========================================================================
* File : dbghelp.h
* Project : 0 A.D.
* Description : bring in dbghelp library
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_DBGHELP
#define INCLUDED_DBGHELP
#include "win.h"
#define _NO_CVCONST_H // request SymTagEnum be defined
#include <dbghelp.h> // must come after win.h
#include <OAIdl.h> // VARIANT
#if MSC_VERSION
# pragma comment(lib, "dbghelp.lib")
# pragma comment(lib, "oleaut32.lib") // VariantChangeType
#endif
#endif // #ifndef INCLUDED_DBGHELP

@ -1,29 +1,29 @@
/**
* =========================================================================
* File : libjpeg.h
* Project : 0 A.D.
* Description : bring in libjpeg header+library, with compatibility fixes
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_LIBJPEG
#define INCLUDED_LIBJPEG
extern "C" {
// we are not a core library module, so don't define JPEG_INTERNALS
#include <jpeglib.h>
#include <jerror.h>
}
// automatically link against the required library
#if MSC_VERSION
# ifdef NDEBUG
# pragma comment(lib, "jpeg-6b.lib")
# else
# pragma comment(lib, "jpeg-6bd.lib")
# endif // #ifdef NDEBUG
#endif // #ifdef MSC_VERSION
#endif // #ifndef INCLUDED_LIBJPEG

@ -1,26 +1,26 @@
/**
* =========================================================================
* File : openal.h
* Project : 0 A.D.
* Description : bring in OpenAL header+library, with compatibility fixes
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_OPENAL
#define INCLUDED_OPENAL
#if OS_MACOSX
# include <OpenAL/al.h>
# include <OpenAL/alc.h>
#else
# include <AL/al.h>
# include <AL/alc.h>
#endif
#if MSC_VERSION
# pragma comment(lib, "openal32.lib")
#endif
#endif // #ifndef INCLUDED_OPENAL

@ -1,28 +1,28 @@
/**
* =========================================================================
* File : png.h
* Project : 0 A.D.
* Description : bring in LibPNG header+library, with compatibility fixes
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_PNG
#define INCLUDED_PNG
// <png.h> includes <zlib.h>, which requires some fixes by our header.
#include "lib/external_libraries/zlib.h"
#include <png.h>
// automatically link against the required library
#if MSC_VERSION
# ifdef NDEBUG
# pragma comment(lib, "libpng13.lib")
# else
# pragma comment(lib, "libpng13d.lib")
# endif // NDEBUG
#endif // MSC_VERSION
#endif // #ifndef INCLUDED_PNG

@ -1,25 +1,25 @@
/**
* =========================================================================
* File : sdl_fwd.h
* Project : 0 A.D.
* Description : forward declaration of SDL_Event
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_SDL_FWD
#define INCLUDED_SDL_FWD
// 2006-08-26 SDL is dragged into 6 of our 7 static library components.
// it must be specified in each of their "extern_libs" so that the
// include path is set and <SDL/sdl.h> can be found.
//
// obviously this is bad, so we work around the root cause. mostly only
// SDL_Event is needed. unfortunately it cannot be forward-declared,
// because it is a union (regrettable design mistake).
// we fix this by wrapping it in a struct, which can safely be
// forward-declared and used for SDL_Event_* parameters.
struct SDL_Event_;
#endif // #ifndef INCLUDED_SDL_FWD
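The wrapper itself is defined wherever the real SDL headers are available; a sketch of the pattern (the member name and the include are assumptions, not taken from this header):

// in a header that is allowed to include SDL:
#include <SDL/SDL.h>

struct SDL_Event_
{
	SDL_Event ev;	// the union sits inside a struct, which CAN be forward-declared
};

// modules that merely pass events around include sdl_fwd.h and use pointers:
void QueueEvent(const SDL_Event_* event);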


@@ -1,41 +1,41 @@
/**
* =========================================================================
* File : wxwidgets.h
* Project : 0 A.D.
* Description : bring in wxWidgets headers, with compatibility fixes
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_WXWIDGETS
#define INCLUDED_WXWIDGETS
// prevent wxWidgets from pulling in windows.h - it's mostly unnecessary
// and interferes with posix_sock's declarations.
#define _WINDOWS_ // <windows.h> include guard
// manually define what is actually needed from windows.h:
struct HINSTANCE__
{
int unused;
};
typedef struct HINSTANCE__* HINSTANCE; // definition as if STRICT were #defined
#include "wx/wxprec.h"
#include "wx/file.h"
#include "wx/ffile.h"
#include "wx/filename.h"
#include "wx/mimetype.h"
#include "wx/statline.h"
#include "wx/debugrpt.h"
#ifdef __WXMSW__
#include "wx/evtloop.h" // for SetCriticalWindow()
#endif // __WXMSW__
// note: wxWidgets already does #pragma comment(lib) to add link targets.
#endif // #ifndef INCLUDED_WXWIDGETS


@@ -1,38 +1,38 @@
/**
* =========================================================================
* File : zlib.h
* Project : 0 A.D.
* Description : bring in ZLib header+library, with compatibility fixes
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_ZLIB
#define INCLUDED_ZLIB
// zlib.h -> zconf.h includes <windows.h>, which causes conflicts.
// define the include guard to prevent it from actually being included and
// then manually define the few things that are actually needed.
#define _WINDOWS_ // windows.h include guard
#ifndef WINAPI
# define WINAPI __stdcall
# define WINAPIV __cdecl
#endif
#ifndef ZLIB_STATIC
#define ZLIB_DLL
#endif
#include <zlib.h>
// automatically link against the required library
#if MSC_VERSION
# ifdef NDEBUG
# pragma comment(lib, "zlib1.lib")
# else
# pragma comment(lib, "zlib1d.lib")
# endif
#endif
#endif // #ifndef INCLUDED_ZLIB


@@ -1,59 +1,59 @@
/**
* =========================================================================
* File : fat_time.cpp
* Project : 0 A.D.
* Description : timestamp conversion: DOS FAT <-> Unix time_t
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "fat_time.h"
#include <ctime>
#include "lib/bits.h"
time_t time_t_from_FAT(u32 fat_timedate)
{
const u32 fat_time = bits(fat_timedate, 0, 15);
const u32 fat_date = bits(fat_timedate, 16, 31);
struct tm t; // struct tm format:
t.tm_sec = bits(fat_time, 0,4) * 2; // [0,59]
t.tm_min = bits(fat_time, 5,10); // [0,59]
t.tm_hour = bits(fat_time, 11,15); // [0,23]
t.tm_mday = bits(fat_date, 0,4); // [1,31]
t.tm_mon = bits(fat_date, 5,8) - 1; // [0,11]
t.tm_year = bits(fat_date, 9,15) + 80; // since 1900
t.tm_isdst = -1; // unknown - let libc determine
// otherwise: totally bogus, and at the limit of 32-bit time_t
debug_assert(t.tm_year < 138);
time_t ret = mktime(&t);
debug_assert(ret != (time_t)-1); // mktime shouldn't fail
return ret;
}
u32 FAT_from_time_t(time_t time)
{
// (values are adjusted for DST)
struct tm* t = localtime(&time);
u16 fat_time = 0;
fat_time |= u16(t->tm_sec/2); // 5
fat_time |= u16(t->tm_min) << 5; // 6
fat_time |= u16(t->tm_hour) << 11; // 5
u16 fat_date = 0;
fat_date |= u16(t->tm_mday); // 5
fat_date |= u16(t->tm_mon+1) << 5; // 4
fat_date |= u16(t->tm_year-80) << 9; // 7
u32 fat_timedate = u32_from_u16(fat_date, fat_time);
return fat_timedate;
}
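// worked example (illustrative, arbitrary local time of 2009-01-02 03:04:06):
//   fat_time = (6/2) | (4 << 5) | (3 << 11)             = 0x1883
//   fat_date = 2 | ((0+1) << 5) | ((109-80) << 9)       = 0x3A22
// FAT_from_time_t therefore returns u32_from_u16(0x3A22, 0x1883) = 0x3A221883,
// and time_t_from_FAT(0x3A221883) recovers the same timestamp. since FAT only
// stores seconds/2, odd second values come back rounded down by one second.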


@@ -1,2 +1,2 @@
extern time_t time_t_from_FAT(u32 fat_timedate);
extern u32 FAT_from_time_t(time_t time);


@@ -1,23 +1,23 @@
/**
* =========================================================================
* File : archive.cpp
* Project : 0 A.D.
* Description : interface for reading from and creating archives.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "archive.h"
ERROR_ASSOCIATE(ERR::ARCHIVE_UNKNOWN_FORMAT, "Unknown archive format", -1);
ERROR_ASSOCIATE(ERR::ARCHIVE_UNKNOWN_METHOD, "Unknown compression method", -1);
IArchiveReader::~IArchiveReader()
{
}
IArchiveWriter::~IArchiveWriter()
{
}


@@ -1,81 +1,81 @@
/**
* =========================================================================
* File : archive.h
* Project : 0 A.D.
* Description : interface for reading from and creating archives.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_ARCHIVE
#define INCLUDED_ARCHIVE
#include "lib/file/path.h"
#include "lib/file/file_system.h" // FileInfo
#include "lib/file/common/file_loader.h"
#include "lib/file/vfs/vfs_path.h"
// rationale: this module doesn't build a directory tree of the entries
// within an archive. that task is left to the VFS; here, we are only
// concerned with enumerating all archive entries.
namespace ERR
{
const LibError ARCHIVE_UNKNOWN_FORMAT = -110400;
const LibError ARCHIVE_UNKNOWN_METHOD = -110401;
}
struct IArchiveFile : public IFileLoader
{
};
typedef shared_ptr<IArchiveFile> PIArchiveFile;
struct IArchiveReader
{
virtual ~IArchiveReader();
/**
* called for each archive entry.
* @param pathname full pathname of entry; only valid during the callback.
**/
typedef void (*ArchiveEntryCallback)(const VfsPath& pathname, const FileInfo& fileInfo, PIArchiveFile archiveFile, uintptr_t cbData);
virtual LibError ReadEntries(ArchiveEntryCallback cb, uintptr_t cbData) = 0;
};
typedef shared_ptr<IArchiveReader> PIArchiveReader;
// note: when creating an archive, any existing file with the given pathname
// will be overwritten.
// rationale: don't support partial adding, i.e. updating archive with
// only one file. this would require overwriting parts of the Zip archive,
// which is annoying and slow. also, archives are usually built in
// seek-optimal order, which would break if we start inserting files.
// while testing, loose files can be used, so there's no loss.
struct IArchiveWriter
{
/**
* write out the archive to disk; only hereafter is it valid.
**/
virtual ~IArchiveWriter();
/**
* add a file to the archive.
*
* rationale: passing in a filename instead of the compressed file
* contents makes for better encapsulation because callers don't need
* to know about the codec. one disadvantage is that loading the file
* contents can no longer take advantage of the VFS cache nor previously
* archived versions. however, the archive builder usually adds files
* precisely because they aren't in archives, and the cache would
* thrash anyway, so this is deemed acceptable.
**/
virtual LibError AddFile(const Path& pathname) = 0;
};
typedef shared_ptr<IArchiveWriter> PIArchiveWriter;
#endif // #ifndef INCLUDED_ARCHIVE
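// usage sketch (not part of this header): enumerate an archive's entries via
// ReadEntries. assumes the Zip backend declared in archive_zip.h; the callback
// matches the ArchiveEntryCallback typedef above.
static void CountEntry(const VfsPath& /*pathname*/, const FileInfo& /*fileInfo*/, PIArchiveFile /*archiveFile*/, uintptr_t cbData)
{
	size_t& numEntries = *(size_t*)cbData;
	numEntries++;
}

static LibError CountArchiveEntries(const Path& archivePathname, size_t& numEntries)
{
	numEntries = 0;
	PIArchiveReader archiveReader = CreateArchiveReader_Zip(archivePathname);
	if(!archiveReader)
		WARN_RETURN(ERR::FAIL);
	return archiveReader->ReadEntries(&CountEntry, (uintptr_t)&numEntries);
}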

File diff suppressed because it is too large.


@@ -1,26 +1,26 @@
/**
* =========================================================================
* File : vfs_optimizer.h
* Project : 0 A.D.
* Description : automatically bundles files into archives in order of
* : access to optimize I/O.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_VFS_OPTIMIZER
#define INCLUDED_VFS_OPTIMIZER
extern LibError vfs_opt_rebuild_main_archive(const char* trace_filename, const char* archive_fn_fmt);
extern void vfs_opt_auto_build_cancel();
extern int vfs_opt_auto_build(const char* trace_filename,
const char* archive_fn_fmt, const char* mini_archive_fn_fmt, bool force_build = false);
extern void vfs_opt_notify_loose_file(const char* atom_fn);
extern void vfs_opt_notify_archived_file(const char* atom_fn);
#endif // #ifndef INCLUDED_VFS_OPTIMIZER

File diff suppressed because it is too large.


@@ -1,19 +1,19 @@
/**
* =========================================================================
* File : archive_zip.h
* Project : 0 A.D.
* Description : archive backend for Zip files.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_ARCHIVE_ZIP
#define INCLUDED_ARCHIVE_ZIP
#include "archive.h"
LIB_API PIArchiveReader CreateArchiveReader_Zip(const Path& archivePathname);
LIB_API PIArchiveWriter CreateArchiveWriter_Zip(const Path& archivePathname);
#endif // #ifndef INCLUDED_ARCHIVE_ZIP


@@ -1,16 +1,16 @@
/**
* =========================================================================
* File : codec.cpp
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "codec.h"
ICodec::~ICodec()
{
}


@@ -1,74 +1,74 @@
/**
* =========================================================================
* File : codec.h
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_CODEC
#define INCLUDED_CODEC
// rationale: this layer allows for other compression methods/libraries
// besides ZLib. it also simplifies the interface for user code and
// does error checking, etc.
#define CODEC_COMPUTE_CHECKSUM 1
struct ICodec
{
public:
/**
* note: the implementation should not check whether any data remains -
* codecs are sometimes destroyed without completing a transfer.
**/
virtual ~ICodec();
/**
* @return an upper bound on the output size for the given amount of input.
* this is used when allocating a single buffer for the whole operation.
**/
virtual size_t MaxOutputSize(size_t inSize) const = 0;
/**
* clear all previous state and prepare for reuse.
*
* this is as if the object were destroyed and re-created, but more
* efficient since it avoids reallocating a considerable amount of
* memory (about 200KB for LZ).
**/
virtual LibError Reset() = 0;
/**
* process (i.e. compress or decompress) data.
*
* @param outSize bytes remaining in the output buffer; shall not be zero.
* @param inConsumed, outProduced how many bytes in the input and
* output buffers were used. either or both of these can be zero if
* the input size is small or there's not enough output space.
**/
virtual LibError Process(const u8* in, size_t inSize, u8* out, size_t outSize, size_t& inConsumed, size_t& outProduced) = 0;
/**
* flush buffers and make sure all output has been produced.
*
* @param checksum over all input data.
* @return error status for the entire operation.
**/
virtual LibError Finish(u32& checksum, size_t& outProduced) = 0;
/**
* update a checksum to reflect the contents of a buffer.
*
* @param checksum the initial value (must be 0 on first call)
* @return the new checksum. note: after all data has been seen, this is
	 * identical to what Finish would return.
**/
virtual u32 UpdateChecksum(u32 checksum, const u8* in, size_t inSize) const = 0;
};
typedef shared_ptr<ICodec> PICodec;
#endif // #ifndef INCLUDED_CODEC
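// usage sketch (not part of this header): compress an entire buffer in one
// step. assumes the deflate codec factory declared in codec_zlib.h and
// <vector>; MaxOutputSize bounds the worst case, so one Process call followed
// by Finish suffices, after which the vector is trimmed to the bytes written.
static LibError CompressBuffer(const u8* in, size_t inSize, std::vector<u8>& compressed, u32& checksum)
{
	PICodec codec = CreateCompressor_ZLibDeflate();
	compressed.resize(codec->MaxOutputSize(inSize));
	size_t inConsumed, outProduced;
	RETURN_ERR(codec->Process(in, inSize, &compressed[0], compressed.size(), inConsumed, outProduced));
	size_t outProducedByFinish;
	RETURN_ERR(codec->Finish(checksum, outProducedByFinish));
	compressed.resize(outProduced + outProducedByFinish);
	return INFO::OK;
}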


@@ -1,289 +1,289 @@
#include "precompiled.h"
#include "codec_zlib.h"
#include "codec.h"
#include "lib/external_libraries/zlib.h"
#include "lib/sysdep/cpu.h"
class Codec_ZLib : public ICodec
{
public:
u32 UpdateChecksum(u32 checksum, const u8* in, size_t inSize) const
{
#if CODEC_COMPUTE_CHECKSUM
return (u32)crc32(checksum, in, (uInt)inSize);
#else
UNUSED2(checksum);
UNUSED2(in);
UNUSED2(inSize);
return 0;
#endif
}
protected:
u32 InitializeChecksum()
{
#if CODEC_COMPUTE_CHECKSUM
return crc32(0, 0, 0);
#else
return 0;
#endif
}
};
//-----------------------------------------------------------------------------
class Codec_ZLibNone : public Codec_ZLib
{
public:
Codec_ZLibNone()
{
Reset();
}
virtual ~Codec_ZLibNone()
{
}
virtual size_t MaxOutputSize(size_t inSize) const
{
return inSize;
}
virtual LibError Reset()
{
m_checksum = InitializeChecksum();
return INFO::OK;
}
virtual LibError Process(const u8* in, size_t inSize, u8* out, size_t outSize, size_t& inConsumed, size_t& outProduced)
{
const size_t transferSize = std::min(inSize, outSize);
cpu_memcpy(out, in, transferSize);
inConsumed = outProduced = transferSize;
m_checksum = UpdateChecksum(m_checksum, out, outProduced);
return INFO::OK;
}
virtual LibError Finish(u32& checksum, size_t& outProduced)
{
outProduced = 0;
checksum = m_checksum;
return INFO::OK;
}
private:
u32 m_checksum;
};
//-----------------------------------------------------------------------------
class CodecZLibStream : public Codec_ZLib
{
protected:
CodecZLibStream()
{
memset(&m_zs, 0, sizeof(m_zs));
m_checksum = InitializeChecksum();
}
static LibError LibError_from_zlib(int zlib_ret)
{
switch(zlib_ret)
{
case Z_OK:
return INFO::OK;
case Z_STREAM_END:
WARN_RETURN(ERR::FAIL);
case Z_MEM_ERROR:
WARN_RETURN(ERR::NO_MEM);
case Z_DATA_ERROR:
WARN_RETURN(ERR::CORRUPTED);
case Z_STREAM_ERROR:
WARN_RETURN(ERR::INVALID_PARAM);
default:
WARN_RETURN(ERR::FAIL);
}
}
static void WarnIfZLibError(int zlib_ret)
{
(void)LibError_from_zlib(zlib_ret);
}
typedef int ZEXPORT (*ZLibFunc)(z_streamp strm, int flush);
LibError CallStreamFunc(ZLibFunc func, int flush, const u8* in, const size_t inSize, u8* out, const size_t outSize, size_t& inConsumed, size_t& outProduced)
{
m_zs.next_in = (Byte*)in;
m_zs.avail_in = (uInt)inSize;
m_zs.next_out = (Byte*)out;
m_zs.avail_out = (uInt)outSize;
int ret = func(&m_zs, flush);
// sanity check: if ZLib reports end of stream, all input data
// must have been consumed.
if(ret == Z_STREAM_END)
{
debug_assert(m_zs.avail_in == 0);
ret = Z_OK;
}
debug_assert(inSize >= m_zs.avail_in && outSize >= m_zs.avail_out);
inConsumed = inSize - m_zs.avail_in;
outProduced = outSize - m_zs.avail_out;
return LibError_from_zlib(ret);
}
mutable z_stream m_zs;
// note: z_stream does contain an 'adler' checksum field, but that's
// not updated in streams lacking a gzip header, so we'll have to
// calculate a checksum ourselves.
// adler32 is somewhat weaker than CRC32, but a more important argument
// is that we should use the latter for compatibility with Zip archives.
mutable u32 m_checksum;
};
//-----------------------------------------------------------------------------
class Compressor_ZLib : public CodecZLibStream
{
public:
Compressor_ZLib()
{
		// note: with Z_BEST_COMPRESSION, 78% of
// archive builder CPU time is spent in ZLib, even though
// that is interleaved with IO; everything else is negligible.
// we therefore enable this only in final builds; during
// development, 1.5% bigger archives are definitely worth much
// faster build time.
#if CONFIG_FINAL
const int level = Z_BEST_COMPRESSION;
#else
const int level = Z_BEST_SPEED;
#endif
const int windowBits = -MAX_WBITS; // max window size; omit ZLib header
const int memLevel = 9; // max speed; total mem ~= 384KiB
const int strategy = Z_DEFAULT_STRATEGY; // normal data - not RLE
const int ret = deflateInit2(&m_zs, level, Z_DEFLATED, windowBits, memLevel, strategy);
debug_assert(ret == Z_OK);
}
virtual ~Compressor_ZLib()
{
const int ret = deflateEnd(&m_zs);
WarnIfZLibError(ret);
}
virtual size_t MaxOutputSize(size_t inSize) const
{
return (size_t)deflateBound(&m_zs, (uLong)inSize);
}
virtual LibError Reset()
{
m_checksum = InitializeChecksum();
const int ret = deflateReset(&m_zs);
return LibError_from_zlib(ret);
}
virtual LibError Process(const u8* in, size_t inSize, u8* out, size_t outSize, size_t& inConsumed, size_t& outProduced)
{
m_checksum = UpdateChecksum(m_checksum, in, inSize);
return CodecZLibStream::CallStreamFunc(deflate, 0, in, inSize, out, outSize, inConsumed, outProduced);
}
virtual LibError Finish(u32& checksum, size_t& outProduced)
{
const uInt availOut = m_zs.avail_out;
// notify zlib that no more data is forthcoming and have it flush output.
// our output buffer has enough space due to use of deflateBound;
// therefore, deflate must return Z_STREAM_END.
const int ret = deflate(&m_zs, Z_FINISH);
debug_assert(ret == Z_STREAM_END);
outProduced = size_t(availOut - m_zs.avail_out);
checksum = m_checksum;
return INFO::OK;
}
};
//-----------------------------------------------------------------------------
class Decompressor_ZLib : public CodecZLibStream
{
public:
Decompressor_ZLib()
{
const int windowBits = -MAX_WBITS; // max window size; omit ZLib header
const int ret = inflateInit2(&m_zs, windowBits);
debug_assert(ret == Z_OK);
}
virtual ~Decompressor_ZLib()
{
const int ret = inflateEnd(&m_zs);
WarnIfZLibError(ret);
}
virtual size_t MaxOutputSize(size_t inSize) const
{
// relying on an upper bound for the output is a really bad idea for
// large files. archive formats store the uncompressed file sizes,
// so callers should use that when allocating the output buffer.
debug_assert(inSize < 1*MiB);
return inSize*1032; // see http://www.zlib.org/zlib_tech.html
}
virtual LibError Reset()
{
m_checksum = InitializeChecksum();
const int ret = inflateReset(&m_zs);
return LibError_from_zlib(ret);
}
virtual LibError Process(const u8* in, size_t inSize, u8* out, size_t outSize, size_t& inConsumed, size_t& outProduced)
{
const LibError ret = CodecZLibStream::CallStreamFunc(inflate, Z_SYNC_FLUSH, in, inSize, out, outSize, inConsumed, outProduced);
m_checksum = UpdateChecksum(m_checksum, out, outProduced);
return ret;
}
virtual LibError Finish(u32& checksum, size_t& outProduced)
{
// no action needed - decompression always flushes immediately.
outProduced = 0;
checksum = m_checksum;
return INFO::OK;
}
};
//-----------------------------------------------------------------------------
PICodec CreateCodec_ZLibNone()
{
return PICodec(new Codec_ZLibNone);
}
PICodec CreateCompressor_ZLibDeflate()
{
return PICodec(new Compressor_ZLib);
}
PICodec CreateDecompressor_ZLibDeflate()
{
return PICodec (new Decompressor_ZLib);
}


@@ -1,5 +1,5 @@
#include "codec.h"
extern PICodec CreateCodec_ZLibNone();
extern PICodec CreateCompressor_ZLibDeflate();
extern PICodec CreateDecompressor_ZLibDeflate();


@@ -1,144 +1,144 @@
#include "lib/self_test.h"
#include "lib/base32.h"
#include "lib/res/file/path.h"
#include "lib/res/file/fp_posix.h"
#include "lib/res/file/file_cache.h"
#include "lib/res/file/vfs/vfs.h"
#include "lib/res/file/archive/archive.h"
#include "lib/res/file/archive/archive_builder.h"
#include "lib/res/h_mgr.h"
#include "lib/res/mem.h"
#include "lib/rand.h"
class TestArchiveBuilder : public CxxTest::TestSuite
{
const char* const archive_fn;
static const size_t NUM_FILES = 30;
static const size_t MAX_FILE_SIZE = 20000;
std::set<const char*> existing_names;
const char* gen_random_name()
{
// 10 chars is enough for (10-1)*5 bits = 45 bits > u32
char name_tmp[10];
for(;;)
{
u32 rand_num = rand(0, 100000);
base32(4, (const u8*)&rand_num, (u8*)name_tmp);
// store filename in atom pool
const char* atom_fn = file_make_unique_fn_copy(name_tmp);
// done if the filename is unique (not been generated yet)
if(existing_names.find(atom_fn) == existing_names.end())
{
existing_names.insert(atom_fn);
return atom_fn;
}
}
}
struct TestFile
{
off_t size;
u8* data; // must be delete[]-ed after comparing
};
// (must be separate array and end with NULL entry (see Filenames))
const char* filenames[NUM_FILES+1];
TestFile files[NUM_FILES];
void generate_random_files()
{
for(size_t i = 0; i < NUM_FILES; i++)
{
const off_t size = rand(0, MAX_FILE_SIZE);
u8* data = new u8[size];
// random data won't compress at all, and we want to exercise
// the uncompressed codepath as well => make some of the files
			// easily compressible (far fewer distinct values).
const bool make_easily_compressible = (rand(0, 100) > 50);
if(make_easily_compressible)
{
for(off_t i = 0; i < size; i++)
data[i] = rand() & 0x0F;
}
else
{
for(off_t i = 0; i < size; i++)
data[i] = rand() & 0xFF;
}
filenames[i] = gen_random_name();
files[i].size = size;
files[i].data = data;
ssize_t bytes_written = vfs_store(filenames[i], data, size, FILE_NO_AIO);
TS_ASSERT_EQUALS(bytes_written, size);
}
// 0-terminate the list - see Filenames decl.
filenames[NUM_FILES] = NULL;
}
public:
TestArchiveBuilder()
: archive_fn("test_archive_random_data.zip") {}
void setUp()
{
(void)path_SetRoot(0, ".");
vfs_init();
}
void tearDown()
{
vfs_shutdown();
path_ResetRootDir();
}
void test_create_archive_with_random_files()
{
if(!file_exists("archivetest")) // don't get stuck if this test fails and never deletes the directory it created
TS_ASSERT_OK(dir_create("archivetest"));
TS_ASSERT_OK(vfs_mount("", "archivetest"));
generate_random_files();
TS_ASSERT_OK(archive_build(archive_fn, filenames));
// wipe out file cache, otherwise we're just going to get back
// the file contents read during archive_build .
file_cache_reset();
// read in each file and compare file contents
Handle ha = archive_open(archive_fn);
TS_ASSERT(ha > 0);
for(size_t i = 0; i < NUM_FILES; i++)
{
File f;
TS_ASSERT_OK(afile_open(ha, filenames[i], 0, 0, &f));
FileIOBuf buf = FILE_BUF_ALLOC;
ssize_t bytes_read = afile_read(&f, 0, files[i].size, &buf);
TS_ASSERT_EQUALS(bytes_read, files[i].size);
TS_ASSERT_SAME_DATA(buf, files[i].data, files[i].size);
TS_ASSERT_OK(file_cache_free(buf));
TS_ASSERT_OK(afile_close(&f));
SAFE_ARRAY_DELETE(files[i].data);
}
TS_ASSERT_OK(archive_close(ha));
dir_delete("archivetest");
file_delete(archive_fn);
}
void test_multiple_init_shutdown()
{
// setUp has already vfs_init-ed it and tearDown will vfs_shutdown.
vfs_shutdown();
vfs_init();
}
};


@@ -1,59 +1,59 @@
#include "lib/self_test.h"
#include "lib/self_test.h"
#include "lib/res/file/archive/codec_zlib.h"
class TestCodecZLib : public CxxTest::TestSuite
{
public:
void test_compress_decompress_compare()
{
size_t inConsumed, outProduced;
u32 checksum;
// generate random input udata
// (limit values to 0..7 so that the udata will actually be compressible)
const size_t usize = 10000;
u8 udata[usize];
for(size_t i = 0; i < usize; i++)
udata[i] = rand() & 0x07;
// compress
u8* cdata; size_t csize;
{
boost::shared_ptr<ICodec> compressor_zlib = CreateCompressor_ZLib();
ICodec* c = compressor_zlib.get();
const size_t csizeMax = c->MaxOutputSize(usize);
cdata = new u8[csizeMax];
TS_ASSERT_OK(c->Process(udata, usize, cdata, csizeMax, inConsumed, outProduced));
TS_ASSERT_EQUALS(inConsumed, usize);
TS_ASSERT_LESS_THAN_EQUALS(outProduced, csizeMax);
u8* cdata2;
TS_ASSERT_OK(c->Finish(cdata2, csize, checksum));
TS_ASSERT_EQUALS(cdata, cdata2);
TS_ASSERT_EQUALS(csize, outProduced);
}
// make sure the data changed during compression
TS_ASSERT(csize != usize || memcmp(udata, cdata, std::min(usize, csize)) != 0);
// decompress
u8 ddata[usize];
{
boost::shared_ptr<ICodec> decompressor_zlib = CreateDecompressor_ZLib();
ICodec* d = decompressor_zlib.get();
TS_ASSERT_OK(decompressor_zlib->Process(cdata, csize, ddata, usize, inConsumed, outProduced));
TS_ASSERT_EQUALS(inConsumed, csize); // ZLib always outputs as much data as possible
TS_ASSERT_EQUALS(outProduced, usize); // .. so these figures are correct before Finish()
u8* ddata2; size_t dsize;
TS_ASSERT_OK(d->Finish(&ddata2, &dsize, &checksum));
TS_ASSERT_EQUALS(ddata, ddata2);
TS_ASSERT_EQUALS(dsize, outProduced);
}
// verify udata survived intact
TS_ASSERT_SAME_DATA(udata, ddata, usize);
delete[] cdata;
}
};


@@ -1,54 +1,54 @@
#include "lib/self_test.h"
#include "lib/self_test.h"
#include "lib/res/file/archive/compression.h"
class TestCompression : public CxxTest::TestSuite
{
public:
void test_compress_decompress_compare()
{
// generate random input data
// (limit values to 0..7 so that the data will actually be compressible)
const size_t data_size = 10000;
u8 data[data_size];
for(size_t i = 0; i < data_size; i++)
data[i] = rand() & 0x07;
u8* cdata; size_t csize;
u8 udata[data_size];
// compress
uintptr_t c = comp_alloc(CT_COMPRESSION, CM_DEFLATE);
{
TS_ASSERT(c != 0);
const size_t csizeBound = comp_max_output_size(c, data_size);
TS_ASSERT_OK(comp_alloc_output(c, csizeBound));
const ssize_t cdata_produced = comp_feed(c, data, data_size);
TS_ASSERT(cdata_produced >= 0);
u32 checksum;
TS_ASSERT_OK(comp_finish(c, &cdata, &csize, &checksum));
TS_ASSERT(cdata_produced <= (ssize_t)csize); // can't have produced more than total
}
// decompress
uintptr_t d = comp_alloc(CT_DECOMPRESSION, CM_DEFLATE);
{
TS_ASSERT(d != 0);
comp_set_output(d, udata, data_size);
const ssize_t udata_produced = comp_feed(d, cdata, csize);
TS_ASSERT(udata_produced >= 0);
u8* udata_final; size_t usize_final; u32 checksum;
TS_ASSERT_OK(comp_finish(d, &udata_final, &usize_final, &checksum));
TS_ASSERT(udata_produced <= (ssize_t)usize_final); // can't have produced more than total
TS_ASSERT_EQUALS(udata_final, udata); // output buffer address is same
TS_ASSERT_EQUALS(usize_final, data_size); // correct amount of output
}
comp_free(c);
comp_free(d);
// verify data survived intact
TS_ASSERT_SAME_DATA(data, udata, data_size);
}
};


@@ -1,25 +1,25 @@
#include "lib/self_test.h"
#include <ctime>
#include "lib/res/file/archive/fat_time.h"
class TestFatTime: public CxxTest::TestSuite
{
public:
void test_fat_timedate_conversion()
{
// note: FAT time stores second/2, which means converting may
// end up off by 1 second.
time_t t, converted_t;
t = time(0);
converted_t = time_t_from_FAT(FAT_from_time_t(t));
TS_ASSERT_DELTA(t, converted_t, 2);
t++;
converted_t = time_t_from_FAT(FAT_from_time_t(t));
TS_ASSERT_DELTA(t, converted_t, 2);
}
};


@@ -1,10 +1,10 @@
#include "lib/self_test.h"
#include <time.h>
#include "lib/res/file/archive/zip.h"
class TestZip : public CxxTest::TestSuite
{
public:
};


@@ -1,135 +1,135 @@
/**
* =========================================================================
* File : stream.cpp
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "stream.h"
#include "lib/allocators/allocators.h" // page_aligned_alloc
#include "lib/allocators/shared_ptr.h"
#include "codec.h"
//#include "lib/timer.h"
//TIMER_ADD_CLIENT(tc_stream);
OutputBufferManager::OutputBufferManager()
{
Reset();
}
void OutputBufferManager::Reset()
{
m_buffer = 0;
m_size = 0;
m_capacity = 0;
}
void OutputBufferManager::SetBuffer(u8* buffer, size_t size)
{
debug_assert(IsAllowableBuffer(buffer, size));
m_buffer = buffer;
m_size = size;
}
void OutputBufferManager::AllocateBuffer(size_t size)
{
// notes:
// - this implementation allows reusing previous buffers if they
// are big enough, which reduces the number of allocations.
// - no further attempts to reduce allocations (e.g. by doubling
// the current size) are made; this strategy is enough.
// - Pool etc. cannot be used because files may be huge (larger
// than the address space of 32-bit systems).
// no buffer or the previous one wasn't big enough: reallocate
if(!m_mem || m_capacity < size)
{
m_mem.reset((u8*)page_aligned_alloc(size), PageAlignedDeleter<u8>(size));
m_capacity = size;
}
SetBuffer(m_mem.get(), size);
}
bool OutputBufferManager::IsAllowableBuffer(u8* buffer, size_t size)
{
// none yet established
if(m_buffer == 0 && m_size == 0)
return true;
// same as last time (happens with temp buffers)
if(m_buffer == buffer && m_size == size)
return true;
// located after the last buffer (note: not necessarily after
// the entire buffer; a lack of input can cause the output buffer
// to only partially be used before the next call.)
if((unsigned)(buffer - m_buffer) <= m_size)
return true;
return false;
}
//-----------------------------------------------------------------------------
Stream::Stream(const PICodec& codec)
: m_codec(codec)
, m_inConsumed(0), m_outProduced(0)
{
}
void Stream::AllocateOutputBuffer(size_t outSizeMax)
{
m_outputBufferManager.AllocateBuffer(outSizeMax);
}
void Stream::SetOutputBuffer(u8* out, size_t outSize)
{
m_outputBufferManager.SetBuffer(out, outSize);
}
LibError Stream::Feed(const u8* in, size_t inSize)
{
if(m_outProduced == m_outputBufferManager.Size()) // output buffer full; must not call Process
return INFO::OK;
size_t inConsumed, outProduced;
u8* const out = m_outputBufferManager.Buffer() + m_outProduced;
const size_t outSize = m_outputBufferManager.Size() - m_outProduced;
RETURN_ERR(m_codec->Process(in, inSize, out, outSize, inConsumed, outProduced));
m_inConsumed += inConsumed;
m_outProduced += outProduced;
return INFO::CB_CONTINUE;
}
LibError Stream::Finish()
{
size_t outProduced;
RETURN_ERR(m_codec->Finish(m_checksum, outProduced));
m_outProduced += outProduced;
return INFO::OK;
}
LibError FeedStream(uintptr_t cbData, const u8* in, size_t inSize)
{
// TIMER_ACCRUE(tc_stream);
Stream& stream = *(Stream*)cbData;
return stream.Feed(in, inSize);
}
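// usage sketch (not part of this file): inflate a compressed block whose
// uncompressed size is already known (e.g. from an archive's file header).
// assumes CreateDecompressor_ZLibDeflate from codec_zlib.h.
static LibError DecompressBlock(const u8* compressed, size_t csize, u8* out, size_t usize, u32& checksum)
{
	Stream stream(CreateDecompressor_ZLibDeflate());
	stream.SetOutputBuffer(out, usize);
	const LibError err = stream.Feed(compressed, csize);
	if(err < 0)	// INFO::OK and INFO::CB_CONTINUE both indicate success
		return err;
	RETURN_ERR(stream.Finish());
	debug_assert(stream.OutSize() == usize);
	checksum = stream.Checksum();
	return INFO::OK;
}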


@ -1,99 +1,99 @@
/**
* =========================================================================
 * File : stream.h
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_STREAM
#define INCLUDED_STREAM
#include "codec.h"
// note: this is similar in function to std::vector, but we don't need
// iterators etc. and would prefer to avoid initializing each byte.
class OutputBufferManager
{
public:
OutputBufferManager();
void Reset();
void SetBuffer(u8* buffer, size_t size);
/**
* allocate a new output buffer.
*
* @param size [bytes] to allocate.
*
* notes:
* - if a buffer had previously been allocated and is large enough,
* it is reused (this reduces the number of allocations).
* - this class manages the lifetime of the buffer.
**/
void AllocateBuffer(size_t size);
u8* Buffer() const
{
return m_buffer;
}
size_t Size() const
{
return m_size;
}
private:
bool IsAllowableBuffer(u8* buffer, size_t size);
u8* m_buffer;
size_t m_size;
shared_ptr<u8> m_mem;
// size of m_mem. allows reusing previously allocated buffers
// (user-specified buffers can't be reused because we have no control
// over their lifetime)
size_t m_capacity;
};
class Stream
{
public:
Stream(const PICodec& codec);
void SetOutputBuffer(u8* out, size_t outSize);
void AllocateOutputBuffer(size_t outSizeMax);
/**
* 'feed' the codec with a data block.
**/
LibError Feed(const u8* in, size_t inSize);
LibError Finish();
size_t OutSize() const
{
return m_outProduced;
}
u32 Checksum() const
{
return m_checksum;
}
private:
PICodec m_codec;
OutputBufferManager m_outputBufferManager;
size_t m_inConsumed;
size_t m_outProduced;
u32 m_checksum;
};
extern LibError FeedStream(uintptr_t cbData, const u8* in, size_t inSize);
#endif // #ifndef INCLUDED_STREAM
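A minimal usage sketch of the API above (not part of this changeset; the codec factory name, include path and sizes are illustrative assumptions): allocate or set an output buffer, Feed the input, then Finish to obtain the checksum. FeedStream performs the same Feed call when driven as an IO callback with cbData = (uintptr_t)&stream.
// sketch only: CreateCodec_ZLibDecompressor is a hypothetical factory from codec.h;
// u8/LibError/RETURN_ERR come in via the project's precompiled header, as in the real sources.
#include "stream.h"	// adjust path to wherever stream.h lives
#include "codec.h"
extern PICodec CreateCodec_ZLibDecompressor();	// assumption, not a name from this diff
static LibError DecompressBlob(const u8* compressed, size_t csize, size_t usizeMax)
{
	Stream stream(CreateCodec_ZLibDecompressor());
	stream.AllocateOutputBuffer(usizeMax);	// reuses the previous buffer if it is big enough
	// an IO layer would call FeedStream((uintptr_t)&stream, block, blockSize) per block;
	// here the whole input is fed in one go.
	const LibError ret = stream.Feed(compressed, csize);
	if(ret < 0)	// INFO::CB_CONTINUE / INFO::OK are informational, not errors
		return ret;
	RETURN_ERR(stream.Finish());
	debug_printf("output: %u bytes, checksum 0x%x\n", (unsigned)stream.OutSize(), stream.Checksum());
	return INFO::OK;
}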


@ -1,6 +1,6 @@
#include "precompiled.h"
#include "file_loader.h"
/*virtual*/ IFileLoader::~IFileLoader()
{
}
#include "precompiled.h"
#include "file_loader.h"
/*virtual*/ IFileLoader::~IFileLoader()
{
}


@ -1,16 +1,16 @@
#ifndef INCLUDED_FILE_LOADER
#define INCLUDED_FILE_LOADER
struct IFileLoader
{
virtual ~IFileLoader();
virtual size_t Precedence() const = 0;
virtual char LocationCode() const = 0;
virtual LibError Load(const std::string& name, const shared_ptr<u8>& buf, size_t size) const = 0;
};
typedef shared_ptr<IFileLoader> PIFileLoader;
#endif // #ifndef INCLUDED_FILE_LOADER
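For illustration only, a minimal IFileLoader implementation (not from this changeset; the class and its location code are made up) that shows the contract: Load copies exactly `size` bytes into the caller-supplied buffer and never allocates it.
// sketch only: a loader that serves a single in-memory blob.
// shared_ptr/LibError/UNUSED/WARN_RETURN come from the library headers pulled in by precompiled.h.
#include "file_loader.h"
#include <cstring>	// memcpy
class MemoryLoader : public IFileLoader
{
public:
	MemoryLoader(const u8* data, size_t size) : m_data(data), m_size(size) {}
	virtual size_t Precedence() const { return 1u; }
	virtual char LocationCode() const { return 'M'; }	// arbitrary code chosen for "memory"
	virtual LibError Load(const std::string& UNUSED(name), const shared_ptr<u8>& buf, size_t size) const
	{
		if(size > m_size)
			WARN_RETURN(ERR::IO);	// ERR::IO as associated in file.cpp later in this diff
		memcpy(buf.get(), m_data, size);
		return INFO::OK;
	}
private:
	const u8* m_data;
	size_t m_size;
};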


@ -1,328 +1,328 @@
/**
* =========================================================================
* File : file_stats.cpp
* Project : 0 A.D.
* Description : gathers statistics from all file modules.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "file_stats.h"
#include <set>
#include "lib/timer.h"
// vfs
static size_t vfs_files;
static size_t vfs_size_total;
static double vfs_init_elapsed_time;
// file
static size_t unique_names;
static size_t unique_name_len_total;
static size_t open_files_cur, open_files_max; // total = opened_files.size()
// file_buf
static size_t extant_bufs_cur, extant_bufs_max, extant_bufs_total;
static double buf_size_total, buf_aligned_size_total;
// file_io
static size_t user_ios;
static double user_io_size_total;
static double io_actual_size_total[FI_MAX_IDX][2];
static double io_elapsed_time[FI_MAX_IDX][2];
static double io_process_time_total;
static size_t io_seeks;
// file_cache
static size_t cache_count[2];
static double cache_size_total[2];
static size_t conflict_misses;
//static double conflict_miss_size_total; // JW: currently not used nor computed
static size_t block_cache_count[2];
// archive builder
static size_t ab_connection_attempts; // total number of trace entries
static size_t ab_repeated_connections; // how many of these were not unique
// convenience functions for measuring elapsed time in an interval.
// by exposing start/finish calls, we avoid having callers query
// timestamps when stats are disabled.
static double start_time;
static void timer_start(double* start_time_storage = &start_time)
{
// make sure no measurement is currently active
// (since start_time is shared static storage)
debug_assert(*start_time_storage == 0.0);
*start_time_storage = timer_Time();
}
static double timer_reset(double* start_time_storage = &start_time)
{
double elapsed = timer_Time() - *start_time_storage;
*start_time_storage = 0.0;
return elapsed;
}
//-----------------------------------------------------------------------------
//
// vfs
//
void stats_vfs_file_add(size_t file_size)
{
vfs_files++;
vfs_size_total += file_size;
}
void stats_vfs_file_remove(size_t file_size)
{
vfs_files--;
vfs_size_total -= file_size;
}
// stats_vfs_init_* are currently unused
void stats_vfs_init_start()
{
timer_start();
}
void stats_vfs_init_finish()
{
vfs_init_elapsed_time += timer_reset();
}
//
// file
//
void stats_unique_name(size_t name_len)
{
unique_names++;
unique_name_len_total += name_len;
}
void stats_open()
{
open_files_cur++;
open_files_max = std::max(open_files_max, open_files_cur);
// could also use a set to determine unique files that have been opened
}
void stats_close()
{
debug_assert(open_files_cur > 0);
open_files_cur--;
}
//
// file_buf
//
void stats_buf_alloc(size_t size, size_t alignedSize)
{
extant_bufs_cur++;
extant_bufs_max = std::max(extant_bufs_max, extant_bufs_cur);
extant_bufs_total++;
buf_size_total += size;
buf_aligned_size_total += alignedSize;
}
void stats_buf_free()
{
debug_assert(extant_bufs_cur > 0);
extant_bufs_cur--;
}
void stats_buf_ref()
{
extant_bufs_cur++;
}
//
// file_io
//
void stats_io_user_request(size_t user_size)
{
user_ios++;
user_io_size_total += user_size;
}
ScopedIoMonitor::ScopedIoMonitor()
{
m_startTime = 0.0;
timer_start(&m_startTime);
}
ScopedIoMonitor::~ScopedIoMonitor()
{
// note: we can only bill IOs that have succeeded :S
timer_reset(&m_startTime);
}
void ScopedIoMonitor::NotifyOfSuccess(FileIOImplentation fi, char mode, size_t size)
{
debug_assert(fi < FI_MAX_IDX);
debug_assert(mode == 'r' || mode == 'w');
const FileOp op = (mode == 'r')? FO_READ : FO_WRITE;
io_actual_size_total[fi][op] += size;
io_elapsed_time[fi][op] += timer_reset(&m_startTime);
}
void stats_io_check_seek(BlockId& blockId)
{
static BlockId lastBlockId;
if(blockId != lastBlockId)
io_seeks++;
lastBlockId = blockId;
}
void stats_cb_start()
{
timer_start();
}
void stats_cb_finish()
{
io_process_time_total += timer_reset();
}
//
// file_cache
//
void stats_cache(CacheRet cr, size_t size)
{
debug_assert(cr == CR_HIT || cr == CR_MISS);
#if 0
if(cr == CR_MISS)
{
PairIB ret = ever_cached_files.insert(atom_fn);
if(!ret.second) // was already cached once
{
conflict_miss_size_total += size;
conflict_misses++;
}
}
#endif
cache_count[cr]++;
cache_size_total[cr] += size;
}
void stats_block_cache(CacheRet cr)
{
debug_assert(cr == CR_HIT || cr == CR_MISS);
block_cache_count[cr]++;
}
//
// archive builder
//
void stats_ab_connection(bool already_exists)
{
ab_connection_attempts++;
if(already_exists)
ab_repeated_connections++;
}
//-----------------------------------------------------------------------------
template<typename T> int percent(T num, T divisor)
{
if(!divisor)
return 0;
return (int)(100*num / divisor);
}
void file_stats_dump()
{
if(!debug_filter_allows("FILE_STATS|"))
return;
const double KB = 1e3; const double MB = 1e6; const double ms = 1e-3;
debug_printf("--------------------------------------------------------------------------------\n");
debug_printf("File statistics:\n");
// note: we split the reports into several debug_printfs for clarity;
// this is necessary anyway due to the fixed-size buffer.
debug_printf(
"\nvfs:\n"
"Total files: %u (%g MB)\n"
"Init/mount time: %g ms\n",
vfs_files, vfs_size_total/MB,
vfs_init_elapsed_time/ms
);
debug_printf(
"\nfile:\n"
"Total names: %u (%u KB)\n"
"Max. concurrent: %u; leaked: %u.\n",
unique_names, unique_name_len_total/1000,
open_files_max, open_files_cur
);
debug_printf(
"\nfile_buf:\n"
"Total buffers used: %u (%g MB)\n"
"Max concurrent: %u; leaked: %u\n"
"Internal fragmentation: %d%%\n",
extant_bufs_total, buf_size_total/MB,
extant_bufs_max, extant_bufs_cur,
percent(buf_aligned_size_total-buf_size_total, buf_size_total)
);
debug_printf(
"\nfile_io:\n"
"Total user load requests: %u (%g MB)\n"
"IO thoughput [MB/s; 0=never happened]:\n"
" lowio: R=%.3g, W=%.3g\n"
" aio: R=%.3g, W=%.3g\n"
"Average size = %g KB; seeks: %u; total callback time: %g ms\n"
"Total data actually read from disk = %g MB\n",
user_ios, user_io_size_total/MB,
#define THROUGHPUT(impl, op) (io_elapsed_time[impl][op] == 0.0)? 0.0 : (io_actual_size_total[impl][op] / io_elapsed_time[impl][op] / MB)
THROUGHPUT(FI_LOWIO, FO_READ), THROUGHPUT(FI_LOWIO, FO_WRITE),
THROUGHPUT(FI_AIO , FO_READ), THROUGHPUT(FI_AIO , FO_WRITE),
user_io_size_total/user_ios/KB, io_seeks, io_process_time_total/ms,
(io_actual_size_total[FI_LOWIO][FO_READ]+io_actual_size_total[FI_AIO][FO_READ])/MB
);
debug_printf(
"\nfile_cache:\n"
"Hits: %u (%g MB); misses %u (%g MB); ratio: %u%%\n"
"Percent of requested bytes satisfied by cache: %u%%; non-compulsory misses: %u (%u%% of misses)\n"
"Block hits: %u; misses: %u; ratio: %u%%\n",
cache_count[CR_HIT], cache_size_total[CR_HIT]/MB, cache_count[CR_MISS], cache_size_total[CR_MISS]/MB, percent(cache_count[CR_HIT], cache_count[CR_HIT]+cache_count[CR_MISS]),
percent(cache_size_total[CR_HIT], cache_size_total[CR_HIT]+cache_size_total[CR_MISS]), conflict_misses, percent(conflict_misses, cache_count[CR_MISS]),
block_cache_count[CR_HIT], block_cache_count[CR_MISS], percent(block_cache_count[CR_HIT], block_cache_count[CR_HIT]+block_cache_count[CR_MISS])
);
debug_printf(
"\nvfs_optimizer:\n"
"Total trace entries: %u; repeated connections: %u; unique files: %u\n",
ab_connection_attempts, ab_repeated_connections, ab_connection_attempts-ab_repeated_connections
);
}


@ -1,105 +1,105 @@
/**
* =========================================================================
* File : file_stats.h
* Project : 0 A.D.
* Description : gathers statistics from all file modules.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILE_STATS
#define INCLUDED_FILE_STATS
#define FILE_STATS_ENABLED 1
enum FileIOImplentation { FI_LOWIO, FI_AIO, FI_BCACHE, FI_MAX_IDX };
enum FileOp { FO_READ, FO_WRITE };
enum CacheRet { CR_HIT, CR_MISS };
#include "lib/file/io/block_cache.h" // BlockId
#if FILE_STATS_ENABLED
// vfs
extern void stats_vfs_file_add(size_t file_size);
extern void stats_vfs_file_remove(size_t file_size);
extern void stats_vfs_init_start();
extern void stats_vfs_init_finish();
// file
// currently not called because string_pool is now in lib/allocators
extern void stats_unique_name(size_t name_len);
extern void stats_open();
extern void stats_close();
// file_buf
extern void stats_buf_alloc(size_t size, size_t alignedSize);
extern void stats_buf_free();
extern void stats_buf_ref();
// file_io
extern void stats_io_user_request(size_t user_size);
// this is used to measure effective throughput for the two
// synchronous IO variants.
// note: measuring the actual aio throughput more precisely by instrumenting
// issue/wait doesn't work because IOManager's decompression may cause us to
// miss the exact end of IO, thus throwing off measurements.
class ScopedIoMonitor
{
public:
ScopedIoMonitor();
~ScopedIoMonitor();
void NotifyOfSuccess(FileIOImplentation fi, char mode, size_t size);
private:
double m_startTime;
};
extern void stats_io_check_seek(BlockId& blockId);
extern void stats_cb_start();
extern void stats_cb_finish();
// file_cache
extern void stats_cache(CacheRet cr, size_t size);
extern void stats_block_cache(CacheRet cr);
// archive builder
extern void stats_ab_connection(bool already_exists);
extern void file_stats_dump();
#else
#define stats_vfs_file_add(file_size)
#define stats_vfs_file_remove(file_size)
#define stats_vfs_init_start()
#define stats_vfs_init_finish()
#define stats_unique_name(name_len)
#define stats_open()
#define stats_close()
#define stats_buf_alloc(size, alignedSize)
#define stats_buf_free()
#define stats_buf_ref()
#define stats_io_user_request(user_size)
class ScopedIoMonitor
{
public:
ScopedIoMonitor() {}
~ScopedIoMonitor() {}
void NotifyOfSuccess(FileIOImplentation fi, char mode, size_t size) {}
};
#define stats_io_check_seek(blockId)
#define stats_cb_start()
#define stats_cb_finish()
#define stats_cache(cr, size)
#define stats_block_cache(cr)
#define stats_ab_connection(already_exists)
#define file_stats_dump()
#endif
#endif // #ifndef INCLUDED_FILE_STATS
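A sketch of how an IO routine is meant to use ScopedIoMonitor (it mirrors File_Posix::IO later in this diff; the read() call and parameters here are illustrative): construct the monitor before issuing the IO and call NotifyOfSuccess only if the transfer succeeded, so failed IOs are never billed.
// sketch only: time a synchronous read and credit it to the lowio statistics.
// u8/LibError/WARN_RETURN come in via the project's precompiled header.
#include "lib/file/common/file_stats.h"
#include <unistd.h>	// read
static LibError TimedRead(int fd, u8* buf, size_t size)
{
	ScopedIoMonitor monitor;	// starts the timer
	const ssize_t bytes = read(fd, buf, size);
	if(bytes < 0 || (size_t)bytes != size)
		WARN_RETURN(ERR::IO);	// monitor's dtor discards the measurement
	monitor.NotifyOfSuccess(FI_LOWIO, 'r', size);	// adds size and elapsed time to the totals
	return INFO::OK;
}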


@ -1,67 +1,67 @@
#include "precompiled.h"
#include "real_directory.h"
#include "lib/path_util.h"
#include "lib/file/file.h"
#include "lib/file/io/io.h"
RealDirectory::RealDirectory(const Path& path, size_t priority, size_t flags)
: m_path(path), m_priority(priority), m_flags(flags)
{
}
/*virtual*/ size_t RealDirectory::Precedence() const
{
return 1u;
}
/*virtual*/ char RealDirectory::LocationCode() const
{
return 'F';
}
/*virtual*/ LibError RealDirectory::Load(const std::string& name, const shared_ptr<u8>& buf, size_t size) const
{
PIFile file = CreateFile_Posix();
RETURN_ERR(file->Open(m_path/name, 'r'));
RETURN_ERR(io_ReadAligned(file, 0, buf.get(), size));
return INFO::OK;
}
LibError RealDirectory::Store(const std::string& name, const shared_ptr<u8>& fileContents, size_t size)
{
const Path pathname(m_path/name);
{
PIFile file = CreateFile_Posix();
RETURN_ERR(file->Open(pathname, 'w'));
RETURN_ERR(io_WriteAligned(file, 0, fileContents.get(), size));
}
// io_WriteAligned pads the file; we need to truncate it to the actual
// length. ftruncate can't be used because Windows' FILE_FLAG_NO_BUFFERING
// only allows resizing to sector boundaries, so the file must first
// be closed.
truncate(pathname.external_file_string().c_str(), (off_t)size);
return INFO::OK;
}
void RealDirectory::Watch()
{
//m_watch = CreateWatch(Path().external_file_string().c_str());
}
PRealDirectory CreateRealSubdirectory(const PRealDirectory& realDirectory, const std::string& subdirectoryName)
{
const Path path(realDirectory->GetPath()/subdirectoryName);
return PRealDirectory(new RealDirectory(path, realDirectory->Priority(), realDirectory->Flags()));
}
#include "precompiled.h"
#include "real_directory.h"
#include "lib/path_util.h"
#include "lib/file/file.h"
#include "lib/file/io/io.h"
RealDirectory::RealDirectory(const Path& path, size_t priority, size_t flags)
: m_path(path), m_priority(priority), m_flags(flags)
{
}
/*virtual*/ size_t RealDirectory::Precedence() const
{
return 1u;
}
/*virtual*/ char RealDirectory::LocationCode() const
{
return 'F';
}
/*virtual*/ LibError RealDirectory::Load(const std::string& name, const shared_ptr<u8>& buf, size_t size) const
{
PIFile file = CreateFile_Posix();
RETURN_ERR(file->Open(m_path/name, 'r'));
RETURN_ERR(io_ReadAligned(file, 0, buf.get(), size));
return INFO::OK;
}
LibError RealDirectory::Store(const std::string& name, const shared_ptr<u8>& fileContents, size_t size)
{
const Path pathname(m_path/name);
{
PIFile file = CreateFile_Posix();
RETURN_ERR(file->Open(pathname, 'w'));
RETURN_ERR(io_WriteAligned(file, 0, fileContents.get(), size));
}
// io_WriteAligned pads the file; we need to truncate it to the actual
// length. ftruncate can't be used because Windows' FILE_FLAG_NO_BUFFERING
// only allows resizing to sector boundaries, so the file must first
// be closed.
truncate(pathname.external_file_string().c_str(), (off_t)size);
return INFO::OK;
}
void RealDirectory::Watch()
{
//m_watch = CreateWatch(Path().external_file_string().c_str());
}
PRealDirectory CreateRealSubdirectory(const PRealDirectory& realDirectory, const std::string& subdirectoryName)
{
const Path path(realDirectory->GetPath()/subdirectoryName);
return PRealDirectory(new RealDirectory(path, realDirectory->Priority(), realDirectory->Flags()));
}


@ -1,58 +1,58 @@
#ifndef INCLUDED_REAL_DIRECTORY
#define INCLUDED_REAL_DIRECTORY
#include "file_loader.h"
#include "lib/file/path.h"
class RealDirectory : public IFileLoader
{
public:
RealDirectory(const Path& path, size_t priority, size_t flags);
const Path& GetPath() const
{
return m_path;
}
size_t Priority() const
{
return m_priority;
}
size_t Flags() const
{
return m_flags;
}
// IFileLoader
virtual size_t Precedence() const;
virtual char LocationCode() const;
virtual LibError Load(const std::string& name, const shared_ptr<u8>& buf, size_t size) const;
LibError Store(const std::string& name, const shared_ptr<u8>& fileContents, size_t size);
void Watch();
private:
RealDirectory(const RealDirectory& rhs); // noncopyable due to const members
RealDirectory& operator=(const RealDirectory& rhs);
// note: paths are relative to the root directory, so storing the
// entire path instead of just the portion relative to the mount point
// is not overly wasteful.
const Path m_path;
const size_t m_priority;
const size_t m_flags;
// note: watches are needed in each directory because some APIs
// (e.g. FAM) cannot watch entire trees with one call.
void* m_watch;
};
typedef shared_ptr<RealDirectory> PRealDirectory;
extern PRealDirectory CreateRealSubdirectory(const PRealDirectory& realDirectory, const std::string& subdirectoryName);
#endif // #ifndef INCLUDED_REAL_DIRECTORY
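A load sketch for the class above (not from this changeset): RealDirectory::Load hands the buffer to io_ReadAligned, so the destination should come from the aligned allocator; the page_aligned_alloc / PageAlignedDeleter pairing mirrors OutputBufferManager::AllocateBuffer earlier in this diff. How callers learn the file size is outside this excerpt and simply assumed here.
// sketch only: read <name> from <dir> into a freshly allocated, page-aligned buffer.
#include "real_directory.h"
#include "lib/allocators/allocators.h"	// page_aligned_alloc
#include "lib/allocators/shared_ptr.h"	// PageAlignedDeleter
static LibError LoadIntoBuffer(const PRealDirectory& dir, const std::string& name, size_t size, shared_ptr<u8>& buf)
{
	// note: io_ReadAligned may transfer a rounded-up amount; real callers
	// presumably over-allocate accordingly (assumption).
	buf.reset((u8*)page_aligned_alloc(size), PageAlignedDeleter<u8>(size));
	debug_assert(buf.get() != 0);
	return dir->Load(name, buf, size);
}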


@ -1,194 +1,194 @@
/**
* =========================================================================
* File : trace.cpp
* Project : 0 A.D.
* Description : IO event recording
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "trace.h"
#include "lib/allocators/pool.h"
#include "lib/timer.h" // timer_Time
/*virtual*/ ITrace::~ITrace()
{
}
//-----------------------------------------------------------------------------
TraceEntry::TraceEntry(EAction action, const char* pathname, size_t size)
: m_timestamp((float)timer_Time())
, m_action(action)
, m_pathname(strdup(pathname))
, m_size(size)
{
}
TraceEntry::TraceEntry(const char* text)
{
const char* fmt = "%f: %c \"%" STRINGIZE(PATH_MAX) "[^\"]\" %d\n";
char pathname[PATH_MAX];
char action;
const int fieldsRead = sscanf_s(text, fmt, &m_timestamp, &action, pathname, &m_size);
debug_assert(fieldsRead == 4);
debug_assert(action == 'L' || action == 'S');
m_action = (EAction)action;
m_pathname = strdup(pathname);
}
TraceEntry::~TraceEntry()
{
SAFE_FREE(m_pathname);
}
void TraceEntry::EncodeAsText(char* text, size_t maxTextChars) const
{
const char action = (char)m_action;
sprintf_s(text, maxTextChars, "%#010f: %c \"%s\" %d\n", m_timestamp, action, m_pathname, m_size);
}
//-----------------------------------------------------------------------------
class Trace_Dummy : public ITrace
{
public:
Trace_Dummy(size_t UNUSED(maxSize))
{
}
virtual void NotifyLoad(const char* UNUSED(pathname), size_t UNUSED(size))
{
}
virtual void NotifyStore(const char* UNUSED(pathname), size_t UNUSED(size))
{
}
virtual LibError Load(const char* UNUSED(pathname))
{
return INFO::OK;
}
virtual LibError Store(const char* UNUSED(pathname)) const
{
return INFO::OK;
}
virtual const TraceEntry* Entries() const
{
return 0;
}
virtual size_t NumEntries() const
{
return 0;
}
};
//-----------------------------------------------------------------------------
class Trace : public ITrace
{
public:
Trace(size_t maxSize)
{
(void)pool_create(&m_pool, maxSize, sizeof(TraceEntry));
}
virtual ~Trace()
{
TraceEntry* entries = (TraceEntry*)m_pool.da.base;
for(TraceEntry* entry = entries; entry < entries+NumEntries(); entry++)
entry->~TraceEntry();
(void)pool_destroy(&m_pool);
}
virtual void NotifyLoad(const char* pathname, size_t size)
{
new(Allocate()) TraceEntry(TraceEntry::Load, pathname, size);
}
virtual void NotifyStore(const char* pathname, size_t size)
{
new(Allocate()) TraceEntry(TraceEntry::Store, pathname, size);
}
virtual LibError Load(const char* osPathname)
{
pool_free_all(&m_pool);
errno = 0;
FILE* file = fopen(osPathname, "rt");
if(!file)
return LibError_from_errno();
for(;;)
{
char text[500];
if(!fgets(text, ARRAY_SIZE(text)-1, file))
break;
new(Allocate()) TraceEntry(text);
}
fclose(file);
return INFO::OK;
}
virtual LibError Store(const char* osPathname) const
{
errno = 0;
FILE* file = fopen(osPathname, "at");
if(!file)
return LibError_from_errno();
for(size_t i = 0; i < NumEntries(); i++)
{
char text[500];
Entries()[i].EncodeAsText(text, ARRAY_SIZE(text));
fputs(text, file);
}
(void)fclose(file);
return INFO::OK;
}
virtual const TraceEntry* Entries() const
{
return (const TraceEntry*)m_pool.da.base;
}
virtual size_t NumEntries() const
{
return m_pool.da.pos / sizeof(TraceEntry);
}
private:
void* Allocate()
{
void* p = pool_alloc(&m_pool, 0);
debug_assert(p);
return p;
}
Pool m_pool;
};
PITrace CreateDummyTrace(size_t maxSize)
{
return PITrace(new Trace_Dummy(maxSize));
}
PITrace CreateTrace(size_t maxSize)
{
return PITrace(new Trace(maxSize));
}


@ -1,111 +1,111 @@
/**
* =========================================================================
* File : trace.h
* Project : 0 A.D.
* Description : IO event recording
* =========================================================================
*/
// license: GPL; see lib/license.txt
// traces are useful for determining the optimal ordering of archived files
// and can also serve as a repeatable IO benchmark.
// note: since FileContents are smart pointers, the trace can't easily
// be notified when they are released (relevant for cache simulation).
// we have to assume that users process one file at a time -- as they
// should.
#ifndef INCLUDED_TRACE
#define INCLUDED_TRACE
// stores information about an IO event.
class TraceEntry
{
public:
enum EAction
{
Load = 'L',
Store = 'S'
};
TraceEntry(EAction action, const char* pathname, size_t size);
TraceEntry(const char* textualRepresentation);
~TraceEntry();
EAction Action() const
{
return m_action;
}
const char* Pathname() const
{
return m_pathname;
}
size_t Size() const
{
return m_size;
}
void EncodeAsText(char* text, size_t maxTextChars) const;
private:
// note: keep an eye on the class size because all instances are kept
// in memory (see ITrace)
// time (as returned by timer_Time) after the operation completes.
// rationale: when loading, the VFS doesn't know file size until
// querying the cache or retrieving file information.
float m_timestamp;
EAction m_action;
const char* m_pathname;
// size of file.
// rationale: other applications using this trace format might not
// have access to the VFS and its file information.
size_t m_size;
};
// note: to avoid interfering with measurements, this trace container
// does not cause any IOs (except of course in Load/Store)
struct ITrace
{
virtual ~ITrace();
virtual void NotifyLoad(const char* pathname, size_t size) = 0;
virtual void NotifyStore(const char* pathname, size_t size) = 0;
/**
* store all entries into a file.
*
* @param osPathname native (absolute) pathname
*
* note: the file format is text-based to allow human inspection and
* because storing filename strings in a binary format would be a
* bit awkward.
**/
virtual LibError Store(const char* osPathname) const = 0;
/**
* load entries from file.
*
* @param osPathname native (absolute) pathname
*
* replaces any existing entries.
**/
virtual LibError Load(const char* osPathname) = 0;
virtual const TraceEntry* Entries() const = 0;
virtual size_t NumEntries() const = 0;
};
typedef shared_ptr<ITrace> PITrace;
extern PITrace CreateDummyTrace(size_t maxSize);
extern PITrace CreateTrace(size_t maxSize);
#endif // #ifndef INCLUDED_TRACE
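A usage sketch (not from this changeset; the pathnames and pool size are made up): create a trace, record a few events, then persist it. Each stored entry becomes one text line holding the timestamp, an L/S action code, the quoted pathname and the size, as produced by TraceEntry::EncodeAsText.
// sketch only: record two IO events and append them to a trace file.
#include "trace.h"
static void RecordAndSave()
{
	PITrace trace = CreateTrace(4*1024*1024);	// pool capacity in bytes (value is illustrative)
	trace->NotifyLoad("art/textures/foo.dds", 65536);
	trace->NotifyStore("screenshots/0001.bmp", 1234);
	(void)trace->Store("trace.txt");	// should be a native (absolute) pathname in real use
}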


@ -1,26 +1,26 @@
/* Generated file, do not edit */
#ifndef CXXTEST_RUNNING
#define CXXTEST_RUNNING
#endif
#define _CXXTEST_HAVE_STD
#include "precompiled.h"
#include <cxxtest/TestListener.h>
#include <cxxtest/TestTracker.h>
#include <cxxtest/TestRunner.h>
#include <cxxtest/RealDescriptions.h>
#include "d:\Projects\0ad\svn\source\lib\res\file\tests\test_file_cache.h"
static TestFileCache suite_TestFileCache;
static CxxTest::List Tests_TestFileCache = { 0, 0 };
CxxTest::StaticSuiteDescription suiteDescription_TestFileCache( "d:\\Projects\\0ad\\svn\\source\\lib\\res\\file\\tests\\test_file_cache.h", 6, "TestFileCache", suite_TestFileCache, Tests_TestFileCache );
static class TestDescription_TestFileCache_test_cache_allocator : public CxxTest::RealTestDescription {
public:
TestDescription_TestFileCache_test_cache_allocator() : CxxTest::RealTestDescription( Tests_TestFileCache, suiteDescription_TestFileCache, 10, "test_cache_allocator" ) {}
void runTest() { suite_TestFileCache.test_cache_allocator(); }
} testDescription_TestFileCache_test_cache_allocator;


@ -1,51 +1,51 @@
#include "lib/self_test.h"
#include "lib/res/file/file_cache.h"
#include "lib/rand.h"
class TestFileCache : public CxxTest::TestSuite
{
enum { TEST_ALLOC_TOTAL = 100*1000*1000 };
public:
void test_cache_allocator()
{
// allocated address -> its size
typedef std::map<void*, size_t> AllocMap;
AllocMap allocations;
// put allocator through its paces by allocating several times
// its capacity (this ensures memory is reused)
srand(1);
size_t total_size_used = 0;
while(total_size_used < TEST_ALLOC_TOTAL)
{
size_t size = rand(1, TEST_ALLOC_TOTAL/16);
total_size_used += size;
void* p;
// until successful alloc:
for(;;)
{
p = file_cache_allocator_alloc(size);
if(p)
break;
// out of room - remove a previous allocation
// .. choose one at random
size_t chosen_idx = (size_t)rand(0, (size_t)allocations.size());
AllocMap::iterator it = allocations.begin();
for(; chosen_idx != 0; chosen_idx--)
++it;
file_cache_allocator_free(it->first, it->second);
allocations.erase(it);
}
// must not already have been allocated
TS_ASSERT_EQUALS(allocations.find(p), allocations.end());
allocations[p] = size;
}
// reset to virginal state
// note: even though everything has now been freed, this is
// necessary since the freelists may be a bit scattered already.
file_cache_allocator_reset();
}
};
#include "lib/self_test.h"
#include "lib/res/file/file_cache.h"
#include "lib/rand.h"
class TestFileCache : public CxxTest::TestSuite
{
enum { TEST_ALLOC_TOTAL = 100*1000*1000 };
public:
void test_cache_allocator()
{
// allocated address -> its size
typedef std::map<void*, size_t> AllocMap;
AllocMap allocations;
// put allocator through its paces by allocating several times
// its capacity (this ensures memory is reused)
srand(1);
size_t total_size_used = 0;
while(total_size_used < TEST_ALLOC_TOTAL)
{
size_t size = rand(1, TEST_ALLOC_TOTAL/16);
total_size_used += size;
void* p;
// until successful alloc:
for(;;)
{
p = file_cache_allocator_alloc(size);
if(p)
break;
// out of room - remove a previous allocation
// .. choose one at random
size_t chosen_idx = (size_t)rand(0, (size_t)allocations.size());
AllocMap::iterator it = allocations.begin();
for(; chosen_idx != 0; chosen_idx--)
++it;
file_cache_allocator_free(it->first, it->second);
allocations.erase(it);
}
// must not already have been allocated
TS_ASSERT_EQUALS(allocations.find(p), allocations.end());
allocations[p] = size;
}
// reset to virginal state
// note: even though everything has now been freed, this is
// necessary since the freelists may be a bit scattered already.
file_cache_allocator_reset();
}
};


@ -1,32 +1,32 @@
/* Generated file, do not edit */
#ifndef CXXTEST_RUNNING
#define CXXTEST_RUNNING
#endif
#define _CXXTEST_HAVE_STD
#include "precompiled.h"
#include <cxxtest/TestListener.h>
#include <cxxtest/TestTracker.h>
#include <cxxtest/TestRunner.h>
#include <cxxtest/RealDescriptions.h>
#include "d:\Projects\0ad\svn\source\lib\res\file\tests\test_path.h"
static TestPath suite_TestPath;
static CxxTest::List Tests_TestPath = { 0, 0 };
CxxTest::StaticSuiteDescription suiteDescription_TestPath( "d:\\Projects\\0ad\\svn\\source\\lib\\res\\file\\tests\\test_path.h", 6, "TestPath", suite_TestPath, Tests_TestPath );
static class TestDescription_TestPath_test_conversion : public CxxTest::RealTestDescription {
public:
TestDescription_TestPath_test_conversion() : CxxTest::RealTestDescription( Tests_TestPath, suiteDescription_TestPath, 9, "test_conversion" ) {}
void runTest() { suite_TestPath.test_conversion(); }
} testDescription_TestPath_test_conversion;
static class TestDescription_TestPath_test_atom : public CxxTest::RealTestDescription {
public:
TestDescription_TestPath_test_atom() : CxxTest::RealTestDescription( Tests_TestPath, suiteDescription_TestPath, 33, "test_atom" ) {}
void runTest() { suite_TestPath.test_atom(); }
} testDescription_TestPath_test_atom;


@ -1,59 +1,59 @@
#include "lib/self_test.h"
#include "lib/self_test.h"
#include "lib/res/file/path.h"
class TestPath : public CxxTest::TestSuite
{
public:
void test_conversion()
{
char N_path[PATH_MAX] = {0};
TS_ASSERT_OK(file_make_native_path("a/b/c", N_path));
#if OS_WIN
TS_ASSERT_STR_EQUALS(N_path, "a\\b\\c");
#else
TS_ASSERT_STR_EQUALS(N_path, "a/b/c");
#endif
char P_path[PATH_MAX] = {0};
TS_ASSERT_OK(file_make_portable_path("a\\b\\c", P_path));
#if OS_WIN
TS_ASSERT_STR_EQUALS(P_path, "a/b/c");
#else
// sounds strange, but correct: on non-Windows, \\ isn't
// recognized as a separator and therefore isn't converted.
TS_ASSERT_STR_EQUALS(P_path, "a\\b\\c");
#endif
}
// file_make_full_*_path is left untested (hard to do so)
void test_pool()
{
// .. return same address for same string?
const char* atom1 = path_Pool->UniqueCopy("a/bc/def");
const char* atom2 = path_Pool->UniqueCopy("a/bc/def");
TS_ASSERT_EQUALS(atom1, atom2);
// .. early out (already in pool) check works?
const char* atom3 = path_Pool->UniqueCopy(atom1);
TS_ASSERT_EQUALS(atom3, atom1);
// is it reported as in pool?
TS_ASSERT(path_Pool()->Contains(atom1));
// path_Pool()->RandomString
// see if the atom added above eventually comes out when a
// random one is returned from the pool.
int tries_left;
for(tries_left = 1000; tries_left != 0; tries_left--)
{
const char* random_name = path_Pool->RandomString();
if(random_name == atom1)
break;
}
TS_ASSERT(tries_left != 0);
}
};
#include "lib/self_test.h"
#include "lib/self_test.h"
#include "lib/res/file/path.h"
class TestPath : public CxxTest::TestSuite
{
public:
void test_conversion()
{
char N_path[PATH_MAX] = {0};
TS_ASSERT_OK(file_make_native_path("a/b/c", N_path));
#if OS_WIN
TS_ASSERT_STR_EQUALS(N_path, "a\\b\\c");
#else
TS_ASSERT_STR_EQUALS(N_path, "a/b/c");
#endif
char P_path[PATH_MAX] = {0};
TS_ASSERT_OK(file_make_portable_path("a\\b\\c", P_path));
#if OS_WIN
TS_ASSERT_STR_EQUALS(P_path, "a/b/c");
#else
// sounds strange, but correct: on non-Windows, \\ didn't
// get recognized as separators and weren't converted.
TS_ASSERT_STR_EQUALS(P_path, "a\\b\\c");
#endif
}
// file_make_full_*_path is left untested (hard to do so)
void test_pool()
{
// .. return same address for same string?
const char* atom1 = path_Pool->UniqueCopy("a/bc/def");
const char* atom2 = path_Pool->UniqueCopy("a/bc/def");
TS_ASSERT_EQUALS(atom1, atom2);
// .. early out (already in pool) check works?
const char* atom3 = path_Pool->UniqueCopy(atom1);
TS_ASSERT_EQUALS(atom3, atom1);
// is it reported as in pool?
TS_ASSERT(path_Pool()->Contains(atom1));
// path_Pool()->RandomString
// see if the atom added above eventually comes out when a
// random one is returned from the pool.
int tries_left;
for(tries_left = 1000; tries_left != 0; tries_left--)
{
const char* random_name = path_Pool->RandomString();
if(random_name == atom1)
break;
}
TS_ASSERT(tries_left != 0);
}
};


@ -1,148 +1,148 @@
/**
* =========================================================================
* File : file.cpp
* Project : 0 A.D.
* Description : simple POSIX file wrapper.
* =========================================================================
*/
#include "precompiled.h"
#include "file.h"
#include "lib/file/common/file_stats.h"
#include "lib/file/path.h"
ERROR_ASSOCIATE(ERR::FILE_ACCESS, "Insufficient access rights to open file", EACCES);
ERROR_ASSOCIATE(ERR::IO, "Error during IO", EIO);
class File_Posix : public IFile
{
public:
~File_Posix()
{
Close();
}
virtual LibError Open(const Path& pathname, char mode)
{
debug_assert(mode == 'w' || mode == 'r');
m_pathname = pathname;
m_mode = mode;
int oflag = (mode == 'r')? O_RDONLY : O_WRONLY|O_CREAT|O_TRUNC;
#if OS_WIN
oflag |= O_BINARY_NP;
#endif
m_fd = open(m_pathname.external_file_string().c_str(), oflag, S_IRWXO|S_IRWXU|S_IRWXG);
if(m_fd < 0)
WARN_RETURN(ERR::FILE_ACCESS);
stats_open();
return INFO::OK;
}
virtual LibError Open(const fs::wpath& pathname, char mode)
{
char pathname_c[PATH_MAX];
size_t numConverted = wcstombs(pathname_c, pathname.file_string().c_str(), PATH_MAX);
debug_assert(numConverted < PATH_MAX);
return Open(pathname_c, mode);
}
virtual void Close()
{
m_mode = '\0';
if(m_fd)
{
close(m_fd);
m_fd = 0;
}
}
virtual const Path& Pathname() const
{
return m_pathname;
}
virtual char Mode() const
{
return m_mode;
}
virtual LibError Issue(aiocb& req, off_t alignedOfs, u8* alignedBuf, size_t alignedSize) const
{
memset(&req, 0, sizeof(req));
req.aio_lio_opcode = (m_mode == 'w')? LIO_WRITE : LIO_READ;
req.aio_buf = (volatile void*)alignedBuf;
req.aio_fildes = m_fd;
req.aio_offset = alignedOfs;
req.aio_nbytes = alignedSize;
struct sigevent* sig = 0; // no notification signal
aiocb* const reqs = &req;
if(lio_listio(LIO_NOWAIT, &reqs, 1, sig) != 0)
return LibError_from_errno();
return INFO::OK;
}
virtual LibError WaitUntilComplete(aiocb& req, u8*& alignedBuf, size_t& alignedSize)
{
// wait for transfer to complete.
while(aio_error(&req) == EINPROGRESS)
{
aiocb* const reqs = &req;
aio_suspend(&reqs, 1, (timespec*)0); // wait indefinitely
}
const ssize_t bytesTransferred = aio_return(&req);
if(bytesTransferred == -1) // transfer failed
WARN_RETURN(ERR::IO);
alignedBuf = (u8*)req.aio_buf; // cast from volatile void*
alignedSize = bytesTransferred;
return INFO::OK;
}
virtual LibError Write(off_t ofs, const u8* buf, size_t size) const
{
return IO(ofs, const_cast<u8*>(buf), size);
}
virtual LibError Read(off_t ofs, u8* buf, size_t size) const
{
return IO(ofs, buf, size);
}
private:
LibError IO(off_t ofs, u8* buf, size_t size) const
{
ScopedIoMonitor monitor;
lseek(m_fd, ofs, SEEK_SET);
errno = 0;
const ssize_t ret = (m_mode == 'w')? write(m_fd, buf, size) : read(m_fd, buf, size);
if(ret < 0)
return LibError_from_errno();
const size_t totalTransferred = (size_t)ret;
if(totalTransferred != size)
WARN_RETURN(ERR::IO);
monitor.NotifyOfSuccess(FI_LOWIO, m_mode, totalTransferred);
return INFO::OK;
}
Path m_pathname;
char m_mode;
int m_fd;
};
PIFile CreateFile_Posix()
{
return PIFile(new File_Posix);
}
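
The Issue/WaitUntilComplete pair above is the asynchronous half of this wrapper. The snippet below is an editor's sketch, not part of the commit, of how a caller might drive it; it assumes the offset, size and buffer already satisfy the alignment rules from io_align.h (e.g. a buffer obtained from io_Allocate).

static LibError ReadBlockAsync(const PIFile& file, off_t alignedOfs, u8* alignedBuf, size_t alignedSize)
{
    aiocb req;
    RETURN_ERR(file->Issue(req, alignedOfs, alignedBuf, alignedSize));
    // (unrelated work could overlap with the disk transfer here)
    u8* buf; size_t bytesTransferred;
    RETURN_ERR(file->WaitUntilComplete(req, buf, bytesTransferred)); // buf == alignedBuf
    debug_assert(bytesTransferred <= alignedSize);
    return INFO::OK;
}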

View File

@ -1,42 +1,42 @@
/**
* =========================================================================
* File : file.h
* Project : 0 A.D.
* Description : simple POSIX file wrapper.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILE
#define INCLUDED_FILE
#include "path.h"
namespace ERR
{
const LibError FILE_ACCESS = -110200;
const LibError IO = -110201;
}
struct IFile
{
virtual LibError Open(const Path& pathname, char mode) = 0;
virtual LibError Open(const fs::wpath& pathname, char mode) = 0;
virtual void Close() = 0;
virtual const Path& Pathname() const = 0;
virtual char Mode() const = 0;
virtual LibError Issue(aiocb& req, off_t alignedOfs, u8* alignedBuf, size_t alignedSize) const = 0;
virtual LibError WaitUntilComplete(aiocb& req, u8*& alignedBuf, size_t& alignedSize) = 0;
virtual LibError Read(off_t ofs, u8* buf, size_t size) const = 0;
virtual LibError Write(off_t ofs, const u8* buf, size_t size) const = 0;
};
typedef shared_ptr<IFile> PIFile;
LIB_API PIFile CreateFile_Posix();
#endif // #ifndef INCLUDED_FILE
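
As a quick illustration of the interface (an editor's sketch, not from the diff): synchronous access only needs Open, Read/Write and Close. It assumes Path is constructible from a narrow string, which the fs::wpath overload in file.cpp already relies on.

#include "lib/file/file.h"  // include path as used by io.cpp

static LibError DumpHeader(const char* pathname)
{
    PIFile file = CreateFile_Posix();
    RETURN_ERR(file->Open(Path(pathname), 'r'));

    u8 buf[64];
    // synchronous read at offset 0; assumes the file holds at least sizeof(buf) bytes
    RETURN_ERR(file->Read(0, buf, sizeof(buf)));

    file->Close(); // optional; File_Posix's destructor also calls Close()
    return INFO::OK;
}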

View File

@ -1,12 +1,12 @@
/**
* =========================================================================
* File : file_system.cpp
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "file_system.h"

View File

@ -1,50 +1,50 @@
/**
* =========================================================================
* File : file_system.h
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILE_SYSTEM
#define INCLUDED_FILE_SYSTEM
class FileInfo
{
public:
FileInfo()
{
}
FileInfo(const std::string& name, off_t size, time_t mtime)
: m_name(name), m_size(size), m_mtime(mtime)
{
}
const std::string& Name() const
{
return m_name;
}
off_t Size() const
{
return m_size;
}
time_t MTime() const
{
return m_mtime;
}
private:
std::string m_name;
off_t m_size;
time_t m_mtime;
};
typedef std::vector<FileInfo> FileInfos;
typedef std::vector<std::string> DirectoryNames;
#endif // #ifndef INCLUDED_FILE_SYSTEM

View File

@ -1,141 +1,141 @@
/**
* =========================================================================
* File : file_system_posix.cpp
* Project : 0 A.D.
* Description : file layer on top of POSIX. avoids the need for
* : absolute paths and provides fast I/O.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "file_system_posix.h"
#include <vector>
#include <algorithm>
#include <string>
#include "lib/path_util.h"
#include "lib/file/path.h"
#include "lib/posix/posix_filesystem.h"
struct DirDeleter
{
void operator()(DIR* osDir) const
{
const int ret = closedir(osDir);
debug_assert(ret == 0);
}
};
// is name "." or ".."?
static bool IsDummyDirectory(const char* name)
{
if(name[0] != '.')
return false;
return (name[1] == '\0' || (name[1] == '.' && name[2] == '\0'));
}
/*virtual*/ LibError FileSystem_Posix::GetDirectoryEntries(const Path& path, FileInfos* files, DirectoryNames* subdirectoryNames) const
{
// open directory
errno = 0;
DIR* pDir = opendir(path.external_file_string().c_str());
if(!pDir)
return LibError_from_errno(false);
shared_ptr<DIR> osDir(pDir, DirDeleter());
for(;;)
{
errno = 0;
struct dirent* osEnt = readdir(osDir.get());
if(!osEnt)
{
// no error, just no more entries to return
if(!errno)
return INFO::OK;
return LibError_from_errno();
}
const char* name = osEnt->d_name;
RETURN_ERR(path_component_validate(name));
// get file information (mode, size, mtime)
struct stat s;
#if OS_WIN
// .. wposix readdir has enough information to return dirent
// status directly (much faster than calling stat).
RETURN_ERR(readdir_stat_np(osDir.get(), &s));
#else
// .. call regular stat().
errno = 0;
const Path pathname(path/name);
if(stat(pathname.external_directory_string().c_str(), &s) != 0)
return LibError_from_errno();
#endif
if(files && S_ISREG(s.st_mode))
files->push_back(FileInfo(name, s.st_size, s.st_mtime));
else if(subdirectoryNames && S_ISDIR(s.st_mode) && !IsDummyDirectory(name))
subdirectoryNames->push_back(name);
}
}
LibError FileSystem_Posix::GetFileInfo(const Path& pathname, FileInfo* pfileInfo) const
{
char osPathname[PATH_MAX];
path_copy(osPathname, pathname.external_directory_string().c_str());
// if path ends in slash, remove it (required by stat)
char* last_char = osPathname+strlen(osPathname)-1;
if(path_is_dir_sep(*last_char))
*last_char = '\0';
errno = 0;
struct stat s;
memset(&s, 0, sizeof(s));
if(stat(osPathname, &s) != 0)
return LibError_from_errno();
const char* name = path_name_only(osPathname);
*pfileInfo = FileInfo(name, s.st_size, s.st_mtime);
return INFO::OK;
}
LibError FileSystem_Posix::DeleteDirectory(const Path& path)
{
// note: we have to recursively empty the directory before it can
// be deleted (required by Windows and POSIX rmdir()).
FileInfos files; DirectoryNames subdirectoryNames;
RETURN_ERR(GetDirectoryEntries(path, &files, &subdirectoryNames));
// delete files
for(size_t i = 0; i < files.size(); i++)
{
const Path pathname(path/files[i].Name());
errno = 0;
if(unlink(pathname.external_file_string().c_str()) != 0)
return LibError_from_errno();
}
// recurse over subdirectoryNames
for(size_t i = 0; i < subdirectoryNames.size(); i++)
RETURN_ERR(DeleteDirectory(path/subdirectoryNames[i]));
errno = 0;
if(rmdir(path.external_directory_string().c_str()) != 0)
return LibError_from_errno();
return INFO::OK;
}
PIFileSystem_Posix CreateFileSystem_Posix()
{
return PIFileSystem_Posix(new FileSystem_Posix);
}

View File

@ -1,35 +1,35 @@
/**
* =========================================================================
* File : file_system_posix.h
* Project : 0 A.D.
* Description : file layer on top of POSIX. avoids the need for
* : absolute paths and provides fast I/O.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILE_SYSTEM_POSIX
#define INCLUDED_FILE_SYSTEM_POSIX
#include "lib/file/path.h"
#include "lib/file/file_system.h"
// jw 2007-12-20: we'd love to replace this with boost::filesystem,
// but basic_directory_iterator does not yet cache file_size and
// last_write_time in file_status. (they each entail a stat() call,
// which is unacceptably slow.)
struct FileSystem_Posix
{
virtual LibError GetFileInfo(const Path& pathname, FileInfo* fileInfo) const;
virtual LibError GetDirectoryEntries(const Path& path, FileInfos* files, DirectoryNames* subdirectoryNames) const;
LibError DeleteDirectory(const Path& dirPath);
};
typedef shared_ptr<FileSystem_Posix> PIFileSystem_Posix;
LIB_API PIFileSystem_Posix CreateFileSystem_Posix();
#endif // #ifndef INCLUDED_FILE_SYSTEM_POSIX
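
A short usage sketch (editor's addition): enumerate one native directory. The include path and the printf output are illustrative assumptions; the API itself is the one declared above.

#include <cstdio>
#include "lib/file/file_system_posix.h"  // assumed include path for this header

static LibError PrintDirectory(const Path& dir)
{
    PIFileSystem_Posix fs = CreateFileSystem_Posix();

    FileInfos files; DirectoryNames subdirectoryNames;
    RETURN_ERR(fs->GetDirectoryEntries(dir, &files, &subdirectoryNames));

    for(size_t i = 0; i < files.size(); i++)
        printf("%10ld  %s\n", (long)files[i].Size(), files[i].Name().c_str());  // (long) cast for brevity
    for(size_t i = 0; i < subdirectoryNames.size(); i++)
        printf("     <dir>  %s\n", subdirectoryNames[i].c_str());
    return INFO::OK;
}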

View File

@ -1,142 +1,142 @@
/**
* =========================================================================
* File : file_system_util.cpp
* Project : 0 A.D.
* Description : helper functions for directory access
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "file_system_util.h"
#include <queue>
#include <cstring>
#include "lib/path_util.h"
#include "lib/regex.h"
LibError fs_GetPathnames(const PIVFS& fs, const VfsPath& path, const char* filter, VfsPaths& pathnames)
{
std::vector<FileInfo> files;
RETURN_ERR(fs->GetDirectoryEntries(path, &files, 0));
pathnames.clear();
pathnames.reserve(files.size());
for(size_t i = 0; i < files.size(); i++)
{
if(match_wildcard(files[i].Name().c_str(), filter))
pathnames.push_back(path/files[i].Name());
}
return INFO::OK;
}
struct FileInfoNameLess : public std::binary_function<const FileInfo, const FileInfo, bool>
{
bool operator()(const FileInfo& fileInfo1, const FileInfo& fileInfo2) const
{
return strcasecmp(fileInfo1.Name().c_str(), fileInfo2.Name().c_str()) < 0;
}
};
void fs_SortFiles(FileInfos& files)
{
std::sort(files.begin(), files.end(), FileInfoNameLess());
}
struct NameLess : public std::binary_function<const std::string, const std::string, bool>
{
bool operator()(const std::string& name1, const std::string& name2) const
{
return strcasecmp(name1.c_str(), name2.c_str()) < 0;
}
};
void fs_SortDirectories(DirectoryNames& directories)
{
std::sort(directories.begin(), directories.end(), NameLess());
}
LibError fs_ForEachFile(const PIVFS& fs, const VfsPath& startPath, FileCallback cb, uintptr_t cbData, const char* pattern, size_t flags)
{
debug_assert(vfs_path_IsDirectory(startPath));
// (declare here to avoid reallocations)
FileInfos files; DirectoryNames subdirectoryNames;
// (a FIFO queue is more efficient than recursion because it uses less
// stack space and avoids seeks due to breadth-first traversal.)
std::queue<VfsPath> pendingDirectories;
pendingDirectories.push(startPath);
while(!pendingDirectories.empty())
{
const VfsPath& path = pendingDirectories.front();
RETURN_ERR(fs->GetDirectoryEntries(path/"/", &files, &subdirectoryNames));
for(size_t i = 0; i < files.size(); i++)
{
const FileInfo fileInfo = files[i];
if(!match_wildcard(fileInfo.Name().c_str(), pattern))
continue;
const VfsPath pathname(path/fileInfo.Name()); // (FileInfo only stores the name)
cb(pathname, fileInfo, cbData);
}
if(!(flags & DIR_RECURSIVE))
break;
for(size_t i = 0; i < subdirectoryNames.size(); i++)
pendingDirectories.push(path/subdirectoryNames[i]);
pendingDirectories.pop();
}
return INFO::OK;
}
void fs_NextNumberedFilename(const PIVFS& fs, const VfsPath& pathnameFormat, size_t& nextNumber, VfsPath& nextPathname)
{
// (first call only:) scan directory and set nextNumber according to
// highest matching filename found. this avoids filling "holes" in
// the number series due to deleted files, which could be confusing.
// example: add 1st and 2nd; [exit] delete 1st; [restart]
// add 3rd -> without this measure it would get number 1, not 3.
if(nextNumber == 0)
{
const std::string nameFormat = pathnameFormat.leaf();
const VfsPath path = pathnameFormat.branch_path()/"/";
size_t maxNumber = 0;
FileInfos files;
fs->GetDirectoryEntries(path, &files, 0);
for(size_t i = 0; i < files.size(); i++)
{
size_t number;
if(sscanf(files[i].Name().c_str(), nameFormat.c_str(), &number) == 1)
maxNumber = std::max(number, maxNumber);
}
nextNumber = maxNumber+1;
}
// now increment number until that file doesn't yet exist.
// this is fairly slow, but typically only happens once due
// to scan loop above. (we still need to provide for looping since
// someone may have added files in the meantime)
// we don't bother with binary search - this isn't a bottleneck.
do
{
char pathnameBuf[PATH_MAX];
snprintf(pathnameBuf, PATH_MAX, pathnameFormat.string().c_str(), nextNumber++);
nextPathname = VfsPath(pathnameBuf);
}
while(fs->GetFileInfo(nextPathname, 0) == INFO::OK);
}

View File

@ -1,67 +1,67 @@
/**
* =========================================================================
* File : file_system_util.h
* Project : 0 A.D.
* Description : helper functions for directory access
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILE_SYSTEM_UTIL
#define INCLUDED_FILE_SYSTEM_UTIL
#include "lib/file/vfs/vfs.h"
extern void fs_SortFiles(FileInfos& files);
extern void fs_SortDirectories(DirectoryNames& directories);
extern LibError fs_GetPathnames(const PIVFS& fs, const VfsPath& path, const char* filter, VfsPaths& pathnames);
/**
* called for files in a directory.
*
* @param pathname full pathname (since FileInfo only gives the name).
* @param fileInfo file information
* @param cbData user-specified context
* @return INFO::CB_CONTINUE on success; any other value will immediately
* be returned to the caller (no more calls will be forthcoming).
*
* CAVEAT: pathname and fileInfo are only valid until the function
* returns!
**/
typedef LibError (*FileCallback)(const VfsPath& pathname, const FileInfo& fileInfo, const uintptr_t cbData);
enum DirFlags
{
DIR_RECURSIVE = 1
};
/**
* call back for each file in a directory tree
*
* @param cb see FileCallback
* @param pattern that file names must match. '*' and '?' wildcards
* are allowed. 0 matches everything.
* @param flags see DirFlags
* @return LibError
**/
extern LibError fs_ForEachFile(const PIVFS& fs, const VfsPath& path, FileCallback cb, uintptr_t cbData, const char* pattern = 0, size_t flags = 0);
/**
* determine the next available pathname with a given format.
* this is useful when creating new files without overwriting the previous
* ones (screenshots are a good example).
*
* @param pathnameFormat format string for the pathname; must contain one
* format specifier for a size_t.
* example: "screenshots/screenshot%04d.png"
* @param nextNumber in: the first number to try; out: the next number.
* if 0, numbers corresponding to existing files are skipped.
* @param nextPathname receives the output.
**/
extern void fs_NextNumberedFilename(const PIVFS& fs, const VfsPath& pathnameFormat, size_t& nextNumber, VfsPath& nextPathname);
#endif // #ifndef INCLUDED_FILE_SYSTEM_UTIL
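
The two entry points documented above are easiest to see in use. The following sketch is the editor's, not from the commit; the PIVFS argument, the "maps/" path and the include path are placeholder assumptions, and VfsPath is assumed to convert from a string literal as it does elsewhere in these files.

#include "lib/file/file_system_util.h"  // assumed include path

static LibError CountFile(const VfsPath&, const FileInfo&, uintptr_t cbData)
{
    ++*(size_t*)cbData;
    return INFO::CB_CONTINUE;  // any other return value aborts fs_ForEachFile
}

static void Example(const PIVFS& vfs)
{
    // count every *.xml below maps/ (DIR_RECURSIVE descends into subdirectories)
    size_t count = 0;
    (void)fs_ForEachFile(vfs, "maps/", CountFile, (uintptr_t)&count, "*.xml", DIR_RECURSIVE);

    // next free screenshot name, e.g. screenshots/screenshot0007.png;
    // nextNumber = 0 requests the "skip existing files" scan described above.
    size_t nextNumber = 0;
    VfsPath nextPathname;
    fs_NextNumberedFilename(vfs, "screenshots/screenshot%04d.png", nextNumber, nextPathname);
}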

View File

@ -1,145 +1,145 @@
/**
* =========================================================================
* File : block_cache.cpp
* Project : 0 A.D.
* Description : cache for aligned I/O blocks.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "block_cache.h"
#include "lib/config2.h" // CONFIG2_CACHE_READ_ONLY
#include "lib/file/common/file_stats.h"
#include "lib/lockfree.h"
#include "lib/allocators/pool.h"
#include "lib/fnv_hash.h"
#include "io_align.h"
//-----------------------------------------------------------------------------
BlockId::BlockId()
: m_id(0)
{
}
BlockId::BlockId(const Path& pathname, off_t ofs)
{
m_id = fnv_hash64(pathname.string().c_str(), pathname.string().length());
const size_t indexBits = 16;
m_id <<= indexBits;
const off_t blockIndex = off_t(ofs / BLOCK_SIZE);
debug_assert(blockIndex < off_t(1) << indexBits);
m_id |= blockIndex;
}
bool BlockId::operator==(const BlockId& rhs) const
{
return m_id == rhs.m_id;
}
bool BlockId::operator!=(const BlockId& rhs) const
{
return !operator==(rhs);
}
//-----------------------------------------------------------------------------
struct Block
{
Block(BlockId id, const shared_ptr<u8>& buf)
{
this->id = id;
this->buf = buf;
}
// block is "valid" and can satisfy Retrieve() requests if a
// (non-default-constructed) ID has been assigned.
BlockId id;
// this block is "in use" if use_count != 1.
shared_ptr<u8> buf;
};
//-----------------------------------------------------------------------------
class BlockCache::Impl
{
public:
Impl(size_t numBlocks)
: m_maxBlocks(numBlocks)
{
}
void Add(BlockId id, const shared_ptr<u8>& buf)
{
if(m_blocks.size() > m_maxBlocks)
{
#if CONFIG2_CACHE_READ_ONLY
mprotect((void*)m_blocks.front().buf.get(), BLOCK_SIZE, PROT_READ);
#endif
m_blocks.pop_front(); // evict oldest block
}
#if CONFIG2_CACHE_READ_ONLY
mprotect((void*)buf.get(), BLOCK_SIZE, PROT_WRITE|PROT_READ);
#endif
m_blocks.push_back(Block(id, buf));
}
bool Retrieve(BlockId id, shared_ptr<u8>& buf)
{
// (linear search is ok since we only expect to manage a few blocks)
for(size_t i = 0; i < m_blocks.size(); i++)
{
Block& block = m_blocks[i];
if(block.id == id)
{
buf = block.buf;
return true;
}
}
return false;
}
void InvalidateAll()
{
// note: don't check whether any references are held etc. because
// this should only be called at the end of the (test) program.
m_blocks.clear();
}
private:
size_t m_maxBlocks;
typedef std::deque<Block> Blocks;
Blocks m_blocks;
};
//-----------------------------------------------------------------------------
BlockCache::BlockCache(size_t numBlocks)
: impl(new Impl(numBlocks))
{
}
void BlockCache::Add(BlockId id, const shared_ptr<u8>& buf)
{
impl->Add(id, buf);
}
bool BlockCache::Retrieve(BlockId id, shared_ptr<u8>& buf)
{
return impl->Retrieve(id, buf);
}
void BlockCache::InvalidateAll()
{
return impl->InvalidateAll();
}

View File

@ -1,89 +1,89 @@
/**
* =========================================================================
* File : block_cache.h
* Project : 0 A.D.
* Description : cache for aligned I/O blocks.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_BLOCK_CACHE
#define INCLUDED_BLOCK_CACHE
#include "lib/file/path.h"
/**
* ID that uniquely identifies a block within a file
**/
class BlockId
{
public:
BlockId();
BlockId(const Path& pathname, off_t ofs);
bool operator==(const BlockId& rhs) const;
bool operator!=(const BlockId& rhs) const;
private:
u64 m_id;
};
/**
* cache of (aligned) file blocks with support for zero-copy IO.
* absorbs the overhead of rounding up archive IOs to the nearest block
* boundaries by keeping the last few blocks in memory.
*
* the interface is somewhat similar to FileCache; see the note there.
*
* not thread-safe (each thread is intended to have its own cache).
**/
class BlockCache
{
public:
/**
* @param numBlocks (the default value is enough to support temp buffers
* and absorb the cost of unaligned reads from archives.)
**/
BlockCache(size_t numBlocks = 16);
/**
* Add a block to the cache.
*
* @param id key that will be used to Retrieve the block.
*
* call this when the block's IO has completed; its data will
* satisfy subsequent Retrieve calls for the same id.
* if CONFIG2_CACHE_READ_ONLY, the memory is made read-only.
**/
void Add(BlockId id, const shared_ptr<u8>& buf);
/**
* Attempt to retrieve a block's contents.
*
* @return whether the block is in cache.
*
* if successful, a shared pointer to the contents is returned.
* they remain valid until all references are removed and the block
* is evicted.
**/
bool Retrieve(BlockId id, shared_ptr<u8>& buf);
/**
* Invalidate the contents of the cache.
*
* this effectively discards the contents of existing blocks
* (more specifically: prevents them from satisfying Retrieve calls
* until a subsequent Add with the same id).
*
* useful for self-tests: multiple independent IO tests run in the same
* process and must not influence each other via the cache.
**/
void InvalidateAll();
private:
class Impl;
shared_ptr<Impl> impl;
};
#endif // #ifndef INCLUDED_BLOCK_CACHE
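
A sketch of the intended Add/Retrieve pattern (editor's addition, not from the commit). It leans on io_Allocate, io_ReadAligned and BLOCK_SIZE from io.h/io_align.h further below; the include paths are assumptions.

#include "lib/file/io/block_cache.h"  // assumed include paths
#include "lib/file/io/io.h"
#include "lib/file/io/io_align.h"

static LibError GetBlock(BlockCache& cache, const PIFile& file, off_t alignedOfs, shared_ptr<u8>& block)
{
    const BlockId id(file->Pathname(), alignedOfs);  // alignedOfs: multiple of BLOCK_SIZE
    if(cache.Retrieve(id, block))
        return INFO::OK;  // hit: reuse the previously added buffer

    // miss: read the whole block, then publish it for later Retrieve calls
    block = io_Allocate(BLOCK_SIZE);
    RETURN_ERR(io_ReadAligned(file, alignedOfs, block.get(), BLOCK_SIZE));
    cache.Add(id, block);
    return INFO::OK;
}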

View File

@ -1,327 +1,327 @@
/**
* =========================================================================
* File : io.cpp
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "io.h"
#include "lib/allocators/allocators.h" // AllocatorChecker
#include "lib/sysdep/cpu.h" // cpu_memcpy
#include "lib/file/file.h"
#include "lib/file/common/file_stats.h"
#include "block_cache.h"
#include "io_align.h"
static const size_t ioDepth = 8;
// the underlying aio implementation likes buffer and offset to be
// sector-aligned; if not, the transfer goes through an align buffer,
// and requires an extra cpu_memcpy.
//
// if the user specifies an unaligned buffer, there's not much we can
// do - we can't assume the buffer contains padding. therefore,
// callers should let us allocate the buffer if possible.
//
// if the offset's misalignment equals the buffer's, only the first and last
// blocks will need to be copied by aio, since we read up to the next block
// boundary.
// otherwise, everything will have to be copied; at least we split
// the read into blocks, so aio's buffer won't have to cover the
// whole file.
// we don't do any caching or alignment here - this is just a thin
// AIO wrapper. rationale:
// - aligning the transfer isn't possible here since we have no control
// over the buffer, i.e. we cannot read more data than requested.
// instead, this is done in io_manager.
// - transfer sizes here are arbitrary (i.e. not block-aligned);
// that means the cache would have to handle this or also split them up
// into blocks, which would duplicate the abovementioned work.
// - if caching here, we'd also have to handle "forwarding" (i.e.
// desired block has been issued but isn't yet complete). again, it
// is easier to let the synchronous io_manager handle this.
// - finally, io_manager knows more about whether the block should be cached
// (e.g. whether another block request will follow), but we don't
// currently make use of this.
//
// disadvantages:
// - streamed data will always be read from disk. that's not a problem,
// because such data (e.g. music, long speech) is unlikely to be used
// again soon.
// - prefetching (issuing the next few blocks from archive/file during
// idle time to satisfy potential future IOs) requires extra buffers;
// this is a bit more complicated than just using the cache as storage.
//-----------------------------------------------------------------------------
// allocator
//-----------------------------------------------------------------------------
#ifndef NDEBUG
static AllocatorChecker allocatorChecker;
#endif
class IoDeleter
{
public:
IoDeleter(size_t paddedSize)
: m_paddedSize(paddedSize)
{
}
void operator()(u8* mem)
{
debug_assert(m_paddedSize != 0);
#ifndef NDEBUG
allocatorChecker.OnDeallocate(mem, m_paddedSize);
#endif
page_aligned_free(mem, m_paddedSize);
m_paddedSize = 0;
}
private:
size_t m_paddedSize;
};
shared_ptr<u8> io_Allocate(size_t size, off_t ofs)
{
debug_assert(size != 0);
const size_t paddedSize = PaddedSize((off_t)size, ofs);
u8* mem = (u8*)page_aligned_alloc(paddedSize);
if(!mem)
throw std::bad_alloc();
#ifndef NDEBUG
allocatorChecker.OnAllocate(mem, paddedSize);
#endif
return shared_ptr<u8>(mem, IoDeleter(paddedSize));
}
//-----------------------------------------------------------------------------
// BlockIo
//-----------------------------------------------------------------------------
class BlockIo
{
public:
LibError Issue(const PIFile& file, off_t alignedOfs, u8* alignedBuf)
{
m_file = file;
m_blockId = BlockId(file->Pathname(), alignedOfs);
if(file->Mode() == 'r' && s_blockCache.Retrieve(m_blockId, m_cachedBlock))
{
stats_block_cache(CR_HIT);
// copy from cache into user buffer
if(alignedBuf)
{
cpu_memcpy(alignedBuf, m_cachedBlock.get(), BLOCK_SIZE);
m_alignedBuf = alignedBuf;
}
// return cached block
else
{
m_alignedBuf = const_cast<u8*>(m_cachedBlock.get());
}
return INFO::OK;
}
else
{
stats_block_cache(CR_MISS);
stats_io_check_seek(m_blockId);
// transfer directly to/from user buffer
if(alignedBuf)
{
m_alignedBuf = alignedBuf;
}
// transfer into newly allocated temporary block
else
{
m_tempBlock = io_Allocate(BLOCK_SIZE);
m_alignedBuf = const_cast<u8*>(m_tempBlock.get());
}
return file->Issue(m_req, alignedOfs, m_alignedBuf, BLOCK_SIZE);
}
}
LibError WaitUntilComplete(const u8*& block, size_t& blockSize)
{
if(m_cachedBlock)
{
block = m_alignedBuf;
blockSize = BLOCK_SIZE;
return INFO::OK;
}
RETURN_ERR(m_file->WaitUntilComplete(m_req, const_cast<u8*&>(block), blockSize));
if(m_tempBlock)
s_blockCache.Add(m_blockId, m_tempBlock);
return INFO::OK;
}
private:
static BlockCache s_blockCache;
PIFile m_file;
BlockId m_blockId;
// the address that WaitUntilComplete will return
// (cached or temporary block, or user buffer)
u8* m_alignedBuf;
shared_ptr<u8> m_cachedBlock;
shared_ptr<u8> m_tempBlock;
aiocb m_req;
};
BlockCache BlockIo::s_blockCache;
//-----------------------------------------------------------------------------
// IoSplitter
//-----------------------------------------------------------------------------
class IoSplitter
{
NONCOPYABLE(IoSplitter);
public:
IoSplitter(off_t ofs, u8* alignedBuf, off_t size)
: m_ofs(ofs), m_alignedBuf(alignedBuf), m_size(size)
, m_totalIssued(0), m_totalTransferred(0)
{
m_alignedOfs = AlignedOffset(ofs);
m_alignedSize = PaddedSize(size, ofs);
m_misalignment = ofs - m_alignedOfs;
}
LibError Run(const PIFile& file, IoCallback cb = 0, uintptr_t cbData = 0)
{
ScopedIoMonitor monitor;
// (issue even if cache hit because blocks must be processed in order)
std::deque<BlockIo> pendingIos;
for(;;)
{
while(pendingIos.size() < ioDepth && m_totalIssued < m_alignedSize)
{
pendingIos.push_back(BlockIo());
const off_t alignedOfs = m_alignedOfs + m_totalIssued;
u8* const alignedBuf = m_alignedBuf? m_alignedBuf+m_totalIssued : 0;
RETURN_ERR(pendingIos.back().Issue(file, alignedOfs, alignedBuf));
m_totalIssued += BLOCK_SIZE;
}
if(pendingIos.empty())
break;
Process(pendingIos.front(), cb, cbData);
pendingIos.pop_front();
}
debug_assert(m_totalIssued >= m_totalTransferred && m_totalTransferred >= m_size);
monitor.NotifyOfSuccess(FI_AIO, file->Mode(), m_totalTransferred);
return INFO::OK;
}
off_t AlignedOfs() const
{
return m_alignedOfs;
}
private:
LibError Process(BlockIo& blockIo, IoCallback cb, uintptr_t cbData) const
{
const u8* block; size_t blockSize;
RETURN_ERR(blockIo.WaitUntilComplete(block, blockSize));
// first block: skip past alignment
if(m_totalTransferred == 0)
{
block += m_misalignment;
blockSize -= m_misalignment;
}
// last block: don't include trailing padding
if(m_totalTransferred + (off_t)blockSize > m_size)
blockSize = m_size - m_totalTransferred;
m_totalTransferred += (off_t)blockSize;
if(cb)
{
stats_cb_start();
LibError ret = cb(cbData, block, blockSize);
stats_cb_finish();
CHECK_ERR(ret);
}
return INFO::OK;
}
off_t m_ofs;
u8* m_alignedBuf;
off_t m_size;
size_t m_misalignment;
off_t m_alignedOfs;
off_t m_alignedSize;
// (useful, raw data: possibly compressed, but doesn't count padding)
mutable off_t m_totalIssued;
mutable off_t m_totalTransferred;
};
LibError io_Scan(const PIFile& file, off_t ofs, off_t size, IoCallback cb, uintptr_t cbData)
{
u8* alignedBuf = 0; // use temporary block buffers
IoSplitter splitter(ofs, alignedBuf, size);
return splitter.Run(file, cb, cbData);
}
LibError io_Read(const PIFile& file, off_t ofs, u8* alignedBuf, size_t size, u8*& data)
{
IoSplitter splitter(ofs, alignedBuf, (off_t)size);
RETURN_ERR(splitter.Run(file));
data = alignedBuf + ofs - splitter.AlignedOfs();
return INFO::OK;
}
LibError io_WriteAligned(const PIFile& file, off_t alignedOfs, const u8* alignedData, size_t size)
{
debug_assert(IsAligned_Offset(alignedOfs));
debug_assert(IsAligned_Data(alignedData));
IoSplitter splitter(alignedOfs, const_cast<u8*>(alignedData), (off_t)size);
return splitter.Run(file);
}
LibError io_ReadAligned(const PIFile& file, off_t alignedOfs, u8* alignedBuf, size_t size)
{
debug_assert(IsAligned_Offset(alignedOfs));
debug_assert(IsAligned_Data(alignedBuf));
IoSplitter splitter(alignedOfs, alignedBuf, (off_t)size);
return splitter.Run(file);
}
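
To make the splitting logic above concrete, here is a small worked example (editor's sketch) using the helpers from io_align.h, where BLOCK_SIZE is 256 KiB; the numbers in the comments follow directly from those definitions.

static void SplitterArithmeticExample()
{
    const off_t ofs = 300000;
    const off_t size = 600000;
    const off_t alignedOfs = AlignedOffset(ofs);             // 262144 = 1*BLOCK_SIZE
    const size_t misalignment = (size_t)(ofs - alignedOfs);  // 37856
    const off_t paddedSize = PaddedSize(size, ofs);          // 786432 = 3*BLOCK_SIZE
    // IoSplitter therefore issues three BLOCK_SIZE IOs at offsets 262144,
    // 524288 and 786432, skips `misalignment` bytes of the first block and
    // truncates the last so that exactly `size` bytes reach the caller.
    (void)misalignment; (void)paddedSize;
}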

View File

@ -1,38 +1,38 @@
/**
* =========================================================================
* File : io.h
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_IO
#define INCLUDED_IO
#include "lib/file/file.h"
// memory will be allocated from the heap, not the (limited) file cache.
// this makes sense for write buffers that are never used again,
// because we avoid having to displace some other cached items.
LIB_API shared_ptr<u8> io_Allocate(size_t size, off_t ofs = 0);
/**
* called after a block IO has completed.
*
* @return INFO::CB_CONTINUE to continue; any other value will cause the
* IO splitter to abort immediately and return that.
*
* this is useful for interleaving e.g. decompression with IOs.
**/
typedef LibError (*IoCallback)(uintptr_t cbData, const u8* block, size_t blockSize);
LIB_API LibError io_Scan(const PIFile& file, off_t ofs, off_t size, IoCallback cb, uintptr_t cbData);
LIB_API LibError io_Read(const PIFile& file, off_t ofs, u8* alignedBuf, size_t size, u8*& data);
LIB_API LibError io_WriteAligned(const PIFile& file, off_t alignedOfs, const u8* alignedData, size_t size);
LIB_API LibError io_ReadAligned(const PIFile& file, off_t alignedOfs, u8* alignedBuf, size_t size);
#endif // #ifndef INCLUDED_IO
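A hedged sketch of the IoCallback mechanism documented above (illustrative, not from the 0 A.D. sources): the callback merely counts bytes, standing in for e.g. a decompressor; function names are made up.

#include "lib/file/io/io.h"

// illustrative IoCallback: tally the bytes io_Scan hands to us.
// cbData carries a pointer to the running total.
static LibError CountBytes(uintptr_t cbData, const u8* block, size_t blockSize)
{
	size_t& total = *(size_t*)cbData;
	total += blockSize;
	(void)block;	// a real consumer would decompress/checksum the block here
	return INFO::CB_CONTINUE;	// any other value aborts the scan
}

static LibError ScanExample(const PIFile& file, off_t ofs, off_t size, size_t& total)
{
	total = 0;
	// io_Scan uses temporary block buffers, so no destination buffer is passed.
	return io_Scan(file, ofs, size, CountBytes, (uintptr_t)&total);
}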


@ -1,2 +1,2 @@
#include "precompiled.h"
#include "io_align.h"
#include "precompiled.h"
#include "io_align.h"


@ -1,49 +1,49 @@
#ifndef INCLUDED_IO_ALIGN
#define INCLUDED_IO_ALIGN
#include "lib/bits.h" // IsAligned, round_up
/**
* block := power-of-two sized chunk of a file.
* all transfers are expanded to naturally aligned, whole blocks.
* (this makes caching parts of files feasible; it is also much faster
* for some aio implementations, e.g. wposix.)
* (blocks are also thereby page-aligned, which allows write-protecting
* file buffers without worrying about their boundaries.)
**/
static const size_t BLOCK_SIZE = 256*KiB;
// note: *sizes* and *offsets* are aligned to blocks to allow a zero-copy block cache.
// note that the *buffer* need only be sector-aligned (we assume 4 KiB for simplicity);
// this is a requirement of the underlying Windows OS.
static const size_t SECTOR_SIZE = 4*KiB;
template<class T>
inline bool IsAligned_Data(T* address)
{
return IsAligned((uintptr_t)address, SECTOR_SIZE);
}
inline bool IsAligned_Offset(off_t ofs)
{
return IsAligned(ofs, BLOCK_SIZE);
}
inline off_t AlignedOffset(off_t ofs)
{
return (off_t)round_down<size_t>(size_t(ofs), BLOCK_SIZE);
}
inline off_t AlignedSize(off_t size)
{
return (off_t)round_up<size_t>(size_t(size), BLOCK_SIZE);
}
inline off_t PaddedSize(off_t size, off_t ofs)
{
return (off_t)round_up<size_t>(size_t(size + ofs - AlignedOffset(ofs)), BLOCK_SIZE);
}
#endif // #ifndef INCLUDED_IO_ALIGN
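A small worked example of the alignment helpers above (illustrative, not from the 0 A.D. sources); the offset and size are arbitrary.

#include "lib/file/io/io_align.h"

// for a 100 KiB read starting at byte offset 300000:
static void AlignmentExample()
{
	const off_t ofs = 300000;
	const off_t size = 100*KiB;	// 102400 bytes
	// round down to the containing block: 300000 -> 262144 (= BLOCK_SIZE)
	const off_t alignedOfs = AlignedOffset(ofs);
	// misalignment (37856) + size (102400) = 140256, rounded up to one whole block
	const off_t paddedSize = PaddedSize(size, ofs);
	debug_assert(alignedOfs == (off_t)BLOCK_SIZE);
	debug_assert(paddedSize == (off_t)BLOCK_SIZE);
}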


@ -1,103 +1,103 @@
#include "precompiled.h"
#include "write_buffer.h"
#include "lib/bits.h" // IsAligned
#include "lib/sysdep/cpu.h"
#include "io.h"
#include "io_align.h"
WriteBuffer::WriteBuffer()
: m_capacity(4096), m_data(io_Allocate(m_capacity)), m_size(0)
{
}
void WriteBuffer::Append(const void* data, size_t size)
{
if(m_size + size > m_capacity)
{
m_capacity = round_up_to_pow2((size_t)(m_size + size));
shared_ptr<u8> newData = io_Allocate(m_capacity);
cpu_memcpy(newData.get(), m_data.get(), m_size);
m_data = newData;
}
cpu_memcpy(m_data.get() + m_size, data, size);
m_size += size;
}
void WriteBuffer::Overwrite(const void* data, size_t size, size_t offset)
{
debug_assert(offset+size < m_size);
cpu_memcpy(m_data.get()+offset, data, size);
}
//-----------------------------------------------------------------------------
// UnalignedWriter
//-----------------------------------------------------------------------------
UnalignedWriter::UnalignedWriter(const PIFile& file, off_t ofs)
: m_file(file), m_alignedBuf(io_Allocate(BLOCK_SIZE))
{
m_alignedOfs = AlignedOffset(ofs);
const size_t misalignment = (size_t)(ofs - m_alignedOfs);
if(misalignment)
io_ReadAligned(m_file, m_alignedOfs, m_alignedBuf.get(), BLOCK_SIZE);
m_bytesUsed = misalignment;
}
UnalignedWriter::~UnalignedWriter()
{
Flush();
}
LibError UnalignedWriter::Append(const u8* data, size_t size) const
{
while(size != 0)
{
// optimization: write directly from the input buffer, if possible
const size_t alignedSize = (size / BLOCK_SIZE) * BLOCK_SIZE;
if(m_bytesUsed == 0 && IsAligned(data, SECTOR_SIZE) && alignedSize != 0)
{
RETURN_ERR(io_WriteAligned(m_file, m_alignedOfs, data, alignedSize));
m_alignedOfs += (off_t)alignedSize;
data += alignedSize;
size -= alignedSize;
}
const size_t chunkSize = std::min(size, BLOCK_SIZE-m_bytesUsed);
cpu_memcpy(m_alignedBuf.get()+m_bytesUsed, data, chunkSize);
m_bytesUsed += chunkSize;
data += chunkSize;
size -= chunkSize;
if(m_bytesUsed == BLOCK_SIZE)
RETURN_ERR(WriteBlock());
}
return INFO::OK;
}
void UnalignedWriter::Flush() const
{
if(m_bytesUsed)
{
memset(m_alignedBuf.get()+m_bytesUsed, 0, BLOCK_SIZE-m_bytesUsed);
(void)WriteBlock();
}
}
LibError UnalignedWriter::WriteBlock() const
{
RETURN_ERR(io_WriteAligned(m_file, m_alignedOfs, m_alignedBuf.get(), BLOCK_SIZE));
m_alignedOfs += BLOCK_SIZE;
m_bytesUsed = 0;
return INFO::OK;
}
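Usage sketch for UnalignedWriter (illustrative, not from the 0 A.D. sources): append two pieces of data at an arbitrary file offset while the writer performs the block-aligned IO internally. The PIFile is assumed to have been opened for writing elsewhere, and the header path is an assumption based on the includes above.

#include "lib/file/io/write_buffer.h"	// path assumed

static LibError WriteExample(const PIFile& file, off_t ofs,
	const u8* header, size_t headerSize, const u8* payload, size_t payloadSize)
{
	// reads back the partial leading block if `ofs` is not block-aligned
	UnalignedWriter writer(file, ofs);
	RETURN_ERR(writer.Append(header, headerSize));
	RETURN_ERR(writer.Append(payload, payloadSize));
	// zero-pads and writes the final partial block (also done by the destructor)
	writer.Flush();
	return INFO::OK;
}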
#include "precompiled.h"
#include "write_buffer.h"
#include "lib/bits.h" // IsAligned
#include "lib/sysdep/cpu.h"
#include "io.h"
#include "io_align.h"
WriteBuffer::WriteBuffer()
: m_capacity(4096), m_data(io_Allocate(m_capacity)), m_size(0)
{
}
void WriteBuffer::Append(const void* data, size_t size)
{
if(m_size + size > m_capacity)
{
m_capacity = round_up_to_pow2((size_t)(m_size + size));
shared_ptr<u8> newData = io_Allocate(m_capacity);
cpu_memcpy(newData.get(), m_data.get(), m_size);
m_data = newData;
}
cpu_memcpy(m_data.get() + m_size, data, size);
m_size += size;
}
void WriteBuffer::Overwrite(const void* data, size_t size, size_t offset)
{
debug_assert(offset+size < m_size);
cpu_memcpy(m_data.get()+offset, data, size);
}
//-----------------------------------------------------------------------------
// UnalignedWriter
//-----------------------------------------------------------------------------
UnalignedWriter::UnalignedWriter(const PIFile& file, off_t ofs)
: m_file(file), m_alignedBuf(io_Allocate(BLOCK_SIZE))
{
m_alignedOfs = AlignedOffset(ofs);
const size_t misalignment = (size_t)(ofs - m_alignedOfs);
if(misalignment)
io_ReadAligned(m_file, m_alignedOfs, m_alignedBuf.get(), BLOCK_SIZE);
m_bytesUsed = misalignment;
}
UnalignedWriter::~UnalignedWriter()
{
Flush();
}
LibError UnalignedWriter::Append(const u8* data, size_t size) const
{
while(size != 0)
{
// optimization: write directly from the input buffer, if possible
const size_t alignedSize = (size / BLOCK_SIZE) * BLOCK_SIZE;
if(m_bytesUsed == 0 && IsAligned(data, SECTOR_SIZE) && alignedSize != 0)
{
RETURN_ERR(io_WriteAligned(m_file, m_alignedOfs, data, alignedSize));
m_alignedOfs += (off_t)alignedSize;
data += alignedSize;
size -= alignedSize;
}
const size_t chunkSize = std::min(size, BLOCK_SIZE-m_bytesUsed);
cpu_memcpy(m_alignedBuf.get()+m_bytesUsed, data, chunkSize);
m_bytesUsed += chunkSize;
data += chunkSize;
size -= chunkSize;
if(m_bytesUsed == BLOCK_SIZE)
RETURN_ERR(WriteBlock());
}
return INFO::OK;
}
void UnalignedWriter::Flush() const
{
if(m_bytesUsed)
{
memset(m_alignedBuf.get()+m_bytesUsed, 0, BLOCK_SIZE-m_bytesUsed);
(void)WriteBlock();
}
}
LibError UnalignedWriter::WriteBlock() const
{
RETURN_ERR(io_WriteAligned(m_file, m_alignedOfs, m_alignedBuf.get(), BLOCK_SIZE));
m_alignedOfs += BLOCK_SIZE;
m_bytesUsed = 0;
return INFO::OK;
}


@ -1,61 +1,61 @@
#ifndef INCLUDED_WRITE_BUFFER
#define INCLUDED_WRITE_BUFFER
#include "lib/file/file.h"
class WriteBuffer
{
public:
WriteBuffer();
void Append(const void* data, size_t size);
void Overwrite(const void* data, size_t size, size_t offset);
shared_ptr<u8> Data() const
{
return m_data;
}
size_t Size() const
{
return m_size;
}
private:
size_t m_capacity; // must come first (init order)
shared_ptr<u8> m_data;
size_t m_size;
};
class UnalignedWriter
{
NONCOPYABLE(UnalignedWriter);
public:
UnalignedWriter(const PIFile& file, off_t ofs);
~UnalignedWriter();
/**
* add data to the align buffer, writing it out to disk if full.
**/
LibError Append(const u8* data, size_t size) const;
/**
* zero-initialize any remaining space in the align buffer and write
* it to the file. this is called by the destructor.
**/
void Flush() const;
private:
LibError WriteBlock() const;
PIFile m_file;
shared_ptr<u8> m_alignedBuf;
mutable off_t m_alignedOfs;
mutable size_t m_bytesUsed;
};
typedef shared_ptr<UnalignedWriter> PUnalignedWriter;
#endif // #ifndef INCLUDED_WRITE_BUFFER
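And a companion sketch for WriteBuffer (illustrative, not from the 0 A.D. sources): accumulate a record in memory and patch a length field once the total size is known. The u32 length prefix is an illustrative choice; u32 is assumed to come from the project's basic types alongside u8.

#include "lib/file/io/write_buffer.h"	// path assumed

static void BuildRecord(WriteBuffer& buf, const u8* payload, size_t payloadSize)
{
	debug_assert(payloadSize != 0);	// Overwrite below requires offset+size < Size()
	u32 totalSize = 0;	// placeholder, patched below
	buf.Append(&totalSize, sizeof(totalSize));
	buf.Append(payload, payloadSize);
	totalSize = (u32)buf.Size();
	buf.Overwrite(&totalSize, sizeof(totalSize), 0);
	// buf.Data()/buf.Size() can now be handed to the IO layer (e.g. io_WriteAligned)
}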


@ -1,107 +1,107 @@
/**
* =========================================================================
* File : path.cpp
* Project : 0 A.D.
* Description : manage paths relative to a root directory
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "path.h"
#include <string.h>
#include "lib/posix/posix_filesystem.h"
#include "lib/sysdep/sysdep.h" // SYS_DIR_SEP
#include "lib/path_util.h" // ERR::PATH_LENGTH
ERROR_ASSOCIATE(ERR::PATH_ROOT_DIR_ALREADY_SET, "Attempting to set FS root dir more than once", -1);
ERROR_ASSOCIATE(ERR::PATH_NOT_IN_ROOT_DIR, "Accessing a file that's outside of the root dir", -1);
bool exists(const Path& path)
{
return fs::exists(path.external_directory_string());
}
// security check: only allow path_SetRoot once so that malicious code
// cannot circumvent the VFS checks that disallow access to anything above
// the current directory (set here).
// path_SetRoot is called early at startup, so any subsequent attempts
// are likely bogus.
// we provide for resetting this from the self-test to allow clean
// re-init of the individual tests.
static bool s_isRootPathEstablished;
static std::string s_rootPath;
/*static*/ PathTraits::external_string_type PathTraits::to_external(const Path&, const PathTraits::internal_string_type& src)
{
std::string absolutePath = s_rootPath + src;
std::replace(absolutePath.begin(), absolutePath.end(), '/', SYS_DIR_SEP);
return absolutePath;
}
/*static*/ PathTraits::internal_string_type PathTraits::to_internal(const PathTraits::external_string_type& src)
{
if(s_rootPath.compare(0, s_rootPath.length(), src) != 0)
DEBUG_WARN_ERR(ERR::PATH_NOT_IN_ROOT_DIR);
std::string relativePath = src.substr(s_rootPath.length(), src.length()-s_rootPath.length());
std::replace(relativePath.begin(), relativePath.end(), SYS_DIR_SEP, '/');
return relativePath;
}
LibError path_SetRoot(const char* argv0, const char* relativePath)
{
if(s_isRootPathEstablished)
WARN_RETURN(ERR::PATH_ROOT_DIR_ALREADY_SET);
s_isRootPathEstablished = true;
// get full path to executable
char osPathname[PATH_MAX];
// .. first try safe, but system-dependent version
if(sys_get_executable_name(osPathname, PATH_MAX) < 0)
{
// .. failed; use argv[0]
errno = 0;
if(!realpath(argv0, osPathname))
return LibError_from_errno();
}
// make sure it's valid
errno = 0;
if(access(osPathname, X_OK) < 0)
return LibError_from_errno();
// strip executable name
char* name = (char*)path_name_only(osPathname);
*name = '\0';
strcat_s(osPathname, PATH_MAX, relativePath);
// get actual root dir - previous osPathname may include ".."
// (slight optimization, speeds up path lookup)
errno = 0;
char osRootPath[PATH_MAX];
if(!realpath(osPathname, osRootPath))
return LibError_from_errno();
s_rootPath = osRootPath;
s_rootPath.append(1, SYS_DIR_SEP); // simplifies to_external
return INFO::OK;
}
void path_ResetRootDir()
{
debug_assert(s_isRootPathEstablished); // see comment at s_isRootPathEstablished.
s_rootPath.clear();
s_isRootPathEstablished = false;
}
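A usage sketch (illustrative, not from the 0 A.D. sources), using the "../data" layout given in the path.h documentation below; the directory layout is an assumption.

#include "lib/file/path.h"

// establish $install_dir/data as the root, given an executable in $install_dir/system
static LibError InitPaths(const char* argv0)
{
	RETURN_ERR(path_SetRoot(argv0, "../data"));
	// ... run; a self-test would later call path_ResetRootDir() ...
	return INFO::OK;
}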


@ -1,80 +1,80 @@
/**
* =========================================================================
* File : path.h
* Project : 0 A.D.
* Description : manage paths relative to a root directory
* =========================================================================
*/
// license: GPL; see lib/license.txt
// path types:
// tag type type separator
// portable relative /
// os native absolute SYS_DIR_SEP
// vfs vfs absolute /
// the vfs root directory is "". no ':', '\\', "." or ".." are allowed.
#ifndef INCLUDED_PATH
#define INCLUDED_PATH
struct PathTraits;
typedef fs::basic_path<std::string, PathTraits> Path;
struct PathTraits
{
typedef std::string internal_string_type;
typedef std::string external_string_type;
static LIB_API external_string_type to_external(const Path&, const internal_string_type& src);
static LIB_API internal_string_type to_internal(const external_string_type& src);
};
namespace boost
{
namespace filesystem
{
template<> struct is_basic_path<Path>
{
BOOST_STATIC_CONSTANT(bool, value = true);
};
}
}
namespace ERR
{
const LibError PATH_ROOT_DIR_ALREADY_SET = -110200;
const LibError PATH_NOT_IN_ROOT_DIR = -110201;
}
/**
* establish the root OS directory (portable paths are relative to it)
*
* @param argv0 the value of argv[0] (used to determine the location
* of the executable in case sys_get_executable_path fails). note that
* the current directory cannot be used because it's not set when
* starting via batch file.
* @param relativePath root directory relative to the executable's directory.
* the value is considered trusted since it will typically be hard-coded.
*
* example: executable in "$install_dir/system"; desired root dir is
* "$install_dir/data" => rel_path = "../data".
*
* can only be called once unless path_ResetRootDir is called.
**/
LIB_API LibError path_SetRoot(const char* argv0, const char* relativePath);
/**
* reset root directory that was previously established via path_SetRoot.
*
* this function avoids the security complaint that would be raised if
* path_SetRoot is called twice; it is provided for the
* legitimate application of a self-test setUp()/tearDown().
**/
LIB_API void path_ResetRootDir();
// note: path_MakeAbsolute has been replaced by Path::external_directory_string.
#endif // #ifndef INCLUDED_PATH


@ -1,233 +1,233 @@
/**
* =========================================================================
* File : file_cache.cpp
* Project : 0 A.D.
* Description : cache of file contents (supports zero-copy IO)
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "file_cache.h"
#include "lib/file/common/file_stats.h"
#include "lib/file/io/io_align.h" // BLOCK_SIZE
#include "lib/cache_adt.h" // Cache
#include "lib/bits.h" // round_up
#include "lib/allocators/allocators.h"
#include "lib/allocators/shared_ptr.h"
#include "lib/allocators/headerless.h"
#include "lib/sysdep/os_cpu.h" // os_cpu_PageSize
//-----------------------------------------------------------------------------
// allocator
/*
the biggest worry of a file cache is external fragmentation. there are two
basic ways to combat this:
1) 'defragment' periodically - move blocks around to increase
size of available 'holes'.
2) prevent fragmentation from occurring at all via
deliberate alloc/free policy.
file contents are returned directly to the user (zero-copy IO), so only
currently unreferenced blocks can be moved. it is believed that this would
severely hamper defragmentation; we therefore go with the latter approach.
the basic insight is: fragmentation occurs when a block is freed whose
neighbors are not free (thus preventing coalescing). this can be prevented by
allocating objects of similar lifetimes together. typical workloads
(uniform access frequency) already show such behavior: the Landlord cache
manager evicts files in an LRU manner, which matches the allocation policy.
references:
"The Memory Fragmentation Problem - Solved?" (Johnstone and Wilson)
"Dynamic Storage Allocation - A Survey and Critical Review" (Johnstone and Wilson)
*/
// shared_ptr<u8>s must own a reference to their allocator to ensure it's extant when
// they are freed. it is stored in the shared_ptr deleter.
class Allocator;
typedef shared_ptr<Allocator> PAllocator;
class FileCacheDeleter
{
public:
FileCacheDeleter(size_t size, const PAllocator& allocator)
: m_size(size), m_allocator(allocator)
{
}
// (this uses Allocator and must come after its definition)
void operator()(u8* mem) const;
private:
size_t m_size;
PAllocator m_allocator;
};
// adds statistics and AllocatorChecker to a HeaderlessAllocator
class Allocator
{
public:
Allocator(size_t maxSize)
: m_allocator(maxSize)
{
}
shared_ptr<u8> Allocate(size_t size, const PAllocator& pthis)
{
const size_t alignedSize = round_up(size, BLOCK_SIZE);
u8* mem = (u8*)m_allocator.Allocate(alignedSize);
if(!mem)
return DummySharedPtr<u8>(0); // (prevent FileCacheDeleter from seeing a null pointer)
#ifndef NDEBUG
m_checker.OnAllocate(mem, alignedSize);
#endif
stats_buf_alloc(size, alignedSize);
return shared_ptr<u8>(mem, FileCacheDeleter(size, pthis));
}
void Deallocate(u8* mem, size_t size)
{
const size_t alignedSize = round_up(size, BLOCK_SIZE);
// (re)allow writes in case the buffer was made read-only. it would
// be nice to unmap the buffer, but this is not possible because
// HeaderlessAllocator needs to affix boundary tags.
(void)mprotect(mem, size, PROT_READ|PROT_WRITE);
#ifndef NDEBUG
m_checker.OnDeallocate(mem, alignedSize);
#endif
m_allocator.Deallocate(mem, alignedSize);
stats_buf_free();
}
private:
HeaderlessAllocator m_allocator;
#ifndef NDEBUG
AllocatorChecker m_checker;
#endif
};
void FileCacheDeleter::operator()(u8* mem) const
{
m_allocator->Deallocate(mem, m_size);
}
//-----------------------------------------------------------------------------
// FileCache::Impl
//-----------------------------------------------------------------------------
// since users are strongly encouraged to only load/process one file at a
// time, there won't be many active references to cache entries. we could
// take advantage of this with a separate extant list, but the cache's
// hash map should be fast enough and this way is less work than maintaining
// (possibly disjunct) cached and extant lists.
class FileCache::Impl
{
public:
Impl(size_t maxSize)
: m_allocator(new Allocator(maxSize))
{
}
shared_ptr<u8> Reserve(size_t size)
{
// (should never happen because the VFS ensures size != 0.)
debug_assert(size != 0);
// (300 iterations have been observed when reserving several MB
// of space in a full cache)
for(;;)
{
{
shared_ptr<u8> data = m_allocator->Allocate(size, m_allocator);
if(data)
return data;
}
// remove least valuable entry from cache (if users are holding
// references, the contents won't actually be deallocated)
{
shared_ptr<u8> discardedData; size_t discardedSize;
bool removed = m_cache.remove_least_valuable(&discardedData, &discardedSize);
// only false if cache is empty, which can't be the case because
// allocation failed.
debug_assert(removed);
}
}
}
void Add(const VfsPath& pathname, const shared_ptr<u8>& data, size_t size, size_t cost)
{
// zero-copy cache => all users share the contents => must not
// allow changes. this will be reverted when deallocating.
(void)mprotect((void*)data.get(), size, PROT_READ);
m_cache.add(pathname, data, size, cost);
}
bool Retrieve(const VfsPath& pathname, shared_ptr<u8>& data, size_t& size)
{
// (note: don't call stats_cache because we don't know the file size
// in case of a cache miss; doing so is left to the caller.)
stats_buf_ref();
return m_cache.retrieve(pathname, data, &size);
}
void Remove(const VfsPath& pathname)
{
m_cache.remove(pathname);
// note: we could check if someone is still holding a reference
// to the contents, but that currently doesn't matter.
}
private:
typedef Cache< VfsPath, shared_ptr<u8> > CacheType;
CacheType m_cache;
PAllocator m_allocator;
};
//-----------------------------------------------------------------------------
FileCache::FileCache(size_t size)
: impl(new Impl(size))
{
}
shared_ptr<u8> FileCache::Reserve(size_t size)
{
return impl->Reserve(size);
}
void FileCache::Add(const VfsPath& pathname, const shared_ptr<u8>& data, size_t size, size_t cost)
{
impl->Add(pathname, data, size, cost);
}
void FileCache::Remove(const VfsPath& pathname)
{
impl->Remove(pathname);
}
bool FileCache::Retrieve(const VfsPath& pathname, shared_ptr<u8>& data, size_t& size)
{
return impl->Retrieve(pathname, data, size);
}
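Usage sketch of the Reserve -> Add -> Retrieve cycle (illustrative, not from the 0 A.D. sources): the IO that fills the buffer is elided, and the header path is an assumption.

#include "lib/file/vfs/file_cache.h"	// path assumed

static void CacheExample(FileCache& cache, const VfsPath& pathname, size_t fileSize)
{
	// evicts least-valuable entries until the reservation succeeds
	shared_ptr<u8> buf = cache.Reserve(fileSize);
	// ... read fileSize bytes into buf.get() here ...
	cache.Add(pathname, buf, fileSize);	// contents become read-only (mprotect)
	shared_ptr<u8> data; size_t size;
	const bool hit = cache.Retrieve(pathname, data, size);
	debug_assert(hit && size == fileSize);
}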


@ -1,90 +1,90 @@
/**
* =========================================================================
* File : file_cache.h
* Project : 0 A.D.
* Description : cache of file contents (supports zero-copy IO)
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILE_CACHE
#define INCLUDED_FILE_CACHE
#include "vfs_path.h"
/**
* cache of file contents with support for zero-copy IO.
* this works by reserving a region of the cache, using it as the IO buffer,
* and returning the memory directly to users. optional write-protection
* via MMU ensures that the shared contents aren't inadvertently changed.
*
* (unique copies of) VFS pathnames are used as lookup key and owner tag.
*
* to ensure efficient operation and prevent fragmentation, only one
* reference should be active at a time. in other words, read a file,
* process it, and only then start reading the next file.
*
* rationale: this is rather similar to BlockCache; however, the differences
* (Reserve's size parameter, eviction policies) are enough to warrant
* separate implementations.
**/
class FileCache
{
public:
/**
* @param size maximum amount [bytes] of memory to use for the cache.
* (managed as a virtual memory region that's committed on-demand)
**/
FileCache(size_t size);
/**
* Reserve a chunk of the cache's memory region.
*
* @param size required number of bytes (more may be allocated due to
* alignment and/or internal fragmentation)
* @return memory suitably aligned for IO; never fails.
*
* it is expected that this data will be Add()-ed once its IO completes.
**/
shared_ptr<u8> Reserve(size_t size);
/**
* Add a file's contents to the cache.
*
* the cache will be able to satisfy subsequent Retrieve() calls by
* returning this data; if CONFIG2_CACHE_READ_ONLY, the buffer is made
* read-only. if need be and no references are currently attached to it,
* the memory can also be commandeered by Reserve().
*
* @param pathname key that will be used to Retrieve file contents.
* @param cost is the expected cost of retrieving the file again and
* influences how/when it is evicted from the cache.
**/
void Add(const VfsPath& pathname, const shared_ptr<u8>& data, size_t size, size_t cost = 1);
/**
* Remove a file's contents from the cache (if it exists).
*
* this ensures subsequent reads of the files see the current, presumably
* recently changed, contents of the file.
*
* this would typically be called in response to a notification that a
* file has changed.
**/
void Remove(const VfsPath& pathname);
/**
* Attempt to retrieve a file's contents from the file cache.
*
* @return whether the contents were successfully retrieved; if so,
* data references the read-only file contents.
**/
bool Retrieve(const VfsPath& pathname, shared_ptr<u8>& data, size_t& size);
private:
class Impl;
shared_ptr<Impl> impl;
};
#endif // #ifndef INCLUDED_FILE_CACHE


@ -1,167 +1,167 @@
/**
* =========================================================================
* File : vfs.cpp
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "vfs.h"
#include "lib/allocators/shared_ptr.h"
#include "lib/path_util.h"
#include "lib/file/common/file_stats.h"
#include "lib/file/common/trace.h"
#include "lib/file/archive/archive.h"
#include "lib/file/io/io.h"
#include "vfs_tree.h"
#include "vfs_lookup.h"
#include "vfs_populate.h"
#include "file_cache.h"
class VFS : public IVFS
{
public:
VFS(size_t cacheSize)
: m_cacheSize(cacheSize), m_fileCache(m_cacheSize)
, m_trace(CreateTrace(4*MiB))
{
}
virtual LibError Mount(const VfsPath& mountPoint, const Path& path, size_t flags /* = 0 */, size_t priority /* = 0 */)
{
debug_assert(vfs_path_IsDirectory(mountPoint));
// note: mounting subdirectories is now allowed.
VfsDirectory* directory;
CHECK_ERR(vfs_Lookup(mountPoint, &m_rootDirectory, directory, 0, VFS_LOOKUP_ADD|VFS_LOOKUP_CREATE));
PRealDirectory realDirectory(new RealDirectory(path, priority, flags));
RETURN_ERR(vfs_Attach(directory, realDirectory));
return INFO::OK;
}
virtual LibError GetFileInfo(const VfsPath& pathname, FileInfo* pfileInfo) const
{
VfsDirectory* directory; VfsFile* file;
LibError ret = vfs_Lookup(pathname, &m_rootDirectory, directory, &file);
if(!pfileInfo) // just indicate if the file exists without raising warnings.
return ret;
CHECK_ERR(ret);
*pfileInfo = FileInfo(file->Name(), file->Size(), file->MTime());
return INFO::OK;
}
virtual LibError GetDirectoryEntries(const VfsPath& path, FileInfos* files, DirectoryNames* subdirectoryNames) const
{
debug_assert(vfs_path_IsDirectory(path));
VfsDirectory* directory;
CHECK_ERR(vfs_Lookup(path, &m_rootDirectory, directory, 0));
directory->GetEntries(files, subdirectoryNames);
return INFO::OK;
}
// note: only allowing either reads or writes simplifies file cache
// coherency (need only invalidate when closing a FILE_WRITE file).
virtual LibError CreateFile(const VfsPath& pathname, const shared_ptr<u8>& fileContents, size_t size)
{
VfsDirectory* directory;
CHECK_ERR(vfs_Lookup(pathname, &m_rootDirectory, directory, 0, VFS_LOOKUP_ADD|VFS_LOOKUP_CREATE));
const PRealDirectory& realDirectory = directory->AssociatedDirectory();
const std::string& name = pathname.leaf();
RETURN_ERR(realDirectory->Store(name, fileContents, size));
const VfsFile file(name, (off_t)size, time(0), realDirectory->Priority(), realDirectory);
directory->AddFile(file);
// wipe out any cached blocks. this is necessary to cover the (rare) case
// of file cache contents predating the file write.
m_fileCache.Remove(pathname);
m_trace->NotifyStore(pathname.string().c_str(), size);
return INFO::OK;
}
// read the entire file.
// return number of bytes transferred (see above), or a negative error code.
//
// if non-NULL, <cb> is called for each block transferred, passing <cbData>.
// it returns how much data was actually transferred, or a negative error
// code (in which case we abort the transfer and return that value).
// the callback mechanism is useful for user progress notification or
// processing data while waiting for the next I/O to complete
// (quasi-parallel, without the complexity of threads).
virtual LibError LoadFile(const VfsPath& pathname, shared_ptr<u8>& fileContents, size_t& size)
{
const bool isCacheHit = m_fileCache.Retrieve(pathname, fileContents, size);
if(!isCacheHit)
{
VfsDirectory* directory; VfsFile* file;
CHECK_ERR(vfs_Lookup(pathname, &m_rootDirectory, directory, &file));
size = file->Size();
// safely handle zero-length files
if(!size)
fileContents = DummySharedPtr((u8*)0);
else if(size > m_cacheSize)
{
fileContents = io_Allocate(size);
RETURN_ERR(file->Load(fileContents));
}
else
{
fileContents = m_fileCache.Reserve(size);
RETURN_ERR(file->Load(fileContents));
m_fileCache.Add(pathname, fileContents, size);
}
}
stats_io_user_request(size);
stats_cache(isCacheHit? CR_HIT : CR_MISS, size);
m_trace->NotifyLoad(pathname.string().c_str(), size);
return INFO::OK;
}
// rebuild the VFS, i.e. re-mount everything. open files are not affected.
// necessary after loose files or directories change, so that the VFS
// "notices" the changes and updates file locations. res calls this after
// dir_watch reports changes; can also be called from the console after a
// rebuild command. there is no provision for updating single VFS dirs -
// it's not worth the trouble.
virtual void Clear()
{
m_rootDirectory.ClearR();
}
virtual void Display() const
{
m_rootDirectory.DisplayR(0);
}
virtual LibError GetRealPath(const VfsPath& pathname, Path& realPathname)
{
VfsDirectory* directory;
CHECK_ERR(vfs_Lookup(pathname, &m_rootDirectory, directory, 0));
const PRealDirectory& realDirectory = directory->AssociatedDirectory();
realPathname = realDirectory->GetPath() / pathname.leaf();
return INFO::OK;
}
private:
size_t m_cacheSize;
FileCache m_fileCache;
PITrace m_trace;
mutable VfsDirectory m_rootDirectory;
};
//-----------------------------------------------------------------------------
PIVFS CreateVfs(size_t cacheSize)
{
return PIVFS(new VFS(cacheSize));
}
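Finally, a sketch of the public VFS API in use (illustrative, not from the 0 A.D. sources): cache size, mount point and file name are made up, the header path is assumed, and string literals are assumed to convert implicitly to VfsPath.

#include "lib/file/vfs/vfs.h"	// path assumed

static LibError VfsExample(const Path& modPath)
{
	PIVFS vfs = CreateVfs(96*MiB);
	RETURN_ERR(vfs->Mount("mods/public/", modPath, VFS_MOUNT_WATCH));
	shared_ptr<u8> contents; size_t size;
	RETURN_ERR(vfs->LoadFile("mods/public/maps/readme.txt", contents, size));
	// contents may reference read-only cache memory; treat it as immutable.
	return INFO::OK;
}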


@ -1,137 +1,137 @@
/**
* =========================================================================
* File : vfs.h
* Project : 0 A.D.
* Description : Virtual File System API - allows transparent access to
* : files in archives and modding via multiple mount points.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_VFS
#define INCLUDED_VFS
#include "lib/file/file_system.h" // FileInfo
#include "lib/file/path.h"
#include "lib/file/vfs/vfs_path.h"
namespace ERR
{
const LibError VFS_DIR_NOT_FOUND = -110100;
const LibError VFS_FILE_NOT_FOUND = -110101;
const LibError VFS_ALREADY_MOUNTED = -110102;
}
// (recursive mounting and mounting archives are no longer optional since they don't hurt)
enum VfsMountFlags
{
/**
* all real directories mounted during this operation will be watched
* for changes. this flag is provided to avoid watches in output-only
* directories, e.g. screenshots/ (only causes unnecessary overhead).
**/
VFS_MOUNT_WATCH = 1,
/**
* anything mounted from here should be included when building archives.
**/
VFS_MOUNT_ARCHIVABLE = 2
};
struct IVFS
{
/**
* mount a directory into the VFS.
*
* @param mountPoint (will be created if it does not already exist)
* @param path real directory path
* @return LibError.
*
* if files are encountered that already exist in the VFS (sub)directories,
* the most recent / highest priority/precedence version is preferred.
*
* if files with archive extensions are seen, their contents are added
* as well.
**/
virtual LibError Mount(const VfsPath& mountPoint, const Path& path, size_t flags = 0, size_t priority = 0) = 0;
/**
* retrieve information about a file (similar to POSIX stat)
*
* @return LibError.
**/
virtual LibError GetFileInfo(const VfsPath& pathname, FileInfo* pfileInfo) const = 0;
/**
* retrieve lists of all files and subdirectories in a directory.
*
* @return LibError.
*
* rationale:
* - this interface avoids having to lock the directory while an
* iterator is extant.
* - we cannot efficiently provide routines for returning files and
* subdirectories separately due to the underlying POSIX interface.
**/
virtual LibError GetDirectoryEntries(const VfsPath& path, FileInfos* files, DirectoryNames* subdirectoryNames) const = 0;
/**
* create a file with the given contents.
*
* @param size [bytes] of the contents, will match that of the file.
* @return LibError.
*
* rationale: disallowing partial writes simplifies file cache coherency
* (need only be invalidated when closing a FILE_WRITE file).
**/
virtual LibError CreateFile(const VfsPath& pathname, const shared_ptr<u8>& fileContents, size_t size) = 0;
/**
* read an entire file into memory.
*
* @param fileContents receives a smart pointer to the contents.
* CAVEAT: this will be taken from the file cache if the VFS was
* created with cacheSize != 0 and size < cacheSize. there is no
* provision for Copy-on-Write, which means that such buffers
* must not be modified (this is enforced via mprotect).
* @param size receives the size [bytes] of the file contents.
* @return LibError.
**/
virtual LibError LoadFile(const VfsPath& pathname, shared_ptr<u8>& fileContents, size_t& size) = 0;
/**
* dump a text representation of the filesystem to debug output.
**/
virtual void Display() const = 0;
/**
* empty the contents of the filesystem.
*
* the effect is as if nothing had been mounted.
**/
virtual void Clear() = 0;
/**
* retrieve the real (POSIX) path underlying a VFS file.
*
* this is useful when passing paths to external libraries.
**/
virtual LibError GetRealPath(const VfsPath& pathname, Path& path) = 0;
};
typedef shared_ptr<IVFS> PIVFS;
/**
* create an instance of a Virtual File System.
*
* @param cacheSize size [bytes] of memory to reserve for a file cache,
* or zero to disable it. if small enough to fit, file contents are
* stored here until no references remain and they are evicted.
*
* note: there is no limitation to a single instance, it may make sense
* to create and destroy VFS instances during each unit test.
**/
LIB_API PIVFS CreateVfs(size_t cacheSize);
#endif // #ifndef INCLUDED_VFS
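One more small sketch (illustrative, not from the 0 A.D. sources), exploiting the behavior documented in the VFS implementation above: GetFileInfo with a null FileInfo pointer merely tests for existence.

static bool VfsFileExists(const PIVFS& vfs, const VfsPath& pathname)
{
	// passing 0 skips the "file not found" warning and just returns the status
	return vfs->GetFileInfo(pathname, 0) == INFO::OK;
}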

View File

@ -1,81 +1,81 @@
/**
* =========================================================================
* File : vfs_lookup.cpp
* Project : 0 A.D.
* Description : look up directories/files by traversing path components.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "vfs_lookup.h"
#include "lib/path_util.h" // path_foreach_component
#include "vfs.h" // error codes
#include "vfs_tree.h"
#include "vfs_populate.h"
#include "lib/timer.h"
LibError vfs_Lookup(const VfsPath& pathname, VfsDirectory* startDirectory, VfsDirectory*& directory, VfsFile** pfile, size_t flags)
{
// extract and validate flags (ensure no unknown bits are set)
const bool addMissingDirectories = (flags & VFS_LOOKUP_ADD) != 0;
const bool createMissingDirectories = (flags & VFS_LOOKUP_CREATE) != 0;
debug_assert((flags & ~(VFS_LOOKUP_ADD|VFS_LOOKUP_CREATE)) == 0);
if(pfile)
*pfile = 0;
directory = startDirectory;
RETURN_ERR(vfs_Populate(directory));
// early-out for pathname == "" when mounting into VFS root
if(pathname.empty()) // (prevent iterator error in loop end condition)
return INFO::OK;
// for each directory component:
VfsPath::iterator it; // (used outside of loop to get filename)
for(it = pathname.begin(); it != --pathname.end(); ++it)
{
const std::string& subdirectoryName = *it;
VfsDirectory* subdirectory = directory->GetSubdirectory(subdirectoryName);
if(!subdirectory)
{
if(addMissingDirectories)
subdirectory = directory->AddSubdirectory(subdirectoryName);
else
return ERR::VFS_DIR_NOT_FOUND; // NOWARN
}
if(createMissingDirectories && !subdirectory->AssociatedDirectory())
{
Path currentPath;
if(directory->AssociatedDirectory()) // (is NULL when mounting into root)
currentPath = directory->AssociatedDirectory()->GetPath()/subdirectoryName;
if(mkdir(currentPath.external_directory_string().c_str(), S_IRWXO|S_IRWXU|S_IRWXG) == 0)
{
PRealDirectory realDirectory(new RealDirectory(currentPath, 0, 0));
RETURN_ERR(vfs_Attach(subdirectory, realDirectory));
}
}
RETURN_ERR(vfs_Populate(subdirectory));
directory = subdirectory;
}
if(pfile)
{
const std::string& filename = *it;
debug_assert(filename != "."); // asked for file but specified directory path
*pfile = directory->GetFile(filename);
if(!*pfile)
return ERR::VFS_FILE_NOT_FOUND; // NOWARN
}
return INFO::OK;
}

View File

@ -1,47 +1,47 @@
/**
* =========================================================================
* File : vfs_lookup.h
* Project : 0 A.D.
* Description : look up directories/files by traversing path components.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_VFS_LOOKUP
#define INCLUDED_VFS_LOOKUP
#include "vfs_path.h"
class VfsFile;
class VfsDirectory;
// note: VfsDirectory pointers are non-const because they may be
// populated during the lookup.
enum VfsLookupFlags
{
// add (if they do not already exist) subdirectory components
// encountered in the path[name].
VFS_LOOKUP_ADD = 1,
// create a real directory
VFS_LOOKUP_CREATE = 2
};
/**
* Resolve a pathname.
*
* @param pathname
 * @param startDirectory
* @param directory is set to the last directory component that is encountered.
* @param file is set to 0 if there is no name component, otherwise the
* corresponding file.
* @param flags see VfsLookupFlags.
* @return LibError (INFO::OK if all components in pathname exist).
*
* to allow noiseless file-existence queries, this does not raise warnings.
**/
extern LibError vfs_Lookup(const VfsPath& pathname, VfsDirectory* startDirectory, VfsDirectory*& directory, VfsFile** pfile, size_t flags = 0);
#endif // #ifndef INCLUDED_VFS_LOOKUP
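
As a sketch of the noiseless file-existence query mentioned above: rootDirectory stands in for however the caller obtains the VFS root directory (an assumption), and any failure is collapsed into "does not exist".

static bool FileExists(VfsDirectory* rootDirectory, const VfsPath& pathname)
{
	VfsDirectory* directory;
	VfsFile* file;
	// no warnings are raised, so a negative result is cheap and silent
	return vfs_Lookup(pathname, rootDirectory, directory, &file) == INFO::OK;
}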

View File

@ -1,7 +1,7 @@
#include "precompiled.h"
#include "vfs_path.h"
bool vfs_path_IsDirectory(const VfsPath& pathname)
{
return pathname.empty() || pathname.leaf() == ".";
}
#include "precompiled.h"
#include "vfs_path.h"
bool vfs_path_IsDirectory(const VfsPath& pathname)
{
return pathname.empty() || pathname.leaf() == ".";
}

View File

@ -1,58 +1,58 @@
#ifndef INCLUDED_VFS_PATH
#define INCLUDED_VFS_PATH
struct VfsPathTraits;
/**
* VFS path of the form "(dir/)*file?"
*
* in other words: the root directory is "" and paths are separated by '/'.
* a trailing slash is allowed for directory names.
* rationale: it is important to avoid a leading slash because that might be
* confused with an absolute POSIX path.
*
* there is no restriction on path length; when dimensioning character
* arrays, prefer PATH_MAX.
*
* rationale: a distinct specialization of basic_path prevents inadvertent
* assignment from other path types.
**/
typedef fs::basic_path<std::string, VfsPathTraits> VfsPath;
typedef std::vector<VfsPath> VfsPaths;
struct VfsPathTraits
{
typedef std::string internal_string_type;
typedef std::string external_string_type;
static external_string_type to_external(const VfsPath&, const internal_string_type& src)
{
return src;
}
static internal_string_type to_internal(const external_string_type& src)
{
return src;
}
};
namespace boost
{
namespace filesystem
{
template<> struct is_basic_path<VfsPath>
{
BOOST_STATIC_CONSTANT(bool, value = true);
};
}
}
/**
* Does a path appear to refer to a directory? (non-authoritative)
*
* note: only used as a safeguard.
**/
extern bool vfs_path_IsDirectory(const VfsPath& pathname);
#endif // #ifndef INCLUDED_VFS_PATH
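
A small illustration of these conventions (not part of the header); it assumes VfsPath can be constructed directly from string literals, and the example paths are made up.

static void PathConventionExamples()
{
	const VfsPath root;                            // "" - the VFS root, a directory
	const VfsPath directory("art/textures/");      // trailing slash marks a directory
	const VfsPath file("art/textures/stone.dds");  // no trailing slash - treated as a file
	debug_assert(vfs_path_IsDirectory(root));
	debug_assert(vfs_path_IsDirectory(directory));
	debug_assert(!vfs_path_IsDirectory(file));
}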

View File

@ -1,139 +1,139 @@
/**
* =========================================================================
* File : vfs_populate.cpp
* Project : 0 A.D.
* Description : populate VFS directories with files
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "vfs_populate.h"
#include "lib/path_util.h"
#include "lib/file/file_system_posix.h"
#include "lib/file/archive/archive_zip.h"
#include "vfs_tree.h"
#include "vfs_lookup.h"
#include "vfs.h" // error codes
static FileSystem_Posix s_fileSystemPosix;
static std::vector<const VfsFile*> s_looseFiles;
static size_t s_numArchivedFiles;
// helper class that allows breaking up the logic into sub-functions without
// always having to pass directory/realDirectory as parameters.
class PopulateHelper
{
NONCOPYABLE(PopulateHelper);
public:
PopulateHelper(VfsDirectory* directory, const PRealDirectory& realDirectory)
: m_directory(directory), m_realDirectory(realDirectory)
{
}
LibError AddEntries() const
{
FileInfos files; files.reserve(100);
DirectoryNames subdirectoryNames; subdirectoryNames.reserve(20);
RETURN_ERR(s_fileSystemPosix.GetDirectoryEntries(m_realDirectory->GetPath(), &files, &subdirectoryNames));
RETURN_ERR(AddFiles(files));
AddSubdirectories(subdirectoryNames);
return INFO::OK;
}
private:
void AddFile(const FileInfo& fileInfo) const
{
const VfsFile file(fileInfo.Name(), fileInfo.Size(), fileInfo.MTime(), m_realDirectory->Priority(), m_realDirectory);
const VfsFile* pfile = m_directory->AddFile(file);
		// notify the archive builder that this file could be archived but
		// currently isn't; if there are too many such files, the archive
		// will be rebuilt.
// note: check if archivable to exclude stuff like screenshots
// from counting towards the threshold.
if(m_realDirectory->Flags() & VFS_MOUNT_ARCHIVABLE)
s_looseFiles.push_back(pfile);
}
static void AddArchiveFile(const VfsPath& pathname, const FileInfo& fileInfo, PIArchiveFile archiveFile, uintptr_t cbData)
{
PopulateHelper* this_ = (PopulateHelper*)cbData;
		// (we have to create missing subdirectories because archivers
		// don't always place directory entries before their files)
const size_t flags = VFS_LOOKUP_ADD;
VfsDirectory* directory;
WARN_ERR(vfs_Lookup(pathname, this_->m_directory, directory, 0, flags));
const VfsFile file(fileInfo.Name(), fileInfo.Size(), fileInfo.MTime(), this_->m_realDirectory->Priority(), archiveFile);
directory->AddFile(file);
s_numArchivedFiles++;
}
LibError AddFiles(const FileInfos& files) const
{
const Path path(m_realDirectory->GetPath());
for(size_t i = 0; i < files.size(); i++)
{
const std::string& name = files[i].Name();
const char* extension = path_extension(name.c_str());
if(strcasecmp(extension, "zip") == 0)
{
PIArchiveReader archiveReader = CreateArchiveReader_Zip(path/name);
RETURN_ERR(archiveReader->ReadEntries(AddArchiveFile, (uintptr_t)this));
}
else // regular (non-archive) file
AddFile(files[i]);
}
return INFO::OK;
}
void AddSubdirectories(const DirectoryNames& subdirectoryNames) const
{
for(size_t i = 0; i < subdirectoryNames.size(); i++)
{
// skip version control directories - this avoids cluttering the
// VFS with hundreds of irrelevant files.
if(strcasecmp(subdirectoryNames[i].c_str(), ".svn") == 0)
continue;
VfsDirectory* subdirectory = m_directory->AddSubdirectory(subdirectoryNames[i]);
PRealDirectory realDirectory = CreateRealSubdirectory(m_realDirectory, subdirectoryNames[i]);
vfs_Attach(subdirectory, realDirectory);
}
}
VfsDirectory* const m_directory;
PRealDirectory m_realDirectory;
};
LibError vfs_Populate(VfsDirectory* directory)
{
if(!directory->ShouldPopulate())
return INFO::OK;
const PRealDirectory& realDirectory = directory->AssociatedDirectory();
if(realDirectory->Flags() & VFS_MOUNT_WATCH)
realDirectory->Watch();
PopulateHelper helper(directory, realDirectory);
RETURN_ERR(helper.AddEntries());
return INFO::OK;
}
LibError vfs_Attach(VfsDirectory* directory, const PRealDirectory& realDirectory)
{
RETURN_ERR(vfs_Populate(directory));
directory->SetAssociatedDirectory(realDirectory);
return INFO::OK;
}

View File

@ -1,41 +1,41 @@
/**
* =========================================================================
* File : vfs_populate.h
* Project : 0 A.D.
* Description : populate VFS directories with files
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_VFS_POPULATE
#define INCLUDED_VFS_POPULATE
#include "lib/file/common/real_directory.h"
class VfsDirectory;
/**
* attach a real directory to a VFS directory.
*
* when the VFS directory is accessed, it will first be populated from
* the real directory. (this delays the impact of mounting a large
* directory, distributing the cost from startup to the first accesses
* to each subdirectory.)
*
* note: the most recently attached real directory will be used when
* creating files in the VFS directory.
**/
extern LibError vfs_Attach(VfsDirectory* directory, const PRealDirectory& realDirectory);
/**
* populate the directory from the attached real directory.
*
* adds each real file and subdirectory entry to the VFS directory.
* the full contents of any archives in the real directory are also added.
*
* has no effect if no directory has been attached since the last populate.
**/
extern LibError vfs_Populate(VfsDirectory* directory);
#endif // #ifndef INCLUDED_VFS_POPULATE
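
A sketch of the lazy-population pattern described above: a hypothetical caller that is about to enumerate a directory populates it first (a no-op after the first access). It assumes vfs_tree.h is included for VfsDirectory::GetEntries.

static LibError GetPopulatedEntries(VfsDirectory* directory, FileInfos& files, DirectoryNames& subdirectoryNames)
{
	RETURN_ERR(vfs_Populate(directory));	// populate on first access; cheap afterwards
	directory->GetEntries(&files, &subdirectoryNames);
	return INFO::OK;
}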

View File

@ -1,203 +1,203 @@
/**
* =========================================================================
* File : vfs_tree.cpp
* Project : 0 A.D.
* Description : 'tree' of VFS directories and files
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "vfs_tree.h"
#include "lib/file/common/file_stats.h"
#include "lib/sysdep/cpu.h"
//-----------------------------------------------------------------------------
VfsFile::VfsFile(const std::string& name, off_t size, time_t mtime, size_t priority, const PIFileLoader& loader)
: m_name(name), m_size(size), m_mtime(mtime), m_priority(priority), m_loader(loader)
{
}
bool VfsFile::IsSupersededBy(const VfsFile& file) const
{
// 1) priority (override mods)
if(file.m_priority < m_priority) // lower priority
return false;
// 2) timestamp
{
const double howMuchNewer = difftime(file.MTime(), MTime());
const double threshold = 2.0; // [seconds]; resolution provided by FAT
if(howMuchNewer > threshold) // newer timestamp
return true;
		if(howMuchNewer < -threshold) // older timestamp
return false;
// else: "equal" (tolerating small differences due to FAT's low
// mtime resolution)
}
// 3) precedence (efficiency of file provider)
if(file.m_loader->Precedence() < m_loader->Precedence()) // less efficient
return false;
return true;
}
void VfsFile::GenerateDescription(char* text, size_t maxChars) const
{
char timestamp[25];
const time_t mtime = MTime();
strftime(timestamp, ARRAY_SIZE(timestamp), "%a %b %d %H:%M:%S %Y", localtime(&mtime));
// build format string (set width of name field so that everything
// lines up correctly)
const char* fmt = "(%c; %6d; %s)\n";
sprintf_s(text, maxChars, fmt, m_loader->LocationCode(), Size(), timestamp);
}
LibError VfsFile::Load(const shared_ptr<u8>& buf) const
{
return m_loader->Load(Name(), buf, Size());
}
//-----------------------------------------------------------------------------
VfsDirectory::VfsDirectory()
: m_shouldPopulate(0)
{
}
VfsFile* VfsDirectory::AddFile(const VfsFile& file)
{
std::pair<std::string, VfsFile> value = std::make_pair(file.Name(), file);
std::pair<VfsFiles::iterator, bool> ret = m_files.insert(value);
if(!ret.second) // already existed
{
VfsFile& previousFile = ret.first->second;
const VfsFile& newFile = value.second;
if(previousFile.IsSupersededBy(newFile))
previousFile = newFile;
}
else
stats_vfs_file_add(file.Size());
return &(*ret.first).second;
}
// rationale: passing in a pre-constructed VfsDirectory and copying that into
// our map would be slower and less convenient for the caller.
VfsDirectory* VfsDirectory::AddSubdirectory(const std::string& name)
{
std::pair<std::string, VfsDirectory> value = std::make_pair(name, VfsDirectory());
std::pair<VfsSubdirectories::iterator, bool> ret = m_subdirectories.insert(value);
return &(*ret.first).second;
}
VfsFile* VfsDirectory::GetFile(const std::string& name)
{
VfsFiles::iterator it = m_files.find(name);
if(it == m_files.end())
return 0;
return &it->second;
}
VfsDirectory* VfsDirectory::GetSubdirectory(const std::string& name)
{
VfsSubdirectories::iterator it = m_subdirectories.find(name);
if(it == m_subdirectories.end())
return 0;
return &it->second;
}
void VfsDirectory::GetEntries(FileInfos* files, DirectoryNames* subdirectoryNames) const
{
if(files)
{
files->clear();
files->reserve(m_files.size());
for(VfsFiles::const_iterator it = m_files.begin(); it != m_files.end(); ++it)
files->push_back(FileInfo(it->second.Name(), it->second.Size(), it->second.MTime()));
}
if(subdirectoryNames)
{
subdirectoryNames->clear();
subdirectoryNames->reserve(m_subdirectories.size());
for(VfsSubdirectories::const_iterator it = m_subdirectories.begin(); it != m_subdirectories.end(); ++it)
subdirectoryNames->push_back(it->first);
}
}
void VfsDirectory::DisplayR(size_t depth) const
{
static const char indent[] = " ";
const size_t maxNameChars = 80 - depth*(sizeof(indent)-1);
char fmt[20];
sprintf_s(fmt, ARRAY_SIZE(fmt), "%%-%d.%ds %%s", maxNameChars, maxNameChars);
for(VfsFiles::const_iterator it = m_files.begin(); it != m_files.end(); ++it)
{
const std::string& name = it->first;
const VfsFile& file = it->second;
char description[100];
file.GenerateDescription(description, ARRAY_SIZE(description));
for(size_t i = 0; i < depth+1; i++)
printf(indent);
printf(fmt, name.c_str(), description);
}
for(VfsSubdirectories::const_iterator it = m_subdirectories.begin(); it != m_subdirectories.end(); ++it)
{
const std::string& name = it->first;
const VfsDirectory& directory = it->second;
for(size_t i = 0; i < depth+1; i++)
printf(indent);
printf("[%s/]\n", name.c_str());
directory.DisplayR(depth+1);
}
}
void VfsDirectory::ClearR()
{
for(VfsSubdirectories::iterator it = m_subdirectories.begin(); it != m_subdirectories.end(); ++it)
it->second.ClearR();
m_files.clear();
m_subdirectories.clear();
m_realDirectory.reset();
m_shouldPopulate = 0;
}
void VfsDirectory::SetAssociatedDirectory(const PRealDirectory& realDirectory)
{
if(!cpu_CAS(&m_shouldPopulate, 0, 1))
debug_assert(0); // caller didn't check ShouldPopulate
m_realDirectory = realDirectory;
}
bool VfsDirectory::ShouldPopulate()
{
return cpu_CAS(&m_shouldPopulate, 1, 0); // test and reset
}
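
A worked example of the superseding rules above (a sketch; the sizes, timestamps and priorities are made up, and `loader` stands in for any available PIFileLoader):

static void SupersedeExample(const PIFileLoader& loader)
{
	VfsDirectory directory;
	// a base-mod file (priority 0) ...
	directory.AddFile(VfsFile("a.xml", 100, /*mtime*/ 1000, /*priority*/ 0, loader));
	// ... is superseded by an override mod's copy: its priority is not lower (rule 1)
	// and its timestamp is more than 2 seconds newer (rule 2).
	VfsFile* current = directory.AddFile(VfsFile("a.xml", 120, /*mtime*/ 1010, /*priority*/ 1, loader));
	debug_assert(current->Size() == 120);
}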

View File

@ -1,109 +1,109 @@
/**
* =========================================================================
* File : vfs_tree.h
* Project : 0 A.D.
* Description : 'tree' of VFS directories and files
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_VFS_TREE
#define INCLUDED_VFS_TREE
#include "lib/file/file_system.h" // FileInfo
#include "lib/file/common/file_loader.h" // PIFileLoader
#include "lib/file/common/real_directory.h" // PRealDirectory
class VfsFile
{
public:
VfsFile(const std::string& name, off_t size, time_t mtime, size_t priority, const PIFileLoader& provider);
const std::string& Name() const
{
return m_name;
}
off_t Size() const
{
return m_size;
}
time_t MTime() const
{
return m_mtime;
}
bool IsSupersededBy(const VfsFile& file) const;
void GenerateDescription(char* text, size_t maxChars) const;
LibError Load(const shared_ptr<u8>& buf) const;
private:
std::string m_name;
off_t m_size;
time_t m_mtime;
size_t m_priority;
PIFileLoader m_loader;
};
class VfsDirectory
{
public:
VfsDirectory();
/**
* @return address of existing or newly inserted file; remains
* valid until ClearR is called (i.e. VFS is rebuilt).
**/
VfsFile* AddFile(const VfsFile& file);
/**
* @return address of existing or newly inserted subdirectory; remains
* valid until ClearR is called (i.e. VFS is rebuilt).
**/
VfsDirectory* AddSubdirectory(const std::string& name);
VfsFile* GetFile(const std::string& name);
VfsDirectory* GetSubdirectory(const std::string& name);
void GetEntries(FileInfos* files, DirectoryNames* subdirectories) const;
void DisplayR(size_t depth) const;
void ClearR();
/**
* side effect: the next ShouldPopulate() will return true.
**/
void SetAssociatedDirectory(const PRealDirectory& realDirectory);
const PRealDirectory& AssociatedDirectory() const
{
return m_realDirectory;
}
/**
* @return whether this directory should be populated from its
* AssociatedDirectory(). note that calling this is a promise to
* do so if true is returned -- the flag is immediately reset.
**/
bool ShouldPopulate();
private:
typedef std::map<std::string, VfsFile> VfsFiles;
VfsFiles m_files;
typedef std::map<std::string, VfsDirectory> VfsSubdirectories;
VfsSubdirectories m_subdirectories;
PRealDirectory m_realDirectory;
volatile uintptr_t m_shouldPopulate; // (cpu_CAS can't be used on bool)
};
#endif // #ifndef INCLUDED_VFS_TREE
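
The SetAssociatedDirectory/ShouldPopulate contract documented above can be shown in a few lines (a sketch; the real directory path is illustrative, and the RealDirectory constructor arguments mirror the call in vfs_lookup.cpp):

static void PopulateFlagExample()
{
	VfsDirectory directory;
	PRealDirectory realDirectory(new RealDirectory(Path("mods/public"), 0, 0));
	directory.SetAssociatedDirectory(realDirectory);	// arms the populate flag
	debug_assert(directory.ShouldPopulate());	// first query returns true and resets the flag
	debug_assert(!directory.ShouldPopulate());	// subsequent queries return false
}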

View File

@ -1,117 +1,117 @@
/**
* =========================================================================
 * File : fnv_hash.cpp
* Project : 0 A.D.
* Description : Fowler/Noll/Vo string hash
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
// FNV1-A hash - good for strings.
// if len = 0 (default), treat buf as a C-string;
// otherwise, hash <len> bytes of buf.
u32 fnv_hash(const void* buf, size_t len)
{
u32 h = 0x811c9dc5u;
	// the nonzero seed gives distinct values for zero-filled buffers of
	// differing lengths. the value is taken from FNV; it has no special significance.
const u8* p = (const u8*)buf;
// expected case: string
if(!len)
{
while(*p)
{
h ^= *p++;
h *= 0x01000193u;
}
}
else
{
size_t bytes_left = len;
while(bytes_left != 0)
{
h ^= *p++;
h *= 0x01000193u;
bytes_left--;
}
}
return h;
}
// FNV1-A hash - good for strings.
// if len = 0 (default), treat buf as a C-string;
// otherwise, hash <len> bytes of buf.
u64 fnv_hash64(const void* buf, size_t len)
{
u64 h = 0xCBF29CE484222325ull;
	// the nonzero seed gives distinct values for zero-filled buffers of
	// differing lengths. the value is taken from FNV; it has no special significance.
const u8* p = (const u8*)buf;
// expected case: string
if(!len)
{
while(*p)
{
h ^= *p++;
h *= 0x100000001B3ull;
}
}
else
{
size_t bytes_left = len;
while(bytes_left != 0)
{
h ^= *p++;
h *= 0x100000001B3ull;
bytes_left--;
}
}
return h;
}
// special version for strings: first converts to lowercase
// (useful for comparing mixed-case filenames).
// note: still need <len>, e.g. to support non-0-terminated strings
u32 fnv_lc_hash(const char* str, size_t len)
{
u32 h = 0x811c9dc5u;
	// the nonzero seed gives distinct values for zero-filled buffers of
	// differing lengths. the value is taken from FNV; it has no special significance.
// expected case: string
if(!len)
{
while(*str)
{
h ^= tolower(*str++);
h *= 0x01000193u;
}
}
else
{
size_t bytes_left = len;
while(bytes_left != 0)
{
h ^= tolower(*str++);
h *= 0x01000193u;
bytes_left--;
}
}
return h;
}
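
A few properties follow directly from the code above and can serve as a quick sanity check (debug_assert is the library's assertion macro):

static void FnvSanityChecks()
{
	// an empty C-string leaves the seed untouched, so the result is the offset basis
	debug_assert(fnv_hash("") == 0x811c9dc5u);
	// hashing with an explicit length matches hashing the same bytes as a C-string
	debug_assert(fnv_hash("abc") == fnv_hash("abc", 3));
	// the lowercasing variant ignores filename case
	debug_assert(fnv_lc_hash("Readme.TXT") == fnv_lc_hash("readme.txt"));
}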

View File

@ -1,39 +1,39 @@
/**
* =========================================================================
* File : fnv_hash.h
* Project : 0 A.D.
* Description : Fowler/Noll/Vo string hash
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FNV_HASH
#define INCLUDED_FNV_HASH
/**
* rationale: this algorithm was chosen because it delivers 'good' results
* for string data and is relatively simple. other good alternatives exist;
* see Ozan Yigit's hash roundup.
**/
/**
* calculate FNV1-A hash.
*
* @param buf input buffer.
* @param len if 0 (default), treat buf as a C-string; otherwise,
* indicates how many bytes of buffer to hash.
* @return hash result. note: results are distinct for buffers containing
* differing amounts of zero bytes because the hash value is seeded.
**/
extern u32 fnv_hash(const void* buf, size_t len = 0);
/// 64-bit version of fnv_hash.
extern u64 fnv_hash64(const void* buf, size_t len = 0);
/**
* special version of fnv_hash for strings: first converts to lowercase
* (useful for comparing mixed-case filenames)
**/
extern u32 fnv_lc_hash(const char* str, size_t len = 0);
#endif // #ifndef INCLUDED_FNV_HASH
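
As an example of the explicit-length form mentioned above (required for data that is not 0-terminated, e.g. file contents loaded via the VFS); the helper itself is hypothetical:

static u64 HashFileContents(const shared_ptr<u8>& contents, size_t size)
{
	// the length must be passed explicitly; the buffer is not 0-terminated
	return fnv_hash64(contents.get(), size);
}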

View File

@ -1,217 +1,217 @@
#include "precompiled.h"
#include "frequency_filter.h"
static const double errorTolerance = 0.05f;
static const double sensitivity = 0.10;
/**
* variable-width window for frequency determination
**/
class FrequencyEstimator
{
NONCOPYABLE(FrequencyEstimator);
public:
FrequencyEstimator(double resolution)
: m_minDeltaTime(4.0 * resolution) // chosen to reduce error but still yield rapid updates.
, m_lastTime(0) // will be set on first call
, m_numEvents(0)
{
debug_assert(resolution > 0.0);
}
bool operator()(double time, double& frequency)
{
m_numEvents++;
if(m_lastTime == 0.0)
m_lastTime = time;
// count # events until deltaTime is large enough
// (reduces quantization errors if resolution is low)
const double deltaTime = time - m_lastTime;
if(deltaTime <= m_minDeltaTime)
return false;
frequency = m_numEvents / deltaTime;
m_numEvents = 0;
m_lastTime = time;
return true; // success
}
private:
const double m_minDeltaTime;
double m_lastTime;
int m_numEvents;
};
/**
* variable-gain IIR filter
**/
class IirFilter
{
public:
IirFilter(double sensitivity, double initialValue)
: m_sensitivity(sensitivity), m_prev(initialValue)
{
}
// bias = 0: no change. > 0: increase (n-th root). < 0: decrease (^n)
double operator()(double x, int bias)
{
// sensitivity to changes ([0,1]).
const double gain = pow(m_sensitivity, ComputeExponent(bias));
return m_prev = x*gain + m_prev*(1.0-gain);
}
private:
static double ComputeExponent(int bias)
{
if(bias > 0)
return 1.0 / bias; // n-th root
else if(bias == 0)
return 1.0; // no change
else
return -bias; // power-of-n
}
double m_sensitivity;
double m_prev;
};
/**
* regulate IIR gain for rapid but smooth tracking of a function.
* this is similar in principle to a PID controller but is tuned for
* the special case of FPS values to simplify stabilizing the filter.
**/
class Controller
{
public:
Controller(double initialValue)
: m_timesOnSameSide(0)
{
std::fill(m_history, m_history+m_historySize, initialValue);
}
// bias := exponential change to gain, (-inf, inf)
int ComputeBias(double smoothedValue, double value)
{
if(WasOnSameSide(value)) // (must be checked before updating history)
m_timesOnSameSide++;
else
m_timesOnSameSide = 0;
// update history
		std::copy(m_history+1, m_history+m_historySize, m_history);	// shift left, dropping the oldest entry
m_history[m_historySize-1] = value;
// dampen jitter
if(Change(smoothedValue, value) < 0.04)
return -1;
// dampen spikes/bounces.
if(WasSpike())
return -2;
// if the past few samples have been consistently above/below
// average, the function is changing and we need to catch up.
// (similar to I in a PID)
if(m_timesOnSameSide >= 3)
return std::min(m_timesOnSameSide, 4);
		// suppress large jumps (relative to the previous sample; the newest
		// history entry was just set to the current value above).
		if(Change(m_history[m_historySize-2], value) > 0.30)
			return -4; // gain -> 0
return 0;
}
private:
bool WasOnSameSide(double value) const
{
int sum = 0;
for(size_t i = 0; i < m_historySize; i++)
{
const int vote = (value >= m_history[i])? 1 : -1;
sum += vote;
}
return abs(sum) == (int)m_historySize;
}
static double Change(double from, double to)
{
return fabs(from - to) / from;
}
// /\ or \/ in last three history entries
bool WasSpike() const
{
cassert(m_historySize >= 3);
const double h2 = m_history[m_historySize-3], h1 = m_history[m_historySize-2], h0 = m_history[m_historySize-1];
if(((h2-h1) * (h1-h0)) > 0) // no sign change
return false;
if(Change(h2, h0) > 0.05) // overall change from oldest to newest value
return false;
if(Change(h1, h0) < 0.10) // no intervening spike
return false;
return true;
}
static const size_t m_historySize = 3;
double m_history[m_historySize];
int m_timesOnSameSide;
};
class FrequencyFilter : public IFrequencyFilter
{
NONCOPYABLE(FrequencyFilter);
public:
FrequencyFilter(double resolution, double expectedFrequency)
: m_controller(expectedFrequency), m_frequencyEstimator(resolution), m_iirFilter(sensitivity, expectedFrequency)
, m_stableFrequency((int)expectedFrequency), m_smoothedFrequency(expectedFrequency)
{
}
virtual void Update(double time)
{
double frequency;
if(!m_frequencyEstimator(time, frequency))
return;
const int bias = m_controller.ComputeBias(m_smoothedFrequency, frequency);
m_smoothedFrequency = m_iirFilter(frequency, bias);
// allow the smoothed FPS to free-run until it is no longer near the
// previous stable FPS value. round up because values are more often
// too low than too high.
const double difference = fabs(m_smoothedFrequency - m_stableFrequency);
if(difference > errorTolerance*m_stableFrequency)
m_stableFrequency = (int)(m_smoothedFrequency + 0.99);
}
virtual double SmoothedFrequency() const
{
return m_smoothedFrequency;
}
virtual int StableFrequency() const
{
return m_stableFrequency;
}
private:
FrequencyEstimator m_frequencyEstimator;
Controller m_controller;
IirFilter m_iirFilter;
int m_stableFrequency;
double m_smoothedFrequency;
};
PIFrequencyFilter CreateFrequencyFilter(double resolution, double expectedFrequency)
{
return PIFrequencyFilter(new FrequencyFilter(resolution, expectedFrequency));
}
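
To illustrate the variable-gain IIR step: with sensitivity = 0.10 and bias = 0 the gain is 0.10, so a drop from 60 Hz to 30 Hz only moves the smoothed value to 30*0.1 + 60*0.9 = 57; with bias = 4 ("catch up") the gain rises to 0.10^(1/4), about 0.56, and the same input yields roughly 43. The sketch below assumes it is compiled within this translation unit, since IirFilter is file-local:

static void IirIllustration()
{
	IirFilter sluggish(0.10, 60.0);
	const double slow = sluggish(30.0, 0);	// ~57.0 (gain 0.10)
	IirFilter eager(0.10, 60.0);
	const double fast = eager(30.0, 4);	// ~43.1 (gain ~0.56)
	debug_assert(fast < slow);
}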
#include "precompiled.h"
#include "frequency_filter.h"
static const double errorTolerance = 0.05f;
static const double sensitivity = 0.10;
/**
* variable-width window for frequency determination
**/
class FrequencyEstimator
{
NONCOPYABLE(FrequencyEstimator);
public:
FrequencyEstimator(double resolution)
: m_minDeltaTime(4.0 * resolution) // chosen to reduce error but still yield rapid updates.
, m_lastTime(0) // will be set on first call
, m_numEvents(0)
{
debug_assert(resolution > 0.0);
}
bool operator()(double time, double& frequency)
{
m_numEvents++;
if(m_lastTime == 0.0)
m_lastTime = time;
// count # events until deltaTime is large enough
// (reduces quantization errors if resolution is low)
const double deltaTime = time - m_lastTime;
if(deltaTime <= m_minDeltaTime)
return false;
frequency = m_numEvents / deltaTime;
m_numEvents = 0;
m_lastTime = time;
return true; // success
}
private:
const double m_minDeltaTime;
double m_lastTime;
int m_numEvents;
};
/**
* variable-gain IIR filter
**/
class IirFilter
{
public:
IirFilter(double sensitivity, double initialValue)
: m_sensitivity(sensitivity), m_prev(initialValue)
{
}
// bias = 0: no change. > 0: increase (n-th root). < 0: decrease (^n)
double operator()(double x, int bias)
{
// sensitivity to changes ([0,1]).
const double gain = pow(m_sensitivity, ComputeExponent(bias));
return m_prev = x*gain + m_prev*(1.0-gain);
}
private:
static double ComputeExponent(int bias)
{
if(bias > 0)
return 1.0 / bias; // n-th root
else if(bias == 0)
return 1.0; // no change
else
return -bias; // power-of-n
}
double m_sensitivity;
double m_prev;
};
/**
* regulate IIR gain for rapid but smooth tracking of a function.
* this is similar in principle to a PID controller but is tuned for
* the special case of FPS values to simplify stabilizing the filter.
**/
class Controller
{
public:
Controller(double initialValue)
: m_timesOnSameSide(0)
{
std::fill(m_history, m_history+m_historySize, initialValue);
}
// bias := exponential change to gain, (-inf, inf)
int ComputeBias(double smoothedValue, double value)
{
if(WasOnSameSide(value)) // (must be checked before updating history)
m_timesOnSameSide++;
else
m_timesOnSameSide = 0;
// update history
std::copy(m_history, m_history+m_historySize, m_history+1);
m_history[m_historySize-1] = value;
// dampen jitter
if(Change(smoothedValue, value) < 0.04)
return -1;
// dampen spikes/bounces.
if(WasSpike())
return -2;
// if the past few samples have been consistently above/below
// average, the function is changing and we need to catch up.
// (similar to I in a PID)
if(m_timesOnSameSide >= 3)
return std::min(m_timesOnSameSide, 4);
// suppress large jumps.
if(Change(m_history[m_historySize-1], value) > 0.30)
return -4; // gain -> 0
return 0;
}
private:
bool WasOnSameSide(double value) const
{
int sum = 0;
for(size_t i = 0; i < m_historySize; i++)
{
const int vote = (value >= m_history[i])? 1 : -1;
sum += vote;
}
return abs(sum) == (int)m_historySize;
}
static double Change(double from, double to)
{
return fabs(from - to) / from;
}
// /\ or \/ in last three history entries
bool WasSpike() const
{
cassert(m_historySize >= 3);
const double h2 = m_history[m_historySize-3], h1 = m_history[m_historySize-2], h0 = m_history[m_historySize-1];
if(((h2-h1) * (h1-h0)) > 0) // no sign change
return false;
if(Change(h2, h0) > 0.05) // overall change from oldest to newest value
return false;
if(Change(h1, h0) < 0.10) // no intervening spike
return false;
return true;
}
static const size_t m_historySize = 3;
double m_history[m_historySize];
int m_timesOnSameSide;
};
class FrequencyFilter : public IFrequencyFilter
{
NONCOPYABLE(FrequencyFilter);
public:
FrequencyFilter(double resolution, double expectedFrequency)
: m_controller(expectedFrequency), m_frequencyEstimator(resolution), m_iirFilter(sensitivity, expectedFrequency)
, m_stableFrequency((int)expectedFrequency), m_smoothedFrequency(expectedFrequency)
{
}
virtual void Update(double time)
{
double frequency;
if(!m_frequencyEstimator(time, frequency))
return;
const int bias = m_controller.ComputeBias(m_smoothedFrequency, frequency);
m_smoothedFrequency = m_iirFilter(frequency, bias);
// allow the smoothed FPS to free-run until it is no longer near the
// previous stable FPS value. round up because values are more often
// too low than too high.
const double difference = fabs(m_smoothedFrequency - m_stableFrequency);
if(difference > errorTolerance*m_stableFrequency)
m_stableFrequency = (int)(m_smoothedFrequency + 0.99);
}
virtual double SmoothedFrequency() const
{
return m_smoothedFrequency;
}
virtual int StableFrequency() const
{
return m_stableFrequency;
}
private:
FrequencyEstimator m_frequencyEstimator;
Controller m_controller;
IirFilter m_iirFilter;
int m_stableFrequency;
double m_smoothedFrequency;
};
PIFrequencyFilter CreateFrequencyFilter(double resolution, double expectedFrequency)
{
return PIFrequencyFilter(new FrequencyFilter(resolution, expectedFrequency));
}

View File

@ -1,21 +1,21 @@
#ifndef INCLUDED_FREQUENCY_FILTER
#define INCLUDED_FREQUENCY_FILTER
// calculate frequency of events (tuned for 100 Hz)
struct IFrequencyFilter
{
	virtual void Update(double time) = 0;	// time [seconds] at which the latest event occurred
// smoothed but rapidly tracked frequency
virtual double SmoothedFrequency() const = 0;
// stable, non-fluctuating value for user display
virtual int StableFrequency() const = 0;
};
typedef shared_ptr<IFrequencyFilter> PIFrequencyFilter;
// expectedFrequency is a guess that hopefully speeds up convergence
LIB_API PIFrequencyFilter CreateFrequencyFilter(double resolution, double expectedFrequency);
#endif // #ifndef INCLUDED_FREQUENCY_FILTER
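
A typical use, as a sketch: create one filter, feed it the current time once per frame, and display StableFrequency as the FPS counter. The timer resolution and expected frequency below are assumptions, and OnFrame is a hypothetical caller.

static PIFrequencyFilter fpsFilter = CreateFrequencyFilter(1e-3, 60.0);	// assumed 1 ms timer resolution, ~60 Hz expected

static void OnFrame(double now)	// [seconds]
{
	fpsFilter->Update(now);
	const int fps = fpsFilter->StableFrequency();	// stable value suitable for on-screen display
	(void)fps;	// ... pass to the renderer / HUD ...
}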

View File

@ -1,29 +1,29 @@
#include "lib/sysdep/compiler.h"
// note: EXTERN_C cannot be used because shared_ptr is often returned
// by value, which requires C++ linkage.
#ifdef LIB_STATIC_LINK
# define LIB_API
#else
# if MSC_VERSION
# ifdef LIB_BUILD
# define LIB_API __declspec(dllexport)
# else
# define LIB_API __declspec(dllimport)
# ifdef NDEBUG
# pragma comment(lib, "lib.lib")
# else
# pragma comment(lib, "lib_d.lib")
# endif
# endif
# elif GCC_VERSION
# ifdef LIB_BUILD
# define LIB_API __attribute__ ((visibility("default")))
# else
# define LIB_API
# endif
# else
# error "Don't know how to define LIB_API for this compiler"
# endif
#endif
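For illustration only (the header name, include path and function below are invented), this is how LIB_API is intended to be consumed by one of the library's public headers:
// example_api.h - hypothetical public header
#include "lib/lib_api.h" // path assumed
LIB_API int example_ComputeAnswer(int input); // visible to clients of the shared library
// build configurations, conceptually:
//   building the library:  define LIB_BUILD        -> dllexport / visibility("default")
//   using the DLL:         define nothing          -> dllimport (plus the auto-link pragma on MSVC)
//   static linking:        define LIB_STATIC_LINK  -> LIB_API expands to nothing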
#include "lib/sysdep/compiler.h"
// note: EXTERN_C cannot be used because shared_ptr is often returned
// by value, which requires C++ linkage.
#ifdef LIB_STATIC_LINK
# define LIB_API
#else
# if MSC_VERSION
# ifdef LIB_BUILD
# define LIB_API __declspec(dllexport)
# else
# define LIB_API __declspec(dllimport)
# ifdef NDEBUG
# pragma comment(lib, "lib.lib")
# else
# pragma comment(lib, "lib_d.lib")
# endif
# endif
# elif GCC_VERSION
# ifdef LIB_BUILD
# define LIB_API __attribute__ ((visibility("default")))
# else
# define LIB_API
# endif
# else
# error "Don't know how to define LIB_API for this compiler"
# endif
#endif

View File

@ -1,74 +1,74 @@
/**
* =========================================================================
* File : module_init.cpp
* Project : 0 A.D.
* Description : helpers for module initialization/shutdown.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "module_init.h"
#include "lib/sysdep/cpu.h" // cpu_CAS, cpu_AtomicAdd
// notes:
// - value must be 0 to allow users to just define uninitialized static
// variables (they don't have access to our MODULE_* symbols)
// - unlike expected in-game operation, the self-tests require repeated
// sequences of init/shutdown pairs. we therefore allow this in general
// (resetting back to MODULE_UNINITIALIZED after shutdown) because
// there's no real disadvantage other than loss of strictness.
static const ModuleInitState MODULE_UNINITIALIZED = 0u;
// (1..N = reference count)
static const ModuleInitState MODULE_ERROR = ~(uintptr_t)1u;
bool ModuleShouldInitialize(volatile ModuleInitState* pInitState)
{
// currently uninitialized, so give the green light.
if(cpu_CAS(pInitState, MODULE_UNINITIALIZED, 1))
return true;
// increment reference count - unless already in a final state.
retry:
ModuleInitState latchedInitState = *pInitState;
if(latchedInitState == MODULE_ERROR)
return false;
if(!cpu_CAS(pInitState, latchedInitState, latchedInitState+1))
goto retry;
return false;
}
bool ModuleShouldShutdown(volatile ModuleInitState* pInitState)
{
// decrement reference count - unless already in a final state.
retry:
ModuleInitState latchedInitState = *pInitState;
if(latchedInitState == MODULE_UNINITIALIZED || latchedInitState == MODULE_ERROR)
return false;
if(!cpu_CAS(pInitState, latchedInitState, latchedInitState-1))
goto retry;
// refcount reached zero => allow shutdown.
if(latchedInitState-1 == MODULE_UNINITIALIZED)
return true;
return false;
}
void ModuleSetError(volatile ModuleInitState* pInitState)
{
*pInitState = MODULE_ERROR;
}
bool ModuleIsError(volatile ModuleInitState* pInitState)
{
return (*pInitState == MODULE_ERROR);
}
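A sketch of the intended call pattern (the module name, its setup/teardown routines and the include path are invented for illustration; the real call sites are not shown in this diff):
#include "lib/module_init.h" // path assumed
static bool xyz_DoSetup();    // hypothetical module-specific routines
static void xyz_DoTeardown();
static ModuleInitState xyz_initState; // zero-initialized => MODULE_UNINITIALIZED
void xyz_Init()
{
if(!ModuleShouldInitialize(&xyz_initState)) // true only for the first caller
return;
if(!xyz_DoSetup())
ModuleSetError(&xyz_initState); // subsequent Init/Shutdown calls now bail out
}
void xyz_Shutdown()
{
if(!ModuleShouldShutdown(&xyz_initState)) // true only when the refcount drops to zero
return;
xyz_DoTeardown();
}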

Some files were not shown because too many files have changed in this diff.