
add WIP file code to a new folder (not included in the build)

This was SVN commit r5443.
janwas 2007-11-10 13:39:45 +00:00
parent abc8edceeb
commit 149ebb67c0
24 changed files with 3496 additions and 0 deletions


@ -0,0 +1,250 @@
/**
* =========================================================================
* File : dir_util.cpp
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "dir_util.h"
#include <queue>
#include "filesystem.h"
#include "path.h"
#include "lib/path_util.h"
#include "lib/regex.h"
bool dir_FileExists(IFilesystem* fs, const char* pathname)
{
FilesystemEntry fsEntry;
if(fs->GetEntry(pathname, fsEntry) < 0)
return false;
debug_assert(!fsEntry.IsDirectory());
return true;
}
bool dir_DirectoryExists(IFilesystem* fs, const char* dirPath)
{
FilesystemEntry fsEntry;
if(fs->GetEntry(dirPath, fsEntry) < 0)
return false;
debug_assert(fsEntry.IsDirectory());
return true;
}
struct FsEntryNameLess : public std::binary_function<const FilesystemEntry, const FilesystemEntry, bool>
{
bool operator()(const FilesystemEntry& fsEntry1, const FilesystemEntry& fsEntry2) const
{
return strcmp(fsEntry1.name, fsEntry2.name) < 0;
}
};
LibError dir_GatherSortedEntries(IFilesystem* fs, const char* dirPath, FilesystemEntries& fsEntries)
{
DirectoryIterator di(fs, dirPath);
fsEntries.reserve(50); // preallocate for efficiency
FilesystemEntry fsEntry;
for(;;)
{
LibError ret = di.NextEntry(fsEntry);
if(ret == ERR::DIR_END)
break;
RETURN_ERR(ret);
fsEntries.push_back(fsEntry);
}
std::sort(fsEntries.begin(), fsEntries.end(), FsEntryNameLess());
return INFO::OK;
}
LibError dir_ForEachSortedEntry(IFilesystem* fs, const char* dirPath, const DirCallback cb, const uintptr_t cbData)
{
PathPackage pp;
RETURN_ERR(path_package_set_dir(&pp, dirPath));
FilesystemEntries fsEntries;
RETURN_ERR(dir_GatherSortedEntries(fs, dirPath, fsEntries));
for(FilesystemEntries::const_iterator it = fsEntries.begin(); it != fsEntries.end(); ++it)
{
const FilesystemEntry& fsEntry = *it;
path_package_append_file(&pp, fsEntry.name);
LibError ret = cb(pp.path, fsEntry, cbData);
if(ret != INFO::CB_CONTINUE)
return ret;
}
return INFO::OK;
}
LibError dir_filtered_next_ent(DirectoryIterator& di, FilesystemEntry& fsEntry, const char* filter)
{
bool want_dir = true;
if(filter)
{
// directory
if(filter[0] == '/')
{
// .. and also files
if(filter[1] == '|')
filter += 2;
}
// file only
else
want_dir = false;
}
// loop until fsEntry matches what is requested, or end of directory.
for(;;)
{
RETURN_ERR(di.NextEntry(fsEntry));
if(fsEntry.IsDirectory())
{
if(want_dir)
break;
}
else
{
// (note: filter = 0 matches anything)
if(match_wildcard(fsEntry.name, filter))
break;
}
}
return INFO::OK;
}
// call <cb> for each fsEntry matching <user_filter> (see dir_filtered_next_ent)
// in directory <dirPath>; if flags & VFS_DIR_RECURSIVE, entries in
// subdirectories are also returned.
//
// note: the pathname and fsEntry passed to <cb> are only valid during the callback.
LibError dir_FilteredForEachEntry(IFilesystem* fs, const char* dirPath, uint flags, const char* user_filter, DirCallback cb, uintptr_t cbData)
{
debug_assert((flags & ~(VFS_DIR_RECURSIVE)) == 0);
const bool recursive = (flags & VFS_DIR_RECURSIVE) != 0;
char filter_buf[PATH_MAX];
const char* filter = user_filter;
bool user_filter_wants_dirs = true;
if(user_filter)
{
if(user_filter[0] != '/')
user_filter_wants_dirs = false;
// we need subdirectories and the caller hasn't already requested them
if(recursive && !user_filter_wants_dirs)
{
snprintf(filter_buf, sizeof(filter_buf), "/|%s", user_filter);
filter = filter_buf;
}
}
// note: FIFO queue instead of recursion is much more efficient
// (less stack usage; avoids seeks by reading all entries in a
// directory consecutively)
std::queue<const char*> dir_queue;
dir_queue.push(path_UniqueCopy(dirPath));
// for each directory:
do
{
// get current directory path from queue
// note: can't refer to the queue contents - those are invalidated
// as soon as a directory is pushed onto it.
PathPackage pp;
(void)path_package_set_dir(&pp, dir_queue.front());
dir_queue.pop();
DirectoryIterator di(fs, pp.path);
// for each fsEntry (file, subdir) in directory:
FilesystemEntry fsEntry;
while(dir_filtered_next_ent(di, fsEntry, filter) == 0)
{
// build complete path (FilesystemEntry only stores fsEntry name)
(void)path_package_append_file(&pp, fsEntry.name);
const char* atom_path = path_UniqueCopy(pp.path);
if(fsEntry.IsDirectory())
{
if(recursive)
dir_queue.push(atom_path);
if(user_filter_wants_dirs)
cb(atom_path, fsEntry, cbData);
}
else
cb(atom_path, fsEntry, cbData);
}
}
while(!dir_queue.empty());
return INFO::OK;
}
// fill <next_fn> (which must be big enough for PATH_MAX chars) with
// the next numbered filename according to the pattern defined by <fn_fmt>.
// <state> must be initially zeroed (e.g. by defining it as static) and passed
// in on every call.
// the paths are interpreted by the given IFilesystem; this indirection is
// necessary because one of our callers needs a filename for VFS archive files.
//
// this function is useful when creating new files that should not
// overwrite previous ones, e.g. screenshots.
// example for fn_fmt: "screenshots/screenshot%04d.png".
void dir_NextNumberedFilename(IFilesystem* fs, const char* fn_fmt, NextNumberedFilenameState* state, char* next_fn)
{
// (first call only:) scan directory and set next_num according to
// highest matching filename found. this avoids filling "holes" in
// the number series due to deleted files, which could be confusing.
// example: add 1st and 2nd; [exit] delete 1st; [restart]
// add 3rd -> without this measure it would get number 1, not 3.
if(state->next_num == 0)
{
char dirPath[PATH_MAX];
path_dir_only(fn_fmt, dirPath);
const char* name_fmt = path_name_only(fn_fmt);
int max_num = -1; int num;
DirectoryIterator di(fs, dirPath);
FilesystemEntry fsEntry;
while(di.NextEntry(fsEntry) == INFO::OK)
{
if(!fsEntry.IsDirectory() && sscanf(fsEntry.name, name_fmt, &num) == 1)
max_num = std::max(num, max_num);
}
state->next_num = max_num+1;
}
// now increment the number until a file of that name doesn't yet exist.
// this is fairly slow, but typically only happens once thanks to the
// scan loop above. (we still need to loop here since someone may have
// added files in the meantime.)
// binary search isn't expected to improve things.
do
snprintf(next_fn, PATH_MAX, fn_fmt, state->next_num++);
while(dir_FileExists(fs, next_fn));
}
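
The sorted-enumeration helper above is driven entirely through the DirCallback; a minimal usage sketch follows, assuming an IFilesystem implementation is already available as fs and that dir_util.h lives at the path shown below. The callback name AddFileSize and the size-summing logic are illustrative only.

#include "lib/file/dir_util.h"

// example callback: sum the sizes of all files in a directory.
// returning INFO::CB_CONTINUE keeps the enumeration going.
static LibError AddFileSize(const char* pathname, const FilesystemEntry& fsEntry, const uintptr_t cbData)
{
	(void)pathname;	// full path not needed for counting
	off_t* totalSize = (off_t*)cbData;
	if(!fsEntry.IsDirectory())
		*totalSize += fsEntry.size;
	return INFO::CB_CONTINUE;
}

static off_t TotalSizeOfDirectory(IFilesystem* fs, const char* dirPath)
{
	off_t totalSize = 0;
	(void)dir_ForEachSortedEntry(fs, dirPath, AddFileSize, (uintptr_t)&totalSize);
	return totalSize;
}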

source/lib/file/dir_util.h Normal file (104 lines)

@ -0,0 +1,104 @@
/**
* =========================================================================
* File : dir_util.h
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_DIR_UTIL
#define INCLUDED_DIR_UTIL
#include "filesystem.h"
extern bool dir_FileExists(IFilesystem* fs, const char* pathname);
extern bool dir_DirectoryExists(IFilesystem* fs, const char* dirPath);
typedef std::vector<FilesystemEntry> FilesystemEntries;
// enumerate all directory entries in <dirPath>; add them to the container and
// then sort it by filename.
extern LibError dir_GatherSortedEntries(IFilesystem* fs, const char* dirPath, FilesystemEntries& fsEntries);
// called by dir_ForEachSortedEntry for each entry in the directory.
// return INFO::CB_CONTINUE to continue calling; anything else will cause
// dir_ForEachSortedEntry to abort and immediately return that value.
typedef LibError (*DirCallback)(const char* pathname, const FilesystemEntry& fsEntry, const uintptr_t cbData);
// call <cb> for each file and subdirectory in <dirPath> (in alphabetical order),
// passing the complete pathname, the FilesystemEntry, and <cbData>.
//
// first builds a sorted list of entries and remembers if an error occurred.
// if <cb> returns anything other than INFO::CB_CONTINUE, abort immediately and
// return that value; otherwise, return the first error encountered while
// listing files, or INFO::OK on success.
//
// rationale:
// this makes dir_ForEachSortedEntry and zip_enum slightly incompatible, since zip_enum
// returns the full path. that's necessary because VFS zip_cb
// has no other way of determining what VFS dir a Zip file is in,
// since zip_enum enumerates all files in the archive (not only those
// in a given dir). no big deal though, since add_ent has to
// special-case Zip files anyway.
// the advantage here is simplicity, and sparing callbacks the trouble
// of converting from/to native paths.
extern LibError dir_ForEachSortedEntry(IFilesystem* fs, const char* dirPath, DirCallback cb, uintptr_t cbData);
// retrieve the next (order is unspecified) dir entry matching <filter>.
// return 0 on success, ERR::DIR_END if no matching entry was found,
// or a negative error code on failure.
// filter values:
// - 0: anything;
// - "/": any subdirectory;
// - "/|<pattern>": any subdirectory, or as below with <pattern>;
// - <pattern>: any file whose name matches; ? and * wildcards are allowed.
//
// note that the directory entries are only scanned once; after the
// end is reached (-> ERR::DIR_END returned), no further entries can
// be retrieved, even if filter changes (which shouldn't happen - see impl).
//
// rationale: we do not sort directory entries alphabetically here.
// most callers don't need it and the overhead is considerable
// (we'd have to store all entries in a vector). it is left up to
// higher-level code such as VfsUtil.
extern LibError dir_filtered_next_ent(DirectoryIterator& di, FilesystemEntry& fsEntry, const char* filter);
enum DirEnumFlags
{
VFS_DIR_RECURSIVE = 1
};
// call <cb> for each entry matching <user_filter> (see vfs_next_dirent) in
// directory <path>; if flags & VFS_DIR_RECURSIVE, entries in
// subdirectories are also returned.
extern LibError dir_FilteredForEachEntry(IFilesystem* fs, const char* dirPath, uint enum_flags, const char* filter, DirCallback cb, uintptr_t cbData);
struct NextNumberedFilenameState
{
int next_num;
};
// fill V_next_fn (which must be big enough for PATH_MAX chars) with
// the next numbered filename according to the pattern defined by V_fn_fmt.
// <nnfs> must be initially zeroed (e.g. by defining as static) and passed
// each time.
// the paths are interpreted by the given IFilesystem; this indirection is
// necessary because one of our callers needs a filename for VFS archive files.
//
// this function is useful when creating new files that should not
// overwrite previous ones, e.g. screenshots.
// example for V_fn_fmt: "screenshots/screenshot%04d.png".
extern void dir_NextNumberedFilename(IFilesystem* fs, const char* V_fn_fmt, NextNumberedFilenameState* nnfs, char* V_next_fn);
#endif // #ifndef INCLUDED_DIR_UTIL
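
As a usage illustration for dir_NextNumberedFilename, a hypothetical screenshot writer might look like the sketch below; the persistent state object ensures the directory is only scanned on the first call, and IFilesystem::CreateFile (declared in filesystem.h) performs the actual write. The names WriteNextScreenshot and s_screenshotState are assumptions for the example.

#include "lib/file/dir_util.h"

static NextNumberedFilenameState s_screenshotState;	// zero-initialized (static storage)

static LibError WriteNextScreenshot(IFilesystem* fs, const u8* imageData, size_t imageSize)
{
	char pathname[PATH_MAX];
	dir_NextNumberedFilename(fs, "screenshots/screenshot%04d.png", &s_screenshotState, pathname);
	return fs->CreateFile(pathname, imageData, imageSize);
}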


@ -0,0 +1,380 @@
/**
* =========================================================================
* File : file_cache.cpp
* Project : 0 A.D.
* Description : cache of file contents (supports zero-copy IO)
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "file_cache.h"
#include <map>
#include "path.h"
#include "file_stats.h"
#include "archive/trace.h"
#include "lib/cache_adt.h" // Cache
#include "lib/lockfree.h"
#include "lib/bits.h" // round_up
#include "lib/allocators/allocators.h"
#include "lib/allocators/headerless.h"
#include "lib/allocators/mem_util.h" // mem_PageSize
// >= sys_max_sector_size or else waio will have to realign.
// chosen as exactly 1 page: this allows write-protecting file buffers
// without worrying about their (non-page-aligned) borders.
// internal fragmentation is considerable but acceptable.
static const size_t alignment = mem_PageSize();
//-----------------------------------------------------------------------------
// allocator
/*
the biggest worry of a file cache is external fragmentation. there are two
basic ways to combat this:
1) 'defragment' periodically - move blocks around to increase
size of available 'holes'.
2) prevent fragmentation from occurring at all via
deliberate alloc/free policy.
file contents are returned directly to the user (zero-copy IO), so only
currently unreferenced blocks can be moved. it is believed that this would
severely hamper defragmentation; we therefore go with the latter approach.
the basic insight is: fragmentation occurs when a block is freed whose
neighbors are not free (thus preventing coalescing). this can be prevented by
allocating objects of similar lifetimes together. typical workloads
(uniform access frequency) already show such behavior: the Landlord cache
manager evicts files in an LRU manner, which matches the allocation policy.
references:
"The Memory Fragmentation Problem - Solved?" (Johnstone and Wilson)
"Dynamic Storage Allocation - A Survey and Critical Review" (Johnstone and Wilson)
*/
class Allocator
{
public:
Allocator(size_t maxSize)
: m_allocator(maxSize)
{
}
IoBuf Allocate(size_t size)
{
const size_t alignedSize = round_up(size, alignment);
stats_buf_alloc(size, alignedSize);
void* p = m_allocator.Allocate(alignedSize);
#ifndef NDEBUG
m_checker.notify_alloc(p, alignedSize);
#endif
return (IoBuf)p;
}
void Deallocate(IoBuf buf, size_t size)
{
void* const p = (void*)buf;
// (re)allow writes. it would be nice to un-map the buffer, but this is
// not possible because HeaderlessAllocator needs to affix boundary tags.
(void)mprotect(p, size, PROT_READ|PROT_WRITE);
const size_t alignedSize = round_up(size, alignment);
#ifndef NDEBUG
m_checker.notify_free(p, alignedSize);
#endif
m_allocator.Deallocate(p, alignedSize);
stats_buf_free();
}
private:
HeaderlessAllocator m_allocator;
#ifndef NDEBUG
AllocatorChecker m_checker;
#endif
};
//-----------------------------------------------------------------------------
typedef LF_RefCountedMemRange FileContents;
/**
* manages the active FileContents referenced by users.
* ("active" means between Reserve() and the final Release())
**/
class ActiveList
{
public:
~ActiveList()
{
// display leaks
debug_printf("file_cache leaks:\n");
for(MapIt it = m_map.begin(); it != m_map.end(); ++it)
{
const char* atom_fn = it->first;
FileContents& fc = it->second;
debug_printf(" %s (0x%P 0x%08x)\n", atom_fn, fc.mem, fc.size);
}
debug_printf("--------\n");
}
void Add(const char* atom_fn, FileContents& fc)
{
const PairIB ret = m_map.insert(std::make_pair(atom_fn, fc));
debug_assert(ret.second); // complain if already existed
}
void Remove(const char* atom_fn)
{
const size_t numRemoved = m_map.erase(atom_fn);
debug_assert(numRemoved == 1);
}
FileContents* Find(const char* atom_fn)
{
MapIt it = m_map.find(atom_fn);
if(it == m_map.end())
return 0; // not an error
return &it->second;
}
// (called by FileCache::Impl::AllocateCacheSpace; we can't pass
// atom_fn because Cache only knows about buf and size.)
bool Contains(IoBuf buf) const
{
for(MapCIt it = m_map.begin(); it != m_map.end(); ++it)
{
const FileContents& fc = it->second;
if(fc.mem == buf)
return true;
}
return false;
}
private:
typedef std::map<const char*, FileContents> Map;
typedef Map::iterator MapIt;
typedef Map::const_iterator MapCIt;
typedef std::pair<MapIt, bool> PairIB;
Map m_map;
};
//-----------------------------------------------------------------------------
// FileCache::Impl
//-----------------------------------------------------------------------------
// the organization of this cache is somewhat counterintuitive. one might
// expect a simple mapping of filename to FileContents. however, since users
// are strongly encouraged to only load/process one file at a time, there
// will only be a few active references. with the cache holding many more
// entries, looking up files there is more expensive than consulting a
// separate list of active FileContents.
// this list (the "manager") and the cache contents are not necessarily
// related; no inclusion relation need hold. the only requirement is that
// each consult the other on ownership issues. if the cache decides a file
// should be evicted while references to it are active, or users release a
// reference to FileContents that the cache wants to keep, the memory must
// not actually be freed. (it is then logically 'owned' by the other)
class FileCache::Impl
{
public:
Impl(size_t size)
: m_allocator(size)
{
}
IoBuf Reserve(const char* atom_fn, size_t size)
{
// (this probably indicates a bug; caching 0-length files would
// have no benefit, anyway)
debug_assert(size != 0);
IoBuf buf = AllocateCacheSpace(size);
FileContents fc;
fc.refs.AcquireExclusiveAccess();
fc.mem = (void*)buf;
fc.size = size;
m_activeList.Add(atom_fn, fc);
return buf;
}
void MarkComplete(const char* atom_fn, uint cost)
{
FileContents* fc = m_activeList.Find(atom_fn);
debug_assert(fc);
fc->refs.RelinquishExclusiveAccess();
// zero-copy cache => all users share the contents => must not
// allow changes. this will be reverted when the buffer is freed.
(void)mprotect(fc->mem, fc->size, PROT_READ);
m_cache.add(atom_fn, (IoBuf)fc->mem, fc->size, cost);
}
IoBuf Retrieve(const char* atom_fn, size_t& size)
{
IoBuf buf;
if(!m_cache.retrieve(atom_fn, buf, &size))
return 0;
FileContents* pfc = m_activeList.Find(atom_fn);
// was already active; add a reference.
if(pfc)
pfc->refs.AddReference();
// in cache, but no active references; add to list.
else
{
FileContents fc;
fc.refs.AddReference();
fc.mem = (void*)buf;
fc.size = size;
m_activeList.Add(atom_fn, fc);
}
stats_buf_ref();
return buf;
}
void Release(const char* atom_fn)
{
FileContents* fc = m_activeList.Find(atom_fn);
debug_assert(fc);
fc->refs.Release();
if(fc->refs.ReferenceCount() == 0)
{
trace_notify_free(atom_fn);
if(!IsInCache(atom_fn))
m_allocator.Deallocate((IoBuf)fc->mem, fc->size);
m_activeList.Remove(atom_fn);
}
}
LibError Invalidate(const char* atom_fn)
{
// remove from cache
IoBuf cachedBuf = 0; size_t cachedSize = 0;	// remain 0 if not in cache (see IsActive check below)
if(m_cache.peek(atom_fn, cachedBuf, &cachedSize))
{
m_cache.remove(atom_fn);
// note: we ensure cachedBuf is not active below.
m_allocator.Deallocate(cachedBuf, cachedSize);
}
// this could happen if a hotload notification comes while someone
// is holding a reference to the file contents. atom_fn has been
// removed from the cache, so subsequent Retrieve() calls will not
// return old data. however, (re)loading the file would fail because
// Reserve() ensures there's not already an extant buffer.
// the correct way to handle this is to delay or cancel the reload,
// so we notify our caller accordingly.
if(IsActive(cachedBuf))
WARN_RETURN(ERR::AGAIN); // if this actually happens, remove the warning.
return INFO::OK;
}
private:
bool IsActive(IoBuf buf) const
{
return m_activeList.Contains(buf);
}
bool IsInCache(const char* atom_fn) const
{
IoBuf cachedBuf; size_t cachedSize; // unused
return m_cache.peek(atom_fn, cachedBuf, &cachedSize);
}
IoBuf AllocateCacheSpace(size_t size)
{
uint attempts = 0;
for(;;)
{
IoBuf buf = m_allocator.Allocate(size);
if(buf)
return buf;
// remove least valuable entry from cache
IoBuf discardedBuf; size_t discardedSize;
bool removed = m_cache.remove_least_valuable(&discardedBuf, &discardedSize);
// only false if cache is empty, which can't be the case because
// allocation failed.
debug_assert(removed);
// someone is holding a reference; we must not free the
// underlying memory, nor count this iteration.
if(IsActive(discardedBuf))
continue;
m_allocator.Deallocate(discardedBuf, discardedSize);
// note: this may seem hefty, but 300 is known to be reached.
// (after building an archive, the file cache will be full;
// attempting to allocate a few MB can take a while if only
// small scattered blocks are freed.)
debug_assert(++attempts < 500); // otherwise: failed to make room in cache?!
}
}
ActiveList m_activeList;
// HACK: due to atom_fn, we are assured that strings are equal iff their
// addresses match. however, Cache's STL (hash_)map stupidly assumes that
// const char* keys are "strings". to avoid this behavior, we specify the
// key as const void*.
Cache<const void*, IoBuf> m_cache;
Allocator m_allocator;
};
//-----------------------------------------------------------------------------
FileCache::FileCache(size_t size)
: impl(new Impl(size))
{
}
IoBuf FileCache::Reserve(const char* atom_fn, size_t size)
{
return impl.get()->Reserve(atom_fn, size);
}
void FileCache::MarkComplete(const char* atom_fn, uint cost)
{
impl.get()->MarkComplete(atom_fn, cost);
}
IoBuf FileCache::Retrieve(const char* atom_fn, size_t& size)
{
return impl.get()->Retrieve(atom_fn, size);
}
void FileCache::Release(const char* atom_fn)
{
impl.get()->Release(atom_fn);
}
LibError FileCache::Invalidate(const char* atom_fn)
{
return impl.get()->Invalidate(atom_fn);
}
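
The ERR::AGAIN path in Invalidate suggests the following shape for a hotload handler. This is a sketch only: OnFileChanged and ReloadLater are hypothetical names for the caller's notification hook and requeue helper.

static void OnFileChanged(FileCache& fileCache, const char* atom_fn)
{
	// drop the cached contents so the next load sees the new data.
	const LibError ret = fileCache.Invalidate(atom_fn);
	// someone still holds a reference to the old contents; reloading now
	// would trip Reserve()'s assertion, so defer the reload instead.
	if(ret == ERR::AGAIN)
		ReloadLater(atom_fn);	// hypothetical requeue helper
}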


@ -0,0 +1,111 @@
/**
* =========================================================================
* File : file_cache.h
* Project : 0 A.D.
* Description : cache of file contents (supports zero-copy IO)
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILE_CACHE
#define INCLUDED_FILE_CACHE
#include <boost/shared_ptr.hpp>
#include "io_buf.h"
/**
* cache of file contents with support for zero-copy IO.
* this works by reserving a region of the cache, using it as the IO buffer,
* and returning the memory directly to users. optional write-protection
* via MMU ensures that the shared contents aren't inadvertently changed.
*
* to ensure efficient operation and prevent fragmentation, only one
* reference should be active at a time. in other words, read a file,
* process it, and only then start reading the next file.
*
* rationale: this is very similar to BlockCache; however, the differences
* (Reserve's size and MarkComplete's cost parameters and different eviction
* policies) are enough to warrant separate implementations.
**/
class FileCache
{
public:
/**
* @param size maximum amount [bytes] of memory to use for the cache.
* (managed as a virtual memory region that's committed on-demand)
**/
FileCache(size_t size);
/**
* Allocate an IO buffer in the cache's memory region.
*
* @param atom_fn pathname of the file that is to be read; this is
* the key that will be used to Retrieve the file contents.
* @param size required number of bytes (more may be allocated due to
* alignment and/or internal fragmentation)
* @return suitably aligned memory; never fails.
*
* no further operations with the same atom_fn are allowed to succeed
* until MarkComplete has been called.
**/
IoBuf Reserve(const char* atom_fn, size_t size);
/**
* Indicate that IO into the buffer has completed.
*
* this allows the cache to satisfy subsequent Retrieve() calls by
* returning this buffer; if CONFIG_READ_ONLY_CACHE, the buffer is
* made read-only. if need be and no references are currently attached
* to it, the memory can also be commandeered by Reserve().
*
* @param cost is the expected cost of retrieving the file again and
* influences how/when it is evicted from the cache.
**/
void MarkComplete(const char* atom_fn, uint cost = 1);
/**
* Attempt to retrieve a file's contents from the file cache.
*
* @return 0 if not in cache or its IO is still pending, otherwise a
* pointer to its (read-only) contents.
*
* if successful, the size is passed back and a reference is added to
* the file contents.
*
* note: does not call stats_cache because it does not know the file size
* in case of a cache miss; doing so is left to the caller.
**/
const u8* Retrieve(const char* atom_fn, size_t& size);
/**
* Indicate the file contents are no longer needed.
*
* this decreases the reference count; the memory can only be reused
* if it reaches 0. the contents remain in cache until they are evicted
* by a subsequent Reserve() call.
*
* note: fails (raises a warning) if called for a file that is
* currently between Reserve and MarkComplete operations.
**/
void Release(const char* atom_fn);
/**
* Invalidate the cached contents of a file.
*
* this ensures subsequent reads of the file see the current (presumably
* recently changed) contents of the file. has no effect if the file is
* not cached at the moment.
*
* this would typically be called in response to a notification that a
* file has changed.
**/
LibError Invalidate(const char* atom_fn);
private:
class Impl;
boost::shared_ptr<Impl> impl;
};
#endif // #ifndef INCLUDED_FILE_CACHE
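
Putting the FileCache interface together, a load path would look roughly like the sketch below. s_fileCache, the 96 MB size and ReadIntoBuffer (a stand-in for the actual IO routine) are illustrative assumptions; the important part is the Reserve / MarkComplete / Retrieve / Release ordering.

#include "lib/file/file_cache.h"

static FileCache s_fileCache(96*1024*1024);	// size chosen for illustration

static const u8* LoadThroughCache(const char* atom_fn, size_t fileSize)
{
	// cache hit? (adds a reference to the contents)
	size_t cachedSize;
	const u8* contents = s_fileCache.Retrieve(atom_fn, cachedSize);
	if(contents)
		return contents;

	// miss: reserve an IO buffer, fill it, then publish the contents.
	IoBuf buf = s_fileCache.Reserve(atom_fn, fileSize);
	ReadIntoBuffer(atom_fn, buf, fileSize);	// hypothetical IO helper
	s_fileCache.MarkComplete(atom_fn);
	return buf;
}

// in both cases, the caller must call s_fileCache.Release(atom_fn)
// once it is done with the contents.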


@ -0,0 +1,335 @@
/**
* =========================================================================
* File : file_stats.cpp
* Project : 0 A.D.
* Description : gathers statistics from all file modules.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "file_stats.h"
#include <set>
#include "lib/timer.h"
typedef std::set<const char*> AtomFnSet;
typedef std::pair<AtomFnSet::iterator, bool> PairIB;
// vfs
static uint vfs_files;
static size_t vfs_size_total;
static double vfs_init_elapsed_time;
// file
static uint unique_names;
static size_t unique_name_len_total;
static uint open_files_cur, open_files_max; // total = opened_files.size()
static double opened_file_size_total;
static AtomFnSet opened_files;
// file_buf
static uint extant_bufs_cur, extant_bufs_max, extant_bufs_total;
static double buf_size_total, buf_aligned_size_total;
// file_io
static uint user_ios;
static double user_io_size_total;
static double io_actual_size_total[FI_MAX_IDX][2];
static double io_elapsed_time[FI_MAX_IDX][2];
static double io_process_time_total;
static uint io_seeks;
// file_cache
static uint cache_count[2];
static double cache_size_total[2];
static AtomFnSet ever_cached_files;
static uint conflict_misses;
static double conflict_miss_size_total;
static uint block_cache_count[2];
// archive builder
static uint ab_connection_attempts; // total number of trace entries
static uint ab_repeated_connections; // how many of these were not unique
// convenience functions for measuring elapsed time in an interval.
// by exposing start/finish calls, we spare callers from querying
// timestamps when stats are disabled.
static double start_time;
static void timer_start(double* start_time_storage = &start_time)
{
// make sure no measurement is currently active
// (since start_time is shared static storage)
debug_assert(*start_time_storage == 0.0);
*start_time_storage = get_time();
}
static double timer_reset(double* start_time_storage = &start_time)
{
double elapsed = get_time() - *start_time_storage;
*start_time_storage = 0.0;
return elapsed;
}
//-----------------------------------------------------------------------------
//
// vfs
//
void stats_vfs_file_add(size_t file_size)
{
vfs_files++;
vfs_size_total += file_size;
}
void stats_vfs_file_remove(size_t file_size)
{
vfs_files--;
vfs_size_total -= file_size;
}
void stats_vfs_init_start()
{
timer_start();
}
void stats_vfs_init_finish()
{
vfs_init_elapsed_time += timer_reset();
}
//
// file
//
void stats_unique_name(size_t name_len)
{
unique_names++;
unique_name_len_total += name_len;
}
void stats_open(const char* atom_fn, size_t file_size)
{
open_files_cur++;
open_files_max = std::max(open_files_max, open_files_cur);
PairIB ret = opened_files.insert(atom_fn);
// hadn't been opened yet
if(ret.second)
opened_file_size_total += file_size;
}
void stats_close()
{
debug_assert(open_files_cur > 0);
open_files_cur--;
}
//
// file_buf
//
void stats_buf_alloc(size_t size, size_t alignedSize)
{
extant_bufs_cur++;
extant_bufs_max = std::max(extant_bufs_max, extant_bufs_cur);
extant_bufs_total++;
buf_size_total += size;
buf_aligned_size_total += alignedSize;
}
void stats_buf_free()
{
debug_assert(extant_bufs_cur > 0);
extant_bufs_cur--;
}
void stats_buf_ref()
{
extant_bufs_cur++;
}
//
// file_io
//
void stats_io_user_request(size_t user_size)
{
user_ios++;
user_io_size_total += user_size;
}
ScopedIoMonitor::ScopedIoMonitor()
{
timer_start(&m_startTime);
}
ScopedIoMonitor::~ScopedIoMonitor()
{
// note: we can only bill IOs that have succeeded :S
}
void ScopedIoMonitor::NotifyOfSuccess(FileIOImplentation fi, char mode, size_t size)
{
debug_assert(fi < FI_MAX_IDX);
debug_assert(mode == 'r' || mode == 'w');
const FileOp op = (mode == 'r')? FO_READ : FO_WRITE;
io_actual_size_total[fi][op] += size;
io_elapsed_time[fi][op] += timer_reset(&m_startTime);
}
void stats_io_check_seek(BlockId& blockId)
{
static BlockId lastBlockId;
if(blockId != lastBlockId)
io_seeks++;
lastBlockId = blockId;
}
void stats_cb_start()
{
timer_start();
}
void stats_cb_finish()
{
io_process_time_total += timer_reset();
}
//
// file_cache
//
void stats_cache(CacheRet cr, size_t size, const char* atom_fn)
{
debug_assert(cr == CR_HIT || cr == CR_MISS);
if(cr == CR_MISS)
{
PairIB ret = ever_cached_files.insert(atom_fn);
if(!ret.second) // was already cached once
{
conflict_miss_size_total += size;
conflict_misses++;
}
}
cache_count[cr]++;
cache_size_total[cr] += size;
}
void stats_block_cache(CacheRet cr)
{
debug_assert(cr == CR_HIT || cr == CR_MISS);
block_cache_count[cr]++;
}
//
// archive builder
//
void stats_ab_connection(bool already_exists)
{
ab_connection_attempts++;
if(already_exists)
ab_repeated_connections++;
}
//-----------------------------------------------------------------------------
template<typename T> int percent(T num, T divisor)
{
if(!divisor)
return 0;
return (int)(100*num / divisor);
}
void file_stats_dump()
{
if(!debug_filter_allows("FILE_STATS|"))
return;
const double KB = 1e3; const double MB = 1e6; const double ms = 1e-3;
debug_printf("--------------------------------------------------------------------------------\n");
debug_printf("File statistics:\n");
// note: we split the reports into several debug_printfs for clarity;
// this is necessary anyway due to fixed-size buffer.
debug_printf(
"\nvfs:\n"
"Total files: %u (%g MB)\n"
"Init/mount time: %g ms\n",
vfs_files, vfs_size_total/MB,
vfs_init_elapsed_time/ms
);
debug_printf(
"\nfile:\n"
"Total names: %u (%u KB)\n"
"Accessed files: %u (%g MB) -- %u%% of data set\n"
"Max. concurrent: %u; leaked: %u.\n",
unique_names, unique_name_len_total/1000,
opened_files.size(), opened_file_size_total/MB, percent(opened_files.size(), (size_t)vfs_files),
open_files_max, open_files_cur
);
debug_printf(
"\nfile_buf:\n"
"Total buffers used: %u (%g MB)\n"
"Max concurrent: %u; leaked: %u\n"
"Internal fragmentation: %d%%\n",
extant_bufs_total, buf_size_total/MB,
extant_bufs_max, extant_bufs_cur,
percent(buf_aligned_size_total-buf_size_total, buf_size_total)
);
debug_printf(
"\nfile_io:\n"
"Total user load requests: %u (%g MB)\n"
"IO thoughput [MB/s; 0=never happened]:\n"
" lowio: R=%.3g, W=%.3g\n"
" aio: R=%.3g, W=%.3g\n"
"Average size = %g KB; seeks: %u; total callback time: %g ms\n"
"Total data actually read from disk = %g MB\n",
user_ios, user_io_size_total/MB,
#define THROUGHPUT(impl, op) (io_elapsed_time[impl][op] == 0.0)? 0.0 : (io_actual_size_total[impl][op] / io_elapsed_time[impl][op] / MB)
THROUGHPUT(FI_LOWIO, FO_READ), THROUGHPUT(FI_LOWIO, FO_WRITE),
THROUGHPUT(FI_AIO , FO_READ), THROUGHPUT(FI_AIO , FO_WRITE),
user_io_size_total/user_ios/KB, io_seeks, io_process_time_total/ms,
(io_actual_size_total[FI_LOWIO][FO_READ]+io_actual_size_total[FI_AIO][FO_READ])/MB
);
debug_printf(
"\nfile_cache:\n"
"Hits: %u (%g MB); misses %u (%g MB); ratio: %u%%\n"
"Percent of requested bytes satisfied by cache: %u%%; non-compulsory misses: %u (%u%% of misses)\n"
"Block hits: %u; misses: %u; ratio: %u%%\n",
cache_count[CR_HIT], cache_size_total[CR_HIT]/MB, cache_count[CR_MISS], cache_size_total[CR_MISS]/MB, percent(cache_count[CR_HIT], cache_count[CR_HIT]+cache_count[CR_MISS]),
percent(cache_size_total[CR_HIT], cache_size_total[CR_HIT]+cache_size_total[CR_MISS]), conflict_misses, percent(conflict_misses, cache_count[CR_MISS]),
block_cache_count[CR_HIT], block_cache_count[CR_MISS], percent(block_cache_count[CR_HIT], block_cache_count[CR_HIT]+block_cache_count[CR_MISS])
);
debug_printf(
"\nvfs_optimizer:\n"
"Total trace entries: %u; repeated connections: %u; unique files: %u\n",
ab_connection_attempts, ab_repeated_connections, ab_connection_attempts-ab_repeated_connections
);
}


@ -0,0 +1,103 @@
/**
* =========================================================================
* File : file_stats.h
* Project : 0 A.D.
* Description : gathers statistics from all file modules.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILE_STATS
#define INCLUDED_FILE_STATS
#define FILE_STATS_ENABLED 1
enum FileIOImplentation { FI_LOWIO, FI_AIO, FI_BCACHE, FI_MAX_IDX };
enum FileOp { FO_READ, FO_WRITE };
enum CacheRet { CR_HIT, CR_MISS };
#include "io/block_cache.h" // BlockId
#if FILE_STATS_ENABLED
// vfs
extern void stats_vfs_file_add(size_t file_size);
extern void stats_vfs_file_remove(size_t file_size);
extern void stats_vfs_init_start();
extern void stats_vfs_init_finish();
// file
extern void stats_unique_name(size_t name_len);
extern void stats_open(const char* atom_fn, size_t file_size);
extern void stats_close();
// file_buf
extern void stats_buf_alloc(size_t size, size_t alignedSize);
extern void stats_buf_free();
extern void stats_buf_ref();
// file_io
extern void stats_io_user_request(size_t user_size);
// this is used to measure effective throughput for the two
// synchronous IO variants.
// note: improved measurements of the actual aio throughput by instrumenting
// issue/wait doesn't work because IOManager's decompression may cause us to
// miss the exact end of IO, thus throwing off measurements.
class ScopedIoMonitor
{
public:
ScopedIoMonitor();
~ScopedIoMonitor();
void NotifyOfSuccess(FileIOImplentation fi, char mode, size_t size);
private:
double m_startTime;
};
extern void stats_io_check_seek(BlockId& blockId);
extern void stats_cb_start();
extern void stats_cb_finish();
// file_cache
extern void stats_cache(CacheRet cr, size_t size, const char* atom_fn);
extern void stats_block_cache(CacheRet cr);
// archive builder
extern void stats_ab_connection(bool already_exists);
extern void file_stats_dump();
#else
#define stats_vfs_file_add(file_size)
#define stats_vfs_file_remove(file_size)
#define stats_vfs_init_start()
#define stats_vfs_init_finish()
#define stats_unique_name(name_len)
#define stats_open(atom_fn, file_size)
#define stats_close()
#define stats_buf_alloc(size, alignedSize)
#define stats_buf_free()
#define stats_buf_ref()
#define stats_io_user_request(user_size)
class ScopedIoMonitor
{
public:
ScopedIoMonitor() {}
~ScopedIoMonitor() {}
void NotifyOfSuccess(FileIOImplentation fi, char mode, size_t size) {}
};
#define stats_io_check_seek(blockId)
#define stats_cb_start()
#define stats_cb_finish()
#define stats_cache(cr, size, atom_fn)
#define stats_block_cache(cr)
#define stats_ab_connection(already_exists)
#define file_stats_dump()
#endif
#endif // #ifndef INCLUDED_FILE_STATS
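
Call sites are written identically whether or not FILE_STATS_ENABLED is set; when it is 0, the macros above make every stats_* call vanish. A small sketch of an instrumented read path (the read itself is elided; ReadWithStats is an illustrative name):

#include "lib/file/file_stats.h"

static LibError ReadWithStats(FileIOImplentation fi, size_t size)
{
	stats_io_user_request(size);		// no-op when FILE_STATS_ENABLED == 0
	ScopedIoMonitor monitor;
	// ... issue and wait for the actual IO here (not shown) ...
	monitor.NotifyOfSuccess(fi, 'r', size);	// bills the elapsed time to <fi>
	return INFO::OK;
}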


@ -0,0 +1,20 @@
#include "precompiled.h"
#include "filesystem.h"
#include "path.h"
ERROR_ASSOCIATE(ERR::FILE_ACCESS, "Insufficient access rights to open file", EACCES);
ERROR_ASSOCIATE(ERR::DIR_END, "End of directory reached (no more files)", -1);
ERROR_ASSOCIATE(ERR::IO, "Error during IO", EIO);
ERROR_ASSOCIATE(ERR::IO_EOF, "Reading beyond end of file", -1);
// rationale for out-of-line dtors: see [Lakos]
IDirectoryIterator::~IDirectoryIterator()
{
}
IFilesystem::~IFilesystem()
{
}


@ -0,0 +1,185 @@
/**
* =========================================================================
* File : filesystem.h
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FILESYSTEM
#define INCLUDED_FILESYSTEM
#include <boost/shared_ptr.hpp>
#include "io/io_manager.h" // IoCallback
namespace ERR
{
const LibError FILE_ACCESS = -110000;
const LibError DIR_NOT_FOUND = -110002;
const LibError DIR_END = -110003;
}
/**
* information describing filesystem entries (i.e. files or directories)
*
* note: don't be extravagant with memory - dir_ForEachSortedEntry allocates
* one instance of this per directory entry.
**/
struct FilesystemEntry
{
off_t size;
time_t mtime;
/**
* name of the entry; does not include a path.
* the underlying storage is guaranteed to remain valid and must not
* be freed/modified.
**/
const char* name;
// only defined for VFS files; points to their TMount.
const void* mount;
bool IsDirectory() const
{
return (size == -1);
}
};
// (note: this is defined in the public header to promote inlining of the
// DirectoryIterator (concrete class) member functions.)
// documentation: see DirectoryIterator
struct IDirectoryIterator
{
virtual ~IDirectoryIterator();
virtual LibError NextEntry(FilesystemEntry& fsEntry) = 0;
};
/**
* flags controlling file IO and caching behavior.
**/
enum FileFlags
{
// translate newlines: convert from/to native representation when
// reading/writing. this is useful if files we create need to be
// edited externally - e.g. Notepad requires \r\n.
// caveats:
// - FILE_NO_AIO must be set; translation is done by OS read()/write().
// - not supported by POSIX, so this currently only has meaning on Win32.
FILE_TEXT = 0x01,
// skip the aio path and use the OS-provided synchronous blocking
// read()/write() calls. this avoids the need for buffer alignment
// set out below, so it's useful for writing small text files.
// note: despite its more heavyweight operation, aio is still
// worthwhile for small files, so it is not automatically disabled.
FILE_NO_AIO = 0x02,
// do not add the (entire) contents of this file to the cache.
// this flag should be specified when the data is cached at a higher
// level (e.g. OpenGL textures) to avoid wasting precious cache space.
FILE_NO_CACHE = 0x04,
// enable caching individual blocks read from a file. the block cache
// is small, organized as LRU and incurs some copying overhead, so it
// should only be enabled when needed. this is the case for archives,
// where the cache absorbs overhead of block-aligning all IOs.
FILE_CACHE_BLOCK = 0x08,
// instruct file_open not to set FileCommon.atom_fn.
// this is a slight optimization used by VFS code: file_open
// would store the portable name, which is only used when calling
// the OS's open(); this would unnecessarily waste atom_fn memory.
//
// note: other file.cpp functions require atom_fn to be set,
// so this behavior is only triggered via flag (caller is
// promising they will set atom_fn).
FILE_DONT_SET_FN = 0x20,
// (only relevant for VFS) file will be written into the
// appropriate subdirectory of the mount point established by
// vfs_set_write_target. see documentation there.
FILE_WRITE_TO_TARGET = 0x40,
// sum of all flags above. used when validating flag parameters.
FILE_FLAG_ALL = 0x7F
};
struct IFilesystem
{
virtual ~IFilesystem();
/**
* @return a single character identifying the filesystem.
*
* this is useful for VFS directory listings, where an indication is
* made of where the file is actually stored.
**/
virtual char IdentificationCode() const = 0;
/**
* @return a number that represents the precedence of this filesystem.
*
* when mounting into the VFS, entries from a filesystem with higher
* precedence override otherwise equivalent files.
**/
virtual int Precedence() const = 0;
virtual LibError GetEntry(const char* pathname, FilesystemEntry& fsEntry) const = 0;
virtual LibError CreateDirectory(const char* dirPath) = 0;
virtual LibError DeleteDirectory(const char* dirPath) = 0;
virtual IDirectoryIterator* OpenDirectory(const char* dirPath) const = 0;
// note: only allowing either reads or writes simplifies file cache
// coherency (need only invalidate when closing a FILE_WRITE file).
virtual LibError CreateFile(const char* pathname, const u8* buf, size_t size, uint flags = 0) = 0;
virtual LibError DeleteFile(const char* pathname) = 0;
// read the entire file.
// return number of bytes transferred (see above), or a negative error code.
//
// if non-NULL, <cb> is called for each block transferred, passing <cbData>.
// it returns how much data was actually transferred, or a negative error
// code (in which case we abort the transfer and return that value).
// the callback mechanism is useful for user progress notification or
// processing data while waiting for the next I/O to complete
// (quasi-parallel, without the complexity of threads).
virtual LibError LoadFile(const char* pathname, const u8*& buf, size_t size, uint flags = 0, IoCallback cb = 0, uintptr_t cbData = 0) = 0;
};
/**
* (mostly) insulating concrete class providing iterator access to
* directory entries.
* this is usable for posix, VFS, etc.; instances are created via IFilesystem.
**/
class DirectoryIterator
{
public:
DirectoryIterator(IFilesystem* fs, const char* dirPath)
: m_impl(fs->OpenDirectory(dirPath))
{
}
// return ERR::DIR_END if all entries have already been returned once,
// another negative error code, or INFO::OK on success, in which case <fsEntry>
// describes the next (order is unspecified) directory entry.
LibError NextEntry(FilesystemEntry& fsEntry)
{
return m_impl.get()->NextEntry(fsEntry);
}
private:
boost::shared_ptr<IDirectoryIterator> m_impl;
};
#endif // #ifndef INCLUDED_FILESYSTEM
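
DirectoryIterator is the intended way to walk a directory; a minimal sketch, assuming fs points to a concrete IFilesystem implementation (PrintDirectory is an illustrative name):

static void PrintDirectory(IFilesystem* fs, const char* dirPath)
{
	DirectoryIterator di(fs, dirPath);
	FilesystemEntry fsEntry;
	for(;;)
	{
		const LibError ret = di.NextEntry(fsEntry);
		if(ret == ERR::DIR_END)	// all entries have been returned
			break;
		if(ret < 0)		// some other error; bail out
			break;
		debug_printf("%c %s\n", fsEntry.IsDirectory()? 'd' : 'f', fsEntry.name);
	}
}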


@ -0,0 +1,217 @@
/**
* =========================================================================
* File : block_cache.cpp
* Project : 0 A.D.
* Description : cache for aligned I/O blocks.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "block_cache.h"
#include "lib/file/file_stats.h"
#include "lib/lockfree.h"
#include "lib/allocators/pool.h"
//-----------------------------------------------------------------------------
BlockId::BlockId()
: m_atom_fn(0), m_blockIndex(~0u)
{
}
BlockId::BlockId(const char* atom_fn, off_t ofs)
{
debug_assert(ofs <= (u64)BLOCK_SIZE * 0xFFFFFFFF); // ensure value fits in m_blockIndex
m_atom_fn = atom_fn; // unique (by definition)
m_blockIndex = (u32)(ofs / BLOCK_SIZE);
}
bool BlockId::operator==(const BlockId& rhs) const
{
return m_atom_fn == rhs.m_atom_fn && m_blockIndex == rhs.m_blockIndex;
}
bool BlockId::operator!=(const BlockId& rhs) const
{
return !operator==(rhs);
}
//-----------------------------------------------------------------------------
typedef LF_RefCountedMemRange Block;
class BlockManager
{
public:
BlockManager(size_t numBlocks)
: m_ids(numBlocks), m_blocks(numBlocks), m_oldestIndex(0)
{
for(size_t i = 0; i < m_blocks.size(); i++)
m_blocks[i].mem = (void*)io_buf_Allocate(BLOCK_SIZE);
}
~BlockManager()
{
for(size_t i = 0; i < m_blocks.size(); i++)
io_buf_Deallocate((IoBuf)m_blocks[i].mem, BLOCK_SIZE);
}
// (linear search is ok since we only expect to manage a few blocks)
Block* Find(const BlockId& id)
{
for(size_t i = 0; i < m_ids.size(); i++)
{
if(m_ids[i] == id)
return &m_blocks[i];
}
return 0;
}
Block* AcquireOldestAvailableBlock(BlockId id)
{
for(size_t i = 0; i < m_ids.size(); i++)
{
// (blocks are evicted in FIFO order.)
const size_t idx = m_oldestIndex % m_blocks.size();
cpu_AtomicAdd(&m_oldestIndex, +1);
Block& block = m_blocks[idx];
if(block.refs.AcquireExclusiveAccess())
{
// store the id at the index of the block it now describes
m_ids[idx] = id;
return &block;
}
// the oldest item is currently locked, so keep looking.
//
// to see when this can happen, consider IO depth = 4. let the
// Block at m_blocks[m_oldestIndex] contain data that an IO wants.
// the 2nd and 3rd blocks are not in cache and happen to be taken
// from near the end of m_blocks[]. attempting to issue block #4
// fails because its buffer would want the first slot
// (which is locked since its IO is still pending).
}
DEBUG_WARN_ERR(ERR::LIMIT); // all blocks are locked
return 0;
}
void InvalidateAll()
{
// note: don't check whether any references are held etc. because
// this should only be called at the end of the (test) program.
for(size_t i = 0; i < m_blocks.size(); i++)
m_ids[i] = BlockId();
}
private:
std::vector<BlockId> m_ids;
std::vector<Block> m_blocks;
volatile intptr_t m_oldestIndex;
};
//-----------------------------------------------------------------------------
class BlockCache::Impl
{
public:
Impl(size_t cacheSize)
: m_blockManager(cacheSize / BLOCK_SIZE)
{
}
IoBuf Reserve(BlockId id)
{
debug_assert(!m_blockManager.Find(id)); // must not already be extant
Block* block = m_blockManager.AcquireOldestAvailableBlock(id);
#if CONFIG_READ_ONLY_CACHE
mprotect(block->mem, block->size, PROT_WRITE|PROT_READ);
#endif
return (IoBuf)block->mem;
}
void MarkComplete(BlockId id)
{
Block* block = m_blockManager.Find(id);
debug_assert(block); // (<id> cannot have been evicted because it is locked)
block->refs.RelinquishExclusiveAccess();
#if CONFIG_READ_ONLY_CACHE
mprotect(block->mem, block->size, PROT_READ);
#endif
}
IoBuf Retrieve(BlockId id)
{
Block* block = m_blockManager.Find(id);
if(!block) // not found
return 0;
if(!block->refs.AddReference()) // contents are not yet valid
return 0; // (this can happen due to multithreaded IOs)
return (IoBuf)block->mem;
}
void Release(BlockId id)
{
Block* block = m_blockManager.Find(id);
// (<id> ought not yet have been evicted because it is still referenced;
// if not found, Release was called too often)
debug_assert(block);
block->refs.Release();
}
void InvalidateAll()
{
m_blockManager.InvalidateAll();
}
private:
BlockManager m_blockManager;
};
//-----------------------------------------------------------------------------
BlockCache::BlockCache(size_t cacheSize)
: impl(new Impl(cacheSize))
{
}
IoBuf BlockCache::Reserve(BlockId id)
{
return impl.get()->Reserve(id);
}
void BlockCache::MarkComplete(BlockId id)
{
impl.get()->MarkComplete(id);
}
IoBuf BlockCache::Retrieve(BlockId id)
{
return impl.get()->Retrieve(id);
}
void BlockCache::Release(BlockId id)
{
impl.get()->Release(id);
}
void BlockCache::InvalidateAll()
{
return impl.get()->InvalidateAll();
}


@ -0,0 +1,121 @@
/**
* =========================================================================
* File : block_cache.h
* Project : 0 A.D.
* Description : cache for aligned I/O blocks.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_BLOCK_CACHE
#define INCLUDED_BLOCK_CACHE
#include <boost/shared_ptr.hpp>
#include "io_buf.h"
/**
* block := power-of-two sized chunk of a file.
* all transfers are expanded to naturally aligned, whole blocks
* (this makes caching parts of files feasible; it is also much faster
* for some aio implementations, e.g. wposix).
*
* measurements show this value to yield best read throughput.
**/
static const size_t BLOCK_SIZE = 32*KiB;
/**
* ID that uniquely identifies a block within a file
**/
class BlockId
{
public:
BlockId();
BlockId(const char* atom_fn, off_t ofs);
bool operator==(const BlockId& rhs) const;
bool operator!=(const BlockId& rhs) const;
private:
const char* m_atom_fn;
u32 m_blockIndex;
};
/**
* cache of (aligned) file blocks with support for zero-copy IO.
* absorbs the overhead of rounding up archive IOs to the nearest block
* boundaries by keeping the last few blocks in memory.
*
* the interface is quite similar to FileCache; see the note there.
**/
class BlockCache
{
public:
/**
* @param cacheSize total size [bytes] that the cache is to manage.
* the default value is enough to support temp buffers and
* absorb the cost of unaligned reads from a few archives.
**/
BlockCache(size_t cacheSize = 16 * BLOCK_SIZE);
/**
* Reserve a block for use as an IO buffer.
*
* @return suitably aligned memory; never fails.
*
* no further operations with the same id are allowed to succeed
* until MarkComplete has been called.
**/
IoBuf Reserve(BlockId id);
/**
* Indicate that IO into the block has completed.
*
* this allows the cache to satisfy subsequent Retrieve() calls by
* returning this block; if CONFIG_READ_ONLY_CACHE, the block is
* made read-only. if need be and no references are currently attached
* to it, the memory can also be commandeered by Reserve().
**/
void MarkComplete(BlockId id);
/**
* Attempt to retrieve a block from the cache.
*
* @return 0 if not in cache or its IO is still pending, otherwise a
* pointer to its (read-only) contents.
*
* if successful, a reference is added to the block.
**/
IoBuf Retrieve(BlockId id);
/**
* Indicate the block contents are no longer needed.
*
* this decreases the reference count; the memory can only be reused
* if it reaches 0. the block remains in cache until evicted by a
* subsequent Reserve() call.
*
* note: fails (raises a warning) if called for a buffer that is
* currently between Reserve and MarkComplete operations.
**/
void Release(BlockId id);
/**
* Invalidate the contents of the cache.
*
* this effectively discards the contents of existing blocks
* (more specifically: prevents them from satisfying Retrieve() calls
* until a subsequent Reserve/MarkComplete of that block).
*
* useful for self-tests: multiple independent IO tests run in the same
* process and must not influence each other via the cache.
**/
void InvalidateAll();
private:
class Impl;
boost::shared_ptr<Impl> impl;
};
#endif // #ifndef INCLUDED_BLOCK_CACHE
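
To show how BlockCache is meant to be driven (mirroring BlockIo in io_manager.cpp), here is a sketch of reading and processing one aligned block. IssueAndWait is a hypothetical stand-in for the real aio path, and ProcessBlock is an illustrative name.

#include "lib/file/io/block_cache.h"

static BlockCache s_blockCache;	// default size: 16 * BLOCK_SIZE

static LibError ProcessBlock(const char* atom_fn, off_t ofs, void (*process)(const u8*, size_t))
{
	const BlockId id(atom_fn, ofs);

	// cache hit: adds a reference, which must be released afterwards.
	IoBuf block = s_blockCache.Retrieve(id);
	if(block)
	{
		process(block, BLOCK_SIZE);
		s_blockCache.Release(id);
		return INFO::OK;
	}

	// miss: reserve a block buffer, fill it via IO, then publish it.
	block = s_blockCache.Reserve(id);
	RETURN_ERR(IssueAndWait(atom_fn, ofs, block, BLOCK_SIZE));	// hypothetical aio wrapper
	process(block, BLOCK_SIZE);
	s_blockCache.MarkComplete(id);	// now visible to later Retrieve() calls
	return INFO::OK;
}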


@ -0,0 +1,28 @@
#include "precompiled.h"
#include "io_buf.h"
#include "lib/allocators/allocators.h" // AllocatorChecker
#ifndef NDEBUG
static AllocatorChecker allocatorChecker;
#endif
IoBuf io_buf_Allocate(size_t size)
{
void* p = page_aligned_alloc(size);
if(!p)
throw std::bad_alloc();
#ifndef NDEBUG
allocatorChecker.notify_alloc(p, size);
#endif
return (IoBuf)p;
}
void io_buf_Deallocate(IoBuf buf, size_t size)
{
void* p = (void*)buf;
#ifndef NDEBUG
allocatorChecker.notify_free(p, size);
#endif
page_aligned_free(p, size);
}


@ -0,0 +1,24 @@
/**
* =========================================================================
* File : io_buf.h
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_IO_BUF
#define INCLUDED_IO_BUF
typedef const u8* IoBuf;
const IoBuf IO_BUF_TEMP = 0;
const IoBuf IO_BUF_ALLOC = (IoBuf)1;
// memory will be allocated from the heap, not the (limited) file cache.
// this makes sense for write buffers that are never used again,
// because we avoid having to displace some other cached items.
extern IoBuf io_buf_Allocate(size_t size);
extern void io_buf_Deallocate(IoBuf buf, size_t size);
#endif // #ifndef INCLUDED_IO_BUF
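
A short sketch of the intended use of these heap buffers for a small write (as opposed to going through the file cache). CreateFile is the IFilesystem method declared in filesystem.h; the fill step is left abstract and WriteSmallFile is an illustrative name.

static LibError WriteSmallFile(IFilesystem* fs, const char* pathname, size_t size)
{
	IoBuf buf = io_buf_Allocate(size);	// page-aligned heap memory
	// ... fill (u8*)buf with <size> bytes of output (not shown) ...
	const LibError ret = fs->CreateFile(pathname, buf, size);
	io_buf_Deallocate(buf, size);
	return ret;
}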


@ -0,0 +1,352 @@
/**
* =========================================================================
* File : io_manager.cpp
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "io_manager.h"
#include <boost/shared_ptr.hpp>
#include "../posix/io_posix.h"
#include "../file_stats.h"
#include "block_cache.h"
// the underlying aio implementation likes buffer and offset to be
// sector-aligned; if not, the transfer goes through an align buffer,
// and requires an extra cpu_memcpy.
//
// if the user specifies an unaligned buffer, there's not much we can
// do - we can't assume the buffer contains padding. therefore,
// callers should let us allocate the buffer if possible.
//
// if the offset misalignment equals the buffer misalignment, only the
// first and last blocks need to be copied by aio, since we read up to
// the next block boundary. otherwise, everything has to be copied; at
// least we split the read into blocks, so aio's buffer won't have to
// cover the whole file.
// note: cutting off at EOF is necessary to avoid transfer errors,
// but makes size no longer sector-aligned, which would force
// waio to realign (slow). we want to pad back to sector boundaries
// afterwards (to avoid realignment), but that is not possible here
// since we have no control over the buffer (there might not be
// enough room in it). hence, do cut-off in IOManager.
//
// example: 200-byte file. IOManager issues (large) blocks;
// that ends up way beyond EOF, so ReadFile fails.
// limiting size to 200 bytes works, but causes waio to pad the
// transfer and use align buffer (slow).
// rounding up to 512 bytes avoids realignment and does not fail
// (apparently since NTFS files are sector-padded anyway?)
LibError io_InvokeCallback(const u8* block, size_t size, IoCallback cb, uintptr_t cbData, size_t& bytesProcessed)
{
if(cb)
{
stats_cb_start();
LibError ret = cb(cbData, block, size, &bytesProcessed);
stats_cb_finish();
// failed - reset byte count in case callback didn't
if(ret != INFO::OK && ret != INFO::CB_CONTINUE)
bytesProcessed = 0;
CHECK_ERR(ret); // user might not have raised a warning; make sure
return ret;
}
// no callback to process data: raw = actual
else
{
bytesProcessed = size;
return INFO::CB_CONTINUE;
}
}
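// usage sketch for the callback contract above: an IoCallback that merely
// counts the bytes it is handed. a real callback would e.g. decompress
// <block> and report the number of output bytes via <bytesProcessed>.
// (CountBytesCb is an illustrative name, not part of this module.)
static LibError CountBytesCb(uintptr_t cbData, const u8* block, size_t size, size_t* bytesProcessed)
{
	(void)block;	// contents are not examined when merely counting
	size_t* total = (size_t*)cbData;
	*total += size;
	*bytesProcessed = size;	// the IO layer returns the sum of these values
	return INFO::CB_CONTINUE;	// keep receiving further blocks
}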
//-----------------------------------------------------------------------------
class BlockIo
{
public:
BlockIo()
: m_blockId(), cachedBlock(0), tempBlock(0), m_posixIo()
{
}
LibError Issue(File_Posix& file, off_t ofs, IoBuf buf, size_t size)
{
m_blockId = BlockId(file.Pathname(), ofs);
// block already available in cache?
cachedBlock = s_blockCache.Retrieve(m_blockId);
if(cachedBlock)
{
stats_block_cache(CR_HIT);
return INFO::OK;
}
stats_block_cache(CR_MISS);
stats_io_check_seek(m_blockId);
// use a temporary block if not writing to a preallocated buffer.
if(!buf)
buf = tempBlock = s_blockCache.Reserve(m_blockId);
return m_posixIo.Issue(file, ofs, buf, size);
}
LibError WaitUntilComplete(const u8*& block, size_t& blockSize)
{
if(cachedBlock)
{
block = (u8*)cachedBlock;
blockSize = BLOCK_SIZE;
return INFO::OK;
}
return m_posixIo.WaitUntilComplete(block, blockSize);
}
void Discard()
{
if(cachedBlock)
{
s_blockCache.Release(m_blockId);
cachedBlock = 0;
return;
}
if(tempBlock)
{
s_blockCache.MarkComplete(m_blockId);
tempBlock = 0;
}
}
private:
static BlockCache s_blockCache;
BlockId m_blockId;
IoBuf cachedBlock;
IoBuf tempBlock;
Io_Posix m_posixIo;
};
//-----------------------------------------------------------------------------
class IOManager : boost::noncopyable
{
public:
IOManager(File_Posix& file, off_t ofs, IoBuf buf, size_t size, IoCallback cb = 0, uintptr_t cbData = 0)
: m_file(file)
, start_ofs(ofs), user_size(size)
, m_cb(cb), m_cbData(cbData)
, m_totalIssued(0), m_totalTransferred(0), m_totalProcessed(0)
, err(INFO::CB_CONTINUE)
{
}
// now we read the file in BLOCK_SIZE chunks, N-buffered.
// if reading from Zip, inflate while reading the next block.
LibError run()
{
ScopedIoMonitor monitor;
aio();
if(err != INFO::CB_CONTINUE && err != INFO::OK)
return (ssize_t)err;
debug_assert(m_totalIssued >= m_totalTransferred && m_totalTransferred >= user_size);
monitor.NotifyOfSuccess(FI_AIO, m_file.Mode(), m_totalTransferred);
return m_totalProcessed;
}
private:
void wait(BlockIo& blockIo, u8*& block, size_t& blockSize)
{
LibError ret = blockIo.WaitUntilComplete(block, blockSize);
if(ret < 0)
err = ret;
// first time; skip past padding
if(m_totalTransferred == 0)
{
block = (u8*)block + ofs_misalign;
blockSize -= ofs_misalign;
}
// last time: don't include trailing padding
if(m_totalTransferred + blockSize > user_size)
blockSize = user_size - m_totalTransferred;
// we have usable data from a previous temp buffer,
// but it needs to be copied into the user's buffer
if(blockIo.cachedBlock && pbuf != IO_BUF_TEMP)
cpu_memcpy((char*)*pbuf+ofs_misalign+m_totalTransferred, block, blockSize);
m_totalTransferred += blockSize;
}
// align and pad the IO to BLOCK_SIZE
// (reduces work for AIO implementation).
LibError prepare()
{
ofs_misalign = 0;
size = user_size;
if(!is_write && !no_aio)
{
// note: we go to the trouble of aligning the first block (instead of
// just reading up to the next block and letting aio realign it),
// so that it can be taken from the cache.
// this is not possible if we don't allocate the buffer because
// extra space must be added for the padding.
ofs_misalign = start_ofs % BLOCK_SIZE;
start_ofs -= (off_t)ofs_misalign;
size = round_up(ofs_misalign + user_size, BLOCK_SIZE);
// but cut off at EOF (necessary to prevent IO error).
const off_t bytes_left = f->size - start_ofs;
if(bytes_left < 0)
WARN_RETURN(ERR::IO_EOF);
size = std::min(size, (size_t)bytes_left);
// and round back up to sector size.
// see rationale in file_io_issue.
const size_t AIO_SECTOR_SIZE = 512;
size = round_up(size, AIO_SECTOR_SIZE);
}
RETURN_ERR(file_io_get_buf(pbuf, size, f->atom_fn, f->flags, cb));
// see if actual transfer count matches requested size.
// note: most callers clamp to EOF but round back up to sector size
// (see explanation in file_io_issue).
////debug_assert(bytes_transferred >= (ssize_t)(m_aiocb.aio_nbytes-AIO_SECTOR_SIZE));
return INFO::OK;
}
void aio()
{
RETURN_ERR(prepare());
again:
{
// data remaining to transfer, and no error:
// start transferring next block.
if(m_totalIssued < size && err == INFO::CB_CONTINUE && queue.size() < MAX_PENDING_IOS)
{
queue.push_back(BlockIo());
BlockIo& blockIo = queue.back();
const off_t ofs = start_ofs+(off_t)m_totalIssued;
// for both reads and writes, do not issue beyond end of file/data
const size_t issue_size = std::min(BLOCK_SIZE, size - m_totalIssued);
// try to grab whole blocks (so we can put them in the cache).
// any excess data (can only be within first or last) is
// discarded in wait().
IoBuf buf;
if(pbuf == IO_BUF_TEMP)
buf = 0;
else
buf = (IoBuf)((u8*)*pbuf + m_totalIssued);
LibError ret = blockIo.Issue(m_file, ofs, buf, issue_size);
// transfer failed - loop will now terminate after
// waiting for all pending transfers to complete.
if(ret != INFO::OK)
err = ret;
m_totalIssued += issue_size;
goto again;
}
// IO pending: wait for it to complete, and process it.
if(!queue.empty())
{
BlockIo& blockIo = queue.front();
u8* block; size_t blockSize;
wait(blockIo, block, blockSize);
if(err == INFO::CB_CONTINUE)
{
size_t bytesProcessed;
LibError ret = io_InvokeCallback(block, blockSize, m_cb, m_cbData, bytesProcessed);
if(ret == INFO::CB_CONTINUE || ret == INFO::OK)
m_totalProcessed += bytesProcessed;
// processing failed - loop will now terminate after
// waiting for all pending transfers to complete.
else
err = ret;
}
blockIo.Discard();
queue.pop_front();
goto again;
}
}
// (all issued OR error) AND no pending transfers - done.
// we allocated the memory: skip any leading padding
if(not_temp && !is_write)
{
IoBuf org_buf = *pbuf;
*pbuf = (u8*)org_buf + ofs_misalign;
if(ofs_misalign || size != user_size)
assert(0); // TODO: no longer supported; rule this out
}
}
File_Posix& m_file;
bool m_isWrite;
off_t start_ofs;
size_t user_size;
IoCallback m_cb;
uintptr_t m_cbData;
// (useful, raw data: possibly compressed, but doesn't count padding)
size_t m_totalIssued;
size_t m_totalTransferred;
// if callback, sum of what it reports; otherwise, = m_totalTransferred
// this is what we'll return.
size_t m_totalProcessed;
// stop issuing and processing as soon as this changes
LibError err;
IoBuf* pbuf;
size_t ofs_misalign;
size_t size;
static const uint MAX_PENDING_IOS = 4;
//RingBuf<BlockIo, MAX_PENDING_IOS> queue;
std::deque<BlockIo> queue;
};
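// ---------------------------------------------------------------------------
// [illustrative sketch, not part of the WIP file] the alignment performed by
// IOManager::prepare boils down to the following arithmetic. BLOCK_SIZE and
// round_up are assumed to be the definitions used above; the function name is
// made up for illustration only.
static void ComputeAlignedExtents(off_t userOfs, size_t userSize,
	off_t& alignedOfs, size_t& paddedSize, size_t& misalign)
{
	// distance of the requested offset from the previous block boundary
	misalign = (size_t)(userOfs % BLOCK_SIZE);
	// round the start of the transfer down to that boundary..
	alignedOfs = userOfs - (off_t)misalign;
	// .. and extend the size so that the last (partial) block is fully covered.
	paddedSize = round_up(misalign + userSize, BLOCK_SIZE);
}
// example: userOfs=70000, userSize=1000, BLOCK_SIZE=64 KiB
// => misalign=4464, alignedOfs=65536, paddedSize=65536 (a single whole block).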

View File

@ -0,0 +1,44 @@
/**
* =========================================================================
* File : io_manager.h
* Project : 0 A.D.
* Description :
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_IO_MANAGER
#define INCLUDED_IO_MANAGER
#include "../posix/io_posix.h"
namespace ERR
{
const LibError IO = -110100;
const LibError IO_EOF = -110101;
}
// called by file_io after a block IO has completed.
// *bytesProcessed must be set; file_io will return the sum of these values.
// example: when reading compressed data and decompressing in the callback,
// indicate #bytes decompressed.
// return value: INFO::CB_CONTINUE to continue calling; anything else:
// abort immediately and return that.
// note: in situations where the entire IO is not split into blocks
// (e.g. when reading from cache or not using AIO), this is still called but
// for the entire IO. we do not split into fake blocks because it is
// advantageous (e.g. for decompressors) to have all data at once, if available
// anyway.
typedef LibError (*IoCallback)(uintptr_t cbData, const u8* block, size_t size, size_t* bytesProcessed);
// helper routine used by functions that call back to an IoCallback.
//
// bytesProcessed is 0 unless the return value is INFO::OK or INFO::CB_CONTINUE.
// note: a bytesProcessed of 0 is not grounds for aborting; the zip callback
// may legitimately produce no output when passed very little data.
extern LibError io_InvokeCallback(const u8* block, size_t size, IoCallback cb, uintptr_t cbData, size_t& bytesProcessed);
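// [illustrative sketch, not part of the WIP header] a minimal IoCallback that
// merely counts the bytes it receives. a real callback (e.g. the Zip inflater)
// would transform the block and report how many bytes of output it produced.
// the function name and the use of cbData as a size_t* are made up here.
inline LibError io_CountingCallback(uintptr_t cbData, const u8* block, size_t size, size_t* bytesProcessed)
{
	size_t* totalBytes = (size_t*)cbData;
	(void)block;	// the payload is not examined in this sketch
	*totalBytes += size;
	*bytesProcessed = size;	// file_io returns the sum of these values
	return INFO::CB_CONTINUE;
}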
#endif // #ifndef INCLUDED_IO_MANAGER

316
source/lib/file/path.cpp Normal file
View File

@ -0,0 +1,316 @@
/**
* =========================================================================
* File : path.cpp
* Project : 0 A.D.
* Description : helper functions for VFS paths.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "path.h"
#include <string.h>
#include "lib/posix/posix_filesystem.h"
#include "lib/adts.h"
#include "lib/rand.h"
#include "lib/allocators/pool.h"
#include "lib/sysdep/sysdep.h"
#include "lib/path_util.h"
#include "lib/sysdep/cpu.h" // cpu_memcpy
#include "file_stats.h"
ERROR_ASSOCIATE(ERR::ROOT_DIR_ALREADY_SET, "Attempting to set FS root dir more than once", -1);
ERROR_ASSOCIATE(ERR::NOT_IN_ROOT_DIR, "Accessing a file that's outside of the root dir", -1);
// path types:
// p_*: posix (e.g. mount object name or for open())
// v_*: vfs (e.g. mount point)
// fn : filename only (e.g. from readdir)
// dir_name: directory only, no path (e.g. subdir name)
//
// all paths must be relative (no leading '/'); components are separated
// by '/'; no ':', '\\', "." or ".." allowed; root dir is "".
//
// grammar:
// path ::= dir*file?
// dir ::= name/
// file ::= name
// name ::= [^/]+
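// examples (illustrative): "" (the root dir), "art/" and "art/textures/ui.dds"
// are well-formed; "/art", "art\\textures", "./art" and "art/.." are not.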
enum Conversion
{
TO_NATIVE,
TO_PORTABLE
};
static LibError convert_path(char* dst, const char* src, Conversion conv = TO_NATIVE)
{
// SYS_DIR_SEP is assumed to be a single character!
const char* s = src;
char* d = dst;
char from = SYS_DIR_SEP, to = '/';
if(conv == TO_NATIVE)
from = '/', to = SYS_DIR_SEP;
size_t len = 0;
for(;;)
{
len++;
if(len >= PATH_MAX)
WARN_RETURN(ERR::PATH_LENGTH);
char c = *s++;
if(c == from)
c = to;
*d++ = c;
// end of string - done
if(c == '\0')
return INFO::OK;
}
}
// set by path_SetRoot
static char n_root_dir[PATH_MAX];
static size_t n_root_dir_len;
// return the native equivalent of the given relative portable path
// (i.e. convert all '/' to the platform's directory separator)
// makes sure length < PATH_MAX.
LibError file_make_native_path(const char* path, char* n_path)
{
return convert_path(n_path, path, TO_NATIVE);
}
// return the portable equivalent of the given relative native path
// (i.e. convert the platform's directory separators to '/')
// makes sure length < PATH_MAX.
LibError file_make_portable_path(const char* n_path, char* path)
{
return convert_path(path, n_path, TO_PORTABLE);
}
// return the native equivalent of the given portable path
// (i.e. convert all '/' to the platform's directory separator).
// also prepends current directory => n_full_path is absolute.
// makes sure length < PATH_MAX.
LibError file_make_full_native_path(const char* path, char* n_full_path)
{
debug_assert(path != n_full_path); // doesn't work in-place
strcpy_s(n_full_path, PATH_MAX, n_root_dir);
return convert_path(n_full_path+n_root_dir_len, path, TO_NATIVE);
}
// return the portable equivalent of the given relative native path
// (i.e. convert the platform's directory separators to '/')
// n_full_path is absolute; if it doesn't match the current dir, fail.
// (note: portable paths are always relative to the file root dir).
// makes sure length < PATH_MAX.
LibError file_make_full_portable_path(const char* n_full_path, char* path)
{
debug_assert(path != n_full_path); // doesn't work in-place
if(strncmp(n_full_path, n_root_dir, n_root_dir_len) != 0)
WARN_RETURN(ERR::NOT_IN_ROOT_DIR);
return convert_path(path, n_full_path+n_root_dir_len, TO_PORTABLE);
}
// security check: only allow attempting to chdir once, so that malicious
// code cannot circumvent the VFS checks that disallow access to anything
// above the current directory (set here).
// this routine is called early at startup, so any subsequent attempts
// are likely bogus.
// we provide for resetting this from the self-test to allow clean
// re-init of the individual tests.
static bool root_dir_established;
// establish the root directory from <rel_path>, which is treated as
// relative to the executable's directory (determined via argv[0]).
// all relative file paths passed to this module will be based from
// this root dir.
//
// example: executable in "$install_dir/system"; desired root dir is
// "$install_dir/data" => rel_path = "../data".
//
// argv[0] is necessary because the current directory is unknown at startup
// (e.g. it isn't set when invoked via batch file), and this is the
// easiest portable way to find our install directory.
//
// can only be called once, by design (see below). rel_path is trusted.
LibError path_SetRoot(const char* argv0, const char* rel_path)
{
if(root_dir_established)
WARN_RETURN(ERR::ROOT_DIR_ALREADY_SET);
root_dir_established = true;
// get full path to executable
char n_path[PATH_MAX];
// .. first try safe, but system-dependent version
if(sys_get_executable_name(n_path, PATH_MAX) < 0)
{
// .. failed; use argv[0]
if(!realpath(argv0, n_path))
return LibError_from_errno();
}
// make sure it's valid
if(access(n_path, X_OK) < 0)
return LibError_from_errno();
// strip executable name, append rel_path, convert to native
char* start_of_fn = (char*)path_name_only(n_path);
RETURN_ERR(file_make_native_path(rel_path, start_of_fn));
// get actual root dir - previous n_path may include ".."
// (slight optimization, speeds up path lookup)
if(!realpath(n_path, n_root_dir))
return LibError_from_errno();
// .. append SYS_DIR_SEP to simplify code that uses n_root_dir
n_root_dir_len = strlen(n_root_dir)+1; // +1 for trailing SYS_DIR_SEP
debug_assert((n_root_dir_len+1) < sizeof(n_root_dir)); // Just checking
n_root_dir[n_root_dir_len-1] = SYS_DIR_SEP;
// You might think that n_root_dir is already 0-terminated, since it's
// static - but that might not be true after calling path_ResetRootDir!
n_root_dir[n_root_dir_len] = 0;
return INFO::OK;
}
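// [illustrative sketch, not part of the WIP file] typical startup usage,
// assuming the layout from the comment above (executable in
// "$install_dir/system", data in "$install_dir/data"):
//
//   (void)path_SetRoot(argv[0], "../data");
//
// any further call (unless path_ResetRootDir ran in between) fails with
// ERR::ROOT_DIR_ALREADY_SET.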
void path_ResetRootDir()
{
// see comment at root_dir_established.
debug_assert(root_dir_established);
n_root_dir[0] = '\0';
n_root_dir_len = 0;
root_dir_established = false;
}
//-----------------------------------------------------------------------------
// storage for path strings
//-----------------------------------------------------------------------------
// rationale: we want a constant-time path_is_atom_fn(string pointer) lookup:
// this avoids any overhead of calling path_UniqueCopy on
// already-atomized strings. that requires allocating from one contiguous
// arena, which is also more memory-efficient than the heap (no headers).
static Pool atom_pool;
typedef DynHashTbl<const char*, const char*> AtomMap;
static AtomMap atom_map;
static void InitPool()
{
static bool initialized = false;
if(initialized)
return;
pool_create(&atom_pool, 8*MiB, POOL_VARIABLE_ALLOCS);
initialized = true;
// TODO: we currently leak the pool. the following would have to run,
// but it's not clear when that would be possible+safe.
// atom_map.clear();
// (void)pool_destroy(&atom_pool);
}
bool path_is_atom_fn(const char* fn)
{
InitPool();
return pool_contains(&atom_pool, (void*)fn);
}
// allocate a copy of P_fn in our string pool. strings are equal iff
// their addresses are equal, thus allowing fast comparison.
//
// if the (generous) filename storage is full, 0 is returned.
// this is never expected to happen; callers need not check the
// return value because a warning is raised anyway.
const char* path_UniqueCopy(const char* P_fn)
{
InitPool();
// early out: if already an atom, return immediately.
if(path_is_atom_fn(P_fn))
return P_fn;
const size_t fn_len = strlen(P_fn);
const char* unique_fn;
// check if already allocated; return existing copy if so.
//
// rationale: the entire storage could be done via container,
// rather than simply using it as a lookup mapping.
// however, DynHashTbl together with Pool (see above) is more efficient.
unique_fn = atom_map.find(P_fn);
if(unique_fn)
return unique_fn;
unique_fn = (const char*)pool_alloc(&atom_pool, fn_len+1);
if(!unique_fn)
{
DEBUG_WARN_ERR(ERR::NO_MEM);
return 0;
}
cpu_memcpy((void*)unique_fn, P_fn, fn_len);
((char*)unique_fn)[fn_len] = '\0';
atom_map.insert(unique_fn, unique_fn);
stats_unique_name(fn_len);
return unique_fn;
}
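// [illustrative sketch, not part of the WIP file] because equal strings share
// a single pooled copy, comparing atomized filenames degenerates to comparing
// the pointers themselves (the example path is made up):
//
//   const char* atom1 = path_UniqueCopy("art/textures/ui.dds");
//   const char* atom2 = path_UniqueCopy("art/textures/ui.dds");
//   debug_assert(atom1 == atom2);	// same address; no strcmp needed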
const char* file_get_random_name()
{
InitPool();
// there had better be names in atom_pool, else this will fail.
debug_assert(atom_pool.da.pos != 0);
again:
const size_t start_ofs = (size_t)rand(0, (uint)atom_pool.da.pos);
// scan back to start of string (don't scan ahead; this must
// work even if atom_pool only contains one entry).
const char* start = (const char*)atom_pool.da.base+start_ofs;
for(size_t i = 0; i < start_ofs; i++)
{
if(*start == '\0')
break;
start--;
}
// skip past the '\0' we found. loop is needed because there may be
// several if we land in padding (due to pool alignment).
size_t chars_left = atom_pool.da.pos - start_ofs;
for(; *start == '\0'; start++)
{
// we had landed in padding at the end of the buffer.
if(chars_left-- == 0)
goto again;
}
const char* next_name = start;
return next_name;
}

90
source/lib/file/path.h Normal file
View File

@ -0,0 +1,90 @@
/**
* =========================================================================
* File : path.h
* Project : 0 A.D.
* Description : helper functions for VFS paths.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_PATH
#define INCLUDED_PATH
namespace ERR
{
const LibError ROOT_DIR_ALREADY_SET = -110200;
const LibError NOT_IN_ROOT_DIR = -110201;
}
// establish the root directory from <rel_path>, which is treated as
// relative to the executable's directory (determined via argv[0]).
// all relative file paths passed to this module will be based from
// this root dir.
//
// example: executable in "$install_dir/system"; desired root dir is
// "$install_dir/data" => rel_path = "../data".
//
// argv[0] is necessary because the current directory is unknown at startup
// (e.g. it isn't set when invoked via batch file), and this is the
// easiest portable way to find our install directory.
//
// can only be called once, by design (see below). rel_path is trusted.
extern LibError path_SetRoot(const char* argv0, const char* rel_path);
//
// path conversion functions (native <--> portable),
// for external libraries that require the real filename.
//
// replaces '/' with platform's directory separator and vice versa.
// verifies path length < PATH_MAX (otherwise return ERR::PATH_LENGTH).
//
// relative paths (relative to root dir)
extern LibError file_make_native_path(const char* path, char* n_path);
extern LibError file_make_portable_path(const char* n_path, char* path);
// as above, but with full native paths (portable paths are always relative).
// prepends current directory, resp. makes sure it matches the given path.
extern LibError file_make_full_native_path(const char* path, char* n_full_path);
extern LibError file_make_full_portable_path(const char* n_full_path, char* path);
#define VFS_PATH_IS_DIR(path) (*path == '\0' || path[strlen(path)-1] == '/')
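// e.g. (illustrative): VFS_PATH_IS_DIR("") and VFS_PATH_IS_DIR("art/") are
// true; VFS_PATH_IS_DIR("art/ui.dds") is false.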
extern bool path_is_atom_fn(const char* fn);
// allocate a copy of P_fn in our string pool. strings are equal iff
// their addresses are equal, thus allowing fast comparison.
//
// if the (generous) filename storage is full, 0 is returned.
// this is never expected to happen; callers need not check the
// return value because a warning is raised anyway.
extern const char* path_UniqueCopy(const char* P_fn);
extern const char* file_get_random_name();
/**
* reset root directory that was previously established via path_SetRoot.
*
* this function avoids the security complaint that would be raised if
* path_SetRoot is called twice; it is provided for the
* legitimate application of a self-test setUp()/tearDown().
**/
extern void path_ResetRootDir();
#endif // #ifndef INCLUDED_PATH

View File

@ -0,0 +1,232 @@
/**
* =========================================================================
* File : fs_posix.cpp
* Project : 0 A.D.
* Description : file layer on top of POSIX. avoids the need for
* : absolute paths and provides fast I/O.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "fs_posix.h"
#include <vector>
#include <algorithm>
#include <string>
#include "../path.h"
#include "lib/posix/posix_filesystem.h"
//-----------------------------------------------------------------------------
// DirectoryIterator_Posix
//-----------------------------------------------------------------------------
DirectoryIterator_Posix::DirectoryIterator_Posix(const char* P_path)
{
char N_path[PATH_MAX];
(void)file_make_full_native_path(P_path, N_path);
m_osDir = opendir(N_path);
// note: copying into N_path and then again into m_pp.path is inefficient,
// but clearer and more robust. this is only called a few hundred times anyway.
(void)path_package_set_dir(&m_pp, N_path);
}
DirectoryIterator_Posix::~DirectoryIterator_Posix()
{
if(m_osDir)
{
const int ret = closedir(m_osDir);
debug_assert(ret == 0);
}
}
LibError DirectoryIterator_Posix::NextEntry(FilesystemEntry& fsEntry)
{
if(!m_osDir)
return ERR::DIR_NOT_FOUND;
get_another_entry:
errno = 0;
struct dirent* osEnt = readdir(m_osDir);
if(!osEnt)
{
// no error, just no more entries to return
if(!errno)
return ERR::DIR_END; // NOWARN
return LibError_from_errno();
}
// copying into the global filename storage avoids the need for users to
// free fsEntry.name and is convenient+safe.
const char* atom_fn = path_UniqueCopy(osEnt->d_name);
// get file information (mode, size, mtime)
struct stat s;
#if OS_WIN
// .. wposix readdir has enough information to return dirent
// status directly (much faster than calling stat).
RETURN_ERR(readdir_stat_np(m_osDir, &s));
#else
// .. call regular stat().
errno = 0;
// (we need the full pathname; don't use path_append because it would
// unnecessarily call strlen.)
path_package_append_file(&m_pp, atom_fn);
if(stat(m_pp.path, &s) != 0)
return LibError_from_errno();
#endif
// skip "undesirable" entries that POSIX readdir returns:
if(S_ISDIR(s.st_mode))
{
// .. dummy directory entries ("." and "..")
if(atom_fn[0] == '.' && (atom_fn[1] == '\0' || (atom_fn[1] == '.' && atom_fn[2] == '\0')))
goto get_another_entry;
s.st_size = -1; // our way of indicating it's a directory
}
// .. neither dir nor file
else if(!S_ISREG(s.st_mode))
goto get_another_entry;
fsEntry.size = s.st_size;
fsEntry.mtime = s.st_mtime;
fsEntry.name = atom_fn;
return INFO::OK;
}
//-----------------------------------------------------------------------------
// Filesystem_Posix
//-----------------------------------------------------------------------------
LibError Filesystem_Posix::GetEntry(const char* P_pathname, FilesystemEntry& fsEntry) const
{
char N_pathname[PATH_MAX];
RETURN_ERR(file_make_full_native_path(P_pathname, N_pathname));
// if path ends in slash, remove it (required by stat)
char* last_char = N_pathname+strlen(N_pathname)-1;
if(path_is_dir_sep(*last_char))
*last_char = '\0';
errno = 0;
struct stat s;
memset(&s, 0, sizeof(s));
if(stat(N_pathname, &s) != 0)
return LibError_from_errno();
fsEntry.size = s.st_size;
fsEntry.mtime = s.st_mtime;
fsEntry.name = path_UniqueCopy(path_name_only(N_pathname));
fsEntry.mount = 0;
return INFO::OK;
}
LibError Filesystem_Posix::CreateDirectory(const char* P_dirPath)
{
char N_dirPath[PATH_MAX];
RETURN_ERR(file_make_full_native_path(P_dirPath, N_dirPath));
// if the directory already exists, there is nothing left to do.
errno = 0;
struct stat s;
if(stat(N_dirPath, &s) == 0)
return INFO::OK;
errno = 0;
if(mkdir(N_dirPath, S_IRWXO|S_IRWXU|S_IRWXG) != 0)
return LibError_from_errno();
return INFO::OK;
}
LibError Filesystem_Posix::DeleteDirectory(const char* P_dirPath)
{
// note: we have to recursively empty the directory before it can
// be deleted (required by Windows and POSIX rmdir()).
char N_dirPath[PATH_MAX];
RETURN_ERR(file_make_full_native_path(P_dirPath, N_dirPath));
PathPackage N_pp;
RETURN_ERR(path_package_set_dir(&N_pp, N_dirPath));
{
// (must go out of scope before rmdir)
DirectoryIterator_Posix di(P_dirPath);
for(;;)
{
FilesystemEntry fsEntry;
LibError err = di.NextEntry(fsEntry);
if(err == ERR::DIR_END)
break;
RETURN_ERR(err);
if(fsEntry.IsDirectory())
{
char P_subdirPath[PATH_MAX];
RETURN_ERR(path_append(P_subdirPath, P_dirPath, fsEntry.name));
RETURN_ERR(DeleteDirectory(P_subdirPath));
}
else
{
RETURN_ERR(path_package_append_file(&N_pp, fsEntry.name));
errno = 0;
if(unlink(N_pp.path) != 0)
return LibError_from_errno();
}
}
}
errno = 0;
if(rmdir(N_dirPath) != 0)
return LibError_from_errno();
return INFO::OK;
}
IDirectoryIterator* Filesystem_Posix::OpenDirectory(const char* P_dirPath) const
{
return new DirectoryIterator_Posix(P_dirPath);
}
LibError Filesystem_Posix::CreateFile(const char* P_pathname, const u8* buf, size_t size, uint flags)
{
File_Posix file;
RETURN_ERR(file.Open(P_pathname, 'w', flags));
RETURN_ERR(io(file, 0, buf, size));
return INFO::OK;
}
LibError Filesystem_Posix::DeleteFile(const char* P_pathname)
{
char N_pathname[PATH_MAX+1];
RETURN_ERR(file_make_full_native_path(P_pathname, N_pathname));
errno = 0;
if(unlink(N_pathname) != 0)
return LibError_from_errno();
return INFO::OK;
}
LibError LoadFile(const char* P_pathname, const u8*& buf, size_t size, uint flags = 0, IoCallback cb = 0, uintptr_t cbData = 0)
{
File_Posix file;
RETURN_ERR(file.Open(P_pathname, 'r', flags));
RETURN_ERR(io(file, 0, buf, size, cb, cbData));
return INFO::OK;
}

View File

@ -0,0 +1,60 @@
/**
* =========================================================================
* File : fs_posix.h
* Project : 0 A.D.
* Description : file layer on top of POSIX. avoids the need for
* : absolute paths and provides fast I/O.
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_FS_POSIX
#define INCLUDED_FS_POSIX
#include "../filesystem.h"
#include "lib/path_util.h" // PathPackage
#include "../io/io_manager.h" // IoCallback
// layer on top of POSIX opendir/readdir/closedir that converts paths to
// the native representation and ignores non-file/directory entries.
class DirectoryIterator_Posix : public IDirectoryIterator
{
public:
DirectoryIterator_Posix(const char* P_path);
virtual ~DirectoryIterator_Posix();
virtual LibError NextEntry(FilesystemEntry& fsEntry);
private:
DIR* m_osDir;
PathPackage m_pp;
};
struct Filesystem_Posix : public IFilesystem
{
virtual char IdentificationCode() const
{
return 'F';
}
virtual int Precedence() const
{
return 1;
}
virtual LibError GetEntry(const char* pathname, FilesystemEntry& fsEntry) const;
virtual LibError CreateDirectory(const char* dirPath);
virtual LibError DeleteDirectory(const char* dirPath);
virtual IDirectoryIterator* OpenDirectory(const char* dirPath) const;
virtual LibError CreateFile(const char* pathname, const u8* buf, size_t size, uint flags = 0);
virtual LibError DeleteFile(const char* pathname);
virtual ssize_t LoadFile(const char* pathname, IoBuf& buf, uint flags = 0, IoCallback cb = 0, uintptr_t cbData = 0);
};
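// [illustrative sketch, not part of the WIP header] enumerating a directory
// through the interface above. the function name and the debug_printf output
// are made up; error handling is kept to a minimum.
inline LibError fs_posix_ListDirectory(Filesystem_Posix& fs, const char* P_dirPath)
{
	IDirectoryIterator* di = fs.OpenDirectory(P_dirPath);
	for(;;)
	{
		FilesystemEntry fsEntry;
		const LibError ret = di->NextEntry(fsEntry);
		if(ret == ERR::DIR_END)
			break;	// no more entries
		if(ret < 0)
		{
			delete di;
			return ret;
		}
		debug_printf("%s%s\n", fsEntry.name, fsEntry.IsDirectory()? "/" : "");
	}
	delete di;
	return INFO::OK;
}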
#endif // #ifndef INCLUDED_FS_POSIX

View File

@ -0,0 +1,213 @@
/**
* =========================================================================
* File : io_posix.cpp
* Project : 0 A.D.
* Description : lightweight POSIX aio wrapper
* =========================================================================
*/
// license: GPL; see lib/license.txt
#include "precompiled.h"
#include "io_posix.h"
#include "lib/file/filesystem.h"
#include "lib/file/path.h"
#include "lib/file/file_stats.h"
#include "lib/posix/posix_aio.h"
//-----------------------------------------------------------------------------
File_Posix::File_Posix()
: m_pathname(0), m_mode(0), m_flags(0), m_fd(-1)
{
}
File_Posix::~File_Posix()
{
Close();
}
LibError File_Posix::Open(const char* P_pathname, char mode, uint flags)
{
debug_assert(mode == 'w' || mode == 'r');
debug_assert(flags <= FILE_FLAG_ALL);
m_pathname = path_UniqueCopy(P_pathname);
m_mode = mode;
m_flags = flags;
char N_pathname[PATH_MAX];
(void)file_make_full_native_path(P_pathname, N_pathname);
int oflag = (mode == 'r')? O_RDONLY : O_WRONLY|O_CREAT|O_TRUNC;
#if OS_WIN
if(flags & FILE_TEXT)
oflag |= O_TEXT_NP;
else
oflag |= O_BINARY_NP;
// if AIO is disabled at user's behest, inform wposix.
if(flags & FILE_NO_AIO)
oflag |= O_NO_AIO_NP;
#endif
m_fd = open(N_pathname, oflag, S_IRWXO|S_IRWXU|S_IRWXG);
if(m_fd < 0)
RETURN_ERR(ERR::FILE_ACCESS);
return INFO::OK;
}
void File_Posix::Close()
{
// guard against closing a handle we never opened (see ctor)
if(m_fd >= 0)
close(m_fd);
m_fd = -1;
}
LibError File_Posix::Validate() const
{
if(path_UniqueCopy(m_pathname) != m_pathname)
WARN_RETURN(ERR::_1);
if((m_mode != 'w' && m_mode != 'r'))
WARN_RETURN(ERR::_2);
if(m_flags > FILE_FLAG_ALL)
WARN_RETURN(ERR::_3);
// >= 0x100 is not necessarily bogus, but suspicious.
if(!(3 <= m_fd && m_fd < 0x100))
WARN_RETURN(ERR::_4);
return INFO::OK;
}
//-----------------------------------------------------------------------------
class Io_Posix::Impl
{
public:
Impl()
{
memset(&m_aiocb, 0, sizeof(m_aiocb));
}
LibError Issue(File_Posix& file, off_t ofs, IoBuf buf, size_t size)
{
debug_printf("FILE| Issue ofs=0x%X size=0x%X\n", ofs, size);
m_aiocb.aio_lio_opcode = (file.Mode() == 'w')? LIO_WRITE : LIO_READ;
m_aiocb.aio_buf = (volatile void*)buf;
m_aiocb.aio_fildes = file.Handle();
m_aiocb.aio_offset = ofs;
m_aiocb.aio_nbytes = size;
struct sigevent* sig = 0; // no notification signal
aiocb* const ops[1] = { &m_aiocb };	// lio_listio expects an array of aiocb pointers
if(lio_listio(LIO_NOWAIT, ops, 1, sig) != 0)
return LibError_from_errno();
return INFO::OK;
}
LibError Validate() const
{
if(debug_is_pointer_bogus((void*)m_aiocb.aio_buf))
WARN_RETURN(ERR::_2);
const int opcode = m_aiocb.aio_lio_opcode;
if(opcode != LIO_WRITE && opcode != LIO_READ && opcode != LIO_NOP)
WARN_RETURN(ERR::_3);
// all other aiocb fields have no invariants we could check.
return INFO::OK;
}
LibError Status() const
{
debug_assert(Validate() == INFO::OK);
errno = 0;
int ret = aio_error(&m_aiocb);
if(ret == EINPROGRESS)
return INFO::IO_PENDING;
if(ret == 0)
return INFO::IO_COMPLETE;
return LibError_from_errno();
}
LibError WaitUntilComplete(IoBuf& buf, size_t& size)
{
debug_printf("FILE| Wait io=%p\n", this);
debug_assert(Validate() == INFO::OK);
// wait for transfer to complete.
const aiocb* const ops[1] = { &m_aiocb };	// aio_suspend expects an array of aiocb pointers
while(aio_error(&m_aiocb) == EINPROGRESS)
aio_suspend(ops, 1, (timespec*)0); // wait indefinitely
// query number of bytes transferred (-1 if the transfer failed)
const ssize_t bytes_transferred = aio_return(&m_aiocb);
debug_printf("FILE| bytes_transferred=%d aio_nbytes=%u\n", bytes_transferred, m_aiocb.aio_nbytes);
buf = (IoBuf)m_aiocb.aio_buf; // cast from volatile void*
size = bytes_transferred;
return INFO::OK;
}
private:
aiocb m_aiocb;
};
//-----------------------------------------------------------------------------
Io_Posix::Io_Posix()
: impl(new Impl)
{
}
Io_Posix::~Io_Posix()
{
}
LibError Io_Posix::Issue(File_Posix& file, off_t ofs, IoBuf buf, size_t size)
{
return impl.get()->Issue(file, ofs, buf, size);
}
LibError Io_Posix::Status() const
{
return impl.get()->Status();
}
LibError Io_Posix::WaitUntilComplete(IoBuf& buf, size_t& size)
{
return impl.get()->WaitUntilComplete(buf, size);
}
//-----------------------------------------------------------------------------
LibError io_posix_Synchronous(File_Posix& file, off_t ofs, IoBuf buf, size_t size)
{
const int fd = file.Handle();
const bool isWrite = (file.Mode() == 'w');
ScopedIoMonitor monitor;
lseek(fd, ofs, SEEK_SET);
errno = 0;
void* dst = (void*)buf;
const ssize_t ret = isWrite? write(fd, dst, size) : read(fd, dst, size);
if(ret < 0)
return LibError_from_errno();
const size_t totalTransferred = (size_t)ret;
if(totalTransferred != size)
WARN_RETURN(ERR::IO);
monitor.NotifyOfSuccess(FI_LOWIO, isWrite? FO_WRITE : FO_READ, totalTransferred);
return INFO::OK;
}

View File

@ -0,0 +1,139 @@
/**
* =========================================================================
* File : io_posix.h
* Project : 0 A.D.
* Description : lightweight POSIX aio wrapper
* =========================================================================
*/
// license: GPL; see lib/license.txt
#ifndef INCLUDED_IO_POSIX
#define INCLUDED_IO_POSIX
#include <boost/shared_ptr.hpp>
#include "../io/io_buf.h"
// rationale for using aio instead of mmap:
// - parallelism: instead of just waiting for the transfer to complete,
// other work can be done in the meantime.
// example: decompressing from a Zip archive is practically free because
// we inflate one block while reading the next.
// - throughput: with aio, the drive always has something to do, as opposed
// to read requests triggered by the OS for mapped files, which come
// in smaller chunks. this leads to much higher transfer rates.
// - memory: when used with VFS, aio makes better use of a file cache.
// data is generally compressed in an archive. a cache should store the
// decompressed and decoded (e.g. BGR->RGB) data; mmap would keep the
// original compressed data in memory, which doesn't help.
// we bypass the OS file cache via aio; high-level code will take care
// of caching the decoded file contents.
// however, aio is only used internally in file_io. this simplifies the
// interface by preventing the need for multiple implementations (archive,
// vfs, etc.) and avoiding the need for automatic free (since aio won't
// be used directly by client code).
// this also affects streaming of sounds, which is currently indeed
// implemented via aio from archives. however, it doesn't appear to be used
// (even music files are loaded at once) and really ought to be done via
// thread, so we could disable it.
// we don't do any caching or alignment here - this is just a thin AIO wrapper.
// rationale:
// - aligning the transfer isn't possible here since we have no control
// over the buffer, i.e. we cannot read more data than requested.
// instead, this is done in io_manager.
// - transfer sizes here are arbitrary (i.e. not block-aligned);
// that means the cache would have to handle this or also split them up
// into blocks, which would duplicate the abovementioned work.
// - if caching here, we'd also have to handle "forwarding" (i.e.
// desired block has been issued but isn't yet complete). again, it
// is easier to let the synchronous io_manager handle this.
// - finally, io_manager knows more about whether the block should be cached
// (e.g. whether another block request will follow), but we don't
// currently make use of this.
//
// disadvantages:
// - streamed data will always be read from disk. no problem, because
// such data (e.g. music, long speech) is unlikely to be used again soon.
// - prefetching (issuing the next few blocks from archive/file during
// idle time to satisfy potential future IOs) requires extra buffers;
// this is a bit more complicated than just using the cache as storage.
namespace INFO
{
const LibError IO_PENDING = +110200;
const LibError IO_COMPLETE = +110201;
}
class File_Posix
{
friend class Io_Posix;
public:
File_Posix();
~File_Posix();
LibError Open(const char* pathname, char mode, uint flags);
void Close();
const char* Pathname() const
{
return m_pathname;
}
char Mode() const
{
return m_mode;
}
uint Flags() const
{
return m_flags;
}
int Handle() const
{
return m_fd;
}
LibError Validate() const;
private:
const char* m_pathname;
char m_mode;
uint m_flags;
int m_fd;
};
class Io_Posix
{
public:
Io_Posix();
~Io_Posix();
// no attempt is made at aligning or padding the transfer.
LibError Issue(File_Posix& file, off_t ofs, IoBuf buf, size_t size);
// check if the IO has completed.
LibError Status() const;
// passes back the buffer and its size.
LibError WaitUntilComplete(IoBuf& buf, size_t& size);
private:
class Impl;
boost::shared_ptr<Impl> impl;
};
extern LibError io_posix_Synchronous(File_Posix& file, off_t ofs, IoBuf buf, size_t size);
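// [illustrative sketch, not part of the WIP header] issuing an asynchronous
// read and then blocking on it. in real use, other work would overlap with the
// transfer between Issue and WaitUntilComplete; the function name is made up
// and error paths leave the file open for brevity.
inline LibError io_posix_ExampleRead(const char* pathname, IoBuf buf, size_t size)
{
	File_Posix file;
	RETURN_ERR(file.Open(pathname, 'r', 0));
	Io_Posix io;
	RETURN_ERR(io.Issue(file, 0, buf, size));
	// .. computation could proceed here while the transfer is in flight ..
	size_t bytesTransferred;
	RETURN_ERR(io.WaitUntilComplete(buf, bytesTransferred));
	debug_assert(bytesTransferred == size);	// callers are expected to clamp size to EOF
	file.Close();
	return INFO::OK;
}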
#endif // #ifndef INCLUDED_IO_POSIX

View File

@ -0,0 +1,26 @@
/* Generated file, do not edit */
#ifndef CXXTEST_RUNNING
#define CXXTEST_RUNNING
#endif
#define _CXXTEST_HAVE_STD
#include "precompiled.h"
#include <cxxtest/TestListener.h>
#include <cxxtest/TestTracker.h>
#include <cxxtest/TestRunner.h>
#include <cxxtest/RealDescriptions.h>
#include "d:\Projects\0ad\svn\source\lib\res\file\tests\test_file_cache.h"
static TestFileCache suite_TestFileCache;
static CxxTest::List Tests_TestFileCache = { 0, 0 };
CxxTest::StaticSuiteDescription suiteDescription_TestFileCache( "d:\\Projects\\0ad\\svn\\source\\lib\\res\\file\\tests\\test_file_cache.h", 6, "TestFileCache", suite_TestFileCache, Tests_TestFileCache );
static class TestDescription_TestFileCache_test_cache_allocator : public CxxTest::RealTestDescription {
public:
TestDescription_TestFileCache_test_cache_allocator() : CxxTest::RealTestDescription( Tests_TestFileCache, suiteDescription_TestFileCache, 10, "test_cache_allocator" ) {}
void runTest() { suite_TestFileCache.test_cache_allocator(); }
} testDescription_TestFileCache_test_cache_allocator;

View File

@ -0,0 +1,51 @@
#include "lib/self_test.h"
#include "lib/res/file/file_cache.h"
#include "lib/rand.h"
class TestFileCache : public CxxTest::TestSuite
{
enum { TEST_ALLOC_TOTAL = 100*1000*1000 };
public:
void test_cache_allocator()
{
// allocated address -> its size
typedef std::map<void*, size_t> AllocMap;
AllocMap allocations;
// put allocator through its paces by allocating several times
// its capacity (this ensures memory is reused)
srand(1);
size_t total_size_used = 0;
while(total_size_used < TEST_ALLOC_TOTAL)
{
size_t size = rand(1, TEST_ALLOC_TOTAL/16);
total_size_used += size;
void* p;
// until successful alloc:
for(;;)
{
p = file_cache_allocator_alloc(size);
if(p)
break;
// out of room - remove a previous allocation
// .. choose one at random
size_t chosen_idx = (size_t)rand(0, (uint)allocations.size());
AllocMap::iterator it = allocations.begin();
for(; chosen_idx != 0; chosen_idx--)
++it;
file_cache_allocator_free(it->first, it->second);
allocations.erase(it);
}
// must not already have been allocated
TS_ASSERT_EQUALS(allocations.find(p), allocations.end());
allocations[p] = size;
}
// reset to virginal state
// note: even though everything has now been freed, this is
// necessary since the freelists may be a bit scattered already.
file_cache_allocator_reset();
}
};

View File

@ -0,0 +1,32 @@
/* Generated file, do not edit */
#ifndef CXXTEST_RUNNING
#define CXXTEST_RUNNING
#endif
#define _CXXTEST_HAVE_STD
#include "precompiled.h"
#include <cxxtest/TestListener.h>
#include <cxxtest/TestTracker.h>
#include <cxxtest/TestRunner.h>
#include <cxxtest/RealDescriptions.h>
#include "d:\Projects\0ad\svn\source\lib\res\file\tests\test_path.h"
static TestPath suite_TestPath;
static CxxTest::List Tests_TestPath = { 0, 0 };
CxxTest::StaticSuiteDescription suiteDescription_TestPath( "d:\\Projects\\0ad\\svn\\source\\lib\\res\\file\\tests\\test_path.h", 6, "TestPath", suite_TestPath, Tests_TestPath );
static class TestDescription_TestPath_test_conversion : public CxxTest::RealTestDescription {
public:
TestDescription_TestPath_test_conversion() : CxxTest::RealTestDescription( Tests_TestPath, suiteDescription_TestPath, 9, "test_conversion" ) {}
void runTest() { suite_TestPath.test_conversion(); }
} testDescription_TestPath_test_conversion;
static class TestDescription_TestPath_test_atom : public CxxTest::RealTestDescription {
public:
TestDescription_TestPath_test_atom() : CxxTest::RealTestDescription( Tests_TestPath, suiteDescription_TestPath, 33, "test_atom" ) {}
void runTest() { suite_TestPath.test_atom(); }
} testDescription_TestPath_test_atom;

View File

@ -0,0 +1,63 @@
#include "lib/self_test.h"
#include "lib/self_test.h"
#include "lib/res/file/path.h"
class TestPath : public CxxTest::TestSuite
{
public:
void test_conversion()
{
char N_path[PATH_MAX] = {0};
TS_ASSERT_OK(file_make_native_path("a/b/c", N_path));
#if OS_WIN
TS_ASSERT_STR_EQUALS(N_path, "a\\b\\c");
#else
TS_ASSERT_STR_EQUALS(N_path, "a/b/c");
#endif
char P_path[PATH_MAX] = {0};
TS_ASSERT_OK(file_make_portable_path("a\\b\\c", P_path));
#if OS_WIN
TS_ASSERT_STR_EQUALS(P_path, "a/b/c");
#else
// sounds strange, but correct: on non-Windows, \\ didn't
// get recognized as separators and weren't converted.
TS_ASSERT_STR_EQUALS(P_path, "a\\b\\c");
#endif
}
// file_make_full_*_path is left untested (hard to do so)
void test_atom()
{
// file_make_unique_fn_copy
// .. return same address for same string?
const char* atom1 = file_make_unique_fn_copy("a/bc/def");
const char* atom2 = file_make_unique_fn_copy("a/bc/def");
TS_ASSERT_EQUALS(atom1, atom2);
// .. early out (already in pool) check works?
const char* atom3 = file_make_unique_fn_copy(atom1);
TS_ASSERT_EQUALS(atom3, atom1);
// path_is_atom_fn
// is it reported as in pool?
TS_ASSERT(path_is_atom_fn(atom1));
// file_get_random_name
// see if the atom added above eventually comes out when a
// random one is returned from the pool.
int tries_left;
for(tries_left = 1000; tries_left != 0; tries_left--)
{
const char* random_name = file_get_random_name();
if(random_name == atom1)
break;
}
TS_ASSERT(tries_left != 0);
}
};