merge of (work in progress) VFS code with new dir layout

This was SVN commit r216.
This commit is contained in:
janwas 2004-05-06 17:14:30 +00:00
parent 81b6dac486
commit 5b5726ed4d
43 changed files with 2816 additions and 1274 deletions

View File

@ -300,7 +300,7 @@ public:
// Copy from memory
tOutput = *(T*)mem;
delete [] mem;
free(mem);
// TODO Gee: Undefined type - maybe report in log
return true;

View File

@ -27,85 +27,3 @@ private:
// const size_t num_pages = (max_items + items_per_page-1) / items_per_page;
T* pages[(max_items + (sizeof(T) / PAGE_SIZE)-1) / (sizeof(T) / PAGE_SIZE)];
};
//
// cache implementation
//
// Mark the given line as most-recently-used.
void Cache::reference(List_iterator line)
{
	// Refresh the tag -> line mapping first; the splice below does not
	// invalidate the iterator, so either order yields the same state.
	idx[line->tag] = line;
	// Move the line to the front of the LRU list (O(1) relink).
	lru_list.splice(lru_list.begin(), lru_list, line);
}
// look up the line with the given tag; return 0 if it isn't cached.
// side effect: the line is promoted to most-recently-used.
Cache::Line* Cache::find_line(u64 tag)
{
	const Map::const_iterator it = idx.find(tag);
	if(it != idx.end())
	{
		// hit - the index entry points at the list node.
		const List_iterator line = it->second;
		reference(line);
		return &*line;
	}
	// tag is not present in the cache.
	return 0;
}
// return the pointer stored under <tag>, or 0 if not in cache.
// (also marks the line most-recently-used, via find_line.)
void* Cache::get(u64 tag)
{
	if(Line* const line = find_line(tag))
		return line->p;
	return 0;
}
// insert <tag> into the cache, associating <p> with it.
// returns 0 on success, or -1 if the tag is already cached.
int Cache::add(u64 tag, void* p)
{
	// reject duplicate tags.
	if(get(tag) != 0)
	{
		assert(0 && "add: tag already in cache!");
		return -1;
	}
	// a freshly added line counts as most-recently-used:
	// prepend to the LRU list and index the new front entry.
	lru_list.push_front(Line(tag, p));
	idx[tag] = lru_list.begin();
	return 0;
}
// find the least recently used line that isn't locked;
// change its tag to <new_tag>, and return its associated pointer.
// returns 0 if all lines are locked - the caller should add() enough
// lines so that this never happens.
void* Cache::replace_lru_with(u64 new_tag)
{
	// scan in least -> most recently used order for the first
	// line that isn't protected against displacement.
	// (structured loop replaces the previous goto-based scan.)
	for(List_iterator l = lru_list.end(); l != lru_list.begin(); )
	{
		--l;
		if(!l->locked)
		{
			// displace this line: drop its old index entry, re-tag it,
			// and mark it most-recently-used (reference also updates idx).
			idx.erase(l->tag);
			l->tag = new_tag;
			reference(l);
			return l->p;
		}
	}
	// all lines are locked and cannot be displaced.
	assert(0 && "replace_lru_with not possible - all lines are locked");
	return 0;
}

View File

@ -1,8 +1,49 @@
#ifndef ADTS_H__
#define ADTS_H__
#include "lib.h"
#include <list>
#include <map>
#include <cassert>
// accumulator for building up and reading back a stream of single bits.
// (ulong/uint are typedefs from lib.h.)
static struct BIT_BUF
{
	ulong buf;	// the collected bits; most recently added bit is the LSB
	ulong cur;	// value of the bit that the next add() will append
	ulong len;	// number of valid bits in buf
	// start over with an empty buffer.
	void reset()
	{
		buf = cur = len = 0;
	}
	// append one bit as the new LSB; a nonzero <toggle>
	// flips the current bit value first.
	void add(ulong toggle)
	{
		cur ^= toggle;
		buf = (buf << 1) | cur;
		len++;
	}
	// remove and return the n least significant bits.
	// NOTE(review): assumes n < bit width of ulong - shifting by the
	// full width is undefined. confirm against callers.
	uint extract(ulong n)
	{
		const ulong mask = (1ul << n) - 1;
		const ulong bits = buf & mask;
		buf >>= n;
		return bits;
	}
}
bit_buf;
template<class T, int n> struct RingBuf
@ -33,22 +74,24 @@ template<class T, int n> struct RingBuf
class const_iterator
{
public:
const_iterator() : data(0), pos(0) {}
const_iterator(const T* _data, size_t _pos) : data(_data), pos(_pos) {}
const_iterator() : data(0), pos(0)
{}
const_iterator(const T* _data, size_t _pos) : data(_data), pos(_pos)
{}
const T& operator[](int idx) const
{ return data[(pos+idx) % n]; }
{ return data[(pos+idx) % n]; }
const T& operator*() const
{ return data[pos]; }
{ return data[pos]; }
const T* operator->() const
{ return &**this; }
{ return &**this; }
const_iterator& operator++() // pre
{ pos = (pos+1) % n; return (*this); }
{ pos = (pos+1) % n; return (*this); }
const_iterator operator++(int) // post
{ const_iterator tmp = *this; ++*this; return tmp; }
{ const_iterator tmp = *this; ++*this; return tmp; }
bool operator==(const const_iterator& rhs) const
{ return pos == rhs.pos && data == rhs.data; }
{ return pos == rhs.pos && data == rhs.data; }
bool operator!=(const const_iterator& rhs) const
{ return !(*this == rhs); }
{ return !(*this == rhs); }
protected:
const T* data;
size_t pos;
@ -67,80 +110,283 @@ template<class T, int n> struct RingBuf
// cache
//
class Cache
// owns a pool of resources (Entry-s), associated with a 64 bit id.
// typical use: add all available resources to the cache via grow();
// assign() ids to the resources, and update the resource data if necessary;
// retrieve() the resource, given id.
template<class Entry> class Cache
{
public:
// return the pointer associated with the line identified by tag,
// or 0 if not in cache.
void* get(u64 tag);
// add tag to cache, and associate p with it.
// return 0 on success, or -1 if already in cache.
int add(u64 tag, void* p);
// find least recently used entry that isn't locked;
// change its tag, and return its associated pointer.
void* replace_lru_with(u64 new_tag);
int get_ctx(u64 tag, uintptr_t& ctx)
// 'give' Entry to the cache.
int grow(Entry& e)
{
Line* l = find_line(tag);
if(!l)
return -1;
ctx = l->ctx;
// add to front of LRU list, but not index
// (since we don't have an id yet)
lru_list.push_front(Line(0, e));
return 0;
}
int set_ctx(u64 tag, uintptr_t ctx)
// find the least-recently used line; associate id with it,
// and return its Entry. fails (returns 0) if id is already
// associated, or all lines are locked.
Entry* assign(u64 id)
{
Line* l = find_line(tag);
if(find_line(id))
{
assert(0 && "assign: id already in cache!");
return 0;
}
// scan in least->most used order for first non-locked entry
List_iterator l = lru_list.end();
while(l != lru_list.begin())
{
--l;
if(l->refs == 0)
goto have_line;
}
// all are locked and cannot be displaced.
// caller should grow() enough lines so that this never happens.
assert(0 && "assign: all lines locked - grow() more lines");
return 0;
have_line:
// update mapping (index)
idx.erase(id);
idx[id] = l;
l->id = id;
return &l->ent;
}
// find line identified by id; return its entry or 0 if not in cache.
Entry* retrieve(u64 id)
{
// invalid: id 0 denotes not-yet-associated lines
if(id == 0)
{
assert(0 && "retrieve: id 0 not allowed");
return 0;
}
Line* l = find_line(id);
return l? &l->ent : 0;
}
// add/release a reference to a line, to protect it against
// displacement via associate(). we verify refs >= 0.
int lock(u64 id, bool locked)
{
Line* l = find_line(id);
if(!l)
return -1;
l->ctx = ctx;
if(locked)
l->refs++;
else
{
assert(l->refs > 0);
l->refs--;
}
return 0;
}
int lock(u64 tag, bool locked)
{
Line* l = find_line(tag);
if(!l)
return -1;
l->locked = locked;
return 0;
}
private:
// implementation:
// cache lines are stored in a list, most recently used in front.
// a map finds the list entry containing a given tag in log-time.
// a map finds the list entry containing a given id in log-time.
struct Line
{
u64 tag;
void* p;
bool locked; // protect from displacement
uintptr_t ctx;
u64 id;
Entry ent;
int refs; // protect from displacement if > 0
Line(u64 _tag, void* _p)
Line(u64 _tag, Entry& _ent)
{
tag = _tag;
p = _p;
locked = false;
ctx = 0;
id = 0;
ent = _ent;
refs = 0;
}
};
typedef std::list<Line> List;
typedef List::iterator List_iterator;
typedef typename List::iterator List_iterator;
List lru_list;
typedef std::map<u64, List_iterator> Map;
Map idx;
// return the line identified by tag, or 0 if not in cache.
Line* find_line(u64 tag);
// return the line identified by id, or 0 if not in cache.
// mark it as the most recently used line.
Line* find_line(u64 id)
{
Map::const_iterator i = idx.find(id);
// not found
if(i == idx.end())
return 0;
// mark l as the most recently used line.
void reference(List_iterator l);
// index points us to list entry
List_iterator l = i->second;
// mark l as the most recently used line.
lru_list.splice(lru_list.begin(), lru_list, l);
idx[l->id] = l;
return &*l;
}
};
// from VFS, not currently needed
/*
template<class T> class StringMap
{
public:
T* add(const char* fn, T& t)
{
const FnHash fn_hash = fnv_hash(fn);
t.name = fn;
std::pair<FnHash, T> item = std::make_pair(fn_hash, t);
std::pair<MapIt, bool> res;
res = map.insert(item);
if(!res.second)
{
assert(0 && "add: already in container");
return 0;
}
// return address of user data (T) inserted into container.
return &((res.first)->second);
}
T* find(const char* fn)
{
const FnHash fn_hash = fnv_hash(fn);
MapIt it = map.find(fn_hash);
// O(log(size))
if(it == map.end())
return 0;
return &it->second;
}
size_t size() const
{
return map.size();
}
void clear()
{
map.clear();
}
private:
typedef std::map<FnHash, T> Map;
typedef typename Map::iterator MapIt;
Map map;
public:
class iterator
{
public:
iterator()
{}
iterator(typename StringMap<T>::MapIt _it)
{ it = _it; }
T& operator*() const
{ return it->second; }
T* operator->() const
{ return &**this; }
iterator& operator++() // pre
{ ++it; return (*this); }
bool operator==(const iterator& rhs) const
{ return it == rhs.it; }
bool operator!=(const iterator& rhs) const
{ return !(*this == rhs); }
protected:
typename StringMap<T>::MapIt it;
};
iterator begin()
{ return iterator(map.begin()); }
iterator end()
{ return iterator(map.end()); }
};
template<class Key, class Data> class PriMap
{
public:
int add(Key key, uint pri, Data& data)
{
Item item = std::make_pair(pri, data);
MapEntry ent = std::make_pair(key, item);
std::pair<MapIt, bool> ret;
ret = map.insert(ent);
// already in map
if(!ret.second)
{
MapIt it = ret.first;
Item item = it->second;
const uint old_pri = item.first;
Data& old_data = item.second;
// new data is of higher priority; replace older data
if(old_pri <= pri)
{
old_data = data;
return 0;
}
// new data is of lower priority; don't add
else
return 1;
}
return 0;
}
Data* find(Key key)
{
MapIt it = map.find(key);
if(it == map.end())
return 0;
return &it->second.second;
}
void clear()
{
map.clear();
}
private:
typedef std::pair<uint, Data> Item;
typedef std::pair<Key, Item> MapEntry;
typedef std::map<Key, Item> Map;
typedef typename Map::iterator MapIt;
Map map;
};
*/
#endif // #ifndef ADTS_H__

View File

@ -24,3 +24,13 @@
#undef HAVE_X
#undef CONFIG_DISABLE_EXCEPTIONS
// TODO: where does this belong?
#ifdef CONFIG_DISABLE_EXCEPTIONS
# ifdef _WIN32
# define _HAS_EXCEPTIONS 0
# else
# define STL_NO_EXCEPTIONS
# endif
#endif

View File

@ -30,23 +30,11 @@
# include "sysdep/ia32.h"
#endif
#include "timer.h"
#include "ogl.h"
#include "sdl.h"
// HACK
extern int win_get_gfx_card();
extern int win_get_gfx_drv();
extern int win_get_cpu_info();
/*
// useful for choosing a video mode. not called by detect().
void get_cur_resolution(int& xres, int& yres)
{
// guess
xres = 1024; yres = 768;
}
*/
//
// memory
//

View File

@ -27,8 +27,13 @@ extern "C" {
// useful for choosing a video mode. not called by detect().
// if we fail, don't change the outputs (assumed initialized to defaults)
extern void get_cur_resolution(int& xres, int& yres);
// if we fail, outputs are unchanged (assumed initialized to defaults)
extern int get_cur_resolution(int& xres, int& yres);
// useful for determining aspect ratio. not called by detect().
// if we fail, outputs are unchanged (assumed initialized to defaults)
extern int get_monitor_size(int& width_cm, int& height_cm);
extern char gfx_card[64]; // default: "unknown"

View File

@ -97,6 +97,11 @@ int atexit2(void* func, uintptr_t arg, CallConvention cc)
}
int atexit2(void* func)
{
return atexit2(func, 0, CC_CDECL_0);
}
// call from main as early as possible.

View File

@ -32,14 +32,11 @@
#endif
extern void log_out(char* fmt, ...);
extern void log_out2(char* fmt, ...);
extern void log_out3(char* fmt, ...);
extern "C" int _heapchk();
#define STMT(STMT_code__) do { STMT_code__; } while(0)
// must not be used before main entered! (i.e. not from NLS constructors / functions)
#define ONCE(ONCE_code__)\
{\
STMT(\
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;\
static bool ONCE_done__ = false;\
if(pthread_mutex_trylock(&(mutex)) == 0 && !ONCE_done__)\
@ -47,7 +44,15 @@ extern void log_out(char* fmt, ...);
ONCE_done__ = true;\
ONCE_code__;\
}\
}
)
#define CHECK_ERR(func)\
STMT(\
int err = (int)(func);\
if(err < 0)\
return err;\
)
enum LibError
@ -57,9 +62,9 @@ enum LibError
ERR_EOF = -1002, // attempted to read beyond EOF
ERR_INVALID_PARAM = -1003,
ERR_FILE_NOT_FOUND = -1004,
ERR_PATH_NOT_FOUND = -1005,
ERR_x
ERR_LAST
};
@ -83,8 +88,9 @@ ERR_x
// generate a symbol containing the line number of the macro invocation.
// used to give a unique name (per file) to types made by cassert.
// we can't prepend __FILE__ to make it globally unique -
// the filename may be enclosed in quotes.
// we can't prepend __FILE__ to make it globally unique - the filename
// may be enclosed in quotes. need the 2 macro expansions to make sure
// __LINE__ is expanded correctly.
#define MAKE_UID2__(l) LINE_ ## l
#define MAKE_UID1__(l) MAKE_UID2__(l)
#define UID__ MAKE_UID1__(__LINE__)
@ -94,14 +100,14 @@ ERR_x
#define cassert(expr) struct UID__ { int CASSERT_FAILURE: (expr); };
// less helpful error message, but redefinition doesn't trigger warnings.
#define cassert2(expr) extern char CASSERT_FAILURE[expr];
#define cassert2(expr) extern char CASSERT_FAILURE[1][(expr)];
// note: alternative method in C++: specialize a struct only for true;
// sizeof will raise 'incomplete type' errors if instantiated with false.
// using it will raise 'incomplete type' errors if instantiated with false.
#define STMT(STMT_code__) do { STMT_code__; } while(0)
// converts 4 character string to u32 for easy comparison
@ -121,8 +127,8 @@ ERR_x
const size_t KB = 1 << 10;
const size_t MB = 1 << 20;
const size_t KB = 1ul << 10;
const size_t MB = 1ul << 20;
#ifdef _WIN32
@ -132,17 +138,16 @@ const size_t MB = 1 << 20;
#endif
#define BIT(n) (1ul << (n))
// call from main as early as possible.
void lib_init();
extern void lib_init();
enum CallConvention // number of parameters
enum CallConvention // number of parameters and convention
{
CC_CDECL_0,
CC_CDECL_1,
@ -151,20 +156,18 @@ enum CallConvention // number of parameters
CC_STDCALL_1,
#endif
CC_UNUSED // no trailing comma if !_WIN32
CC_UNUSED // get rid of trailing comma when !_WIN32
};
#ifdef _WIN32
#define CC_DEFAULT CC_STDCALL_1
#else
#define CC_DEFAULT CC_CDECL_1
#endif
// more powerful atexit: registers an exit handler, with 0 or 1 parameters.
// callable before libc initialized, and frees up the real atexit table.
// stdcall convention is provided on Windows to call APIs (e.g. WSACleanup).
// for these to be called at exit, lib_main must be invoked after _cinit.
extern int atexit2(void* func, uintptr_t arg, CallConvention cc = CC_DEFAULT);
extern int atexit2(void* func, uintptr_t arg, CallConvention cc = CC_CDECL_1);
// no parameters, cdecl (CC_CDECL_0)
extern int atexit2(void* func);
#include "posix.h"

View File

@ -49,10 +49,12 @@ u32 fnv_hash(const void* buf, const size_t len)
else
{
size_t bytes_left = len;
while(bytes_left--)
while(bytes_left != 0)
{
h ^= *p++;
h *= 0x01000193;
bytes_left--;
}
}
@ -62,37 +64,6 @@ u32 fnv_hash(const void* buf, const size_t len)
u16 bswap16(u16 x)
{
return (u16)(((x & 0xff) << 8) | (x >> 8));
}
u32 bswap32(u32 x)
{
#ifdef _M_IX86
__asm
{
mov eax, [x]
bswap eax
mov [x], eax
}
#else
u32 t = x;
for(int i = 0; i < 4; i++)
{
x <<= 8;
x |= t & 0xff;
}
#endif
return x;
}
void bswap32(const u8* data, int cnt)
{
@ -164,7 +135,7 @@ int ilog2(const int n)
}
static int ilog2(const float x)
int ilog2(const float x)
{
u32 i = (u32&)x;
u32 exp = (i >> 23) & 0xff;
@ -180,31 +151,17 @@ uintptr_t round_up(uintptr_t val, uintptr_t multiple)
}
//u16 addusw(u16 x, u16 y)
//{
// u32 t = x;
// return (u16)MIN(t+y, 0xffff);
//}
//
//
//u16 subusw(u16 x, u16 y)
//{
// long t = x;
// return MAX(t-y, 0);
//}
u16 addusw(u16 x, u16 y)
{
u32 t = x;
return (u16)MIN(t+y, 0xffff);
u32 t = x;
return (u16)MIN(t+y, 0xffff);
}
u16 subusw(u16 x, u16 y)
{
long t = x;
return (u16)(MAX(t-y, 0));
long t = x;
return (u16)(MAX(t-y, 0));
}
@ -243,7 +200,7 @@ void base32(const int len, const u8* in, u8* out)
*out++ = tbl[c];
}
}
/*
#ifndef _WIN32
char *_itoa(int value, char *out, int radix)
@ -292,3 +249,5 @@ char *_ltoa(long val, char *out, int radix)
}
#endif
*/

View File

@ -33,6 +33,9 @@
// otherwise, hash <len> bytes of buf.
extern u32 fnv_hash(const void* buf, const size_t len = 0);
// hash (currently FNV) of a filename
typedef u32 FnHash;
#ifndef min
inline int min(int a, int b)

View File

@ -10,6 +10,7 @@ extern "C" {
//
#ifdef _WIN32
#ifndef WINGDIAPI
#define WINGDIAPI __declspec(dllimport)
#endif
@ -19,15 +20,17 @@ extern "C" {
#ifndef APIENTRY
#define APIENTRY __stdcall
#endif
typedef unsigned short wchar_t; // for glu.h
#endif // #ifndef _WIN32
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <OpenGL/glu.h>
# include <OpenGL/gl.h>
# include <OpenGL/glu.h>
#else
#include <GL/gl.h>
#include <GL/glu.h>
# include <GL/gl.h>
# include <GL/glu.h>
#endif
@ -46,9 +49,9 @@ typedef unsigned short wchar_t; // for glu.h
#undef GL_GLEXT_PROTOTYPES
#ifdef __APPLE__
#include <OpenGL/glext.h>
# include <OpenGL/glext.h>
#else
#include <GL/glext.h>
# include <GL/glext.h>
#endif
#define GL_TEXTURE_IMAGE_SIZE_ARB 0x86A0
@ -59,9 +62,9 @@ typedef unsigned short wchar_t; // for glu.h
//
#ifdef _WIN32
#define CALL_CONV __stdcall
# define CALL_CONV __stdcall
#else
#define CALL_CONV
# define CALL_CONV
#endif
#define FUNC(ret, name, params) extern ret (CALL_CONV *name) params;

View File

@ -57,9 +57,210 @@ const size_t BLOCK_SIZE = 1ul << BLOCK_SIZE_LOG2;
// * requests for part of a block are usually followed by another.
// return the native equivalent of the given portable path
// (i.e. convert all '/' to the platform's directory separator).
// fails (returns -1) unless length < PATH_MAX (incl. 0-terminator).
static int mk_native_path(const char* const path, char* const n_path)
{
	// note: should a platform ever need a multi-character DIR_SEP,
	// this one-in/one-out copy (and the callers' fixed-size output
	// buffers) would have to be revisited.
	const char* src = path;
	char* dst = n_path;
	size_t total = 0;
	char c;
	do
	{
		// enforce the PATH_MAX limit before writing each byte.
		if(++total >= PATH_MAX)
			return -1;
		c = *src++;
		// translate portable to native directory separator.
		if(c == '/')
			c = DIR_SEP;
		*dst++ = c;
	}
	while(c != '\0');
	return 0;
}
// rationale for the data dir being the root:
// it sandboxes VFS users (e.g. untrusted scripts), so they can't
// overwrite critical game files outside the game's data directory.
// establish the file root: change into <root_dir>, which is taken
// relative to the directory containing the executable (<argv0>).
// returns 0 on success or a negative error code (-errno or -1).
int file_set_root_dir(const char* argv0, const char* root_dir)
{
	// security check: only allow attempting to set root dir once
	// (prevents malicious scripts from overwriting important files
	// above the intended VFS root)
	static bool already_attempted;
	if(already_attempted)
	{
		assert(0 && "vfs_set_root called more than once");
		return -1;
	}
	already_attempted = true;
	// sanity check: argv0 must name an executable file.
	if(access(argv0, X_OK) < 0)
		return -errno;
	// resolve argv0 to an absolute, symlink-free path.
	char path[PATH_MAX+1];
	path[PATH_MAX] = '\0';
	if(!realpath(argv0, path))
		return -errno;
	// remove executable name
	char* fn = strrchr(path, DIR_SEP);
	if(!fn)
		return -1;
	*fn = 0;
	// path is now the absolute path to the executable.
	if(chdir(path) < 0)
		return -errno;
	// convert root_dir to native separators, then descend into it.
	// the result becomes the process's working directory, i.e. the root
	// for all relative file accesses from here on.
	char* native_root = path; // reuse path[] (no longer needed)
	CHECK_ERR(mk_native_path(root_dir, native_root));
	if(chdir(native_root) < 0)
		return -errno;
	return 0;
}
// need to store entries returned by readdir so they can be sorted.
// (readdir yields entries in unspecified order, so we copy what we
// need into these records, sort, and only then invoke the callback)
struct DirEnt
{
	std::string name;	// filename only, without path
	uint flags;	// LOC_* flags (LOC_DIR is set for directories)
	ssize_t size;	// [bytes]; -1 if the entry is a directory
	DirEnt(const char* _name, uint _flags, ssize_t _size)
		: name(_name), flags(_flags), size(_size) {}
};
typedef std::vector<DirEnt> DirEnts;
typedef DirEnts::iterator DirEntsIt;
static bool dirent_less(DirEnt& d1, DirEnt& d2)
{ return d1.name.compare(d2.name) < 0; }
// enumerate all directory entries of <dir> (a portable path) in
// alphabetical order, calling <cb> for each one (passing <user> through).
// returns 0 on success; otherwise the first error reported by <cb>,
// or failing that, by stat.
int file_enum_dirents(const char* dir, DirEntCB cb, uintptr_t user)
{
	char n_path[PATH_MAX+1];
	n_path[PATH_MAX] = '\0';
	// will append filename to this, hence "path".
	// 0-terminate simplifies filename strncpy below.
	CHECK_ERR(mk_native_path(dir, n_path));
	// all entries are enumerated (adding to list), sorted, then passed to cb
	DirEnts dirents;
	int stat_err = 0;	// first error reported by stat
	int cb_err = 0;		// first error reported by cb
	int ret;
	DIR* os_dir = opendir(n_path);
	if(!os_dir)
		return -1;
	// will append file names here
	const size_t n_path_len = strlen(n_path);
	char* fn_start = n_path + n_path_len;
	*fn_start++ = DIR_SEP;
	struct dirent* ent;
	while((ent = readdir(os_dir)))
	{
		const char* fn = ent->d_name;
		// BUGFIX: copy at most PATH_MAX-n_path_len-1 chars, so the
		// sentinel '\0' at n_path[PATH_MAX] is never overwritten
		// (strncpy does not 0-terminate on truncation).
		strncpy(fn_start, fn, PATH_MAX-n_path_len-1);
		// stat needs the relative path. this is easier than changing
		// directory every time, and should be fast enough.
		struct stat s;
		ret = stat(n_path, &s);
		if(ret < 0)
		{
			if(stat_err == 0)
				stat_err = ret;
			continue;
		}
		uint flags = 0;
		ssize_t size = s.st_size;
		// dir. BUGFIX: use S_ISDIR, which tests the entire file-type
		// field - "st_mode & S_IFDIR" also matches types that share
		// bits with S_IFDIR (e.g. sockets).
		if(S_ISDIR(s.st_mode))
		{
			// skip . and ..
			if(fn[0] == '.' && (fn[1] == '\0' || (fn[1] == '.' && fn[2] == '\0')))
				continue;
			flags |= LOC_DIR;
			size = -1;
		}
		// skip if neither dir nor regular file (same rationale: S_ISREG
		// instead of "& S_IFREG")
		else if(!S_ISREG(s.st_mode))
			continue;
		dirents.push_back(DirEnt(fn, flags, size));
	}
	closedir(os_dir);
	std::sort(dirents.begin(), dirents.end(), dirent_less);
	// report each entry, remembering the first callback error.
	for(DirEntsIt it = dirents.begin(); it != dirents.end(); ++it)
	{
		const char* name_c = it->name.c_str();
		const uint flags = it->flags;
		const ssize_t size = it->size;
		ret = cb(name_c, flags, size, user);
		if(ret < 0)
			if(cb_err == 0)
				cb_err = ret;
	}
	// callback errors take precedence over stat errors.
	if(cb_err < 0)
		return cb_err;
	return stat_err;
}
// stat the file/directory given by a portable path
// (converted to native form before the call).
int file_stat(const char* path, struct stat* s)
{
	// translate to native separators; bail with the error code on failure.
	char native[PATH_MAX+1];
	CHECK_ERR(mk_native_path(path, native));
	return stat(native, s);
}
///////////////////////////////////////////////////////////////////////////////
//
// file open/close
// stores information about file (e.g. size) in File struct
//
///////////////////////////////////////////////////////////////////////////////
@ -93,7 +294,7 @@ static const u32 FILE_MAGIC = FOURCC('F','I','L','E');
#endif
static int file_validate(uint line, File* f)
static int file_validate(const uint line, File* const f)
{
const char* msg = "";
int err = -1;
@ -135,10 +336,13 @@ do\
while(0);
int file_open(const char* path, int flags, File* f)
int file_open(const char* p_fn, int flags, File* f)
{
memset(f, 0, sizeof(File));
char n_fn[PATH_MAX];
CHECK_ERR(mk_native_path(p_fn, n_fn));
if(!f)
goto invalid_f;
// jump to CHECK_FILE post-check, which will handle this.
@ -153,15 +357,16 @@ int file_open(const char* path, int flags, File* f)
else
{
struct stat s;
int err = stat(path, &s);
if(err < 0)
return err;
if(stat(n_fn, &s) < 0)
return -1;
if(!(s.st_mode & S_IFREG))
return -1;
size = s.st_size;
}
int fd = open(path, mode);
int fd = open(n_fn, mode);
if(fd < 0)
return 1;
return -1;
#ifdef PARANOIA
f->magic = FILE_MAGIC;
@ -169,7 +374,7 @@ int file_open(const char* path, int flags, File* f)
f->flags = flags;
f->size = size;
f->fn_hash = fnv_hash(path);
f->fn_hash = fnv_hash(n_fn); // copy filename insteaD?
f->mapping = 0;
f->fd = fd;
}
@ -197,6 +402,73 @@ int file_close(File* f)
}
///////////////////////////////////////////////////////////////////////////////
//
// low level IO
// thin wrapper over aio; no alignment or caching
//
///////////////////////////////////////////////////////////////////////////////
// opaque wrapper around the aio control block, so that callers of
// ll_start_io / ll_wait_io need not see <aiocb> internals.
struct ll_cb
{
	aiocb cb;	// the underlying POSIX async-I/O control block
};
// issue an asynchronous transfer of <size> bytes at file offset <ofs>,
// to/from buffer <p> (write if f->flags has FILE_WRITE set, else read),
// tracked by the given control block.
// returns the lio_listio result (0 if queued) or a negative error code.
// does not wait for completion - pass <lcb> to ll_wait_io for that.
int ll_start_io(File* f, size_t ofs, size_t size, void* p, ll_cb* lcb)
{
	CHECK_FILE(f)
	// reject empty transfers - almost certainly a caller bug.
	if(size == 0)
	{
		assert(0 && "ll_start_io: size = 0 - why?");
		return ERR_INVALID_PARAM;
	}
	// offset must lie within the file.
	if(ofs >= f->size)
	{
		assert(0 && "ll_start_io: ofs beyond f->size");
		return -1;
	}
	size_t bytes_left = f->size - ofs; // > 0
	int op = (f->flags & FILE_WRITE)? LIO_WRITE : LIO_READ;
	// don't read beyond EOF
	if(size > bytes_left) // avoid min() - it wants int
		size = bytes_left;
	aiocb* cb = &lcb->cb;
	// send off async read/write request
	cb->aio_lio_opcode = op;
	cb->aio_buf = p;
	cb->aio_fildes = f->fd;
	cb->aio_offset = (off_t)ofs;
	cb->aio_nbytes = size;
	return lio_listio(LIO_NOWAIT, &cb, 1, (struct sigevent*)0);
	// this just issues the I/O - doesn't wait until complete.
}
// block until the transfer tracked by <lcb> has completed.
// as a convenience, the transfer buffer is returned via <p>
// (rather than exposing the ll_cb internals).
// returns the number of bytes actually transferred,
// or -1 if the transfer failed.
ssize_t ll_wait_io(ll_cb* lcb, void*& p)
{
	aiocb* const cb = &lcb->cb;
	// sleep until the request leaves the "in progress" state.
	for(;;)
	{
		if(aio_error(cb) != EINPROGRESS)
			break;
		aio_suspend(&cb, 1, NULL);
	}
	// hand back the buffer that was given to ll_start_io.
	p = cb->aio_buf;
	// reap the final status / transfer count of the request.
	return aio_return(cb);
}
///////////////////////////////////////////////////////////////////////////////
//
// block cache
@ -204,22 +476,25 @@ int file_close(File* f)
///////////////////////////////////////////////////////////////////////////////
static Cache c;
static Cache<void*> c;
// don't need a Block struct as cache data -
// it stores its associated ID, and does refcounting
// with lock().
// create a tag for use with the Cache that uniquely identifies
// create an id for use with the Cache that uniquely identifies
// the block from the file <fn_hash> containing <ofs>.
static u64 make_tag(u32 fn_hash, size_t ofs)
static u64 block_make_id(const u32 fn_hash, const size_t ofs)
{
// tag format: filename hash | block number
// 63 32 31 0
// id format: filename hash | block number
// 63 32 31 0
//
// we assume the hash (currently: FNV) is unique for all filenames.
// chance of a collision is tiny, and a build tool will ensure
// filenames in the VFS archives are safe.
//
// block_num will always fit in 32 bits (assuming maximum file size
// = 2^32 * BLOCK_SIZE = 2^48 - enough); we check this, but don't
// = 2^32 * BLOCK_SIZE = 2^48 -- plenty); we check this, but don't
// include a workaround. we could return 0, and the caller would have
// to allocate their own buffer, but don't bother.
@ -227,60 +502,61 @@ static u64 make_tag(u32 fn_hash, size_t ofs)
size_t block_num = ofs / BLOCK_SIZE;
assert(block_num <= 0xffffffff);
u64 tag = fn_hash; // careful, don't shift a u32 32 bits left
tag <<= 32;
tag |= block_num;
return tag;
u64 id = fn_hash; // careful, don't shift a u32 32 bits left
id <<= 32;
id |= block_num;
return id;
}
//
static void* block_alloc(u64 tag)
static void* block_alloc(const u64 id)
{
void* p;
// initialize pool, if not done already.
static size_t cache_size;
static size_t cache_pos = 0;
static void* cache = 0;
if(!cache)
// initialize cache, if not done already.
static bool cache_initialized;
if(!cache_initialized)
{
cache_size = 16 * BLOCK_SIZE;
get_mem_status();
// TODO: adjust cache_size
cache = mem_alloc(cache_size, BLOCK_SIZE);
if(!cache)
// TODO: calculate size
size_t num_blocks = 16;
// evil: waste some mem (up to one block) to make sure the first block
// isn't at the start of the allocation, so that users can't
// mem_free() it. do this by manually aligning the pool.
//
// allocator will free the whole thing at exit.
void* pool = mem_alloc((num_blocks+1) * BLOCK_SIZE);
if(!pool)
return 0;
}
// we have free blocks - add to cache
if(cache_pos < cache_size)
{
p = (char*)cache + cache_pos;
cache_pos += BLOCK_SIZE;
uintptr_t start = round_up((uintptr_t)pool + 1, BLOCK_SIZE);
// +1 => if already block-aligned, add a whole block!
if(c.add(tag, p) < 0)
// add all blocks to cache
void* p = (void*)start;
for(size_t i = 0; i < num_blocks; i++)
{
assert(0 && "block_alloc: Cache::add failed!");
return 0;
if(c.grow(p) < 0)
assert(0 && "block_alloc: Cache::grow failed!");
// currently can't fail.
p = (char*)p + BLOCK_SIZE;
}
}
// all of our pool's blocks are in the cache.
// displace the LRU entry. if not possible (all are locked), fail.
else
{
p = c.replace_lru_with(tag);
if(!p)
return 0;
cache_initialized = true;
}
if(c.lock(tag, true) < 0)
void** entry = c.assign(id);
if(!entry)
return 0;
void* block = *entry;
if(c.lock(id, true) < 0)
assert(0 && "block_alloc: Cache::lock failed!");
// can't happen: only cause is tag not found, but we successfully
// added it above. if it did fail, that'd be bad: we leak the block,
// and/or the buffer may be displaced while in use. hence, assert.
return p;
return block;
}
@ -290,11 +566,20 @@ static void* block_alloc(u64 tag)
// instead, add a copy on write call, if necessary.
static int block_retrieve(u64 tag, void*& p)
static int block_retrieve(const u64 id, void*& p)
{
p = c.get(tag);
return p? 0 : -1;
void** entry = c.retrieve(id);
if(entry)
{
p = *entry;
c.lock(id, true); // add reference
return 0;
}
else
{
p = 0;
return -1;
}
// important note:
// for the purposes of starting the IO, we can regard blocks whose read
@ -303,35 +588,29 @@ static int block_retrieve(u64 tag, void*& p)
//
// don't want to require IOs to be completed in order of issue:
// that'd mean only 1 caller can read from file at a time.
// would obviate associating tag with IO, but is overly restrictive.
// would obviate associating id with IO, but is overly restrictive.
}
static int block_discard(u64 tag)
static int block_discard(const u64 id)
{
return c.lock(tag, false);
return c.lock(id, false);
}
void file_free_buf(void *p)
{
}
// remove from cache?
int discard_buf(void* p)
{
return 0;
}
int free_buf(void* p)
int file_free_buf(void*& p)
{
uintptr_t _p = (uintptr_t)p;
void* actual_p = (void*)(_p - (_p % BLOCK_SIZE)); // round down
return mem_free(actual_p);
// remove from cache?
// check if in use?
}
///////////////////////////////////////////////////////////////////////////////
//
// async I/O
@ -339,27 +618,25 @@ int free_buf(void* p)
///////////////////////////////////////////////////////////////////////////////
enum
{
CACHED = 1,
};
struct IO
{
struct aiocb* cb;
// struct aiocb is too big to store here.
// IOs are reused, so we don't allocate a
// new aiocb every file_start_io.
u64 block_id;
// transferring via cache (=> BLOCK_SIZE aligned) iff != 0
u64 tag;
void* block;
// valid because cache line is locked
// (rug can't be pulled out from under us)
ll_cb* cb;
// this is too big to store here. IOs are reused,
// so we don't allocate a new cb every file_start_io.
void* user_p;
size_t user_ofs;
size_t user_size;
uint flags;
void* block;
int cached : 1;
int pending : 1;
};
H_TYPE_DEFINE(IO)
@ -371,8 +648,8 @@ H_TYPE_DEFINE(IO)
static void IO_init(IO* io, va_list args)
{
size_t size = round_up(sizeof(struct aiocb), 16);
io->cb = (struct aiocb*)mem_alloc(size, 16, MEM_ZERO);
size_t size = round_up(sizeof(struct ll_cb), 16);
io->cb = (ll_cb*)mem_alloc(size, 16, MEM_ZERO);
}
static void IO_dtor(IO* io)
@ -390,9 +667,7 @@ static void IO_dtor(IO* io)
// aio_result, which would terminate the read.
static int IO_reload(IO* io, const char* fn)
{
if(!io->cb)
return -1;
return 0;
return io->cb? 0 : ERR_NO_MEM;
}
@ -401,7 +676,7 @@ static int IO_reload(IO* io, const char* fn)
// extra layer on top of h_alloc, so we can reuse IOs
//
// (avoids allocating the aiocb every IO => less fragmentation)
// (avoids allocating the cb every IO => less heap fragmentation)
//
// don't worry about reassigning IOs to their associated file -
// they don't need to be reloaded, since the VFS refuses reload
@ -413,7 +688,7 @@ typedef std::vector<Handle> IOList;
static IOList free_ios;
// list of all IOs allocated.
// used to find active IO, given tag (see below).
// used to find active IO, given id (see below).
// also used to free all IOs before the handle manager
// cleans up at exit, so they aren't seen as resource leaks.
@ -436,8 +711,8 @@ static void io_cleanup(void)
static Handle io_alloc()
{
ONCE(atexit(io_cleanup))
ONCE(atexit(io_cleanup));
/*
// grab from freelist
if(!free_ios.empty())
{
@ -448,13 +723,15 @@ static Handle io_alloc()
// we don't check if the freelist contains valid handles.
// that "can't happen", and if it does, it'll be caught
// by the handle dereference in file_start_io.
//
// note that no one else can actually free an IO -
// that would require its handle type, which is private to
// this module. the free_io call just adds it to the freelist;
// all allocated IOs are destroyed by the handle manager at exit.
}
//
// no one else can actually free an IO - that would require
// its handle type, which is private to this module.
// the free_io call just adds it to the freelist;
// all allocated IOs are destroyed in io_cleanup at exit.
return h;
}
*/
// allocate a new IO
Handle h = h_alloc(H_IO, 0);
// .. it's valid - store in list.
@ -466,24 +743,41 @@ static Handle io_alloc()
static int io_free(Handle hio)
{
// mark it unused, and incidentally make sure hio is valid
// before adding to freelist.
H_DEREF(hio, IO, io);
io->tag = 0;
if(io->pending)
{
assert(0 && "io_free: IO pending");
return -1;
}
// clear the other IO fields, just to be sure.
// (but don't memset the whole thing - that'd trash the cb pointer!)
io->block_id = 0;
io->block = 0;
io->cached = 0;
io->user_ofs = 0;
io->user_size = 0;
io->user_p = 0;
memset(io->cb, 0, sizeof(ll_cb));
// TODO: complain if buffer not yet freed?
// we know hio is valid, since we successfully dereferenced above.
free_ios.push_back(hio);
return 0;
}
// need to find IO, given tag, to make sure a block
// need to find IO, given id, to make sure a block
// that is marked cached has actually been read.
// it is expected that there only be a few allocated IOs,
// so it's ok to search this list every cache hit.
// adding to the cache data structure would be messier.
struct FindTag : public std::binary_function<Handle, u64, bool>
struct FindBlock : public std::binary_function<Handle, u64, bool>
{
bool operator()(Handle hio, u64 tag) const
bool operator()(Handle hio, u64 block_id) const
{
// can't use H_DEREF - we return bool
IO* io = (IO*)h_user_data(hio, H_IO);
@ -492,14 +786,14 @@ struct FindTag : public std::binary_function<Handle, u64, bool>
assert(0 && "invalid handle in all_ios list!");
return false;
}
return io->tag == tag;
return io->block_id == block_id;
}
};
static Handle io_find_tag(u64 tag)
static Handle io_find(u64 block_id)
{
IOList::const_iterator it;
it = std::find_if(all_ios.begin(), all_ios.end(), std::bind2nd(FindTag(), tag));
it = std::find_if(all_ios.begin(), all_ios.end(), std::bind2nd(FindBlock(), block_id));
// not found
if(it == all_ios.end())
return 0;
@ -513,7 +807,7 @@ static Handle io_find_tag(u64 tag)
// rationale for extra alignment / copy layer, even though aio takes care of it:
// aio would read pad to its minimum read alignment, copy over, and be done;
// aio would pad to its minimum read alignment, copy over, and be done;
// in our case, if something is unaligned, a request for the remainder of the
// block is likely to follow, so we want to cache the whole block.
@ -546,43 +840,47 @@ Handle file_start_io(File* f, size_t user_ofs, size_t user_size, void* user_p)
user_size = bytes_left;
const u64 tag = make_tag(f->fn_hash, user_ofs);
u64 block_id = block_make_id(f->fn_hash, user_ofs);
// allocate IO slot
Handle hio = io_alloc();
H_DEREF(hio, IO, io);
struct aiocb* cb = io->cb;
io->tag = tag;
io->user_p = user_p;
io->user_ofs = user_ofs;
io->block_id = block_id;
io->user_p = user_p;
io->user_ofs = user_ofs;
io->user_size = user_size;
// notes: io->flags and io->block are already zeroed;
// cb holds the actual IO request (aligned offset and size).
// notes: io->cached, io->pending and io->block are already zeroed;
// cb will receive the actual IO request (aligned offset and size).
// if already cached, we're done
if(block_retrieve(tag, io->block) == 0)
{
io->flags = CACHED;
return hio;
}
debug_out("file_start_io hio=%I64x ofs=%d size=%d\n", hio, user_ofs, user_size);
// aio already safely handles unaligned buffers or offsets.
// when reading zip files, we don't want to repeat a read
// if a block containing end of one file and start of the next
// if a block contains the end of one file and start of the next
// (speed concern).
// therefore, we align and round up to whole blocks.
//
// note: cache even if this is the last block before EOF:
// a zip archive may contain one last file in the block.
// if not, no loss - the buffer will be LRU, and reused.
size_t ofs = user_ofs;
size_t padding = ofs % BLOCK_SIZE;
ofs -= padding;
size_t size = round_up(padding + user_size, BLOCK_SIZE);
// if already cached, we're done
if(size == BLOCK_SIZE && block_retrieve(block_id, io->block) == 0)
{
debug_out("file_start_io: cached! block # = %d\n", block_id & 0xffffffff);
io->cached = 1;
return hio;
}
void* buf = 0;
void* our_buf = 0;
@ -591,10 +889,13 @@ Handle file_start_io(File* f, size_t user_ofs, size_t user_size, void* user_p)
else
{
if(size == BLOCK_SIZE)
our_buf = io->block = block_alloc(tag);
our_buf = io->block = block_alloc(block_id);
// transferring more than one block - doesn't go through cache!
else
{
our_buf = mem_alloc(size, BLOCK_SIZE);
block_id = 0;
}
if(!our_buf)
{
err = ERR_NO_MEM;
@ -604,20 +905,14 @@ Handle file_start_io(File* f, size_t user_ofs, size_t user_size, void* user_p)
buf = our_buf;
}
// send off async read/write request
cb->aio_lio_opcode = op;
cb->aio_buf = buf;
cb->aio_fildes = f->fd;
cb->aio_offset = (off_t)ofs;
cb->aio_nbytes = size;
err = lio_listio(LIO_NOWAIT, &cb, 1, (struct sigevent*)0);
// return as soon as I/O is queued
err = ll_start_io(f, ofs, size, buf, io->cb);
if(err < 0)
{
fail:
file_discard_io(hio);
file_free_buf(our_buf);
if(size != BLOCK_SIZE)
file_free_buf(our_buf);
return err;
}
@ -627,41 +922,35 @@ fail:
int file_wait_io(const Handle hio, void*& p, size_t& size)
{
debug_out("file_wait_io: hio=%I64x\n", hio);
int ret = 0;
p = 0;
size = 0;
H_DEREF(hio, IO, io);
struct aiocb* cb = io->cb;
ll_cb* cb = io->cb;
size = io->user_size;
ssize_t bytes_transferred;
// block's tag is in cache. need to check if its read is still pending.
if(io->flags & CACHED)
if(io->cached)
{
Handle cache_hio = io_find_tag(io->tag);
Handle cache_hio = io_find(io->block_id);
// was already finished - don't wait
if(cache_hio <= 0)
goto skip_wait;
// not finished yet; wait for it below, as with uncached reads.
else
{
H_DEREF(cache_hio, IO, cache_io);
// can't fail, since io_find_tag has to dereference each handle.
cb = cache_io->cb;
}
// not finished yet; will wait for it below, as with uncached reads.
H_DEREF(cache_hio, IO, cache_io);
// can't fail, since io_find has to dereference each handle.
cb = cache_io->cb;
}
// wait for transfer to complete
{
while(aio_error(cb) == EINPROGRESS)
aio_suspend(&cb, 1, NULL);
void* transfer_buf;
ssize_t bytes_transferred = ll_wait_io(cb, transfer_buf);
bytes_transferred = aio_return(cb);
ret = bytes_transferred? 0 : -1;
}
skip_wait:
if(io->block)
@ -679,9 +968,9 @@ skip_wait:
else
p = src;
}
// read directly into target buffer
// we had read directly into target buffer
else
p = (void *)cb->aio_buf; // cb->aio_buf is volatile, p is not
p = transfer_buf;
return ret;
}
@ -690,7 +979,7 @@ skip_wait:
int file_discard_io(Handle& hio)
{
H_DEREF(hio, IO, io);
block_discard(io->tag);
block_discard(io->block_id);
io_free(hio);
return 0;
}
@ -702,7 +991,7 @@ int file_discard_io(Handle& hio)
// *p != 0: *p is the source/destination address for the transfer.
// (FILE_MEM_READONLY?)
// *p == 0: allocate a buffer, read into it, and return it in *p.
// when no longer needed, it must be freed via file_discard_buf.
// when no longer needed, it must be freed via file_free_buf.
// p == 0: read raw_size bytes from file, starting at offset raw_ofs,
// into temp buffers; each block read is passed to cb, which is
// expected to write actual_size bytes total to its output buffer
@ -714,6 +1003,8 @@ int file_discard_io(Handle& hio)
ssize_t file_io(File* const f, const size_t raw_ofs, size_t raw_size, void** const p,
const FILE_IO_CB cb, const uintptr_t ctx) // optional
{
debug_out("file_io fd=%d size=%d ofs=%d\n", f->fd, raw_size, raw_ofs);
CHECK_FILE(f)
const bool is_write = (f->flags == FILE_WRITE);
@ -743,7 +1034,9 @@ ssize_t file_io(File* const f, const size_t raw_ofs, size_t raw_size, void** con
const size_t misalign = raw_ofs % BLOCK_SIZE;
// actual transfer start offset
const size_t start_ofs = raw_ofs - misalign; // BLOCK_SIZE-aligned
// not aligned! aio takes care of initial unalignment;
// next read will be aligned, because we read up to the next block.
const size_t start_ofs = raw_ofs;
void* buf = 0; // I/O source or sink; assume temp buffer
@ -805,18 +1098,32 @@ ssize_t file_io(File* const f, const size_t raw_ofs, size_t raw_size, void** con
//
const int MAX_IOS = 2;
Handle ios[MAX_IOS] = { 0, 0 };
Handle ios[MAX_IOS] = { 0 };
if(ios[0] || ios[1])abort();
int head = 0;
int tail = 0;
int pending_ios = 0;
bool all_issued = false;
size_t raw_cnt = 0; // amount of raw data transferred so far
size_t issue_cnt = 0; // sum of I/O transfer requests
// (useful, raw data: possibly compressed, but doesn't count padding)
size_t raw_transferred_cnt = 0;
size_t issue_cnt = 0;
// if callback, what it reports; otherwise, = raw_transferred_cnt
// this is what we'll return
size_t actual_transferred_cnt = 0;
ssize_t err = +1; // loop terminates if <= 0
static int seq;
seq++;
if(seq == 4)
seq=4;
for(;;)
{
// queue not full, data remaining to transfer, and no error:
@ -824,11 +1131,11 @@ ssize_t file_io(File* const f, const size_t raw_ofs, size_t raw_size, void** con
if(pending_ios < MAX_IOS && !all_issued && err > 0)
{
// calculate issue_size:
// want to transfer up to the next block boundary.
// at most, transfer up to the next block boundary.
size_t issue_ofs = start_ofs + issue_cnt;
const size_t left_in_block = BLOCK_SIZE - (issue_ofs % BLOCK_SIZE);
const size_t left_in_file = raw_size - issue_cnt;
size_t issue_size = MIN(left_in_block, left_in_file);
const size_t total_left = raw_size - issue_cnt;
size_t issue_size = MIN(left_in_block, total_left);
// assume temp buffer allocated by file_start_io
void* data = 0;
@ -866,7 +1173,7 @@ ssize_t file_io(File* const f, const size_t raw_ofs, size_t raw_size, void** con
//// if size comes out short, we must be at EOF
raw_cnt += size;
raw_transferred_cnt += size;
if(cb && !(err <= 0))
{
@ -876,7 +1183,12 @@ ssize_t file_io(File* const f, const size_t raw_ofs, size_t raw_size, void** con
// pending transfers to complete.
if(ret <= 0)
err = ret;
else
actual_transferred_cnt += ret;
}
// no callback to process data: raw = actual
else
actual_transferred_cnt += size;
file_discard_io(hio); // zeroes array entry
}
@ -899,9 +1211,9 @@ ssize_t file_io(File* const f, const size_t raw_ofs, size_t raw_size, void** con
return err;
}
assert(issue_cnt >= raw_cnt && raw_cnt == raw_size);
assert(issue_cnt == raw_transferred_cnt && raw_transferred_cnt == raw_size);
return (ssize_t)raw_cnt;
return (ssize_t)actual_transferred_cnt;
}

View File

@ -53,12 +53,26 @@ enum
// do not cache any part of the file
// (e.g. if caching on a higher level)
FILE_NO_CACHE = 4
};
extern int file_open(const char* path, int flags, File* f);
// keep in sync with zip.cpp and vfs.cpp *_CB_FLAGS!
enum FILE_CB_FLAGS
{
// location
LOC_DIR = BIT(0),
};
extern int file_set_root_dir(const char* argv0, const char* root);
typedef int(*DirEntCB)(const char* name, uint flags, ssize_t size, uintptr_t user);
extern int file_enum_dirents(const char* dir, DirEntCB cb, uintptr_t user);
extern int file_stat(const char* path, struct stat*);
extern int file_open(const char* fn, int flags, File* f);
extern int file_close(File* f);
extern int file_map(File* f, void*& p, size_t& size);

View File

@ -40,7 +40,7 @@
// why fixed size control blocks, instead of just allocating dynamically?
// it is expected that resources be created and freed often. this way is
// much nicer to the memory manager. defining control blocks larger than
// the alloted space is caught by h_alloc (made possible by the vtbl builder
// the allotted space is caught by h_alloc (made possible by the vtbl builder
// storing control block size). it is also efficient to have all CBs in an
// more or less contiguous array (see below).
//
@ -69,19 +69,20 @@
const uint TAG_SHIFT = 0;
const u32 TAG_MASK = 0xffffffff; // safer than (1 << 32) - 1
// - index (0-based) points to control block in our array.
// - index (0-based) of control block in our array.
// (field width determines maximum currently open handles)
#define IDX_BITS 16
const uint IDX_SHIFT = 32;
const i32 IDX_MASK = (1l << IDX_BITS) - 1;
const u32 IDX_MASK = (1l << IDX_BITS) - 1;
// make sure both fields fit within a Handle variable
cassert(IDX_BITS + TAG_BITS <= sizeof(Handle)*CHAR_BIT);
// return the handle's index field (always non-negative).
// no error checking!
static inline i32 h_idx(const Handle h)
{ return (i32)((h >> IDX_SHIFT) & IDX_MASK); }
static inline u32 h_idx(const Handle h)
{ return (u32)((h >> IDX_SHIFT) & IDX_MASK); }
// return the handle's tag field.
// no error checking!
@ -89,7 +90,7 @@ static inline u32 h_tag(const Handle h)
{ return (u32)((h >> TAG_SHIFT) & TAG_MASK); }
// build a handle from index and tag
static inline Handle handle(const i32 idx, const i32 tag)
static inline Handle handle(const u32 idx, const u32 tag)
{
assert(idx <= IDX_MASK && tag <= TAG_MASK && "handle: idx or tag too big");
// somewhat clunky, but be careful with the shift:
@ -104,28 +105,38 @@ static inline Handle handle(const i32 idx, const i32 tag)
// internal per-resource-instance data
//
// determines maximum number of references to a resource.
// a handle's idx field isn't stored in its HDATA entry (not needed);
// to save space, this should take its place, i.e. it should fit in IDX_BITS.
static const uint REF_BITS = 12;
static const uint REF_BITS = 8;
static const u32 REF_MAX = 1ul << REF_BITS;
static const uint TYPE_BITS = 8;
// a handle's idx field isn't stored in its HDATA entry (not needed);
// to save space, these should take its place, i.e. they should fit in IDX_BITS.
// if not, ignore + comment out this assertion.
cassert(REF_BITS + TYPE_BITS <= IDX_BITS);
// chosen so that all current resource structs are covered,
// and so sizeof(HDATA) is a power of 2 (for more efficient array access
// and array page usage).
static const size_t HDATA_USER_SIZE = 48;
static const size_t HDATA_MAX_PATH = 64;
// 64 bytes
struct HDATA
{
uintptr_t key;
u32 tag : TAG_BITS;
u32 refs : REF_BITS;
u32 type_idx : TYPE_BITS;
H_Type type;
const char* fn;
u8 user[HDATA_USER_SIZE];
char fn[HDATA_MAX_PATH];
};
@ -190,8 +201,8 @@ static HDATA* h_data_any_type(const Handle h)
i32 idx = h_idx(h);
// this function is only called for existing handles.
// they'd also fail the tag check below, but bail here
// to avoid needlessly allocating that entry's page.
// they'd also fail the tag check below, but bail out here
// already to avoid needlessly allocating that entry's page.
if(idx > last_in_use)
return 0;
HDATA* hd = h_data(idx);
@ -282,7 +293,7 @@ static int alloc_idx(i32& idx, HDATA*& hd)
assert(!"alloc_idx: too many open handles (increase IDX_BITS)");
return -1;
}
idx = last_in_use+1; // incrementing idx would start it at 1
idx = last_in_use+1; // just incrementing idx would start it at 1
hd = h_data(idx);
if(!hd)
return ERR_NO_MEM;
@ -325,7 +336,7 @@ int h_free(Handle& h, H_Type type)
if(!hd)
return ERR_INVALID_HANDLE;
// have valid refcount (don't decrement if alread 0)
// have valid refcount (don't decrement if already 0)
if(hd->refs > 0)
{
hd->refs--;
@ -356,7 +367,7 @@ int h_free(Handle& h, H_Type type)
// any further params are passed to type's init routine
Handle h_alloc(H_Type type, const char* fn, uint flags, ...)
{
ONCE(atexit(cleanup))
ONCE(atexit(cleanup));
Handle err;

View File

@ -83,15 +83,35 @@ static void* pool_alloc(const size_t size, const uint align, uintptr_t& ctx, MEM
//////////////////////////////////////////////////////////////////////////////
static bool has_shutdown = false;
typedef std::map<void*, Handle> PtrToH;
static PtrToH* _ptr_to_h;
static void ptr_to_h_shutdown()
{
has_shutdown = true;
delete _ptr_to_h;
}
// undefined NLSO init order fix
static PtrToH& _ptr_to_h()
static PtrToH& get_ptr_to_h()
{
static PtrToH ptr_to_h_;
return ptr_to_h_;
if(!_ptr_to_h)
{
if(has_shutdown)
assert("mem.cpp: ptr -> handle lookup used after module shutdown");
// crash + burn
_ptr_to_h = new PtrToH;
atexit2(ptr_to_h_shutdown);
}
return *_ptr_to_h;
}
#define ptr_to_h _ptr_to_h()
#define ptr_to_h get_ptr_to_h()
// not needed by other modules - mem_get_size and mem_assign is enough.
@ -268,11 +288,15 @@ void* mem_alloc(size_t size, const uint align, uint flags, Handle* phm)
}
void* mem_get_ptr(Handle hm, size_t* size)
void* mem_get_ptr(Handle hm, size_t* size /* = 0 */)
{
Mem* m = H_USER_DATA(hm, Mem);
if(!m)
{
if(size)
*size = 0;
return 0;
}
assert((!m->p || m->size) && "mem_get_ptr: mem corrupted (p valid =/=> size > 0)");

View File

@ -2,4 +2,6 @@
#include "res/vfs.h"
#include "res/tex.h"
#include "res/mem.h"
#include "res/font.h"
#include "res/font.h"
#include "res/file.h"
#include "res/zip.h"

File diff suppressed because it is too large Load Diff

View File

@ -23,13 +23,12 @@
#include "h_mgr.h"
#include "posix.h"
#define VFS_MAX_PATH 63
#define VFS_MAX_PATH 256 // includes trailing '\0'
extern int vfs_set_root(const char* argv0, const char* root);
extern int vfs_mount(const char* path);
extern int vfs_umount(const char* path);
extern int vfs_mount(const char* vfs_path, const char* name, uint pri);
extern int vfs_umount(const char* name);
extern int vfs_stat(const char* fn, struct stat *buffer);
extern int vfs_stat(const char* fn, struct stat*);
extern int vfs_realpath(const char* fn, char* realpath);
extern Handle vfs_load(const char* fn, void*& p, size_t& size);
@ -39,6 +38,8 @@ extern int vfs_close(Handle& h);
extern Handle vfs_map(Handle hf, int flags, void*& p, size_t& size);
//
// async read interface
//

View File

@ -20,26 +20,559 @@
#include <cstring>
#include <cstdlib>
#include "zip.h"
#include "file.h"
#include "lib.h"
#include "misc.h"
#include "h_mgr.h"
#include "mem.h"
#include "vfs.h"
#include "res.h"
#include <zlib.h>
#ifdef _MSC_VER
#pragma comment(lib, "zlib.lib")
#endif
#include <map>
///////////////////////////////////////////////////////////////////////////////
//
// low-level in-memory inflate routines on top of ZLib
// Zip-specific code
// passes list of files in archive to lookup
//
///////////////////////////////////////////////////////////////////////////////
// convenience container for location / size of file in archive.
// POD; copied out of the central directory and stored per file
// by the lookup code.
struct ZFileLoc
{
	size_t ofs;		// byte offset of the file's data within the archive
	size_t csize;	// compressed size; = 0 if not compressed
	size_t ucsize;	// uncompressed (original) size

	// why csize?
	// file I/O may be N-buffered, so it's good to know when the raw data
	// stops (or else we potentially overshoot by N-1 blocks),
	// but not critical, since Zip files are compressed individually.
	// (if we read too much, it's ignored by inflate).
	//
	// we also need a way to check if file compressed (e.g. to fail mmap
	// requests if the file is compressed). packing a bit in ofs or
	// ucsize is error prone and ugly (1 bit less won't hurt though).
	// any other way will mess up the nice 2^n byte size anyway, so
	// might as well store csize.
};
// find the End of Central Directory Record in file (loaded or mapped).
// on success, set ecdr_ to its start and return 0;
// otherwise, set ecdr_ to 0 and return -1.
static int zip_find_ecdr(const void* const file, const size_t size, const u8*& ecdr_)
{
	const char ecdr_id[] = "PK\5\6";	// signature
	const size_t ECDR_SIZE = 22;

	if(size < ECDR_SIZE)
	{
		assert(0 && "zip_find_ecdr: size is way too small");
		ecdr_ = 0;
		return -1;
	}

	const u8* ecdr = (const u8*)file + size - ECDR_SIZE;

	// early out: check expected case (ECDR at EOF; no file comment).
	// memcmp instead of a *(u32*) load: the pointer is rarely aligned,
	// and an unaligned load through a cast is undefined behavior.
	if(memcmp(ecdr, ecdr_id, 4) == 0)
		goto found_ecdr;

	{
		// scan the last 66000 bytes of file for ecdr_id signature
		// (zip comment <= 65535 bytes, sizeof(ECDR) = 22, add some for safety)
		// if the zip file is < 66000 bytes, scan the whole file.
		size_t bytes_left = MIN(66000, size);
		ecdr = (const u8*)file + size - bytes_left;
		while(bytes_left >= 4)
		{
			if(memcmp(ecdr, ecdr_id, 4) == 0)
				goto found_ecdr;
			// advance by one byte - the signature need not be aligned
			ecdr++;
			bytes_left--;
		}
	}
	// (braces above keep bytes_left's initialization out of the goto's
	// path - jumping past an initialized local is ill-formed in C++)

	// reached EOF and still haven't found the ECDR identifier.
	ecdr_ = 0;
	return -1;

found_ecdr:
	ecdr_ = ecdr;
	return 0;
}
// make sure the LFH fields match those passed (from the CDFH).
// only used in PARANOIA builds - costs time when opening archives.
static int zip_verify_lfh(const void* const file, const size_t lfh_ofs, const size_t file_ofs)
{
const char lfh_id[] = "PK\3\4"; // signature
const size_t LFH_SIZE = 30;
assert(lfh_ofs < file_ofs); // header comes before file
const u8* lfh = (const u8*)file + lfh_ofs;
if(*(u32*)lfh != *(u32*)lfh_id)
{
assert(0 && "LFH corrupt! (signature doesn't match)");
return -1;
}
const u16 lfh_fn_len = read_le16(lfh+26);
const u16 lfh_e_len = read_le16(lfh+28);
const size_t lfh_file_ofs = lfh_ofs + LFH_SIZE + lfh_fn_len + lfh_e_len;
if(file_ofs != lfh_file_ofs)
{
assert(0 && "warning: CDFH and LFH data differ! normal builds will"\
"return incorrect file offsets. check Zip file!");
return -1;
}
return 0;
}
// extract information from the current Central Directory File Header;
// advance cdfh to point to the next; return -1 on unrecoverable error,
// 0 on success (<==> output fields are valid), > 0 if file is invalid.
//
// cdfh   : [in/out] current position within the central directory; advanced
//          past this entry (or, on skip, to the next signature found).
// fn     : [out] filename inside the archive - NOT 0-terminated!
// fn_len : [out] length of fn in bytes.
// loc    : [out] offset / size info for the file's data.
static int zip_read_cdfh(const u8*& cdfh, const char*& fn, size_t& fn_len, ZFileLoc* const loc)
{
	const char cdfh_id[] = "PK\1\2";	// signature
	const size_t CDFH_SIZE = 46;
	const size_t LFH_SIZE = 30;

	// NOTE(review): reading the signature via a u32 cast is an unaligned
	// access - fine on x86, but technically undefined behavior.
	if(*(u32*)cdfh != *(u32*)cdfh_id)
	{
		assert(0 && "CDFH corrupt! (signature doesn't match)");
		goto skip_file;
	}

	// fixed-layout CDFH fields (byte offsets per the Zip appnote)
	const u8 method = cdfh[10];
	const u32 csize_ = read_le32(cdfh+20);
	const u32 ucsize_ = read_le32(cdfh+24);
	const u16 fn_len_ = read_le16(cdfh+28);
	const u16 e_len = read_le16(cdfh+30);
	const u16 c_len = read_le16(cdfh+32);
	const u32 lfh_ofs = read_le32(cdfh+42);
	const char* fn_ = (const char*)cdfh+CDFH_SIZE;
		// not 0-terminated!

	// compression method: neither deflated (8) nor stored (0)
	if(method & ~8)
	{
		assert(0 && "warning: unknown compression method");
		goto skip_file;
	}

	fn = fn_;
	fn_len = fn_len_;

	// file data follows the LFH and its own filename / extra fields
	// (which are assumed identical to the CDFH's - see note below)
	loc->ofs = lfh_ofs + LFH_SIZE + fn_len_ + e_len;
	loc->csize = csize_;
	loc->ucsize = ucsize_;

	// performance issue: want to avoid seeking between LFHs and central dir.
	// would be safer to calculate file offset from the LFH, since its
	// filename / extra data fields may differ WRT the CDFH version.
	// we don't bother checking for this in normal builds: if they were
	// to be different, we'd notice: headers of files would end up corrupted.
#ifdef PARANOIA
	// NOTE(review): this looks wrong on two counts - zip_verify_lfh
	// returns 0 on *success*, so !zip_verify_lfh() skips valid files;
	// and 'file' / 'file_ofs' are not in scope in this function.
	// confirm before building with PARANOIA defined.
	if(!zip_verify_lfh(file, lfh_ofs, file_ofs))
		goto skip_file;
#endif

	// advance to the next CDFH (entry + variable-length fields)
	cdfh += CDFH_SIZE + fn_len + e_len + c_len;
	return 0;

	// file was invalid somehow; try to seek forward to the next CDFH
skip_file:
	// scan for next CDFH (look for signature, at most 256 bytes ahead)
	for(int i = 0; i < 256; i++)
	{
		if(*(u32*)cdfh == *(u32*)cdfh_id)
			goto found_next_cdfh;
		cdfh++;
	}
	// next CDFH not found. caller must abort
	return -1;

	// file was skipped, but we have the next CDFH
found_next_cdfh:
	return 1;
}
// fn (filename) is not necessarily 0-terminated!
// loc is only valid during the callback! must be copied or saved.
typedef int(*ZipCdfhCB)(const uintptr_t user, const i32 idx, const char* fn, const size_t fn_len, const ZFileLoc* const loc);

// go through central directory of the Zip file (loaded or mapped into memory);
// call back for each file.
//
// HACK: call back with negative index the first time; its abs. value is
// the number of files in the archive. lookup needs to know this so it can
// allocate memory. having lookup_init call zip_get_num_files and then
// zip_enum_files would require passing around a ZipInfo struct,
// or searching for the ECDR twice - both ways aren't nice.
static int zip_enum_files(const void* const file, const size_t size, const ZipCdfhCB cb, const uintptr_t user)
{
	// locate the End of Central Directory Record; file count and
	// central dir offset are both read from it.
	const u8* ecdr;
	int err = zip_find_ecdr(file, size, ecdr);
	if(err < 0)
		return err;

	const i32 num_files = read_le16(ecdr+10);
	// the special first callback passes -num_files, which must be < 0;
	// an empty archive would look like a regular index => bail instead.
	if(!num_files)
		return -1;
	err = cb(user, -num_files, 0, 0, 0);
	if(err < 0)
		return err;

	// walk the central directory; cdfh is advanced by zip_read_cdfh.
	const u32 cd_ofs = read_le32(ecdr+16);
	const u8* cdfh = (const u8*)file + cd_ofs;
	for(i32 idx = 0; idx < num_files; idx++)
	{
		const char* fn;
		size_t fn_len;
		ZFileLoc loc;
		const int ret = zip_read_cdfh(cdfh, fn, fn_len, &loc);

		// non-recoverable error reading the directory
		if(ret < 0)
			return ret;

		// invalid file (e.g. unknown compression format): report it
		// with null params instead of skipping, so indices stay valid;
		// otherwise pass its info along.
		if(ret > 0)
			cb(user, idx, 0, 0, 0);
		else
			cb(user, idx, fn, fn_len, &loc);
	}

	return 0;
}
///////////////////////////////////////////////////////////////////////////////
//
// file lookup
// per archive: find file information (e.g. location, size), given filename.
//
///////////////////////////////////////////////////////////////////////////////
// file lookup: store array of files in archive (ZEnt); lookup via linear
// search for hash of filename.
// optimization: store index of last file opened; check the one after
// that first, and only then search the whole array. this is a big
// win if files are opened sequentially (they should be so ordered
// in the archive anyway, to reduce seeks)
//
// rationale: added index because exposing index is bad (say we change lookup data struct)
// much cleaner to only export handle
uintptr_t zip_init_ctx()
// don't bother making a tree structure: first, it's a bit of work
// (Zip files store paths as part of the file name - there's no extra
// directory information); second, the VFS file location DB stores
// handle and file index per file, making its lookup constant-time.
// one entry in the per-archive file table.
struct ZEnt
{
	const char* fn;	// 0-terminated copy of the filename
					// (currently allocated individually)
	ZFileLoc loc;	// location / size of the file within the archive
};
// filename hash -> index into LookupInfo.ents, for constant-time lookup.
typedef std::map<FnHash, i32> LookupIdx;
typedef LookupIdx::iterator LookupIdxIt;

// per-archive information for mapping filename -> file info
struct LookupInfo
{
	ZEnt* ents;
	FnHash* fn_hashes;
		// split out of ZEnt for more efficient search
		// (=> ZEnt is power-of-2, back-to-back fn_hashes)
		//
		// currently both share one memory allocation; only mem_free() ents!

	i32 num_files;

	i32 next_file;
		// for last-file-opened optimization.
		// we store index of next file instead of the last one opened
		// to avoid trouble on first call (don't want last == -1).

	// don't know size of std::map, and this is used in a control block;
	// allocate dynamically to save size.
	LookupIdx* idx;
};
// add file <fn> to the lookup data structure.
// callback for zip_enum_files, in order (0 <= idx < num_files).
//
// fn (filename) is not necessarily 0-terminated!
// loc is only valid during the callback! must be copied or saved.
static int lookup_add_file_cb(const uintptr_t user, const i32 idx, const char* const fn, const size_t fn_len, const ZFileLoc* const loc)
{
	LookupInfo* li = (LookupInfo*)user;

	// HACK: on first call, idx is negative and tells us how many
	// files are in the archive (so we can allocate memory).
	// see zip_enum_files for why it's done this way.
	if(idx < 0)
	{
		const i32 num_files = -idx;

		// both arrays in one allocation (more efficient)
		const size_t ents_size = (num_files * sizeof(ZEnt));
		const size_t array_size = ents_size + (num_files * sizeof(FnHash));
		void* p = mem_alloc(array_size, 4*KB);
		if(!p)
			return ERR_NO_MEM;

		li->num_files = num_files;
		li->ents = (ZEnt*)p;
		li->fn_hashes = (FnHash*)((char*)p + ents_size);
		return 0;
	}

	ZEnt* ent = li->ents + idx;

	// NOTE(review): fn is not 0-terminated here; if fnv_hash scans for
	// '\0', this reads past the name - confirm fnv_hash's contract.
	FnHash fn_hash = fnv_hash(fn);
	(*li->idx)[fn_hash] = idx;
	li->fn_hashes[idx] = fn_hash;

	// valid file - copy out its info.
	if(loc)
	{
		// copy the filename. fn points into the central directory and
		// is NOT 0-terminated, so strcpy (used previously) would read
		// past the name and append garbage - copy exactly fn_len bytes
		// and terminate explicitly.
		char* fn_copy = (char*)malloc(fn_len+1);
		if(!fn_copy)
			return ERR_NO_MEM;
		memcpy(fn_copy, fn, fn_len);
		fn_copy[fn_len] = '\0';
		ent->fn = fn_copy;

		ent->loc = *loc;
	}
	// invalid file / error reading its dir entry: zero its file info.
	// (don't skip it to make sure all indices are valid).
	else
		memset(ent, 0, sizeof(ZEnt));

	return 0;
}
// initialize lookup data structure for Zip archive <file>.
// on failure, releases the index and entry array, so the caller may
// simply abandon li without calling lookup_free.
static int lookup_init(LookupInfo* const li, const void* const file, const size_t size)
{
	// zero all fields up front so cleanup never sees garbage pointers
	// (previously, a failure before the first callback left ents et al.
	// uninitialized).
	li->ents = 0;
	li->fn_hashes = 0;
	li->num_files = 0;
	li->next_file = 0;

	li->idx = new LookupIdx;

	// remaining fields are filled in by lookup_add_file_cb.
	int err = zip_enum_files(file, size, lookup_add_file_cb, (uintptr_t)li);
	if(err < 0)
	{
		// failed mid-enumeration: don't leak what was built so far.
		// (filename copies made before the failure point are still
		// leaked - we can't tell how many entries were written.)
		mem_free(li->ents);
		li->ents = 0;
		li->fn_hashes = 0;
		li->num_files = 0;
		delete li->idx;
		li->idx = 0;
	}
	return err;
}
// free lookup data structure.
// frees the filename copies, the index, and the shared ents/fn_hashes
// allocation; pointers are zeroed so accidental reuse fails fast
// (previously they were left dangling).
static int lookup_free(LookupInfo* const li)
{
	// free the individually allocated filename copies
	for(i32 i = 0; i < li->num_files; i++)
	{
		free((void*)li->ents[i].fn);
		li->ents[i].fn = 0;
	}

	li->num_files = 0;

	// fn_hashes shares the ents allocation - must not be freed separately
	li->fn_hashes = 0;

	delete li->idx;
	li->idx = 0;

	// frees fn_hashes as well (single shared allocation)
	ZEnt* const ents = li->ents;
	li->ents = 0;
	return mem_free(ents);
}
// return key of file <fn> for use in lookup_get_file_info.
static int lookup_file(LookupInfo* const li, const char* const fn, i32& idx)
{
	const FnHash fn_hash = fnv_hash(fn);

	// fast path: try the entry right after the last file returned.
	// files are usually opened in archive order, so this often hits
	// and avoids the map lookup entirely.
	i32 i = li->next_file;
	const bool marker_hit = (i < li->num_files) && (li->fn_hashes[i] == fn_hash);
	if(!marker_hit)
	{
		// slow path: consult the filename-hash index.
		LookupIdxIt it = li->idx->find(fn_hash);
		if(it == li->idx->end())
			return ERR_FILE_NOT_FOUND;	// not in this archive
		i = it->second;
	}

	// remember the *next* entry rather than the current one, so the
	// very first call (next_file == 0) needs no special -1 handling.
	li->next_file = i+1;

	idx = i;
	return 0;
}
// return file information, given file key (from lookup_file).
static int lookup_get_file_info(LookupInfo* const li, const i32 idx, const char*& fn, ZFileLoc* const loc)
{
	// valid indices are [0, num_files-1].
	// (was: idx >= num_files-1, an off-by-one that wrongly rejected
	// the last file in the archive)
	if(idx < 0 || idx >= li->num_files)
	{
		assert(0 && "lookup_get_file_info: index out of bounds");
		return -1;
	}

	const ZEnt* const ent = &li->ents[idx];
	fn = ent->fn;
	*loc = ent->loc;
	return 0;
}
typedef ZipFileCB LookupFileCB;

// call cb for each file in the lookup table, in index order.
// if cb fails (returns < 0), abort and propagate its error code
// (previously the error was swallowed and 0 returned).
static int lookup_enum_files(LookupInfo* const li, LookupFileCB cb, uintptr_t user)
{
	const ZEnt* ent = li->ents;
	for(i32 i = 0; i < li->num_files; i++, ent++)
	{
		const int err = cb(ent->fn, LOC_ZIP, (ssize_t)ent->loc.ucsize, user);
		if(err < 0)
			return err;
	}

	return 0;
}
///////////////////////////////////////////////////////////////////////////////
//
// container with handle for archive info
// owns archive file and its lookup mechanism.
//
///////////////////////////////////////////////////////////////////////////////
// per-handle state: owns the archive file and its lookup mechanism.
struct ZArchive
{
	File f;			// the archive file itself
	LookupInfo li;	// filename -> location/size lookup for contained files

	// hack: on first open, file is invalid (fn_hash isn't set),
	// and file validate in file_close fails.
	// workaround: only close if open.
	bool is_open;
};
H_TYPE_DEFINE(ZArchive)
// handle-manager init hook: nothing to do here -
// all state is built in ZArchive_reload. za and args deliberately unused.
static void ZArchive_init(ZArchive* za, va_list args)
{
}
// handle-manager dtor hook: release the archive's resources.
// no-op if the archive was never successfully opened
// (see the is_open workaround documented on ZArchive).
static void ZArchive_dtor(ZArchive* za)
{
	if(!za->is_open)
		return;

	za->is_open = false;
	file_close(&za->f);
	lookup_free(&za->li);
}
// handle-manager reload hook: open the archive file and build the
// filename lookup from its central directory.
// on failure, everything opened here is closed again.
static int ZArchive_reload(ZArchive* za, const char* fn)
{
	int err;

	err = file_open(fn, 0, &za->f);
	if(err < 0)
		return err;

	// map the whole archive, so the central directory can be parsed
	// without further I/O.
	void* file;
	size_t size;
	err = file_map(&za->f, file, size);
	if(err < 0)
		goto exit_close;

	err = lookup_init(&za->li, file, size);
	if(err < 0)
		goto exit_unmap_close;

	// NOTE(review): the mapping is not released on the success path.
	// lookup_init copies the data it needs, so unmapping here looks
	// safe - confirm whether keeping the mapping is intentional.
	za->is_open = true;
	return 0;

exit_unmap_close:
	file_unmap(&za->f);
exit_close:
	file_close(&za->f);
	return err;
}
// open and return a handle to the zip archive indicated by <fn>.
// the actual work (open + map + central dir parse) is done by
// ZArchive_reload, driven by the handle manager.
Handle zip_archive_open(const char* const fn)
{
	return h_alloc(H_ZArchive, fn);
}
// close the archive <ha> and set ha to 0.
// resources are released (via ZArchive_dtor) once the handle's
// reference count reaches zero.
int zip_archive_close(Handle& ha)
{
	return h_free(ha, H_ZArchive);
}
// call cb for each file in archive <ha>, passing name, location flag
// and uncompressed size.
//
// would be nice to pass along a key (allowing for O(1) lookup in archive),
// but then the callback is no longer compatible to file / vfs enum files.
int zip_enum_files(const Handle ha, const ZipFileCB cb, const uintptr_t user)
{
	H_DEREF(ha, ZArchive, za);
	return lookup_enum_files(&za->li, cb, user);
}
///////////////////////////////////////////////////////////////////////////////
//
// in-memory inflate routines (zlib wrapper)
//
///////////////////////////////////////////////////////////////////////////////
uintptr_t inf_init_ctx()
{
// allocate ZLib stream
z_stream* stream = (z_stream*)mem_alloc(round_up(sizeof(z_stream), 32), 32, MEM_ZERO);
@ -51,7 +584,7 @@ uintptr_t zip_init_ctx()
}
int zip_start_read(uintptr_t ctx, void* out, size_t out_size)
int inf_start_read(uintptr_t ctx, void* out, size_t out_size)
{
if(!ctx)
return ERR_INVALID_PARAM;
@ -62,13 +595,13 @@ int zip_start_read(uintptr_t ctx, void* out, size_t out_size)
assert(0 && "zip_start_read: ctx already in use!");
return -1;
}
stream->next_out = (Bytef*)out;
stream->next_out = (Byte*)out;
stream->avail_out = (uInt)out_size;
return 0;
}
ssize_t zip_inflate(uintptr_t ctx, void* in, size_t in_size)
ssize_t inf_inflate(uintptr_t ctx, void* in, size_t in_size)
{
if(!ctx)
return ERR_INVALID_PARAM;
@ -76,6 +609,9 @@ ssize_t zip_inflate(uintptr_t ctx, void* in, size_t in_size)
size_t prev_avail_out = stream->avail_out;
stream->avail_in = (uInt)in_size;
stream->next_in = (Byte*)in;
int err = inflate(stream, Z_SYNC_FLUSH);
// check+return how much actual data was read
@ -92,7 +628,7 @@ ssize_t zip_inflate(uintptr_t ctx, void* in, size_t in_size)
}
int zip_finish_read(uintptr_t ctx)
int inf_finish_read(uintptr_t ctx)
{
if(!ctx)
return ERR_INVALID_PARAM;
@ -112,7 +648,7 @@ int zip_finish_read(uintptr_t ctx)
}
int zip_free_ctx(uintptr_t ctx)
int inf_free_ctx(uintptr_t ctx)
{
if(!ctx)
return ERR_INVALID_PARAM;
@ -126,212 +662,12 @@ int zip_free_ctx(uintptr_t ctx)
}
//
// Zip archive
//
static const char ecdr_id[] = "PK\5\6"; // End of Central Directory Header identifier
static const char cdfh_id[] = "PK\1\2"; // Central File Header identifier
static const char lfh_id[] = "PK\3\4"; // Local File Header identifier
struct ZEnt
{
size_t ofs;
size_t csize; // 0 if not compressed
size_t ucsize;
// why csize?
// file I/O may be N-buffered, so it's good to know when the raw data
// stops (or else we potentially overshoot by N-1 blocks),
// but not critical, since Zip files are stored individually.
//
// we also need a way to check if file compressed (e.g. to fail mmap
// requests if the file is compressed). packing a bit in ofs or
// ucsize is error prone and ugly (1 bit less won't hurt though).
// any other way will mess up the nice 8 byte size anyway, so might
// as well store csize.
//
// don't worry too much about non-power-of-two size, will probably
// change to global FS tree instead of linear lookup later anyway.
};
struct ZArchive
{
File f;
// file lookup
u16 num_files;
u16 last_file; // index of last file we found (speed up lookups of sequential files)
u32* fn_hashs; // split for more efficient search
ZEnt* ents;
};
H_TYPE_DEFINE(ZArchive)
static void ZArchive_init(ZArchive* za, va_list args)
{
}
static void ZArchive_dtor(ZArchive* za)
{
file_close(&za->f);
mem_free(za->fn_hashs); // both fn_hashs[] and files[]
}
static int ZArchive_reload(ZArchive* za, const char* fn)
{
const u8* ecdr; // declare here to avoid goto scope problems
int err = file_open(fn, 0, &za->f);
if(err < 0)
return err;
void* p;
size_t size;
err = file_map(&za->f, p, size);
if(err < 0)
return err;
{
// find end of central dir record
// by scanning last 66000 bytes of file for ecdr_id magic
// (zip comment <= 65535 bytes, sizeof(ECDR) = 22, add some for safety)
// if the zip file is < 66000 bytes, scan the whole file
size_t bytes_left = 66000; // min(66k, size) - avoid stupid warning
if(bytes_left > size)
bytes_left = size;
ecdr = (const u8*)p + size - 22;
if(*(u32*)ecdr == *(u32*)&ecdr_id)
goto found_ecdr;
ecdr = (const u8*)p + size - bytes_left;
while(bytes_left-3 > 0)
{
if(*(u32*)ecdr == *(u32*)&ecdr_id)
goto found_ecdr;
// check next 4 bytes (non aligned!!)
ecdr++;
bytes_left--;
}
// reached EOF and still haven't found the ECDR identifier
}
fail:
file_unmap(&za->f);
file_close(&za->f);
return -1;
found_ecdr:
{
// read ECDR
const u16 num_files = read_le16(ecdr+10);
const u32 cd_ofs = read_le32(ecdr+16);
// memory for fn_hash and Ent arrays
void* file_list_mem = mem_alloc(num_files * (sizeof(u32) + sizeof(ZEnt)), 4*KB);
if(!file_list_mem)
goto fail;
u32* fn_hashs = (u32*)file_list_mem;
ZEnt* ents = (ZEnt*)((u8*)file_list_mem + num_files*sizeof(u32));
// cache file list for faster lookups
// currently linear search, comparing filename hash.
// TODO: if too slow, use hash table.
const u8* cdfh = (const u8*)p+cd_ofs;
u32* hs = fn_hashs;
ZEnt* ent = ents;
u16 i;
for(i = 0; i < num_files; i++)
{
// read CDFH
if(*(u32*)cdfh != *(u32*)cdfh_id)
continue;
const u32 csize = read_le32(cdfh+20);
const u32 ucsize = read_le32(cdfh+24);
const u16 fn_len = read_le16(cdfh+28);
const u16 e_len = read_le16(cdfh+30);
const u16 c_len = read_le16(cdfh+32);
const u32 lfh_ofs = read_le32(cdfh+42);
const u8 method = cdfh[10];
if(method & ~8) // neither deflated nor stored
continue;
// read LFH
const u8* const lfh = (const u8*)p + lfh_ofs;
if(*(u32*)lfh != *(u32*)lfh_id)
continue;
const u16 lfh_fn_len = read_le16(lfh+26);
const u16 lfh_e_len = read_le16(lfh+28);
const char* lfh_fn = (const char*)lfh+30;
*hs++ = fnv_hash(lfh_fn);
ent->ofs = lfh_ofs + 30 + lfh_fn_len + lfh_e_len;
ent->csize = csize;
ent->ucsize = ucsize;
ent++;
(uintptr_t&)cdfh += 46 + fn_len + e_len + c_len;
}
za->num_files = i;
za->last_file = 0;
za->fn_hashs = fn_hashs;
za->ents = ents;
} // scope
return 0;
}
// open and return a handle to the zip archive indicated by <fn>
Handle zip_archive_open(const char* const fn)
{
return h_alloc(H_ZArchive, fn);
}
// close the archive <ha> and set ha to 0
int zip_archive_close(Handle& ha)
{
return h_free(ha, H_ZArchive);
}
///////////////////////////////////////////////////////////////////////////////
//
// file from Zip archive
// on top of inflate and lookup
//
static int lookup(Handle ha, const char* fn, const ZEnt*& ent)
{
H_DEREF(ha, ZArchive, za);
// find its File descriptor
const u32 fn_hash = fnv_hash(fn);
u16 i = za->last_file+1;
if(i >= za->num_files || za->fn_hashs[i] != fn_hash)
{
for(i = 0; i < za->num_files; i++)
if(za->fn_hashs[i] == fn_hash)
break;
if(i == za->num_files)
return ERR_FILE_NOT_FOUND;
za->last_file = i;
}
ent = &za->ents[i];
return 0;
}
///////////////////////////////////////////////////////////////////////////////
// marker for ZFile struct, to make sure it's valid
@ -381,30 +717,37 @@ do\
while(0);
int zip_open(const Handle ha, const char* fn, ZFile* zf)
int zip_open_idx(const Handle ha, const i32 idx, ZFile* zf)
{
memset(zf, 0, sizeof(ZFile));
if(!zf)
goto invalid_zf;
// jump to CHECK_ZFILE post-check, which will handle this.
// jump to CHECK_ZFILE post-check, which will handle this.
{
const ZEnt* ze;
int err = lookup(ha, fn, ze);
H_DEREF(ha, ZArchive, za);
LookupInfo* li = (LookupInfo*)&za->li;
const char* fn;
ZFileLoc loc;
// don't want ZFile to contain a ZFileLoc struct -
// its ucsize member must be 'loose' for compatibility with File.
// => need to copy ZFileLoc fields into ZFile.
int err = lookup_get_file_info(li, idx, fn, &loc);
if(err < 0)
return err;
#ifdef PARANOIA
zf->magic = ZFILE_MAGIC;
zf->magic = ZFILE_MAGIC;
#endif
zf->ofs = ze->ofs;
zf->csize = ze->csize;
zf->ucsize = ze->ucsize;
zf->ucsize = loc.ucsize;
zf->ofs = loc.ofs;
zf->csize = loc.csize;
zf->ha = ha;
zf->read_ctx = zip_init_ctx();
zf->read_ctx = inf_init_ctx();
}
invalid_zf:
@ -414,28 +757,51 @@ invalid_zf:
}
int zip_open(const Handle ha, const char* fn, ZFile* zf)
{
H_DEREF(ha, ZArchive, za);
LookupInfo* li = (LookupInfo*)&za->li;
i32 idx;
int err = lookup_file(li, fn, idx);
if(err < 0)
return err;
return zip_open_idx(ha, idx, zf);
}
int zip_close(ZFile* zf)
{
CHECK_ZFILE(zf)
// remaining fields don't need to be freed/cleared
return zip_free_ctx(zf->read_ctx);
// remaining ZFile fields don't need to be freed/cleared
return inf_free_ctx(zf->read_ctx);
}
// return file information for <fn> in archive <ha>
int zip_stat(Handle ha, const char* fn, struct stat* s)
{
const ZEnt* ze;
int err = lookup(ha, fn, ze);
H_DEREF(ha, ZArchive, za);
LookupInfo* li = &za->li;
i32 idx;
int err = lookup_file(li, fn, idx);
if(err < 0)
return err;
s->st_size = (off_t)ze->ucsize;
const char* fn2; // unused
ZFileLoc loc;
lookup_get_file_info(li, idx, fn2, &loc);
// can't fail - returned valid index above
s->st_size = (off_t)loc.ucsize;
return 0;
}
// convenience function, allows implementation change in ZFile.
// note that size == ucsize isn't foolproof, and adding a flag to
// ofs or size is ugly and error-prone.
@ -454,19 +820,11 @@ ssize_t zip_read(ZFile* zf, size_t raw_ofs, size_t size, void*& p)
ssize_t err = -1;
ssize_t raw_bytes_read;
ZArchive* za = H_USER_DATA(zf->ha, ZArchive);
if(!za)
return ERR_INVALID_HANDLE;
void* our_buf = 0; // buffer we allocated (if necessary)
if(!p)
{
p = our_buf = mem_alloc(size);
if(!p)
return ERR_NO_MEM;
}
const size_t ofs = zf->ofs + raw_ofs;
// not compressed - just pass it on to file_io
@ -484,10 +842,18 @@ ssize_t zip_read(ZFile* zf, size_t raw_ofs, size_t size, void*& p)
if(raw_ofs != zf->last_raw_ofs)
{
assert(0 && "zip_read: compressed read offset is non-continuous");
goto fail;
return -1;
}
err = (ssize_t)zip_start_read(zf->read_ctx, p, size);
void* our_buf = 0; // buffer we allocated (if necessary)
if(!p)
{
p = our_buf = mem_alloc(size);
if(!p)
return ERR_NO_MEM;
}
err = (ssize_t)inf_start_read(zf->read_ctx, p, size);
if(err < 0)
{
fail:
@ -504,9 +870,9 @@ fail:
// zip_inflate, until all compressed data has been read, or it indicates
// the desired output amount has been reached.
const size_t raw_size = zf->csize;
raw_bytes_read = file_io(&za->f, ofs, raw_size, (void**)0, zip_inflate, zf->read_ctx);
raw_bytes_read = file_io(&za->f, ofs, raw_size, (void**)0, inf_inflate, zf->read_ctx);
err = zip_finish_read(zf->read_ctx);
err = inf_finish_read(zf->read_ctx);
if(err < 0)
goto fail;

View File

@ -25,18 +25,18 @@
//
// low-level in-memory inflate routines
// in-memory inflate routines (zlib wrapper)
//
extern uintptr_t zip_init_ctx();
extern uintptr_t inf_init_ctx();
extern int zip_start_read(uintptr_t ctx, void* out, size_t out_size);
extern int inf_start_read(uintptr_t ctx, void* out, size_t out_size);
extern ssize_t zip_inflate(uintptr_t ctx, void* in, size_t in_size);
extern ssize_t inf_inflate(uintptr_t ctx, void* in, size_t in_size);
extern int zip_finish_read(uintptr_t ctx);
extern int inf_finish_read(uintptr_t ctx);
extern int zip_free_ctx(uintptr_t ctx);
extern int inf_free_ctx(uintptr_t ctx);
//
@ -50,6 +50,15 @@ extern Handle zip_archive_open(const char* fn);
extern int zip_archive_close(Handle& ha);
// keep in sync with file.cpp and vfs.cpp *_CB_FLAGS
enum ZIP_CB_FLAGS
{
LOC_ZIP = BIT(1)
};
typedef int(*ZipFileCB)(const char* const fn, const uint flags, const ssize_t size, const uintptr_t user);
extern int zip_enum_files(const Handle ha, const ZipFileCB cb, const uintptr_t user);
//
// file
//

View File

@ -1,6 +1,6 @@
#if defined(_WIN32) && !defined(NO_WSDL)
#include "sysdep/win/wsdl.h"
# include "sysdep/win/wsdl.h"
#else
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
#endif
# include <SDL/SDL.h>
# include <SDL/SDL_thread.h>
#endif

View File

@ -293,7 +293,7 @@ again:
}
std::sort(samples.begin(), samples.end());
double median = samples[num_samples/2];
// double median = samples[num_samples/2];
// median filter (remove upper and lower 25% and average the rest)
double sum = 0.0;

View File

@ -10,18 +10,26 @@
#ifndef _WIN32
// portable output routines (win.cpp overrides these)
void display_msg(const wchar_t* caption, const wchar_t* msg)
void display_msg(const char* caption, const char* msg)
{
fprintf(stderr, "%s: %s\n", caption, msg);
}
void wdisplay_msg(const wchar_t* caption, const wchar_t* msg)
{
fwprintf(stderr, L"%ws: %ws\n", caption, msg);
}
void debug_out(const char* fmt, ...)
{
va_list args;
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
fflush(stdout);
}

View File

@ -9,7 +9,8 @@
extern "C" {
#endif
extern void display_msg(const wchar_t* caption, const wchar_t* msg);
extern void display_msg(const char* caption, const char* msg);
extern void wdisplay_msg(const wchar_t* caption, const wchar_t* msg);
extern void debug_out(const char* fmt, ...);
extern void check_heap();

View File

@ -28,21 +28,24 @@
#include "detect.h"
#include "win_internal.h"
#include <mmsystem.h> // not included by win due to WIN32_LEAN_AND_MEAN
#include <mmsystem.h>
// not included by win_internal due to its WIN32_LEAN_AND_MEAN define
#ifdef _MSC_VER
#pragma comment(lib, "winmm.lib")
#endif
// ticks per second; average of last few values measured in calibrate
// ticks per second; average of last few values measured in calibrate()
static double hrt_freq = -1.0;
// used to start the hrt tick values near 0
// used to rebase the hrt tick values to 0
static i64 hrt_origin = 0;
static HRTImpl hrt_impl = HRT_NONE;
static HRTOverride overrides[3] = { HRT_DEFAULT, HRT_DEFAULT, HRT_DEFAULT };
static HRTOverride overrides[HRT_NUM_IMPLS];
// HRTImpl enums as index
// HACK: no init needed - static data is zeroed (= HRT_DEFAULT)
cassert(HRT_DEFAULT == 0);
static i64 hrt_nominal_freq = -1;
@ -160,10 +163,14 @@ static void choose_impl()
//
// TGT
//
hrt_impl = HRT_TGT;
hrt_nominal_freq = 1000;
return;
if(1)
{
hrt_impl = HRT_TGT;
hrt_nominal_freq = 1000;
return;
}
// no warning here - doesn't inspire confidence in VC dead code removal.
assert(0 && "hrt_choose_impl: no safe timer found!");
hrt_impl = HRT_NONE;
hrt_nominal_freq = -1;
@ -174,6 +181,7 @@ static void choose_impl()
// return ticks since first call. lock must be held.
//
// split to allow calling from reset_impl_lk without recursive locking.
// (not a problem, but avoids a BoundsChecker warning)
static i64 ticks_lk()
{
i64 t;
@ -231,8 +239,8 @@ static i64 ticks_lk()
// choose a HRT implementation. lock must be held.
//
// don't want to saddle timer with the problem of initializing us
// on first call - it wouldn't otherwise need to be thread-safe.
// don't want to saddle timer module with the problem of initializing
// us on first call - it wouldn't otherwise need to be thread-safe.
static void reset_impl_lk()
{
HRTImpl old_impl = hrt_impl;
@ -414,14 +422,13 @@ lock();
hrt_cal_time = hrt_cur;
ms_cal_time = ms_cur;
//
// when we wake up, we don't know if timer has been updated yet.
// they may be off by 1 tick - try to compensate.
//
// we're called from a WinMM event, so the timer has just been updated.
// no need to determine tick / compensate.
double dt = ms_ds; // actual elapsed time since last calibration
double hrt_err = ms_ds - hrt_ds;
double hrt_abs_err = fabs(hrt_err), hrt_rel_err = hrt_abs_err / ms_ds;
// double dt = ms_ds; // actual elapsed time since last calibration
// double hrt_err = ms_ds - hrt_ds;
// double hrt_abs_err = fabs(hrt_err);
// double hrt_rel_err = hrt_abs_err / ms_ds;
double hrt_est_freq = hrt_ds / ms_ds;
// only add to buffer if within 10% of nominal
@ -457,6 +464,12 @@ static UINT mm_event;
// keep calibrate() portable, don't need args anyway
static void CALLBACK trampoline(UINT uTimerID, UINT uMsg, DWORD_PTR dwUser, DWORD_PTR dw1, DWORD_PTR dw2)
{
UNUSED(uTimerID);
UNUSED(uMsg);
UNUSED(dwUser);
UNUSED(dw1);
UNUSED(dw2);
calibrate();
}
@ -475,11 +488,11 @@ static void init_calibration_thread()
GetSystemTimeAdjustment(&adj, &incr, &adj_disabled);
DWORD res = adj / 10000;
mm_event = timeSetEvent(1000, res, trampoline, 0, TIME_PERIODIC);
atexit2(timeKillEvent, mm_event);
atexit2(timeKillEvent, mm_event, CC_STDCALL_1);
#else
// TODO: port thread. no big deal, and the timer works without.
// TODO: port thread. it's no big deal, but the timer should work without.
#endif
}

View File

@ -46,7 +46,9 @@ enum HRTOverride
{
// allow use of this implementation if available,
// and we can work around its problems
HRT_DEFAULT,
//
// HACK: give it value 0 for easier static data initialization
HRT_DEFAULT = 0,
// override our 'safe to use' recommendation
// set by hrt_override_impl (via command line arg or console function)

View File

@ -40,41 +40,33 @@
//
// note: current Windows lowio handle limit is 2k
class AioHandles
{
public:
AioHandles() : hs(0), size(0) {}
~AioHandles();
static HANDLE* aio_hs;
// array; expanded when needed in aio_h_set
HANDLE get(int fd);
int set(int fd, HANDLE h);
private:
HANDLE* hs;
uint size;
};
static int aio_hs_size;
AioHandles::~AioHandles()
static void aio_h_cleanup()
{
win_lock(WAIO_CS);
for(int i = 0; (unsigned)i < size; i++)
if(hs[i] != INVALID_HANDLE_VALUE)
for(int i = 0; i < aio_hs_size; i++)
if(aio_hs[i] != INVALID_HANDLE_VALUE)
{
CloseHandle(hs[i]);
hs[i] = INVALID_HANDLE_VALUE;
CloseHandle(aio_hs[i]);
aio_hs[i] = INVALID_HANDLE_VALUE;
}
free(hs);
hs = 0;
size = 0;
free(aio_hs);
aio_hs = 0;
aio_hs_size = 0;
win_unlock(WAIO_CS);
}
bool is_valid_file_handle(HANDLE h)
static bool is_valid_file_handle(HANDLE h)
{
SetLastError(0);
bool valid = (GetFileSize(h, 0) != INVALID_FILE_SIZE);
@ -82,20 +74,24 @@ bool is_valid_file_handle(HANDLE h)
return valid;
}
// get async capable handle to file <fd>
HANDLE AioHandles::get(int fd)
HANDLE aio_h_get(int fd)
{
HANDLE h = INVALID_HANDLE_VALUE;
win_lock(WAIO_CS);
if((unsigned)fd < size)
h = hs[fd];
if(0 <= fd && fd < aio_hs_size)
h = aio_hs[fd];
else
{
assert(0);
h = INVALID_HANDLE_VALUE;
}
if(!is_valid_file_handle(h))
return INVALID_HANDLE_VALUE;
h = INVALID_HANDLE_VALUE;
win_unlock(WAIO_CS);
@ -103,29 +99,34 @@ HANDLE AioHandles::get(int fd)
}
int AioHandles::set(int fd, HANDLE h)
int aio_h_set(int fd, HANDLE h)
{
win_lock(WAIO_CS);
WIN_ONCE(atexit2(aio_h_cleanup))
if(fd < 0)
goto fail;
// grow hs array to at least fd+1 entries
if((unsigned)fd >= size)
if(fd >= aio_hs_size)
{
uint size2 = (uint)round_up(fd+8, 8);
HANDLE* hs2 = (HANDLE*)realloc(hs, size2*sizeof(HANDLE));
HANDLE* hs2 = (HANDLE*)realloc(aio_hs, size2*sizeof(HANDLE));
if(!hs2)
goto fail;
for(uint i = size; i < size2; i++)
for(uint i = aio_hs_size; i < size2; i++)
hs2[i] = INVALID_HANDLE_VALUE;
hs = hs2;
size = size2;
aio_hs = hs2;
aio_hs_size = size2;
}
if(h == INVALID_HANDLE_VALUE)
;
else
{
if(hs[fd] != INVALID_HANDLE_VALUE)
if(aio_hs[fd] != INVALID_HANDLE_VALUE)
{
assert("AioHandles::set: handle already set!");
goto fail;
@ -137,14 +138,14 @@ int AioHandles::set(int fd, HANDLE h)
}
}
hs[fd] = h;
aio_hs[fd] = h;
win_unlock(WAIO_CS);
return 0;
fail:
win_unlock(WAIO_CS);
assert(0 && "AioHandles::set failed");
assert(0 && "aio_h_set failed");
return -1;
}
@ -171,37 +172,20 @@ struct Req
size_t buf_size;
};
class Reqs
{
public:
Reqs();
~Reqs();
Req* find(const aiocb* cb);
Req* alloc(const aiocb* cb);
private:
enum { MAX_REQS = 8 };
Req reqs[MAX_REQS];
};
// TODO: explain links between Req and cb
Reqs::Reqs()
{
for(int i = 0; i < MAX_REQS; i++)
{
memset(&reqs[i], 0, sizeof(Req));
reqs[i].ovl.hEvent = CreateEvent(0,1,0,0); // manual reset
}
}
const int MAX_REQS = 64;
static Req reqs[MAX_REQS];
Reqs::~Reqs()
void req_cleanup(void)
{
Req* r = reqs;
for(int i = 0; i < MAX_REQS; i++, r++)
{
r->cb = 0;
HANDLE& h = r->ovl.hEvent;
if(h != INVALID_HANDLE_VALUE)
{
@ -209,44 +193,62 @@ Reqs::~Reqs()
h = INVALID_HANDLE_VALUE;
}
free(r->buf);
::free(r->buf);
r->buf = 0;
}
}
// find request slot currently in use by cb
// cb = 0 => search for empty slot
Req* Reqs::find(const aiocb* cb)
void req_init()
{
atexit(req_cleanup);
for(int i = 0; i < MAX_REQS; i++)
reqs[i].ovl.hEvent = CreateEvent(0,1,0,0); // manual reset
}
Req* req_alloc(aiocb* cb)
{
ONCE(req_init());
Req* r = reqs;
for(int i = 0; i < MAX_REQS; i++, r++)
if(r->cb == cb)
return r;
if(r->cb == 0)
{
r->cb = cb;
cb->req_ = r;
debug_out("req_alloc cb=%p r=%p\n", cb, r);
return r;
}
assert(0 && "Reqs::find failed");
return 0;
}
Req* Reqs::alloc(const aiocb* cb)
Req* req_find(const aiocb* cb)
{
win_lock(WAIO_CS);
debug_out("req_find cb=%p r=%p\n", cb, cb->req_);
// find free request slot
Req* r = find(0);
if(!r)
return (Req*)cb->req_;
}
int req_free(Req* r)
{
debug_out("req_free cb=%p r=%p\n", r->cb, r);
if(r->cb == 0)
{
win_unlock(WAIO_CS);
assert(0 && "Reqs::alloc: no request slot available!");
return 0;
assert(0);
return -1;
}
r->cb = (aiocb*)cb;
win_unlock(WAIO_CS);
return r;
r->cb->req_ = 0;
r->cb = 0;
return 0;
}
@ -256,30 +258,15 @@ Req* Reqs::alloc(const aiocb* cb)
//
//////////////////////////////////////////////////////////////////////////////
static AioHandles* aio_hs;
static Reqs* reqs;
// Win32 functions require sector aligned transfers.
// max of all drives' size is checked in init().
static size_t sector_size = 4096; // minimum: one page
static void cleanup(void)
{
delete aio_hs;
delete reqs;
}
// caller ensures this is not re-entered!
static void init()
{
reqs = new Reqs;
aio_hs = new AioHandles;
atexit(cleanup);
// Win32 requires transfers to be sector aligned.
// find maximum of all drive's sector sizes, then use that.
// (it's good to know this up-front, and checking every open() is slow).
@ -291,7 +278,7 @@ static void init()
if(!(drives & (1ul << drive)))
continue;
drive_str[0] = 'A'+drive;
drive_str[0] = (char)('A'+drive);
DWORD spc, nfc, tnc; // don't need these
DWORD sector_size2;
@ -331,7 +318,7 @@ int aio_assign_handle(uintptr_t handle)
return fd;
}
return aio_hs->set(fd, (HANDLE)handle);
return aio_h_set(fd, (HANDLE)handle);
}
@ -364,20 +351,21 @@ WIN_ONCE(init()); // TODO: need to do this elsewhere in case other routines call
return -1;
}
if(aio_hs->set(fd, h) < 0)
if(aio_h_set(fd, h) < 0)
{
assert(0 && "aio_open failed");
CloseHandle(h);
return -1;
}
debug_out("aio_open fn=%s fd=%d\n", fn, fd);
return 0;
}
int aio_close(int fd)
{
HANDLE h = aio_hs->get(fd);
HANDLE h = aio_h_get(fd);
if(h == INVALID_HANDLE_VALUE) // out of bounds or already closed
{
assert(0 && "aio_close failed");
@ -387,7 +375,9 @@ int aio_close(int fd)
SetLastError(0);
if(!CloseHandle(h))
assert(0);
aio_hs->set(fd, INVALID_HANDLE_VALUE);
aio_h_set(fd, INVALID_HANDLE_VALUE);
debug_out("aio_close fd=%d\n", fd);
return 0;
}
@ -401,6 +391,8 @@ int aio_close(int fd)
// cb->aio_offset must be 0.
static int aio_rw(struct aiocb* cb)
{
debug_out("aio_rw cb=%p\n", cb);
if(!cb)
{
assert(0);
@ -412,17 +404,24 @@ static int aio_rw(struct aiocb* cb)
return 0;
}
HANDLE h = aio_hs->get(cb->aio_fildes);
HANDLE h = aio_h_get(cb->aio_fildes);
if(h == INVALID_HANDLE_VALUE)
{
assert(0 && "aio_rw: associated handle is invalid");
return -EINVAL;
}
Req* r = reqs->alloc(cb);
if(cb->req_)
{
// SUSv3 says this has undefined results; we fail the attempt.
assert(0 && "aio_rw: aiocb is already in use");
return -1;
}
Req* r = req_alloc(cb);
if(!r)
{
assert(0);
assert(0 && "aio_rw: cannot allocate a Req (too many concurrent IOs)");
return -1;
}
@ -472,22 +471,17 @@ static int aio_rw(struct aiocb* cb)
r->ovl.Internal = r->ovl.InternalHigh = 0;
//#if _MSC_VER >= 1300
// r->ovl.Pointer = (void*)ofs;
//#else
// r->ovl.Offset = ofs;
//#endif
// a bit tricky: this should work even if size_t grows to 64 bits.
//
// we don't use OVERLAPPED.Pointer because it's not defined in
// previous platform sdk versions, and i can't figure out how
// determine the sdk version installed. can't just check for the
// vc6/vc7 compiler - vc6 with the old sdk may have been upgraded
// to the vc7.1 compiler.
//
// this assumes little endian, but we're windows-specific here anyway.
*(size_t*)&r->ovl.Offset = ofs;
// a bit tricky: this should work even if size_t grows to 64 bits.
//
// we don't use OVERLAPPED.Pointer because it's not defined in
// previous platform sdk versions, and i can't figure out how
// determine the sdk version installed. can't just check for the
// vc6/vc7 compiler - vc6 with the old sdk may have been upgraded
// to the vc7.1 compiler.
//
// this assumes little endian, but we're windows-specific here anyway.
*(size_t*)&r->ovl.Offset = ofs;
assert(cb->aio_buf != 0);
@ -498,9 +492,14 @@ ResetEvent(r->ovl.hEvent);
BOOL ok = (cb->aio_lio_opcode == LIO_READ)?
ReadFile(h, buf, size32, 0, &r->ovl) : WriteFile(h, buf, size32, 0, &r->ovl);
if(ok || GetLastError() == ERROR_IO_PENDING)
return 0;
return -1;
if(GetLastError() == ERROR_IO_PENDING)
{
// clear annoying error
SetLastError(0);
ok = true;
}
return ok? 0 : -1;
}
@ -544,7 +543,8 @@ int lio_listio(int mode, struct aiocb* const cbs[], int n, struct sigevent* se)
// return status of transfer
int aio_error(const struct aiocb* cb)
{
Req* const r = reqs->find(cb);
debug_out("aio_error cb=%p\n", cb);
Req* const r = req_find(cb);
if(!r)
return -1;
@ -565,7 +565,8 @@ int aio_error(const struct aiocb* cb)
// get bytes transferred. call exactly once for each op.
ssize_t aio_return(struct aiocb* cb)
{
Req* const r = reqs->find(cb);
debug_out("aio_return cb=%p\n", cb);
Req* const r = req_find(cb);
if(!r)
return -1;
@ -576,8 +577,7 @@ ssize_t aio_return(struct aiocb* cb)
if(r->pad || _buf % sector_size)
memcpy(cb->aio_buf, (u8*)r->buf + r->pad, cb->aio_nbytes);
// free this request slot
r->cb = 0;
req_free(r);
return (ssize_t)cb->aio_nbytes;
}
@ -587,7 +587,7 @@ int aio_cancel(int fd, struct aiocb* cb)
{
UNUSED(cb)
const HANDLE h = aio_hs->get(fd);
const HANDLE h = aio_h_get(fd);
if(h == INVALID_HANDLE_VALUE)
return -1;
@ -608,6 +608,8 @@ int aio_suspend(const struct aiocb* const cbs[], int n, const struct timespec* t
{
int i;
debug_out("aio_suspend cb=%p\n", cbs[0]);
if(n <= 0 || n > MAXIMUM_WAIT_OBJECTS)
return -1;
@ -620,7 +622,7 @@ int aio_suspend(const struct aiocb* const cbs[], int n, const struct timespec* t
if(!cbs[i])
continue;
Req* r = reqs->find(cbs[i]);
Req* r = req_find(cbs[i]);
if(r)
{
if(r->ovl.Internal == STATUS_PENDING)

View File

@ -26,13 +26,16 @@
struct aiocb
{
int aio_fildes; // File descriptor.
off_t aio_offset; // File offset.
void* aio_buf; // Location of buffer.
size_t aio_nbytes; // Length of transfer.
int aio_reqprio; // Request priority offset.
struct sigevent aio_sigevent; // Signal number and value.
int aio_fildes; // File descriptor.
off_t aio_offset; // File offset.
void* aio_buf; // Location of buffer.
size_t aio_nbytes; // Length of transfer.
int aio_reqprio; // Request priority offset.
struct sigevent aio_sigevent; // Signal number and value.
int aio_lio_opcode; // Operation to be performed.
// internal
void* req_; // != 0 <==> cb in use
};
enum

View File

@ -20,6 +20,7 @@
#include <stdlib.h>
#include "detect.h"
#include "lib.h"
#include "win_internal.h"
@ -30,42 +31,166 @@
#endif
// EnumDisplayDevices (used in get_monitor_size and win_get_gfx_card)
// is not available on Win95 or NT. try to import it manually here.
// note: FreeLibrary at exit avoids BoundsChecker resource leak warnings.
static BOOL (WINAPI *pEnumDisplayDevicesA)(void*, DWORD, void*, DWORD);
static int import_EnumDisplayDevices()
{
if(!pEnumDisplayDevicesA)
{
static HMODULE hUser32Dll = LoadLibrary("user32.dll");
*(void**)&pEnumDisplayDevicesA = GetProcAddress(hUser32Dll, "EnumDisplayDevicesA");
ONCE(atexit2(FreeLibrary, (uintptr_t)hUser32Dll, CC_STDCALL_1));
}
return pEnumDisplayDevicesA? 0 : -1;
}
// useful for choosing a video mode. not called by detect().
// if we fail, don't change the outputs (assumed initialized to defaults)
void get_cur_resolution(int& xres, int& yres)
// if we fail, outputs are unchanged (assumed initialized to defaults)
int get_cur_resolution(int& xres, int& yres)
{
DEVMODEA dm;
memset(&dm, 0, sizeof(dm));
dm.dmSize = sizeof(dm);
// dm.dmDriverExtra already set to 0 by memset
if(EnumDisplaySettingsA(0, ENUM_CURRENT_SETTINGS, &dm))
if(!EnumDisplaySettingsA(0, ENUM_CURRENT_SETTINGS, &dm))
return -1;
xres = dm.dmPelsWidth;
yres = dm.dmPelsHeight;
return 0;
}
// useful for determining aspect ratio. not called by detect().
// if we fail, outputs are unchanged (assumed initialized to defaults)
int get_monitor_size(int& width_cm, int& height_cm)
{
DISPLAY_DEVICE adapter = { sizeof(DISPLAY_DEVICE) };
DISPLAY_DEVICE monitor = { sizeof(DISPLAY_DEVICE) };
// need to be distinct (EnumDisplayDevices requirement)
LONG err;
char key_name[256];
DWORD key_name_len;
DWORD key_type;
bool found = false;
// make sure EnumDisplayDevices is available (as pEnumDisplayDevicesA)
CHECK_ERR(import_EnumDisplayDevices());
HKEY hkDisplay;
if(RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Enum\\Display", 0, KEY_READ, &hkDisplay) != 0)
return -1;
// we only look at the first monitor of the first display adapter
// attached to the desktop, assumed to be the primary monitor.
// for each display adapter
for(int adapter_idx = 0; !found; adapter_idx++)
{
xres = dm.dmPelsWidth;
yres = dm.dmPelsHeight;
// get display adapter
if(!pEnumDisplayDevicesA(0, adapter_idx, &adapter, 0))
break;
if(!(adapter.StateFlags & DISPLAY_DEVICE_ATTACHED_TO_DESKTOP))
continue;
// get its associated monitor;
// will search for its DeviceID in the registry
if(!pEnumDisplayDevicesA(adapter.DeviceName, 0, &monitor, 0))
continue;
// for each class in registry
for(int class_idx = 0; !found; class_idx++)
{
// open next key
HKEY hkClass;
key_name_len = sizeof(key_name);
if(RegEnumKeyEx(hkDisplay, class_idx, key_name, &key_name_len, 0, 0, 0, 0) != 0)
break;
if(RegOpenKeyEx(hkDisplay, key_name, 0, KEY_READ, &hkClass) != 0)
break;
// for each device in registry
for(int dev_idx = 0; !found; dev_idx++)
{
// open next key
HKEY hkDev;
key_name_len = sizeof(key_name);
if(RegEnumKeyEx(hkClass, dev_idx, key_name, &key_name_len, 0, 0, 0, 0) != 0)
break;
if(RegOpenKeyEx(hkClass, key_name, 0, KEY_READ, &hkDev) != 0)
break;
// build dev_id: (%s\\%s, HardwareID, Driver)
// example: "Monitor\NEC6604\{4D36E96E-E325-11CE-BFC1-08002BE10318}\0001"
// will compare this against monitor.DeviceID
char dev_id[256];
DWORD dev_id_len = sizeof(dev_id);
err = RegQueryValueEx(hkDev, "HardwareID", 0, &key_type, (BYTE*)dev_id, &dev_id_len);
if(err != 0 || (key_type != REG_MULTI_SZ && key_type != REG_SZ))
goto skip_dev;
char* p = (char*)dev_id + strlen((const char*)dev_id);
*p++ = '\\';
dev_id_len = sizeof(dev_id) - dev_id_len;
err = RegQueryValueEx(hkDev, "Driver", 0, &key_type, (BYTE*)p, &dev_id_len);
if(err != 0 || (key_type != REG_MULTI_SZ && key_type != REG_SZ))
goto skip_dev;
// this (hkDev) is not the monitor you're looking for..
if(strcmp(monitor.DeviceID, (const char*)dev_id) != 0)
goto skip_dev;
HKEY hkDevParams;
if(RegOpenKeyEx(hkDev, "Device Parameters", 0, KEY_READ, &hkDevParams) != 0)
goto skip_dev;
// read EDID
BYTE edid[256];
DWORD edid_len = sizeof(edid);
if(RegQueryValueEx(hkDevParams, "EDID", 0, &key_type, edid, &edid_len) == 0)
{
width_cm = edid[21];
height_cm = edid[22];
found = true;
// break out of all loops; all keys will be closed
}
RegCloseKey(hkDevParams);
skip_dev:
RegCloseKey(hkDev);
}
RegCloseKey(hkClass);
}
}
RegCloseKey(hkDisplay);
return found? 0 : -1;
}
int win_get_gfx_card()
{
// EnumDisplayDevices is not available on Win95 or NT
HMODULE hUser32Dll = LoadLibrary("user32.dll");
int (WINAPI *pEnumDisplayDevicesA)(void*, DWORD, void*, DWORD);
*(void**)&pEnumDisplayDevicesA = GetProcAddress(hUser32Dll, "EnumDisplayDevicesA");
if(pEnumDisplayDevicesA)
{
DISPLAY_DEVICEA dev;
dev.cb = sizeof(dev);
if(pEnumDisplayDevicesA(0, 0, &dev, 0))
{
strcpy(gfx_card, (const char*)dev.DeviceString);
return 0;
}
}
FreeLibrary(hUser32Dll);
// make sure EnumDisplayDevices is available (as pEnumDisplayDevicesA)
if(import_EnumDisplayDevices() < 0)
return -1;
return -1;
DISPLAY_DEVICEA dev;
dev.cb = sizeof(dev);
if(!pEnumDisplayDevicesA(0, 0, &dev, 0))
return -1;
strncpy(gfx_card, (const char*)dev.DeviceString, sizeof(gfx_card)-1);
return 0;
}
@ -120,10 +245,26 @@ int win_get_gfx_drv()
int win_get_cpu_info()
{
// get number of CPUs (can't fail)
SYSTEM_INFO si;
GetSystemInfo(&si);
cpus = si.dwNumberOfProcessors;
// read CPU frequency from registry
HKEY hKey;
const char* key = "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0";
if(RegOpenKeyEx(HKEY_LOCAL_MACHINE, key, 0, KEY_QUERY_VALUE, &hKey) == 0)
{
DWORD freq_mhz;
DWORD size = sizeof(freq_mhz);
if(RegQueryValueEx(hKey, "~MHz", 0, 0, (LPBYTE)&freq_mhz, &size) == 0)
cpu_freq = freq_mhz * 1e6;
RegCloseKey(hKey);
}
// determine whether system is a laptop.
// (if SpeedStep detect below fails, guess SpeedStep <==> laptop)
HW_PROFILE_INFO hi;
GetCurrentHwProfile(&hi);
bool is_laptop = !(hi.dwDockInfo & DOCKINFO_DOCKED) ^

View File

@ -23,6 +23,7 @@
#include <cassert>
#include <crtdbg.h> // malloc debug
#include <malloc.h>
//
@ -37,62 +38,60 @@ void check_heap()
}
void display_msg(const wchar_t* caption, const wchar_t* msg)
void display_msg(const char* caption, const char* msg)
{
MessageBoxA(0, msg, caption, MB_ICONEXCLAMATION);
}
void wdisplay_msg(const wchar_t* caption, const wchar_t* msg)
{
MessageBoxW(0, msg, caption, MB_ICONEXCLAMATION);
}
// fixing Win32 _vsnprintf to return # characters that would be written,
// as required by C99, looks difficult and unnecessary. if any other code
// needs that, generalize the following code into vasprintf.
// need to shoehorn printf-style variable params into
// the OutputDebugString call.
// - don't want to split into multiple calls - would add newlines to output.
// - fixing Win32 _vsnprintf to return # characters that would be written,
// as required by C99, looks difficult and unnecessary. if any other code
// needs that, implement GNU vasprintf.
// - fixed size buffers aren't nice, but much simpler than vasprintf-style
// allocate+expand_until_it_fits. these calls are for quick debug output,
// not loads of data, anyway.
static const int MAX_CNT = 512;
// max output size of 1 call of (w)debug_out (including \0)
void debug_out(const char* fmt, ...)
{
size_t size = 256;
void* buf = 0;
char buf[MAX_CNT];
buf[MAX_CNT-1] = '\0';
for(;;)
{
void* buf2 = realloc(buf, size);
// out of mem - free old buf and quit
if(!buf2)
goto fail;
buf = buf2;
// don't assign directly from realloc - if it fails,
// we'd leak memory.
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, MAX_CNT-1, fmt, ap);
va_end(ap);
{
va_list args;
va_start(args, fmt);
// have to re-create every time. va_copy isn't portable.
int ret = vsnprintf((char*)buf, size, fmt, args);
// success
if(ret > -1 && ret < (int)size)
break;
// return value was required buffer size (without trailing '\0')
if(ret > (int)size)
size = ret+1;
// -1: increase buffer size
else
size *= 2;
va_end(args);
}
// prevent infinite loop
if(size > 64*KB)
goto fail;
}
OutputDebugString((const char*)buf);
fail:
free(buf);
OutputDebugString(buf);
}
void wdebug_out(const wchar_t* fmt, ...)
{
wchar_t buf[MAX_CNT];
buf[MAX_CNT-1] = L'\0';
va_list ap;
va_start(ap, fmt);
vsnwprintf(buf, MAX_CNT-1, fmt, ap);
va_end(ap);
OutputDebugStringW(buf);
}
@ -182,4 +181,4 @@ int WINAPI WinMain(HINSTANCE, HINSTANCE, LPSTR, int)
{
pre_main_init();
return main(__argc, __argv);
}
}

View File

@ -7,6 +7,7 @@
#define snprintf _snprintf
#define swprintf _snwprintf
#define vsnprintf _vsnprintf
#define vsnwprintf _vsnwprintf
#include <stddef.h> // wchar_t
@ -18,4 +19,4 @@
#define WINAPI __stdcall
#define WINAPIV __cdecl
#endif // #ifndef __WIN_H__
#endif // #ifndef __WIN_H__

View File

@ -22,6 +22,7 @@
#define _WIN32_WINNT 0x0500
#define NOGDICAPMASKS // CC_*, LC_*, PC_*, CP_*, TC_*, RC_
//#define NOVIRTUALKEYCODES // VK_*
//#define NOWINMESSAGES // WM_*, EM_*, LB_*, CB_*
@ -269,4 +270,4 @@ extern void win_unlock(uint idx);
}
#endif // #ifndef WIN_INTERNAL_H
#endif // #ifndef WIN_INTERNAL_H

View File

@ -43,8 +43,10 @@ extern int aio_close(int);
int open(const char* fn, int mode, ...)
{
bool is_com_port = strncmp(fn, "/dev/tty", 8) == 0;
// /dev/tty? => COM?
if(!strncmp(fn, "/dev/tty", 8))
if(is_com_port)
{
static char port[] = "COM ";
port[3] = (char)(fn[8]+1);
@ -55,7 +57,12 @@ int open(const char* fn, int mode, ...)
// open it for async I/O as well (_open defaults to deny_none sharing)
if(fd > 2)
aio_open(fn, mode, fd);
{
// .. unless it's a COM port. don't currently need aio access for those;
// also, aio_open's CreateFile reports access denied when trying to open.
if(!is_com_port)
aio_open(fn, mode, fd);
}
return fd;
}
@ -331,8 +338,8 @@ int pthread_mutex_timedlock(pthread_mutex_t* m, const struct timespec* abs_timeo
{
struct timespec cur_ts;
clock_gettime(CLOCK_REALTIME, &cur_ts);
ms_timeout = (cur_ts.tv_sec - abs_timeout->tv_sec ) * 1000 +
(cur_ts.tv_nsec - abs_timeout->tv_nsec) / 1000000;
ms_timeout = DWORD((cur_ts.tv_sec - abs_timeout->tv_sec ) * 1000 +
(cur_ts.tv_nsec - abs_timeout->tv_nsec) / 1000000);
}
return WaitForSingleObject(*m, ms_timeout) == WAIT_OBJECT_0? 0 : -1;
@ -498,6 +505,8 @@ int nanosleep(const struct timespec* rqtp, struct timespec* /* rmtp */)
int gettimeofday(struct timeval* tv, void* tzp)
{
UNUSED(tzp);
#ifndef NDEBUG
if(!tv)
{

View File

@ -34,6 +34,13 @@ extern "C" {
#define IMP(ret, name, param) extern "C" __declspec(dllimport) ret __stdcall name param;
// for functions actually implemented in the CRT
#ifdef _DLL
#define _CRTIMP __declspec(dllimport)
#else
#define _CRTIMP
#endif
//
@ -189,10 +196,13 @@ extern int open(const char* fn, int mode, ...);
// <unistd.h>
//
// values from MS _access() implementation. do not change.
#define F_OK 0
#define R_OK 1
#define R_OK 4
#define W_OK 2
#define X_OK 4
#define X_OK 0
// MS implementation doesn't support this distinction.
// hence, the file is reported executable if it exists.
#define read _read
#define write _write

View File

@ -235,7 +235,7 @@ inline SDLKey vkmap(int vk)
{
static SDLKey VK_SDLKMap[256]; /* VK_SDLKMap[vk] == SDLK */
ONCE( init_vkmap(VK_SDLKMap); )
ONCE( init_vkmap(VK_SDLKMap); );
assert(vk >= 0 && vk < 256);
@ -306,16 +306,17 @@ return_char:
char_buf[num_chars]=0;
translated_keysym=vkmap(vk);
//wprintf(L"ToUnicode: Translated %02x to [%s], %d chars, SDLK %02x. Extended flag %d, scancode %d\n", vk, char_buf, num_chars, translated_keysym, msg.lParam & 0x01000000, scancode);
fflush(stdout);
//fflush(stdout);
goto return_char;
}
else if (num_chars == -1)
{
// Dead Key: Don't produce an event for this one
//printf("ToUnicode: Dead Key %02x [%c] [%c] SDLK %02x\n", vk, vk, char_buf[0], vkmap(vk));
fflush(stdout);
//fflush(stdout);
num_chars = 0;
break;
// leave the switch statement; get next message.
}
// num_chars == 0: No translation: Just produce a plain KEYDOWN event
@ -327,7 +328,7 @@ return_char:
ev->key.keysym.unicode = 0;
//printf("ToUnicode: No translation for %02x, extended flag %d, scancode %d, SDLK %02x [%c]\n", vk, msg.lParam & 0x01000000, scancode, ev->key.keysym.sym, ev->key.keysym.sym);
fflush(stdout);
//fflush(stdout);
return 1;
}
@ -338,7 +339,7 @@ return_char:
// TODO Modifier statekeeping
ev->type = SDL_KEYUP;
ev->key.keysym.sym = vkmap(msg.wParam);
ev->key.keysym.sym = vkmap((int)msg.wParam);
ev->key.keysym.unicode = 0;
return 1;
@ -360,7 +361,7 @@ return_char:
sdl_btn = SDL_BUTTON_LEFT + btn/3; // assumes L,R,M
if(sdl_btn != -1)
{
ev->type = SDL_MOUSEBUTTONDOWN + btn%3;
ev->type = (u8)(SDL_MOUSEBUTTONDOWN + btn%3);
ev->button.button = (u8)sdl_btn;
ev->button.x = (u16)(msg.lParam & 0xffff);
ev->button.y = (u16)((msg.lParam >> 16) & 0xffff);
@ -514,9 +515,11 @@ void SDL_Quit()
}
void SDL_WM_SetCaption(const char *title, const char *icon)
void SDL_WM_SetCaption(const char* title, const char* icon)
{
SetWindowText(hWnd, title);
UNUSED(icon); // TODO: implement
}

View File

@ -21,17 +21,27 @@
#include "detect.h"
// useful for choosing a video mode. not called by detect().
// if we fail, don't change the outputs (assumed initialized to defaults)
void get_cur_resolution(int& xres, int& yres)
// if we fail, outputs are unchanged (assumed initialized to defaults)
int get_cur_resolution(int& xres, int& yres)
{
Display* disp = XOpenDisplay(0);
if(!disp)
return;
return -1;
int screen = XDefaultScreen(disp);
xres = XDisplayWidth (disp, screen);
yres = XDisplayHeight(disp, screen);
XCloseDisplay(disp);
return 0;
}
// useful for determining aspect ratio. not called by detect().
// if we fail, outputs are unchanged (assumed initialized to defaults).
// returns 0 on success or -1 on failure.
int get_monitor_size(int& width_cm, int& height_cm)
{
	// not implemented for X yet.
	// TODO: query XDisplayWidthMM/XDisplayHeightMM on the default screen
	// and convert mm -> cm, mirroring get_cur_resolution above.
	(void)width_cm;		// unused until implemented
	(void)height_cm;	// unused until implemented
	return -1;
}
#endif // #ifdef HAVE_X

View File

@ -82,7 +82,7 @@ static void display_startup_error(const wchar_t* msg)
const wchar_t* caption = L"0ad startup problem";
write_sys_info();
display_msg(caption, msg);
wdisplay_msg(caption, msg);
exit(1);
}
@ -214,8 +214,63 @@ static void do_tick()
{
}
int main(int argc, char* argv[])
{
chdir("\\games\\bf\\ScreenShots");
int a,b;
char fn[100];
__asm cpuid __asm rdtsc __asm mov a, eax
int i=0;
/*
int guess=4;
for(;;)
{
sprintf(fn, "ScreenShot%d.jpg", i);
if(access(fn, F_OK) < 0)
{
int lo = guess/2;
int hi = guess;
for(;;)
{
int test = (lo+hi)/2;
sprintf(fn, "ScreenShot%d.jpg", i);
if(access(fn, F_OK) < 0)
;
}
}
else
guess *= 2;
break;
}
*/
/*
DIR* d = opendir(".");
while(readdir(d))
i++;
closedir(d);
*/
/*
for(i = 0; i < 100; i++)
{
sprintf(fn, "ScreenShot%d.jpg", i);
if(access(fn, F_OK) < 0)
break;
}
*/
__asm cpuid __asm rdtsc __asm mov b, eax
int c = b-a;
lib_init();
// set 24 bit (float) FPU precision for faster divides / sqrts
@ -270,8 +325,8 @@ glEnable (GL_DEPTH_TEST);
new CConfig;
vfs_set_root(argv[0], "data");
vfs_mount("mods/official");
file_set_root_dir(argv[0], "../data");
vfs_mount("", "mods/official/", 0);
#ifndef NO_GUI
// GUI uses VFS, so this must come after VFS init.
@ -281,14 +336,11 @@ glEnable (GL_DEPTH_TEST);
g_GUI.LoadXMLFile("gui/sprite1.xml");
#endif
// tex = tex_load("0adlogo2.bmp");
// tex_upload(tex);
font = font_load("verdana.fnt");
terr_init();

View File

@ -35,7 +35,7 @@ CNetMessage *CNetMessage::DeserializeMessage(ENetMessageType type, u8 *buffer, u
{
g_DeserializerMap.insert(std::make_pair(pReg->m_Type, pReg->m_pDeserializer));
}
)
);
}
printf("DeserializeMessage: Finding for MT %d\n", type);

View File

@ -25,7 +25,7 @@
==================================================================*/
#include "Particle.h"
#include "time.h"
#include "timer.h"
#include "ogl.h"
#include <assert.h>

View File

@ -27,7 +27,7 @@
==================================================================*/
#include "ParticleEmitter.h"
#include "time.h"
#include "timer.h"
#include "ogl.h"
#include <stdlib.h>

View File

@ -26,7 +26,7 @@
#include "Sprite.h"
#include "ogl.h"
#include "tex.h"
#include "res/tex.h"
CSprite::CSprite() :
m_texture(NULL)

View File

@ -19,7 +19,7 @@ void InitScene ();
void InitResources ();
void RenderScene ();
extern bool keys[256];
extern bool keys[512]; // SDL also defines non-ascii keys; 512 should be enough
CMatrix3D g_WorldMat;
@ -309,6 +309,7 @@ for (uint ii=0;ii<g_TexMan.m_TerrainTextures.size();ii++) {
}
}
// cover entire terrain with default texture
u32 patchesPerSide=g_Terrain.GetPatchesPerSide();
for (uint pj=0; pj<patchesPerSide; pj++) {

27
source/update-workspaces.bat Executable file
View File

@ -0,0 +1,27 @@
@ECHO OFF
REM Create Visual Studio workspaces on Windows.
REM Run from the source directory; project files are generated via premake
REM and moved into vc6\, vc7\ and vc2003\.

REM create output directories; skip if already present so the script
REM can be re-run without "A subdirectory or file ... already exists" errors
IF NOT EXIST vc6 mkdir vc6
IF NOT EXIST vc7 mkdir vc7
IF NOT EXIST vc2003 mkdir vc2003

REM PROJECT is the lua project name; it must correspond to the base file name
REM of the project files that premake creates
SET PROJECT=prometheus

CD premake

REM Visual C++ 6
premake --target vs6
MOVE %PROJECT%.dsw ..\vc6
MOVE %PROJECT%.dsp ..\vc6

REM Visual Studio .NET (2002)
premake --target vs7
MOVE %PROJECT%.sln ..\vc7
MOVE %PROJECT%.vcproj ..\vc7

REM Visual Studio .NET 2003
premake --target vs2003
MOVE %PROJECT%.sln ..\vc2003
MOVE %PROJECT%.vcproj ..\vc2003

CD ..
pause