1
0
forked from 0ad/0ad

# add cppdoc

refs #122

This was SVN commit r4034.
This commit is contained in:
janwas 2006-06-25 15:31:58 +00:00
parent 7582f8fbbc
commit 3b753a4df8
4 changed files with 507 additions and 240 deletions

View File

@ -33,20 +33,27 @@
// page aligned allocator
//
// allocates memory aligned to the system page size.
//
// this is useful for file_buf_alloc, which uses this allocator to
// get sector-aligned (hopefully; see file_sector_size) IO buffers.
//
// note that this allocator is stateless and very little error checking
// can be performed.
// returns at least unaligned_size bytes of page-aligned memory.
// it defaults to read/writable; you can mprotect it if desired.
/**
* allocate memory aligned to the system page size.
*
* this is useful for file_buf_alloc, which uses this allocator to
* get sector-aligned (hopefully; see file_sector_size) IO buffers.
*
 * note that this allocator is stateless and very little error checking
* can be performed.
*
* @param unaligned_size minimum size [bytes] to allocate.
* @return writable, page-aligned and -padded memory; you can use
* mprotect to set other access permissions if desired.
**/
extern void* page_aligned_alloc(size_t unaligned_size);
// free a previously allocated region. must be passed the exact values
// passed to/returned from page_aligned_alloc.
/**
* free a previously allocated page-aligned region.
*
 * @param p exact value returned from page_aligned_alloc
 * @param unaligned_size exact value passed to page_aligned_alloc
**/
extern void page_aligned_free(void* p, size_t unaligned_size);
@ -54,127 +61,227 @@ extern void page_aligned_free(void* p, size_t unaligned_size);
// dynamic (expandable) array
//
// provides a memory range that can be expanded but doesn't waste
// physical memory or relocate itself. building block for other allocators.
/**
* provides a memory range that can be expanded but doesn't waste
* physical memory or relocate itself.
*
* works by preallocating address space and committing as needed.
* used as a building block for other allocators.
**/
struct DynArray
{
u8* base;
size_t max_size_pa; // reserved
size_t cur_size; // committed
size_t max_size_pa; /// reserved
size_t cur_size; /// committed
int prot; // applied to newly committed pages
/**
* mprotect flags applied to newly committed pages
**/
int prot;
size_t pos;
};
// ready the DynArray object for use. preallocates max_size bytes
// (rounded up to the next page size multiple) of address space for the
// array; it can never grow beyond this.
// no virtual memory is actually committed until calls to da_set_size.
/**
* ready the DynArray object for use.
*
* no virtual memory is actually committed until calls to da_set_size.
*
* @param da DynArray.
* @param max_size size [bytes] of address space to reserve (*);
* the DynArray can never expand beyond this.
* (* rounded up to next page size multiple)
* @return LibError.
**/
extern LibError da_alloc(DynArray* da, size_t max_size);
// free all memory (address space + physical) that constitutes the
// given array. use-after-free is impossible because the memory is
// marked not-present via MMU. also zeroes the contents of <da>.
/**
* free all memory (address space + physical) that constitutes the
* given DynArray.
*
* @param da DynArray. zeroed afterwards; continued use of the allocated
* memory is impossible because it is marked not-present via MMU.
* @return LibError.
**/
extern LibError da_free(DynArray* da);
// expand or shrink the array: changes the amount of currently committed
// (i.e. usable) memory pages. pages are added/removed until
// new_size (rounded up to the next page size multiple) is met.
/**
* expand or shrink the array: changes the amount of currently committed
* (i.e. usable) memory pages.
*
* @param da DynArray.
* @param new_size target size (rounded up to next page multiple).
* pages are added/removed until this is met.
* @return LibError.
**/
extern LibError da_set_size(DynArray* da, size_t new_size);
// make sure at least <size> bytes starting at <pos> are committed and
// ready for use.
/**
* make sure a given number of bytes starting from the current position
* (DynArray.pos) are committed and ready for use.
*
* @param da DynArray.
* @param size minimum size [bytes].
* @return LibError.
**/
extern LibError da_reserve(DynArray* da, size_t size);
// change access rights of the array memory; used to implement
// write-protection. affects the currently committed pages as well as
// all subsequently added pages.
// prot can be a combination of the PROT_* values used with mprotect.
/**
* change access rights of the array memory.
*
* used to implement write-protection. affects the currently committed
* pages as well as all subsequently added pages.
*
* @param da DynArray.
* @param prot a combination of the PROT_* values used with mprotect.
* @return LibError.
**/
extern LibError da_set_prot(DynArray* da, int prot);
// "wrap" (i.e. store information about) the given buffer in a
// DynArray object, preparing it for use with da_read or da_append.
// da_free should be called when the DynArray is no longer needed,
// even though it doesn't free this memory (but does zero the DynArray).
/**
* "wrap" (i.e. store information about) the given buffer in a DynArray.
*
* this is used to allow calling da_read or da_append on normal buffers.
* da_free should be called when the DynArray is no longer needed,
* even though it doesn't free this memory (but does zero the DynArray).
*
* @param da DynArray.
* @param p target memory (no alignment/padding requirements)
* @param size maximum size (no alignment requirements)
* @return LibError.
**/
extern LibError da_wrap_fixed(DynArray* da, u8* p, size_t size);
// "read" from array, i.e. copy into the given buffer.
// starts at offset DynArray.pos and advances this.
/**
* "read" from array, i.e. copy into the given buffer.
*
* starts at offset DynArray.pos and advances this.
*
* @param da DynArray.
* @param data_dst destination memory
* @param size [bytes] to copy
* @return LibError.
**/
extern LibError da_read(DynArray* da, void* data_dst, size_t size);
// "write" to array, i.e. copy from the given buffer.
// starts at offset DynArray.pos and advances this.
/**
* "write" to array, i.e. copy from the given buffer.
*
* starts at offset DynArray.pos and advances this.
*
* @param da DynArray.
* @param data_src source memory
* @param size [bytes] to copy
* @return LibError.
**/
extern LibError da_append(DynArray* da, const void* data_src, size_t size);
//
// pool allocator
//
// design parameters:
// - O(1) alloc and free;
// - fixed- XOR variable-sized blocks;
// - doesn't preallocate the entire pool;
// - returns sequential addresses.
// opaque! do not read/write any fields!
/**
* allocator design parameters:
* - O(1) alloc and free;
* - either fixed- or variable-sized blocks;
* - doesn't preallocate the entire pool;
* - returns sequential addresses.
*
* opaque! do not read/write any fields!
**/
struct Pool
{
DynArray da;
// size of elements. = 0 if pool set up for variable-sized
// elements, otherwise rounded up to pool alignment.
/**
* size of elements. = 0 if pool set up for variable-sized
* elements, otherwise rounded up to pool alignment.
**/
size_t el_size;
// pointer to freelist (opaque); see freelist_*.
// never used (remains 0) if elements are of variable size.
/**
* pointer to freelist (opaque); see freelist_*.
* never used (remains 0) if elements are of variable size.
**/
void* freelist;
};
// pass as pool_create's <el_size> param to indicate variable-sized allocs
// are required (see below).
/**
* pass as pool_create's <el_size> param to indicate variable-sized allocs
* are required (see below).
**/
const size_t POOL_VARIABLE_ALLOCS = ~0u;
// ready <p> for use. <max_size> is the upper limit [bytes] on
// pool size (this is how much address space is reserved).
//
// <el_size> can be 0 to allow variable-sized allocations
// (which cannot be freed individually);
// otherwise, it specifies the number of bytes that will be
// returned by pool_alloc (whose size parameter is then ignored).
/**
* ready the Pool object for use.
*
* @param p Pool.
* @param max_size size [bytes] of address space to reserve (*);
* the Pool can never expand beyond this.
* (* rounded up to next page size multiple)
* @param el_size 0 to allow variable-sized allocations (which cannot be
* freed individually); otherwise, it specifies the number of bytes that
* will be returned by pool_alloc (whose size parameter is then ignored).
* @return LibError.
**/
extern LibError pool_create(Pool* p, size_t max_size, size_t el_size);
// free all memory that ensued from <p>. all elements are made unusable
// (it doesn't matter if they were "allocated" or in freelist or unused);
// future alloc and free calls on this pool will fail.
/**
* free all memory (address space + physical) that constitutes the
* given Pool.
*
* @param p Pool. continued use of the allocated memory (*) is
* impossible because it is marked not-present via MMU.
* (* no matter if in freelist or unused or "allocated" to user)
* @return LibError.
**/
extern LibError pool_destroy(Pool* p);
// indicate whether <el> was allocated from the given pool.
// this is useful for callers that use several types of allocators.
/**
* indicate whether a pointer was allocated from the given pool.
*
* this is useful for callers that use several types of allocators.
*
* @param p Pool.
* @return bool.
**/
extern bool pool_contains(Pool* p, void* el);
// return an entry from the pool, or 0 if it would have to be expanded and
// there isn't enough memory to do so.
// exhausts the freelist before returning new entries to improve locality.
//
// if the pool was set up with fixed-size elements, <size> is ignored;
// otherwise, <size> bytes are allocated.
/**
* allocate memory from the pool.
*
* exhausts the freelist before returning new entries to improve locality.
*
* @param p Pool.
 * @param size [bytes] to allocate. ignored if pool was set up with
 * fixed-size elements.
* @return 0 if the Pool would have to be expanded and there isn't enough
* memory to do so, otherwise the allocated memory.
**/
extern void* pool_alloc(Pool* p, size_t size);
// make <el> available for reuse in the given Pool.
//
// this is not allowed if created for variable-size elements.
// rationale: avoids having to pass el_size here and compare with size when
// allocating; also prevents fragmentation and leaking memory.
/**
* make an entry available for reuse in the given Pool.
*
* this is not allowed if created for variable-size elements.
* rationale: avoids having to pass el_size here and compare with size when
* allocating; also prevents fragmentation and leaking memory.
*
* @param p Pool.
* @param el entry allocated via pool_alloc.
**/
extern void pool_free(Pool* p, void* el);
// "free" all allocations that ensued from the given Pool.
// this resets it as if freshly pool_create-d, but doesn't release the
// underlying memory.
/**
* "free" all user allocations that ensued from the given Pool.
*
* this resets it as if freshly pool_create-d, but doesn't release the
* underlying memory.
*
* @param p Pool.
**/
extern void pool_free_all(Pool* p);
@ -182,61 +289,87 @@ extern void pool_free_all(Pool* p);
// bucket allocator
//
// design goals:
// - fixed- XOR variable-sized blocks;
// - allow freeing individual blocks if they are all fixed-size;
// - never relocates;
// - no fixed limit.
// note: this type of allocator is called "region-based" in the literature.
// see "Reconsidering Custom Memory Allocation" (Berger, Zorn, McKinley).
// if individual variable-size elements must be freeable, consider "reaps":
// basically a combination of region and heap, where frees go to the heap and
// allocs exhaust that memory first and otherwise use the region.
// opaque! do not read/write any fields!
/**
* allocator design goals:
* - either fixed- or variable-sized blocks;
* - allow freeing individual blocks if they are all fixed-size;
* - never relocates;
* - no fixed limit.
*
* note: this type of allocator is called "region-based" in the literature.
* see "Reconsidering Custom Memory Allocation" (Berger, Zorn, McKinley).
* if individual variable-size elements must be freeable, consider "reaps":
* basically a combination of region and heap, where frees go to the heap and
* allocs exhaust that memory first and otherwise use the region.
*
* opaque! do not read/write any fields!
**/
struct Bucket
{
// currently open bucket.
/**
* currently open bucket.
**/
u8* bucket;
// offset of free space at end of current bucket (i.e. # bytes in use).
/**
* offset of free space at end of current bucket (i.e. # bytes in use).
**/
size_t pos;
void* freelist;
size_t el_size : 16;
// records # buckets allocated; verifies the list of buckets is correct.
/**
* records # buckets allocated; verifies the list of buckets is correct.
**/
uint num_buckets : 16;
};
// ready <b> for use.
//
// <el_size> can be 0 to allow variable-sized allocations
// (which cannot be freed individually);
// otherwise, it specifies the number of bytes that will be
// returned by bucket_alloc (whose size parameter is then ignored).
/**
* ready the Bucket object for use.
*
* @param b Bucket.
* @param el_size 0 to allow variable-sized allocations (which cannot be
* freed individually); otherwise, it specifies the number of bytes that
* will be returned by bucket_alloc (whose size parameter is then ignored).
* @return LibError.
**/
extern LibError bucket_create(Bucket* b, size_t el_size);
// free all memory that ensued from <b>.
// future alloc and free calls on this Bucket will fail.
/**
* free all memory that ensued from <b>.
*
* future alloc and free calls on this Bucket will fail.
*
* @param b Bucket.
**/
extern void bucket_destroy(Bucket* b);
// return an entry from the bucket, or 0 if another would have to be
// allocated and there isn't enough memory to do so.
// exhausts the freelist before returning new entries to improve locality.
//
// if the bucket was set up with fixed-size elements, <size> is ignored;
// otherwise, <size> bytes are allocated.
/**
* allocate memory from the bucket.
*
* exhausts the freelist before returning new entries to improve locality.
*
* @param b Bucket.
 * @param size [bytes] to allocate. ignored if the Bucket was set up with
 * fixed-size elements.
* @return 0 if the Bucket would have to be expanded and there isn't enough
* memory to do so, otherwise the allocated memory.
**/
extern void* bucket_alloc(Bucket* b, size_t size);
// make <el> available for reuse in <b>.
//
// this is not allowed if created for variable-size elements.
// rationale: avoids having to pass el_size here and compare with size when
// allocating; also prevents fragmentation and leaking memory.
/**
* make an entry available for reuse in the given Bucket.
*
* this is not allowed if created for variable-size elements.
* rationale: avoids having to pass el_size here and compare with size when
* allocating; also prevents fragmentation and leaking memory.
*
* @param b Bucket.
* @param el entry allocated via bucket_alloc.
**/
extern void bucket_free(Bucket* b, void* el);
@ -244,22 +377,28 @@ extern void bucket_free(Bucket* b, void* el);
// matrix allocator
//
// takes care of the dirty work of allocating 2D matrices:
// - aligns data
// - only allocates one memory block, which is more efficient than
// malloc/new for each row.
// allocate a 2D cols x rows matrix of <el_size> byte cells.
// this must be freed via matrix_free. returns 0 if out of memory.
//
// the returned pointer should be cast to the target type (e.g. int**) and
// can then be accessed by matrix[col][row].
//
/**
* allocate a 2D matrix accessible as matrix[col][row].
*
* takes care of the dirty work of allocating 2D matrices:
* - aligns data
* - only allocates one memory block, which is more efficient than
* malloc/new for each row.
*
* @param cols, rows: dimension (cols x rows)
* @param el_size size [bytes] of a matrix cell
* @return 0 if out of memory, otherwise matrix that should be cast to
* type** (sizeof(type) == el_size). must be freed via matrix_free.
**/
extern void** matrix_alloc(uint cols, uint rows, size_t el_size);
// free the given matrix (allocated by matrix_alloc). no-op if matrix == 0.
// callers will likely want to pass variables of a different type
// (e.g. int**); they must be cast to void**.
/**
* free the given matrix.
*
* @param matrix allocated by matrix_alloc; no-op if 0.
* callers will likely want to pass variables of a different type
* (e.g. int**); they must be cast to void**.
**/
extern void matrix_free(void** matrix);
@ -267,22 +406,36 @@ extern void matrix_free(void** matrix);
// allocator optimized for single instances
//
// intended for applications that frequently alloc/free a single
// fixed-size object. caller provides static storage and an in-use flag;
// we use that memory if available and otherwise fall back to the heap.
// if the application only has one object in use at a time, malloc is
// avoided; this is faster and avoids heap fragmentation.
//
// thread-safe.
/**
* allocator for applications that frequently alloc/free a single
* fixed-size object.
*
* if there is only one object in use at a time, malloc is avoided;
* this is faster and avoids heap fragmentation.
*
* @param storage static storage; enough to fit one item.
* @param in_use_flag: indicates if storage is in use. manipulated via CAS,
* so this is thread-safe.
* @param size [bytes] of storage (we need to know this if falling back to
* heap allocation).
* @return pointer to storage if available, otherwise a heap allocation.
**/
extern void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size);
/**
* free memory allocated via single_calloc.
*
* see description there.
**/
extern void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p);
// C++ wrapper
#ifdef __cplusplus
// T must be POD (Plain Old Data) because it is memset to 0!
/**
* C++ wrapper on top of single_calloc that's slightly easier to use.
*
* T must be POD (Plain Old Data) because it is memset to 0!
**/
template<class T> class SingleAllocator
{
T storage;
@ -312,7 +465,7 @@ public:
// overrun protection
//
/*
/**
OverrunProtector wraps an arbitrary object in DynArray memory and can detect
inadvertent writes to it. this is useful for tracking down memory overruns.
@ -333,8 +486,7 @@ if(!yc) // your_class_wrapper's one-time alloc of a your_class-
doSomethingWith(yc); // read/write access
your_class_wrapper.lock(); // disallow further access until next .get()
..
*/
**/
template<class T> class OverrunProtector
{
DynArray da;
@ -415,8 +567,11 @@ public:
// allocator test rig
//
// call for each allocator operation to sanity-check them.
// should only be used during debug mode due to serious overhead.
/**
* allocator test rig.
* call from each allocator operation to sanity-check them.
* should only be used during debug mode due to serious overhead.
**/
class AllocatorChecker
{
public:
@ -442,6 +597,9 @@ public:
}
}
/**
* allocator is resetting itself, i.e. wiping out all allocs.
**/
void notify_clear()
{
allocs.clear();

View File

@ -50,7 +50,7 @@ Usage
In the simplest case, the stubs are already acceptable. Otherwise,
you need to implement a new version of some hooks, fill an
AppHooks struct with pointers to those functions (zero the rest),
and call set_app_hooks.
and call app_hooks_update.
*/
@ -77,40 +77,82 @@ and call set_app_hooks.
#define VOID_FUNC(name, params, param_names)\
FUNC(void, name, params, param_names, (void))
// override default decision on using OpenGL extensions relating to
// texture upload. this should call ogl_tex_override to disable/force
// their use if the current card/driver combo respectively crashes or
// supports it even though the extension isn't advertised.
//
// default implementation works but is hardwired in code and therefore
// not expandable.
/**
* override default decision on using OpenGL extensions relating to
* texture upload.
*
* this should call ogl_tex_override to disable/force their use if the
* current card/driver combo respectively crashes or
* supports it even though the extension isn't advertised.
*
* the default implementation works but is hardwired in code and therefore
* not expandable.
**/
VOID_FUNC(override_gl_upload_caps, (void), ())
// return full native path of the directory into which crashdumps should be
// written. must end with directory separator (e.g. '/').
// if implementing via static storage, be sure to guarantee reentrancy
// (e.g. by only filling the string once).
// must be callable at any time - in particular, before VFS init.
// this means file_make_full_native_path cannot be used; it is best
// to specify a path relative to sys_get_executable_name.
/**
* return path to directory into which crash dumps should be written.
*
* if implementing via static storage, be sure to guarantee reentrancy
* (e.g. by only filling the string once).
* must be callable at any time - in particular, before VFS init.
* this means file_make_full_native_path cannot be used; it is best
* to specify a path relative to sys_get_executable_name.
*
* @return full native path; must end with directory separator (e.g. '/').
**/
FUNC(const char*, get_log_dir, (void), (), return)
// gather all app-related logs/information and write it into <f>.
// used when writing a crashlog so that all relevant info is in one file.
//
// default implementation gathers 0ad data but is fail-safe.
/**
* gather all app-related logs/information and write it to file.
*
* used when writing a crash log so that all relevant info is in one file.
*
* the default implementation attempts to gather 0ad data, but is
* fail-safe (doesn't complain if file not found).
*
* @param f file into which to write.
**/
VOID_FUNC(bundle_logs, (FILE* f), (f))
// return localized version of <text> if i18n functionality is available.
//
// default implementation just returns the pointer unchanged.
/**
* translate text to the current locale.
*
* @param text to translate.
* @return pointer to localized text; must be freed via translate_free.
*
* the default implementation just returns the pointer unchanged.
**/
FUNC(const wchar_t*, translate, (const wchar_t* text), (text), return)
// write <text> to the app's log.
//
// default implementation uses stdout.
/**
* free text that was returned by translate.
*
* @param text to free.
*
* the default implementation does nothing.
**/
VOID_FUNC(translate_free, (const wchar_t* text), (text))
/**
* write text to the app's log.
*
* @param text to write.
*
* the default implementation uses stdout.
**/
VOID_FUNC(log, (const wchar_t* text), (text))
/**
* display an error dialog, thus overriding sys_display_error.
*
* @param text error message.
* @param flags see DebugDisplayErrorFlags.
* @return ErrorReaction.
*
* the default implementation just returns ER_NOT_IMPLEMENTED, which
* causes the normal sys_display_error to be used.
**/
FUNC(ErrorReaction, display_error, (const wchar_t* text, uint flags), (text, flags), return)
#undef VOID_FUNC
@ -124,7 +166,9 @@ FUNC(ErrorReaction, display_error, (const wchar_t* text, uint flags), (text, fla
#ifndef APP_HOOKS_H__
#define APP_HOOKS_H__
// holds a function pointer for each hook. passed to set_app_hooks.
/**
* holds a function pointer for each hook. passed to app_hooks_update.
**/
struct AppHooks
{
#define FUNC(ret, name, params, param_names, call_prefix) ret (*name) params;
@ -135,10 +179,14 @@ struct AppHooks
int dummy;
};
// register the specified hook function pointers. any of them that
// are non-zero override the previous function pointer value
// (these default to the stub hooks which are functional but basic).
extern void set_app_hooks(AppHooks* ah);
/**
* update the app hook function pointers.
*
* @param ah AppHooks struct. any of its function pointers that are non-zero
* override the previous function pointer value
* (these default to the stub hooks which are functional but basic).
**/
extern void app_hooks_update(AppHooks* ah);
// trampolines used by lib code to call the hooks. they encapsulate

View File

@ -21,7 +21,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
/**
Error handling system
@ -125,7 +125,7 @@ Notes:
- unfortunately Intellisense isn't smart enough to pick up the
ERR_* definitions. This is the price of automatically associating
descriptive text with the error code.
*/
**/
#ifndef ERRORS_H__
#define ERRORS_H__
@ -145,38 +145,65 @@ enum LibError {
LIB_ERROR_DUMMY
};
// generate textual description of an error code.
// stores up to <max_chars> in the given buffer.
// if error is unknown/invalid, the string will be something like
// "Unknown error (65536, 0x10000)".
/**
* generate textual description of an error code.
*
* @param err LibError to be translated. if despite type checking we
* get an invalid enum value, the string will be something like
* "Unknown error (65536, 0x10000)".
* @param buf destination buffer
* @param max_chars size of buffer [characters]
**/
extern void error_description_r(LibError err, char* buf, size_t max_chars);
//-----------------------------------------------------------------------------
// conversion to/from other error code definitions.
// notes:
// - these functions will raise a warning (before returning any error code
// except INFO_OK) unless warn_if_failed is explicitly set to false.
// - other conversion routines (e.g. to/from Win32) are implemented in
// the corresponding modules to keep this header portable.
// note: other conversion routines (e.g. to/from Win32) are implemented in
// the corresponding modules to keep this header portable.
// return the LibError equivalent of errno, or ERR_FAIL if there's no equal.
// only call after a POSIX function indicates failure.
// raises a warning (avoids having to on each call site).
/**
* translate errno to LibError.
*
* should only be called directly after a POSIX function indicates failure;
* errno may otherwise still be set from another error cause.
*
* @param warn_if_failed if set, raise a warning when returning an error
* (i.e. ERR_*, but not INFO_OK). this avoids having to wrap all
* call sites in WARN_ERR etc.
* @return LibError equivalent of errno, or ERR_FAIL if there's no equal.
**/
extern LibError LibError_from_errno(bool warn_if_failed = true);
// translate the return value of any POSIX function into LibError.
// ret is typically to -1 to indicate error and 0 on success.
// you should set errno to 0 before calling the POSIX function to
// make sure we do not return any stale errors.
/**
* translate a POSIX function's return/error indication to LibError.
*
* you should set errno to 0 before calling the POSIX function to
* make sure we do not return any stale errors. typical usage:
* errno = 0;
* int ret = posix_func(..);
* return LibError_from_posix(ret);
*
* @param ret return value of a POSIX function: 0 indicates success,
* -1 is error.
* @param warn_if_failed if set, raise a warning when returning an error
* (i.e. ERR_*, but not INFO_OK). this avoids having to wrap all
* call sites in WARN_ERR etc.
* @return INFO_OK if the POSIX function succeeded, else the LibError
* equivalent of errno, or ERR_FAIL if there's no equal.
**/
extern LibError LibError_from_posix(int ret, bool warn_if_failed = true);
// set errno to the equivalent of <err>. used in wposix - underlying
// functions return LibError but must be translated to errno at
// e.g. the mmap interface level. higher-level code that calls mmap will
// in turn convert back to LibError.
/**
* set errno to the equivalent of a LibError.
*
* used in wposix - underlying functions return LibError but must be
* translated to errno at e.g. the mmap interface level. higher-level code
* that calls mmap will in turn convert back to LibError.
*
* @param err error code to set
**/
extern void LibError_set_errno(LibError err);

View File

@ -75,43 +75,60 @@ extern "C" {
// extensions
//
// check if the extension <ext> is supported by the OpenGL implementation.
// takes subsequently added core support for some extensions into account.
/**
* check if an extension is supported by the OpenGL implementation.
*
* takes subsequently added core support for some extensions into account
* (in case drivers forget to advertise extensions).
*
* @param ext extension string; exact case.
* @return bool.
**/
extern bool oglHaveExtension(const char* ext);
// check if the OpenGL implementation is at least at <version>.
// (format: "%d.%d" major minor)
/**
* make sure the OpenGL implementation version matches or is newer than
* the given version.
*
* @param version version string; format: ("%d.%d", major, minor).
* example: "1.2".
**/
extern bool oglHaveVersion(const char* version);
// check if all given extension strings (passed as const char* parameters,
// terminated by a 0 pointer) are supported by the OpenGL implementation,
// as determined by oglHaveExtension.
// returns 0 if all are present; otherwise, the first extension in the
// list that's not supported (useful for reporting errors).
//
// note: dummy parameter is necessary to access parameter va_list.
//
// rationale: see source.
/**
* check if a list of extensions are all supported (as determined by
* oglHaveExtension).
*
* @param dummy value ignored; varargs requires a placeholder.
* follow it by a list of const char* extension string parameters,
* terminated by a 0 pointer.
* @return 0 if all are present; otherwise, the first extension in the
* list that's not supported (useful for reporting errors).
**/
extern const char* oglHaveExtensions(int dummy, ...);
// return a C string of unspecified length containing a space-separated
// list of all extensions the OpenGL implementation advertises.
// (useful for crash logs).
/**
* get a list of all supported extensions.
*
* useful for crash logs / system information.
*
* @return read-only C string of unspecified length containing all
* advertised extension names, separated by space.
**/
extern const char* oglExtList(void);
// declare extension function pointers
#if OS_WIN
# define CALL_CONV __stdcall
#else
# define CALL_CONV
#endif
#define FUNC(ret, name, params) extern ret (CALL_CONV *p##name) params;
#define FUNC2(ret, nameARB, nameCore, version, params) extern ret (CALL_CONV *p##nameARB) params;
#include "glext_funcs.h"
#undef FUNC2
#undef FUNC
// leave CALL_CONV defined for ogl.cpp
@ -119,13 +136,17 @@ extern const char* oglExtList(void);
// implementation limits / feature detect
//
extern int ogl_max_tex_size; // [pixels]
extern int ogl_max_tex_units; // limit on GL_TEXTUREn
extern int ogl_max_tex_size; /// [pixels]
extern int ogl_max_tex_units; /// limit on GL_TEXTUREn
// set sysdep/gfx.h gfx_card and gfx_drv_ver. called by gfx_detect.
//
// fails if OpenGL not ready for use.
// gfx_card and gfx_drv_ver are unchanged on failure.
/**
* set sysdep/gfx.h gfx_card and gfx_drv_ver. called by gfx_detect.
*
* fails if OpenGL not ready for use.
* gfx_card and gfx_drv_ver are unchanged on failure.
*
* @return LibError
**/
extern LibError ogl_get_gfx_info(void);
@ -133,30 +154,43 @@ extern LibError ogl_get_gfx_info(void);
// misc
//
// in non-release builds, enable oglCheck, which breaks into the debugger
// if an OpenGL error was raised since the last call.
// add these calls everywhere to close in on the error cause.
//
// reports a bogus invalid_operation error if called before OpenGL is
// initialized, so don't!
#ifndef NDEBUG
/**
* raise a warning (break into the debugger) if an OpenGL error is pending.
* resets the OpenGL error state afterwards.
*
* when an error is reported, insert calls to this in a binary-search scheme
* to quickly narrow down the actual error location.
*
* reports a bogus invalid_operation error if called before OpenGL is
* initialized, so don't!
*
* disabled in release mode for efficiency and to avoid annoying errors.
**/
extern void oglCheck(void);
#else
#ifdef NDEBUG
# define oglCheck()
#endif
// ignore and reset the specified error (as returned by glGetError).
// any other errors that have occurred are reported as oglCheck would.
//
// this is useful for suppressing annoying error messages, e.g.
// "invalid enum" for GL_CLAMP_TO_EDGE even though we've already
// warned the user that their OpenGL implementation is too old.
/**
* ignore and reset the specified OpenGL error.
*
* this is useful for suppressing annoying error messages, e.g.
* "invalid enum" for GL_CLAMP_TO_EDGE even though we've already
* warned the user that their OpenGL implementation is too old.
*
* call after the fact, i.e. the error has been raised. if another or
* different error is pending, those are reported immediately.
*
* @param err_to_ignore: one of the glGetError enums.
**/
extern void oglSquelchError(GLenum err_to_ignore);
// call before using any of the above, and after each video mode change.
//
// fails if OpenGL not ready for use.
/**
* initialization: import extension function pointers and do feature detect.
* call before using any of the above, and after each video mode change.
* fails if OpenGL not ready for use.
**/
extern void oglInit(void);