1
0
forked from 0ad/0ad

# add CppDoc formatted comments to allocators,ogl_tex,snd_mgr

This was SVN commit r3843.
This commit is contained in:
janwas 2006-05-04 05:45:04 +00:00
parent 801086c5dd
commit cd67d271f0
3 changed files with 968 additions and 404 deletions

View File

@ -110,8 +110,14 @@ static LibError mem_protect(u8* p, size_t size, int prot)
// page aligned allocator
//-----------------------------------------------------------------------------
// returns at least unaligned_size bytes of page-aligned memory.
// it defaults to read/writable; you can mprotect it if desired.
/**
* allocate memory starting at a page-aligned address.
* it defaults to read/writable; you can mprotect it if desired.
*
* @param unaligned_size minimum size [bytes] to allocate
* (will be rounded up to page size)
* @return void* allocated memory, or NULL if error / out of memory.
*/
void* page_aligned_alloc(size_t unaligned_size)
{
const size_t size_pa = round_up_to_page(unaligned_size);
@ -121,10 +127,16 @@ void* page_aligned_alloc(size_t unaligned_size)
return p;
}
// free a previously allocated region. must be passed the exact values
// passed to/returned from page_aligned_malloc.
/**
* Free a memory block that had been allocated by page_aligned_alloc.
*
 * @param p Exact pointer returned by page_aligned_alloc
* @param unaligned_size Exact size passed to page_aligned_alloc
*/
void page_aligned_free(void* p, size_t unaligned_size)
{
if(!p)
return;
debug_assert(is_page_multiple((uintptr_t)p));
const size_t size_pa = round_up_to_page(unaligned_size);
(void)mem_release((u8*)p, size_pa);
@ -171,10 +183,16 @@ static LibError validate_da(DynArray* da)
#define CHECK_DA(da) RETURN_ERR(validate_da(da))
// ready the DynArray object for use. preallocates max_size bytes
// (rounded up to the next page size multiple) of address space for the
// array; it can never grow beyond this.
// no virtual memory is actually committed until calls to da_set_size.
/**
* ready the DynArray object for use.
*
* @param DynArray*
* @param max_size Max size [bytes] of the DynArray; this much
* (rounded up to next page multiple) virtual address space is reserved.
* no virtual memory is actually committed until calls to da_set_size.
* @return LibError
*/
LibError da_alloc(DynArray* da, size_t max_size)
{
const size_t max_size_pa = round_up_to_page(max_size);
@ -192,10 +210,19 @@ LibError da_alloc(DynArray* da, size_t max_size)
}
// "wrap" (i.e. store information about) the given buffer in a
// DynArray object, preparing it for use with da_read or da_append.
// da_free should be called when the DynArray is no longer needed,
// even though it doesn't free this memory (but does zero the DynArray).
/**
* "wrap" (i.e. store information about) the given buffer in a
* DynArray object, preparing it for use with da_read or da_append.
*
* da_free should be called when the DynArray is no longer needed,
* even though it doesn't free this memory (but does zero the DynArray).
*
* @param DynArray*. Note: any future operations on it that would
* change the underlying memory (e.g. da_set_size) will fail.
* @param p Memory
* @param size Size [bytes]
* @return LibError
*/
LibError da_wrap_fixed(DynArray* da, u8* p, size_t size)
{
da->base = p;
@ -208,9 +235,14 @@ LibError da_wrap_fixed(DynArray* da, u8* p, size_t size)
}
// free all memory (address space + physical) that constitutes the
// given array. use-after-free is impossible because the memory is
// marked not-present via MMU. also zeroes the contents of <da>.
/**
* free all memory (address space + physical) that constitutes the
* given array. use-after-free is impossible because the memory is
* marked not-present via MMU.
*
* @param DynArray* da; zeroed afterwards.
* @return LibError
*/
LibError da_free(DynArray* da)
{
CHECK_DA(da);
@ -232,9 +264,15 @@ LibError da_free(DynArray* da)
}
// expand or shrink the array: changes the amount of currently committed
// (i.e. usable) memory pages. pages are added/removed until
// new_size (rounded up to the next page size multiple) is met.
/**
* expand or shrink the array: changes the amount of currently committed
* (i.e. usable) memory pages.
*
* @param DynArray*
* @param new_size [bytes]. Pages are added/removed until this size
* (rounded up to the next page size multiple) is reached.
* @return LibError
*/
LibError da_set_size(DynArray* da, size_t new_size)
{
CHECK_DA(da);
@ -269,8 +307,14 @@ LibError da_set_size(DynArray* da, size_t new_size)
}
// make sure at least <size> bytes starting at <pos> are committed and
// ready for use.
/**
* Make sure at least <size> bytes starting at da->pos are committed and
* ready for use.
*
* @param DynArray*
* @param size Minimum amount to guarantee [bytes]
* @return LibError
*/
LibError da_reserve(DynArray* da, size_t size)
{
// default to page size (the OS won't commit less anyway);
@ -283,10 +327,15 @@ LibError da_reserve(DynArray* da, size_t size)
}
// change access rights of the array memory; used to implement
// write-protection. affects the currently committed pages as well as
// all subsequently added pages.
// prot can be a combination of the PROT_* values used with mprotect.
/**
* Change access rights of the array memory; used to implement
* write-protection. affects the currently committed pages as well as
* all subsequently added pages.
*
* @param DynArray*
* @param prot PROT_* protection flags as defined by POSIX mprotect()
* @return LibError
*/
LibError da_set_prot(DynArray* da, int prot)
{
CHECK_DA(da);
@ -304,8 +353,15 @@ LibError da_set_prot(DynArray* da, int prot)
}
// "read" from array, i.e. copy into the given buffer.
// starts at offset DynArray.pos and advances this.
/**
* "read" from array, i.e. copy into the given buffer.
* starts at offset DynArray.pos and advances this.
*
* @param DynArray*
* @param data Destination buffer
* @param size Amount to copy [bytes]
* @return LibError
*/
LibError da_read(DynArray* da, void* data, size_t size)
{
// make sure we have enough data to read
@ -318,8 +374,15 @@ LibError da_read(DynArray* da, void* data, size_t size)
}
// "write" to array, i.e. copy from the given buffer.
// starts at offset DynArray.pos and advances this.
/**
* "write" to array, i.e. copy from the given buffer.
* starts at offset DynArray.pos and advances this.
*
* @param DynArray*
* @param data Source buffer
 * @param size Amount to copy [bytes]
* @return LibError
*/
LibError da_append(DynArray* da, const void* data, size_t size)
{
RETURN_ERR(da_reserve(da, size));
@ -366,13 +429,18 @@ static void* freelist_pop(void** pfreelist)
static const size_t ALIGN = 8;
// ready <p> for use. <max_size> is the upper limit [bytes] on
// pool size (this is how much address space is reserved).
//
// <el_size> can be 0 to allow variable-sized allocations
// (which cannot be freed individually);
// otherwise, it specifies the number of bytes that will be
// returned by pool_alloc (whose size parameter is then ignored).
/**
* Ready Pool for use.
*
* @param Pool*
* @param max_size Max size [bytes] of the Pool; this much
* (rounded up to next page multiple) virtual address space is reserved.
* no virtual memory is actually committed until calls to pool_alloc.
* @param el_size Number of bytes that will be returned by each
* pool_alloc (whose size parameter is then ignored). Can be 0 to
* allow variable-sized allocations, but pool_free is then unusable.
* @return LibError
*/
LibError pool_create(Pool* p, size_t max_size, size_t el_size)
{
if(el_size == POOL_VARIABLE_ALLOCS)
@ -385,9 +453,14 @@ LibError pool_create(Pool* p, size_t max_size, size_t el_size)
}
// free all memory that ensued from <p>. all elements are made unusable
// (it doesn't matter if they were "allocated" or in freelist or unused);
// future alloc and free calls on this pool will fail.
/**
* Free all memory that ensued from the Pool. all elements are made unusable
* (it doesn't matter if they were "allocated" or in freelist or unused);
* future alloc and free calls on this pool will fail.
*
* @param Pool*
* @return LibError
*/
LibError pool_destroy(Pool* p)
{
// don't be picky and complain if the freelist isn't empty;
@ -398,8 +471,14 @@ LibError pool_destroy(Pool* p)
}
// indicate whether <el> was allocated from the given pool.
// this is useful for callers that use several types of allocators.
/**
* Indicate whether <el> was allocated from the given pool.
* this is useful for callers that use several types of allocators.
*
* @param Pool*
* @param el Address in question
* @return bool
*/
bool pool_contains(Pool* p, void* el)
{
// outside of our range
@ -412,12 +491,15 @@ bool pool_contains(Pool* p, void* el)
}
// return an entry from the pool, or 0 if it would have to be expanded and
// there isn't enough memory to do so.
// exhausts the freelist before returning new entries to improve locality.
//
// if the pool was set up with fixed-size elements, <size> is ignored;
// otherwise, <size> bytes are allocated.
/**
* Dole out memory from the pool.
* exhausts the freelist before returning new entries to improve locality.
*
* @param Pool*
* @param size bytes to allocate; ignored if pool_create's el_size was not 0.
* @return allocated memory, or 0 if the Pool would have to be expanded and
* there isn't enough memory to do so.
*/
void* pool_alloc(Pool* p, size_t size)
{
// if pool allows variable sizes, go with the size parameter,
@ -446,11 +528,16 @@ have_el:
}
// make <el> available for reuse in the given Pool.
//
// this is not allowed if created for variable-size elements.
// rationale: avoids having to pass el_size here and compare with size when
// allocating; also prevents fragmentation and leaking memory.
/**
* Make a fixed-size element available for reuse in the given Pool.
*
* this is not allowed if the Pool was created for variable-size elements.
* rationale: avoids having to pass el_size here and compare with size when
* allocating; also prevents fragmentation and leaking memory.
*
* @param Pool*
* @param el Element returned by pool_alloc.
*/
void pool_free(Pool* p, void* el)
{
// only allowed to free items if we were initialized with
@ -469,9 +556,13 @@ void pool_free(Pool* p, void* el)
}
// "free" all allocations that ensued from the given Pool.
// this resets it as if freshly pool_create-d, but doesn't release the
// underlying memory.
/**
* "free" all allocations that ensued from the given Pool.
* this resets it as if freshly pool_create-d, but doesn't release the
* underlying reserved virtual memory.
*
* @param Pool*
*/
void pool_free_all(Pool* p)
{
p->freelist = 0;
@ -502,12 +593,16 @@ void pool_free_all(Pool* p)
// power-of-2 isn't required; value is arbitrary.
const size_t BUCKET_SIZE = 4000;
// ready <b> for use.
//
// <el_size> can be 0 to allow variable-sized allocations
// (which cannot be freed individually);
// otherwise, it specifies the number of bytes that will be
// returned by bucket_alloc (whose size parameter is then ignored).
/**
* Ready Bucket for use.
*
* @param Bucket*
* @param el_size Number of bytes that will be returned by each
* bucket_alloc (whose size parameter is then ignored). Can be 0 to
* allow variable-sized allocations, but bucket_free is then unusable.
* @return LibError
*/
LibError bucket_create(Bucket* b, size_t el_size)
{
b->freelist = 0;
@ -531,8 +626,12 @@ LibError bucket_create(Bucket* b, size_t el_size)
}
// free all memory that ensued from <b>.
// future alloc and free calls on this Bucket will fail.
/**
* Free all memory that ensued from the Bucket.
* future alloc and free calls on this Bucket will fail.
*
* @param Bucket*
*/
void bucket_destroy(Bucket* b)
{
while(b->bucket)
@ -551,12 +650,15 @@ void bucket_destroy(Bucket* b)
}
// return an entry from the bucket, or 0 if another would have to be
// allocated and there isn't enough memory to do so.
// exhausts the freelist before returning new entries to improve locality.
//
// if the bucket was set up with fixed-size elements, <size> is ignored;
// otherwise, <size> bytes are allocated.
/**
* Dole out memory from the Bucket.
* exhausts the freelist before returning new entries to improve locality.
*
* @param Bucket*
* @param size bytes to allocate; ignored if bucket_create's el_size was not 0.
* @return allocated memory, or 0 if the Bucket would have to be expanded and
* there isn't enough memory to do so.
*/
void* bucket_alloc(Bucket* b, size_t size)
{
size_t el_size = b->el_size? b->el_size : round_up(size, ALIGN);
@ -589,11 +691,16 @@ void* bucket_alloc(Bucket* b, size_t size)
}
// make <el> available for reuse in <b>.
//
// this is not allowed if created for variable-size elements.
// rationale: avoids having to pass el_size here and compare with size when
// allocating; also prevents fragmentation and leaking memory.
/**
* Make a fixed-size element available for reuse in the Bucket.
*
* this is not allowed if the Bucket was created for variable-size elements.
* rationale: avoids having to pass el_size here and compare with size when
* allocating; also prevents fragmentation and leaking memory.
*
* @param Bucket*
* @param el Element returned by bucket_alloc.
*/
void bucket_free(Bucket* b, void* el)
{
if(b->el_size == 0)
@ -619,11 +726,15 @@ void bucket_free(Bucket* b, void* el)
// - only allocates one memory block, which is more efficient than
// malloc/new for each row.
// allocate a 2D cols x rows matrix of <el_size> byte cells.
// this must be freed via matrix_free. returns 0 if out of memory.
//
// the returned pointer should be cast to the target type (e.g. int**) and
// can then be accessed by matrix[col][row].
/**
* allocate a 2D cols x rows matrix of <el_size> byte cells.
* this must be freed via matrix_free.
*
* @param cols, rows Matrix dimensions.
* @param el_size Size [bytes] of each matrix entry.
* @return void**: 0 if out of memory, or a pointer that should be cast to the
* target type (e.g. int**). it can then be accessed via matrix[col][row].
*/
void** matrix_alloc(uint cols, uint rows, size_t el_size)
{
const size_t initial_align = 64;
@ -660,9 +771,12 @@ void** matrix_alloc(uint cols, uint rows, size_t el_size)
}
// free the given matrix (allocated by matrix_alloc). no-op if matrix == 0.
// callers will likely want to pass variables of a different type
// (e.g. int**); they must be cast to void**.
/**
* Free a matrix allocated by matrix_alloc.
*
* @param void** matrix. Callers will likely want to pass it as another
 * type, but C++ requires it be explicitly cast to void**.
*/
void matrix_free(void** matrix)
{
free(matrix);
@ -673,14 +787,25 @@ void matrix_free(void** matrix)
// allocator optimized for single instances
//-----------------------------------------------------------------------------
// intended for applications that frequently alloc/free a single
// fixed-size object. caller provides static storage and an in-use flag;
// we use that memory if available and otherwise fall back to the heap.
// if the application only has one object in use at a time, malloc is
// avoided; this is faster and avoids heap fragmentation.
//
// thread-safe.
/**
* Allocate <size> bytes of zeroed memory.
*
* intended for applications that frequently alloc/free a single
* fixed-size object. caller provides static storage and an in-use flag;
* we use that memory if available and otherwise fall back to the heap.
* if the application only has one object in use at a time, malloc is
* avoided; this is faster and avoids heap fragmentation.
*
* note: thread-safe despite use of shared static data.
*
* @param storage Caller-allocated memory of at least <size> bytes
* (typically a static array of bytes)
* @param in_use_flag Pointer to a flag we set when <storage> is in-use.
* @param size [bytes] to allocate
* @return allocated memory; typically = <storage>, but falls back to
* malloc if that's in-use. can return 0 (with warning) if out of memory.
*/
void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size)
{
// sanity check
@ -707,6 +832,13 @@ void* single_calloc(void* storage, volatile uintptr_t* in_use_flag, size_t size)
}
/**
* Free a memory block that had been allocated by single_calloc.
*
* @param storage Exact value passed to single_calloc.
* @param in_use_flag Exact value passed to single_calloc.
 * @param p Exact value returned by single_calloc.
*/
void single_free(void* storage, volatile uintptr_t* in_use_flag, void* p)
{
// sanity check

View File

@ -112,7 +112,7 @@ the next function to fail, but real apps should check and report errors.
[.. do something with OpenGL that uses the currently bound texture]
[at exit:]
// (done automatically, but this avoids it showing up as a leak)
* (done automatically, but this avoids it showing up as a leak)
(void)ogl_tex_free(hTexture);
@ -120,14 +120,14 @@ the next function to fail, but real apps should check and report errors.
specify internal_format and use multitexturing.
Tex t;
const uint flags = 0; // image is plain RGB, default orientation
const uint flags = 0; * image is plain RGB, default orientation
void* data = [pre-existing image]
(void)tex_wrap(w, h, 24, flags, data, &t);
Handle hCompositeAlphaMap = ogl_tex_wrap(&t, "(alpha map composite)");
(void)ogl_tex_set_filter(hCompositeAlphaMap, GL_LINEAR);
(void)ogl_tex_upload(hCompositeAlphaMap, 0, 0, GL_INTENSITY);
// (your responsibility! tex_wrap attaches a reference but it is
// removed by ogl_tex_upload.)
* (your responsibility! tex_wrap attaches a reference but it is
* removed by ogl_tex_upload.)
free(data);
[when rendering:]
@ -135,7 +135,7 @@ the next function to fail, but real apps should check and report errors.
[.. do something with OpenGL that uses the currently bound texture]
[at exit:]
// (done automatically, but this avoids it showing up as a leak)
* (done automatically, but this avoids it showing up as a leak)
(void)ogl_tex_free(hCompositeAlphaMap);
*/
@ -153,41 +153,55 @@ the next function to fail, but real apps should check and report errors.
// quality mechanism
//
/**
* Quality flags for texture uploads.
* Specify any of them to override certain aspects of the default.
*/
enum OglTexQualityFlags
{
// emphatically require full quality for this texture.
// (q_flags are invalid if this is set together with any other bit)
// rationale: the value 0 is used to indicate "use default flags" in
// ogl_tex_upload and ogl_tex_set_defaults, so this is the only
// way we can say "disregard default and do not reduce anything".
/**
* emphatically require full quality for this texture.
* (q_flags are invalid if this is set together with any other bit)
* rationale: the value 0 is used to indicate "use default flags" in
* ogl_tex_upload and ogl_tex_set_defaults, so this is the only
* way we can say "disregard default and do not reduce anything".
*/
OGL_TEX_FULL_QUALITY = 0x20,
// store the texture at half the normal bit depth
// (4 bits per pixel component, as opposed to 8).
// this increases performance on older graphics cards due to
// decreased size in vmem. it has no effect on
// compressed textures because they have a fixed internal format.
/**
* store the texture at half the normal bit depth
* (4 bits per pixel component, as opposed to 8).
* this increases performance on older graphics cards due to
* decreased size in vmem. it has no effect on
* compressed textures because they have a fixed internal format.
*/
OGL_TEX_HALF_BPP = 0x10,
// store the texture at half its original resolution.
// this increases performance on older graphics cards due to
// decreased size in vmem.
// this is useful for also reducing quality of compressed textures,
// which are not affected by OGL_TEX_HALF_BPP.
// currently only implemented for images that contain mipmaps
// (otherwise, we'd have to resample, which is slow).
// note: scaling down to 1/4, 1/8, .. is easily possible without
// extra work, so we leave some bits free for that.
/**
* store the texture at half its original resolution.
* this increases performance on older graphics cards due to
* decreased size in vmem.
* this is useful for also reducing quality of compressed textures,
* which are not affected by OGL_TEX_HALF_BPP.
* currently only implemented for images that contain mipmaps
* (otherwise, we'd have to resample, which is slow).
* note: scaling down to 1/4, 1/8, .. is easily possible without
* extra work, so we leave some bits free for that.
*/
OGL_TEX_HALF_RES = 0x01
};
// change default settings - these affect performance vs. quality.
// may be overridden for individual textures via parameter to
// ogl_tex_upload or ogl_tex_set_filter, respectively.
//
// pass 0 to keep the current setting; defaults and legal values are:
// - q_flags: OGL_TEX_FULL_QUALITY; combination of OglTexQualityFlags
// - filter: GL_LINEAR; any valid OpenGL minification filter
/**
* Change default settings - these affect performance vs. quality.
* May be overridden for individual textures via parameter to
* ogl_tex_upload or ogl_tex_set_filter, respectively.
*
* @param q_flags quality flags. Pass 0 to keep the current setting
* (initially OGL_TEX_FULL_QUALITY), or any combination of
* OglTexQualityFlags.
* @param filter mag/minification filter. Pass 0 to keep the current setting
* (initially GL_LINEAR), or any valid OpenGL minification filter.
*/
extern void ogl_tex_set_defaults(uint q_flags, GLint filter);
@ -195,30 +209,52 @@ extern void ogl_tex_set_defaults(uint q_flags, GLint filter);
// open/close
//
// load and return a handle to the texture given in <fn>.
// for a list of supported formats, see tex.h's tex_load.
/**
* Load and return a handle to the texture.
*
* @param fn VFS filename of texture.
* @param flags h_alloc flags.
* @return Handle to texture or negative LibError
* for a list of supported formats, see tex.h's tex_load.
*/
extern Handle ogl_tex_load(const char* fn, uint flags = 0);
// return Handle to an existing object, if it has been loaded and
// is still in memory; otherwise, a negative error code.
/**
* Find and return an existing texture object, if it has already been
* loaded and is still in memory.
*
* @param fn VFS filename of texture.
* @return Handle to texture or negative LibError
*/
extern Handle ogl_tex_find(const char* fn);
// make the given Tex object ready for use as an OpenGL texture
// and return a handle to it. this will be as if its contents
// had been loaded by ogl_tex_load.
//
// we need only add bookkeeping information and "wrap" it in
// a resource object (accessed via Handle), hence the name.
//
// <fn> isn't strictly needed but should describe the texture so that
// h_filename will return a meaningful comment for debug purposes.
// note: because we cannot guarantee that callers will pass distinct
// "filenames", caching is disabled for the created object. this avoids
// mistakenly reusing previous objects that share the same comment.
/**
* Make the Tex object ready for use as an OpenGL texture
* and return a handle to it. This will be as if its contents
* had been loaded by ogl_tex_load.
*
* @param t Texture object.
* @param fn filename or description of texture. not strictly needed,
* but would allow h_filename to return meaningful info for
* purposes of debugging.
* @return Handle to texture or negative LibError
*
* note: because we cannot guarantee that callers will pass distinct
* "filenames", caching is disabled for the created object. this avoids
* mistakenly reusing previous objects that share the same comment.
*
* we need only add bookkeeping information and "wrap" it in
* a resource object (accessed via Handle), hence the name.
*/
extern Handle ogl_tex_wrap(Tex* t, const char* fn = 0, uint flags = 0);
// free all resources associated with the texture and make further
// use of it impossible. (subject to refcount)
/**
* Release this texture reference. When the count reaches zero, all of
* its associated resources are freed and further use made impossible.
*
* @param ht Texture handle.
* @return LibError
*/
extern LibError ogl_tex_free(Handle& ht);
@ -229,16 +265,27 @@ extern LibError ogl_tex_free(Handle& ht);
// these must be called before uploading; this simplifies
// things and avoids calling glTexParameter twice.
// override default filter (as set above) for this texture.
// must be called before uploading (raises a warning if called afterwards).
// filter is as defined by OpenGL; it is applied for both minification and
// magnification (for rationale and details, see OglTexState)
/**
* Override default filter (see {@link #ogl_tex_set_defaults}) for
* this texture.
*
* @param filter OpenGL minification and magnification filter
* (rationale: see {@link OglTexState})
* @return LibError
*
* Must be called before uploading (raises a warning if called afterwards).
*/
extern LibError ogl_tex_set_filter(Handle ht, GLint filter);
// override default wrap mode (GL_REPEAT) for this texture.
// must be called before uploading (raises a warning if called afterwards).
// wrap is as defined by OpenGL and applies to both S and T coordinates
// (rationale: see OglTexState).
/**
* Override default wrap mode (GL_REPEAT) for this texture.
*
* @param wrap OpenGL wrap mode (for both S and T coordinates)
* (rationale: see {@link OglTexState})
* @return LibError
*
* Must be called before uploading (raises a warning if called afterwards).
*/
extern LibError ogl_tex_set_wrap(Handle ht, GLint wrap);
@ -258,19 +305,31 @@ enum OglTexAllow
OGL_TEX_ENABLE
};
// override the default decision and force/disallow use of the
// given feature. should be called from ah_override_gl_upload_caps.
/**
* Override the default decision and force/disallow use of the
* given feature. Typically called from ah_override_gl_upload_caps.
*
* @param what feature to influence
* @param allow disable/enable flag
*/
extern void ogl_tex_override(OglTexOverrides what, OglTexAllow allow);
// upload the texture to OpenGL.
// if not 0, parameters override the following:
// fmt_ovr : OpenGL format (e.g. GL_RGB) decided from bpp / Tex flags;
// q_flags_ovr : global default "quality vs. performance" flags;
// int_fmt_ovr : internal format (e.g. GL_RGB8) decided from fmt / q_flags.
//
// side effects:
// - enables texturing on TMU 0 and binds the texture to it;
// - frees the texel data! see ogl_tex_get_data.
/**
* Upload texture to OpenGL.
*
* @param fmt_ovr optional override for OpenGL format (e.g. GL_RGB),
* which is decided from bpp / Tex flags
* @param q_flags_ovr optional override for global default
* OglTexQualityFlags
* @param int_fmt_ovr optional override for OpenGL internal format
* (e.g. GL_RGB8), which is decided from fmt / q_flags.
* @return LibError
* Side Effects:
* <UL>
* <LI>enables texturing on TMU 0 and binds the texture to it;
* <LI>frees the texel data! see ogl_tex_get_data.
* </UL>
*/
extern LibError ogl_tex_upload(const Handle ht, GLenum fmt_ovr = 0, uint q_flags_ovr = 0, GLint int_fmt_ovr = 0);
@ -278,22 +337,41 @@ extern LibError ogl_tex_upload(const Handle ht, GLenum fmt_ovr = 0, uint q_flags
// return information about the texture
//
// retrieve texture dimensions and bits per pixel.
// all params are optional and filled if non-NULL.
/**
* Retrieve dimensions and bit depth of the texture.
*
* @param ht Texture handle
* @param w optional; will be filled with width
* @param h optional; will be filled with height
* @param bpp optional; will be filled with bits per pixel
* @return LibError
*/
extern LibError ogl_tex_get_size(Handle ht, uint* w, uint* h, uint* bpp);
// retrieve Tex.flags and the corresponding OpenGL format.
// the latter is determined during ogl_tex_upload and is 0 before that.
// all params are optional and filled if non-NULL.
/**
* Retrieve pixel format of the texture.
*
* @param ht Texture handle
* @param flags optional; will be filled with TexFlags
* @param fmt optional; will be filled with GL format
* (it is determined during ogl_tex_upload and 0 before then)
* @return LibError
*/
extern LibError ogl_tex_get_format(Handle ht, uint* flags, GLenum* fmt);
// retrieve pointer to texel data.
//
// note: this memory is freed after a successful ogl_tex_upload for
// this texture. after that, the pointer we retrieve is NULL but
// the function doesn't fail (negative return value) by design.
// if you still need to get at the data, add a reference before
// uploading it or read directly from OpenGL (discouraged).
/**
* Retrieve pixel data of the texture.
*
* @param ht Texture handle
* @param p will be filled with pointer to texels.
* @return LibError
*
* Note: this memory is freed after a successful ogl_tex_upload for
* this texture. After that, the pointer we retrieve is NULL but
* the function doesn't fail (negative return value) by design.
* If you still need to get at the data, add a reference before
* uploading it or read directly from OpenGL (discouraged).
*/
extern LibError ogl_tex_get_data(Handle ht, void** p);
@ -301,25 +379,49 @@ extern LibError ogl_tex_get_data(Handle ht, void** p);
// misc
//
// bind the texture to the specified unit [number] in preparation for
// using it in rendering. if <ht> is 0, texturing is disabled instead.
//
// side effects:
// - changes the active texture unit;
// - (if return value is 0:) texturing was enabled/disabled on that unit.
//
// notes:
// - assumes multitexturing is available.
// - not necessary before calling ogl_tex_upload!
// - on error, the unit's texture state is unchanged; see implementation.
/**
* Bind texture to the specified unit in preparation for using it in
* rendering.
*
* @param ht Texture handle. If 0, texturing is disabled on this unit.
* @param unit Texture Mapping Unit number, typically 0 for the first.
* @return LibError
*
* Side Effects:
* - changes the active texture unit;
* - (if successful) texturing was enabled/disabled on that unit.
*
* Notes:
* - assumes multitexturing is available.
* - not necessary before calling ogl_tex_upload!
* - on error, the unit's texture state is unchanged; see implementation.
*/
extern LibError ogl_tex_bind(Handle ht, uint unit = 0);
// apply the specified transforms (as in tex_transform) to the image.
// must be called before uploading (raises a warning if called afterwards).
/**
* (partially) Transform pixel format of the texture.
*
* @param ht Texture handle
* @param flags the TexFlags that are to be <em>changed</em>
* @return LibError
* @see tex_transform
*
* Must be called before uploading (raises a warning if called afterwards).
*/
extern LibError ogl_tex_transform(Handle ht, uint flags);
// change the pixel format to that specified by <new_flags>.
// (note: this is equivalent to ogl_tex_transform(ht, ht_flags^new_flags).
/**
* Transform pixel format of the texture.
*
* @param ht Texture handle
* @param flags desired new TexFlags indicating pixel format.
* @return LibError
* @see tex_transform
*
* Must be called before uploading (raises a warning if called afterwards).
*
* Note: this is equivalent to ogl_tex_transform(ht, ht_flags^new_flags).
*/
extern LibError ogl_tex_transform_to(Handle ht, uint new_flags);
#endif // #ifndef OGL_TEX_H__

File diff suppressed because it is too large Load Diff