
Fix compiler warnings on VS 2015, refs #3439, refs #5069.

Patch By: Stan
Reviewed By: Itms, vladislavbelov
Differential Revision: https://code.wildfiregames.com/D1262
This was SVN commit r21480.
Nicolas Auvray 2018-03-10 09:58:53 +00:00
parent 62c29bd809
commit cc67d54aeb
46 changed files with 447 additions and 379 deletions
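Most of the hunks below repeat a handful of patterns; the most common one is renaming a nested loop counter so it no longer shadows the outer one, which VS 2015 reports as C4456 ("declaration hides previous local declaration") at high warning levels. A minimal standalone sketch of that pattern, with hypothetical names rather than code from the tree:

#include <cstddef>
#include <vector>

// The inner counter is named j (renamed from i, in the style of this commit),
// so it no longer hides the outer i and VS 2015 stays quiet about C4456.
int CountDuplicates(const std::vector<int>& values)
{
    int duplicates = 0;
    for (std::size_t i = 0; i < values.size(); ++i)
        for (std::size_t j = i + 1; j < values.size(); ++j)
            if (values[i] == values[j])
                ++duplicates;
    return duplicates;
}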

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2016 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -311,13 +311,13 @@ void SkinReduceInfluences(FCDSkinController* skin, size_t maxInfluenceCount, flo
FCDSkinControllerVertex& influence = *skin->GetVertexInfluence(i);
std::vector<FCDJointWeightPair> newWeights;
for (size_t i = 0; i < influence.GetPairCount(); ++i)
for (size_t j = 0; j < influence.GetPairCount(); ++j)
{
FCDJointWeightPair* weight = influence.GetPair(i);
FCDJointWeightPair* weight = influence.GetPair(j);
for (size_t j = 0; j < newWeights.size(); ++j)
for (size_t k = 0; k < newWeights.size(); ++k)
{
FCDJointWeightPair& newWeight = newWeights[j];
FCDJointWeightPair& newWeight = newWeights[k];
if (weight->jointIndex == newWeight.jointIndex)
{
newWeight.weight += weight->weight;

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2015 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -32,15 +32,15 @@
#define mat_pad(A) (A[W][X]=A[X][W]=A[W][Y]=A[Y][W]=A[W][Z]=A[Z][W]=0,A[W][W]=1)
/** Copy nxn matrix A to C using "gets" for assignment **/
#define mat_copy(C,gets,A,n) {int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++)\
#define mat_copy(C,gets,A,n) {for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j)\
C[i][j] gets (A[i][j]);}
/** Copy transpose of nxn matrix A to C using "gets" for assignment **/
#define mat_tpose(AT,gets,A,n) {int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++)\
#define mat_tpose(AT,gets,A,n) {for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j)\
AT[i][j] gets (A[j][i]);}
/** Assign nxn matrix C the element-wise combination of A and B using "op" **/
#define mat_binop(C,gets,A,op,B,n) {int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++)\
#define mat_binop(C,gets,A,op,B,n) {for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j)\
C[i][j] gets (A[i][j]) op (B[i][j]);}
/** Multiply the upper left 3x3 parts of A and B to get AB **/
@ -276,7 +276,6 @@ float polar_decomp(HMatrix M, HMatrix Q, HMatrix S)
#define TOL 1.0e-6
HMatrix Mk, MadjTk, Ek;
float det, M_one, M_inf, MadjT_one, MadjT_inf, E_one, gamma, g1, g2;
int i, j;
mat_tpose(Mk,=,M,3);
M_one = norm_one(Mk); M_inf = norm_inf(Mk);
do {
@ -295,7 +294,7 @@ float polar_decomp(HMatrix M, HMatrix Q, HMatrix S)
} while (E_one>(M_one*TOL));
mat_tpose(Q,=,Mk,3); mat_pad(Q);
mat_mult(Mk, M, S); mat_pad(S);
for (i=0; i<3; i++) for (j=i; j<3; j++)
for (int i = 0; i < 3; i++) for (int j = i; j < 3; j++)
S[i][j] = S[j][i] = 0.5*(S[i][j]+S[j][i]);
return (det);
}
@ -325,48 +324,68 @@ float polar_decomp(HMatrix M, HMatrix Q, HMatrix S)
*/
HVect spect_decomp(HMatrix S, HMatrix U)
{
HVect kv;
double Diag[3],OffD[3]; /* OffD is off-diag (by omitted index) */
double g,h,fabsh,fabsOffDi,t,theta,c,s,tau,ta,OffDq,a,b;
static char nxt[] = {Y,Z,X};
int sweep, i, j;
mat_copy(U,=,mat_id,4);
Diag[X] = S[X][X]; Diag[Y] = S[Y][Y]; Diag[Z] = S[Z][Z];
OffD[X] = S[Y][Z]; OffD[Y] = S[Z][X]; OffD[Z] = S[X][Y];
for (sweep=20; sweep>0; sweep--) {
float sm = fabs(OffD[X])+fabs(OffD[Y])+fabs(OffD[Z]);
if (sm==0.0) break;
for (i=Z; i>=X; i--) {
int p = nxt[i]; int q = nxt[p];
fabsOffDi = fabs(OffD[i]);
g = 100.0*fabsOffDi;
if (fabsOffDi>0.0) {
h = Diag[q] - Diag[p];
fabsh = fabs(h);
if (fabsh+g==fabsh) {
t = OffD[i]/h;
} else {
theta = 0.5*h/OffD[i];
t = 1.0/(fabs(theta)+sqrt(theta*theta+1.0));
if (theta<0.0) t = -t;
HVect kv;
double Diag[3], OffD[3]; /* OffD is off-diag (by omitted index) */
double g, h, fabsh, fabsOffDi, t, theta, c, s, tau, ta, OffDq, a, b;
static char nxt[] = {Y, Z, X};
mat_copy(U, =, mat_id, 4);
Diag[X] = S[X][X];
Diag[Y] = S[Y][Y];
Diag[Z] = S[Z][Z];
OffD[X] = S[Y][Z];
OffD[Y] = S[Z][X];
OffD[Z] = S[X][Y];
for (int sweep = 20; sweep > 0; --sweep)
{
float sm = fabs(OffD[X]) + fabs(OffD[Y]) + fabs(OffD[Z]);
if (sm == 0.0)
break;
for (int i = Z; i >= X; --i)
{
int p = nxt[i];
int q = nxt[p];
fabsOffDi = fabs(OffD[i]);
g = 100.0 * fabsOffDi;
if (fabsOffDi > 0.0)
{
h = Diag[q] - Diag[p];
fabsh = fabs(h);
if (fabsh + g == fabsh)
{
t = OffD[i] / h;
}
else
{
theta = 0.5 * h / OffD[i];
t = 1.0 / (fabs(theta) + sqrt(theta * theta + 1.0));
if (theta < 0.0)
t = -t;
}
c = 1.0 / sqrt(t * t + 1.0);
s = t * c;
tau = s / (c + 1.0);
ta = t * OffD[i];
OffD[i] = 0.0;
Diag[p] -= ta;
Diag[q] += ta;
OffDq = OffD[q];
OffD[q] -= s * (OffD[p] + tau * OffD[q]);
OffD[p] += s * (OffDq - tau * OffD[p]);
for (int j = Z; j >= X; --j)
{
a = U[j][p];
b = U[j][q];
U[j][p] -= s * (b + tau * a);
U[j][q] += s * (a - tau * b);
}
}
}
c = 1.0/sqrt(t*t+1.0); s = t*c;
tau = s/(c+1.0);
ta = t*OffD[i]; OffD[i] = 0.0;
Diag[p] -= ta; Diag[q] += ta;
OffDq = OffD[q];
OffD[q] -= s*(OffD[p] + tau*OffD[q]);
OffD[p] += s*(OffDq - tau*OffD[p]);
for (j=Z; j>=X; j--) {
a = U[j][p]; b = U[j][q];
U[j][p] -= s*(b + tau*a);
U[j][q] += s*(a - tau*b);
}
}
}
}
kv.x = Diag[X]; kv.y = Diag[Y]; kv.z = Diag[Z]; kv.w = 1.0;
return (kv);
kv.x = Diag[X];
kv.y = Diag[Y];
kv.z = Diag[Z];
kv.w = 1.0;
return kv;
}
/******* Spectral Axis Adjustment *******/
@ -387,7 +406,7 @@ Quat snuggle(Quat q, HVect *k)
else {a[3]=a[2]; a[2]=a[1]; a[1]=a[0]; a[0]=a[3];}
Quat p;
float ka[4];
int i, turn = -1;
int turn = -1;
ka[X] = k->x; ka[Y] = k->y; ka[Z] = k->z;
if (ka[X]==ka[Y]) {if (ka[X]==ka[Z]) turn = W; else turn = Z;}
else {if (ka[X]==ka[Z]) turn = Y; else if (ka[Y]==ka[Z]) turn = X;}
@ -395,8 +414,8 @@ Quat snuggle(Quat q, HVect *k)
Quat qtoz, qp;
unsigned neg[3], win;
double mag[3], t;
static Quat qxtoz = {0,SQRTHALF,0,SQRTHALF};
static Quat qytoz = {SQRTHALF,0,0,SQRTHALF};
static Quat qxtoz = {.0f, static_cast<float>(SQRTHALF), .0f, static_cast<float>(SQRTHALF)};
static Quat qytoz = {static_cast<float>(SQRTHALF), .0f, .0f, static_cast<float>(SQRTHALF)};
static Quat qppmm = { 0.5, 0.5,-0.5,-0.5};
static Quat qpppp = { 0.5, 0.5, 0.5, 0.5};
static Quat qmpmm = {-0.5, 0.5,-0.5,-0.5};
@ -413,7 +432,7 @@ Quat snuggle(Quat q, HVect *k)
mag[0] = (double)q.z*q.z+(double)q.w*q.w-0.5;
mag[1] = (double)q.x*q.z-(double)q.y*q.w;
mag[2] = (double)q.y*q.z+(double)q.x*q.w;
for (i=0; i<3; i++) if ((neg[i] = (mag[i]<0.0)) != 0) mag[i] = -mag[i];
for (int i = 0; i < 3; ++i) if ((neg[i] = (mag[i] < 0.0)) != 0) mag[i] = -mag[i];
if (mag[0]>mag[1]) {if (mag[0]>mag[2]) win = 0; else win = 2;}
else {if (mag[1]>mag[2]) win = 1; else win = 2;}
switch (win) {
@ -430,7 +449,7 @@ Quat snuggle(Quat q, HVect *k)
unsigned lo, hi, neg[4], par = 0;
double all, big, two;
qa[0] = q.x; qa[1] = q.y; qa[2] = q.z; qa[3] = q.w;
for (i=0; i<4; i++) {
for (int i = 0; i < 4; ++i) {
pa[i] = 0.0;
if ((neg[i] = (qa[i]<0.0)) != 0) qa[i] = -qa[i];
par ^= neg[i];

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2013 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -244,12 +244,12 @@ public:
// Iterate over all curves to find the earliest and latest keys
const FCDAnimated* anim = transform->GetAnimated();
const FCDAnimationCurveListList& curvesList = anim->GetCurves();
for (size_t j = 0; j < curvesList.size(); ++j)
for (size_t k = 0; k < curvesList.size(); ++k)
{
const FCDAnimationCurveTrackList& curves = curvesList[j];
for (size_t k = 0; k < curves.size(); ++k)
const FCDAnimationCurveTrackList& curves = curvesList[k];
for (size_t l = 0; l < curves.size(); ++l)
{
const FCDAnimationCurve* curve = curves[k];
const FCDAnimationCurve* curve = curves[l];
timeStart = std::min(timeStart, curve->GetKeys()[0]->input);
timeEnd = std::max(timeEnd, curve->GetKeys()[curve->GetKeyCount()-1]->input);
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2012 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -128,7 +128,7 @@ public:
SColor4ub EvaluateTerrainDiffuseFactor(const CVector3D& normal) const
{
float dot = -normal.Dot(m_SunDir);
int c = clamp((int)(dot * 255), 0, 255);
u8 c = clamp(static_cast<u8>(dot * 255), static_cast<u8>(0), static_cast<u8>(255));
return SColor4ub(c, c, c, 255);
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2015 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -78,16 +78,12 @@ CMaterial CMaterialManager::LoadMaterial(const VfsPath& pathname)
#undef AT
#undef EL
CMaterial material;
XMBElement root = xeroFile.GetRoot();
CPreprocessorWrapper preprocessor;
preprocessor.AddDefine("CFG_FORCE_ALPHATEST", g_Renderer.m_Options.m_ForceAlphaTest ? "1" : "0");
CMaterial material;
material.AddStaticUniform("qualityLevel", CVector4D(qualityLevel, 0, 0, 0));
CVector4D vec(qualityLevel,0,0,0);
material.AddStaticUniform("qualityLevel", vec);
XMBElement root = xeroFile.GetRoot();
XERO_ITER_EL(root, node)
{
int token = node.GetNodeName();

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2016 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -368,20 +368,17 @@ std::vector<u8> CObjectBase::CalculateVariationKey(const std::vector<std::set<CS
}
choices.push_back(match);
// Remember which props were chosen, so we can call CalculateVariationKey on them
// at the end.
Variant& var ((*grp)[match]);
for (std::vector<Prop>::iterator it = var.m_Props.begin(); it != var.m_Props.end(); ++it)
{
// Erase all existing props which are overridden by this variant:
for (std::vector<Prop>::iterator it = var.m_Props.begin(); it != var.m_Props.end(); ++it)
chosenProps.erase(it->m_PropPointName);
// and then insert the new ones:
for (std::vector<Prop>::iterator it = var.m_Props.begin(); it != var.m_Props.end(); ++it)
if (! it->m_ModelName.empty())
chosenProps.insert(make_pair(it->m_PropPointName, it->m_ModelName));
}
// Erase all existing props which are overridden by this variant:
Variant& var((*grp)[match]);
for (const Prop& prop : var.m_Props)
chosenProps.erase(prop.m_PropPointName);
// and then insert the new ones:
for (const Prop& prop : var.m_Props)
if (!prop.m_ModelName.empty())
chosenProps.insert(make_pair(prop.m_PropPointName, prop.m_ModelName));
}
// Load each prop, and add their CalculateVariationKey to our key:
@ -586,16 +583,13 @@ std::set<CStr> CObjectBase::CalculateRandomRemainingSelections(rng_t& rng, const
// Remember which props were chosen, so we can call CalculateRandomVariation on them
// at the end.
Variant& var ((*grp)[match]);
for (std::vector<Prop>::iterator it = var.m_Props.begin(); it != var.m_Props.end(); ++it)
{
// Erase all existing props which are overridden by this variant:
for (std::vector<Prop>::iterator it = var.m_Props.begin(); it != var.m_Props.end(); ++it)
chosenProps.erase(it->m_PropPointName);
// and then insert the new ones:
for (std::vector<Prop>::iterator it = var.m_Props.begin(); it != var.m_Props.end(); ++it)
if (! it->m_ModelName.empty())
chosenProps.insert(make_pair(it->m_PropPointName, it->m_ModelName));
}
// Erase all existing props which are overridden by this variant:
for (const Prop& prop : var.m_Props)
chosenProps.erase(prop.m_PropPointName);
// and then insert the new ones:
for (const Prop& prop : var.m_Props)
if (!prop.m_ModelName.empty())
chosenProps.insert(make_pair(prop.m_PropPointName, prop.m_ModelName));
}
// Load each prop, and add their required selections to ours:

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2015 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -91,15 +91,15 @@ CTerrainTextureEntry::CTerrainTextureEntry(CTerrainPropertiesPtr properties, con
ENSURE(textures_element.GetNodeName() == el_texture);
CStr name;
VfsPath path;
XERO_ITER_ATTR(textures_element, se)
VfsPath terrainTexturePath;
XERO_ITER_ATTR(textures_element, relativePath)
{
if (se.Name == at_file)
path = VfsPath("art/textures/terrain") / se.Value.FromUTF8();
else if (se.Name == at_name)
name = se.Value;
if (relativePath.Name == at_file)
terrainTexturePath = VfsPath("art/textures/terrain") / relativePath.Value.FromUTF8();
else if (relativePath.Name == at_name)
name = relativePath.Value;
}
samplers.emplace_back(name, path);
samplers.emplace_back(name, terrainTexturePath);
}
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2012 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -300,12 +300,12 @@ void CTextRenderer::Render()
vertexes[idx*4+3].x = g->x1 + x;
vertexes[idx*4+3].y = g->y1 + y;
indexes[idx*6+0] = idx*4+0;
indexes[idx*6+1] = idx*4+1;
indexes[idx*6+2] = idx*4+2;
indexes[idx*6+3] = idx*4+2;
indexes[idx*6+4] = idx*4+3;
indexes[idx*6+5] = idx*4+0;
indexes[idx*6+0] = static_cast<u16>(idx*4+0);
indexes[idx*6+1] = static_cast<u16>(idx*4+1);
indexes[idx*6+2] = static_cast<u16>(idx*4+2);
indexes[idx*6+3] = static_cast<u16>(idx*4+2);
indexes[idx*6+4] = static_cast<u16>(idx*4+3);
indexes[idx*6+5] = static_cast<u16>(idx*4+0);
x += g->xadvance;
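Here idx is a size_t, so expressions such as idx*4+1 are size_t as well, and storing them into the u16 index buffer draws a truncation warning (C4267/C4244) on 64-bit VS 2015 builds; the explicit static_cast<u16> acknowledges the narrowing. A standalone sketch of the same idea, using hypothetical names:

#include <cstddef>
#include <cstdint>

// Two triangles per glyph quad. base + order[n] is a size_t, so writing it
// into a 16-bit index needs an explicit cast to silence the truncation warning.
void WriteQuadIndices(std::uint16_t* indexes, std::size_t idx)
{
    const std::size_t base = idx * 4;
    const std::size_t order[6] = {0, 1, 2, 2, 3, 0};
    for (std::size_t n = 0; n < 6; ++n)
        indexes[idx * 6 + n] = static_cast<std::uint16_t>(base + order[n]);
}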

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2015 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -80,8 +80,8 @@ static const size_t cacheLineSize = 64; // (L2)
// MMU pages
//
static const size_t pageSize = 0x1000; // 4 KB
static const size_t largePageSize = 0x200000; // 2 MB
static const size_t g_PageSize = 4 * 1024; // 4 KB
static const size_t g_LargePageSize = 2 * 1024 * 1024; // 2 MB
//
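The pageSize/largePageSize globals gain a g_ prefix because several functions touched by this commit take parameters named pageSize (see the TLB helpers further down), and on VS 2015 that shadowing is reported as C4459 ("declaration hides global declaration"). A hypothetical sketch of the renamed form:

#include <cstddef>

static const std::size_t g_PageSize = 4 * 1024; // formerly `pageSize`

// The parameter can keep its natural name now that the global is prefixed,
// so it no longer hides a global and C4459 is not emitted.
std::size_t PagesNeeded(std::size_t bytes, std::size_t pageSize = g_PageSize)
{
    return (bytes + pageSize - 1) / pageSize;
}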

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -78,7 +78,7 @@ struct Allocator_VM
}
};
template<size_t commitSize = largePageSize, vm::PageType pageType = vm::kDefault, int prot = PROT_READ|PROT_WRITE>
template<size_t commitSize = g_LargePageSize, vm::PageType pageType = vm::kDefault, int prot = PROT_READ|PROT_WRITE>
struct Allocator_AddressSpace
{
void* allocate(size_t size)

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -39,7 +39,7 @@ namespace Allocators {
// Growth
// O(N) allocations, O(1) wasted space.
template<size_t increment = pageSize>
template<size_t increment = g_PageSize>
struct Growth_Linear
{
size_t operator()(size_t oldSize) const
@ -210,7 +210,7 @@ class Storage_Commit
NONCOPYABLE(Storage_Commit);
public:
Storage_Commit(size_t maxCapacity_)
: maxCapacity(Align<pageSize>(maxCapacity_)) // see Expand
: maxCapacity(Align<g_PageSize>(maxCapacity_)) // see Expand
, storage(allocator.allocate(maxCapacity))
, capacity(0)
{
@ -242,7 +242,7 @@ public:
// reduce the number of expensive commits by accurately
// reflecting the actual capacity. this is safe because
// we also round up maxCapacity.
newCapacity = Align<pageSize>(newCapacity);
newCapacity = Align<g_PageSize>(newCapacity);
if(newCapacity > maxCapacity)
return false;
if(!vm::Commit(Address()+capacity, newCapacity-capacity))
@ -269,7 +269,7 @@ class Storage_AutoCommit
NONCOPYABLE(Storage_AutoCommit);
public:
Storage_AutoCommit(size_t maxCapacity_)
: maxCapacity(Align<pageSize>(maxCapacity_)) // match user's expectation
: maxCapacity(Align<g_PageSize>(maxCapacity_)) // match user's expectation
, storage(allocator.allocate(maxCapacity))
{
vm::BeginOnDemandCommits();

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -45,7 +45,7 @@ static Status validate_da(DynArray* da)
// WARN_RETURN(ERR::_1);
// note: don't check if base is page-aligned -
// might not be true for 'wrapped' mem regions.
if(!IsAligned(max_size_pa, pageSize))
if(!IsAligned(max_size_pa, g_PageSize))
WARN_RETURN(ERR::_3);
if(cur_size > max_size_pa)
WARN_RETURN(ERR::_4);
@ -61,7 +61,7 @@ static Status validate_da(DynArray* da)
Status da_alloc(DynArray* da, size_t max_size)
{
ENSURE(max_size != 0);
const size_t max_size_pa = Align<pageSize>(max_size);
const size_t max_size_pa = Align<g_PageSize>(max_size);
u8* p = (u8*)vm::ReserveAddressSpace(max_size_pa);
if(!p)
@ -95,8 +95,8 @@ Status da_set_size(DynArray* da, size_t new_size)
CHECK_DA(da);
// determine how much to add/remove
const size_t cur_size_pa = Align<pageSize>(da->cur_size);
const size_t new_size_pa = Align<pageSize>(new_size);
const size_t cur_size_pa = Align<g_PageSize>(da->cur_size);
const size_t new_size_pa = Align<g_PageSize>(new_size);
const ssize_t size_delta_pa = (ssize_t)new_size_pa - (ssize_t)cur_size_pa;
// not enough memory to satisfy this expand request: abort.

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -94,7 +94,7 @@ Status mem_Protect(u8* p, size_t size, int prot)
void* page_aligned_alloc(size_t size)
{
const size_t alignedSize = Align<pageSize>(size);
const size_t alignedSize = Align<g_PageSize>(size);
u8* p = 0;
RETURN_0_IF_ERR(mem_Reserve(alignedSize, &p));
RETURN_0_IF_ERR(mem_Commit(p, alignedSize, PROT_READ|PROT_WRITE));
@ -106,7 +106,7 @@ void page_aligned_free(void* p, size_t size)
{
if(!p)
return;
ENSURE(IsAligned(p, pageSize));
const size_t alignedSize = Align<pageSize>(size);
ENSURE(IsAligned(p, g_PageSize));
const size_t alignedSize = Align<g_PageSize>(size);
(void)mem_Release((u8*)p, alignedSize);
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -29,6 +29,19 @@
#if MSC_VERSION
# pragma warning(disable:4710) // function not inlined
#if _MSC_VER > 1800
# pragma warning(disable:4626) // assignment operator was implicitly defined as deleted because a base class assignment operator is inaccessible or deleted
# pragma warning(disable:4625) // copy constructor was implicitly defined as deleted
# pragma warning(disable:4668) // 'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
# pragma warning(disable:5027) // 'type': move assignment operator was implicitly defined as deleted
# pragma warning(disable:4365) // signed/unsigned mismatch
# pragma warning(disable:4619) // there is no warning for 'warning'
# pragma warning(disable:5031) // #pragma warning(pop): likely mismatch, popping warning state pushed in different file
# pragma warning(disable:5026) // 'type': move constructor was implicitly defined as deleted
# pragma warning(disable:4820) // incorrect padding
# pragma warning(disable:4514) // unreferenced inlined function has been removed
# pragma warning(disable:4571) // Informational: catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught
#endif
#endif
#if ICC_VERSION
# pragma warning(push)

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -46,7 +46,7 @@ class File
{
public:
File()
: pathname(), fd(-1)
: m_PathName(), m_FileDescriptor(-1)
{
}
@ -60,40 +60,40 @@ public:
Close();
}
Status Open(const OsPath& pathname, int oflag)
Status Open(const OsPath& pathName, int openFlag)
{
Status ret = FileOpen(pathname, oflag);
Status ret = FileOpen(pathName, openFlag);
RETURN_STATUS_IF_ERR(ret);
this->pathname = pathname;
this->fd = (int)ret;
this->oflag = oflag;
m_PathName = pathName;
m_FileDescriptor = static_cast<int>(ret);
m_OpenFlag = openFlag;
return INFO::OK;
}
void Close()
{
FileClose(fd);
FileClose(m_FileDescriptor);
}
const OsPath& Pathname() const
{
return pathname;
return m_PathName;
}
int Descriptor() const
{
return fd;
return m_FileDescriptor;
}
int Flags() const
{
return oflag;
return m_OpenFlag;
}
private:
OsPath pathname;
int fd;
int oflag;
OsPath m_PathName;
int m_FileDescriptor;
int m_OpenFlag;
};
typedef shared_ptr<File> PFile;
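The File members gain the project's m_ prefix, which also removes the parameter-hides-member shadowing (C4458 on VS 2015) that the old Open(const OsPath& pathname, int oflag) produced against the pathname member. A minimal hypothetical sketch of that situation:

#include <string>

class ConfigFile
{
public:
    // The member is m_Path, so the parameter `path` no longer hides it and
    // VS 2015's C4458 ("declaration hides class member") is not triggered.
    void Open(const std::string& path)
    {
        m_Path = path;
    }

private:
    std::string m_Path; // formerly `path`, which the parameter above shadowed
};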

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2017 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -64,31 +64,31 @@ static inline UniqueRange Allocate(size_t size, size_t alignment = maxSectorSize
// but also applies to synchronous I/O and has shorter/nicer names.)
struct Operation
{
// @param buf can be 0, in which case temporary block buffers are allocated.
// @param m_Buffer can be 0, in which case temporary block buffers are allocated.
// otherwise, it must be aligned and padded to the I/O alignment, e.g. via
// io::Allocate.
Operation(const File& file, void* buf, off_t size, off_t offset = 0)
: fd(file.Descriptor()), opcode((file.Flags() & O_WRONLY)? LIO_WRITE : LIO_READ)
, offset(offset), size(size), buf((void*)buf)
: m_FileDescriptor(file.Descriptor()), m_OpenFlag((file.Flags() & O_WRONLY)? LIO_WRITE : LIO_READ)
, m_Offset(offset), m_Size(size), m_Buffer(buf)
{
}
void Validate() const
{
ENSURE(fd >= 0);
ENSURE(opcode == LIO_READ || opcode == LIO_WRITE);
ENSURE(m_FileDescriptor >= 0);
ENSURE(m_OpenFlag == LIO_READ || m_OpenFlag == LIO_WRITE);
ENSURE(offset >= 0);
ENSURE(size >= 0);
// buf can legitimately be 0 (see above)
ENSURE(m_Offset >= 0);
ENSURE(m_Size >= 0);
// m_Buffer can legitimately be 0 (see above)
}
int fd;
int opcode;
int m_FileDescriptor;
int m_OpenFlag;
off_t offset;
off_t size;
void* buf;
off_t m_Offset;
off_t m_Size;
void* m_Buffer;
};
@ -123,14 +123,14 @@ struct Parameters
if(blockSize != 0)
{
ENSURE(is_pow2(blockSize));
ENSURE(pageSize <= blockSize); // (don't bother checking an upper bound)
ENSURE(g_PageSize <= blockSize); // (don't bother checking an upper bound)
}
ENSURE(1 <= queueDepth && queueDepth <= maxQueueDepth);
ENSURE(IsAligned(op.offset, alignment));
ENSURE(IsAligned(op.m_Offset, alignment));
// op.size doesn't need to be aligned
ENSURE(IsAligned(op.buf, alignment));
ENSURE(IsAligned(op.m_Buffer, alignment));
}
// (ATTO only allows 10, which improves upon 8)
@ -186,18 +186,18 @@ public:
ControlBlockRingBuffer(const Operation& op, const Parameters& p)
: controlBlocks() // zero-initialize
{
const size_t blockSize = p.blockSize? p.blockSize : (size_t)op.size;
const size_t blockSize = p.blockSize? p.blockSize : static_cast<size_t>(op.m_Size);
const bool temporaryBuffersRequested = (op.buf == 0);
const bool temporaryBuffersRequested = (op.m_Buffer == 0);
if(temporaryBuffersRequested)
buffers = io::Allocate(blockSize * p.queueDepth, p.alignment);
for(size_t i = 0; i < ARRAY_SIZE(controlBlocks); i++)
{
aiocb& cb = operator[](i);
cb.aio_fildes = op.fd;
cb.aio_fildes = op.m_FileDescriptor;
cb.aio_nbytes = blockSize;
cb.aio_lio_opcode = op.opcode;
cb.aio_lio_opcode = op.m_OpenFlag;
if(temporaryBuffersRequested)
cb.aio_buf = (volatile void*)(uintptr_t(buffers.get()) + i * blockSize);
}
@ -242,17 +242,17 @@ static inline Status Run(const Operation& op, const Parameters& p = Parameters()
COMPILER_FENCE;
#endif
const off_t numBlocks = p.blockSize? (off_t)DivideRoundUp((u64)op.size, (u64)p.blockSize) : 1;
for(off_t blocksIssued = 0, blocksCompleted = 0; blocksCompleted < numBlocks; blocksCompleted++)
size_t numBlocks = p.blockSize? DivideRoundUp(static_cast<size_t>(op.m_Size), p.blockSize) : 1;
for(size_t blocksIssued = 0, blocksCompleted = 0; blocksCompleted < numBlocks; blocksCompleted++)
{
for(; blocksIssued != numBlocks && blocksIssued < blocksCompleted + (off_t)p.queueDepth; blocksIssued++)
{
aiocb& cb = controlBlockRingBuffer[blocksIssued];
cb.aio_offset = op.offset + blocksIssued * p.blockSize;
if(op.buf)
cb.aio_buf = (volatile void*)(uintptr_t(op.buf) + blocksIssued * p.blockSize);
cb.aio_offset = op.m_Offset + blocksIssued * p.blockSize;
if(op.m_Buffer)
cb.aio_buf = (volatile void*)(uintptr_t(op.m_Buffer) + blocksIssued * p.blockSize);
if(blocksIssued == numBlocks-1)
cb.aio_nbytes = round_up(size_t(op.size - blocksIssued * p.blockSize), size_t(p.alignment));
cb.aio_nbytes = round_up(size_t(op.m_Size - blocksIssued * p.blockSize), size_t(p.alignment));
RETURN_STATUS_FROM_CALLBACK(issueHook(cb));
@ -268,7 +268,7 @@ static inline Status Run(const Operation& op, const Parameters& p = Parameters()
#if ENABLE_IO_STATS
COMPILER_FENCE;
const double t1 = timer_Time();
const off_t totalSize = p.blockSize? numBlocks*p.blockSize : op.size;
const off_t totalSize = p.blockSize? numBlocks*p.blockSize : op.m_Size;
debug_printf("IO: %.2f MB/s (%.2f)\n", totalSize/(t1-t0)/1e6, (t1-t0)*1e3);
#endif
@ -305,7 +305,7 @@ static inline Status Store(const OsPath& pathname, const void* data, size_t size
io::Operation op(file, (void*)data, size);
#if OS_WIN
(void)waio_Preallocate(op.fd, (off_t)size);
UNUSED2(waio_Preallocate(op.m_FileDescriptor, (off_t)size));
#endif
RETURN_STATUS_IF_ERR(io::Run(op, p, completedHook, issueHook));

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -33,7 +33,7 @@ static const size_t BLOCK_SIZE = 512*KiB;
WriteBuffer::WriteBuffer()
: m_capacity(pageSize), m_data((u8*)rtl_AllocateAligned(m_capacity, maxSectorSize), AlignedDeleter()), m_size(0)
: m_capacity(g_PageSize), m_data((u8*)rtl_AllocateAligned(m_capacity, maxSectorSize), AlignedDeleter()), m_size(0)
{
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2013 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -24,7 +24,7 @@
#include "lib/frequency_filter.h"
static const double errorTolerance = 0.25;
static const double sensitivity = 0.10;
static const double g_Sensitivity = 0.10;
static const double sampleTime = 2.0; // seconds
/**
@ -192,7 +192,7 @@ class FrequencyFilter : public IFrequencyFilter
NONCOPYABLE(FrequencyFilter);
public:
FrequencyFilter(double resolution, double expectedFrequency)
: m_frequencyEstimator(resolution), m_controller(expectedFrequency), m_iirFilter(sensitivity, expectedFrequency)
: m_frequencyEstimator(resolution), m_controller(expectedFrequency), m_iirFilter(g_Sensitivity, expectedFrequency)
, m_stableFrequency((int)expectedFrequency), m_smoothedFrequency(expectedFrequency), m_averagedFrequency(expectedFrequency)
, m_numberOfSamples((int)(sampleTime * expectedFrequency) + 1)
{

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2015 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -33,7 +33,10 @@
# pragma warning(disable:4103) // alignment changed after including header (boost has #pragma pack/pop in separate headers)
# pragma warning(disable:4127) // conditional expression is constant; rationale: see STMT in lib.h.
# pragma warning(disable:4324) // structure was padded due to __declspec(align())
# pragma warning(disable:4574) // macro is defined to be 0
#if MSC_VERSION <= 140
# pragma warning(disable:4351) // yes, default init of array entries is desired
#endif
# pragma warning(disable:4355) // 'this' used in base member initializer list
# pragma warning(disable:4512) // assignment operator could not be generated
# pragma warning(disable:4718) // recursive call has no side effects, deleting
@ -61,7 +64,9 @@
# pragma warning(default:4545 4546 4547 4549) // ill-formed comma expressions; exclude 4548 since _SECURE_SCL triggers it frequently
# pragma warning(default:4557) // __assume contains effect
//# pragma warning(default:4710) // function not inlined (often happens in STL)
#if MSC_VERSION <= 140
# pragma warning(default:4836) // local types or unnamed types cannot be used as template arguments
#endif
# pragma warning(default:4905) // wide string literal cast to LPSTR
# pragma warning(default:4906) // string literal cast to LPWSTR
# pragma warning(default:4928) // illegal copy-initialization; more than one user-defined conversion has been implicitly applied

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -42,17 +42,17 @@ static void AddCache(const x86_x64::Cache& cache)
{
ENSURE(cache.Validate());
if(cache.type == x86_x64::Cache::kData || cache.type == x86_x64::Cache::kUnified)
caches[L1D + cache.level-1] = cache;
if(cache.type == x86_x64::Cache::kInstruction || cache.type == x86_x64::Cache::kUnified)
caches[L1I + cache.level-1] = cache;
if(cache.m_Type == x86_x64::Cache::kData || cache.m_Type == x86_x64::Cache::kUnified)
caches[L1D + cache.m_Level-1] = cache;
if(cache.m_Type == x86_x64::Cache::kInstruction || cache.m_Type == x86_x64::Cache::kUnified)
caches[L1I + cache.m_Level-1] = cache;
}
static void AddTLB(const x86_x64::Cache& tlb)
{
ENSURE(tlb.Validate());
ENSURE(tlb.level == 1 || tlb.level == 2); // see maxTLBs
ENSURE(tlb.m_Level == 1 || tlb.m_Level == 2); // see maxTLBs
ENSURE(numTLBs < maxTLBs);
caches[TLB+numTLBs++] = tlb;
@ -77,10 +77,10 @@ static x86_x64::Cache L1Cache(u32 reg, x86_x64::Cache::Type type)
const size_t totalSize = bits(reg, 24, 31)*KiB;
if(lineSize != 0 && associativity != 0 && totalSize != 0)
{
cache.numEntries = totalSize / lineSize;
cache.entrySize = lineSize;
cache.associativity = associativity;
cache.sharedBy = 1;
cache.m_NumEntries = totalSize / lineSize;
cache.m_EntrySize = lineSize;
cache.m_Associativity = associativity;
cache.m_SharedBy = 1;
}
return cache;
}
@ -102,10 +102,10 @@ static x86_x64::Cache L2Cache(u32 reg, x86_x64::Cache::Type type)
const size_t totalSize = bits(reg, 16, 31)*KiB;
if(lineSize != 0 && idxAssociativity != 0 && totalSize != 0)
{
cache.numEntries = totalSize / lineSize;
cache.entrySize = lineSize;
cache.associativity = associativityTable[idxAssociativity];
cache.sharedBy = 1;
cache.m_NumEntries = totalSize / lineSize;
cache.m_EntrySize = lineSize;
cache.m_Associativity = associativityTable[idxAssociativity];
cache.m_SharedBy = 1;
}
return cache;
}
@ -122,10 +122,10 @@ static x86_x64::Cache L3Cache(u32 reg, x86_x64::Cache::Type type)
// NB: some Athlon 64 X2 models have no L3 cache
if(lineSize != 0 && idxAssociativity != 0 && totalSize != 0)
{
cache.numEntries = totalSize / lineSize;
cache.entrySize = lineSize;
cache.associativity = associativityTable[idxAssociativity];
cache.sharedBy = 1;
cache.m_NumEntries = totalSize / lineSize;
cache.m_EntrySize = lineSize;
cache.m_Associativity = associativityTable[idxAssociativity];
cache.m_SharedBy = 1;
}
return cache;
}
@ -139,10 +139,10 @@ static x86_x64::Cache TLB1(u32 reg, size_t bitOffset, size_t pageSize, x86_x64::
const size_t associativity = bits(reg, bitOffset+8, bitOffset+15); // 0 = reserved
if(numEntries != 0 && associativity != 0)
{
cache.numEntries = numEntries;
cache.entrySize = pageSize;
cache.associativity = associativity;
cache.sharedBy = 1;
cache.m_NumEntries = numEntries;
cache.m_EntrySize = pageSize;
cache.m_Associativity = associativity;
cache.m_SharedBy = 1;
}
return cache;
}
@ -156,10 +156,10 @@ static x86_x64::Cache TLB2(u32 reg, size_t bitOffset, size_t pageSize, x86_x64::
const size_t idxAssociativity = bits(reg, bitOffset+12, bitOffset+15); // 0 = disabled
if(numEntries != 0 && idxAssociativity != 0)
{
cache.numEntries = numEntries;
cache.entrySize = pageSize;
cache.associativity = associativityTable[idxAssociativity];
cache.sharedBy = 1;
cache.m_NumEntries = numEntries;
cache.m_EntrySize = pageSize;
cache.m_Associativity = associativityTable[idxAssociativity];
cache.m_SharedBy = 1;
}
return cache;
}
@ -233,10 +233,10 @@ static bool DetectCache()
x86_x64::Cache cache;
cache.Initialize(level, type);
cache.entrySize = (size_t)bits(regs.ebx, 0, 11)+1; // (yes, this also uses +1 encoding)
cache.associativity = (size_t)bits(regs.ebx, 22, 31)+1;
cache.sharedBy = (size_t)bits(regs.eax, 14, 25)+1;
cache.numEntries = cache.associativity * partitions * sets;
cache.m_EntrySize = static_cast<size_t>(bits(regs.ebx, 0, 11) + 1); // (yes, this also uses +1 encoding)
cache.m_Associativity = static_cast<size_t>(bits(regs.ebx, 22, 31) + 1);
cache.m_SharedBy = static_cast<size_t>(bits(regs.eax, 14, 25) + 1);
cache.m_NumEntries = cache.m_Associativity * partitions * sets;
AddCache(cache);
}
@ -596,10 +596,10 @@ static void DetectCacheAndTLB(size_t& descriptorFlags)
x86_x64::Cache cache;
cache.Initialize(characteristics->Level(), characteristics->Type());
cache.numEntries = characteristics->NumEntries();
cache.entrySize = characteristics->EntrySize();
cache.associativity = characteristics->associativity;
cache.sharedBy = 1; // (safe default)
cache.m_NumEntries = characteristics->NumEntries();
cache.m_EntrySize = characteristics->EntrySize();
cache.m_Associativity = characteristics->associativity;
cache.m_SharedBy = 1; // (safe default)
if(characteristics->IsTLB())
AddTLB(cache);
else
@ -632,12 +632,12 @@ static Status DetectCacheAndTLB()
// sanity checks
for(size_t idxLevel = 0; idxLevel < x86_x64::Cache::maxLevels; idxLevel++)
{
ENSURE(caches[L1D+idxLevel].type == x86_x64::Cache::kData || caches[L1D+idxLevel].type == x86_x64::Cache::kUnified);
ENSURE(caches[L1D+idxLevel].level == idxLevel+1);
ENSURE(caches[L1D+idxLevel].m_Type == x86_x64::Cache::kData || caches[L1D+idxLevel].m_Type == x86_x64::Cache::kUnified);
ENSURE(caches[L1D+idxLevel].m_Level == idxLevel+1);
ENSURE(caches[L1D+idxLevel].Validate() == true);
ENSURE(caches[L1I+idxLevel].type == x86_x64::Cache::kInstruction || caches[L1I+idxLevel].type == x86_x64::Cache::kUnified);
ENSURE(caches[L1I+idxLevel].level == idxLevel+1);
ENSURE(caches[L1I+idxLevel].m_Type == x86_x64::Cache::kInstruction || caches[L1I+idxLevel].m_Type == x86_x64::Cache::kUnified);
ENSURE(caches[L1I+idxLevel].m_Level == idxLevel+1);
ENSURE(caches[L1I+idxLevel].Validate() == true);
}
for(size_t i = 0; i < numTLBs; i++)

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2013 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -44,69 +44,69 @@ struct Cache // POD (may be used before static constructors)
/**
* 1..maxLevels
**/
size_t level;
size_t m_Level;
/**
* never kNull
**/
Type type;
Type m_Type;
/**
* if 0, the cache is disabled and all other values are zero
**/
size_t numEntries;
size_t m_NumEntries;
/**
* NB: cache entries are lines, TLB entries are pages
**/
size_t entrySize;
size_t m_EntrySize;
/**
* = fullyAssociative or the actual ways of associativity
* = fullyAssociative or the actual ways of m_Associativity
**/
size_t associativity;
size_t m_Associativity;
/**
* how many logical processors share this cache?
**/
size_t sharedBy;
size_t m_SharedBy;
void Initialize(size_t level, Type type)
{
this->level = level;
this->type = type;
numEntries = 0;
entrySize = 0;
associativity = 0;
sharedBy = 0;
m_Level = level;
m_Type = type;
m_NumEntries = 0;
m_EntrySize = 0;
m_Associativity = 0;
m_SharedBy = 0;
ENSURE(Validate());
}
bool Validate() const
{
if(!(1 <= level && level <= maxLevels))
if(!(1 <= m_Level && m_Level <= maxLevels))
return false;
if(type == kNull)
if(m_Type == kNull)
return false;
if(numEntries == 0) // disabled
if(m_NumEntries == 0) // disabled
{
if(entrySize != 0)
if(m_EntrySize != 0)
return false;
if(associativity != 0)
if(m_Associativity != 0)
return false;
if(sharedBy != 0)
if(m_SharedBy != 0)
return false;
}
else
{
if(entrySize == 0)
if(m_EntrySize == 0)
return false;
if(associativity == 0 || associativity > fullyAssociative)
if(m_Associativity == 0 || m_Associativity > fullyAssociative)
return false;
if(sharedBy == 0)
if(m_SharedBy == 0)
return false;
}
@ -115,7 +115,7 @@ struct Cache // POD (may be used before static constructors)
u64 TotalSize() const
{
return u64(numEntries)*entrySize;
return u64(m_NumEntries)*m_EntrySize;
}
};
@ -135,7 +135,7 @@ enum IdxCache
/**
* @return 0 if idxCache >= TLB+numTLBs, otherwise a valid pointer to
* a Cache whose numEntries is 0 if disabled / not present.
* a Cache whose m_NumEntries is 0 if disabled / not present.
**/
LIB_API const Cache* Caches(size_t idxCache);

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -113,7 +113,7 @@ static size_t MaxLogicalPerCore()
static size_t MaxLogicalPerCache()
{
return x86_x64::Caches(x86_x64::L2D)->sharedBy;
return x86_x64::Caches(x86_x64::L2D)->m_SharedBy;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -218,8 +218,8 @@ Vendors Vendor()
//-----------------------------------------------------------------------------
// signature
static size_t model;
static size_t family;
static size_t m_Model;
static size_t m_Family;
static ModuleInitState signatureInitState;
static Status InitSignature()
@ -228,27 +228,27 @@ static Status InitSignature()
regs.eax = 1;
if(!cpuid(&regs))
DEBUG_WARN_ERR(ERR::CPU_FEATURE_MISSING);
model = bits(regs.eax, 4, 7);
family = bits(regs.eax, 8, 11);
m_Model = bits(regs.eax, 4, 7);
m_Family = bits(regs.eax, 8, 11);
const size_t extendedModel = bits(regs.eax, 16, 19);
const size_t extendedFamily = bits(regs.eax, 20, 27);
if(family == 0xF)
family += extendedFamily;
if(family == 0xF || (Vendor() == x86_x64::VENDOR_INTEL && family == 6))
model += extendedModel << 4;
if(m_Family == 0xF)
m_Family += extendedFamily;
if(m_Family == 0xF || (Vendor() == x86_x64::VENDOR_INTEL && m_Family == 6))
m_Model += extendedModel << 4;
return INFO::OK;
}
size_t Model()
{
ModuleInit(&signatureInitState, InitSignature);
return model;
return m_Model;
}
size_t Family()
{
ModuleInit(&signatureInitState, InitSignature);
return family;
return m_Family;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2012 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -111,7 +111,7 @@ Status sys_cursor_set(sys_cursor cursor)
{
// restore default cursor.
if(!cursor)
cursor = cursor_from_HCURSOR(LoadCursor(0, MAKEINTRESOURCE(IDC_ARROW)));
cursor = cursor_from_HCURSOR(LoadCursor(0, IDC_ARROW));
(void)SetCursor(HCURSOR_from_cursor(cursor));
// return value (previous cursor) is useless.

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -27,6 +27,12 @@
#ifndef INCLUDED_WDBG_HEAP
#define INCLUDED_WDBG_HEAP
#ifdef _MSC_VER
#if _MSC_VER > 1800
# pragma warning(disable:4091) // hides previous local declaration
#endif
#endif
// this module provides a more convenient interface to the MS CRT's
// debug heap checks. it also hooks into allocations to record the
// caller/owner information without requiring macros (which break code

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2017 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -1778,9 +1778,8 @@ void wdbg_sym_WriteMinidump(EXCEPTION_POINTERS* exception_pointers)
// (UserStreamParam), since we will need to generate a plain text file on
// non-Windows platforms. users will just have to send us both files.
HANDLE hProcess = GetCurrentProcess();
DWORD pid = GetCurrentProcessId();
if(!pMiniDumpWriteDump || !pMiniDumpWriteDump(hProcess, pid, hFile, MiniDumpNormal, &mei, 0, 0))
if(!pMiniDumpWriteDump || !pMiniDumpWriteDump(GetCurrentProcess(), pid, hFile, MiniDumpNormal, &mei, 0, 0))
DEBUG_DISPLAY_ERROR(L"wdbg_sym_WriteMinidump: unable to generate minidump.");
CloseHandle(hFile);

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -27,6 +27,12 @@
#ifndef INCLUDED_WDBG_SYM
#define INCLUDED_WDBG_SYM
#ifdef _MSC_VER
#if _MSC_VER > 1800
# pragma warning(disable:4091) // hides previous local declaration
#endif
#endif
#include "lib/sysdep/os/win/win.h" // CONTEXT, EXCEPTION_POINTERS
struct _tagSTACKFRAME64;

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -198,9 +198,9 @@ static void UpdateTimerState()
// reads of the state variables consistent, done by latching them all and
// retrying if an update came in the middle of this.
const u64 counter = Counter();
const u64 deltaTicks = CounterDelta(ts->counter, counter);
ts2->counter = counter;
const u64 currentCounter = Counter();
const u64 deltaTicks = CounterDelta(ts->counter, currentCounter);
ts2->counter = currentCounter;
ts2->time = ts->time + deltaTicks/nominalFrequency;
ts = (volatile TimerState*)InterlockedExchangePointer((volatile PVOID*)&ts2, (PVOID)ts);
}
@ -208,12 +208,8 @@ static void UpdateTimerState()
double whrt_Time()
{
// latch timer state (counter and time must be from the same update)
const volatile TimerState* state = ts;
const double time = state->time;
const u64 counter = state->counter;
const u64 deltaTicks = CounterDelta(counter, Counter());
return (time + deltaTicks/nominalFrequency);
const volatile TimerState* state = ts;
return (state->time + CounterDelta(state->counter, Counter()) / nominalFrequency);
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -25,6 +25,7 @@
#include "lib/bits.h" // PopulationCount
#include "lib/alignment.h"
#include "lib/lib.h"
#include "lib/timer.h"
#include "lib/module_init.h"
#include "lib/sysdep/vm.h"
@ -292,13 +293,13 @@ static Status InitTopology()
size_t numa_NumNodes()
{
(void)ModuleInit(&initState, InitTopology);
UNUSED2(ModuleInit(&initState, InitTopology));
return numNodes;
}
size_t numa_NodeFromProcessor(size_t processor)
{
(void)ModuleInit(&initState, InitTopology);
UNUSED2(ModuleInit(&initState, InitTopology));
ENSURE(processor < os_cpu_NumProcessors());
Node* node = FindNodeWithProcessor(processor);
ENSURE(node);
@ -307,14 +308,14 @@ size_t numa_NodeFromProcessor(size_t processor)
uintptr_t numa_ProcessorMaskFromNode(size_t node)
{
(void)ModuleInit(&initState, InitTopology);
UNUSED2(ModuleInit(&initState, InitTopology));
ENSURE(node < numNodes);
return nodes[node].processorMask;
}
static UCHAR NodeNumberFromNode(size_t node)
{
(void)ModuleInit(&initState, InitTopology);
UNUSED2(ModuleInit(&initState, InitTopology));
ENSURE(node < numa_NumNodes());
return nodes[node].nodeNumber;
}
@ -393,7 +394,7 @@ static double MeasureRelativeDistance()
maxTime = std::max(maxTime, elapsedTime);
}
(void)os_cpu_SetThreadAffinityMask(previousProcessorMask);
UNUSED2(os_cpu_SetThreadAffinityMask(previousProcessorMask));
vm::Free(mem, size);
@ -425,8 +426,8 @@ static Status InitRelativeDistance()
double numa_Factor()
{
static ModuleInitState initState;
(void)ModuleInit(&initState, InitRelativeDistance);
static ModuleInitState _initState;
UNUSED2(ModuleInit(&_initState, InitRelativeDistance));
return relativeDistance;
}
@ -455,8 +456,8 @@ static Status InitMemoryInterleaved()
bool numa_IsMemoryInterleaved()
{
static ModuleInitState initState;
(void)ModuleInit(&initState, InitMemoryInterleaved);
static ModuleInitState _initState;
UNUSED2(ModuleInit(&_initState, InitMemoryInterleaved));
return isMemoryInterleaved;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -224,7 +224,7 @@ struct OvlAllocator // POD
}
// one 4 KiB page is enough for 64 OVERLAPPED per file (i.e. plenty).
static const size_t storageSize = pageSize;
static const size_t storageSize = g_PageSize;
void* storage;

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2012 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -27,6 +27,12 @@
#ifndef INCLUDED_WUTIL
#define INCLUDED_WUTIL
#ifdef _MSC_VER
#if _MSC_VER > 1800
# pragma warning(disable:4091) // hides previous local declaration
#endif
#endif
#include "lib/os_path.h"
#include "lib/sysdep/os/win/win.h"

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -174,7 +174,7 @@ static bool ShouldUseLargePages(size_t allocationSize, DWORD allocationType, Pag
// default: use a heuristic.
{
// internal fragmentation would be excessive.
if(allocationSize <= largePageSize/2)
if(allocationSize <= g_LargePageSize / 2)
return false;
// a previous attempt already took too long.
@ -278,36 +278,35 @@ CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
return INFO::SKIPPED;
ENSURE(size != 0); // probably indicates a bug in caller
ENSURE((commitSize % largePageSize) == 0 || pageType == kSmall);
ENSURE((commitSize % g_LargePageSize) == 0 || pageType == kSmall);
ASSERT(pageType == kLarge || pageType == kSmall || pageType == kDefault);
ASSERT(prot == PROT_NONE || (prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) == 0);
this->commitSize = commitSize;
this->pageType = pageType;
this->prot = prot;
alignment = (pageType == kSmall)? pageSize : largePageSize;
totalSize = round_up(size+alignment-1, alignment);
m_CommitSize = commitSize;
m_PageType = pageType;
m_Prot = prot;
m_Alignment = pageType == kSmall ? g_PageSize : g_LargePageSize;
m_TotalSize = round_up(size + m_Alignment - 1, m_Alignment);
// NB: it is meaningless to ask for large pages when reserving
// (see ShouldUseLargePages). pageType only affects subsequent commits.
base = (intptr_t)AllocateLargeOrSmallPages(0, totalSize, MEM_RESERVE);
base = (intptr_t)AllocateLargeOrSmallPages(0, m_TotalSize, MEM_RESERVE);
if(!base)
{
debug_printf("AllocateLargeOrSmallPages of %lld failed\n", (u64)totalSize);
debug_printf("AllocateLargeOrSmallPages of %lld failed\n", (u64)m_TotalSize);
DEBUG_DISPLAY_ERROR(ErrorString());
return ERR::NO_MEM; // NOWARN (error string is more helpful)
}
alignedBase = round_up(uintptr_t(base), alignment);
alignedEnd = alignedBase + round_up(size, alignment);
alignedBase = round_up(uintptr_t(base), m_Alignment);
alignedEnd = alignedBase + round_up(size, m_Alignment);
return INFO::OK;
}
void Free()
{
vm::Free((void*)base, totalSize);
alignment = alignedBase = alignedEnd = 0;
totalSize = 0;
vm::Free((void*)base, m_TotalSize);
m_Alignment = alignedBase = alignedEnd = 0;
m_TotalSize = 0;
COMPILER_FENCE;
base = 0; // release descriptor for subsequent reuse
}
@ -324,15 +323,15 @@ CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
bool Commit(uintptr_t address)
{
// (safe because Allocate rounded up to alignment)
const uintptr_t alignedAddress = round_down(address, alignment);
ENSURE(alignedBase <= alignedAddress && alignedAddress+commitSize <= alignedEnd);
return vm::Commit(alignedAddress, commitSize, pageType, prot);
const uintptr_t alignedAddress = round_down(address, m_Alignment);
ENSURE(alignedBase <= alignedAddress && alignedAddress + m_CommitSize <= alignedEnd);
return vm::Commit(alignedAddress, m_CommitSize, m_PageType, m_Prot);
}
// corresponds to the respective page size (Windows requires
// naturally aligned addresses and sizes when committing large pages).
// note that VirtualAlloc's alignment defaults to 64 KiB.
uintptr_t alignment;
uintptr_t m_Alignment;
uintptr_t alignedBase; // multiple of alignment
uintptr_t alignedEnd; // "
@ -340,12 +339,12 @@ CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
// (actual requested size / allocated address is required by
// ReleaseAddressSpace due to variable alignment.)
volatile intptr_t base; // (type is dictated by cpu_CAS)
size_t totalSize;
size_t m_TotalSize;
// parameters to be relayed to vm::Commit
size_t commitSize;
PageType pageType;
int prot;
size_t m_CommitSize;
PageType m_PageType;
int m_Prot;
//private:
static const wchar_t* ErrorString()
@ -498,7 +497,7 @@ static LONG CALLBACK VectoredHandler(const PEXCEPTION_POINTERS ep)
// NB: the first access to a page isn't necessarily at offset 0
// (memcpy isn't guaranteed to copy sequentially). rounding down
// is safe and necessary - see AddressRangeDescriptor::alignment.
const uintptr_t alignedAddress = round_down(address, d->alignment);
const uintptr_t alignedAddress = round_down(address, d->m_Alignment);
bool ok = d->Commit(alignedAddress);
if(!ok)
{

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2015 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -118,7 +118,7 @@ static char* stringStorage;
static char* stringStoragePos;
// pointers to dynamically allocated structures
static Structures structures;
static Structures g_Structures;
static void Cleanup() // called via atexit
{
@ -127,11 +127,11 @@ static void Cleanup() // called via atexit
// free each allocated structure
#define STRUCTURE(name, id)\
while(structures.name##_)\
while(g_Structures.name##_)\
{\
name* next = structures.name##_->next;\
SAFE_FREE(structures.name##_);\
structures.name##_ = next;\
name* next = g_Structures.name##_->next;\
SAFE_FREE(g_Structures.name##_);\
g_Structures.name##_ = next;\
}
STRUCTURES
#undef STRUCTURE
@ -449,7 +449,7 @@ static Status InitStructures()
switch(header->id)
{
#define STRUCTURE(name, id) case id: AddStructure(header, strings, structures.name##_); break;
#define STRUCTURE(name, id) case id: AddStructure(header, strings, g_Structures.name##_); break;
STRUCTURES
#undef STRUCTURE
@ -693,7 +693,7 @@ const Structures* GetStructures()
// (callers have to check if member pointers are nonzero anyway, so
// we always return a valid pointer to simplify most use cases.)
UNUSED2(ret);
return &structures;
return &g_Structures;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2011 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -263,7 +263,7 @@ struct Handle
FIELD(F_HEX, u64, uuid1, "")\
FIELD(0, SystemWakeUpType, wakeUpType, "")\
FIELD(0, const char*, skuNumber, "")\
FIELD(0, const char*, family, "")
FIELD(0, const char*, m_Family, "")
//-----------------------------------------------------------------------------
@ -436,7 +436,7 @@ struct Handle
#define Processor_FIELDS\
FIELD(0, const char*, socket, "")\
FIELD(0, ProcessorType, type, "")\
FIELD(0, u8, family, "") /* we don't bother providing enumerators for > 200 families */\
FIELD(0, u8, m_Family, "") /* we don't bother providing enumerators for > 200 families */\
FIELD(0, const char*, manufacturer, "")\
FIELD(F_HEX, u64, id, "")\
FIELD(0, const char*, version, "")\
@ -521,7 +521,7 @@ struct Handle
FIELD(0, u8, speed, " ns")\
FIELD(0, ECC, ecc, "")\
FIELD(0, CacheType, type, "")\
FIELD(0, CacheAssociativity, associativity, "")\
FIELD(0, CacheAssociativity, m_Associativity, "")\
FIELD(F_DERIVED, size_t, level, "") /* 1..8 */\
FIELD(F_DERIVED, CacheLocation, location, "")\
FIELD(F_DERIVED, CacheMode, mode, "")\

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -27,6 +27,12 @@
#ifndef INCLUDED_SYSDEP
#define INCLUDED_SYSDEP
#ifdef _MSC_VER
#if _MSC_VER > 1800
# pragma warning(disable:4091) // hides previous local declaration
#endif
#endif
#include "lib/debug.h" // ErrorReactionInternal
#include "lib/os_path.h"

View File

@ -63,7 +63,7 @@ enum PageType
* (an error dialog will also be raised).
* must be freed via ReleaseAddressSpace.
**/
LIB_API void* ReserveAddressSpace(size_t size, size_t commitSize = largePageSize, PageType pageType = kDefault, int prot = PROT_READ|PROT_WRITE);
LIB_API void* ReserveAddressSpace(size_t size, size_t commitSize = g_LargePageSize, PageType pageType = kDefault, int prot = PROT_READ|PROT_WRITE);
/**
* release address space and decommit any memory.

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2017 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -51,7 +51,7 @@ class S3tcBlock
{
public:
S3tcBlock(size_t dxt, const u8* RESTRICT block)
: dxt(dxt)
: m_Dxt(dxt)
{
// (careful, 'dxt != 1' doesn't work - there's also DXT1a)
const u8* a_block = block;
@ -71,23 +71,23 @@ public:
out[i] = (u8)c[c_selector][i];
// if no alpha, done
if(dxt == 1)
if(m_Dxt == 1)
return;
size_t a;
if(dxt == 3)
if(m_Dxt == 3)
{
// table of 4-bit alpha entries
a = access_bit_tbl(a_bits, pixel_idx, 4);
a |= a << 4; // expand to 8 bits (replicate high into low!)
}
else if(dxt == 5)
else if(m_Dxt == 5)
{
// pixel index -> alpha selector (3 bit) -> alpha
const size_t a_selector = access_bit_tbl(a_bits, pixel_idx, 3);
a = dxt5_a_tbl[a_selector];
}
// (dxt == DXT1A)
// (m_Dxt == DXT1A)
else
a = c[c_selector][A];
out[A] = (u8)(a & 0xFF);
@ -214,7 +214,7 @@ private:
// table of 2-bit color selectors
u32 c_selectors;
size_t dxt;
size_t m_Dxt;
};
@ -283,7 +283,7 @@ static Status s3tc_decompress(Tex* t)
const size_t out_bpp = (dxt != 1)? 32 : 24;
const size_t out_size = t->img_size() * out_bpp / t->m_Bpp;
shared_ptr<u8> decompressedData;
AllocateAligned(decompressedData, out_size, pageSize);
AllocateAligned(decompressedData, out_size, g_PageSize);
const size_t s3tc_block_size = (dxt == 3 || dxt == 5)? 16 : 8;
S3tcDecompressInfo di = { dxt, s3tc_block_size, out_bpp/8, decompressedData.get() };

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2017 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -182,7 +182,7 @@ static Status png_decode_impl(MemoryStream* stream, png_structp png_ptr, png_inf
const size_t img_size = pitch * h;
shared_ptr<u8> data;
AllocateAligned(data, img_size, pageSize);
AllocateAligned(data, img_size, g_PageSize);
std::vector<RowPtr> rows = tex_codec_alloc_rows(data.get(), h, pitch, TEX_TOP_DOWN, 0);
png_read_image(png_ptr, (png_bytepp)&rows[0]);

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2017 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -79,16 +79,16 @@ void ConvertCaches(const ScriptInterface& scriptInterface, x86_x64::IdxCache idx
for (size_t idxLevel = 0; idxLevel < x86_x64::Cache::maxLevels; ++idxLevel)
{
const x86_x64::Cache* pcache = x86_x64::Caches(idxCache+idxLevel);
if (pcache->type == x86_x64::Cache::kNull || pcache->numEntries == 0)
if (pcache->m_Type == x86_x64::Cache::kNull || pcache->m_NumEntries == 0)
continue;
JS::RootedValue cache(cx);
scriptInterface.Eval("({})", &cache);
scriptInterface.SetProperty(cache, "type", (u32)pcache->type);
scriptInterface.SetProperty(cache, "level", (u32)pcache->level);
scriptInterface.SetProperty(cache, "associativity", (u32)pcache->associativity);
scriptInterface.SetProperty(cache, "linesize", (u32)pcache->entrySize);
scriptInterface.SetProperty(cache, "sharedby", (u32)pcache->sharedBy);
scriptInterface.SetProperty(cache, "totalsize", (u32)pcache->TotalSize());
scriptInterface.SetProperty(cache, "type", static_cast<u32>(pcache->m_Type));
scriptInterface.SetProperty(cache, "level", static_cast<u32>(pcache->m_Level));
scriptInterface.SetProperty(cache, "associativity", static_cast<u32>(pcache->m_Associativity));
scriptInterface.SetProperty(cache, "linesize", static_cast<u32>(pcache->m_EntrySize));
scriptInterface.SetProperty(cache, "sharedby", static_cast<u32>(pcache->m_SharedBy));
scriptInterface.SetProperty(cache, "totalsize",static_cast<u32>(pcache->TotalSize()));
scriptInterface.SetPropertyInt(ret, idxLevel, cache);
}
}
@ -106,11 +106,11 @@ void ConvertTLBs(const ScriptInterface& scriptInterface, JS::MutableHandleValue
break;
JS::RootedValue tlb(cx);
scriptInterface.Eval("({})", &tlb);
scriptInterface.SetProperty(tlb, "type", (u32)ptlb->type);
scriptInterface.SetProperty(tlb, "level", (u32)ptlb->level);
scriptInterface.SetProperty(tlb, "associativity", (u32)ptlb->associativity);
scriptInterface.SetProperty(tlb, "pagesize", (u32)ptlb->entrySize);
scriptInterface.SetProperty(tlb, "entries", (u32)ptlb->numEntries);
scriptInterface.SetProperty(tlb, "type", static_cast<u32>(ptlb->m_Type));
scriptInterface.SetProperty(tlb, "level", static_cast<u32>(ptlb->m_Level));
scriptInterface.SetProperty(tlb, "associativity", static_cast<u32>(ptlb->m_Associativity));
scriptInterface.SetProperty(tlb, "pagesize", static_cast<u32>(ptlb->m_EntrySize));
scriptInterface.SetProperty(tlb, "entries", static_cast<u32>(ptlb->m_NumEntries));
scriptInterface.SetPropertyInt(ret, i, tlb);
}
}
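
Besides the member renames (type -> m_Type and so on), the C-style (u32) casts are replaced with static_cast<u32>. A small illustration of the difference, using standalone typedefs rather than the engine's headers:

#include <cstddef>
#include <cstdint>
using u32 = std::uint32_t;

void Illustrate(std::size_t level)
{
	u32 a = (u32)level;              // compiles, but a C-style cast accepts far more
	u32 b = static_cast<u32>(level); // static_cast permits only this numeric conversion
	// u32* p = static_cast<u32*>(&level); // rejected; a C-style cast would silently
	//                                     // fall back to reinterpret_cast here
	(void)a; (void)b;
}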

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2014 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -549,10 +549,10 @@ public:
for (size_t i = 0; i < m_QueryTypes.size(); ++i)
{
GLuint id = NewQuery(i);
pglBeginPerfQueryINTEL(id);
GLuint local_id = NewQuery(i);
pglBeginPerfQueryINTEL(local_id);
ogl_WarnIfError();
event.queries.push_back(id);
event.queries.push_back(local_id);
}
frame.activeRegions.push_back(frame.events.size());
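
Renaming the loop-local id to local_id avoids VS 2015's C4456 warning, presumably because a variable named id already exists earlier in the enclosing scope. A trimmed-down sketch of the pattern (names are illustrative):

void Sketch()
{
	int id = 0; // declared earlier in the enclosing scope
	for (int i = 0; i < 4; ++i)
	{
		int local_id = i; // "int id = i;" here would trigger C4456
		                  // ("declaration hides previous local declaration")
		(void)local_id;
	}
	(void)id;
}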

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2014 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -100,7 +100,7 @@ void SilhouetteRenderer::AddCaster(CModel* model)
* that lets us pack and sort the edge/point list efficiently.
*/
static const int MAX_COORD = 16384;
static const u16 MAX_COORD = 16384;
struct Occluder
{
@ -165,8 +165,12 @@ struct ActiveList
static void ComputeScreenBounds(Occluder& occluder, const CBoundingBoxAligned& bounds, CMatrix3D& proj)
{
int x0 = INT_MAX, y0 = INT_MAX, x1 = INT_MIN, y1 = INT_MIN;
float z0 = FLT_MAX;
u16 x0 = std::numeric_limits<u16>::max();
u16 y0 = std::numeric_limits<u16>::max();
u16 x1 = std::numeric_limits<u16>::min();
u16 y1 = std::numeric_limits<u16>::min();
float z0 = std::numeric_limits<float>::max();
u16 halfMaxCoord = MAX_COORD / 2;
for (size_t ix = 0; ix <= 1; ix++)
{
for (size_t iy = 0; iy <= 1; iy++)
@ -175,10 +179,10 @@ static void ComputeScreenBounds(Occluder& occluder, const CBoundingBoxAligned& b
{
CVector4D vec(bounds[ix].X, bounds[iy].Y, bounds[iz].Z, 1.0f);
CVector4D svec = proj.Transform(vec);
x0 = std::min(x0, MAX_COORD/2 + (int)(MAX_COORD/2 * svec.X / svec.W));
y0 = std::min(y0, MAX_COORD/2 + (int)(MAX_COORD/2 * svec.Y / svec.W));
x1 = std::max(x1, MAX_COORD/2 + (int)(MAX_COORD/2 * svec.X / svec.W));
y1 = std::max(y1, MAX_COORD/2 + (int)(MAX_COORD/2 * svec.Y / svec.W));
x0 = std::min(x0, static_cast<u16>(halfMaxCoord + (halfMaxCoord * svec.X / svec.W)));
y0 = std::min(y0, static_cast<u16>(halfMaxCoord + (halfMaxCoord * svec.Y / svec.W)));
x1 = std::max(x1, static_cast<u16>(halfMaxCoord + (halfMaxCoord * svec.X / svec.W)));
y1 = std::max(y1, static_cast<u16>(halfMaxCoord + (halfMaxCoord * svec.Y / svec.W)));
z0 = std::min(z0, svec.Z / svec.W);
}
}
@ -186,23 +190,23 @@ static void ComputeScreenBounds(Occluder& occluder, const CBoundingBoxAligned& b
// TODO: there must be a quicker way to do this than to test every vertex,
// given the symmetry of the bounding box
occluder.x0 = clamp(x0, 0, MAX_COORD-1);
occluder.y0 = clamp(y0, 0, MAX_COORD-1);
occluder.x1 = clamp(x1, 0, MAX_COORD-1);
occluder.y1 = clamp(y1, 0, MAX_COORD-1);
occluder.x0 = clamp(x0, std::numeric_limits<u16>::min(), static_cast<u16>(MAX_COORD - 1));
occluder.y0 = clamp(y0, std::numeric_limits<u16>::min(), static_cast<u16>(MAX_COORD - 1));
occluder.x1 = clamp(x1, std::numeric_limits<u16>::min(), static_cast<u16>(MAX_COORD - 1));
occluder.y1 = clamp(y1, std::numeric_limits<u16>::min(), static_cast<u16>(MAX_COORD - 1));
occluder.z = z0;
}
static void ComputeScreenPos(Caster& caster, const CVector3D& pos, CMatrix3D& proj)
{
CVector4D vec(pos.X, pos.Y, pos.Z, 1.0f);
CVector4D svec = proj.Transform(vec);
int x = MAX_COORD/2 + (int)(MAX_COORD/2 * svec.X / svec.W);
int y = MAX_COORD/2 + (int)(MAX_COORD/2 * svec.Y / svec.W);
CVector4D svec = proj.Transform(CVector4D(pos.X, pos.Y, pos.Z, 1.0f));
u16 halfMaxCoord = MAX_COORD / 2;
u16 x = static_cast<u16>(halfMaxCoord + (halfMaxCoord * svec.X / svec.W));
u16 y = static_cast<u16>(halfMaxCoord + (halfMaxCoord * svec.Y / svec.W));
float z = svec.Z / svec.W;
caster.x = clamp(x, 0, MAX_COORD-1);
caster.y = clamp(y, 0, MAX_COORD-1);
caster.x = clamp(x, std::numeric_limits<u16>::min(), static_cast<u16>(MAX_COORD - 1));
caster.y = clamp(y, std::numeric_limits<u16>::min(), static_cast<u16>(MAX_COORD - 1));
caster.z = z;
}
@ -276,7 +280,7 @@ void SilhouetteRenderer::ComputeSubmissions(const CCamera& camera)
if (d.x0 == d.x1 || d.y0 == d.y1)
continue;
size_t id = occluders.size();
u16 id = static_cast<u16>(occluders.size());
occluders.push_back(d);
entries.push_back(EntryCreate(EDGE_IN, id, d.x0));
@ -297,7 +301,7 @@ void SilhouetteRenderer::ComputeSubmissions(const CCamera& camera)
if (d.x0 == d.x1 || d.y0 == d.y1)
continue;
size_t id = occluders.size();
u16 id = static_cast<u16>(occluders.size());
occluders.push_back(d);
entries.push_back(EntryCreate(EDGE_IN, id, d.x0));
@ -314,7 +318,7 @@ void SilhouetteRenderer::ComputeSubmissions(const CCamera& camera)
d.rendered = false;
ComputeScreenPos(d, pos, proj);
size_t id = casters.size();
u16 id = static_cast<u16>(casters.size());
casters.push_back(d);
entries.push_back(EntryCreate(POINT, id, d.x));
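
These hunks switch the screen-space bookkeeping to u16 with explicit static_cast narrowing, so the clamp() calls deduce a single type and VS 2015 stops warning about implicit conversions. A self-contained sketch of the idea, using std::min/std::max in place of the engine's clamp() helper:

#include <algorithm>
#include <cstdint>
#include <limits>
using u16 = std::uint16_t;

const u16 kMaxCoord = 16384; // stand-in for MAX_COORD

u16 ClampCoord(u16 value)
{
	const u16 lo = std::numeric_limits<u16>::min();
	const u16 hi = static_cast<u16>(kMaxCoord - 1);
	return std::max(lo, std::min(value, hi)); // all three operands share one type
}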

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2017 Wildfire Games.
/* Copyright (C) 2018 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
@ -126,7 +126,7 @@ private:
pauseTime = 50;
{
CScopeLock lock(m_WorkerMutex);
CScopeLock workerLock(m_WorkerMutex);
ItemsList::iterator lstr = m_Items->begin();
ItemsList* nextItemList = new ItemsList;
@ -143,7 +143,7 @@ private:
}
else
{
CScopeLock lock(m_DeadItemsMutex);
CScopeLock deadItemsLock(m_DeadItemsMutex);
m_DeadItems->push_back(*lstr);
}
++lstr;
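
The two guards are only renamed (lock -> workerLock / deadItemsLock); the locking itself is unchanged. The guard has to remain a named object so it holds the mutex until the end of its block, which is why the fix renames it rather than removing it. A sketch of the same idiom using std::lock_guard as a stand-in for CScopeLock:

#include <mutex>

std::mutex workerMutex;
std::mutex deadItemsMutex;

void ProcessItem(bool alive)
{
	if (alive)
	{
		std::lock_guard<std::mutex> workerLock(workerMutex); // held until the block ends
		// ... move the item to the next worker list ...
	}
	else
	{
		std::lock_guard<std::mutex> deadItemsLock(deadItemsMutex);
		// ... push the item onto the dead-items list ...
	}
}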

View File

@ -61,11 +61,6 @@ using fmt::LongLong;
using fmt::ULongLong;
using fmt::internal::Arg;
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable: 4127) // conditional expression is constant
#endif
namespace {
#ifndef _MSC_VER
@ -94,6 +89,12 @@ inline int isinfinity(long double x) { return std::isinf(x); }
#else // _MSC_VER
# pragma warning(push)
# pragma warning(disable: 4127) // conditional expression is constant
#if _MSC_VER > 1800
# pragma warning(disable:4456) // hides previous local declaration
#endif
inline int getsign(double value) {
if (value < 0) return 1;
if (value == value) return 0;
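
The duplicate push/disable block before the anonymous namespace is dropped, and the 4456 disable is added to the existing MSVC-only branch, which appears to keep every warning(push) paired with its warning(pop). A minimal sketch of the balanced pattern:

#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable: 4127) // conditional expression is constant
# if _MSC_VER > 1800
#  pragma warning(disable: 4456) // declaration hides previous local declaration
# endif
#endif

// ... code that would otherwise emit these warnings ...

#ifdef _MSC_VER
# pragma warning(pop) // restore the warning state saved by push
#endif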

View File

@ -7,6 +7,9 @@
#include "precompiled.h"
#ifdef _MSC_VER
#if _MSC_VER > 1800
# pragma warning(disable:4456) // hides previous local declaration
#endif
# pragma warning(disable:4189) // local variable is initialized but not referenced
#endif

View File

@ -5,6 +5,12 @@
#include "precompiled.h"
#ifdef _MSC_VER
#if _MSC_VER > 1800
# pragma warning(disable:4456) // hides previous local declaration
#endif
#endif
/**
* Copyright (C) 2011 by Morten S. Mikkelsen
*

View File

@ -20,6 +20,14 @@
# pragma warning(disable:4100) // unreferenced formal parameter
# pragma warning(disable:4245) // signed/unsigned mismatch
# pragma warning(disable:4505) // unreferenced local function has been removed
#if _MSC_VER > 1800
# pragma warning(disable:4365) // signed/unsigned mismatch
# pragma warning(disable:4191) // unsafe conversion
# pragma warning(disable:4820) // padding added after data member
# pragma warning(disable:4668) // symbol not defined as a preprocessor macro
# pragma warning(disable:4710) // function not inlined
# pragma warning(disable:4711) // selected for automatic inline expansion
#endif
# pragma comment(lib, "ws2_32.lib")
#endif