1
0
forked from 0ad/0ad

Use std::atomic instead of platform-dependent code

This covers atomic add operations and atomic compare-and-swap (CAS)
operations.
This commit is contained in:
Nicolas Auvray 2024-09-16 23:02:32 +02:00
parent a942100921
commit a3541201a6
Signed by: Itms
GPG Key ID: C7E52BD14CE14E09
12 changed files with 86 additions and 74 deletions

View File

@ -24,7 +24,6 @@
#include "lib/allocators/page_aligned.h"
#include "lib/alignment.h"
#include "lib/sysdep/cpu.h" // cpu_CAS
//-----------------------------------------------------------------------------

View File

@ -177,9 +177,10 @@ Status debug_WriteCrashlog(const wchar_t* text)
// initializing local static objects from constants may happen when
// this is first called, which isn't thread-safe. (see C++ 6.7.4)
cassert(IDLE == 0);
static volatile intptr_t state;
static std::atomic<State> state;
if(!cpu_CAS(&state, IDLE, BUSY))
State initial{ IDLE };
if(!state.compare_exchange_strong(initial, BUSY))
return ERR::REENTERED; // NOWARN
OsPath pathname = ah_get_log_dir()/"crashlog.txt";
@ -464,17 +465,17 @@ enum SkipStatus
{
INVALID, VALID, BUSY
};
static intptr_t skipStatus = INVALID;
static std::atomic<SkipStatus> skipStatus{ INVALID };
static Status errorToSkip;
static size_t numSkipped;
void debug_SkipErrors(Status err)
{
if(cpu_CAS(&skipStatus, INVALID, BUSY))
SkipStatus expected{ INVALID };
if(skipStatus.compare_exchange_strong(expected, BUSY))
{
errorToSkip = err;
numSkipped = 0;
COMPILER_FENCE;
skipStatus = VALID; // linearization point
}
else
@ -483,10 +484,10 @@ void debug_SkipErrors(Status err)
size_t debug_StopSkippingErrors()
{
if(cpu_CAS(&skipStatus, VALID, BUSY))
SkipStatus expected{ VALID };
if(skipStatus.compare_exchange_strong(expected, BUSY))
{
const size_t ret = numSkipped;
COMPILER_FENCE;
skipStatus = INVALID; // linearization point
return ret;
}
@ -499,11 +500,11 @@ size_t debug_StopSkippingErrors()
static bool ShouldSkipError(Status err)
{
if(cpu_CAS(&skipStatus, VALID, BUSY))
SkipStatus expected{ VALID };
if(skipStatus.compare_exchange_strong(expected, BUSY))
{
numSkipped++;
const bool ret = (err == errorToSkip);
COMPILER_FENCE;
skipStatus = VALID;
return ret;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2010 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -27,7 +27,7 @@
#include "precompiled.h"
#include "lib/module_init.h"
#include "lib/sysdep/cpu.h" // cpu_CAS
#include "lib/sysdep/cpu.h"
// not yet initialized, or already shutdown
static const ModuleInitState UNINITIALIZED = 0; // value documented in header
@ -41,11 +41,14 @@ Status ModuleInit(volatile ModuleInitState* initState, Status (*init)())
{
for(;;)
{
if(cpu_CAS(initState, UNINITIALIZED, BUSY))
Status expected{ UNINITIALIZED };
if(initState->compare_exchange_strong(expected, BUSY))
{
Status ret = init();
*initState = (ret == INFO::OK)? INITIALIZED : ret;
COMPILER_FENCE;
if (ret == INFO::OK)
initState->store(INITIALIZED);
else
initState->store(ret);
return ret;
}
@ -66,11 +69,11 @@ Status ModuleShutdown(volatile ModuleInitState* initState, void (*shutdown)())
{
for(;;)
{
if(cpu_CAS(initState, INITIALIZED, BUSY))
Status expected{ INITIALIZED };
if(initState->compare_exchange_strong(expected, BUSY))
{
shutdown();
*initState = UNINITIALIZED;
COMPILER_FENCE;
initState->store(UNINITIALIZED);
return INFO::OK;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2022 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -27,12 +27,14 @@
#ifndef INCLUDED_MODULE_INIT
#define INCLUDED_MODULE_INIT
#include <atomic>
/**
* initialization state of a module (class, source file, etc.)
* must be initialized to zero (e.g. by defining as a static variable).
* DO NOT change the value!
**/
typedef intptr_t ModuleInitState; // intptr_t is required by cpu_CAS
typedef std::atomic_int64_t ModuleInitState;
/**
* calls a user-defined init function if initState is zero.

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2020 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -24,11 +24,12 @@
#include "lib/sysdep/os/win/acpi.h"
#include "lib/byte_order.h"
#include "lib/sysdep/cpu.h"
#include "lib/module_init.h"
#include "lib/sysdep/os/win/wfirmware.h"
#include <atomic>
#pragma pack(1)
typedef const volatile u8* PCV_u8;
@ -103,7 +104,7 @@ static bool ValidateTable(const AcpiTable* table, const char* signature = 0)
return true;
}
static void AllocateAndCopyTables(const AcpiTable**& tables, size_t& numTables)
static void AllocateAndCopyTables(std::atomic<const AcpiTable**>& tables, size_t& numTables)
{
const wfirmware::Provider provider = FOURCC_BE('A','C','P','I');
const wfirmware::TableIds tableIDs = wfirmware::GetTableIDs(provider);
@ -135,7 +136,7 @@ static void AllocateAndCopyTables(const AcpiTable**& tables, size_t& numTables)
//-----------------------------------------------------------------------------
// note: avoid global std::map etc. because we may be called before _cinit
static const AcpiTable** tables; // tables == 0 <=> not initialized
static std::atomic<const AcpiTable**> tables; // tables == 0 <=> not initialized
static const AcpiTable* invalidTables; // tables == &invalidTables => init failed
static size_t numTables;
@ -153,7 +154,8 @@ void acpi_Shutdown()
const AcpiTable* acpi_GetTable(const char* signature)
{
if(cpu_CAS(&tables, (const AcpiTable**)0, &invalidTables))
const AcpiTable** initial{ 0 };
if(tables.compare_exchange_strong(initial, &invalidTables))
AllocateAndCopyTables(tables, numTables);
// (typically only a few tables, linear search is OK)

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2023 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -33,13 +33,13 @@
#include "lib/byte_order.h" // movzx_le64
#include "lib/module_init.h"
#include "lib/sysdep/cpu.h"
#include "lib/debug_stl.h"
#include "lib/app_hooks.h"
#include "lib/external_libraries/dbghelp.h"
#include "lib/sysdep/os/win/wdbg.h"
#include "lib/sysdep/os/win/wutil.h"
#include <atomic>
//----------------------------------------------------------------------------
// dbghelp
@ -1717,8 +1717,9 @@ static Status dump_frame_cb(const STACKFRAME64* sf, uintptr_t UNUSED(userContext
Status debug_DumpStack(wchar_t* buf, size_t maxChars, void* pcontext, const wchar_t* lastFuncToSkip)
{
static intptr_t busy;
if(!cpu_CAS(&busy, 0, 1))
static std::atomic_bool busy;
bool expected{ false };
if(!busy.compare_exchange_strong(expected, true))
return ERR::REENTERED; // NOWARN
out_init(buf, maxChars);
@ -1727,8 +1728,7 @@ Status debug_DumpStack(wchar_t* buf, size_t maxChars, void* pcontext, const wcha
wdbg_assert(pcontext != 0);
Status ret = wdbg_sym_WalkStack(dump_frame_cb, 0, *(CONTEXT*)pcontext, lastFuncToSkip);
COMPILER_FENCE;
busy = 0;
busy = false;
return ret;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2023 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -35,12 +35,12 @@
#include "lib/bits.h" // round_up
#include "lib/alignment.h" // IsAligned
#include "lib/module_init.h"
#include "lib/sysdep/cpu.h" // cpu_AtomicAdd
#include "lib/sysdep/filesystem.h" // O_DIRECT
#include "lib/sysdep/os/win/wutil.h" // wutil_SetPrivilege
#include "lib/sysdep/os/win/wiocp.h"
#include "lib/sysdep/os/win/wposix/crt_posix.h" // _get_osfhandle
#include <atomic>
#include <ctime>
// (dynamic linking preserves compatibility with previous Windows versions)
@ -171,7 +171,7 @@ struct OvlAllocator // POD
void Shutdown()
{
if(extant != 0)
debug_printf("waio: OvlAllocator::Shutdown with extant=%d\n", extant);
debug_printf("waio: OvlAllocator::Shutdown with extant=%d\n", extant.load());
InterlockedFlushSList(&freelist);
@ -208,14 +208,14 @@ struct OvlAllocator // POD
ovl.OffsetHigh = u64_hi(offset);
ovl.hEvent = 0; // (notification is via IOCP and/or polling)
cpu_AtomicAdd(&extant, +1);
extant++;
return &ovl;
}
void Deallocate(OVERLAPPED* ovl)
{
cpu_AtomicAdd(&extant, -1);
extant--;
const uintptr_t address = uintptr_t(ovl);
ENSURE(uintptr_t(storage) <= address && address < uintptr_t(storage)+storageSize);
@ -236,7 +236,7 @@ struct OvlAllocator // POD
# pragma warning(pop)
#endif
volatile intptr_t extant;
std::atomic_intptr_t extant;
};
@ -306,7 +306,7 @@ struct FileControlBlocks // POD
static const int firstDescriptor = 4000;
FileControlBlock fcbs[maxFiles];
CACHE_ALIGNED(volatile intptr_t) inUse[maxFiles];
CACHE_ALIGNED(std::atomic_intptr_t) inUse[maxFiles];
void Init()
{
@ -330,7 +330,8 @@ struct FileControlBlocks // POD
{
for(size_t i = 0; i < maxFiles; i++)
{
if(cpu_CAS(&inUse[i], 0, 1))
intptr_t expected{ 0 };
if(inUse[i].compare_exchange_strong(expected, 1))
return &fcbs[i];
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2023 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -24,12 +24,12 @@
#include "lib/sysdep/filesystem.h"
#include "lib/debug.h"
#include "lib/sysdep/cpu.h" // cpu_CAS
#include "lib/sysdep/os/win/wutil.h" // StatusFromWin
#include "lib/sysdep/os/win/wposix/waio.h" // waio_reopen
#include "lib/sysdep/os/win/wposix/wtime_internal.h" // wtime_utc_filetime_to_time_t
#include "lib/sysdep/os/win/wposix/crt_posix.h" // _close, _lseeki64 etc.
#include <atomic>
//-----------------------------------------------------------------------------
// WDIR suballocator
@ -57,11 +57,12 @@ struct WDIR // POD
};
static WDIR wdir_storage;
static volatile intptr_t wdir_in_use;
static std::atomic_bool wdir_in_use;
static inline WDIR* wdir_alloc()
{
if(cpu_CAS(&wdir_in_use, 0, 1)) // gained ownership
bool expected{ false };
if(wdir_in_use.compare_exchange_strong(expected, true)) // gained ownership
return &wdir_storage;
// already in use (rare) - allocate from heap
@ -72,7 +73,8 @@ static inline void wdir_free(WDIR* d)
{
if(d == &wdir_storage)
{
const bool ok = cpu_CAS(&wdir_in_use, 1, 0); // relinquish ownership
bool expected{ true };
const bool ok = wdir_in_use.compare_exchange_strong(expected, false); // relinquish ownership
ENSURE(ok); // ensure it wasn't double-freed
}
else // allocated from heap

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2023 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -27,15 +27,14 @@
#include "precompiled.h"
#include "lib/sysdep/os/win/wposix/wpthread.h"
#include <new>
#include <process.h>
#include "lib/sysdep/cpu.h" // cpu_CAS
#include "lib/posix/posix_filesystem.h" // O_CREAT
#include "lib/sysdep/os/win/wposix/wposix_internal.h"
#include "lib/sysdep/os/win/wposix/wtime.h" // timespec
#include "lib/sysdep/os/win/wseh.h" // wseh_ExceptionFilter
#include <new>
#include <process.h>
namespace
{
@ -88,7 +87,8 @@ pthread_t pthread_self()
int pthread_once(pthread_once_t* once, void (*init_routine)())
{
if(cpu_CAS((volatile intptr_t*)once, 0, 1))
uintptr_t zero{ 0 };
if(once->compare_exchange_strong(zero, 1))
init_routine();
return 0;
}
@ -147,7 +147,8 @@ static const size_t MAX_DTORS = 4;
static struct
{
pthread_key_t key;
void (*dtor)(void*);
typedef void (*dtortype)(void*);
std::atomic<dtortype> dtor;
}
dtors[MAX_DTORS];
@ -165,7 +166,8 @@ int pthread_key_create(pthread_key_t* key, void (*dtor)(void*))
size_t i;
for(i = 0; i < MAX_DTORS; i++)
{
if(cpu_CAS((volatile intptr_t*)&dtors[i].dtor, (intptr_t)0, (intptr_t)dtor))
void (*zero)(void*) { 0 };
if(dtors[i].dtor.compare_exchange_strong(zero, dtor))
goto have_slot;
}

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2022 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -27,6 +27,7 @@
#ifndef INCLUDED_WPTHREAD
#define INCLUDED_WPTHREAD
#include <atomic>
//
// <sched.h>
@ -54,7 +55,7 @@ enum
//
// one-time init
typedef intptr_t pthread_once_t; // required for cpu_CAS
typedef std::atomic_uintptr_t pthread_once_t; // required for atomic compare_exchange
#define PTHREAD_ONCE_INIT 0 // static pthread_once_t x = PTHREAD_ONCE_INIT;
int pthread_once(pthread_once_t*, void (*init_routine)());

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2022 Wildfire Games.
/* Copyright (C) 2024 Wildfire Games.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -29,11 +29,11 @@
#include "lib/byte_order.h" // FOURCC
#include "lib/utf8.h"
#include "lib/sysdep/cpu.h"
#include "lib/sysdep/os/win/win.h"
#include "lib/sysdep/os/win/wutil.h"
#include "lib/sysdep/os/win/wdbg_sym.h" // wdbg_sym_WriteMinidump
#include <atomic>
#include <process.h> // __security_init_cookie
#define NEED_COOKIE_INIT
@ -249,8 +249,8 @@ long __stdcall wseh_ExceptionFilter(struct _EXCEPTION_POINTERS* ep)
// make sure we don't recurse infinitely if this function raises an
// SEH exception. (we may only have the guard page's 4 KB worth of
// stack space if the exception is EXCEPTION_STACK_OVERFLOW)
static intptr_t nestingLevel = 0;
cpu_AtomicAdd(&nestingLevel, 1);
static std::atomic_intptr_t nestingLevel{ 0 };
nestingLevel++;
if(nestingLevel >= 3)
return EXCEPTION_CONTINUE_SEARCH;

View File

@ -31,11 +31,11 @@
#include "lib/alignment.h" // CACHE_ALIGNED
#include "lib/bits.h" // round_down
#include "lib/module_init.h"
#include "lib/sysdep/cpu.h" // cpu_AtomicAdd
#include "lib/sysdep/numa.h"
#include "lib/sysdep/os/win/wutil.h"
#include "lib/timer.h"
#include <atomic>
#include <excpt.h>
namespace vm
@ -51,16 +51,16 @@ CACHE_ALIGNED(struct Statistics) // POD
// thread-safe (required due to concurrent commits)
void NotifyLargePageCommit()
{
cpu_AtomicAdd(&largePageCommits, +1);
largePageCommits++;
}
void NotifySmallPageCommit()
{
cpu_AtomicAdd(&smallPageCommits, +1);
smallPageCommits++;
}
intptr_t largePageCommits;
intptr_t smallPageCommits;
std::atomic_intptr_t largePageCommits;
std::atomic_intptr_t smallPageCommits;
};
static CACHE_ALIGNED(Statistics) statistics[os_cpu_MaxProcessors];
@ -209,8 +209,8 @@ CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
Status Allocate(size_t size, size_t commitSize, PageType pageType, int prot)
{
// if this descriptor wasn't yet in use, mark it as busy
// (double-checking is cheaper than cpu_CAS)
if(base != 0 || !cpu_CAS(&base, intptr_t(0), intptr_t(this)))
uintptr_t base_previous = base.load();
if(base_previous != 0 || !base.compare_exchange_strong(base_previous, (uintptr_t)this))
return INFO::SKIPPED;
ENSURE(size != 0); // probably indicates a bug in caller
@ -225,7 +225,7 @@ CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
// NB: it is meaningless to ask for large pages when reserving
// (see ShouldUseLargePages). pageType only affects subsequent commits.
base = (intptr_t)AllocateLargeOrSmallPages(0, m_TotalSize, MEM_RESERVE);
base = (uintptr_t)AllocateLargeOrSmallPages(0, m_TotalSize, MEM_RESERVE);
if(!base)
{
debug_printf("AllocateLargeOrSmallPages of %lld failed\n", (u64)m_TotalSize);
@ -233,17 +233,16 @@ CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
return ERR::NO_MEM; // NOWARN (error string is more helpful)
}
alignedBase = round_up(uintptr_t(base), m_Alignment);
alignedBase = round_up(base.load(), m_Alignment);
alignedEnd = alignedBase + round_up(size, m_Alignment);
return INFO::OK;
}
void Free()
{
vm::Free((void*)base, m_TotalSize);
vm::Free((void*)base.load(), m_TotalSize);
m_Alignment = alignedBase = alignedEnd = 0;
m_TotalSize = 0;
COMPILER_FENCE;
base = 0; // release descriptor for subsequent reuse
}
@ -251,7 +250,7 @@ CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
{
// safety check: we should never see pointers in the no-man's-land
// between the original and rounded up base addresses.
ENSURE(!(uintptr_t(base) <= address && address < alignedBase));
ENSURE(!(base.load() <= address && address < alignedBase));
return (alignedBase <= address && address < alignedEnd);
}
@ -274,7 +273,7 @@ CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
// (actual requested size / allocated address is required by
// ReleaseAddressSpace due to variable alignment.)
volatile intptr_t base; // (type is dictated by cpu_CAS)
std::atomic_uintptr_t base;
size_t m_TotalSize;
// parameters to be relayed to vm::Commit
@ -445,7 +444,7 @@ static LONG CALLBACK VectoredHandler(const PEXCEPTION_POINTERS ep)
static PVOID handler;
static ModuleInitState initState;
static volatile intptr_t references = 0; // atomic
static std::atomic_intptr_t references{ 0 };
static Status InitHandler()
{
@ -466,12 +465,12 @@ static void ShutdownHandler()
void BeginOnDemandCommits()
{
ModuleInit(&initState, InitHandler);
cpu_AtomicAdd(&references, +1);
references++;
}
void EndOnDemandCommits()
{
if(cpu_AtomicAdd(&references, -1) == 1)
if(references.fetch_sub(1) == 1)
ModuleShutdown(&initState, ShutdownHandler);
}