28 #ifndef _AtomicWrapper_H__
29 #define _AtomicWrapper_H__
75 bool cas (
const T &old,
const T &nu)
78 if (
mField != old)
return false;
120 #if (((OGRE_COMPILER == OGRE_COMPILER_GNUC) && (OGRE_COMP_VER >= 412)) || (OGRE_COMPILER == OGRE_COMPILER_CLANG)) && OGRE_THREAD_SUPPORT
/** Lock-free atomic scalar built on the GCC/Clang __sync intrinsics
    (selected by the preceding compiler-version preprocessor guard).
    Reconstructed into compilable form from an extraction-mangled listing;
    members not visible in the fragment were taken from the trailing member
    index (constructors, assignment) — confirm against upstream.
    @note T must be an integral type the __sync builtins support
          (1/2/4/8 bytes).
*/
template<class T> class AtomicScalar
{
    public:

    /// Initialise the wrapped value.
    AtomicScalar (const T &initial)
        : mField(initial)
    {   }

    /// Copy the current value of another atomic scalar (the copy itself
    /// is a plain read, not an atomic operation on the pair).
    AtomicScalar (const AtomicScalar<T> &cousin)
        : mField(cousin.mField)
    {   }

    /// Default-construct; the wrapped value is left indeterminate.
    AtomicScalar () {   }

    /// Assign from another atomic scalar (plain store, not a CAS).
    void operator= (const AtomicScalar<T> &cousin)
    {
        mField = cousin.mField;
    }

    /// Read the current value; a stale value may be observed, reads do
    /// not synchronise with writers.
    T get (void) const
    {
        return mField;
    }

    /// Plain store of a new value.
    void set (const T &v)
    {
        mField = v;
    }

    /// Atomically replace the value with @p nu iff it currently equals
    /// @p old. @return true when the swap happened.
    bool cas (const T &old, const T &nu)
    {
        return __sync_bool_compare_and_swap (&mField, old, nu);
    }

    /// Atomic pre-increment: returns the NEW value (add_and_fetch).
    T operator++ (void)
    {
        return __sync_add_and_fetch (&mField, 1);
    }

    /// Atomic pre-decrement: returns the NEW value.
    T operator-- (void)
    {
        return __sync_add_and_fetch (&mField, -1);
    }

    /// Atomic post-increment: returns the PREVIOUS value (fetch_and_add).
    T operator++ (int)
    {
        return __sync_fetch_and_add (&mField, 1);
    }

    /// Atomic post-decrement: returns the PREVIOUS value.
    T operator-- (int)
    {
        return __sync_fetch_and_add (&mField, -1);
    }

    /// Atomic add: returns the NEW value.
    T operator+= (const T &add)
    {
        return __sync_add_and_fetch (&mField, add);
    }

    /// The wrapped value; volatile so every access performs a real
    /// memory read/write.
    volatile T mField;
};
199 #elif OGRE_COMPILER == OGRE_COMPILER_MSVC && OGRE_COMP_VER >= 1400 && OGRE_THREAD_SUPPORT
201 #ifndef WIN32_LEAN_AND_MEAN
202 # define WIN32_LEAN_AND_MEAN
204 #if !defined(NOMINMAX) && defined(_MSC_VER)
205 # define NOMINMAX // required to stop windows.h messing up std::min
218 template<
class T>
class AtomicScalar
234 void operator= (
const AtomicScalar<T> &cousin)
244 void set (
const T &v)
249 bool cas (
const T &old,
const T &nu)
252 return _InterlockedCompareExchange16((SHORT*)&
mField, static_cast<SHORT>(nu), static_cast<SHORT>(old)) ==
static_cast<SHORT
>(old);
254 else if (
sizeof(T)==4)
256 return _InterlockedCompareExchange((LONG*)&
mField, static_cast<LONG>(nu), static_cast<LONG>(old)) ==
static_cast<LONG
>(old);
258 else if (
sizeof(T)==8) {
259 return _InterlockedCompareExchange64((LONGLONG*)&
mField, static_cast<LONGLONG>(nu), static_cast<LONGLONG>(old)) ==
static_cast<LONGLONG
>(old);
269 return InterlockedIncrement16((SHORT*)&
mField);
270 }
else if (
sizeof(T)==4) {
271 return InterlockedIncrement((LONG*)&
mField);
272 }
else if (
sizeof(T)==8) {
273 return InterlockedIncrement64((LONGLONG*)&
mField);
282 return InterlockedDecrement16((SHORT*)&
mField);
283 }
else if (
sizeof(T)==4) {
284 return InterlockedDecrement((LONG*)&
mField);
285 }
else if (
sizeof(T)==8) {
286 return InterlockedDecrement64((LONGLONG*)&
mField);
295 return InterlockedIncrement16((SHORT*)&
mField)-1;
296 }
else if (
sizeof(T)==4) {
297 return InterlockedIncrement((LONG*)&
mField)-1;
298 }
else if (
sizeof(T)==8) {
299 return InterlockedIncrement64((LONGLONG*)&
mField)-1;
308 return InterlockedDecrement16((SHORT*)&
mField)+1;
309 }
else if (
sizeof(T)==4) {
310 return InterlockedDecrement((LONG*)&
mField)+1;
311 }
else if (
sizeof(T)==8) {
312 return InterlockedDecrement64((LONGLONG*)&
mField)+1;
329 }
while (!
cas(newVal - add, newVal));
386 bool cas (
const T &old,
const T &nu)
389 if (
mField != old)
return false;
OGRE_AUTO_MUTEX volatile T mField
AtomicObject(const T &initial)
void operator=(const AtomicObject< T > &cousin)
AtomicObject(const AtomicObject< T > &cousin)
AtomicScalar(const AtomicScalar< T > &cousin)
bool cas(const T &old, const T &nu)
#define OGRE_LOCK_AUTO_MUTEX
#define OGRE_EXCEPT(num, desc, src)
void operator=(const AtomicScalar< T > &cousin)
bool cas(const T &old, const T &nu)
T operator+=(const T &add)
T operator+=(const T &add)
OGRE_AUTO_MUTEX volatile T mField
AtomicScalar(const T &initial)