Merge branch '1246-tsan-add-atomic_fetch_and+or_to_isc/stdatomic.h' into 'master'

Add atomic_fetch_and and atomic_fetch_or convenience macros and unix and win32 shims

Closes #1246

See merge request isc-projects/bind9!2397
This commit is contained in:
Ondřej Surý
2019-09-26 06:59:54 -04:00
4 changed files with 140 additions and 9 deletions

View File

@@ -35,6 +35,10 @@
atomic_fetch_add_explicit((o), (v), memory_order_relaxed)
/*
 * Convenience wrappers: *_relaxed(o, v) expands to the corresponding
 * *_explicit call with memory_order_relaxed.
 */
#define atomic_fetch_sub_relaxed(o, v) \
atomic_fetch_sub_explicit((o), (v), memory_order_relaxed)
#define atomic_fetch_or_relaxed(o, v) \
atomic_fetch_or_explicit((o), (v), memory_order_relaxed)
#define atomic_fetch_and_relaxed(o, v) \
atomic_fetch_and_explicit((o), (v), memory_order_relaxed)
#define atomic_exchange_relaxed(o, v) \
atomic_exchange_explicit((o), (v), memory_order_relaxed)
#define atomic_compare_exchange_weak_relaxed(o, e, d) \

View File

@@ -93,25 +93,24 @@ typedef struct atomic_uint_fast64 {
uint64_t v;
} atomic_uint_fast64_t;
/* Mutex-protected stand-in for C11 atomic_bool: every access takes m. */
typedef struct atomic_bool_s {
isc_mutex_t m;
bool v;
} atomic_bool;
/* Static initializer: mutex statically initialized, value set to arg. */
#define ATOMIC_VAR_INIT(arg) \
{ .m = PTHREAD_MUTEX_INITIALIZER, .v = arg }
/*
 * Runtime initializer: create the mutex, then store 'desired' while
 * holding it so the initial write is serialized like any other access.
 */
#define atomic_init(obj, desired) \
{ \
isc_mutex_init(&(obj)->m); \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
(obj)->v = desired; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
}
/*
 * Mutex-protected emulation of C11 atomic_load_explicit(): reads
 * (obj)->v while holding (obj)->m and returns it.  The 'order'
 * argument is ignored.  (The rendered diff had both the old
 * "___v=" and new "___v =" assignment lines; only one belongs here.)
 */
#define atomic_load_explicit(obj, order) \
({ \
typeof((obj)->v) ___v; \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
___v = (obj)->v; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
___v; \
})
@@ -125,7 +124,7 @@ typedef struct atomic_bool_s {
({ \
typeof((obj)->v) ___v; \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
___v= (obj)->v; \
___v = (obj)->v; \
(obj)->v += arg; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
___v;\
@@ -133,11 +132,29 @@ typedef struct atomic_bool_s {
/*
 * Mutex-protected emulation of C11 atomic_fetch_sub_explicit():
 * returns the previous value of (obj)->v and subtracts 'arg' from it
 * while holding (obj)->m.  The 'order' argument is ignored.  (The
 * rendered diff carried both the old "___v=" and new "___v =" lines;
 * the value must be captured exactly once.)
 */
#define atomic_fetch_sub_explicit(obj, arg, order) \
({ typeof((obj)->v) ___v; \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
___v = (obj)->v; \
(obj)->v -= arg; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
___v;\
})
/*
 * Mutex-protected emulation of C11 atomic_fetch_and_explicit():
 * returns the previous value of (obj)->v and ANDs 'arg' into it while
 * holding (obj)->m.  The 'order' argument is ignored -- the mutex
 * already serializes all access.
 */
#define atomic_fetch_and_explicit(obj, arg, order) \
({ \
typeof((obj)->v) ___v; \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
___v = (obj)->v; \
(obj)->v &= arg; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
___v;\
})
/*
 * Mutex-protected emulation of C11 atomic_fetch_or_explicit():
 * same contract as the AND form above, but ORs 'arg' into (obj)->v.
 */
#define atomic_fetch_or_explicit(obj, arg, order) \
({ \
typeof((obj)->v) ___v; \
REQUIRE(isc_mutex_lock(&(obj)->m) == ISC_R_SUCCESS); \
___v = (obj)->v; \
(obj)->v |= arg; \
REQUIRE(isc_mutex_unlock(&(obj)->m) == ISC_R_SUCCESS); \
___v;\
})
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, \
succ, fail) \
({ \
@@ -169,6 +186,10 @@ typedef struct atomic_bool_s {
atomic_fetch_add_explicit(obj, arg, memory_order_seq_cst)
#define atomic_fetch_sub(obj, arg) \
atomic_fetch_sub_explicit(obj, arg, memory_order_seq_cst)
/* seq_cst convenience forms of the fetch-and/fetch-or shims. */
#define atomic_fetch_and(obj, arg) \
atomic_fetch_and_explicit(obj, arg, memory_order_seq_cst)
#define atomic_fetch_or(obj, arg) \
atomic_fetch_or_explicit(obj, arg, memory_order_seq_cst)
#define atomic_compare_exchange_strong(obj, expected, desired) \
atomic_compare_exchange_strong_explicit(obj, expected, desired, \
memory_order_seq_cst, \

View File

@@ -91,6 +91,10 @@ typedef bool atomic_bool;
__c11_atomic_fetch_add(obj, arg, order)
#define atomic_fetch_sub_explicit(obj, arg, order) \
__c11_atomic_fetch_sub(obj, arg, order)
/* Map fetch-and/fetch-or onto the Clang __c11_atomic_* builtins. */
#define atomic_fetch_and_explicit(obj, arg, order) \
__c11_atomic_fetch_and(obj, arg, order)
#define atomic_fetch_or_explicit(obj, arg, order) \
__c11_atomic_fetch_or(obj, arg, order)
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail) \
__c11_atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail)
#define atomic_compare_exchange_weak_explicit(obj, expected, desired, succ, fail) \
@@ -106,6 +110,10 @@ typedef bool atomic_bool;
__atomic_fetch_add(obj, arg, order)
#define atomic_fetch_sub_explicit(obj, arg, order) \
__atomic_fetch_sub(obj, arg, order)
/* Map fetch-and/fetch-or onto the GCC __atomic_* builtins. */
#define atomic_fetch_and_explicit(obj, arg, order) \
__atomic_fetch_and(obj, arg, order)
#define atomic_fetch_or_explicit(obj, arg, order) \
__atomic_fetch_or(obj, arg, order)
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail) \
__atomic_compare_exchange_n(obj, expected, desired, 0, succ, fail)
#define atomic_compare_exchange_weak_explicit(obj, expected, desired, succ, fail) \
@@ -121,10 +129,14 @@ typedef bool atomic_bool;
*obj = desired; \
__sync_synchronize(); \
} while (0);
/*
 * Fallback shims built on the legacy GCC __sync builtins.  These
 * builtins take no memory-order argument (they always act as full
 * barriers), so the 'order' parameter is intentionally discarded for
 * all four operations -- previously sub/and/or forwarded it, relying
 * on the builtins' ignored trailing "..." arguments.
 */
#define atomic_fetch_add_explicit(obj, arg, order) \
	__sync_fetch_and_add(obj, arg)
#define atomic_fetch_sub_explicit(obj, arg, order) \
	__sync_fetch_and_sub(obj, arg)
#define atomic_fetch_and_explicit(obj, arg, order) \
	__sync_fetch_and_and(obj, arg)
#define atomic_fetch_or_explicit(obj, arg, order) \
	__sync_fetch_and_or(obj, arg)
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail) \
({ \
__typeof__(obj) __v; \
@@ -148,6 +160,10 @@ typedef bool atomic_bool;
atomic_fetch_add_explicit(obj, arg, memory_order_seq_cst)
#define atomic_fetch_sub(obj, arg) \
atomic_fetch_sub_explicit(obj, arg, memory_order_seq_cst)
/* seq_cst convenience forms of fetch-and/fetch-or. */
#define atomic_fetch_and(obj, arg) \
atomic_fetch_and_explicit(obj, arg, memory_order_seq_cst)
#define atomic_fetch_or(obj, arg) \
atomic_fetch_or_explicit(obj, arg, memory_order_seq_cst)
#define atomic_compare_exchange_strong(obj, expected, desired) \
atomic_compare_exchange_strong_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst)
#define atomic_compare_exchange_weak(obj, expected, desired) \

View File

@@ -187,7 +187,7 @@ atomic_load_abort() {
: InterlockedExchangeAdd64((atomic_int_fast64_t *)obj, arg))))
#else
/*
 * Fallback when the order-specific Interlocked variants are
 * unavailable: fetch-add must use InterlockedExchangeAdd64 (atomic
 * add returning the old value), not InterlockedExchange64, which
 * would swap the value rather than add to it.
 */
#define atomic_fetch_add_explicit64(obj, arg, order) \
	InterlockedExchangeAdd64((atomic_int_fast64_t *)obj, arg)
#endif
static inline
@@ -215,6 +215,96 @@ atomic_add_abort() {
#define atomic_fetch_sub(obj, arg) \
atomic_fetch_sub_explicit(obj, arg, memory_order_seq_cst)
/*
 * atomic_fetch_and_explicit(): atomically AND 'arg' into *obj and
 * return the previous value, dispatching on operand size to the
 * matching Interlocked primitive.
 *
 * Fix: the 8-bit form must cast obj to a *pointer* --
 * (atomic_int_fast8_t *)obj -- as the 32/64-bit forms do;
 * InterlockedAnd8 operates through a pointer.
 */
#define atomic_fetch_and_explicit8(obj, arg, order) \
	InterlockedAnd8((atomic_int_fast8_t *)obj, arg)
#define atomic_fetch_and_explicit32(obj, arg, order) \
	(order == memory_order_relaxed \
	 ? InterlockedAndNoFence((atomic_int_fast32_t *)obj, arg) \
	 : (order == memory_order_acquire \
	    ? InterlockedAndAcquire((atomic_int_fast32_t *)obj, arg) \
	    : (order == memory_order_release \
	       ? InterlockedAndRelease((atomic_int_fast32_t *)obj, arg) \
	       : InterlockedAnd((atomic_int_fast32_t *)obj, arg))))
#ifdef _WIN64
#define atomic_fetch_and_explicit64(obj, arg, order) \
	(order == memory_order_relaxed \
	 ? InterlockedAnd64NoFence((atomic_int_fast64_t *)obj, arg) \
	 : (order == memory_order_acquire \
	    ? InterlockedAnd64Acquire((atomic_int_fast64_t *)obj, arg) \
	    : (order == memory_order_release \
	       ? InterlockedAnd64Release((atomic_int_fast64_t *)obj, arg) \
	       : InterlockedAnd64((atomic_int_fast64_t *)obj, arg))))
#else
/* Order-specific 64-bit variants unavailable: always a full barrier. */
#define atomic_fetch_and_explicit64(obj, arg, order) \
	InterlockedAnd64((atomic_int_fast64_t *)obj, arg)
#endif
/* Runtime trap for unsupported operand sizes (note: 2-byte operands
 * fall through here -- no InterlockedAnd16 dispatch is provided). */
static inline
int8_t
atomic_and_abort() {
	INSIST(0);
	ISC_UNREACHABLE();
}
#define atomic_fetch_and_explicit(obj, arg, order) \
	(sizeof(*(obj)) == 8 \
	 ? atomic_fetch_and_explicit64(obj, arg, order) \
	 : (sizeof(*(obj)) == 4 \
	    ? atomic_fetch_and_explicit32(obj, arg, order) \
	    : (sizeof(*(obj)) == 1 \
	       ? atomic_fetch_and_explicit8(obj, arg, order) \
	       : atomic_and_abort())))
#define atomic_fetch_and(obj, arg) \
	atomic_fetch_and_explicit(obj, arg, memory_order_seq_cst)
/*
 * atomic_fetch_or_explicit(): atomically OR 'arg' into *obj and
 * return the previous value, dispatching on operand size to the
 * matching Interlocked primitive.
 *
 * Fix: the 8-bit form must cast obj to a *pointer* --
 * (atomic_int_fast8_t *)obj -- as the 32/64-bit forms do;
 * InterlockedOr8 operates through a pointer.
 */
#define atomic_fetch_or_explicit8(obj, arg, order) \
	InterlockedOr8((atomic_int_fast8_t *)obj, arg)
#define atomic_fetch_or_explicit32(obj, arg, order) \
	(order == memory_order_relaxed \
	 ? InterlockedOrNoFence((atomic_int_fast32_t *)obj, arg) \
	 : (order == memory_order_acquire \
	    ? InterlockedOrAcquire((atomic_int_fast32_t *)obj, arg) \
	    : (order == memory_order_release \
	       ? InterlockedOrRelease((atomic_int_fast32_t *)obj, arg) \
	       : InterlockedOr((atomic_int_fast32_t *)obj, arg))))
#ifdef _WIN64
#define atomic_fetch_or_explicit64(obj, arg, order) \
	(order == memory_order_relaxed \
	 ? InterlockedOr64NoFence((atomic_int_fast64_t *)obj, arg) \
	 : (order == memory_order_acquire \
	    ? InterlockedOr64Acquire((atomic_int_fast64_t *)obj, arg) \
	    : (order == memory_order_release \
	       ? InterlockedOr64Release((atomic_int_fast64_t *)obj, arg) \
	       : InterlockedOr64((atomic_int_fast64_t *)obj, arg))))
#else
/* Order-specific 64-bit variants unavailable: always a full barrier. */
#define atomic_fetch_or_explicit64(obj, arg, order) \
	InterlockedOr64((atomic_int_fast64_t *)obj, arg)
#endif
/* Runtime trap for unsupported operand sizes (note: 2-byte operands
 * fall through here -- no InterlockedOr16 dispatch is provided). */
static inline
int8_t
atomic_or_abort() {
	INSIST(0);
	ISC_UNREACHABLE();
}
#define atomic_fetch_or_explicit(obj, arg, order) \
	(sizeof(*(obj)) == 8 \
	 ? atomic_fetch_or_explicit64(obj, arg, order) \
	 : (sizeof(*(obj)) == 4 \
	    ? atomic_fetch_or_explicit32(obj, arg, order) \
	    : (sizeof(*(obj)) == 1 \
	       ? atomic_fetch_or_explicit8(obj, arg, order) \
	       : atomic_or_abort())))
#define atomic_fetch_or(obj, arg) \
	atomic_fetch_or_explicit(obj, arg, memory_order_seq_cst)
static inline bool
atomic_compare_exchange_strong_explicit8(atomic_int_fast8_t *obj,
int8_t *expected,