svn commit: r254478 - stable/9/include
Ed Maste <emaste at FreeBSD.org>
Sun Aug 18 08:24:59 UTC 2013
Author: emaste
Date: Sun Aug 18 08:24:58 2013
New Revision: 254478
URL: http://svnweb.freebsd.org/changeset/base/254478
Log:
MFC r240970:
- Make C11 atomic macros usable in expressions (a sketch follows this log):
- Replace do-while statements with void expressions.
- Wrap __asm statements in statement expressions.
- Make the macros function-like:
- Evaluate all arguments exactly once.
- Make sure there's a sequence point between evaluation of the
arguments and the function body. Arguments should be evaluated
before any memory barriers.
- Fix the use of the __atomic_is_lock_free built-in: it requires the address
of an atomic variable as its second argument. Use this built-in on clang as
well, because clang's __c11_atomic_is_lock_free only takes the size of the
variable into account. (A stand-alone sketch of the two-argument built-in
follows the diff.)
- In atomic_exchange_explicit, put the barrier before rather than after the
__sync_lock_test_and_set call. (A sketch of the resulting ordering also
follows the diff.)
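
To make the first two log entries concrete, here is a minimal stand-alone
sketch (not part of the commit; OLD_atomic_init(), NEW_atomic_init() and
struct fake_atomic_int are hypothetical stand-ins mirroring the pre- and
post-change shapes of atomic_init() in the diff below):

/*
 * Illustrative sketch only; not part of the commit.
 */
#include <stdio.h>

struct fake_atomic_int { int __val; };  /* mirrors the header's wrapper */

/* Old shape: a do-while statement cannot appear inside an expression. */
#define OLD_atomic_init(obj, value) do {                \
        (obj)->__val = (value);                         \
} while (0)

/* New shape: a void expression can be used wherever an expression can. */
#define NEW_atomic_init(obj, value) ((void)((obj)->__val = (value)))

int
main(void)
{
        struct fake_atomic_int a, b;
        int flag = 1;

        /* Both calls are sub-expressions here; the do-while form would be
         * a syntax error in either position. */
        (NEW_atomic_init(&a, 1), NEW_atomic_init(&b, 2));
        flag ? NEW_atomic_init(&a, 3) : NEW_atomic_init(&b, 4);

        printf("%d %d\n", a.__val, b.__val);
        return (0);
}

The "evaluate all arguments exactly once" point is visible in the diff below
as the __o/__d temporaries and the (void)(order) casts in
atomic_exchange_explicit and atomic_store_explicit: each argument is read
once, and before __sync_synchronize() runs.
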
Modified:
stable/9/include/stdatomic.h
Directory Properties:
stable/9/include/ (props changed)
Modified: stable/9/include/stdatomic.h
==============================================================================
--- stable/9/include/stdatomic.h Sun Aug 18 08:18:49 2013 (r254477)
+++ stable/9/include/stdatomic.h Sun Aug 18 08:24:58 2013 (r254478)
@@ -54,9 +54,7 @@
#define atomic_init(obj, value) __c11_atomic_init(obj, value)
#else
#define ATOMIC_VAR_INIT(value) { .__val = (value) }
-#define atomic_init(obj, value) do { \
- (obj)->__val = (value); \
-} while (0)
+#define atomic_init(obj, value) ((void)((obj)->__val = (value)))
#endif
/*
@@ -111,23 +109,24 @@ enum memory_order {
#define atomic_thread_fence(order) __atomic_thread_fence(order)
#define atomic_signal_fence(order) __atomic_signal_fence(order)
#else
-#define atomic_thread_fence(order) __sync_synchronize()
-#define atomic_signal_fence(order) __asm volatile ("" : : : "memory")
+#define atomic_thread_fence(order) ((void)(order), __sync_synchronize())
+#define atomic_signal_fence(order) __extension__ ({ \
+ (void)(order); \
+ __asm volatile ("" ::: "memory"); \
+ (void)0; \
+})
#endif
/*
* 7.17.5 Lock-free property.
*/
-#if defined(__CLANG_ATOMICS)
-#define atomic_is_lock_free(obj) \
- __c11_atomic_is_lock_free(sizeof(obj))
-#elif defined(__GNUC_ATOMICS)
+#if defined(__CLANG_ATOMICS) || defined(__GNUC_ATOMICS)
#define atomic_is_lock_free(obj) \
- __atomic_is_lock_free(sizeof((obj)->__val))
+ __atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val)
#else
#define atomic_is_lock_free(obj) \
- (sizeof((obj)->__val) <= sizeof(void *))
+ ((void)(obj), sizeof((obj)->__val) <= sizeof(void *))
#endif
/*
@@ -234,13 +233,17 @@ typedef _Atomic(__uintmax_t) atomic_uin
__atomic_store_n(&(object)->__val, desired, order)
#else
#define atomic_compare_exchange_strong_explicit(object, expected, \
- desired, success, failure) ({ \
+ desired, success, failure) __extension__ ({ \
__typeof__((object)->__val) __v; \
+ __typeof__(expected) __e; \
_Bool __r; \
+ __e = (expected); \
+ (void)(success); \
+ (void)(failure); \
__v = __sync_val_compare_and_swap(&(object)->__val, \
- *(expected), desired); \
- __r = *(expected) == __v; \
- *(expected) = __v; \
+ *__e, (desired)); \
+ __r = (*__e == __v); \
+ *__e = __v; \
__r; \
})
@@ -250,19 +253,21 @@ typedef _Atomic(__uintmax_t) atomic_uin
desired, success, failure)
#if __has_builtin(__sync_swap)
/* Clang provides a full-barrier atomic exchange - use it if available. */
-#define atomic_exchange_explicit(object, desired, order) \
- __sync_swap(&(object)->__val, desired)
+#define atomic_exchange_explicit(object, desired, order) \
+ ((void)(order), __sync_swap(&(object)->__val, desired))
#else
/*
* __sync_lock_test_and_set() is only an acquire barrier in theory (although in
- * practice it is usually a full barrier) so we need an explicit barrier after
+ * practice it is usually a full barrier) so we need an explicit barrier before
* it.
*/
-#define atomic_exchange_explicit(object, desired, order) ({ \
- __typeof__((object)->__val) __v; \
- __v = __sync_lock_test_and_set(&(object)->__val, desired); \
+#define atomic_exchange_explicit(object, desired, order) \
+__extension__ ({ \
+ __typeof__(object) __o = (object); \
+ __typeof__(desired) __d = (desired); \
+ (void)(order); \
__sync_synchronize(); \
- __v; \
+ __sync_lock_test_and_set(&(__o)->__val, __d); \
})
#endif
#define atomic_fetch_add_explicit(object, operand, order) \
@@ -277,11 +282,14 @@ typedef _Atomic(__uintmax_t) atomic_uin
__sync_fetch_and_xor(&(object)->__val, operand)
#define atomic_load_explicit(object, order) \
__sync_fetch_and_add(&(object)->__val, 0)
-#define atomic_store_explicit(object, desired, order) do { \
+#define atomic_store_explicit(object, desired, order) __extension__ ({ \
+ __typeof__(object) __o = (object); \
+ __typeof__(desired) __d = (desired); \
+ (void)(order); \
__sync_synchronize(); \
- (object)->__val = (desired); \
+ __o->__val = __d; \
__sync_synchronize(); \
-} while (0)
+})
#endif
/*
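
For reference, a minimal stand-alone sketch (not part of the commit) of the
two-argument built-in that the fixed atomic_is_lock_free() expansion uses.
GCC (4.7 and later) and clang provide it as
bool __atomic_is_lock_free(size_t size, void *ptr); passing the object's
address as well as its size lets the compiler take alignment into account,
which the size-only __c11_atomic_is_lock_free cannot do:

/*
 * Illustrative sketch only.  Requires GCC >= 4.7 or clang; the cast to int
 * is just for printf.
 */
#include <stdio.h>

int
main(void)
{
        long x;

        printf("lock-free: %d\n", (int)__atomic_is_lock_free(sizeof(x), &x));
        return (0);
}
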
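Likewise a sketch only (my_exchange_int() is a hypothetical name), showing
the ordering the new atomic_exchange_explicit fallback arranges. Because
__sync_lock_test_and_set() is only guaranteed to be an acquire barrier, the
full barrier must come first: it supplies the missing release ordering, so
the calling thread's earlier stores are visible before the new value is
published. A barrier placed after the swap, as in the old code, cannot
prevent those stores from being reordered past the swap.

/*
 * Illustrative sketch only; my_exchange_int() is hypothetical.
 */
static int
my_exchange_int(volatile int *p, int newval)
{

        __sync_synchronize();                   /* full barrier: release side */
        return (__sync_lock_test_and_set(p, newval));   /* acquire swap */
}

int
main(void)
{
        volatile int v = 0;

        return (my_exchange_int(&v, 1));        /* returns the old value, 0 */
}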