[rtems-libbsd commit] ck: Use relaxed memory order if possible

Sebastian Huber sebh at rtems.org
Fri Sep 21 08:39:02 UTC 2018


Module:    rtems-libbsd
Branch:    master
Commit:    be6515d2c1086c76ee965c6300cc5873c76afab9
Changeset: http://git.rtems.org/rtems-libbsd/commit/?id=be6515d2c1086c76ee965c6300cc5873c76afab9

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Mon Sep 17 07:38:32 2018 +0200

ck: Use relaxed memory order if possible

In uniprocessor configurations we can use a relaxed memory order; compiler
memory barriers should be sufficient in that case.

Update #3472.

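The following standalone sketch (not part of the commit) illustrates the
pattern introduced by the change: on SMP the GCC/Clang __atomic builtins
keep a sequentially consistent order, while on uniprocessor targets a
relaxed order plus a compiler-only barrier is used. The names EXAMPLE_SMP,
EXAMPLE_ORDER, example_fence() and example_cas_uint() are hypothetical
stand-ins for RTEMS_SMP, CK_PR_ATOMIC_ORDER and the ck_pr_* inlines in the
diff below.

#include <stdbool.h>

#ifdef EXAMPLE_SMP
#define EXAMPLE_ORDER __ATOMIC_SEQ_CST
#else
#define EXAMPLE_ORDER __ATOMIC_RELAXED
#endif

static inline void
example_fence(void)
{
#ifdef EXAMPLE_SMP
	/* Full hardware fence: other processors could otherwise observe the
	 * surrounding accesses in a different order. */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
	/* Compiler barrier only: the compiler must not reorder accesses
	 * across this point, but no fence instruction is emitted, which is
	 * enough with a single processor. */
	__asm__ __volatile__("" ::: "memory");
#endif
}

static inline bool
example_cas_uint(unsigned int *target, unsigned int compare, unsigned int set)
{
	/* Success and failure orders both follow EXAMPLE_ORDER, matching the
	 * CK_PR_ATOMIC_ORDER substitution in the diff below. */
	return __atomic_compare_exchange_n(target, &compare, set, false,
	    EXAMPLE_ORDER, EXAMPLE_ORDER);
}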
---

 freebsd/sys/contrib/ck/include/gcc/ck_pr.h | 35 +++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 10 deletions(-)

diff --git a/freebsd/sys/contrib/ck/include/gcc/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/ck_pr.h
index 60ae8d4..5354423 100644
--- a/freebsd/sys/contrib/ck/include/gcc/ck_pr.h
+++ b/freebsd/sys/contrib/ck/include/gcc/ck_pr.h
@@ -32,6 +32,11 @@
 #endif
 
 #include <ck_cc.h>
+#ifdef RTEMS_SMP
+#define CK_PR_ATOMIC_ORDER __ATOMIC_SEQ_CST
+#else
+#define CK_PR_ATOMIC_ORDER __ATOMIC_RELAXED
+#endif
 
 CK_CC_INLINE static void
 ck_pr_barrier(void)
@@ -122,12 +127,21 @@ ck_pr_stall(void)
 /*
  * Load and store fences are equivalent to full fences in the GCC port.
  */
-#define CK_PR_FENCE(T)					\
-	CK_CC_INLINE static void			\
-	ck_pr_fence_strict_##T(void)			\
-	{						\
-		__atomic_thread_fence(__ATOMIC_SEQ_CST);\
+#ifdef RTEMS_SMP
+#define CK_PR_FENCE(T)						\
+	CK_CC_INLINE static void				\
+	ck_pr_fence_strict_##T(void)				\
+	{							\
+		__atomic_thread_fence(CK_PR_ATOMIC_ORDER);	\
 	}
+#else
+#define CK_PR_FENCE(T)						\
+	CK_CC_INLINE static void				\
+	ck_pr_fence_strict_##T(void)				\
+	{							\
+		__asm__ __volatile__("" ::: "memory");		\
+	}
+#endif
 
 CK_PR_FENCE(atomic)
 CK_PR_FENCE(atomic_atomic)
@@ -159,7 +173,7 @@ CK_PR_FENCE(unlock)
 	{									\
 		bool z;								\
 		z = __atomic_compare_exchange_n((T *)target, &compare, set,	\
-		    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);			\
+		    false, CK_PR_ATOMIC_ORDER, CK_PR_ATOMIC_ORDER);		\
 		return z;							\
 	}
 
@@ -186,7 +200,7 @@ ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *v)
 {
 	*(void **)v = compare;
 	return __atomic_compare_exchange_n((void **)target, v, set, false,
-	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+	    CK_PR_ATOMIC_ORDER, CK_PR_ATOMIC_ORDER);
 }
 
 #define CK_PR_CAS_O(S, T)						\
@@ -195,7 +209,7 @@ ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *v)
 	{								\
 		*v = compare;						\
 		return __atomic_compare_exchange_n(target, v, set,	\
-		    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);		\
+		    false, CK_PR_ATOMIC_ORDER, CK_PR_ATOMIC_ORDER);	\
 	}
 
 CK_PR_CAS_O(char, char)
@@ -216,7 +230,7 @@ CK_PR_CAS_O(8,  uint8_t)
 	ck_pr_faa_##S(M *target, T d)				\
 	{							\
 		d = __atomic_fetch_add((T *)target, d,		\
-		    __ATOMIC_SEQ_CST);				\
+		    CK_PR_ATOMIC_ORDER);			\
 		return (d);					\
 	}
 
@@ -243,7 +257,7 @@ CK_PR_FAA_S(8,  uint8_t)
 	ck_pr_##K##_##S(M *target, T d)				\
 	{							\
 		d = __atomic_fetch_##K((T *)target, d,		\
-		    __ATOMIC_SEQ_CST);				\
+		    CK_PR_ATOMIC_ORDER);			\
 		return;						\
 	}
 
@@ -296,5 +310,6 @@ CK_PR_UNARY_S(8, uint8_t)
 
 #undef CK_PR_UNARY_S
 #undef CK_PR_UNARY
+#undef CK_PR_ATOMIC_ORDER
 #endif /* !CK_F_PR */
 #endif /* CK_PR_GCC_H */
