[PATCH 3/7] update all architectures to new atomic implementation

WeiY wei.a.yang at gmail.com
Mon Jul 15 15:31:10 UTC 2013


---
 cpukit/score/cpu/arm/rtems/score/cpuatomic.h     |   10 +-
 cpukit/score/cpu/avr/rtems/score/cpuatomic.h     |   10 +-
 cpukit/score/cpu/bfin/rtems/score/cpuatomic.h    |   10 +-
 cpukit/score/cpu/h8300/rtems/score/cpuatomic.h   |   10 +-
 cpukit/score/cpu/i386/rtems/score/cpuatomic.h    |  319 +------------
 cpukit/score/cpu/lm32/rtems/score/cpuatomic.h    |   10 +-
 cpukit/score/cpu/m32c/rtems/score/cpuatomic.h    |   10 +-
 cpukit/score/cpu/m32r/rtems/score/cpuatomic.h    |   10 +-
 cpukit/score/cpu/m68k/rtems/score/cpuatomic.h    |   10 +-
 cpukit/score/cpu/mips/rtems/score/cpuatomic.h    |   10 +-
 cpukit/score/cpu/moxie/rtems/score/cpuatomic.h   |   10 +-
 cpukit/score/cpu/nios2/rtems/score/cpuatomic.h   |   10 +-
 cpukit/score/cpu/powerpc/rtems/score/cpuatomic.h |  519 +---------------------
 cpukit/score/cpu/sh/rtems/score/cpuatomic.h      |   12 +-
 cpukit/score/cpu/sparc/rtems/score/cpuatomic.h   |   10 +-
 cpukit/score/cpu/sparc64/rtems/score/cpuatomic.h |   10 +-
 cpukit/score/cpu/v850/rtems/score/cpuatomic.h    |   10 +-
 17 files changed, 79 insertions(+), 911 deletions(-)
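
For reference, below is a minimal sketch of how the removed per-architecture
operations map onto C11 <stdatomic.h>, which the shared
<rtems/score/cpustdatomic.h> header is based on. This is an illustration
only, not part of the patch, and it does not use the real cpustdatomic.h
names (those are not shown here); the _CPU_Atomic_* names in the comments
refer to the FreeBSD-derived operations deleted below.

/*
 * Standalone C11 illustration: acquire/release load and store, fetch-add,
 * and compare-and-exchange -- the operation families removed from the i386
 * and powerpc headers by this patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
  atomic_uint counter;
  atomic_init(&counter, 0u);

  /* roughly _CPU_Atomic_Store_rel_int(p, v): store with release ordering */
  atomic_store_explicit(&counter, 5u, memory_order_release);

  /* roughly _CPU_Atomic_Load_acq_int(p): load with acquire ordering */
  unsigned int seen = atomic_load_explicit(&counter, memory_order_acquire);

  /* roughly _CPU_Atomic_Fetch_add_int(p, v): atomic fetch-and-add */
  atomic_fetch_add_explicit(&counter, 3u, memory_order_relaxed);

  /* roughly _CPU_Atomic_Compare_exchange_int(p, expect, new): strong CAS */
  unsigned int expected = 8u;
  bool swapped = atomic_compare_exchange_strong_explicit(
    &counter, &expected, 10u,
    memory_order_acq_rel, memory_order_acquire);

  printf("seen=%u swapped=%d final=%u\n", seen, (int) swapped,
         atomic_load_explicit(&counter, memory_order_relaxed));
  return 0;
}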

diff --git a/cpukit/score/cpu/arm/rtems/score/cpuatomic.h b/cpukit/score/cpu/arm/rtems/score/cpuatomic.h
index 227b3ce..77b9a81 100644
--- a/cpukit/score/cpu/arm/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/arm/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/avr/rtems/score/cpuatomic.h b/cpukit/score/cpu/avr/rtems/score/cpuatomic.h
index d815021..f35cdb8 100644
--- a/cpukit/score/cpu/avr/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/avr/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/bfin/rtems/score/cpuatomic.h b/cpukit/score/cpu/bfin/rtems/score/cpuatomic.h
index ab52dc4..e293458 100644
--- a/cpukit/score/cpu/bfin/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/bfin/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/h8300/rtems/score/cpuatomic.h b/cpukit/score/cpu/h8300/rtems/score/cpuatomic.h
index fd80645..760bbb4 100644
--- a/cpukit/score/cpu/h8300/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/h8300/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/i386/rtems/score/cpuatomic.h b/cpukit/score/cpu/i386/rtems/score/cpuatomic.h
index eff6a35..1656a8f 100644
--- a/cpukit/score/cpu/i386/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/i386/rtems/score/cpuatomic.h
@@ -1,46 +1,17 @@
 /**
  * @file  rtems/score/cpuatomic.h
- * 
- * This include file implements the atomic operations for i386 and defines 
+ *
+ * This include file implements the atomic operations for i386 and defines
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
- * operations API file atomic.h. Most of the parts of implementations are 
- * imported from FreeBSD kernel.
- */
-
-/*
- * Copyright (c) 1998 Doug Rabson
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
+ * operations API file atomic.h. If the architecture works at the UP mode it
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -52,286 +23,12 @@ extern "C" {
 
 /**@{*/
 
-#if defined(RTEMS_SMP)
-#define	MPLOCKED	"lock ; "
+#if !defined(RTEMS_SMP)
+# error "Now atomic implementation only supports SMP mode."
 #else
-#define	MPLOCKED
+#include <rtems/score/cpustdatomic.h>
 #endif
 
-#if !defined(RTEMS_SMP)
-/*
- * We assume that a = b will do atomic loads and stores.  However, on a
- * PentiumPro or higher, reads may pass writes, so for that case we have
- * to use a serializing instruction (i.e. with LOCK) to do the load in
- * SMP kernels.  For UP kernels, however, the cache of the single processor
- * is always consistent, so we only need to take care of compiler.
- */
-#define	ATOMIC_STORE_LOAD(NAME, TYPE, LOP, SOP)               \
-static inline Atomic_##TYPE                                   \
-_CPU_Atomic_Load_##NAME(volatile Atomic_##TYPE *p)            \
-{                                                             \
-  Atomic_##TYPE tmp;                                          \
-                                                              \
-  tmp = *p;                                                   \
-  __asm __volatile("" : : : "memory");                        \
-  return (tmp);                                               \
-}                                                             \
-                                                              \
-static inline _CPU_Atomic_Load_acq_##NAME(volatile Atomic_##TYPE *p)  \
-{                                                             \
-  Atomic_##TYPE tmp;                                          \
-                                                              \
-  tmp = *p;                                                   \
-  __asm __volatile("" : : : "memory");                        \
-  return (tmp);                                               \
-}                                                             \
-                                                              \
-static inline void                                            \
-_CPU_Atomic_Store_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
-{                                                             \
-  __asm __volatile("" : : : "memory");                        \
-  *p = v;                                                     \
-}                                                             \
-                                                              \
-static inline void                                            \
-_CPU_Atomic_Store_rel_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
-{                                                             \
-  __asm __volatile("" : : : "memory");                        \
-  *p = v;                                                     \
-}                                                             \
-
-#else /* !(!SMP) */
-
-#define	ATOMIC_STORE_LOAD(NAME, TYPE, LOP, SOP)               \
-static inline Atomic_##TYPE                                   \
-_CPU_Atomic_Load_##NAME(volatile Atomic_##TYPE *p)            \
-{                                                             \
-  Atomic_##TYPE res;                                          \
-                                                              \
-  __asm __volatile(MPLOCKED LOP                               \
-  : "=a" (res),                 /* 0 */                       \
-  "=m" (*p)                     /* 1 */                       \
-  : "m" (*p)                    /* 2 */                       \
-  : "memory", "cc");                                          \
-                                                              \
-  return (res);                                               \
-}                                                             \
-                                                              \
-static inline Atomic_##TYPE                                   \
-_CPU_Atomic_Load_acq_##NAME(volatile Atomic_##TYPE *p)        \
-{                                                             \
-  Atomic_##TYPE res;                                          \
-                                                              \
-  __asm __volatile(MPLOCKED LOP                               \
-  : "=a" (res),			/* 0 */                       \
-  "=m" (*p)			/* 1 */                       \
-  : "m" (*p)			/* 2 */                       \
-  : "memory", "cc");                                          \
-                                                              \
-  return (res);                                               \
-}                                                             \
-                                                              \
-/*                                                            \
- * The XCHG instruction asserts LOCK automagically.           \
- */                                                           \
-static inline void                                            \
-_CPU_Atomic_Store_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
-{                                                             \
-  __asm __volatile(SOP                                        \
-  : "=m" (*p),                  /* 0 */                       \
-  "+r" (v)                      /* 1 */                       \
-  : "m" (*p)                    /* 2 */                       \
-  : "memory");                                                \
-}                                                             \
-static inline void					      \
-_CPU_Atomic_Store_rel_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
-{                                                             \
-  __asm __volatile(SOP                                        \
-  : "=m" (*p),			/* 0 */                       \
-  "+r" (v)			/* 1 */		              \
-  : "m" (*p)			/* 2 */	                      \
-  : "memory");                                                \
-}                                                             \
-
-#endif /* !SMP */
-
-/*
- * The assembly is volatilized to avoid code chunk removal by the compiler.
- * GCC aggressively reorders operations and memory clobbering is necessary
- * in order to avoid that for memory barriers.
- */
-#define	ATOMIC_FETCH_GENERIC(NAME, TYPENAME, TYPE, OP, CONS, V)               \
-static inline void                                                            \
-_CPU_Atomic_Fetch_##NAME##_##TYPENAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
-{                                                                             \
-  __asm __volatile(MPLOCKED OP                                                \
-  : "=m" (*p)                                                                 \
-  : CONS (V), "m" (*p)                                                        \
-  : "cc");                                                                    \
-}                                                                             \
-                                                                              \
-static inline void                                                            \
-_CPU_Atomic_Fetch_##NAME##_barr_##TYPENAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v)\
-{                                                                             \
-  __asm __volatile(MPLOCKED OP                                                \
-  : "=m" (*p)                                                                 \
-  : CONS (V), "m" (*p)                                                        \
-  : "memory", "cc");                                                          \
-}                                                                             \
-
-/*
- * Atomic compare and set, used by the mutex functions
- *
- * if (*dst == expect) *dst = src (all 32 bit words)
- *
- * Returns 0 on failure, non-zero on success
- */
-static inline int
-_CPU_Atomic_Compare_exchange_int(volatile Atomic_Int *dst, Atomic_Int expect, Atomic_Int src)
-{
-  unsigned char res;
-
-  __asm __volatile(
-  "    " MPLOCKED "    "
-  "    cmpxchgl %2,%1 ;    "
-  "    sete	%0 ;       "
-  "1:                      "
-  "# atomic_cmpset_int"
-  : "=a" (res),              /* 0 */
-    "=m" (*dst)              /* 1 */
-  : "r" (src),               /* 2 */
-    "a" (expect),            /* 3 */
-    "m" (*dst)               /* 4 */
-  : "memory", "cc");
-
-  return (res);
-}
-
-static inline int
-_CPU_Atomic_Compare_exchange_long(volatile Atomic_Long *dst, Atomic_Long expect, Atomic_Long src)
-{
-
-  return (_CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)dst, (Atomic_Int)expect,
-         (Atomic_Int)src));
-}
-
-ATOMIC_STORE_LOAD(int, Int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
-ATOMIC_STORE_LOAD(long, Long,	"cmpxchgl %0,%1",  "xchgl %1,%0");
-
-ATOMIC_FETCH_GENERIC(add, int, Int, "addl %1,%0", "ir", v);
-ATOMIC_FETCH_GENERIC(sub, int, Int, "subl %1,%0", "ir", v);
-ATOMIC_FETCH_GENERIC(or,  int, Int, "orl %1,%0",  "ir", v);
-ATOMIC_FETCH_GENERIC(and, int, Int, "andl %1,%0", "ir", v);
-
-ATOMIC_FETCH_GENERIC(add, long, Long, "addl %1,%0", "ir", v);
-ATOMIC_FETCH_GENERIC(sub, long, Long, "subl %1,%0", "ir", v);
-ATOMIC_FETCH_GENERIC(or,  long, Long, "orl %1,%0",  "ir", v);
-ATOMIC_FETCH_GENERIC(and, long, Long, "andl %1,%0", "ir", v);
-
-#define	_CPU_Atomic_Fetch_or_acq_int		_CPU_Atomic_Fetch_or_barr_int
-#define	_CPU_Atomic_Fetch_or_rel_int		_CPU_Atomic_Fetch_or_barr_int
-#define	_CPU_Atomic_Fetch_and_acq_int		_CPU_Atomic_Fetch_and_barr_int
-#define	_CPU_Atomic_Fetch_and_rel_int		_CPU_Atomic_Fetch_and_barr_int
-#define	_CPU_Atomic_Fetch_add_acq_int		_CPU_Atomic_Fetch_add_barr_int
-#define	_CPU_Atomic_Fetch_add_rel_int		_CPU_Atomic_Fetch_add_barr_int
-#define	_CPU_Atomic_Fetch_sub_acq_int		_CPU_Atomic_Fetch_sub_barr_int
-#define	_CPU_Atomic_Fetch_sub_rel_int		_CPU_Atomic_Fetch_sub_barr_int
-#define	_CPU_Atomic_Compare_exchange_acq_int  _CPU_Atomic_Compare_exchange_int
-#define	_CPU_Atomic_Compare_exchange_rel_int  _CPU_Atomic_Compare_exchange_int
-
-#define	_CPU_Atomic_Fetch_or_acq_long		_CPU_Atomic_Fetch_or_barr_long
-#define	_CPU_Atomic_Fetch_or_rel_long		_CPU_Atomic_Fetch_or_barr_long
-#define	_CPU_Atomic_Fetch_and_acq_long		_CPU_Atomic_Fetch_and_barr_long
-#define	_CPU_Atomic_Fetch_and_rel_long		_CPU_Atomic_Fetch_and_barr_long
-#define	_CPU_Atomic_Fetch_add_acq_long		_CPU_Atomic_Fetch_add_barr_long
-#define	_CPU_Atomic_Fetch_add_rel_long		_CPU_Atomic_Fetch_add_barr_long
-#define	_CPU_Atomic_Fetch_sub_acq_long	        _CPU_Atomic_Fetch_sub_barr_long
-#define	_CPU_Atomic_Fetch_sub_rel_long	        _CPU_Atomic_Fetch_sub_barr_long
-#define	_CPU_Atomic_Compare_exchange_acq_long _CPU_Atomic_Compare_exchange_long
-#define	_CPU_Atomic_Compare_exchange_rel_long _CPU_Atomic_Compare_exchange_long
-
-/* Operations on 32-bit double words. */
-#define	_CPU_Atomic_Fetch_or_32(p, v)      \
-    _CPU_Atomic_Fetch_or_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_or_acq_32(p, v)  \
-    _CPU_Atomic_Fetch_or_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_or_rel_32(p, v)  \
-    _CPU_Atomic_Fetch_or_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_and_32(p, v)     \
-    _CPU_Atomic_Fetch_and_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_and_acq_32(p, v) \
-    _CPU_Atomic_Fetch_and_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_and_rel_32(p, v) \
-    _CPU_Atomic_Fetch_and_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_add_32(p, v)     \
-    _CPU_Atomic_Fetch_add_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_add_acq_32(p, v) \
-    _CPU_Atomic_Fetch_add_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_add_rel_32(p, v) \
-    _CPU_Atomic_Fetch_add_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_sub_32(p, v)     \
-    _CPU_Atomic_Fetch_sub_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_sub_acq_32(p, v) \
-    _CPU_Atomic_Fetch_sub_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_sub_rel_32(p, v) \
-    _CPU_Atomic_Fetch_sub_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Load_32(p)             \
-    _CPU_Atomic_Load_int((volatile Atomic_Int *)(p))
-#define	_CPU_Atomic_Load_acq_32(p)         \
-    _CPU_Atomic_Load_acq_int((volatile Atomic_Int *)(p))
-#define _CPU_Atomic_Store_32(p, v)         \
-    _CPU_Atomic_Store_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Store_rel_32(p, v)     \
-    _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Compare_exchange_32(dst, old, new)  \
-    _CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
-#define	_CPU_Atomic_Compare_exchange_acq_32(dst, old, new)  \
-    _CPU_Atomic_Compare_exchange_acq_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
-#define	_CPU_Atomic_Compare_exchange_rel_32(dst, old, new)  \
-    _CPU_Atomic_Compare_exchange_rel_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
-
-/* Operations on pointers. */
-#define	_CPU_Atomic_Fetch_or_ptr(p, v)     \
-    _CPU_Atomic_Fetch_or_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_or_acq_ptr(p, v) \
-    _CPU_Atomic_Fetch_or_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_or_rel_ptr(p, v) \
-    _CPU_Atomic_Fetch_or_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_and_ptr(p, v)    \
-    _CPU_Atomic_Fetch_and_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_and_acq_ptr(p, v)\
-    _CPU_Atomic_Fetch_and_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_and_rel_ptr(p, v)\
-    _CPU_Atomic_Fetch_and_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_add_ptr(p, v)    \
-    _CPU_Atomic_Fetch_add_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_add_acq_ptr(p, v)\
-    _CPU_Atomic_Fetch_add_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_add_rel_ptr(p, v)\
-    _CPU_Atomic_Fetch_add_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_sub_ptr(p, v)    \
-    _CPU_Atomic_Fetch_sub_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_sub_acq_ptr(p, v)\
-    _CPU_Atomic_Fetch_sub_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define	_CPU_Atomic_Fetch_sub_rel_ptr(p, v)\
-    _CPU_Atomic_Fetch_sub_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Load_ptr(p)            \
-    _CPU_Atomic_Load_int((volatile Atomic_Int *)(p))
-#define	_CPU_Atomic_Load_acq_ptr(p)        \
-    _CPU_Atomic_Load_acq_int((volatile Atomic_Int *)(p))
-#define _CPU_Atomic_Store_ptr(p, v)        \
-    _CPU_Atomic_Store_int((volatile Atomic_Int *)(p), (v))
-#define	_CPU_Atomic_Store_rel_ptr(p, v)    \
-    _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)(p), (v))
-#define	_CPU_Atomic_Compare_exchange_ptr(dst, old, new) \
-    _CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
-#define	_CPU_Atomic_Compare_exchange_acq_ptr(dst, old, new) \
-    _CPU_Atomic_Compare_exchange_acq_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), \
-            (Atomic_Int)(new))
-#define	_CPU_Atomic_Compare_exchange_rel_ptr(dst, old, new) \
-    _CPU_Atomic_Compare_exchange_rel_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), \
-            (Atomic_Int)(new))
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/cpukit/score/cpu/lm32/rtems/score/cpuatomic.h b/cpukit/score/cpu/lm32/rtems/score/cpuatomic.h
index 71c477f..5886f4e 100644
--- a/cpukit/score/cpu/lm32/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/lm32/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/m32c/rtems/score/cpuatomic.h b/cpukit/score/cpu/m32c/rtems/score/cpuatomic.h
index c90f3f6..6264b90 100644
--- a/cpukit/score/cpu/m32c/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/m32c/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/m32r/rtems/score/cpuatomic.h b/cpukit/score/cpu/m32r/rtems/score/cpuatomic.h
index 4aff532..58b1e56 100644
--- a/cpukit/score/cpu/m32r/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/m32r/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/m68k/rtems/score/cpuatomic.h b/cpukit/score/cpu/m68k/rtems/score/cpuatomic.h
index 3b82a4d..92fc4db 100644
--- a/cpukit/score/cpu/m68k/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/m68k/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/mips/rtems/score/cpuatomic.h b/cpukit/score/cpu/mips/rtems/score/cpuatomic.h
index 8de5a55..8f2c8bd 100644
--- a/cpukit/score/cpu/mips/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/mips/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/moxie/rtems/score/cpuatomic.h b/cpukit/score/cpu/moxie/rtems/score/cpuatomic.h
index be3988e..23d0b09 100644
--- a/cpukit/score/cpu/moxie/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/moxie/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/nios2/rtems/score/cpuatomic.h b/cpukit/score/cpu/nios2/rtems/score/cpuatomic.h
index a97d5b1..354a70d 100644
--- a/cpukit/score/cpu/nios2/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/nios2/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/powerpc/rtems/score/cpuatomic.h b/cpukit/score/cpu/powerpc/rtems/score/cpuatomic.h
index 0ffb447..7588586 100644
--- a/cpukit/score/cpu/powerpc/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/powerpc/rtems/score/cpuatomic.h
@@ -1,49 +1,17 @@
 /**
  * @file  rtems/score/cpuatomic.h
- * 
- * This include file implements the atomic operations for PowerPC and defines 
+ *
+ * This include file implements the atomic operations for powerpc and defines
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
- * operations API file atomic.h. Most of the parts of implementations are 
- * imported from FreeBSD kernel.
- */
-
-/*
- * Copyright (c) 2008 Marcel Moolenaar
- * Copyright (c) 2001 Benno Rice
- * Copyright (c) 2001 David E. O'Brien
- * Copyright (c) 1998 Doug Rabson
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
+ * operations API file atomic.h. If the architecture works at the UP mode it
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -55,478 +23,11 @@ extern "C" {
 
 /**@{*/
 
-#define	__ATOMIC_BARRIER                   \
-    __asm __volatile("sync" : : : "memory")
-
-#define mb()	__ATOMIC_BARRIER
-#define	wmb()	mb()
-#define	rmb()	mb()
-
-/*
- * Atomic_Fetch_add(p, v)
- * { *p += v; }
- */
-#define __CPU_Atomic_Fetch_add_int(p, v, t)         \
-  __asm __volatile(                                 \
-     "1:lwarx	%0, 0, %2\n"                        \
-     "	add	%0, %3, %0\n"                       \
-     "	stwcx.	%0, 0, %2\n"                        \
-     "	bne-	1b\n"                               \
-     : "=&r" (t), "=m" (*p)                         \
-     : "r" (p), "r" (v), "m" (*p)                   \
-     : "cc", "memory")                              \
-     /* __CPU_Atomic_Fetch_add_int */
-
-#define	__CPU_Atomic_Fetch_add_long(p, v, t)        \
-  __asm __volatile(                                 \
-     "1:lwarx	%0, 0, %2\n"                        \
-     "	add	%0, %3, %0\n"                       \
-     "	stwcx.	%0, 0, %2\n"                        \
-     "	bne-	1b\n"                               \
-     : "=&r" (t), "=m" (*p)                         \
-     : "r" (p), "r" (v), "m" (*p)                   \
-     : "cc", "memory")                              \
-     /* __CPU_Atomic_Fetch_add_long */
-
-#define	_ATOMIC_ADD(typename, type)                 \
-  static __inline void                              \
-  _CPU_Atomic_Fetch_add_##typename(volatile Atomic_##type *p, Atomic_##type v) {  \
-    Atomic_##type t;                                \
-    __CPU_Atomic_Fetch_add_##typename(p, v, t);     \
-  }                                                 \
-                                                    \
-  static __inline void                              \
-  _CPU_Atomic_Fetch_add_acq_##typename(volatile Atomic_##type *p, Atomic_##type v) { \
-    Atomic_##type t;                                \
-   __CPU_Atomic_Fetch_add_##typename(p, v, t);      \
-   __ATOMIC_BARRIER;                                \
-  }                                                 \
-                                                    \
-  static __inline void                              \
-  _CPU_Atomic_Fetch_add_rel_##typename(volatile Atomic_##type *p, Atomic_##type v) { \
-    Atomic_##type t;                                \
-    __ATOMIC_BARRIER;                               \
-    __CPU_Atomic_Fetch_add_##typename(p, v, t);     \
-  }                                                 \
-  /* _ATOMIC_ADD */
-
-_ATOMIC_ADD(int, Int)
-_ATOMIC_ADD(long, Long)
-
-#define _CPU_Atomic_Fetch_add_32(p, v)      \
-    _CPU_Atomic_Fetch_add_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_add_acq_32(p, v)  \
-    _CPU_Atomic_Fetch_add_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_add_rel_32(p, v)  \
-    _CPU_Atomic_Fetch_add_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-#define _CPU_Atomic_Fetch_add_ptr(p, v)     \
-    _CPU_Atomic_Fetch_add_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_add_acq_ptr(p, v) \
-    _CPU_Atomic_Fetch_add_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_add_rel_ptr(p, v) \
-    _CPU_Atomic_Fetch_add_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-#undef _ATOMIC_ADD
-#undef __CPU_Atomic_Fetch_add_long
-#undef __CPU_Atomic_Fetch_add_int
-
-/*
- * Atomic_Fetch_and(p, v)
- * { *p &= v; }
- */
-
-#define __CPU_Atomic_Fetch_and_int(p, v, t)                     \
-  __asm __volatile(                                             \
-  "1:	lwarx	%0, 0, %2\n"                                    \
-  "	and	%0, %0, %3\n"                                   \
-  "	stwcx.	%0, 0, %2\n"                                    \
-  "	bne-	1b\n"                                           \
-  : "=&r" (t), "=m" (*p)                                        \
-  : "r" (p), "r" (v), "m" (*p)                                  \
-  : "cc", "memory")                                             \
-  /* _CPU_Atomic_Fetch_and_int */
-
-#define	__CPU_Atomic_Fetch_and_long(p, v, t)                    \
-  __asm __volatile(                                             \
-  "1:	lwarx	%0, 0, %2\n"                                    \
-  "	and	%0, %0, %3\n"                                   \
-  "	stwcx.	%0, 0, %2\n"                                    \
-  "	bne-	1b\n"                                           \
-  : "=&r" (t), "=m" (*p)                                        \
-  : "r" (p), "r" (v), "m" (*p)                                  \
-  : "cc", "memory")                                             \
-  /* _CPU_Atomic_Fetch_and_long */
-
-#define	_ATOMIC_AND(typename, type)                             \
-  static __inline void                                          \
-  _CPU_Atomic_Fetch_and_##typename(volatile Atomic_##type *p, Atomic_##type v) {  \
-    Atomic_##type t;                                            \
-    __CPU_Atomic_Fetch_and_##typename(p, v, t);                 \
-  }                                                             \
-                                                                \
-  static __inline void                                          \
-  _CPU_Atomic_Fetch_and_acq_##typename(volatile Atomic_##type *p, Atomic_##type v) {  \
-    Atomic_##type t;                                            \
-    __CPU_Atomic_Fetch_and_##typename(p, v, t);                 \
-    __ATOMIC_BARRIER;                                           \
-  }                                                             \
-                                                                \
-  static __inline void                                          \
-  _CPU_Atomic_Fetch_and_rel_##typename(volatile Atomic_##type *p, Atomic_##type v) {  \
-    Atomic_##type t;                                            \
-    __ATOMIC_BARRIER;                                           \
-    __CPU_Atomic_Fetch_and_##typename(p, v, t);                 \
-  }                                                             \
-  /* _ATOMIC_AND */
-
-
-_ATOMIC_AND(int, Int)
-_ATOMIC_AND(long, Long)
-
-#define _CPU_Atomic_Fetch_and_32(p, v)      \
-    _CPU_Atomic_Fetch_and_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_and_acq_32(p, v)  \
-    _CPU_Atomic_Fetch_and_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_and_rel_32(p, v)  \
-    _CPU_Atomic_Fetch_and_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-#define _CPU_Atomic_Fetch_and_ptr(p, v)     \
-    _CPU_Atomic_Fetch_and_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_and_acq_ptr(p, v) \
-    _CPU_Atomic_Fetch_and_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_and_rel_ptr(p, v) \
-    _CPU_Atomic_Fetch_and_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-
-#undef _ATOMIC_AND
-#undef __CPU_Atomic_Fetch_and_long
-#undef __CPU_Atomic_Fetch_and_int
-
-/*
- * Atomic_Fetch_or(p, v)
- * { *p |= v; }
- */
-
-#define __CPU_Atomic_Fetch_or_int(p, v, t)              \
-  __asm __volatile(                                     \
-  "1:	lwarx	%0, 0, %2\n"                            \
-  "	or	%0, %3, %0\n"                           \
-  "	stwcx.	%0, 0, %2\n"                            \
-  "	bne-	1b\n"                                   \
-  : "=&r" (t), "=m" (*p)                                \
-  : "r" (p), "r" (v), "m" (*p)                          \
-  : "cc", "memory")                                     \
-  /* __CPU_Atomic_Fetch_or_int */
-
-#define	__CPU_Atomic_Fetch_or_long(p, v, t)             \
-  __asm __volatile(                                     \
-  "1:	lwarx	%0, 0, %2\n"                            \
-  "	or	%0, %3, %0\n"                           \
-  "	stwcx.	%0, 0, %2\n"                            \
-  "	bne-	1b\n"                                   \
-  : "=&r" (t), "=m" (*p)                                \
-  : "r" (p), "r" (v), "m" (*p)                          \
-  : "cc", "memory")                                     \
-  /* __CPU_Atomic_Fetch_or_long */
-
-#define	_ATOMIC_OR(typename, type)                      \
-  static __inline void                                  \
-  _CPU_Atomic_Fetch_or_##typename(volatile Atomic_##type *p, Atomic_##type v) {  \
-    Atomic_##type t;                                    \
-    __CPU_Atomic_Fetch_or_##typename(p, v, t);          \
-  }                                                     \
-                                                        \
-  static __inline void                                  \
-  _CPU_Atomic_Fetch_or_acq_##typename(volatile Atomic_##type *p, Atomic_##type v) { \
-    Atomic_##type t;                                    \
-    __CPU_Atomic_Fetch_or_##typename(p, v, t);          \
-    __ATOMIC_BARRIER;                                   \
-  }                                                     \
-                                                        \
-  static __inline void                                  \
-  _CPU_Atomic_Fetch_or_rel_##typename(volatile Atomic_##type *p, Atomic_##type v) {	\
-    Atomic_##type t;                                    \
-    __ATOMIC_BARRIER;                                   \
-    __CPU_Atomic_Fetch_or_##typename(p, v, t);          \
-  }                                                     \
-  /* _ATOMIC_OR */
-
-_ATOMIC_OR(int, Int)
-_ATOMIC_OR(long, Long)
-
-#define _CPU_Atomic_Fetch_or_32(p, v)      \
-    _CPU_Atomic_Fetch_or_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_or_acq_32(p, v)  \
-    _CPU_Atomic_Fetch_or_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_or_rel_32(p, v)  \
-    _CPU_Atomic_Fetch_or_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-#define _CPU_Atomic_Fetch_or_ptr(p, v)     \
-    _CPU_Atomic_Fetch_or_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_or_acq_ptr(p, v) \
-    _CPU_Atomic_Fetch_or_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_or_rel_ptr(p, v) \
-    _CPU_Atomic_Fetch_or_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-#undef _ATOMIC_OR
-#undef __CPU_Atomic_Fetch_or_long
-#undef __CPU_Atomic_Fetch_or_int
-
-/*
- * Atomic_Fetch_sub(p, v)
- * { *p -= v; }
- */
-
-#define __CPU_Atomic_Fetch_sub_int(p, v, t)             \
-  __asm __volatile(                                     \
-  "1:	lwarx	%0, 0, %2\n"                            \
-  "	subf	%0, %3, %0\n"                           \
-  "	stwcx.	%0, 0, %2\n"                            \
-  "	bne-	1b\n"                                   \
-  : "=&r" (t), "=m" (*p)                                \
-  : "r" (p), "r" (v), "m" (*p)                          \
-  : "cc", "memory")                                     \
-  /* __CPU_Atomic_Fetch_sub_int */
-
-#define	__CPU_Atomic_Fetch_sub_long(p, v, t)            \
-  __asm __volatile(                                     \
-  "1:	lwarx	%0, 0, %2\n"                            \
-  "	subf	%0, %3, %0\n"                           \
-  "	stwcx.	%0, 0, %2\n"                            \
-  "	bne-	1b\n"                                   \
-  : "=&r" (t), "=m" (*p)                                \
-  : "r" (p), "r" (v), "m" (*p)                          \
-  : "cc", "memory")                                     \
-  /* __CPU_Atomic_Fetch_sub_long */
-
-#define	_ATOMIC_SUB(typename, type)                     \
-  static __inline void                                  \
-  _CPU_Atomic_Fetch_sub_##typename(volatile Atomic_##type *p, Atomic_##type v) {     \
-    Atomic_##type t;                                    \
-    __CPU_Atomic_Fetch_sub_##typename(p, v, t);         \
-  }                                                     \
-                                                        \
-  static __inline void                                  \
-  _CPU_Atomic_Fetch_sub_acq_##typename(volatile Atomic_##type *p, Atomic_##type v) { \
-    Atomic_##type t;                                    \
-    __CPU_Atomic_Fetch_sub_##typename(p, v, t);         \
-    __ATOMIC_BARRIER;                                   \
-  }                                                     \
-                                                        \
-  static __inline void                                  \
-  _CPU_Atomic_Fetch_sub_rel_##typename(volatile Atomic_##type *p, Atomic_##type v) { \
-    Atomic_##type t;                                    \
-    __ATOMIC_BARRIER;                                   \
-    __CPU_Atomic_Fetch_sub_##typename(p, v, t);         \
-  }                                                     \
-  /* _ATOMIC_SUB */
-
-
-_ATOMIC_SUB(int, Int)
-_ATOMIC_SUB(long, Long)
-
-#define _CPU_Atomic_Fetch_sub_32(p, v)      \
-    _CPU_Atomic_Fetch_sub_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_sub_acq_32(p, v)  \
-    _CPU_Atomic_Fetch_sub_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_sub_rel_32(p, v)  \
-    _CPU_Atomic_Fetch_sub_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-#define _CPU_Atomic_Fetch_sub_ptr(p, v)     \
-    _CPU_Atomic_Fetch_sub_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_sub_acq_ptr(p, v) \
-    _CPU_Atomic_Fetch_sub_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Fetch_sub_rel_ptr(p, v) \
-    _CPU_Atomic_Fetch_sub_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-#undef _ATOMIC_SUB
-#undef __CPU_Atomic_Fetch_sub_long
-#undef __CPU_Atomic_Fetch_sub_int
-
-/*
- * We assume that a = b will do atomic loads and stores.
- */
-#define	ATOMIC_STORE_LOAD(TYPENAME, TYPE)                       \
-static __inline Atomic_##TYPE                                   \
-_CPU_Atomic_Load_##TYPENAME(volatile Atomic_##TYPE *p)          \
-{                                                               \
-  Atomic_##TYPE v;                                              \
-                                                                \
-  v = *p;                                                       \
-  return (v);                                                   \
-}                                                               \
-static __inline Atomic_##TYPE                                   \
-_CPU_Atomic_Load_acq_##TYPENAME(volatile Atomic_##TYPE *p)      \
-{                                                               \
-  Atomic_##TYPE v;                                              \
-                                                                \
-  v = *p;                                                       \
-  __ATOMIC_BARRIER;                                             \
-  return (v);                                                   \
-}                                                               \
-                                                                \
-static __inline void                                            \
-_CPU_Atomic_Store_##TYPENAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v)      \
-{                                                               \
-  *p = v;                                                       \
-}                                                               \
-static __inline void                                            \
-_CPU_Atomic_Store_rel_##TYPENAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v)  \
-{                                                               \
-  __ATOMIC_BARRIER;                                             \
-  *p = v;                                                       \
-}
-
-ATOMIC_STORE_LOAD(int, Int)
-
-#define _CPU_Atomic_Load_32(p)      \
-    _CPU_Atomic_Load_int((volatile Atomic_Int *)(p))
-#define _CPU_Atomic_Load_acq_32(p)  \
-    _CPU_Atomic_Load_acq_int((volatile Atomic_Int *)(p))
-#define _CPU_Atomic_Store_32(p, v)  \
-    _CPU_Atomic_Store_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-#define _CPU_Atomic_Store_rel_32(p, v)  \
-    _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
-
-static __inline Atomic_Long
-_CPU_Atomic_Load_long(volatile Atomic_Long *addr)
-{
-  return ((Atomic_Long)_CPU_Atomic_Load_int((volatile Atomic_Int *)addr));
-}
-
-static __inline Atomic_Long
-_CPU_Atomic_Load_acq_long(volatile Atomic_Long *addr)
-{
-  return ((Atomic_Long)_CPU_Atomic_Load_acq_int((volatile Atomic_Int *)addr));
-}
-
-static __inline void
-_CPU_Atomic_Store_long(volatile Atomic_Long *addr, Atomic_Long val)
-{
-  _CPU_Atomic_Store_int((volatile Atomic_Int *)addr, (Atomic_Int)val);
-}
-
-static __inline void
-_CPU_Atomic_Store_rel_long(volatile Atomic_Long *addr, Atomic_Long val)
-{
-  _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)addr, (Atomic_Int)val);
-}
-
-#define _CPU_Atomic_Load_ptr(p)     \
-    _CPU_Atomic_Load_int((volatile Atomic_Int *)(p))
-#define _CPU_Atomic_Load_acq_ptr(p) \
-    _CPU_Atomic_Load_acq_int((volatile Atomic_Int *)(p))
-#define _CPU_Atomic_Store_ptr(p, v) \
-    _CPU_Atomic_Store_int((volatile Atomic_Int *)(p), (v))
-#define _CPU_Atomic_Store_rel_ptr(p, v) \
-    _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)(p), (v))
-
-
-#undef ATOMIC_STORE_LOAD
-
-/*
- *  * Atomically compare the value stored at *p with cmpval and if the
- *   * two values are equal, update the value of *p with newval. Returns
- *    * zero if the compare failed, nonzero otherwise.
- *     */
-static __inline int
-_CPU_Atomic_Compare_exchange_int(volatile Atomic_Int* p, Atomic_Int cmpval, Atomic_Int newval)
-{
-  int ret;
-
-  __asm __volatile (
-    "1:\tlwarx %0, 0, %2\n\t"    /* load old value */
-    "cmplw %3, %0\n\t"           /* compare */
-    "bne 2f\n\t"                 /* exit if not equal */
-    "stwcx. %4, 0, %2\n\t"       /* attempt to store */
-    "bne- 1b\n\t"                /* spin if failed */
-    "li %0, 1\n\t"               /* success - retval = 1 */
-    "b 3f\n\t"                   /* we've succeeded */
-    "2:\n\t"
-    "stwcx. %0, 0, %2\n\t"       /* clear reservation (74xx) */
-    "li %0, 0\n\t"               /* failure - retval = 0 */
-    "3:\n\t"
-    : "=&r" (ret), "=m" (*p)
-    : "r" (p), "r" (cmpval), "r" (newval), "m" (*p)
-    : "cc", "memory");
-
-  return (ret);
-}
-static __inline int
-_CPU_Atomic_Compare_exchange_long(volatile Atomic_Long* p, Atomic_Long cmpval, Atomic_Long newval)
-{
-  int ret;
-
-  __asm __volatile (
-    "1:\tlwarx %0, 0, %2\n\t"    /* load old value */
-    "cmplw %3, %0\n\t"           /* compare */
-    "bne 2f\n\t"                 /* exit if not equal */
-    "stwcx. %4, 0, %2\n\t"       /* attempt to store */
-    "bne- 1b\n\t"                /* spin if failed */
-    "li %0, 1\n\t"               /* success - retval = 1 */
-    "b 3f\n\t"                   /* we've succeeded */
-    "2:\n\t"
-    "stwcx. %0, 0, %2\n\t"       /* clear reservation (74xx) */
-    "li %0, 0\n\t"               /* failure - retval = 0 */
-    "3:\n\t"
-    : "=&r" (ret), "=m" (*p)
-    : "r" (p), "r" (cmpval), "r" (newval), "m" (*p)
-    : "cc", "memory");
-
-  return (ret);
-}
-
-static __inline int
-_CPU_Atomic_Compare_exchange_acq_int(volatile Atomic_Int *p, Atomic_Int cmpval, Atomic_Int newval)
-{
-  int retval;
-
-  retval = _CPU_Atomic_Compare_exchange_int(p, cmpval, newval);
-  __ATOMIC_BARRIER;
-  return (retval);
-}
-
-static __inline int
-_CPU_Atomic_Compare_exchange_rel_int(volatile Atomic_Int *p, Atomic_Int cmpval, Atomic_Int newval)
-{
-  __ATOMIC_BARRIER;
-  return (_CPU_Atomic_Compare_exchange_int(p, cmpval, newval));
-}
-
-static __inline int
-_CPU_Atomic_Compare_exchange_acq_long(volatile Atomic_Long *p, Atomic_Long cmpval, Atomic_Long newval)
-{
-  Atomic_Long retval;
-
-  retval = _CPU_Atomic_Compare_exchange_long(p, cmpval, newval);
-  __ATOMIC_BARRIER;
-  return (retval);
-}
-
-static __inline int
-_CPU_Atomic_Compare_exchange_rel_long(volatile Atomic_Long *p, Atomic_Long cmpval, Atomic_Long newval)
-{
-  __ATOMIC_BARRIER;
-  return (_CPU_Atomic_Compare_exchange_long(p, cmpval, newval));
-}
-
-#define _CPU_Atomic_Compare_exchange_32(dst, old, new)      \
-    _CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
-#define _CPU_Atomic_Compare_exchange_acq_32(dst, old, new)  \
-    _CPU_Atomic_Compare_exchange_acq_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
-#define _CPU_Atomic_Compare_exchange_rel_32(dst, old, new)  \
-    _CPU_Atomic_Compare_exchange_rel_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
-
-#define _CPU_Atomic_Compare_exchange_ptr(dst, old, new)     \
-		_CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
-#define _CPU_Atomic_Compare_exchange_acq_ptr(dst, old, new) \
-		_CPU_Atomic_Compare_exchange_acq_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), \
-				(Atomic_Int)(new))
-#define _CPU_Atomic_Compare_exchange_rel_ptr(dst, old, new) \
-		_CPU_Atomic_Compare_exchange_rel_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), \
-				(Atomic_Int)(new))
-
+#if !defined(RTEMS_SMP)
+# error "Now atomic implementation only supports SMP mode."
+#else
+#include <rtems/score/cpustdatomic.h>
+#endif
 
 #ifdef __cplusplus
 }
diff --git a/cpukit/score/cpu/sh/rtems/score/cpuatomic.h b/cpukit/score/cpu/sh/rtems/score/cpuatomic.h
index 78b62ff..635406b 100644
--- a/cpukit/score/cpu/sh/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/sh/rtems/score/cpuatomic.h
@@ -1,20 +1,17 @@
 /**
  * @file  rtems/score/cpuatomic.h
  *
- * This include file implements the atomic operations for SH and defines
+ * This include file implements the atomic operations for sh and defines
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/sparc/rtems/score/cpuatomic.h b/cpukit/score/cpu/sparc/rtems/score/cpuatomic.h
index 73ff393..0010320 100644
--- a/cpukit/score/cpu/sparc/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/sparc/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/sparc64/rtems/score/cpuatomic.h b/cpukit/score/cpu/sparc64/rtems/score/cpuatomic.h
index 25a4de1..c74d75c 100644
--- a/cpukit/score/cpu/sparc64/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/sparc64/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/v850/rtems/score/cpuatomic.h b/cpukit/score/cpu/v850/rtems/score/cpuatomic.h
index c7a735c..5143219 100644
--- a/cpukit/score/cpu/v850/rtems/score/cpuatomic.h
+++ b/cpukit/score/cpu/v850/rtems/score/cpuatomic.h
@@ -5,16 +5,13 @@
  * atomic data types which are used by the atomic operations API file. This
  * file should use fixed name cpuatomic.h and should be included in atomic
  * operations API file atomic.h. If the architecture works at the UP mode it
- * will use a generic atomic ops using disable/enable-IRQ simulated. If the
- * the architecture works at SMP mode, most of the parts of implementations
- * are imported from FreeBSD kernel.
+ * will not define atomic operations. If the architecture works in SMP mode,
+ * most of the implementation is based on stdatomic.h.
  */
 
 #ifndef _RTEMS_SCORE_ATOMIC_CPU_H
 #define _RTEMS_SCORE_ATOMIC_CPU_H
 
-#include <rtems/score/genericcpuatomic.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +24,9 @@ extern "C" {
 /**@{*/
 
 #if !defined(RTEMS_SMP)
-#include <rtems/score/genericatomicops.h>
+# error "Now atomic implementation only supports SMP mode."
 #else
+#include <rtems/score/cpustdatomic.h>
 #endif
 
 #ifdef __cplusplus
-- 
1.7.9.5



