[LTP] [PATCH 3/4] tst_atomic: add atomic_add_return for x86/64, ppc/64 and s390/x

Jan Stancek jstancek@redhat.com
Wed Apr 13 15:10:45 CEST 2016


In case __sync_add_and_fetch is not provided by the compiler, we
try to supply our own implementation.
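
For illustration, a minimal usage sketch of the API this header
provides follows; the helper name, the hard-coded child count and the
tst_resm() reporting are just an example and not part of this patch:

	#include "test.h"
	#include "tst_atomic.h"

	static int done_count;

	/* Each child bumps the shared counter; atomic_add_return()
	 * returns the new value, so only the last child to finish
	 * sees the full count. */
	static void child_done(void)
	{
		if (atomic_add_return(1, &done_count) == 4)
			tst_resm(TINFO, "all 4 children finished");
	}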

The fallback implementation has been taken from the kernel sources,
by compiling a small piece of code that calls atomic_add_return()
with "gcc -E" and applying some formatting to make the output more
readable.
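
As a rough sketch of that extraction step (the file name is made up
and preprocessing kernel headers like this normally needs a kernel
build tree, so the include path is illustrative only):

	/* expand.c - run a caller of atomic_add_return() through the
	 * preprocessor so the arch-specific definition lands in the
	 * preprocessed output, ready to be copied and cleaned up */
	#include <asm/atomic.h>

	int expand_me(atomic_t *v)
	{
		return atomic_add_return(1, v);
	}

	/* $ gcc -E expand.c > expand.i */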

Signed-off-by: Jan Stancek <jstancek@redhat.com>
---
 include/tst_atomic.h | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 76 insertions(+), 1 deletion(-)

diff --git a/include/tst_atomic.h b/include/tst_atomic.h
index b989c10cb23b..e842f5500876 100644
--- a/include/tst_atomic.h
+++ b/include/tst_atomic.h
@@ -21,11 +21,86 @@
 #include "config.h"
 
 #if HAVE_SYNC_ADD_AND_FETCH == 1
+#define HAVE_ATOMIC_ADD_RETURN 1
 static inline __attribute__((always_inline)) int atomic_add_return(int i, int *v)
 {
 	return __sync_add_and_fetch(v, i);
 }
-#else
+
+#else /* HAVE_SYNC_ADD_AND_FETCH == 1 */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define HAVE_ATOMIC_ADD_RETURN 1
+extern void __xadd_wrong_size(void);
+static inline __attribute__((always_inline)) int atomic_add_return(int i, int *v)
+{
+	int __ret = i;
+
+	switch (sizeof(*v)) {
+	case 1:
+		asm volatile ("lock; xaddb %b0, %1\n"
+			: "+q" (__ret), "+m" (*v) : : "memory", "cc");
+		break;
+	case 2:
+		asm volatile ("lock; xaddw %w0, %1\n"
+			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
+		break;
+	case 4:
+		asm volatile ("lock; xaddl %0, %1\n"
+			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
+		break;
+	case 8:
+		asm volatile ("lock; xaddq %q0, %1\n"
+			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
+		break;
+	default:
+		__xadd_wrong_size();
+	}
+	return i + __ret;
+}
+#endif
+
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define HAVE_ATOMIC_ADD_RETURN 1
+static inline __attribute__((always_inline)) int atomic_add_return(int i, int *v)
+{
+	int t;
+
+	asm volatile(
+		"	sync\n"
+		"1:	lwarx	%0,0,%2		# atomic_add_return\n"
+		"	add %0,%1,%0\n"
+		"	stwcx.	%0,0,%2 \n"
+		"	bne-	1b\n"
+		"	sync\n"
+		: "=&r" (t)
+		: "r" (i), "r" (v)
+		: "cc", "memory");
+	return t;
+}
+#endif
+
+#if defined(__s390__) || defined(__s390x__)
+#define HAVE_ATOMIC_ADD_RETURN 1
+static inline __attribute__((always_inline)) int atomic_add_return(int i, int *v)
+{
+	int old_val, new_val;
+
+	asm volatile(
+		"	l	%0,%2\n"
+		"0:	lr	%1,%0\n"
+		"	ar	%1,%3\n"
+		"	cs	%0,%1,%2\n"
+		"	jl	0b"
+		: "=&d" (old_val), "=&d" (new_val), "+Q" (*v)
+		: "d" (i)
+		: "cc", "memory");
+	return old_val + i;
+}
+#endif
+#endif /* HAVE_SYNC_ADD_AND_FETCH == 1 */
+
+#if !defined(HAVE_ATOMIC_ADD_RETURN)
 #error Your compiler does not provide __sync_add_and_fetch and LTP\
  implementation is missing for your architecture.
 #endif
-- 
1.8.3.1


