author     Andreas Schwab <schwab@suse.de>  2003-03-22 20:59:49 +0000
committer  Andreas Schwab <schwab@suse.de>  2003-03-22 20:59:49 +0000
commit     50c10df1afb11bb0533388ffe95a22d8dd873f2a (patch)
tree       32c95fd197d75d028dd632bf8da306d1a4c6b0c9 /sysdeps/m68k/m68020/bits/atomic.h
parent     dda8854aa5ca06b6cb271bfb73a2d9a7ea1b60f5 (diff)
Atomic operations for m68020 and up.
Diffstat (limited to 'sysdeps/m68k/m68020/bits/atomic.h')
-rw-r--r--  sysdeps/m68k/m68020/bits/atomic.h  227
1 file changed, 227 insertions(+), 0 deletions(-)
diff --git a/sysdeps/m68k/m68020/bits/atomic.h b/sysdeps/m68k/m68020/bits/atomic.h
new file mode 100644
index 0000000000..09c33c19c8
--- /dev/null
+++ b/sysdeps/m68k/m68020/bits/atomic.h
@@ -0,0 +1,227 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Andreas Schwab <schwab@suse.de>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <stdint.h>
+
+
+typedef int8_t atomic8_t;
+typedef uint8_t uatomic8_t;
+typedef int_fast8_t atomic_fast8_t;
+typedef uint_fast8_t uatomic_fast8_t;
+
+typedef int16_t atomic16_t;
+typedef uint16_t uatomic16_t;
+typedef int_fast16_t atomic_fast16_t;
+typedef uint_fast16_t uatomic_fast16_t;
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+typedef int_fast64_t atomic_fast64_t;
+typedef uint_fast64_t uatomic_fast64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
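+/* Compare-and-exchange: if *MEM equals OLDVAL, store NEWVAL into it; the
+   result is the value *MEM held before the operation.  These use the CAS
+   instruction available on the 68020 and later.  */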
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*(mem)) __ret; \
+ __asm __volatile ("cas%.b %0,%2,%1" \
+ : "=d" (__ret), "=m" (*(mem)) \
+ : "d" (newval), "m" (*(mem)), "0" (oldval)); \
+ __ret; })
+
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*(mem)) __ret; \
+ __asm __volatile ("cas%.w %0,%2,%1" \
+ : "=d" (__ret), "=m" (*(mem)) \
+ : "d" (newval), "m" (*(mem)), "0" (oldval)); \
+ __ret; })
+
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*(mem)) __ret; \
+ __asm __volatile ("cas%.l %0,%2,%1" \
+ : "=d" (__ret), "=m" (*(mem)) \
+ : "d" (newval), "m" (*(mem)), "0" (oldval)); \
+ __ret; })
+
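+/* The 8-byte variant uses CAS2 on the two 32-bit halves; the second
+   address operand addresses the low-order word at MEM + 4.  */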
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ ({ __typeof (*(mem)) __ret; \
+ __typeof (mem) __memp = (mem); \
+ __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)" \
+ : "=d" (__ret) \
+ : "d" (newval), "r" (__memp), \
+ "r" ((char *) __memp + 4), "0" (oldval) \
+ : "memory"); \
+ __ret; })
+
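+/* Atomically replace *MEM with NEWVALUE and return the previous value,
+   retrying the CAS (or CAS2 for 8-byte objects) until it succeeds.  */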
+#define atomic_exchange(mem, newvalue) \
+ ({ __typeof (*(mem)) __result = *(mem); \
+ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("1: cas%.b %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "=m" (*(mem)) \
+ : "d" (newvalue), "m" (*(mem)), "0" (__result)); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("1: cas%.w %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "=m" (*(mem)) \
+ : "d" (newvalue), "m" (*(mem)), "0" (__result)); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("1: cas%.l %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "=m" (*(mem)) \
+ : "d" (newvalue), "m" (*(mem)), "0" (__result)); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);" \
+ " jbne 1b" \
+ : "=d" (__result) \
+ : "d" (newval), "r" (__memp), \
+ "r" ((char *) __memp + 4), "0" (__result) \
+ : "memory"); \
+ } \
+ __result; })
+
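+/* Atomically add VALUE to *MEM and return the previous value: the sum is
+   built in a scratch register and installed with CAS, looping while some
+   other thread changes *MEM in between.  */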
+#define atomic_exchange_and_add(mem, value) \
+ ({ __typeof (*(mem)) __result = *(mem); \
+ __typeof (*(mem)) __temp; \
+ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("1: move%.b %0,%2;" \
+ " add%.b %3,%2;" \
+ " cas%.b %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "=m" (*(mem)), \
+ "=&d" (__temp) \
+ : "d" (value), "1" (*(mem)), "0" (__result)); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("1: move%.w %0,%2;" \
+ " add%.w %3,%2;" \
+ " cas%.w %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "=m" (*(mem)), \
+ "=&d" (__temp) \
+ : "d" (value), "1" (*(mem)), "0" (__result)); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("1: move%.l %0,%2;" \
+ " add%.l %3,%2;" \
+ " cas%.l %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "=m" (*(mem)), \
+ "=&d" (__temp) \
+ : "d" (value), "1" (*(mem)), "0" (__result)); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __asm __volatile ("1: move%.l %0,%1;" \
+ " move%.l %R0,%R1;" \
+ " add%.l %2,%1;" \
+ " addx%.l %R2,%R1;" \
+ " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
+ " jbne 1b" \
+ : "=d" (__result), "=&d" (__temp) \
+ : "d" (value), "r" (__memp), \
+ "r" ((char *) __memp + 4), "0" (__result) \
+ : "memory"); \
+ } \
+ __result; })
+
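+/* Add VALUE to *MEM without returning the old value.  For 1-, 2- and
+   4-byte objects a single read-modify-write ADD on memory is used; 8-byte
+   objects go through a CAS2 loop.  */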
+#define atomic_add(mem, value) \
+ (void) ({ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("add%.b %1,%0" \
+ : "=m" (*(mem)) \
+ : "id" (value), "0" (*(mem))); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("add%.w %1,%0" \
+ : "=m" (*(mem)) \
+ : "id" (value), "0" (*(mem))); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("add%.l %1,%0" \
+ : "=m" (*(mem)) \
+ : "id" (value), "0" (*(mem))); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __oldval = *__memp; \
+ __typeof (*(mem)) __temp; \
+ __asm __volatile ("1: move%.l %0,%1;" \
+ " move%.l %R0,%R1;" \
+ " add%.l %2,%1;" \
+ " addx%.l %R2,%R1;" \
+ " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
+ " jbne 1b" \
+ : "=d" (__oldval), "=&d" (__temp) \
+ : "d" (value), "r" (__memp), \
+ "r" ((char *) __memp + 4), "0" (__oldval) \
+ : "memory"); \
+ } \
+ })
+
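+/* Decrement *MEM by one; the result byte is set with SCS from the
+   condition codes of the subtraction.  */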
+#define atomic_decrement_and_test(mem) \
+ ({ char __result; \
+ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("subq%.b %#1,%1; scs %0" \
+ : "=dm" (__result), "=m" (*(mem)) \
+ : "1" (*(mem))); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("subq%.w %#1,%1; scs %0" \
+ : "=dm" (__result), "=m" (*(mem)) \
+ : "1" (*(mem))); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("subq%.l %#1,%1; scs %0" \
+ : "=dm" (__result), "=m" (*(mem)) \
+ : "1" (*(mem))); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __oldval = *__memp; \
+ __typeof (*(mem)) __temp; \
+ __asm __volatile ("1: move%.l %1,%2;" \
+ " move%.l %R1,%R2;" \
+ " subq%.l %#1,%2;" \
+ " subx%.l %5,%R2;" \
+ " scs %0;" \
+ " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \
+ " jbne 1b" \
+ : "=&dm" (__result), "=d" (__oldval), \
+ "=&d" (__temp) \
+ : "r" (__memp), "r" ((char *) __memp + 4), \
+ "d" (0), "1" (__oldval) \
+ : "memory"); \
+ } \
+ __result; })
+
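+/* Set bit number BIT of *MEM.  BFSET counts bit offsets from the most
+   significant bit, so the LSB-based bit number is converted first.  */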
+#define atomic_bit_set(mem, bit) \
+ __asm __volatile ("bfset %0{%1,#1}" \
+ : "=m" (*(mem)) \
+ : "di" (sizeof (*(mem)) * 8 - (bit) - 1), "m" (*(mem)))
+
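+/* Set bit number BIT of *MEM and return whether it was set before; BFSET
+   leaves the previous field value in the condition codes, which SNE turns
+   into the result byte.  */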
+#define atomic_bit_test_set(mem, bit) \
+ ({ char __result; \
+ __asm __volatile ("bfset %1{%2,#1}; sne %0" \
+ : "=dm" (__result), "=m" (*(mem)) \
+ : "di" (sizeof (*(mem)) * 8 - (bit) - 1), \
+ "m" (*(mem))); \
+ __result; })