author     Szabolcs Nagy <szabolcs.nagy@arm.com>   2021-06-08 12:48:43 +0100
committer  Szabolcs Nagy <szabolcs.nagy@arm.com>   2022-08-05 19:45:19 +0100
commit     c8f1fc9d94f9c311d382833c3393f12c1acb1991 (patch)
tree       9aa7ee0418ee28f9e60079f755ad54559d3bc8b2
parent     7880bbd37436b0acd01936b7ba143d1f13634e85 (diff)
cheri: Implement 128-bit atomics
Arm Morello requires 128-bit atomics: in the pure-capability ABI, pointers
are 16-byte capabilities, so atomic operations on pointer-sized objects
must be 16 bytes wide.
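
(Illustrative note, not part of the commit: a minimal sketch, assuming a
GCC/Clang toolchain with the __atomic builtins this patch uses, of why
purecap code needs 16-byte atomics.  The names slot and try_publish are
hypothetical.  Built purecap for Morello, sizeof (void *) is 16 and the
builtin below must lower to a 128-bit compare-and-exchange; on plain
AArch64 it is the usual 8-byte CAS.)

static void *slot;

/* Publish NEWVAL into SLOT only if SLOT still holds EXPECTED; returns
   nonzero on success.  Under purecap this is a 16-byte (capability)
   compare-and-exchange.  */
static int
try_publish (void *expected, void *newval)
{
  return __atomic_compare_exchange_n (&slot, &expected, newval,
                                      0 /* strong */, __ATOMIC_ACQ_REL,
                                      __ATOMIC_RELAXED);
}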
 include/atomic.h                 | 17 +++++++++++++++--
 sysdeps/aarch64/atomic-machine.h | 21 +++++++++++++++++++++
 2 files changed, 36 insertions(+), 2 deletions(-)
diff --git a/include/atomic.h b/include/atomic.h
index 2cb52c9cfd..140ef2a5a5 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -62,6 +62,8 @@
__atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
else if (sizeof (*mem) == 8) \
__atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
+ else if (sizeof (*mem) == 16) \
+ __atg1_result = pre##_128_##post (mem, __VA_ARGS__); \
else \
abort (); \
__atg1_result; \
@@ -77,6 +79,8 @@
__atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
else if (sizeof (*mem) == 8) \
__atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
+ else if (sizeof (*mem) == 16) \
+ __atg2_result = pre##_128_##post (mem, __VA_ARGS__); \
else \
abort (); \
__atg2_result; \
@@ -540,7 +544,11 @@
/* We require 32b atomic operations; some archs also support 64b atomic
operations. */
void __atomic_link_error (void);
-# if __HAVE_64B_ATOMICS == 1
+# if defined __CHERI_PURE_CAPABILITY__
+# define __atomic_check_size(mem) \
+ if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8) && (sizeof (*mem) != 16)) \
+ __atomic_link_error ();
+# elif __HAVE_64B_ATOMICS == 1
# define __atomic_check_size(mem) \
if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
__atomic_link_error ();
@@ -553,7 +561,12 @@ void __atomic_link_error (void);
need other atomic operations of such sizes, and restricting the support to
loads and stores makes this easier for archs that do not have native
support for atomic operations to less-than-word-sized data. */
-# if __HAVE_64B_ATOMICS == 1
+# if defined __CHERI_PURE_CAPABILITY__
+# define __atomic_check_size_ls(mem) \
+ if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
+ && (sizeof (*mem) != 8) && (sizeof (*mem) != 16)) \
+ __atomic_link_error ();
+# elif __HAVE_64B_ATOMICS == 1
# define __atomic_check_size_ls(mem) \
if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
&& (sizeof (*mem) != 8)) \
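
(Illustrative note, not part of the commit: the hunks above extend
__atomic_check_size and __atomic_check_size_ls with a 16-byte case under
__CHERI_PURE_CAPABILITY__.  Below is a standalone sketch of the
link-error trick they rely on; example_link_error and example_check_size
are hypothetical stand-ins for glibc's names.  The function is declared
but never defined: for a supported size the compiler folds the branch
away, so the undefined symbol is referenced, and the link fails, only
when an unsupported size reaches the macro.)

void example_link_error (void);   /* deliberately never defined */

#define example_check_size(mem)                                       \
  if ((sizeof (*(mem)) != 4) && (sizeof (*(mem)) != 8)                \
      && (sizeof (*(mem)) != 16))                                     \
    example_link_error ();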
diff --git a/sysdeps/aarch64/atomic-machine.h b/sysdeps/aarch64/atomic-machine.h
index 52b3fb2047..14e9481392 100644
--- a/sysdeps/aarch64/atomic-machine.h
+++ b/sysdeps/aarch64/atomic-machine.h
@@ -54,6 +54,13 @@
model, __ATOMIC_RELAXED); \
})
+# define __arch_compare_and_exchange_bool_128_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ })
+
# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
({ \
typeof (*mem) __oldval = (oldval); \
@@ -86,6 +93,14 @@
__oldval; \
})
+# define __arch_compare_and_exchange_val_128_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+ model, __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+
/* Compare and exchange with "acquire" semantics, ie barrier after. */
@@ -118,6 +133,9 @@
# define __arch_exchange_64_int(mem, newval, model) \
__atomic_exchange_n (mem, newval, model)
+# define __arch_exchange_128_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+
# define atomic_exchange_acq(mem, value) \
__atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
@@ -139,6 +157,9 @@
# define __arch_exchange_and_add_64_int(mem, value, model) \
__atomic_fetch_add (mem, value, model)
+# define __arch_exchange_and_add_128_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+
# define atomic_exchange_and_add_acq(mem, value) \
__atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
__ATOMIC_ACQUIRE)
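
(Illustrative note, not part of the commit: a simplified sketch, under a
hypothetical name, of the __atomic_val_bysize-style dispatch that makes
the new __arch_*_128_int macros above reachable.  Token pasting selects
the per-size arch macro from sizeof (*mem); with the 128-bit branch
added, an atomic_exchange_acq on a 16-byte, capability-sized object
expands to __arch_exchange_128_int.  glibc's real macro also carries the
smaller-size branches and its own result-type plumbing.)

#include <stdlib.h>

#define example_val_bysize(pre, post, mem, ...)                        \
  ({                                                                   \
    __typeof (*(mem)) __result;                                        \
    if (sizeof (*(mem)) == 8)                                          \
      __result = pre##_64_##post (mem, __VA_ARGS__);                   \
    else if (sizeof (*(mem)) == 16)                                    \
      __result = pre##_128_##post (mem, __VA_ARGS__);                  \
    else                                                               \
      abort ();  /* unsupported size */                                \
    __result;                                                          \
  })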