From: Richard Henderson
Subject: [PATCH v2 10/27] include/host: Split out atomic128-cas.h
Date: Tue, 23 May 2023 06:47:16 -0700
Separates the aarch64-specific portion into its own file.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
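[Editor's note, not part of the patch: a minimal caller sketch, under the assumption
that callers include "qemu/atomic128.h" as before the split; try_update16 is a
hypothetical helper, not an API from this series.  It shows how the HAVE_CMPXCHG128
guard is meant to be used: the dead branch must be optimized away, matching the
QEMU_ERROR fallback declaration kept in the generic header.]

    #include "qemu/osdep.h"
    #include "qemu/atomic128.h"

    /* Hypothetical helper: install @new at @ptr only if it still holds @old. */
    static bool try_update16(Int128 *ptr, Int128 old, Int128 new)
    {
        if (HAVE_CMPXCHG128) {
            /* atomic16_cmpxchg returns the previous contents of *ptr. */
            Int128 prev = atomic16_cmpxchg(ptr, old, new);
            return int128_eq(prev, old);
        }
        /* Caller must provide a non-atomic (e.g. lock-based) fallback path. */
        return false;
    }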
host/include/aarch64/host/atomic128-cas.h | 43 ++++++++++++++++++
host/include/generic/host/atomic128-cas.h | 43 ++++++++++++++++++
include/qemu/atomic128.h | 55 +----------------------
3 files changed, 87 insertions(+), 54 deletions(-)
create mode 100644 host/include/aarch64/host/atomic128-cas.h
create mode 100644 host/include/generic/host/atomic128-cas.h
diff --git a/host/include/aarch64/host/atomic128-cas.h b/host/include/aarch64/host/atomic128-cas.h
new file mode 100644
index 0000000000..80de58e06d
--- /dev/null
+++ b/host/include/aarch64/host/atomic128-cas.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Compare-and-swap for 128-bit atomic operations, AArch64 version.
+ *
+ * Copyright (C) 2018, 2023 Linaro, Ltd.
+ *
+ * See docs/devel/atomics.rst for discussion about the guarantees each
+ * atomic primitive is meant to provide.
+ */
+
+#ifndef AARCH64_ATOMIC128_CAS_H
+#define AARCH64_ATOMIC128_CAS_H
+
+/* Through gcc 10, aarch64 has no support for 128-bit atomics. */
+#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
+#include "host/include/generic/host/atomic128-cas.h"
+#else
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+ uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
+ uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
+ uint64_t oldl, oldh;
+ uint32_t tmp;
+
+ asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t"
+ "cmp %[oldl], %[cmpl]\n\t"
+ "ccmp %[oldh], %[cmph], #0, eq\n\t"
+ "b.ne 1f\n\t"
+ "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t"
+ "cbnz %w[tmp], 0b\n"
+ "1:"
+ : [mem] "+m"(*ptr), [tmp] "=&r"(tmp),
+ [oldl] "=&r"(oldl), [oldh] "=&r"(oldh)
+ : [cmpl] "r"(cmpl), [cmph] "r"(cmph),
+ [newl] "r"(newl), [newh] "r"(newh)
+ : "memory", "cc");
+
+ return int128_make128(oldl, oldh);
+}
+# define HAVE_CMPXCHG128 1
+#endif
+
+#endif /* AARCH64_ATOMIC128_CAS_H */
diff --git a/host/include/generic/host/atomic128-cas.h b/host/include/generic/host/atomic128-cas.h
new file mode 100644
index 0000000000..513622fe34
--- /dev/null
+++ b/host/include/generic/host/atomic128-cas.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Compare-and-swap for 128-bit atomic operations, generic version.
+ *
+ * Copyright (C) 2018, 2023 Linaro, Ltd.
+ *
+ * See docs/devel/atomics.rst for discussion about the guarantees each
+ * atomic primitive is meant to provide.
+ */
+
+#ifndef HOST_ATOMIC128_CAS_H
+#define HOST_ATOMIC128_CAS_H
+
+#if defined(CONFIG_ATOMIC128)
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+ Int128Alias r, c, n;
+
+ c.s = cmp;
+ n.s = new;
+ r.i = qatomic_cmpxchg__nocheck((__int128_t *)ptr, c.i, n.i);
+ return r.s;
+}
+# define HAVE_CMPXCHG128 1
+#elif defined(CONFIG_CMPXCHG128)
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+ Int128Alias r, c, n;
+
+ c.s = cmp;
+ n.s = new;
+ r.i = __sync_val_compare_and_swap_16((__int128_t *)ptr, c.i, n.i);
+ return r.s;
+}
+# define HAVE_CMPXCHG128 1
+#else
+/* Fallback definition that must be optimized away, or error. */
+Int128 QEMU_ERROR("unsupported atomic")
+ atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
+# define HAVE_CMPXCHG128 0
+#endif
+
+#endif /* HOST_ATOMIC128_CAS_H */
diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h
index d0ba0b9c65..10a2322c44 100644
--- a/include/qemu/atomic128.h
+++ b/include/qemu/atomic128.h
@@ -41,60 +41,7 @@
* Therefore, special case each platform.
*/
-#if defined(CONFIG_ATOMIC128)
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- Int128Alias r, c, n;
-
- c.s = cmp;
- n.s = new;
- r.i = qatomic_cmpxchg__nocheck((__int128_t *)ptr, c.i, n.i);
- return r.s;
-}
-# define HAVE_CMPXCHG128 1
-#elif defined(CONFIG_CMPXCHG128)
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- Int128Alias r, c, n;
-
- c.s = cmp;
- n.s = new;
- r.i = __sync_val_compare_and_swap_16((__int128_t *)ptr, c.i, n.i);
- return r.s;
-}
-# define HAVE_CMPXCHG128 1
-#elif defined(__aarch64__)
-/* Through gcc 8, aarch64 has no support for 128-bit at all. */
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
- uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
- uint64_t oldl, oldh;
- uint32_t tmp;
-
- asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t"
- "cmp %[oldl], %[cmpl]\n\t"
- "ccmp %[oldh], %[cmph], #0, eq\n\t"
- "b.ne 1f\n\t"
- "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t"
- "cbnz %w[tmp], 0b\n"
- "1:"
- : [mem] "+m"(*ptr), [tmp] "=&r"(tmp),
- [oldl] "=&r"(oldl), [oldh] "=&r"(oldh)
- : [cmpl] "r"(cmpl), [cmph] "r"(cmph),
- [newl] "r"(newl), [newh] "r"(newh)
- : "memory", "cc");
-
- return int128_make128(oldl, oldh);
-}
-# define HAVE_CMPXCHG128 1
-#else
-/* Fallback definition that must be optimized away, or error. */
-Int128 QEMU_ERROR("unsupported atomic")
- atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
-# define HAVE_CMPXCHG128 0
-#endif /* Some definition for HAVE_CMPXCHG128 */
-
+#include "host/atomic128-cas.h"
#if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_read(Int128 *ptr)
--
2.34.1