From: Richard Henderson
Subject: [PULL 12/27] accel/tcg: Extract load_atom_extract_al16_or_al8 to host header
Date: Tue, 30 May 2023 11:59:34 -0700

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
.../generic/host/load-extract-al16-al8.h | 45 +++++++++++++++++++
accel/tcg/ldst_atomicity.c.inc | 36 +--------------
2 files changed, 47 insertions(+), 34 deletions(-)
create mode 100644 host/include/generic/host/load-extract-al16-al8.h
diff --git a/host/include/generic/host/load-extract-al16-al8.h b/host/include/generic/host/load-extract-al16-al8.h
new file mode 100644
index 0000000000..d95556130f
--- /dev/null
+++ b/host/include/generic/host/load-extract-al16-al8.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Atomic extract 64 from 128-bit, generic version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef HOST_LOAD_EXTRACT_AL16_AL8_H
+#define HOST_LOAD_EXTRACT_AL16_AL8_H
+
+/**
+ * load_atom_extract_al16_or_al8:
+ * @pv: host address
+ * @s: object size in bytes, @s <= 8.
+ *
+ * Load @s bytes from @pv, when pv % s != 0. If [pv, pv+s-1] does not
+ * cross a 16-byte boundary then the access must be 16-byte atomic,
+ * otherwise the access must be 8-byte atomic.
+ */
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int o = pi & 7;
+ int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;
+ Int128 r;
+
+ pv = (void *)(pi & ~7);
+ if (pi & 8) {
+ uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
+ uint64_t a = qatomic_read__nocheck(p8);
+ uint64_t b = qatomic_read__nocheck(p8 + 1);
+
+ if (HOST_BIG_ENDIAN) {
+ r = int128_make128(b, a);
+ } else {
+ r = int128_make128(a, b);
+ }
+ } else {
+ r = atomic16_read_ro(pv);
+ }
+ return int128_getlo(int128_urshift(r, shr));
+}
+
+#endif /* HOST_LOAD_EXTRACT_AL16_AL8_H */
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index 35ce6d6368..6063395e11 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -9,6 +9,8 @@
* See the COPYING file in the top-level directory.
*/
+#include "host/load-extract-al16-al8.h"
+
#ifdef CONFIG_ATOMIC64
# define HAVE_al8 true
#else
@@ -311,40 +313,6 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
return int128_getlo(r);
}
-/**
- * load_atom_extract_al16_or_al8:
- * @p: host address
- * @s: object size in bytes, @s <= 8.
- *
- * Load @s bytes from @p, when p % s != 0. If [p, p+s-1] does not
- * cross an 16-byte boundary then the access must be 16-byte atomic,
- * otherwise the access must be 8-byte atomic.
- */
-static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
-load_atom_extract_al16_or_al8(void *pv, int s)
-{
- uintptr_t pi = (uintptr_t)pv;
- int o = pi & 7;
- int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;
- Int128 r;
-
- pv = (void *)(pi & ~7);
- if (pi & 8) {
- uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
- uint64_t a = qatomic_read__nocheck(p8);
- uint64_t b = qatomic_read__nocheck(p8 + 1);
-
- if (HOST_BIG_ENDIAN) {
- r = int128_make128(b, a);
- } else {
- r = int128_make128(a, b);
- }
- } else {
- r = atomic16_read_ro(pv);
- }
- return int128_getlo(int128_urshift(r, shr));
-}
-
/**
* load_atom_4_by_2:
* @pv: host address
--
2.34.1
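
For reference, a minimal caller sketch (not part of the patch; the wrapper name below is hypothetical): the helper returns the @s requested bytes in the low bits of a uint64_t on both big- and little-endian hosts, so a misaligned load that satisfies the atomicity precondition only needs to truncate the result to the access size.

/*
 * Hypothetical usage sketch, not from this patch: load a 4-byte value at
 * an address that is not 4-aligned, relying on the atomicity rules from
 * the comment in load-extract-al16-al8.h.  Worked example for a
 * little-endian host with an address ending in 0x6: o = 6 and bit 3 of
 * the address is clear, so the helper performs one 16-byte atomic read
 * of the aligned block, shifts the Int128 right by 48 bits, and the
 * wanted bytes land in the low 32 bits of the returned uint64_t.
 */
static uint32_t example_load_4_unaligned(void *pv)
{
    /* The requested bytes are returned in the low bits of the uint64_t. */
    return (uint32_t)load_atom_extract_al16_or_al8(pv, 4);
}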