[PULL 06/28] migration/xbzrle: Shuffle function order

From: Richard Henderson
Subject: [PULL 06/28] migration/xbzrle: Shuffle function order
Date: Tue, 23 May 2023 16:57:42 -0700
Place the CONFIG_AVX512BW_OPT block at the top,
which will aid function selection in the next patch.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
migration/xbzrle.c | 244 ++++++++++++++++++++++-----------------------
1 file changed, 122 insertions(+), 122 deletions(-)
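
[As a sketch of the selection this reordering prepares for: with the
AVX512 variant defined at the top of the file, a later change can pick
an encoder once at startup and fall back to the generic one otherwise.
The pointer name, init hook, and CPU check below are illustrative
assumptions, not code from this series.]

    #include <stdint.h>

    /* Prototypes as declared in xbzrle.h / earlier in the file. */
    int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
                             uint8_t *dst, int dlen);
    int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf,
                                    int slen, uint8_t *dst, int dlen);

    /* Pointer through which callers reach whichever encoder was chosen. */
    static int (*xbzrle_encode_func)(uint8_t *, uint8_t *, int,
                                     uint8_t *, int)
        = xbzrle_encode_buffer;          /* generic fallback */

    /* Run once at startup: prefer the AVX512BW variant when available. */
    static void __attribute__((constructor)) xbzrle_pick_encoder(void)
    {
    #if defined(CONFIG_AVX512BW_OPT)
        if (__builtin_cpu_supports("avx512bw")) {
            xbzrle_encode_func = xbzrle_encode_buffer_avx512;
        }
    #endif
    }

[The actual wiring lands in the next patch; this one only moves the
definition so the accelerated variant is visible where that choice
will be made.]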
diff --git a/migration/xbzrle.c b/migration/xbzrle.c
index 258e4959c9..751b5428f7 100644
--- a/migration/xbzrle.c
+++ b/migration/xbzrle.c
@@ -15,6 +15,128 @@
#include "qemu/host-utils.h"
#include "xbzrle.h"
+#if defined(CONFIG_AVX512BW_OPT)
+#include <immintrin.h>
+
+int __attribute__((target("avx512bw")))
+xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen,
+ uint8_t *dst, int dlen)
+{
+ uint32_t zrun_len = 0, nzrun_len = 0;
+ int d = 0, i = 0, num = 0;
+ uint8_t *nzrun_start = NULL;
+ /* add 1 to include residual part in main loop */
+ uint32_t count512s = (slen >> 6) + 1;
+ /* countResidual is tail of data, i.e., countResidual = slen % 64 */
+ uint32_t count_residual = slen & 0b111111;
+ bool never_same = true;
+ uint64_t mask_residual = 1;
+ mask_residual <<= count_residual;
+ mask_residual -= 1;
+ __m512i r = _mm512_set1_epi32(0);
+
+ while (count512s) {
+ int bytes_to_check = 64;
+ uint64_t mask = 0xffffffffffffffff;
+ if (count512s == 1) {
+ bytes_to_check = count_residual;
+ mask = mask_residual;
+ }
+ __m512i old_data = _mm512_mask_loadu_epi8(r,
+ mask, old_buf + i);
+ __m512i new_data = _mm512_mask_loadu_epi8(r,
+ mask, new_buf + i);
+ uint64_t comp = _mm512_cmpeq_epi8_mask(old_data, new_data);
+ count512s--;
+
+ bool is_same = (comp & 0x1);
+ while (bytes_to_check) {
+ if (d + 2 > dlen) {
+ return -1;
+ }
+ if (is_same) {
+ if (nzrun_len) {
+ d += uleb128_encode_small(dst + d, nzrun_len);
+ if (d + nzrun_len > dlen) {
+ return -1;
+ }
+ nzrun_start = new_buf + i - nzrun_len;
+ memcpy(dst + d, nzrun_start, nzrun_len);
+ d += nzrun_len;
+ nzrun_len = 0;
+ }
+ /* 64 data at a time for speed */
+ if (count512s && (comp == 0xffffffffffffffff)) {
+ i += 64;
+ zrun_len += 64;
+ break;
+ }
+ never_same = false;
+ num = ctz64(~comp);
+ num = (num < bytes_to_check) ? num : bytes_to_check;
+ zrun_len += num;
+ bytes_to_check -= num;
+ comp >>= num;
+ i += num;
+ if (bytes_to_check) {
+ /* still has different data after same data */
+ d += uleb128_encode_small(dst + d, zrun_len);
+ zrun_len = 0;
+ } else {
+ break;
+ }
+ }
+ if (never_same || zrun_len) {
+ /*
+ * never_same only acts if
+ * data begins with diff in first count512s
+ */
+ d += uleb128_encode_small(dst + d, zrun_len);
+ zrun_len = 0;
+ never_same = false;
+ }
+ /* has diff, 64 data at a time for speed */
+ if ((bytes_to_check == 64) && (comp == 0x0)) {
+ i += 64;
+ nzrun_len += 64;
+ break;
+ }
+ num = ctz64(comp);
+ num = (num < bytes_to_check) ? num : bytes_to_check;
+ nzrun_len += num;
+ bytes_to_check -= num;
+ comp >>= num;
+ i += num;
+ if (bytes_to_check) {
+ /* mask like 111000 */
+ d += uleb128_encode_small(dst + d, nzrun_len);
+ /* overflow */
+ if (d + nzrun_len > dlen) {
+ return -1;
+ }
+ nzrun_start = new_buf + i - nzrun_len;
+ memcpy(dst + d, nzrun_start, nzrun_len);
+ d += nzrun_len;
+ nzrun_len = 0;
+ is_same = true;
+ }
+ }
+ }
+
+ if (nzrun_len != 0) {
+ d += uleb128_encode_small(dst + d, nzrun_len);
+ /* overflow */
+ if (d + nzrun_len > dlen) {
+ return -1;
+ }
+ nzrun_start = new_buf + i - nzrun_len;
+ memcpy(dst + d, nzrun_start, nzrun_len);
+ d += nzrun_len;
+ }
+ return d;
+}
+#endif
+
/*
page = zrun nzrun
| zrun nzrun page
@@ -175,125 +297,3 @@ int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen)
return d;
}
-
-#if defined(CONFIG_AVX512BW_OPT)
-#include <immintrin.h>
-
-int __attribute__((target("avx512bw")))
-xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen,
- uint8_t *dst, int dlen)
-{
- uint32_t zrun_len = 0, nzrun_len = 0;
- int d = 0, i = 0, num = 0;
- uint8_t *nzrun_start = NULL;
- /* add 1 to include residual part in main loop */
- uint32_t count512s = (slen >> 6) + 1;
- /* countResidual is tail of data, i.e., countResidual = slen % 64 */
- uint32_t count_residual = slen & 0b111111;
- bool never_same = true;
- uint64_t mask_residual = 1;
- mask_residual <<= count_residual;
- mask_residual -= 1;
- __m512i r = _mm512_set1_epi32(0);
-
- while (count512s) {
- int bytes_to_check = 64;
- uint64_t mask = 0xffffffffffffffff;
- if (count512s == 1) {
- bytes_to_check = count_residual;
- mask = mask_residual;
- }
- __m512i old_data = _mm512_mask_loadu_epi8(r,
- mask, old_buf + i);
- __m512i new_data = _mm512_mask_loadu_epi8(r,
- mask, new_buf + i);
- uint64_t comp = _mm512_cmpeq_epi8_mask(old_data, new_data);
- count512s--;
-
- bool is_same = (comp & 0x1);
- while (bytes_to_check) {
- if (d + 2 > dlen) {
- return -1;
- }
- if (is_same) {
- if (nzrun_len) {
- d += uleb128_encode_small(dst + d, nzrun_len);
- if (d + nzrun_len > dlen) {
- return -1;
- }
- nzrun_start = new_buf + i - nzrun_len;
- memcpy(dst + d, nzrun_start, nzrun_len);
- d += nzrun_len;
- nzrun_len = 0;
- }
- /* 64 data at a time for speed */
- if (count512s && (comp == 0xffffffffffffffff)) {
- i += 64;
- zrun_len += 64;
- break;
- }
- never_same = false;
- num = ctz64(~comp);
- num = (num < bytes_to_check) ? num : bytes_to_check;
- zrun_len += num;
- bytes_to_check -= num;
- comp >>= num;
- i += num;
- if (bytes_to_check) {
- /* still has different data after same data */
- d += uleb128_encode_small(dst + d, zrun_len);
- zrun_len = 0;
- } else {
- break;
- }
- }
- if (never_same || zrun_len) {
- /*
- * never_same only acts if
- * data begins with diff in first count512s
- */
- d += uleb128_encode_small(dst + d, zrun_len);
- zrun_len = 0;
- never_same = false;
- }
- /* has diff, 64 data at a time for speed */
- if ((bytes_to_check == 64) && (comp == 0x0)) {
- i += 64;
- nzrun_len += 64;
- break;
- }
- num = ctz64(comp);
- num = (num < bytes_to_check) ? num : bytes_to_check;
- nzrun_len += num;
- bytes_to_check -= num;
- comp >>= num;
- i += num;
- if (bytes_to_check) {
- /* mask like 111000 */
- d += uleb128_encode_small(dst + d, nzrun_len);
- /* overflow */
- if (d + nzrun_len > dlen) {
- return -1;
- }
- nzrun_start = new_buf + i - nzrun_len;
- memcpy(dst + d, nzrun_start, nzrun_len);
- d += nzrun_len;
- nzrun_len = 0;
- is_same = true;
- }
- }
- }
-
- if (nzrun_len != 0) {
- d += uleb128_encode_small(dst + d, nzrun_len);
- /* overflow */
- if (d + nzrun_len > dlen) {
- return -1;
- }
- nzrun_start = new_buf + i - nzrun_len;
- memcpy(dst + d, nzrun_start, nzrun_len);
- d += nzrun_len;
- }
- return d;
-}
-#endif
--
2.34.1
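
[For readers unfamiliar with the format being moved around above: as the
"page = zrun nzrun | zrun nzrun page" comment in the diff context
describes, an encoded page alternates a zero-run length and a
non-zero-run length, both ULEB128-encoded, the latter followed by the
differing bytes themselves. A minimal scalar sketch of that encoding,
assuming only uleb128_encode_small() from qemu/host-utils.h (which the
file already includes); everything else below is illustrative, not the
file's actual generic encoder.]

    #include <stdint.h>
    #include <string.h>
    #include "qemu/host-utils.h"   /* uleb128_encode_small() */

    static int xbzrle_encode_sketch(const uint8_t *old_buf,
                                    const uint8_t *new_buf,
                                    int slen, uint8_t *dst, int dlen)
    {
        int d = 0, i = 0;

        while (i < slen) {
            uint32_t zrun = 0, nzrun = 0;

            /* Length of the run of bytes unchanged since old_buf. */
            while (i + zrun < slen && old_buf[i + zrun] == new_buf[i + zrun]) {
                zrun++;
            }
            i += zrun;
            if (i == slen) {
                break;              /* trailing zero run is left implicit */
            }

            /* Length of the run of changed bytes that follows. */
            while (i + nzrun < slen &&
                   old_buf[i + nzrun] != new_buf[i + nzrun]) {
                nzrun++;
            }

            /* Emit: zrun length, nzrun length, then the new bytes. */
            if (d + 4 > dlen) {
                return -1;          /* two ULEB128s, at most 2 bytes each */
            }
            d += uleb128_encode_small(dst + d, zrun);
            d += uleb128_encode_small(dst + d, nzrun);
            if (d + nzrun > dlen) {
                return -1;          /* output buffer would overflow */
            }
            memcpy(dst + d, new_buf + i, nzrun);
            d += nzrun;
            i += nzrun;
        }
        return d;
    }

[The AVX512 routine above produces this same stream; it just finds the
run boundaries 64 bytes at a time with masked loads and
_mm512_cmpeq_epi8_mask instead of the byte-at-a-time scan shown here.]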