-rw-r--r--  include/qemu/cutils.h              32
-rw-r--r--  tests/bench/bufferiszero-bench.c   47
-rw-r--r--  tests/bench/meson.build             1
-rw-r--r--  util/bufferiszero.c               443
4 files changed, 313 insertions(+), 210 deletions(-)
diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h
index 92c927a6a3..741dade7cf 100644
--- a/include/qemu/cutils.h
+++ b/include/qemu/cutils.h
@@ -187,9 +187,39 @@ char *freq_to_str(uint64_t freq_hz);
/* used to print char* safely */
#define STR_OR_NULL(str) ((str) ? (str) : "null")
-bool buffer_is_zero(const void *buf, size_t len);
+/*
+ * Check if a buffer is all zeroes.
+ */
+
+bool buffer_is_zero_ool(const void *vbuf, size_t len);
+bool buffer_is_zero_ge256(const void *vbuf, size_t len);
bool test_buffer_is_zero_next_accel(void);
+static inline bool buffer_is_zero_sample3(const char *buf, size_t len)
+{
+ /*
+ * For any reasonably sized buffer, these three samples come from
+ * three different cachelines. In qemu-img usage, we find that
+ * each byte eliminates more than half of all buffer testing.
+ * It is therefore critical to performance that the byte tests
+ * short-circuit, so that we do not pull in additional cache lines.
+ * Do not "optimize" this to !(a | b | c).
+ */
+ return !buf[0] && !buf[len - 1] && !buf[len / 2];
+}
+
+#ifdef __OPTIMIZE__
+static inline bool buffer_is_zero(const void *buf, size_t len)
+{
+ return (__builtin_constant_p(len) && len >= 256
+ ? buffer_is_zero_sample3(buf, len) &&
+ buffer_is_zero_ge256(buf, len)
+ : buffer_is_zero_ool(buf, len));
+}
+#else
+#define buffer_is_zero buffer_is_zero_ool
+#endif
+
/*
* Implementation of ULEB128 (http://en.wikipedia.org/wiki/LEB128)
* Input is limited to 14-bit numbers
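The inline wrapper above hinges on the three-byte sampling heuristic: probing the first, last, and middle bytes touches at most three cache lines, and in practice a nonzero buffer is usually rejected by the first probe alone. For illustration, a minimal standalone sketch of that heuristic (sample3_demo is a hypothetical demo name, not part of the patch):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical demo of the three-byte sampling heuristic. */
    static bool sample3_demo(const char *buf, size_t len)
    {
        /* The short-circuiting && matters: once buf[0] is nonzero, the
         * cache lines holding buf[len-1] and buf[len/2] are never read. */
        return !buf[0] && !buf[len - 1] && !buf[len / 2];
    }

    int main(void)
    {
        char page[4096];

        memset(page, 0, sizeof(page));
        printf("zero page:  %d\n", sample3_demo(page, sizeof(page))); /* prints 1 */

        page[0] = 1;
        printf("dirty page: %d\n", sample3_demo(page, sizeof(page))); /* prints 0 */
        return 0;
    }

Passing this filter proves nothing on its own; buffer_is_zero_ge256() (or the lt256 path) still scans the full contents afterwards. The heuristic only pays for itself because most nonzero buffers fail it immediately.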
diff --git a/tests/bench/bufferiszero-bench.c b/tests/bench/bufferiszero-bench.c
new file mode 100644
index 0000000000..222695c1fa
--- /dev/null
+++ b/tests/bench/bufferiszero-bench.c
@@ -0,0 +1,47 @@
+/*
+ * QEMU buffer_is_zero speed benchmark
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "qemu/units.h"
+
+static void test(const void *opaque)
+{
+ size_t max = 64 * KiB;
+ void *buf = g_malloc0(max);
+ int accel_index = 0;
+
+ do {
+ if (accel_index != 0) {
+ g_test_message("%s", ""); /* gnu_printf Werror for simple "" */
+ }
+ for (size_t len = 1 * KiB; len <= max; len *= 4) {
+ double total = 0.0;
+
+ g_test_timer_start();
+ do {
+ buffer_is_zero_ge256(buf, len);
+ total += len;
+ } while (g_test_timer_elapsed() < 0.5);
+
+ total /= MiB;
+ g_test_message("buffer_is_zero #%d: %2zuKB %8.0f MB/sec",
+ accel_index, len / (size_t)KiB,
+ total / g_test_timer_last());
+ }
+ accel_index++;
+ } while (test_buffer_is_zero_next_accel());
+
+ g_free(buf);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_data_func("/cutils/bufferiszero/speed", NULL, test);
+ return g_test_run();
+}
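The benchmark's inner loop measures throughput by accumulating the number of bytes scanned until half a second has elapsed, then dividing by the elapsed time. The same pattern can be approximated without glib; a sketch, where bench_mb_per_sec and all_zero are illustrative names rather than anything from the patch:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Naive byte-wise scan, standing in for the accelerator under test. */
    static bool all_zero(const void *vbuf, size_t len)
    {
        const unsigned char *p = vbuf;
        for (size_t i = 0; i < len; i++) {
            if (p[i]) {
                return false;
            }
        }
        return true;
    }

    static double bench_mb_per_sec(bool (*check)(const void *, size_t),
                                   const void *buf, size_t len)
    {
        double bytes = 0, elapsed;
        clock_t start = clock();

        /* Keep scanning until at least 0.5s has been spent. */
        do {
            check(buf, len);
            bytes += len;
            elapsed = (double)(clock() - start) / CLOCKS_PER_SEC;
        } while (elapsed < 0.5);

        return bytes / (1024 * 1024) / elapsed;
    }

    int main(void)
    {
        size_t len = 64 * 1024;
        void *buf = calloc(1, len);
        printf("%8.0f MB/sec\n", bench_mb_per_sec(all_zero, buf, len));
        free(buf);
        return 0;
    }

Note that clock() measures CPU time rather than the wall time reported by g_test_timer_elapsed(); for a single-threaded busy loop like this the two are close enough.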
diff --git a/tests/bench/meson.build b/tests/bench/meson.build
index 7e76338a52..4cd7a2f6b5 100644
--- a/tests/bench/meson.build
+++ b/tests/bench/meson.build
@@ -21,6 +21,7 @@ benchs = {}
if have_block
benchs += {
+ 'bufferiszero-bench': [],
'benchmark-crypto-hash': [crypto],
'benchmark-crypto-hmac': [crypto],
'benchmark-crypto-cipher': [crypto],
diff --git a/util/bufferiszero.c b/util/bufferiszero.c
index 3e6a5dfd63..74864f7b78 100644
--- a/util/bufferiszero.c
+++ b/util/bufferiszero.c
@@ -26,265 +26,290 @@
#include "qemu/bswap.h"
#include "host/cpuinfo.h"
-static bool
-buffer_zero_int(const void *buf, size_t len)
+typedef bool (*biz_accel_fn)(const void *, size_t);
+
+static bool buffer_is_zero_int_lt256(const void *buf, size_t len)
{
- if (unlikely(len < 8)) {
- /* For a very small buffer, simply accumulate all the bytes. */
- const unsigned char *p = buf;
- const unsigned char *e = buf + len;
- unsigned char t = 0;
-
- do {
- t |= *p++;
- } while (p < e);
-
- return t == 0;
- } else {
- /* Otherwise, use the unaligned memory access functions to
- handle the beginning and end of the buffer, with a couple
- of loops handling the middle aligned section. */
- uint64_t t = ldq_he_p(buf);
- const uint64_t *p = (uint64_t *)(((uintptr_t)buf + 8) & -8);
- const uint64_t *e = (uint64_t *)(((uintptr_t)buf + len) & -8);
-
- for (; p + 8 <= e; p += 8) {
- __builtin_prefetch(p + 8);
- if (t) {
- return false;
- }
- t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
- }
- while (p < e) {
- t |= *p++;
- }
- t |= ldq_he_p(buf + len - 8);
+ uint64_t t;
+ const uint64_t *p, *e;
- return t == 0;
+ /*
+ * Use unaligned memory access functions to handle
+ * the beginning and end of the buffer.
+ */
+ if (unlikely(len <= 8)) {
+ return (ldl_he_p(buf) | ldl_he_p(buf + len - 4)) == 0;
}
-}
-#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
-#include <immintrin.h>
+ t = ldq_he_p(buf) | ldq_he_p(buf + len - 8);
+ p = QEMU_ALIGN_PTR_DOWN(buf + 8, 8);
+ e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 8);
-/* Note that each of these vectorized functions require len >= 64. */
+ /* Read 0 to 31 aligned words from the middle. */
+ while (p < e) {
+ t |= *p++;
+ }
+ return t == 0;
+}
-static bool __attribute__((target("sse2")))
-buffer_zero_sse2(const void *buf, size_t len)
+static bool buffer_is_zero_int_ge256(const void *buf, size_t len)
{
- __m128i t = _mm_loadu_si128(buf);
- __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
- __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);
- __m128i zero = _mm_setzero_si128();
-
- /* Loop over 16-byte aligned blocks of 64. */
- while (likely(p <= e)) {
- __builtin_prefetch(p);
- t = _mm_cmpeq_epi8(t, zero);
- if (unlikely(_mm_movemask_epi8(t) != 0xFFFF)) {
+ /*
+ * Use unaligned memory access functions to handle
+ * the beginning and end of the buffer.
+ */
+ uint64_t t = ldq_he_p(buf) | ldq_he_p(buf + len - 8);
+ const uint64_t *p = QEMU_ALIGN_PTR_DOWN(buf + 8, 8);
+ const uint64_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 8);
+
+ /* Collect a partial block at the tail end. */
+ t |= e[-7] | e[-6] | e[-5] | e[-4] | e[-3] | e[-2] | e[-1];
+
+ /*
+ * Loop over 64 byte blocks.
+ * With the head and tail removed, e - p >= 30,
+ * so the loop must iterate at least 3 times.
+ */
+ do {
+ if (t) {
return false;
}
- t = p[-4] | p[-3] | p[-2] | p[-1];
- p += 4;
- }
+ t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
+ p += 8;
+ } while (p < e - 7);
- /* Finish the aligned tail. */
- t |= e[-3];
- t |= e[-2];
- t |= e[-1];
+ return t == 0;
+}
- /* Finish the unaligned tail. */
- t |= _mm_loadu_si128(buf + len - 16);
+#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
+#include <immintrin.h>
- return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF;
-}
+/* Helper for preventing the compiler from reassociating
+ chains of binary vector operations. */
+#define SSE_REASSOC_BARRIER(vec0, vec1) asm("" : "+x"(vec0), "+x"(vec1))
-#ifdef CONFIG_AVX2_OPT
-static bool __attribute__((target("sse4")))
-buffer_zero_sse4(const void *buf, size_t len)
+/* Note that these vectorized functions may assume len >= 256. */
+
+static bool __attribute__((target("sse2")))
+buffer_zero_sse2(const void *buf, size_t len)
{
- __m128i t = _mm_loadu_si128(buf);
- __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
- __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);
-
- /* Loop over 16-byte aligned blocks of 64. */
- while (likely(p <= e)) {
- __builtin_prefetch(p);
- if (unlikely(!_mm_testz_si128(t, t))) {
+ /* Unaligned loads at head/tail. */
+ __m128i v = *(__m128i_u *)(buf);
+ __m128i w = *(__m128i_u *)(buf + len - 16);
+ /* Align head/tail to 16-byte boundaries. */
+ const __m128i *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
+ const __m128i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
+ __m128i zero = { 0 };
+
+ /* Collect a partial block at tail end. */
+ v |= e[-1]; w |= e[-2];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= e[-3]; w |= e[-4];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= e[-5]; w |= e[-6];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= e[-7]; v |= w;
+
+ /*
+ * Loop over complete 128-byte blocks.
+ * With the head and tail removed, e - p >= 14, so the loop
+ * must iterate at least once.
+ */
+ do {
+ v = _mm_cmpeq_epi8(v, zero);
+ if (unlikely(_mm_movemask_epi8(v) != 0xFFFF)) {
return false;
}
- t = p[-4] | p[-3] | p[-2] | p[-1];
- p += 4;
- }
-
- /* Finish the aligned tail. */
- t |= e[-3];
- t |= e[-2];
- t |= e[-1];
-
- /* Finish the unaligned tail. */
- t |= _mm_loadu_si128(buf + len - 16);
-
- return _mm_testz_si128(t, t);
+ v = p[0]; w = p[1];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= p[2]; w |= p[3];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= p[4]; w |= p[5];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= p[6]; w |= p[7];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= w;
+ p += 8;
+ } while (p < e - 7);
+
+ return _mm_movemask_epi8(_mm_cmpeq_epi8(v, zero)) == 0xFFFF;
}
+#ifdef CONFIG_AVX2_OPT
static bool __attribute__((target("avx2")))
buffer_zero_avx2(const void *buf, size_t len)
{
- /* Begin with an unaligned head of 32 bytes. */
- __m256i t = _mm256_loadu_si256(buf);
- __m256i *p = (__m256i *)(((uintptr_t)buf + 5 * 32) & -32);
- __m256i *e = (__m256i *)(((uintptr_t)buf + len) & -32);
-
- /* Loop over 32-byte aligned blocks of 128. */
- while (p <= e) {
- __builtin_prefetch(p);
- if (unlikely(!_mm256_testz_si256(t, t))) {
+ /* Unaligned loads at head/tail. */
+ __m256i v = *(__m256i_u *)(buf);
+ __m256i w = *(__m256i_u *)(buf + len - 32);
+ /* Align head/tail to 32-byte boundaries. */
+ const __m256i *p = QEMU_ALIGN_PTR_DOWN(buf + 32, 32);
+ const __m256i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 32);
+ __m256i zero = { 0 };
+
+ /* Collect a partial block at tail end. */
+ v |= e[-1]; w |= e[-2];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= e[-3]; w |= e[-4];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= e[-5]; w |= e[-6];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= e[-7]; v |= w;
+
+ /* Loop over complete 256-byte blocks. */
+ for (; p < e - 7; p += 8) {
+ /* PTEST is not profitable here. */
+ v = _mm256_cmpeq_epi8(v, zero);
+ if (unlikely(_mm256_movemask_epi8(v) != 0xFFFFFFFF)) {
return false;
}
- t = p[-4] | p[-3] | p[-2] | p[-1];
- p += 4;
- } ;
-
- /* Finish the last block of 128 unaligned. */
- t |= _mm256_loadu_si256(buf + len - 4 * 32);
- t |= _mm256_loadu_si256(buf + len - 3 * 32);
- t |= _mm256_loadu_si256(buf + len - 2 * 32);
- t |= _mm256_loadu_si256(buf + len - 1 * 32);
+ v = p[0]; w = p[1];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= p[2]; w |= p[3];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= p[4]; w |= p[5];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= p[6]; w |= p[7];
+ SSE_REASSOC_BARRIER(v, w);
+ v |= w;
+ }
- return _mm256_testz_si256(t, t);
+ return _mm256_movemask_epi8(_mm256_cmpeq_epi8(v, zero)) == 0xFFFFFFFF;
}
#endif /* CONFIG_AVX2_OPT */
-#ifdef CONFIG_AVX512F_OPT
-static bool __attribute__((target("avx512f")))
-buffer_zero_avx512(const void *buf, size_t len)
+static biz_accel_fn const accel_table[] = {
+ buffer_is_zero_int_ge256,
+ buffer_zero_sse2,
+#ifdef CONFIG_AVX2_OPT
+ buffer_zero_avx2,
+#endif
+};
+
+static unsigned best_accel(void)
{
- /* Begin with an unaligned head of 64 bytes. */
- __m512i t = _mm512_loadu_si512(buf);
- __m512i *p = (__m512i *)(((uintptr_t)buf + 5 * 64) & -64);
- __m512i *e = (__m512i *)(((uintptr_t)buf + len) & -64);
-
- /* Loop over 64-byte aligned blocks of 256. */
- while (p <= e) {
- __builtin_prefetch(p);
- if (unlikely(_mm512_test_epi64_mask(t, t))) {
- return false;
- }
- t = p[-4] | p[-3] | p[-2] | p[-1];
- p += 4;
+ unsigned info = cpuinfo_init();
+
+#ifdef CONFIG_AVX2_OPT
+ if (info & CPUINFO_AVX2) {
+ return 2;
}
+#endif
+ return info & CPUINFO_SSE2 ? 1 : 0;
+}
- t |= _mm512_loadu_si512(buf + len - 4 * 64);
- t |= _mm512_loadu_si512(buf + len - 3 * 64);
- t |= _mm512_loadu_si512(buf + len - 2 * 64);
- t |= _mm512_loadu_si512(buf + len - 1 * 64);
+#elif defined(__aarch64__) && defined(__ARM_NEON)
+#include <arm_neon.h>
- return !_mm512_test_epi64_mask(t, t);
+/*
+ * Helper for preventing the compiler from reassociating
+ * chains of binary vector operations.
+ */
+#define REASSOC_BARRIER(vec0, vec1) asm("" : "+w"(vec0), "+w"(vec1))
+
+static bool buffer_is_zero_simd(const void *buf, size_t len)
+{
+ uint32x4_t t0, t1, t2, t3;
+
+ /* Align head/tail to 16-byte boundaries. */
+ const uint32x4_t *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
+ const uint32x4_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
+
+ /* Unaligned loads at head/tail. */
+ t0 = vld1q_u32(buf) | vld1q_u32(buf + len - 16);
+
+ /* Collect a partial block at tail end. */
+ t1 = e[-7] | e[-6];
+ t2 = e[-5] | e[-4];
+ t3 = e[-3] | e[-2];
+ t0 |= e[-1];
+ REASSOC_BARRIER(t0, t1);
+ REASSOC_BARRIER(t2, t3);
+ t0 |= t1;
+ t2 |= t3;
+ REASSOC_BARRIER(t0, t2);
+ t0 |= t2;
+ /*
+ * Loop over complete 128-byte blocks.
+ * With the head and tail removed, e - p >= 14, so the loop
+ * must iterate at least once.
+ */
+ do {
+ /*
+ * Reduce via UMAXV. Whatever the actual result,
+ * it will only be zero if all input bytes are zero.
+ */
+ if (unlikely(vmaxvq_u32(t0) != 0)) {
+ return false;
+ }
+
+ t0 = p[0] | p[1];
+ t1 = p[2] | p[3];
+ t2 = p[4] | p[5];
+ t3 = p[6] | p[7];
+ REASSOC_BARRIER(t0, t1);
+ REASSOC_BARRIER(t2, t3);
+ t0 |= t1;
+ t2 |= t3;
+ REASSOC_BARRIER(t0, t2);
+ t0 |= t2;
+ p += 8;
+ } while (p < e - 7);
+
+ return vmaxvq_u32(t0) == 0;
}
-#endif /* CONFIG_AVX512F_OPT */
-/*
- * Make sure that these variables are appropriately initialized when
- * SSE2 is enabled on the compiler command-line, but the compiler is
- * too old to support CONFIG_AVX2_OPT.
- */
-#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
-# define INIT_USED 0
-# define INIT_LENGTH 0
-# define INIT_ACCEL buffer_zero_int
+#define best_accel() 1
+static biz_accel_fn const accel_table[] = {
+ buffer_is_zero_int_ge256,
+ buffer_is_zero_simd,
+};
#else
-# ifndef __SSE2__
-# error "ISA selection confusion"
-# endif
-# define INIT_USED CPUINFO_SSE2
-# define INIT_LENGTH 64
-# define INIT_ACCEL buffer_zero_sse2
+#define best_accel() 0
+static biz_accel_fn const accel_table[1] = {
+ buffer_is_zero_int_ge256
+};
#endif
-static unsigned used_accel = INIT_USED;
-static unsigned length_to_accel = INIT_LENGTH;
-static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL;
+static biz_accel_fn buffer_is_zero_accel;
+static unsigned accel_index;
-static unsigned __attribute__((noinline))
-select_accel_cpuinfo(unsigned info)
+bool buffer_is_zero_ool(const void *buf, size_t len)
{
- /* Array is sorted in order of algorithm preference. */
- static const struct {
- unsigned bit;
- unsigned len;
- bool (*fn)(const void *, size_t);
- } all[] = {
-#ifdef CONFIG_AVX512F_OPT
- { CPUINFO_AVX512F, 256, buffer_zero_avx512 },
-#endif
-#ifdef CONFIG_AVX2_OPT
- { CPUINFO_AVX2, 128, buffer_zero_avx2 },
- { CPUINFO_SSE4, 64, buffer_zero_sse4 },
-#endif
- { CPUINFO_SSE2, 64, buffer_zero_sse2 },
- { CPUINFO_ALWAYS, 0, buffer_zero_int },
- };
-
- for (unsigned i = 0; i < ARRAY_SIZE(all); ++i) {
- if (info & all[i].bit) {
- length_to_accel = all[i].len;
- buffer_accel = all[i].fn;
- return all[i].bit;
- }
+ if (unlikely(len == 0)) {
+ return true;
+ }
+ if (!buffer_is_zero_sample3(buf, len)) {
+ return false;
+ }
+ /* All bytes are covered for any len <= 3. */
+ if (unlikely(len <= 3)) {
+ return true;
}
- return 0;
-}
-
-#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
-static void __attribute__((constructor)) init_accel(void)
-{
- used_accel = select_accel_cpuinfo(cpuinfo_init());
-}
-#endif /* CONFIG_AVX2_OPT */
-bool test_buffer_is_zero_next_accel(void)
-{
- /*
- * Accumulate the accelerators that we've already tested, and
- * remove them from the set to test this round. We'll get back
- * a zero from select_accel_cpuinfo when there are no more.
- */
- unsigned used = select_accel_cpuinfo(cpuinfo & ~used_accel);
- used_accel |= used;
- return used;
+ if (likely(len >= 256)) {
+ return buffer_is_zero_accel(buf, len);
+ }
+ return buffer_is_zero_int_lt256(buf, len);
}
-static bool select_accel_fn(const void *buf, size_t len)
+bool buffer_is_zero_ge256(const void *buf, size_t len)
{
- if (likely(len >= length_to_accel)) {
- return buffer_accel(buf, len);
- }
- return buffer_zero_int(buf, len);
+ return buffer_is_zero_accel(buf, len);
}
-#else
-#define select_accel_fn buffer_zero_int
bool test_buffer_is_zero_next_accel(void)
{
+ if (accel_index != 0) {
+ buffer_is_zero_accel = accel_table[--accel_index];
+ return true;
+ }
return false;
}
-#endif
-/*
- * Checks if a buffer is all zeroes
- */
-bool buffer_is_zero(const void *buf, size_t len)
+static void __attribute__((constructor)) init_accel(void)
{
- if (unlikely(len == 0)) {
- return true;
- }
-
- /* Fetch the beginning of the buffer while we select the accelerator. */
- __builtin_prefetch(buf);
-
- /* Use an optimized zero check if possible. Note that this also
- includes a check for an unrolled loop over 64-bit integers. */
- return select_accel_fn(buf, len);
+ accel_index = best_accel();
+ buffer_is_zero_accel = accel_table[accel_index];
}
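Both the x86 and AArch64 paths above rely on the reassociation-barrier idiom: an empty asm statement listing two accumulators as read-write operands ("+x" for SSE/AVX registers, "+w" for NEON) forces the compiler to materialize both values at that point, so it cannot fold the paired OR chains back into one serial dependency chain. The same idiom works on scalar registers; a sketch using a hypothetical "+r" variant on uint64_t accumulators, not taken from the patch:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical scalar counterpart of SSE_REASSOC_BARRIER. */
    #define REASSOC_BARRIER_U64(a, b) asm("" : "+r"(a), "+r"(b))

    static uint64_t or_reduce(const uint64_t *p, size_t n_words)
    {
        uint64_t v = 0, w = 0;

        /* Two independent accumulators keep two OR chains in flight;
         * the barrier keeps the compiler from merging them into one. */
        for (size_t i = 0; i + 2 <= n_words; i += 2) {
            v |= p[i];
            w |= p[i + 1];
            REASSOC_BARRIER_U64(v, w);
        }
        if (n_words & 1) {
            v |= p[n_words - 1];
        }
        return v | w;   /* zero iff every word was zero */
    }

With two chains a superscalar core can retire two ORs per cycle, which is why the vectorized loops above interleave v and w instead of folding everything into a single accumulator.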