Diffstat (limited to 'util')
 -rw-r--r--  util/Makefile.objs |   2
 -rw-r--r--  util/cacheinfo.c   | 185
 -rw-r--r--  util/stats64.c     | 137
 3 files changed, 324 insertions, 0 deletions
diff --git a/util/Makefile.objs b/util/Makefile.objs
index c6205ebf86..50a55ecc75 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -20,6 +20,7 @@ util-obj-y += host-utils.o
util-obj-y += bitmap.o bitops.o hbitmap.o
util-obj-y += fifo8.o
util-obj-y += acl.o
+util-obj-y += cacheinfo.o
util-obj-y += error.o qemu-error.o
util-obj-y += id.o
util-obj-y += iov.o qemu-config.o qemu-sockets.o uri.o notify.o
@@ -42,4 +43,5 @@ util-obj-y += log.o
util-obj-y += qdist.o
util-obj-y += qht.o
util-obj-y += range.o
+util-obj-y += stats64.o
util-obj-y += systemd.o
diff --git a/util/cacheinfo.c b/util/cacheinfo.c
new file mode 100644
index 0000000000..f987522df4
--- /dev/null
+++ b/util/cacheinfo.c
@@ -0,0 +1,185 @@
+/*
+ * cacheinfo.c - helpers to query the host about its caches
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * License: GNU GPL, version 2 or later.
+ *   See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+int qemu_icache_linesize = 0;
+int qemu_dcache_linesize = 0;
+
+/*
+ * Operating system specific detection mechanisms.
+ */
+
+#if defined(_AIX)
+# include <sys/systemcfg.h>
+
+static void sys_cache_info(int *isize, int *dsize)
+{
+    *isize = _system_configuration.icache_line;
+    *dsize = _system_configuration.dcache_line;
+}
+
+#elif defined(_WIN32)
+
+static void sys_cache_info(int *isize, int *dsize)
+{
+    SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;
+    DWORD size = 0;
+    BOOL success;
+    size_t i, n;
+
+    /* Check for the required buffer size first.  Note that if the zero
+       size we use for the probe results in success, then there is no
+       data available; fail in that case.  */
+    success = GetLogicalProcessorInformation(0, &size);
+    if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+        return;
+    }
+
+    n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+    size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+    buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n);
+    if (!GetLogicalProcessorInformation(buf, &size)) {
+        goto fail;
+    }
+
+    for (i = 0; i < n; i++) {
+        if (buf[i].Relationship == RelationCache
+            && buf[i].Cache.Level == 1) {
+            switch (buf[i].Cache.Type) {
+            case CacheUnified:
+                *isize = *dsize = buf[i].Cache.LineSize;
+                break;
+            case CacheInstruction:
+                *isize = buf[i].Cache.LineSize;
+                break;
+            case CacheData:
+                *dsize = buf[i].Cache.LineSize;
+                break;
+            default:
+                break;
+            }
+        }
+    }
+ fail:
+    g_free(buf);
+}
+
+#elif defined(__APPLE__) \
+      || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+# include <sys/sysctl.h>
+# if defined(__APPLE__)
+#  define SYSCTL_CACHELINE_NAME "hw.cachelinesize"
+# else
+#  define SYSCTL_CACHELINE_NAME "machdep.cacheline_size"
+# endif
+
+static void sys_cache_info(int *isize, int *dsize)
+{
+    /* There's only a single sysctl for both I/D cache line sizes. */
+    long size;
+    size_t len = sizeof(size);
+    if (!sysctlbyname(SYSCTL_CACHELINE_NAME, &size, &len, NULL, 0)) {
+        *isize = *dsize = size;
+    }
+}
+
+#else
+/* POSIX */
+
+static void sys_cache_info(int *isize, int *dsize)
+{
+# ifdef _SC_LEVEL1_ICACHE_LINESIZE
+    *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+# endif
+# ifdef _SC_LEVEL1_DCACHE_LINESIZE
+    *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+# endif
+}
+#endif /* sys_cache_info */
+
+/*
+ * Architecture (+ OS) specific detection mechanisms.
+ */
+
+#if defined(__aarch64__)
+
+static void arch_cache_info(int *isize, int *dsize)
+{
+    if (*isize == 0 || *dsize == 0) {
+        uint64_t ctr;
+
+        /* The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1,
+           but (at least under Linux) these are marked protected by the
+           kernel.  However, CTR_EL0 contains the minimum linesize in the
+           entire hierarchy, and is used by userspace cache flushing.  */
+        asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr));
+        if (*isize == 0) {
+            *isize = 4 << (ctr & 0xf);
+        }
+        if (*dsize == 0) {
+            *dsize = 4 << ((ctr >> 16) & 0xf);
+        }
+    }
+}
+
+#elif defined(_ARCH_PPC) && defined(__linux__)
+
+static void arch_cache_info(int *isize, int *dsize)
+{
+    if (*isize == 0) {
+        *isize = qemu_getauxval(AT_ICACHEBSIZE);
+    }
+    if (*dsize == 0) {
+        *dsize = qemu_getauxval(AT_DCACHEBSIZE);
+    }
+}
+
+#else
+static void arch_cache_info(int *isize, int *dsize) { }
+#endif /* arch_cache_info */
+
+/*
+ * ... and if all else fails ...
+ */
+
+static void fallback_cache_info(int *isize, int *dsize)
+{
+    /* If we can only find one of the two, assume they're the same.  */
+    if (*isize) {
+        if (*dsize) {
+            /* Success! */
+        } else {
+            *dsize = *isize;
+        }
+    } else if (*dsize) {
+        *isize = *dsize;
+    } else {
+#if defined(_ARCH_PPC)
+        /* For PPC, we're going to use the icache size computed for
+           flush_icache_range, which means that we must use the
+           architecture minimum.  */
+        *isize = *dsize = 16;
+#else
+        /* Otherwise, 64 bytes is not uncommon.  */
+        *isize = *dsize = 64;
+#endif
+    }
+}
+
+static void __attribute__((constructor)) init_cache_info(void)
+{
+    int isize = 0, dsize = 0;
+
+    sys_cache_info(&isize, &dsize);
+    arch_cache_info(&isize, &dsize);
+    fallback_cache_info(&isize, &dsize);
+
+    qemu_icache_linesize = isize;
+    qemu_dcache_linesize = dsize;
+}
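
As context for how the two probed values get used (a minimal sketch, not code from this commit): a typical consumer rounds a memory range out to the discovered line size before doing per-line cache maintenance. The helper below and its name, dcache_align_range, are hypothetical; only the two globals set by the constructor above come from cacheinfo.c.

#include <stddef.h>
#include <stdint.h>

extern int qemu_icache_linesize;
extern int qemu_dcache_linesize;

/* Hypothetical helper: round [addr, addr + len) out to d-cache line
 * boundaries, the usual first step of a hand-written per-line flush loop
 * that consumes qemu_dcache_linesize.
 */
static void dcache_align_range(uintptr_t addr, size_t len,
                               uintptr_t *first, uintptr_t *last)
{
    uintptr_t line = qemu_dcache_linesize;  /* nonzero after init_cache_info() */

    *first = addr & ~(line - 1);                    /* round start down */
    *last = (addr + len + line - 1) & ~(line - 1);  /* round end up */
}
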
diff --git a/util/stats64.c b/util/stats64.c
new file mode 100644
index 0000000000..9968fcceac
--- /dev/null
+++ b/util/stats64.c
@@ -0,0 +1,137 @@
+/*
+ * Atomic operations on 64-bit quantities.
+ *
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/atomic.h"
+#include "qemu/stats64.h"
+#include "qemu/processor.h"
+
+#ifndef CONFIG_ATOMIC64
+static inline void stat64_rdlock(Stat64 *s)
+{
+    /* Keep out incoming writers to avoid them starving us. */
+    atomic_add(&s->lock, 2);
+
+    /* If there is a concurrent writer, wait for it.  */
+    while (atomic_read(&s->lock) & 1) {
+        cpu_relax();
+    }
+}
+
+static inline void stat64_rdunlock(Stat64 *s)
+{
+    atomic_sub(&s->lock, 2);
+}
+
+static inline bool stat64_wrtrylock(Stat64 *s)
+{
+    return atomic_cmpxchg(&s->lock, 0, 1) == 0;
+}
+
+static inline void stat64_wrunlock(Stat64 *s)
+{
+    atomic_dec(&s->lock);
+}
+
+uint64_t stat64_get(const Stat64 *s)
+{
+    uint32_t high, low;
+
+    stat64_rdlock((Stat64 *)s);
+
+    /* 64-bit writes always take the lock, so we can read in
+     * any order.
+     */
+    high = atomic_read(&s->high);
+    low = atomic_read(&s->low);
+    stat64_rdunlock((Stat64 *)s);
+
+    return ((uint64_t)high << 32) | low;
+}
+
+bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
+{
+    uint32_t old;
+
+    if (!stat64_wrtrylock(s)) {
+        cpu_relax();
+        return false;
+    }
+
+    /* 64-bit reads always take the lock, so they don't care about the
+     * order of our update.  By updating s->low first, we can check
+     * whether we have to carry into s->high.
+     */
+    old = atomic_fetch_add(&s->low, low);
+    high += (old + low) < old;
+    atomic_add(&s->high, high);
+    stat64_wrunlock(s);
+    return true;
+}
+
+bool stat64_min_slow(Stat64 *s, uint64_t value)
+{
+    uint32_t high, low;
+    uint64_t orig;
+
+    if (!stat64_wrtrylock(s)) {
+        cpu_relax();
+        return false;
+    }
+
+    high = atomic_read(&s->high);
+    low = atomic_read(&s->low);
+
+    orig = ((uint64_t)high << 32) | low;
+    if (orig > value) {
+        /* We have to set low before high, just like stat64_min reads
+         * high before low.  The value may become higher temporarily, but
+         * stat64_get does not notice (it takes the lock) and the only ill
+         * effect on stat64_min is that the slow path may be triggered
+         * unnecessarily.
+         */
+        atomic_set(&s->low, (uint32_t)value);
+        smp_wmb();
+        atomic_set(&s->high, value >> 32);
+    }
+    stat64_wrunlock(s);
+    return true;
+}
+
+bool stat64_max_slow(Stat64 *s, uint64_t value)
+{
+    uint32_t high, low;
+    uint64_t orig;
+
+    if (!stat64_wrtrylock(s)) {
+        cpu_relax();
+        return false;
+    }
+
+    high = atomic_read(&s->high);
+    low = atomic_read(&s->low);
+
+    orig = ((uint64_t)high << 32) | low;
+    if (orig < value) {
+        /* We have to set low before high, just like stat64_max reads
+         * high before low.  The value may become lower temporarily, but
+         * stat64_get does not notice (it takes the lock) and the only ill
+         * effect on stat64_max is that the slow path may be triggered
+         * unnecessarily.
+         */
+        atomic_set(&s->low, (uint32_t)value);
+        smp_wmb();
+        atomic_set(&s->high, value >> 32);
+    }
+    stat64_wrunlock(s);
+    return true;
+}
+#endif
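
To make the CONFIG_ATOMIC64 fallback above easier to follow, here is a minimal self-contained sketch of the same protocol in plain C11 atomics, outside QEMU's wrappers: bit 0 of the lock word means a writer is active (claimed with a compare-and-swap from 0), and every reader adds 2, so a writer can only enter while the whole word is 0. The type and function names (stat64_sketch, sketch_*) are invented for illustration, and the corresponding fast paths are presumably in qemu/stats64.h, which is not part of this diff.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Same lock-word encoding as Stat64 above: bit 0 means "writer active",
 * and each concurrent reader contributes 2 to the remaining bits.
 */
typedef struct {
    atomic_uint lock;
    atomic_uint low, high;
} stat64_sketch;

static void sketch_rdlock(stat64_sketch *s)
{
    atomic_fetch_add(&s->lock, 2);        /* keep new writers out: lock != 0 now */
    while (atomic_load(&s->lock) & 1) {
        /* spin until the writer that beat us clears bit 0 */
    }
}

static void sketch_rdunlock(stat64_sketch *s)
{
    atomic_fetch_sub(&s->lock, 2);
}

static bool sketch_wrtrylock(stat64_sketch *s)
{
    unsigned expected = 0;
    /* Succeeds only when there is no writer and no reader at all. */
    return atomic_compare_exchange_strong(&s->lock, &expected, 1);
}

static void sketch_wrunlock(stat64_sketch *s)
{
    atomic_fetch_sub(&s->lock, 1);
}

static uint64_t sketch_get(stat64_sketch *s)
{
    sketch_rdlock(s);
    uint32_t hi = atomic_load(&s->high);
    uint32_t lo = atomic_load(&s->low);
    sketch_rdunlock(s);
    return ((uint64_t)hi << 32) | lo;
}

static void sketch_add(stat64_sketch *s, uint32_t low, uint32_t high)
{
    while (!sketch_wrtrylock(s)) {
        /* stat64_add32_carry instead returns false so its caller can retry */
    }
    /* Carry detection, as in stat64_add32_carry: if old = 0xfffffff0 and
     * low = 0x20, old + low wraps to 0x10, which is < old, so 1 is carried
     * into the high word.
     */
    uint32_t old = atomic_fetch_add(&s->low, low);
    high += (uint32_t)(old + low) < old;
    atomic_fetch_add(&s->high, high);
    sketch_wrunlock(s);
}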