-rw-r--r--  VERSION | 2
-rw-r--r--  accel/tcg/atomic_common.c.inc | 107
-rw-r--r--  accel/tcg/atomic_template.h | 141
-rw-r--r--  accel/tcg/cpu-exec.c | 207
-rw-r--r--  accel/tcg/cputlb.c | 49
-rw-r--r--  accel/tcg/tcg-runtime.h | 46
-rw-r--r--  accel/tcg/translate-all.c | 7
-rw-r--r--  accel/tcg/translator.c | 39
-rw-r--r--  accel/tcg/user-exec.c | 41
-rw-r--r--  block.c | 3
-rw-r--r--  block/dirty-bitmap.c | 2
-rw-r--r--  block/export/export.c | 5
-rw-r--r--  block/linux-aio.c | 9
-rw-r--r--  block/mirror.c | 13
-rw-r--r--  block/replication.c | 111
-rw-r--r--  block/vvfat.c | 43
-rw-r--r--  configs/devices/mips64el-softmmu/default.mak | 4
-rw-r--r--  configs/devices/ppc-softmmu/default.mak | 1
-rw-r--r--  configs/devices/tricore-softmmu/default.mak | 1
-rwxr-xr-x  configure | 21
-rw-r--r--  cpu.c | 34
-rw-r--r--  docs/tools/qemu-img.rst | 8
-rw-r--r--  hw/acpi/Kconfig | 4
-rw-r--r--  hw/acpi/meson.build | 3
-rw-r--r--  hw/arm/Kconfig | 3
-rw-r--r--  hw/char/Kconfig | 1
-rw-r--r--  hw/i386/Kconfig | 1
-rw-r--r--  hw/ide/Kconfig | 2
-rw-r--r--  hw/isa/Kconfig | 5
-rw-r--r--  hw/mips/Kconfig | 6
-rw-r--r--  hw/pci-host/Kconfig | 1
-rw-r--r--  hw/ppc/Kconfig | 2
-rw-r--r--  hw/remote/memory.c | 2
-rw-r--r--  hw/riscv/Kconfig | 5
-rw-r--r--  hw/riscv/meson.build | 2
-rw-r--r--  hw/tricore/Kconfig | 3
-rw-r--r--  hw/tricore/meson.build | 4
-rw-r--r--  include/block/aio.h | 12
-rw-r--r--  include/exec/exec-all.h | 24
-rw-r--r--  include/exec/translator.h | 11
-rw-r--r--  include/hw/core/cpu.h | 4
-rw-r--r--  include/hw/core/tcg-cpu-ops.h | 6
-rw-r--r--  include/qemu/atomic.h | 247
-rw-r--r--  include/qemu/stats64.h | 2
-rw-r--r--  include/sysemu/iothread.h | 3
-rw-r--r--  include/tcg/tcg.h | 80
-rw-r--r--  iothread.c | 82
-rw-r--r--  linux-user/hppa/cpu_loop.c | 2
-rw-r--r--  meson.build | 1
-rw-r--r--  monitor/hmp-cmds.c | 2
-rw-r--r--  plugins/core.c | 2
-rw-r--r--  qapi/misc.json | 6
-rw-r--r--  qapi/qom.json | 7
-rw-r--r--  qemu-img.c | 50
-rw-r--r--  qemu-options.hx | 8
-rw-r--r--  softmmu/timers-state.h | 2
-rw-r--r--  target/alpha/translate.c | 31
-rw-r--r--  target/arm/cpu.c | 1
-rw-r--r--  target/arm/cpu_tcg.c | 1
-rw-r--r--  target/arm/debug_helper.c | 12
-rw-r--r--  target/arm/helper-a64.c | 8
-rw-r--r--  target/arm/helper.h | 2
-rw-r--r--  target/arm/internals.h | 3
-rw-r--r--  target/arm/translate-a64.c | 25
-rw-r--r--  target/arm/translate.c | 29
-rw-r--r--  target/avr/cpu.c | 1
-rw-r--r--  target/avr/cpu.h | 1
-rw-r--r--  target/avr/gdbstub.c | 13
-rw-r--r--  target/avr/translate.c | 32
-rw-r--r--  target/cris/translate.c | 20
-rw-r--r--  target/hexagon/translate.c | 17
-rw-r--r--  target/hppa/translate.c | 11
-rw-r--r--  target/i386/tcg/mem_helper.c | 15
-rw-r--r--  target/i386/tcg/tcg-cpu.c | 12
-rw-r--r--  target/i386/tcg/translate.c | 28
-rw-r--r--  target/m68k/op_helper.c | 19
-rw-r--r--  target/m68k/translate.c | 18
-rw-r--r--  target/microblaze/translate.c | 18
-rw-r--r--  target/mips/tcg/translate.c | 19
-rw-r--r--  target/nios2/translate.c | 27
-rw-r--r--  target/openrisc/translate.c | 17
-rw-r--r--  target/ppc/mem_helper.c | 16
-rw-r--r--  target/ppc/translate.c | 18
-rw-r--r--  target/riscv/translate.c | 17
-rw-r--r--  target/rx/translate.c | 14
-rw-r--r--  target/s390x/tcg/mem_helper.c | 19
-rw-r--r--  target/s390x/tcg/translate.c | 24
-rw-r--r--  target/sh4/translate.c | 18
-rw-r--r--  target/sparc/translate.c | 17
-rw-r--r--  target/tricore/translate.c | 16
-rw-r--r--  target/xtensa/translate.c | 17
-rw-r--r--  tcg/tcg-op.c | 79
-rw-r--r--  tests/acceptance/virtio-gpu.py | 42
-rwxr-xr-x  tests/qemu-iotests/151 | 54
-rw-r--r--  tests/qemu-iotests/151.out | 4
-rwxr-xr-x  tests/qemu-iotests/307 | 15
-rw-r--r--  tests/qemu-iotests/307.out | 8
-rwxr-xr-x  tests/qemu-iotests/tests/qemu-img-bitmaps (renamed from tests/qemu-iotests/291) | 34
-rw-r--r--  tests/qemu-iotests/tests/qemu-img-bitmaps.out (renamed from tests/qemu-iotests/291.out) | 67
-rw-r--r--  trace/mem-internal.h | 50
-rw-r--r--  trace/mem.h | 46
-rw-r--r--  util/aio-posix.c | 12
-rw-r--r--  util/aio-win32.c | 5
-rw-r--r--  util/async.c | 2
-rw-r--r--  util/qsp.c | 4
105 files changed, 1097 insertions, 1370 deletions
diff --git a/VERSION b/VERSION
index cc94f6b803..7d5e1d8670 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-6.0.50
+6.0.90
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
index 344525b0bb..6c0339f610 100644
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -13,42 +13,125 @@
* See the COPYING file in the top-level directory.
*/
-static inline
-void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi)
{
CPUState *cpu = env_cpu(env);
+ uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
trace_guest_mem_before_exec(cpu, addr, info);
trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
+
+ return info;
}
-static inline void
-atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
+ uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
}
-static inline
-void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+#if HAVE_ATOMIC128
+static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi)
{
+ uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
+
trace_guest_mem_before_exec(env_cpu(env), addr, info);
+
+ return info;
}
-static inline
-void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
+ uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
-static inline
-void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi)
{
+ uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
+
trace_guest_mem_before_exec(env_cpu(env), addr, info);
+
+ return info;
}
-static inline
-void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
+ uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
+#endif
+
+/*
+ * Atomic helpers callable from TCG.
+ * These have a common interface and all defer to cpu_atomic_*
+ * using the host return address from GETPC().
+ */
+
+#define CMPXCHG_HELPER(OP, TYPE) \
+ TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
+ TYPE oldv, TYPE newv, uint32_t oi) \
+ { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
+
+CMPXCHG_HELPER(cmpxchgb, uint32_t)
+CMPXCHG_HELPER(cmpxchgw_be, uint32_t)
+CMPXCHG_HELPER(cmpxchgw_le, uint32_t)
+CMPXCHG_HELPER(cmpxchgl_be, uint32_t)
+CMPXCHG_HELPER(cmpxchgl_le, uint32_t)
+
+#ifdef CONFIG_ATOMIC64
+CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
+CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
+#endif
+
+#undef CMPXCHG_HELPER
+
+#define ATOMIC_HELPER(OP, TYPE) \
+ TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
+ TYPE val, uint32_t oi) \
+ { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPERS(OP) \
+ ATOMIC_HELPER(glue(OP,b), uint32_t) \
+ ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
+ ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
+ ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
+ ATOMIC_HELPER(glue(OP,l_le), uint32_t) \
+ ATOMIC_HELPER(glue(OP,q_be), uint64_t) \
+ ATOMIC_HELPER(glue(OP,q_le), uint64_t)
+#else
+#define GEN_ATOMIC_HELPERS(OP) \
+ ATOMIC_HELPER(glue(OP,b), uint32_t) \
+ ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
+ ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
+ ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
+ ATOMIC_HELPER(glue(OP,l_le), uint32_t)
+#endif
+
+GEN_ATOMIC_HELPERS(fetch_add)
+GEN_ATOMIC_HELPERS(fetch_and)
+GEN_ATOMIC_HELPERS(fetch_or)
+GEN_ATOMIC_HELPERS(fetch_xor)
+GEN_ATOMIC_HELPERS(fetch_smin)
+GEN_ATOMIC_HELPERS(fetch_umin)
+GEN_ATOMIC_HELPERS(fetch_smax)
+GEN_ATOMIC_HELPERS(fetch_umax)
+
+GEN_ATOMIC_HELPERS(add_fetch)
+GEN_ATOMIC_HELPERS(and_fetch)
+GEN_ATOMIC_HELPERS(or_fetch)
+GEN_ATOMIC_HELPERS(xor_fetch)
+GEN_ATOMIC_HELPERS(smin_fetch)
+GEN_ATOMIC_HELPERS(umin_fetch)
+GEN_ATOMIC_HELPERS(smax_fetch)
+GEN_ATOMIC_HELPERS(umax_fetch)
+
+GEN_ATOMIC_HELPERS(xchg)
+
+#undef ATOMIC_HELPER
+#undef GEN_ATOMIC_HELPERS
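
[Reader's aid: a sketch of what one instantiation of the CMPXCHG_HELPER
macro above expands to, once HELPER() has added its helper_ prefix.
Illustrative expansion only, not part of the patch.]

    /* Illustrative expansion of CMPXCHG_HELPER(cmpxchgl_le, uint32_t). */
    uint32_t helper_atomic_cmpxchgl_le(CPUArchState *env, target_ulong addr,
                                       uint32_t oldv, uint32_t newv,
                                       uint32_t oi)
    {
        /* GETPC() captures the host return address so a faulting access
         * can be attributed to the TCG-generated call site. */
        return cpu_atomic_cmpxchgl_le_mmu(env, addr, oldv, newv, oi, GETPC());
    }
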
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index afa8a9daf3..d89af4cc1e 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -28,8 +28,8 @@
# define SHIFT 4
#elif DATA_SIZE == 8
# define SUFFIX q
-# define DATA_TYPE uint64_t
-# define SDATA_TYPE int64_t
+# define DATA_TYPE aligned_uint64_t
+# define SDATA_TYPE aligned_int64_t
# define BSWAP bswap64
# define SHIFT 3
#elif DATA_SIZE == 4
@@ -71,15 +71,14 @@
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
- ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
+ ABI_TYPE cmpv, ABI_TYPE newv,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- ATOMIC_MMU_DECLS;
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
- uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
- ATOMIC_MMU_IDX);
+ uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
- atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
@@ -92,45 +91,41 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- ATOMIC_MMU_DECLS;
- DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
- uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
- ATOMIC_MMU_IDX);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ, retaddr);
+ DATA_TYPE val;
+ uint16_t info = atomic_trace_ld_pre(env, addr, oi);
- atomic_trace_ld_pre(env, addr, info);
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, info);
return val;
}
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
- ABI_TYPE val EXTRA_ARGS)
+void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- ATOMIC_MMU_DECLS;
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
- uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
- ATOMIC_MMU_IDX);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_WRITE, retaddr);
+ uint16_t info = atomic_trace_st_pre(env, addr, oi);
- atomic_trace_st_pre(env, addr, info);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, info);
}
#endif
#else
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
- ABI_TYPE val EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- ATOMIC_MMU_DECLS;
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
- uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
- ATOMIC_MMU_IDX);
+ uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
- atomic_trace_rmw_pre(env, addr, info);
ret = qatomic_xchg__nocheck(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, info);
@@ -139,14 +134,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
- ABI_TYPE val EXTRA_ARGS) \
+ ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
- ATOMIC_MMU_DECLS; \
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+ PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
- uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
- ATOMIC_MMU_IDX); \
- atomic_trace_rmw_pre(env, addr, info); \
+ uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, info); \
@@ -164,7 +157,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
-/* These helpers are, as a whole, full barriers. Within the helper,
+/*
+ * These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
@@ -173,14 +167,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
- ABI_TYPE xval EXTRA_ARGS) \
+ ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
- ATOMIC_MMU_DECLS; \
- XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+ PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
- uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
- ATOMIC_MMU_IDX); \
- atomic_trace_rmw_pre(env, addr, info); \
+ uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@@ -218,15 +210,14 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
- ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
+ ABI_TYPE cmpv, ABI_TYPE newv,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- ATOMIC_MMU_DECLS;
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
- uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
- ATOMIC_MMU_IDX);
+ uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
- atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
@@ -239,30 +230,28 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- ATOMIC_MMU_DECLS;
- DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
- uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
- ATOMIC_MMU_IDX);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ, retaddr);
+ DATA_TYPE val;
+ uint16_t info = atomic_trace_ld_pre(env, addr, oi);
- atomic_trace_ld_pre(env, addr, info);
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, info);
return BSWAP(val);
}
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
- ABI_TYPE val EXTRA_ARGS)
+void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- ATOMIC_MMU_DECLS;
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
- uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
- ATOMIC_MMU_IDX);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_WRITE, retaddr);
+ uint16_t info = atomic_trace_st_pre(env, addr, oi);
val = BSWAP(val);
- atomic_trace_st_pre(env, addr, info);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
@@ -270,16 +259,14 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
}
#endif
#else
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
- ABI_TYPE val EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- ATOMIC_MMU_DECLS;
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ | PAGE_WRITE, retaddr);
ABI_TYPE ret;
- uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
- ATOMIC_MMU_IDX);
+ uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
- atomic_trace_rmw_pre(env, addr, info);
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, info);
@@ -288,14 +275,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
- ABI_TYPE val EXTRA_ARGS) \
+ ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
- ATOMIC_MMU_DECLS; \
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+ PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
- uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
- false, ATOMIC_MMU_IDX); \
- atomic_trace_rmw_pre(env, addr, info); \
+ uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, info); \
@@ -320,14 +305,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
- ABI_TYPE xval EXTRA_ARGS) \
+ ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
- ATOMIC_MMU_DECLS; \
- XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+ PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
- uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
- false, ATOMIC_MMU_IDX); \
- atomic_trace_rmw_pre(env, addr, info); \
+ uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
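
[Reader's aid: the hunk above ends at the head of the compare-and-swap
loop. As context for the barrier comment in the patch, a minimal sketch
of the pattern GEN_ATOMIC_HELPER_FN generates, simplified to uint32_t;
names are not the verbatim macro body.]

    /* Sketch: a fetch-op emulated with a cmpxchg loop. The leading
     * smp_mb() is explicit; the trailing barrier is implied by the
     * cmpxchg primitive, so the helper as a whole is a full barrier. */
    static uint32_t fetch_op_sketch(uint32_t *haddr, uint32_t val)
    {
        uint32_t cmp, old, new;

        smp_mb();
        cmp = qatomic_read__nocheck(haddr);
        do {
            old = cmp;
            new = old + val;   /* stand-in for FN(old, val) */
            cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
        } while (cmp != old);  /* retry if another vCPU raced */
        return old;            /* RET picks old (fetch_op) or new (op_fetch) */
    }
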
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index e22bcb99f7..fc895cf51e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -145,6 +145,28 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
}
#endif /* CONFIG USER ONLY */
+uint32_t curr_cflags(CPUState *cpu)
+{
+ uint32_t cflags = cpu->tcg_cflags;
+
+ /*
+ * Record gdb single-step. We should be exiting the TB by raising
+ * EXCP_DEBUG, but to simplify other tests, disable chaining too.
+ *
+ * For singlestep and -d nochain, suppress goto_tb so that
+ * we can log -d cpu,exec after every TB.
+ */
+ if (unlikely(cpu->singlestep_enabled)) {
+ cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
+ } else if (singlestep) {
+ cflags |= CF_NO_GOTO_TB | 1;
+ } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+ cflags |= CF_NO_GOTO_TB;
+ }
+
+ return cflags;
+}
+
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base,
@@ -205,6 +227,76 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
}
}
+static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+ uint32_t *cflags)
+{
+ CPUBreakpoint *bp;
+ bool match_page = false;
+
+ if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
+ return false;
+ }
+
+ /*
+ * Singlestep overrides breakpoints.
+ * This requirement is visible in the record-replay tests, where
+ * we would fail to make forward progress in reverse-continue.
+ *
+ * TODO: gdb singlestep should only override gdb breakpoints,
+ * so that one could (gdb) singlestep into the guest kernel's
+ * architectural breakpoint handler.
+ */
+ if (cpu->singlestep_enabled) {
+ return false;
+ }
+
+ QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+ /*
+ * If we have an exact pc match, trigger the breakpoint.
+ * Otherwise, note matches within the page.
+ */
+ if (pc == bp->pc) {
+ bool match_bp = false;
+
+ if (bp->flags & BP_GDB) {
+ match_bp = true;
+ } else if (bp->flags & BP_CPU) {
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ assert(cc->tcg_ops->debug_check_breakpoint);
+ match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
+#endif
+ }
+
+ if (match_bp) {
+ cpu->exception_index = EXCP_DEBUG;
+ return true;
+ }
+ } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
+ match_page = true;
+ }
+ }
+
+ /*
+ * Within the same page as a breakpoint, single-step,
+ * returning to helper_lookup_tb_ptr after each insn looking
+ * for the actual breakpoint.
+ *
+ * TODO: Perhaps better to record all of the TBs associated
+ * with a given virtual page that contains a breakpoint, and
+ * then invalidate them when a new overlapping breakpoint is
+ * set on the page. Non-overlapping TBs would not be
+ * invalidated, nor would any TB need to be invalidated as
+ * breakpoints are removed.
+ */
+ if (match_page) {
+ *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
+ }
+ return false;
+}
+
/**
* helper_lookup_tb_ptr: quick check for next tb
* @env: current cpu state
@@ -218,11 +310,16 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
- uint32_t flags;
+ uint32_t flags, cflags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
+ cflags = curr_cflags(cpu);
+ if (check_for_breakpoints(cpu, pc, &cflags)) {
+ cpu_loop_exit(cpu);
+ }
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
@@ -313,8 +410,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
- uint32_t flags;
- uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
+ uint32_t flags, cflags;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -324,8 +420,20 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu->running = true;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ cflags = curr_cflags(cpu);
+ /* Execute in a serial context. */
+ cflags &= ~CF_PARALLEL;
+ /* After 1 insn, return and release the exclusive lock. */
+ cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
+ /*
+ * No need to check_for_breakpoints here.
+ * We only arrive in cpu_exec_step_atomic after beginning execution
+ * of an insn that includes an atomic operation we can't handle.
+ * Any breakpoint for this insn will have been recognized earlier.
+ */
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
@@ -478,41 +586,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
return;
}
-static inline TranslationBlock *tb_find(CPUState *cpu,
- TranslationBlock *last_tb,
- int tb_exit, uint32_t cflags)
-{
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
- TranslationBlock *tb;
- target_ulong cs_base, pc;
- uint32_t flags;
-
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
- if (tb == NULL) {
- mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
- mmap_unlock();
- /* We add the TB in the virtual pc hash table for the fast lookup */
- qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
- }
-#ifndef CONFIG_USER_ONLY
- /* We don't take care of direct jumps when address mapping changes in
- * system emulation. So it's not safe to make a direct jump to a TB
- * spanning two pages because the mapping for the second page can change.
- */
- if (tb->page_addr[1] != -1) {
- last_tb = NULL;
- }
-#endif
- /* See if we can patch the calling TB. */
- if (last_tb) {
- tb_add_jump(last_tb, tb_exit, tb);
- }
- return tb;
-}
-
static inline bool cpu_handle_halt(CPUState *cpu)
{
if (cpu->halted) {
@@ -846,22 +919,60 @@ int cpu_exec(CPUState *cpu)
int tb_exit = 0;
while (!cpu_handle_interrupt(cpu, &last_tb)) {
- uint32_t cflags = cpu->cflags_next_tb;
TranslationBlock *tb;
-
- /* When requested, use an exact setting for cflags for the next
- execution. This is used for icount, precise smc, and stop-
- after-access watchpoints. Since this request should never
- have CF_INVALID set, -1 is a convenient invalid value that
- does not require tcg headers for cpu_common_reset. */
+ target_ulong cs_base, pc;
+ uint32_t flags, cflags;
+
+ cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+
+ /*
+ * When requested, use an exact setting for cflags for the next
+ * execution. This is used for icount, precise smc, and stop-
+ * after-access watchpoints. Since this request should never
+ * have CF_INVALID set, -1 is a convenient invalid value that
+ * does not require tcg headers for cpu_common_reset.
+ */
+ cflags = cpu->cflags_next_tb;
if (cflags == -1) {
cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
- tb = tb_find(cpu, last_tb, tb_exit, cflags);
+ if (check_for_breakpoints(cpu, pc, &cflags)) {
+ break;
+ }
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ if (tb == NULL) {
+ mmap_lock();
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ mmap_unlock();
+ /*
+ * We add the TB in the virtual pc hash table
+ * for the fast lookup
+ */
+ qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /*
+ * We don't take care of direct jumps when address mapping
+ * changes in system emulation. So it's not safe to make a
+ * direct jump to a TB spanning two pages because the mapping
+ * for the second page can change.
+ */
+ if (tb->page_addr[1] != -1) {
+ last_tb = NULL;
+ }
+#endif
+ /* See if we can patch the calling TB. */
+ if (last_tb) {
+ tb_add_jump(last_tb, tb_exit, tb);
+ }
+
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
+
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(&sc, cpu);
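
[Reader's aid: a worked example of the cflags rewrite that
check_for_breakpoints performs when another breakpoint shares the page.]

    /* Suppose curr_cflags() returned a count of 0 ("up to the maximum
     * TB length"). After a same-page breakpoint match:
     *
     *   *cflags = (*cflags & ~CF_COUNT_MASK)   -- drop the old count
     *           | CF_NO_GOTO_TB                -- never chain directly
     *           | 1;                           -- one insn per TB
     *
     * Every TB then returns to helper_lookup_tb_ptr, which re-runs
     * check_for_breakpoints before the next instruction executes. */
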
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index b4e15b6aad..b1e5471f94 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2686,19 +2686,14 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
cpu_stq_le_data_ra(env, ptr, val, 0);
}
-/* First set of helpers allows passing in of OI and RETADDR. This makes
- them callable from other helpers. */
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
-#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
- HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_DECLS
-#define ATOMIC_MMU_LOOKUP_RW \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
-#define ATOMIC_MMU_LOOKUP_R \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
-#define ATOMIC_MMU_LOOKUP_W \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
+ glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
+
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)
@@ -2723,38 +2718,6 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
#include "atomic_template.h"
#endif
-/* Second set of helpers are directly callable from TCG as helpers. */
-
-#undef EXTRA_ARGS
-#undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP_RW
-#undef ATOMIC_MMU_LOOKUP_R
-#undef ATOMIC_MMU_LOOKUP_W
-
-#define EXTRA_ARGS , TCGMemOpIdx oi
-#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP_RW \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC())
-#define ATOMIC_MMU_LOOKUP_R \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC())
-#define ATOMIC_MMU_LOOKUP_W \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC())
-
-#define DATA_SIZE 1
-#include "atomic_template.h"
-
-#define DATA_SIZE 2
-#include "atomic_template.h"
-
-#define DATA_SIZE 4
-#include "atomic_template.h"
-
-#ifdef CONFIG_ATOMIC64
-#define DATA_SIZE 8
-#include "atomic_template.h"
-#endif
-#undef ATOMIC_MMU_IDX
-
/* Code access functions. */
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index 91a5b7e85f..37cbd722bf 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -39,8 +39,6 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
#endif /* IN_HELPER_PROTO */
-#ifdef CONFIG_SOFTMMU
-
DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
@@ -88,50 +86,6 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
TCG_CALL_NO_WG, i32, env, tl, i32, i32)
#endif /* CONFIG_ATOMIC64 */
-#else
-
-DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-#ifdef CONFIG_ATOMIC64
-DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
-#endif
-
-#ifdef CONFIG_ATOMIC64
-#define GEN_ATOMIC_HELPERS(NAME) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_le), \
- TCG_CALL_NO_WG, i64, env, tl, i64) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be), \
- TCG_CALL_NO_WG, i64, env, tl, i64)
-#else
-#define GEN_ATOMIC_HELPERS(NAME) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \
- TCG_CALL_NO_WG, i32, env, tl, i32) \
- DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \
- TCG_CALL_NO_WG, i32, env, tl, i32)
-#endif /* CONFIG_ATOMIC64 */
-
-#endif /* CONFIG_SOFTMMU */
-
GEN_ATOMIC_HELPERS(fetch_add)
GEN_ATOMIC_HELPERS(fetch_and)
GEN_ATOMIC_HELPERS(fetch_or)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 4df26de858..bbfcfb698c 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1428,14 +1428,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
max_insns = cflags & CF_COUNT_MASK;
if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
- if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
- if (cpu->singlestep_enabled || singlestep) {
- max_insns = 1;
- }
+ QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
buffer_overflow:
tb = tcg_tb_alloc(tcg_ctx);
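
[Reader's aid: the QEMU_BUILD_BUG_ON added above is what makes the
shortened clamp safe; a sketch of the resulting logic.]

    /* CF_COUNT_MASK + 1 == TCG_MAX_INSNS is enforced at compile time,
     * so every nonzero count encodable in cflags is already in range
     * and 0 can stand for "the maximum". */
    static int resolve_max_insns(uint32_t cflags)
    {
        int max_insns = cflags & CF_COUNT_MASK;
        return max_insns ? max_insns : TCG_MAX_INSNS;
    }
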
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 59804af37b..c53a7f8e44 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -33,8 +33,8 @@ void translator_loop_temp_check(DisasContextBase *db)
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
{
- /* Suppress goto_tb in the case of single-steping. */
- if (db->singlestep_enabled || singlestep) {
+ /* Suppress goto_tb if requested. */
+ if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
return false;
}
@@ -45,7 +45,7 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns)
{
- int bp_insn = 0;
+ uint32_t cflags = tb_cflags(tb);
bool plugin_enabled;
/* Initialize DisasContext */
@@ -55,7 +55,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = max_insns;
- db->singlestep_enabled = cpu->singlestep_enabled;
+ db->singlestep_enabled = cflags & CF_SINGLE_STEP;
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -68,8 +68,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
ops->tb_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
- plugin_enabled = plugin_gen_tb_start(cpu, tb,
- tb_cflags(db->tb) & CF_MEMI_ONLY);
+ plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY);
while (true) {
db->num_insns++;
@@ -80,39 +79,17 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
plugin_gen_insn_start(cpu, db);
}
- /* Pass breakpoint hits to target for further processing */
- if (!db->singlestep_enabled
- && unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
- CPUBreakpoint *bp;
- QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
- if (bp->pc == db->pc_next) {
- if (ops->breakpoint_check(db, cpu, bp)) {
- bp_insn = 1;
- break;
- }
- }
- }
- /* The breakpoint_check hook may use DISAS_TOO_MANY to indicate
- that only one more instruction is to be executed. Otherwise
- it should use DISAS_NORETURN when generating an exception,
- but may use a DISAS_TARGET_* value for Something Else. */
- if (db->is_jmp > DISAS_TOO_MANY) {
- break;
- }
- }
-
/* Disassemble one instruction. The translate_insn hook should
update db->pc_next and db->is_jmp to indicate what should be
done next -- either exiting this loop or locate the start of
the next instruction. */
- if (db->num_insns == db->max_insns
- && (tb_cflags(db->tb) & CF_LAST_IO)) {
+ if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
/* Accept I/O on the last instruction. */
gen_io_start();
ops->translate_insn(db, cpu);
} else {
/* we should only see CF_MEMI_ONLY for io_recompile */
- tcg_debug_assert(!(tb_cflags(db->tb) & CF_MEMI_ONLY));
+ tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
ops->translate_insn(db, cpu);
}
@@ -139,7 +116,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
/* Emit code to exit the TB, as indicated by db->is_jmp. */
ops->tb_stop(db, cpu);
- gen_tb_end(db->tb, db->num_insns - bp_insn);
+ gen_tb_end(db->tb, db->num_insns);
if (plugin_enabled) {
plugin_gen_tb_end(cpu);
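
[Reader's aid: the usual caller of translator_use_goto_tb() is each
target's gen_goto_tb; a hedged sketch of the common shape across the
target/*/translate.c conversions in this diffstat. gen_set_pc_imm is a
hypothetical stand-in for a target's PC-writeback helper.]

    static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
    {
        if (translator_use_goto_tb(&ctx->base, dest)) {
            tcg_gen_goto_tb(n);                /* direct chaining allowed */
            gen_set_pc_imm(ctx, dest);         /* hypothetical helper */
            tcg_gen_exit_tb(ctx->base.tb, n);
        } else {
            gen_set_pc_imm(ctx, dest);
            tcg_gen_lookup_and_goto_ptr();     /* fall back to hash lookup */
        }
    }
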
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index ba09fd0413..90d1a2d327 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1221,9 +1221,14 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
return ret;
}
-/* Do not allow unaligned operations to proceed. Return the host address. */
+/*
+ * Do not allow unaligned operations to proceed. Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- int size, uintptr_t retaddr)
+ TCGMemOpIdx oi, int size, int prot,
+ uintptr_t retaddr)
{
/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
@@ -1234,18 +1239,17 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
return ret;
}
-/* Macro to call the above, with local variables from the use context. */
-#define ATOMIC_MMU_DECLS do {} while (0)
-#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
-#define ATOMIC_MMU_LOOKUP_R ATOMIC_MMU_LOOKUP_RW
-#define ATOMIC_MMU_LOOKUP_W ATOMIC_MMU_LOOKUP_RW
-#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
-#define ATOMIC_MMU_IDX MMU_USER_IDX
+#include "atomic_common.c.inc"
-#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define EXTRA_ARGS
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
-#include "atomic_common.c.inc"
+#define ATOMIC_NAME(X) \
+ glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
+#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
+#define ATOMIC_MMU_IDX MMU_USER_IDX
#define DATA_SIZE 1
#include "atomic_template.h"
@@ -1261,20 +1265,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#include "atomic_template.h"
#endif
-/* The following is only callable from other helpers, and matches up
- with the softmmu version. */
-
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
-
-#undef EXTRA_ARGS
-#undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP_RW
-
-#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
-#define ATOMIC_NAME(X) \
- HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
-
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
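
[Reader's aid: the alignment test in atomic_mmu_lookup relies on the
access size being a power of two; a small worked example.]

    /* For size = 8: size - 1 = 0b111, so 0x1004 & 0x7 = 0x4 -> rejected
     * as misaligned, while 0x1008 & 0x7 = 0 -> aligned, lookup proceeds. */
    static bool is_size_aligned(target_ulong addr, int size)
    {
        return (addr & (size - 1)) == 0;
    }
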
diff --git a/block.c b/block.c
index be083f389e..e97ce0b1c8 100644
--- a/block.c
+++ b/block.c
@@ -6162,6 +6162,9 @@ BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
void bdrv_init(void)
{
+#ifdef CONFIG_BDRV_WHITELIST_TOOLS
+ use_bdrv_whitelist = 1;
+#endif
module_call_init(MODULE_INIT_BLOCK);
}
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index 68d295d6e3..0ef46163e3 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -193,7 +193,7 @@ int bdrv_dirty_bitmap_check(const BdrvDirtyBitmap *bitmap, uint32_t flags,
error_setg(errp, "Bitmap '%s' is inconsistent and cannot be used",
bitmap->name);
error_append_hint(errp, "Try block-dirty-bitmap-remove to delete"
- " this bitmap from disk");
+ " this bitmap from disk\n");
return -1;
}
diff --git a/block/export/export.c b/block/export/export.c
index fec7d9f738..6d3b9964c8 100644
--- a/block/export/export.c
+++ b/block/export/export.c
@@ -111,6 +111,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
if (export->has_iothread) {
IOThread *iothread;
AioContext *new_ctx;
+ Error **set_context_errp;
iothread = iothread_by_id(export->iothread);
if (!iothread) {
@@ -120,7 +121,9 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
new_ctx = iothread_get_aio_context(iothread);
- ret = bdrv_try_set_aio_context(bs, new_ctx, errp);
+ /* Ignore errors with fixed-iothread=false */
+ set_context_errp = fixed_iothread ? errp : NULL;
+ ret = bdrv_try_set_aio_context(bs, new_ctx, set_context_errp);
if (ret == 0) {
aio_context_release(ctx);
aio_context_acquire(new_ctx);
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 3c0527c2bf..0dab507b71 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -28,6 +28,9 @@
*/
#define MAX_EVENTS 1024
+/* Maximum number of requests in a batch. (default value) */
+#define DEFAULT_MAX_BATCH 32
+
struct qemu_laiocb {
Coroutine *co;
LinuxAioState *ctx;
@@ -351,6 +354,10 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
LinuxAioState *s = laiocb->ctx;
struct iocb *iocbs = &laiocb->iocb;
QEMUIOVector *qiov = laiocb->qiov;
+ int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
+
+ /* limit the batch with the number of available events */
+ max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);
switch (type) {
case QEMU_AIO_WRITE:
@@ -371,7 +378,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
s->io_q.in_queue++;
if (!s->io_q.blocked &&
(!s->io_q.plugged ||
- s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
+ s->io_q.in_queue >= max_batch)) {
ioq_submit(s);
}
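
[Reader's aid: a worked example of the batch limit computed in
laio_do_submit above.]

    /* With aio_max_batch unset (0), the ?: picks DEFAULT_MAX_BATCH = 32.
     * If 1000 requests are already in flight, MAX_EVENTS - 1000 = 24 and
     * MIN_NON_ZERO(24, 32) = 24, so submission triggers once 24 requests
     * are queued and the batch can never overrun the event ring. With a
     * user-set aio_max_batch of 8, the limit would instead be 8. */
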
diff --git a/block/mirror.c b/block/mirror.c
index 019f6deaa5..98fc66eabf 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -107,6 +107,7 @@ struct MirrorOp {
bool is_in_flight;
CoQueue waiting_requests;
Coroutine *co;
+ MirrorOp *waiting_for_op;
QTAILQ_ENTRY(MirrorOp) next;
};
@@ -159,7 +160,18 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
if (ranges_overlap(self_start_chunk, self_nb_chunks,
op_start_chunk, op_nb_chunks))
{
+ /*
+ * If the operation is already (indirectly) waiting for us, or
+ * will wait for us as soon as it wakes up, then just go on
+ * (instead of producing a deadlock in the former case).
+ */
+ if (op->waiting_for_op) {
+ continue;
+ }
+
+ self->waiting_for_op = op;
qemu_co_queue_wait(&op->waiting_requests, NULL);
+ self->waiting_for_op = NULL;
break;
}
}
@@ -1343,6 +1355,7 @@ static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
.bytes = bytes,
.is_active_write = true,
.is_in_flight = true,
+ .co = qemu_coroutine_self(),
};
qemu_co_queue_init(&op->waiting_requests);
QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
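
[Reader's aid: the deadlock that waiting_for_op prevents, spelled out.]

    /* Without the field: op A overlaps op B, so A sleeps on
     * B->waiting_requests; B, still running, finds it overlaps A and
     * would sleep on A->waiting_requests -- each waits for the other
     * forever. With the patch, A records A->waiting_for_op = B before
     * sleeping, so when B scans the in-flight list it sees that A is
     * already waiting (directly or indirectly) and continues instead. */
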
diff --git a/block/replication.c b/block/replication.c
index 774e15df16..32444b9a8f 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -35,7 +35,6 @@ typedef enum {
typedef struct BDRVReplicationState {
ReplicationMode mode;
ReplicationStage stage;
- BdrvChild *active_disk;
BlockJob *commit_job;
BdrvChild *hidden_disk;
BdrvChild *secondary_disk;
@@ -166,7 +165,12 @@ static void replication_child_perm(BlockDriverState *bs, BdrvChild *c,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
- *nperm = BLK_PERM_CONSISTENT_READ;
+ if (role & BDRV_CHILD_PRIMARY) {
+ *nperm = BLK_PERM_CONSISTENT_READ;
+ } else {
+ *nperm = 0;
+ }
+
if ((bs->open_flags & (BDRV_O_INACTIVE | BDRV_O_RDWR)) == BDRV_O_RDWR) {
*nperm |= BLK_PERM_WRITE;
}
@@ -307,8 +311,10 @@ out:
return ret;
}
-static void secondary_do_checkpoint(BDRVReplicationState *s, Error **errp)
+static void secondary_do_checkpoint(BlockDriverState *bs, Error **errp)
{
+ BDRVReplicationState *s = bs->opaque;
+ BdrvChild *active_disk = bs->file;
Error *local_err = NULL;
int ret;
@@ -323,13 +329,13 @@ static void secondary_do_checkpoint(BDRVReplicationState *s, Error **errp)
return;
}
- if (!s->active_disk->bs->drv) {
+ if (!active_disk->bs->drv) {
error_setg(errp, "Active disk %s is ejected",
- s->active_disk->bs->node_name);
+ active_disk->bs->node_name);
return;
}
- ret = bdrv_make_empty(s->active_disk, errp);
+ ret = bdrv_make_empty(active_disk, errp);
if (ret < 0) {
return;
}
@@ -340,17 +346,7 @@ static void secondary_do_checkpoint(BDRVReplicationState *s, Error **errp)
return;
}
- BlockBackend *blk = blk_new(qemu_get_current_aio_context(),
- BLK_PERM_WRITE, BLK_PERM_ALL);
- blk_insert_bs(blk, s->hidden_disk->bs, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- blk_unref(blk);
- return;
- }
-
- ret = blk_make_empty(blk, errp);
- blk_unref(blk);
+ ret = bdrv_make_empty(s->hidden_disk, errp);
if (ret < 0) {
return;
}
@@ -365,27 +361,35 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
Error **errp)
{
BDRVReplicationState *s = bs->opaque;
+ BdrvChild *hidden_disk, *secondary_disk;
BlockReopenQueue *reopen_queue = NULL;
+ /*
+ * s->hidden_disk and s->secondary_disk may not be set yet, as they will
+ * only be set after the children are writable.
+ */
+ hidden_disk = bs->file->bs->backing;
+ secondary_disk = hidden_disk->bs->backing;
+
if (writable) {
- s->orig_hidden_read_only = bdrv_is_read_only(s->hidden_disk->bs);
- s->orig_secondary_read_only = bdrv_is_read_only(s->secondary_disk->bs);
+ s->orig_hidden_read_only = bdrv_is_read_only(hidden_disk->bs);
+ s->orig_secondary_read_only = bdrv_is_read_only(secondary_disk->bs);
}
- bdrv_subtree_drained_begin(s->hidden_disk->bs);
- bdrv_subtree_drained_begin(s->secondary_disk->bs);
+ bdrv_subtree_drained_begin(hidden_disk->bs);
+ bdrv_subtree_drained_begin(secondary_disk->bs);
if (s->orig_hidden_read_only) {
QDict *opts = qdict_new();
qdict_put_bool(opts, BDRV_OPT_READ_ONLY, !writable);
- reopen_queue = bdrv_reopen_queue(reopen_queue, s->hidden_disk->bs,
+ reopen_queue = bdrv_reopen_queue(reopen_queue, hidden_disk->bs,
opts, true);
}
if (s->orig_secondary_read_only) {
QDict *opts = qdict_new();
qdict_put_bool(opts, BDRV_OPT_READ_ONLY, !writable);
- reopen_queue = bdrv_reopen_queue(reopen_queue, s->secondary_disk->bs,
+ reopen_queue = bdrv_reopen_queue(reopen_queue, secondary_disk->bs,
opts, true);
}
@@ -400,8 +404,8 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
}
}
- bdrv_subtree_drained_end(s->hidden_disk->bs);
- bdrv_subtree_drained_end(s->secondary_disk->bs);
+ bdrv_subtree_drained_end(hidden_disk->bs);
+ bdrv_subtree_drained_end(secondary_disk->bs);
}
static void backup_job_cleanup(BlockDriverState *bs)
@@ -458,6 +462,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
BlockDriverState *bs = rs->opaque;
BDRVReplicationState *s;
BlockDriverState *top_bs;
+ BdrvChild *active_disk, *hidden_disk, *secondary_disk;
int64_t active_length, hidden_length, disk_length;
AioContext *aio_context;
Error *local_err = NULL;
@@ -495,32 +500,31 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
case REPLICATION_MODE_PRIMARY:
break;
case REPLICATION_MODE_SECONDARY:
- s->active_disk = bs->file;
- if (!s->active_disk || !s->active_disk->bs ||
- !s->active_disk->bs->backing) {
+ active_disk = bs->file;
+ if (!active_disk || !active_disk->bs || !active_disk->bs->backing) {
error_setg(errp, "Active disk doesn't have backing file");
aio_context_release(aio_context);
return;
}
- s->hidden_disk = s->active_disk->bs->backing;
- if (!s->hidden_disk->bs || !s->hidden_disk->bs->backing) {
+ hidden_disk = active_disk->bs->backing;
+ if (!hidden_disk->bs || !hidden_disk->bs->backing) {
error_setg(errp, "Hidden disk doesn't have backing file");
aio_context_release(aio_context);
return;
}
- s->secondary_disk = s->hidden_disk->bs->backing;
- if (!s->secondary_disk->bs || !bdrv_has_blk(s->secondary_disk->bs)) {
+ secondary_disk = hidden_disk->bs->backing;
+ if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) {
error_setg(errp, "The secondary disk doesn't have block backend");
aio_context_release(aio_context);
return;
}
/* verify the length */
- active_length = bdrv_getlength(s->active_disk->bs);
- hidden_length = bdrv_getlength(s->hidden_disk->bs);
- disk_length = bdrv_getlength(s->secondary_disk->bs);
+ active_length = bdrv_getlength(active_disk->bs);
+ hidden_length = bdrv_getlength(hidden_disk->bs);
+ disk_length = bdrv_getlength(secondary_disk->bs);
if (active_length < 0 || hidden_length < 0 || disk_length < 0 ||
active_length != hidden_length || hidden_length != disk_length) {
error_setg(errp, "Active disk, hidden disk, secondary disk's length"
@@ -530,10 +534,10 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
}
/* Must be true, or the bdrv_getlength() calls would have failed */
- assert(s->active_disk->bs->drv && s->hidden_disk->bs->drv);
+ assert(active_disk->bs->drv && hidden_disk->bs->drv);
- if (!s->active_disk->bs->drv->bdrv_make_empty ||
- !s->hidden_disk->bs->drv->bdrv_make_empty) {
+ if (!active_disk->bs->drv->bdrv_make_empty ||
+ !hidden_disk->bs->drv->bdrv_make_empty) {
error_setg(errp,
"Active disk or hidden disk doesn't support make_empty");
aio_context_release(aio_context);
@@ -548,6 +552,26 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
return;
}
+ bdrv_ref(hidden_disk->bs);
+ s->hidden_disk = bdrv_attach_child(bs, hidden_disk->bs, "hidden disk",
+ &child_of_bds, BDRV_CHILD_DATA,
+ &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ aio_context_release(aio_context);
+ return;
+ }
+
+ bdrv_ref(secondary_disk->bs);
+ s->secondary_disk = bdrv_attach_child(bs, secondary_disk->bs,
+ "secondary disk", &child_of_bds,
+ BDRV_CHILD_DATA, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ aio_context_release(aio_context);
+ return;
+ }
+
/* start backup job now */
error_setg(&s->blocker,
"Block device is in use by internal backup job");
@@ -586,7 +610,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
s->stage = BLOCK_REPLICATION_RUNNING;
if (s->mode == REPLICATION_MODE_SECONDARY) {
- secondary_do_checkpoint(s, errp);
+ secondary_do_checkpoint(bs, errp);
}
s->error = 0;
@@ -615,7 +639,7 @@ static void replication_do_checkpoint(ReplicationState *rs, Error **errp)
}
if (s->mode == REPLICATION_MODE_SECONDARY) {
- secondary_do_checkpoint(s, errp);
+ secondary_do_checkpoint(bs, errp);
}
aio_context_release(aio_context);
}
@@ -652,8 +676,9 @@ static void replication_done(void *opaque, int ret)
if (ret == 0) {
s->stage = BLOCK_REPLICATION_DONE;
- s->active_disk = NULL;
+ bdrv_unref_child(bs, s->secondary_disk);
s->secondary_disk = NULL;
+ bdrv_unref_child(bs, s->hidden_disk);
s->hidden_disk = NULL;
s->error = 0;
} else {
@@ -705,7 +730,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
}
if (!failover) {
- secondary_do_checkpoint(s, errp);
+ secondary_do_checkpoint(bs, errp);
s->stage = BLOCK_REPLICATION_DONE;
aio_context_release(aio_context);
return;
@@ -713,7 +738,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
s->stage = BLOCK_REPLICATION_FAILOVER;
s->commit_job = commit_active_start(
- NULL, s->active_disk->bs, s->secondary_disk->bs,
+ NULL, bs->file->bs, s->secondary_disk->bs,
JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT,
NULL, replication_done, bs, true, errp);
break;
diff --git a/block/vvfat.c b/block/vvfat.c
index ae9d387da7..34bf1e3a86 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -3098,26 +3098,6 @@ static int coroutine_fn vvfat_co_block_status(BlockDriverState *bs,
return BDRV_BLOCK_DATA;
}
-static int coroutine_fn
-write_target_commit(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
- QEMUIOVector *qiov, int flags)
-{
- int ret;
-
- BDRVVVFATState* s = *((BDRVVVFATState**) bs->opaque);
- qemu_co_mutex_lock(&s->lock);
- ret = try_commit(s);
- qemu_co_mutex_unlock(&s->lock);
-
- return ret;
-}
-
-static BlockDriver vvfat_write_target = {
- .format_name = "vvfat_write_target",
- .instance_size = sizeof(void*),
- .bdrv_co_pwritev = write_target_commit,
-};
-
static void vvfat_qcow_options(BdrvChildRole role, bool parent_is_format,
int *child_flags, QDict *child_options,
int parent_flags, QDict *parent_options)
@@ -3133,7 +3113,6 @@ static int enable_write_target(BlockDriverState *bs, Error **errp)
{
BDRVVVFATState *s = bs->opaque;
BlockDriver *bdrv_qcow = NULL;
- BlockDriverState *backing;
QemuOpts *opts = NULL;
int ret;
int size = sector2cluster(s, s->sector_count);
@@ -3184,13 +3163,6 @@ static int enable_write_target(BlockDriverState *bs, Error **errp)
unlink(s->qcow_filename);
#endif
- backing = bdrv_new_open_driver(&vvfat_write_target, NULL, BDRV_O_ALLOW_RDWR,
- &error_abort);
- *(void**) backing->opaque = s;
-
- bdrv_set_backing_hd(s->bs, backing, &error_abort);
- bdrv_unref(backing);
-
return 0;
err:
@@ -3205,17 +3177,10 @@ static void vvfat_child_perm(BlockDriverState *bs, BdrvChild *c,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
- if (role & BDRV_CHILD_DATA) {
- /* This is a private node, nobody should try to attach to it */
- *nperm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
- *nshared = BLK_PERM_WRITE_UNCHANGED;
- } else {
- assert(role & BDRV_CHILD_COW);
- /* The backing file is there so 'commit' can use it. vvfat doesn't
- * access it in any way. */
- *nperm = 0;
- *nshared = BLK_PERM_ALL;
- }
+ assert(role & BDRV_CHILD_DATA);
+ /* This is a private node, nobody should try to attach to it */
+ *nperm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
+ *nshared = BLK_PERM_WRITE_UNCHANGED;
}
static void vvfat_close(BlockDriverState *bs)
diff --git a/configs/devices/mips64el-softmmu/default.mak b/configs/devices/mips64el-softmmu/default.mak
index c511a061ba..c610749ac1 100644
--- a/configs/devices/mips64el-softmmu/default.mak
+++ b/configs/devices/mips64el-softmmu/default.mak
@@ -8,8 +8,4 @@ CONFIG_ATI_VGA=y
CONFIG_RTL8139_PCI=y
CONFIG_JAZZ=y
CONFIG_VT82C686=y
-CONFIG_AHCI=y
CONFIG_MIPS_BOSTON=y
-CONFIG_FITLOADER=y
-CONFIG_PCI_EXPRESS=y
-CONFIG_PCI_EXPRESS_XILINX=y
diff --git a/configs/devices/ppc-softmmu/default.mak b/configs/devices/ppc-softmmu/default.mak
index 4535993d8d..658a454426 100644
--- a/configs/devices/ppc-softmmu/default.mak
+++ b/configs/devices/ppc-softmmu/default.mak
@@ -1,7 +1,6 @@
# Default configuration for ppc-softmmu
# For embedded PPCs:
-CONFIG_DS1338=y
CONFIG_E500=y
CONFIG_PPC405=y
CONFIG_PPC440=y
diff --git a/configs/devices/tricore-softmmu/default.mak b/configs/devices/tricore-softmmu/default.mak
index 5cc91cebce..cb8fc286eb 100644
--- a/configs/devices/tricore-softmmu/default.mak
+++ b/configs/devices/tricore-softmmu/default.mak
@@ -1 +1,2 @@
+CONFIG_TRICORE_TESTBOARD=y
CONFIG_TRIBOARD=y
diff --git a/configure b/configure
index 2a6d23a844..0005cd140d 100755
--- a/configure
+++ b/configure
@@ -243,6 +243,7 @@ cross_prefix=""
audio_drv_list=""
block_drv_rw_whitelist=""
block_drv_ro_whitelist=""
+block_drv_whitelist_tools="no"
host_cc="cc"
audio_win_int=""
libs_qga=""
@@ -1018,6 +1019,10 @@ for opt do
;;
--block-drv-ro-whitelist=*) block_drv_ro_whitelist=$(echo "$optarg" | sed -e 's/,/ /g')
;;
+ --enable-block-drv-whitelist-in-tools) block_drv_whitelist_tools="yes"
+ ;;
+ --disable-block-drv-whitelist-in-tools) block_drv_whitelist_tools="no"
+ ;;
--enable-debug-tcg) debug_tcg="yes"
;;
--disable-debug-tcg) debug_tcg="no"
@@ -1802,10 +1807,12 @@ Advanced options (experts only):
--block-drv-whitelist=L Same as --block-drv-rw-whitelist=L
--block-drv-rw-whitelist=L
set block driver read-write whitelist
- (affects only QEMU, not qemu-img)
+ (by default affects only QEMU, not tools like qemu-img)
--block-drv-ro-whitelist=L
set block driver read-only whitelist
- (affects only QEMU, not qemu-img)
+ (by default affects only QEMU, not tools like qemu-img)
+ --enable-block-drv-whitelist-in-tools
+ use block whitelist also in tools instead of only QEMU
--enable-trace-backends=B Set trace backend
Available backends: $trace_backend_list
--with-trace-file=NAME Full PATH,NAME of file to store traces
@@ -3974,18 +3981,11 @@ cat > $TMPC << EOF
int main(void)
{
uint64_t x = 0, y = 0;
-#ifdef __ATOMIC_RELAXED
y = __atomic_load_n(&x, __ATOMIC_RELAXED);
__atomic_store_n(&x, y, __ATOMIC_RELAXED);
__atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
__atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
__atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
-#else
- typedef char is_host64[sizeof(void *) >= sizeof(uint64_t) ? 1 : -1];
- __sync_lock_test_and_set(&x, y);
- __sync_val_compare_and_swap(&x, y, 0);
- __sync_fetch_and_add(&x, y);
-#endif
return 0;
}
EOF
@@ -4573,6 +4573,9 @@ if test "$audio_win_int" = "yes" ; then
fi
echo "CONFIG_BDRV_RW_WHITELIST=$block_drv_rw_whitelist" >> $config_host_mak
echo "CONFIG_BDRV_RO_WHITELIST=$block_drv_ro_whitelist" >> $config_host_mak
+if test "$block_drv_whitelist_tools" = "yes" ; then
+ echo "CONFIG_BDRV_WHITELIST_TOOLS=y" >> $config_host_mak
+fi
if test "$xfs" = "yes" ; then
echo "CONFIG_XFS=y" >> $config_host_mak
fi
diff --git a/cpu.c b/cpu.c
index 83059537d7..e1799a15bc 100644
--- a/cpu.c
+++ b/cpu.c
@@ -225,11 +225,6 @@ void tb_invalidate_phys_addr(target_ulong addr)
tb_invalidate_phys_page_range(addr, addr + 1);
mmap_unlock();
}
-
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
- tb_invalidate_phys_addr(pc);
-}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
@@ -250,25 +245,19 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
ram_addr = memory_region_get_ram_addr(mr) + addr;
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}
-
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
- /*
- * There may not be a virtual to physical translation for the pc
- * right now, but there may exist cached TB for this pc.
- * Flush the whole TB cache to force re-translation of such TBs.
- * This is heavyweight, but we're debugging anyway.
- */
- tb_flush(cpu);
-}
#endif
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint)
{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUBreakpoint *bp;
+ if (cc->gdb_adjust_breakpoint) {
+ pc = cc->gdb_adjust_breakpoint(cpu, pc);
+ }
+
bp = g_malloc(sizeof(*bp));
bp->pc = pc;
@@ -281,8 +270,6 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
}
- breakpoint_invalidate(cpu, pc);
-
if (breakpoint) {
*breakpoint = bp;
}
@@ -294,8 +281,13 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUBreakpoint *bp;
+ if (cc->gdb_adjust_breakpoint) {
+ pc = cc->gdb_adjust_breakpoint(cpu, pc);
+ }
+
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(cpu, bp);
@@ -310,8 +302,6 @@ void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);
- breakpoint_invalidate(cpu, bp->pc);
-
trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
g_free(bp);
}
@@ -336,10 +326,6 @@ void cpu_single_step(CPUState *cpu, int enabled)
cpu->singlestep_enabled = enabled;
if (kvm_enabled()) {
kvm_update_guest_debug(cpu, 0);
- } else {
- /* must flush all the translated code to avoid inconsistencies */
- /* XXX: only flush what is necessary */
- tb_flush(cpu);
}
trace_breakpoint_singlestep(cpu->cpu_index, enabled);
}
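
[Reader's aid: the new gdb_adjust_breakpoint hook lets a target remap
the address gdb supplies before it is stored or matched. A hypothetical
sketch; the mask name is invented, not taken from this diff.]

    /* Hypothetical target hook: fold an aliased code address onto its
     * canonical form so breakpoint insert and remove stay symmetric. */
    static vaddr mytarget_gdb_adjust_breakpoint(CPUState *cs, vaddr addr)
    {
        return addr & CODE_ADDR_MASK;   /* hypothetical mask */
    }
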
diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst
index 1d8470eada..b7d602a288 100644
--- a/docs/tools/qemu-img.rst
+++ b/docs/tools/qemu-img.rst
@@ -414,7 +414,7 @@ Command description:
4
Error on reading data
-.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
+.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
Convert the disk image *FILENAME* or a snapshot *SNAPSHOT_PARAM*
to disk image *OUTPUT_FILENAME* using format *OUTPUT_FMT*. It can
@@ -456,6 +456,12 @@ Command description:
*NUM_COROUTINES* specifies how many coroutines work in parallel during
the convert process (defaults to 8).
+ Use of ``--bitmaps`` requests that any persistent bitmaps present in
+ the original are also copied to the destination. If any bitmap is
+ inconsistent in the source, the conversion will fail unless
+ ``--skip-broken-bitmaps`` is also specified to copy only the
+ consistent bitmaps.
+
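For example, to convert while preserving all consistent bitmaps and dropping any broken ones (hypothetical file names):

    $ qemu-img convert -O qcow2 --bitmaps --skip-broken-bitmaps src.qcow2 dst.qcow2
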
.. option:: create [--object OBJECTDEF] [-q] [-f FMT] [-b BACKING_FILE] [-F BACKING_FMT] [-u] [-o OPTIONS] FILENAME [SIZE]
Create the new disk image *FILENAME* of size *SIZE* and format
diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig
index 1932f66af8..cfc4ede8d9 100644
--- a/hw/acpi/Kconfig
+++ b/hw/acpi/Kconfig
@@ -42,3 +42,7 @@ config ACPI_VMGENID
depends on PC
config ACPI_HW_REDUCED
+ bool
+ select ACPI
+ select ACPI_MEMORY_HOTPLUG
+ select ACPI_NVDIMM
diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build
index 9b7fa75719..29f804d13e 100644
--- a/hw/acpi/meson.build
+++ b/hw/acpi/meson.build
@@ -3,6 +3,7 @@ acpi_ss.add(files(
'acpi_interface.c',
'aml-build.c',
'bios-linker-loader.c',
+ 'core.c',
'utils.c',
))
acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu.c'))
@@ -14,7 +15,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_VMGENID', if_true: files('vmgenid.c'))
acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device.c'))
acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c'))
acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c'))
-acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('core.c', 'piix4.c', 'pcihp.c'), if_false: files('acpi-stub.c'))
+acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('piix4.c', 'pcihp.c'))
acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c'))
acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c'))
acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c'))
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
index 90b19c0861..4ba0aca067 100644
--- a/hw/arm/Kconfig
+++ b/hw/arm/Kconfig
@@ -388,6 +388,7 @@ config NPCM7XX
select A9MPCORE
select ADM1272
select ARM_GIC
+ select SMBUS
select AT24C # EEPROM
select MAX34451
select PL310 # cache controller
@@ -403,7 +404,7 @@ config FSL_IMX25
select IMX_FEC
select IMX_I2C
select WDT_IMX2
- select DS1338
+ select SDHCI
config FSL_IMX31
bool
diff --git a/hw/char/Kconfig b/hw/char/Kconfig
index 4cf36ac637..2e4f620b13 100644
--- a/hw/char/Kconfig
+++ b/hw/char/Kconfig
@@ -61,6 +61,7 @@ config AVR_USART
config MCHP_PFSOC_MMUART
bool
+ select SERIAL
config SIFIVE_UART
bool
diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig
index bad6cf5b4e..ddedcef0b2 100644
--- a/hw/i386/Kconfig
+++ b/hw/i386/Kconfig
@@ -111,6 +111,7 @@ config MICROVM
select ACPI_HW_REDUCED
select PCI_EXPRESS_GENERIC_BRIDGE
select USB_XHCI_SYSBUS
+ select I8254
config X86_IOMMU
bool
diff --git a/hw/ide/Kconfig b/hw/ide/Kconfig
index 8e2c893454..dd85fa3619 100644
--- a/hw/ide/Kconfig
+++ b/hw/ide/Kconfig
@@ -8,7 +8,7 @@ config IDE_QDEV
config IDE_PCI
bool
depends on PCI
- select IDE_CORE
+ select IDE_QDEV
config IDE_ISA
bool
diff --git a/hw/isa/Kconfig b/hw/isa/Kconfig
index 96db170eff..d42143a991 100644
--- a/hw/isa/Kconfig
+++ b/hw/isa/Kconfig
@@ -50,6 +50,11 @@ config VT82C686
select FDC_ISA
select USB_UHCI
select APM
+ select I8254
+ select I8257
+ select I8259
+ select MC146818RTC
+ select PARALLEL
config SMC37C669
bool
diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig
index c245e881a2..b4c5549ce8 100644
--- a/hw/mips/Kconfig
+++ b/hw/mips/Kconfig
@@ -47,9 +47,15 @@ config LOONGSON3V
config MIPS_CPS
bool
select PTIMER
+ select MIPS_ITU
config MIPS_BOSTON
bool
+ select FITLOADER
+ select MIPS_CPS
+ select PCI_EXPRESS_XILINX
+ select AHCI_ICH9
+ select SERIAL
config FW_CFG_MIPS
bool
diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig
index 84494400b8..2b5f7d58cc 100644
--- a/hw/pci-host/Kconfig
+++ b/hw/pci-host/Kconfig
@@ -76,3 +76,4 @@ config SH_PCI
config MV64361
bool
select PCI
+ select I8259
diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig
index 322a7eb031..400511c6b7 100644
--- a/hw/ppc/Kconfig
+++ b/hw/ppc/Kconfig
@@ -79,6 +79,7 @@ config PEGASOS2
select VOF
# This should come with VT82C686
select ACPI_X86
+ imply ATI_VGA
config PREP
bool
@@ -130,6 +131,7 @@ config E500
select SERIAL
select MPC_I2C
select FDT_PPC
+ select DS1338
config VIRTEX
bool
diff --git a/hw/remote/memory.c b/hw/remote/memory.c
index 472ed2a272..6e21ab1a45 100644
--- a/hw/remote/memory.c
+++ b/hw/remote/memory.c
@@ -46,7 +46,7 @@ void remote_sysmem_reconfig(MPQemuMsg *msg, Error **errp)
subregion = g_new(MemoryRegion, 1);
memory_region_init_ram_from_fd(subregion, NULL,
name, sysmem_info->sizes[region],
- true, msg->fds[region],
+ RAM_SHARED, msg->fds[region],
sysmem_info->offsets[region],
errp);
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
index 86957ec7b0..0590f443fd 100644
--- a/hw/riscv/Kconfig
+++ b/hw/riscv/Kconfig
@@ -1,3 +1,6 @@
+config RISCV_NUMA
+ bool
+
config IBEX
bool
@@ -34,6 +37,7 @@ config RISCV_VIRT
imply PCI_DEVICES
imply VIRTIO_VGA
imply TEST_DEVICES
+ select RISCV_NUMA
select GOLDFISH_RTC
select MSI_NONBROKEN
select PCI
@@ -74,6 +78,7 @@ config SIFIVE_U
config SPIKE
bool
+ select RISCV_NUMA
select HTIF
select MSI_NONBROKEN
select SIFIVE_CLINT
diff --git a/hw/riscv/meson.build b/hw/riscv/meson.build
index a97454661c..ab6cae57ea 100644
--- a/hw/riscv/meson.build
+++ b/hw/riscv/meson.build
@@ -1,6 +1,6 @@
riscv_ss = ss.source_set()
riscv_ss.add(files('boot.c'), fdt)
-riscv_ss.add(files('numa.c'))
+riscv_ss.add(when: 'CONFIG_RISCV_NUMA', if_true: files('numa.c'))
riscv_ss.add(files('riscv_hart.c'))
riscv_ss.add(when: 'CONFIG_OPENTITAN', if_true: files('opentitan.c'))
riscv_ss.add(when: 'CONFIG_RISCV_VIRT', if_true: files('virt.c'))
diff --git a/hw/tricore/Kconfig b/hw/tricore/Kconfig
index 506e6183c1..33c1e852c3 100644
--- a/hw/tricore/Kconfig
+++ b/hw/tricore/Kconfig
@@ -1,9 +1,8 @@
-config TRICORE
+config TRICORE_TESTBOARD
bool
config TRIBOARD
bool
- select TRICORE
select TC27X_SOC
config TC27X_SOC
diff --git a/hw/tricore/meson.build b/hw/tricore/meson.build
index 47e36bb077..7e3585daf8 100644
--- a/hw/tricore/meson.build
+++ b/hw/tricore/meson.build
@@ -1,6 +1,6 @@
tricore_ss = ss.source_set()
-tricore_ss.add(when: 'CONFIG_TRICORE', if_true: files('tricore_testboard.c'))
-tricore_ss.add(when: 'CONFIG_TRICORE', if_true: files('tricore_testdevice.c'))
+tricore_ss.add(when: 'CONFIG_TRICORE_TESTBOARD', if_true: files('tricore_testboard.c'))
+tricore_ss.add(when: 'CONFIG_TRICORE_TESTBOARD', if_true: files('tricore_testdevice.c'))
tricore_ss.add(when: 'CONFIG_TRIBOARD', if_true: files('triboard.c'))
tricore_ss.add(when: 'CONFIG_TC27X_SOC', if_true: files('tc27x_soc.c'))
diff --git a/include/block/aio.h b/include/block/aio.h
index 807edce9b5..47fbe9d81f 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -232,6 +232,9 @@ struct AioContext {
int64_t poll_grow; /* polling time growth factor */
int64_t poll_shrink; /* polling time shrink factor */
+ /* AIO engine parameters */
+ int64_t aio_max_batch; /* maximum number of requests in a batch */
+
/*
* List of handlers participating in userspace polling. Protected by
* ctx->list_lock. Iterated and modified mostly by the event loop thread
@@ -755,4 +758,13 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
int64_t grow, int64_t shrink,
Error **errp);
+/**
+ * aio_context_set_aio_params:
+ * @ctx: the aio context
+ * @max_batch: maximum number of requests in a batch; 0 means that the

+ * engine will use its default
+ */
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+ Error **errp);
+
#endif
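
A conforming implementation of the new setter can be very small, since 0 is a valid value meaning "engine default"; a sketch (the aio_notify() call is an assumption, by analogy with the poll-params setter):

    void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                    Error **errp)
    {
        ctx->aio_max_batch = max_batch;
        aio_notify(ctx); /* assumed: kick the event loop to pick it up */
    }
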
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 754f4130c9..5d1b6d80fb 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -492,13 +492,18 @@ struct TranslationBlock {
target_ulong cs_base; /* CS base for this block */
uint32_t flags; /* flags defining in which context the code was generated */
uint32_t cflags; /* compile flags */
-#define CF_COUNT_MASK 0x00007fff
-#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
-#define CF_MEMI_ONLY 0x00010000 /* Only instrument memory ops */
-#define CF_USE_ICOUNT 0x00020000
-#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
-#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
-#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
+
+/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
+#define CF_COUNT_MASK 0x000001ff
+#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
+#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
+#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
+#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
+#define CF_MEMI_ONLY 0x00010000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT 0x00020000
+#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
+#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* Per-vCPU dynamic tracing state used to generate this TB */
@@ -563,10 +568,7 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
}
/* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(CPUState *cpu)
-{
- return cpu->tcg_cflags;
-}
+uint32_t curr_cflags(CPUState *cpu);
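
curr_cflags() moves out of line so that per-execution state can be folded into the flags used for TB hashing; a plausible sketch using the new bits (the exact policy is an assumption):

    uint32_t curr_cflags(CPUState *cpu)
    {
        uint32_t cflags = cpu->tcg_cflags;

        if (unlikely(cpu->singlestep_enabled)) {
            /* One insn per TB, no chaining, and flag gdb single-step. */
            cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
        }
        return cflags;
    }
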
/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
diff --git a/include/exec/translator.h b/include/exec/translator.h
index dd9c06d40d..d318803267 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -89,15 +89,6 @@ typedef struct DisasContextBase {
* @insn_start:
* Emit the tcg_gen_insn_start opcode.
*
- * @breakpoint_check:
- * When called, the breakpoint has already been checked to match the PC,
- * but the target may decide the breakpoint missed the address
- * (e.g., due to conditions encoded in their flags). Return true to
- * indicate that the breakpoint did hit, in which case no more breakpoints
- * are checked. If the breakpoint did hit, emit any code required to
- * signal the exception, and set db->is_jmp as necessary to terminate
- * the main loop.
- *
* @translate_insn:
* Disassemble one instruction and set db->pc_next for the start
* of the following instruction. Set db->is_jmp as necessary to
@@ -113,8 +104,6 @@ typedef struct TranslatorOps {
void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
void (*tb_start)(DisasContextBase *db, CPUState *cpu);
void (*insn_start)(DisasContextBase *db, CPUState *cpu);
- bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
- const CPUBreakpoint *bp);
void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
void (*disas_log)(const DisasContextBase *db, CPUState *cpu);
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 4e0ea68efc..bc864564ce 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -103,6 +103,9 @@ struct SysemuCPUOps;
* also implement the synchronize_from_tb hook.
* @gdb_read_register: Callback for letting GDB read a register.
* @gdb_write_register: Callback for letting GDB write a register.
+ * @gdb_adjust_breakpoint: Callback for adjusting the address of a
+ * breakpoint. Used by AVR to work around a GDB mis-feature with
+ * its Harvard-architecture split between code and data.
* @gdb_num_core_regs: Number of core registers accessible to GDB.
* @gdb_core_xml_file: File name for core registers GDB XML description.
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
@@ -137,6 +140,7 @@ struct CPUClass {
void (*set_pc)(CPUState *cpu, vaddr value);
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
+ vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
const char *gdb_core_xml_file;
gchar * (*gdb_arch_name)(CPUState *cpu);
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index 72d791438c..eab27d0c03 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -89,6 +89,12 @@ struct TCGCPUOps {
bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
/**
+ * @debug_check_breakpoint: return true if the architectural
+ * breakpoint whose PC has matched should really fire.
+ */
+ bool (*debug_check_breakpoint)(CPUState *cpu);
+
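
The execution loop is expected to consult this hook once a breakpoint's address matches, before raising EXCP_DEBUG; schematically (a sketch of the call site, not the exact code):

    if (cc->tcg_ops->debug_check_breakpoint &&
        !cc->tcg_ops->debug_check_breakpoint(cpu)) {
        /* The PC matched, but the architecture says it must not fire
         * (e.g. x86 RF, or ARM's breakpoint conditions): keep going. */
        return false;
    }
    cpu->exception_index = EXCP_DEBUG;
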
+ /**
* @io_recompile_replay_branch: Callback for cpu_io_recompile.
*
* The cpu has been stopped, and cpu_restore_state_from_tb has been
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 3ccf84fd46..112a29910b 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -60,8 +60,9 @@
(unsigned short)1, \
(expr)+0))))))
-#ifdef __ATOMIC_RELAXED
-/* For C11 atomic ops */
+#ifndef __ATOMIC_RELAXED
+#error "Expecting C11 atomic ops"
+#endif
/* Manual memory barriers
*
@@ -239,193 +240,8 @@
#define qatomic_xor(ptr, n) \
((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
-#else /* __ATOMIC_RELAXED */
-
-#ifdef __alpha__
-#define smp_read_barrier_depends() asm volatile("mb":::"memory")
-#endif
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-
-/*
- * Because of the strongly ordered storage model, wmb() and rmb() are nops
- * here (a compiler barrier only). QEMU doesn't do accesses to write-combining
- * qemu memory or non-temporal load/stores from C code.
- */
-#define smp_mb_release() barrier()
-#define smp_mb_acquire() barrier()
-
-/*
- * __sync_lock_test_and_set() is documented to be an acquire barrier only,
- * but it is a full barrier at the hardware level. Add a compiler barrier
- * to make it a full barrier also at the compiler level.
- */
-#define qatomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
-
-#elif defined(_ARCH_PPC)
-
-/*
- * We use an eieio() for wmb() on powerpc. This assumes we don't
- * need to order cacheable and non-cacheable stores with respect to
- * each other.
- *
- * smp_mb has the same problem as on x86 for not-very-new GCC
- * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
- */
-#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })
-#if defined(__powerpc64__)
-#define smp_mb_release() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
-#define smp_mb_acquire() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
-#else
-#define smp_mb_release() ({ asm volatile("sync" ::: "memory"); (void)0; })
-#define smp_mb_acquire() ({ asm volatile("sync" ::: "memory"); (void)0; })
-#endif
-#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })
-
-#endif /* _ARCH_PPC */
-
-/*
- * For (host) platforms we don't have explicit barrier definitions
- * for, we use the gcc __sync_synchronize() primitive to generate a
- * full barrier. This should be safe on all platforms, though it may
- * be overkill for smp_mb_acquire() and smp_mb_release().
- */
-#ifndef smp_mb
-#define smp_mb() __sync_synchronize()
-#endif
-
-#ifndef smp_mb_acquire
-#define smp_mb_acquire() __sync_synchronize()
-#endif
-
-#ifndef smp_mb_release
-#define smp_mb_release() __sync_synchronize()
-#endif
-
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() barrier()
-#endif
-
-#ifndef signal_barrier
-#define signal_barrier() barrier()
-#endif
-
-/* These will only be atomic if the processor does the fetch or store
- * in a single issue memory operation
- */
-#define qatomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
-#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
-
-#define qatomic_read(ptr) qatomic_read__nocheck(ptr)
-#define qatomic_set(ptr, i) qatomic_set__nocheck(ptr,i)
-
-/**
- * qatomic_rcu_read - reads a RCU-protected pointer to a local variable
- * into a RCU read-side critical section. The pointer can later be safely
- * dereferenced within the critical section.
- *
- * This ensures that the pointer copy is invariant thorough the whole critical
- * section.
- *
- * Inserts memory barriers on architectures that require them (currently only
- * Alpha) and documents which pointers are protected by RCU.
- *
- * qatomic_rcu_read also includes a compiler barrier to ensure that
- * value-speculative optimizations (e.g. VSS: Value Speculation
- * Scheduling) does not perform the data read before the pointer read
- * by speculating the value of the pointer.
- *
- * Should match qatomic_rcu_set(), qatomic_xchg(), qatomic_cmpxchg().
- */
-#define qatomic_rcu_read(ptr) ({ \
- typeof(*ptr) _val = qatomic_read(ptr); \
- smp_read_barrier_depends(); \
- _val; \
-})
-
-/**
- * qatomic_rcu_set - assigns (publicizes) a pointer to a new data structure
- * meant to be read by RCU read-side critical sections.
- *
- * Documents which pointers will be dereferenced by RCU read-side critical
- * sections and adds the required memory barriers on architectures requiring
- * them. It also makes sure the compiler does not reorder code initializing the
- * data structure before its publication.
- *
- * Should match qatomic_rcu_read().
- */
-#define qatomic_rcu_set(ptr, i) do { \
- smp_wmb(); \
- qatomic_set(ptr, i); \
-} while (0)
-
-#define qatomic_load_acquire(ptr) ({ \
- typeof(*ptr) _val = qatomic_read(ptr); \
- smp_mb_acquire(); \
- _val; \
-})
-
-#define qatomic_store_release(ptr, i) do { \
- smp_mb_release(); \
- qatomic_set(ptr, i); \
-} while (0)
-
-#ifndef qatomic_xchg
-#if defined(__clang__)
-#define qatomic_xchg(ptr, i) __sync_swap(ptr, i)
-#else
-/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
-#define qatomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
-#endif
-#endif
-#define qatomic_xchg__nocheck qatomic_xchg
-
-/* Provide shorter names for GCC atomic builtins. */
-#define qatomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
-#define qatomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
-
-#define qatomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
-#define qatomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
-#define qatomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
-#define qatomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
-#define qatomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
-
-#define qatomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
-#define qatomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
-#define qatomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
-#define qatomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
-#define qatomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
-#define qatomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
-#define qatomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
-
-#define qatomic_cmpxchg(ptr, old, new) \
- __sync_val_compare_and_swap(ptr, old, new)
-#define qatomic_cmpxchg__nocheck(ptr, old, new) qatomic_cmpxchg(ptr, old, new)
-
-/* And even shorter names that return void. */
-#define qatomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
-#define qatomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
-#define qatomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
-#define qatomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
-#define qatomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
-#define qatomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
-#define qatomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))
-
-#endif /* __ATOMIC_RELAXED */
-
-#ifndef smp_wmb
#define smp_wmb() smp_mb_release()
-#endif
-#ifndef smp_rmb
#define smp_rmb() smp_mb_acquire()
-#endif
-
-/* This is more efficient than a store plus a fence. */
-#if !defined(__SANITIZE_THREAD__)
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-#define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
-#endif
-#endif
/* qatomic_mb_read/set semantics map Java volatile variables. They are
* less expensive on some platforms (notably POWER) than fully
@@ -435,16 +251,16 @@
* use. See docs/devel/atomics.rst for more discussion.
*/
-#ifndef qatomic_mb_read
#define qatomic_mb_read(ptr) \
qatomic_load_acquire(ptr)
-#endif
-#ifndef qatomic_mb_set
-#define qatomic_mb_set(ptr, i) do { \
- qatomic_store_release(ptr, i); \
- smp_mb(); \
-} while(0)
+#if !defined(__SANITIZE_THREAD__) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+/* This is more efficient than a store plus a fence. */
+# define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
+#else
+# define qatomic_mb_set(ptr, i) \
+ ({ qatomic_store_release(ptr, i); smp_mb(); })
#endif
#define qatomic_fetch_inc_nonzero(ptr) ({ \
@@ -455,28 +271,29 @@
_oldn; \
})
-/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
-#ifdef CONFIG_ATOMIC64
-static inline int64_t qatomic_read_i64(const int64_t *ptr)
-{
- /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
- return qatomic_read__nocheck(ptr);
-}
-
-static inline uint64_t qatomic_read_u64(const uint64_t *ptr)
-{
- return qatomic_read__nocheck(ptr);
-}
-
-static inline void qatomic_set_i64(int64_t *ptr, int64_t val)
-{
- qatomic_set__nocheck(ptr, val);
-}
+/*
+ * Abstractions to access atomically (i.e. "once") i64/u64 variables.
+ *
+ * The i386 ABI is odd in that by default members are only aligned to
+ * 4 bytes, which means that 8-byte types can wind up mis-aligned.
+ * Clang will then warn about this, and emit a call into libatomic.
+ *
+ * Using these explicitly aligned types for structure members that
+ * will be accessed atomically avoids both the warning and the
+ * libatomic call.
+ */
+typedef int64_t aligned_int64_t __attribute__((aligned(8)));
+typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
-static inline void qatomic_set_u64(uint64_t *ptr, uint64_t val)
-{
- qatomic_set__nocheck(ptr, val);
-}
+#ifdef CONFIG_ATOMIC64
+/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
+#define qatomic_read_i64(P) \
+ _Generic(*(P), int64_t: qatomic_read__nocheck(P))
+#define qatomic_read_u64(P) \
+ _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
+#define qatomic_set_i64(P, V) \
+ _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
+#define qatomic_set_u64(P, V) \
+ _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))
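
Rewriting the accessors as _Generic macros keeps them type-checked: passing a pointer whose target is not exactly the named 64-bit type now fails at compile time instead of silently compiling. For example:

    aligned_int64_t counter;

    qatomic_set_i64(&counter, 1);     /* OK: *(&counter) is int64_t */
    /* qatomic_set_u64(&counter, 1);     would not compile: only a
     * uint64_t association exists in that macro's _Generic list. */
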
static inline void qatomic64_init(void)
{
diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h
index fdd3d1b8f9..802402254b 100644
--- a/include/qemu/stats64.h
+++ b/include/qemu/stats64.h
@@ -21,7 +21,7 @@
typedef struct Stat64 {
#ifdef CONFIG_ATOMIC64
- uint64_t value;
+ aligned_uint64_t value;
#else
uint32_t low, high;
uint32_t lock;
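
With the field re-typed, the new _Generic accessors apply directly when CONFIG_ATOMIC64 holds; a sketch (field name from the hunk above):

    static inline uint64_t stat64_get_sketch(Stat64 *s)
    {
        /* aligned_uint64_t supplies the 8-byte alignment that keeps this
         * a plain atomic load on i386 rather than a libatomic call. */
        return qatomic_read_u64(&s->value);
    }
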
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index f177142f16..7f714bd136 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -37,6 +37,9 @@ struct IOThread {
int64_t poll_max_ns;
int64_t poll_grow;
int64_t poll_shrink;
+
+ /* AioContext AIO engine parameters */
+ int64_t aio_max_batch;
};
typedef struct IOThread IOThread;
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 25dd19d6e1..44ccd86f3e 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -1341,31 +1341,32 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
# define helper_ret_stl_mmu helper_le_stl_mmu
# define helper_ret_stq_mmu helper_le_stq_mmu
#endif
+#endif /* CONFIG_SOFTMMU */
-uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
- uint64_t cmpv, uint64_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
- uint64_t cmpv, uint64_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t cmpv, uint64_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t cmpv, uint64_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
-TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
(CPUArchState *env, target_ulong addr, TYPE val, \
TCGMemOpIdx oi, uintptr_t retaddr);
@@ -1411,31 +1412,22 @@ GEN_ATOMIC_HELPER_ALL(xchg)
#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
-#endif /* CONFIG_SOFTMMU */
-/*
- * These aren't really a "proper" helpers because TCG cannot manage Int128.
- * However, use the same format as the others, for use by the backends.
- *
- * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
- * the ld/st functions are only defined if HAVE_ATOMIC128,
- * as defined by <qemu/atomic128.h>.
- */
-Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
- Int128 cmpv, Int128 newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
- Int128 cmpv, Int128 newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-
-Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
- TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
+ Int128 cmpv, Int128 newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
+ Int128 cmpv, Int128 newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
diff --git a/iothread.c b/iothread.c
index 2c5ccd7367..ddbbde61f7 100644
--- a/iothread.c
+++ b/iothread.c
@@ -152,6 +152,24 @@ static void iothread_init_gcontext(IOThread *iothread)
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}
+static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
+{
+ ERRP_GUARD();
+
+ aio_context_set_poll_params(iothread->ctx,
+ iothread->poll_max_ns,
+ iothread->poll_grow,
+ iothread->poll_shrink,
+ errp);
+ if (*errp) {
+ return;
+ }
+
+ aio_context_set_aio_params(iothread->ctx,
+ iothread->aio_max_batch,
+ errp);
+}
+
static void iothread_complete(UserCreatable *obj, Error **errp)
{
Error *local_error = NULL;
@@ -171,11 +189,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
*/
iothread_init_gcontext(iothread);
- aio_context_set_poll_params(iothread->ctx,
- iothread->poll_max_ns,
- iothread->poll_grow,
- iothread->poll_shrink,
- &local_error);
+ iothread_set_aio_context_params(iothread, &local_error);
if (local_error) {
error_propagate(errp, local_error);
aio_context_unref(iothread->ctx);
@@ -212,8 +226,11 @@ static PollParamInfo poll_grow_info = {
static PollParamInfo poll_shrink_info = {
"poll-shrink", offsetof(IOThread, poll_shrink),
};
+static PollParamInfo aio_max_batch_info = {
+ "aio-max-batch", offsetof(IOThread, aio_max_batch),
+};
-static void iothread_get_poll_param(Object *obj, Visitor *v,
+static void iothread_get_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
@@ -223,7 +240,7 @@ static void iothread_get_poll_param(Object *obj, Visitor *v,
visit_type_int64(v, name, field, errp);
}
-static void iothread_set_poll_param(Object *obj, Visitor *v,
+static bool iothread_set_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
@@ -232,17 +249,36 @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
int64_t value;
if (!visit_type_int64(v, name, &value, errp)) {
- return;
+ return false;
}
if (value < 0) {
error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
info->name, INT64_MAX);
- return;
+ return false;
}
*field = value;
+ return true;
+}
+
+static void iothread_get_poll_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ iothread_get_param(obj, v, name, opaque, errp);
+}
+
+static void iothread_set_poll_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ IOThread *iothread = IOTHREAD(obj);
+
+ if (!iothread_set_param(obj, v, name, opaque, errp)) {
+ return;
+ }
+
if (iothread->ctx) {
aio_context_set_poll_params(iothread->ctx,
iothread->poll_max_ns,
@@ -252,6 +288,29 @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
}
}
+static void iothread_get_aio_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ iothread_get_param(obj, v, name, opaque, errp);
+}
+
+static void iothread_set_aio_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ IOThread *iothread = IOTHREAD(obj);
+
+ if (!iothread_set_param(obj, v, name, opaque, errp)) {
+ return;
+ }
+
+ if (iothread->ctx) {
+ aio_context_set_aio_params(iothread->ctx,
+ iothread->aio_max_batch,
+ errp);
+ }
+}
+
static void iothread_class_init(ObjectClass *klass, void *class_data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
@@ -269,6 +328,10 @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
iothread_get_poll_param,
iothread_set_poll_param,
NULL, &poll_shrink_info);
+ object_class_property_add(klass, "aio-max-batch", "int",
+ iothread_get_aio_param,
+ iothread_set_aio_param,
+ NULL, &aio_max_batch_info);
}
static const TypeInfo iothread_info = {
@@ -318,6 +381,7 @@ static int query_one_iothread(Object *object, void *opaque)
info->poll_max_ns = iothread->poll_max_ns;
info->poll_grow = iothread->poll_grow;
info->poll_shrink = iothread->poll_shrink;
+ info->aio_max_batch = iothread->aio_max_batch;
QAPI_LIST_APPEND(*tail, info);
return 0;
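
Taken together, the property, the setter, and the query plumbing make the batch size tunable both at creation time and at run time; for example (hypothetical iothread id, command line abbreviated):

    $ qemu-system-x86_64 -object iothread,id=iothread1,aio-max-batch=16
    (qemu) qom-set /objects/iothread1 aio-max-batch 32
    (qemu) info iothreads
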
diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
index 3aaaf3337c..82d8183821 100644
--- a/linux-user/hppa/cpu_loop.c
+++ b/linux-user/hppa/cpu_loop.c
@@ -82,7 +82,7 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
o64 = *(uint64_t *)g2h(cs, old);
n64 = *(uint64_t *)g2h(cs, new);
#ifdef CONFIG_ATOMIC64
- r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(cs, addr),
+ r64 = qatomic_cmpxchg__nocheck((aligned_uint64_t *)g2h(cs, addr),
o64, n64);
ret = r64 != o64;
#else
diff --git a/meson.build b/meson.build
index cb3856fc35..df5094e563 100644
--- a/meson.build
+++ b/meson.build
@@ -2996,6 +2996,7 @@ summary_info += {'coroutine pool': config_host['CONFIG_COROUTINE_POOL'] == '1
if have_block
summary_info += {'Block whitelist (rw)': config_host['CONFIG_BDRV_RW_WHITELIST']}
summary_info += {'Block whitelist (ro)': config_host['CONFIG_BDRV_RO_WHITELIST']}
+ summary_info += {'Use block whitelist in tools': config_host.has_key('CONFIG_BDRV_WHITELIST_TOOLS')}
summary_info += {'VirtFS support': have_virtfs}
summary_info += {'build virtiofs daemon': have_virtiofsd}
summary_info += {'Live block migration': config_host.has_key('CONFIG_LIVE_BLOCK_MIGRATION')}
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index 0942027208..e00255f7ee 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -1893,6 +1893,8 @@ void hmp_info_iothreads(Monitor *mon, const QDict *qdict)
monitor_printf(mon, " poll-max-ns=%" PRId64 "\n", value->poll_max_ns);
monitor_printf(mon, " poll-grow=%" PRId64 "\n", value->poll_grow);
monitor_printf(mon, " poll-shrink=%" PRId64 "\n", value->poll_shrink);
+ monitor_printf(mon, " aio-max-batch=%" PRId64 "\n",
+ value->aio_max_batch);
}
qapi_free_IOThreadInfoList(info_list);
diff --git a/plugins/core.c b/plugins/core.c
index e1bcdb570d..474db287cb 100644
--- a/plugins/core.c
+++ b/plugins/core.c
@@ -27,7 +27,7 @@
#include "exec/helper-proto.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
-#include "trace/mem-internal.h" /* mem_info macros */
+#include "trace/mem.h" /* mem_info macros */
#include "plugin.h"
#include "qemu/compiler.h"
diff --git a/qapi/misc.json b/qapi/misc.json
index 156f98203e..5c2ca3b556 100644
--- a/qapi/misc.json
+++ b/qapi/misc.json
@@ -86,6 +86,9 @@
# @poll-shrink: how many ns will be removed from polling time, 0 means that
# it's not configured (since 2.9)
#
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine;
+# 0 means that the engine will use its default (since 6.1)
+#
# Since: 2.0
##
{ 'struct': 'IOThreadInfo',
@@ -93,7 +96,8 @@
'thread-id': 'int',
'poll-max-ns': 'int',
'poll-grow': 'int',
- 'poll-shrink': 'int' } }
+ 'poll-shrink': 'int',
+ 'aio-max-batch': 'int' } }
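
query-iothreads now reports the member alongside the polling parameters; a hypothetical exchange (all values illustrative):

    -> { "execute": "query-iothreads" }
    <- { "return": [ { "id": "iothread1", "thread-id": 1234,
                       "poll-max-ns": 32768, "poll-grow": 0,
                       "poll-shrink": 0, "aio-max-batch": 0 } ] }
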
##
# @query-iothreads:
diff --git a/qapi/qom.json b/qapi/qom.json
index 652be317b8..6d5f4a88e6 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -516,12 +516,17 @@
# algorithm detects it is spending too long polling without
# encountering events. 0 selects a default behaviour (default: 0)
#
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine;
+# 0 means that the engine will use its default
+# (default: 0, since 6.1)
+#
# Since: 2.0
##
{ 'struct': 'IothreadProperties',
'data': { '*poll-max-ns': 'int',
'*poll-grow': 'int',
- '*poll-shrink': 'int' } }
+ '*poll-shrink': 'int',
+ '*aio-max-batch': 'int' } }
##
# @MemoryBackendProperties:
diff --git a/qemu-img.c b/qemu-img.c
index 797742a443..908fd0cce5 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -82,6 +82,7 @@ enum {
OPTION_MERGE = 274,
OPTION_BITMAPS = 275,
OPTION_FORCE = 276,
+ OPTION_SKIP_BROKEN = 277,
};
typedef enum OutputFormat {
@@ -2101,7 +2102,32 @@ static int convert_do_copy(ImgConvertState *s)
return s->ret;
}
-static int convert_copy_bitmaps(BlockDriverState *src, BlockDriverState *dst)
+/* Check that bitmaps can be copied, or output an error */
+static int convert_check_bitmaps(BlockDriverState *src, bool skip_broken)
+{
+ BdrvDirtyBitmap *bm;
+
+ if (!bdrv_supports_persistent_dirty_bitmap(src)) {
+ error_report("Source lacks bitmap support");
+ return -1;
+ }
+ FOR_EACH_DIRTY_BITMAP(src, bm) {
+ if (!bdrv_dirty_bitmap_get_persistence(bm)) {
+ continue;
+ }
+ if (!skip_broken && bdrv_dirty_bitmap_inconsistent(bm)) {
+ error_report("Cannot copy inconsistent bitmap '%s'",
+ bdrv_dirty_bitmap_name(bm));
+ error_printf("Try --skip-broken-bitmaps, or "
+ "use 'qemu-img bitmap --remove' to delete it\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int convert_copy_bitmaps(BlockDriverState *src, BlockDriverState *dst,
+ bool skip_broken)
{
BdrvDirtyBitmap *bm;
Error *err = NULL;
@@ -2113,6 +2139,10 @@ static int convert_copy_bitmaps(BlockDriverState *src, BlockDriverState *dst)
continue;
}
name = bdrv_dirty_bitmap_name(bm);
+ if (skip_broken && bdrv_dirty_bitmap_inconsistent(bm)) {
+ warn_report("Skipping inconsistent bitmap '%s'", name);
+ continue;
+ }
qmp_block_dirty_bitmap_add(dst->node_name, name,
true, bdrv_dirty_bitmap_granularity(bm),
true, true,
@@ -2127,6 +2157,7 @@ static int convert_copy_bitmaps(BlockDriverState *src, BlockDriverState *dst)
&err);
if (err) {
error_reportf_err(err, "Failed to populate bitmap %s: ", name);
+ qmp_block_dirty_bitmap_remove(dst->node_name, name, NULL);
return -1;
}
}
@@ -2167,6 +2198,7 @@ static int img_convert(int argc, char **argv)
bool force_share = false;
bool explict_min_sparse = false;
bool bitmaps = false;
+ bool skip_broken = false;
int64_t rate_limit = 0;
ImgConvertState s = (ImgConvertState) {
@@ -2188,6 +2220,7 @@ static int img_convert(int argc, char **argv)
{"salvage", no_argument, 0, OPTION_SALVAGE},
{"target-is-zero", no_argument, 0, OPTION_TARGET_IS_ZERO},
{"bitmaps", no_argument, 0, OPTION_BITMAPS},
+ {"skip-broken-bitmaps", no_argument, 0, OPTION_SKIP_BROKEN},
{0, 0, 0, 0}
};
c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WUr:",
@@ -2316,6 +2349,9 @@ static int img_convert(int argc, char **argv)
case OPTION_BITMAPS:
bitmaps = true;
break;
+ case OPTION_SKIP_BROKEN:
+ skip_broken = true;
+ break;
}
}
@@ -2323,6 +2359,11 @@ static int img_convert(int argc, char **argv)
out_fmt = "raw";
}
+ if (skip_broken && !bitmaps) {
+ error_report("Use of --skip-broken-bitmaps requires --bitmaps");
+ goto fail_getopt;
+ }
+
if (s.compressed && s.copy_range) {
error_report("Cannot enable copy offloading when -c is used");
goto fail_getopt;
@@ -2554,9 +2595,8 @@ static int img_convert(int argc, char **argv)
ret = -1;
goto out;
}
- if (!bdrv_supports_persistent_dirty_bitmap(blk_bs(s.src[0]))) {
- error_report("Source lacks bitmap support");
- ret = -1;
+ ret = convert_check_bitmaps(blk_bs(s.src[0]), skip_broken);
+ if (ret < 0) {
goto out;
}
}
@@ -2680,7 +2720,7 @@ static int img_convert(int argc, char **argv)
/* Now copy the bitmaps */
if (bitmaps && ret == 0) {
- ret = convert_copy_bitmaps(blk_bs(s.src[0]), out_bs);
+ ret = convert_copy_bitmaps(blk_bs(s.src[0]), out_bs, skip_broken);
}
out:
diff --git a/qemu-options.hx b/qemu-options.hx
index 0c9ddc0274..99ed5ec5f1 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -5301,7 +5301,7 @@ SRST
CN=laptop.example.com,O=Example Home,L=London,ST=London,C=GB
- ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink``
+ ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink,aio-max-batch=aio-max-batch``
Creates a dedicated event loop thread that devices can be
assigned to. This is known as an IOThread. By default device
emulation happens in vCPU threads or the main event loop thread.
@@ -5337,7 +5337,11 @@ SRST
the polling time when the algorithm detects it is spending too
long polling without encountering events.
- The polling parameters can be modified at run-time using the
+ The ``aio-max-batch`` parameter is the maximum number of requests
+ in a batch for the AIO engine; 0 means that the engine will use
+ its default.
+
+ The IOThread parameters can be modified at run-time using the
``qom-set`` command (where ``iothread1`` is the IOThread's
``id``):
diff --git a/softmmu/timers-state.h b/softmmu/timers-state.h
index 8c262ce139..94bb7394c5 100644
--- a/softmmu/timers-state.h
+++ b/softmmu/timers-state.h
@@ -47,7 +47,7 @@ typedef struct TimersState {
int64_t last_delta;
/* Compensate for varying guest execution speed. */
- int64_t qemu_icount_bias;
+ aligned_int64_t qemu_icount_bias;
int64_t vm_clock_warp_start;
int64_t cpu_clock_offset;
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index 103c6326a2..de6c0a8439 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -1207,19 +1207,8 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
? 0x2000 + (palcode - 0x80) * 64
: 0x1000 + palcode * 64);
- /* Since the destination is running in PALmode, we don't really
- need the page permissions check. We'll see the existence of
- the page when we create the TB, and we'll flush all TBs if
- we change the PAL base register. */
- if (!ctx->base.singlestep_enabled) {
- tcg_gen_goto_tb(0);
- tcg_gen_movi_i64(cpu_pc, entry);
- tcg_gen_exit_tb(ctx->base.tb, 0);
- return DISAS_NORETURN;
- } else {
- tcg_gen_movi_i64(cpu_pc, entry);
- return DISAS_PC_UPDATED;
- }
+ tcg_gen_movi_i64(cpu_pc, entry);
+ return DISAS_PC_UPDATED;
}
#endif
}
@@ -2978,21 +2967,6 @@ static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dcbase->pc_next);
}
-static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);
-
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the logic setting tb->size below does the right thing. */
- ctx->base.pc_next += 4;
- return true;
-}
-
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
@@ -3051,7 +3025,6 @@ static const TranslatorOps alpha_tr_ops = {
.init_disas_context = alpha_tr_init_disas_context,
.tb_start = alpha_tr_tb_start,
.insn_start = alpha_tr_insn_start,
- .breakpoint_check = alpha_tr_breakpoint_check,
.translate_insn = alpha_tr_translate_insn,
.tb_stop = alpha_tr_tb_stop,
.disas_log = alpha_tr_disas_log,
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 9cddfd6a44..752b15bb79 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1984,6 +1984,7 @@ static const struct TCGCPUOps arm_tcg_ops = {
.do_unaligned_access = arm_cpu_do_unaligned_access,
.adjust_watchpoint_address = arm_adjust_watchpoint_address,
.debug_check_watchpoint = arm_debug_check_watchpoint,
+ .debug_check_breakpoint = arm_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index d2d97115ea..ed444bf436 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -911,6 +911,7 @@ static const struct TCGCPUOps arm_v7m_tcg_ops = {
.do_unaligned_access = arm_cpu_do_unaligned_access,
.adjust_watchpoint_address = arm_adjust_watchpoint_address,
.debug_check_watchpoint = arm_debug_check_watchpoint,
+ .debug_check_breakpoint = arm_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index 2ff72d47d1..2983e36dd3 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -216,8 +216,9 @@ static bool check_watchpoints(ARMCPU *cpu)
return false;
}
-static bool check_breakpoints(ARMCPU *cpu)
+bool arm_debug_check_breakpoint(CPUState *cs)
{
+ ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
int n;
@@ -238,15 +239,6 @@ static bool check_breakpoints(ARMCPU *cpu)
return false;
}
-void HELPER(check_breakpoints)(CPUARMState *env)
-{
- ARMCPU *cpu = env_archcpu(env);
-
- if (check_breakpoints(cpu)) {
- HELPER(exception_internal(env, EXCP_DEBUG));
- }
-}
-
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
/*
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index ac5c4452d5..26f79f9141 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -564,7 +564,7 @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
newv = int128_make128(new_lo, new_hi);
- oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+ oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
success = int128_eq(oldv, cmpv);
return !success;
@@ -638,7 +638,7 @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
*/
cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
newv = int128_make128(new_hi, new_lo);
- oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+ oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
success = int128_eq(oldv, cmpv);
return !success;
@@ -660,7 +660,7 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
newv = int128_make128(new_lo, new_hi);
- oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+ oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
env->xregs[rs] = int128_getlo(oldv);
env->xregs[rs + 1] = int128_gethi(oldv);
@@ -681,7 +681,7 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
newv = int128_make128(new_lo, new_hi);
- oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+ oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
env->xregs[rs + 1] = int128_getlo(oldv);
env->xregs[rs] = int128_gethi(oldv);
diff --git a/target/arm/helper.h b/target/arm/helper.h
index db87d7d537..248569b0cd 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -54,8 +54,6 @@ DEF_HELPER_1(yield, void, env)
DEF_HELPER_1(pre_hvc, void, env)
DEF_HELPER_2(pre_smc, void, env, i32)
-DEF_HELPER_1(check_breakpoints, void, env)
-
DEF_HELPER_3(cpsr_write, void, env, i32, i32)
DEF_HELPER_2(cpsr_write_eret, void, env, i32)
DEF_HELPER_1(cpsr_read, i32, env)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 3ba86e8af8..11a72013f5 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -282,6 +282,9 @@ void hw_breakpoint_update(ARMCPU *cpu, int n);
*/
void hw_breakpoint_update_all(ARMCPU *cpu);
+/* Callback function for checking if a breakpoint should trigger. */
+bool arm_debug_check_breakpoint(CPUState *cs);
+
/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index ca11a5fecd..422e2ac0c9 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14844,30 +14844,6 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
dc->insn_start = tcg_last_op();
}
-static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- if (bp->flags & BP_CPU) {
- gen_a64_set_pc_im(dc->base.pc_next);
- gen_helper_check_breakpoints(cpu_env);
- /* End the TB early; it likely won't be executed */
- dc->base.is_jmp = DISAS_TOO_MANY;
- } else {
- gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
- /* The address covered by the breakpoint must be
- included in [tb->pc, tb->pc + tb->size) in order
- to for it to be properly cleared -- thus we
- increment the PC here so that the logic setting
- tb->size below does the right thing. */
- dc->base.pc_next += 4;
- dc->base.is_jmp = DISAS_NORETURN;
- }
-
- return true;
-}
-
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -14982,7 +14958,6 @@ const TranslatorOps aarch64_translator_ops = {
.init_disas_context = aarch64_tr_init_disas_context,
.tb_start = aarch64_tr_tb_start,
.insn_start = aarch64_tr_insn_start,
- .breakpoint_check = aarch64_tr_breakpoint_check,
.translate_insn = aarch64_tr_translate_insn,
.tb_stop = aarch64_tr_tb_stop,
.disas_log = aarch64_tr_disas_log,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index e1a8152598..351afa43a2 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9438,33 +9438,6 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
dc->insn_start = tcg_last_op();
}
-static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- if (bp->flags & BP_CPU) {
- gen_set_condexec(dc);
- gen_set_pc_im(dc, dc->base.pc_next);
- gen_helper_check_breakpoints(cpu_env);
- /* End the TB early; it's likely not going to be executed */
- dc->base.is_jmp = DISAS_TOO_MANY;
- } else {
- gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
- /* The address covered by the breakpoint must be
- included in [tb->pc, tb->pc + tb->size) in order
- to for it to be properly cleared -- thus we
- increment the PC here so that the logic setting
- tb->size below does the right thing. */
- /* TODO: Advance PC by correct instruction length to
- * avoid disassembler error messages */
- dc->base.pc_next += 2;
- dc->base.is_jmp = DISAS_NORETURN;
- }
-
- return true;
-}
-
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
@@ -9827,7 +9800,6 @@ static const TranslatorOps arm_translator_ops = {
.init_disas_context = arm_tr_init_disas_context,
.tb_start = arm_tr_tb_start,
.insn_start = arm_tr_insn_start,
- .breakpoint_check = arm_tr_breakpoint_check,
.translate_insn = arm_tr_translate_insn,
.tb_stop = arm_tr_tb_stop,
.disas_log = arm_tr_disas_log,
@@ -9837,7 +9809,6 @@ static const TranslatorOps thumb_translator_ops = {
.init_disas_context = arm_tr_init_disas_context,
.tb_start = arm_tr_tb_start,
.insn_start = arm_tr_insn_start,
- .breakpoint_check = arm_tr_breakpoint_check,
.translate_insn = thumb_tr_translate_insn,
.tb_stop = arm_tr_tb_stop,
.disas_log = arm_tr_disas_log,
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index 57e3fab4a0..ea14175ca5 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -223,6 +223,7 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
cc->disas_set_info = avr_cpu_disas_set_info;
cc->gdb_read_register = avr_cpu_gdb_read_register;
cc->gdb_write_register = avr_cpu_gdb_write_register;
+ cc->gdb_adjust_breakpoint = avr_cpu_gdb_adjust_breakpoint;
cc->gdb_num_core_regs = 35;
cc->gdb_core_xml_file = "avr-cpu.xml";
cc->tcg_ops = &avr_tcg_ops;
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
index d148e8c75a..93e3faa0a9 100644
--- a/target/avr/cpu.h
+++ b/target/avr/cpu.h
@@ -162,6 +162,7 @@ hwaddr avr_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int avr_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int avr_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int avr_print_insn(bfd_vma addr, disassemble_info *info);
+vaddr avr_cpu_gdb_adjust_breakpoint(CPUState *cpu, vaddr addr);
static inline int avr_feature(CPUAVRState *env, AVRFeature feature)
{
diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c
index c28ed67efe..1c1b908c92 100644
--- a/target/avr/gdbstub.c
+++ b/target/avr/gdbstub.c
@@ -82,3 +82,16 @@ int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 0;
}
+
+vaddr avr_cpu_gdb_adjust_breakpoint(CPUState *cpu, vaddr addr)
+{
+ /*
+ * This is due to some strange GDB behavior.
+ * Let's assume main has address 0x100:
+ * b main - sets breakpoint at address 0x00000100 (code)
+ * b *0x100 - sets breakpoint at address 0x00800100 (data)
+ *
+ * Force all breakpoints into code space.
+ */
+ return addr % OFFSET_DATA;
+}
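
Worked through with AVR's data-space base (assuming OFFSET_DATA is 0x00800000):

    /* avr_cpu_gdb_adjust_breakpoint(cs, 0x00800100) -> 0x00000100
     * avr_cpu_gdb_adjust_breakpoint(cs, 0x00000100) -> 0x00000100
     * Both GDB spellings therefore break at the same code address. */
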
diff --git a/target/avr/translate.c b/target/avr/translate.c
index 8237a03c23..1111e08b83 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -2900,14 +2900,6 @@ static bool canonicalize_skip(DisasContext *ctx)
return true;
}
-static void gen_breakpoint(DisasContext *ctx)
-{
- canonicalize_skip(ctx);
- tcg_gen_movi_tl(cpu_pc, ctx->npc);
- gen_helper_debug(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
-}
-
static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
@@ -2944,34 +2936,11 @@ static void avr_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(ctx->npc);
}
-static bool avr_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- gen_breakpoint(ctx);
- return true;
-}
-
static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
TCGLabel *skip_label = NULL;
- /*
- * This is due to some strange GDB behavior
- * Let's assume main has address 0x100:
- * b main - sets breakpoint at address 0x00000100 (code)
- * b *0x100 - sets breakpoint at address 0x00800100 (data)
- *
- * The translator driver has already taken care of the code pointer.
- */
- if (!ctx->base.singlestep_enabled &&
- cpu_breakpoint_test(cs, OFFSET_DATA + ctx->base.pc_next, BP_ANY)) {
- gen_breakpoint(ctx);
- return;
- }
-
/* Conditionally skip the next instruction, if indicated. */
if (ctx->skip_cond != TCG_COND_NEVER) {
skip_label = gen_new_label();
@@ -3069,7 +3038,6 @@ static const TranslatorOps avr_tr_ops = {
.init_disas_context = avr_tr_init_disas_context,
.tb_start = avr_tr_tb_start,
.insn_start = avr_tr_insn_start,
- .breakpoint_check = avr_tr_breakpoint_check,
.translate_insn = avr_tr_translate_insn,
.tb_stop = avr_tr_tb_stop,
.disas_log = avr_tr_disas_log,
diff --git a/target/cris/translate.c b/target/cris/translate.c
index 9258c13e9f..a84b753349 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -3118,25 +3118,6 @@ static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dc->delayed_branch == 1 ? dc->ppc | 1 : dc->pc);
}
-static bool cris_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- cris_evaluate_flags(dc);
- tcg_gen_movi_tl(env_pc, dc->pc);
- t_gen_raise_exception(EXCP_DEBUG);
- dc->base.is_jmp = DISAS_NORETURN;
- /*
- * The address covered by the breakpoint must be included in
- * [tb->pc, tb->pc + tb->size) in order to for it to be
- * properly cleared -- thus we increment the PC here so that
- * the logic setting tb->size below does the right thing.
- */
- dc->pc += 2;
- return true;
-}
-
static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -3315,7 +3296,6 @@ static const TranslatorOps cris_tr_ops = {
.init_disas_context = cris_tr_init_disas_context,
.tb_start = cris_tr_tb_start,
.insn_start = cris_tr_insn_start,
- .breakpoint_check = cris_tr_breakpoint_check,
.translate_insn = cris_tr_translate_insn,
.tb_stop = cris_tr_tb_stop,
.disas_log = cris_tr_disas_log,
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index b23d36adf5..54fdcaa5e8 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -540,22 +540,6 @@ static void hexagon_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(ctx->base.pc_next);
}
-static bool hexagon_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- gen_exception_end_tb(ctx, EXCP_DEBUG);
- /*
- * The address covered by the breakpoint must be included in
- * [tb->pc, tb->pc + tb->size) in order to for it to be
- * properly cleared -- thus we increment the PC here so that
- * the logic setting tb->size below does the right thing.
- */
- ctx->base.pc_next += 4;
- return true;
-}
-
static bool pkt_crosses_page(CPUHexagonState *env, DisasContext *ctx)
{
target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
@@ -631,7 +615,6 @@ static const TranslatorOps hexagon_tr_ops = {
.init_disas_context = hexagon_tr_init_disas_context,
.tb_start = hexagon_tr_tb_start,
.insn_start = hexagon_tr_insn_start,
- .breakpoint_check = hexagon_tr_breakpoint_check,
.translate_insn = hexagon_tr_translate_packet,
.tb_stop = hexagon_tr_tb_stop,
.disas_log = hexagon_tr_disas_log,
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 2552747138..b18150ef8d 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -4159,16 +4159,6 @@ static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}
-static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- gen_excp(ctx, EXCP_DEBUG);
- ctx->base.pc_next += 4;
- return true;
-}
-
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
@@ -4330,7 +4320,6 @@ static const TranslatorOps hppa_tr_ops = {
.init_disas_context = hppa_tr_init_disas_context,
.tb_start = hppa_tr_tb_start,
.insn_start = hppa_tr_insn_start,
- .breakpoint_check = hppa_tr_breakpoint_check,
.translate_insn = hppa_tr_translate_insn,
.tb_stop = hppa_tr_tb_stop,
.disas_log = hppa_tr_disas_log,
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
index 591f512bff..2da3cd14b6 100644
--- a/target/i386/tcg/mem_helper.c
+++ b/target/i386/tcg/mem_helper.c
@@ -64,22 +64,12 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
-#ifdef CONFIG_USER_ONLY
- {
- uint64_t *haddr = g2h(env_cpu(env), a0);
- cmpv = cpu_to_le64(cmpv);
- newv = cpu_to_le64(newv);
- oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
- oldv = le64_to_cpu(oldv);
- }
-#else
{
uintptr_t ra = GETPC();
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
- oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
+ oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
}
-#endif
if (oldv == cmpv) {
eflags |= CC_Z;
@@ -147,8 +137,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
- Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
- newv, oi, ra);
+ Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);
if (int128_eq(oldv, cmpv)) {
eflags |= CC_Z;
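[Editor's note] The pattern above recurs throughout this series: the old helper_atomic_*_mmu entry points existed only for softmmu builds, forcing a CONFIG_USER_ONLY fork at every call site. A minimal sketch of the unified post-patch shape; do_cmpxchg64 is a hypothetical wrapper, but the cpu_atomic_cmpxchgq_le_mmu call matches the hunk above exactly:

    /* Sketch only: works identically in user-only and softmmu builds. */
    static uint64_t do_cmpxchg64(CPUX86State *env, target_ulong addr,
                                 uint64_t cmpv, uint64_t newv, uintptr_t ra)
    {
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);

        return cpu_atomic_cmpxchgq_le_mmu(env, addr, cmpv, newv, oi, ra);
    }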
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index e96ec9bbcc..238e3a9395 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -54,6 +54,17 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
cpu->env.eip = tb->pc - tb->cs_base;
}
+#ifndef CONFIG_USER_ONLY
+static bool x86_debug_check_breakpoint(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ /* RF disables all architectural breakpoints. */
+ return !(env->eflags & RF_MASK);
+}
+#endif
+
#include "hw/core/tcg-cpu-ops.h"
static const struct TCGCPUOps x86_tcg_ops = {
@@ -66,6 +77,7 @@ static const struct TCGCPUOps x86_tcg_ops = {
.tlb_fill = x86_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.debug_excp_handler = breakpoint_handler,
+ .debug_check_breakpoint = x86_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
};
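[Editor's note] With the per-target breakpoint_check translator hooks gone, a target that needs to veto a breakpoint (x86's RF flag here) expresses that through the new debug_check_breakpoint TCGCPUOps callback instead. A hedged sketch of how the generic exec loop can consult it; only the hook name comes from the patch, the surrounding function is illustrative:

    /* Illustrative caller, not the actual accel/tcg/cpu-exec.c code. */
    static bool bp_should_fire(CPUState *cs)
    {
        const struct TCGCPUOps *ops = CPU_GET_CLASS(cs)->tcg_ops;

        /* No hook registered: architectural breakpoints always fire. */
        if (ops->debug_check_breakpoint == NULL) {
            return true;
        }
        /* x86 returns false while EFLAGS.RF is set, suppressing the trap. */
        return ops->debug_check_breakpoint(cs);
    }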
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 8520d5a1e2..aacb605eee 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -2604,14 +2604,6 @@ static void gen_interrupt(DisasContext *s, int intno,
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_debug(DisasContext *s)
-{
- gen_update_cc_op(s);
- gen_jmp_im(s, s->base.pc_next - s->cs_base);
- gen_helper_debug(cpu_env);
- s->base.is_jmp = DISAS_NORETURN;
-}
-
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
if ((s->flags & mask) == 0) {
@@ -8635,25 +8627,6 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
-static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
- /* If RF is set, suppress an internally generated breakpoint. */
- int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY;
- if (bp->flags & flags) {
- gen_debug(dc);
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the generic logic setting tb->size later does the right thing. */
- dc->base.pc_next += 1;
- return true;
- } else {
- return false;
- }
-}
-
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -8721,7 +8694,6 @@ static const TranslatorOps i386_tr_ops = {
.init_disas_context = i386_tr_init_disas_context,
.tb_start = i386_tr_tb_start,
.insn_start = i386_tr_insn_start,
- .breakpoint_check = i386_tr_breakpoint_check,
.translate_insn = i386_tr_translate_insn,
.tb_stop = i386_tr_tb_stop,
.disas_log = i386_tr_disas_log,
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index ae1ba4b437..d006d1cb3e 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -22,6 +22,7 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "semihosting/semihost.h"
+#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
@@ -782,9 +783,9 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
uint32_t u2 = env->dregs[Du2];
uint32_t l1, l2;
uintptr_t ra = GETPC();
-#if defined(CONFIG_ATOMIC64) && !defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_ATOMIC64)
int mmu_idx = cpu_mmu_index(env, 0);
- TCGMemOpIdx oi;
+ TCGMemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
#endif
if (parallel) {
@@ -794,23 +795,13 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
if ((a1 & 7) == 0 && a2 == a1 + 4) {
c = deposit64(c2, 32, 32, c1);
u = deposit64(u2, 32, 32, u1);
-#ifdef CONFIG_USER_ONLY
- l = helper_atomic_cmpxchgq_be(env, a1, c, u);
-#else
- oi = make_memop_idx(MO_BEQ, mmu_idx);
- l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
-#endif
+ l = cpu_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
l1 = l >> 32;
l2 = l;
} else if ((a2 & 7) == 0 && a1 == a2 + 4) {
c = deposit64(c1, 32, 32, c2);
u = deposit64(u1, 32, 32, u2);
-#ifdef CONFIG_USER_ONLY
- l = helper_atomic_cmpxchgq_be(env, a2, c, u);
-#else
- oi = make_memop_idx(MO_BEQ, mmu_idx);
- l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
-#endif
+ l = cpu_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
l2 = l >> 32;
l1 = l;
} else
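[Editor's note] The CAS2L path packs the two 32-bit compare/update registers into one 64-bit value before issuing a single atomic cmpxchg. For reference, deposit64(base, pos, len, val) overwrites len bits of base starting at bit pos, so the packing above works out as:

    /* Worked example of the packing used above (values are arbitrary). */
    uint64_t c = deposit64(0x00000000aabbccddULL, 32, 32, 0x11223344ULL);
    /* c == 0x11223344aabbccddULL: c1 occupies bits 63..32, c2 bits 31..0. */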
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 1fee04b8dd..c34d9aed61 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -6208,23 +6208,6 @@ static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
-static bool m68k_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- gen_exception(dc, dc->base.pc_next, EXCP_DEBUG);
- /*
- * The address covered by the breakpoint must be included in
- * [tb->pc, tb->pc + tb->size) in order to for it to be
- * properly cleared -- thus we increment the PC here so that
- * the logic setting tb->size below does the right thing.
- */
- dc->base.pc_next += 2;
-
- return true;
-}
-
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -6310,7 +6293,6 @@ static const TranslatorOps m68k_tr_ops = {
.init_disas_context = m68k_tr_init_disas_context,
.tb_start = m68k_tr_tb_start,
.insn_start = m68k_tr_insn_start,
- .breakpoint_check = m68k_tr_breakpoint_check,
.translate_insn = m68k_tr_translate_insn,
.tb_stop = m68k_tr_tb_stop,
.disas_log = m68k_tr_disas_log,
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index c68a84a219..a14ffed784 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -1673,23 +1673,6 @@ static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
dc->insn_start = tcg_last_op();
}
-static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcb, DisasContext, base);
-
- gen_raise_exception_sync(dc, EXCP_DEBUG);
-
- /*
- * The address covered by the breakpoint must be included in
- * [tb->pc, tb->pc + tb->size) in order to for it to be
- * properly cleared -- thus we increment the PC here so that
- * the logic setting tb->size below does the right thing.
- */
- dc->base.pc_next += 4;
- return true;
-}
-
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
DisasContext *dc = container_of(dcb, DisasContext, base);
@@ -1854,7 +1837,6 @@ static const TranslatorOps mb_tr_ops = {
.init_disas_context = mb_tr_init_disas_context,
.tb_start = mb_tr_tb_start,
.insn_start = mb_tr_insn_start,
- .breakpoint_check = mb_tr_breakpoint_check,
.translate_insn = mb_tr_translate_insn,
.tb_stop = mb_tr_tb_stop,
.disas_log = mb_tr_disas_log,
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index fd980ea966..5b03545f09 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -16178,24 +16178,6 @@ static void mips_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
ctx->btarget);
}
-static bool mips_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- save_cpu_state(ctx, 1);
- ctx->base.is_jmp = DISAS_NORETURN;
- gen_helper_raise_exception_debug(cpu_env);
- /*
- * The address covered by the breakpoint must be included in
- * [tb->pc, tb->pc + tb->size) in order to for it to be
- * properly cleared -- thus we increment the PC here so that
- * the logic setting tb->size below does the right thing.
- */
- ctx->base.pc_next += 4;
- return true;
-}
-
static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
CPUMIPSState *env = cs->env_ptr;
@@ -16303,7 +16285,6 @@ static const TranslatorOps mips_tr_ops = {
.init_disas_context = mips_tr_init_disas_context,
.tb_start = mips_tr_tb_start,
.insn_start = mips_tr_insn_start,
- .breakpoint_check = mips_tr_breakpoint_check,
.translate_insn = mips_tr_translate_insn,
.tb_stop = mips_tr_tb_stop,
.disas_log = mips_tr_disas_log,
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index 17742cebc7..08d7ac5398 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -744,16 +744,6 @@ static const char * const regnames[] = {
#include "exec/gen-icount.h"
-static void gen_exception(DisasContext *dc, uint32_t excp)
-{
- TCGv_i32 tmp = tcg_const_i32(excp);
-
- tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
- gen_helper_raise_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
- dc->base.is_jmp = DISAS_NORETURN;
-}
-
/* generate intermediate code for basic block 'tb'. */
static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
@@ -777,22 +767,6 @@ static void nios2_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(dcbase->pc_next);
}
-static bool nios2_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- gen_exception(dc, EXCP_DEBUG);
- /*
- * The address covered by the breakpoint must be included in
- * [tb->pc, tb->pc + tb->size) in order to for it to be
- * properly cleared -- thus we increment the PC here so that
- * the logic setting tb->size below does the right thing.
- */
- dc->base.pc_next += 4;
- return true;
-}
-
static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -870,7 +844,6 @@ static const TranslatorOps nios2_tr_ops = {
.init_disas_context = nios2_tr_init_disas_context,
.tb_start = nios2_tr_tb_start,
.insn_start = nios2_tr_insn_start,
- .breakpoint_check = nios2_tr_breakpoint_check,
.translate_insn = nios2_tr_translate_insn,
.tb_stop = nios2_tr_tb_stop,
.disas_log = nios2_tr_disas_log,
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 059da48475..d6ea536744 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -1609,22 +1609,6 @@ static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
| (dc->base.num_insns > 1 ? 2 : 0));
}
-static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
- gen_exception(dc, EXCP_DEBUG);
- dc->base.is_jmp = DISAS_NORETURN;
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the logic setting tb->size below does the right thing. */
- dc->base.pc_next += 4;
- return true;
-}
-
static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -1727,7 +1711,6 @@ static const TranslatorOps openrisc_tr_ops = {
.init_disas_context = openrisc_tr_init_disas_context,
.tb_start = openrisc_tr_tb_start,
.insn_start = openrisc_tr_insn_start,
- .breakpoint_check = openrisc_tr_breakpoint_check,
.translate_insn = openrisc_tr_translate_insn,
.tb_stop = openrisc_tr_tb_stop,
.disas_log = openrisc_tr_disas_log,
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index 444b2a30ef..e2282baa8d 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -376,7 +376,7 @@ uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128);
- ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
+ ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
env->retxh = int128_gethi(ret);
return int128_getlo(ret);
}
@@ -388,7 +388,7 @@ uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128);
- ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
+ ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
env->retxh = int128_gethi(ret);
return int128_getlo(ret);
}
@@ -401,7 +401,7 @@ void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128);
val = int128_make128(lo, hi);
- helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
+ cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}
void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
@@ -412,7 +412,7 @@ void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128);
val = int128_make128(lo, hi);
- helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
+ cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
@@ -429,8 +429,8 @@ uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
cmpv = int128_make128(env->reserve_val2, env->reserve_val);
newv = int128_make128(new_lo, new_hi);
- oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
- opidx, GETPC());
+ oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
+ opidx, GETPC());
success = int128_eq(oldv, cmpv);
}
env->reserve_addr = -1;
@@ -451,8 +451,8 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
cmpv = int128_make128(env->reserve_val2, env->reserve_val);
newv = int128_make128(new_lo, new_hi);
- oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
- opidx, GETPC());
+ oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
+ opidx, GETPC());
success = int128_eq(oldv, cmpv);
}
env->reserve_addr = -1;
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 0a55cb7181..171b216e17 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -8565,23 +8565,6 @@ static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(dcbase->pc_next);
}
-static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- gen_update_nip(ctx, ctx->base.pc_next);
- gen_debug_exception(ctx);
- /*
- * The address covered by the breakpoint must be included in
- * [tb->pc, tb->pc + tb->size) in order to for it to be properly
- * cleared -- thus we increment the PC here so that the logic
- * setting tb->size below does the right thing.
- */
- ctx->base.pc_next += 4;
- return true;
-}
-
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
@@ -8710,7 +8693,6 @@ static const TranslatorOps ppc_tr_ops = {
.init_disas_context = ppc_tr_init_disas_context,
.tb_start = ppc_tr_tb_start,
.insn_start = ppc_tr_insn_start,
- .breakpoint_check = ppc_tr_breakpoint_check,
.translate_insn = ppc_tr_translate_insn,
.tb_stop = ppc_tr_tb_stop,
.disas_log = ppc_tr_disas_log,
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index deda0c8a44..6983be5723 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -961,22 +961,6 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(ctx->base.pc_next);
}
-static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
- ctx->base.is_jmp = DISAS_NORETURN;
- gen_exception_debug();
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the logic setting tb->size below does the right thing. */
- ctx->base.pc_next += 4;
- return true;
-}
-
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
@@ -1029,7 +1013,6 @@ static const TranslatorOps riscv_tr_ops = {
.init_disas_context = riscv_tr_init_disas_context,
.tb_start = riscv_tr_tb_start,
.insn_start = riscv_tr_insn_start,
- .breakpoint_check = riscv_tr_breakpoint_check,
.translate_insn = riscv_tr_translate_insn,
.tb_stop = riscv_tr_tb_stop,
.disas_log = riscv_tr_disas_log,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 23a626438a..a3cf720455 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -2309,19 +2309,6 @@ static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(ctx->base.pc_next);
}
-static bool rx_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- /* We have hit a breakpoint - make sure PC is up-to-date */
- tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
- gen_helper_debug(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
- ctx->base.pc_next += 1;
- return true;
-}
-
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
@@ -2373,7 +2360,6 @@ static const TranslatorOps rx_tr_ops = {
.init_disas_context = rx_tr_init_disas_context,
.tb_start = rx_tr_tb_start,
.insn_start = rx_tr_insn_start,
- .breakpoint_check = rx_tr_breakpoint_check,
.translate_insn = rx_tr_translate_insn,
.tb_stop = rx_tr_tb_stop,
.disas_log = rx_tr_disas_log,
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index 9bae13ecf0..21a4de4067 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -1811,7 +1811,7 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
- oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+ oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
fail = !int128_eq(oldv, cmpv);
env->cc_op = fail;
@@ -1884,7 +1884,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
#else
TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
- ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
+ ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
} else {
ov = cpu_ldl_data_ra(env, a1, ra);
@@ -1903,13 +1903,8 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
if (parallel) {
#ifdef CONFIG_ATOMIC64
-# ifdef CONFIG_USER_ONLY
- uint64_t *haddr = g2h(env_cpu(env), a1);
- ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
-# else
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
- ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
-# endif
+ ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
#else
/* Note that we asserted !parallel above. */
g_assert_not_reached();
@@ -1945,7 +1940,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
} else if (HAVE_CMPXCHG128) {
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
- ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
+ ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv);
} else {
/* Note that we asserted !parallel above. */
@@ -1985,7 +1980,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
} else if (HAVE_ATOMIC128) {
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 sv = int128_make128(svl, svh);
- helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
+ cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
} else {
/* Note that we asserted !parallel above. */
g_assert_not_reached();
@@ -2486,7 +2481,7 @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
- v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
+ v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra);
hi = int128_gethi(v);
lo = int128_getlo(v);
@@ -2518,7 +2513,7 @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
v = int128_make128(low, high);
- helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
+ cpu_atomic_sto_be_mmu(env, addr, v, oi, ra);
}
/* Execute instruction. This instruction executes an insn modified with
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index 92fa7656c2..0632b0374b 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -6552,29 +6552,6 @@ static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}
-static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- /*
- * Emit an insn_start to accompany the breakpoint exception.
- * The ILEN value is a dummy, since this does not result in
- * an s390x exception, but an internal qemu exception which
- * brings us back to interact with the gdbstub.
- */
- tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);
-
- dc->base.is_jmp = DISAS_PC_STALE;
- dc->do_debug = true;
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the logic setting tb->size does the right thing. */
- dc->base.pc_next += 2;
- return true;
-}
-
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
CPUS390XState *env = cs->env_ptr;
@@ -6642,7 +6619,6 @@ static const TranslatorOps s390x_tr_ops = {
.init_disas_context = s390x_tr_init_disas_context,
.tb_start = s390x_tr_tb_start,
.insn_start = s390x_tr_insn_start,
- .breakpoint_check = s390x_tr_breakpoint_check,
.translate_insn = s390x_tr_translate_insn,
.tb_stop = s390x_tr_tb_stop,
.disas_log = s390x_tr_disas_log,
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 40898e2393..8704fea1ca 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -2289,23 +2289,6 @@ static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
}
-static bool sh4_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
-
- /* We have hit a breakpoint - make sure PC is up-to-date */
- gen_save_cpu_state(ctx, true);
- gen_helper_debug(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the logic setting tb->size below does the right thing. */
- ctx->base.pc_next += 2;
- return true;
-}
-
static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
CPUSH4State *env = cs->env_ptr;
@@ -2369,7 +2352,6 @@ static const TranslatorOps sh4_tr_ops = {
.init_disas_context = sh4_tr_init_disas_context,
.tb_start = sh4_tr_tb_start,
.insn_start = sh4_tr_insn_start,
- .breakpoint_check = sh4_tr_breakpoint_check,
.translate_insn = sh4_tr_translate_insn,
.tb_stop = sh4_tr_tb_stop,
.disas_log = sh4_tr_disas_log,
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index e530cb4aa8..11de5a4963 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -5854,22 +5854,6 @@ static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
}
}
-static bool sparc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- if (dc->pc != dc->base.pc_first) {
- save_state(dc);
- }
- gen_helper_debug(cpu_env);
- tcg_gen_exit_tb(NULL, 0);
- dc->base.is_jmp = DISAS_NORETURN;
- /* update pc_next so that the current instruction is included in tb->size */
- dc->base.pc_next += 4;
- return true;
-}
-
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -5932,7 +5916,6 @@ static const TranslatorOps sparc_tr_ops = {
.init_disas_context = sparc_tr_init_disas_context,
.tb_start = sparc_tr_tb_start,
.insn_start = sparc_tr_insn_start,
- .breakpoint_check = sparc_tr_breakpoint_check,
.translate_insn = sparc_tr_translate_insn,
.tb_stop = sparc_tr_tb_stop,
.disas_log = sparc_tr_disas_log,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 865020754d..a0cc0f1cb3 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -8810,21 +8810,6 @@ static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(ctx->base.pc_next);
}
-static bool tricore_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
- generate_qemu_excp(ctx, EXCP_DEBUG);
- /*
- * The address covered by the breakpoint must be included in
- * [tb->pc, tb->pc + tb->size) in order to for it to be
- * properly cleared -- thus we increment the PC here so that
- * the logic setting tb->size below does the right thing.
- */
- ctx->base.pc_next += 4;
- return true;
-}
-
static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx)
{
/*
@@ -8898,7 +8883,6 @@ static const TranslatorOps tricore_tr_ops = {
.init_disas_context = tricore_tr_init_disas_context,
.tb_start = tricore_tr_tb_start,
.insn_start = tricore_tr_insn_start,
- .breakpoint_check = tricore_tr_breakpoint_check,
.translate_insn = tricore_tr_translate_insn,
.tb_stop = tricore_tr_tb_stop,
.disas_log = tricore_tr_disas_log,
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 7094cfcf1d..20399d6a04 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -1232,22 +1232,6 @@ static void xtensa_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dcbase->pc_next);
}
-static bool xtensa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
- const CPUBreakpoint *bp)
-{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
-
- tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
- gen_exception(dc, EXCP_DEBUG);
- dc->base.is_jmp = DISAS_NORETURN;
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the logic setting tb->size below does the right thing. */
- dc->base.pc_next += 2;
- return true;
-}
-
static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -1330,7 +1314,6 @@ static const TranslatorOps xtensa_translator_ops = {
.init_disas_context = xtensa_tr_init_disas_context,
.tb_start = xtensa_tr_tb_start,
.insn_start = xtensa_tr_insn_start,
- .breakpoint_check = xtensa_tr_breakpoint_check,
.translate_insn = xtensa_tr_translate_insn,
.tb_stop = xtensa_tr_tb_stop,
.disas_log = xtensa_tr_disas_log,
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 0c561fb253..c754396575 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -2723,10 +2723,6 @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
seen this numbered exit before, via tcg_gen_goto_tb. */
tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx));
#endif
- /* When not chaining, exit without indicating a link. */
- if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- val = 0;
- }
} else {
/* This is an exit via the exitreq label. */
tcg_debug_assert(idx == TB_EXIT_REQUESTED);
@@ -2738,6 +2734,8 @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
void tcg_gen_goto_tb(unsigned idx)
{
+ /* We tested CF_NO_GOTO_TB in translator_use_goto_tb. */
+ tcg_debug_assert(!(tcg_ctx->tb_cflags & CF_NO_GOTO_TB));
/* We only support two chained exits. */
tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
#ifdef CONFIG_DEBUG_TCG
@@ -2746,25 +2744,23 @@ void tcg_gen_goto_tb(unsigned idx)
tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif
plugin_gen_disable_mem_helpers();
- /* When not chaining, we simply fall through to the "fallback" exit. */
- if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- tcg_gen_op1i(INDEX_op_goto_tb, idx);
- }
+ tcg_gen_op1i(INDEX_op_goto_tb, idx);
}
void tcg_gen_lookup_and_goto_ptr(void)
{
- if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- TCGv_ptr ptr;
+ TCGv_ptr ptr;
- plugin_gen_disable_mem_helpers();
- ptr = tcg_temp_new_ptr();
- gen_helper_lookup_tb_ptr(ptr, cpu_env);
- tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
- tcg_temp_free_ptr(ptr);
- } else {
+ if (tcg_ctx->tb_cflags & CF_NO_GOTO_PTR) {
tcg_gen_exit_tb(NULL, 0);
+ return;
}
+
+ plugin_gen_disable_mem_helpers();
+ ptr = tcg_temp_new_ptr();
+ gen_helper_lookup_tb_ptr(ptr, cpu_env);
+ tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
+ tcg_temp_free_ptr(ptr);
}
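[Editor's note] Chaining policy is now carried in the TB's cflags (CF_NO_GOTO_TB / CF_NO_GOTO_PTR) rather than queried from the -d nochain log mask at code-generation time. A sketch of the usual end-of-TB pattern in a target translator; translator_use_goto_tb is the helper referenced by the new assertion, while gen_set_pc, ctx and dest are illustrative names:

    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(0);               /* safe: CF_NO_GOTO_TB was checked */
        gen_set_pc(ctx, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
    } else {
        gen_set_pc(ctx, dest);
        /* Falls back to exit_tb(NULL, 0) itself under CF_NO_GOTO_PTR. */
        tcg_gen_lookup_and_goto_ptr();
    }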
static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
@@ -3084,7 +3080,6 @@ static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
}
}
-#ifdef CONFIG_SOFTMMU
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
@@ -3093,12 +3088,6 @@ typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
TCGv_i64, TCGv_i32);
-#else
-typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
-typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
-typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
-typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
-#endif
#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
@@ -3140,18 +3129,13 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
tcg_temp_free_i32(t1);
} else {
gen_atomic_cx_i32 gen;
+ TCGMemOpIdx oi;
gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
tcg_debug_assert(gen != NULL);
-#ifdef CONFIG_SOFTMMU
- {
- TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
- gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
- }
-#else
- gen(retv, cpu_env, addr, cmpv, newv);
-#endif
+ oi = make_memop_idx(memop & ~MO_SIGN, idx);
+ gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
if (memop & MO_SIGN) {
tcg_gen_ext_i32(retv, retv, memop);
@@ -3184,18 +3168,13 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
} else if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
gen_atomic_cx_i64 gen;
+ TCGMemOpIdx oi;
gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
tcg_debug_assert(gen != NULL);
-#ifdef CONFIG_SOFTMMU
- {
- TCGMemOpIdx oi = make_memop_idx(memop, idx);
- gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
- }
-#else
- gen(retv, cpu_env, addr, cmpv, newv);
-#endif
+ oi = make_memop_idx(memop, idx);
+ gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
#else
gen_helper_exit_atomic(cpu_env);
/* Produce a result, so that we have a well-formed opcode stream
@@ -3245,20 +3224,15 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
TCGArg idx, MemOp memop, void * const table[])
{
gen_atomic_op_i32 gen;
+ TCGMemOpIdx oi;
memop = tcg_canonicalize_memop(memop, 0, 0);
gen = table[memop & (MO_SIZE | MO_BSWAP)];
tcg_debug_assert(gen != NULL);
-#ifdef CONFIG_SOFTMMU
- {
- TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
- gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
- }
-#else
- gen(ret, cpu_env, addr, val);
-#endif
+ oi = make_memop_idx(memop & ~MO_SIGN, idx);
+ gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
if (memop & MO_SIGN) {
tcg_gen_ext_i32(ret, ret, memop);
@@ -3292,18 +3266,13 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
gen_atomic_op_i64 gen;
+ TCGMemOpIdx oi;
gen = table[memop & (MO_SIZE | MO_BSWAP)];
tcg_debug_assert(gen != NULL);
-#ifdef CONFIG_SOFTMMU
- {
- TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
- gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
- }
-#else
- gen(ret, cpu_env, addr, val);
-#endif
+ oi = make_memop_idx(memop & ~MO_SIGN, idx);
+ gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
#else
gen_helper_exit_atomic(cpu_env);
/* Produce a result, so that we have a well-formed opcode stream
diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index 589332c1b7..4acc1e6d5f 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -17,10 +17,6 @@ import socket
import subprocess
-ACCEL_NOT_AVAILABLE_FMT = "%s accelerator does not seem to be available"
-KVM_NOT_AVAILABLE = ACCEL_NOT_AVAILABLE_FMT % "KVM"
-
-
def pick_default_vug_bin():
relative_path = "./contrib/vhost-user-gpu/vhost-user-gpu"
if is_readable_executable_file(relative_path):
@@ -34,19 +30,23 @@ def pick_default_vug_bin():
class VirtioGPUx86(Test):
"""
:avocado: tags=virtio-gpu
+ :avocado: tags=arch:x86_64
+ :avocado: tags=cpu:host
"""
- KERNEL_COMMON_COMMAND_LINE = "printk.time=0 "
+ KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash"
KERNEL_URL = (
"https://archives.fedoraproject.org/pub/fedora"
"/linux/releases/33/Everything/x86_64/os/images"
"/pxeboot/vmlinuz"
)
+ KERNEL_HASH = '1433cfe3f2ffaa44de4ecfb57ec25dc2399cdecf'
INITRD_URL = (
"https://archives.fedoraproject.org/pub/fedora"
"/linux/releases/33/Everything/x86_64/os/images"
"/pxeboot/initrd.img"
)
+ INITRD_HASH = 'c828d68a027b53e5220536585efe03412332c2d9'
def wait_for_console_pattern(self, success_message, vm=None):
wait_for_console_pattern(
@@ -58,24 +58,18 @@ class VirtioGPUx86(Test):
def test_virtio_vga_virgl(self):
"""
- :avocado: tags=arch:x86_64
- :avocado: tags=device:virtio-vga
- :avocado: tags=cpu:host
+ :avocado: tags=device:virtio-vga-gl
"""
- kernel_command_line = (
- self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
- )
# FIXME: should check presence of virtio, virgl etc
- if not kvm_available(self.arch, self.qemu_bin):
- self.cancel(KVM_NOT_AVAILABLE)
+ self.require_accelerator('kvm')
- kernel_path = self.fetch_asset(self.KERNEL_URL)
- initrd_path = self.fetch_asset(self.INITRD_URL)
+ kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
+ initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)
self.vm.set_console()
self.vm.add_args("-m", "2G")
self.vm.add_args("-machine", "pc,accel=kvm")
- self.vm.add_args("-device", "virtio-vga,virgl=on")
+ self.vm.add_args("-device", "virtio-vga-gl")
self.vm.add_args("-display", "egl-headless")
self.vm.add_args(
"-kernel",
@@ -83,7 +77,7 @@ class VirtioGPUx86(Test):
"-initrd",
initrd_path,
"-append",
- kernel_command_line,
+ self.KERNEL_COMMAND_LINE,
)
try:
self.vm.launch()
@@ -99,23 +93,17 @@ class VirtioGPUx86(Test):
def test_vhost_user_vga_virgl(self):
"""
- :avocado: tags=arch:x86_64
:avocado: tags=device:vhost-user-vga
- :avocado: tags=cpu:host
"""
- kernel_command_line = (
- self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
- )
# FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
- if not kvm_available(self.arch, self.qemu_bin):
- self.cancel(KVM_NOT_AVAILABLE)
+ self.require_accelerator('kvm')
vug = pick_default_vug_bin()
if not vug:
self.cancel("Could not find vhost-user-gpu")
- kernel_path = self.fetch_asset(self.KERNEL_URL)
- initrd_path = self.fetch_asset(self.INITRD_URL)
+ kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
+ initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)
# Create socketpair to connect proxy and remote processes
qemu_sock, vug_sock = socket.socketpair(
@@ -153,7 +141,7 @@ class VirtioGPUx86(Test):
"-initrd",
initrd_path,
"-append",
- kernel_command_line,
+ self.KERNEL_COMMAND_LINE,
)
self.vm.launch()
self.wait_for_console_pattern("as init process")
diff --git a/tests/qemu-iotests/151 b/tests/qemu-iotests/151
index 182f6b5321..93d14193d0 100755
--- a/tests/qemu-iotests/151
+++ b/tests/qemu-iotests/151
@@ -38,8 +38,9 @@ class TestActiveMirror(iotests.QMPTestCase):
'if': 'none',
'node-name': 'source-node',
'driver': iotests.imgfmt,
- 'file': {'driver': 'file',
- 'filename': source_img}}
+ 'file': {'driver': 'blkdebug',
+ 'image': {'driver': 'file',
+ 'filename': source_img}}}
blk_target = {'node-name': 'target-node',
'driver': iotests.imgfmt,
@@ -141,6 +142,55 @@ class TestActiveMirror(iotests.QMPTestCase):
self.potential_writes_in_flight = False
+ def testIntersectingActiveIO(self):
+ # Fill the source image
+ result = self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')
+
+ # Start the block job (very slowly)
+ result = self.vm.qmp('blockdev-mirror',
+ job_id='mirror',
+ filter_node_name='mirror-node',
+ device='source-node',
+ target='target-node',
+ sync='full',
+ copy_mode='write-blocking',
+ speed=1)
+
+ self.vm.hmp_qemu_io('source', 'break write_aio A')
+ self.vm.hmp_qemu_io('source', 'aio_write 0 1M') # 1
+ self.vm.hmp_qemu_io('source', 'wait_break A')
+ self.vm.hmp_qemu_io('source', 'aio_write 0 2M') # 2
+ self.vm.hmp_qemu_io('source', 'aio_write 0 2M') # 3
+
+ # Now 2 and 3 are in mirror_wait_on_conflicts, waiting for 1
+
+ self.vm.hmp_qemu_io('source', 'break write_aio B')
+ self.vm.hmp_qemu_io('source', 'aio_write 1M 2M') # 4
+ self.vm.hmp_qemu_io('source', 'wait_break B')
+
+ # 4 doesn't wait for 2 and 3, because they haven't yet set
+ # in_flight_bitmap. So, nothing prevents 4 from proceeding except
+ # our breakpoint B.
+
+ self.vm.hmp_qemu_io('source', 'resume A')
+
+ # Now that we have resumed 1, 2 and 3 go to the next iteration of the
+ # while loop in mirror_wait_on_conflicts(). They don't exit, as the
+ # bitmap is dirty due to request 4.
+ # In the past, at that point 2 and 3 would wait for each other,
+ # producing a deadlock. Now this is fixed and they will wait for
+ # request 4.
+
+ self.vm.hmp_qemu_io('source', 'resume B')
+
+ # After resuming 4, one of 2 and 3 goes first and sets in_flight_bitmap,
+ # so the other will wait for it.
+
+ result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
+ self.assert_qmp(result, 'return', {})
+ self.complete_and_wait(drive='mirror')
+
+ self.potential_writes_in_flight = False
+
if __name__ == '__main__':
iotests.main(supported_fmts=['qcow2', 'raw'],
diff --git a/tests/qemu-iotests/151.out b/tests/qemu-iotests/151.out
index 8d7e996700..89968f35d7 100644
--- a/tests/qemu-iotests/151.out
+++ b/tests/qemu-iotests/151.out
@@ -1,5 +1,5 @@
-...
+....
----------------------------------------------------------------------
-Ran 3 tests
+Ran 4 tests
OK
diff --git a/tests/qemu-iotests/307 b/tests/qemu-iotests/307
index c7685347bc..b429b5aa50 100755
--- a/tests/qemu-iotests/307
+++ b/tests/qemu-iotests/307
@@ -41,9 +41,11 @@ with iotests.FilePath('image') as img, \
iotests.log('=== Launch VM ===')
vm.add_object('iothread,id=iothread0')
+ vm.add_object('iothread,id=iothread1')
vm.add_blockdev(f'file,filename={img},node-name=file')
vm.add_blockdev(f'{iotests.imgfmt},file=file,node-name=fmt')
vm.add_blockdev('raw,file=file,node-name=ro,read-only=on')
+ vm.add_blockdev('null-co,node-name=null')
vm.add_device(f'id=scsi0,driver=virtio-scsi,iothread=iothread0')
vm.launch()
@@ -74,6 +76,19 @@ with iotests.FilePath('image') as img, \
vm.qmp_log('query-block-exports')
iotests.qemu_nbd_list_log('-k', socket)
+ iotests.log('\n=== Add export with conflicting iothread ===')
+
+ vm.qmp_log('device_add', id='sdb', driver='scsi-hd', drive='null')
+
+ # Should fail because of fixed-iothread
+ vm.qmp_log('block-export-add', id='export1', type='nbd', node_name='null',
+ iothread='iothread1', fixed_iothread=True, writable=True)
+
+ # Should ignore the iothread conflict, but then fail because of the
+ # permission conflict (and not crash)
+ vm.qmp_log('block-export-add', id='export1', type='nbd', node_name='null',
+ iothread='iothread1', fixed_iothread=False, writable=True)
+
iotests.log('\n=== Add a writable export ===')
# This fails because share-rw=off
diff --git a/tests/qemu-iotests/307.out b/tests/qemu-iotests/307.out
index 4b0c7e155a..ec8d2be0e0 100644
--- a/tests/qemu-iotests/307.out
+++ b/tests/qemu-iotests/307.out
@@ -51,6 +51,14 @@ exports available: 1
base:allocation
+=== Add export with conflicting iothread ===
+{"execute": "device_add", "arguments": {"drive": "null", "driver": "scsi-hd", "id": "sdb"}}
+{"return": {}}
+{"execute": "block-export-add", "arguments": {"fixed-iothread": true, "id": "export1", "iothread": "iothread1", "node-name": "null", "type": "nbd", "writable": true}}
+{"error": {"class": "GenericError", "desc": "Cannot change iothread of active block backend"}}
+{"execute": "block-export-add", "arguments": {"fixed-iothread": false, "id": "export1", "iothread": "iothread1", "node-name": "null", "type": "nbd", "writable": true}}
+{"error": {"class": "GenericError", "desc": "Permission conflict on node 'null': permissions 'write' are both required by an unnamed block device (uses node 'null' as 'root' child) and unshared by block device 'sdb' (uses node 'null' as 'root' child)."}}
+
=== Add a writable export ===
{"execute": "block-export-add", "arguments": {"description": "This is the writable second export", "id": "export1", "name": "export1", "node-name": "fmt", "type": "nbd", "writable": true, "writethrough": true}}
{"error": {"class": "GenericError", "desc": "Permission conflict on node 'fmt': permissions 'write' are both required by an unnamed block device (uses node 'fmt' as 'root' child) and unshared by block device 'sda' (uses node 'fmt' as 'root' child)."}}
diff --git a/tests/qemu-iotests/291 b/tests/qemu-iotests/tests/qemu-img-bitmaps
index 20efb080a6..7a3fe8c3d3 100755
--- a/tests/qemu-iotests/291
+++ b/tests/qemu-iotests/tests/qemu-img-bitmaps
@@ -3,7 +3,7 @@
#
# Test qemu-img bitmap handling
#
-# Copyright (C) 2018-2020 Red Hat, Inc.
+# Copyright (C) 2018-2021 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -27,11 +27,13 @@ status=1 # failure is the default!
_cleanup()
{
_cleanup_test_img
+ _rm_test_img "$TEST_IMG.copy"
nbd_server_stop
}
trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
+cd ..
. ./common.rc
. ./common.filter
. ./common.nbd
@@ -129,6 +131,36 @@ $QEMU_IMG map --output=json --image-opts \
nbd_server_stop
+echo
+echo "=== Check handling of inconsistent bitmap ==="
+echo
+
+# Prepare image with corrupted bitmap
+$QEMU_IO -c abort "$TEST_IMG" 2>/dev/null
+$QEMU_IMG bitmap --add "$TEST_IMG" b4
+$QEMU_IMG bitmap --remove "$TEST_IMG" b1
+_img_info --format-specific | _filter_irrelevant_img_info
+# Proof that we fail fast if bitmaps can't be copied
+echo
+$QEMU_IMG convert --bitmaps -O qcow2 "$TEST_IMG" "$TEST_IMG.copy" &&
+ echo "unexpected success"
+TEST_IMG="$TEST_IMG.copy" _img_info --format-specific \
+ | _filter_irrelevant_img_info
+# Skipping the broken bitmaps works,...
+echo
+$QEMU_IMG convert --bitmaps --skip-broken-bitmaps \
+ -O qcow2 "$TEST_IMG" "$TEST_IMG.copy"
+TEST_IMG="$TEST_IMG.copy" _img_info --format-specific \
+ | _filter_irrelevant_img_info
+# ...as does removing them
+echo
+_rm_test_img "$TEST_IMG.copy"
+$QEMU_IMG bitmap --remove "$TEST_IMG" b0
+$QEMU_IMG bitmap --remove --add "$TEST_IMG" b2
+$QEMU_IMG convert --bitmaps -O qcow2 "$TEST_IMG" "$TEST_IMG.copy"
+TEST_IMG="$TEST_IMG.copy" _img_info --format-specific \
+ | _filter_irrelevant_img_info
+
# success, all done
echo '*** done'
rm -f $seq.full
diff --git a/tests/qemu-iotests/291.out b/tests/qemu-iotests/tests/qemu-img-bitmaps.out
index 018d6b103f..e851f0320e 100644
--- a/tests/qemu-iotests/291.out
+++ b/tests/qemu-iotests/tests/qemu-img-bitmaps.out
@@ -1,4 +1,4 @@
-QA output created by 291
+QA output created by qemu-img-bitmaps
=== Initial image setup ===
@@ -115,4 +115,69 @@ Format specific information:
[{ "start": 0, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET},
{ "start": 2097152, "length": 1048576, "depth": 0, "present": false, "zero": false, "data": false},
{ "start": 3145728, "length": 7340032, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}]
+
+=== Check handling of inconsistent bitmap ===
+
+image: TEST_DIR/t.IMGFMT
+file format: IMGFMT
+virtual size: 10 MiB (10485760 bytes)
+cluster_size: 65536
+backing file: TEST_DIR/t.IMGFMT.base
+backing file format: IMGFMT
+Format specific information:
+ bitmaps:
+ [0]:
+ flags:
+ [0]: in-use
+ [1]: auto
+ name: b2
+ granularity: 65536
+ [1]:
+ flags:
+ [0]: in-use
+ name: b0
+ granularity: 65536
+ [2]:
+ flags:
+ [0]: auto
+ name: b4
+ granularity: 65536
+ corrupt: false
+
+qemu-img: Cannot copy inconsistent bitmap 'b0'
+Try --skip-broken-bitmaps, or use 'qemu-img bitmap --remove' to delete it
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT.copy': Could not open 'TEST_DIR/t.IMGFMT.copy': No such file or directory
+
+qemu-img: warning: Skipping inconsistent bitmap 'b0'
+qemu-img: warning: Skipping inconsistent bitmap 'b2'
+image: TEST_DIR/t.IMGFMT.copy
+file format: IMGFMT
+virtual size: 10 MiB (10485760 bytes)
+cluster_size: 65536
+Format specific information:
+ bitmaps:
+ [0]:
+ flags:
+ [0]: auto
+ name: b4
+ granularity: 65536
+ corrupt: false
+
+image: TEST_DIR/t.IMGFMT.copy
+file format: IMGFMT
+virtual size: 10 MiB (10485760 bytes)
+cluster_size: 65536
+Format specific information:
+ bitmaps:
+ [0]:
+ flags:
+ [0]: auto
+ name: b4
+ granularity: 65536
+ [1]:
+ flags:
+ [0]: auto
+ name: b2
+ granularity: 65536
+ corrupt: false
*** done
diff --git a/trace/mem-internal.h b/trace/mem-internal.h
deleted file mode 100644
index 8b72b678fa..0000000000
--- a/trace/mem-internal.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Helper functions for guest memory tracing
- *
- * Copyright (C) 2016 LluĂ­s Vilanova <vilanova@ac.upc.edu>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef TRACE__MEM_INTERNAL_H
-#define TRACE__MEM_INTERNAL_H
-
-#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */
-#define TRACE_MEM_SE (1ULL << 4) /* sign extended (y/n) */
-#define TRACE_MEM_BE (1ULL << 5) /* big endian (y/n) */
-#define TRACE_MEM_ST (1ULL << 6) /* store (y/n) */
-#define TRACE_MEM_MMU_SHIFT 8 /* mmu idx */
-
-static inline uint16_t trace_mem_build_info(
- int size_shift, bool sign_extend, MemOp endianness,
- bool store, unsigned int mmu_idx)
-{
- uint16_t res;
-
- res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
- if (sign_extend) {
- res |= TRACE_MEM_SE;
- }
- if (endianness == MO_BE) {
- res |= TRACE_MEM_BE;
- }
- if (store) {
- res |= TRACE_MEM_ST;
- }
-#ifdef CONFIG_SOFTMMU
- res |= mmu_idx << TRACE_MEM_MMU_SHIFT;
-#endif
- return res;
-}
-
-static inline uint16_t trace_mem_get_info(MemOp op,
- unsigned int mmu_idx,
- bool store)
-{
- return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
- op & MO_BSWAP, store,
- mmu_idx);
-}
-
-#endif /* TRACE__MEM_INTERNAL_H */
diff --git a/trace/mem.h b/trace/mem.h
index 9644f592b4..2f27e7bdf0 100644
--- a/trace/mem.h
+++ b/trace/mem.h
@@ -12,24 +12,52 @@
#include "tcg/tcg.h"
+#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */
+#define TRACE_MEM_SE (1ULL << 4) /* sign extended (y/n) */
+#define TRACE_MEM_BE (1ULL << 5) /* big endian (y/n) */
+#define TRACE_MEM_ST (1ULL << 6) /* store (y/n) */
+#define TRACE_MEM_MMU_SHIFT 8 /* mmu idx */
/**
- * trace_mem_get_info:
+ * trace_mem_build_info:
*
* Return a value for the 'info' argument in guest memory access traces.
*/
-static uint16_t trace_mem_get_info(MemOp op, unsigned int mmu_idx, bool store);
+static inline uint16_t trace_mem_build_info(int size_shift, bool sign_extend,
+ MemOp endianness, bool store,
+ unsigned int mmu_idx)
+{
+ uint16_t res;
+
+ res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
+ if (sign_extend) {
+ res |= TRACE_MEM_SE;
+ }
+ if (endianness == MO_BE) {
+ res |= TRACE_MEM_BE;
+ }
+ if (store) {
+ res |= TRACE_MEM_ST;
+ }
+#ifdef CONFIG_SOFTMMU
+ res |= mmu_idx << TRACE_MEM_MMU_SHIFT;
+#endif
+ return res;
+}
+
/**
- * trace_mem_build_info:
+ * trace_mem_get_info:
*
* Return a value for the 'info' argument in guest memory access traces.
*/
-static uint16_t trace_mem_build_info(int size_shift, bool sign_extend,
- MemOp endianness, bool store,
- unsigned int mmuidx);
-
-
-#include "trace/mem-internal.h"
+static inline uint16_t trace_mem_get_info(MemOp op,
+ unsigned int mmu_idx,
+ bool store)
+{
+ return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
+ op & MO_BSWAP, store,
+ mmu_idx);
+}
#endif /* TRACE__MEM_H */
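[Editor's note] The info word packs the access description into 16 bits: the size shift in bits 3..0, the sign/endian/store flags in bits 4..6, and (softmmu only) the mmu index from bit 8 upward. A worked example using the macros above:

    /* A 4-byte big-endian store at mmu_idx 1 (softmmu build):      */
    /*   size_shift 2 -> 0x002, TRACE_MEM_BE -> 0x020,              */
    /*   TRACE_MEM_ST -> 0x040, 1 << TRACE_MEM_MMU_SHIFT -> 0x100   */
    uint16_t info = trace_mem_build_info(2, false, MO_BE, true, 1);
    /* info == 0x162 */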
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 30f5354b1e..2b86777e91 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -716,3 +716,15 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
aio_notify(ctx);
}
+
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+ Error **errp)
+{
+ /*
+ * No thread synchronization here; it doesn't matter if an incorrect
+ * value is used once.
+ */
+ ctx->aio_max_batch = max_batch;
+
+ aio_notify(ctx);
+}
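[Editor's note] aio_max_batch is a new per-AioContext tunable, consumed by the Linux AIO backend elsewhere in this series to cap how many requests are queued before an io_submit(2) flush (0 keeps the backend default). A hypothetical direct call, assuming an existing IOThread; real configuration goes through the IOThread property plumbing added in iothread.c:

    /* Hypothetical use: cap Linux AIO submission batches at 32 requests. */
    aio_context_set_aio_params(iothread_get_aio_context(iothread),
                               32 /* max_batch */, &error_abort);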
diff --git a/util/aio-win32.c b/util/aio-win32.c
index 168717b51b..d5b09a1193 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -440,3 +440,8 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
error_setg(errp, "AioContext polling is not implemented on Windows");
}
}
+
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+ Error **errp)
+{
+}
diff --git a/util/async.c b/util/async.c
index 9a41591319..6f6717a34b 100644
--- a/util/async.c
+++ b/util/async.c
@@ -554,6 +554,8 @@ AioContext *aio_context_new(Error **errp)
ctx->poll_grow = 0;
ctx->poll_shrink = 0;
+ ctx->aio_max_batch = 0;
+
return ctx;
fail:
g_source_destroy(&ctx->source);
diff --git a/util/qsp.c b/util/qsp.c
index bacc5fa2f6..8562b14a87 100644
--- a/util/qsp.c
+++ b/util/qsp.c
@@ -83,8 +83,8 @@ typedef struct QSPCallSite QSPCallSite;
struct QSPEntry {
void *thread_ptr;
const QSPCallSite *callsite;
- uint64_t n_acqs;
- uint64_t ns;
+ aligned_uint64_t n_acqs;
+ aligned_uint64_t ns;
unsigned int n_objs; /* count of coalesced objs; only used for reporting */
};
typedef struct QSPEntry QSPEntry;
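[Editor's note] aligned_uint64_t is the new typedef from include/qemu/atomic.h (also touched by this series) used where 64-bit counters are updated with atomic operations: plain uint64_t has only 4-byte alignment in some 32-bit ABIs, which breaks single-copy atomicity. A sketch of the presumed definition, under the assumption it mirrors the usual aligned_int64_t pattern:

    /* Assumed shape of the typedef: force natural 8-byte alignment. */
    typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));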