-rw-r--r--  .gitlab-ci.d/custom-runners.yml | 16
-rw-r--r--  accel/tcg/cpu-exec.c | 14
-rw-r--r--  accel/tcg/tcg-accel-ops-rr.c | 2
-rw-r--r--  accel/tcg/translate-all.c | 59
-rw-r--r--  accel/tcg/translator.c | 39
-rw-r--r--  accel/tcg/user-exec.c | 48
-rw-r--r--  block.c | 88
-rw-r--r--  block/file-posix.c | 7
-rw-r--r--  block/gluster.c | 23
-rw-r--r--  block/io.c | 68
-rw-r--r--  block/iscsi.c | 3
-rw-r--r--  block/mirror.c | 25
-rw-r--r--  block/qcow2-cluster.c | 78
-rw-r--r--  block/qcow2-refcount.c | 326
-rw-r--r--  block/qcow2.c | 13
-rw-r--r--  block/qcow2.h | 7
-rw-r--r--  bsd-user/i386/target_arch_cpu.c | 5
-rw-r--r--  bsd-user/x86_64/target_arch_cpu.c | 5
-rw-r--r--  docs/tools/qemu-img.rst | 4
-rw-r--r--  include/block/block_int.h | 61
-rw-r--r--  include/exec/translate-all.h | 1
-rw-r--r--  include/exec/translator.h | 44
-rw-r--r--  include/hw/core/tcg-cpu-ops.h | 26
-rw-r--r--  include/tcg/tcg-op.h | 2
-rw-r--r--  linux-user/main.c | 7
-rw-r--r--  qemu-img-cmds.hx | 2
-rw-r--r--  qemu-img.c | 18
-rwxr-xr-x  scripts/simplebench/img_bench_templater.py | 95
-rw-r--r--  scripts/simplebench/table_templater.py | 62
-rw-r--r--  target/alpha/cpu.c | 2
-rw-r--r--  target/alpha/cpu.h | 2
-rw-r--r--  target/alpha/helper.c | 5
-rw-r--r--  target/alpha/translate.c | 2
-rw-r--r--  target/arm/arm_ldst.h | 12
-rw-r--r--  target/arm/cpu.c | 7
-rw-r--r--  target/arm/cpu.h | 3
-rw-r--r--  target/arm/cpu_tcg.c | 6
-rw-r--r--  target/arm/translate-a64.c | 2
-rw-r--r--  target/arm/translate.c | 9
-rw-r--r--  target/avr/cpu.c | 3
-rw-r--r--  target/cris/cpu.c | 4
-rw-r--r--  target/cris/cpu.h | 2
-rw-r--r--  target/cris/helper.c | 17
-rw-r--r--  target/hexagon/translate.c | 3
-rw-r--r--  target/hppa/cpu.c | 2
-rw-r--r--  target/hppa/cpu.h | 4
-rw-r--r--  target/hppa/int_helper.c | 7
-rw-r--r--  target/hppa/translate.c | 5
-rw-r--r--  target/i386/cpu.h | 3
-rw-r--r--  target/i386/tcg/helper-tcg.h | 2
-rw-r--r--  target/i386/tcg/seg_helper.c | 74
-rw-r--r--  target/i386/tcg/sysemu/seg_helper.c | 62
-rw-r--r--  target/i386/tcg/tcg-cpu.c | 8
-rw-r--r--  target/i386/tcg/translate.c | 10
-rw-r--r--  target/m68k/cpu.c | 2
-rw-r--r--  target/m68k/cpu.h | 2
-rw-r--r--  target/m68k/op_helper.c | 16
-rw-r--r--  target/m68k/translate.c | 2
-rw-r--r--  target/microblaze/cpu.c | 2
-rw-r--r--  target/microblaze/cpu.h | 2
-rw-r--r--  target/microblaze/helper.c | 13
-rw-r--r--  target/mips/cpu.c | 2
-rw-r--r--  target/mips/tcg/exception.c | 18
-rw-r--r--  target/mips/tcg/micromips_translate.c.inc | 2
-rw-r--r--  target/mips/tcg/mips16e_translate.c.inc | 4
-rw-r--r--  target/mips/tcg/nanomips_translate.c.inc | 4
-rw-r--r--  target/mips/tcg/sysemu/tlb_helper.c | 18
-rw-r--r--  target/mips/tcg/tcg-internal.h | 5
-rw-r--r--  target/mips/tcg/translate.c | 8
-rw-r--r--  target/mips/tcg/user/tlb_helper.c | 5
-rw-r--r--  target/nios2/cpu.c | 5
-rw-r--r--  target/openrisc/cpu.c | 2
-rw-r--r--  target/openrisc/cpu.h | 5
-rw-r--r--  target/openrisc/interrupt.c | 2
-rw-r--r--  target/openrisc/meson.build | 6
-rw-r--r--  target/openrisc/translate.c | 2
-rw-r--r--  target/ppc/cpu.h | 4
-rw-r--r--  target/ppc/cpu_init.c | 2
-rw-r--r--  target/ppc/excp_helper.c | 21
-rw-r--r--  target/ppc/translate.c | 5
-rw-r--r--  target/riscv/cpu.c | 2
-rw-r--r--  target/riscv/cpu.h | 2
-rw-r--r--  target/riscv/cpu_helper.c | 5
-rw-r--r--  target/riscv/translate.c | 5
-rw-r--r--  target/rx/cpu.c | 2
-rw-r--r--  target/rx/cpu.h | 2
-rw-r--r--  target/rx/helper.c | 4
-rw-r--r--  target/s390x/tcg/translate.c | 16
-rw-r--r--  target/sh4/cpu.c | 2
-rw-r--r--  target/sh4/cpu.h | 4
-rw-r--r--  target/sh4/helper.c | 9
-rw-r--r--  target/sh4/translate.c | 4
-rw-r--r--  target/sparc/cpu.c | 4
-rw-r--r--  target/sparc/translate.c | 2
-rw-r--r--  target/xtensa/cpu.c | 2
-rw-r--r--  target/xtensa/cpu.h | 2
-rw-r--r--  target/xtensa/exc_helper.c | 7
-rw-r--r--  target/xtensa/translate.c | 5
-rw-r--r--  tcg/arm/tcg-target.c.inc | 515
-rw-r--r--  tcg/arm/tcg-target.h | 27
-rw-r--r--  tcg/i386/tcg-target.c.inc | 13
-rw-r--r--  tcg/ppc/tcg-target.c.inc | 25
-rwxr-xr-x  tests/qemu-iotests/122 | 2
-rwxr-xr-x  tests/qemu-iotests/271 | 5
-rw-r--r--  tests/qemu-iotests/271.out | 4
-rwxr-xr-x  tests/qemu-iotests/297 | 9
-rw-r--r--  tests/qemu-iotests/iotests.py | 12
-rwxr-xr-x  tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test | 13
-rwxr-xr-x  tests/qemu-iotests/tests/migrate-bitmaps-test | 43
-rwxr-xr-x  tests/qemu-iotests/tests/migrate-during-backup | 97
-rw-r--r--  tests/qemu-iotests/tests/migrate-during-backup.out | 5
-rwxr-xr-x  tests/qemu-iotests/tests/mirror-top-perms | 2
112 files changed, 1570 insertions(+), 844 deletions(-)
diff --git a/.gitlab-ci.d/custom-runners.yml b/.gitlab-ci.d/custom-runners.yml
index bcd22ca293..a89a20da48 100644
--- a/.gitlab-ci.d/custom-runners.yml
+++ b/.gitlab-ci.d/custom-runners.yml
@@ -60,8 +60,10 @@ ubuntu-18.04-s390x-alldbg:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -79,8 +81,10 @@ ubuntu-18.04-s390x-clang:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -97,8 +101,10 @@ ubuntu-18.04-s390x-tci:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -114,8 +120,10 @@ ubuntu-18.04-s390x-notcg:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -154,8 +162,10 @@ ubuntu-20.04-aarch64-all:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -189,8 +199,10 @@ ubuntu-20.04-aarch64-clang:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -207,8 +219,10 @@ ubuntu-20.04-aarch64-tci:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -224,8 +238,10 @@ ubuntu-20.04-aarch64-notcg:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index e5c0ccd1a2..75dbc1e4e3 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -651,8 +651,8 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
loop */
#if defined(TARGET_I386)
CPUClass *cc = CPU_GET_CLASS(cpu);
- cc->tcg_ops->do_interrupt(cpu);
-#endif
+ cc->tcg_ops->fake_user_interrupt(cpu);
+#endif /* TARGET_I386 */
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
@@ -685,6 +685,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
return false;
}
+#ifndef CONFIG_USER_ONLY
/*
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
* "real" interrupt event later. It does not need to be recorded for
@@ -698,12 +699,11 @@ static inline bool need_replay_interrupt(int interrupt_request)
return true;
#endif
}
+#endif /* !CONFIG_USER_ONLY */
static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
* Ensure zeroing happens before reading cpu->exit_request or
@@ -725,6 +725,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
qemu_mutex_unlock_iothread();
return true;
}
+#if !defined(CONFIG_USER_ONLY)
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
@@ -753,12 +754,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
qemu_mutex_unlock_iothread();
return true;
}
-#endif
+#endif /* !TARGET_I386 */
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
if (cc->tcg_ops->cpu_exec_interrupt &&
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
@@ -777,6 +780,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* reload the 'interrupt_request' value */
interrupt_request = cpu->interrupt_request;
}
+#endif /* !CONFIG_USER_ONLY */
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index c02c061ecb..a5fd26190e 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -60,8 +60,6 @@ void rr_kick_vcpu_thread(CPUState *unused)
static QEMUTimer *rr_kick_vcpu_timer;
static CPUState *rr_current_cpu;
-#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
-
static inline int64_t rr_next_kick_time(void)
{
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index bbfcfb698c..fb9ebfad9e 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1297,31 +1297,8 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
invalidate_page_bitmap(p);
#if defined(CONFIG_USER_ONLY)
- if (p->flags & PAGE_WRITE) {
- target_ulong addr;
- PageDesc *p2;
- int prot;
-
- /* force the host page as non writable (writes will have a
- page fault + mprotect overhead) */
- page_addr &= qemu_host_page_mask;
- prot = 0;
- for (addr = page_addr; addr < page_addr + qemu_host_page_size;
- addr += TARGET_PAGE_SIZE) {
-
- p2 = page_find(addr >> TARGET_PAGE_BITS);
- if (!p2) {
- continue;
- }
- prot |= p2->flags;
- p2->flags &= ~PAGE_WRITE;
- }
- mprotect(g2h_untagged(page_addr), qemu_host_page_size,
- (prot & PAGE_BITS) & ~PAGE_WRITE);
- if (DEBUG_TB_INVALIDATE_GATE) {
- printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
- }
- }
+ /* translator_loop() must have made all TB pages non-writable */
+ assert(!(p->flags & PAGE_WRITE));
#else
/* if some code is already present, then the pages are already
protected. So we handle the case where only the first TB is
@@ -2394,6 +2371,38 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
return 0;
}
+void page_protect(tb_page_addr_t page_addr)
+{
+ target_ulong addr;
+ PageDesc *p;
+ int prot;
+
+ p = page_find(page_addr >> TARGET_PAGE_BITS);
+ if (p && (p->flags & PAGE_WRITE)) {
+ /*
+ * Force the host page as non writable (writes will have a page fault +
+ * mprotect overhead).
+ */
+ page_addr &= qemu_host_page_mask;
+ prot = 0;
+ for (addr = page_addr; addr < page_addr + qemu_host_page_size;
+ addr += TARGET_PAGE_SIZE) {
+
+ p = page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ continue;
+ }
+ prot |= p->flags;
+ p->flags &= ~PAGE_WRITE;
+ }
+ mprotect(g2h_untagged(page_addr), qemu_host_page_size,
+ (prot & PAGE_BITS) & ~PAGE_WRITE);
+ if (DEBUG_TB_INVALIDATE_GATE) {
+ printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
+ }
+ }
+}
+
/* called from signal handler: invalidate the code and unprotect the
* page. Return 0 if the fault was not handled, 1 if it was handled,
* and 2 if it was handled but the caller must cause the TB to be
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index c53a7f8e44..390bd9db0a 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -42,6 +42,15 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}
+static inline void translator_page_protect(DisasContextBase *dcbase,
+ target_ulong pc)
+{
+#ifdef CONFIG_USER_ONLY
+ dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
+ page_protect(pc);
+#endif
+}
+
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns)
{
@@ -56,6 +65,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
db->num_insns = 0;
db->max_insns = max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
+ translator_page_protect(db, db->pc_next);
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -137,3 +147,32 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
}
#endif
}
+
+static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
+ target_ulong pc, size_t len)
+{
+#ifdef CONFIG_USER_ONLY
+ target_ulong end = pc + len - 1;
+
+ if (end > dcbase->page_protect_end) {
+ translator_page_protect(dcbase, end);
+ }
+#endif
+}
+
+#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
+ type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
+ abi_ptr pc, bool do_swap) \
+ { \
+ translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
+ type ret = load_fn(env, pc); \
+ if (do_swap) { \
+ ret = swap_fn(ret); \
+ } \
+ plugin_insn_append(&ret, sizeof(ret)); \
+ return ret; \
+ }
+
+FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
+
+#undef GEN_TRANSLATOR_LD
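
A minimal sketch of what the new loader signature buys (translator_lduw()
and DisasContextBase are from this patch; the fetch_insn() helper is
illustrative only, not part of the series): every fetch now goes through
translator_maybe_page_protect(), so an instruction straddling a page
boundary write-protects the second page as well before its bytes are read.

    static uint32_t fetch_insn(CPUArchState *env, DisasContextBase *db,
                               target_ulong pc)
    {
        /* May write-protect the page containing pc. */
        uint32_t insn = translator_lduw(env, db, pc);
        /* May extend protection to the page containing pc + 2. */
        return (insn << 16) | translator_lduw(env, db, pc + 2);
    }
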
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 90d1a2d327..8fed542622 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -680,18 +680,26 @@ int cpu_signal_handler(int host_signum, void *pinfo,
pc = uc->uc_mcontext.psw.addr;
- /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
- of the normal 2 arguments. The 3rd argument contains the "int_code"
- from the hardware which does in fact contain the is_write value.
- The rt signal handler, as far as I can tell, does not give this value
- at all. Not that we could get to it from here even if it were. */
- /* ??? This is not even close to complete, since it ignores all
- of the read-modify-write instructions. */
+ /*
+ * ??? On linux, the non-rt signal handler has 4 (!) arguments instead
+ * of the normal 2 arguments. The 4th argument contains the "Translation-
+ * Exception Identification for DAT Exceptions" from the hardware (aka
+ * "int_parm_long"), which does in fact contain the is_write value.
+ * The rt signal handler, as far as I can tell, does not give this value
+ * at all. Not that we could get to it from here even if it were.
+ * So fall back to parsing instructions. Treat read-modify-write ones as
+ * writes, which is not fully correct, but for tracking self-modifying code
+ * this is better than treating them as reads. Checking si_addr page flags
+ * might be a viable improvement, albeit a racy one.
+ */
+ /* ??? This is not even close to complete. */
pinsn = (uint16_t *)pc;
switch (pinsn[0] >> 8) {
case 0x50: /* ST */
case 0x42: /* STC */
case 0x40: /* STH */
+ case 0xba: /* CS */
+ case 0xbb: /* CDS */
is_write = 1;
break;
case 0xc4: /* RIL format insns */
@@ -702,6 +710,12 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
break;
+ case 0xc8: /* SSF format insns */
+ switch (pinsn[0] & 0xf) {
+ case 0x2: /* CSST */
+ is_write = 1;
+ }
+ break;
case 0xe3: /* RXY format insns */
switch (pinsn[2] & 0xff) {
case 0x50: /* STY */
@@ -715,7 +729,27 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
break;
+ case 0xeb: /* RSY format insns */
+ switch (pinsn[2] & 0xff) {
+ case 0x14: /* CSY */
+ case 0x30: /* CSG */
+ case 0x31: /* CDSY */
+ case 0x3e: /* CDSG */
+ case 0xe4: /* LANG */
+ case 0xe6: /* LAOG */
+ case 0xe7: /* LAXG */
+ case 0xe8: /* LAAG */
+ case 0xea: /* LAALG */
+ case 0xf4: /* LAN */
+ case 0xf6: /* LAO */
+ case 0xf7: /* LAX */
+ case 0xfa: /* LAAL */
+ case 0xf8: /* LAA */
+ is_write = 1;
+ }
+ break;
}
+
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
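
For reference, a sketch of why `pinsn[2] & 0xff` selects the second opcode
byte in the RXY/RSY cases above (layout per the s390x ISA, not taken from
this patch):

    /*
     * RXY/RSY instructions are 6 bytes long; viewed as three big-endian
     * 16-bit halfwords:
     *
     *   pinsn[0] = op1 (8 bits) | register fields (8 bits)
     *   pinsn[1] = base/displacement bits
     *   pinsn[2] = displacement-high (8 bits) | op2 (8 bits)
     *
     * hence (pinsn[0] >> 8) is the first opcode byte and
     * (pinsn[2] & 0xff) the second one.
     */
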
diff --git a/block.c b/block.c
index b2b66263f9..5ce08a79fd 100644
--- a/block.c
+++ b/block.c
@@ -49,6 +49,8 @@
#include "qemu/timer.h"
#include "qemu/cutils.h"
#include "qemu/id.h"
+#include "qemu/range.h"
+#include "qemu/rcu.h"
#include "block/coroutines.h"
#ifdef CONFIG_BSD
@@ -401,6 +403,9 @@ BlockDriverState *bdrv_new(void)
qemu_co_queue_init(&bs->flush_queue);
+ qemu_co_mutex_init(&bs->bsc_modify_lock);
+ bs->block_status_cache = g_new0(BdrvBlockStatusCache, 1);
+
for (i = 0; i < bdrv_drain_all_count; i++) {
bdrv_drained_begin(bs);
}
@@ -4694,6 +4699,8 @@ static void bdrv_close(BlockDriverState *bs)
bs->explicit_options = NULL;
qobject_unref(bs->full_open_options);
bs->full_open_options = NULL;
+ g_free(bs->block_status_cache);
+ bs->block_status_cache = NULL;
bdrv_release_named_dirty_bitmaps(bs);
assert(QLIST_EMPTY(&bs->dirty_bitmaps));
@@ -6319,6 +6326,7 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
{
BdrvChild *child, *parent;
int ret;
+ uint64_t cumulative_perms, cumulative_shared_perms;
if (!bs->drv) {
return -ENOMEDIUM;
@@ -6349,6 +6357,13 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
}
}
+ bdrv_get_cumulative_perm(bs, &cumulative_perms,
+ &cumulative_shared_perms);
+ if (cumulative_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
+ /* Our inactive parents still need write access. Inactivation failed. */
+ return -EPERM;
+ }
+
bs->open_flags |= BDRV_O_INACTIVE;
/*
@@ -7684,3 +7699,76 @@ BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs)
{
return bdrv_skip_filters(bdrv_cow_bs(bdrv_skip_filters(bs)));
}
+
+/**
+ * Check whether [offset, offset + bytes) overlaps with the cached
+ * block-status data region.
+ *
+ * If so, and @pnum is not NULL, set *pnum to `bsc.data_end - offset`,
+ * which is what bdrv_bsc_is_data()'s interface needs.
+ * Otherwise, *pnum is not touched.
+ */
+static bool bdrv_bsc_range_overlaps_locked(BlockDriverState *bs,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum)
+{
+ BdrvBlockStatusCache *bsc = qatomic_rcu_read(&bs->block_status_cache);
+ bool overlaps;
+
+ overlaps =
+ qatomic_read(&bsc->valid) &&
+ ranges_overlap(offset, bytes, bsc->data_start,
+ bsc->data_end - bsc->data_start);
+
+ if (overlaps && pnum) {
+ *pnum = bsc->data_end - offset;
+ }
+
+ return overlaps;
+}
+
+/**
+ * See block_int.h for this function's documentation.
+ */
+bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum)
+{
+ RCU_READ_LOCK_GUARD();
+
+ return bdrv_bsc_range_overlaps_locked(bs, offset, 1, pnum);
+}
+
+/**
+ * See block_int.h for this function's documentation.
+ */
+void bdrv_bsc_invalidate_range(BlockDriverState *bs,
+ int64_t offset, int64_t bytes)
+{
+ RCU_READ_LOCK_GUARD();
+
+ if (bdrv_bsc_range_overlaps_locked(bs, offset, bytes, NULL)) {
+ qatomic_set(&bs->block_status_cache->valid, false);
+ }
+}
+
+/**
+ * See block_int.h for this function's documentation.
+ */
+void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes)
+{
+ BdrvBlockStatusCache *new_bsc = g_new(BdrvBlockStatusCache, 1);
+ BdrvBlockStatusCache *old_bsc;
+
+ *new_bsc = (BdrvBlockStatusCache) {
+ .valid = true,
+ .data_start = offset,
+ .data_end = offset + bytes,
+ };
+
+ QEMU_LOCK_GUARD(&bs->bsc_modify_lock);
+
+ old_bsc = qatomic_rcu_read(&bs->block_status_cache);
+ qatomic_rcu_set(&bs->block_status_cache, new_bsc);
+ if (old_bsc) {
+ g_free_rcu(old_bsc, rcu);
+ }
+}
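
A hedged usage sketch of the three new helpers (the callers wired up in
block/io.c below follow this pattern; bs, offset, and bytes are assumed
valid here):

    int64_t pnum;

    if (bdrv_bsc_is_data(bs, offset, &pnum)) {
        /* Cache hit: [offset, offset + pnum) is known to be data. */
    }

    /* Any write that may create zeroes or holes must drop the cache: */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /* After a driver reported a data region, remember it: */
    bdrv_bsc_fill(bs, offset, bytes);

Readers only take an RCU read guard; bdrv_bsc_fill() serializes writers on
bs->bsc_modify_lock and publishes the replacement cache object with
qatomic_rcu_set(), freeing the old one via g_free_rcu(). Invalidation never
replaces the object, it only clears bsc->valid with qatomic_set(), which is
why readers access that field atomically.
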
diff --git a/block/file-posix.c b/block/file-posix.c
index cb9bffe047..9f35e5631a 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -2744,7 +2744,8 @@ static int find_allocation(BlockDriverState *bs, off_t start,
* the specified offset) that are known to be in the same
* allocated/unallocated state.
*
- * 'bytes' is the max value 'pnum' should be set to.
+ * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
+ * well exceed it.
*/
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
bool want_zero,
@@ -2782,7 +2783,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
} else if (data == offset) {
/* On a data extent, compute bytes to the end of the extent,
* possibly including a partial sector at EOF. */
- *pnum = MIN(bytes, hole - offset);
+ *pnum = hole - offset;
/*
* We are not allowed to return partial sectors, though, so
@@ -2801,7 +2802,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
} else {
/* On a hole, compute bytes to the beginning of the next extent. */
assert(hole == offset);
- *pnum = MIN(bytes, data - offset);
+ *pnum = data - offset;
ret = BDRV_BLOCK_ZERO;
}
*map = offset;
diff --git a/block/gluster.c b/block/gluster.c
index e8ee14c8e9..d51938e447 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -1461,7 +1461,8 @@ exit:
* the specified offset) that are known to be in the same
* allocated/unallocated state.
*
- * 'bytes' is the max value 'pnum' should be set to.
+ * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
+ * well exceed it.
*
* (Based on raw_co_block_status() from file-posix.c.)
*/
@@ -1477,6 +1478,8 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
off_t data = 0, hole = 0;
int ret = -EINVAL;
+ assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));
+
if (!s->fd) {
return ret;
}
@@ -1500,12 +1503,26 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
} else if (data == offset) {
/* On a data extent, compute bytes to the end of the extent,
* possibly including a partial sector at EOF. */
- *pnum = MIN(bytes, hole - offset);
+ *pnum = hole - offset;
+
+ /*
+ * We are not allowed to return partial sectors, though, so
+ * round up if necessary.
+ */
+ if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
+ int64_t file_length = qemu_gluster_getlength(bs);
+ if (file_length > 0) {
+ /* Ignore errors, this is just a safeguard */
+ assert(hole == file_length);
+ }
+ *pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
+ }
+
ret = BDRV_BLOCK_DATA;
} else {
/* On a hole, compute bytes to the beginning of the next extent. */
assert(hole == offset);
- *pnum = MIN(bytes, data - offset);
+ *pnum = data - offset;
ret = BDRV_BLOCK_ZERO;
}
diff --git a/block/io.c b/block/io.c
index a19942718b..99ee182ca4 100644
--- a/block/io.c
+++ b/block/io.c
@@ -1883,6 +1883,9 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
return -ENOTSUP;
}
+ /* Invalidate the cached block-status data range if this write overlaps */
+ bdrv_bsc_invalidate_range(bs, offset, bytes);
+
assert(alignment % bs->bl.request_alignment == 0);
head = offset % alignment;
tail = (offset + bytes) % alignment;
@@ -2447,9 +2450,65 @@ static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
if (bs->drv->bdrv_co_block_status) {
- ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
- aligned_bytes, pnum, &local_map,
- &local_file);
+ /*
+ * Use the block-status cache only for protocol nodes: Format
+ * drivers are generally quick to inquire the status, but protocol
+ * drivers often need to get information from outside of qemu, so
+ * we do not have control over the actual implementation. There
+ * have been cases where inquiring the status took an unreasonably
+ * long time, and we can do nothing in qemu to fix it.
+ * This is especially problematic for images with large data areas,
+ * because finding the few holes in them and giving them special
+ * treatment does not gain much performance. Therefore, we try to
+ * cache the last-identified data region.
+ *
+ * Second, limiting ourselves to protocol nodes allows us to assume
+ * the block status for data regions to be DATA | OFFSET_VALID, and
+ * that the host offset is the same as the guest offset.
+ *
+ * Note that it is possible that external writers zero parts of
+ * the cached regions without the cache being invalidated, and so
+ * we may report zeroes as data. This is not catastrophic,
+ * however, because reporting zeroes as data is fine.
+ */
+ if (QLIST_EMPTY(&bs->children) &&
+ bdrv_bsc_is_data(bs, aligned_offset, pnum))
+ {
+ ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
+ local_file = bs;
+ local_map = aligned_offset;
+ } else {
+ ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
+ aligned_bytes, pnum, &local_map,
+ &local_file);
+
+ /*
+ * Note that checking QLIST_EMPTY(&bs->children) is also done when
+ * the cache is queried above. Technically, we do not need to check
+ * it here; the worst that can happen is that we fill the cache for
+ * non-protocol nodes, and then it is never used. However, filling
+ * the cache requires an RCU update, so double check here to avoid
+ * such an update if possible.
+ */
+ if (ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
+ QLIST_EMPTY(&bs->children))
+ {
+ /*
+ * When a protocol driver reports BLOCK_OFFSET_VALID, the
+ * returned local_map value must be the same as the offset we
+ * have passed (aligned_offset), and local_bs must be the node
+ * itself.
+ * Assert this, because we follow this rule when reading from
+ * the cache (see the `local_file = bs` and
+ * `local_map = aligned_offset` assignments above), and the
+ * result the cache delivers must be the same as the driver
+ * would deliver.
+ */
+ assert(local_file == bs);
+ assert(local_map == aligned_offset);
+ bdrv_bsc_fill(bs, aligned_offset, *pnum);
+ }
+ }
} else {
/* Default code for filters */
@@ -3002,6 +3061,9 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
return 0;
}
+ /* Invalidate the cached block-status data range if this discard overlaps */
+ bdrv_bsc_invalidate_range(bs, offset, bytes);
+
/* Discard is advisory, but some devices track and coalesce
* unaligned requests, so we must pass everything down rather than
* round here. Still, most devices will just silently ignore
diff --git a/block/iscsi.c b/block/iscsi.c
index 4d2a416ce7..852384086b 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -781,9 +781,6 @@ retry:
iscsi_allocmap_set_allocated(iscsilun, offset, *pnum);
}
- if (*pnum > bytes) {
- *pnum = bytes;
- }
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
g_free(iTask.err_str);
diff --git a/block/mirror.c b/block/mirror.c
index 98fc66eabf..85b781bc21 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -160,18 +160,25 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
if (ranges_overlap(self_start_chunk, self_nb_chunks,
op_start_chunk, op_nb_chunks))
{
- /*
- * If the operation is already (indirectly) waiting for us, or
- * will wait for us as soon as it wakes up, then just go on
- * (instead of producing a deadlock in the former case).
- */
- if (op->waiting_for_op) {
- continue;
+ if (self) {
+ /*
+ * If the operation is already (indirectly) waiting for us,
+ * or will wait for us as soon as it wakes up, then just go
+ * on (instead of producing a deadlock in the former case).
+ */
+ if (op->waiting_for_op) {
+ continue;
+ }
+
+ self->waiting_for_op = op;
}
- self->waiting_for_op = op;
qemu_co_queue_wait(&op->waiting_requests, NULL);
- self->waiting_for_op = NULL;
+
+ if (self) {
+ self->waiting_for_op = NULL;
+ }
+
break;
}
}
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index bd0597842f..4ebb49a087 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -556,8 +556,7 @@ static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
* offset needs to be aligned to a cluster boundary.
*
* If the cluster is unallocated then *host_offset will be 0.
- * If the cluster is compressed then *host_offset will contain the
- * complete compressed cluster descriptor.
+ * If the cluster is compressed then *host_offset will contain the l2 entry.
*
* On entry, *bytes is the maximum number of contiguous bytes starting at
* offset that we are interested in.
@@ -660,7 +659,7 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
ret = -EIO;
goto fail;
}
- *host_offset = l2_entry & L2E_COMPRESSED_OFFSET_SIZE_MASK;
+ *host_offset = l2_entry;
break;
case QCOW2_SUBCLUSTER_ZERO_PLAIN:
case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
@@ -1400,29 +1399,47 @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
if (end <= old_start || start >= old_end) {
/* No intersection */
+ continue;
+ }
+
+ if (old_alloc->keep_old_clusters &&
+ (end <= l2meta_cow_start(old_alloc) ||
+ start >= l2meta_cow_end(old_alloc)))
+ {
+ /*
+ * Clusters intersect but COW areas don't. And cluster itself is
+ * already allocated. So, there is no actual conflict.
+ */
+ continue;
+ }
+
+ /* Conflict */
+
+ if (start < old_start) {
+ /* Stop at the start of a running allocation */
+ bytes = old_start - start;
} else {
- if (start < old_start) {
- /* Stop at the start of a running allocation */
- bytes = old_start - start;
- } else {
- bytes = 0;
- }
+ bytes = 0;
+ }
- /* Stop if already an l2meta exists. After yielding, it wouldn't
- * be valid any more, so we'd have to clean up the old L2Metas
- * and deal with requests depending on them before starting to
- * gather new ones. Not worth the trouble. */
- if (bytes == 0 && *m) {
- *cur_bytes = 0;
- return 0;
- }
+ /*
+ * Stop if an l2meta already exists. After yielding, it wouldn't
+ * be valid any more, so we'd have to clean up the old L2Metas
+ * and deal with requests depending on them before starting to
+ * gather new ones. Not worth the trouble.
+ */
+ if (bytes == 0 && *m) {
+ *cur_bytes = 0;
+ return 0;
+ }
- if (bytes == 0) {
- /* Wait for the dependency to complete. We need to recheck
- * the free/allocated clusters when we continue. */
- qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
- return -EAGAIN;
- }
+ if (bytes == 0) {
+ /*
+ * Wait for the dependency to complete. We need to recheck
+ * the free/allocated clusters when we continue.
+ */
+ qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
+ return -EAGAIN;
}
}
@@ -2463,3 +2480,18 @@ fail:
g_free(l1_table);
return ret;
}
+
+void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
+ uint64_t *coffset, int *csize)
+{
+ BDRVQcow2State *s = bs->opaque;
+ int nb_csectors;
+
+ assert(qcow2_get_cluster_type(bs, l2_entry) == QCOW2_CLUSTER_COMPRESSED);
+
+ *coffset = l2_entry & s->cluster_offset_mask;
+
+ nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1;
+ *csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
+ (*coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1));
+}
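
A worked example of the arithmetic above, assuming 64 KiB clusters
(cluster_bits = 16, hence csize_shift = 62 - (16 - 8) = 54 and
csize_mask = 0xff; the entry value itself is made up):

    /* Compressed cluster at host offset 0x50280, sector-count field 1: */
    uint64_t l2_entry = QCOW_OFLAG_COMPRESSED | (1ULL << 54) | 0x50280;

    /* coffset     = l2_entry & ((1ULL << 54) - 1)  = 0x50280           */
    /* nb_csectors = ((l2_entry >> 54) & 0xff) + 1  = 2                 */
    /* csize       = 2 * 512 - (0x50280 & 511)      = 1024 - 128 = 896  */

That is, the compressed data occupies 896 bytes starting at host offset
0x50280, spanning two 512-byte sectors.
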
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 8e649b008e..4614572252 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -1177,11 +1177,11 @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
switch (ctype) {
case QCOW2_CLUSTER_COMPRESSED:
{
- int64_t offset = (l2_entry & s->cluster_offset_mask)
- & QCOW2_COMPRESSED_SECTOR_MASK;
- int size = QCOW2_COMPRESSED_SECTOR_SIZE *
- (((l2_entry >> s->csize_shift) & s->csize_mask) + 1);
- qcow2_free_clusters(bs, offset, size, type);
+ uint64_t coffset;
+ int csize;
+
+ qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
+ qcow2_free_clusters(bs, coffset, csize, type);
}
break;
case QCOW2_CLUSTER_NORMAL:
@@ -1247,7 +1247,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
bool l1_allocated = false;
int64_t old_entry, old_l2_offset;
unsigned slice, slice_size2, n_slices;
- int i, j, l1_modified = 0, nb_csectors;
+ int i, j, l1_modified = 0;
int ret;
assert(addend >= -1 && addend <= 1);
@@ -1318,14 +1318,14 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
switch (qcow2_get_cluster_type(bs, entry)) {
case QCOW2_CLUSTER_COMPRESSED:
- nb_csectors = ((entry >> s->csize_shift) &
- s->csize_mask) + 1;
if (addend != 0) {
- uint64_t coffset = (entry & s->cluster_offset_mask)
- & QCOW2_COMPRESSED_SECTOR_MASK;
+ uint64_t coffset;
+ int csize;
+
+ qcow2_parse_compressed_l2_entry(bs, entry,
+ &coffset, &csize);
ret = update_refcount(
- bs, coffset,
- nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE,
+ bs, coffset, csize,
abs(addend), addend < 0,
QCOW2_DISCARD_SNAPSHOT);
if (ret < 0) {
@@ -1588,6 +1588,66 @@ enum {
};
/*
+ * Fix L2 entry by making it QCOW2_CLUSTER_ZERO_PLAIN (or making all its present
+ * subclusters QCOW2_SUBCLUSTER_ZERO_PLAIN).
+ *
+ * This function decrements res->corruptions on success, so the caller is
+ * responsible to increment res->corruptions prior to the call.
+ *
+ * On failure in-memory @l2_table may be modified.
+ */
+static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
+ uint64_t l2_offset,
+ uint64_t *l2_table, int l2_index, bool active,
+ bool *metadata_overlap)
+{
+ BDRVQcow2State *s = bs->opaque;
+ int ret;
+ int idx = l2_index * (l2_entry_size(s) / sizeof(uint64_t));
+ uint64_t l2e_offset = l2_offset + (uint64_t)l2_index * l2_entry_size(s);
+ int ign = active ? QCOW2_OL_ACTIVE_L2 : QCOW2_OL_INACTIVE_L2;
+
+ if (has_subclusters(s)) {
+ uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, l2_index);
+
+ /* Allocated subclusters become zero */
+ l2_bitmap |= l2_bitmap << 32;
+ l2_bitmap &= QCOW_L2_BITMAP_ALL_ZEROES;
+
+ set_l2_bitmap(s, l2_table, l2_index, l2_bitmap);
+ set_l2_entry(s, l2_table, l2_index, 0);
+ } else {
+ set_l2_entry(s, l2_table, l2_index, QCOW_OFLAG_ZERO);
+ }
+
+ ret = qcow2_pre_write_overlap_check(bs, ign, l2e_offset, l2_entry_size(s),
+ false);
+ if (metadata_overlap) {
+ *metadata_overlap = ret < 0;
+ }
+ if (ret < 0) {
+ fprintf(stderr, "ERROR: Overlap check failed\n");
+ goto fail;
+ }
+
+ ret = bdrv_pwrite_sync(bs->file, l2e_offset, &l2_table[idx],
+ l2_entry_size(s));
+ if (ret < 0) {
+ fprintf(stderr, "ERROR: Failed to overwrite L2 "
+ "table entry: %s\n", strerror(-ret));
+ goto fail;
+ }
+
+ res->corruptions--;
+ res->corruptions_fixed++;
+ return 0;
+
+fail:
+ res->check_errors++;
+ return ret;
+}
+
+/*
* Increases the refcount in the given refcount table for the all clusters
* referenced in the L2 table. While doing so, performs some checks on L2
* entries.
@@ -1601,26 +1661,41 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
int flags, BdrvCheckMode fix, bool active)
{
BDRVQcow2State *s = bs->opaque;
- uint64_t *l2_table, l2_entry;
+ uint64_t l2_entry, l2_bitmap;
uint64_t next_contiguous_offset = 0;
- int i, l2_size, nb_csectors, ret;
+ int i, ret;
+ size_t l2_size_bytes = s->l2_size * l2_entry_size(s);
+ g_autofree uint64_t *l2_table = g_malloc(l2_size_bytes);
+ bool metadata_overlap;
/* Read L2 table from disk */
- l2_size = s->l2_size * l2_entry_size(s);
- l2_table = g_malloc(l2_size);
-
- ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
+ ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size_bytes);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
res->check_errors++;
- goto fail;
+ return ret;
}
/* Do the actual checks */
- for(i = 0; i < s->l2_size; i++) {
+ for (i = 0; i < s->l2_size; i++) {
+ uint64_t coffset;
+ int csize;
+ QCow2ClusterType type;
+
l2_entry = get_l2_entry(s, l2_table, i);
+ l2_bitmap = get_l2_bitmap(s, l2_table, i);
+ type = qcow2_get_cluster_type(bs, l2_entry);
+
+ if (type != QCOW2_CLUSTER_COMPRESSED) {
+ /* Check reserved bits of Standard Cluster Descriptor */
+ if (l2_entry & L2E_STD_RESERVED_MASK) {
+ fprintf(stderr, "ERROR found l2 entry with reserved bits set: "
+ "%" PRIx64 "\n", l2_entry);
+ res->corruptions++;
+ }
+ }
- switch (qcow2_get_cluster_type(bs, l2_entry)) {
+ switch (type) {
case QCOW2_CLUSTER_COMPRESSED:
/* Compressed clusters don't have QCOW_OFLAG_COPIED */
if (l2_entry & QCOW_OFLAG_COPIED) {
@@ -1638,23 +1713,28 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
break;
}
+ if (l2_bitmap) {
+ fprintf(stderr, "ERROR compressed cluster %d with non-zero "
+ "subcluster allocation bitmap, entry=0x%" PRIx64 "\n",
+ i, l2_entry);
+ res->corruptions++;
+ break;
+ }
+
/* Mark cluster as used */
- nb_csectors = ((l2_entry >> s->csize_shift) &
- s->csize_mask) + 1;
- l2_entry &= s->cluster_offset_mask;
+ qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
ret = qcow2_inc_refcounts_imrt(
- bs, res, refcount_table, refcount_table_size,
- l2_entry & QCOW2_COMPRESSED_SECTOR_MASK,
- nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE);
+ bs, res, refcount_table, refcount_table_size, coffset, csize);
if (ret < 0) {
- goto fail;
+ return ret;
}
if (flags & CHECK_FRAG_INFO) {
res->bfi.allocated_clusters++;
res->bfi.compressed_clusters++;
- /* Compressed clusters are fragmented by nature. Since they
+ /*
+ * Compressed clusters are fragmented by nature. Since they
* take up sub-sector space but we only have sector granularity
* I/O we need to re-read the same sectors even for adjacent
* compressed clusters.
@@ -1668,13 +1748,19 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
{
uint64_t offset = l2_entry & L2E_OFFSET_MASK;
+ if ((l2_bitmap >> 32) & l2_bitmap) {
+ res->corruptions++;
+ fprintf(stderr, "ERROR offset=%" PRIx64 ": Allocated "
+ "cluster has corrupted subcluster allocation bitmap\n",
+ offset);
+ }
+
/* Correct offsets are cluster aligned */
if (offset_into_cluster(s, offset)) {
bool contains_data;
res->corruptions++;
if (has_subclusters(s)) {
- uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, i);
contains_data = (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC);
} else {
contains_data = !(l2_entry & QCOW_OFLAG_ZERO);
@@ -1687,40 +1773,30 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
offset);
if (fix & BDRV_FIX_ERRORS) {
- int idx = i * (l2_entry_size(s) / sizeof(uint64_t));
- uint64_t l2e_offset =
- l2_offset + (uint64_t)i * l2_entry_size(s);
- int ign = active ? QCOW2_OL_ACTIVE_L2 :
- QCOW2_OL_INACTIVE_L2;
-
- l2_entry = has_subclusters(s) ? 0 : QCOW_OFLAG_ZERO;
- set_l2_entry(s, l2_table, i, l2_entry);
- ret = qcow2_pre_write_overlap_check(bs, ign,
- l2e_offset, l2_entry_size(s), false);
- if (ret < 0) {
- fprintf(stderr, "ERROR: Overlap check failed\n");
- res->check_errors++;
- /* Something is seriously wrong, so abort checking
- * this L2 table */
- goto fail;
+ ret = fix_l2_entry_by_zero(bs, res, l2_offset,
+ l2_table, i, active,
+ &metadata_overlap);
+ if (metadata_overlap) {
+ /*
+ * Something is seriously wrong, so abort checking
+ * this L2 table.
+ */
+ return ret;
}
- ret = bdrv_pwrite_sync(bs->file, l2e_offset,
- &l2_table[idx],
- l2_entry_size(s));
- if (ret < 0) {
- fprintf(stderr, "ERROR: Failed to overwrite L2 "
- "table entry: %s\n", strerror(-ret));
- res->check_errors++;
- /* Do not abort, continue checking the rest of this
- * L2 table's entries */
- } else {
- res->corruptions--;
- res->corruptions_fixed++;
- /* Skip marking the cluster as used
- * (it is unused now) */
+ if (ret == 0) {
+ /*
+ * Skip marking the cluster as used
+ * (it is unused now).
+ */
continue;
}
+
+ /*
+ * Failed to fix.
+ * Do not abort, continue checking the rest of this
+ * L2 table's entries.
+ */
}
} else {
fprintf(stderr, "ERROR offset=%" PRIx64 ": Data cluster is "
@@ -1743,14 +1819,23 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
refcount_table_size,
offset, s->cluster_size);
if (ret < 0) {
- goto fail;
+ return ret;
}
}
break;
}
case QCOW2_CLUSTER_ZERO_PLAIN:
+ /* Impossible when image has subclusters */
+ assert(!l2_bitmap);
+ break;
+
case QCOW2_CLUSTER_UNALLOCATED:
+ if (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC) {
+ res->corruptions++;
+ fprintf(stderr, "ERROR: Unallocated "
+ "cluster has non-zero subcluster allocation map\n");
+ }
break;
default:
@@ -1758,12 +1843,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
}
}
- g_free(l2_table);
return 0;
-
-fail:
- g_free(l2_table);
- return ret;
}
/*
@@ -1782,71 +1862,79 @@ static int check_refcounts_l1(BlockDriverState *bs,
int flags, BdrvCheckMode fix, bool active)
{
BDRVQcow2State *s = bs->opaque;
- uint64_t *l1_table = NULL, l2_offset, l1_size2;
+ size_t l1_size_bytes = l1_size * L1E_SIZE;
+ g_autofree uint64_t *l1_table = NULL;
+ uint64_t l2_offset;
int i, ret;
- l1_size2 = l1_size * L1E_SIZE;
+ if (!l1_size) {
+ return 0;
+ }
/* Mark L1 table as used */
ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, refcount_table_size,
- l1_table_offset, l1_size2);
+ l1_table_offset, l1_size_bytes);
if (ret < 0) {
- goto fail;
+ return ret;
+ }
+
+ l1_table = g_try_malloc(l1_size_bytes);
+ if (l1_table == NULL) {
+ res->check_errors++;
+ return -ENOMEM;
}
/* Read L1 table entries from disk */
- if (l1_size2 > 0) {
- l1_table = g_try_malloc(l1_size2);
- if (l1_table == NULL) {
- ret = -ENOMEM;
- res->check_errors++;
- goto fail;
- }
- ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
- if (ret < 0) {
- fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
- res->check_errors++;
- goto fail;
- }
- for(i = 0;i < l1_size; i++)
- be64_to_cpus(&l1_table[i]);
+ ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size_bytes);
+ if (ret < 0) {
+ fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
+ res->check_errors++;
+ return ret;
+ }
+
+ for (i = 0; i < l1_size; i++) {
+ be64_to_cpus(&l1_table[i]);
}
/* Do the actual checks */
- for(i = 0; i < l1_size; i++) {
- l2_offset = l1_table[i];
- if (l2_offset) {
- /* Mark L2 table as used */
- l2_offset &= L1E_OFFSET_MASK;
- ret = qcow2_inc_refcounts_imrt(bs, res,
- refcount_table, refcount_table_size,
- l2_offset, s->cluster_size);
- if (ret < 0) {
- goto fail;
- }
+ for (i = 0; i < l1_size; i++) {
+ if (!l1_table[i]) {
+ continue;
+ }
- /* L2 tables are cluster aligned */
- if (offset_into_cluster(s, l2_offset)) {
- fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
- "cluster aligned; L1 entry corrupted\n", l2_offset);
- res->corruptions++;
- }
+ if (l1_table[i] & L1E_RESERVED_MASK) {
+ fprintf(stderr, "ERROR found L1 entry with reserved bits set: "
+ "%" PRIx64 "\n", l1_table[i]);
+ res->corruptions++;
+ }
- /* Process and check L2 entries */
- ret = check_refcounts_l2(bs, res, refcount_table,
- refcount_table_size, l2_offset, flags,
- fix, active);
- if (ret < 0) {
- goto fail;
- }
+ l2_offset = l1_table[i] & L1E_OFFSET_MASK;
+
+ /* Mark L2 table as used */
+ ret = qcow2_inc_refcounts_imrt(bs, res,
+ refcount_table, refcount_table_size,
+ l2_offset, s->cluster_size);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* L2 tables are cluster aligned */
+ if (offset_into_cluster(s, l2_offset)) {
+ fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
+ "cluster aligned; L1 entry corrupted\n", l2_offset);
+ res->corruptions++;
+ }
+
+ /* Process and check L2 entries */
+ ret = check_refcounts_l2(bs, res, refcount_table,
+ refcount_table_size, l2_offset, flags,
+ fix, active);
+ if (ret < 0) {
+ return ret;
}
}
- g_free(l1_table);
- return 0;
-fail:
- g_free(l1_table);
- return ret;
+ return 0;
}
/*
@@ -2001,9 +2089,17 @@ static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
for(i = 0; i < s->refcount_table_size; i++) {
uint64_t offset, cluster;
- offset = s->refcount_table[i];
+ offset = s->refcount_table[i] & REFT_OFFSET_MASK;
cluster = offset >> s->cluster_bits;
+ if (s->refcount_table[i] & REFT_RESERVED_MASK) {
+ fprintf(stderr, "ERROR refcount table entry %" PRId64 " has "
+ "reserved bits set\n", i);
+ res->corruptions++;
+ *rebuild = true;
+ continue;
+ }
+
/* Refcount blocks are cluster aligned */
if (offset_into_cluster(s, offset)) {
fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
diff --git a/block/qcow2.c b/block/qcow2.c
index 9f1b6461c8..02f9f3e636 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -74,7 +74,7 @@ typedef struct {
static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
- uint64_t cluster_descriptor,
+ uint64_t l2_entry,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
@@ -2205,7 +2205,7 @@ typedef struct Qcow2AioTask {
BlockDriverState *bs;
QCow2SubclusterType subcluster_type; /* only for read */
- uint64_t host_offset; /* or full descriptor in compressed clusters */
+ uint64_t host_offset; /* or l2_entry for compressed read */
uint64_t offset;
uint64_t bytes;
QEMUIOVector *qiov;
@@ -4693,22 +4693,19 @@ qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
- uint64_t cluster_descriptor,
+ uint64_t l2_entry,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset)
{
BDRVQcow2State *s = bs->opaque;
- int ret = 0, csize, nb_csectors;
+ int ret = 0, csize;
uint64_t coffset;
uint8_t *buf, *out_buf;
int offset_in_cluster = offset_into_cluster(s, offset);
- coffset = cluster_descriptor & s->cluster_offset_mask;
- nb_csectors = ((cluster_descriptor >> s->csize_shift) & s->csize_mask) + 1;
- csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
- (coffset & ~QCOW2_COMPRESSED_SECTOR_MASK);
+ qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
buf = g_try_malloc(csize);
if (!buf) {
diff --git a/block/qcow2.h b/block/qcow2.h
index 0fe5f74ed3..fd48a89d45 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -110,7 +110,6 @@
/* Defined in the qcow2 spec (compressed cluster descriptor) */
#define QCOW2_COMPRESSED_SECTOR_SIZE 512U
-#define QCOW2_COMPRESSED_SECTOR_MASK (~(QCOW2_COMPRESSED_SECTOR_SIZE - 1ULL))
/* Must be at least 2 to cover COW */
#define MIN_L2_CACHE_SIZE 2 /* cache entries */
@@ -587,10 +586,12 @@ typedef enum QCow2MetadataOverlap {
(QCOW2_OL_CACHED | QCOW2_OL_INACTIVE_L2)
#define L1E_OFFSET_MASK 0x00fffffffffffe00ULL
+#define L1E_RESERVED_MASK 0x7f000000000001ffULL
#define L2E_OFFSET_MASK 0x00fffffffffffe00ULL
-#define L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL
+#define L2E_STD_RESERVED_MASK 0x3f000000000001feULL
#define REFT_OFFSET_MASK 0xfffffffffffffe00ULL
+#define REFT_RESERVED_MASK 0x1ffULL
#define INV_OFFSET (-1ULL)
@@ -914,6 +915,8 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset);
+void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
+ uint64_t *coffset, int *csize);
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
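
The new reserved-bit masks above can be cross-checked by construction: each
is the complement of all defined fields in the respective entry (all values
from this header):

    /* L1E_RESERVED_MASK     = ~(L1E_OFFSET_MASK | QCOW_OFLAG_COPIED)
     *                       = ~(0x00fffffffffffe00 | 1ULL << 63)
     *                       =   0x7f000000000001ff
     *
     * L2E_STD_RESERVED_MASK = ~(L2E_OFFSET_MASK | QCOW_OFLAG_COPIED |
     *                           QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO)
     *                       =   0x3f000000000001fe
     *
     * REFT_RESERVED_MASK    = ~REFT_OFFSET_MASK = 0x1ff
     */
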
diff --git a/bsd-user/i386/target_arch_cpu.c b/bsd-user/i386/target_arch_cpu.c
index 71998e5ba5..d349e45299 100644
--- a/bsd-user/i386/target_arch_cpu.c
+++ b/bsd-user/i386/target_arch_cpu.c
@@ -33,11 +33,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_host_ticks();
}
-int cpu_get_pic_interrupt(CPUX86State *env)
-{
- return -1;
-}
-
void bsd_i386_write_dt(void *ptr, unsigned long addr, unsigned long limit,
int flags)
{
diff --git a/bsd-user/x86_64/target_arch_cpu.c b/bsd-user/x86_64/target_arch_cpu.c
index db822e54c6..be7bd10720 100644
--- a/bsd-user/x86_64/target_arch_cpu.c
+++ b/bsd-user/x86_64/target_arch_cpu.c
@@ -33,11 +33,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_host_ticks();
}
-int cpu_get_pic_interrupt(CPUX86State *env)
-{
- return -1;
-}
-
void bsd_x86_64_write_dt(void *ptr, unsigned long addr,
unsigned long limit, int flags)
{
diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst
index fe6c30d509..d58980aef8 100644
--- a/docs/tools/qemu-img.rst
+++ b/docs/tools/qemu-img.rst
@@ -415,7 +415,7 @@ Command description:
4
Error on reading data
-.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
+.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F backing_fmt]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
Convert the disk image *FILENAME* or a snapshot *SNAPSHOT_PARAM*
to disk image *OUTPUT_FILENAME* using format *OUTPUT_FMT*. It can
@@ -439,7 +439,7 @@ Command description:
You can use the *BACKING_FILE* option to force the output image to be
created as a copy on write image of the specified base image; the
*BACKING_FILE* should have the same content as the input's base image,
- however the path, image format, etc may differ.
+ however the path, image format (as given by *BACKING_FMT*), etc may differ.
If a relative path name is given, the backing file is looked up relative to
the directory containing *OUTPUT_FILENAME*.
diff --git a/include/block/block_int.h b/include/block/block_int.h
index f1a54db0f8..5451f89b8d 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -34,6 +34,7 @@
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/throttle.h"
+#include "qemu/rcu.h"
#define BLOCK_FLAG_LAZY_REFCOUNTS 8
@@ -347,6 +348,15 @@ struct BlockDriver {
* clamped to bdrv_getlength() and aligned to request_alignment,
* as well as non-NULL pnum, map, and file; in turn, the driver
* must return an error or set pnum to an aligned non-zero value.
+ *
+ * Note that @bytes is just a hint on how big of a region the
+ * caller wants to inspect. It is not a limit on *pnum.
+ * Implementations are free to return larger values of *pnum if
+ * doing so does not incur a performance penalty.
+ *
+ * block/io.c's bdrv_co_block_status() will utilize an unclamped
+ * *pnum value for the block-status cache on protocol nodes, prior
+ * to clamping *pnum for return to its caller.
*/
int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
@@ -840,11 +850,23 @@ struct BdrvChild {
};
/*
- * Note: the function bdrv_append() copies and swaps contents of
- * BlockDriverStates, so if you add new fields to this struct, please
- * inspect bdrv_append() to determine if the new fields need to be
- * copied as well.
+ * Allows bdrv_co_block_status() to cache one data region for a
+ * protocol node.
+ *
+ * @valid: Whether the cache is valid (should be accessed with atomic
+ * functions so this can be reset by RCU readers)
+ * @data_start: Offset where we know (or strongly assume) is data
+ * @data_end: Offset where the data region ends (which is not necessarily
+ * the start of a zeroed region)
*/
+typedef struct BdrvBlockStatusCache {
+ struct rcu_head rcu;
+
+ bool valid;
+ int64_t data_start;
+ int64_t data_end;
+} BdrvBlockStatusCache;
+
struct BlockDriverState {
/* Protected by big QEMU lock or read-only after opening. No special
* locking needed during I/O...
@@ -1010,6 +1032,11 @@ struct BlockDriverState {
/* BdrvChild links to this node may never be frozen */
bool never_freeze;
+
+ /* Lock for block-status cache RCU writers */
+ CoMutex bsc_modify_lock;
+ /* Always non-NULL, but must only be dereferenced under an RCU read guard */
+ BdrvBlockStatusCache *block_status_cache;
};
struct BlockBackendRootState {
@@ -1435,4 +1462,30 @@ static inline BlockDriverState *bdrv_primary_bs(BlockDriverState *bs)
*/
void bdrv_drain_all_end_quiesce(BlockDriverState *bs);
+/**
+ * Check whether the given offset is in the cached block-status data
+ * region.
+ *
+ * If it is, and @pnum is not NULL, *pnum is set to
+ * `bsc.data_end - offset`, i.e. how many bytes, starting from
+ * @offset, are data (according to the cache).
+ * Otherwise, *pnum is not touched.
+ */
+bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum);
+
+/**
+ * If [offset, offset + bytes) overlaps with the currently cached
+ * block-status region, invalidate the cache.
+ *
+ * (To be used by I/O paths that cause data regions to be zero or
+ * holes.)
+ */
+void bdrv_bsc_invalidate_range(BlockDriverState *bs,
+ int64_t offset, int64_t bytes);
+
+/**
+ * Mark the range [offset, offset + bytes) as a data region.
+ */
+void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes);
+
#endif /* BLOCK_INT_H */
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
index a557b4e2bb..9f646389af 100644
--- a/include/exec/translate-all.h
+++ b/include/exec/translate-all.h
@@ -33,6 +33,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
#ifdef CONFIG_USER_ONLY
+void page_protect(tb_page_addr_t page_addr);
int page_unprotect(target_ulong address, uintptr_t pc);
#endif
diff --git a/include/exec/translator.h b/include/exec/translator.h
index d318803267..9bc46eda59 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -23,6 +23,7 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/plugin-gen.h"
+#include "exec/translate-all.h"
#include "tcg/tcg.h"
@@ -74,6 +75,17 @@ typedef struct DisasContextBase {
int num_insns;
int max_insns;
bool singlestep_enabled;
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Guest address of the last byte of the last protected page.
+ *
+ * Pages containing the translated instructions are made non-writable in
+ * order to achieve consistency in case another thread is modifying the
+ * code while translate_insn() fetches the instruction bytes piecemeal.
+ * Such writer threads are blocked on mmap_lock() in page_unprotect().
+ */
+ target_ulong page_protect_end;
+#endif
} DisasContextBase;
/**
@@ -156,27 +168,23 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
*/
#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
- static inline type \
- fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \
+ type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
+ abi_ptr pc, bool do_swap); \
+ static inline type fullname(CPUArchState *env, \
+ DisasContextBase *dcbase, abi_ptr pc) \
{ \
- type ret = load_fn(env, pc); \
- if (do_swap) { \
- ret = swap_fn(ret); \
- } \
- plugin_insn_append(&ret, sizeof(ret)); \
- return ret; \
- } \
- \
- static inline type fullname(CPUArchState *env, abi_ptr pc) \
- { \
- return fullname ## _swap(env, pc, false); \
+ return fullname ## _swap(env, dcbase, pc, false); \
}
-GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
-GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
-GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
-GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
-GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
+#define FOR_EACH_TRANSLATOR_LD(F) \
+ F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
+ F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
+ F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
+ F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
+ F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
+
+FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
+
#undef GEN_TRANSLATOR_LD
#endif /* EXEC__TRANSLATOR_H */
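
For clarity, what one row of the X-macro expands to after this change
(translator_ldl shown; the expansion is mechanical):

    /* Prototype for the out-of-line worker, now defined in
     * accel/tcg/translator.c by re-invoking the same macro list: */
    uint32_t translator_ldl_swap(CPUArchState *env, DisasContextBase *dcbase,
                                 abi_ptr pc, bool do_swap);

    /* Inline non-swapping convenience wrapper: */
    static inline uint32_t translator_ldl(CPUArchState *env,
                                          DisasContextBase *dcbase,
                                          abi_ptr pc)
    {
        return translator_ldl_swap(env, dcbase, pc, false);
    }
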
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index eab27d0c03..55123cb4d2 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -35,16 +35,6 @@ struct TCGCPUOps {
void (*cpu_exec_enter)(CPUState *cpu);
/** @cpu_exec_exit: Callback for cpu_exec cleanup */
void (*cpu_exec_exit)(CPUState *cpu);
- /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
- bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
- /**
- * @do_interrupt: Callback for interrupt handling.
- *
- * note that this is in general SOFTMMU only, but it actually isn't
- * because of an x86 hack (accel/tcg/cpu-exec.c), so we cannot put it
- * in the SOFTMMU section in general.
- */
- void (*do_interrupt)(CPUState *cpu);
/**
* @tlb_fill: Handle a softmmu tlb miss or user-only address fault
*
@@ -61,7 +51,23 @@ struct TCGCPUOps {
void (*debug_excp_handler)(CPUState *cpu);
#ifdef NEED_CPU_H
+#if defined(CONFIG_USER_ONLY) && defined(TARGET_I386)
+ /**
+ * @fake_user_interrupt: Callback for 'fake exception' handling.
+ *
+ * Simulate 'fake exception' which will be handled outside the
+ * cpu execution loop (hack for x86 user mode).
+ */
+ void (*fake_user_interrupt)(CPUState *cpu);
+#else
+ /**
+ * @do_interrupt: Callback for interrupt handling.
+ */
+ void (*do_interrupt)(CPUState *cpu);
+#endif /* !CONFIG_USER_ONLY || !TARGET_I386 */
#ifdef CONFIG_SOFTMMU
+ /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
/**
* @do_transaction_failed: Callback for handling failed memory transactions
* (ie bus faults or external aborts; not MMU faults)
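
A sketch of how a target fills these hooks under the new layout (modeled on
what the x86 patches in this series do; x86_cpu_do_interrupt and
x86_cpu_exec_interrupt are that target's handlers, other callbacks elided):

    static const struct TCGCPUOps x86_tcg_ops = {
        /* ... initialize, synchronize_from_tb, tlb_fill, ... */
    #if defined(CONFIG_USER_ONLY) && defined(TARGET_I386)
        /* user-mode x86: the exception is handled outside the exec loop */
        .fake_user_interrupt = x86_cpu_do_interrupt,
    #else
        .do_interrupt = x86_cpu_do_interrupt,
    #endif
    #ifdef CONFIG_SOFTMMU
        .cpu_exec_interrupt = x86_cpu_exec_interrupt,
    #endif
    };
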
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 2a654f350c..0545a6224c 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -843,7 +843,6 @@ static inline void tcg_gen_plugin_cb_end(void)
#if TARGET_LONG_BITS == 32
#define tcg_temp_new() tcg_temp_new_i32()
-#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new() tcg_temp_local_new_i32()
#define tcg_temp_free tcg_temp_free_i32
@@ -851,7 +850,6 @@ static inline void tcg_gen_plugin_cb_end(void)
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else
#define tcg_temp_new() tcg_temp_new_i64()
-#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new() tcg_temp_local_new_i64()
#define tcg_temp_free tcg_temp_free_i64
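
The tcg_global_reg_new alias had no remaining users (hppa's local defines for
it are removed later in this series), so it can go; TCG globals are created
with the memory-backed variant instead. A minimal sketch of the surviving
idiom, using a hypothetical CPU-state field and name:

    /* Hypothetical example: expose env->foo as a named TCG global. */
    static TCGv cpu_foo;

    static void example_translate_init(void)
    {
        cpu_foo = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUArchState, foo), "foo");
    }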
diff --git a/linux-user/main.c b/linux-user/main.c
index a6094563b6..45bde4598d 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -120,13 +120,6 @@ const char *qemu_uname_release;
by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
-#if defined(TARGET_I386)
-int cpu_get_pic_interrupt(CPUX86State *env)
-{
- return -1;
-}
-#endif
-
/***********************************************************/
/* Helper routines for implementing atomic operations. */
diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx
index b3620f29e5..4c4d94ab22 100644
--- a/qemu-img-cmds.hx
+++ b/qemu-img-cmds.hx
@@ -46,7 +46,7 @@ SRST
ERST
DEF("convert", img_convert,
- "convert [--object objectdef] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file] [-o options] [-l snapshot_param] [-S sparse_size] [-r rate_limit] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename")
+ "convert [--object objectdef] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file [-F backing_fmt]] [-o options] [-l snapshot_param] [-S sparse_size] [-r rate_limit] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename")
SRST
.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] [--salvage] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
ERST
diff --git a/qemu-img.c b/qemu-img.c
index d77f3e76a9..f036a1d428 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -2183,7 +2183,8 @@ static int img_convert(int argc, char **argv)
int c, bs_i, flags, src_flags = BDRV_O_NO_SHARE;
const char *fmt = NULL, *out_fmt = NULL, *cache = "unsafe",
*src_cache = BDRV_DEFAULT_CACHE, *out_baseimg = NULL,
- *out_filename, *out_baseimg_param, *snapshot_name = NULL;
+ *out_filename, *out_baseimg_param, *snapshot_name = NULL,
+ *backing_fmt = NULL;
BlockDriver *drv = NULL, *proto_drv = NULL;
BlockDriverInfo bdi;
BlockDriverState *out_bs;
@@ -2223,7 +2224,7 @@ static int img_convert(int argc, char **argv)
{"skip-broken-bitmaps", no_argument, 0, OPTION_SKIP_BROKEN},
{0, 0, 0, 0}
};
- c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WUr:",
+ c = getopt_long(argc, argv, ":hf:O:B:CcF:o:l:S:pt:T:qnm:WUr:",
long_options, NULL);
if (c == -1) {
break;
@@ -2253,6 +2254,9 @@ static int img_convert(int argc, char **argv)
case 'c':
s.compressed = true;
break;
+ case 'F':
+ backing_fmt = optarg;
+ break;
case 'o':
if (accumulate_options(&options, optarg) < 0) {
goto fail_getopt;
@@ -2521,7 +2525,7 @@ static int img_convert(int argc, char **argv)
qemu_opt_set_number(opts, BLOCK_OPT_SIZE,
s.total_sectors * BDRV_SECTOR_SIZE, &error_abort);
- ret = add_old_style_options(out_fmt, opts, out_baseimg, NULL);
+ ret = add_old_style_options(out_fmt, opts, out_baseimg, backing_fmt);
if (ret < 0) {
goto out;
}
@@ -2628,6 +2632,14 @@ static int img_convert(int argc, char **argv)
goto out;
}
+ if (flags & BDRV_O_NOCACHE) {
+ /*
+ * If we open the target with O_DIRECT, it may be necessary to
+ * extend its size to align to the physical sector size.
+ */
+ flags |= BDRV_O_RESIZE;
+ }
+
if (skip_create) {
s.target = img_open(tgt_image_opts, out_filename, out_fmt,
flags, writethrough, s.quiet, false);
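
Passing backing_fmt through instead of NULL lets the -F value reach the
creation options. A hedged sketch of what add_old_style_options does with it
(the function is real, per the call site above; its body here is assumed):

    static int add_old_style_options(const char *fmt, QemuOpts *opts,
                                     const char *base_filename,
                                     const char *base_fmt)
    {
        /* Record the backing format explicitly, if the caller gave one. */
        if (base_fmt &&
            qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt, NULL) < 0) {
            error_report("Backing file format not supported for file "
                         "format '%s'", fmt);
            return -1;
        }
        /* base_filename is handled the same way via BLOCK_OPT_BACKING_FILE. */
        return 0;
    }

With this, something like `qemu-img convert -B base.qcow2 -F qcow2 ...`
records the backing format in the target instead of leaving it to be probed
later.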
diff --git a/scripts/simplebench/img_bench_templater.py b/scripts/simplebench/img_bench_templater.py
new file mode 100755
index 0000000000..f8e1540ada
--- /dev/null
+++ b/scripts/simplebench/img_bench_templater.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+#
+# Process img-bench test templates
+#
+# Copyright (c) 2021 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys
+import subprocess
+import re
+import json
+
+import simplebench
+from results_to_text import results_to_text
+from table_templater import Templater
+
+
+def bench_func(env, case):
+ test = templater.gen(env['data'], case['data'])
+
+ p = subprocess.run(test, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, universal_newlines=True)
+
+ if p.returncode == 0:
+ try:
+            m = re.search(r'Run completed in (\d+\.\d+) seconds\.', p.stdout)
+ return {'seconds': float(m.group(1))}
+ except Exception:
+ return {'error': f'failed to parse qemu-img output: {p.stdout}'}
+ else:
+ return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
+
+
+if __name__ == '__main__':
+ if len(sys.argv) > 1:
+ print("""
+Usage: img_bench_templater.py < path/to/test-template.sh
+
+This script generates performance tests from a test template (example below),
+runs them, and displays the results in a table. The template is read from
+stdin. It must be written in bash and end with a `qemu-img bench` invocation
+(whose result is parsed to get the test instance’s result).
+
+Use the following syntax in the template to create the different test
+instances:
+
+  column templating: {var1|var2|...} - the test will use different values
+  in different columns. You may use several {} constructions in the test,
+  in which case the product of all choice sets is used.
+
+  row templating: [var1|var2|...] - the same syntax defines rows (test cases)
+
+Test template example:
+
+Assume you want to compare two qemu-img binaries, called qemu-img-old and
+qemu-img-new, in your build directory, across two test cases: 4K writes and
+64K writes. The template may look like this:
+
+qemu_img=/path/to/qemu/build/qemu-img-{old|new}
+$qemu_img create -f qcow2 /ssd/x.qcow2 1G
+$qemu_img bench -c 100 -d 8 [-s 4K|-s 64K] -w -t none -n /ssd/x.qcow2
+
+When passed on stdin of img_bench_templater.py, the resulting comparison
+table will contain two columns (for the two binaries) and two rows (for the
+two test cases).
+
+In addition to displaying the results, the script also stores them in JSON
+format in the results.json file in the current directory.
+""")
+ sys.exit()
+
+ templater = Templater(sys.stdin.read())
+
+ envs = [{'id': ' / '.join(x), 'data': x} for x in templater.columns]
+ cases = [{'id': ' / '.join(x), 'data': x} for x in templater.rows]
+
+ result = simplebench.bench(bench_func, envs, cases, count=5,
+ initial_run=False)
+ print(results_to_text(result))
+ with open('results.json', 'w') as f:
+ json.dump(result, f, indent=4)
diff --git a/scripts/simplebench/table_templater.py b/scripts/simplebench/table_templater.py
new file mode 100644
index 0000000000..950f3b3024
--- /dev/null
+++ b/scripts/simplebench/table_templater.py
@@ -0,0 +1,62 @@
+# Parser for test templates
+#
+# Copyright (c) 2021 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import itertools
+from lark import Lark
+
+grammar = """
+start: ( text | column_switch | row_switch )+
+
+column_switch: "{" text ["|" text]+ "}"
+row_switch: "[" text ["|" text]+ "]"
+text: /[^|{}\[\]]+/
+"""
+
+parser = Lark(grammar)
+
+class Templater:
+ def __init__(self, template):
+ self.tree = parser.parse(template)
+
+ c_switches = []
+ r_switches = []
+ for x in self.tree.children:
+ if x.data == 'column_switch':
+ c_switches.append([el.children[0].value for el in x.children])
+ elif x.data == 'row_switch':
+ r_switches.append([el.children[0].value for el in x.children])
+
+ self.columns = list(itertools.product(*c_switches))
+ self.rows = list(itertools.product(*r_switches))
+
+ def gen(self, column, row):
+ i = 0
+ j = 0
+ result = []
+
+ for x in self.tree.children:
+ if x.data == 'text':
+ result.append(x.children[0].value)
+ elif x.data == 'column_switch':
+ result.append(column[i])
+ i += 1
+ elif x.data == 'row_switch':
+ result.append(row[j])
+ j += 1
+
+ return ''.join(result)
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index 4871ad0c0a..93e16a2ffb 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -218,10 +218,10 @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
static const struct TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
- .cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.tlb_fill = alpha_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.do_interrupt = alpha_cpu_do_interrupt,
.do_transaction_failed = alpha_cpu_do_transaction_failed,
.do_unaligned_access = alpha_cpu_do_unaligned_access,
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index 82df108967..4e993bd15b 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -274,10 +274,10 @@ struct AlphaCPU {
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_alpha_cpu;
-#endif
void alpha_cpu_do_interrupt(CPUState *cpu);
bool alpha_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags);
hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
index 4f56fe4d23..81550d9e2f 100644
--- a/target/alpha/helper.c
+++ b/target/alpha/helper.c
@@ -293,7 +293,6 @@ bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
-#endif /* USER_ONLY */
void alpha_cpu_do_interrupt(CPUState *cs)
{
@@ -348,7 +347,6 @@ void alpha_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
-#if !defined(CONFIG_USER_ONLY)
switch (i) {
case EXCP_RESET:
i = 0x0000;
@@ -404,7 +402,6 @@ void alpha_cpu_do_interrupt(CPUState *cs)
/* Switch to PALmode. */
env->flags |= ENV_FLAG_PAL_MODE;
-#endif /* !USER_ONLY */
}
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@@ -451,6 +448,8 @@ bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
static const char linux_reg_names[31][4] = {
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index de6c0a8439..b034206688 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -2971,7 +2971,7 @@ static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPUAlphaState *env = cpu->env_ptr;
- uint32_t insn = translator_ldl(env, ctx->base.pc_next);
+ uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
ctx->base.pc_next += 4;
ctx->base.is_jmp = translate_one(ctx, insn);
diff --git a/target/arm/arm_ldst.h b/target/arm/arm_ldst.h
index 057160e8da..cee0548a1c 100644
--- a/target/arm/arm_ldst.h
+++ b/target/arm/arm_ldst.h
@@ -24,15 +24,15 @@
#include "qemu/bswap.h"
/* Load an instruction and return it in the standard little-endian order */
-static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
- bool sctlr_b)
+static inline uint32_t arm_ldl_code(CPUARMState *env, DisasContextBase *s,
+ target_ulong addr, bool sctlr_b)
{
- return translator_ldl_swap(env, addr, bswap_code(sctlr_b));
+ return translator_ldl_swap(env, s, addr, bswap_code(sctlr_b));
}
/* Ditto, for a halfword (Thumb) instruction */
-static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
- bool sctlr_b)
+static inline uint16_t arm_lduw_code(CPUARMState *env, DisasContextBase *s,
+ target_ulong addr, bool sctlr_b)
{
#ifndef CONFIG_USER_ONLY
/* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped
@@ -41,7 +41,7 @@ static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
addr ^= 2;
}
#endif
- return translator_lduw_swap(env, addr, bswap_code(sctlr_b));
+ return translator_lduw_swap(env, s, addr, bswap_code(sctlr_b));
}
#endif
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index d631c4683c..ba0741b20e 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -440,6 +440,8 @@ static void arm_cpu_reset(DeviceState *dev)
arm_rebuild_hflags(env);
}
+#ifndef CONFIG_USER_ONLY
+
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el,
unsigned int cur_el, bool secure,
@@ -556,7 +558,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return unmasked || pstate_unmasked;
}
-bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
CPUARMState *env = cs->env_ptr;
@@ -608,6 +610,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
cc->tcg_ops->do_interrupt(cs);
return true;
}
+#endif /* !CONFIG_USER_ONLY */
void arm_cpu_update_virq(ARMCPU *cpu)
{
@@ -2010,11 +2013,11 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
static const struct TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = arm_cpu_exec_interrupt,
.tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,
#if !defined(CONFIG_USER_ONLY)
+ .cpu_exec_interrupt = arm_cpu_exec_interrupt,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index fb0ef1ee2c..09d9027734 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1040,11 +1040,10 @@ uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_arm_cpu;
-#endif
void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
-bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index 33cc75af57..0d5adccf1a 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -22,7 +22,7 @@
/* CPU models. These are not needed for the AArch64 linux-user build. */
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
-#ifdef CONFIG_TCG
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
@@ -46,7 +46,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return ret;
}
-#endif /* CONFIG_TCG */
+#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
static void arm926_initfn(Object *obj)
{
@@ -898,11 +898,11 @@ static void pxa270c5_initfn(Object *obj)
static const struct TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,
#if !defined(CONFIG_USER_ONLY)
+ .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 333bc836b2..ab6b346e35 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14772,7 +14772,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}
s->pc_curr = s->base.pc_next;
- insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
+ insn = arm_ldl_code(env, &s->base, s->base.pc_next, s->sctlr_b);
s->insn = insn;
s->base.pc_next += 4;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 435c659723..caefb1e1a1 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9312,7 +9312,7 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
* boundary, so we cross the page if the first 16 bits indicate
* that this is a 32 bit insn.
*/
- uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
+ uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b);
return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
@@ -9551,7 +9551,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}
dc->pc_curr = dc->base.pc_next;
- insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
+ insn = arm_ldl_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
dc->insn = insn;
dc->base.pc_next += 4;
disas_arm_insn(dc, insn);
@@ -9621,11 +9621,12 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}
dc->pc_curr = dc->base.pc_next;
- insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
+ insn = arm_lduw_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
dc->base.pc_next += 2;
if (!is_16bit) {
- uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
+ uint32_t insn2 = arm_lduw_code(env, &dc->base, dc->base.pc_next,
+ dc->sctlr_b);
insn = insn << 16 | insn2;
dc->base.pc_next += 2;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index ea14175ca5..5d70e34dd5 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -197,10 +197,7 @@ static const struct TCGCPUOps avr_tcg_ops = {
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
.tlb_fill = avr_cpu_tlb_fill,
-
-#ifndef CONFIG_USER_ONLY
.do_interrupt = avr_cpu_do_interrupt,
-#endif /* !CONFIG_USER_ONLY */
};
static void avr_cpu_class_init(ObjectClass *oc, void *data)
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index 70932b1f8c..c2e7483f5b 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -205,20 +205,20 @@ static const struct SysemuCPUOps cris_sysemu_ops = {
static const struct TCGCPUOps crisv10_tcg_ops = {
.initialize = cris_initialize_crisv10_tcg,
- .cpu_exec_interrupt = cris_cpu_exec_interrupt,
.tlb_fill = cris_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = cris_cpu_exec_interrupt,
.do_interrupt = crisv10_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static const struct TCGCPUOps crisv32_tcg_ops = {
.initialize = cris_initialize_tcg,
- .cpu_exec_interrupt = cris_cpu_exec_interrupt,
.tlb_fill = cris_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = cris_cpu_exec_interrupt,
.do_interrupt = cris_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
index d3b6492909..be021899ae 100644
--- a/target/cris/cpu.h
+++ b/target/cris/cpu.h
@@ -185,11 +185,11 @@ struct CRISCPU {
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_cris_cpu;
-#endif
void cris_cpu_do_interrupt(CPUState *cpu);
void crisv10_cpu_do_interrupt(CPUState *cpu);
bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags);
diff --git a/target/cris/helper.c b/target/cris/helper.c
index 911867f3b4..36926faf32 100644
--- a/target/cris/helper.c
+++ b/target/cris/helper.c
@@ -41,20 +41,6 @@
#if defined(CONFIG_USER_ONLY)
-void cris_cpu_do_interrupt(CPUState *cs)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
- CPUCRISState *env = &cpu->env;
-
- cs->exception_index = -1;
- env->pregs[PR_ERP] = env->pc;
-}
-
-void crisv10_cpu_do_interrupt(CPUState *cs)
-{
- cris_cpu_do_interrupt(cs);
-}
-
bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -287,7 +273,6 @@ hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
D(fprintf(stderr, "%s %x -> %x\n", __func__, addr, phy));
return phy;
}
-#endif
bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -319,3 +304,5 @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return ret;
}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index 54fdcaa5e8..6fb4e6853c 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -112,7 +112,8 @@ static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t));
for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
words[nwords] =
- translator_ldl(env, ctx->base.pc_next + nwords * sizeof(uint32_t));
+ translator_ldl(env, &ctx->base,
+ ctx->base.pc_next + nwords * sizeof(uint32_t));
found_end = is_packet_end(words[nwords]);
}
if (!found_end) {
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 2eace4ee12..e8edd189bf 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -144,10 +144,10 @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
static const struct TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.tlb_fill = hppa_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 748270bfa3..7854675b90 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -325,13 +325,13 @@ int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-void hppa_cpu_do_interrupt(CPUState *cpu);
-bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#ifndef CONFIG_USER_ONLY
+void hppa_cpu_do_interrupt(CPUState *cpu);
+bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
int type, hwaddr *pphys, int *pprot);
extern const MemoryRegionOps hppa_io_eir_ops;
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index 349495d361..13073ae2bd 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -88,7 +88,6 @@ void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val)
eval_interrupt(env_archcpu(env));
qemu_mutex_unlock_iothread();
}
-#endif /* !CONFIG_USER_ONLY */
void hppa_cpu_do_interrupt(CPUState *cs)
{
@@ -100,7 +99,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)
uint64_t iasq_f = env->iasq_f;
uint64_t iasq_b = env->iasq_b;
-#ifndef CONFIG_USER_ONLY
target_ureg old_psw;
/* As documented in pa2.0 -- interruption handling. */
@@ -187,7 +185,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)
env->iaoq_b = env->iaoq_f + 4;
env->iasq_f = 0;
env->iasq_b = 0;
-#endif
if (qemu_loglevel_mask(CPU_LOG_INT)) {
static const char * const names[] = {
@@ -248,7 +245,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)
bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
-#ifndef CONFIG_USER_ONLY
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
@@ -258,6 +254,7 @@ bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
hppa_cpu_do_interrupt(cs);
return true;
}
-#endif
return false;
}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index b18150ef8d..c3698cf067 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -34,7 +34,6 @@
#undef TCGv
#undef tcg_temp_new
-#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free
@@ -59,7 +58,6 @@
#define TCGv_reg TCGv_i64
#define tcg_temp_new tcg_temp_new_i64
-#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64
@@ -155,7 +153,6 @@
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
-#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32
@@ -4177,7 +4174,7 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
/* Always fetch the insn, even if nullified, so that we check
the page permissions for execute. */
- uint32_t insn = translator_ldl(env, ctx->base.pc_next);
+ uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
/* Set up the IA queue for the next insn.
This will be overwritten by a branch. */
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 71ae3141c3..7dd664791a 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1836,12 +1836,15 @@ int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
+#ifndef CONFIG_USER_ONLY
int cpu_get_pic_interrupt(CPUX86State *s);
+
/* MSDOS compatibility mode FPU exception support */
void x86_register_ferr_irq(qemu_irq irq);
void fpu_check_raise_ferr_irq(CPUX86State *s);
void cpu_set_ignne(void);
void cpu_clear_ignne(void);
+#endif
/* mpx_helper.c */
void cpu_sync_bndcs_hflags(CPUX86State *env);
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 2510cc244e..60ca09e95e 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -38,7 +38,9 @@ QEMU_BUILD_BUG_ON(TCG_PHYS_ADDR_BITS > TARGET_PHYS_ADDR_SPACE_BITS);
* @cpu: vCPU the interrupt is to be handled by.
*/
void x86_cpu_do_interrupt(CPUState *cpu);
+#ifndef CONFIG_USER_ONLY
bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif
/* helper.c */
bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index cef68b610a..baa905a0cd 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -929,9 +929,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
e2);
env->eip = offset;
}
-#endif
-#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
int cpl, selector;
@@ -984,7 +982,7 @@ void helper_sysret(CPUX86State *env, int dflag)
DESC_W_MASK | DESC_A_MASK);
}
}
-#endif
+#endif /* TARGET_X86_64 */
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
@@ -1112,76 +1110,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}
-bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
- int intno;
-
- interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
- if (!interrupt_request) {
- return false;
- }
-
- /* Don't process multiple interrupt requests in a single call.
- * This is required to make icount-driven execution deterministic.
- */
- switch (interrupt_request) {
-#if !defined(CONFIG_USER_ONLY)
- case CPU_INTERRUPT_POLL:
- cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
- apic_poll_irq(cpu->apic_state);
- break;
-#endif
- case CPU_INTERRUPT_SIPI:
- do_cpu_sipi(cpu);
- break;
- case CPU_INTERRUPT_SMI:
- cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
-#ifdef CONFIG_USER_ONLY
- cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
-#else
- do_smm_enter(cpu);
-#endif /* CONFIG_USER_ONLY */
- break;
- case CPU_INTERRUPT_NMI:
- cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
- env->hflags2 |= HF2_NMI_MASK;
- do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
- break;
- case CPU_INTERRUPT_MCE:
- cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
- do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
- break;
- case CPU_INTERRUPT_HARD:
- cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
- cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_VIRQ);
- intno = cpu_get_pic_interrupt(env);
- qemu_log_mask(CPU_LOG_TB_IN_ASM,
- "Servicing hardware INT=0x%02x\n", intno);
- do_interrupt_x86_hardirq(env, intno, 1);
- break;
-#if !defined(CONFIG_USER_ONLY)
- case CPU_INTERRUPT_VIRQ:
- cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
- intno = x86_ldl_phys(cs, env->vm_vmcb
- + offsetof(struct vmcb, control.int_vector));
- qemu_log_mask(CPU_LOG_TB_IN_ASM,
- "Servicing virtual hardware INT=0x%02x\n", intno);
- do_interrupt_x86_hardirq(env, intno, 1);
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
- env->int_ctl &= ~V_IRQ_MASK;
- break;
-#endif
- }
-
- /* Ensure that no TB jump will be modified as the program flow was changed. */
- return true;
-}
-
void helper_lldt(CPUX86State *env, int selector)
{
SegmentCache *dt;
diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c
index 82c0856c41..bf3444c26b 100644
--- a/target/i386/tcg/sysemu/seg_helper.c
+++ b/target/i386/tcg/sysemu/seg_helper.c
@@ -125,6 +125,68 @@ void x86_cpu_do_interrupt(CPUState *cs)
}
}
+bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int intno;
+
+ interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
+ if (!interrupt_request) {
+ return false;
+ }
+
+ /* Don't process multiple interrupt requests in a single call.
+ * This is required to make icount-driven execution deterministic.
+ */
+ switch (interrupt_request) {
+ case CPU_INTERRUPT_POLL:
+ cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ break;
+ case CPU_INTERRUPT_SIPI:
+ do_cpu_sipi(cpu);
+ break;
+ case CPU_INTERRUPT_SMI:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ do_smm_enter(cpu);
+ break;
+ case CPU_INTERRUPT_NMI:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ env->hflags2 |= HF2_NMI_MASK;
+ do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+ break;
+ case CPU_INTERRUPT_MCE:
+ cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+ break;
+ case CPU_INTERRUPT_HARD:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
+ cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_VIRQ);
+ intno = cpu_get_pic_interrupt(env);
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ break;
+ case CPU_INTERRUPT_VIRQ:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
+ intno = x86_ldl_phys(cs, env->vm_vmcb
+ + offsetof(struct vmcb, control.int_vector));
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing virtual hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ env->int_ctl &= ~V_IRQ_MASK;
+ break;
+ }
+
+ /* Ensure that no TB jump will be modified as the program flow was changed. */
+ return true;
+}
+
/* check if Port I/O is allowed in TSS */
void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
{
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 93a79a5741..3ecfae34cb 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -72,10 +72,12 @@ static const struct TCGCPUOps x86_tcg_ops = {
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.cpu_exec_enter = x86_cpu_exec_enter,
.cpu_exec_exit = x86_cpu_exec_exit,
- .cpu_exec_interrupt = x86_cpu_exec_interrupt,
- .do_interrupt = x86_cpu_do_interrupt,
.tlb_fill = x86_cpu_tlb_fill,
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ .fake_user_interrupt = x86_cpu_do_interrupt,
+#else
+ .do_interrupt = x86_cpu_do_interrupt,
+ .cpu_exec_interrupt = x86_cpu_exec_interrupt,
.debug_excp_handler = breakpoint_handler,
.debug_check_breakpoint = x86_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index aacb605eee..a46be75b00 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -2028,28 +2028,28 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
- return translator_ldub(env, advance_pc(env, s, 1));
+ return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}
static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
- return translator_ldsw(env, advance_pc(env, s, 2));
+ return translator_ldsw(env, &s->base, advance_pc(env, s, 2));
}
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
- return translator_lduw(env, advance_pc(env, s, 2));
+ return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}
static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
- return translator_ldl(env, advance_pc(env, s, 4));
+ return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}
#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
- return translator_ldq(env, advance_pc(env, s, 8));
+ return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 72de6e9726..66d22d1189 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -515,10 +515,10 @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
static const struct TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
- .cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.tlb_fill = m68k_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.do_interrupt = m68k_cpu_do_interrupt,
.do_transaction_failed = m68k_cpu_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index 997d588911..550eb028b6 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -166,8 +166,10 @@ struct M68kCPU {
};
+#ifndef CONFIG_USER_ONLY
void m68k_cpu_do_interrupt(CPUState *cpu);
bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
void m68k_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index d006d1cb3e..5d624838ae 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -24,18 +24,7 @@
#include "semihosting/semihost.h"
#include "tcg/tcg.h"
-#if defined(CONFIG_USER_ONLY)
-
-void m68k_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = -1;
-}
-
-static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
-{
-}
-
-#else
+#if !defined(CONFIG_USER_ONLY)
static void cf_rte(CPUM68KState *env)
{
@@ -516,7 +505,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
cpu_loop_exit(cs);
}
}
-#endif
bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -538,6 +526,8 @@ bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
{
CPUState *cs = env_cpu(env);
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index c34d9aed61..50a55f949c 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -415,7 +415,7 @@ static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
{
uint16_t im;
- im = translator_lduw(env, s->pc);
+ im = translator_lduw(env, &s->base, s->pc);
s->pc += 2;
return im;
}
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 72d8f2a0da..15db277925 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -365,10 +365,10 @@ static const struct SysemuCPUOps mb_sysemu_ops = {
static const struct TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = mb_cpu_exec_interrupt,
.tlb_fill = mb_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = mb_cpu_exec_interrupt,
.do_interrupt = mb_cpu_do_interrupt,
.do_transaction_failed = mb_cpu_transaction_failed,
.do_unaligned_access = mb_cpu_do_unaligned_access,
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index e4bba8a755..40401c33b7 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -355,8 +355,10 @@ struct MicroBlazeCPU {
};
+#ifndef CONFIG_USER_ONLY
void mb_cpu_do_interrupt(CPUState *cs);
bool mb_cpu_exec_interrupt(CPUState *cs, int int_req);
+#endif /* !CONFIG_USER_ONLY */
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 20dbd67313..dd2aecd1d5 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -26,16 +26,6 @@
#if defined(CONFIG_USER_ONLY)
-void mb_cpu_do_interrupt(CPUState *cs)
-{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- CPUMBState *env = &cpu->env;
-
- cs->exception_index = -1;
- env->res_addr = RES_ADDR_NONE;
- env->regs[14] = env->pc;
-}
-
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -271,7 +261,6 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
return paddr;
}
-#endif
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -289,6 +278,8 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index d426918291..00e0c55d0e 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -539,10 +539,10 @@ static const struct SysemuCPUOps mips_sysemu_ops = {
static const struct TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = mips_cpu_exec_interrupt,
.tlb_fill = mips_cpu_tlb_fill,
#if !defined(CONFIG_USER_ONLY)
+ .cpu_exec_interrupt = mips_cpu_exec_interrupt,
.do_interrupt = mips_cpu_do_interrupt,
.do_transaction_failed = mips_cpu_do_transaction_failed,
.do_unaligned_access = mips_cpu_do_unaligned_access,
diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c
index 4fb8b00711..7b3026b105 100644
--- a/target/mips/tcg/exception.c
+++ b/target/mips/tcg/exception.c
@@ -86,24 +86,6 @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb)
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
}
-bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
-
- if (cpu_mips_hw_interrupts_enabled(env) &&
- cpu_mips_hw_interrupts_pending(env)) {
- /* Raise it */
- cs->exception_index = EXCP_EXT_INTERRUPT;
- env->error_code = 0;
- mips_cpu_do_interrupt(cs);
- return true;
- }
- }
- return false;
-}
-
static const char * const excp_names[EXCP_LAST + 1] = {
[EXCP_RESET] = "reset",
[EXCP_SRESET] = "soft reset",
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index 5e95f47854..0da4c802a3 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -1627,7 +1627,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
uint32_t op, minor, minor2, mips32_op;
uint32_t cond, fmt, cc;
- insn = translator_lduw(env, ctx->base.pc_next + 2);
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
ctx->opcode = (ctx->opcode << 16) | insn;
rt = (ctx->opcode >> 21) & 0x1f;
diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc
index 54071813f1..84d816603a 100644
--- a/target/mips/tcg/mips16e_translate.c.inc
+++ b/target/mips/tcg/mips16e_translate.c.inc
@@ -455,7 +455,7 @@ static void decode_i64_mips16(DisasContext *ctx,
static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx)
{
- int extend = translator_lduw(env, ctx->base.pc_next + 2);
+ int extend = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
int op, rx, ry, funct, sa;
int16_t imm, offset;
@@ -688,7 +688,7 @@ static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx)
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_JAL:
- offset = translator_lduw(env, ctx->base.pc_next + 2);
+ offset = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
offset = (((ctx->opcode & 0x1f) << 21)
| ((ctx->opcode >> 5) & 0x1f) << 16
| offset) << 2;
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index a66ae26796..ccbcecad09 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -3656,7 +3656,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
int offset;
int imm;
- insn = translator_lduw(env, ctx->base.pc_next + 2);
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
ctx->opcode = (ctx->opcode << 16) | insn;
rt = extract32(ctx->opcode, 21, 5);
@@ -3775,7 +3775,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
break;
case NM_P48I:
{
- insn = translator_lduw(env, ctx->base.pc_next + 4);
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 4);
target_long addr_off = extract32(ctx->opcode, 0, 16) | insn << 16;
switch (extract32(ctx->opcode, 16, 5)) {
case NM_LI48:
diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c
index a150a014ec..73254d1929 100644
--- a/target/mips/tcg/sysemu/tlb_helper.c
+++ b/target/mips/tcg/sysemu/tlb_helper.c
@@ -1339,6 +1339,24 @@ void mips_cpu_do_interrupt(CPUState *cs)
cs->exception_index = EXCP_NONE;
}
+bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ if (cpu_mips_hw_interrupts_enabled(env) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ /* Raise it */
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ env->error_code = 0;
+ mips_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
CPUState *cs = env_cpu(env);
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index 81b14eb219..c7a77ddccd 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -18,8 +18,6 @@
void mips_tcg_init(void);
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
-void mips_cpu_do_interrupt(CPUState *cpu);
-bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
@@ -41,6 +39,9 @@ static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
#if !defined(CONFIG_USER_ONLY)
+void mips_cpu_do_interrupt(CPUState *cpu);
+bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
void mmu_init(CPUMIPSState *env, const mips_def_t *def);
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index 6f4a9a839c..148afec9dc 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -16041,17 +16041,17 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
is_slot = ctx->hflags & MIPS_HFLAG_BMASK;
if (ctx->insn_flags & ISA_NANOMIPS32) {
- ctx->opcode = translator_lduw(env, ctx->base.pc_next);
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_isa_nanomips(env, ctx);
} else if (!(ctx->hflags & MIPS_HFLAG_M16)) {
- ctx->opcode = translator_ldl(env, ctx->base.pc_next);
+ ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next);
insn_bytes = 4;
decode_opc(env, ctx);
} else if (ctx->insn_flags & ASE_MICROMIPS) {
- ctx->opcode = translator_lduw(env, ctx->base.pc_next);
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_isa_micromips(env, ctx);
} else if (ctx->insn_flags & ASE_MIPS16) {
- ctx->opcode = translator_lduw(env, ctx->base.pc_next);
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_ase_mips16e(env, ctx);
} else {
gen_reserved_instruction(ctx);
diff --git a/target/mips/tcg/user/tlb_helper.c b/target/mips/tcg/user/tlb_helper.c
index b835144b82..210c6d529e 100644
--- a/target/mips/tcg/user/tlb_helper.c
+++ b/target/mips/tcg/user/tlb_helper.c
@@ -57,8 +57,3 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
raise_mmu_exception(env, address, access_type);
do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
-
-void mips_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = EXCP_NONE;
-}
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index 5e37defef8..947bb09bc1 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -127,6 +127,7 @@ static void nios2_cpu_realizefn(DeviceState *dev, Error **errp)
ncc->parent_realize(dev, errp);
}
+#ifndef CONFIG_USER_ONLY
static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
@@ -140,7 +141,7 @@ static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
-
+#endif /* !CONFIG_USER_ONLY */
static void nios2_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
@@ -219,10 +220,10 @@ static const struct SysemuCPUOps nios2_sysemu_ops = {
static const struct TCGCPUOps nios2_tcg_ops = {
.initialize = nios2_tcg_init,
- .cpu_exec_interrupt = nios2_cpu_exec_interrupt,
.tlb_fill = nios2_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = nios2_cpu_exec_interrupt,
.do_interrupt = nios2_cpu_do_interrupt,
.do_unaligned_access = nios2_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index bd34e429ec..27cb04152f 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -186,10 +186,10 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
static const struct TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
- .cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.tlb_fill = openrisc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.do_interrupt = openrisc_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index 82cbaeb4f8..be6df81a81 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -312,8 +312,6 @@ struct OpenRISCCPU {
void cpu_openrisc_list(void);
-void openrisc_cpu_do_interrupt(CPUState *cpu);
-bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@@ -331,6 +329,9 @@ int print_insn_or1k(bfd_vma addr, disassemble_info *info);
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_openrisc_cpu;
+void openrisc_cpu_do_interrupt(CPUState *cpu);
+bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
/* hw/openrisc_pic.c */
void cpu_openrisc_pic_init(OpenRISCCPU *cpu);
diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c
index 3eab771dcd..19223e3f25 100644
--- a/target/openrisc/interrupt.c
+++ b/target/openrisc/interrupt.c
@@ -28,7 +28,6 @@
void openrisc_cpu_do_interrupt(CPUState *cs)
{
-#ifndef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
CPUOpenRISCState *env = &cpu->env;
int exception = cs->exception_index;
@@ -96,7 +95,6 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
} else {
cpu_abort(cs, "Unhandled exception 0x%x\n", exception);
}
-#endif
cs->exception_index = -1;
}
diff --git a/target/openrisc/meson.build b/target/openrisc/meson.build
index 9774a58306..e445dec4a0 100644
--- a/target/openrisc/meson.build
+++ b/target/openrisc/meson.build
@@ -9,7 +9,6 @@ openrisc_ss.add(files(
'exception_helper.c',
'fpu_helper.c',
'gdbstub.c',
- 'interrupt.c',
'interrupt_helper.c',
'mmu.c',
'sys_helper.c',
@@ -17,7 +16,10 @@ openrisc_ss.add(files(
))
openrisc_softmmu_ss = ss.source_set()
-openrisc_softmmu_ss.add(files('machine.c'))
+openrisc_softmmu_ss.add(files(
+ 'interrupt.c',
+ 'machine.c',
+))
target_arch += {'openrisc': openrisc_ss}
target_softmmu_arch += {'openrisc': openrisc_softmmu_ss}
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index d6ea536744..5f3d430245 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -1613,7 +1613,7 @@ static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
- uint32_t insn = translator_ldl(&cpu->env, dc->base.pc_next);
+ uint32_t insn = translator_ldl(&cpu->env, &dc->base, dc->base.pc_next);
if (!decode(dc, insn)) {
gen_illegal_exception(dc);
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 500205229c..362e7c4c5c 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1254,8 +1254,6 @@ DECLARE_OBJ_CHECKERS(PPCVirtualHypervisor, PPCVirtualHypervisorClass,
PPC_VIRTUAL_HYPERVISOR, TYPE_PPC_VIRTUAL_HYPERVISOR)
#endif /* CONFIG_USER_ONLY */
-void ppc_cpu_do_interrupt(CPUState *cpu);
-bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void ppc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int ppc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@@ -1271,6 +1269,8 @@ int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
#ifndef CONFIG_USER_ONLY
+void ppc_cpu_do_interrupt(CPUState *cpu);
+bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void ppc_cpu_do_system_reset(CPUState *cs);
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector);
extern const VMStateDescription vmstate_ppc_cpu;
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index ad7abc6041..6aad01d1d3 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -9014,10 +9014,10 @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
static const struct TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
- .cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.tlb_fill = ppc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.do_interrupt = ppc_cpu_do_interrupt,
.cpu_exec_enter = ppc_cpu_exec_enter,
.cpu_exec_exit = ppc_cpu_exec_exit,
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 7b6ac16eef..d7e32ee107 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -40,24 +40,8 @@
/*****************************************************************************/
/* Exception processing */
-#if defined(CONFIG_USER_ONLY)
-void ppc_cpu_do_interrupt(CPUState *cs)
-{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
-
- cs->exception_index = POWERPC_EXCP_NONE;
- env->error_code = 0;
-}
-
-static void ppc_hw_interrupt(CPUPPCState *env)
-{
- CPUState *cs = env_cpu(env);
+#if !defined(CONFIG_USER_ONLY)
- cs->exception_index = POWERPC_EXCP_NONE;
- env->error_code = 0;
-}
-#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
@@ -1113,7 +1097,6 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
powerpc_set_excp_state(cpu, vector, msr);
}
-#endif /* !CONFIG_USER_ONLY */
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -1130,6 +1113,8 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 171b216e17..5d8b06bd80 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -8585,7 +8585,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);
ctx->cia = pc = ctx->base.pc_next;
- insn = translator_ldl_swap(env, pc, need_byteswap(ctx));
+ insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
ctx->base.pc_next = pc += 4;
if (!is_prefix_insn(ctx, insn)) {
@@ -8600,7 +8600,8 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
ok = true;
} else {
- uint32_t insn2 = translator_ldl_swap(env, pc, need_byteswap(ctx));
+ uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
+ need_byteswap(ctx));
ctx->base.pc_next = pc += 4;
ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
}
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 1a2b03d579..13575c1408 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -644,10 +644,10 @@ static const struct SysemuCPUOps riscv_sysemu_ops = {
static const struct TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.tlb_fill = riscv_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.do_interrupt = riscv_cpu_do_interrupt,
.do_transaction_failed = riscv_cpu_do_transaction_failed,
.do_unaligned_access = riscv_cpu_do_unaligned_access,
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index bf1c899c00..e735e53e26 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -334,7 +334,6 @@ int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
@@ -362,6 +361,7 @@ void riscv_cpu_list(void);
#define cpu_mmu_index riscv_cpu_mmu_index
#ifndef CONFIG_USER_ONLY
+bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 968cb8046f..701858d670 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -75,11 +75,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
return RISCV_EXCP_NONE; /* indicates no pending interrupt */
}
}
-#endif
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
-#if !defined(CONFIG_USER_ONLY)
if (interrupt_request & CPU_INTERRUPT_HARD) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
@@ -90,12 +88,9 @@ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return true;
}
}
-#endif
return false;
}
-#if !defined(CONFIG_USER_ONLY)
-
/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
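
The same reshuffle repeats for each target below: user-mode emulation never
raises hardware interrupts, so the exec-interrupt hook and its implementation
move under the sysemu-only guard instead of stubbing themselves out at run
time. Sketched generically (foo_* is a placeholder, not a real target):

    #ifndef CONFIG_USER_ONLY
    bool foo_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
    #endif

    static const struct TCGCPUOps foo_tcg_ops = {
        .initialize = foo_translate_init,
        .tlb_fill = foo_cpu_tlb_fill,
    #ifndef CONFIG_USER_ONLY
        /* Consulted only by the system build's interrupt path. */
        .cpu_exec_interrupt = foo_cpu_exec_interrupt,
        .do_interrupt = foo_cpu_do_interrupt,
    #endif
    };
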
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index e356fc6c46..74b33fa3c9 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -500,7 +500,8 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
} else {
uint32_t opcode32 = opcode;
opcode32 = deposit32(opcode32, 16, 16,
- translator_lduw(env, ctx->base.pc_next + 2));
+ translator_lduw(env, &ctx->base,
+ ctx->base.pc_next + 2));
ctx->pc_succ_insn = ctx->base.pc_next + 4;
if (!decode_insn32(ctx, opcode32)) {
gen_exception_illegal(ctx);
@@ -561,7 +562,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPURISCVState *env = cpu->env_ptr;
- uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);
+ uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);
decode_opc(env, ctx, opcode16);
ctx->base.pc_next = ctx->pc_succ_insn;
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index 96cc96e514..25a4aa2976 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -186,10 +186,10 @@ static const struct SysemuCPUOps rx_sysemu_ops = {
static const struct TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = rx_cpu_exec_interrupt,
.tlb_fill = rx_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = rx_cpu_exec_interrupt,
.do_interrupt = rx_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index 0b4b998c7b..faa3606f52 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -124,8 +124,10 @@ typedef RXCPU ArchCPU;
#define CPU_RESOLVING_TYPE TYPE_RX_CPU
const char *rx_crname(uint8_t cr);
+#ifndef CONFIG_USER_ONLY
void rx_cpu_do_interrupt(CPUState *cpu);
bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
void rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
diff --git a/target/rx/helper.c b/target/rx/helper.c
index db6b07e389..f34945e7e2 100644
--- a/target/rx/helper.c
+++ b/target/rx/helper.c
@@ -40,6 +40,8 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
env->psw_c = FIELD_EX32(psw, PSW, C);
}
+#ifndef CONFIG_USER_ONLY
+
#define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR)
void rx_cpu_do_interrupt(CPUState *cs)
{
@@ -142,6 +144,8 @@ bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
return addr;
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index 0632b0374b..f284870cd2 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -388,14 +388,16 @@ static void update_cc_op(DisasContext *s)
}
}
-static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
+static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
+ uint64_t pc)
{
- return (uint64_t)cpu_lduw_code(env, pc);
+ return (uint64_t)translator_lduw(env, &s->base, pc);
}
-static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
+static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
+ uint64_t pc)
{
- return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
+ return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
static int get_mem_index(DisasContext *s)
@@ -6273,7 +6275,7 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
ilen = s->ex_value & 0xf;
op = insn >> 56;
} else {
- insn = ld_code2(env, pc);
+ insn = ld_code2(env, s, pc);
op = (insn >> 8) & 0xff;
ilen = get_ilen(op);
switch (ilen) {
@@ -6281,10 +6283,10 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
insn = insn << 48;
break;
case 4:
- insn = ld_code4(env, pc) << 32;
+ insn = ld_code4(env, s, pc) << 32;
break;
case 6:
- insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
+ insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
break;
default:
g_assert_not_reached();
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index 8326922942..2047742d03 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -236,10 +236,10 @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
static const struct TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = superh_cpu_exec_interrupt,
.tlb_fill = superh_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = superh_cpu_exec_interrupt,
.do_interrupt = superh_cpu_do_interrupt,
.do_unaligned_access = superh_cpu_do_unaligned_access,
.io_recompile_replay_branch = superh_io_recompile_replay_branch,
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 01c4344082..017a770214 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -204,8 +204,6 @@ struct SuperHCPU {
};
-void superh_cpu_do_interrupt(CPUState *cpu);
-bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
void superh_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int superh_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@@ -223,6 +221,8 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
void sh4_cpu_list(void);
#if !defined(CONFIG_USER_ONLY)
+void superh_cpu_do_interrupt(CPUState *cpu);
+bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
void cpu_sh4_invalidate_tlb(CPUSH4State *s);
uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
hwaddr addr);
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 2d622081e8..53cb9c3b63 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -45,11 +45,6 @@
#if defined(CONFIG_USER_ONLY)
-void superh_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = -1;
-}
-
int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
{
/* For user mode, only U0 area is cacheable. */
@@ -784,8 +779,6 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
return 0;
}
-#endif
-
bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
@@ -803,6 +796,8 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 8704fea1ca..cf5fe9243d 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -1907,7 +1907,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
/* Read all of the insns for the region. */
for (i = 0; i < max_insns; ++i) {
- insns[i] = translator_lduw(env, pc + i * 2);
+ insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
}
ld_adr = ld_dst = ld_mop = -1;
@@ -2307,7 +2307,7 @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
}
#endif
- ctx->opcode = translator_lduw(env, ctx->base.pc_next);
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
decode_opc(ctx);
ctx->base.pc_next += 2;
}
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index da6b30ec74..5a8a4ce750 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -77,6 +77,7 @@ static void sparc_cpu_reset(DeviceState *dev)
env->cache_control = 0;
}
+#ifndef CONFIG_USER_ONLY
static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
@@ -96,6 +97,7 @@ static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
+#endif /* !CONFIG_USER_ONLY */
static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info)
{
@@ -863,10 +865,10 @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
static const struct TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.tlb_fill = sparc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.do_interrupt = sparc_cpu_do_interrupt,
.do_transaction_failed = sparc_cpu_do_transaction_failed,
.do_unaligned_access = sparc_cpu_do_unaligned_access,
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index bb70ba17de..fdb8bbe5dc 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -5855,7 +5855,7 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
CPUSPARCState *env = cs->env_ptr;
unsigned int insn;
- insn = translator_ldl(env, dc->pc);
+ insn = translator_ldl(env, &dc->base, dc->pc);
dc->base.pc_next += 4;
disas_sparc_insn(dc, insn);
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index 58ec3a0862..c1cbd03595 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -192,11 +192,11 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
static const struct TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
- .cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.tlb_fill = xtensa_cpu_tlb_fill,
.debug_excp_handler = xtensa_breakpoint_handler,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.do_interrupt = xtensa_cpu_do_interrupt,
.do_transaction_failed = xtensa_cpu_do_transaction_failed,
.do_unaligned_access = xtensa_cpu_do_unaligned_access,
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index 2345cb59c7..cbb720e7cc 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -566,12 +566,14 @@ struct XtensaCPU {
bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
+#ifndef CONFIG_USER_ONLY
void xtensa_cpu_do_interrupt(CPUState *cpu);
bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr);
+#endif
void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void xtensa_count_regs(const XtensaConfig *config,
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
index 10e75ab070..9bc7f50d35 100644
--- a/target/xtensa/exc_helper.c
+++ b/target/xtensa/exc_helper.c
@@ -255,11 +255,6 @@ void xtensa_cpu_do_interrupt(CPUState *cs)
}
check_interrupts(env);
}
-#else
-void xtensa_cpu_do_interrupt(CPUState *cs)
-{
-}
-#endif
bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -270,3 +265,5 @@ bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 20399d6a04..dcf6b500ef 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -882,7 +882,8 @@ static int arg_copy_compare(const void *a, const void *b)
static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
{
xtensa_isa isa = dc->config->isa;
- unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, dc->pc)};
+ unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, &dc->base,
+ dc->pc)};
unsigned len = xtensa_op0_insn_len(dc, b[0]);
xtensa_format fmt;
int slot, slots;
@@ -907,7 +908,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
dc->base.pc_next = dc->pc + len;
for (i = 1; i < len; ++i) {
- b[i] = translator_ldub(env, dc->pc + i);
+ b[i] = translator_ldub(env, &dc->base, dc->pc + i);
}
xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len);
fmt = xtensa_format_decode(isa, dc->insnbuf);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 007ceee68e..d25e68b36b 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -92,7 +92,7 @@ static const int tcg_target_call_oarg_regs[2] = {
#define TCG_REG_TMP TCG_REG_R12
#define TCG_VEC_TMP TCG_REG_Q15
-enum arm_cond_code_e {
+typedef enum {
COND_EQ = 0x0,
COND_NE = 0x1,
COND_CS = 0x2, /* Unsigned greater or equal */
@@ -108,7 +108,7 @@ enum arm_cond_code_e {
COND_GT = 0xc,
COND_LE = 0xd,
COND_AL = 0xe,
-};
+} ARMCond;
#define TO_CPSR (1 << 20)
@@ -141,6 +141,9 @@ typedef enum {
INSN_CLZ = 0x016f0f10,
INSN_RBIT = 0x06ff0f30,
+ INSN_LDMIA = 0x08b00000,
+ INSN_STMDB = 0x09200000,
+
INSN_LDR_IMM = 0x04100000,
INSN_LDR_REG = 0x06100000,
INSN_STR_IMM = 0x04000000,
@@ -309,10 +312,10 @@ static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
- int rot = encode_imm(offset);
+ int imm12 = encode_imm(offset);
- if (rot >= 0) {
- *src_rw = deposit32(*src_rw, 0, 12, rol32(offset, rot) | (rot << 7));
+ if (imm12 >= 0) {
+ *src_rw = deposit32(*src_rw, 0, 12, imm12);
return true;
}
return false;
@@ -366,36 +369,55 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
(ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif
-static inline uint32_t rotl(uint32_t val, int n)
-{
- return (val << n) | (val >> (32 - n));
-}
-
-/* ARM immediates for ALU instructions are made of an unsigned 8-bit
- right-rotated by an even amount between 0 and 30. */
+/*
+ * ARM immediates for ALU instructions are made of an unsigned 8-bit
+ * value right-rotated by an even amount between 0 and 30.
+ *
+ * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
+ */
static int encode_imm(uint32_t imm)
{
- int shift;
+ uint32_t rot, imm8;
- /* simple case, only lower bits */
- if ((imm & ~0xff) == 0)
- return 0;
- /* then try a simple even shift */
- shift = ctz32(imm) & ~1;
- if (((imm >> shift) & ~0xff) == 0)
- return 32 - shift;
- /* now try harder with rotations */
- if ((rotl(imm, 2) & ~0xff) == 0)
- return 2;
- if ((rotl(imm, 4) & ~0xff) == 0)
- return 4;
- if ((rotl(imm, 6) & ~0xff) == 0)
- return 6;
- /* imm can't be encoded */
+ /* Simple case, no rotation required. */
+ if ((imm & ~0xff) == 0) {
+ return imm;
+ }
+
+ /* Next, try a simple even shift. */
+ rot = ctz32(imm) & ~1;
+ imm8 = imm >> rot;
+ rot = 32 - rot;
+ if ((imm8 & ~0xff) == 0) {
+ goto found;
+ }
+
+ /*
+ * Finally, try harder with rotations.
+ * The ctz test above will have taken care of rotates >= 8.
+ */
+ for (rot = 2; rot < 8; rot += 2) {
+ imm8 = rol32(imm, rot);
+ if ((imm8 & ~0xff) == 0) {
+ goto found;
+ }
+ }
+ /* Fail: imm cannot be encoded. */
return -1;
+
+ found:
+ /* Note that rot is even, and we discard bit 0 by shifting by 7. */
+ return rot << 7 | imm8;
+}
+
+static int encode_imm_nofail(uint32_t imm)
+{
+ int ret = encode_imm(imm);
+ tcg_debug_assert(ret >= 0);
+ return ret;
}
-static inline int check_fit_imm(uint32_t imm)
+static bool check_fit_imm(uint32_t imm)
{
return encode_imm(imm) >= 0;
}
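
As a sanity check on the new contract -- encode_imm() now returns the entire
imm12 field, rotate nibble included, instead of a rotation count for the
caller to apply -- here is a minimal standalone decoder plus two worked
values (illustrative, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Decode an A32 modified immediate: imm8 rotated right by 2 * rot. */
    static uint32_t arm_imm12_decode(uint32_t imm12)
    {
        uint32_t imm8 = imm12 & 0xff;
        unsigned ror = ((imm12 >> 8) & 0xf) * 2;
        return ror ? (imm8 >> ror) | (imm8 << (32 - ror)) : imm8;
    }

    int main(void)
    {
        /* encode_imm(0xff000000) == 0x4ff: imm8 = 0xff rotated by 8. */
        assert(arm_imm12_decode(0x4ff) == 0xff000000);
        /* encode_imm(0x3fc) == 0xfff: imm8 = 0xff rotated by 30. */
        assert(arm_imm12_decode(0xfff) == 0x3fc);
        return 0;
    }
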
@@ -525,42 +547,37 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
return 0;
}
-static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
+static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
tcg_out32(s, (cond << 28) | 0x0a000000 |
(((offset - 8) >> 2) & 0x00ffffff));
}
-static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
+static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
tcg_out32(s, (cond << 28) | 0x0b000000 |
(((offset - 8) >> 2) & 0x00ffffff));
}
-static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
+static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
-static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
+static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
(((offset - 8) >> 2) & 0x00ffffff));
}
-static inline void tcg_out_dat_reg(TCGContext *s,
- int cond, int opc, int rd, int rn, int rm, int shift)
+static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
tcg_out32(s, (cond << 28) | (0 << 25) | opc |
(rn << 16) | (rd << 12) | shift | rm);
}
-static inline void tcg_out_nop(TCGContext *s)
-{
- tcg_out32(s, INSN_NOP);
-}
-
-static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
+static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
/* Simple reg-reg move, optimising out the 'do nothing' case */
if (rd != rm) {
@@ -568,35 +585,47 @@ static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
}
}
-static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
+static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
+{
+ tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
+}
+
+static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
- /* Unless the C portion of QEMU is compiled as thumb, we don't
- actually need true BX semantics; merely a branch to an address
- held in a register. */
+ /*
+ * Unless the C portion of QEMU is compiled as thumb, we don't need
+ * true BX semantics; merely a branch to an address held in a register.
+ */
if (use_armv5t_instructions) {
- tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
+ tcg_out_bx_reg(s, cond, rn);
} else {
tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
}
}
-static inline void tcg_out_dat_imm(TCGContext *s,
- int cond, int opc, int rd, int rn, int im)
+static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg rd, TCGReg rn, int im)
{
tcg_out32(s, (cond << 28) | (1 << 25) | opc |
(rn << 16) | (rd << 12) | im);
}
+static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg rn, uint16_t mask)
+{
+ tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
+}
+
/* Note that this routine is used for both LDR and LDRH formats, so we do
not wish to include an immediate shift at this point. */
-static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
+static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
| (w << 21) | (rn << 16) | (rt << 12) | rm);
}
-static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
+static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
TCGReg rn, int imm8, bool p, bool w)
{
bool u = 1;
@@ -608,8 +637,8 @@ static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
(rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}
-static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
- TCGReg rn, int imm12, bool p, bool w)
+static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
bool u = 1;
if (imm12 < 0) {
@@ -620,167 +649,167 @@ static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
(rn << 16) | (rt << 12) | imm12);
}
-static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm12)
+static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm12)
{
tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}
-static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm12)
+static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm12)
{
tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}
-static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void __attribute__((unused))
+tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}
-static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}
/* Register pre-increment with base writeback. */
-static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}
-static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}
-static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm12)
+static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm12)
{
tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}
-static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm12)
+static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm12)
{
tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}
-static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}
-static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
+static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
+ TCGReg rd, uint32_t arg)
{
new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}
-static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
+static void tcg_out_movi32(TCGContext *s, ARMCond cond,
+ TCGReg rd, uint32_t arg)
{
- int rot, diff, opc, sh1, sh2;
+ int imm12, diff, opc, sh1, sh2;
uint32_t tt0, tt1, tt2;
/* Check a single MOV/MVN before anything else. */
- rot = encode_imm(arg);
- if (rot >= 0) {
- tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
- rotl(arg, rot) | (rot << 7));
+ imm12 = encode_imm(arg);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
return;
}
- rot = encode_imm(~arg);
- if (rot >= 0) {
- tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
- rotl(~arg, rot) | (rot << 7));
+ imm12 = encode_imm(~arg);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
return;
}
@@ -788,17 +817,15 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
or within the TB, which is immediately before the code block. */
diff = tcg_pcrel_diff(s, (void *)arg) - 8;
if (diff >= 0) {
- rot = encode_imm(diff);
- if (rot >= 0) {
- tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
- rotl(diff, rot) | (rot << 7));
+ imm12 = encode_imm(diff);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
return;
}
} else {
- rot = encode_imm(-diff);
- if (rot >= 0) {
- tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
- rotl(-diff, rot) | (rot << 7));
+ imm12 = encode_imm(-diff);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
return;
}
}
@@ -830,6 +857,8 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
sh2 = ctz32(tt1) & ~1;
tt2 = tt1 & ~(0xff << sh2);
if (tt2 == 0) {
+ int rot;
+
rot = ((32 - sh1) << 7) & 0xf00;
tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
rot = ((32 - sh2) << 7) & 0xf00;
@@ -842,65 +871,61 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
tcg_out_movi_pool(s, cond, rd, arg);
}
-static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
- TCGArg lhs, TCGArg rhs, int rhs_is_const)
+/*
+ * Emit either the reg,imm or reg,reg form of a data-processing insn.
+ * rhs must satisfy the "rI" constraint.
+ */
+static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
- /* Emit either the reg,imm or reg,reg form of a data-processing insn.
- * rhs must satisfy the "rI" constraint.
- */
if (rhs_is_const) {
- int rot = encode_imm(rhs);
- tcg_debug_assert(rot >= 0);
- tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
}
-static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
- TCGReg dst, TCGReg lhs, TCGArg rhs,
+/*
+ * Emit either the reg,imm or reg,reg form of a data-processing insn.
+ * rhs must satisfy the "rIK" constraint.
+ */
+static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
{
- /* Emit either the reg,imm or reg,reg form of a data-processing insn.
- * rhs must satisfy the "rIK" constraint.
- */
if (rhs_is_const) {
- int rot = encode_imm(rhs);
- if (rot < 0) {
- rhs = ~rhs;
- rot = encode_imm(rhs);
- tcg_debug_assert(rot >= 0);
+ int imm12 = encode_imm(rhs);
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(~rhs);
opc = opinv;
}
- tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
}
-static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
- TCGArg dst, TCGArg lhs, TCGArg rhs,
+static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
{
/* Emit either the reg,imm or reg,reg form of a data-processing insn.
* rhs must satisfy the "rIN" constraint.
*/
if (rhs_is_const) {
- int rot = encode_imm(rhs);
- if (rot < 0) {
- rhs = -rhs;
- rot = encode_imm(rhs);
- tcg_debug_assert(rot >= 0);
+ int imm12 = encode_imm(rhs);
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(-rhs);
opc = opneg;
}
- tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
}
-static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
- TCGReg rn, TCGReg rm)
+static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
+ TCGReg rn, TCGReg rm)
{
/* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
if (!use_armv6_instructions && rd == rn) {
@@ -917,8 +942,8 @@ static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}
-static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
- TCGReg rd1, TCGReg rn, TCGReg rm)
+static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
+ TCGReg rd1, TCGReg rn, TCGReg rm)
{
/* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
@@ -936,8 +961,8 @@ static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
-static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
- TCGReg rd1, TCGReg rn, TCGReg rm)
+static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
+ TCGReg rd1, TCGReg rn, TCGReg rm)
{
/* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
@@ -955,18 +980,19 @@ static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
-static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
+static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, TCGReg rm)
{
tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
-static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
+static void tcg_out_udiv(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, TCGReg rm)
{
tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
-static inline void tcg_out_ext8s(TCGContext *s, int cond,
- int rd, int rn)
+static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
if (use_armv6_instructions) {
/* sxtb */
@@ -979,14 +1005,13 @@ static inline void tcg_out_ext8s(TCGContext *s, int cond,
}
}
-static inline void tcg_out_ext8u(TCGContext *s, int cond,
- int rd, int rn)
+static void __attribute__((unused))
+tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}
-static inline void tcg_out_ext16s(TCGContext *s, int cond,
- int rd, int rn)
+static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
if (use_armv6_instructions) {
/* sxth */
@@ -999,8 +1024,7 @@ static inline void tcg_out_ext16s(TCGContext *s, int cond,
}
}
-static inline void tcg_out_ext16u(TCGContext *s, int cond,
- int rd, int rn)
+static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
if (use_armv6_instructions) {
/* uxth */
@@ -1013,7 +1037,8 @@ static inline void tcg_out_ext16u(TCGContext *s, int cond,
}
}
-static void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn, int flags)
+static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int flags)
{
if (use_armv6_instructions) {
if (flags & TCG_BSWAP_OS) {
@@ -1080,7 +1105,7 @@ static void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn, int flags)
? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8)));
}
-static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
+static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
if (use_armv6_instructions) {
/* rev */
@@ -1097,8 +1122,8 @@ static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
}
}
-static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
- TCGArg a1, int ofs, int len, bool const_a1)
+static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
+ TCGArg a1, int ofs, int len, bool const_a1)
{
if (const_a1) {
/* bfi becomes bfc with rn == 15. */
@@ -1109,24 +1134,24 @@ static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
| (ofs << 7) | ((ofs + len - 1) << 16));
}
-static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
- TCGArg a1, int ofs, int len)
+static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
+ TCGReg rn, int ofs, int len)
{
/* ubfx */
- tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
+ tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
| (ofs << 7) | ((len - 1) << 16));
}
-static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
- TCGArg a1, int ofs, int len)
+static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
+ TCGReg rn, int ofs, int len)
{
/* sbfx */
- tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
+ tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
| (ofs << 7) | ((len - 1) << 16));
}
-static inline void tcg_out_ld32u(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1135,8 +1160,8 @@ static inline void tcg_out_ld32u(TCGContext *s, int cond,
tcg_out_ld32_12(s, cond, rd, rn, offset);
}
-static inline void tcg_out_st32(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_st32(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1145,8 +1170,8 @@ static inline void tcg_out_st32(TCGContext *s, int cond,
tcg_out_st32_12(s, cond, rd, rn, offset);
}
-static inline void tcg_out_ld16u(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1155,8 +1180,8 @@ static inline void tcg_out_ld16u(TCGContext *s, int cond,
tcg_out_ld16u_8(s, cond, rd, rn, offset);
}
-static inline void tcg_out_ld16s(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1165,8 +1190,8 @@ static inline void tcg_out_ld16s(TCGContext *s, int cond,
tcg_out_ld16s_8(s, cond, rd, rn, offset);
}
-static inline void tcg_out_st16(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_st16(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1175,8 +1200,8 @@ static inline void tcg_out_st16(TCGContext *s, int cond,
tcg_out_st16_8(s, cond, rd, rn, offset);
}
-static inline void tcg_out_ld8u(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1185,8 +1210,8 @@ static inline void tcg_out_ld8u(TCGContext *s, int cond,
tcg_out_ld8_12(s, cond, rd, rn, offset);
}
-static inline void tcg_out_ld8s(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1195,8 +1220,8 @@ static inline void tcg_out_ld8s(TCGContext *s, int cond,
tcg_out_ld8s_8(s, cond, rd, rn, offset);
}
-static inline void tcg_out_st8(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_st8(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1205,60 +1230,79 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
tcg_out_st8_12(s, cond, rd, rn, offset);
}
-/* The _goto case is normally between TBs within the same code buffer, and
+/*
+ * The _goto case is normally between TBs within the same code buffer, and
* with the code buffer limited to 16MB we wouldn't need the long case.
* But we also use it for the tail-call to the qemu_ld/st helpers, which does.
*/
-static void tcg_out_goto(TCGContext *s, int cond, const tcg_insn_unit *addr)
+static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
intptr_t addri = (intptr_t)addr;
ptrdiff_t disp = tcg_pcrel_diff(s, addr);
+ bool arm_mode = !(addri & 1);
- if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
- tcg_out_b(s, cond, disp);
+ if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
+ tcg_out_b_imm(s, cond, disp);
return;
}
- tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
+
+ /* LDR is interworking from v5t. */
+ if (arm_mode || use_armv5t_instructions) {
+ tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
+ return;
+ }
+
+ /* else v4t */
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
+ tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);
}
-/* The call case is mostly used for helpers - so it's not unreasonable
- * for them to be beyond branch range */
+/*
+ * The call case is mostly used for helpers - so it's not unreasonable
+ * for them to be beyond branch range.
+ */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
{
intptr_t addri = (intptr_t)addr;
ptrdiff_t disp = tcg_pcrel_diff(s, addr);
+ bool arm_mode = !(addri & 1);
if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
- if (addri & 1) {
- /* Use BLX if the target is in Thumb mode */
- if (!use_armv5t_instructions) {
- tcg_abort();
- }
+ if (arm_mode) {
+ tcg_out_bl_imm(s, COND_AL, disp);
+ return;
+ }
+ if (use_armv5t_instructions) {
tcg_out_blx_imm(s, disp);
- } else {
- tcg_out_bl(s, COND_AL, disp);
+ return;
}
- } else if (use_armv7_instructions) {
+ }
+
+ if (use_armv5t_instructions) {
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
- tcg_out_blx(s, COND_AL, TCG_REG_TMP);
- } else {
+ tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
+ } else if (arm_mode) {
/* ??? Know that movi_pool emits exactly 1 insn. */
- tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
+ tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
+ } else {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
+ tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
+ tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);
}
}
-static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
+static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
if (l->has_value) {
tcg_out_goto(s, cond, l->u.value_ptr);
} else {
tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
- tcg_out_b(s, cond, 0);
+ tcg_out_b_imm(s, cond, 0);
}
}
-static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
if (use_armv7_instructions) {
tcg_out32(s, INSN_DMB_ISH);
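
A quick standalone check of the direct-branch encoding used by
tcg_out_b_imm()/tcg_out_bl_imm() above; the -8 bias exists because reading
the ARM PC yields the current instruction's address plus 8 (the helper is
illustrative, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Mirror of tcg_out_b_imm()'s word encoding. */
    static uint32_t arm_b_word(uint32_t cond, int32_t disp)
    {
        return (cond << 28) | 0x0a000000 | (((disp - 8) >> 2) & 0x00ffffff);
    }

    int main(void)
    {
        /* 'b .' (branch to self, disp 0) is the classic 0xeafffffe. */
        assert(arm_b_word(0xe, 0) == 0xeafffffe);
        return 0;
    }
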
@@ -1714,9 +1758,9 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
}
#endif /* SOFTMMU */
-static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
- TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addend)
+static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
+ TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addend)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -1757,9 +1801,9 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
}
}
-static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
- TCGReg datalo, TCGReg datahi,
- TCGReg addrlo)
+#ifndef CONFIG_SOFTMMU
+static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
+ TCGReg datahi, TCGReg addrlo)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -1797,6 +1841,7 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
g_assert_not_reached();
}
}
+#endif
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
@@ -1823,7 +1868,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
/* This is a conditional BL only to load a pointer within this opcode into
   LR for the slow path. We will not be using the value for a tail call. */
label_ptr = s->code_ptr;
- tcg_out_bl(s, COND_NE, 0);
+ tcg_out_bl_imm(s, COND_NE, 0);
tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
@@ -1839,9 +1884,9 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
#endif
}
-static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
- TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addend)
+static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
+ TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addend)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -1871,9 +1916,9 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
}
}
-static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
- TCGReg datalo, TCGReg datahi,
- TCGReg addrlo)
+#ifndef CONFIG_SOFTMMU
+static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
+ TCGReg datahi, TCGReg addrlo)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -1902,6 +1947,7 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
g_assert_not_reached();
}
}
+#endif
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
@@ -1929,7 +1975,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
/* The conditional call must come last, as we're going to return here. */
label_ptr = s->code_ptr;
- tcg_out_bl(s, COND_NE, 0);
+ tcg_out_bl_imm(s, COND_NE, 0);
add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
s->code_ptr, label_ptr);
@@ -1946,9 +1992,9 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
static void tcg_out_epilogue(TCGContext *s);
-static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
TCGArg a0, a1, a2, a3, a4, a5;
int c;
@@ -1982,7 +2028,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
case INDEX_op_goto_ptr:
- tcg_out_bx(s, COND_AL, args[0]);
+ tcg_out_b_reg(s, COND_AL, args[0]);
break;
case INDEX_op_br:
tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
@@ -2505,8 +2551,8 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
}
}
-static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
- TCGReg base, intptr_t ofs)
+static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+ TCGReg base, intptr_t ofs)
{
return false;
}
@@ -2715,7 +2761,8 @@ static const ARMInsn vec_cmp0_insn[16] = {
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
TCGType type = vecl + TCG_TYPE_V64;
unsigned q = vecl;
@@ -3055,7 +3102,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
{
/* Calling convention requires us to save r4-r11 and lr. */
/* stmdb sp!, { r4 - r11, lr } */
- tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
+ tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
+ (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
+ (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
+ (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
/* Reserve callee argument and tcg temp space. */
tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
@@ -3065,7 +3115,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
+ tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
/*
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
@@ -3083,7 +3133,10 @@ static void tcg_out_epilogue(TCGContext *s)
TCG_REG_CALL_STACK, STACK_ADDEND, 1);
/* ldmia sp!, { r4 - r11, pc } */
- tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
+ tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
+ (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
+ (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
+ (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}
typedef struct {
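
To see that the new tcg_out_ldstm() calls in the prologue and epilogue
reproduce the magic words they replace, expand the encoding by hand (opcode
and register values as defined earlier in this file; the check program is
illustrative):

    #include <assert.h>
    #include <stdint.h>

    enum { COND_AL = 0xe, SP = 13, LR = 14, PC = 15 };
    enum { INSN_LDMIA = 0x08b00000, INSN_STMDB = 0x09200000 };

    static uint32_t ldstm_word(uint32_t cond, uint32_t opc,
                               uint32_t rn, uint16_t mask)
    {
        return (cond << 28) | opc | (rn << 16) | mask;
    }

    int main(void)
    {
        uint16_t r4_r11 = 0x0ff0;   /* one bit per register: bits 4..11 */

        /* stmdb sp!, { r4 - r11, lr }: previously hardcoded 0xe92d4ff0. */
        assert(ldstm_word(COND_AL, INSN_STMDB, SP, r4_r11 | (1 << LR))
               == 0xe92d4ff0);
        /* ldmia sp!, { r4 - r11, pc }: previously hardcoded 0xe8bd8ff0. */
        assert(ldstm_word(COND_AL, INSN_LDMIA, SP, r4_r11 | (1 << PC))
               == 0xe8bd8ff0);
        return 0;
    }
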
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index d113b7f8db..f41b809554 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -26,34 +26,9 @@
#ifndef ARM_TCG_TARGET_H
#define ARM_TCG_TARGET_H
-/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */
-#ifndef __ARM_ARCH
-# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
- || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
- || defined(__ARM_ARCH_7EM__)
-# define __ARM_ARCH 7
-# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
- || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
- || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
-# define __ARM_ARCH 6
-# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \
- || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
- || defined(__ARM_ARCH_5TEJ__)
-# define __ARM_ARCH 5
-# else
-# define __ARM_ARCH 4
-# endif
-#endif
-
extern int arm_arch;
-#if defined(__ARM_ARCH_5T__) \
- || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
-# define use_armv5t_instructions 1
-#else
-# define use_armv5t_instructions use_armv6_instructions
-#endif
-
+#define use_armv5t_instructions (__ARM_ARCH >= 5 || arm_arch >= 5)
#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 98d924b91a..997510109d 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -241,8 +241,9 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define P_EXT 0x100 /* 0x0f opcode prefix */
#define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
#define P_DATA16 0x400 /* 0x66 opcode prefix */
+#define P_VEXW 0x1000 /* Set VEX.W = 1 */
#if TCG_TARGET_REG_BITS == 64
-# define P_REXW 0x1000 /* Set REX.W = 1 */
+# define P_REXW P_VEXW /* Set REX.W = 1; match VEXW */
# define P_REXB_R 0x2000 /* REG field as byte register */
# define P_REXB_RM 0x4000 /* R/M field as byte register */
# define P_GS 0x8000 /* gs segment override */
@@ -410,13 +411,13 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
-#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW)
+#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
#define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
#define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16)
-#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_REXW)
+#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
-#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_REXW)
+#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VZEROUPPER (0x77 | P_EXT)
#define OPC_XCHG_ax_r32 (0x90)
@@ -576,7 +577,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
/* Use the two byte form if possible, which cannot encode
VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */
- if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
+ if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_VEXW)) == P_EXT
&& ((rm | index) & 8) == 0) {
/* Two byte VEX prefix. */
tcg_out8(s, 0xc5);
@@ -601,7 +602,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */
tcg_out8(s, tmp);
- tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */
+ tmp = (opc & P_VEXW ? 0x80 : 0); /* VEX.W */
}
tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */
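
The P_REXW/P_VEXW split matters because a 32-bit host has no REX prefix at
all, while VEX.W still exists there and the VPERMQ/VPSLLVQ/VPSRLVQ encodings
above require it. A sketch of the assumed layering (the 32-bit branch is
inferred, not shown in this hunk):

    #define P_VEXW 0x1000           /* sets VEX.W on any host */
    #if TCG_TARGET_REG_BITS == 64
    # define P_REXW P_VEXW          /* REX.W and VEX.W share one bit */
    #else
    # define P_REXW 0               /* no REX prefix on 32-bit x86 */
    #endif
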
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index e0f4665213..5e1fac914a 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -25,9 +25,24 @@
#include "elf.h"
#include "../tcg-pool.c.inc"
-#if defined _CALL_DARWIN || defined __APPLE__
-#define TCG_TARGET_CALL_DARWIN
-#endif
+/*
+ * Standardize on the _CALL_FOO symbols used by GCC:
+ * Apple XCode does not define _CALL_DARWIN.
+ * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
+ */
+#if !defined(_CALL_SYSV) && \
+ !defined(_CALL_DARWIN) && \
+ !defined(_CALL_AIX) && \
+ !defined(_CALL_ELF)
+# if defined(__APPLE__)
+# define _CALL_DARWIN
+# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
+# define _CALL_SYSV
+# else
+# error "Unknown ABI"
+# endif
+#endif
+
#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ALIGN_ARGS 1
#endif
@@ -169,7 +184,7 @@ static const int tcg_target_call_oarg_regs[] = {
};
static const int tcg_target_callee_save_regs[] = {
-#ifdef TCG_TARGET_CALL_DARWIN
+#ifdef _CALL_DARWIN
TCG_REG_R11,
#endif
TCG_REG_R14,
@@ -2372,7 +2387,7 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
# define LINK_AREA_SIZE (6 * SZR)
# define LR_OFFSET (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
-#elif defined(TCG_TARGET_CALL_DARWIN)
+#elif defined(_CALL_DARWIN)
# define LINK_AREA_SIZE (6 * SZR)
# define LR_OFFSET (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64
diff --git a/tests/qemu-iotests/122 b/tests/qemu-iotests/122
index 5d550ed13e..efb260d822 100755
--- a/tests/qemu-iotests/122
+++ b/tests/qemu-iotests/122
@@ -67,7 +67,7 @@ echo
_make_test_img -b "$TEST_IMG".base -F $IMGFMT
$QEMU_IO -c "write -P 0 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
-$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base -o backing_fmt=$IMGFMT \
+$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base -F $IMGFMT \
"$TEST_IMG" "$TEST_IMG".orig
$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
$QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base -o backing_fmt=$IMGFMT \
diff --git a/tests/qemu-iotests/271 b/tests/qemu-iotests/271
index 599b849cc6..2775b4d130 100755
--- a/tests/qemu-iotests/271
+++ b/tests/qemu-iotests/271
@@ -893,7 +893,10 @@ EOF
}
_make_test_img -o extended_l2=on 1M
-_concurrent_io | $QEMU_IO | _filter_qemu_io
+# The second and third writes in _concurrent_io() are independent and may
+# finish in a different order, so filter the offset out to match both
+# possible variants.
+_concurrent_io | $QEMU_IO | _filter_qemu_io | \
+ $SED -e 's/\(20480\|40960\)/OFFSET/'
_concurrent_verify | $QEMU_IO | _filter_qemu_io
# success, all done
diff --git a/tests/qemu-iotests/271.out b/tests/qemu-iotests/271.out
index 81043ba4d7..5be780de76 100644
--- a/tests/qemu-iotests/271.out
+++ b/tests/qemu-iotests/271.out
@@ -719,8 +719,8 @@ blkdebug: Suspended request 'A'
blkdebug: Resuming request 'A'
wrote 2048/2048 bytes at offset 30720
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 2048/2048 bytes at offset 20480
+wrote 2048/2048 bytes at offset OFFSET
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 2048/2048 bytes at offset 40960
+wrote 2048/2048 bytes at offset OFFSET
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
*** done
diff --git a/tests/qemu-iotests/297 b/tests/qemu-iotests/297
index 345b617b34..b04cba5366 100755
--- a/tests/qemu-iotests/297
+++ b/tests/qemu-iotests/297
@@ -29,7 +29,7 @@ import iotests
SKIP_FILES = (
'030', '040', '041', '044', '045', '055', '056', '057', '065', '093',
'096', '118', '124', '132', '136', '139', '147', '148', '149',
- '151', '152', '155', '163', '165', '169', '194', '196', '199', '202',
+ '151', '152', '155', '163', '165', '194', '196', '202',
'203', '205', '206', '207', '208', '210', '211', '212', '213', '216',
'218', '219', '224', '228', '234', '235', '236', '237', '238',
'240', '242', '245', '246', '248', '255', '256', '257', '258', '260',
@@ -46,7 +46,7 @@ def is_python_file(filename):
if filename.endswith('.py'):
return True
- with open(filename) as f:
+ with open(filename, encoding='utf-8') as f:
try:
first_line = f.readline()
return re.match('^#!.*python', first_line) is not None
@@ -55,8 +55,9 @@ def is_python_file(filename):
def run_linters():
- files = [filename for filename in (set(os.listdir('.')) - set(SKIP_FILES))
- if is_python_file(filename)]
+ named_tests = [f'tests/{entry}' for entry in os.listdir('tests')]
+ check_tests = set(os.listdir('.') + named_tests) - set(SKIP_FILES)
+ files = [filename for filename in check_tests if is_python_file(filename)]
iotests.logger.debug('Files to be checked:')
iotests.logger.debug(', '.join(sorted(files)))
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 11276f380a..ce06cf5630 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -610,7 +610,7 @@ class VM(qtest.QEMUQtestMachine):
return
valgrind_filename = f"{test_dir}/{self._popen.pid}.valgrind"
if self.exitcode() == 99:
- with open(valgrind_filename) as f:
+ with open(valgrind_filename, encoding='utf-8') as f:
print(f.read())
else:
os.remove(valgrind_filename)
@@ -703,7 +703,7 @@ class VM(qtest.QEMUQtestMachine):
def flatten_qmp_object(self, obj, output=None, basestr=''):
if output is None:
- output = dict()
+ output = {}
if isinstance(obj, list):
for i, item in enumerate(obj):
self.flatten_qmp_object(item, output, basestr + str(i) + '.')
@@ -716,7 +716,7 @@ class VM(qtest.QEMUQtestMachine):
def qmp_to_opts(self, obj):
obj = self.flatten_qmp_object(obj)
- output_list = list()
+ output_list = []
for key in obj:
output_list += [key + '=' + obj[key]]
return ','.join(output_list)
@@ -1121,7 +1121,8 @@ def notrun(reason):
# Each test in qemu-iotests has a number ("seq")
seq = os.path.basename(sys.argv[0])
- with open('%s/%s.notrun' % (output_dir, seq), 'w') as outfile:
+ with open('%s/%s.notrun' % (output_dir, seq), 'w', encoding='utf-8') \
+ as outfile:
outfile.write(reason + '\n')
logger.warning("%s not run: %s", seq, reason)
sys.exit(0)
@@ -1135,7 +1136,8 @@ def case_notrun(reason):
    # Each test in qemu-iotests has a number ("seq")
    seq = os.path.basename(sys.argv[0])
-    with open('%s/%s.casenotrun' % (output_dir, seq), 'a') as outfile:
+    with open('%s/%s.casenotrun' % (output_dir, seq), 'a', encoding='utf-8') \
+            as outfile:
        outfile.write(' [case not run] ' + reason + '\n')
def _verify_image_format(supported_fmts: Sequence[str] = (),
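
For reference, flatten_qmp_object() recurses through dicts and lists, producing dotted keys that qmp_to_opts() then joins into a driver option string. A sketch of that behaviour as a free function, mirroring the method shown above (the dict and scalar branches fall outside the hunk shown and are reconstructed here):

    def flatten_qmp_object(obj, output=None, basestr=''):
        if output is None:
            output = {}
        if isinstance(obj, list):
            for i, item in enumerate(obj):
                flatten_qmp_object(item, output, basestr + str(i) + '.')
        elif isinstance(obj, dict):
            for key in obj:
                flatten_qmp_object(obj[key], output, basestr + key + '.')
        else:
            output[basestr[:-1]] = obj  # strip trailing '.'
        return output

    # {'file': {'driver': 'file'}} flattens to {'file.driver': 'file'}
    assert flatten_qmp_object({'file': {'driver': 'file'}}) == \
        {'file.driver': 'file'}
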
diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test b/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test
index 584062b412..00ebb5c251 100755
--- a/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test
+++ b/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test
@@ -132,10 +132,10 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
        result = self.vm_a.qmp('x-debug-block-dirty-bitmap-sha256',
                               node='drive0', name='bitmap0')
-        self.discards1_sha256 = result['return']['sha256']
+        discards1_sha256 = result['return']['sha256']
        # Check that updating the bitmap by discards works
-        assert self.discards1_sha256 != empty_sha256
+        assert discards1_sha256 != empty_sha256
        # We want to calculate the resulting sha256. Do it in bitmap0, so disable
        # other bitmaps
@@ -148,7 +148,7 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
        result = self.vm_a.qmp('x-debug-block-dirty-bitmap-sha256',
                               node='drive0', name='bitmap0')
-        self.all_discards_sha256 = result['return']['sha256']
+        all_discards_sha256 = result['return']['sha256']
        # Now, enable some bitmaps, to be updated during migration
        for i in range(2, nb_bitmaps, 2):
@@ -173,10 +173,11 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
        event_resume = self.vm_b.event_wait('RESUME')
        self.vm_b_events.append(event_resume)
-        return event_resume
+        return (event_resume, discards1_sha256, all_discards_sha256)
    def test_postcopy_success(self):
-        event_resume = self.start_postcopy()
+        event_resume, discards1_sha256, all_discards_sha256 = \
+            self.start_postcopy()
        # enabled bitmaps should be updated
        apply_discards(self.vm_b, discards2)
@@ -217,7 +218,7 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
        for i in range(0, nb_bitmaps, 5):
            result = self.vm_b.qmp('x-debug-block-dirty-bitmap-sha256',
                                   node='drive0', name='bitmap{}'.format(i))
-            sha = self.discards1_sha256 if i % 2 else self.all_discards_sha256
+            sha = discards1_sha256 if i % 2 else all_discards_sha256
            self.assert_qmp(result, 'return/sha256', sha)
    def test_early_shutdown_destination(self):
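
The pattern in this file, returning locals instead of assigning self.* attributes outside setUp(), silences pylint's attribute-defined-outside-init warning and makes the data flow between the helper and its callers explicit. Reduced to a toy example:

    class Example:
        def prepare(self):
            checksum = 'abc123'        # a local, not self.checksum
            return checksum

        def test(self):
            checksum = self.prepare()  # value is passed, not stashed
            assert checksum == 'abc123'

    Example().test()
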
diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-test b/tests/qemu-iotests/tests/migrate-bitmaps-test
index a5c7bc83e0..dc431c35b3 100755
--- a/tests/qemu-iotests/tests/migrate-bitmaps-test
+++ b/tests/qemu-iotests/tests/migrate-bitmaps-test
@@ -20,11 +20,10 @@
#
import os
-import iotests
-import time
import itertools
import operator
import re
+import iotests
from iotests import qemu_img, qemu_img_create, Timeout
@@ -37,6 +36,12 @@ mig_cmd = 'exec: cat > ' + mig_file
incoming_cmd = 'exec: cat ' + mig_file
+def get_bitmap_hash(vm):
+    result = vm.qmp('x-debug-block-dirty-bitmap-sha256',
+                    node='drive0', name='bitmap0')
+    return result['return']['sha256']
+
+
class TestDirtyBitmapMigration(iotests.QMPTestCase):
    def tearDown(self):
        self.vm_a.shutdown()
@@ -62,21 +67,16 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
            params['persistent'] = True
        result = vm.qmp('block-dirty-bitmap-add', **params)
-        self.assert_qmp(result, 'return', {});
-
-    def get_bitmap_hash(self, vm):
-        result = vm.qmp('x-debug-block-dirty-bitmap-sha256',
-                        node='drive0', name='bitmap0')
-        return result['return']['sha256']
+        self.assert_qmp(result, 'return', {})
    def check_bitmap(self, vm, sha256):
        result = vm.qmp('x-debug-block-dirty-bitmap-sha256',
                        node='drive0', name='bitmap0')
        if sha256:
-            self.assert_qmp(result, 'return/sha256', sha256);
+            self.assert_qmp(result, 'return/sha256', sha256)
        else:
            self.assert_qmp(result, 'error/desc',
-                            "Dirty bitmap 'bitmap0' not found");
+                            "Dirty bitmap 'bitmap0' not found")
    def do_test_migration_resume_source(self, persistent, migrate_bitmaps):
        granularity = 512
@@ -97,7 +97,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
        self.add_bitmap(self.vm_a, granularity, persistent)
        for r in regions:
            self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % r)
-        sha256 = self.get_bitmap_hash(self.vm_a)
+        sha256 = get_bitmap_hash(self.vm_a)
        result = self.vm_a.qmp('migrate', uri=mig_cmd)
        while True:
@@ -106,7 +106,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
                break
        while True:
            result = self.vm_a.qmp('query-status')
-            if (result['return']['status'] == 'postmigrate'):
+            if result['return']['status'] == 'postmigrate':
                break
        # test that bitmap is still here
@@ -164,7 +164,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
        self.add_bitmap(self.vm_a, granularity, persistent)
        for r in regions:
            self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % r)
-        sha256 = self.get_bitmap_hash(self.vm_a)
+        sha256 = get_bitmap_hash(self.vm_a)
        if pre_shutdown:
            self.vm_a.shutdown()
@@ -214,16 +214,22 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
        self.check_bitmap(self.vm_b, sha256 if persistent else False)
-def inject_test_case(klass, name, method, *args, **kwargs):
+def inject_test_case(klass, suffix, method, *args, **kwargs):
    mc = operator.methodcaller(method, *args, **kwargs)
-    setattr(klass, 'test_' + method + name, lambda self: mc(self))
+    # We want to add a function attribute to `klass`, so that it is
+    # correctly converted to a method on instantiation. The
+    # methodcaller object `mc` is a callable, not a function, so we
+    # need the lambda to turn it into a function.
+    # pylint: disable=unnecessary-lambda
+    setattr(klass, 'test_' + method + suffix, lambda self: mc(self))
+
for cmb in list(itertools.product((True, False), repeat=5)):
    name = ('_' if cmb[0] else '_not_') + 'persistent_'
    name += ('_' if cmb[1] else '_not_') + 'migbitmap_'
    name += '_online' if cmb[2] else '_offline'
    name += '_shared' if cmb[3] else '_nonshared'
-    if (cmb[4]):
+    if cmb[4]:
        name += '__pre_shutdown'
    inject_test_case(TestDirtyBitmapMigration, name, 'do_test_migration',
@@ -270,7 +276,8 @@ class TestDirtyBitmapBackingMigration(iotests.QMPTestCase):
            self.assert_qmp(result, 'return', {})
        # Check that the bitmaps are there
-        for node in self.vm.qmp('query-named-block-nodes', flat=True)['return']:
+        nodes = self.vm.qmp('query-named-block-nodes', flat=True)['return']
+        for node in nodes:
            if 'node0' in node['node-name']:
                self.assert_qmp(node, 'dirty-bitmaps[0]/name', 'bmap0')
@@ -287,7 +294,7 @@ class TestDirtyBitmapBackingMigration(iotests.QMPTestCase):
"""
Continue the source after migration.
"""
- result = self.vm.qmp('migrate', uri=f'exec: cat > /dev/null')
+ result = self.vm.qmp('migrate', uri='exec: cat > /dev/null')
self.assert_qmp(result, 'return', {})
with Timeout(10, 'Migration timeout'):
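
The inject_test_case() machinery above relies on two Python details: operator.methodcaller(method, *args) builds a callable that invokes the named method with fixed arguments, and wrapping it in a lambda turns that callable object into a real function, so that setattr() on the class yields an ordinary bound method. A self-contained illustration:

    import operator

    class Suite:
        def do_check(self, persistent):
            return f'persistent={persistent}'

    def inject(klass, suffix, method, *args, **kwargs):
        mc = operator.methodcaller(method, *args, **kwargs)
        # mc is a callable object, not a function; the lambda gives it
        # the descriptor behaviour needed to become a bound method.
        setattr(klass, 'test' + suffix, lambda self: mc(self))

    inject(Suite, '_persistent', 'do_check', True)
    assert Suite().test_persistent() == 'persistent=True'
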
diff --git a/tests/qemu-iotests/tests/migrate-during-backup b/tests/qemu-iotests/tests/migrate-during-backup
new file mode 100755
index 0000000000..34103229ee
--- /dev/null
+++ b/tests/qemu-iotests/tests/migrate-during-backup
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+# group: migration
+#
+# Copyright (c) 2021 Virtuozzo International GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import iotests
+from iotests import qemu_img_create, qemu_io
+
+
+disk_a = os.path.join(iotests.test_dir, 'disk_a')
+disk_b = os.path.join(iotests.test_dir, 'disk_b')
+size = '1M'
+mig_file = os.path.join(iotests.test_dir, 'mig_file')
+mig_cmd = 'exec: cat > ' + mig_file
+
+
+class TestMigrateDuringBackup(iotests.QMPTestCase):
+    def tearDown(self):
+        self.vm.shutdown()
+        os.remove(disk_a)
+        os.remove(disk_b)
+        os.remove(mig_file)
+
+    def setUp(self):
+        qemu_img_create('-f', iotests.imgfmt, disk_a, size)
+        qemu_img_create('-f', iotests.imgfmt, disk_b, size)
+        qemu_io('-c', f'write 0 {size}', disk_a)
+
+        self.vm = iotests.VM().add_drive(disk_a)
+        self.vm.launch()
+        result = self.vm.qmp('blockdev-add', {
+            'node-name': 'target',
+            'driver': iotests.imgfmt,
+            'file': {
+                'driver': 'file',
+                'filename': disk_b
+            }
+        })
+        self.assert_qmp(result, 'return', {})
+
+    def test_migrate(self):
+        result = self.vm.qmp('blockdev-backup', device='drive0',
+                             target='target', sync='full',
+                             speed=1, x_perf={
+                                 'max-workers': 1,
+                                 'max-chunk': 64 * 1024
+                             })
+        self.assert_qmp(result, 'return', {})
+
+        result = self.vm.qmp('job-pause', id='drive0')
+        self.assert_qmp(result, 'return', {})
+
+        result = self.vm.qmp('migrate-set-capabilities',
+                             capabilities=[{'capability': 'events',
+                                            'state': True}])
+        self.assert_qmp(result, 'return', {})
+        result = self.vm.qmp('migrate', uri=mig_cmd)
+        self.assert_qmp(result, 'return', {})
+
+        e = self.vm.events_wait((('MIGRATION',
+                                  {'data': {'status': 'completed'}}),
+                                 ('MIGRATION',
+                                  {'data': {'status': 'failed'}})))
+
+        # Don't assert that e is 'failed' yet: if we stopped here, we
+        # would miss a possible crash when the backup continues :)
+
+        result = self.vm.qmp('block-job-set-speed', device='drive0',
+                             speed=0)
+        self.assert_qmp(result, 'return', {})
+        result = self.vm.qmp('job-resume', id='drive0')
+        self.assert_qmp(result, 'return', {})
+
+        # For the future: if something changes so that both migration
+        # and backup succeed, let's not miss that moment, as it may be
+        # a bug as well as an improvement.
+        self.assert_qmp(e, 'data/status', 'failed')
+
+
+if __name__ == '__main__':
+    iotests.main(supported_fmts=['qcow2'],
+                 supported_protocols=['file'])
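
A note on the structure of test_migrate() above: the backup is throttled to speed=1 and then paused so it cannot finish before migration starts, and events_wait() takes a sequence of (event-name, match) pairs and returns the first event matching any of them, which lets the test resume the job before asserting on the outcome. A sketch of the matching idea (simplified; not the actual qemu.machine implementation):

    patterns = (
        ('MIGRATION', {'data': {'status': 'completed'}}),
        ('MIGRATION', {'data': {'status': 'failed'}}),
    )
    event = {'event': 'MIGRATION', 'data': {'status': 'failed'}}
    # An event is accepted if its name and the nested 'status' field
    # match one of the requested patterns.
    assert any(event['event'] == name and
               event['data']['status'] == match['data']['status']
               for name, match in patterns)
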
diff --git a/tests/qemu-iotests/tests/migrate-during-backup.out b/tests/qemu-iotests/tests/migrate-during-backup.out
new file mode 100644
index 0000000000..ae1213e6f8
--- /dev/null
+++ b/tests/qemu-iotests/tests/migrate-during-backup.out
@@ -0,0 +1,5 @@
+.
+----------------------------------------------------------------------
+Ran 1 tests
+
+OK
diff --git a/tests/qemu-iotests/tests/mirror-top-perms b/tests/qemu-iotests/tests/mirror-top-perms
index 451a0666f8..2fc8dd66e0 100755
--- a/tests/qemu-iotests/tests/mirror-top-perms
+++ b/tests/qemu-iotests/tests/mirror-top-perms
@@ -47,7 +47,7 @@ class TestMirrorTopPerms(iotests.QMPTestCase):
    def tearDown(self):
        try:
            self.vm.shutdown()
-        except qemu.machine.AbnormalShutdown:
+        except qemu.machine.machine.AbnormalShutdown:
            pass
        if self.vm_b is not None: