-rw-r--r--  .gitlab-ci.d/custom-runners.yml | 36
-rw-r--r--  accel/kvm/kvm-all.c | 2
-rw-r--r--  accel/tcg/cpu-exec.c | 14
-rw-r--r--  accel/tcg/tcg-accel-ops-rr.c | 2
-rw-r--r--  accel/tcg/translate-all.c | 59
-rw-r--r--  accel/tcg/translator.c | 39
-rw-r--r--  accel/tcg/user-exec.c | 48
-rw-r--r--  block.c | 88
-rw-r--r--  block/file-posix.c | 9
-rw-r--r--  block/gluster.c | 23
-rw-r--r--  block/io.c | 68
-rw-r--r--  block/iscsi.c | 3
-rw-r--r--  block/mirror.c | 25
-rw-r--r--  block/qcow2-cluster.c | 78
-rw-r--r--  block/qcow2-refcount.c | 326
-rw-r--r--  block/qcow2.c | 13
-rw-r--r--  block/qcow2.h | 7
-rw-r--r--  bsd-user/i386/target_arch_cpu.c | 5
-rw-r--r--  bsd-user/x86_64/target_arch_cpu.c | 5
-rw-r--r--  chardev/char-mux.c | 6
-rw-r--r--  chardev/char.c | 7
-rwxr-xr-x  configure | 4
-rw-r--r--  docs/about/build-platforms.rst | 33
-rw-r--r--  docs/about/deprecated.rst | 2
-rw-r--r--  docs/system/arm/nuvoton.rst | 1
-rw-r--r--  docs/tools/qemu-img.rst | 4
-rw-r--r--  hw/arm/mps2-tz.c | 92
-rw-r--r--  hw/arm/mps2.c | 12
-rw-r--r--  hw/arm/npcm7xx_boards.c | 34
-rw-r--r--  hw/arm/virt.c | 29
-rw-r--r--  hw/char/cadence_uart.c | 61
-rw-r--r--  hw/display/qxl.c | 2
-rw-r--r--  hw/display/virtio-gpu-udmabuf.c | 1
-rw-r--r--  hw/display/virtio-gpu.c | 32
-rw-r--r--  hw/i386/acpi-build.c | 6
-rw-r--r--  hw/i386/intel_iommu.c | 8
-rw-r--r--  hw/intc/arm_gicv3.c | 14
-rw-r--r--  hw/intc/arm_gicv3_common.c | 13
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c | 7
-rw-r--r--  hw/intc/arm_gicv3_dist.c | 5
-rw-r--r--  hw/intc/arm_gicv3_its.c | 1322
-rw-r--r--  hw/intc/arm_gicv3_its_common.c | 7
-rw-r--r--  hw/intc/arm_gicv3_its_kvm.c | 2
-rw-r--r--  hw/intc/arm_gicv3_redist.c | 153
-rw-r--r--  hw/intc/gicv3_internal.h | 188
-rw-r--r--  hw/intc/meson.build | 1
-rw-r--r--  hw/misc/zynq_slcr.c | 31
-rw-r--r--  hw/vfio/igd.c | 2
-rw-r--r--  hw/vfio/pci-quirks.c | 2
-rw-r--r--  hw/vfio/pci.c | 6
-rw-r--r--  hw/vfio/platform.c | 2
-rw-r--r--  include/block/block_int.h | 61
-rw-r--r--  include/chardev/char.h | 34
-rw-r--r--  include/exec/translate-all.h | 1
-rw-r--r--  include/exec/translator.h | 44
-rw-r--r--  include/hw/arm/virt.h | 2
-rw-r--r--  include/hw/core/tcg-cpu-ops.h | 26
-rw-r--r--  include/hw/intc/arm_gicv3_common.h | 13
-rw-r--r--  include/hw/intc/arm_gicv3_its_common.h | 32
-rw-r--r--  include/hw/qdev-core.h | 28
-rw-r--r--  include/tcg/tcg-op.h | 2
-rw-r--r--  include/ui/console.h | 3
-rw-r--r--  include/ui/egl-helpers.h | 3
-rw-r--r--  include/ui/gtk.h | 5
-rw-r--r--  linux-user/main.c | 7
-rw-r--r--  meson.build | 2
-rw-r--r--  qemu-img-cmds.hx | 2
-rw-r--r--  qemu-img.c | 18
-rwxr-xr-x  scripts/simplebench/img_bench_templater.py | 95
-rw-r--r--  scripts/simplebench/table_templater.py | 62
-rw-r--r--  softmmu/qdev-monitor.c | 7
-rw-r--r--  target/alpha/cpu.c | 2
-rw-r--r--  target/alpha/cpu.h | 2
-rw-r--r--  target/alpha/helper.c | 5
-rw-r--r--  target/alpha/translate.c | 2
-rw-r--r--  target/arm/arm_ldst.h | 12
-rw-r--r--  target/arm/cpu.c | 7
-rw-r--r--  target/arm/cpu.h | 4
-rw-r--r--  target/arm/cpu_tcg.c | 6
-rw-r--r--  target/arm/helper-a64.c | 1
-rw-r--r--  target/arm/helper.c | 8
-rw-r--r--  target/arm/kvm.c | 7
-rw-r--r--  target/arm/kvm_arm.h | 4
-rw-r--r--  target/arm/syndrome.h | 5
-rw-r--r--  target/arm/translate-a64.c | 215
-rw-r--r--  target/arm/translate.c | 30
-rw-r--r--  target/arm/translate.h | 2
-rw-r--r--  target/avr/cpu.c | 3
-rw-r--r--  target/avr/translate.c | 8
-rw-r--r--  target/cris/cpu.c | 4
-rw-r--r--  target/cris/cpu.h | 2
-rw-r--r--  target/cris/helper.c | 17
-rw-r--r--  target/hexagon/translate.c | 3
-rw-r--r--  target/hppa/cpu.c | 2
-rw-r--r--  target/hppa/cpu.h | 4
-rw-r--r--  target/hppa/int_helper.c | 7
-rw-r--r--  target/hppa/translate.c | 5
-rw-r--r--  target/i386/cpu-sysemu.c | 2
-rw-r--r--  target/i386/cpu.h | 3
-rw-r--r--  target/i386/tcg/helper-tcg.h | 2
-rw-r--r--  target/i386/tcg/seg_helper.c | 74
-rw-r--r--  target/i386/tcg/sysemu/seg_helper.c | 62
-rw-r--r--  target/i386/tcg/tcg-cpu.c | 8
-rw-r--r--  target/i386/tcg/translate.c | 10
-rw-r--r--  target/m68k/cpu.c | 2
-rw-r--r--  target/m68k/cpu.h | 2
-rw-r--r--  target/m68k/op_helper.c | 16
-rw-r--r--  target/m68k/translate.c | 2
-rw-r--r--  target/microblaze/cpu.c | 2
-rw-r--r--  target/microblaze/cpu.h | 2
-rw-r--r--  target/microblaze/helper.c | 13
-rw-r--r--  target/mips/cpu.c | 2
-rw-r--r--  target/mips/tcg/exception.c | 18
-rw-r--r--  target/mips/tcg/micromips_translate.c.inc | 2
-rw-r--r--  target/mips/tcg/mips16e_translate.c.inc | 4
-rw-r--r--  target/mips/tcg/nanomips_translate.c.inc | 4
-rw-r--r--  target/mips/tcg/sysemu/tlb_helper.c | 18
-rw-r--r--  target/mips/tcg/tcg-internal.h | 5
-rw-r--r--  target/mips/tcg/translate.c | 8
-rw-r--r--  target/mips/tcg/user/tlb_helper.c | 5
-rw-r--r--  target/nios2/cpu.c | 5
-rw-r--r--  target/openrisc/cpu.c | 2
-rw-r--r--  target/openrisc/cpu.h | 5
-rw-r--r--  target/openrisc/interrupt.c | 2
-rw-r--r--  target/openrisc/meson.build | 6
-rw-r--r--  target/openrisc/translate.c | 2
-rw-r--r--  target/ppc/cpu.h | 4
-rw-r--r--  target/ppc/cpu_init.c | 2
-rw-r--r--  target/ppc/excp_helper.c | 21
-rw-r--r--  target/ppc/translate.c | 5
-rw-r--r--  target/riscv/cpu.c | 2
-rw-r--r--  target/riscv/cpu.h | 2
-rw-r--r--  target/riscv/cpu_helper.c | 5
-rw-r--r--  target/riscv/translate.c | 5
-rw-r--r--  target/rx/cpu.c | 2
-rw-r--r--  target/rx/cpu.h | 2
-rw-r--r--  target/rx/helper.c | 4
-rw-r--r--  target/s390x/tcg/translate.c | 16
-rw-r--r--  target/sh4/cpu.c | 2
-rw-r--r--  target/sh4/cpu.h | 4
-rw-r--r--  target/sh4/helper.c | 9
-rw-r--r--  target/sh4/translate.c | 4
-rw-r--r--  target/sparc/cpu.c | 6
-rw-r--r--  target/sparc/cpu.h | 1
-rw-r--r--  target/sparc/translate.c | 2
-rw-r--r--  target/xtensa/cpu.c | 2
-rw-r--r--  target/xtensa/cpu.h | 2
-rw-r--r--  target/xtensa/exc_helper.c | 7
-rw-r--r--  target/xtensa/translate.c | 5
-rw-r--r--  tcg/arm/tcg-target.c.inc | 515
-rw-r--r--  tcg/arm/tcg-target.h | 27
-rw-r--r--  tcg/i386/tcg-target.c.inc | 13
-rw-r--r--  tcg/ppc/tcg-target.c.inc | 25
-rw-r--r--  tests/data/acpi/virt/IORT | bin 0 -> 124 bytes
-rw-r--r--  tests/data/acpi/virt/IORT.memhp | bin 0 -> 124 bytes
-rw-r--r--  tests/data/acpi/virt/IORT.numamem | bin 0 -> 124 bytes
-rw-r--r--  tests/data/acpi/virt/IORT.pxb | bin 0 -> 124 bytes
-rwxr-xr-x  tests/qemu-iotests/122 | 2
-rwxr-xr-x  tests/qemu-iotests/271 | 5
-rw-r--r--  tests/qemu-iotests/271.out | 4
-rwxr-xr-x  tests/qemu-iotests/297 | 9
-rw-r--r--  tests/qemu-iotests/iotests.py | 12
-rwxr-xr-x  tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test | 13
-rwxr-xr-x  tests/qemu-iotests/tests/migrate-bitmaps-test | 43
-rwxr-xr-x  tests/qemu-iotests/tests/migrate-during-backup | 97
-rw-r--r--  tests/qemu-iotests/tests/migrate-during-backup.out | 5
-rwxr-xr-x  tests/qemu-iotests/tests/mirror-top-perms | 2
-rw-r--r--  tools/virtiofsd/fuse_lowlevel.h | 4
-rw-r--r--  ui/egl-helpers.c | 26
-rw-r--r--  ui/gtk-egl.c | 48
-rw-r--r--  ui/gtk-gl-area.c | 26
-rw-r--r--  ui/gtk.c | 26
-rw-r--r--  util/qemu-openpty.c | 7
173 files changed, 3963 insertions, 1101 deletions
diff --git a/.gitlab-ci.d/custom-runners.yml b/.gitlab-ci.d/custom-runners.yml
index 0d3e4a7b4b..a89a20da48 100644
--- a/.gitlab-ci.d/custom-runners.yml
+++ b/.gitlab-ci.d/custom-runners.yml
@@ -17,7 +17,6 @@ variables:
# setup by the scripts/ci/setup/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 18.04/20.04"
ubuntu-18.04-s390x-all-linux-static:
- allow_failure: true
needs: []
stage: build
tags:
@@ -37,7 +36,6 @@ ubuntu-18.04-s390x-all-linux-static:
- make --output-sync -j`nproc` check-tcg V=1
ubuntu-18.04-s390x-all:
- allow_failure: true
needs: []
stage: build
tags:
@@ -54,7 +52,6 @@ ubuntu-18.04-s390x-all:
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-alldbg:
- allow_failure: true
needs: []
stage: build
tags:
@@ -62,7 +59,11 @@ ubuntu-18.04-s390x-alldbg:
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
+ when: manual
+ allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
+ when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -72,7 +73,6 @@ ubuntu-18.04-s390x-alldbg:
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-clang:
- allow_failure: true
needs: []
stage: build
tags:
@@ -81,8 +81,10 @@ ubuntu-18.04-s390x-clang:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -91,7 +93,6 @@ ubuntu-18.04-s390x-clang:
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-tci:
- allow_failure: true
needs: []
stage: build
tags:
@@ -99,7 +100,11 @@ ubuntu-18.04-s390x-tci:
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
+ when: manual
+ allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
+ when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -107,7 +112,6 @@ ubuntu-18.04-s390x-tci:
- make --output-sync -j`nproc`
ubuntu-18.04-s390x-notcg:
- allow_failure: true
needs: []
stage: build
tags:
@@ -116,8 +120,10 @@ ubuntu-18.04-s390x-notcg:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -129,7 +135,6 @@ ubuntu-18.04-s390x-notcg:
# setup by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 18.04/20.04"
ubuntu-20.04-aarch64-all-linux-static:
- allow_failure: true
needs: []
stage: build
tags:
@@ -149,7 +154,6 @@ ubuntu-20.04-aarch64-all-linux-static:
- make --output-sync -j`nproc` check-tcg V=1
ubuntu-20.04-aarch64-all:
- allow_failure: true
needs: []
stage: build
tags:
@@ -157,7 +161,11 @@ ubuntu-20.04-aarch64-all:
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
+ when: manual
+ allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
+ when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -166,7 +174,6 @@ ubuntu-20.04-aarch64-all:
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-alldbg:
- allow_failure: true
needs: []
stage: build
tags:
@@ -184,7 +191,6 @@ ubuntu-20.04-aarch64-alldbg:
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-clang:
- allow_failure: true
needs: []
stage: build
tags:
@@ -193,8 +199,10 @@ ubuntu-20.04-aarch64-clang:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -203,7 +211,6 @@ ubuntu-20.04-aarch64-clang:
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-tci:
- allow_failure: true
needs: []
stage: build
tags:
@@ -211,7 +218,11 @@ ubuntu-20.04-aarch64-tci:
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
+ when: manual
+ allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
+ when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
@@ -219,7 +230,6 @@ ubuntu-20.04-aarch64-tci:
- make --output-sync -j`nproc`
ubuntu-20.04-aarch64-notcg:
- allow_failure: true
needs: []
stage: build
tags:
@@ -228,8 +238,10 @@ ubuntu-20.04-aarch64-notcg:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
+ allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
+ allow_failure: true
script:
- mkdir build
- cd build
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 0125c17edb..cace5ffe64 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -2469,7 +2469,7 @@ static int kvm_init(MachineState *ms)
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
- "Suggested mininum value is 1024.", strerror(-ret));
+ "Suggested minimum value is 1024.", strerror(-ret));
goto err;
}
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index e5c0ccd1a2..75dbc1e4e3 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -651,8 +651,8 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
loop */
#if defined(TARGET_I386)
CPUClass *cc = CPU_GET_CLASS(cpu);
- cc->tcg_ops->do_interrupt(cpu);
-#endif
+ cc->tcg_ops->fake_user_interrupt(cpu);
+#endif /* TARGET_I386 */
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
@@ -685,6 +685,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
return false;
}
+#ifndef CONFIG_USER_ONLY
/*
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
* "real" interrupt event later. It does not need to be recorded for
@@ -698,12 +699,11 @@ static inline bool need_replay_interrupt(int interrupt_request)
return true;
#endif
}
+#endif /* !CONFIG_USER_ONLY */
static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
* Ensure zeroing happens before reading cpu->exit_request or
@@ -725,6 +725,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
qemu_mutex_unlock_iothread();
return true;
}
+#if !defined(CONFIG_USER_ONLY)
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
@@ -753,12 +754,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
qemu_mutex_unlock_iothread();
return true;
}
-#endif
+#endif /* !TARGET_I386 */
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
if (cc->tcg_ops->cpu_exec_interrupt &&
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
@@ -777,6 +780,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* reload the 'interrupt_request' value */
interrupt_request = cpu->interrupt_request;
}
+#endif /* !CONFIG_USER_ONLY */
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
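The hunks above depend on a matching reshuffle of the TCGCPUOps hooks in
include/hw/core/tcg-cpu-ops.h (26 lines in the diffstat): user-mode i386
gets a dedicated fake_user_interrupt hook, while do_interrupt and
cpu_exec_interrupt become sysemu-only. As an abridged, illustrative sketch
of the resulting layout (field order and surrounding members elided, types
as in QEMU's headers):

    /* Sketch, not part of this patch: abridged TCGCPUOps after the split. */
    struct TCGCPUOps {
        void (*initialize)(void);
    #ifdef CONFIG_USER_ONLY
        /*
         * i386 user mode only: fake a hardware response to INT etc., as
         * invoked from cpu_handle_exception() above.
         */
        void (*fake_user_interrupt)(CPUState *cpu);
    #else
        /* Sysemu only, matching the new CONFIG_USER_ONLY guards above. */
        bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
        void (*do_interrupt)(CPUState *cpu);
    #endif /* !CONFIG_USER_ONLY */
    };
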
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index c02c061ecb..a5fd26190e 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -60,8 +60,6 @@ void rr_kick_vcpu_thread(CPUState *unused)
static QEMUTimer *rr_kick_vcpu_timer;
static CPUState *rr_current_cpu;
-#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
-
static inline int64_t rr_next_kick_time(void)
{
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index bbfcfb698c..fb9ebfad9e 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1297,31 +1297,8 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
invalidate_page_bitmap(p);
#if defined(CONFIG_USER_ONLY)
- if (p->flags & PAGE_WRITE) {
- target_ulong addr;
- PageDesc *p2;
- int prot;
-
- /* force the host page as non writable (writes will have a
- page fault + mprotect overhead) */
- page_addr &= qemu_host_page_mask;
- prot = 0;
- for (addr = page_addr; addr < page_addr + qemu_host_page_size;
- addr += TARGET_PAGE_SIZE) {
-
- p2 = page_find(addr >> TARGET_PAGE_BITS);
- if (!p2) {
- continue;
- }
- prot |= p2->flags;
- p2->flags &= ~PAGE_WRITE;
- }
- mprotect(g2h_untagged(page_addr), qemu_host_page_size,
- (prot & PAGE_BITS) & ~PAGE_WRITE);
- if (DEBUG_TB_INVALIDATE_GATE) {
- printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
- }
- }
+ /* translator_loop() must have made all TB pages non-writable */
+ assert(!(p->flags & PAGE_WRITE));
#else
/* if some code is already present, then the pages are already
protected. So we handle the case where only the first TB is
@@ -2394,6 +2371,38 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
return 0;
}
+void page_protect(tb_page_addr_t page_addr)
+{
+ target_ulong addr;
+ PageDesc *p;
+ int prot;
+
+ p = page_find(page_addr >> TARGET_PAGE_BITS);
+ if (p && (p->flags & PAGE_WRITE)) {
+ /*
+ * Force the host page as non writable (writes will have a page fault +
+ * mprotect overhead).
+ */
+ page_addr &= qemu_host_page_mask;
+ prot = 0;
+ for (addr = page_addr; addr < page_addr + qemu_host_page_size;
+ addr += TARGET_PAGE_SIZE) {
+
+ p = page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ continue;
+ }
+ prot |= p->flags;
+ p->flags &= ~PAGE_WRITE;
+ }
+ mprotect(g2h_untagged(page_addr), qemu_host_page_size,
+ (prot & PAGE_BITS) & ~PAGE_WRITE);
+ if (DEBUG_TB_INVALIDATE_GATE) {
+ printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
+ }
+ }
+}
+
/* called from signal handler: invalidate the code and unprotect the
* page. Return 0 if the fault was not handled, 1 if it was handled,
* and 2 if it was handled but the caller must cause the TB to be
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index c53a7f8e44..390bd9db0a 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -42,6 +42,15 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}
+static inline void translator_page_protect(DisasContextBase *dcbase,
+ target_ulong pc)
+{
+#ifdef CONFIG_USER_ONLY
+ dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
+ page_protect(pc);
+#endif
+}
+
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns)
{
@@ -56,6 +65,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
db->num_insns = 0;
db->max_insns = max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
+ translator_page_protect(db, db->pc_next);
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -137,3 +147,32 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
}
#endif
}
+
+static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
+ target_ulong pc, size_t len)
+{
+#ifdef CONFIG_USER_ONLY
+ target_ulong end = pc + len - 1;
+
+ if (end > dcbase->page_protect_end) {
+ translator_page_protect(dcbase, end);
+ }
+#endif
+}
+
+#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
+ type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
+ abi_ptr pc, bool do_swap) \
+ { \
+ translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
+ type ret = load_fn(env, pc); \
+ if (do_swap) { \
+ ret = swap_fn(ret); \
+ } \
+ plugin_insn_append(&ret, sizeof(ret)); \
+ return ret; \
+ }
+
+FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
+
+#undef GEN_TRANSLATOR_LD
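FOR_EACH_TRANSLATOR_LD supplies one (fullname, type, load_fn, swap_fn)
tuple per access size; the list itself lives in include/exec/translator.h
(44 lines in the diffstat, not shown in this section). Assuming a
hypothetical entry of the form F(translator_ldl, uint32_t, cpu_ldl_code,
bswap32), the macro above would expand to roughly:

    /* Illustrative expansion of GEN_TRANSLATOR_LD; not literal tree content. */
    uint32_t translator_ldl_swap(CPUArchState *env, DisasContextBase *dcbase,
                                 abi_ptr pc, bool do_swap)
    {
        translator_maybe_page_protect(dcbase, pc, sizeof(uint32_t));
        uint32_t ret = cpu_ldl_code(env, pc);
        if (do_swap) {
            ret = bswap32(ret);
        }
        plugin_insn_append(&ret, sizeof(ret));
        return ret;
    }

so every code load first forces its page read-only (in user mode),
which is what makes the new assertion in tb_page_add() hold.
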
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 90d1a2d327..8fed542622 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -680,18 +680,26 @@ int cpu_signal_handler(int host_signum, void *pinfo,
pc = uc->uc_mcontext.psw.addr;
- /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
- of the normal 2 arguments. The 3rd argument contains the "int_code"
- from the hardware which does in fact contain the is_write value.
- The rt signal handler, as far as I can tell, does not give this value
- at all. Not that we could get to it from here even if it were. */
- /* ??? This is not even close to complete, since it ignores all
- of the read-modify-write instructions. */
+ /*
+ * ??? On linux, the non-rt signal handler has 4 (!) arguments instead
+ * of the normal 2 arguments. The 4th argument contains the "Translation-
+ * Exception Identification for DAT Exceptions" from the hardware (aka
+ * "int_parm_long"), which does in fact contain the is_write value.
+ * The rt signal handler, as far as I can tell, does not give this value
+ * at all. Not that we could get to it from here even if it were.
+ * So fall back to parsing instructions. Treat read-modify-write ones as
+ * writes, which is not fully correct, but for tracking self-modifying code
+ * this is better than treating them as reads. Checking si_addr page flags
+ * might be a viable improvement, albeit a racy one.
+ */
+ /* ??? This is not even close to complete. */
pinsn = (uint16_t *)pc;
switch (pinsn[0] >> 8) {
case 0x50: /* ST */
case 0x42: /* STC */
case 0x40: /* STH */
+ case 0xba: /* CS */
+ case 0xbb: /* CDS */
is_write = 1;
break;
case 0xc4: /* RIL format insns */
@@ -702,6 +710,12 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
break;
+ case 0xc8: /* SSF format insns */
+ switch (pinsn[0] & 0xf) {
+ case 0x2: /* CSST */
+ is_write = 1;
+ }
+ break;
case 0xe3: /* RXY format insns */
switch (pinsn[2] & 0xff) {
case 0x50: /* STY */
@@ -715,7 +729,27 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
break;
+ case 0xeb: /* RSY format insns */
+ switch (pinsn[2] & 0xff) {
+ case 0x14: /* CSY */
+ case 0x30: /* CSG */
+ case 0x31: /* CDSY */
+ case 0x3e: /* CDSG */
+ case 0xe4: /* LANG */
+ case 0xe6: /* LAOG */
+ case 0xe7: /* LAXG */
+ case 0xe8: /* LAAG */
+ case 0xea: /* LAALG */
+ case 0xf4: /* LAN */
+ case 0xf6: /* LAO */
+ case 0xf7: /* LAX */
+ case 0xfa: /* LAAL */
+ case 0xf8: /* LAA */
+ is_write = 1;
+ }
+ break;
}
+
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
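A note on the opcode arithmetic above: s390x instructions are big-endian,
the major opcode is byte 0 of the instruction (hence pinsn[0] >> 8), and
for the 6-byte RSY format the extended opcode is byte 5 (hence
pinsn[2] & 0xff). A self-contained sketch with a hand-assembled
CSY %r1,%r3,0(%r2) (example encoding, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /*
         * Halfwords of CSY %r1,%r3,0(%r2): on the (big-endian) s390x host
         * the handler runs on, these match the byte stream eb 13 20 00 00 14.
         */
        const uint16_t insn[3] = { 0xeb13, 0x2000, 0x0014 };

        printf("major opcode:    0x%02x\n", insn[0] >> 8);   /* 0xeb: RSY */
        printf("extended opcode: 0x%02x\n", insn[2] & 0xff); /* 0x14: CSY */
        /* 0x14 is in the new RSY switch above, so is_write = 1. */
        return 0;
    }
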
diff --git a/block.c b/block.c
index b2b66263f9..5ce08a79fd 100644
--- a/block.c
+++ b/block.c
@@ -49,6 +49,8 @@
#include "qemu/timer.h"
#include "qemu/cutils.h"
#include "qemu/id.h"
+#include "qemu/range.h"
+#include "qemu/rcu.h"
#include "block/coroutines.h"
#ifdef CONFIG_BSD
@@ -401,6 +403,9 @@ BlockDriverState *bdrv_new(void)
qemu_co_queue_init(&bs->flush_queue);
+ qemu_co_mutex_init(&bs->bsc_modify_lock);
+ bs->block_status_cache = g_new0(BdrvBlockStatusCache, 1);
+
for (i = 0; i < bdrv_drain_all_count; i++) {
bdrv_drained_begin(bs);
}
@@ -4694,6 +4699,8 @@ static void bdrv_close(BlockDriverState *bs)
bs->explicit_options = NULL;
qobject_unref(bs->full_open_options);
bs->full_open_options = NULL;
+ g_free(bs->block_status_cache);
+ bs->block_status_cache = NULL;
bdrv_release_named_dirty_bitmaps(bs);
assert(QLIST_EMPTY(&bs->dirty_bitmaps));
@@ -6319,6 +6326,7 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
{
BdrvChild *child, *parent;
int ret;
+ uint64_t cumulative_perms, cumulative_shared_perms;
if (!bs->drv) {
return -ENOMEDIUM;
@@ -6349,6 +6357,13 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
}
}
+ bdrv_get_cumulative_perm(bs, &cumulative_perms,
+ &cumulative_shared_perms);
+ if (cumulative_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
+ /* Our inactive parents still need write access. Inactivation failed. */
+ return -EPERM;
+ }
+
bs->open_flags |= BDRV_O_INACTIVE;
/*
@@ -7684,3 +7699,76 @@ BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs)
{
return bdrv_skip_filters(bdrv_cow_bs(bdrv_skip_filters(bs)));
}
+
+/**
+ * Check whether [offset, offset + bytes) overlaps with the cached
+ * block-status data region.
+ *
+ * If so, and @pnum is not NULL, set *pnum to `bsc.data_end - offset`,
+ * which is what bdrv_bsc_is_data()'s interface needs.
+ * Otherwise, *pnum is not touched.
+ */
+static bool bdrv_bsc_range_overlaps_locked(BlockDriverState *bs,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum)
+{
+ BdrvBlockStatusCache *bsc = qatomic_rcu_read(&bs->block_status_cache);
+ bool overlaps;
+
+ overlaps =
+ qatomic_read(&bsc->valid) &&
+ ranges_overlap(offset, bytes, bsc->data_start,
+ bsc->data_end - bsc->data_start);
+
+ if (overlaps && pnum) {
+ *pnum = bsc->data_end - offset;
+ }
+
+ return overlaps;
+}
+
+/**
+ * See block_int.h for this function's documentation.
+ */
+bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum)
+{
+ RCU_READ_LOCK_GUARD();
+
+ return bdrv_bsc_range_overlaps_locked(bs, offset, 1, pnum);
+}
+
+/**
+ * See block_int.h for this function's documentation.
+ */
+void bdrv_bsc_invalidate_range(BlockDriverState *bs,
+ int64_t offset, int64_t bytes)
+{
+ RCU_READ_LOCK_GUARD();
+
+ if (bdrv_bsc_range_overlaps_locked(bs, offset, bytes, NULL)) {
+ qatomic_set(&bs->block_status_cache->valid, false);
+ }
+}
+
+/**
+ * See block_int.h for this function's documentation.
+ */
+void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes)
+{
+ BdrvBlockStatusCache *new_bsc = g_new(BdrvBlockStatusCache, 1);
+ BdrvBlockStatusCache *old_bsc;
+
+ *new_bsc = (BdrvBlockStatusCache) {
+ .valid = true,
+ .data_start = offset,
+ .data_end = offset + bytes,
+ };
+
+ QEMU_LOCK_GUARD(&bs->bsc_modify_lock);
+
+ old_bsc = qatomic_rcu_read(&bs->block_status_cache);
+ qatomic_rcu_set(&bs->block_status_cache, new_bsc);
+ if (old_bsc) {
+ g_free_rcu(old_bsc, rcu);
+ }
+}
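The three bdrv_bsc_* functions above implement a classic RCU
publish/reclaim scheme; since the pattern is spread across them, here is
the writer side distilled into one place, with the synchronization rules
spelled out (a sketch using this patch's names, not additional API):

    /*
     * Sketch, not part of this patch.  Readers (bdrv_bsc_is_data() above)
     * only take an RCU read lock; the writer's single atomic pointer swap
     * is the sole synchronization point between readers and writers.
     */
    static void bsc_publish_example(BlockDriverState *bs,
                                    int64_t start, int64_t end)
    {
        BdrvBlockStatusCache *new_bsc = g_new(BdrvBlockStatusCache, 1);
        BdrvBlockStatusCache *old_bsc;

        *new_bsc = (BdrvBlockStatusCache) {
            .valid = true,
            .data_start = start,
            .data_end = end,
        };

        /* Serializes writers only, so each old object is freed exactly once */
        QEMU_LOCK_GUARD(&bs->bsc_modify_lock);

        old_bsc = qatomic_rcu_read(&bs->block_status_cache);
        qatomic_rcu_set(&bs->block_status_cache, new_bsc);  /* publish */
        g_free_rcu(old_bsc, rcu);  /* reclaimed after readers drain */
    }
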
diff --git a/block/file-posix.c b/block/file-posix.c
index cb9bffe047..d81e15efa4 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -1705,7 +1705,7 @@ static int handle_aiocb_write_zeroes(void *opaque)
*/
warn_report_once("Your file system is misbehaving: "
"fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. "
- "Please report this bug to your file sytem "
+ "Please report this bug to your file system "
"vendor.");
} else if (ret != -ENOTSUP) {
return ret;
@@ -2744,7 +2744,8 @@ static int find_allocation(BlockDriverState *bs, off_t start,
* the specified offset) that are known to be in the same
* allocated/unallocated state.
*
- * 'bytes' is the max value 'pnum' should be set to.
+ * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
+ * well exceed it.
*/
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
bool want_zero,
@@ -2782,7 +2783,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
} else if (data == offset) {
/* On a data extent, compute bytes to the end of the extent,
* possibly including a partial sector at EOF. */
- *pnum = MIN(bytes, hole - offset);
+ *pnum = hole - offset;
/*
* We are not allowed to return partial sectors, though, so
@@ -2801,7 +2802,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
} else {
/* On a hole, compute bytes to the beginning of the next extent. */
assert(hole == offset);
- *pnum = MIN(bytes, data - offset);
+ *pnum = data - offset;
ret = BDRV_BLOCK_ZERO;
}
*map = offset;
diff --git a/block/gluster.c b/block/gluster.c
index e8ee14c8e9..d51938e447 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -1461,7 +1461,8 @@ exit:
* the specified offset) that are known to be in the same
* allocated/unallocated state.
*
- * 'bytes' is the max value 'pnum' should be set to.
+ * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
+ * well exceed it.
*
* (Based on raw_co_block_status() from file-posix.c.)
*/
@@ -1477,6 +1478,8 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
off_t data = 0, hole = 0;
int ret = -EINVAL;
+ assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));
+
if (!s->fd) {
return ret;
}
@@ -1500,12 +1503,26 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
} else if (data == offset) {
/* On a data extent, compute bytes to the end of the extent,
* possibly including a partial sector at EOF. */
- *pnum = MIN(bytes, hole - offset);
+ *pnum = hole - offset;
+
+ /*
+ * We are not allowed to return partial sectors, though, so
+ * round up if necessary.
+ */
+ if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
+ int64_t file_length = qemu_gluster_getlength(bs);
+ if (file_length > 0) {
+ /* Ignore errors, this is just a safeguard */
+ assert(hole == file_length);
+ }
+ *pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
+ }
+
ret = BDRV_BLOCK_DATA;
} else {
/* On a hole, compute bytes to the beginning of the next extent. */
assert(hole == offset);
- *pnum = MIN(bytes, data - offset);
+ *pnum = data - offset;
ret = BDRV_BLOCK_ZERO;
}
diff --git a/block/io.c b/block/io.c
index a19942718b..99ee182ca4 100644
--- a/block/io.c
+++ b/block/io.c
@@ -1883,6 +1883,9 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
return -ENOTSUP;
}
+ /* Invalidate the cached block-status data range if this write overlaps */
+ bdrv_bsc_invalidate_range(bs, offset, bytes);
+
assert(alignment % bs->bl.request_alignment == 0);
head = offset % alignment;
tail = (offset + bytes) % alignment;
@@ -2447,9 +2450,65 @@ static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
if (bs->drv->bdrv_co_block_status) {
- ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
- aligned_bytes, pnum, &local_map,
- &local_file);
+ /*
+ * Use the block-status cache only for protocol nodes: Format
+ * drivers are generally quick to inquire the status, but protocol
+ * drivers often need to get information from outside of qemu, so
+ * we do not have control over the actual implementation. There
+ * have been cases where inquiring the status took an unreasonably
+ * long time, and we can do nothing in qemu to fix it.
+ * This is especially problematic for images with large data areas,
+ * because finding the few holes in them and giving them special
+ * treatment does not gain much performance. Therefore, we try to
+ * cache the last-identified data region.
+ *
+ * Second, limiting ourselves to protocol nodes allows us to assume
+ * the block status for data regions to be DATA | OFFSET_VALID, and
+ * that the host offset is the same as the guest offset.
+ *
+ * Note that it is possible that external writers zero parts of
+ * the cached regions without the cache being invalidated, and so
+ * we may report zeroes as data. This is not catastrophic,
+ * however, because reporting zeroes as data is fine.
+ */
+ if (QLIST_EMPTY(&bs->children) &&
+ bdrv_bsc_is_data(bs, aligned_offset, pnum))
+ {
+ ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
+ local_file = bs;
+ local_map = aligned_offset;
+ } else {
+ ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
+ aligned_bytes, pnum, &local_map,
+ &local_file);
+
+ /*
+ * Note that checking QLIST_EMPTY(&bs->children) is also done when
+ * the cache is queried above. Technically, we do not need to check
+ * it here; the worst that can happen is that we fill the cache for
+ * non-protocol nodes, and then it is never used. However, filling
+ * the cache requires an RCU update, so double check here to avoid
+ * such an update if possible.
+ */
+ if (ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
+ QLIST_EMPTY(&bs->children))
+ {
+ /*
+ * When a protocol driver reports BLOCK_OFFSET_VALID, the
+ * returned local_map value must be the same as the offset we
+ * have passed (aligned_offset), and local_bs must be the node
+ * itself.
+ * Assert this, because we follow this rule when reading from
+ * the cache (see the `local_file = bs` and
+ * `local_map = aligned_offset` assignments above), and the
+ * result the cache delivers must be the same as the driver
+ * would deliver.
+ */
+ assert(local_file == bs);
+ assert(local_map == aligned_offset);
+ bdrv_bsc_fill(bs, aligned_offset, *pnum);
+ }
+ }
} else {
/* Default code for filters */
@@ -3002,6 +3061,9 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
return 0;
}
+ /* Invalidate the cached block-status data range if this discard overlaps */
+ bdrv_bsc_invalidate_range(bs, offset, bytes);
+
/* Discard is advisory, but some devices track and coalesce
* unaligned requests, so we must pass everything down rather than
* round here. Still, most devices will just silently ignore
diff --git a/block/iscsi.c b/block/iscsi.c
index 4d2a416ce7..852384086b 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -781,9 +781,6 @@ retry:
iscsi_allocmap_set_allocated(iscsilun, offset, *pnum);
}
- if (*pnum > bytes) {
- *pnum = bytes;
- }
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
g_free(iTask.err_str);
diff --git a/block/mirror.c b/block/mirror.c
index 98fc66eabf..85b781bc21 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -160,18 +160,25 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
if (ranges_overlap(self_start_chunk, self_nb_chunks,
op_start_chunk, op_nb_chunks))
{
- /*
- * If the operation is already (indirectly) waiting for us, or
- * will wait for us as soon as it wakes up, then just go on
- * (instead of producing a deadlock in the former case).
- */
- if (op->waiting_for_op) {
- continue;
+ if (self) {
+ /*
+ * If the operation is already (indirectly) waiting for us,
+ * or will wait for us as soon as it wakes up, then just go
+ * on (instead of producing a deadlock in the former case).
+ */
+ if (op->waiting_for_op) {
+ continue;
+ }
+
+ self->waiting_for_op = op;
}
- self->waiting_for_op = op;
qemu_co_queue_wait(&op->waiting_requests, NULL);
- self->waiting_for_op = NULL;
+
+ if (self) {
+ self->waiting_for_op = NULL;
+ }
+
break;
}
}
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index bd0597842f..4ebb49a087 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -556,8 +556,7 @@ static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
* offset needs to be aligned to a cluster boundary.
*
* If the cluster is unallocated then *host_offset will be 0.
- * If the cluster is compressed then *host_offset will contain the
- * complete compressed cluster descriptor.
+ * If the cluster is compressed then *host_offset will contain the l2 entry.
*
* On entry, *bytes is the maximum number of contiguous bytes starting at
* offset that we are interested in.
@@ -660,7 +659,7 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
ret = -EIO;
goto fail;
}
- *host_offset = l2_entry & L2E_COMPRESSED_OFFSET_SIZE_MASK;
+ *host_offset = l2_entry;
break;
case QCOW2_SUBCLUSTER_ZERO_PLAIN:
case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
@@ -1400,29 +1399,47 @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
if (end <= old_start || start >= old_end) {
/* No intersection */
+ continue;
+ }
+
+ if (old_alloc->keep_old_clusters &&
+ (end <= l2meta_cow_start(old_alloc) ||
+ start >= l2meta_cow_end(old_alloc)))
+ {
+ /*
+ * Clusters intersect but COW areas don't. And cluster itself is
+ * already allocated. So, there is no actual conflict.
+ */
+ continue;
+ }
+
+ /* Conflict */
+
+ if (start < old_start) {
+ /* Stop at the start of a running allocation */
+ bytes = old_start - start;
} else {
- if (start < old_start) {
- /* Stop at the start of a running allocation */
- bytes = old_start - start;
- } else {
- bytes = 0;
- }
+ bytes = 0;
+ }
- /* Stop if already an l2meta exists. After yielding, it wouldn't
- * be valid any more, so we'd have to clean up the old L2Metas
- * and deal with requests depending on them before starting to
- * gather new ones. Not worth the trouble. */
- if (bytes == 0 && *m) {
- *cur_bytes = 0;
- return 0;
- }
+ /*
+ * Stop if an l2meta already exists. After yielding, it wouldn't
+ * be valid any more, so we'd have to clean up the old L2Metas
+ * and deal with requests depending on them before starting to
+ * gather new ones. Not worth the trouble.
+ */
+ if (bytes == 0 && *m) {
+ *cur_bytes = 0;
+ return 0;
+ }
- if (bytes == 0) {
- /* Wait for the dependency to complete. We need to recheck
- * the free/allocated clusters when we continue. */
- qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
- return -EAGAIN;
- }
+ if (bytes == 0) {
+ /*
+ * Wait for the dependency to complete. We need to recheck
+ * the free/allocated clusters when we continue.
+ */
+ qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
+ return -EAGAIN;
}
}
@@ -2463,3 +2480,18 @@ fail:
g_free(l1_table);
return ret;
}
+
+void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
+ uint64_t *coffset, int *csize)
+{
+ BDRVQcow2State *s = bs->opaque;
+ int nb_csectors;
+
+ assert(qcow2_get_cluster_type(bs, l2_entry) == QCOW2_CLUSTER_COMPRESSED);
+
+ *coffset = l2_entry & s->cluster_offset_mask;
+
+ nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1;
+ *csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
+ (*coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1));
+}
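qcow2_parse_compressed_l2_entry() is pure bit arithmetic on the compressed
cluster descriptor. A self-contained worked example for a 64 KiB-cluster
image; the derived constants follow how qcow2 computes them at open time
(csize_shift = 62 - (cluster_bits - 8), csize_mask =
(1 << (cluster_bits - 8)) - 1, cluster_offset_mask =
(1ULL << csize_shift) - 1 — stated here as an assumption, the
authoritative source is block/qcow2.c):

    #include <inttypes.h>
    #include <stdio.h>

    #define QCOW2_COMPRESSED_SECTOR_SIZE 512U

    int main(void)
    {
        const int cluster_bits = 16;                     /* 64 KiB clusters */
        const int csize_shift = 62 - (cluster_bits - 8); /* 54 */
        const uint64_t csize_mask = (1u << (cluster_bits - 8)) - 1;
        const uint64_t cluster_offset_mask = (1ULL << csize_shift) - 1;

        /* Sample descriptor: host offset 0x50200, 4 compressed sectors */
        uint64_t l2_entry = (3ULL << csize_shift) | 0x50200;

        uint64_t coffset = l2_entry & cluster_offset_mask;
        int nb_csectors = ((l2_entry >> csize_shift) & csize_mask) + 1;
        int csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
                    (coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1));

        /* Prints coffset=0x50200 csize=1536 (4 * 512 minus 512 in-sector) */
        printf("coffset=0x%" PRIx64 " csize=%d\n", coffset, csize);
        return 0;
    }
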
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 8e649b008e..4614572252 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -1177,11 +1177,11 @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
switch (ctype) {
case QCOW2_CLUSTER_COMPRESSED:
{
- int64_t offset = (l2_entry & s->cluster_offset_mask)
- & QCOW2_COMPRESSED_SECTOR_MASK;
- int size = QCOW2_COMPRESSED_SECTOR_SIZE *
- (((l2_entry >> s->csize_shift) & s->csize_mask) + 1);
- qcow2_free_clusters(bs, offset, size, type);
+ uint64_t coffset;
+ int csize;
+
+ qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
+ qcow2_free_clusters(bs, coffset, csize, type);
}
break;
case QCOW2_CLUSTER_NORMAL:
@@ -1247,7 +1247,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
bool l1_allocated = false;
int64_t old_entry, old_l2_offset;
unsigned slice, slice_size2, n_slices;
- int i, j, l1_modified = 0, nb_csectors;
+ int i, j, l1_modified = 0;
int ret;
assert(addend >= -1 && addend <= 1);
@@ -1318,14 +1318,14 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
switch (qcow2_get_cluster_type(bs, entry)) {
case QCOW2_CLUSTER_COMPRESSED:
- nb_csectors = ((entry >> s->csize_shift) &
- s->csize_mask) + 1;
if (addend != 0) {
- uint64_t coffset = (entry & s->cluster_offset_mask)
- & QCOW2_COMPRESSED_SECTOR_MASK;
+ uint64_t coffset;
+ int csize;
+
+ qcow2_parse_compressed_l2_entry(bs, entry,
+ &coffset, &csize);
ret = update_refcount(
- bs, coffset,
- nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE,
+ bs, coffset, csize,
abs(addend), addend < 0,
QCOW2_DISCARD_SNAPSHOT);
if (ret < 0) {
@@ -1588,6 +1588,66 @@ enum {
};
/*
+ * Fix L2 entry by making it QCOW2_CLUSTER_ZERO_PLAIN (or making all its present
+ * subclusters QCOW2_SUBCLUSTER_ZERO_PLAIN).
+ *
+ * This function decrements res->corruptions on success, so the caller is
+ * responsible to increment res->corruptions prior to the call.
+ *
+ * On failure in-memory @l2_table may be modified.
+ */
+static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
+ uint64_t l2_offset,
+ uint64_t *l2_table, int l2_index, bool active,
+ bool *metadata_overlap)
+{
+ BDRVQcow2State *s = bs->opaque;
+ int ret;
+ int idx = l2_index * (l2_entry_size(s) / sizeof(uint64_t));
+ uint64_t l2e_offset = l2_offset + (uint64_t)l2_index * l2_entry_size(s);
+ int ign = active ? QCOW2_OL_ACTIVE_L2 : QCOW2_OL_INACTIVE_L2;
+
+ if (has_subclusters(s)) {
+ uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, l2_index);
+
+ /* Allocated subclusters become zero */
+ l2_bitmap |= l2_bitmap << 32;
+ l2_bitmap &= QCOW_L2_BITMAP_ALL_ZEROES;
+
+ set_l2_bitmap(s, l2_table, l2_index, l2_bitmap);
+ set_l2_entry(s, l2_table, l2_index, 0);
+ } else {
+ set_l2_entry(s, l2_table, l2_index, QCOW_OFLAG_ZERO);
+ }
+
+ ret = qcow2_pre_write_overlap_check(bs, ign, l2e_offset, l2_entry_size(s),
+ false);
+ if (metadata_overlap) {
+ *metadata_overlap = ret < 0;
+ }
+ if (ret < 0) {
+ fprintf(stderr, "ERROR: Overlap check failed\n");
+ goto fail;
+ }
+
+ ret = bdrv_pwrite_sync(bs->file, l2e_offset, &l2_table[idx],
+ l2_entry_size(s));
+ if (ret < 0) {
+ fprintf(stderr, "ERROR: Failed to overwrite L2 "
+ "table entry: %s\n", strerror(-ret));
+ goto fail;
+ }
+
+ res->corruptions--;
+ res->corruptions_fixed++;
+ return 0;
+
+fail:
+ res->check_errors++;
+ return ret;
+}
+
+/*
* Increases the refcount in the given refcount table for the all clusters
* referenced in the L2 table. While doing so, performs some checks on L2
* entries.
@@ -1601,26 +1661,41 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
int flags, BdrvCheckMode fix, bool active)
{
BDRVQcow2State *s = bs->opaque;
- uint64_t *l2_table, l2_entry;
+ uint64_t l2_entry, l2_bitmap;
uint64_t next_contiguous_offset = 0;
- int i, l2_size, nb_csectors, ret;
+ int i, ret;
+ size_t l2_size_bytes = s->l2_size * l2_entry_size(s);
+ g_autofree uint64_t *l2_table = g_malloc(l2_size_bytes);
+ bool metadata_overlap;
/* Read L2 table from disk */
- l2_size = s->l2_size * l2_entry_size(s);
- l2_table = g_malloc(l2_size);
-
- ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
+ ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size_bytes);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
res->check_errors++;
- goto fail;
+ return ret;
}
/* Do the actual checks */
- for(i = 0; i < s->l2_size; i++) {
+ for (i = 0; i < s->l2_size; i++) {
+ uint64_t coffset;
+ int csize;
+ QCow2ClusterType type;
+
l2_entry = get_l2_entry(s, l2_table, i);
+ l2_bitmap = get_l2_bitmap(s, l2_table, i);
+ type = qcow2_get_cluster_type(bs, l2_entry);
+
+ if (type != QCOW2_CLUSTER_COMPRESSED) {
+ /* Check reserved bits of Standard Cluster Descriptor */
+ if (l2_entry & L2E_STD_RESERVED_MASK) {
+ fprintf(stderr, "ERROR found l2 entry with reserved bits set: "
+ "%" PRIx64 "\n", l2_entry);
+ res->corruptions++;
+ }
+ }
- switch (qcow2_get_cluster_type(bs, l2_entry)) {
+ switch (type) {
case QCOW2_CLUSTER_COMPRESSED:
/* Compressed clusters don't have QCOW_OFLAG_COPIED */
if (l2_entry & QCOW_OFLAG_COPIED) {
@@ -1638,23 +1713,28 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
break;
}
+ if (l2_bitmap) {
+ fprintf(stderr, "ERROR compressed cluster %d with non-zero "
+ "subcluster allocation bitmap, entry=0x%" PRIx64 "\n",
+ i, l2_entry);
+ res->corruptions++;
+ break;
+ }
+
/* Mark cluster as used */
- nb_csectors = ((l2_entry >> s->csize_shift) &
- s->csize_mask) + 1;
- l2_entry &= s->cluster_offset_mask;
+ qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
ret = qcow2_inc_refcounts_imrt(
- bs, res, refcount_table, refcount_table_size,
- l2_entry & QCOW2_COMPRESSED_SECTOR_MASK,
- nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE);
+ bs, res, refcount_table, refcount_table_size, coffset, csize);
if (ret < 0) {
- goto fail;
+ return ret;
}
if (flags & CHECK_FRAG_INFO) {
res->bfi.allocated_clusters++;
res->bfi.compressed_clusters++;
- /* Compressed clusters are fragmented by nature. Since they
+ /*
+ * Compressed clusters are fragmented by nature. Since they
* take up sub-sector space but we only have sector granularity
* I/O we need to re-read the same sectors even for adjacent
* compressed clusters.
@@ -1668,13 +1748,19 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
{
uint64_t offset = l2_entry & L2E_OFFSET_MASK;
+ if ((l2_bitmap >> 32) & l2_bitmap) {
+ res->corruptions++;
+ fprintf(stderr, "ERROR offset=%" PRIx64 ": Allocated "
+ "cluster has corrupted subcluster allocation bitmap\n",
+ offset);
+ }
+
/* Correct offsets are cluster aligned */
if (offset_into_cluster(s, offset)) {
bool contains_data;
res->corruptions++;
if (has_subclusters(s)) {
- uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, i);
contains_data = (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC);
} else {
contains_data = !(l2_entry & QCOW_OFLAG_ZERO);
@@ -1687,40 +1773,30 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
offset);
if (fix & BDRV_FIX_ERRORS) {
- int idx = i * (l2_entry_size(s) / sizeof(uint64_t));
- uint64_t l2e_offset =
- l2_offset + (uint64_t)i * l2_entry_size(s);
- int ign = active ? QCOW2_OL_ACTIVE_L2 :
- QCOW2_OL_INACTIVE_L2;
-
- l2_entry = has_subclusters(s) ? 0 : QCOW_OFLAG_ZERO;
- set_l2_entry(s, l2_table, i, l2_entry);
- ret = qcow2_pre_write_overlap_check(bs, ign,
- l2e_offset, l2_entry_size(s), false);
- if (ret < 0) {
- fprintf(stderr, "ERROR: Overlap check failed\n");
- res->check_errors++;
- /* Something is seriously wrong, so abort checking
- * this L2 table */
- goto fail;
+ ret = fix_l2_entry_by_zero(bs, res, l2_offset,
+ l2_table, i, active,
+ &metadata_overlap);
+ if (metadata_overlap) {
+ /*
+ * Something is seriously wrong, so abort checking
+ * this L2 table.
+ */
+ return ret;
}
- ret = bdrv_pwrite_sync(bs->file, l2e_offset,
- &l2_table[idx],
- l2_entry_size(s));
- if (ret < 0) {
- fprintf(stderr, "ERROR: Failed to overwrite L2 "
- "table entry: %s\n", strerror(-ret));
- res->check_errors++;
- /* Do not abort, continue checking the rest of this
- * L2 table's entries */
- } else {
- res->corruptions--;
- res->corruptions_fixed++;
- /* Skip marking the cluster as used
- * (it is unused now) */
+ if (ret == 0) {
+ /*
+ * Skip marking the cluster as used
+ * (it is unused now).
+ */
continue;
}
+
+ /*
+ * Failed to fix.
+ * Do not abort, continue checking the rest of this
+ * L2 table's entries.
+ */
}
} else {
fprintf(stderr, "ERROR offset=%" PRIx64 ": Data cluster is "
@@ -1743,14 +1819,23 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
refcount_table_size,
offset, s->cluster_size);
if (ret < 0) {
- goto fail;
+ return ret;
}
}
break;
}
case QCOW2_CLUSTER_ZERO_PLAIN:
+ /* Impossible when image has subclusters */
+ assert(!l2_bitmap);
+ break;
+
case QCOW2_CLUSTER_UNALLOCATED:
+ if (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC) {
+ res->corruptions++;
+ fprintf(stderr, "ERROR: Unallocated "
+ "cluster has non-zero subcluster allocation map\n");
+ }
break;
default:
@@ -1758,12 +1843,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
}
}
- g_free(l2_table);
return 0;
-
-fail:
- g_free(l2_table);
- return ret;
}
/*
@@ -1782,71 +1862,79 @@ static int check_refcounts_l1(BlockDriverState *bs,
int flags, BdrvCheckMode fix, bool active)
{
BDRVQcow2State *s = bs->opaque;
- uint64_t *l1_table = NULL, l2_offset, l1_size2;
+ size_t l1_size_bytes = l1_size * L1E_SIZE;
+ g_autofree uint64_t *l1_table = NULL;
+ uint64_t l2_offset;
int i, ret;
- l1_size2 = l1_size * L1E_SIZE;
+ if (!l1_size) {
+ return 0;
+ }
/* Mark L1 table as used */
ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, refcount_table_size,
- l1_table_offset, l1_size2);
+ l1_table_offset, l1_size_bytes);
if (ret < 0) {
- goto fail;
+ return ret;
+ }
+
+ l1_table = g_try_malloc(l1_size_bytes);
+ if (l1_table == NULL) {
+ res->check_errors++;
+ return -ENOMEM;
}
/* Read L1 table entries from disk */
- if (l1_size2 > 0) {
- l1_table = g_try_malloc(l1_size2);
- if (l1_table == NULL) {
- ret = -ENOMEM;
- res->check_errors++;
- goto fail;
- }
- ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
- if (ret < 0) {
- fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
- res->check_errors++;
- goto fail;
- }
- for(i = 0;i < l1_size; i++)
- be64_to_cpus(&l1_table[i]);
+ ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size_bytes);
+ if (ret < 0) {
+ fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
+ res->check_errors++;
+ return ret;
+ }
+
+ for (i = 0; i < l1_size; i++) {
+ be64_to_cpus(&l1_table[i]);
}
/* Do the actual checks */
- for(i = 0; i < l1_size; i++) {
- l2_offset = l1_table[i];
- if (l2_offset) {
- /* Mark L2 table as used */
- l2_offset &= L1E_OFFSET_MASK;
- ret = qcow2_inc_refcounts_imrt(bs, res,
- refcount_table, refcount_table_size,
- l2_offset, s->cluster_size);
- if (ret < 0) {
- goto fail;
- }
+ for (i = 0; i < l1_size; i++) {
+ if (!l1_table[i]) {
+ continue;
+ }
- /* L2 tables are cluster aligned */
- if (offset_into_cluster(s, l2_offset)) {
- fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
- "cluster aligned; L1 entry corrupted\n", l2_offset);
- res->corruptions++;
- }
+ if (l1_table[i] & L1E_RESERVED_MASK) {
+ fprintf(stderr, "ERROR found L1 entry with reserved bits set: "
+ "%" PRIx64 "\n", l1_table[i]);
+ res->corruptions++;
+ }
- /* Process and check L2 entries */
- ret = check_refcounts_l2(bs, res, refcount_table,
- refcount_table_size, l2_offset, flags,
- fix, active);
- if (ret < 0) {
- goto fail;
- }
+ l2_offset = l1_table[i] & L1E_OFFSET_MASK;
+
+ /* Mark L2 table as used */
+ ret = qcow2_inc_refcounts_imrt(bs, res,
+ refcount_table, refcount_table_size,
+ l2_offset, s->cluster_size);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* L2 tables are cluster aligned */
+ if (offset_into_cluster(s, l2_offset)) {
+ fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
+ "cluster aligned; L1 entry corrupted\n", l2_offset);
+ res->corruptions++;
+ }
+
+ /* Process and check L2 entries */
+ ret = check_refcounts_l2(bs, res, refcount_table,
+ refcount_table_size, l2_offset, flags,
+ fix, active);
+ if (ret < 0) {
+ return ret;
}
}
- g_free(l1_table);
- return 0;
-fail:
- g_free(l1_table);
- return ret;
+ return 0;
}
/*
@@ -2001,9 +2089,17 @@ static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
for(i = 0; i < s->refcount_table_size; i++) {
uint64_t offset, cluster;
- offset = s->refcount_table[i];
+ offset = s->refcount_table[i] & REFT_OFFSET_MASK;
cluster = offset >> s->cluster_bits;
+ if (s->refcount_table[i] & REFT_RESERVED_MASK) {
+ fprintf(stderr, "ERROR refcount table entry %" PRId64 " has "
+ "reserved bits set\n", i);
+ res->corruptions++;
+ *rebuild = true;
+ continue;
+ }
+
/* Refcount blocks are cluster aligned */
if (offset_into_cluster(s, offset)) {
fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
diff --git a/block/qcow2.c b/block/qcow2.c
index 9f1b6461c8..02f9f3e636 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -74,7 +74,7 @@ typedef struct {
static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
- uint64_t cluster_descriptor,
+ uint64_t l2_entry,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
@@ -2205,7 +2205,7 @@ typedef struct Qcow2AioTask {
BlockDriverState *bs;
QCow2SubclusterType subcluster_type; /* only for read */
- uint64_t host_offset; /* or full descriptor in compressed clusters */
+ uint64_t host_offset; /* or l2_entry for compressed read */
uint64_t offset;
uint64_t bytes;
QEMUIOVector *qiov;
@@ -4693,22 +4693,19 @@ qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
- uint64_t cluster_descriptor,
+ uint64_t l2_entry,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset)
{
BDRVQcow2State *s = bs->opaque;
- int ret = 0, csize, nb_csectors;
+ int ret = 0, csize;
uint64_t coffset;
uint8_t *buf, *out_buf;
int offset_in_cluster = offset_into_cluster(s, offset);
- coffset = cluster_descriptor & s->cluster_offset_mask;
- nb_csectors = ((cluster_descriptor >> s->csize_shift) & s->csize_mask) + 1;
- csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
- (coffset & ~QCOW2_COMPRESSED_SECTOR_MASK);
+ qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
buf = g_try_malloc(csize);
if (!buf) {
diff --git a/block/qcow2.h b/block/qcow2.h
index 0fe5f74ed3..fd48a89d45 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -110,7 +110,6 @@
/* Defined in the qcow2 spec (compressed cluster descriptor) */
#define QCOW2_COMPRESSED_SECTOR_SIZE 512U
-#define QCOW2_COMPRESSED_SECTOR_MASK (~(QCOW2_COMPRESSED_SECTOR_SIZE - 1ULL))
/* Must be at least 2 to cover COW */
#define MIN_L2_CACHE_SIZE 2 /* cache entries */
@@ -587,10 +586,12 @@ typedef enum QCow2MetadataOverlap {
(QCOW2_OL_CACHED | QCOW2_OL_INACTIVE_L2)
#define L1E_OFFSET_MASK 0x00fffffffffffe00ULL
+#define L1E_RESERVED_MASK 0x7f000000000001ffULL
#define L2E_OFFSET_MASK 0x00fffffffffffe00ULL
-#define L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL
+#define L2E_STD_RESERVED_MASK 0x3f000000000001feULL
#define REFT_OFFSET_MASK 0xfffffffffffffe00ULL
+#define REFT_RESERVED_MASK 0x1ffULL
#define INV_OFFSET (-1ULL)
@@ -914,6 +915,8 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset);
+void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
+ uint64_t *coffset, int *csize);
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
diff --git a/bsd-user/i386/target_arch_cpu.c b/bsd-user/i386/target_arch_cpu.c
index 71998e5ba5..d349e45299 100644
--- a/bsd-user/i386/target_arch_cpu.c
+++ b/bsd-user/i386/target_arch_cpu.c
@@ -33,11 +33,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_host_ticks();
}
-int cpu_get_pic_interrupt(CPUX86State *env)
-{
- return -1;
-}
-
void bsd_i386_write_dt(void *ptr, unsigned long addr, unsigned long limit,
int flags)
{
diff --git a/bsd-user/x86_64/target_arch_cpu.c b/bsd-user/x86_64/target_arch_cpu.c
index db822e54c6..be7bd10720 100644
--- a/bsd-user/x86_64/target_arch_cpu.c
+++ b/bsd-user/x86_64/target_arch_cpu.c
@@ -33,11 +33,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_host_ticks();
}
-int cpu_get_pic_interrupt(CPUX86State *env)
-{
- return -1;
-}
-
void bsd_x86_64_write_dt(void *ptr, unsigned long addr,
unsigned long limit, int flags)
{
diff --git a/chardev/char-mux.c b/chardev/char-mux.c
index 5baf419010..ada0c6866f 100644
--- a/chardev/char-mux.c
+++ b/chardev/char-mux.c
@@ -386,10 +386,9 @@ void suspend_mux_open(void)
static int chardev_options_parsed_cb(Object *child, void *opaque)
{
Chardev *chr = (Chardev *)child;
- ChardevClass *class = CHARDEV_GET_CLASS(chr);
- if (!chr->be_open && class->chr_options_parsed) {
- class->chr_options_parsed(chr);
+ if (!chr->be_open && CHARDEV_IS_MUX(chr)) {
+ open_muxes(chr);
}
return 0;
@@ -412,7 +411,6 @@ static void char_mux_class_init(ObjectClass *oc, void *data)
cc->chr_accept_input = mux_chr_accept_input;
cc->chr_add_watch = mux_chr_add_watch;
cc->chr_be_event = mux_chr_be_event;
- cc->chr_options_parsed = open_muxes;
cc->chr_update_read_handler = mux_chr_update_read_handlers;
}
diff --git a/chardev/char.c b/chardev/char.c
index 4595a8d430..0169d8dde4 100644
--- a/chardev/char.c
+++ b/chardev/char.c
@@ -241,18 +241,15 @@ static void qemu_char_open(Chardev *chr, ChardevBackend *backend,
ChardevCommon *common = backend ? backend->u.null.data : NULL;
if (common && common->has_logfile) {
- int flags = O_WRONLY | O_CREAT;
+ int flags = O_WRONLY;
if (common->has_logappend &&
common->logappend) {
flags |= O_APPEND;
} else {
flags |= O_TRUNC;
}
- chr->logfd = qemu_open_old(common->logfile, flags, 0666);
+ chr->logfd = qemu_create(common->logfile, flags, 0666, errp);
if (chr->logfd < 0) {
- error_setg_errno(errp, errno,
- "Unable to open logfile %s",
- common->logfile);
return;
}
}
diff --git a/configure b/configure
index da2501489f..1043ccce4f 100755
--- a/configure
+++ b/configure
@@ -5052,7 +5052,9 @@ for bios_file in \
$source_path/pc-bios/openbios-* \
$source_path/pc-bios/u-boot.* \
$source_path/pc-bios/edk2-*.fd.bz2 \
- $source_path/pc-bios/palcode-*
+ $source_path/pc-bios/palcode-* \
+ $source_path/pc-bios/qemu_vga.ndrv
+
do
LINKS="$LINKS pc-bios/$(basename $bios_file)"
done
diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst
index 692323609e..bcb1549721 100644
--- a/docs/about/build-platforms.rst
+++ b/docs/about/build-platforms.rst
@@ -29,6 +29,39 @@ The `Repology`_ site is a useful resource to identify
currently shipped versions of software in various operating systems,
though it does not cover all distros listed below.
+Supported host architectures
+----------------------------
+
+Those hosts are officially supported, with various accelerators:
+
+ .. list-table::
+ :header-rows: 1
+
+ * - CPU Architecture
+ - Accelerators
+ * - Arm
+ - kvm (64 bit only), tcg, xen
+ * - MIPS
+ - kvm, tcg
+ * - PPC
+ - kvm, tcg
+ * - RISC-V
+ - tcg
+ * - s390x
+ - kvm, tcg
+ * - SPARC
+ - tcg
+ * - x86
+ - hax, hvf (64 bit only), kvm, nvmm, tcg, whpx (64 bit only), xen
+
+Other host architectures are not supported. It is possible to build QEMU on an
+unsupported host architecture using the configure ``--enable-tcg-interpreter``
+option to enable the experimental TCI support, but note that this is very slow
+and is not recommended.
+
+Non-supported architectures may be removed in the future following the
+:ref:`deprecation process<Deprecated features>`.
+
Linux OS, macOS, FreeBSD, NetBSD, OpenBSD
-----------------------------------------
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index 9ee355ec0b..3c2be84d80 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -1,3 +1,5 @@
+.. _Deprecated features:
+
Deprecated features
===================
diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
index 69f57c2886..adf497e679 100644
--- a/docs/system/arm/nuvoton.rst
+++ b/docs/system/arm/nuvoton.rst
@@ -20,6 +20,7 @@ Hyperscale applications. The following machines are based on this chip :
- ``quanta-gbs-bmc`` Quanta GBS server BMC
- ``quanta-gsj`` Quanta GSJ server BMC
+- ``kudo-bmc`` Fii USA Kudo server BMC
There are also two more SoCs, NPCM710 and NPCM705, which are single-core
variants of NPCM750 and NPCM730, respectively. These are currently not
diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst
index fe6c30d509..d58980aef8 100644
--- a/docs/tools/qemu-img.rst
+++ b/docs/tools/qemu-img.rst
@@ -415,7 +415,7 @@ Command description:
4
Error on reading data
-.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
+.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F backing_fmt]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
Convert the disk image *FILENAME* or a snapshot *SNAPSHOT_PARAM*
to disk image *OUTPUT_FILENAME* using format *OUTPUT_FMT*. It can
@@ -439,7 +439,7 @@ Command description:
You can use the *BACKING_FILE* option to force the output image to be
created as a copy on write image of the specified base image; the
*BACKING_FILE* should have the same content as the input's base image,
- however the path, image format, etc may differ.
+ however the path, image format (as given by *BACKING_FMT*), etc. may differ.
If a relative path name is given, the backing file is looked up relative to
the directory containing *OUTPUT_FILENAME*.
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index e23830f4b7..f40e854dec 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -373,6 +373,11 @@ static qemu_irq get_sse_irq_in(MPS2TZMachineState *mms, int irqno)
}
}
+/* Union describing the device-specific extra data we pass to the devfn. */
+typedef union PPCExtraData {
+ bool i2c_internal;
+} PPCExtraData;
+
/* Most of the devices in the AN505 FPGA image sit behind
* Peripheral Protection Controllers. These data structures
* define the layout of which devices sit behind which PPCs.
@@ -382,7 +387,8 @@ static qemu_irq get_sse_irq_in(MPS2TZMachineState *mms, int irqno)
*/
typedef MemoryRegion *MakeDevFn(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs);
+ const int *irqs,
+ const PPCExtraData *extradata);
typedef struct PPCPortInfo {
const char *name;
@@ -391,6 +397,7 @@ typedef struct PPCPortInfo {
hwaddr addr;
hwaddr size;
int irqs[3]; /* currently no device needs more IRQ lines than this */
+ PPCExtraData extradata; /* to pass device-specific info to the devfn */
} PPCPortInfo;
typedef struct PPCInfo {
@@ -401,7 +408,8 @@ typedef struct PPCInfo {
static MemoryRegion *make_unimp_dev(MPS2TZMachineState *mms,
void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs,
+ const PPCExtraData *extradata)
{
/* Initialize, configure and realize a TYPE_UNIMPLEMENTED_DEVICE,
* and return a pointer to its MemoryRegion.
@@ -417,7 +425,7 @@ static MemoryRegion *make_unimp_dev(MPS2TZMachineState *mms,
static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs, const PPCExtraData *extradata)
{
/* The irq[] array is tx, rx, combined, in that order */
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
@@ -441,7 +449,7 @@ static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_scc(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs, const PPCExtraData *extradata)
{
MPS2SCC *scc = opaque;
DeviceState *sccdev;
@@ -465,7 +473,7 @@ static MemoryRegion *make_scc(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs, const PPCExtraData *extradata)
{
MPS2FPGAIO *fpgaio = opaque;
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
@@ -480,7 +488,8 @@ static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs,
+ const PPCExtraData *extradata)
{
SysBusDevice *s;
NICInfo *nd = &nd_table[0];
@@ -500,7 +509,8 @@ static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs,
+ const PPCExtraData *extradata)
{
/*
* The AN524 makes the ethernet and USB share a PPC port.
@@ -543,7 +553,7 @@ static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_mpc(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs, const PPCExtraData *extradata)
{
TZMPC *mpc = opaque;
int i = mpc - &mms->mpc[0];
@@ -615,7 +625,7 @@ static void remap_irq_fn(void *opaque, int n, int level)
static MemoryRegion *make_dma(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs, const PPCExtraData *extradata)
{
/* The irq[] array is DMACINTR, DMACINTERR, DMACINTTC, in that order */
PL080State *dma = opaque;
@@ -672,7 +682,7 @@ static MemoryRegion *make_dma(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_spi(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs, const PPCExtraData *extradata)
{
/*
* The AN505 has five PL022 SPI controllers.
@@ -694,7 +704,7 @@ static MemoryRegion *make_spi(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_i2c(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs, const PPCExtraData *extradata)
{
ArmSbconI2CState *i2c = opaque;
SysBusDevice *s;
@@ -702,12 +712,26 @@ static MemoryRegion *make_i2c(MPS2TZMachineState *mms, void *opaque,
object_initialize_child(OBJECT(mms), name, i2c, TYPE_ARM_SBCON_I2C);
s = SYS_BUS_DEVICE(i2c);
sysbus_realize(s, &error_fatal);
+
+ /*
+ * If this is an internal-use-only i2c bus, mark it full
+ * so that user-created i2c devices are not plugged into it.
+ * If we implement models of any on-board i2c devices that
+ * plug in to one of the internal-use-only buses, then we will
+ * need to create and plugging those in here before we mark the
+ * bus as full.
+ */
+ if (extradata->i2c_internal) {
+ BusState *qbus = qdev_get_child_bus(DEVICE(i2c), "i2c");
+ qbus_mark_full(qbus);
+ }
+
return sysbus_mmio_get_region(s, 0);
}
static MemoryRegion *make_rtc(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
- const int *irqs)
+ const int *irqs, const PPCExtraData *extradata)
{
PL031State *pl031 = opaque;
SysBusDevice *s;
@@ -912,10 +936,14 @@ static void mps2tz_common_init(MachineState *machine)
{ "uart2", make_uart, &mms->uart[2], 0x40202000, 0x1000, { 36, 37, 44 } },
{ "uart3", make_uart, &mms->uart[3], 0x40203000, 0x1000, { 38, 39, 45 } },
{ "uart4", make_uart, &mms->uart[4], 0x40204000, 0x1000, { 40, 41, 46 } },
- { "i2c0", make_i2c, &mms->i2c[0], 0x40207000, 0x1000 },
- { "i2c1", make_i2c, &mms->i2c[1], 0x40208000, 0x1000 },
- { "i2c2", make_i2c, &mms->i2c[2], 0x4020c000, 0x1000 },
- { "i2c3", make_i2c, &mms->i2c[3], 0x4020d000, 0x1000 },
+ { "i2c0", make_i2c, &mms->i2c[0], 0x40207000, 0x1000, {},
+ { .i2c_internal = true /* touchscreen */ } },
+ { "i2c1", make_i2c, &mms->i2c[1], 0x40208000, 0x1000, {},
+ { .i2c_internal = true /* audio conf */ } },
+ { "i2c2", make_i2c, &mms->i2c[2], 0x4020c000, 0x1000, {},
+ { .i2c_internal = false /* shield 0 */ } },
+ { "i2c3", make_i2c, &mms->i2c[3], 0x4020d000, 0x1000, {},
+ { .i2c_internal = false /* shield 1 */ } },
},
}, {
.name = "apb_ppcexp2",
@@ -956,15 +984,20 @@ static void mps2tz_common_init(MachineState *machine)
}, {
.name = "apb_ppcexp1",
.ports = {
- { "i2c0", make_i2c, &mms->i2c[0], 0x41200000, 0x1000 },
- { "i2c1", make_i2c, &mms->i2c[1], 0x41201000, 0x1000 },
+ { "i2c0", make_i2c, &mms->i2c[0], 0x41200000, 0x1000, {},
+ { .i2c_internal = true /* touchscreen */ } },
+ { "i2c1", make_i2c, &mms->i2c[1], 0x41201000, 0x1000, {},
+ { .i2c_internal = true /* audio conf */ } },
{ "spi0", make_spi, &mms->spi[0], 0x41202000, 0x1000, { 52 } },
{ "spi1", make_spi, &mms->spi[1], 0x41203000, 0x1000, { 53 } },
{ "spi2", make_spi, &mms->spi[2], 0x41204000, 0x1000, { 54 } },
- { "i2c2", make_i2c, &mms->i2c[2], 0x41205000, 0x1000 },
- { "i2c3", make_i2c, &mms->i2c[3], 0x41206000, 0x1000 },
+ { "i2c2", make_i2c, &mms->i2c[2], 0x41205000, 0x1000, {},
+ { .i2c_internal = false /* shield 0 */ } },
+ { "i2c3", make_i2c, &mms->i2c[3], 0x41206000, 0x1000, {},
+ { .i2c_internal = false /* shield 1 */ } },
{ /* port 7 reserved */ },
- { "i2c4", make_i2c, &mms->i2c[4], 0x41208000, 0x1000 },
+ { "i2c4", make_i2c, &mms->i2c[4], 0x41208000, 0x1000, {},
+ { .i2c_internal = true /* DDR4 EEPROM */ } },
},
}, {
.name = "apb_ppcexp2",
@@ -1006,15 +1039,20 @@ static void mps2tz_common_init(MachineState *machine)
}, {
.name = "apb_ppcexp1",
.ports = {
- { "i2c0", make_i2c, &mms->i2c[0], 0x49200000, 0x1000 },
- { "i2c1", make_i2c, &mms->i2c[1], 0x49201000, 0x1000 },
+ { "i2c0", make_i2c, &mms->i2c[0], 0x49200000, 0x1000, {},
+ { .i2c_internal = true /* touchscreen */ } },
+ { "i2c1", make_i2c, &mms->i2c[1], 0x49201000, 0x1000, {},
+ { .i2c_internal = true /* audio conf */ } },
{ "spi0", make_spi, &mms->spi[0], 0x49202000, 0x1000, { 53 } },
{ "spi1", make_spi, &mms->spi[1], 0x49203000, 0x1000, { 54 } },
{ "spi2", make_spi, &mms->spi[2], 0x49204000, 0x1000, { 55 } },
- { "i2c2", make_i2c, &mms->i2c[2], 0x49205000, 0x1000 },
- { "i2c3", make_i2c, &mms->i2c[3], 0x49206000, 0x1000 },
+ { "i2c2", make_i2c, &mms->i2c[2], 0x49205000, 0x1000, {},
+ { .i2c_internal = false /* shield 0 */ } },
+ { "i2c3", make_i2c, &mms->i2c[3], 0x49206000, 0x1000, {},
+ { .i2c_internal = false /* shield 1 */ } },
{ /* port 7 reserved */ },
- { "i2c4", make_i2c, &mms->i2c[4], 0x49208000, 0x1000 },
+ { "i2c4", make_i2c, &mms->i2c[4], 0x49208000, 0x1000, {},
+ { .i2c_internal = true /* DDR4 EEPROM */ } },
},
}, {
.name = "apb_ppcexp2",
@@ -1084,7 +1122,7 @@ static void mps2tz_common_init(MachineState *machine)
}
mr = pinfo->devfn(mms, pinfo->opaque, pinfo->name, pinfo->size,
- pinfo->irqs);
+ pinfo->irqs, &pinfo->extradata);
portname = g_strdup_printf("port[%d]", port);
object_property_set_link(OBJECT(ppc), portname, OBJECT(mr),
&error_fatal);
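The change above completes the extradata plumbing: each PPCPortInfo row can carry a small device-specific payload that the matching make_* helper receives alongside its IRQ list. A self-contained sketch of the pattern, using hypothetical stand-in types (the real code passes MemoryRegion, hwaddr and IRQ data through the QEMU object model):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef union ExtraData {        /* stand-in for PPCExtraData */
        bool i2c_internal;
    } ExtraData;

    typedef struct PortInfo {        /* stand-in for PPCPortInfo */
        const char *name;
        void (*devfn)(const char *name, const ExtraData *extradata);
        ExtraData extradata;         /* zero-initialized when omitted */
    } PortInfo;

    static void make_i2c_model(const char *name, const ExtraData *extradata)
    {
        /* the helper branches on per-port data without a wider API change */
        printf("%s: internal-only=%d\n", name, extradata->i2c_internal);
    }

    int main(void)
    {
        static const PortInfo ports[] = {
            { "i2c0", make_i2c_model, { .i2c_internal = true } },
            { "i2c2", make_i2c_model, { .i2c_internal = false } },
        };

        for (size_t i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
            ports[i].devfn(ports[i].name, &ports[i].extradata);
        }
        return 0;
    }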
diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c
index 4634aa1a1c..bb76fa6889 100644
--- a/hw/arm/mps2.c
+++ b/hw/arm/mps2.c
@@ -428,7 +428,17 @@ static void mps2_common_init(MachineState *machine)
0x40023000, /* Audio */
0x40029000, /* Shield0 */
0x4002a000}; /* Shield1 */
- sysbus_create_simple(TYPE_ARM_SBCON_I2C, i2cbase[i], NULL);
+ DeviceState *dev;
+
+ dev = sysbus_create_simple(TYPE_ARM_SBCON_I2C, i2cbase[i], NULL);
+ if (i < 2) {
+ /*
+ * internal-only bus: mark it full to avoid user-created
+ * i2c devices being plugged into it.
+ */
+ BusState *qbus = qdev_get_child_bus(dev, "i2c");
+ qbus_mark_full(qbus);
+ }
}
create_unimplemented_device("i2s", 0x40024000, 0x400);
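Both boards depend on qbus_mark_full(), added to the qdev core elsewhere in this series. A minimal model of the idea, with the flag and the rejection path reduced to hypothetical stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct BusState {        /* stand-in for the qdev BusState */
        const char *name;
        bool full;                   /* no further devices may be attached */
    } BusState;

    static void qbus_mark_full(BusState *bus)
    {
        bus->full = true;
    }

    /* device-creation paths are expected to refuse a bus marked full */
    static bool attach_device(BusState *bus, const char *dev)
    {
        if (bus->full) {
            fprintf(stderr, "Bus '%s' is full, cannot attach %s\n",
                    bus->name, dev);
            return false;
        }
        printf("attached %s to %s\n", dev, bus->name);
        return true;
    }

    int main(void)
    {
        BusState i2c0 = { .name = "i2c0" };    /* internal-only bus */

        qbus_mark_full(&i2c0);
        attach_device(&i2c0, "user-i2c-device");   /* rejected */
        return 0;
    }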
diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c
index e5a3243995..a656169f61 100644
--- a/hw/arm/npcm7xx_boards.c
+++ b/hw/arm/npcm7xx_boards.c
@@ -31,6 +31,7 @@
#define NPCM750_EVB_POWER_ON_STRAPS 0x00001ff7
#define QUANTA_GSJ_POWER_ON_STRAPS 0x00001fff
#define QUANTA_GBS_POWER_ON_STRAPS 0x000017ff
+#define KUDO_BMC_POWER_ON_STRAPS 0x00001fff
static const char npcm7xx_default_bootrom[] = "npcm7xx_bootrom.bin";
@@ -357,6 +358,23 @@ static void quanta_gbs_init(MachineState *machine)
npcm7xx_load_kernel(machine, soc);
}
+static void kudo_bmc_init(MachineState *machine)
+{
+ NPCM7xxState *soc;
+
+ soc = npcm7xx_create_soc(machine, KUDO_BMC_POWER_ON_STRAPS);
+ npcm7xx_connect_dram(soc, machine->ram);
+ qdev_realize(DEVICE(soc), NULL, &error_fatal);
+
+ npcm7xx_load_bootrom(machine, soc);
+ npcm7xx_connect_flash(&soc->fiu[0], 0, "mx66u51235f",
+ drive_get(IF_MTD, 0, 0));
+ npcm7xx_connect_flash(&soc->fiu[1], 0, "mx66u51235f",
+ drive_get(IF_MTD, 3, 0));
+
+ npcm7xx_load_kernel(machine, soc);
+}
+
static void npcm7xx_set_soc_type(NPCM7xxMachineClass *nmc, const char *type)
{
NPCM7xxClass *sc = NPCM7XX_CLASS(object_class_by_name(type));
@@ -417,6 +435,18 @@ static void gbs_bmc_machine_class_init(ObjectClass *oc, void *data)
mc->default_ram_size = 1 * GiB;
}
+static void kudo_bmc_machine_class_init(ObjectClass *oc, void *data)
+{
+ NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc);
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ npcm7xx_set_soc_type(nmc, TYPE_NPCM730);
+
+ mc->desc = "Kudo BMC (Cortex-A9)";
+ mc->init = kudo_bmc_init;
+ mc->default_ram_size = 1 * GiB;
+}
+
static const TypeInfo npcm7xx_machine_types[] = {
{
.name = TYPE_NPCM7XX_MACHINE,
@@ -437,6 +467,10 @@ static const TypeInfo npcm7xx_machine_types[] = {
.name = MACHINE_TYPE_NAME("quanta-gbs-bmc"),
.parent = TYPE_NPCM7XX_MACHINE,
.class_init = gbs_bmc_machine_class_init,
+ }, {
+ .name = MACHINE_TYPE_NAME("kudo-bmc"),
+ .parent = TYPE_NPCM7XX_MACHINE,
+ .class_init = kudo_bmc_machine_class_init,
},
};
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 73e9c6bb7c..1d59f0e59f 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -584,6 +584,12 @@ static void create_its(VirtMachineState *vms)
const char *itsclass = its_class_name();
DeviceState *dev;
+ if (!strcmp(itsclass, "arm-gicv3-its")) {
+ if (!vms->tcg_its) {
+ itsclass = NULL;
+ }
+ }
+
if (!itsclass) {
/* Do nothing if not supported */
return;
@@ -621,7 +627,7 @@ static void create_v2m(VirtMachineState *vms)
vms->msi_controller = VIRT_MSI_CTRL_GICV2M;
}
-static void create_gic(VirtMachineState *vms)
+static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
{
MachineState *ms = MACHINE(vms);
/* We create a standalone GIC */
@@ -655,6 +661,14 @@ static void create_gic(VirtMachineState *vms)
nb_redist_regions);
qdev_prop_set_uint32(vms->gic, "redist-region-count[0]", redist0_count);
+ if (!kvm_irqchip_in_kernel()) {
+ if (vms->tcg_its) {
+ object_property_set_link(OBJECT(vms->gic), "sysmem",
+ OBJECT(mem), &error_fatal);
+ qdev_prop_set_bit(vms->gic, "has-lpi", true);
+ }
+ }
+
if (nb_redist_regions == 2) {
uint32_t redist1_capacity =
vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE;
@@ -2039,7 +2053,7 @@ static void machvirt_init(MachineState *machine)
virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);
- create_gic(vms);
+ create_gic(vms, sysmem);
virt_cpu_post_init(vms, sysmem);
@@ -2742,6 +2756,12 @@ static void virt_instance_init(Object *obj)
} else {
/* Default allows ITS instantiation */
vms->its = true;
+
+ if (vmc->no_tcg_its) {
+ vms->tcg_its = false;
+ } else {
+ vms->tcg_its = true;
+ }
}
/* Default disallows iommu instantiation */
@@ -2791,8 +2811,13 @@ DEFINE_VIRT_MACHINE_AS_LATEST(6, 2)
static void virt_machine_6_1_options(MachineClass *mc)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
virt_machine_6_2_options(mc);
compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
+
+ /* qemu ITS was introduced with 6.2 */
+ vmc->no_tcg_its = true;
}
DEFINE_VIRT_MACHINE(6, 1)
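The no_tcg_its flag uses the standard versioned-machine compat pattern: the newest machine type gets the feature by default, and each older machine class opts out in its class init so existing guests see no change. A toy model of that flow (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct MachineClassModel {   /* stand-in for VirtMachineClass */
        const char *name;
        bool no_tcg_its;                 /* class-level opt-out */
    } MachineClassModel;

    /* mirrors the vms->tcg_its assignment in virt_instance_init() */
    static void instance_init_model(const MachineClassModel *mc)
    {
        bool tcg_its = !mc->no_tcg_its;
        printf("%s: tcg_its=%d\n", mc->name, tcg_its);
    }

    int main(void)
    {
        MachineClassModel virt_6_2 = { "virt-6.2", false };
        MachineClassModel virt_6_1 = { "virt-6.1", true };  /* ITS is new in 6.2 */

        instance_init_model(&virt_6_2);
        instance_init_model(&virt_6_1);
        return 0;
    }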
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index b4b5e8a3ee..c069a30842 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -235,8 +235,18 @@ static void uart_parameters_setup(CadenceUARTState *s)
static int uart_can_receive(void *opaque)
{
CadenceUARTState *s = opaque;
- int ret = MAX(CADENCE_UART_RX_FIFO_SIZE, CADENCE_UART_TX_FIFO_SIZE);
- uint32_t ch_mode = s->r[R_MR] & UART_MR_CHMODE;
+ int ret;
+ uint32_t ch_mode;
+
+ /* ignore characters when unclocked or in reset */
+ if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n",
+ __func__);
+ return 0;
+ }
+
+ ret = MAX(CADENCE_UART_RX_FIFO_SIZE, CADENCE_UART_TX_FIFO_SIZE);
+ ch_mode = s->r[R_MR] & UART_MR_CHMODE;
if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) {
ret = MIN(ret, CADENCE_UART_RX_FIFO_SIZE - s->rx_count);
@@ -353,11 +363,6 @@ static void uart_receive(void *opaque, const uint8_t *buf, int size)
CadenceUARTState *s = opaque;
uint32_t ch_mode = s->r[R_MR] & UART_MR_CHMODE;
- /* ignore characters when unclocked or in reset */
- if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
- return;
- }
-
if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) {
uart_write_rx_fifo(opaque, buf, size);
}
@@ -373,6 +378,8 @@ static void uart_event(void *opaque, QEMUChrEvent event)
/* ignore characters when unclocked or in reset */
if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n",
+ __func__);
return;
}
@@ -403,15 +410,22 @@ static void uart_read_rx_fifo(CadenceUARTState *s, uint32_t *c)
uart_update_status(s);
}
-static void uart_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
+static MemTxResult uart_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size, MemTxAttrs attrs)
{
CadenceUARTState *s = opaque;
+ /* ignore access when unclocked or in reset */
+ if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n",
+ __func__);
+ return MEMTX_ERROR;
+ }
+
DB_PRINT(" offset:%x data:%08x\n", (unsigned)offset, (unsigned)value);
offset >>= 2;
if (offset >= CADENCE_UART_R_MAX) {
- return;
+ return MEMTX_DECODE_ERROR;
}
switch (offset) {
case R_IER: /* ier (wts imr) */
@@ -458,30 +472,41 @@ static void uart_write(void *opaque, hwaddr offset,
break;
}
uart_update_status(s);
+
+ return MEMTX_OK;
}
-static uint64_t uart_read(void *opaque, hwaddr offset,
- unsigned size)
+static MemTxResult uart_read(void *opaque, hwaddr offset,
+ uint64_t *value, unsigned size, MemTxAttrs attrs)
{
CadenceUARTState *s = opaque;
uint32_t c = 0;
+ /* ignore access when unclocked or in reset */
+ if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n",
+ __func__);
+ return MEMTX_ERROR;
+ }
+
offset >>= 2;
if (offset >= CADENCE_UART_R_MAX) {
- c = 0;
- } else if (offset == R_TX_RX) {
+ return MEMTX_DECODE_ERROR;
+ }
+ if (offset == R_TX_RX) {
uart_read_rx_fifo(s, &c);
} else {
- c = s->r[offset];
+ c = s->r[offset];
}
DB_PRINT(" offset:%x data:%08x\n", (unsigned)(offset << 2), (unsigned)c);
- return c;
+ *value = c;
+ return MEMTX_OK;
}
static const MemoryRegionOps uart_ops = {
- .read = uart_read,
- .write = uart_write,
+ .read_with_attrs = uart_read,
+ .write_with_attrs = uart_write,
.endianness = DEVICE_NATIVE_ENDIAN,
};
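The switch to .read_with_attrs/.write_with_attrs lets the UART fault an access instead of silently returning data: the handler fills a value pointer and returns a MemTxResult. A schematic of that contract, with the QEMU types reduced to a self-contained toy:

    #include <stdint.h>
    #include <stdio.h>

    typedef enum {                       /* stand-in for QEMU's MemTxResult */
        MEMTX_OK,
        MEMTX_ERROR,                     /* e.g. device unclocked/in reset */
        MEMTX_DECODE_ERROR               /* nothing decodes at this offset */
    } MemTxResult;

    #define R_MAX 4
    static uint32_t regs[R_MAX] = { 0x11, 0x22, 0x33, 0x44 };
    static int clocked;

    static MemTxResult uart_read_model(uint64_t offset, uint64_t *value)
    {
        if (!clocked) {
            return MEMTX_ERROR;          /* surface a bus error, not RAZ */
        }
        offset >>= 2;
        if (offset >= R_MAX) {
            return MEMTX_DECODE_ERROR;
        }
        *value = regs[offset];
        return MEMTX_OK;
    }

    int main(void)
    {
        uint64_t v = 0;

        printf("unclocked -> %d\n", uart_read_model(0, &v));
        clocked = 1;
        printf("clocked -> %d, value 0x%llx\n", uart_read_model(4, &v),
               (unsigned long long)v);
        return 0;
    }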
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 43482d4364..29c80b4289 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -2252,7 +2252,7 @@ static int qxl_pre_save(void *opaque)
} else {
d->last_release_offset = (uint8_t *)d->last_release - ram_start;
}
- if (d->last_release_offset < d->vga.vram_size) {
+ if (d->last_release_offset >= d->vga.vram_size) {
return 1;
}
diff --git a/hw/display/virtio-gpu-udmabuf.c b/hw/display/virtio-gpu-udmabuf.c
index 3c01a415e7..c6f7f58784 100644
--- a/hw/display/virtio-gpu-udmabuf.c
+++ b/hw/display/virtio-gpu-udmabuf.c
@@ -185,6 +185,7 @@ static VGPUDMABuf
dmabuf->buf.stride = fb->stride;
dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format);
dmabuf->buf.fd = res->dmabuf_fd;
+ dmabuf->buf.allow_fences = true;
dmabuf->scanout_id = scanout_id;
QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next);
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 72da5bf500..182e0868b0 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -985,8 +985,10 @@ void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
break;
}
if (!cmd->finished) {
- virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
- VIRTIO_GPU_RESP_OK_NODATA);
+ if (!g->parent_obj.renderer_blocked) {
+ virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
+ VIRTIO_GPU_RESP_OK_NODATA);
+ }
}
}
@@ -1042,6 +1044,30 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
g->processing_cmdq = false;
}
+static void virtio_gpu_process_fenceq(VirtIOGPU *g)
+{
+ struct virtio_gpu_ctrl_command *cmd, *tmp;
+
+ QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
+ trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
+ virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
+ QTAILQ_REMOVE(&g->fenceq, cmd, next);
+ g_free(cmd);
+ g->inflight--;
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
+ fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
+ }
+ }
+}
+
+static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
+{
+ VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);
+
+ virtio_gpu_process_fenceq(g);
+ virtio_gpu_process_cmdq(g);
+}
+
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOGPU *g = VIRTIO_GPU(vdev);
@@ -1400,10 +1426,12 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
+ VirtIOGPUBaseClass *vgbc = &vgc->parent;
vgc->handle_ctrl = virtio_gpu_handle_ctrl;
vgc->process_cmd = virtio_gpu_simple_process_cmd;
vgc->update_cursor_data = virtio_gpu_update_cursor_data;
+ vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;
vdc->realize = virtio_gpu_device_realize;
vdc->reset = virtio_gpu_reset;
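The new gl_flushed hook exists because responses are now withheld while the renderer is blocked; once the console signals a flush, the fence queue is drained first and then normal command processing resumes. A toy model of that defer-then-drain flow (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define QLEN 8
    static int fenceq[QLEN];             /* completions held back */
    static int fence_count;
    static bool renderer_blocked;

    static void complete_cmd(int fence_id)
    {
        if (renderer_blocked) {
            fenceq[fence_count++] = fence_id;   /* defer the response */
            return;
        }
        printf("fence %d signalled\n", fence_id);
    }

    /* mirrors the shape of virtio_gpu_handle_gl_flushed() */
    static void handle_gl_flushed(void)
    {
        renderer_blocked = false;
        for (int i = 0; i < fence_count; i++) {
            printf("fence %d signalled (deferred)\n", fenceq[i]);
        }
        fence_count = 0;
        /* ...then resume normal command-queue processing... */
    }

    int main(void)
    {
        renderer_blocked = true;
        complete_cmd(1);
        complete_cmd(2);
        handle_gl_flushed();
        return 0;
    }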
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index d1f5fa3b5a..dfaa47cdc2 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -1916,7 +1916,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
PCMachineState *pcms = PC_MACHINE(machine);
int nb_numa_nodes = machine->numa_state->num_nodes;
NodeInfo *numa_info = machine->numa_state->nodes;
- ram_addr_t hotplugabble_address_space_size =
+ ram_addr_t hotpluggable_address_space_size =
object_property_get_int(OBJECT(pcms), PC_MACHINE_DEVMEM_REGION_SIZE,
NULL);
@@ -2022,10 +2022,10 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
* Memory devices may override proximity set by this entry,
* providing _PXM method if necessary.
*/
- if (hotplugabble_address_space_size) {
+ if (hotpluggable_address_space_size) {
numamem = acpi_data_push(table_data, sizeof *numamem);
build_srat_memory(numamem, machine->device_memory->base,
- hotplugabble_address_space_size, nb_numa_nodes - 1,
+ hotpluggable_address_space_size, nb_numa_nodes - 1,
MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
}
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 209b3f5553..75f075547f 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -679,7 +679,7 @@ static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu,
}
break;
default:
- /* Unknwon type */
+ /* Unknown type */
return false;
}
return true;
@@ -692,7 +692,7 @@ static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire)
/**
* Caller of this function should check present bit if wants
- * to use pdir entry for futher usage except for fpd bit check.
+ * to use pdir entry for further usage except for fpd bit check.
*/
static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
uint32_t pasid,
@@ -746,7 +746,7 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
/**
* Caller of this function should check present bit if wants
- * to use pasid entry for futher usage except for fpd bit check.
+ * to use pasid entry for further usage except for fpd bit check.
*/
static int vtd_get_pe_from_pdire(IntelIOMMUState *s,
uint32_t pasid,
@@ -1507,7 +1507,7 @@ static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as)
}
/*
- * Check if specific device is configed to bypass address
+ * Check if specific device is configured to bypass address
* translation for DMA requests. In Scalable Mode, bypass
* 1st-level translation or 2nd-level translation, it depends
* on PGTT setting.
diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c
index d63f8af604..3f24707838 100644
--- a/hw/intc/arm_gicv3.c
+++ b/hw/intc/arm_gicv3.c
@@ -165,6 +165,16 @@ static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
}
+ if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
+ (cs->hpplpi.prio != 0xff)) {
+ if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
+ cs->hppi.irq = cs->hpplpi.irq;
+ cs->hppi.prio = cs->hpplpi.prio;
+ cs->hppi.grp = cs->hpplpi.grp;
+ seenbetter = true;
+ }
+ }
+
/* If the best interrupt we just found would preempt whatever
* was the previous best interrupt before this update, then
* we know it's definitely the best one now.
@@ -339,9 +349,13 @@ static void gicv3_set_irq(void *opaque, int irq, int level)
static void arm_gicv3_post_load(GICv3State *s)
{
+ int i;
/* Recalculate our cached idea of the current highest priority
* pending interrupt, but don't set IRQ or FIQ lines.
*/
+ for (i = 0; i < s->num_cpu; i++) {
+ gicv3_redist_update_lpi(&s->cpu[i]);
+ }
gicv3_full_update_noirqset(s);
/* Repopulate the cache of GICv3CPUState pointers for target CPUs */
gicv3_cache_all_target_cpustates(s);
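The redistributor update now has to fold the best pending LPI (hpplpi) into the overall best pending interrupt (hppi). In GICv3 a lower priority value is more urgent and 0xff means nothing is pending; a simplified model of the comparison (the real irqbetter() also handles ties and the no-pending case):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        int irq;
        uint8_t prio;       /* 0xff: nothing pending */
    } PendingIrq;

    static void fold_lpi(PendingIrq *hppi, const PendingIrq *hpplpi,
                         int lpis_enabled)
    {
        if (lpis_enabled && hpplpi->prio != 0xff && hpplpi->prio < hppi->prio) {
            *hppi = *hpplpi;    /* the LPI is now the best candidate */
        }
    }

    int main(void)
    {
        PendingIrq hppi = { .irq = 40, .prio = 0x80 };      /* best SPI */
        PendingIrq hpplpi = { .irq = 8192, .prio = 0x40 };  /* more urgent LPI */

        fold_lpi(&hppi, &hpplpi, 1);
        printf("best irq %d, prio 0x%02x\n", hppi.irq, hppi.prio);
        return 0;
    }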
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 58ef65f589..223db16fec 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -345,6 +345,11 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
return;
}
+ if (s->lpi_enable && !s->dma) {
+ error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
+ return;
+ }
+
s->cpu = g_new0(GICv3CPUState, s->num_cpu);
for (i = 0; i < s->num_cpu; i++) {
@@ -381,6 +386,10 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
(1 << 24) |
(i << 8) |
(last << 4);
+
+ if (s->lpi_enable) {
+ s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
+ }
}
}
@@ -426,6 +435,7 @@ static void arm_gicv3_common_reset(DeviceState *dev)
memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));
cs->hppi.prio = 0xff;
+ cs->hpplpi.prio = 0xff;
/* State in the CPU interface must *not* be reset here, because it
* is part of the CPU's reset domain, not the GIC device's.
@@ -494,9 +504,12 @@ static Property arm_gicv3_common_properties[] = {
DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
+ DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
redist_region_count, qdev_prop_uint32, uint32_t),
+ DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
+ MemoryRegion *),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index a032d505f5..462a35f66e 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -899,10 +899,12 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq)
cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
gicv3_redist_update(cs);
- } else {
+ } else if (irq < GICV3_LPI_INTID_START) {
gicv3_gicd_active_set(cs->gic, irq);
gicv3_gicd_pending_clear(cs->gic, irq);
gicv3_update(cs->gic, irq, 1);
+ } else {
+ gicv3_redist_lpi_pending(cs, irq, 0);
}
}
@@ -1318,7 +1320,8 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
gicv3_redist_affid(cs), value);
- if (irq >= cs->gic->num_irq) {
+ if ((irq >= cs->gic->num_irq) &&
+ !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
/* This handles two cases:
* 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
* to the GICC_EOIR, the GIC ignores that write.
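With LPIs enabled, the EOI path must accept interrupt IDs at or above 8192 even though they exceed num_irq, while 1020-1023 stay reserved. A small sketch of the ranges involved (boundaries per the GICv3 architecture; the helper is illustrative):

    #include <stdio.h>

    #define GICV3_LPI_INTID_START 8192

    static const char *classify(int irq, int num_irq, int lpi_enable)
    {
        if (irq < num_irq) {
            return "SGI/PPI/SPI";
        }
        if (lpi_enable && irq >= GICV3_LPI_INTID_START) {
            return "LPI";
        }
        return "spurious/ignored";       /* includes the 1020-1023 IDs */
    }

    int main(void)
    {
        printf("%4d: %s\n", 40, classify(40, 256, 1));
        printf("%4d: %s\n", 1021, classify(1021, 256, 1));
        printf("%4d: %s\n", 8200, classify(8200, 256, 1));
        return 0;
    }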
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
index 5beb7c4235..4164500ea9 100644
--- a/hw/intc/arm_gicv3_dist.c
+++ b/hw/intc/arm_gicv3_dist.c
@@ -384,7 +384,9 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
* A3V == 1 (non-zero values of Affinity level 3 supported)
* IDbits == 0xf (we support 16-bit interrupt identifiers)
* DVIS == 0 (Direct virtual LPI injection not supported)
- * LPIS == 0 (LPIs not supported)
+ * LPIS == 1 (LPIs are supported if affinity routing is enabled)
+ * num_LPIs == 0b00000 (bits [15:11], Number of LPIs as indicated
+ * by GICD_TYPER.IDbits)
* MBIS == 0 (message-based SPIs not supported)
* SecurityExtn == 1 if security extns supported
* CPUNumber == 0 since for us ARE is always 1
@@ -399,6 +401,7 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS);
*data = (1 << 25) | (1 << 24) | (sec_extn << 10) |
+ (s->lpi_enable << GICD_TYPER_LPIS_SHIFT) |
(0xf << 19) | itlinesnumber;
return true;
}
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
new file mode 100644
index 0000000000..84bcbb5f56
--- /dev/null
+++ b/hw/intc/arm_gicv3_its.c
@@ -0,0 +1,1322 @@
+/*
+ * ITS emulation for a GICv3-based system
+ *
+ * Copyright Linaro.org 2021
+ *
+ * Authors:
+ * Shashi Mallela <shashi.mallela@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/arm_gicv3_its_common.h"
+#include "gicv3_internal.h"
+#include "qom/object.h"
+#include "qapi/error.h"
+
+typedef struct GICv3ITSClass GICv3ITSClass;
+/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
+DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
+ ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
+
+struct GICv3ITSClass {
+ GICv3ITSCommonClass parent_class;
+ void (*parent_reset)(DeviceState *dev);
+};
+
+/*
+ * This is an internal enum used to distinguish between an LPI triggered
+ * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
+ */
+typedef enum ItsCmdType {
+ NONE = 0, /* internal indication for GITS_TRANSLATER write */
+ CLEAR = 1,
+ DISCARD = 2,
+ INTERRUPT = 3,
+} ItsCmdType;
+
+typedef struct {
+ uint32_t iteh;
+ uint64_t itel;
+} IteEntry;
+
+static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
+{
+ uint64_t result = 0;
+
+ switch (page_sz) {
+ case GITS_PAGE_SIZE_4K:
+ case GITS_PAGE_SIZE_16K:
+ result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
+ break;
+
+ case GITS_PAGE_SIZE_64K:
+ result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
+ result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
+ break;
+
+ default:
+ break;
+ }
+ return result;
+}
+
+static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
+ MemTxResult *res)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t l2t_addr;
+ uint64_t value;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+
+ if (s->ct.indirect) {
+ l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->ct.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+
+ if (*res == MEMTX_OK) {
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+
+ l2t_addr = value & ((1ULL << 51) - 1);
+
+ *cte = address_space_ldq_le(as, l2t_addr +
+ ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ }
+ }
+ } else {
+ /* Flat level table */
+ *cte = address_space_ldq_le(as, s->ct.base_addr +
+ (icid * GITS_CTE_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ }
+
+ return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
+}
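/*
 * Editorial sketch, not part of the patch: the two-level walk above
 * reduced to plain index arithmetic. Each valid L1 entry points at one
 * page of table entries, so an ID splits into an L1 slot and an offset
 * within that L2 page. The sizes below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ  0x1000u    /* 4KB L2 pages */
#define ENTRY_SZ 8u         /* 8 bytes per CTE/DTE */

int main(void)
{
    uint32_t icid = 5000;
    uint32_t entries_per_page = PAGE_SZ / ENTRY_SZ;   /* 512 */
    uint32_t l1_slot = icid / entries_per_page;       /* 9 */
    uint32_t l2_off = (icid % entries_per_page) * ENTRY_SZ;

    printf("icid %u -> L1 entry %u, byte offset 0x%x in its L2 page\n",
           icid, l1_slot, l2_off);
    return 0;
}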
+
+static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
+ IteEntry ite)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t itt_addr;
+ MemTxResult res = MEMTX_OK;
+
+ itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
+ itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
+
+ address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
+ sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
+ &res);
+
+ if (res == MEMTX_OK) {
+ address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
+ sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ if (res != MEMTX_OK) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
+ uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t itt_addr;
+ bool status = false;
+ IteEntry ite = {};
+
+ itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
+ itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
+
+ ite.itel = address_space_ldq_le(as, itt_addr +
+ (eventid * (sizeof(uint64_t) +
+ sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
+ res);
+
+ if (*res == MEMTX_OK) {
+ ite.iteh = address_space_ldl_le(as, itt_addr +
+ (eventid * (sizeof(uint64_t) +
+ sizeof(uint32_t))) + sizeof(uint32_t),
+ MEMTXATTRS_UNSPECIFIED, res);
+
+ if (*res == MEMTX_OK) {
+ if (ite.itel & TABLE_ENTRY_VALID_MASK) {
+ if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) &
+ GITS_TYPE_PHYSICAL) {
+ *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >>
+ ITE_ENTRY_INTID_SHIFT;
+ *icid = ite.iteh & ITE_ENTRY_ICID_MASK;
+ status = true;
+ }
+ }
+ }
+ }
+ return status;
+}
+
+static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t l2t_addr;
+ uint64_t value;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+
+ if (s->dt.indirect) {
+ l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->dt.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+
+ if (*res == MEMTX_OK) {
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+
+ l2t_addr = value & ((1ULL << 51) - 1);
+
+ value = address_space_ldq_le(as, l2t_addr +
+ ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ }
+ }
+ } else {
+ /* Flat level table */
+ value = address_space_ldq_le(as, s->dt.base_addr +
+ (devid * GITS_DTE_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ }
+
+ return value;
+}
+
+/*
+ * This function handles the processing of the following commands, based
+ * on the ItsCmdType parameter passed in:
+ * 1. triggering of LPI interrupt translation via the ITS INT command
+ * 2. triggering of LPI interrupt translation via the GITS_TRANSLATER register
+ * 3. handling of the ITS CLEAR command
+ * 4. handling of the ITS DISCARD command
+ */
+static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
+ ItsCmdType cmd)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint32_t devid, eventid;
+ MemTxResult res = MEMTX_OK;
+ bool dte_valid;
+ uint64_t dte = 0;
+ uint32_t max_eventid;
+ uint16_t icid = 0;
+ uint32_t pIntid = 0;
+ bool ite_valid = false;
+ uint64_t cte = 0;
+ bool cte_valid = false;
+ bool result = false;
+ uint64_t rdbase;
+
+ if (cmd == NONE) {
+ devid = offset;
+ } else {
+ devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ }
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ eventid = (value & EVENTID_MASK);
+
+ dte = get_dte(s, devid, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+ dte_valid = dte & TABLE_ENTRY_VALID_MASK;
+
+ if (dte_valid) {
+ max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
+
+ ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ if (ite_valid) {
+ cte_valid = get_cte(s, icid, &cte, &res);
+ }
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+ }
+
+ if ((devid > s->dt.maxids.max_devids) || !dte_valid || !ite_valid ||
+ !cte_valid || (eventid > max_eventid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes "
+ "devid %d or eventid %d or invalid dte %d or"
+ "invalid cte %d or invalid ite %d\n",
+ __func__, devid, eventid, dte_valid, cte_valid,
+ ite_valid);
+ /*
+ * in this implementation, in case of error
+ * we ignore this command and move on to the next
+ * command in the queue
+ */
+ } else {
+ /*
+ * Current implementation only supports rdbase == procnum
+ * Hence rdbase physical address is ignored
+ */
+ rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U;
+
+ if (rdbase >= s->gicv3->num_cpu) {
+ return result;
+ }
+
+ if ((cmd == CLEAR) || (cmd == DISCARD)) {
+ gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
+ } else {
+ gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
+ }
+
+ if (cmd == DISCARD) {
+ IteEntry ite = {};
+ /* remove mapping from interrupt translation table */
+ result = update_ite(s, eventid, dte, ite);
+ }
+ }
+
+ return result;
+}
+
+static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
+ bool ignore_pInt)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint32_t devid, eventid;
+ uint32_t pIntid = 0;
+ uint32_t max_eventid, max_Intid;
+ bool dte_valid;
+ MemTxResult res = MEMTX_OK;
+ uint16_t icid = 0;
+ uint64_t dte = 0;
+ IteEntry ite;
+ uint32_t int_spurious = INTID_SPURIOUS;
+ bool result = false;
+
+ devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ eventid = (value & EVENTID_MASK);
+
+ if (!ignore_pInt) {
+ pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
+ }
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ icid = value & ICID_MASK;
+
+ dte = get_dte(s, devid, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+ dte_valid = dte & TABLE_ENTRY_VALID_MASK;
+
+ max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
+
+ if (!ignore_pInt) {
+ max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
+ }
+
+ if ((devid > s->dt.maxids.max_devids) || (icid > s->ct.maxids.max_collids)
+ || !dte_valid || (eventid > max_eventid) ||
+ (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
+ (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes "
+ "devid %d or icid %d or eventid %d or pIntid %d or"
+ "unmapped dte %d\n", __func__, devid, icid, eventid,
+ pIntid, dte_valid);
+ /*
+ * in this implementation, in case of error
+ * we ignore this command and move on to the next
+ * command in the queue
+ */
+ } else {
+ /* add ite entry to interrupt translation table */
+ ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) |
+ (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT);
+
+ if (ignore_pInt) {
+ ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT);
+ } else {
+ ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT);
+ }
+ ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT);
+ ite.iteh = icid;
+
+ result = update_ite(s, eventid, dte, ite);
+ }
+
+ return result;
+}
+
+static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
+ uint64_t rdbase)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t value;
+ uint64_t l2t_addr;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+ uint64_t cte = 0;
+ MemTxResult res = MEMTX_OK;
+
+ if (!s->ct.valid) {
+ return true;
+ }
+
+ if (valid) {
+ /* add mapping entry to collection table */
+ cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
+ }
+
+ /*
+ * The specification defines the format of level 1 entries of a
+ * 2-level table, but the format of level 2 entries and the format
+ * of flat-mapped tables is IMPDEF.
+ */
+ if (s->ct.indirect) {
+ l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->ct.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return false;
+ }
+
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+
+ l2t_addr = value & ((1ULL << 51) - 1);
+
+ address_space_stq_le(as, l2t_addr +
+ ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ cte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ } else {
+ /* Flat level table */
+ address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
+ cte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ if (res != MEMTX_OK) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool process_mapc(GICv3ITSState *s, uint32_t offset)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint16_t icid;
+ uint64_t rdbase;
+ bool valid;
+ MemTxResult res = MEMTX_OK;
+ bool result = false;
+ uint64_t value;
+
+ offset += NUM_BYTES_IN_DW;
+ offset += NUM_BYTES_IN_DW;
+
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ icid = value & ICID_MASK;
+
+ rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
+ rdbase &= RDBASE_PROCNUM_MASK;
+
+ valid = (value & CMD_FIELD_VALID_MASK);
+
+ if ((icid > s->ct.maxids.max_collids) || (rdbase >= s->gicv3->num_cpu)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPC: invalid collection table attributes "
+ "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
+ /*
+ * in this implementation, in case of error
+ * we ignore this command and move on to the next
+ * command in the queue
+ */
+ } else {
+ result = update_cte(s, icid, valid, rdbase);
+ }
+
+ return result;
+}
+
+static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
+ uint8_t size, uint64_t itt_addr)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t value;
+ uint64_t l2t_addr;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+ uint64_t dte = 0;
+ MemTxResult res = MEMTX_OK;
+
+ if (s->dt.valid) {
+ if (valid) {
+ /* add mapping entry to device table */
+ dte = (valid & TABLE_ENTRY_VALID_MASK) |
+ ((size & SIZE_MASK) << 1U) |
+ (itt_addr << GITS_DTE_ITTADDR_SHIFT);
+ }
+ } else {
+ return true;
+ }
+
+ /*
+ * The specification defines the format of level 1 entries of a
+ * 2-level table, but the format of level 2 entries and the format
+ * of flat-mapped tables is IMPDEF.
+ */
+ if (s->dt.indirect) {
+ l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->dt.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return false;
+ }
+
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+
+ l2t_addr = value & ((1ULL << 51) - 1);
+
+ address_space_stq_le(as, l2t_addr +
+ ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ dte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ } else {
+ /* Flat level table */
+ address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
+ dte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ if (res != MEMTX_OK) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint32_t devid;
+ uint8_t size;
+ uint64_t itt_addr;
+ bool valid;
+ MemTxResult res = MEMTX_OK;
+ bool result = false;
+
+ devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ size = (value & SIZE_MASK);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
+
+ valid = (value & CMD_FIELD_VALID_MASK);
+
+ if ((devid > s->dt.maxids.max_devids) ||
+ (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPD: invalid device table attributes "
+ "devid %d or size %d\n", devid, size);
+ /*
+ * in this implementation, in case of error
+ * we ignore this command and move on to the next
+ * command in the queue
+ */
+ } else {
+ result = update_dte(s, devid, valid, size, itt_addr);
+ }
+
+ return result;
+}
+
+/*
+ * Current implementation blocks until all
+ * commands are processed
+ */
+static void process_cmdq(GICv3ITSState *s)
+{
+ uint32_t wr_offset = 0;
+ uint32_t rd_offset = 0;
+ uint32_t cq_offset = 0;
+ uint64_t data;
+ AddressSpace *as = &s->gicv3->dma_as;
+ MemTxResult res = MEMTX_OK;
+ bool result = true;
+ uint8_t cmd;
+ int i;
+
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ return;
+ }
+
+ wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
+
+ if (wr_offset >= s->cq.max_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid write offset "
+ "%d\n", __func__, wr_offset);
+ return;
+ }
+
+ rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
+
+ if (rd_offset >= s->cq.max_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid read offset "
+ "%d\n", __func__, rd_offset);
+ return;
+ }
+
+ while (wr_offset != rd_offset) {
+ cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
+ data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ result = false;
+ }
+ cmd = (data & CMD_MASK);
+
+ switch (cmd) {
+ case GITS_CMD_INT:
+ result = process_its_cmd(s, data, cq_offset, INTERRUPT);
+ break;
+ case GITS_CMD_CLEAR:
+ result = process_its_cmd(s, data, cq_offset, CLEAR);
+ break;
+ case GITS_CMD_SYNC:
+ /*
+ * Current implementation makes a blocking synchronous call
+ * for every command issued earlier, hence the internal state
+ * is already consistent by the time SYNC command is executed.
+ * Hence no further processing is required for SYNC command.
+ */
+ break;
+ case GITS_CMD_MAPD:
+ result = process_mapd(s, data, cq_offset);
+ break;
+ case GITS_CMD_MAPC:
+ result = process_mapc(s, cq_offset);
+ break;
+ case GITS_CMD_MAPTI:
+ result = process_mapti(s, data, cq_offset, false);
+ break;
+ case GITS_CMD_MAPI:
+ result = process_mapti(s, data, cq_offset, true);
+ break;
+ case GITS_CMD_DISCARD:
+ result = process_its_cmd(s, data, cq_offset, DISCARD);
+ break;
+ case GITS_CMD_INV:
+ case GITS_CMD_INVALL:
+ /*
+ * The current implementation doesn't cache any ITS tables,
+ * only the calculated LPI priority information. We therefore
+ * just trigger an LPI priority recalculation to stay in sync
+ * with LPI config table or pending table changes.
+ */
+ for (i = 0; i < s->gicv3->num_cpu; i++) {
+ gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
+ }
+ break;
+ default:
+ break;
+ }
+ if (result) {
+ rd_offset++;
+ rd_offset %= s->cq.max_entries;
+ s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
+ } else {
+ /*
+ * in this implementation, in case of dma read/write error
+ * we stall the command processing
+ */
+ s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: %x cmd processing failed\n", __func__, cmd);
+ break;
+ }
+ }
+}
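/*
 * Editorial sketch, not part of the patch: the CREADR/CWRITER ring in
 * miniature. The consumer walks slots until it catches the producer,
 * wrapping modulo the queue size; a failing command stops processing
 * (the real code additionally sets GITS_CREADR.STALLED).
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 8u

static int process_slot(uint32_t slot)
{
    printf("processing command slot %u\n", slot);
    return 0;    /* nonzero would stall the queue */
}

int main(void)
{
    uint32_t creadr = 6, cwriter = 2;    /* producer has wrapped */

    while (creadr != cwriter) {
        if (process_slot(creadr)) {
            break;
        }
        creadr = (creadr + 1) % MAX_ENTRIES;
    }
    return 0;    /* slots 6, 7, 0, 1 were consumed */
}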
+
+/*
+ * This function extracts the ITS Device and Collection table specific
+ * parameters (like base_addr, size etc) from GITS_BASER register.
+ * It is called during ITS enable and also during post_load migration
+ */
+static void extract_table_params(GICv3ITSState *s)
+{
+ uint16_t num_pages = 0;
+ uint8_t page_sz_type;
+ uint8_t type;
+ uint32_t page_sz = 0;
+ uint64_t value;
+
+ for (int i = 0; i < 8; i++) {
+ value = s->baser[i];
+
+ if (!value) {
+ continue;
+ }
+
+ page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
+
+ switch (page_sz_type) {
+ case 0:
+ page_sz = GITS_PAGE_SIZE_4K;
+ break;
+
+ case 1:
+ page_sz = GITS_PAGE_SIZE_16K;
+ break;
+
+ case 2:
+ case 3:
+ page_sz = GITS_PAGE_SIZE_64K;
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
+
+ type = FIELD_EX64(value, GITS_BASER, TYPE);
+
+ switch (type) {
+
+ case GITS_BASER_TYPE_DEVICE:
+ memset(&s->dt, 0, sizeof(s->dt));
+ s->dt.valid = FIELD_EX64(value, GITS_BASER, VALID);
+
+ if (!s->dt.valid) {
+ return;
+ }
+
+ s->dt.page_sz = page_sz;
+ s->dt.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
+ s->dt.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
+
+ if (!s->dt.indirect) {
+ s->dt.max_entries = (num_pages * page_sz) / s->dt.entry_sz;
+ } else {
+ s->dt.max_entries = (((num_pages * page_sz) /
+ L1TABLE_ENTRY_SIZE) *
+ (page_sz / s->dt.entry_sz));
+ }
+
+ s->dt.maxids.max_devids = (1UL << (FIELD_EX64(s->typer, GITS_TYPER,
+ DEVBITS) + 1));
+
+ s->dt.base_addr = baser_base_addr(value, page_sz);
+
+ break;
+
+ case GITS_BASER_TYPE_COLLECTION:
+ memset(&s->ct, 0, sizeof(s->ct));
+ s->ct.valid = FIELD_EX64(value, GITS_BASER, VALID);
+
+ /*
+ * GITS_TYPER.HCC is 0 for this implementation,
+ * hence writes are discarded if ct.valid is 0
+ */
+ if (!s->ct.valid) {
+ return;
+ }
+
+ s->ct.page_sz = page_sz;
+ s->ct.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
+ s->ct.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
+
+ if (!s->ct.indirect) {
+ s->ct.max_entries = (num_pages * page_sz) / s->ct.entry_sz;
+ } else {
+ s->ct.max_entries = (((num_pages * page_sz) /
+ L1TABLE_ENTRY_SIZE) *
+ (page_sz / s->ct.entry_sz));
+ }
+
+ if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
+ s->ct.maxids.max_collids = (1UL << (FIELD_EX64(s->typer,
+ GITS_TYPER, CIDBITS) + 1));
+ } else {
+ /* 16-bit CollectionId supported when CIL == 0 */
+ s->ct.maxids.max_collids = (1UL << 16);
+ }
+
+ s->ct.base_addr = baser_base_addr(value, page_sz);
+
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static void extract_cmdq_params(GICv3ITSState *s)
+{
+ uint16_t num_pages = 0;
+ uint64_t value = s->cbaser;
+
+ num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
+
+ memset(&s->cq, 0, sizeof(s->cq));
+ s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
+
+ if (s->cq.valid) {
+ s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
+ GITS_CMDQ_ENTRY_SIZE;
+ s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
+ s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
+ }
+}
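/*
 * Editorial sketch, not part of the patch: the command-queue sizing
 * above worked for the smallest case. GITS_CBASER.SIZE holds the
 * number of 4KB pages minus one, and each ITS command is 32 bytes.
 */
#include <stdio.h>

int main(void)
{
    unsigned size_field = 0;                      /* from GITS_CBASER */
    unsigned num_pages = size_field + 1;
    unsigned max_entries = (num_pages * 4096u) / 32u;

    printf("%u page(s) -> %u command slots\n", num_pages, max_entries);
    return 0;    /* prints: 1 page(s) -> 128 command slots */
}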
+
+static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
+ uint64_t data, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ bool result = true;
+ uint32_t devid = 0;
+
+ switch (offset) {
+ case GITS_TRANSLATER:
+ if (s->ctlr & ITS_CTLR_ENABLED) {
+ devid = attrs.requester_id;
+ result = process_its_cmd(s, data, devid, NONE);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (result) {
+ return MEMTX_OK;
+ } else {
+ return MEMTX_ERROR;
+ }
+}
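/*
 * Editorial sketch, not part of the patch: a GITS_TRANSLATER doorbell
 * write does not carry the DeviceID in its data. The DeviceID comes
 * from the requester id in the transaction attributes, and the written
 * value supplies the EventID. Types are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint16_t requester_id;    /* stand-in for MemTxAttrs.requester_id */
} Attrs;

static void translater_write_model(uint32_t data, Attrs attrs)
{
    uint32_t devid = attrs.requester_id;   /* who rang the doorbell */
    uint32_t eventid = data;               /* which event was raised */
    printf("devid 0x%x, eventid 0x%x\n", devid, eventid);
}

int main(void)
{
    translater_write_model(0x17, (Attrs){ .requester_id = 0x8 });
    return 0;
}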
+
+static bool its_writel(GICv3ITSState *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ bool result = true;
+ int index;
+
+ switch (offset) {
+ case GITS_CTLR:
+ s->ctlr |= (value & ~(s->ctlr));
+
+ if (s->ctlr & ITS_CTLR_ENABLED) {
+ extract_table_params(s);
+ extract_cmdq_params(s);
+ s->creadr = 0;
+ process_cmdq(s);
+ }
+ break;
+ case GITS_CBASER:
+ /*
+ * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
+ * already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ s->cbaser = deposit64(s->cbaser, 0, 32, value);
+ s->creadr = 0;
+ s->cwriter = s->creadr;
+ }
+ break;
+ case GITS_CBASER + 4:
+ /*
+ * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
+ * already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ s->cbaser = deposit64(s->cbaser, 32, 32, value);
+ s->creadr = 0;
+ s->cwriter = s->creadr;
+ }
+ break;
+ case GITS_CWRITER:
+ s->cwriter = deposit64(s->cwriter, 0, 32,
+ (value & ~R_GITS_CWRITER_RETRY_MASK));
+ if (s->cwriter != s->creadr) {
+ process_cmdq(s);
+ }
+ break;
+ case GITS_CWRITER + 4:
+ s->cwriter = deposit64(s->cwriter, 32, 32, value);
+ break;
+ case GITS_CREADR:
+ if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
+ s->creadr = deposit64(s->creadr, 0, 32,
+ (value & ~R_GITS_CREADR_STALLED_MASK));
+ } else {
+ /* RO register, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ }
+ break;
+ case GITS_CREADR + 4:
+ if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
+ s->creadr = deposit64(s->creadr, 32, 32, value);
+ } else {
+ /* RO register, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ }
+ break;
+ case GITS_BASER ... GITS_BASER + 0x3f:
+ /*
+ * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
+ * already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ index = (offset - GITS_BASER) / 8;
+
+ if (offset & 7) {
+ value <<= 32;
+ value &= ~GITS_BASER_RO_MASK;
+ s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
+ s->baser[index] |= value;
+ } else {
+ value &= ~GITS_BASER_RO_MASK;
+ s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
+ s->baser[index] |= value;
+ }
+ }
+ break;
+ case GITS_IIDR:
+ case GITS_IDREGS ... GITS_IDREGS + 0x2f:
+ /* RO registers, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+static bool its_readl(GICv3ITSState *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ bool result = true;
+ int index;
+
+ switch (offset) {
+ case GITS_CTLR:
+ *data = s->ctlr;
+ break;
+ case GITS_IIDR:
+ *data = gicv3_iidr();
+ break;
+ case GITS_IDREGS ... GITS_IDREGS + 0x2f:
+ /* ID registers */
+ *data = gicv3_idreg(offset - GITS_IDREGS);
+ break;
+ case GITS_TYPER:
+ *data = extract64(s->typer, 0, 32);
+ break;
+ case GITS_TYPER + 4:
+ *data = extract64(s->typer, 32, 32);
+ break;
+ case GITS_CBASER:
+ *data = extract64(s->cbaser, 0, 32);
+ break;
+ case GITS_CBASER + 4:
+ *data = extract64(s->cbaser, 32, 32);
+ break;
+ case GITS_CREADR:
+ *data = extract64(s->creadr, 0, 32);
+ break;
+ case GITS_CREADR + 4:
+ *data = extract64(s->creadr, 32, 32);
+ break;
+ case GITS_CWRITER:
+ *data = extract64(s->cwriter, 0, 32);
+ break;
+ case GITS_CWRITER + 4:
+ *data = extract64(s->cwriter, 32, 32);
+ break;
+ case GITS_BASER ... GITS_BASER + 0x3f:
+ index = (offset - GITS_BASER) / 8;
+ if (offset & 7) {
+ *data = extract64(s->baser[index], 32, 32);
+ } else {
+ *data = extract64(s->baser[index], 0, 32);
+ }
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+static bool its_writell(GICv3ITSState *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ bool result = true;
+ int index;
+
+ switch (offset) {
+ case GITS_BASER ... GITS_BASER + 0x3f:
+ /*
+ * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
+ * already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ index = (offset - GITS_BASER) / 8;
+ s->baser[index] &= GITS_BASER_RO_MASK;
+ s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
+ }
+ break;
+ case GITS_CBASER:
+ /*
+ * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
+ * already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ s->cbaser = value;
+ s->creadr = 0;
+ s->cwriter = s->creadr;
+ }
+ break;
+ case GITS_CWRITER:
+ s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
+ if (s->cwriter != s->creadr) {
+ process_cmdq(s);
+ }
+ break;
+ case GITS_CREADR:
+ if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
+ s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
+ } else {
+ /* RO register, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ }
+ break;
+ case GITS_TYPER:
+ /* RO registers, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+static bool its_readll(GICv3ITSState *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ bool result = true;
+ int index;
+
+ switch (offset) {
+ case GITS_TYPER:
+ *data = s->typer;
+ break;
+ case GITS_BASER ... GITS_BASER + 0x3f:
+ index = (offset - GITS_BASER) / 8;
+ *data = s->baser[index];
+ break;
+ case GITS_CBASER:
+ *data = s->cbaser;
+ break;
+ case GITS_CREADR:
+ *data = s->creadr;
+ break;
+ case GITS_CWRITER:
+ *data = s->cwriter;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ bool result;
+
+ switch (size) {
+ case 4:
+ result = its_readl(s, offset, data, attrs);
+ break;
+ case 8:
+ result = its_readll(s, offset, data, attrs);
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ if (!result) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest read at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+ /*
+ * The spec requires that reserved registers are RAZ/WI;
+ * so use false returns from leaf functions as a way to
+ * trigger the guest-error logging but don't return it to
+ * the caller, or we'll cause a spurious guest data abort.
+ */
+ *data = 0;
+ }
+ return MEMTX_OK;
+}
+
+static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ bool result;
+
+ switch (size) {
+ case 4:
+ result = its_writel(s, offset, data, attrs);
+ break;
+ case 8:
+ result = its_writell(s, offset, data, attrs);
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ if (!result) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+ /*
+ * The spec requires that reserved registers are RAZ/WI;
+ * so use false returns from leaf functions as a way to
+ * trigger the guest-error logging but don't return it to
+ * the caller, or we'll cause a spurious guest data abort.
+ */
+ }
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps gicv3_its_control_ops = {
+ .read_with_attrs = gicv3_its_read,
+ .write_with_attrs = gicv3_its_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const MemoryRegionOps gicv3_its_translation_ops = {
+ .write_with_attrs = gicv3_its_translation_write,
+ .valid.min_access_size = 2,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 2,
+ .impl.max_access_size = 4,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+ int i;
+
+ for (i = 0; i < s->gicv3->num_cpu; i++) {
+ if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
+ error_setg(errp, "Physical LPI not supported by CPU %d", i);
+ return;
+ }
+ }
+
+ gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
+
+ address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
+ "gicv3-its-sysmem");
+
+ /* Set the default features supported by this ITS implementation */
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL,
+ GITS_TYPE_PHYSICAL);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
+ ITS_ITT_ENTRY_SIZE - 1);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
+}
+
+static void gicv3_its_reset(DeviceState *dev)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+ GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
+
+ c->parent_reset(dev);
+
+ /* Quiescent bit reset to 1 */
+ s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
+
+ /*
+ * Set GITS_BASER0.Type = 0b001 (Device),
+ * GITS_BASER1.Type = 0b100 (Collection Table),
+ * GITS_BASER<n>.Type, where n = 2 to 7, to 0b000 (Unimplemented),
+ * GITS_BASER<0,1>.Page_Size = 64KB,
+ * and the DTE/CTE entry sizes to 8 bytes each
+ */
+ s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
+ GITS_BASER_TYPE_DEVICE);
+ s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
+ GITS_BASER_PAGESIZE_64K);
+ s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
+ GITS_DTE_SIZE - 1);
+
+ s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
+ GITS_BASER_TYPE_COLLECTION);
+ s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
+ GITS_BASER_PAGESIZE_64K);
+ s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
+ GITS_CTE_SIZE - 1);
+}
+
+static void gicv3_its_post_load(GICv3ITSState *s)
+{
+ if (s->ctlr & ITS_CTLR_ENABLED) {
+ extract_table_params(s);
+ extract_cmdq_params(s);
+ }
+}
+
+static Property gicv3_its_props[] = {
+ DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
+ GICv3State *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void gicv3_its_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
+ GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
+
+ dc->realize = gicv3_arm_its_realize;
+ device_class_set_props(dc, gicv3_its_props);
+ device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
+ icc->post_load = gicv3_its_post_load;
+}
+
+static const TypeInfo gicv3_its_info = {
+ .name = TYPE_ARM_GICV3_ITS,
+ .parent = TYPE_ARM_GICV3_ITS_COMMON,
+ .instance_size = sizeof(GICv3ITSState),
+ .class_init = gicv3_its_class_init,
+ .class_size = sizeof(GICv3ITSClass),
+};
+
+static void gicv3_its_register_types(void)
+{
+ type_register_static(&gicv3_its_info);
+}
+
+type_init(gicv3_its_register_types)
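The programming model implemented above is: the guest sizes the command queue via GITS_CBASER, then appends commands by advancing GITS_CWRITER, which triggers process_cmdq(). As a rough sketch of the sizing arithmetic (cmdq_num_entries() is a hypothetical helper, not part of the patch; FIELD_EX64(), GITS_CBASER and GITS_CMDQ_ENTRY_SIZE are the ones introduced in this series):

    /*
     * Hypothetical helper, illustration only: capacity of the ITS
     * command queue in entries. GITS_CBASER.Size holds the number of
     * 4KB pages minus one; each command occupies 32 bytes.
     */
    static uint32_t cmdq_num_entries(uint64_t cbaser)
    {
        uint64_t num_pages = FIELD_EX64(cbaser, GITS_CBASER, SIZE) + 1;

        return (num_pages * 4096) / GITS_CMDQ_ENTRY_SIZE;
    }

This also motivates the IMPDEF choice above: since GITS_BASER<n> and GITS_CBASER become RO once GITS_CTLR.Enabled is set, a guest must program its tables and queue before enabling the ITS.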
diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c
index 66c4c6a188..7d7f3882e7 100644
--- a/hw/intc/arm_gicv3_its_common.c
+++ b/hw/intc/arm_gicv3_its_common.c
@@ -50,6 +50,8 @@ static int gicv3_its_post_load(void *opaque, int version_id)
static const VMStateDescription vmstate_its = {
.name = "arm_gicv3_its",
+ .version_id = 1,
+ .minimum_version_id = 1,
.pre_save = gicv3_its_pre_save,
.post_load = gicv3_its_post_load,
.priority = MIG_PRI_GICV3_ITS,
@@ -99,14 +101,15 @@ static const MemoryRegionOps gicv3_its_trans_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops)
+void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops,
+ const MemoryRegionOps *tops)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(s);
memory_region_init_io(&s->iomem_its_cntrl, OBJECT(s), ops, s,
"control", ITS_CONTROL_SIZE);
memory_region_init_io(&s->iomem_its_translation, OBJECT(s),
- &gicv3_its_trans_ops, s,
+ tops ? tops : &gicv3_its_trans_ops, s,
"translation", ITS_TRANS_SIZE);
/* Our two regions are always adjacent, therefore we now combine them
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
index b554d2ede0..0b4cbed28b 100644
--- a/hw/intc/arm_gicv3_its_kvm.c
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -106,7 +106,7 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0);
- gicv3_its_init_mmio(s, NULL);
+ gicv3_its_init_mmio(s, NULL, NULL);
if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
GITS_CTLR)) {
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
index 53da703ed8..7072bfcbb1 100644
--- a/hw/intc/arm_gicv3_redist.c
+++ b/hw/intc/arm_gicv3_redist.c
@@ -248,10 +248,19 @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
case GICR_CTLR:
/* For our implementation, GICR_TYPER.DPGS is 0 and so all
* the DPG bits are RAZ/WI. We don't do anything asynchronously,
- * so UWP and RWP are RAZ/WI. And GICR_TYPER.LPIS is 0 (we don't
- * implement LPIs) so Enable_LPIs is RES0. So there are no writable
- * bits for us.
+ * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
+ * implement LPIs) so Enable_LPIs is programmable.
*/
+ if (cs->gicr_typer & GICR_TYPER_PLPIS) {
+ if (value & GICR_CTLR_ENABLE_LPIS) {
+ cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
+ /* Check for any pending interrupts in the pending table */
+ gicv3_redist_update_lpi(cs);
+ gicv3_redist_update(cs);
+ } else {
+ cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
+ }
+ }
return MEMTX_OK;
case GICR_STATUSR:
/* RAZ/WI for our implementation */
@@ -526,6 +535,144 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
return r;
}
+static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
+{
+ AddressSpace *as = &cs->gic->dma_as;
+ uint64_t lpict_baddr;
+ uint8_t lpite;
+ uint8_t prio;
+
+ lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;
+
+ address_space_read(as, lpict_baddr + ((irq - GICV3_LPI_INTID_START) *
+ sizeof(lpite)), MEMTXATTRS_UNSPECIFIED, &lpite,
+ sizeof(lpite));
+
+ if (!(lpite & LPI_CTE_ENABLED)) {
+ return;
+ }
+
+ if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
+ prio = lpite & LPI_PRIORITY_MASK;
+ } else {
+ prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
+ }
+
+ if ((prio < cs->hpplpi.prio) ||
+ ((prio == cs->hpplpi.prio) && (irq <= cs->hpplpi.irq))) {
+ cs->hpplpi.irq = irq;
+ cs->hpplpi.prio = prio;
+ /* LPIs are always non-secure Grp1 interrupts */
+ cs->hpplpi.grp = GICV3_G1NS;
+ }
+}
+
+void gicv3_redist_update_lpi(GICv3CPUState *cs)
+{
+ /*
+ * This function scans the LPI pending table and, for each pending
+ * LPI, reads the corresponding entry from the LPI configuration table
+ * to extract its priority. If that LPI is a better candidate than the
+ * currently recorded highest-priority pending LPI, it is recorded as
+ * the new highest-priority pending LPI.
+ */
+ AddressSpace *as = &cs->gic->dma_as;
+ uint64_t lpipt_baddr;
+ uint32_t pendt_size = 0;
+ uint8_t pend;
+ int i, bit;
+ uint64_t idbits;
+
+ idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
+ GICD_TYPER_IDBITS);
+
+ if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
+ !cs->gicr_pendbaser) {
+ return;
+ }
+
+ cs->hpplpi.prio = 0xff;
+
+ lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
+
+ /* Determine the highest priority pending interrupt among LPIs */
+ pendt_size = (1ULL << (idbits + 1));
+
+ for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
+ address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend,
+ sizeof(pend));
+
+ while (pend) {
+ bit = ctz32(pend);
+ gicv3_redist_check_lpi_priority(cs, i * 8 + bit);
+ pend &= ~(1 << bit);
+ }
+ }
+}
+
+void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
+{
+ /*
+ * This function updates the pending bit in the LPI pending table
+ * for the irq being activated or deactivated.
+ */
+ AddressSpace *as = &cs->gic->dma_as;
+ uint64_t lpipt_baddr;
+ bool ispend = false;
+ uint8_t pend;
+
+ /*
+ * Get the bit corresponding to this irq in the
+ * LPI pending table
+ */
+ lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
+
+ address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
+ MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));
+
+ ispend = extract32(pend, irq % 8, 1);
+
+ /* No change in the pending bit: nothing to do */
+ if (ispend == level) {
+ return;
+ }
+ pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
+
+ address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
+ MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));
+
+ /*
+ * Check whether this LPI is better than the current hpplpi; if so,
+ * just set hpplpi.prio and .irq without doing a full rescan
+ */
+ if (level) {
+ gicv3_redist_check_lpi_priority(cs, irq);
+ } else {
+ if (irq == cs->hpplpi.irq) {
+ gicv3_redist_update_lpi(cs);
+ }
+ }
+}
+
+void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
+{
+ uint64_t idbits;
+
+ idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
+ GICD_TYPER_IDBITS);
+
+ if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
+ !cs->gicr_pendbaser || (irq > (1ULL << (idbits + 1)) - 1) ||
+ irq < GICV3_LPI_INTID_START) {
+ return;
+ }
+
+ /* set/clear the pending bit for this irq */
+ gicv3_redist_lpi_pending(cs, irq, level);
+
+ gicv3_redist_update(cs);
+}
+
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
/* Update redistributor state for a change in an external PPI input line */
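Two calculations in the new LPI code are easier to follow with worked numbers. A sketch with assumed values (illustration only, not from the patch):

    /* Illustration only, assumed values */
    uint64_t idbits = 15;                     /* == GICD_TYPER_IDBITS */
    uint32_t pendt_size = 1u << (idbits + 1); /* 65536 INTIDs */
    /* pending-table bytes scanned: 8192 / 8 = 1024 up to 65536 / 8 = 8192 */

    uint8_t lpite = 0x54;                     /* assumed LPI config entry */
    uint8_t prio_ns = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80; /* 0xaa */

The first part shows why gicv3_redist_update_lpi() starts its scan at GICV3_LPI_INTID_START / 8: bytes below that cover INTIDs 0..8191, which are never LPIs. The second is the standard GIC non-secure priority view, folding the secure range 0x00..0xfc into 0x80..0xfe.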
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
index 05303a55c8..a0369dace7 100644
--- a/hw/intc/gicv3_internal.h
+++ b/hw/intc/gicv3_internal.h
@@ -24,6 +24,7 @@
#ifndef QEMU_ARM_GICV3_INTERNAL_H
#define QEMU_ARM_GICV3_INTERNAL_H
+#include "hw/registerfields.h"
#include "hw/intc/arm_gicv3_common.h"
/* Distributor registers, as offsets from the distributor base address */
@@ -67,6 +68,11 @@
#define GICD_CTLR_E1NWF (1U << 7)
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_TYPER_LPIS_SHIFT 17
+
+/* 16-bit EventId */
+#define GICD_TYPER_IDBITS 0xf
+
/*
* Redistributor frame offsets from RD_base
*/
@@ -122,17 +128,19 @@
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
-#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK (7ULL << 56)
-#define GICR_PROPBASER_ADDR_MASK (0xfffffffffULL << 12)
-#define GICR_PROPBASER_SHAREABILITY_MASK (3U << 10)
-#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
-#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+FIELD(GICR_PROPBASER, IDBITS, 0, 5)
+FIELD(GICR_PROPBASER, INNERCACHE, 7, 3)
+FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2)
+FIELD(GICR_PROPBASER, PHYADDR, 12, 40)
+FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3)
+
+FIELD(GICR_PENDBASER, INNERCACHE, 7, 3)
+FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2)
+FIELD(GICR_PENDBASER, PHYADDR, 16, 36)
+FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3)
+FIELD(GICR_PENDBASER, PTZ, 62, 1)
-#define GICR_PENDBASER_PTZ (1ULL << 62)
-#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK (7ULL << 56)
-#define GICR_PENDBASER_ADDR_MASK (0xffffffffULL << 16)
-#define GICR_PENDBASER_SHAREABILITY_MASK (3U << 10)
-#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+#define GICR_PROPBASER_IDBITS_THRESHOLD 0xd
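For readers new to "hw/registerfields.h" (newly included above): each FIELD() declaration generates R_<REG>_<FIELD>_{SHIFT,LENGTH,MASK} constants for use with the FIELD_EX*/FIELD_DP* accessors, which is why the hand-written *_MASK defines can be dropped. Roughly, for the IDBITS declaration above:

    /* Approximate expansion, illustration only */
    enum { R_GICR_PROPBASER_IDBITS_SHIFT = 0,
           R_GICR_PROPBASER_IDBITS_LENGTH = 5 };
    /* R_GICR_PROPBASER_IDBITS_MASK == MAKE_64BIT_MASK(0, 5) */

    /* typical use, as seen in arm_gicv3_redist.c: */
    uint64_t idbits = FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS);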
#define ICC_CTLR_EL1_CBPR (1U << 0)
#define ICC_CTLR_EL1_EOIMODE (1U << 1)
@@ -239,6 +247,163 @@
#define ICH_VTR_EL2_PREBITS_SHIFT 26
#define ICH_VTR_EL2_PRIBITS_SHIFT 29
+/* ITS Registers */
+
+FIELD(GITS_BASER, SIZE, 0, 8)
+FIELD(GITS_BASER, PAGESIZE, 8, 2)
+FIELD(GITS_BASER, SHAREABILITY, 10, 2)
+FIELD(GITS_BASER, PHYADDR, 12, 36)
+FIELD(GITS_BASER, PHYADDRL_64K, 16, 32)
+FIELD(GITS_BASER, PHYADDRH_64K, 12, 4)
+FIELD(GITS_BASER, ENTRYSIZE, 48, 5)
+FIELD(GITS_BASER, OUTERCACHE, 53, 3)
+FIELD(GITS_BASER, TYPE, 56, 3)
+FIELD(GITS_BASER, INNERCACHE, 59, 3)
+FIELD(GITS_BASER, INDIRECT, 62, 1)
+FIELD(GITS_BASER, VALID, 63, 1)
+
+FIELD(GITS_CBASER, SIZE, 0, 8)
+FIELD(GITS_CBASER, SHAREABILITY, 10, 2)
+FIELD(GITS_CBASER, PHYADDR, 12, 40)
+FIELD(GITS_CBASER, OUTERCACHE, 53, 3)
+FIELD(GITS_CBASER, INNERCACHE, 59, 3)
+FIELD(GITS_CBASER, VALID, 63, 1)
+
+FIELD(GITS_CREADR, STALLED, 0, 1)
+FIELD(GITS_CREADR, OFFSET, 5, 15)
+
+FIELD(GITS_CWRITER, RETRY, 0, 1)
+FIELD(GITS_CWRITER, OFFSET, 5, 15)
+
+FIELD(GITS_CTLR, ENABLED, 0, 1)
+FIELD(GITS_CTLR, QUIESCENT, 31, 1)
+
+FIELD(GITS_TYPER, PHYSICAL, 0, 1)
+FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
+FIELD(GITS_TYPER, IDBITS, 8, 5)
+FIELD(GITS_TYPER, DEVBITS, 13, 5)
+FIELD(GITS_TYPER, SEIS, 18, 1)
+FIELD(GITS_TYPER, PTA, 19, 1)
+FIELD(GITS_TYPER, CIDBITS, 32, 4)
+FIELD(GITS_TYPER, CIL, 36, 1)
+
+#define GITS_IDREGS 0xFFD0
+
+#define ITS_CTLR_ENABLED (1U) /* ITS Enabled */
+
+#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \
+ R_GITS_BASER_TYPE_MASK)
+
+#define GITS_BASER_PAGESIZE_4K 0
+#define GITS_BASER_PAGESIZE_16K 1
+#define GITS_BASER_PAGESIZE_64K 2
+
+#define GITS_BASER_TYPE_DEVICE 1ULL
+#define GITS_BASER_TYPE_COLLECTION 4ULL
+
+#define GITS_PAGE_SIZE_4K 0x1000
+#define GITS_PAGE_SIZE_16K 0x4000
+#define GITS_PAGE_SIZE_64K 0x10000
+
+#define L1TABLE_ENTRY_SIZE 8
+
+#define LPI_CTE_ENABLED TABLE_ENTRY_VALID_MASK
+#define LPI_PRIORITY_MASK 0xfc
+
+#define GITS_CMDQ_ENTRY_SIZE 32
+#define NUM_BYTES_IN_DW 8
+
+#define CMD_MASK 0xff
+
+/* ITS Commands */
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_DISCARD 0x0F
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPI 0x0B
+#define GITS_CMD_MAPTI 0x0A
+#define GITS_CMD_INV 0x0C
+#define GITS_CMD_INVALL 0x0D
+#define GITS_CMD_SYNC 0x05
+
+/* MAPC command fields */
+#define ICID_LENGTH 16
+#define ICID_MASK ((1U << ICID_LENGTH) - 1)
+FIELD(MAPC, RDBASE, 16, 32)
+
+#define RDBASE_PROCNUM_LENGTH 16
+#define RDBASE_PROCNUM_MASK ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)
+
+/* MAPD command fields */
+#define ITTADDR_LENGTH 44
+#define ITTADDR_SHIFT 8
+#define ITTADDR_MASK MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
+#define SIZE_MASK 0x1f
+
+/* MAPI command fields */
+#define EVENTID_MASK ((1ULL << 32) - 1)
+
+/* MAPTI command fields */
+#define pINTID_SHIFT 32
+#define pINTID_MASK MAKE_64BIT_MASK(32, 32)
+
+#define DEVID_SHIFT 32
+#define DEVID_MASK MAKE_64BIT_MASK(32, 32)
+
+#define VALID_SHIFT 63
+#define CMD_FIELD_VALID_MASK (1ULL << VALID_SHIFT)
+#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
+#define TABLE_ENTRY_VALID_MASK (1ULL << 0)
+
+/**
+ * Default features advertised by this version of ITS
+ */
+/* Physical LPIs supported */
+#define GITS_TYPE_PHYSICAL (1U << 0)
+
+/*
+ * 12-byte Interrupt Translation Table Entry size,
+ * as per Table 5.3 in the GICv3 spec
+ * ITE Lower 8 Bytes
+ * Bits: | 49 ... 26 | 25 ... 2 | 1 | 0 |
+ * Values: | 1023 | IntNum | IntType | Valid |
+ * ITE Higher 4 Bytes
+ * Bits: | 31 ... 16 | 15 ...0 |
+ * Values: | vPEID | ICID |
+ */
+#define ITS_ITT_ENTRY_SIZE 0xC
+#define ITE_ENTRY_INTTYPE_SHIFT 1
+#define ITE_ENTRY_INTID_SHIFT 2
+#define ITE_ENTRY_INTID_MASK MAKE_64BIT_MASK(2, 24)
+#define ITE_ENTRY_INTSP_SHIFT 26
+#define ITE_ENTRY_ICID_MASK MAKE_64BIT_MASK(0, 16)
+
+/* 16-bit EventId */
+#define ITS_IDBITS GICD_TYPER_IDBITS
+
+/* 16-bit DeviceId */
+#define ITS_DEVBITS 0xF
+
+/* 16-bit CollectionId */
+#define ITS_CIDBITS 0xF
+
+/*
+ * 8-byte Device Table Entry size
+ * Valid = 1 bit, ITTAddr = 44 bits, Size = 5 bits
+ */
+#define GITS_DTE_SIZE (0x8ULL)
+#define GITS_DTE_ITTADDR_SHIFT 6
+#define GITS_DTE_ITTADDR_MASK MAKE_64BIT_MASK(GITS_DTE_ITTADDR_SHIFT, \
+ ITTADDR_LENGTH)
+
+/*
+ * 8-byte Collection Table Entry size
+ * Valid = 1 bit, RDBase = 36 bits (considering max RDBASE)
+ */
+#define GITS_CTE_SIZE (0x8ULL)
+#define GITS_CTE_RDBASE_PROCNUM_MASK MAKE_64BIT_MASK(1, RDBASE_PROCNUM_LENGTH)
+
/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
@@ -296,6 +461,9 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
unsigned size, MemTxAttrs attrs);
void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
+void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);
+void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);
+void gicv3_redist_update_lpi(GICv3CPUState *cs);
void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);
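The command-queue constants above determine the shape of the decode loop in process_cmdq(). A sketch (illustration only; the variable names are hypothetical): each queue entry is GITS_CMDQ_ENTRY_SIZE (32) bytes, i.e. four 8-byte doublewords, with the opcode in the low byte of the first doubleword:

    /* Sketch, illustration only */
    uint64_t cmdpkt[GITS_CMDQ_ENTRY_SIZE / NUM_BYTES_IN_DW]; /* 4 DWs */
    uint8_t cmd = cmdpkt[0] & CMD_MASK;

    switch (cmd) {
    case GITS_CMD_MAPD:
        /* DeviceID in DW0[63:32], Size in DW1, Valid + ITTAddr in DW2 */
        break;
    case GITS_CMD_MAPC:
        /* ICID and RDbase in DW2, Valid in DW2[63] */
        break;
    default:
        break;
    }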
diff --git a/hw/intc/meson.build b/hw/intc/meson.build
index 6e52a166e3..4dcfea6aa8 100644
--- a/hw/intc/meson.build
+++ b/hw/intc/meson.build
@@ -8,6 +8,7 @@ softmmu_ss.add(when: 'CONFIG_ARM_GIC', if_true: files(
'arm_gicv3_dist.c',
'arm_gicv3_its_common.c',
'arm_gicv3_redist.c',
+ 'arm_gicv3_its.c',
))
softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c'))
softmmu_ss.add(when: 'CONFIG_HEATHROW_PIC', if_true: files('heathrow_pic.c'))
diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
index 5086e6b7ed..8b70285961 100644
--- a/hw/misc/zynq_slcr.c
+++ b/hw/misc/zynq_slcr.c
@@ -269,6 +269,21 @@ static uint64_t zynq_slcr_compute_clock(const uint64_t periods[],
zynq_slcr_compute_clock((plls), (state)->regs[reg], \
reg ## _ ## enable_field ## _SHIFT)
+static void zynq_slcr_compute_clocks_internal(ZynqSLCRState *s, uint64_t ps_clk)
+{
+ uint64_t io_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_IO_PLL_CTRL]);
+ uint64_t arm_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_ARM_PLL_CTRL]);
+ uint64_t ddr_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_DDR_PLL_CTRL]);
+
+ uint64_t uart_mux[4] = {io_pll, io_pll, arm_pll, ddr_pll};
+
+ /* compute uartX reference clocks */
+ clock_set(s->uart0_ref_clk,
+ ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT0));
+ clock_set(s->uart1_ref_clk,
+ ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT1));
+}
+
/**
* Compute and set the output clock periods.
* But do not propagate them further. Connected clocks
@@ -283,17 +298,7 @@ static void zynq_slcr_compute_clocks(ZynqSLCRState *s)
ps_clk = 0;
}
- uint64_t io_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_IO_PLL_CTRL]);
- uint64_t arm_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_ARM_PLL_CTRL]);
- uint64_t ddr_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_DDR_PLL_CTRL]);
-
- uint64_t uart_mux[4] = {io_pll, io_pll, arm_pll, ddr_pll};
-
- /* compute uartX reference clocks */
- clock_set(s->uart0_ref_clk,
- ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT0));
- clock_set(s->uart1_ref_clk,
- ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT1));
+ zynq_slcr_compute_clocks_internal(s, ps_clk);
}
/**
@@ -416,7 +421,7 @@ static void zynq_slcr_reset_hold(Object *obj)
ZynqSLCRState *s = ZYNQ_SLCR(obj);
/* will disable all output clocks */
- zynq_slcr_compute_clocks(s);
+ zynq_slcr_compute_clocks_internal(s, 0);
zynq_slcr_propagate_clocks(s);
}
@@ -425,7 +430,7 @@ static void zynq_slcr_reset_exit(Object *obj)
ZynqSLCRState *s = ZYNQ_SLCR(obj);
/* will compute output clocks according to ps_clk and registers */
- zynq_slcr_compute_clocks(s);
+ zynq_slcr_compute_clocks_internal(s, clock_get(s->ps_clk));
zynq_slcr_propagate_clocks(s);
}
diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c
index 470205f487..d4685709a3 100644
--- a/hw/vfio/igd.c
+++ b/hw/vfio/igd.c
@@ -557,7 +557,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
* must allocate a 1MB aligned reserved memory region below 4GB with
* the requested size (in bytes) for use by the Intel PCI class VGA
* device at VM address 00:02.0. The base address of this reserved
- * memory region must be written to the device BDSM regsiter at PCI
+ * memory region must be written to the device BDSM register at PCI
* config offset 0x5C.
*/
bdsm_size = g_malloc(sizeof(*bdsm_size));
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index e21a6ede11..0cf69a8c6d 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -1356,7 +1356,7 @@ static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev)
/*
* The scope of a config reset is controlled by a mode bit in the misc register
* and a fuse, exposed as a bit in another register. The fuse is the default
- * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the forumula
+ * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the formula
* scope = !(misc ^ fuse), where the resulting scope is defined the same as
* the fuse. A truth table therefore tells us that if misc == fuse, we need
* to flip the value of the bit in the misc register.
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index e1ea1d8a23..4feaa1cb68 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -1364,7 +1364,7 @@ static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
* TODO: Lookup table for known devices.
*
* Logically we might use an algorithm here to select the BAR adding
- * the least additional MMIO space, but we cannot programatically
+ * the least additional MMIO space, but we cannot programmatically
* predict the driver dependency on BAR ordering or sizing, therefore
* 'auto' becomes a lookup for combinations reported to work.
*/
@@ -2158,7 +2158,7 @@ static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
}
/*
- * Stop any ongoing DMA by disconecting I/O, MMIO, and bus master.
+ * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
* Also put INTx Disable in known state.
*/
cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
@@ -2384,7 +2384,7 @@ out_single:
}
/*
- * We want to differentiate hot reset of mulitple in-use devices vs hot reset
+ * We want to differentiate hot reset of multiple in-use devices vs hot reset
* of a single in-use device. VFIO_DEVICE_RESET will already handle the case
* of doing hot resets when there is only a single device per bus. The in-use
* here refers to how many VFIODevices are affected. A hot reset that affects
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index cc3f66f7e4..f8f08a0f36 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -156,7 +156,7 @@ static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
* if there is no more active IRQ
* @opaque: actually points to the VFIO platform device
*
- * Called on mmap timer timout, this function checks whether the
+ * Called on mmap timer timeout, this function checks whether the
* IRQ is still active and if not, restores the fast path.
* By construction a single eventfd is handled at a time.
* If the IRQ is still active, the timer is re-programmed.
diff --git a/include/block/block_int.h b/include/block/block_int.h
index f1a54db0f8..5451f89b8d 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -34,6 +34,7 @@
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/throttle.h"
+#include "qemu/rcu.h"
#define BLOCK_FLAG_LAZY_REFCOUNTS 8
@@ -347,6 +348,15 @@ struct BlockDriver {
* clamped to bdrv_getlength() and aligned to request_alignment,
* as well as non-NULL pnum, map, and file; in turn, the driver
* must return an error or set pnum to an aligned non-zero value.
+ *
+ * Note that @bytes is just a hint on how big of a region the
+ * caller wants to inspect. It is not a limit on *pnum.
+ * Implementations are free to return larger values of *pnum if
+ * doing so does not incur a performance penalty.
+ *
+ * block/io.c's bdrv_co_block_status() will utilize an unclamped
+ * *pnum value for the block-status cache on protocol nodes, prior
+ * to clamping *pnum for return to its caller.
*/
int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
@@ -840,11 +850,23 @@ struct BdrvChild {
};
/*
- * Note: the function bdrv_append() copies and swaps contents of
- * BlockDriverStates, so if you add new fields to this struct, please
- * inspect bdrv_append() to determine if the new fields need to be
- * copied as well.
+ * Allows bdrv_co_block_status() to cache one data region for a
+ * protocol node.
+ *
+ * @valid: Whether the cache is valid (should be accessed with atomic
+ * functions so this can be reset by RCU readers)
+ * @data_start: Offset where we know (or strongly assume) there is data
+ * @data_end: Offset where the data region ends (which is not necessarily
+ * the start of a zeroed region)
*/
+typedef struct BdrvBlockStatusCache {
+ struct rcu_head rcu;
+
+ bool valid;
+ int64_t data_start;
+ int64_t data_end;
+} BdrvBlockStatusCache;
+
struct BlockDriverState {
/* Protected by big QEMU lock or read-only after opening. No special
* locking needed during I/O...
@@ -1010,6 +1032,11 @@ struct BlockDriverState {
/* BdrvChild links to this node may never be frozen */
bool never_freeze;
+
+ /* Lock for block-status cache RCU writers */
+ CoMutex bsc_modify_lock;
+ /* Always non-NULL, but must only be dereferenced under an RCU read guard */
+ BdrvBlockStatusCache *block_status_cache;
};
struct BlockBackendRootState {
@@ -1435,4 +1462,30 @@ static inline BlockDriverState *bdrv_primary_bs(BlockDriverState *bs)
*/
void bdrv_drain_all_end_quiesce(BlockDriverState *bs);
+/**
+ * Check whether the given offset is in the cached block-status data
+ * region.
+ *
+ * If it is, and @pnum is not NULL, *pnum is set to
+ * `bsc.data_end - offset`, i.e. how many bytes, starting from
+ * @offset, are data (according to the cache).
+ * Otherwise, *pnum is not touched.
+ */
+bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum);
+
+/**
+ * If [offset, offset + bytes) overlaps with the currently cached
+ * block-status region, invalidate the cache.
+ *
+ * (To be used by I/O paths that cause data regions to be zero or
+ * holes.)
+ */
+void bdrv_bsc_invalidate_range(BlockDriverState *bs,
+ int64_t offset, int64_t bytes);
+
+/**
+ * Mark the range [offset, offset + bytes) as a data region.
+ */
+void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes);
+
#endif /* BLOCK_INT_H */
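A sketch of how a protocol driver is expected to interact with the new cache (example_co_block_status() is hypothetical; the real call sites are bdrv_co_block_status() in block/io.c and the file-posix driver):

    /* Sketch, illustration only */
    static int coroutine_fn
    example_co_block_status(BlockDriverState *bs, bool want_zero,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
    {
        if (!bdrv_bsc_is_data(bs, offset, pnum)) {
            /*
             * Cache miss: a real driver would probe here (e.g. with
             * lseek(SEEK_DATA/SEEK_HOLE)); assume the probe reported
             * the whole range as data, then remember it:
             */
            *pnum = bytes;
            bdrv_bsc_fill(bs, offset, bytes);
        }
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

Write paths that create zeroes or holes (write-zeroes, discard, truncate) are expected to call bdrv_bsc_invalidate_range() so the cache never reports a zeroed region as data.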
diff --git a/include/chardev/char.h b/include/chardev/char.h
index 7c0444f90d..a319b5fdff 100644
--- a/include/chardev/char.h
+++ b/include/chardev/char.h
@@ -254,26 +254,58 @@ struct ChardevClass {
bool internal; /* TODO: eventually use TYPE_USER_CREATABLE */
bool supports_yank;
+
+ /* parse command line options and populate QAPI @backend */
void (*parse)(QemuOpts *opts, ChardevBackend *backend, Error **errp);
+ /* called after construction, open/starts the backend */
void (*open)(Chardev *chr, ChardevBackend *backend,
bool *be_opened, Error **errp);
+ /* write buf to the backend */
int (*chr_write)(Chardev *s, const uint8_t *buf, int len);
+
+ /*
+ * Read from the backend (blocking). A typical front-end will instead rely
+ * on chr_can_read/chr_read being called when polling/looping.
+ */
int (*chr_sync_read)(Chardev *s, const uint8_t *buf, int len);
+
+ /* create a watch on the backend */
GSource *(*chr_add_watch)(Chardev *s, GIOCondition cond);
+
+ /* update the backend internal sources */
void (*chr_update_read_handler)(Chardev *s);
+
+ /* send an ioctl to the backend */
int (*chr_ioctl)(Chardev *s, int cmd, void *arg);
+
+ /* get ancillary-received fds during last read */
int (*get_msgfds)(Chardev *s, int* fds, int num);
+
+ /* set ancillary fds to be sent with next write */
int (*set_msgfds)(Chardev *s, int *fds, int num);
+
+ /* accept the given fd */
int (*chr_add_client)(Chardev *chr, int fd);
+
+ /* wait for a connection */
int (*chr_wait_connected)(Chardev *chr, Error **errp);
+
+ /* disconnect a connection */
void (*chr_disconnect)(Chardev *chr);
+
+ /* called by frontend when it can read */
void (*chr_accept_input)(Chardev *chr);
+
+ /* set terminal echo */
void (*chr_set_echo)(Chardev *chr, bool echo);
+
+ /* notify the backend of frontend open state */
void (*chr_set_fe_open)(Chardev *chr, int fe_open);
+
+ /* handle various events */
void (*chr_be_event)(Chardev *s, QEMUChrEvent event);
- void (*chr_options_parsed)(Chardev *chr);
};
Chardev *qemu_chardev_new(const char *id, const char *typename,
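As orientation for the newly documented hooks: a minimal backend only needs chr_write; every other callback is optional. A sketch (hypothetical backend, not from the patch):

    /* Sketch, illustration only */
    static int discard_chr_write(Chardev *s, const uint8_t *buf, int len)
    {
        return len; /* claim everything was written, discarding it */
    }

    static void discard_chardev_class_init(ObjectClass *oc, void *data)
    {
        ChardevClass *cc = CHARDEV_CLASS(oc);

        cc->chr_write = discard_chr_write;
    }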
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
index a557b4e2bb..9f646389af 100644
--- a/include/exec/translate-all.h
+++ b/include/exec/translate-all.h
@@ -33,6 +33,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
#ifdef CONFIG_USER_ONLY
+void page_protect(tb_page_addr_t page_addr);
int page_unprotect(target_ulong address, uintptr_t pc);
#endif
diff --git a/include/exec/translator.h b/include/exec/translator.h
index d318803267..9bc46eda59 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -23,6 +23,7 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/plugin-gen.h"
+#include "exec/translate-all.h"
#include "tcg/tcg.h"
@@ -74,6 +75,17 @@ typedef struct DisasContextBase {
int num_insns;
int max_insns;
bool singlestep_enabled;
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Guest address of the last byte of the last protected page.
+ *
+ * Pages containing the translated instructions are made non-writable in
+ * order to achieve consistency in case another thread is modifying the
+ * code while translate_insn() fetches the instruction bytes piecemeal.
+ * Such writer threads are blocked on mmap_lock() in page_unprotect().
+ */
+ target_ulong page_protect_end;
+#endif
} DisasContextBase;
/**
@@ -156,27 +168,23 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
*/
#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
- static inline type \
- fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \
+ type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
+ abi_ptr pc, bool do_swap); \
+ static inline type fullname(CPUArchState *env, \
+ DisasContextBase *dcbase, abi_ptr pc) \
{ \
- type ret = load_fn(env, pc); \
- if (do_swap) { \
- ret = swap_fn(ret); \
- } \
- plugin_insn_append(&ret, sizeof(ret)); \
- return ret; \
- } \
- \
- static inline type fullname(CPUArchState *env, abi_ptr pc) \
- { \
- return fullname ## _swap(env, pc, false); \
+ return fullname ## _swap(env, dcbase, pc, false); \
}
-GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
-GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
-GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
-GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
-GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
+#define FOR_EACH_TRANSLATOR_LD(F) \
+ F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
+ F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
+ F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
+ F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
+ F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
+
+FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
+
#undef GEN_TRANSLATOR_LD
#endif /* EXEC__TRANSLATOR_H */
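For clarity, expanding FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD) for the translator_ldl entry yields, modulo whitespace, a prototype plus an inline wrapper; the _swap variants are now defined out of line, where they can also maintain the page-protection state added to DisasContextBase above:

    uint32_t translator_ldl_swap(CPUArchState *env, DisasContextBase *dcbase,
                                 abi_ptr pc, bool do_swap);

    static inline uint32_t translator_ldl(CPUArchState *env,
                                          DisasContextBase *dcbase, abi_ptr pc)
    {
        return translator_ldl_swap(env, dcbase, pc, false);
    }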
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 9661c46699..b461b8d261 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -120,6 +120,7 @@ struct VirtMachineClass {
MachineClass parent;
bool disallow_affinity_adjustment;
bool no_its;
+ bool no_tcg_its;
bool no_pmu;
bool claim_edge_triggered_timers;
bool smbios_old_sys_ver;
@@ -141,6 +142,7 @@ struct VirtMachineState {
bool highmem;
bool highmem_ecam;
bool its;
+ bool tcg_its;
bool virt;
bool ras;
bool mte;
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index eab27d0c03..55123cb4d2 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -35,16 +35,6 @@ struct TCGCPUOps {
void (*cpu_exec_enter)(CPUState *cpu);
/** @cpu_exec_exit: Callback for cpu_exec cleanup */
void (*cpu_exec_exit)(CPUState *cpu);
- /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
- bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
- /**
- * @do_interrupt: Callback for interrupt handling.
- *
- * note that this is in general SOFTMMU only, but it actually isn't
- * because of an x86 hack (accel/tcg/cpu-exec.c), so we cannot put it
- * in the SOFTMMU section in general.
- */
- void (*do_interrupt)(CPUState *cpu);
/**
* @tlb_fill: Handle a softmmu tlb miss or user-only address fault
*
@@ -61,7 +51,23 @@ struct TCGCPUOps {
void (*debug_excp_handler)(CPUState *cpu);
#ifdef NEED_CPU_H
+#if defined(CONFIG_USER_ONLY) && defined(TARGET_I386)
+ /**
+ * @fake_user_interrupt: Callback for 'fake exception' handling.
+ *
+ * Simulate 'fake exception' which will be handled outside the
+ * cpu execution loop (hack for x86 user mode).
+ */
+ void (*fake_user_interrupt)(CPUState *cpu);
+#else
+ /**
+ * @do_interrupt: Callback for interrupt handling.
+ */
+ void (*do_interrupt)(CPUState *cpu);
+#endif /* !CONFIG_USER_ONLY || !TARGET_I386 */
#ifdef CONFIG_SOFTMMU
+ /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
/**
* @do_transaction_failed: Callback for handling failed memory transactions
* (ie bus faults or external aborts; not MMU faults)
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index 91491a2f66..aa4f0d6770 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -36,6 +36,8 @@
#define GICV3_MAXIRQ 1020
#define GICV3_MAXSPI (GICV3_MAXIRQ - GIC_INTERNAL)
+#define GICV3_LPI_INTID_START 8192
+
#define GICV3_REDIST_SIZE 0x20000
/* Number of SGI target-list bits */
@@ -202,6 +204,13 @@ struct GICv3CPUState {
* real state above; it doesn't need to be migrated.
*/
PendingIrq hppi;
+
+ /*
+ * Cached information recalculated from LPI tables
+ * in guest memory
+ */
+ PendingIrq hpplpi;
+
/* This is temporary working state, to avoid a malloc in gicv3_update() */
bool seenbetter;
};
@@ -219,6 +228,7 @@ struct GICv3State {
uint32_t num_cpu;
uint32_t num_irq;
uint32_t revision;
+ bool lpi_enable;
bool security_extn;
bool irq_reset_nonsecure;
bool gicd_no_migration_shift_bug;
@@ -226,6 +236,9 @@ struct GICv3State {
int dev_fd; /* kvm device fd if backed by kvm vgic support */
Error *migration_blocker;
+ MemoryRegion *dma;
+ AddressSpace dma_as;
+
/* Distributor */
/* for a GIC with the security extensions the NS banked version of this
diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h
index 5a0952b404..4e79145dde 100644
--- a/include/hw/intc/arm_gicv3_its_common.h
+++ b/include/hw/intc/arm_gicv3_its_common.h
@@ -25,17 +25,41 @@
#include "hw/intc/arm_gicv3_common.h"
#include "qom/object.h"
+#define TYPE_ARM_GICV3_ITS "arm-gicv3-its"
+
#define ITS_CONTROL_SIZE 0x10000
#define ITS_TRANS_SIZE 0x10000
#define ITS_SIZE (ITS_CONTROL_SIZE + ITS_TRANS_SIZE)
#define GITS_CTLR 0x0
#define GITS_IIDR 0x4
+#define GITS_TYPER 0x8
#define GITS_CBASER 0x80
#define GITS_CWRITER 0x88
#define GITS_CREADR 0x90
#define GITS_BASER 0x100
+#define GITS_TRANSLATER 0x0040
+
+typedef struct {
+ bool valid;
+ bool indirect;
+ uint16_t entry_sz;
+ uint32_t page_sz;
+ uint32_t max_entries;
+ union {
+ uint32_t max_devids;
+ uint32_t max_collids;
+ } maxids;
+ uint64_t base_addr;
+} TableDesc;
+
+typedef struct {
+ bool valid;
+ uint32_t max_entries;
+ uint64_t base_addr;
+} CmdQDesc;
+
struct GICv3ITSState {
SysBusDevice parent_obj;
@@ -52,17 +76,23 @@ struct GICv3ITSState {
/* Registers */
uint32_t ctlr;
uint32_t iidr;
+ uint64_t typer;
uint64_t cbaser;
uint64_t cwriter;
uint64_t creadr;
uint64_t baser[8];
+ TableDesc dt;
+ TableDesc ct;
+ CmdQDesc cq;
+
Error *migration_blocker;
};
typedef struct GICv3ITSState GICv3ITSState;
-void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops);
+void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops,
+ const MemoryRegionOps *tops);
#define TYPE_ARM_GICV3_ITS_COMMON "arm-gicv3-its-common"
typedef struct GICv3ITSCommonClass GICv3ITSCommonClass;
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index bafc311bfa..34c8a7506a 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -264,6 +264,7 @@ struct BusState {
HotplugHandler *hotplug_handler;
int max_index;
bool realized;
+ bool full;
int num_children;
/*
@@ -597,6 +598,10 @@ void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n);
*
* See qdev_connect_gpio_out() for how code that uses such a device
* can connect to one of its output GPIO lines.
+ *
+ * There is no need to release the array allocated for @pins because it
+ * will be released automatically when @dev runs its instance_finalize()
+ * handler.
*/
void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n);
/**
@@ -798,6 +803,29 @@ static inline bool qbus_is_hotpluggable(BusState *bus)
return bus->hotplug_handler;
}
+/**
+ * qbus_mark_full: Mark this bus as full, so no more devices can be attached
+ * @bus: Bus to mark as full
+ *
+ * By default, QEMU will allow devices to be plugged into a bus up
+ * to the bus class's device count limit. Calling this function
+ * marks a particular bus as full, so that no more devices can be
+ * plugged into it. In particular this means that the bus will not
+ * be considered a candidate for plugging in devices created by
+ * the user on the command line or via the monitor.
+ * If a machine has multiple buses of a given type, such as I2C,
+ * where some of those buses in the real hardware are used only for
+ * internal devices and some are exposed via expansion ports, you
+ * can use this function to mark the internal-only buses as full
+ * after you have created all their internal devices. Then user
+ * created devices will appear on the expansion-port bus where
+ * guest software expects them.
+ */
+static inline void qbus_mark_full(BusState *bus)
+{
+ bus->full = true;
+}
+
void device_listener_register(DeviceListener *listener);
void device_listener_unregister(DeviceListener *listener);
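A usage sketch for qbus_mark_full() (hypothetical board code; i2c_init_bus(), i2c_slave_create_simple() and the "tmp105" device are existing QEMU pieces):

    /* Sketch, illustration only */
    I2CBus *i2c = i2c_init_bus(dev, "internal-i2c");

    /* Create the board's internal devices first... */
    i2c_slave_create_simple(i2c, "tmp105", 0x48);

    /* ...then close the bus, so -device cannot target it. */
    qbus_mark_full(BUS(i2c));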
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 2a654f350c..0545a6224c 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -843,7 +843,6 @@ static inline void tcg_gen_plugin_cb_end(void)
#if TARGET_LONG_BITS == 32
#define tcg_temp_new() tcg_temp_new_i32()
-#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new() tcg_temp_local_new_i32()
#define tcg_temp_free tcg_temp_free_i32
@@ -851,7 +850,6 @@ static inline void tcg_gen_plugin_cb_end(void)
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else
#define tcg_temp_new() tcg_temp_new_i64()
-#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new() tcg_temp_local_new_i64()
#define tcg_temp_free tcg_temp_free_i64
diff --git a/include/ui/console.h b/include/ui/console.h
index 3be21497a2..244664d727 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -168,6 +168,9 @@ typedef struct QemuDmaBuf {
uint64_t modifier;
uint32_t texture;
bool y0_top;
+ void *sync;
+ int fence_fd;
+ bool allow_fences;
} QemuDmaBuf;
typedef struct DisplayState DisplayState;
diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h
index f1bf8f97fc..2fb6e0dd6b 100644
--- a/include/ui/egl-helpers.h
+++ b/include/ui/egl-helpers.h
@@ -19,6 +19,7 @@ typedef struct egl_fb {
GLuint texture;
GLuint framebuffer;
bool delete_texture;
+ QemuDmaBuf *dmabuf;
} egl_fb;
void egl_fb_destroy(egl_fb *fb);
@@ -45,6 +46,8 @@ int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc,
void egl_dmabuf_import_texture(QemuDmaBuf *dmabuf);
void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf);
+void egl_dmabuf_create_sync(QemuDmaBuf *dmabuf);
+void egl_dmabuf_create_fence(QemuDmaBuf *dmabuf);
#endif
diff --git a/include/ui/gtk.h b/include/ui/gtk.h
index 7835ef1a71..7d22affd38 100644
--- a/include/ui/gtk.h
+++ b/include/ui/gtk.h
@@ -155,6 +155,7 @@ extern bool gtk_use_gl_area;
/* ui/gtk.c */
void gd_update_windowsize(VirtualConsole *vc);
int gd_monitor_update_interval(GtkWidget *widget);
+void gd_hw_gl_flushed(void *vc);
/* ui/gtk-egl.c */
void gd_egl_init(VirtualConsole *vc);
@@ -181,8 +182,8 @@ void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl,
uint32_t hot_x, uint32_t hot_y);
void gd_egl_cursor_position(DisplayChangeListener *dcl,
uint32_t pos_x, uint32_t pos_y);
-void gd_egl_release_dmabuf(DisplayChangeListener *dcl,
- QemuDmaBuf *dmabuf);
+void gd_egl_flush(DisplayChangeListener *dcl,
+ uint32_t x, uint32_t y, uint32_t w, uint32_t h);
void gd_egl_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
void gtk_egl_init(DisplayGLMode mode);
diff --git a/linux-user/main.c b/linux-user/main.c
index 5ce17e423d..16def5215d 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -125,13 +125,6 @@ const char *qemu_uname_release;
by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
-#if defined(TARGET_I386)
-int cpu_get_pic_interrupt(CPUX86State *env)
-{
- return -1;
-}
-#endif
-
/***********************************************************/
/* Helper routines for implementing atomic operations. */
diff --git a/meson.build b/meson.build
index 7d7d14a4bc..2711cbb789 100644
--- a/meson.build
+++ b/meson.build
@@ -78,7 +78,7 @@ endif
accelerator_targets = { 'CONFIG_KVM': kvm_targets }
if cpu in ['x86', 'x86_64', 'arm', 'aarch64']
- # i368 emulator provides xenpv machine type for multiple architectures
+ # i386 emulator provides xenpv machine type for multiple architectures
accelerator_targets += {
'CONFIG_XEN': ['i386-softmmu', 'x86_64-softmmu'],
}
diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx
index b3620f29e5..4c4d94ab22 100644
--- a/qemu-img-cmds.hx
+++ b/qemu-img-cmds.hx
@@ -46,7 +46,7 @@ SRST
ERST
DEF("convert", img_convert,
- "convert [--object objectdef] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file] [-o options] [-l snapshot_param] [-S sparse_size] [-r rate_limit] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename")
+ "convert [--object objectdef] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file [-F backing_fmt]] [-o options] [-l snapshot_param] [-S sparse_size] [-r rate_limit] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename")
SRST
.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] [--salvage] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
ERST
diff --git a/qemu-img.c b/qemu-img.c
index d77f3e76a9..f036a1d428 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -2183,7 +2183,8 @@ static int img_convert(int argc, char **argv)
int c, bs_i, flags, src_flags = BDRV_O_NO_SHARE;
const char *fmt = NULL, *out_fmt = NULL, *cache = "unsafe",
*src_cache = BDRV_DEFAULT_CACHE, *out_baseimg = NULL,
- *out_filename, *out_baseimg_param, *snapshot_name = NULL;
+ *out_filename, *out_baseimg_param, *snapshot_name = NULL,
+ *backing_fmt = NULL;
BlockDriver *drv = NULL, *proto_drv = NULL;
BlockDriverInfo bdi;
BlockDriverState *out_bs;
@@ -2223,7 +2224,7 @@ static int img_convert(int argc, char **argv)
{"skip-broken-bitmaps", no_argument, 0, OPTION_SKIP_BROKEN},
{0, 0, 0, 0}
};
- c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WUr:",
+ c = getopt_long(argc, argv, ":hf:O:B:CcF:o:l:S:pt:T:qnm:WUr:",
long_options, NULL);
if (c == -1) {
break;
@@ -2253,6 +2254,9 @@ static int img_convert(int argc, char **argv)
case 'c':
s.compressed = true;
break;
+ case 'F':
+ backing_fmt = optarg;
+ break;
case 'o':
if (accumulate_options(&options, optarg) < 0) {
goto fail_getopt;
@@ -2521,7 +2525,7 @@ static int img_convert(int argc, char **argv)
qemu_opt_set_number(opts, BLOCK_OPT_SIZE,
s.total_sectors * BDRV_SECTOR_SIZE, &error_abort);
- ret = add_old_style_options(out_fmt, opts, out_baseimg, NULL);
+ ret = add_old_style_options(out_fmt, opts, out_baseimg, backing_fmt);
if (ret < 0) {
goto out;
}
@@ -2628,6 +2632,14 @@ static int img_convert(int argc, char **argv)
goto out;
}
+ if (flags & BDRV_O_NOCACHE) {
+ /*
+ * If we open the target with O_DIRECT, it may be necessary to
+ * extend its size to align to the physical sector size.
+ */
+ flags |= BDRV_O_RESIZE;
+ }
+
if (skip_create) {
s.target = img_open(tgt_image_opts, out_filename, out_fmt,
flags, writethrough, s.quiet, false);
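Taken together with the qemu-img-cmds.hx change above, this lets a conversion that keeps a backing file also pin the backing format, avoiding a format probe on the backing file. A usage example (file names hypothetical):

    qemu-img convert -O qcow2 -B base.qcow2 -F qcow2 overlay.qcow2 out.qcow2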
diff --git a/scripts/simplebench/img_bench_templater.py b/scripts/simplebench/img_bench_templater.py
new file mode 100755
index 0000000000..f8e1540ada
--- /dev/null
+++ b/scripts/simplebench/img_bench_templater.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+#
+# Process img-bench test templates
+#
+# Copyright (c) 2021 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys
+import subprocess
+import re
+import json
+
+import simplebench
+from results_to_text import results_to_text
+from table_templater import Templater
+
+
+def bench_func(env, case):
+ test = templater.gen(env['data'], case['data'])
+
+ p = subprocess.run(test, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, universal_newlines=True)
+
+ if p.returncode == 0:
+ try:
+ m = re.search(r'Run completed in (\d+\.\d+) seconds\.', p.stdout)
+ return {'seconds': float(m.group(1))}
+ except Exception:
+ return {'error': f'failed to parse qemu-img output: {p.stdout}'}
+ else:
+ return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
+
+
+if __name__ == '__main__':
+ if len(sys.argv) > 1:
+ print("""
+Usage: img_bench_templater.py < path/to/test-template.sh
+
+This script generates performance tests from a test template (example below),
+runs them, and displays the results in a table. The template is read from
+stdin. It must be written in bash and end with a `qemu-img bench` invocation
+(whose output is parsed to obtain each test instance’s result).
+
+Use the following syntax in the template to create the various different test
+instances:
+
+ column templating: {var1|var2|...} - the test will use different values in
+ different columns. You may use several {} constructions in the test, in
+ which case the product of all choice sets is used.
+
+ row templating: [var1|var2|...] - similar syntax for defining rows (test cases)
+
+Test template example:
+
+Assume you want to compare two qemu-img binaries, called qemu-img-old and
+qemu-img-new in your build directory in two test-cases with 4K writes and 64K
+writes. The template may look like this:
+
+qemu_img=/path/to/qemu/build/qemu-img-{old|new}
+$qemu_img create -f qcow2 /ssd/x.qcow2 1G
+$qemu_img bench -c 100 -d 8 [-s 4K|-s 64K] -w -t none -n /ssd/x.qcow2
+
+When passing this to stdin of img_bench_templater.py, the resulting comparison
+table will contain two columns (for two binaries) and two rows (for two
+test-cases).
+
+In addition to displaying the results, the script also stores them in JSON
+format in the file results.json in the current directory.
+""")
+ sys.exit()
+
+ templater = Templater(sys.stdin.read())
+
+ envs = [{'id': ' / '.join(x), 'data': x} for x in templater.columns]
+ cases = [{'id': ' / '.join(x), 'data': x} for x in templater.rows]
+
+ result = simplebench.bench(bench_func, envs, cases, count=5,
+ initial_run=False)
+ print(results_to_text(result))
+ with open('results.json', 'w') as f:
+ json.dump(result, f, indent=4)
diff --git a/scripts/simplebench/table_templater.py b/scripts/simplebench/table_templater.py
new file mode 100644
index 0000000000..950f3b3024
--- /dev/null
+++ b/scripts/simplebench/table_templater.py
@@ -0,0 +1,62 @@
+# Parser for test templates
+#
+# Copyright (c) 2021 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import itertools
+from lark import Lark
+
+grammar = """
+start: ( text | column_switch | row_switch )+
+
+column_switch: "{" text ["|" text]+ "}"
+row_switch: "[" text ["|" text]+ "]"
+text: /[^|{}\[\]]+/
+"""
+
+parser = Lark(grammar)
+
+class Templater:
+ def __init__(self, template):
+ self.tree = parser.parse(template)
+
+ c_switches = []
+ r_switches = []
+ for x in self.tree.children:
+ if x.data == 'column_switch':
+ c_switches.append([el.children[0].value for el in x.children])
+ elif x.data == 'row_switch':
+ r_switches.append([el.children[0].value for el in x.children])
+
+ self.columns = list(itertools.product(*c_switches))
+ self.rows = list(itertools.product(*r_switches))
+
+ def gen(self, column, row):
+ i = 0
+ j = 0
+ result = []
+
+ for x in self.tree.children:
+ if x.data == 'text':
+ result.append(x.children[0].value)
+ elif x.data == 'column_switch':
+ result.append(column[i])
+ i += 1
+ elif x.data == 'row_switch':
+ result.append(row[j])
+ j += 1
+
+ return ''.join(result)
diff --git a/softmmu/qdev-monitor.c b/softmmu/qdev-monitor.c
index a304754ab9..0705f00846 100644
--- a/softmmu/qdev-monitor.c
+++ b/softmmu/qdev-monitor.c
@@ -435,7 +435,12 @@ static DeviceState *qbus_find_dev(BusState *bus, char *elem)
static inline bool qbus_is_full(BusState *bus)
{
- BusClass *bus_class = BUS_GET_CLASS(bus);
+ BusClass *bus_class;
+
+ if (bus->full) {
+ return true;
+ }
+ bus_class = BUS_GET_CLASS(bus);
return bus_class->max_dev && bus->num_children >= bus_class->max_dev;
}
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index 4871ad0c0a..93e16a2ffb 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -218,10 +218,10 @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
static const struct TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
- .cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.tlb_fill = alpha_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.do_interrupt = alpha_cpu_do_interrupt,
.do_transaction_failed = alpha_cpu_do_transaction_failed,
.do_unaligned_access = alpha_cpu_do_unaligned_access,
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index 82df108967..4e993bd15b 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -274,10 +274,10 @@ struct AlphaCPU {
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_alpha_cpu;
-#endif
void alpha_cpu_do_interrupt(CPUState *cpu);
bool alpha_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags);
hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
index 4f56fe4d23..81550d9e2f 100644
--- a/target/alpha/helper.c
+++ b/target/alpha/helper.c
@@ -293,7 +293,6 @@ bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
-#endif /* USER_ONLY */
void alpha_cpu_do_interrupt(CPUState *cs)
{
@@ -348,7 +347,6 @@ void alpha_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
-#if !defined(CONFIG_USER_ONLY)
switch (i) {
case EXCP_RESET:
i = 0x0000;
@@ -404,7 +402,6 @@ void alpha_cpu_do_interrupt(CPUState *cs)
/* Switch to PALmode. */
env->flags |= ENV_FLAG_PAL_MODE;
-#endif /* !USER_ONLY */
}
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@@ -451,6 +448,8 @@ bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
static const char linux_reg_names[31][4] = {
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index de6c0a8439..b034206688 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -2971,7 +2971,7 @@ static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPUAlphaState *env = cpu->env_ptr;
- uint32_t insn = translator_ldl(env, ctx->base.pc_next);
+ uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
ctx->base.pc_next += 4;
ctx->base.is_jmp = translate_one(ctx, insn);
diff --git a/target/arm/arm_ldst.h b/target/arm/arm_ldst.h
index 057160e8da..cee0548a1c 100644
--- a/target/arm/arm_ldst.h
+++ b/target/arm/arm_ldst.h
@@ -24,15 +24,15 @@
#include "qemu/bswap.h"
/* Load an instruction and return it in the standard little-endian order */
-static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
- bool sctlr_b)
+static inline uint32_t arm_ldl_code(CPUARMState *env, DisasContextBase *s,
+ target_ulong addr, bool sctlr_b)
{
- return translator_ldl_swap(env, addr, bswap_code(sctlr_b));
+ return translator_ldl_swap(env, s, addr, bswap_code(sctlr_b));
}
/* Ditto, for a halfword (Thumb) instruction */
-static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
- bool sctlr_b)
+static inline uint16_t arm_lduw_code(CPUARMState *env, DisasContextBase *s,
+ target_ulong addr, bool sctlr_b)
{
#ifndef CONFIG_USER_ONLY
/* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped
@@ -41,7 +41,7 @@ static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
addr ^= 2;
}
#endif
- return translator_lduw_swap(env, addr, bswap_code(sctlr_b));
+ return translator_lduw_swap(env, s, addr, bswap_code(sctlr_b));
}
#endif
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index d631c4683c..ba0741b20e 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -440,6 +440,8 @@ static void arm_cpu_reset(DeviceState *dev)
arm_rebuild_hflags(env);
}
+#ifndef CONFIG_USER_ONLY
+
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el,
unsigned int cur_el, bool secure,
@@ -556,7 +558,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return unmasked || pstate_unmasked;
}
-bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
CPUARMState *env = cs->env_ptr;
@@ -608,6 +610,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
cc->tcg_ops->do_interrupt(cs);
return true;
}
+#endif /* !CONFIG_USER_ONLY */
void arm_cpu_update_virq(ARMCPU *cpu)
{
@@ -2010,11 +2013,11 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
static const struct TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = arm_cpu_exec_interrupt,
.tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,
#if !defined(CONFIG_USER_ONLY)
+ .cpu_exec_interrupt = arm_cpu_exec_interrupt,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 6a987f65e4..09d9027734 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1040,11 +1040,10 @@ uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_arm_cpu;
-#endif
void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
-bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
@@ -3455,6 +3454,7 @@ FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1)
+FIELD(TBFLAG_ANY, PSTATE__IL, 13, 1)
/*
* Bit usage when in AArch32 state, both A- and M-profile.
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index 33cc75af57..0d5adccf1a 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -22,7 +22,7 @@
/* CPU models. These are not needed for the AArch64 linux-user build. */
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
-#ifdef CONFIG_TCG
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
@@ -46,7 +46,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return ret;
}
-#endif /* CONFIG_TCG */
+#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
static void arm926_initfn(Object *obj)
{
@@ -898,11 +898,11 @@ static void pxa270c5_initfn(Object *obj)
static const struct TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,
#if !defined(CONFIG_USER_ONLY)
+ .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index 26f79f9141..19445b3c94 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -1071,6 +1071,7 @@ illegal_return:
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
}
+ helper_rebuild_hflags_a64(env, cur_el);
qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
"resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index a7ae78146d..b210da2bc2 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -13462,6 +13462,10 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
}
+ if (env->uncached_cpsr & CPSR_IL) {
+ DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
+ }
+
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
@@ -13556,6 +13560,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
}
}
+ if (env->pstate & PSTATE_IL) {
+ DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
+ }
+
if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
/*
* Set MTE_ACTIVE if any access may be Checked, and leave clear
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 5d55de1a49..94b970bbf9 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -70,12 +70,17 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
struct kvm_vcpu_init *init)
{
int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
+ int max_vm_pa_size;
kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
if (kvmfd < 0) {
goto err;
}
- vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
+ max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
+ if (max_vm_pa_size < 0) {
+ max_vm_pa_size = 0;
+ }
+ vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
if (vmfd < 0) {
goto err;
}
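
The scratch-VCPU fix above follows the standard probe pattern: KVM_CHECK_EXTENSION(KVM_CAP_ARM_VM_IPA_SIZE) reports the largest IPA size the host supports, and that value is passed as the machine type to KVM_CREATE_VM (type 0 keeps the arm64 default of a 40-bit IPA). A condensed, Linux-only sketch of the same probe, with error handling mostly trimmed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvmfd = open("/dev/kvm", O_RDWR);
        if (kvmfd < 0) {
            perror("open /dev/kvm");
            return 1;
        }

        /* Returns the max IPA bits, or 0 if the capability is absent. */
        int max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION,
                                   KVM_CAP_ARM_VM_IPA_SIZE);
        if (max_vm_pa_size < 0) {
            max_vm_pa_size = 0;   /* fall back to the default VM type */
        }

        int vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
        printf("max IPA bits=%d, vmfd=%d\n", max_vm_pa_size, vmfd);
        return vmfd < 0;
    }
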
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 34f8daa377..0613454975 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -525,8 +525,8 @@ static inline const char *its_class_name(void)
/* KVM implementation requires this capability */
return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL;
} else {
- /* Software emulation is not implemented yet */
- return NULL;
+ /* Software emulation based model */
+ return "arm-gicv3-its";
}
}
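
This hunk is what turns the new TCG ITS model on for users: with the emulated device in the tree, its_class_name() now returns a device type for non-KVM configurations too. A reduced, compilable version of the selection logic; the two kvm_* predicates are stubs here, whereas upstream they query the accelerator state:

    #include <stdbool.h>
    #include <stdio.h>

    static bool kvm_irqchip_in_kernel(void)  { return false; }
    static bool kvm_direct_msi_enabled(void) { return false; }

    static const char *its_class_name(void)
    {
        if (kvm_irqchip_in_kernel()) {
            /* KVM implementation requires this capability */
            return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL;
        } else {
            /* Software emulation based model */
            return "arm-gicv3-its";
        }
    }

    int main(void)
    {
        printf("ITS device type: %s\n", its_class_name());
        return 0;
    }
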
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
index 8dd88a0cb1..f30f4130a2 100644
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -277,4 +277,9 @@ static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
(cv << 24) | (cond << 20) | ti;
}
+static inline uint32_t syn_illegalstate(void)
+{
+ return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
#endif /* TARGET_ARM_SYNDROME_H */
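
The new syndrome helper just packs the ESR_ELx "Illegal Execution state" exception class together with the IL (32-bit instruction length) bit. A self-contained check, with the constants written out as they are defined elsewhere in target/arm (EC in bits [31:26], IL at bit 25, EC_ILLEGALSTATE == 0x0e):

    #include <stdint.h>
    #include <stdio.h>

    #define ARM_EL_EC_SHIFT  26
    #define ARM_EL_IL        (1u << 25)
    #define EC_ILLEGALSTATE  0x0e

    static uint32_t syn_illegalstate(void)
    {
        return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;
    }

    int main(void)
    {
        printf("ESR syndrome = 0x%08x\n", syn_illegalstate()); /* 0x3a000000 */
        return 0;
    }
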
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 422e2ac0c9..ab6b346e35 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14649,103 +14649,6 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
return false;
}
-/* C3.1 A64 instruction index by encoding */
-static void disas_a64_insn(CPUARMState *env, DisasContext *s)
-{
- uint32_t insn;
-
- s->pc_curr = s->base.pc_next;
- insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
- s->insn = insn;
- s->base.pc_next += 4;
-
- s->fp_access_checked = false;
- s->sve_access_checked = false;
-
- if (dc_isar_feature(aa64_bti, s)) {
- if (s->base.num_insns == 1) {
- /*
- * At the first insn of the TB, compute s->guarded_page.
- * We delayed computing this until successfully reading
- * the first insn of the TB, above. This (mostly) ensures
- * that the softmmu tlb entry has been populated, and the
- * page table GP bit is available.
- *
- * Note that we need to compute this even if btype == 0,
- * because this value is used for BR instructions later
- * where ENV is not available.
- */
- s->guarded_page = is_guarded_page(env, s);
-
- /* First insn can have btype set to non-zero. */
- tcg_debug_assert(s->btype >= 0);
-
- /*
- * Note that the Branch Target Exception has fairly high
- * priority -- below debugging exceptions but above most
- * everything else. This allows us to handle this now
- * instead of waiting until the insn is otherwise decoded.
- */
- if (s->btype != 0
- && s->guarded_page
- && !btype_destination_ok(insn, s->bt, s->btype)) {
- gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
- syn_btitrap(s->btype),
- default_exception_el(s));
- return;
- }
- } else {
- /* Not the first insn: btype must be 0. */
- tcg_debug_assert(s->btype == 0);
- }
- }
-
- switch (extract32(insn, 25, 4)) {
- case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
- unallocated_encoding(s);
- break;
- case 0x2:
- if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
- unallocated_encoding(s);
- }
- break;
- case 0x8: case 0x9: /* Data processing - immediate */
- disas_data_proc_imm(s, insn);
- break;
- case 0xa: case 0xb: /* Branch, exception generation and system insns */
- disas_b_exc_sys(s, insn);
- break;
- case 0x4:
- case 0x6:
- case 0xc:
- case 0xe: /* Loads and stores */
- disas_ldst(s, insn);
- break;
- case 0x5:
- case 0xd: /* Data processing - register */
- disas_data_proc_reg(s, insn);
- break;
- case 0x7:
- case 0xf: /* Data processing - SIMD and floating point */
- disas_data_proc_simd_fp(s, insn);
- break;
- default:
- assert(FALSE); /* all 15 cases should be handled above */
- break;
- }
-
- /* if we allocated any temporaries, free them here */
- free_tmp_a64(s);
-
- /*
- * After execution of most insns, btype is reset to 0.
- * Note that we set btype == -1 when the insn sets btype.
- */
- if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
- reset_btype(s);
- }
-}
-
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
CPUState *cpu)
{
@@ -14780,6 +14683,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
#endif
dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
+ dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
@@ -14846,10 +14750,11 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
- DisasContext *dc = container_of(dcbase, DisasContext, base);
+ DisasContext *s = container_of(dcbase, DisasContext, base);
CPUARMState *env = cpu->env_ptr;
+ uint32_t insn;
- if (dc->ss_active && !dc->pstate_ss) {
+ if (s->ss_active && !s->pstate_ss) {
/* Singlestep state is Active-pending.
* If we're in this state at the start of a TB then either
* a) we just took an exception to an EL which is being debugged
@@ -14860,14 +14765,114 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* "did not step an insn" case, and so the syndrome ISV and EX
* bits should be zero.
*/
- assert(dc->base.num_insns == 1);
- gen_swstep_exception(dc, 0, 0);
- dc->base.is_jmp = DISAS_NORETURN;
- } else {
- disas_a64_insn(env, dc);
+ assert(s->base.num_insns == 1);
+ gen_swstep_exception(s, 0, 0);
+ s->base.is_jmp = DISAS_NORETURN;
+ return;
+ }
+
+ s->pc_curr = s->base.pc_next;
+ insn = arm_ldl_code(env, &s->base, s->base.pc_next, s->sctlr_b);
+ s->insn = insn;
+ s->base.pc_next += 4;
+
+ s->fp_access_checked = false;
+ s->sve_access_checked = false;
+
+ if (s->pstate_il) {
+ /*
+ * Illegal execution state. This has priority over BTI
+ * exceptions, but comes after instruction abort exceptions.
+ */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_illegalstate(), default_exception_el(s));
+ return;
+ }
+
+ if (dc_isar_feature(aa64_bti, s)) {
+ if (s->base.num_insns == 1) {
+ /*
+ * At the first insn of the TB, compute s->guarded_page.
+ * We delayed computing this until successfully reading
+ * the first insn of the TB, above. This (mostly) ensures
+ * that the softmmu tlb entry has been populated, and the
+ * page table GP bit is available.
+ *
+ * Note that we need to compute this even if btype == 0,
+ * because this value is used for BR instructions later
+ * where ENV is not available.
+ */
+ s->guarded_page = is_guarded_page(env, s);
+
+ /* First insn can have btype set to non-zero. */
+ tcg_debug_assert(s->btype >= 0);
+
+ /*
+ * Note that the Branch Target Exception has fairly high
+ * priority -- below debugging exceptions but above most
+ * everything else. This allows us to handle this now
+ * instead of waiting until the insn is otherwise decoded.
+ */
+ if (s->btype != 0
+ && s->guarded_page
+ && !btype_destination_ok(insn, s->bt, s->btype)) {
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_btitrap(s->btype),
+ default_exception_el(s));
+ return;
+ }
+ } else {
+ /* Not the first insn: btype must be 0. */
+ tcg_debug_assert(s->btype == 0);
+ }
+ }
+
+ switch (extract32(insn, 25, 4)) {
+ case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
+ unallocated_encoding(s);
+ break;
+ case 0x2:
+ if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
+ unallocated_encoding(s);
+ }
+ break;
+ case 0x8: case 0x9: /* Data processing - immediate */
+ disas_data_proc_imm(s, insn);
+ break;
+ case 0xa: case 0xb: /* Branch, exception generation and system insns */
+ disas_b_exc_sys(s, insn);
+ break;
+ case 0x4:
+ case 0x6:
+ case 0xc:
+ case 0xe: /* Loads and stores */
+ disas_ldst(s, insn);
+ break;
+ case 0x5:
+ case 0xd: /* Data processing - register */
+ disas_data_proc_reg(s, insn);
+ break;
+ case 0x7:
+ case 0xf: /* Data processing - SIMD and floating point */
+ disas_data_proc_simd_fp(s, insn);
+ break;
+ default:
+ assert(FALSE); /* all 15 cases should be handled above */
+ break;
+ }
+
+ /* if we allocated any temporaries, free them here */
+ free_tmp_a64(s);
+
+ /*
+ * After execution of most insns, btype is reset to 0.
+ * Note that we set btype == -1 when the insn sets btype.
+ */
+ if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
+ reset_btype(s);
}
- translator_loop_temp_check(&dc->base);
+ translator_loop_temp_check(&s->base);
}
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
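
Folding disas_a64_insn() into the translate hook exists to get the exception ordering right: software-step first, then the fetch (so an instruction abort can still be taken), then PSTATE.IL, then BTI, then normal decode. A stand-alone skeleton of that ordering; every helper below is a stub, the real versions live in translate-a64.c:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        bool ss_active, pstate_ss, pstate_il, guarded_page;
        uint64_t pc_next;
    } DisasCtx;

    static uint32_t fetch_insn(DisasCtx *s) { s->pc_next += 4; return 0; }
    static void raise_swstep(DisasCtx *s)  { (void)s; puts("SS exception"); }
    static void raise_illegal(DisasCtx *s) { (void)s; puts("Illegal Execution state"); }
    static void raise_btitrap(DisasCtx *s) { (void)s; puts("BTI trap"); }
    static void decode(DisasCtx *s, uint32_t insn) { (void)s; (void)insn; puts("decode"); }

    static bool bti_trap_needed(DisasCtx *s, uint32_t insn)
    {
        (void)insn;
        return s->guarded_page;
    }

    static void translate_one(DisasCtx *s)
    {
        if (s->ss_active && !s->pstate_ss) {
            raise_swstep(s);
            return;
        }
        uint32_t insn = fetch_insn(s);   /* may fault: insn abort wins */
        if (s->pstate_il) {
            raise_illegal(s);            /* beats BTI, loses to the abort */
            return;
        }
        if (bti_trap_needed(s, insn)) {
            raise_btitrap(s);
            return;
        }
        decode(s, insn);
    }

    int main(void)
    {
        DisasCtx s = { .pstate_il = true };
        translate_one(&s);
        return 0;
    }
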
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 24b7f49d76..caefb1e1a1 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9090,6 +9090,16 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
return;
}
+ if (s->pstate_il) {
+ /*
+ * Illegal execution state. This has priority over BTI
+ * exceptions, but comes after instruction abort exceptions.
+ */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_illegalstate(), default_exception_el(s));
+ return;
+ }
+
if (cond == 0xf) {
/* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
* choose to UNDEF. In ARMv5 and above the space is used
@@ -9302,7 +9312,7 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
* boundary, so we cross the page if the first 16 bits indicate
* that this is a 32 bit insn.
*/
- uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
+ uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b);
return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
@@ -9358,6 +9368,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
#endif
dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
+ dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
if (arm_feature(env, ARM_FEATURE_M)) {
dc->vfp_enabled = 1;
@@ -9540,7 +9551,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}
dc->pc_curr = dc->base.pc_next;
- insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
+ insn = arm_ldl_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
dc->insn = insn;
dc->base.pc_next += 4;
disas_arm_insn(dc, insn);
@@ -9610,17 +9621,28 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}
dc->pc_curr = dc->base.pc_next;
- insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
+ insn = arm_lduw_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
dc->base.pc_next += 2;
if (!is_16bit) {
- uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
+ uint32_t insn2 = arm_lduw_code(env, &dc->base, dc->base.pc_next,
+ dc->sctlr_b);
insn = insn << 16 | insn2;
dc->base.pc_next += 2;
}
dc->insn = insn;
+ if (dc->pstate_il) {
+ /*
+ * Illegal execution state. This has priority over BTI
+ * exceptions, but comes after instruction abort exceptions.
+ */
+ gen_exception_insn(dc, dc->pc_curr, EXCP_UDEF,
+ syn_illegalstate(), default_exception_el(dc));
+ return;
+ }
+
if (dc->eci) {
/*
* For M-profile continuable instructions, ECI/ICI handling
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 8636c20c3b..605d1f2e33 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -98,6 +98,8 @@ typedef struct DisasContext {
bool hstr_active;
/* True if memory operations require alignment */
bool align_mem;
+ /* True if PSTATE.IL is set */
+ bool pstate_il;
/*
* >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
* < 0, set by the current instruction.
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index ea14175ca5..5d70e34dd5 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -197,10 +197,7 @@ static const struct TCGCPUOps avr_tcg_ops = {
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
.tlb_fill = avr_cpu_tlb_fill,
-
-#ifndef CONFIG_USER_ONLY
.do_interrupt = avr_cpu_do_interrupt,
-#endif /* !CONFIG_USER_ONLY */
};
static void avr_cpu_class_init(ObjectClass *oc, void *data)
diff --git a/target/avr/translate.c b/target/avr/translate.c
index 1111e08b83..438e7b13c1 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -70,11 +70,9 @@ static const char reg_names[NUMBER_OF_CPU_REGISTERS][8] = {
};
#define REG(x) (cpu_r[x])
-enum {
- DISAS_EXIT = DISAS_TARGET_0, /* We want return to the cpu main loop. */
- DISAS_LOOKUP = DISAS_TARGET_1, /* We have a variable condition exit. */
- DISAS_CHAIN = DISAS_TARGET_2, /* We have a single condition exit. */
-};
+#define DISAS_EXIT DISAS_TARGET_0 /* We want return to the cpu main loop. */
+#define DISAS_LOOKUP DISAS_TARGET_1 /* We have a variable condition exit. */
+#define DISAS_CHAIN DISAS_TARGET_2 /* We have a single condition exit. */
typedef struct DisasContext DisasContext;
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index 70932b1f8c..c2e7483f5b 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -205,20 +205,20 @@ static const struct SysemuCPUOps cris_sysemu_ops = {
static const struct TCGCPUOps crisv10_tcg_ops = {
.initialize = cris_initialize_crisv10_tcg,
- .cpu_exec_interrupt = cris_cpu_exec_interrupt,
.tlb_fill = cris_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = cris_cpu_exec_interrupt,
.do_interrupt = crisv10_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static const struct TCGCPUOps crisv32_tcg_ops = {
.initialize = cris_initialize_tcg,
- .cpu_exec_interrupt = cris_cpu_exec_interrupt,
.tlb_fill = cris_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = cris_cpu_exec_interrupt,
.do_interrupt = cris_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
index d3b6492909..be021899ae 100644
--- a/target/cris/cpu.h
+++ b/target/cris/cpu.h
@@ -185,11 +185,11 @@ struct CRISCPU {
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_cris_cpu;
-#endif
void cris_cpu_do_interrupt(CPUState *cpu);
void crisv10_cpu_do_interrupt(CPUState *cpu);
bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags);
diff --git a/target/cris/helper.c b/target/cris/helper.c
index 911867f3b4..36926faf32 100644
--- a/target/cris/helper.c
+++ b/target/cris/helper.c
@@ -41,20 +41,6 @@
#if defined(CONFIG_USER_ONLY)
-void cris_cpu_do_interrupt(CPUState *cs)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
- CPUCRISState *env = &cpu->env;
-
- cs->exception_index = -1;
- env->pregs[PR_ERP] = env->pc;
-}
-
-void crisv10_cpu_do_interrupt(CPUState *cs)
-{
- cris_cpu_do_interrupt(cs);
-}
-
bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -287,7 +273,6 @@ hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
D(fprintf(stderr, "%s %x -> %x\n", __func__, addr, phy));
return phy;
}
-#endif
bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -319,3 +304,5 @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return ret;
}
+
+#endif /* !CONFIG_USER_ONLY */
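
The same refactoring pattern repeats for nearly every target in this series: the interrupt hooks only exist in the system-emulation build, so both the ops-table entries and the function bodies move behind !CONFIG_USER_ONLY and simply vanish from linux-user binaries. Reduced to a compilable toy (CONFIG_USER_ONLY would come from the build system, and "foo" is a placeholder target name):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct CPUState CPUState;
    typedef struct {
        void (*initialize)(void);
    #ifndef CONFIG_USER_ONLY
        bool (*cpu_exec_interrupt)(CPUState *cs, int int_req);
        void (*do_interrupt)(CPUState *cs);
    #endif
    } TCGCPUOps;

    static void foo_translate_init(void) { puts("init"); }

    #ifndef CONFIG_USER_ONLY
    static bool foo_cpu_exec_interrupt(CPUState *cs, int int_req)
    {
        (void)cs; (void)int_req;
        return false;               /* nothing pending in this toy */
    }
    static void foo_cpu_do_interrupt(CPUState *cs) { (void)cs; }
    #endif

    static const TCGCPUOps foo_tcg_ops = {
        .initialize = foo_translate_init,
    #ifndef CONFIG_USER_ONLY
        .cpu_exec_interrupt = foo_cpu_exec_interrupt,
        .do_interrupt = foo_cpu_do_interrupt,
    #endif
    };

    int main(void)
    {
        foo_tcg_ops.initialize();
        return 0;
    }
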
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index 54fdcaa5e8..6fb4e6853c 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -112,7 +112,8 @@ static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t));
for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
words[nwords] =
- translator_ldl(env, ctx->base.pc_next + nwords * sizeof(uint32_t));
+ translator_ldl(env, &ctx->base,
+ ctx->base.pc_next + nwords * sizeof(uint32_t));
found_end = is_packet_end(words[nwords]);
}
if (!found_end) {
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 2eace4ee12..e8edd189bf 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -144,10 +144,10 @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
static const struct TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.tlb_fill = hppa_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 748270bfa3..7854675b90 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -325,13 +325,13 @@ int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-void hppa_cpu_do_interrupt(CPUState *cpu);
-bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#ifndef CONFIG_USER_ONLY
+void hppa_cpu_do_interrupt(CPUState *cpu);
+bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
int type, hwaddr *pphys, int *pprot);
extern const MemoryRegionOps hppa_io_eir_ops;
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index 349495d361..13073ae2bd 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -88,7 +88,6 @@ void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val)
eval_interrupt(env_archcpu(env));
qemu_mutex_unlock_iothread();
}
-#endif /* !CONFIG_USER_ONLY */
void hppa_cpu_do_interrupt(CPUState *cs)
{
@@ -100,7 +99,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)
uint64_t iasq_f = env->iasq_f;
uint64_t iasq_b = env->iasq_b;
-#ifndef CONFIG_USER_ONLY
target_ureg old_psw;
/* As documented in pa2.0 -- interruption handling. */
@@ -187,7 +185,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)
env->iaoq_b = env->iaoq_f + 4;
env->iasq_f = 0;
env->iasq_b = 0;
-#endif
if (qemu_loglevel_mask(CPU_LOG_INT)) {
static const char * const names[] = {
@@ -248,7 +245,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)
bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
-#ifndef CONFIG_USER_ONLY
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
@@ -258,6 +254,7 @@ bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
hppa_cpu_do_interrupt(cs);
return true;
}
-#endif
return false;
}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index b18150ef8d..c3698cf067 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -34,7 +34,6 @@
#undef TCGv
#undef tcg_temp_new
-#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free
@@ -59,7 +58,6 @@
#define TCGv_reg TCGv_i64
#define tcg_temp_new tcg_temp_new_i64
-#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64
@@ -155,7 +153,6 @@
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
-#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32
@@ -4177,7 +4174,7 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
/* Always fetch the insn, even if nullified, so that we check
the page permissions for execute. */
- uint32_t insn = translator_ldl(env, ctx->base.pc_next);
+ uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
/* Set up the IA queue for the next insn.
This will be overwritten by a branch. */
diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c
index 1078e3d157..37b7c562f5 100644
--- a/target/i386/cpu-sysemu.c
+++ b/target/i386/cpu-sysemu.c
@@ -335,7 +335,7 @@ void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
GuestPanicInformation *panic_info;
if (!cs->crash_occurred) {
- error_setg(errp, "No crash occured");
+ error_setg(errp, "No crash occurred");
return;
}
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 71ae3141c3..7dd664791a 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1836,12 +1836,15 @@ int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
+#ifndef CONFIG_USER_ONLY
int cpu_get_pic_interrupt(CPUX86State *s);
+
/* MSDOS compatibility mode FPU exception support */
void x86_register_ferr_irq(qemu_irq irq);
void fpu_check_raise_ferr_irq(CPUX86State *s);
void cpu_set_ignne(void);
void cpu_clear_ignne(void);
+#endif
/* mpx_helper.c */
void cpu_sync_bndcs_hflags(CPUX86State *env);
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 2510cc244e..60ca09e95e 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -38,7 +38,9 @@ QEMU_BUILD_BUG_ON(TCG_PHYS_ADDR_BITS > TARGET_PHYS_ADDR_SPACE_BITS);
* @cpu: vCPU the interrupt is to be handled by.
*/
void x86_cpu_do_interrupt(CPUState *cpu);
+#ifndef CONFIG_USER_ONLY
bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif
/* helper.c */
bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index cef68b610a..baa905a0cd 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -929,9 +929,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
e2);
env->eip = offset;
}
-#endif
-#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
int cpl, selector;
@@ -984,7 +982,7 @@ void helper_sysret(CPUX86State *env, int dflag)
DESC_W_MASK | DESC_A_MASK);
}
}
-#endif
+#endif /* TARGET_X86_64 */
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
@@ -1112,76 +1110,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}
-bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
- int intno;
-
- interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
- if (!interrupt_request) {
- return false;
- }
-
- /* Don't process multiple interrupt requests in a single call.
- * This is required to make icount-driven execution deterministic.
- */
- switch (interrupt_request) {
-#if !defined(CONFIG_USER_ONLY)
- case CPU_INTERRUPT_POLL:
- cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
- apic_poll_irq(cpu->apic_state);
- break;
-#endif
- case CPU_INTERRUPT_SIPI:
- do_cpu_sipi(cpu);
- break;
- case CPU_INTERRUPT_SMI:
- cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
-#ifdef CONFIG_USER_ONLY
- cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
-#else
- do_smm_enter(cpu);
-#endif /* CONFIG_USER_ONLY */
- break;
- case CPU_INTERRUPT_NMI:
- cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
- env->hflags2 |= HF2_NMI_MASK;
- do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
- break;
- case CPU_INTERRUPT_MCE:
- cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
- do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
- break;
- case CPU_INTERRUPT_HARD:
- cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
- cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_VIRQ);
- intno = cpu_get_pic_interrupt(env);
- qemu_log_mask(CPU_LOG_TB_IN_ASM,
- "Servicing hardware INT=0x%02x\n", intno);
- do_interrupt_x86_hardirq(env, intno, 1);
- break;
-#if !defined(CONFIG_USER_ONLY)
- case CPU_INTERRUPT_VIRQ:
- cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
- intno = x86_ldl_phys(cs, env->vm_vmcb
- + offsetof(struct vmcb, control.int_vector));
- qemu_log_mask(CPU_LOG_TB_IN_ASM,
- "Servicing virtual hardware INT=0x%02x\n", intno);
- do_interrupt_x86_hardirq(env, intno, 1);
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
- env->int_ctl &= ~V_IRQ_MASK;
- break;
-#endif
- }
-
- /* Ensure that no TB jump will be modified as the program flow was changed. */
- return true;
-}
-
void helper_lldt(CPUX86State *env, int selector)
{
SegmentCache *dt;
diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c
index 82c0856c41..bf3444c26b 100644
--- a/target/i386/tcg/sysemu/seg_helper.c
+++ b/target/i386/tcg/sysemu/seg_helper.c
@@ -125,6 +125,68 @@ void x86_cpu_do_interrupt(CPUState *cs)
}
}
+bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int intno;
+
+ interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
+ if (!interrupt_request) {
+ return false;
+ }
+
+ /* Don't process multiple interrupt requests in a single call.
+ * This is required to make icount-driven execution deterministic.
+ */
+ switch (interrupt_request) {
+ case CPU_INTERRUPT_POLL:
+ cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ break;
+ case CPU_INTERRUPT_SIPI:
+ do_cpu_sipi(cpu);
+ break;
+ case CPU_INTERRUPT_SMI:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ do_smm_enter(cpu);
+ break;
+ case CPU_INTERRUPT_NMI:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ env->hflags2 |= HF2_NMI_MASK;
+ do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+ break;
+ case CPU_INTERRUPT_MCE:
+ cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+ break;
+ case CPU_INTERRUPT_HARD:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
+ cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_VIRQ);
+ intno = cpu_get_pic_interrupt(env);
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ break;
+ case CPU_INTERRUPT_VIRQ:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
+ intno = x86_ldl_phys(cs, env->vm_vmcb
+ + offsetof(struct vmcb, control.int_vector));
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing virtual hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ env->int_ctl &= ~V_IRQ_MASK;
+ break;
+ }
+
+ /* Ensure that no TB jump will be modified as the program flow was changed. */
+ return true;
+}
+
/* check if Port I/O is allowed in TSS */
void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
{
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 93a79a5741..3ecfae34cb 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -72,10 +72,12 @@ static const struct TCGCPUOps x86_tcg_ops = {
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.cpu_exec_enter = x86_cpu_exec_enter,
.cpu_exec_exit = x86_cpu_exec_exit,
- .cpu_exec_interrupt = x86_cpu_exec_interrupt,
- .do_interrupt = x86_cpu_do_interrupt,
.tlb_fill = x86_cpu_tlb_fill,
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ .fake_user_interrupt = x86_cpu_do_interrupt,
+#else
+ .do_interrupt = x86_cpu_do_interrupt,
+ .cpu_exec_interrupt = x86_cpu_exec_interrupt,
.debug_excp_handler = breakpoint_handler,
.debug_check_breakpoint = x86_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
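
i386 is the one target here that keeps an interrupt hook in the user-only build: the same x86_cpu_do_interrupt body is wired up under the new fake_user_interrupt name, since in user mode there is no real interrupt to deliver, only an exception the guest process must observe. A toy dispatcher showing the intent; the names mirror the diff, the surrounding machinery is invented for illustration:

    #include <stdio.h>

    typedef struct CPUState { int exception_index; } CPUState;

    typedef struct {
    #ifdef CONFIG_USER_ONLY
        /* user mode: fold the exception into something the guest
         * process can see; there is no real interrupt controller. */
        void (*fake_user_interrupt)(CPUState *cs);
    #else
        void (*do_interrupt)(CPUState *cs);
    #endif
    } TCGCPUOps;

    static void x86_do_interrupt(CPUState *cs)
    {
        printf("handling exception %d\n", cs->exception_index);
    }

    static const TCGCPUOps x86_tcg_ops = {
    #ifdef CONFIG_USER_ONLY
        .fake_user_interrupt = x86_do_interrupt,
    #else
        .do_interrupt = x86_do_interrupt,
    #endif
    };

    int main(void)
    {
        CPUState cs = { .exception_index = 13 };
    #ifdef CONFIG_USER_ONLY
        x86_tcg_ops.fake_user_interrupt(&cs);
    #else
        x86_tcg_ops.do_interrupt(&cs);
    #endif
        return 0;
    }
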
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index aacb605eee..a46be75b00 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -2028,28 +2028,28 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
- return translator_ldub(env, advance_pc(env, s, 1));
+ return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}
static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
- return translator_ldsw(env, advance_pc(env, s, 2));
+ return translator_ldsw(env, &s->base, advance_pc(env, s, 2));
}
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
- return translator_lduw(env, advance_pc(env, s, 2));
+ return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}
static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
- return translator_ldl(env, advance_pc(env, s, 4));
+ return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}
#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
- return translator_ldq(env, advance_pc(env, s, 8));
+ return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 72de6e9726..66d22d1189 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -515,10 +515,10 @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
static const struct TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
- .cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.tlb_fill = m68k_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.do_interrupt = m68k_cpu_do_interrupt,
.do_transaction_failed = m68k_cpu_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index 997d588911..550eb028b6 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -166,8 +166,10 @@ struct M68kCPU {
};
+#ifndef CONFIG_USER_ONLY
void m68k_cpu_do_interrupt(CPUState *cpu);
bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
void m68k_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index d006d1cb3e..5d624838ae 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -24,18 +24,7 @@
#include "semihosting/semihost.h"
#include "tcg/tcg.h"
-#if defined(CONFIG_USER_ONLY)
-
-void m68k_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = -1;
-}
-
-static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
-{
-}
-
-#else
+#if !defined(CONFIG_USER_ONLY)
static void cf_rte(CPUM68KState *env)
{
@@ -516,7 +505,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
cpu_loop_exit(cs);
}
}
-#endif
bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -538,6 +526,8 @@ bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
{
CPUState *cs = env_cpu(env);
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index c34d9aed61..50a55f949c 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -415,7 +415,7 @@ static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
{
uint16_t im;
- im = translator_lduw(env, s->pc);
+ im = translator_lduw(env, &s->base, s->pc);
s->pc += 2;
return im;
}
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 72d8f2a0da..15db277925 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -365,10 +365,10 @@ static const struct SysemuCPUOps mb_sysemu_ops = {
static const struct TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = mb_cpu_exec_interrupt,
.tlb_fill = mb_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = mb_cpu_exec_interrupt,
.do_interrupt = mb_cpu_do_interrupt,
.do_transaction_failed = mb_cpu_transaction_failed,
.do_unaligned_access = mb_cpu_do_unaligned_access,
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index e4bba8a755..40401c33b7 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -355,8 +355,10 @@ struct MicroBlazeCPU {
};
+#ifndef CONFIG_USER_ONLY
void mb_cpu_do_interrupt(CPUState *cs);
bool mb_cpu_exec_interrupt(CPUState *cs, int int_req);
+#endif /* !CONFIG_USER_ONLY */
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 20dbd67313..dd2aecd1d5 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -26,16 +26,6 @@
#if defined(CONFIG_USER_ONLY)
-void mb_cpu_do_interrupt(CPUState *cs)
-{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- CPUMBState *env = &cpu->env;
-
- cs->exception_index = -1;
- env->res_addr = RES_ADDR_NONE;
- env->regs[14] = env->pc;
-}
-
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -271,7 +261,6 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
return paddr;
}
-#endif
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -289,6 +278,8 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index d426918291..00e0c55d0e 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -539,10 +539,10 @@ static const struct SysemuCPUOps mips_sysemu_ops = {
static const struct TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = mips_cpu_exec_interrupt,
.tlb_fill = mips_cpu_tlb_fill,
#if !defined(CONFIG_USER_ONLY)
+ .cpu_exec_interrupt = mips_cpu_exec_interrupt,
.do_interrupt = mips_cpu_do_interrupt,
.do_transaction_failed = mips_cpu_do_transaction_failed,
.do_unaligned_access = mips_cpu_do_unaligned_access,
diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c
index 4fb8b00711..7b3026b105 100644
--- a/target/mips/tcg/exception.c
+++ b/target/mips/tcg/exception.c
@@ -86,24 +86,6 @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb)
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
}
-bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
-
- if (cpu_mips_hw_interrupts_enabled(env) &&
- cpu_mips_hw_interrupts_pending(env)) {
- /* Raise it */
- cs->exception_index = EXCP_EXT_INTERRUPT;
- env->error_code = 0;
- mips_cpu_do_interrupt(cs);
- return true;
- }
- }
- return false;
-}
-
static const char * const excp_names[EXCP_LAST + 1] = {
[EXCP_RESET] = "reset",
[EXCP_SRESET] = "soft reset",
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index 5e95f47854..0da4c802a3 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -1627,7 +1627,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
uint32_t op, minor, minor2, mips32_op;
uint32_t cond, fmt, cc;
- insn = translator_lduw(env, ctx->base.pc_next + 2);
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
ctx->opcode = (ctx->opcode << 16) | insn;
rt = (ctx->opcode >> 21) & 0x1f;
diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc
index 54071813f1..84d816603a 100644
--- a/target/mips/tcg/mips16e_translate.c.inc
+++ b/target/mips/tcg/mips16e_translate.c.inc
@@ -455,7 +455,7 @@ static void decode_i64_mips16(DisasContext *ctx,
static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx)
{
- int extend = translator_lduw(env, ctx->base.pc_next + 2);
+ int extend = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
int op, rx, ry, funct, sa;
int16_t imm, offset;
@@ -688,7 +688,7 @@ static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx)
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_JAL:
- offset = translator_lduw(env, ctx->base.pc_next + 2);
+ offset = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
offset = (((ctx->opcode & 0x1f) << 21)
| ((ctx->opcode >> 5) & 0x1f) << 16
| offset) << 2;
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index a66ae26796..ccbcecad09 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -3656,7 +3656,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
int offset;
int imm;
- insn = translator_lduw(env, ctx->base.pc_next + 2);
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
ctx->opcode = (ctx->opcode << 16) | insn;
rt = extract32(ctx->opcode, 21, 5);
@@ -3775,7 +3775,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
break;
case NM_P48I:
{
- insn = translator_lduw(env, ctx->base.pc_next + 4);
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 4);
target_long addr_off = extract32(ctx->opcode, 0, 16) | insn << 16;
switch (extract32(ctx->opcode, 16, 5)) {
case NM_LI48:
diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c
index a150a014ec..73254d1929 100644
--- a/target/mips/tcg/sysemu/tlb_helper.c
+++ b/target/mips/tcg/sysemu/tlb_helper.c
@@ -1339,6 +1339,24 @@ void mips_cpu_do_interrupt(CPUState *cs)
cs->exception_index = EXCP_NONE;
}
+bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ if (cpu_mips_hw_interrupts_enabled(env) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ /* Raise it */
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ env->error_code = 0;
+ mips_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
CPUState *cs = env_cpu(env);
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index 81b14eb219..c7a77ddccd 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -18,8 +18,6 @@
void mips_tcg_init(void);
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
-void mips_cpu_do_interrupt(CPUState *cpu);
-bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
@@ -41,6 +39,9 @@ static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
#if !defined(CONFIG_USER_ONLY)
+void mips_cpu_do_interrupt(CPUState *cpu);
+bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
void mmu_init(CPUMIPSState *env, const mips_def_t *def);
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index 6f4a9a839c..148afec9dc 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -16041,17 +16041,17 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
is_slot = ctx->hflags & MIPS_HFLAG_BMASK;
if (ctx->insn_flags & ISA_NANOMIPS32) {
- ctx->opcode = translator_lduw(env, ctx->base.pc_next);
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_isa_nanomips(env, ctx);
} else if (!(ctx->hflags & MIPS_HFLAG_M16)) {
- ctx->opcode = translator_ldl(env, ctx->base.pc_next);
+ ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next);
insn_bytes = 4;
decode_opc(env, ctx);
} else if (ctx->insn_flags & ASE_MICROMIPS) {
- ctx->opcode = translator_lduw(env, ctx->base.pc_next);
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_isa_micromips(env, ctx);
} else if (ctx->insn_flags & ASE_MIPS16) {
- ctx->opcode = translator_lduw(env, ctx->base.pc_next);
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_ase_mips16e(env, ctx);
} else {
gen_reserved_instruction(ctx);
diff --git a/target/mips/tcg/user/tlb_helper.c b/target/mips/tcg/user/tlb_helper.c
index b835144b82..210c6d529e 100644
--- a/target/mips/tcg/user/tlb_helper.c
+++ b/target/mips/tcg/user/tlb_helper.c
@@ -57,8 +57,3 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
raise_mmu_exception(env, address, access_type);
do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
-
-void mips_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = EXCP_NONE;
-}
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index 5e37defef8..947bb09bc1 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -127,6 +127,7 @@ static void nios2_cpu_realizefn(DeviceState *dev, Error **errp)
ncc->parent_realize(dev, errp);
}
+#ifndef CONFIG_USER_ONLY
static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
@@ -140,7 +141,7 @@ static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
-
+#endif /* !CONFIG_USER_ONLY */
static void nios2_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
@@ -219,10 +220,10 @@ static const struct SysemuCPUOps nios2_sysemu_ops = {
static const struct TCGCPUOps nios2_tcg_ops = {
.initialize = nios2_tcg_init,
- .cpu_exec_interrupt = nios2_cpu_exec_interrupt,
.tlb_fill = nios2_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = nios2_cpu_exec_interrupt,
.do_interrupt = nios2_cpu_do_interrupt,
.do_unaligned_access = nios2_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index bd34e429ec..27cb04152f 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -186,10 +186,10 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
static const struct TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
- .cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.tlb_fill = openrisc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.do_interrupt = openrisc_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index 82cbaeb4f8..be6df81a81 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -312,8 +312,6 @@ struct OpenRISCCPU {
void cpu_openrisc_list(void);
-void openrisc_cpu_do_interrupt(CPUState *cpu);
-bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@@ -331,6 +329,9 @@ int print_insn_or1k(bfd_vma addr, disassemble_info *info);
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_openrisc_cpu;
+void openrisc_cpu_do_interrupt(CPUState *cpu);
+bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
/* hw/openrisc_pic.c */
void cpu_openrisc_pic_init(OpenRISCCPU *cpu);
diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c
index 3eab771dcd..19223e3f25 100644
--- a/target/openrisc/interrupt.c
+++ b/target/openrisc/interrupt.c
@@ -28,7 +28,6 @@
void openrisc_cpu_do_interrupt(CPUState *cs)
{
-#ifndef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
CPUOpenRISCState *env = &cpu->env;
int exception = cs->exception_index;
@@ -96,7 +95,6 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
} else {
cpu_abort(cs, "Unhandled exception 0x%x\n", exception);
}
-#endif
cs->exception_index = -1;
}
diff --git a/target/openrisc/meson.build b/target/openrisc/meson.build
index 9774a58306..e445dec4a0 100644
--- a/target/openrisc/meson.build
+++ b/target/openrisc/meson.build
@@ -9,7 +9,6 @@ openrisc_ss.add(files(
'exception_helper.c',
'fpu_helper.c',
'gdbstub.c',
- 'interrupt.c',
'interrupt_helper.c',
'mmu.c',
'sys_helper.c',
@@ -17,7 +16,10 @@ openrisc_ss.add(files(
))
openrisc_softmmu_ss = ss.source_set()
-openrisc_softmmu_ss.add(files('machine.c'))
+openrisc_softmmu_ss.add(files(
+ 'interrupt.c',
+ 'machine.c',
+))
target_arch += {'openrisc': openrisc_ss}
target_softmmu_arch += {'openrisc': openrisc_softmmu_ss}
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index d6ea536744..5f3d430245 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -1613,7 +1613,7 @@ static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
- uint32_t insn = translator_ldl(&cpu->env, dc->base.pc_next);
+ uint32_t insn = translator_ldl(&cpu->env, &dc->base, dc->base.pc_next);
if (!decode(dc, insn)) {
gen_illegal_exception(dc);
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 500205229c..362e7c4c5c 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1254,8 +1254,6 @@ DECLARE_OBJ_CHECKERS(PPCVirtualHypervisor, PPCVirtualHypervisorClass,
PPC_VIRTUAL_HYPERVISOR, TYPE_PPC_VIRTUAL_HYPERVISOR)
#endif /* CONFIG_USER_ONLY */
-void ppc_cpu_do_interrupt(CPUState *cpu);
-bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void ppc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int ppc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@@ -1271,6 +1269,8 @@ int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
#ifndef CONFIG_USER_ONLY
+void ppc_cpu_do_interrupt(CPUState *cpu);
+bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void ppc_cpu_do_system_reset(CPUState *cs);
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector);
extern const VMStateDescription vmstate_ppc_cpu;
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index ad7abc6041..6aad01d1d3 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -9014,10 +9014,10 @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
static const struct TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
- .cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.tlb_fill = ppc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.do_interrupt = ppc_cpu_do_interrupt,
.cpu_exec_enter = ppc_cpu_exec_enter,
.cpu_exec_exit = ppc_cpu_exec_exit,
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 7b6ac16eef..d7e32ee107 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -40,24 +40,8 @@
/*****************************************************************************/
/* Exception processing */
-#if defined(CONFIG_USER_ONLY)
-void ppc_cpu_do_interrupt(CPUState *cs)
-{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
-
- cs->exception_index = POWERPC_EXCP_NONE;
- env->error_code = 0;
-}
-
-static void ppc_hw_interrupt(CPUPPCState *env)
-{
- CPUState *cs = env_cpu(env);
+#if !defined(CONFIG_USER_ONLY)
- cs->exception_index = POWERPC_EXCP_NONE;
- env->error_code = 0;
-}
-#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
@@ -1113,7 +1097,6 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
powerpc_set_excp_state(cpu, vector, msr);
}
-#endif /* !CONFIG_USER_ONLY */
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -1130,6 +1113,8 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 171b216e17..5d8b06bd80 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -8585,7 +8585,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);
ctx->cia = pc = ctx->base.pc_next;
- insn = translator_ldl_swap(env, pc, need_byteswap(ctx));
+ insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
ctx->base.pc_next = pc += 4;
if (!is_prefix_insn(ctx, insn)) {
@@ -8600,7 +8600,8 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
ok = true;
} else {
- uint32_t insn2 = translator_ldl_swap(env, pc, need_byteswap(ctx));
+ uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
+ need_byteswap(ctx));
ctx->base.pc_next = pc += 4;
ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
}
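
The two 32-bit fetches above combine into one 64-bit prefixed instruction via deposit64(insn2, 32, 32, insn), which places the prefix word in the upper half. A stand-alone check, with a local deposit64 matching the semantics of QEMU's bitops helper (value, start bit, field length, field value); the instruction words are arbitrary:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t deposit64(uint64_t value, int start, int length,
                              uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        uint32_t insn  = 0x04000000;   /* arbitrary prefix word */
        uint32_t insn2 = 0xe4000000;   /* arbitrary suffix word */
        uint64_t full = deposit64(insn2, 32, 32, insn);
        assert(full == 0x04000000e4000000ULL);
        printf("prefixed insn = 0x%016llx\n", (unsigned long long)full);
        return 0;
    }
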
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 1a2b03d579..13575c1408 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -644,10 +644,10 @@ static const struct SysemuCPUOps riscv_sysemu_ops = {
static const struct TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.tlb_fill = riscv_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.do_interrupt = riscv_cpu_do_interrupt,
.do_transaction_failed = riscv_cpu_do_transaction_failed,
.do_unaligned_access = riscv_cpu_do_unaligned_access,
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index bf1c899c00..e735e53e26 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -334,7 +334,6 @@ int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
@@ -362,6 +361,7 @@ void riscv_cpu_list(void);
#define cpu_mmu_index riscv_cpu_mmu_index
#ifndef CONFIG_USER_ONLY
+bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 968cb8046f..701858d670 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -75,11 +75,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
return RISCV_EXCP_NONE; /* indicates no pending interrupt */
}
}
-#endif
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
-#if !defined(CONFIG_USER_ONLY)
if (interrupt_request & CPU_INTERRUPT_HARD) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
@@ -90,12 +88,9 @@ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return true;
}
}
-#endif
return false;
}
-#if !defined(CONFIG_USER_ONLY)
-
/* Return true is floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index e356fc6c46..74b33fa3c9 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -500,7 +500,8 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
} else {
uint32_t opcode32 = opcode;
opcode32 = deposit32(opcode32, 16, 16,
- translator_lduw(env, ctx->base.pc_next + 2));
+ translator_lduw(env, &ctx->base,
+ ctx->base.pc_next + 2));
ctx->pc_succ_insn = ctx->base.pc_next + 4;
if (!decode_insn32(ctx, opcode32)) {
gen_exception_illegal(ctx);
@@ -561,7 +562,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPURISCVState *env = cpu->env_ptr;
- uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);
+ uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);
decode_opc(env, ctx, opcode16);
ctx->base.pc_next = ctx->pc_succ_insn;
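
The RISC-V fetch above reads 16 bits first and widens only when the encoding requires it: any instruction whose low two bits are 0b11 is (at least) 32 bits long, everything else is a compressed 16-bit opcode. A stand-alone sketch of that decision plus the deposit32 widening used in decode_opc:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t fieldval)
    {
        uint32_t mask = (~0u >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    static int insn_is_16bit(uint16_t opcode)
    {
        return (opcode & 0x3) != 0x3;   /* low bits 00/01/10: compressed */
    }

    int main(void)
    {
        uint16_t first = 0x4501;            /* c.li a0,0: compressed */
        uint16_t lo = 0x0513, hi = 0x0005;  /* addi a0,a0,0 in halves */

        printf("0x%04x -> %d bytes\n", first, insn_is_16bit(first) ? 2 : 4);

        if (!insn_is_16bit(lo)) {
            uint32_t opcode32 = deposit32(lo, 16, 16, hi);
            printf("widened opcode = 0x%08x\n", opcode32); /* 0x00050513 */
        }
        return 0;
    }
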
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index 96cc96e514..25a4aa2976 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -186,10 +186,10 @@ static const struct SysemuCPUOps rx_sysemu_ops = {
static const struct TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = rx_cpu_exec_interrupt,
.tlb_fill = rx_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = rx_cpu_exec_interrupt,
.do_interrupt = rx_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index 0b4b998c7b..faa3606f52 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -124,8 +124,10 @@ typedef RXCPU ArchCPU;
#define CPU_RESOLVING_TYPE TYPE_RX_CPU
const char *rx_crname(uint8_t cr);
+#ifndef CONFIG_USER_ONLY
void rx_cpu_do_interrupt(CPUState *cpu);
bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
void rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
diff --git a/target/rx/helper.c b/target/rx/helper.c
index db6b07e389..f34945e7e2 100644
--- a/target/rx/helper.c
+++ b/target/rx/helper.c
@@ -40,6 +40,8 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
env->psw_c = FIELD_EX32(psw, PSW, C);
}
+#ifndef CONFIG_USER_ONLY
+
#define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR)
void rx_cpu_do_interrupt(CPUState *cs)
{
@@ -142,6 +144,8 @@ bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
return addr;
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index 0632b0374b..f284870cd2 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -388,14 +388,16 @@ static void update_cc_op(DisasContext *s)
}
}
-static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
+static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
+ uint64_t pc)
{
- return (uint64_t)cpu_lduw_code(env, pc);
+ return (uint64_t)translator_lduw(env, &s->base, pc);
}
-static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
+static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
+ uint64_t pc)
{
- return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
+ return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
static int get_mem_index(DisasContext *s)
@@ -6273,7 +6275,7 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
ilen = s->ex_value & 0xf;
op = insn >> 56;
} else {
- insn = ld_code2(env, pc);
+ insn = ld_code2(env, s, pc);
op = (insn >> 8) & 0xff;
ilen = get_ilen(op);
switch (ilen) {
@@ -6281,10 +6283,10 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
insn = insn << 48;
break;
case 4:
- insn = ld_code4(env, pc) << 32;
+ insn = ld_code4(env, s, pc) << 32;
break;
case 6:
- insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
+ insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
break;
default:
g_assert_not_reached();
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index 8326922942..2047742d03 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -236,10 +236,10 @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
static const struct TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = superh_cpu_exec_interrupt,
.tlb_fill = superh_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = superh_cpu_exec_interrupt,
.do_interrupt = superh_cpu_do_interrupt,
.do_unaligned_access = superh_cpu_do_unaligned_access,
.io_recompile_replay_branch = superh_io_recompile_replay_branch,
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 01c4344082..017a770214 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -204,8 +204,6 @@ struct SuperHCPU {
};
-void superh_cpu_do_interrupt(CPUState *cpu);
-bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
void superh_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int superh_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@@ -223,6 +221,8 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
void sh4_cpu_list(void);
#if !defined(CONFIG_USER_ONLY)
+void superh_cpu_do_interrupt(CPUState *cpu);
+bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
void cpu_sh4_invalidate_tlb(CPUSH4State *s);
uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
hwaddr addr);
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 2d622081e8..53cb9c3b63 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -45,11 +45,6 @@
#if defined(CONFIG_USER_ONLY)
-void superh_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = -1;
-}
-
int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
{
/* For user mode, only U0 area is cacheable. */
@@ -784,8 +779,6 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
return 0;
}
-#endif
-
bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
@@ -803,6 +796,8 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
+#endif /* !CONFIG_USER_ONLY */
+
bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 8704fea1ca..cf5fe9243d 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -1907,7 +1907,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
/* Read all of the insns for the region. */
for (i = 0; i < max_insns; ++i) {
- insns[i] = translator_lduw(env, pc + i * 2);
+ insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
}
ld_adr = ld_dst = ld_mop = -1;
@@ -2307,7 +2307,7 @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
}
#endif
- ctx->opcode = translator_lduw(env, ctx->base.pc_next);
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
decode_opc(ctx);
ctx->base.pc_next += 2;
}
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index da6b30ec74..21dd27796d 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -77,6 +77,7 @@ static void sparc_cpu_reset(DeviceState *dev)
env->cache_control = 0;
}
+#ifndef CONFIG_USER_ONLY
static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
@@ -96,6 +97,7 @@ static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
+#endif /* !CONFIG_USER_ONLY */
static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info)
{
@@ -610,7 +612,7 @@ static void cpu_print_cc(FILE *f, uint32_t cc)
#define REGS_PER_LINE 8
#endif
-void sparc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+static void sparc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
@@ -863,10 +865,10 @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
static const struct TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
- .cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.tlb_fill = sparc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.do_interrupt = sparc_cpu_do_interrupt,
.do_transaction_failed = sparc_cpu_do_transaction_failed,
.do_unaligned_access = sparc_cpu_do_unaligned_access,
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index ff8ae73002..1f40d768d8 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -571,7 +571,6 @@ extern const VMStateDescription vmstate_sparc_cpu;
#endif
void sparc_cpu_do_interrupt(CPUState *cpu);
-void sparc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int sparc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index bb70ba17de..fdb8bbe5dc 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -5855,7 +5855,7 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
CPUSPARCState *env = cs->env_ptr;
unsigned int insn;
- insn = translator_ldl(env, dc->pc);
+ insn = translator_ldl(env, &dc->base, dc->pc);
dc->base.pc_next += 4;
disas_sparc_insn(dc, insn);
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index 58ec3a0862..c1cbd03595 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -192,11 +192,11 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
static const struct TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
- .cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.tlb_fill = xtensa_cpu_tlb_fill,
.debug_excp_handler = xtensa_breakpoint_handler,
#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.do_interrupt = xtensa_cpu_do_interrupt,
.do_transaction_failed = xtensa_cpu_do_transaction_failed,
.do_unaligned_access = xtensa_cpu_do_unaligned_access,
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index 2345cb59c7..cbb720e7cc 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -566,12 +566,14 @@ struct XtensaCPU {
bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
+#ifndef CONFIG_USER_ONLY
void xtensa_cpu_do_interrupt(CPUState *cpu);
bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr);
+#endif
void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void xtensa_count_regs(const XtensaConfig *config,
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
index 10e75ab070..9bc7f50d35 100644
--- a/target/xtensa/exc_helper.c
+++ b/target/xtensa/exc_helper.c
@@ -255,11 +255,6 @@ void xtensa_cpu_do_interrupt(CPUState *cs)
}
check_interrupts(env);
}
-#else
-void xtensa_cpu_do_interrupt(CPUState *cs)
-{
-}
-#endif
bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -270,3 +265,5 @@ bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 20399d6a04..dcf6b500ef 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -882,7 +882,8 @@ static int arg_copy_compare(const void *a, const void *b)
static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
{
xtensa_isa isa = dc->config->isa;
- unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, dc->pc)};
+ unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, &dc->base,
+ dc->pc)};
unsigned len = xtensa_op0_insn_len(dc, b[0]);
xtensa_format fmt;
int slot, slots;
@@ -907,7 +908,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
dc->base.pc_next = dc->pc + len;
for (i = 1; i < len; ++i) {
- b[i] = translator_ldub(env, dc->pc + i);
+ b[i] = translator_ldub(env, &dc->base, dc->pc + i);
}
xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len);
fmt = xtensa_format_decode(isa, dc->insnbuf);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 007ceee68e..d25e68b36b 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -92,7 +92,7 @@ static const int tcg_target_call_oarg_regs[2] = {
#define TCG_REG_TMP TCG_REG_R12
#define TCG_VEC_TMP TCG_REG_Q15
-enum arm_cond_code_e {
+typedef enum {
COND_EQ = 0x0,
COND_NE = 0x1,
COND_CS = 0x2, /* Unsigned greater or equal */
@@ -108,7 +108,7 @@ enum arm_cond_code_e {
COND_GT = 0xc,
COND_LE = 0xd,
COND_AL = 0xe,
-};
+} ARMCond;
#define TO_CPSR (1 << 20)
@@ -141,6 +141,9 @@ typedef enum {
INSN_CLZ = 0x016f0f10,
INSN_RBIT = 0x06ff0f30,
+ INSN_LDMIA = 0x08b00000,
+ INSN_STMDB = 0x09200000,
+
INSN_LDR_IMM = 0x04100000,
INSN_LDR_REG = 0x06100000,
INSN_STR_IMM = 0x04000000,
@@ -309,10 +312,10 @@ static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
- int rot = encode_imm(offset);
+ int imm12 = encode_imm(offset);
- if (rot >= 0) {
- *src_rw = deposit32(*src_rw, 0, 12, rol32(offset, rot) | (rot << 7));
+ if (imm12 >= 0) {
+ *src_rw = deposit32(*src_rw, 0, 12, imm12);
return true;
}
return false;
@@ -366,36 +369,55 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
(ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif
-static inline uint32_t rotl(uint32_t val, int n)
-{
- return (val << n) | (val >> (32 - n));
-}
-
-/* ARM immediates for ALU instructions are made of an unsigned 8-bit
- right-rotated by an even amount between 0 and 30. */
+/*
+ * ARM immediates for ALU instructions are made of an unsigned 8-bit
+ * value right-rotated by an even amount between 0 and 30.
+ *
+ * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
+ */
static int encode_imm(uint32_t imm)
{
- int shift;
+ uint32_t rot, imm8;
- /* simple case, only lower bits */
- if ((imm & ~0xff) == 0)
- return 0;
- /* then try a simple even shift */
- shift = ctz32(imm) & ~1;
- if (((imm >> shift) & ~0xff) == 0)
- return 32 - shift;
- /* now try harder with rotations */
- if ((rotl(imm, 2) & ~0xff) == 0)
- return 2;
- if ((rotl(imm, 4) & ~0xff) == 0)
- return 4;
- if ((rotl(imm, 6) & ~0xff) == 0)
- return 6;
- /* imm can't be encoded */
+ /* Simple case, no rotation required. */
+ if ((imm & ~0xff) == 0) {
+ return imm;
+ }
+
+ /* Next, try a simple even shift. */
+ rot = ctz32(imm) & ~1;
+ imm8 = imm >> rot;
+ rot = 32 - rot;
+ if ((imm8 & ~0xff) == 0) {
+ goto found;
+ }
+
+ /*
+ * Finally, try harder with rotations.
+ * The ctz test above will have taken care of rotates >= 8.
+ */
+ for (rot = 2; rot < 8; rot += 2) {
+ imm8 = rol32(imm, rot);
+ if ((imm8 & ~0xff) == 0) {
+ goto found;
+ }
+ }
+ /* Fail: imm cannot be encoded. */
return -1;
+
+ found:
+ /* Note that rot is even, and we discard bit 0 by shifting by 7. */
+ return rot << 7 | imm8;
+}
+
+static int encode_imm_nofail(uint32_t imm)
+{
+ int ret = encode_imm(imm);
+ tcg_debug_assert(ret >= 0);
+ return ret;
}
-static inline int check_fit_imm(uint32_t imm)
+static bool check_fit_imm(uint32_t imm)
{
return encode_imm(imm) >= 0;
}
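
/*
 * A standalone sketch, not part of this patch: the imm12 field built by
 * encode_imm() above packs the 8-bit payload into bits [7:0] and half of
 * the (even) rotate-right count into bits [11:8].  Decoding it back, with
 * hypothetical helper names, looks like this:
 */
#include <stdint.h>

static uint32_t sketch_ror32(uint32_t x, unsigned n)
{
    n &= 31;
    return n ? (x >> n) | (x << (32 - n)) : x;
}

/* Decode an A32 modified immediate: bits [7:0] rotated right by twice
 * the four-bit count in bits [11:8].  For any imm accepted above,
 * sketch_decode_imm12(encode_imm(imm)) should reproduce imm. */
static uint32_t sketch_decode_imm12(int imm12)
{
    return sketch_ror32(imm12 & 0xff, ((imm12 >> 8) & 0xf) * 2);
}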
@@ -525,42 +547,37 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
return 0;
}
-static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
+static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
tcg_out32(s, (cond << 28) | 0x0a000000 |
(((offset - 8) >> 2) & 0x00ffffff));
}
-static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
+static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
tcg_out32(s, (cond << 28) | 0x0b000000 |
(((offset - 8) >> 2) & 0x00ffffff));
}
-static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
+static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
-static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
+static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
(((offset - 8) >> 2) & 0x00ffffff));
}
-static inline void tcg_out_dat_reg(TCGContext *s,
- int cond, int opc, int rd, int rn, int rm, int shift)
+static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
tcg_out32(s, (cond << 28) | (0 << 25) | opc |
(rn << 16) | (rd << 12) | shift | rm);
}
-static inline void tcg_out_nop(TCGContext *s)
-{
- tcg_out32(s, INSN_NOP);
-}
-
-static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
+static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
/* Simple reg-reg move, optimising out the 'do nothing' case */
if (rd != rm) {
@@ -568,35 +585,47 @@ static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
}
}
-static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
+static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
+{
+ tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
+}
+
+static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
- /* Unless the C portion of QEMU is compiled as thumb, we don't
- actually need true BX semantics; merely a branch to an address
- held in a register. */
+ /*
+ * Unless the C portion of QEMU is compiled as thumb, we don't need
+ * true BX semantics; merely a branch to an address held in a register.
+ */
if (use_armv5t_instructions) {
- tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
+ tcg_out_bx_reg(s, cond, rn);
} else {
tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
}
}
-static inline void tcg_out_dat_imm(TCGContext *s,
- int cond, int opc, int rd, int rn, int im)
+static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg rd, TCGReg rn, int im)
{
tcg_out32(s, (cond << 28) | (1 << 25) | opc |
(rn << 16) | (rd << 12) | im);
}
+static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg rn, uint16_t mask)
+{
+ tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
+}
+
/* Note that this routine is used for both LDR and LDRH formats, so we do
not wish to include an immediate shift at this point. */
-static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
+static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
| (w << 21) | (rn << 16) | (rt << 12) | rm);
}
-static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
+static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
TCGReg rn, int imm8, bool p, bool w)
{
bool u = 1;
@@ -608,8 +637,8 @@ static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
(rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}
-static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
- TCGReg rn, int imm12, bool p, bool w)
+static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
bool u = 1;
if (imm12 < 0) {
@@ -620,167 +649,167 @@ static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
(rn << 16) | (rt << 12) | imm12);
}
-static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm12)
+static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm12)
{
tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}
-static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm12)
+static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm12)
{
tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}
-static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void __attribute__((unused))
+tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}
-static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}
/* Register pre-increment with base writeback. */
-static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}
-static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}
-static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm12)
+static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm12)
{
tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}
-static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm12)
+static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm12)
{
tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}
-static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}
-static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, int imm8)
+static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}
-static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
- TCGReg rn, TCGReg rm)
+static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, TCGReg rm)
{
tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}
-static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
+static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
+ TCGReg rd, uint32_t arg)
{
new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}
-static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
+static void tcg_out_movi32(TCGContext *s, ARMCond cond,
+ TCGReg rd, uint32_t arg)
{
- int rot, diff, opc, sh1, sh2;
+ int imm12, diff, opc, sh1, sh2;
uint32_t tt0, tt1, tt2;
/* Check a single MOV/MVN before anything else. */
- rot = encode_imm(arg);
- if (rot >= 0) {
- tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
- rotl(arg, rot) | (rot << 7));
+ imm12 = encode_imm(arg);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
return;
}
- rot = encode_imm(~arg);
- if (rot >= 0) {
- tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
- rotl(~arg, rot) | (rot << 7));
+ imm12 = encode_imm(~arg);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
return;
}
@@ -788,17 +817,15 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
or within the TB, which is immediately before the code block. */
diff = tcg_pcrel_diff(s, (void *)arg) - 8;
if (diff >= 0) {
- rot = encode_imm(diff);
- if (rot >= 0) {
- tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
- rotl(diff, rot) | (rot << 7));
+ imm12 = encode_imm(diff);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
return;
}
} else {
- rot = encode_imm(-diff);
- if (rot >= 0) {
- tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
- rotl(-diff, rot) | (rot << 7));
+ imm12 = encode_imm(-diff);
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
return;
}
}
@@ -830,6 +857,8 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
sh2 = ctz32(tt1) & ~1;
tt2 = tt1 & ~(0xff << sh2);
if (tt2 == 0) {
+ int rot;
+
rot = ((32 - sh1) << 7) & 0xf00;
tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
rot = ((32 - sh2) << 7) & 0xf00;
@@ -842,65 +871,61 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
tcg_out_movi_pool(s, cond, rd, arg);
}
-static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
- TCGArg lhs, TCGArg rhs, int rhs_is_const)
+/*
+ * Emit either the reg,imm or reg,reg form of a data-processing insn.
+ * rhs must satisfy the "rI" constraint.
+ */
+static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
+ TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
- /* Emit either the reg,imm or reg,reg form of a data-processing insn.
- * rhs must satisfy the "rI" constraint.
- */
if (rhs_is_const) {
- int rot = encode_imm(rhs);
- tcg_debug_assert(rot >= 0);
- tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
}
-static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
- TCGReg dst, TCGReg lhs, TCGArg rhs,
+/*
+ * Emit either the reg,imm or reg,reg form of a data-processing insn.
+ * rhs must satisfy the "rIK" constraint.
+ */
+static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
{
- /* Emit either the reg,imm or reg,reg form of a data-processing insn.
- * rhs must satisfy the "rIK" constraint.
- */
if (rhs_is_const) {
- int rot = encode_imm(rhs);
- if (rot < 0) {
- rhs = ~rhs;
- rot = encode_imm(rhs);
- tcg_debug_assert(rot >= 0);
+ int imm12 = encode_imm(rhs);
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(~rhs);
opc = opinv;
}
- tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
}
-static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
- TCGArg dst, TCGArg lhs, TCGArg rhs,
+static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
{
/* Emit either the reg,imm or reg,reg form of a data-processing insn.
* rhs must satisfy the "rIN" constraint.
*/
if (rhs_is_const) {
- int rot = encode_imm(rhs);
- if (rot < 0) {
- rhs = -rhs;
- rot = encode_imm(rhs);
- tcg_debug_assert(rot >= 0);
+ int imm12 = encode_imm(rhs);
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(-rhs);
opc = opneg;
}
- tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
}
-static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
- TCGReg rn, TCGReg rm)
+static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
+ TCGReg rn, TCGReg rm)
{
/* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
if (!use_armv6_instructions && rd == rn) {
@@ -917,8 +942,8 @@ static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}
-static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
- TCGReg rd1, TCGReg rn, TCGReg rm)
+static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
+ TCGReg rd1, TCGReg rn, TCGReg rm)
{
/* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
@@ -936,8 +961,8 @@ static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
-static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
- TCGReg rd1, TCGReg rn, TCGReg rm)
+static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
+ TCGReg rd1, TCGReg rn, TCGReg rm)
{
/* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
@@ -955,18 +980,19 @@ static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
-static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
+static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, TCGReg rm)
{
tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
-static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
+static void tcg_out_udiv(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, TCGReg rm)
{
tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
-static inline void tcg_out_ext8s(TCGContext *s, int cond,
- int rd, int rn)
+static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
if (use_armv6_instructions) {
/* sxtb */
@@ -979,14 +1005,13 @@ static inline void tcg_out_ext8s(TCGContext *s, int cond,
}
}
-static inline void tcg_out_ext8u(TCGContext *s, int cond,
- int rd, int rn)
+static void __attribute__((unused))
+tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}
-static inline void tcg_out_ext16s(TCGContext *s, int cond,
- int rd, int rn)
+static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
if (use_armv6_instructions) {
/* sxth */
@@ -999,8 +1024,7 @@ static inline void tcg_out_ext16s(TCGContext *s, int cond,
}
}
-static inline void tcg_out_ext16u(TCGContext *s, int cond,
- int rd, int rn)
+static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
if (use_armv6_instructions) {
/* uxth */
@@ -1013,7 +1037,8 @@ static inline void tcg_out_ext16u(TCGContext *s, int cond,
}
}
-static void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn, int flags)
+static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int flags)
{
if (use_armv6_instructions) {
if (flags & TCG_BSWAP_OS) {
@@ -1080,7 +1105,7 @@ static void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn, int flags)
? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8)));
}
-static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
+static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
if (use_armv6_instructions) {
/* rev */
@@ -1097,8 +1122,8 @@ static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
}
}
-static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
- TCGArg a1, int ofs, int len, bool const_a1)
+static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
+ TCGArg a1, int ofs, int len, bool const_a1)
{
if (const_a1) {
/* bfi becomes bfc with rn == 15. */
@@ -1109,24 +1134,24 @@ static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
| (ofs << 7) | ((ofs + len - 1) << 16));
}
-static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
- TCGArg a1, int ofs, int len)
+static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
+ TCGReg rn, int ofs, int len)
{
/* ubfx */
- tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
+ tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
| (ofs << 7) | ((len - 1) << 16));
}
-static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
- TCGArg a1, int ofs, int len)
+static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
+ TCGReg rn, int ofs, int len)
{
/* sbfx */
- tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
+ tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
| (ofs << 7) | ((len - 1) << 16));
}
-static inline void tcg_out_ld32u(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1135,8 +1160,8 @@ static inline void tcg_out_ld32u(TCGContext *s, int cond,
tcg_out_ld32_12(s, cond, rd, rn, offset);
}
-static inline void tcg_out_st32(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_st32(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1145,8 +1170,8 @@ static inline void tcg_out_st32(TCGContext *s, int cond,
tcg_out_st32_12(s, cond, rd, rn, offset);
}
-static inline void tcg_out_ld16u(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1155,8 +1180,8 @@ static inline void tcg_out_ld16u(TCGContext *s, int cond,
tcg_out_ld16u_8(s, cond, rd, rn, offset);
}
-static inline void tcg_out_ld16s(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1165,8 +1190,8 @@ static inline void tcg_out_ld16s(TCGContext *s, int cond,
tcg_out_ld16s_8(s, cond, rd, rn, offset);
}
-static inline void tcg_out_st16(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_st16(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1175,8 +1200,8 @@ static inline void tcg_out_st16(TCGContext *s, int cond,
tcg_out_st16_8(s, cond, rd, rn, offset);
}
-static inline void tcg_out_ld8u(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1185,8 +1210,8 @@ static inline void tcg_out_ld8u(TCGContext *s, int cond,
tcg_out_ld8_12(s, cond, rd, rn, offset);
}
-static inline void tcg_out_ld8s(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1195,8 +1220,8 @@ static inline void tcg_out_ld8s(TCGContext *s, int cond,
tcg_out_ld8s_8(s, cond, rd, rn, offset);
}
-static inline void tcg_out_st8(TCGContext *s, int cond,
- int rd, int rn, int32_t offset)
+static void tcg_out_st8(TCGContext *s, ARMCond cond,
+ TCGReg rd, TCGReg rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
@@ -1205,60 +1230,79 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
tcg_out_st8_12(s, cond, rd, rn, offset);
}
-/* The _goto case is normally between TBs within the same code buffer, and
+/*
+ * The _goto case is normally between TBs within the same code buffer, and
* with the code buffer limited to 16MB we wouldn't need the long case.
* But we also use it for the tail-call to the qemu_ld/st helpers, which does.
*/
-static void tcg_out_goto(TCGContext *s, int cond, const tcg_insn_unit *addr)
+static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
intptr_t addri = (intptr_t)addr;
ptrdiff_t disp = tcg_pcrel_diff(s, addr);
+ bool arm_mode = !(addri & 1);
- if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
- tcg_out_b(s, cond, disp);
+ if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
+ tcg_out_b_imm(s, cond, disp);
return;
}
- tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
+
+ /* LDR is interworking from v5t. */
+ if (arm_mode || use_armv5t_instructions) {
+ tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
+ return;
+ }
+
+ /* else v4t */
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
+ tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);
}
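
/*
 * A standalone sketch, not part of this patch: both paths above rest on
 * two A32 facts -- bit 0 of a code address selects the instruction set
 * (clear for ARM, set for Thumb), and a direct B/BL holds a signed
 * 24-bit word offset taken relative to PC+8, i.e. roughly +/-32MB.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool sketch_addr_is_arm_mode(uintptr_t addri)
{
    return (addri & 1) == 0;    /* interworking convention */
}

static bool sketch_b_imm_in_range(ptrdiff_t disp)
{
    return disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd;
}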
-/* The call case is mostly used for helpers - so it's not unreasonable
- * for them to be beyond branch range */
+/*
+ * The call case is mostly used for helpers - so it's not unreasonable
+ * for them to be beyond branch range.
+ */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
{
intptr_t addri = (intptr_t)addr;
ptrdiff_t disp = tcg_pcrel_diff(s, addr);
+ bool arm_mode = !(addri & 1);
if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
- if (addri & 1) {
- /* Use BLX if the target is in Thumb mode */
- if (!use_armv5t_instructions) {
- tcg_abort();
- }
+ if (arm_mode) {
+ tcg_out_bl_imm(s, COND_AL, disp);
+ return;
+ }
+ if (use_armv5t_instructions) {
tcg_out_blx_imm(s, disp);
- } else {
- tcg_out_bl(s, COND_AL, disp);
+ return;
}
- } else if (use_armv7_instructions) {
+ }
+
+ if (use_armv5t_instructions) {
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
- tcg_out_blx(s, COND_AL, TCG_REG_TMP);
- } else {
+ tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
+ } else if (arm_mode) {
/* ??? Know that movi_pool emits exactly 1 insn. */
- tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
+ tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
+ } else {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
+ tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
+ tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);
}
}
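
/*
 * A note on the v4T fallback above, with a sketch that is not part of
 * this patch: reading the PC on A32 yields the current instruction's
 * address plus 8.  With exactly one instruction between "mov lr, pc"
 * and the branch (the bx, or the single insn movi_pool emits), LR ends
 * up pointing just past the branch, which is the desired return address.
 */
#include <stdint.h>

static uint32_t sketch_return_address(uint32_t seq_base)
{
    /* seq_base + 0: mov lr, pc   (PC reads as this insn + 8)
     * seq_base + 4: bx tmp       (or: ldr pc, [pool entry])   */
    return seq_base + 8;         /* the insn following the branch */
}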
-static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
+static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
if (l->has_value) {
tcg_out_goto(s, cond, l->u.value_ptr);
} else {
tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
- tcg_out_b(s, cond, 0);
+ tcg_out_b_imm(s, cond, 0);
}
}
-static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
if (use_armv7_instructions) {
tcg_out32(s, INSN_DMB_ISH);
@@ -1714,9 +1758,9 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
}
#endif /* SOFTMMU */
-static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
- TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addend)
+static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
+ TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addend)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -1757,9 +1801,9 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
}
}
-static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
- TCGReg datalo, TCGReg datahi,
- TCGReg addrlo)
+#ifndef CONFIG_SOFTMMU
+static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
+ TCGReg datahi, TCGReg addrlo)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -1797,6 +1841,7 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
g_assert_not_reached();
}
}
+#endif
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
@@ -1823,7 +1868,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
/* This is a conditional BL only to load a pointer within this opcode into LR
for the slow path. We will not be using the value for a tail call. */
label_ptr = s->code_ptr;
- tcg_out_bl(s, COND_NE, 0);
+ tcg_out_bl_imm(s, COND_NE, 0);
tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
@@ -1839,9 +1884,9 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
#endif
}
-static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
- TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addend)
+static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
+ TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addend)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -1871,9 +1916,9 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
}
}
-static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
- TCGReg datalo, TCGReg datahi,
- TCGReg addrlo)
+#ifndef CONFIG_SOFTMMU
+static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
+ TCGReg datahi, TCGReg addrlo)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -1902,6 +1947,7 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
g_assert_not_reached();
}
}
+#endif
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
@@ -1929,7 +1975,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
/* The conditional call must come last, as we're going to return here. */
label_ptr = s->code_ptr;
- tcg_out_bl(s, COND_NE, 0);
+ tcg_out_bl_imm(s, COND_NE, 0);
add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
s->code_ptr, label_ptr);
@@ -1946,9 +1992,9 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
static void tcg_out_epilogue(TCGContext *s);
-static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
TCGArg a0, a1, a2, a3, a4, a5;
int c;
@@ -1982,7 +2028,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
case INDEX_op_goto_ptr:
- tcg_out_bx(s, COND_AL, args[0]);
+ tcg_out_b_reg(s, COND_AL, args[0]);
break;
case INDEX_op_br:
tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
@@ -2505,8 +2551,8 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
}
}
-static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
- TCGReg base, intptr_t ofs)
+static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+ TCGReg base, intptr_t ofs)
{
return false;
}
@@ -2715,7 +2761,8 @@ static const ARMInsn vec_cmp0_insn[16] = {
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
TCGType type = vecl + TCG_TYPE_V64;
unsigned q = vecl;
@@ -3055,7 +3102,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
{
/* Calling convention requires us to save r4-r11 and lr. */
/* stmdb sp!, { r4 - r11, lr } */
- tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
+ tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
+ (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
+ (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
+ (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
/* Reserve callee argument and tcg temp space. */
tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
@@ -3065,7 +3115,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
+ tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
/*
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
@@ -3083,7 +3133,10 @@ static void tcg_out_epilogue(TCGContext *s)
TCG_REG_CALL_STACK, STACK_ADDEND, 1);
/* ldmia sp!, { r4 - r11, pc } */
- tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
+ tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
+ (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
+ (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
+ (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}
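
/*
 * A standalone cross-check, not part of this patch: the register masks
 * passed to tcg_out_ldstm() above reproduce the low halves of the magic
 * words they replace (0x092d4ff0 = stmdb sp!, {r4-r11, lr} and
 * 0x08bd8ff0 = ldmia sp!, {r4-r11, pc}), assuming the usual numbering
 * r4..r11 = 4..11, lr = 14, pc = 15.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint16_t mask = 0;
    for (int r = 4; r <= 11; r++) {
        mask |= 1u << r;                  /* r4 - r11 */
    }
    assert((mask | 1u << 14) == 0x4ff0);  /* stmdb: add lr */
    assert((mask | 1u << 15) == 0x8ff0);  /* ldmia: add pc */
    return 0;
}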
typedef struct {
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index d113b7f8db..f41b809554 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -26,34 +26,9 @@
#ifndef ARM_TCG_TARGET_H
#define ARM_TCG_TARGET_H
-/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */
-#ifndef __ARM_ARCH
-# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
- || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
- || defined(__ARM_ARCH_7EM__)
-# define __ARM_ARCH 7
-# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
- || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
- || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
-# define __ARM_ARCH 6
-# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \
- || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
- || defined(__ARM_ARCH_5TEJ__)
-# define __ARM_ARCH 5
-# else
-# define __ARM_ARCH 4
-# endif
-#endif
-
extern int arm_arch;
-#if defined(__ARM_ARCH_5T__) \
- || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
-# define use_armv5t_instructions 1
-#else
-# define use_armv5t_instructions use_armv6_instructions
-#endif
-
+#define use_armv5t_instructions (__ARM_ARCH >= 5 || arm_arch >= 5)
#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 98d924b91a..997510109d 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -241,8 +241,9 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define P_EXT 0x100 /* 0x0f opcode prefix */
#define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
#define P_DATA16 0x400 /* 0x66 opcode prefix */
+#define P_VEXW 0x1000 /* Set VEX.W = 1 */
#if TCG_TARGET_REG_BITS == 64
-# define P_REXW 0x1000 /* Set REX.W = 1 */
+# define P_REXW P_VEXW /* Set REX.W = 1; match VEXW */
# define P_REXB_R 0x2000 /* REG field as byte register */
# define P_REXB_RM 0x4000 /* R/M field as byte register */
# define P_GS 0x8000 /* gs segment override */
@@ -410,13 +411,13 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
-#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW)
+#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
#define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
#define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16)
-#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_REXW)
+#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
-#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_REXW)
+#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VZEROUPPER (0x77 | P_EXT)
#define OPC_XCHG_ax_r32 (0x90)
@@ -576,7 +577,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
/* Use the two byte form if possible, which cannot encode
VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */
- if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
+ if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_VEXW)) == P_EXT
&& ((rm | index) & 8) == 0) {
/* Two byte VEX prefix. */
tcg_out8(s, 0xc5);
@@ -601,7 +602,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */
tcg_out8(s, tmp);
- tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */
+ tmp = (opc & P_VEXW ? 0x80 : 0); /* VEX.W */
}
tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */
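
/*
 * A standalone sketch, not part of this patch: the two-byte test above
 * reflects a fixed property of VEX encoding.  The compact 0xC5 prefix
 * can only express VEX.W = 0, implied VEX.X = VEX.B = 1 (index and base
 * registers below 8) and the 0F opcode map; everything else needs the
 * three-byte 0xC4 form.
 */
#include <stdbool.h>

static bool sketch_vex_two_byte_ok(bool vexw, bool map_is_0f,
                                   int rm, int index)
{
    /* rm/index >= 8 would need VEX.B/VEX.X cleared, which 0xC5 cannot do. */
    return !vexw && map_is_0f && ((rm | index) & 8) == 0;
}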
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index e0f4665213..5e1fac914a 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -25,9 +25,24 @@
#include "elf.h"
#include "../tcg-pool.c.inc"
-#if defined _CALL_DARWIN || defined __APPLE__
-#define TCG_TARGET_CALL_DARWIN
-#endif
+/*
+ * Standardize on the _CALL_FOO symbols used by GCC:
+ * Apple XCode does not define _CALL_DARWIN.
+ * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
+ */
+#if !defined(_CALL_SYSV) && \
+ !defined(_CALL_DARWIN) && \
+ !defined(_CALL_AIX) && \
+ !defined(_CALL_ELF)
+# if defined(__APPLE__)
+# define _CALL_DARWIN
+# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
+# define _CALL_SYSV
+# else
+# error "Unknown ABI"
+# endif
+#endif
+
#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ALIGN_ARGS 1
#endif
@@ -169,7 +184,7 @@ static const int tcg_target_call_oarg_regs[] = {
};
static const int tcg_target_callee_save_regs[] = {
-#ifdef TCG_TARGET_CALL_DARWIN
+#ifdef _CALL_DARWIN
TCG_REG_R11,
#endif
TCG_REG_R14,
@@ -2372,7 +2387,7 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
# define LINK_AREA_SIZE (6 * SZR)
# define LR_OFFSET (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
-#elif defined(TCG_TARGET_CALL_DARWIN)
+#elif defined(_CALL_DARWIN)
# define LINK_AREA_SIZE (6 * SZR)
# define LR_OFFSET (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64
diff --git a/tests/data/acpi/virt/IORT b/tests/data/acpi/virt/IORT
new file mode 100644
index 0000000000..521acefe9b
--- /dev/null
+++ b/tests/data/acpi/virt/IORT
Binary files differ
diff --git a/tests/data/acpi/virt/IORT.memhp b/tests/data/acpi/virt/IORT.memhp
new file mode 100644
index 0000000000..521acefe9b
--- /dev/null
+++ b/tests/data/acpi/virt/IORT.memhp
Binary files differ
diff --git a/tests/data/acpi/virt/IORT.numamem b/tests/data/acpi/virt/IORT.numamem
new file mode 100644
index 0000000000..521acefe9b
--- /dev/null
+++ b/tests/data/acpi/virt/IORT.numamem
Binary files differ
diff --git a/tests/data/acpi/virt/IORT.pxb b/tests/data/acpi/virt/IORT.pxb
new file mode 100644
index 0000000000..521acefe9b
--- /dev/null
+++ b/tests/data/acpi/virt/IORT.pxb
Binary files differ
diff --git a/tests/qemu-iotests/122 b/tests/qemu-iotests/122
index 5d550ed13e..efb260d822 100755
--- a/tests/qemu-iotests/122
+++ b/tests/qemu-iotests/122
@@ -67,7 +67,7 @@ echo
_make_test_img -b "$TEST_IMG".base -F $IMGFMT
$QEMU_IO -c "write -P 0 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
-$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base -o backing_fmt=$IMGFMT \
+$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base -F $IMGFMT \
"$TEST_IMG" "$TEST_IMG".orig
$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
$QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base -o backing_fmt=$IMGFMT \
diff --git a/tests/qemu-iotests/271 b/tests/qemu-iotests/271
index 599b849cc6..2775b4d130 100755
--- a/tests/qemu-iotests/271
+++ b/tests/qemu-iotests/271
@@ -893,7 +893,10 @@ EOF
}
_make_test_img -o extended_l2=on 1M
-_concurrent_io | $QEMU_IO | _filter_qemu_io
+# The second and third writes in _concurrent_io() are independent and may
+# finish in either order, so filter the offset out to match both variants.
+_concurrent_io | $QEMU_IO | _filter_qemu_io | \
+ $SED -e 's/\(20480\|40960\)/OFFSET/'
_concurrent_verify | $QEMU_IO | _filter_qemu_io
# success, all done
diff --git a/tests/qemu-iotests/271.out b/tests/qemu-iotests/271.out
index 81043ba4d7..5be780de76 100644
--- a/tests/qemu-iotests/271.out
+++ b/tests/qemu-iotests/271.out
@@ -719,8 +719,8 @@ blkdebug: Suspended request 'A'
blkdebug: Resuming request 'A'
wrote 2048/2048 bytes at offset 30720
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 2048/2048 bytes at offset 20480
+wrote 2048/2048 bytes at offset OFFSET
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 2048/2048 bytes at offset 40960
+wrote 2048/2048 bytes at offset OFFSET
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
*** done
diff --git a/tests/qemu-iotests/297 b/tests/qemu-iotests/297
index 345b617b34..b04cba5366 100755
--- a/tests/qemu-iotests/297
+++ b/tests/qemu-iotests/297
@@ -29,7 +29,7 @@ import iotests
SKIP_FILES = (
'030', '040', '041', '044', '045', '055', '056', '057', '065', '093',
'096', '118', '124', '132', '136', '139', '147', '148', '149',
- '151', '152', '155', '163', '165', '169', '194', '196', '199', '202',
+ '151', '152', '155', '163', '165', '194', '196', '202',
'203', '205', '206', '207', '208', '210', '211', '212', '213', '216',
'218', '219', '224', '228', '234', '235', '236', '237', '238',
'240', '242', '245', '246', '248', '255', '256', '257', '258', '260',
@@ -46,7 +46,7 @@ def is_python_file(filename):
if filename.endswith('.py'):
return True
- with open(filename) as f:
+ with open(filename, encoding='utf-8') as f:
try:
first_line = f.readline()
return re.match('^#!.*python', first_line) is not None
@@ -55,8 +55,9 @@ def is_python_file(filename):
def run_linters():
- files = [filename for filename in (set(os.listdir('.')) - set(SKIP_FILES))
- if is_python_file(filename)]
+ named_tests = [f'tests/{entry}' for entry in os.listdir('tests')]
+ check_tests = set(os.listdir('.') + named_tests) - set(SKIP_FILES)
+ files = [filename for filename in check_tests if is_python_file(filename)]
iotests.logger.debug('Files to be checked:')
iotests.logger.debug(', '.join(sorted(files)))
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 11276f380a..ce06cf5630 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -610,7 +610,7 @@ class VM(qtest.QEMUQtestMachine):
return
valgrind_filename = f"{test_dir}/{self._popen.pid}.valgrind"
if self.exitcode() == 99:
- with open(valgrind_filename) as f:
+ with open(valgrind_filename, encoding='utf-8') as f:
print(f.read())
else:
os.remove(valgrind_filename)
@@ -703,7 +703,7 @@ class VM(qtest.QEMUQtestMachine):
def flatten_qmp_object(self, obj, output=None, basestr=''):
if output is None:
- output = dict()
+ output = {}
if isinstance(obj, list):
for i, item in enumerate(obj):
self.flatten_qmp_object(item, output, basestr + str(i) + '.')
@@ -716,7 +716,7 @@ class VM(qtest.QEMUQtestMachine):
def qmp_to_opts(self, obj):
obj = self.flatten_qmp_object(obj)
- output_list = list()
+ output_list = []
for key in obj:
output_list += [key + '=' + obj[key]]
return ','.join(output_list)
@@ -1121,7 +1121,8 @@ def notrun(reason):
# Each test in qemu-iotests has a number ("seq")
seq = os.path.basename(sys.argv[0])
- with open('%s/%s.notrun' % (output_dir, seq), 'w') as outfile:
+ with open('%s/%s.notrun' % (output_dir, seq), 'w', encoding='utf-8') \
+ as outfile:
outfile.write(reason + '\n')
logger.warning("%s not run: %s", seq, reason)
sys.exit(0)
@@ -1135,7 +1136,8 @@ def case_notrun(reason):
# Each test in qemu-iotests has a number ("seq")
seq = os.path.basename(sys.argv[0])
- with open('%s/%s.casenotrun' % (output_dir, seq), 'a') as outfile:
+ with open('%s/%s.casenotrun' % (output_dir, seq), 'a', encoding='utf-8') \
+ as outfile:
outfile.write(' [case not run] ' + reason + '\n')
def _verify_image_format(supported_fmts: Sequence[str] = (),
diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test b/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test
index 584062b412..00ebb5c251 100755
--- a/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test
+++ b/tests/qemu-iotests/tests/migrate-bitmaps-postcopy-test
@@ -132,10 +132,10 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
result = self.vm_a.qmp('x-debug-block-dirty-bitmap-sha256',
node='drive0', name='bitmap0')
- self.discards1_sha256 = result['return']['sha256']
+ discards1_sha256 = result['return']['sha256']
# Check, that updating the bitmap by discards works
- assert self.discards1_sha256 != empty_sha256
+ assert discards1_sha256 != empty_sha256
# We want to calculate resulting sha256. Do it in bitmap0, so, disable
# other bitmaps
@@ -148,7 +148,7 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
result = self.vm_a.qmp('x-debug-block-dirty-bitmap-sha256',
node='drive0', name='bitmap0')
- self.all_discards_sha256 = result['return']['sha256']
+ all_discards_sha256 = result['return']['sha256']
# Now, enable some bitmaps, to be updated during migration
for i in range(2, nb_bitmaps, 2):
@@ -173,10 +173,11 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
event_resume = self.vm_b.event_wait('RESUME')
self.vm_b_events.append(event_resume)
- return event_resume
+ return (event_resume, discards1_sha256, all_discards_sha256)
def test_postcopy_success(self):
- event_resume = self.start_postcopy()
+ event_resume, discards1_sha256, all_discards_sha256 = \
+ self.start_postcopy()
# enabled bitmaps should be updated
apply_discards(self.vm_b, discards2)
@@ -217,7 +218,7 @@ class TestDirtyBitmapPostcopyMigration(iotests.QMPTestCase):
for i in range(0, nb_bitmaps, 5):
result = self.vm_b.qmp('x-debug-block-dirty-bitmap-sha256',
node='drive0', name='bitmap{}'.format(i))
- sha = self.discards1_sha256 if i % 2 else self.all_discards_sha256
+ sha = discards1_sha256 if i % 2 else all_discards_sha256
self.assert_qmp(result, 'return/sha256', sha)
def test_early_shutdown_destination(self):
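
The pattern behind the hunks above: start_postcopy() used to stash the
two hashes on self, which pylint rejects (attribute-defined-outside-init),
so the values are now returned as a tuple and unpacked by the caller that
needs them. Reduced to its essentials (all names here are illustrative):

    class Example:
        def start(self):
            # Compute values locally instead of assigning self.attr
            # outside __init__ ...
            event, sha_a, sha_b = 'RESUME', 'abc...', 'def...'
            return event, sha_a, sha_b

        def test(self):
            # ... and let the caller unpack exactly what it needs.
            event, sha_a, sha_b = self.start()
            assert event == 'RESUME' and sha_a != sha_b
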
diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-test b/tests/qemu-iotests/tests/migrate-bitmaps-test
index a5c7bc83e0..dc431c35b3 100755
--- a/tests/qemu-iotests/tests/migrate-bitmaps-test
+++ b/tests/qemu-iotests/tests/migrate-bitmaps-test
@@ -20,11 +20,10 @@
#
import os
-import iotests
-import time
import itertools
import operator
import re
+import iotests
from iotests import qemu_img, qemu_img_create, Timeout
@@ -37,6 +36,12 @@ mig_cmd = 'exec: cat > ' + mig_file
incoming_cmd = 'exec: cat ' + mig_file
+def get_bitmap_hash(vm):
+ result = vm.qmp('x-debug-block-dirty-bitmap-sha256',
+ node='drive0', name='bitmap0')
+ return result['return']['sha256']
+
+
class TestDirtyBitmapMigration(iotests.QMPTestCase):
def tearDown(self):
self.vm_a.shutdown()
@@ -62,21 +67,16 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
params['persistent'] = True
result = vm.qmp('block-dirty-bitmap-add', **params)
- self.assert_qmp(result, 'return', {});
-
- def get_bitmap_hash(self, vm):
- result = vm.qmp('x-debug-block-dirty-bitmap-sha256',
- node='drive0', name='bitmap0')
- return result['return']['sha256']
+ self.assert_qmp(result, 'return', {})
def check_bitmap(self, vm, sha256):
result = vm.qmp('x-debug-block-dirty-bitmap-sha256',
node='drive0', name='bitmap0')
if sha256:
- self.assert_qmp(result, 'return/sha256', sha256);
+ self.assert_qmp(result, 'return/sha256', sha256)
else:
self.assert_qmp(result, 'error/desc',
- "Dirty bitmap 'bitmap0' not found");
+ "Dirty bitmap 'bitmap0' not found")
def do_test_migration_resume_source(self, persistent, migrate_bitmaps):
granularity = 512
@@ -97,7 +97,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
self.add_bitmap(self.vm_a, granularity, persistent)
for r in regions:
self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % r)
- sha256 = self.get_bitmap_hash(self.vm_a)
+ sha256 = get_bitmap_hash(self.vm_a)
result = self.vm_a.qmp('migrate', uri=mig_cmd)
while True:
@@ -106,7 +106,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
break
while True:
result = self.vm_a.qmp('query-status')
- if (result['return']['status'] == 'postmigrate'):
+ if result['return']['status'] == 'postmigrate':
break
# test that bitmap is still here
@@ -164,7 +164,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
self.add_bitmap(self.vm_a, granularity, persistent)
for r in regions:
self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % r)
- sha256 = self.get_bitmap_hash(self.vm_a)
+ sha256 = get_bitmap_hash(self.vm_a)
if pre_shutdown:
self.vm_a.shutdown()
@@ -214,16 +214,22 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
self.check_bitmap(self.vm_b, sha256 if persistent else False)
-def inject_test_case(klass, name, method, *args, **kwargs):
+def inject_test_case(klass, suffix, method, *args, **kwargs):
mc = operator.methodcaller(method, *args, **kwargs)
- setattr(klass, 'test_' + method + name, lambda self: mc(self))
+ # We want to add a function attribute to `klass`, so that it is
+ # correctly converted to a method on instantiation. The
+ # methodcaller object `mc` is a callable, not a function, so we
+ # need the lambda to turn it into a function.
+ # pylint: disable=unnecessary-lambda
+ setattr(klass, 'test_' + method + suffix, lambda self: mc(self))
+
for cmb in list(itertools.product((True, False), repeat=5)):
name = ('_' if cmb[0] else '_not_') + 'persistent_'
name += ('_' if cmb[1] else '_not_') + 'migbitmap_'
name += '_online' if cmb[2] else '_offline'
name += '_shared' if cmb[3] else '_nonshared'
- if (cmb[4]):
+ if cmb[4]:
name += '__pre_shutdown'
inject_test_case(TestDirtyBitmapMigration, name, 'do_test_migration',
@@ -270,7 +276,8 @@ class TestDirtyBitmapBackingMigration(iotests.QMPTestCase):
self.assert_qmp(result, 'return', {})
# Check that the bitmaps are there
- for node in self.vm.qmp('query-named-block-nodes', flat=True)['return']:
+ nodes = self.vm.qmp('query-named-block-nodes', flat=True)['return']
+ for node in nodes:
if 'node0' in node['node-name']:
self.assert_qmp(node, 'dirty-bitmaps[0]/name', 'bmap0')
@@ -287,7 +294,7 @@ class TestDirtyBitmapBackingMigration(iotests.QMPTestCase):
"""
Continue the source after migration.
"""
- result = self.vm.qmp('migrate', uri=f'exec: cat > /dev/null')
+ result = self.vm.qmp('migrate', uri='exec: cat > /dev/null')
self.assert_qmp(result, 'return', {})
with Timeout(10, 'Migration timeout'):
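
The methodcaller/lambda arrangement that inject_test_case() documents
above, shown in isolation: operator.methodcaller('m', *args) returns a
callable that invokes .m(*args) on its argument, and wrapping it in a
lambda yields a plain function, which setattr() can install on the class
so that it becomes a bound method on instantiation:

    import operator

    class Demo:
        def do_test(self, persistent):
            return f'persistent={persistent}'

    # Equivalent to what inject_test_case(Demo, '_persistent',
    # 'do_test', True) would produce.
    mc = operator.methodcaller('do_test', True)
    setattr(Demo, 'test_do_test_persistent', lambda self: mc(self))

    print(Demo().test_do_test_persistent())   # -> persistent=True
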
diff --git a/tests/qemu-iotests/tests/migrate-during-backup b/tests/qemu-iotests/tests/migrate-during-backup
new file mode 100755
index 0000000000..34103229ee
--- /dev/null
+++ b/tests/qemu-iotests/tests/migrate-during-backup
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+# group: migration
+#
+# Copyright (c) 2021 Virtuozzo International GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import iotests
+from iotests import qemu_img_create, qemu_io
+
+
+disk_a = os.path.join(iotests.test_dir, 'disk_a')
+disk_b = os.path.join(iotests.test_dir, 'disk_b')
+size = '1M'
+mig_file = os.path.join(iotests.test_dir, 'mig_file')
+mig_cmd = 'exec: cat > ' + mig_file
+
+
+class TestMigrateDuringBackup(iotests.QMPTestCase):
+ def tearDown(self):
+ self.vm.shutdown()
+ os.remove(disk_a)
+ os.remove(disk_b)
+ os.remove(mig_file)
+
+ def setUp(self):
+ qemu_img_create('-f', iotests.imgfmt, disk_a, size)
+ qemu_img_create('-f', iotests.imgfmt, disk_b, size)
+ qemu_io('-c', f'write 0 {size}', disk_a)
+
+ self.vm = iotests.VM().add_drive(disk_a)
+ self.vm.launch()
+ result = self.vm.qmp('blockdev-add', {
+ 'node-name': 'target',
+ 'driver': iotests.imgfmt,
+ 'file': {
+ 'driver': 'file',
+ 'filename': disk_b
+ }
+ })
+ self.assert_qmp(result, 'return', {})
+
+ def test_migrate(self):
+ result = self.vm.qmp('blockdev-backup', device='drive0',
+ target='target', sync='full',
+ speed=1, x_perf={
+ 'max-workers': 1,
+ 'max-chunk': 64 * 1024
+ })
+ self.assert_qmp(result, 'return', {})
+
+ result = self.vm.qmp('job-pause', id='drive0')
+ self.assert_qmp(result, 'return', {})
+
+ result = self.vm.qmp('migrate-set-capabilities',
+ capabilities=[{'capability': 'events',
+ 'state': True}])
+ self.assert_qmp(result, 'return', {})
+ result = self.vm.qmp('migrate', uri=mig_cmd)
+ self.assert_qmp(result, 'return', {})
+
+ e = self.vm.events_wait((('MIGRATION',
+ {'data': {'status': 'completed'}}),
+ ('MIGRATION',
+ {'data': {'status': 'failed'}})))
+
+ # Don't assert that e is 'failed' yet: by checking only at the
+ # end we won't miss a possible crash if the backup continues :)
+
+ result = self.vm.qmp('block-job-set-speed', device='drive0',
+ speed=0)
+ self.assert_qmp(result, 'return', {})
+ result = self.vm.qmp('job-resume', id='drive0')
+ self.assert_qmp(result, 'return', {})
+
+ # For the future: if something changes so that both migration
+ # and backup pass, let's not miss that moment, as it may be
+ # either a bug or an improvement.
+ self.assert_qmp(e, 'data/status', 'failed')
+
+
+if __name__ == '__main__':
+ iotests.main(supported_fmts=['qcow2'],
+ supported_protocols=['file'])
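
In the new test, events_wait() is handed two (name, match) candidates and
returns whichever MIGRATION event arrives first; only after the job has
been unthrottled and resumed does the test assert that migration failed.
The match dictionaries act as recursive subsets of the event payload. A
sketch of that matching rule (an assumption about the helper's semantics,
not code from the patch):

    def matches(template, event):
        # A dict template matches if every key it names exists in the
        # event and the corresponding values match recursively.
        if isinstance(template, dict):
            return all(key in event and matches(value, event[key])
                       for key, value in template.items())
        return template == event

    evt = {'event': 'MIGRATION', 'data': {'status': 'failed', 'ms': 5}}
    assert matches({'data': {'status': 'failed'}}, evt)
    assert not matches({'data': {'status': 'completed'}}, evt)
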
diff --git a/tests/qemu-iotests/tests/migrate-during-backup.out b/tests/qemu-iotests/tests/migrate-during-backup.out
new file mode 100644
index 0000000000..ae1213e6f8
--- /dev/null
+++ b/tests/qemu-iotests/tests/migrate-during-backup.out
@@ -0,0 +1,5 @@
+.
+----------------------------------------------------------------------
+Ran 1 tests
+
+OK
diff --git a/tests/qemu-iotests/tests/mirror-top-perms b/tests/qemu-iotests/tests/mirror-top-perms
index 451a0666f8..2fc8dd66e0 100755
--- a/tests/qemu-iotests/tests/mirror-top-perms
+++ b/tests/qemu-iotests/tests/mirror-top-perms
@@ -47,7 +47,7 @@ class TestMirrorTopPerms(iotests.QMPTestCase):
def tearDown(self):
try:
self.vm.shutdown()
- except qemu.machine.AbnormalShutdown:
+ except qemu.machine.machine.AbnormalShutdown:
pass
if self.vm_b is not None:
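
The one-line fix above follows the layout of the python/qemu support
package: qemu.machine became a package whose machine module defines
AbnormalShutdown, so the fully qualified name gained a component.
Assuming that layout, a direct import reads:

    from qemu.machine.machine import AbnormalShutdown

    def safe_shutdown(vm):
        # 'vm' is assumed to be a qemu.machine QEMUMachine instance.
        try:
            vm.shutdown()
        except AbnormalShutdown:
            pass  # guest died on its own; nothing more to clean up
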
diff --git a/tools/virtiofsd/fuse_lowlevel.h b/tools/virtiofsd/fuse_lowlevel.h
index 4b4e8c9724..c55c0ca2fc 100644
--- a/tools/virtiofsd/fuse_lowlevel.h
+++ b/tools/virtiofsd/fuse_lowlevel.h
@@ -1603,7 +1603,7 @@ int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
* parent/name
*
* To avoid a deadlock this function must not be called in the
- * execution path of a related filesytem operation or within any code
+ * execution path of a related filesystem operation or within any code
* that could hold a lock that could be needed to execute such an
* operation. As of kernel 4.18, a "related operation" is a lookup(),
* symlink(), mknod(), mkdir(), unlink(), rename(), link() or create()
@@ -1636,7 +1636,7 @@ int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
* that the dentry has been deleted.
*
* To avoid a deadlock this function must not be called while
- * executing a related filesytem operation or while holding a lock
+ * executing a related filesystem operation or while holding a lock
* that could be needed to execute such an operation (see the
* description of fuse_lowlevel_notify_inval_entry() for more
* details).
diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c
index 6d0cb2b5cb..385a3fa752 100644
--- a/ui/egl-helpers.c
+++ b/ui/egl-helpers.c
@@ -287,6 +287,32 @@ void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf)
dmabuf->texture = 0;
}
+void egl_dmabuf_create_sync(QemuDmaBuf *dmabuf)
+{
+ EGLSyncKHR sync;
+
+ if (epoxy_has_egl_extension(qemu_egl_display,
+ "EGL_KHR_fence_sync") &&
+ epoxy_has_egl_extension(qemu_egl_display,
+ "EGL_ANDROID_native_fence_sync")) {
+ sync = eglCreateSyncKHR(qemu_egl_display,
+ EGL_SYNC_NATIVE_FENCE_ANDROID, NULL);
+ if (sync != EGL_NO_SYNC_KHR) {
+ dmabuf->sync = sync;
+ }
+ }
+}
+
+void egl_dmabuf_create_fence(QemuDmaBuf *dmabuf)
+{
+ if (dmabuf->sync) {
+ dmabuf->fence_fd = eglDupNativeFenceFDANDROID(qemu_egl_display,
+ dmabuf->sync);
+ eglDestroySyncKHR(qemu_egl_display, dmabuf->sync);
+ dmabuf->sync = NULL;
+ }
+}
+
#endif /* CONFIG_GBM */
/* ---------------------------------------------------------------------- */
diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c
index 2a2e6d3a17..72ce5e1f8f 100644
--- a/ui/gtk-egl.c
+++ b/ui/gtk-egl.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
#include "trace.h"
@@ -94,6 +95,18 @@ void gd_egl_draw(VirtualConsole *vc)
}
glFlush();
+#ifdef CONFIG_GBM
+ if (vc->gfx.guest_fb.dmabuf) {
+ QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf;
+
+ egl_dmabuf_create_fence(dmabuf);
+ if (dmabuf->fence_fd > 0) {
+ qemu_set_fd_handler(dmabuf->fence_fd, gd_hw_gl_flushed, NULL, vc);
+ return;
+ }
+ graphic_hw_gl_block(vc->gfx.dcl.con, false);
+ }
+#endif
graphic_hw_gl_flushed(vc->gfx.dcl.con);
}
@@ -209,6 +222,8 @@ void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf)
{
#ifdef CONFIG_GBM
+ VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl);
+
egl_dmabuf_import_texture(dmabuf);
if (!dmabuf->texture) {
return;
@@ -217,6 +232,10 @@ void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl,
gd_egl_scanout_texture(dcl, dmabuf->texture,
false, dmabuf->width, dmabuf->height,
0, 0, dmabuf->width, dmabuf->height);
+
+ if (dmabuf->allow_fences) {
+ vc->gfx.guest_fb.dmabuf = dmabuf;
+ }
#endif
}
@@ -249,14 +268,6 @@ void gd_egl_cursor_position(DisplayChangeListener *dcl,
vc->gfx.cursor_y = pos_y * vc->gfx.scale_y;
}
-void gd_egl_release_dmabuf(DisplayChangeListener *dcl,
- QemuDmaBuf *dmabuf)
-{
-#ifdef CONFIG_GBM
- egl_dmabuf_release_texture(dmabuf);
-#endif
-}
-
void gd_egl_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h)
{
@@ -289,9 +300,30 @@ void gd_egl_scanout_flush(DisplayChangeListener *dcl,
egl_fb_blit(&vc->gfx.win_fb, &vc->gfx.guest_fb, !vc->gfx.y0_top);
}
+#ifdef CONFIG_GBM
+ if (vc->gfx.guest_fb.dmabuf) {
+ egl_dmabuf_create_sync(vc->gfx.guest_fb.dmabuf);
+ }
+#endif
+
eglSwapBuffers(qemu_egl_display, vc->gfx.esurface);
}
+void gd_egl_flush(DisplayChangeListener *dcl,
+ uint32_t x, uint32_t y, uint32_t w, uint32_t h)
+{
+ VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl);
+ GtkWidget *area = vc->gfx.drawing_area;
+
+ if (vc->gfx.guest_fb.dmabuf) {
+ graphic_hw_gl_block(vc->gfx.dcl.con, true);
+ gtk_widget_queue_draw_area(area, x, y, w, h);
+ return;
+ }
+
+ gd_egl_scanout_flush(&vc->gfx.dcl, x, y, w, h);
+}
+
void gtk_egl_init(DisplayGLMode mode)
{
GdkDisplay *gdk_display = gdk_display_get_default();
diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c
index dd5783fec7..b23523748e 100644
--- a/ui/gtk-gl-area.c
+++ b/ui/gtk-gl-area.c
@@ -8,6 +8,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
#include "trace.h"
@@ -71,7 +72,25 @@ void gd_gl_area_draw(VirtualConsole *vc)
surface_gl_render_texture(vc->gfx.gls, vc->gfx.ds);
}
+#ifdef CONFIG_GBM
+ if (vc->gfx.guest_fb.dmabuf) {
+ egl_dmabuf_create_sync(vc->gfx.guest_fb.dmabuf);
+ }
+#endif
+
glFlush();
+#ifdef CONFIG_GBM
+ if (vc->gfx.guest_fb.dmabuf) {
+ QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf;
+
+ egl_dmabuf_create_fence(dmabuf);
+ if (dmabuf->fence_fd > 0) {
+ qemu_set_fd_handler(dmabuf->fence_fd, gd_hw_gl_flushed, NULL, vc);
+ return;
+ }
+ graphic_hw_gl_block(vc->gfx.dcl.con, false);
+ }
+#endif
graphic_hw_gl_flushed(vc->gfx.dcl.con);
}
@@ -213,6 +232,9 @@ void gd_gl_area_scanout_flush(DisplayChangeListener *dcl,
{
VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl);
+ if (vc->gfx.guest_fb.dmabuf) {
+ graphic_hw_gl_block(vc->gfx.dcl.con, true);
+ }
gtk_gl_area_queue_render(GTK_GL_AREA(vc->gfx.drawing_area));
}
@@ -231,6 +253,10 @@ void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl,
gd_gl_area_scanout_texture(dcl, dmabuf->texture,
false, dmabuf->width, dmabuf->height,
0, 0, dmabuf->width, dmabuf->height);
+
+ if (dmabuf->allow_fences) {
+ vc->gfx.guest_fb.dmabuf = dmabuf;
+ }
#endif
}
diff --git a/ui/gtk.c b/ui/gtk.c
index cfb0728d1f..b0564d80c1 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -36,6 +36,7 @@
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/cutils.h"
+#include "qemu/main-loop.h"
#include "ui/console.h"
#include "ui/gtk.h"
@@ -575,6 +576,26 @@ static bool gd_has_dmabuf(DisplayChangeListener *dcl)
return vc->gfx.has_dmabuf;
}
+static void gd_gl_release_dmabuf(DisplayChangeListener *dcl,
+ QemuDmaBuf *dmabuf)
+{
+#ifdef CONFIG_GBM
+ egl_dmabuf_release_texture(dmabuf);
+#endif
+}
+
+void gd_hw_gl_flushed(void *vcon)
+{
+ VirtualConsole *vc = vcon;
+ QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf;
+
+ graphic_hw_gl_block(vc->gfx.dcl.con, false);
+ graphic_hw_gl_flushed(vc->gfx.dcl.con);
+ qemu_set_fd_handler(dmabuf->fence_fd, NULL, NULL, NULL);
+ close(dmabuf->fence_fd);
+ dmabuf->fence_fd = -1;
+}
+
/** DisplayState Callbacks (opengl version) **/
static const DisplayChangeListenerOps dcl_gl_area_ops = {
@@ -593,6 +614,7 @@ static const DisplayChangeListenerOps dcl_gl_area_ops = {
.dpy_gl_scanout_disable = gd_gl_area_scanout_disable,
.dpy_gl_update = gd_gl_area_scanout_flush,
.dpy_gl_scanout_dmabuf = gd_gl_area_scanout_dmabuf,
+ .dpy_gl_release_dmabuf = gd_gl_release_dmabuf,
.dpy_has_dmabuf = gd_has_dmabuf,
};
@@ -615,8 +637,8 @@ static const DisplayChangeListenerOps dcl_egl_ops = {
.dpy_gl_scanout_dmabuf = gd_egl_scanout_dmabuf,
.dpy_gl_cursor_dmabuf = gd_egl_cursor_dmabuf,
.dpy_gl_cursor_position = gd_egl_cursor_position,
- .dpy_gl_release_dmabuf = gd_egl_release_dmabuf,
- .dpy_gl_update = gd_egl_scanout_flush,
+ .dpy_gl_update = gd_egl_flush,
+ .dpy_gl_release_dmabuf = gd_gl_release_dmabuf,
.dpy_has_dmabuf = gd_has_dmabuf,
};
diff --git a/util/qemu-openpty.c b/util/qemu-openpty.c
index eb17f5b0bc..427f43a769 100644
--- a/util/qemu-openpty.c
+++ b/util/qemu-openpty.c
@@ -80,10 +80,9 @@ static int openpty(int *amaster, int *aslave, char *name,
(termp != NULL && tcgetattr(sfd, termp) < 0))
goto err;
- if (amaster)
- *amaster = mfd;
- if (aslave)
- *aslave = sfd;
+ *amaster = mfd;
+ *aslave = sfd;
+
if (winp)
ioctl(sfd, TIOCSWINSZ, winp);