-rw-r--r--  .gitlab-ci.d/edk2.yml  1
-rw-r--r--  .gitlab-ci.d/opensbi.yml  1
-rw-r--r--  .gitlab-ci.yml  5
-rw-r--r--  MAINTAINERS  22
-rw-r--r--  accel/tcg/cpu-exec.c  35
-rw-r--r--  accel/tcg/tcg-accel-ops-mttcg.c  3
-rw-r--r--  accel/tcg/tcg-accel-ops-rr.c  2
-rw-r--r--  accel/tcg/tcg-accel-ops.c  8
-rw-r--r--  accel/tcg/tcg-accel-ops.h  1
-rw-r--r--  accel/tcg/tcg-runtime.c  6
-rw-r--r--  accel/tcg/translate-all.c  18
-rw-r--r--  block.c  8
-rw-r--r--  block/backup-top.c  10
-rw-r--r--  block/backup.c  1
-rw-r--r--  block/dirty-bitmap.c  13
-rw-r--r--  block/export/vhost-user-blk-server.c  150
-rw-r--r--  block/meson.build  3
-rw-r--r--  block/parallels-ext.c  300
-rw-r--r--  block/parallels.c  26
-rw-r--r--  block/parallels.h  7
-rw-r--r--  block/qcow2-bitmap.c  16
-rw-r--r--  blockdev.c  14
-rw-r--r--  blockjob.c  10
-rwxr-xr-x  configure  2
-rw-r--r--  default-configs/targets/tilegx-linux-user.mak  1
-rw-r--r--  docs/devel/clocks.rst  71
-rw-r--r--  docs/devel/index.rst  1
-rw-r--r--  docs/devel/qgraph.rst  568
-rw-r--r--  docs/devel/qtest.rst  8
-rw-r--r--  docs/interop/parallels.txt  28
-rw-r--r--  docs/system/arm/aspeed.rst  16
-rw-r--r--  docs/system/arm/mps2.rst  6
-rw-r--r--  docs/system/deprecated.rst  8
-rw-r--r--  docs/system/removed-features.rst  14
-rw-r--r--  docs/tools/qemu-storage-daemon.rst  68
-rw-r--r--  hw/9pfs/9p.c  1
-rw-r--r--  hw/adc/npcm7xx_adc.c  2
-rw-r--r--  hw/arm/Kconfig  10
-rw-r--r--  hw/arm/armsse.c  944
-rw-r--r--  hw/arm/armv7m.c  1
-rw-r--r--  hw/arm/aspeed_ast2600.c  51
-rw-r--r--  hw/arm/aspeed_soc.c  34
-rw-r--r--  hw/arm/mainstone.c  1
-rw-r--r--  hw/arm/mps2-tz.c  168
-rw-r--r--  hw/arm/xlnx-zcu102.c  1
-rw-r--r--  hw/arm/xlnx-zynqmp.c  21
-rw-r--r--  hw/arm/z2.c  1
-rw-r--r--  hw/block/Kconfig  3
-rw-r--r--  hw/block/meson.build  4
-rw-r--r--  hw/block/nvme-ns.c  38
-rw-r--r--  hw/block/nvme-ns.h  13
-rw-r--r--  hw/block/nvme-subsys.c  116
-rw-r--r--  hw/block/nvme-subsys.h  60
-rw-r--r--  hw/block/nvme.c  1439
-rw-r--r--  hw/block/nvme.h  63
-rw-r--r--  hw/block/tc58128.c  26
-rw-r--r--  hw/block/trace-events  21
-rw-r--r--  hw/block/vhost-user-blk.c  7
-rw-r--r--  hw/char/Kconfig  3
-rw-r--r--  hw/char/cadence_uart.c  4
-rw-r--r--  hw/char/ibex_uart.c  4
-rw-r--r--  hw/char/meson.build  2
-rw-r--r--  hw/char/pl011.c  5
-rw-r--r--  hw/core/clock.c  24
-rw-r--r--  hw/core/qdev-clock.c  8
-rw-r--r--  hw/dma/Kconfig  4
-rw-r--r--  hw/dma/meson.build  1
-rw-r--r--  hw/dma/sparc32_dma.c  4
-rw-r--r--  hw/dma/xlnx_csu_dma.c  745
-rw-r--r--  hw/i386/pc.c  1
-rw-r--r--  hw/intc/Kconfig  3
-rw-r--r--  hw/intc/meson.build  2
-rw-r--r--  hw/m68k/q800.c  4
-rw-r--r--  hw/mips/cps.c  2
-rw-r--r--  hw/mips/jazz.c  4
-rw-r--r--  hw/misc/Kconfig  9
-rw-r--r--  hw/misc/armsse-cpu-pwrctrl.c  149
-rw-r--r--  hw/misc/aspeed_lpc.c  486
-rw-r--r--  hw/misc/bcm2835_cprman.c  23
-rw-r--r--  hw/misc/iotkit-secctl.c  50
-rw-r--r--  hw/misc/iotkit-sysctl.c  522
-rw-r--r--  hw/misc/iotkit-sysinfo.c  51
-rw-r--r--  hw/misc/ivshmem.c  1
-rw-r--r--  hw/misc/meson.build  8
-rw-r--r--  hw/misc/mps2-fpgaio.c  52
-rw-r--r--  hw/misc/mps2-scc.c  15
-rw-r--r--  hw/misc/npcm7xx_clk.c  26
-rw-r--r--  hw/misc/npcm7xx_pwm.c  2
-rw-r--r--  hw/misc/trace-events  4
-rw-r--r--  hw/misc/zynq_slcr.c  5
-rw-r--r--  hw/pci-host/Kconfig  4
-rw-r--r--  hw/pci-host/meson.build  1
-rw-r--r--  hw/pci-host/sh_pci.c (renamed from hw/sh4/sh_pci.c)  0
-rw-r--r--  hw/ppc/ppc440_bamboo.c  1
-rw-r--r--  hw/ppc/prep.c  1
-rw-r--r--  hw/ppc/sam460ex.c  1
-rw-r--r--  hw/ppc/spapr_caps.c  1
-rw-r--r--  hw/ppc/spapr_pci_vfio.c  1
-rw-r--r--  hw/ppc/spapr_vio.c  1
-rw-r--r--  hw/ppc/virtex_ml507.c  1
-rw-r--r--  hw/riscv/spike.c  1
-rw-r--r--  hw/rx/rx62n.c  1
-rw-r--r--  hw/scsi/esp-pci.c  53
-rw-r--r--  hw/scsi/esp.c  975
-rw-r--r--  hw/scsi/trace-events  5
-rw-r--r--  hw/sh4/Kconfig  12
-rw-r--r--  hw/sh4/meson.build  1
-rw-r--r--  hw/sh4/sh7750_regs.h  24
-rw-r--r--  hw/sparc/sun4m.c  2
-rw-r--r--  hw/ssi/xilinx_spips.c  33
-rw-r--r--  hw/timer/Kconfig  10
-rw-r--r--  hw/timer/cmsdk-apb-dualtimer.c  5
-rw-r--r--  hw/timer/cmsdk-apb-timer.c  4
-rw-r--r--  hw/timer/meson.build  4
-rw-r--r--  hw/timer/npcm7xx_timer.c  6
-rw-r--r--  hw/timer/renesas_tmr.c  33
-rw-r--r--  hw/timer/sse-counter.c  474
-rw-r--r--  hw/timer/sse-timer.c  470
-rw-r--r--  hw/timer/trace-events  12
-rw-r--r--  hw/watchdog/cmsdk-apb-watchdog.c  5
-rw-r--r--  include/block/dirty-bitmap.h  2
-rw-r--r--  include/block/nvme.h  88
-rw-r--r--  include/elf.h  2
-rw-r--r--  include/exec/exec-all.h  22
-rw-r--r--  include/exec/poison.h  3
-rw-r--r--  include/exec/tb-lookup.h  26
-rw-r--r--  include/hw/arm/armsse-version.h  42
-rw-r--r--  include/hw/arm/armsse.h  40
-rw-r--r--  include/hw/arm/aspeed_soc.h  3
-rw-r--r--  include/hw/arm/xlnx-zynqmp.h  5
-rw-r--r--  include/hw/clock.h  63
-rw-r--r--  include/hw/core/cpu.h  2
-rw-r--r--  include/hw/dma/xlnx_csu_dma.h  52
-rw-r--r--  include/hw/misc/armsse-cpu-pwrctrl.h  40
-rw-r--r--  include/hw/misc/aspeed_lpc.h  47
-rw-r--r--  include/hw/misc/iotkit-secctl.h  2
-rw-r--r--  include/hw/misc/iotkit-sysctl.h  13
-rw-r--r--  include/hw/misc/iotkit-sysinfo.h  2
-rw-r--r--  include/hw/misc/mps2-fpgaio.h  2
-rw-r--r--  include/hw/qdev-clock.h  17
-rw-r--r--  include/hw/scsi/esp.h  52
-rw-r--r--  include/hw/sh4/sh.h  31
-rw-r--r--  include/hw/ssi/xilinx_spips.h  2
-rw-r--r--  include/hw/timer/sse-counter.h  105
-rw-r--r--  include/hw/timer/sse-timer.h  53
-rw-r--r--  linux-user/elfload.c  23
-rw-r--r--  linux-user/main.c  1
-rw-r--r--  linux-user/sh4/signal.c  8
-rw-r--r--  linux-user/syscall.c  18
-rw-r--r--  linux-user/syscall_defs.h  10
-rw-r--r--  linux-user/tilegx/cpu_loop.c  287
-rw-r--r--  linux-user/tilegx/signal.c  178
-rw-r--r--  linux-user/tilegx/sockbits.h  1
-rw-r--r--  linux-user/tilegx/syscall_nr.h  327
-rw-r--r--  linux-user/tilegx/target_cpu.h  44
-rw-r--r--  linux-user/tilegx/target_elf.h  14
-rw-r--r--  linux-user/tilegx/target_fcntl.h  11
-rw-r--r--  linux-user/tilegx/target_signal.h  23
-rw-r--r--  linux-user/tilegx/target_structs.h  46
-rw-r--r--  linux-user/tilegx/target_syscall.h  44
-rw-r--r--  linux-user/tilegx/termbits.h  1
-rw-r--r--  net/net.c  1
-rwxr-xr-x  scripts/ci/gitlab-pipeline-status  25
-rw-r--r--  scripts/mtest2make.py  2
-rw-r--r--  softmmu/cpu-timers.c  1
-rw-r--r--  softmmu/physmem.c  2
-rw-r--r--  storage-daemon/qemu-storage-daemon.c  56
-rw-r--r--  target/arm/cpu.c  335
-rw-r--r--  target/arm/cpu_tcg.c  318
-rw-r--r--  target/hexagon/macros.h  4
-rw-r--r--  target/hexagon/opcodes.c  1
-rw-r--r--  target/meson.build  1
-rw-r--r--  target/mips/cpu.c  2
-rw-r--r--  target/ppc/translate_init.c.inc  1
-rw-r--r--  target/sh4/cpu.h  11
-rw-r--r--  target/sh4/helper.c  101
-rw-r--r--  target/tilegx/cpu-param.h  17
-rw-r--r--  target/tilegx/cpu.c  182
-rw-r--r--  target/tilegx/cpu.h  160
-rw-r--r--  target/tilegx/helper.c  147
-rw-r--r--  target/tilegx/helper.h  23
-rw-r--r--  target/tilegx/meson.build  13
-rw-r--r--  target/tilegx/opcode_tilegx.h  1406
-rw-r--r--  target/tilegx/simd_helper.c  165
-rw-r--r--  target/tilegx/spr_def_64.h  212
-rw-r--r--  target/tilegx/translate.c  2437
-rw-r--r--  tcg/aarch64/tcg-target.c.inc  229
-rw-r--r--  tcg/tcg.c  29
-rw-r--r--  tcg/tci.c  526
-rw-r--r--  tcg/tci/tcg-target.c.inc  204
-rw-r--r--  tests/Makefile.include  5
-rwxr-xr-x  tests/qemu-iotests/030  4
-rwxr-xr-x  tests/qemu-iotests/040  4
-rw-r--r--  tests/qemu-iotests/051.pc.out  6
-rw-r--r--  tests/qemu-iotests/081.out  2
-rw-r--r--  tests/qemu-iotests/085.out  6
-rwxr-xr-x  tests/qemu-iotests/087  8
-rw-r--r--  tests/qemu-iotests/087.out  2
-rwxr-xr-x  tests/qemu-iotests/184  18
-rw-r--r--  tests/qemu-iotests/206.out  2
-rw-r--r--  tests/qemu-iotests/210.out  2
-rw-r--r--  tests/qemu-iotests/211.out  2
-rw-r--r--  tests/qemu-iotests/212.out  2
-rw-r--r--  tests/qemu-iotests/213.out  2
-rwxr-xr-x  tests/qemu-iotests/218  2
-rw-r--r--  tests/qemu-iotests/223.out  4
-rwxr-xr-x  tests/qemu-iotests/235  2
-rw-r--r--  tests/qemu-iotests/237.out  2
-rwxr-xr-x  tests/qemu-iotests/245  14
-rw-r--r--  tests/qemu-iotests/249.out  2
-rwxr-xr-x  tests/qemu-iotests/258  6
-rw-r--r--  tests/qemu-iotests/258.out  4
-rwxr-xr-x  tests/qemu-iotests/283  53
-rw-r--r--  tests/qemu-iotests/283.out  15
-rwxr-xr-x  tests/qemu-iotests/295  2
-rwxr-xr-x  tests/qemu-iotests/296  2
-rwxr-xr-x  tests/qemu-iotests/300  14
-rw-r--r--  tests/qemu-iotests/iotests.py  10
-rw-r--r--  tests/qemu-iotests/sample_images/parallels-with-bitmap.bz2  bin 0 -> 203 bytes
-rwxr-xr-x  tests/qemu-iotests/sample_images/parallels-with-bitmap.sh  51
-rwxr-xr-x  tests/qemu-iotests/tests/parallels-read-bitmap  55
-rw-r--r--  tests/qemu-iotests/tests/parallels-read-bitmap.out  6
-rw-r--r--  tests/qtest/libqos/libqtest.h  37
-rw-r--r--  tests/qtest/libqos/qgraph.h  450
-rw-r--r--  tests/qtest/libqtest.c  82
-rw-r--r--  tests/qtest/meson.build  1
-rw-r--r--  tests/qtest/sse-timer-test.c  240
-rw-r--r--  util/main-loop.c  1
-rw-r--r--  util/qemu-timer.c  1
229 files changed, 9727 insertions, 8598 deletions
diff --git a/.gitlab-ci.d/edk2.yml b/.gitlab-ci.d/edk2.yml
index e1e0452416..ba7280605c 100644
--- a/.gitlab-ci.d/edk2.yml
+++ b/.gitlab-ci.d/edk2.yml
@@ -25,6 +25,7 @@ docker-edk2:
build-edk2:
stage: build
+ needs: ['docker-edk2']
rules: # Only run this job when ...
- changes: # ... roms/edk2/ is modified (submodule updated)
- roms/edk2/*
diff --git a/.gitlab-ci.d/opensbi.yml b/.gitlab-ci.d/opensbi.yml
index 5b13047e2a..f66cd1d908 100644
--- a/.gitlab-ci.d/opensbi.yml
+++ b/.gitlab-ci.d/opensbi.yml
@@ -25,6 +25,7 @@ docker-opensbi:
build-opensbi:
stage: build
+ needs: ['docker-opensbi']
rules: # Only run this job when ...
- changes: # ... roms/opensbi/ is modified (submodule updated)
- roms/opensbi/*
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8b6d495288..07202f6ffb 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -76,6 +76,8 @@ include:
build-system-alpine:
<<: *native_build_job_definition
+ needs:
+ - job: amd64-alpine-container
variables:
IMAGE: alpine
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
@@ -497,8 +499,7 @@ build-deprecated:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-docs --disable-tools
MAKE_CHECK_ARGS: build-tcg
- TARGETS: ppc64abi32-linux-user tilegx-linux-user lm32-softmmu
- unicore32-softmmu
+ TARGETS: ppc64abi32-linux-user lm32-softmmu unicore32-softmmu
artifacts:
expire_in: 2 days
paths:
diff --git a/MAINTAINERS b/MAINTAINERS
index c0c36648ab..1e15dab8cd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -747,10 +747,17 @@ F: hw/misc/iotkit-sysctl.c
F: include/hw/misc/iotkit-sysctl.h
F: hw/misc/iotkit-sysinfo.c
F: include/hw/misc/iotkit-sysinfo.h
+F: hw/misc/armsse-cpu-pwrctrl.c
+F: include/hw/misc/armsse-cpu-pwrctrl.h
F: hw/misc/armsse-cpuid.c
F: include/hw/misc/armsse-cpuid.h
F: hw/misc/armsse-mhu.c
F: include/hw/misc/armsse-mhu.h
+F: hw/timer/sse-counter.c
+F: include/hw/timer/sse-counter.h
+F: hw/timer/sse-timer.c
+F: include/hw/timer/sse-timer.h
+F: tests/qtest/sse-timer-test.c
F: docs/system/arm/mps2.rst
Musca
@@ -1397,16 +1404,22 @@ R2D
M: Yoshinori Sato <ysato@users.sourceforge.jp>
R: Magnus Damm <magnus.damm@gmail.com>
S: Odd Fixes
+F: hw/char/sh_serial.c
F: hw/sh4/r2d.c
F: hw/intc/sh_intc.c
+F: hw/pci-host/sh_pci.c
+F: hw/timer/sh_timer.c
F: include/hw/sh4/sh_intc.h
Shix
M: Yoshinori Sato <ysato@users.sourceforge.jp>
R: Magnus Damm <magnus.damm@gmail.com>
S: Odd Fixes
+F: hw/block/tc58128.c
+F: hw/char/sh_serial.c
F: hw/sh4/shix.c
F: hw/intc/sh_intc.c
+F: hw/timer/sh_timer.c
F: include/hw/sh4/sh_intc.h
SPARC Machines
@@ -2601,6 +2614,7 @@ S: Maintained
F: softmmu/qtest.c
F: accel/qtest/
F: tests/qtest/
+F: docs/devel/qgraph.rst
X: tests/qtest/bios-tables-test*
Device Fuzzing
@@ -2896,9 +2910,12 @@ F: thunk.c
F: accel/tcg/user-exec*.c
BSD user
-S: Orphan
+M: Warner Losh <imp@bsdimp.com>
+R: Kyle Evans <kevans@freebsd.org>
+S: Maintained
F: bsd-user/
F: default-configs/targets/*-bsd-user.mak
+T: git https://github.com/qemu-bsd-user/qemu-bsd-user bsd-user-rebase-3.1
Linux user
M: Laurent Vivier <laurent@vivier.eu>
@@ -3128,10 +3145,13 @@ F: block/dmg.c
parallels
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Denis V. Lunev <den@openvz.org>
+M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
L: qemu-block@nongnu.org
S: Supported
F: block/parallels.c
+F: block/parallels-ext.c
F: docs/interop/parallels.txt
+T: git https://src.openvz.org/scm/~vsementsov/qemu.git parallels
qed
M: Stefan Hajnoczi <stefanha@redhat.com>
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 16e4fe3ccd..f62f12e717 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -28,7 +28,6 @@
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/compiler.h"
-#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
@@ -245,11 +244,11 @@ static void cpu_exec_exit(CPUState *cpu)
void cpu_exec_step_atomic(CPUState *cpu)
{
+ CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
- uint32_t cflags = 1;
- uint32_t cf_mask = cflags & CF_HASH_MASK;
+ uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -258,15 +257,15 @@ void cpu_exec_step_atomic(CPUState *cpu)
g_assert(!cpu->running);
cpu->running = true;
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
}
- /* Since we got here, we know that parallel_cpus must be true. */
- parallel_cpus = false;
cpu_exec_enter(cpu);
/* execute the generated code */
trace_exec_tb(tb, pc);
@@ -294,7 +293,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
* the execution.
*/
g_assert(cpu_in_exclusive_context(cpu));
- parallel_cpus = true;
cpu->running = false;
end_exclusive();
}
@@ -305,7 +303,7 @@ struct tb_desc {
CPUArchState *env;
tb_page_addr_t phys_page1;
uint32_t flags;
- uint32_t cf_mask;
+ uint32_t cflags;
uint32_t trace_vcpu_dstate;
};
@@ -319,7 +317,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
- (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
+ tb_cflags(tb) == desc->cflags) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
return true;
@@ -339,7 +337,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
- uint32_t cf_mask)
+ uint32_t cflags)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
@@ -348,7 +346,7 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
desc.env = (CPUArchState *)cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
- desc.cf_mask = cf_mask;
+ desc.cflags = cflags;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
@@ -356,7 +354,7 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
return NULL;
}
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
- h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
+ h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@@ -416,16 +414,19 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
static inline TranslationBlock *tb_find(CPUState *cpu,
TranslationBlock *last_tb,
- int tb_exit, uint32_t cf_mask)
+ int tb_exit, uint32_t cflags)
{
+ CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
/* We add the TB in the virtual pc hash table for the fast lookup */
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
@@ -491,7 +492,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (replay_has_exception()
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
/* Execute just one insn to trigger exception pending in the log */
- cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1;
+ cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
}
#endif
return false;
@@ -788,7 +789,7 @@ int cpu_exec(CPUState *cpu)
have CF_INVALID set, -1 is a convenient invalid value that
does not require tcg headers for cpu_common_reset. */
if (cflags == -1) {
- cflags = curr_cflags();
+ cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index 42973fb062..847d2079d2 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -114,8 +114,7 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
char thread_name[VCPU_THREAD_NAME_SIZE];
g_assert(tcg_enabled());
-
- parallel_cpus = (current_machine->smp.max_cpus > 1);
+ tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 4a66055e0d..018b54c508 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -269,7 +269,7 @@ void rr_start_vcpu_thread(CPUState *cpu)
static QemuThread *single_tcg_cpu_thread;
g_assert(tcg_enabled());
- parallel_cpus = false;
+ tcg_cpu_init_cflags(cpu, false);
if (!single_tcg_cpu_thread) {
cpu->thread = g_malloc0(sizeof(QemuThread));
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 6144d9df87..6cdcaa2855 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -41,6 +41,14 @@
/* common functionality among all TCG variants */
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
+{
+ uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
+ cflags |= parallel ? CF_PARALLEL : 0;
+ cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
+ cpu->tcg_cflags = cflags;
+}
+
void tcg_cpus_destroy(CPUState *cpu)
{
cpu_thread_signal_destroyed(cpu);
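A minimal sketch (not part of the patch) of the consumer side of this change: the include/exec/exec-all.h hunk is not shown in this excerpt, but with tcg_cpu_init_cflags() precomputing a per-vCPU value, curr_cflags(cpu) is presumably reduced to a plain field read along these lines:

    /* assumed reader side of cpu->tcg_cflags, set once by tcg_cpu_init_cflags() */
    static inline uint32_t curr_cflags(CPUState *cpu)
    {
        return cpu->tcg_cflags;
    }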
diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h
index 48130006de..6a5fcef889 100644
--- a/accel/tcg/tcg-accel-ops.h
+++ b/accel/tcg/tcg-accel-ops.h
@@ -17,5 +17,6 @@
void tcg_cpus_destroy(CPUState *cpu);
int tcg_cpus_exec(CPUState *cpu);
void tcg_handle_interrupt(CPUState *cpu, int mask);
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);
#endif /* TCG_CPUS_H */
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index d736f4ff55..49f5de37e8 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -27,10 +27,10 @@
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
-#include "exec/tb-lookup.h"
#include "disas/disas.h"
#include "exec/log.h"
#include "tcg/tcg.h"
+#include "exec/tb-lookup.h"
/* 32-bit helpers */
@@ -152,7 +152,9 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
target_ulong cs_base, pc;
uint32_t flags;
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags());
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index bbd919a393..f32df8b240 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -224,7 +224,6 @@ static void *l1_map[V_L1_MAX_SIZE];
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
-bool parallel_cpus;
static void page_table_config_init(void)
{
@@ -1311,7 +1310,7 @@ static bool tb_cmp(const void *ap, const void *bp)
return a->pc == b->pc &&
a->cs_base == b->cs_base &&
a->flags == b->flags &&
- (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
+ (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
a->page_addr[0] == b->page_addr[0] &&
a->page_addr[1] == b->page_addr[1];
@@ -1616,6 +1615,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
PageDesc *p;
uint32_t h;
tb_page_addr_t phys_pc;
+ uint32_t orig_cflags = tb_cflags(tb);
assert_memory_lock();
@@ -1626,7 +1626,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
@@ -1793,6 +1793,7 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
uint32_t h;
assert_memory_lock();
+ tcg_debug_assert(!(tb->cflags & CF_INVALID));
/*
* Add the TB to the page list, acquiring first the pages's locks.
@@ -1811,7 +1812,7 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
}
/* add in the hash table */
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
tb->trace_vcpu_dstate);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
@@ -1865,9 +1866,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
cflags = (cflags & ~CF_COUNT_MASK) | 1;
}
- cflags &= ~CF_CLUSTER_MASK;
- cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
-
max_insns = cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
@@ -2194,7 +2192,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
if (current_tb_modified) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
@@ -2362,7 +2360,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
return true;
}
#endif
@@ -2438,7 +2436,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* operations only (which execute after completion) so we don't
* double instrument the instruction.
*/
- cpu->cflags_next_tb = curr_cflags() | CF_MEMI_ONLY | CF_LAST_IO | n;
+ cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"cpu_io_recompile: rewound execution of TB to "
diff --git a/block.c b/block.c
index 933ff49b10..f377158c42 100644
--- a/block.c
+++ b/block.c
@@ -1440,7 +1440,7 @@ static void bdrv_assign_node_name(BlockDriverState *bs,
* Check for empty string or invalid characters, but not if it is
* generated (generated names use characters not available to the user)
*/
- error_setg(errp, "Invalid node name");
+ error_setg(errp, "Invalid node-name: '%s'", node_name);
return;
}
@@ -1453,7 +1453,7 @@ static void bdrv_assign_node_name(BlockDriverState *bs,
/* takes care of avoiding duplicates node names */
if (bdrv_find_node(node_name)) {
- error_setg(errp, "Duplicate node name");
+ error_setg(errp, "Duplicate nodes with node-name='%s'", node_name);
goto out;
}
@@ -5430,7 +5430,7 @@ BlockDriverState *bdrv_lookup_bs(const char *device,
}
}
- error_setg(errp, "Cannot find device=%s nor node_name=%s",
+ error_setg(errp, "Cannot find device=\'%s\' nor node-name=\'%s\'",
device ? device : "",
node_name ? node_name : "");
return NULL;
@@ -6750,7 +6750,7 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
AioContext *aio_context;
if (!to_replace_bs) {
- error_setg(errp, "Node name '%s' not found", node_name);
+ error_setg(errp, "Failed to find node with node-name='%s'", node_name);
return NULL;
}
diff --git a/block/backup-top.c b/block/backup-top.c
index d1253e1aa6..589e8b651d 100644
--- a/block/backup-top.c
+++ b/block/backup-top.c
@@ -45,6 +45,12 @@ static coroutine_fn int backup_top_co_preadv(
BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
+ BDRVBackupTopState *s = bs->opaque;
+
+ if (!s->active) {
+ return -EIO;
+ }
+
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
@@ -54,6 +60,10 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
BDRVBackupTopState *s = bs->opaque;
uint64_t off, end;
+ if (!s->active) {
+ return -EIO;
+ }
+
if (flags & BDRV_REQ_WRITE_UNCHANGED) {
return 0;
}
diff --git a/block/backup.c b/block/backup.c
index 94e6dcd72e..6cf2f974aa 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -103,6 +103,7 @@ static void backup_abort(Job *job)
static void backup_clean(Job *job)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
+ block_job_remove_all_bdrv(&s->common);
bdrv_backup_top_drop(s->backup_top);
}
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index 9b9cd71238..a0eaa28785 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -726,6 +726,19 @@ uint64_t bdrv_dirty_bitmap_serialization_align(const BdrvDirtyBitmap *bitmap)
return hbitmap_serialization_align(bitmap->bitmap);
}
+/* Return the disk size covered by a chunk of serialized bitmap data. */
+uint64_t bdrv_dirty_bitmap_serialization_coverage(int serialized_chunk_size,
+ const BdrvDirtyBitmap *bitmap)
+{
+ uint64_t granularity = bdrv_dirty_bitmap_granularity(bitmap);
+ uint64_t limit = granularity * (serialized_chunk_size << 3);
+
+ assert(QEMU_IS_ALIGNED(limit,
+ bdrv_dirty_bitmap_serialization_align(bitmap)));
+ return limit;
+}
+
+
void bdrv_dirty_bitmap_serialize_part(const BdrvDirtyBitmap *bitmap,
uint8_t *buf, uint64_t offset,
uint64_t bytes)
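A worked example (not part of the patch) of the formula in bdrv_dirty_bitmap_serialization_coverage(): with a 64 KiB bitmap granularity and 64 KiB serialized chunks, one chunk holds 64 KiB * 8 bits, each bit covering 64 KiB of disk, so each chunk covers 32 GiB:

    #include <stdint.h>

    static uint64_t example_coverage(void)
    {
        uint64_t granularity = 64 * 1024;       /* bytes of disk per bitmap bit */
        uint64_t chunk_size = 64 * 1024;        /* bytes of serialized bitmap data */
        return granularity * (chunk_size << 3); /* 34359738368 bytes = 32 GiB */
    }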
diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
index ab2c4d44c4..cb5d896b7b 100644
--- a/block/export/vhost-user-blk-server.c
+++ b/block/export/vhost-user-blk-server.c
@@ -20,8 +20,17 @@
#include "sysemu/block-backend.h"
#include "util/block-helpers.h"
+/*
+ * Sector units are 512 bytes regardless of the
+ * virtio_blk_config->blk_size value.
+ */
+#define VIRTIO_BLK_SECTOR_BITS 9
+#define VIRTIO_BLK_SECTOR_SIZE (1ull << VIRTIO_BLK_SECTOR_BITS)
+
enum {
VHOST_USER_BLK_NUM_QUEUES_DEFAULT = 1,
+ VHOST_USER_BLK_MAX_DISCARD_SECTORS = 32768,
+ VHOST_USER_BLK_MAX_WRITE_ZEROES_SECTORS = 32768,
};
struct virtio_blk_inhdr {
unsigned char status;
@@ -58,30 +67,102 @@ static void vu_blk_req_complete(VuBlkReq *req)
free(req);
}
+static bool vu_blk_sect_range_ok(VuBlkExport *vexp, uint64_t sector,
+ size_t size)
+{
+ uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
+ uint64_t total_sectors;
+
+ if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
+ return false;
+ }
+ if ((sector << VIRTIO_BLK_SECTOR_BITS) % vexp->blk_size) {
+ return false;
+ }
+ blk_get_geometry(vexp->export.blk, &total_sectors);
+ if (sector > total_sectors || nb_sectors > total_sectors - sector) {
+ return false;
+ }
+ return true;
+}
+
static int coroutine_fn
-vu_blk_discard_write_zeroes(BlockBackend *blk, struct iovec *iov,
+vu_blk_discard_write_zeroes(VuBlkExport *vexp, struct iovec *iov,
uint32_t iovcnt, uint32_t type)
{
+ BlockBackend *blk = vexp->export.blk;
struct virtio_blk_discard_write_zeroes desc;
- ssize_t size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc));
+ ssize_t size;
+ uint64_t sector;
+ uint32_t num_sectors;
+ uint32_t max_sectors;
+ uint32_t flags;
+ int bytes;
+
+ /* Only one desc is currently supported */
+ if (unlikely(iov_size(iov, iovcnt) > sizeof(desc))) {
+ return VIRTIO_BLK_S_UNSUPP;
+ }
+
+ size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc));
if (unlikely(size != sizeof(desc))) {
- error_report("Invalid size %zd, expect %zu", size, sizeof(desc));
- return -EINVAL;
+ error_report("Invalid size %zd, expected %zu", size, sizeof(desc));
+ return VIRTIO_BLK_S_IOERR;
+ }
+
+ sector = le64_to_cpu(desc.sector);
+ num_sectors = le32_to_cpu(desc.num_sectors);
+ flags = le32_to_cpu(desc.flags);
+ max_sectors = (type == VIRTIO_BLK_T_WRITE_ZEROES) ?
+ VHOST_USER_BLK_MAX_WRITE_ZEROES_SECTORS :
+ VHOST_USER_BLK_MAX_DISCARD_SECTORS;
+
+ /* This check ensures that 'bytes' fits in an int */
+ if (unlikely(num_sectors > max_sectors)) {
+ return VIRTIO_BLK_S_IOERR;
}
- uint64_t range[2] = { le64_to_cpu(desc.sector) << 9,
- le32_to_cpu(desc.num_sectors) << 9 };
- if (type == VIRTIO_BLK_T_DISCARD) {
- if (blk_co_pdiscard(blk, range[0], range[1]) == 0) {
- return 0;
+ bytes = num_sectors << VIRTIO_BLK_SECTOR_BITS;
+
+ if (unlikely(!vu_blk_sect_range_ok(vexp, sector, bytes))) {
+ return VIRTIO_BLK_S_IOERR;
+ }
+
+ /*
+ * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
+ * and write zeroes commands if any unknown flag is set.
+ */
+ if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
+ return VIRTIO_BLK_S_UNSUPP;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ int blk_flags = 0;
+
+ if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
+ blk_flags |= BDRV_REQ_MAY_UNMAP;
}
- } else if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
- if (blk_co_pwrite_zeroes(blk, range[0], range[1], 0) == 0) {
- return 0;
+
+ if (blk_co_pwrite_zeroes(blk, sector << VIRTIO_BLK_SECTOR_BITS,
+ bytes, blk_flags) == 0) {
+ return VIRTIO_BLK_S_OK;
+ }
+ } else if (type == VIRTIO_BLK_T_DISCARD) {
+ /*
+ * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
+ * discard commands if the unmap flag is set.
+ */
+ if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
+ return VIRTIO_BLK_S_UNSUPP;
+ }
+
+ if (blk_co_pdiscard(blk, sector << VIRTIO_BLK_SECTOR_BITS,
+ bytes) == 0) {
+ return VIRTIO_BLK_S_OK;
}
}
- return -EINVAL;
+ return VIRTIO_BLK_S_IOERR;
}
static void coroutine_fn vu_blk_virtio_process_req(void *opaque)
@@ -128,6 +209,8 @@ static void coroutine_fn vu_blk_virtio_process_req(void *opaque)
switch (type & ~VIRTIO_BLK_T_BARRIER) {
case VIRTIO_BLK_T_IN:
case VIRTIO_BLK_T_OUT: {
+ QEMUIOVector qiov;
+ int64_t offset;
ssize_t ret = 0;
bool is_write = type & VIRTIO_BLK_T_OUT;
req->sector_num = le64_to_cpu(req->out.sector);
@@ -137,13 +220,24 @@ static void coroutine_fn vu_blk_virtio_process_req(void *opaque)
break;
}
- int64_t offset = req->sector_num * vexp->blk_size;
- QEMUIOVector qiov;
if (is_write) {
qemu_iovec_init_external(&qiov, out_iov, out_num);
- ret = blk_co_pwritev(blk, offset, qiov.size, &qiov, 0);
} else {
qemu_iovec_init_external(&qiov, in_iov, in_num);
+ }
+
+ if (unlikely(!vu_blk_sect_range_ok(vexp,
+ req->sector_num,
+ qiov.size))) {
+ req->in->status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ offset = req->sector_num << VIRTIO_BLK_SECTOR_BITS;
+
+ if (is_write) {
+ ret = blk_co_pwritev(blk, offset, qiov.size, &qiov, 0);
+ } else {
ret = blk_co_preadv(blk, offset, qiov.size, &qiov, 0);
}
if (ret >= 0) {
@@ -170,19 +264,13 @@ static void coroutine_fn vu_blk_virtio_process_req(void *opaque)
}
case VIRTIO_BLK_T_DISCARD:
case VIRTIO_BLK_T_WRITE_ZEROES: {
- int rc;
-
if (!vexp->writable) {
req->in->status = VIRTIO_BLK_S_IOERR;
break;
}
- rc = vu_blk_discard_write_zeroes(blk, &elem->out_sg[1], out_num, type);
- if (rc == 0) {
- req->in->status = VIRTIO_BLK_S_OK;
- } else {
- req->in->status = VIRTIO_BLK_S_IOERR;
- }
+ req->in->status = vu_blk_discard_write_zeroes(vexp, out_iov, out_num,
+ type);
break;
}
default:
@@ -347,17 +435,21 @@ vu_blk_initialize_config(BlockDriverState *bs,
uint32_t blk_size,
uint16_t num_queues)
{
- config->capacity = cpu_to_le64(bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
+ config->capacity =
+ cpu_to_le64(bdrv_getlength(bs) >> VIRTIO_BLK_SECTOR_BITS);
config->blk_size = cpu_to_le32(blk_size);
config->size_max = cpu_to_le32(0);
config->seg_max = cpu_to_le32(128 - 2);
config->min_io_size = cpu_to_le16(1);
config->opt_io_size = cpu_to_le32(1);
config->num_queues = cpu_to_le16(num_queues);
- config->max_discard_sectors = cpu_to_le32(32768);
+ config->max_discard_sectors =
+ cpu_to_le32(VHOST_USER_BLK_MAX_DISCARD_SECTORS);
config->max_discard_seg = cpu_to_le32(1);
- config->discard_sector_alignment = cpu_to_le32(config->blk_size >> 9);
- config->max_write_zeroes_sectors = cpu_to_le32(32768);
+ config->discard_sector_alignment =
+ cpu_to_le32(blk_size >> VIRTIO_BLK_SECTOR_BITS);
+ config->max_write_zeroes_sectors
+ = cpu_to_le32(VHOST_USER_BLK_MAX_WRITE_ZEROES_SECTORS);
config->max_write_zeroes_seg = cpu_to_le32(1);
}
@@ -383,7 +475,7 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
if (vu_opts->has_logical_block_size) {
logical_block_size = vu_opts->logical_block_size;
} else {
- logical_block_size = BDRV_SECTOR_SIZE;
+ logical_block_size = VIRTIO_BLK_SECTOR_SIZE;
}
check_block_size(exp->id, "logical-block-size", logical_block_size,
&local_err);
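A condensed sketch (not part of the patch) of the arithmetic behind vu_blk_sect_range_ok(): virtio-blk requests address 512-byte sectors regardless of the exported logical-block-size, so byte offsets are derived with a shift by 9 and then checked for alignment and bounds (the BDRV_REQUEST_MAX_SECTORS cap is omitted here):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool request_in_range(uint64_t sector, size_t bytes,
                                 uint32_t blk_size, uint64_t total_sectors)
    {
        uint64_t offset = sector << 9;      /* VIRTIO_BLK_SECTOR_BITS */
        uint64_t nb_sectors = bytes >> 9;   /* BDRV_SECTOR_BITS is also 9 */

        return (offset % blk_size) == 0 &&
               sector <= total_sectors &&
               nb_sectors <= total_sectors - sector;
    }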
diff --git a/block/meson.build b/block/meson.build
index eeaefe5809..d21990ec95 100644
--- a/block/meson.build
+++ b/block/meson.build
@@ -57,7 +57,8 @@ block_ss.add(when: 'CONFIG_QED', if_true: files(
'qed-table.c',
'qed.c',
))
-block_ss.add(when: [libxml2, 'CONFIG_PARALLELS'], if_true: files('parallels.c'))
+block_ss.add(when: [libxml2, 'CONFIG_PARALLELS'],
+ if_true: files('parallels.c', 'parallels-ext.c'))
block_ss.add(when: 'CONFIG_WIN32', if_true: files('file-win32.c', 'win32-aio.c'))
block_ss.add(when: 'CONFIG_POSIX', if_true: [files('file-posix.c'), coref, iokit])
block_ss.add(when: libiscsi, if_true: files('iscsi-opts.c'))
diff --git a/block/parallels-ext.c b/block/parallels-ext.c
new file mode 100644
index 0000000000..e0dd0975c6
--- /dev/null
+++ b/block/parallels-ext.c
@@ -0,0 +1,300 @@
+/*
+ * Support of Parallels Format Extension. It's a part of Parallels format
+ * driver.
+ *
+ * Copyright (c) 2021 Virtuozzo International GmbH
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "block/block_int.h"
+#include "parallels.h"
+#include "crypto/hash.h"
+#include "qemu/uuid.h"
+
+#define PARALLELS_FORMAT_EXTENSION_MAGIC 0xAB234CEF23DCEA87ULL
+
+#define PARALLELS_END_OF_FEATURES_MAGIC 0x0ULL
+#define PARALLELS_DIRTY_BITMAP_FEATURE_MAGIC 0x20385FAE252CB34AULL
+
+typedef struct ParallelsFormatExtensionHeader {
+ uint64_t magic; /* PARALLELS_FORMAT_EXTENSION_MAGIC */
+ uint8_t check_sum[16];
+} QEMU_PACKED ParallelsFormatExtensionHeader;
+
+typedef struct ParallelsFeatureHeader {
+ uint64_t magic;
+ uint64_t flags;
+ uint32_t data_size;
+ uint32_t _unused;
+} QEMU_PACKED ParallelsFeatureHeader;
+
+typedef struct ParallelsDirtyBitmapFeature {
+ uint64_t size;
+ uint8_t id[16];
+ uint32_t granularity;
+ uint32_t l1_size;
+ /* L1 table follows */
+} QEMU_PACKED ParallelsDirtyBitmapFeature;
+
+/* Given L1 table read bitmap data from the image and populate @bitmap */
+static int parallels_load_bitmap_data(BlockDriverState *bs,
+ const uint64_t *l1_table,
+ uint32_t l1_size,
+ BdrvDirtyBitmap *bitmap,
+ Error **errp)
+{
+ BDRVParallelsState *s = bs->opaque;
+ int ret = 0;
+ uint64_t offset, limit;
+ uint64_t bm_size = bdrv_dirty_bitmap_size(bitmap);
+ uint8_t *buf = NULL;
+ uint64_t i, tab_size =
+ DIV_ROUND_UP(bdrv_dirty_bitmap_serialization_size(bitmap, 0, bm_size),
+ s->cluster_size);
+
+ if (tab_size != l1_size) {
+ error_setg(errp, "Bitmap table size %" PRIu32 " does not correspond "
+ "to bitmap size and cluster size. Expected %" PRIu64,
+ l1_size, tab_size);
+ return -EINVAL;
+ }
+
+ buf = qemu_blockalign(bs, s->cluster_size);
+ limit = bdrv_dirty_bitmap_serialization_coverage(s->cluster_size, bitmap);
+ for (i = 0, offset = 0; i < tab_size; ++i, offset += limit) {
+ uint64_t count = MIN(bm_size - offset, limit);
+ uint64_t entry = l1_table[i];
+
+ if (entry == 0) {
+ /* No need to deserialize zeros because @bitmap is cleared. */
+ continue;
+ }
+
+ if (entry == 1) {
+ bdrv_dirty_bitmap_deserialize_ones(bitmap, offset, count, false);
+ } else {
+ ret = bdrv_pread(bs->file, entry << BDRV_SECTOR_BITS, buf,
+ s->cluster_size);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "Failed to read bitmap data cluster");
+ goto finish;
+ }
+ bdrv_dirty_bitmap_deserialize_part(bitmap, buf, offset, count,
+ false);
+ }
+ }
+ ret = 0;
+
+ bdrv_dirty_bitmap_deserialize_finish(bitmap);
+
+finish:
+ qemu_vfree(buf);
+
+ return ret;
+}
+
+/*
+ * @data buffer (of @data_size size) is the Dirty bitmaps feature which
+ * consists of ParallelsDirtyBitmapFeature followed by L1 table.
+ */
+static BdrvDirtyBitmap *parallels_load_bitmap(BlockDriverState *bs,
+ uint8_t *data,
+ size_t data_size,
+ Error **errp)
+{
+ int ret;
+ ParallelsDirtyBitmapFeature bf;
+ g_autofree uint64_t *l1_table = NULL;
+ BdrvDirtyBitmap *bitmap;
+ QemuUUID uuid;
+ char uuidstr[UUID_FMT_LEN + 1];
+ int i;
+
+ if (data_size < sizeof(bf)) {
+ error_setg(errp, "Too small Bitmap Feature area in Parallels Format "
+ "Extension: %zu bytes, expected at least %zu bytes",
+ data_size, sizeof(bf));
+ return NULL;
+ }
+ memcpy(&bf, data, sizeof(bf));
+ bf.size = le64_to_cpu(bf.size);
+ bf.granularity = le32_to_cpu(bf.granularity) << BDRV_SECTOR_BITS;
+ bf.l1_size = le32_to_cpu(bf.l1_size);
+ data += sizeof(bf);
+ data_size -= sizeof(bf);
+
+ if (bf.size != bs->total_sectors) {
+ error_setg(errp, "Bitmap size (in sectors) %" PRId64 " differs from "
+ "disk size in sectors %" PRId64, bf.size, bs->total_sectors);
+ return NULL;
+ }
+
+ if (bf.l1_size * sizeof(uint64_t) > data_size) {
+ error_setg(errp, "Bitmaps feature corrupted: l1 table exceeds "
+ "extension data_size");
+ return NULL;
+ }
+
+ memcpy(&uuid, bf.id, sizeof(uuid));
+ qemu_uuid_unparse(&uuid, uuidstr);
+ bitmap = bdrv_create_dirty_bitmap(bs, bf.granularity, uuidstr, errp);
+ if (!bitmap) {
+ return NULL;
+ }
+
+ l1_table = g_new(uint64_t, bf.l1_size);
+ for (i = 0; i < bf.l1_size; i++, data += sizeof(uint64_t)) {
+ l1_table[i] = ldq_le_p(data);
+ }
+
+ ret = parallels_load_bitmap_data(bs, l1_table, bf.l1_size, bitmap, errp);
+ if (ret < 0) {
+ bdrv_release_dirty_bitmap(bitmap);
+ return NULL;
+ }
+
+ /* We support format extension only for RO parallels images. */
+ assert(!(bs->open_flags & BDRV_O_RDWR));
+ bdrv_dirty_bitmap_set_readonly(bitmap, true);
+
+ return bitmap;
+}
+
+static int parallels_parse_format_extension(BlockDriverState *bs,
+ uint8_t *ext_cluster, Error **errp)
+{
+ BDRVParallelsState *s = bs->opaque;
+ int ret;
+ int remaining = s->cluster_size;
+ uint8_t *pos = ext_cluster;
+ ParallelsFormatExtensionHeader eh;
+ g_autofree uint8_t *hash = NULL;
+ size_t hash_len = 0;
+ GSList *bitmaps = NULL, *el;
+
+ memcpy(&eh, pos, sizeof(eh));
+ eh.magic = le64_to_cpu(eh.magic);
+ pos += sizeof(eh);
+ remaining -= sizeof(eh);
+
+ if (eh.magic != PARALLELS_FORMAT_EXTENSION_MAGIC) {
+ error_setg(errp, "Wrong parallels Format Extension magic: 0x%" PRIx64
+ ", expected: 0x%llx", eh.magic,
+ PARALLELS_FORMAT_EXTENSION_MAGIC);
+ goto fail;
+ }
+
+ ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALG_MD5, (char *)pos, remaining,
+ &hash, &hash_len, errp);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ if (hash_len != sizeof(eh.check_sum) ||
+ memcmp(hash, eh.check_sum, sizeof(eh.check_sum)) != 0) {
+ error_setg(errp, "Wrong checksum in Format Extension header. Format "
+ "extension is corrupted.");
+ goto fail;
+ }
+
+ while (true) {
+ ParallelsFeatureHeader fh;
+ BdrvDirtyBitmap *bitmap;
+
+ if (remaining < sizeof(fh)) {
+ error_setg(errp, "Can not read feature header, as remaining bytes "
+ "(%d) in Format Extension is less than Feature header "
+ "size (%zu)", remaining, sizeof(fh));
+ goto fail;
+ }
+
+ memcpy(&fh, pos, sizeof(fh));
+ pos += sizeof(fh);
+ remaining -= sizeof(fh);
+
+ fh.magic = le64_to_cpu(fh.magic);
+ fh.flags = le64_to_cpu(fh.flags);
+ fh.data_size = le32_to_cpu(fh.data_size);
+
+ if (fh.flags) {
+ error_setg(errp, "Flags for extension feature are unsupported");
+ goto fail;
+ }
+
+ if (fh.data_size > remaining) {
+ error_setg(errp, "Feature data_size exceedes Format Extension "
+ "cluster");
+ goto fail;
+ }
+
+ switch (fh.magic) {
+ case PARALLELS_END_OF_FEATURES_MAGIC:
+ return 0;
+
+ case PARALLELS_DIRTY_BITMAP_FEATURE_MAGIC:
+ bitmap = parallels_load_bitmap(bs, pos, fh.data_size, errp);
+ if (!bitmap) {
+ goto fail;
+ }
+ bitmaps = g_slist_append(bitmaps, bitmap);
+ break;
+
+ default:
+ error_setg(errp, "Unknown feature: 0x%" PRIu64, fh.magic);
+ goto fail;
+ }
+
+ pos = ext_cluster + QEMU_ALIGN_UP(pos + fh.data_size - ext_cluster, 8);
+ }
+
+fail:
+ for (el = bitmaps; el; el = el->next) {
+ bdrv_release_dirty_bitmap(el->data);
+ }
+ g_slist_free(bitmaps);
+
+ return -EINVAL;
+}
+
+int parallels_read_format_extension(BlockDriverState *bs,
+ int64_t ext_off, Error **errp)
+{
+ BDRVParallelsState *s = bs->opaque;
+ int ret;
+ uint8_t *ext_cluster = qemu_blockalign(bs, s->cluster_size);
+
+ assert(ext_off > 0);
+
+ ret = bdrv_pread(bs->file, ext_off, ext_cluster, s->cluster_size);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to read Format Extension cluster");
+ goto out;
+ }
+
+ ret = parallels_parse_format_extension(bs, ext_cluster, errp);
+
+out:
+ qemu_vfree(ext_cluster);
+
+ return ret;
+}
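An illustration (not part of the patch) of the QEMU_ALIGN_UP() step at the end of the feature loop: feature data is padded so that every ParallelsFeatureHeader starts on an 8-byte boundary within the extension cluster:

    #include <stdint.h>

    /* e.g. feature data ending 60 bytes into the cluster -> next header at 64 */
    static uint64_t next_header_offset(uint64_t data_end_offset)
    {
        return (data_end_offset + 7) & ~7ULL;
    }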
diff --git a/block/parallels.c b/block/parallels.c
index 3c22dfdc9d..6ebad2a2bb 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -29,6 +29,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "block/block_int.h"
#include "block/qdict.h"
@@ -421,7 +422,6 @@ static int coroutine_fn parallels_co_check(BlockDriverState *bs,
int ret;
uint32_t i;
bool flush_bat = false;
- int cluster_size = s->tracks << BDRV_SECTOR_BITS;
size = bdrv_getlength(bs->file->bs);
if (size < 0) {
@@ -472,7 +472,7 @@ static int coroutine_fn parallels_co_check(BlockDriverState *bs,
high_off = off;
}
- if (prev_off != 0 && (prev_off + cluster_size) != off) {
+ if (prev_off != 0 && (prev_off + s->cluster_size) != off) {
res->bfi.fragmented_clusters++;
}
prev_off = off;
@@ -487,10 +487,10 @@ static int coroutine_fn parallels_co_check(BlockDriverState *bs,
}
}
- res->image_end_offset = high_off + cluster_size;
+ res->image_end_offset = high_off + s->cluster_size;
if (size > res->image_end_offset) {
int64_t count;
- count = DIV_ROUND_UP(size - res->image_end_offset, cluster_size);
+ count = DIV_ROUND_UP(size - res->image_end_offset, s->cluster_size);
fprintf(stderr, "%s space leaked at the end of the image %" PRId64 "\n",
fix & BDRV_FIX_LEAKS ? "Repairing" : "ERROR",
size - res->image_end_offset);
@@ -771,6 +771,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
ret = -EFBIG;
goto fail;
}
+ s->cluster_size = s->tracks << BDRV_SECTOR_BITS;
s->bat_size = le32_to_cpu(ph.bat_entries);
if (s->bat_size > INT_MAX / sizeof(uint32_t)) {
@@ -843,6 +844,23 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
goto fail_options;
}
+ if (ph.ext_off) {
+ if (flags & BDRV_O_RDWR) {
+ /*
+ * It's unsafe to open image RW if there is an extension (as we
+ * don't support it). But parallels driver in QEMU historically
+ * ignores the extension, so print warning and don't care.
+ */
+ warn_report("Format Extension ignored in RW mode");
+ } else {
+ ret = parallels_read_format_extension(
+ bs, le64_to_cpu(ph.ext_off) << BDRV_SECTOR_BITS, errp);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+ }
+
if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_INACTIVE)) {
s->header->inuse = cpu_to_le32(HEADER_INUSE_MAGIC);
ret = parallels_update_header(bs);
diff --git a/block/parallels.h b/block/parallels.h
index 5aa101cfc8..f22f43f988 100644
--- a/block/parallels.h
+++ b/block/parallels.h
@@ -48,7 +48,8 @@ typedef struct ParallelsHeader {
uint64_t nb_sectors;
uint32_t inuse;
uint32_t data_off;
- char padding[12];
+ uint32_t flags;
+ uint64_t ext_off;
} QEMU_PACKED ParallelsHeader;
typedef enum ParallelsPreallocMode {
@@ -79,9 +80,13 @@ typedef struct BDRVParallelsState {
ParallelsPreallocMode prealloc_mode;
unsigned int tracks;
+ unsigned int cluster_size;
unsigned int off_multiplier;
Error *migration_blocker;
} BDRVParallelsState;
+int parallels_read_format_extension(BlockDriverState *bs,
+ int64_t ext_off, Error **errp);
+
#endif
diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c
index f417f9ccb1..8fb4731551 100644
--- a/block/qcow2-bitmap.c
+++ b/block/qcow2-bitmap.c
@@ -278,18 +278,6 @@ static int free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
return 0;
}
-/* Return the disk size covered by a single qcow2 cluster of bitmap data. */
-static uint64_t bytes_covered_by_bitmap_cluster(const BDRVQcow2State *s,
- const BdrvDirtyBitmap *bitmap)
-{
- uint64_t granularity = bdrv_dirty_bitmap_granularity(bitmap);
- uint64_t limit = granularity * (s->cluster_size << 3);
-
- assert(QEMU_IS_ALIGNED(limit,
- bdrv_dirty_bitmap_serialization_align(bitmap)));
- return limit;
-}
-
/* load_bitmap_data
* @bitmap_table entries must satisfy specification constraints.
* @bitmap must be cleared */
@@ -312,7 +300,7 @@ static int load_bitmap_data(BlockDriverState *bs,
}
buf = g_malloc(s->cluster_size);
- limit = bytes_covered_by_bitmap_cluster(s, bitmap);
+ limit = bdrv_dirty_bitmap_serialization_coverage(s->cluster_size, bitmap);
for (i = 0, offset = 0; i < tab_size; ++i, offset += limit) {
uint64_t count = MIN(bm_size - offset, limit);
uint64_t entry = bitmap_table[i];
@@ -1309,7 +1297,7 @@ static uint64_t *store_bitmap_data(BlockDriverState *bs,
}
buf = g_malloc(s->cluster_size);
- limit = bytes_covered_by_bitmap_cluster(s, bitmap);
+ limit = bdrv_dirty_bitmap_serialization_coverage(s->cluster_size, bitmap);
assert(DIV_ROUND_UP(bm_size, limit) == tb_size);
offset = 0;
diff --git a/blockdev.c b/blockdev.c
index 65884a2826..5cc7c7effe 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -57,7 +57,6 @@
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/arch_init.h"
-#include "sysemu/qtest.h"
#include "sysemu/runstate.h"
#include "sysemu/replay.h"
#include "qemu/cutils.h"
@@ -1515,13 +1514,13 @@ static void external_snapshot_prepare(BlkActionState *common,
s->has_snapshot_node_name ? s->snapshot_node_name : NULL;
if (node_name && !snapshot_node_name) {
- error_setg(errp, "New overlay node name missing");
+ error_setg(errp, "New overlay node-name missing");
goto out;
}
if (snapshot_node_name &&
bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
- error_setg(errp, "New overlay node name already in use");
+ error_setg(errp, "New overlay node-name already in use");
goto out;
}
@@ -3596,13 +3595,14 @@ void qmp_x_blockdev_reopen(BlockdevOptions *options, Error **errp)
/* Check for the selected node name */
if (!options->has_node_name) {
- error_setg(errp, "Node name not specified");
+ error_setg(errp, "node-name not specified");
goto fail;
}
bs = bdrv_find_node(options->node_name);
if (!bs) {
- error_setg(errp, "Cannot find node named '%s'", options->node_name);
+ error_setg(errp, "Failed to find node with node-name='%s'",
+ options->node_name);
goto fail;
}
@@ -3633,7 +3633,7 @@ void qmp_blockdev_del(const char *node_name, Error **errp)
bs = bdrv_find_node(node_name);
if (!bs) {
- error_setg(errp, "Cannot find node %s", node_name);
+ error_setg(errp, "Failed to find node with node-name='%s'", node_name);
return;
}
if (bdrv_has_blk(bs)) {
@@ -3756,7 +3756,7 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
bs = bdrv_find_node(node_name);
if (!bs) {
- error_setg(errp, "Cannot find node %s", node_name);
+ error_setg(errp, "Failed to find node with node-name='%s'", node_name);
return;
}
diff --git a/blockjob.c b/blockjob.c
index 9e0ffd8dc9..207e8c7fd9 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -320,8 +320,12 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
info->status = job->job.status;
info->auto_finalize = job->job.auto_finalize;
info->auto_dismiss = job->job.auto_dismiss;
- info->has_error = job->job.ret != 0;
- info->error = job->job.ret ? g_strdup(strerror(-job->job.ret)) : NULL;
+ if (job->job.ret) {
+ info->has_error = true;
+ info->error = job->job.err ?
+ g_strdup(error_get_pretty(job->job.err)) :
+ g_strdup(strerror(-job->job.ret));
+ }
return info;
}
@@ -358,7 +362,7 @@ static void block_job_event_completed(Notifier *n, void *opaque)
}
if (job->job.ret < 0) {
- msg = strerror(-job->job.ret);
+ msg = error_get_pretty(job->job.err);
}
qapi_event_send_block_job_completed(job_type(&job->job),
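A sketch (not part of the patch) of the message-selection rule that block_job_query() now follows: prefer the stored Error object and fall back to the errno string only when no Error was recorded:

    static const char *job_error_message(BlockJob *job)
    {
        if (!job->job.ret) {
            return NULL;                           /* job succeeded, no error */
        }
        return job->job.err ? error_get_pretty(job->job.err)
                            : strerror(-job->job.ret);
    }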
diff --git a/configure b/configure
index 34fccaa2ba..f7d022a5db 100755
--- a/configure
+++ b/configure
@@ -1655,7 +1655,7 @@ if [ "$ARCH" = "unknown" ]; then
fi
default_target_list=""
-deprecated_targets_list=ppc64abi32-linux-user,tilegx-linux-user,lm32-softmmu,unicore32-softmmu
+deprecated_targets_list=ppc64abi32-linux-user,lm32-softmmu,unicore32-softmmu
deprecated_features=""
mak_wilds=""
diff --git a/default-configs/targets/tilegx-linux-user.mak b/default-configs/targets/tilegx-linux-user.mak
deleted file mode 100644
index 10480e74c9..0000000000
--- a/default-configs/targets/tilegx-linux-user.mak
+++ /dev/null
@@ -1 +0,0 @@
-TARGET_ARCH=tilegx
diff --git a/docs/devel/clocks.rst b/docs/devel/clocks.rst
index c54bbb8240..956bd147ea 100644
--- a/docs/devel/clocks.rst
+++ b/docs/devel/clocks.rst
@@ -80,11 +80,12 @@ Adding clocks to a device must be done during the init method of the Device
instance.
To add an input clock to a device, the function ``qdev_init_clock_in()``
-must be used. It takes the name, a callback and an opaque parameter
-for the callback (this will be explained in a following section).
+must be used. It takes the name, a callback, an opaque parameter
+for the callback, and a mask of the events for which the callback should
+be called (this will be explained in a following section).
Output is simpler; only the name is required. Typically::
- qdev_init_clock_in(DEVICE(dev), "clk_in", clk_in_callback, dev);
+ qdev_init_clock_in(DEVICE(dev), "clk_in", clk_in_callback, dev, ClockUpdate);
qdev_init_clock_out(DEVICE(dev), "clk_out");
Both functions return the created Clock pointer, which should be saved in the
@@ -113,7 +114,7 @@ output.
* callback for the input clock (see "Callback on input clock
* change" section below for more information).
*/
- static void clk_in_callback(void *opaque);
+ static void clk_in_callback(void *opaque, ClockEvent event);
/*
* static array describing clocks:
@@ -124,7 +125,7 @@ output.
* the clk_out field of a MyDeviceState structure.
*/
static const ClockPortInitArray mydev_clocks = {
- QDEV_CLOCK_IN(MyDeviceState, clk_in, clk_in_callback),
+ QDEV_CLOCK_IN(MyDeviceState, clk_in, clk_in_callback, ClockUpdate),
QDEV_CLOCK_OUT(MyDeviceState, clk_out),
QDEV_CLOCK_END
};
@@ -153,6 +154,47 @@ nothing else to do. This value will be propagated to other clocks when
connecting the clocks together and devices will fetch the right value during
the first reset.
+Clock callbacks
+---------------
+
+You can give a clock a callback function in several ways:
+
+ * by passing it as an argument to ``qdev_init_clock_in()``
+ * as an argument to the ``QDEV_CLOCK_IN()`` macro initializing an
+ array to be passed to ``qdev_init_clocks()``
+ * by directly calling the ``clock_set_callback()`` function
+
+The callback function must be of this type:
+
+.. code-block:: c
+
+ typedef void ClockCallback(void *opaque, ClockEvent event);
+
+The ``opaque`` argument is the pointer passed to ``qdev_init_clock_in()``
+or ``clock_set_callback()``; for ``qdev_init_clocks()`` it is the
+``dev`` device pointer.
+
+The ``event`` argument specifies why the callback has been called.
+When you register the callback you specify a mask of ClockEvent values
+that you are interested in. The callback will only be called for those
+events.
+
+The events currently supported are:
+
+ * ``ClockPreUpdate`` : called when the input clock's period is about to
+ update. This is useful if the device needs to do some action for
+ which it needs to know the old value of the clock period. During
+ this callback, Clock API functions like ``clock_get()`` or
+ ``clock_ticks_to_ns()`` will use the old period.
+ * ``ClockUpdate`` : called after the input clock's period has changed.
+ During this callback, Clock API functions like ``clock_ticks_to_ns()``
+ will use the new period.
+
+Note that a clock only has one callback: it is not possible to register
+different functions for different events. You must register a single
+callback which listens for all of the events you are interested in,
+and use the ``event`` argument to identify which event has happened.
+
Retrieving clocks from a device
-------------------------------
@@ -231,7 +273,7 @@ object during device instance init. For example:
.. code-block:: c
clk = qdev_init_clock_in(DEVICE(dev), "clk-in", clk_in_callback,
- dev);
+ dev, ClockUpdate);
/* set initial value to 10ns / 100MHz */
clock_set_ns(clk, 10);
@@ -267,11 +309,12 @@ next lowest integer. This implies some inaccuracy due to the rounding,
so be cautious about using it in calculations.
It is also possible to register a callback on clock frequency changes.
-Here is an example:
+Here is an example, which assumes that ``clock_callback`` has been
+specified as the callback for the ``ClockUpdate`` event:
.. code-block:: c
- void clock_callback(void *opaque) {
+ void clock_callback(void *opaque, ClockEvent event) {
MyDeviceState *s = (MyDeviceState *) opaque;
/*
* 'opaque' is the argument passed to qdev_init_clock_in();
@@ -317,6 +360,18 @@ rather than simply passing it to a QEMUTimer function like
``timer_mod_ns()`` then you should be careful to avoid overflow
in those calculations, of course.)
+Obtaining tick counts
+---------------------
+
+For calculations where you need to know the number of ticks in
+a given duration, use ``clock_ns_to_ticks()``. This function handles
+possible non-whole-number-of-nanoseconds periods and avoids
+potential rounding errors. It will return '0' if the clock is stopped
+(i.e. it has period zero). If the inputs imply a tick count that
+overflows a 64-bit value (a very long duration for a clock with a
+very short period) the output value is truncated, so effectively
+the 64-bit output wraps around.
+
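A consolidated example (not part of the patch) combining the callback events and the tick-count helper documented above; MyDeviceState and mydev_recalc_timer() are hypothetical:

    static void clk_callback(void *opaque, ClockEvent event)
    {
        MyDeviceState *s = opaque;

        if (event == ClockPreUpdate) {
            /* the old period is still in effect here */
            s->ticks_before_change = clock_ns_to_ticks(s->clk, 1000000);
        } else if (event == ClockUpdate) {
            /* the new period is in effect from now on */
            mydev_recalc_timer(s);
        }
    }

    /* registration, asking for both events */
    s->clk = qdev_init_clock_in(DEVICE(s), "clk-in", clk_callback, s,
                                ClockPreUpdate | ClockUpdate);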
Changing a clock period
-----------------------
diff --git a/docs/devel/index.rst b/docs/devel/index.rst
index ae664da00c..7c424ea6d7 100644
--- a/docs/devel/index.rst
+++ b/docs/devel/index.rst
@@ -12,6 +12,7 @@ Contents:
.. toctree::
:maxdepth: 2
+ :includehidden:
build-system
style
diff --git a/docs/devel/qgraph.rst b/docs/devel/qgraph.rst
new file mode 100644
index 0000000000..a9aff167ad
--- /dev/null
+++ b/docs/devel/qgraph.rst
@@ -0,0 +1,568 @@
+.. _qgraph:
+
+========================================
+Qtest Driver Framework
+========================================
+
+In order to test a specific driver, plain libqos tests need to
+take care of booting QEMU with the right machine and devices.
+This makes each test "hardcoded" for a specific configuration, reducing
+the possible coverage that it can reach.
+
+For example, the sdhci device is supported on both x86_64 and ARM boards,
+therefore a generic sdhci test should test all machines and drivers that
+support that device.
+Using only libqos APIs, the test has to manually take care of
+covering all the setups and building the correct command line.
+
+This also introduces backward compatibility issues: if a device/driver
+command line name is changed, all tests that use it will no longer work
+properly and will need to be adjusted.
+
+The aim of qgraph is to create a graph of drivers, machines and tests such that
+a test aimed at a certain driver does not have to care about
+booting the right QEMU machine, picking the right device, building the command line
+and so on. Instead, it only defines what type of device it is testing
+(interface in qgraph terms) and the framework takes care of
+covering all supported types of devices and machine architectures.
+
+Following the above example, an interface would be ``sdhci``,
+so the sdhci-test should only care of linking its qgraph node with
+that interface. In this way, if the command line of a sdhci driver
+is changed, only the respective qgraph driver node has to be adjusted.
+
+The graph is composed of nodes that represent machines, drivers and tests,
+and of edges that define the relationships between them (``CONSUMES``,
+``PRODUCES``, and ``CONTAINS``).
+
+
+Nodes
+^^^^^^
+
+A node can be of four types:
+
+- **QNODE_MACHINE**: for example ``arm/raspi2``
+- **QNODE_DRIVER**: for example ``generic-sdhci``
+- **QNODE_INTERFACE**: for example ``sdhci`` (interface for all ``-sdhci``
+ drivers).
+ An interface is not explicitly created: it is automatically
+ instantiated when a node consumes or produces it.
+ An interface is simply a struct that abstracts the various drivers
+ for the same type of device. It offers an API to the nodes that
+ use it (the "consume" relation in qgraph terms), and that API is
+ backed by the drivers that implement it (the "produce" relation).
+- **QNODE_TEST**: for example ``sdhci-test``. A test consumes an interface
+ and tests the functions provided by it.
+
+Notes for the nodes:
+
+- QNODE_MACHINE: each machine struct must have a ``QGuestAllocator`` and
+ implement ``get_driver()`` to return the allocator mapped to the interface
+ "memory". The function can also return ``NULL`` if the allocator
+ is not set.
+- QNODE_DRIVER: driver names must be unique, and machines and nodes
+ planned to be "consumed" by other nodes must match QEMU
+ driver names, otherwise they won't be discovered.
+
+Edges
+^^^^^^
+
+An edge relation between two nodes (drivers or machines) `X` and `Y` can be
+one of the following (a short registration sketch follows this list):
+
+- ``X CONSUMES Y``: `Y` can be plugged into `X`
+- ``X PRODUCES Y``: `X` provides the interface `Y`
+- ``X CONTAINS Y``: `Y` is part of the `X` component
+
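+A minimal registration sketch of these three edge types, using the same
+calls as the full examples later in this document (the node names here are
+placeholders)::
+
+ /* X --contains--> Y */
+ qos_node_contains("X", "Y", NULL);
+
+ /* X --produces--> Y, where Y is an interface */
+ qos_node_produces("X", "Y");
+
+ /* X --consumes--> Y, where Y is an interface */
+ qos_node_consumes("X", "Y", NULL);
+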
+Execution steps
+^^^^^^^^^^^^^^^
+
+The basic framework steps are the following:
+
+- All nodes and edges are created in their respective
+ machine/driver/test files
+- The framework starts QEMU and asks for a list of available devices
+ and machines (note that only machines and "consumed" nodes are mapped
+ 1:1 with QEMU devices)
+- The framework walks the graph starting from the available machines and
+ performs a Depth First Search for tests
+- Once a test is found, the path is walked again and all drivers are
+ allocated accordingly and the final interface is passed to the test
+- The test is executed
+- Unused objects are cleaned and the path discovery is continued
+
+Depending on the QEMU binary used, only some drivers/machines will be
+available and only tests that are reached by them will be executed.
+
+Creating a new driver and its interface
+"""""""""""""""""""""""""""""""""""""""""
+
+Here we continue the ``sdhci`` use case, with the following scenario:
+
+- ``sdhci-test`` aims to test the ``read[q,w], writeq`` functions
+ offered by the ``sdhci`` drivers.
+- The current ``sdhci`` device is supported by both ``x86_64/pc`` and ``ARM``
+ (in this example we focus on the ``arm-raspi2``) machines.
+- QEMU offers 2 types of drivers: ``QSDHCI_MemoryMapped`` for ``ARM`` and
+ ``QSDHCI_PCI`` for ``x86_64/pc``. Both implement the
+ ``read[q,w], writeq`` functions.
+
+In order to implement such a scenario in qgraph, the test developer needs to:
+
+- Create the ``x86_64/pc`` machine node. This machine uses the
+ ``pci-bus`` architecture so it ``contains`` a PCI driver,
+ ``pci-bus-pc``. The actual path is
+
+ ``x86_64/pc --contains--> i440FX-pcihost --contains-->
+ pci-bus-pc --produces--> pci-bus``.
+
+ For the sake of this example,
+ we do not focus on the PCI interface implementation.
+- Create the ``sdhci-pci`` driver node, representing ``QSDHCI_PCI``.
+ The driver uses the PCI bus (and its API),
+ so it must ``consume`` the ``pci-bus`` generic interface (which abstracts
+ all the pci drivers available)
+
+ ``sdhci-pci --consumes--> pci-bus``
+- Create an ``arm/raspi2`` machine node. This machine ``contains``
+ a ``generic-sdhci`` memory mapped ``sdhci`` driver node, representing
+ ``QSDHCI_MemoryMapped``.
+
+ ``arm/raspi2 --contains--> generic-sdhci``
+- Create the ``sdhci`` interface node. This interface offers the
+ functions that are shared by all ``sdhci`` devices.
+ The interface is produced by ``sdhci-pci`` and ``generic-sdhci``,
+ the available architecture-specific drivers.
+
+ ``sdhci-pci --produces--> sdhci``
+
+ ``generic-sdhci --produces--> sdhci``
+- Create the ``sdhci-test`` test node. The test ``consumes`` the
+ ``sdhci`` interface, using its API. It doesn't need to look at
+ the supported machines or drivers.
+
+ ``sdhci-test --consumes--> sdhci``
+
+``arm-raspi2`` machine, simplified from
+``tests/qtest/libqos/arm-raspi2-machine.c``::
+
+ #include "qgraph.h"
+
+ struct QRaspi2Machine {
+ QOSGraphObject obj;
+ QGuestAllocator alloc;
+ QSDHCI_MemoryMapped sdhci;
+ };
+
+ static void *raspi2_get_driver(void *object, const char *interface)
+ {
+ QRaspi2Machine *machine = object;
+ if (!g_strcmp0(interface, "memory")) {
+ return &machine->alloc;
+ }
+
+ fprintf(stderr, "%s not present in arm/raspi2\n", interface);
+ g_assert_not_reached();
+ }
+
+ static QOSGraphObject *raspi2_get_device(void *obj,
+ const char *device)
+ {
+ QRaspi2Machine *machine = obj;
+ if (!g_strcmp0(device, "generic-sdhci")) {
+ return &machine->sdhci.obj;
+ }
+
+ fprintf(stderr, "%s not present in arm/raspi2\n", device);
+ g_assert_not_reached();
+ }
+
+ static void *qos_create_machine_arm_raspi2(QTestState *qts)
+ {
+ QRaspi2Machine *machine = g_new0(QRaspi2Machine, 1);
+
+ alloc_init(&machine->alloc, ...);
+
+ /* Get node(s) contained inside (CONTAINS) */
+ machine->obj.get_device = raspi2_get_device;
+
+ /* Get node(s) produced (PRODUCES) */
+ machine->obj.get_driver = raspi2_get_driver;
+
+ /* free the object */
+ machine->obj.destructor = raspi2_destructor;
+ qos_init_sdhci_mm(&machine->sdhci, ...);
+ return &machine->obj;
+ }
+
+ static void raspi2_register_nodes(void)
+ {
+ /* arm/raspi2 --contains--> generic-sdhci */
+ qos_node_create_machine("arm/raspi2",
+ qos_create_machine_arm_raspi2);
+ qos_node_contains("arm/raspi2", "generic-sdhci", NULL);
+ }
+
+ libqos_init(raspi2_register_nodes);
+
+``x86_64/pc`` machine, simplified from
+``tests/qtest/libqos/x86_64_pc-machine.c``::
+
+ #include "qgraph.h"
+
+ struct i440FX_pcihost {
+ QOSGraphObject obj;
+ QPCIBusPC pci;
+ };
+
+ struct QX86PCMachine {
+ QOSGraphObject obj;
+ QGuestAllocator alloc;
+ i440FX_pcihost bridge;
+ };
+
+ /* i440FX_pcihost */
+
+ static QOSGraphObject *i440FX_host_get_device(void *obj,
+ const char *device)
+ {
+ i440FX_pcihost *host = obj;
+ if (!g_strcmp0(device, "pci-bus-pc")) {
+ return &host->pci.obj;
+ }
+ fprintf(stderr, "%s not present in i440FX-pcihost\n", device);
+ g_assert_not_reached();
+ }
+
+ /* x86_64/pc machine */
+
+ static void *pc_get_driver(void *object, const char *interface)
+ {
+ QX86PCMachine *machine = object;
+ if (!g_strcmp0(interface, "memory")) {
+ return &machine->alloc;
+ }
+
+ fprintf(stderr, "%s not present in x86_64/pc\n", interface);
+ g_assert_not_reached();
+ }
+
+ static QOSGraphObject *pc_get_device(void *obj, const char *device)
+ {
+ QX86PCMachine *machine = obj;
+ if (!g_strcmp0(device, "i440FX-pcihost")) {
+ return &machine->bridge.obj;
+ }
+
+ fprintf(stderr, "%s not present in x86_64/pc\n", device);
+ g_assert_not_reached();
+ }
+
+ static void *qos_create_machine_pc(QTestState *qts)
+ {
+ QX86PCMachine *machine = g_new0(QX86PCMachine, 1);
+
+ /* Get node(s) contained inside (CONTAINS) */
+ machine->obj.get_device = pc_get_device;
+
+ /* Get node(s) produced (PRODUCES) */
+ machine->obj.get_driver = pc_get_driver;
+
+ /* free the object */
+ machine->obj.destructor = pc_destructor;
+ pc_alloc_init(&machine->alloc, qts, ALLOC_NO_FLAGS);
+
+ /* Get node(s) contained inside (CONTAINS) */
+ machine->bridge.obj.get_device = i440FX_host_get_device;
+
+ return &machine->obj;
+ }
+
+ static void pc_machine_register_nodes(void)
+ {
+ /* x86_64/pc --contains--> i440FX-pcihost --contains-->
+ * pci-bus-pc [--produces--> pci-bus (in pci.h)] */
+ qos_node_create_machine("x86_64/pc", qos_create_machine_pc);
+ qos_node_contains("x86_64/pc", "i440FX-pcihost", NULL);
+
+ /* contained drivers don't need a constructor,
+ * they will be init by the parent */
+ qos_node_create_driver("i440FX-pcihost", NULL);
+ qos_node_contains("i440FX-pcihost", "pci-bus-pc", NULL);
+ }
+
+ libqos_init(pc_machine_register_nodes);
+
+``sdhci`` taken from ``tests/qtest/libqos/sdhci.c``::
+
+ /* Interface node, offers the sdhci API */
+ struct QSDHCI {
+ uint16_t (*readw)(QSDHCI *s, uint32_t reg);
+ uint64_t (*readq)(QSDHCI *s, uint32_t reg);
+ void (*writeq)(QSDHCI *s, uint32_t reg, uint64_t val);
+ /* other fields */
+ };
+
+ /* Memory Mapped implementation of QSDHCI */
+ struct QSDHCI_MemoryMapped {
+ QOSGraphObject obj;
+ QSDHCI sdhci;
+ /* other driver-specific fields */
+ };
+
+ /* PCI implementation of QSDHCI */
+ struct QSDHCI_PCI {
+ QOSGraphObject obj;
+ QSDHCI sdhci;
+ /* other driver-specific fields */
+ };
+
+ /* Memory mapped implementation of QSDHCI */
+
+ static void *sdhci_mm_get_driver(void *obj, const char *interface)
+ {
+ QSDHCI_MemoryMapped *smm = obj;
+ if (!g_strcmp0(interface, "sdhci")) {
+ return &smm->sdhci;
+ }
+ fprintf(stderr, "%s not present in generic-sdhci\n", interface);
+ g_assert_not_reached();
+ }
+
+ void qos_init_sdhci_mm(QSDHCI_MemoryMapped *sdhci, QTestState *qts,
+ uint32_t addr, QSDHCIProperties *common)
+ {
+ /* Get node contained inside (CONTAINS) */
+ sdhci->obj.get_driver = sdhci_mm_get_driver;
+
+ /* SDHCI interface API */
+ sdhci->sdhci.readw = sdhci_mm_readw;
+ sdhci->sdhci.readq = sdhci_mm_readq;
+ sdhci->sdhci.writeq = sdhci_mm_writeq;
+ sdhci->qts = qts;
+ }
+
+ /* PCI implementation of QSDHCI */
+
+ static void *sdhci_pci_get_driver(void *object,
+ const char *interface)
+ {
+ QSDHCI_PCI *spci = object;
+ if (!g_strcmp0(interface, "sdhci")) {
+ return &spci->sdhci;
+ }
+
+ fprintf(stderr, "%s not present in sdhci-pci\n", interface);
+ g_assert_not_reached();
+ }
+
+ static void *sdhci_pci_create(void *pci_bus,
+ QGuestAllocator *alloc,
+ void *addr)
+ {
+ QSDHCI_PCI *spci = g_new0(QSDHCI_PCI, 1);
+ QPCIBus *bus = pci_bus;
+ uint64_t barsize;
+
+ qpci_device_init(&spci->dev, bus, addr);
+
+ /* SDHCI interface API */
+ spci->sdhci.readw = sdhci_pci_readw;
+ spci->sdhci.readq = sdhci_pci_readq;
+ spci->sdhci.writeq = sdhci_pci_writeq;
+
+ /* Get node(s) produced (PRODUCES) */
+ spci->obj.get_driver = sdhci_pci_get_driver;
+
+ spci->obj.start_hw = sdhci_pci_start_hw;
+ spci->obj.destructor = sdhci_destructor;
+ return &spci->obj;
+ }
+
+ static void qsdhci_register_nodes(void)
+ {
+ QOSGraphEdgeOptions opts = {
+ .extra_device_opts = "addr=04.0",
+ };
+
+ /* generic-sdhci */
+ /* generic-sdhci --produces--> sdhci */
+ qos_node_create_driver("generic-sdhci", NULL);
+ qos_node_produces("generic-sdhci", "sdhci");
+
+ /* sdhci-pci */
+ /* sdhci-pci --produces--> sdhci
+ * sdhci-pci --consumes--> pci-bus */
+ qos_node_create_driver("sdhci-pci", sdhci_pci_create);
+ qos_node_produces("sdhci-pci", "sdhci");
+ qos_node_consumes("sdhci-pci", "pci-bus", &opts);
+ }
+
+ libqos_init(qsdhci_register_nodes);
+
+In the above example, all possible types of relations are created::
+
+ x86_64/pc --contains--> i440FX-pcihost --contains--> pci-bus-pc
+ |
+ sdhci-pci --consumes--> pci-bus <--produces--+
+ |
+ +--produces--+
+ |
+ v
+ sdhci
+ ^
+ |
+ +--produces-- +
+ |
+ arm/raspi2 --contains--> generic-sdhci
+
+or, inverting the ``consumes`` edges into ``consumed_by``::
+
+ x86_64/pc --contains--> i440FX-pcihost --contains--> pci-bus-pc
+ |
+ sdhci-pci <--consumed by-- pci-bus <--produces--+
+ |
+ +--produces--+
+ |
+ v
+ sdhci
+ ^
+ |
+ +--produces-- +
+ |
+ arm/raspi2 --contains--> generic-sdhci
+
+Adding a new test
+"""""""""""""""""
+
+Given the above setup, adding a new test is very simple.
+``sdhci-test``, taken from ``tests/qtest/sdhci-test.c``::
+
+ static void check_capab_sdma(QSDHCI *s, bool supported)
+ {
+ uint64_t capab, capab_sdma;
+
+ capab = s->readq(s, SDHC_CAPAB);
+ capab_sdma = FIELD_EX64(capab, SDHC_CAPAB, SDMA);
+ g_assert_cmpuint(capab_sdma, ==, supported);
+ }
+
+ static void test_registers(void *obj, void *data,
+ QGuestAllocator *alloc)
+ {
+ QSDHCI *s = obj;
+
+ /* example test */
+ check_capab_sdma(s, s->props.capab.sdma);
+ }
+
+ static void register_sdhci_test(void)
+ {
+ /* sdhci-test --consumes--> sdhci */
+ qos_add_test("registers", "sdhci", test_registers, NULL);
+ }
+
+ libqos_init(register_sdhci_test);
+
+Here a new test is created; it consumes the ``sdhci`` interface node,
+creating a valid path from both machines to a test.
+The final graph will look like this::
+
+ x86_64/pc --contains--> i440FX-pcihost --contains--> pci-bus-pc
+ |
+ sdhci-pci --consumes--> pci-bus <--produces--+
+ |
+ +--produces--+
+ |
+ v
+ sdhci <--consumes-- sdhci-test
+ ^
+ |
+ +--produces-- +
+ |
+ arm/raspi2 --contains--> generic-sdhci
+
+or, inverting the ``consumes`` edges into ``consumed_by``::
+
+ x86_64/pc --contains--> i440FX-pcihost --contains--> pci-bus-pc
+ |
+ sdhci-pci <--consumed by-- pci-bus <--produces--+
+ |
+ +--produces--+
+ |
+ v
+ sdhci --consumed by--> sdhci-test
+ ^
+ |
+ +--produces-- +
+ |
+ arm/raspi2 --contains--> generic-sdhci
+
+Assuming the binary is
+``QTEST_QEMU_BINARY=./qemu-system-x86_64``,
+a valid test path will be:
+``/x86_64/pc/i440FX-pcihost/pci-bus-pc/pci-bus/sdhci-pci/sdhci/sdhci-test``
+
+and for the binary ``QTEST_QEMU_BINARY=./qemu-system-arm``:
+
+``/arm/raspi2/generic-sdhci/sdhci/sdhci-test``
+
+Additional examples are also in ``test-qgraph.c``.
+
+Command line:
+""""""""""""""
+
+The command line is built using node names and the optional arguments
+passed by the user when building the edges.
+
+There are three types of command line arguments:
+
+- ``in node`` : created from the node name. For example, machines will
+ have ``-M <machine>`` added to their command line, while devices get
+ ``-device <device>``. This is done automatically by the framework.
+- ``after node`` : added as an additional argument after the node name.
+ This argument is optional and is set when creating edges,
+ via the ``after_cmd_line`` and ``extra_edge_opts`` parameters
+ of ``QOSGraphEdgeOptions``.
+ The framework automatically adds a comma before ``extra_edge_opts``,
+ because it adds attributes after the destination node pointed to by
+ the edge containing these options, and automatically adds a space
+ before ``after_cmd_line``, because the latter adds an additional
+ device, not an attribute.
+- ``before node`` : added as an additional argument before the node name.
+ This argument is optional and is set when creating edges,
+ via the ``before_cmd_line`` parameter of
+ ``QOSGraphEdgeOptions``. This attribute adds arguments before
+ the destination node pointed to by the edge containing these
+ options. It is helpful for options that are not representable
+ as nodes, such as ``-fsdev`` or ``-netdev``.
+
+While the command line arguments attached to edges are always used, not all
+node names are used in every path walk: the contained or produced nodes are
+already added by QEMU, so only "consumed" nodes contribute their name to
+the command line. Also, nodes that have ``{ "abstract" : true }`` as a QMP
+attribute lose their command line, since they are not proper devices to be
+added in QEMU.
+
+Example::
+
+ QOSGraphEdgeOptions opts = {
+ .before_cmd_line = "-drive id=drv0,if=none,file=null-co://,"
+ "file.read-zeroes=on,format=raw",
+ .after_cmd_line = "-device scsi-hd,bus=vs0.0,drive=drv0",
+ .extra_device_opts = "id=vs0",
+ };
+
+ qos_node_create_driver("virtio-scsi-device",
+ virtio_scsi_device_create);
+ qos_node_consumes("virtio-scsi-device", "virtio-bus", &opts);
+
+This will produce the following command line:
+``-drive id=drv0,if=none,file=null-co://,file.read-zeroes=on,format=raw -device virtio-scsi-device,id=vs0 -device scsi-hd,bus=vs0.0,drive=drv0``
+
+Qgraph API reference
+^^^^^^^^^^^^^^^^^^^^
+
+.. kernel-doc:: tests/qtest/libqos/qgraph.h
diff --git a/docs/devel/qtest.rst b/docs/devel/qtest.rst
index 97c5a75626..c3dceb6c8a 100644
--- a/docs/devel/qtest.rst
+++ b/docs/devel/qtest.rst
@@ -2,6 +2,11 @@
QTest Device Emulation Testing Framework
========================================
+.. toctree::
+ :hidden:
+
+ qgraph
+
QTest is a device emulation testing framework. It can be very useful to test
device models; it could also control certain aspects of QEMU (such as virtual
clock stepping), with a special purpose "qtest" protocol. Refer to
@@ -24,6 +29,9 @@ On top of libqtest, a higher level library, ``libqos``, was created to
encapsulate common tasks of device drivers, such as memory management and
communicating with system buses or devices. Many virtual device tests use
libqos instead of directly calling into libqtest.
+Libqos also offers the Qgraph API to increase the coverage of each test and
+to automate the setup of QEMU command line arguments and devices.
+Refer to :ref:`qgraph` for an explanation of Qgraph and its API.
Steps to add a new QTest case are:
diff --git a/docs/interop/parallels.txt b/docs/interop/parallels.txt
index f15bf35bd1..bb3fadf369 100644
--- a/docs/interop/parallels.txt
+++ b/docs/interop/parallels.txt
@@ -208,21 +208,25 @@ of its data area are:
28 - 31: l1_size
The number of entries in the L1 table of the bitmap.
- variable: l1_table (8 * l1_size bytes)
- L1 offset table (in bytes)
+ variable: L1 offset table (l1_table), size: 8 * l1_size bytes
-A dirty bitmap is stored using a one-level structure for the mapping to host
-clusters - an L1 table.
+The dirty bitmap described by this feature extension is stored in a set of
+clusters inside the Parallels image file. The offsets of these clusters are
+saved in the L1 offset table specified by the feature extension. Each L1 table
+entry is a 64-bit integer as described below:
-Given an offset in bytes into the bitmap data, the offset in bytes into the
-image file can be obtained as follows:
+Given an offset in bytes into the bitmap data, the corresponding L1 entry is
- offset = l1_table[offset / cluster_size] + (offset % cluster_size)
+ l1_table[offset / cluster_size]
-If an L1 table entry is 0, the corresponding cluster of the bitmap is assumed
-to be zero.
+If an L1 table entry is 0, all bits in the corresponding cluster of the bitmap
+are assumed to be 0.
-If an L1 table entry is 1, the corresponding cluster of the bitmap is assumed
-to have all bits set.
+If an L1 table entry is 1, all bits in the corresponding cluster of the bitmap
+are assumed to be 1.
-If an L1 table entry is not 0 or 1, it allocates a cluster from the data area.
+If an L1 table entry is not 0 or 1, it contains the corresponding cluster
+offset (in 512b sectors). Given an offset in bytes into the bitmap data the
+offset in bytes into the image file can be obtained as follows:
+
+ offset = l1_table[offset / cluster_size] * 512 + (offset % cluster_size)
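+
+For illustration only (this sketch is not part of the format), the lookup
+can be written in C roughly as follows, assuming the L1 table has been
+loaded into l1_table[] and cluster_size is the bitmap cluster size in
+bytes:
+
+ uint64_t entry = l1_table[offset / cluster_size];
+
+ if (entry == 0) {
+ /* the whole bitmap cluster reads as all zeroes */
+ } else if (entry == 1) {
+ /* the whole bitmap cluster reads as all ones */
+ } else {
+ uint64_t file_offset = entry * 512 + (offset % cluster_size);
+ /* bitmap data for this cluster starts at file_offset */
+ }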
diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst
index 690bada784..d1fb8f25b3 100644
--- a/docs/system/arm/aspeed.rst
+++ b/docs/system/arm/aspeed.rst
@@ -48,6 +48,7 @@ Supported devices
* UART
* Ethernet controllers
* Front LEDs (PCA9552 on I2C bus)
+ * LPC Peripheral Controller (a subset of subdevices are supported)
Missing devices
@@ -56,7 +57,6 @@ Missing devices
* Coprocessor support
* ADC (out of tree implementation)
* PWM and Fan Controller
- * LPC Bus Controller
* Slave GPIO Controller
* Super I/O Controller
* Hash/Crypto Engine
@@ -72,18 +72,22 @@ Missing devices
Boot options
------------
-The Aspeed machines can be started using the -kernel option to load a
-Linux kernel or from a firmware image which can be downloaded from the
-OpenPOWER jenkins :
+The Aspeed machines can be started using the ``-kernel`` option to
+load a Linux kernel or from a firmware image. Images can be downloaded from
+the OpenBMC jenkins :
- https://openpower.xyz/
+ https://jenkins.openbmc.org/job/ci-openbmc/lastSuccessfulBuild/distro=ubuntu,label=docker-builder
+
+or directly from the OpenBMC GitHub release repository :
+
+ https://github.com/openbmc/openbmc/releases
The image should be attached as an MTD drive. Run :
.. code-block:: bash
$ qemu-system-arm -M romulus-bmc -nic user \
- -drive file=flash-romulus,format=raw,if=mtd -nographic
+ -drive file=obmc-phosphor-image-romulus.static.mtd,format=raw,if=mtd -nographic
Options specific to Aspeed machines are :
diff --git a/docs/system/arm/mps2.rst b/docs/system/arm/mps2.rst
index 601ccea15c..f83b151787 100644
--- a/docs/system/arm/mps2.rst
+++ b/docs/system/arm/mps2.rst
@@ -1,5 +1,5 @@
-Arm MPS2 and MPS3 boards (``mps2-an385``, ``mps2-an386``, ``mps2-an500``, ``mps2-an505``, ``mps2-an511``, ``mps2-an521``, ``mps3-an524``)
-=========================================================================================================================================
+Arm MPS2 and MPS3 boards (``mps2-an385``, ``mps2-an386``, ``mps2-an500``, ``mps2-an505``, ``mps2-an511``, ``mps2-an521``, ``mps3-an524``, ``mps3-an547``)
+=========================================================================================================================================================
These board models all use Arm M-profile CPUs.
@@ -27,6 +27,8 @@ QEMU models the following FPGA images:
Dual Cortex-M33 as documented in Arm Application Note AN521
``mps3-an524``
Dual Cortex-M33 on an MPS3, as documented in Arm Application Note AN524
+``mps3-an547``
+ Cortex-M55 on an MPS3, as documented in Arm Application Note AN547
Differences between QEMU and real hardware:
diff --git a/docs/system/deprecated.rst b/docs/system/deprecated.rst
index ecff6bf8c6..5e3a31c123 100644
--- a/docs/system/deprecated.rst
+++ b/docs/system/deprecated.rst
@@ -410,14 +410,6 @@ it out of sheepdog volumes into an alternative storage backend.
linux-user mode CPUs
--------------------
-``tilegx`` CPUs (since 5.1.0)
-'''''''''''''''''''''''''''''
-
-The ``tilegx`` guest CPU support (which was only implemented in
-linux-user mode) is deprecated and will be removed in a future version
-of QEMU. Support for this CPU was removed from the upstream Linux
-kernel in 2018, and has also been dropped from glibc.
-
``ppc64abi32`` CPUs (since 5.2.0)
'''''''''''''''''''''''''''''''''
diff --git a/docs/system/removed-features.rst b/docs/system/removed-features.rst
index c8481cafbd..4dcf4f924c 100644
--- a/docs/system/removed-features.rst
+++ b/docs/system/removed-features.rst
@@ -142,6 +142,20 @@ This machine has been renamed ``fuloong2e``.
These machine types were very old and likely could not be used for live
migration from old QEMU versions anymore. Use a newer machine type instead.
+
+linux-user mode CPUs
+--------------------
+
+``tilegx`` CPUs (removed in 6.0)
+''''''''''''''''''''''''''''''''
+
+The ``tilegx`` guest CPU support has been removed without replacement. It was
+only implemented in linux-user mode, but support for this CPU was removed from
+the upstream Linux kernel in 2018, and it has also been dropped from glibc, so
+there is no new Linux development taking place with this architecture. For
+running the old binaries, you can use older versions of QEMU.
+
+
Related binaries
----------------
diff --git a/docs/tools/qemu-storage-daemon.rst b/docs/tools/qemu-storage-daemon.rst
index c05b3d3811..086493ebb3 100644
--- a/docs/tools/qemu-storage-daemon.rst
+++ b/docs/tools/qemu-storage-daemon.rst
@@ -69,7 +69,7 @@ Standard options:
a description of character device properties. A common character device
definition configures a UNIX domain socket::
- --chardev socket,id=char1,path=/tmp/qmp.sock,server=on,wait=off
+ --chardev socket,id=char1,path=/var/run/qsd-qmp.sock,server=on,wait=off
.. option:: --export [type=]nbd,id=<id>,node-name=<node-name>[,name=<export-name>][,writable=on|off][,bitmap=<name>]
--export [type=]vhost-user-blk,id=<id>,node-name=<node-name>,addr.type=unix,addr.path=<socket-path>[,writable=on|off][,logical-block-size=<block-size>][,num-queues=<num-queues>]
@@ -80,8 +80,9 @@ Standard options:
requests for modifying data (the default is off).
The ``nbd`` export type requires ``--nbd-server`` (see below). ``name`` is
- the NBD export name. ``bitmap`` is the name of a dirty bitmap reachable from
- the block node, so the NBD client can use NBD_OPT_SET_META_CONTEXT with the
+ the NBD export name (if not specified, it defaults to the given
+ ``node-name``). ``bitmap`` is the name of a dirty bitmap reachable from the
+ block node, so the NBD client can use NBD_OPT_SET_META_CONTEXT with the
metadata context name "qemu:dirty-bitmap:BITMAP" to inspect the bitmap.
The ``vhost-user-blk`` export type takes a vhost-user socket address on which
@@ -101,14 +102,17 @@ Standard options:
.. option:: --nbd-server addr.type=inet,addr.host=<host>,addr.port=<port>[,tls-creds=<id>][,tls-authz=<id>][,max-connections=<n>]
--nbd-server addr.type=unix,addr.path=<path>[,tls-creds=<id>][,tls-authz=<id>][,max-connections=<n>]
+ --nbd-server addr.type=fd,addr.str=<fd>[,tls-creds=<id>][,tls-authz=<id>][,max-connections=<n>]
is a server for NBD exports. Both TCP and UNIX domain sockets are supported.
- TLS encryption can be configured using ``--object`` tls-creds-* and authz-*
- secrets (see below).
+ A listen socket can be provided via file descriptor passing (see Examples
+ below). TLS encryption can be configured using ``--object`` tls-creds-* and
+ authz-* secrets (see below).
- To configure an NBD server on UNIX domain socket path ``/tmp/nbd.sock``::
+ To configure an NBD server on UNIX domain socket path
+ ``/var/run/qsd-nbd.sock``::
- --nbd-server addr.type=unix,addr.path=/tmp/nbd.sock
+ --nbd-server addr.type=unix,addr.path=/var/run/qsd-nbd.sock
.. option:: --object help
--object <type>,help
@@ -118,6 +122,20 @@ Standard options:
List object properties with ``<type>,help``. See the :manpage:`qemu(1)`
manual page for a description of the object properties.
+.. option:: --pidfile PATH
+
+ is the path to a file where the daemon writes its pid. This allows scripts to
+ stop the daemon by sending a signal::
+
+ $ kill -SIGTERM $(<path/to/qsd.pid)
+
+ A file lock is applied to the file so only one instance of the daemon can run
+ with a given pid file path. The daemon unlinks its pid file when terminating.
+
+ The pid file is written after chardevs, exports, and NBD servers have been
+ created but before accepting connections. The daemon has started successfully
+ when the pid file is written and clients may begin connecting.
+
Examples
--------
Launch the daemon with QMP monitor socket ``qmp.sock`` so clients can execute
@@ -127,6 +145,42 @@ QMP commands::
--chardev socket,path=qmp.sock,server=on,wait=off,id=char1 \
--monitor chardev=char1
+Launch the daemon from Python with a QMP monitor socket using file descriptor
+passing so there is no need to busy wait for the QMP monitor to become
+available::
+
+ #!/usr/bin/env python3
+ import subprocess
+ import socket
+
+ sock_path = '/var/run/qmp.sock'
+
+ with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as listen_sock:
+ listen_sock.bind(sock_path)
+ listen_sock.listen()
+
+ fd = listen_sock.fileno()
+
+ subprocess.Popen(
+ ['qemu-storage-daemon',
+ '--chardev', f'socket,fd={fd},server=on,id=char1',
+ '--monitor', 'chardev=char1'],
+ pass_fds=[fd],
+ )
+
+ # listen_sock was automatically closed when leaving the 'with' statement
+ # body. If the daemon process terminated early then the following connect()
+ # will fail with "Connection refused" because no process has the listen
+ # socket open anymore. Launch errors can be detected this way.
+
+ qmp_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ qmp_sock.connect(sock_path)
+ ...QMP interaction...
+
+The same socket spawning approach also works with the ``--nbd-server
+addr.type=fd,addr.str=<fd>`` and ``--export
+type=vhost-user-blk,addr.type=fd,addr.str=<fd>`` options.
+
Export raw image file ``disk.img`` over NBD UNIX domain socket ``nbd.sock``::
$ qemu-storage-daemon \
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 5a6e2c9d3d..134806db52 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -25,7 +25,6 @@
#include "coth.h"
#include "trace.h"
#include "migration/blocker.h"
-#include "sysemu/qtest.h"
#include "qemu/xxhash.h"
#include <math.h>
#include <linux/limits.h>
diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c
index 870a6d50c2..0f0a9f63e2 100644
--- a/hw/adc/npcm7xx_adc.c
+++ b/hw/adc/npcm7xx_adc.c
@@ -238,7 +238,7 @@ static void npcm7xx_adc_init(Object *obj)
memory_region_init_io(&s->iomem, obj, &npcm7xx_adc_ops, s,
TYPE_NPCM7XX_ADC, 4 * KiB);
sysbus_init_mmio(sbd, &s->iomem);
- s->clock = qdev_init_clock_in(DEVICE(s), "clock", NULL, NULL);
+ s->clock = qdev_init_clock_in(DEVICE(s), "clock", NULL, NULL, 0);
for (i = 0; i < NPCM7XX_ADC_NUM_INPUTS; ++i) {
object_property_add_uint32_ptr(obj, "adci[*]",
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
index 4e6f4ffe90..8c37cf00da 100644
--- a/hw/arm/Kconfig
+++ b/hw/arm/Kconfig
@@ -353,6 +353,7 @@ config XLNX_ZYNQMP_ARM
select SSI_M25P80
select XILINX_AXI
select XILINX_SPIPS
+ select XLNX_CSU_DMA
select XLNX_ZYNQMP
select XLNX_ZDMA
@@ -505,6 +506,7 @@ config ARM11MPCORE
config ARMSSE
bool
select ARM_V7M
+ select ARMSSE_CPU_PWRCTRL
select ARMSSE_CPUID
select ARMSSE_MHU
select CMSDK_APB_TIMER
@@ -520,9 +522,5 @@ config ARMSSE
select TZ_MSC
select TZ_PPC
select UNIMP
-
-config ARMSSE_CPUID
- bool
-
-config ARMSSE_MHU
- bool
+ select SSE_COUNTER
+ select SSE_TIMER
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
index 26e1a8c95b..e5aeb9e485 100644
--- a/hw/arm/armsse.c
+++ b/hw/arm/armsse.c
@@ -19,29 +19,58 @@
#include "migration/vmstate.h"
#include "hw/registerfields.h"
#include "hw/arm/armsse.h"
+#include "hw/arm/armsse-version.h"
#include "hw/arm/boot.h"
#include "hw/irq.h"
#include "hw/qdev-clock.h"
-/* Format of the System Information block SYS_CONFIG register */
-typedef enum SysConfigFormat {
- IoTKitFormat,
- SSE200Format,
-} SysConfigFormat;
+/*
+ * The SSE-300 puts some devices in different places to the
+ * SSE-200 (and original IoTKit). We use an array of these structs
+ * to define how each variant lays out these devices. (Parts of the
+ * SoC that are the same for all variants aren't handled via these
+ * data structures.)
+ */
+
+#define NO_IRQ -1
+#define NO_PPC -1
+/*
+ * Special values for ARMSSEDeviceInfo::irq to indicate that this
+ * device uses one of the inputs to the OR gate that feeds into the
+ * CPU NMI input.
+ */
+#define NMI_0 10000
+#define NMI_1 10001
+
+typedef struct ARMSSEDeviceInfo {
+ const char *name; /* name to use for the QOM object; NULL terminates list */
+ const char *type; /* QOM type name */
+ unsigned int index; /* Which of the N devices of this type is this ? */
+ hwaddr addr;
+ hwaddr size; /* only needed for TYPE_UNIMPLEMENTED_DEVICE */
+ int ppc; /* Index of APB PPC this device is wired up to, or NO_PPC */
+ int ppc_port; /* Port number of this device on the PPC */
+ int irq; /* NO_IRQ, or 0..NUM_SSE_IRQS-1, or NMI_0 or NMI_1 */
+ bool slowclk; /* true if device uses the slow 32KHz clock */
+} ARMSSEDeviceInfo;
struct ARMSSEInfo {
const char *name;
+ uint32_t sse_version;
int sram_banks;
int num_cpus;
uint32_t sys_version;
+ uint32_t iidr;
uint32_t cpuwait_rst;
- SysConfigFormat sys_config_format;
bool has_mhus;
- bool has_ppus;
bool has_cachectrl;
bool has_cpusecctrl;
bool has_cpuid;
+ bool has_cpu_pwrctrl;
+ bool has_sse_counter;
Property *props;
+ const ARMSSEDeviceInfo *devinfo;
+ const bool *irq_is_common;
};
static Property iotkit_properties[] = {
@@ -68,34 +97,449 @@ static Property armsse_properties[] = {
DEFINE_PROP_END_OF_LIST()
};
+static const ARMSSEDeviceInfo iotkit_devices[] = {
+ {
+ .name = "timer0",
+ .type = TYPE_CMSDK_APB_TIMER,
+ .index = 0,
+ .addr = 0x40000000,
+ .ppc = 0,
+ .ppc_port = 0,
+ .irq = 3,
+ },
+ {
+ .name = "timer1",
+ .type = TYPE_CMSDK_APB_TIMER,
+ .index = 1,
+ .addr = 0x40001000,
+ .ppc = 0,
+ .ppc_port = 1,
+ .irq = 4,
+ },
+ {
+ .name = "s32ktimer",
+ .type = TYPE_CMSDK_APB_TIMER,
+ .index = 2,
+ .addr = 0x4002f000,
+ .ppc = 1,
+ .ppc_port = 0,
+ .irq = 2,
+ .slowclk = true,
+ },
+ {
+ .name = "dualtimer",
+ .type = TYPE_CMSDK_APB_DUALTIMER,
+ .index = 0,
+ .addr = 0x40002000,
+ .ppc = 0,
+ .ppc_port = 2,
+ .irq = 5,
+ },
+ {
+ .name = "s32kwatchdog",
+ .type = TYPE_CMSDK_APB_WATCHDOG,
+ .index = 0,
+ .addr = 0x5002e000,
+ .ppc = NO_PPC,
+ .irq = NMI_0,
+ .slowclk = true,
+ },
+ {
+ .name = "nswatchdog",
+ .type = TYPE_CMSDK_APB_WATCHDOG,
+ .index = 1,
+ .addr = 0x40081000,
+ .ppc = NO_PPC,
+ .irq = 1,
+ },
+ {
+ .name = "swatchdog",
+ .type = TYPE_CMSDK_APB_WATCHDOG,
+ .index = 2,
+ .addr = 0x50081000,
+ .ppc = NO_PPC,
+ .irq = NMI_1,
+ },
+ {
+ .name = "armsse-sysinfo",
+ .type = TYPE_IOTKIT_SYSINFO,
+ .index = 0,
+ .addr = 0x40020000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "armsse-sysctl",
+ .type = TYPE_IOTKIT_SYSCTL,
+ .index = 0,
+ .addr = 0x50021000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = NULL,
+ }
+};
+
+static const ARMSSEDeviceInfo sse200_devices[] = {
+ {
+ .name = "timer0",
+ .type = TYPE_CMSDK_APB_TIMER,
+ .index = 0,
+ .addr = 0x40000000,
+ .ppc = 0,
+ .ppc_port = 0,
+ .irq = 3,
+ },
+ {
+ .name = "timer1",
+ .type = TYPE_CMSDK_APB_TIMER,
+ .index = 1,
+ .addr = 0x40001000,
+ .ppc = 0,
+ .ppc_port = 1,
+ .irq = 4,
+ },
+ {
+ .name = "s32ktimer",
+ .type = TYPE_CMSDK_APB_TIMER,
+ .index = 2,
+ .addr = 0x4002f000,
+ .ppc = 1,
+ .ppc_port = 0,
+ .irq = 2,
+ .slowclk = true,
+ },
+ {
+ .name = "dualtimer",
+ .type = TYPE_CMSDK_APB_DUALTIMER,
+ .index = 0,
+ .addr = 0x40002000,
+ .ppc = 0,
+ .ppc_port = 2,
+ .irq = 5,
+ },
+ {
+ .name = "s32kwatchdog",
+ .type = TYPE_CMSDK_APB_WATCHDOG,
+ .index = 0,
+ .addr = 0x5002e000,
+ .ppc = NO_PPC,
+ .irq = NMI_0,
+ .slowclk = true,
+ },
+ {
+ .name = "nswatchdog",
+ .type = TYPE_CMSDK_APB_WATCHDOG,
+ .index = 1,
+ .addr = 0x40081000,
+ .ppc = NO_PPC,
+ .irq = 1,
+ },
+ {
+ .name = "swatchdog",
+ .type = TYPE_CMSDK_APB_WATCHDOG,
+ .index = 2,
+ .addr = 0x50081000,
+ .ppc = NO_PPC,
+ .irq = NMI_1,
+ },
+ {
+ .name = "armsse-sysinfo",
+ .type = TYPE_IOTKIT_SYSINFO,
+ .index = 0,
+ .addr = 0x40020000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "armsse-sysctl",
+ .type = TYPE_IOTKIT_SYSCTL,
+ .index = 0,
+ .addr = 0x50021000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "CPU0CORE_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 0,
+ .addr = 0x50023000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "CPU1CORE_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 1,
+ .addr = 0x50025000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "DBG_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 2,
+ .addr = 0x50029000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "RAM0_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 3,
+ .addr = 0x5002a000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "RAM1_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 4,
+ .addr = 0x5002b000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "RAM2_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 5,
+ .addr = 0x5002c000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "RAM3_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 6,
+ .addr = 0x5002d000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "SYS_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 7,
+ .addr = 0x50022000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = NULL,
+ }
+};
+
+static const ARMSSEDeviceInfo sse300_devices[] = {
+ {
+ .name = "timer0",
+ .type = TYPE_SSE_TIMER,
+ .index = 0,
+ .addr = 0x48000000,
+ .ppc = 0,
+ .ppc_port = 0,
+ .irq = 3,
+ },
+ {
+ .name = "timer1",
+ .type = TYPE_SSE_TIMER,
+ .index = 1,
+ .addr = 0x48001000,
+ .ppc = 0,
+ .ppc_port = 1,
+ .irq = 4,
+ },
+ {
+ .name = "timer2",
+ .type = TYPE_SSE_TIMER,
+ .index = 2,
+ .addr = 0x48002000,
+ .ppc = 0,
+ .ppc_port = 2,
+ .irq = 5,
+ },
+ {
+ .name = "timer3",
+ .type = TYPE_SSE_TIMER,
+ .index = 3,
+ .addr = 0x48003000,
+ .ppc = 0,
+ .ppc_port = 5,
+ .irq = 27,
+ },
+ {
+ .name = "s32ktimer",
+ .type = TYPE_CMSDK_APB_TIMER,
+ .index = 0,
+ .addr = 0x4802f000,
+ .ppc = 1,
+ .ppc_port = 0,
+ .irq = 2,
+ .slowclk = true,
+ },
+ {
+ .name = "s32kwatchdog",
+ .type = TYPE_CMSDK_APB_WATCHDOG,
+ .index = 0,
+ .addr = 0x4802e000,
+ .ppc = NO_PPC,
+ .irq = NMI_0,
+ .slowclk = true,
+ },
+ {
+ .name = "watchdog",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 0,
+ .addr = 0x48040000,
+ .size = 0x2000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "armsse-sysinfo",
+ .type = TYPE_IOTKIT_SYSINFO,
+ .index = 0,
+ .addr = 0x48020000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "armsse-sysctl",
+ .type = TYPE_IOTKIT_SYSCTL,
+ .index = 0,
+ .addr = 0x58021000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "SYS_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 1,
+ .addr = 0x58022000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "CPU0CORE_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 2,
+ .addr = 0x50023000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "MGMT_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 3,
+ .addr = 0x50028000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = "DEBUG_PPU",
+ .type = TYPE_UNIMPLEMENTED_DEVICE,
+ .index = 4,
+ .addr = 0x50029000,
+ .size = 0x1000,
+ .ppc = NO_PPC,
+ .irq = NO_IRQ,
+ },
+ {
+ .name = NULL,
+ }
+};
+
+/* Is internal IRQ n shared between CPUs in a multi-core SSE ? */
+static const bool sse200_irq_is_common[32] = {
+ [0 ... 5] = true,
+ /* 6, 7: per-CPU MHU interrupts */
+ [8 ... 12] = true,
+ /* 13: per-CPU icache interrupt */
+ /* 14: reserved */
+ [15 ... 20] = true,
+ /* 21: reserved */
+ [22 ... 26] = true,
+ /* 27: reserved */
+ /* 28, 29: per-CPU CTI interrupts */
+ /* 30, 31: reserved */
+};
+
+static const bool sse300_irq_is_common[32] = {
+ [0 ... 5] = true,
+ /* 6, 7: per-CPU MHU interrupts */
+ [8 ... 12] = true,
+ /* 13: reserved */
+ [14 ... 16] = true,
+ /* 17-25: reserved */
+ [26 ... 27] = true,
+ /* 28, 29: per-CPU CTI interrupts */
+ /* 30, 31: reserved */
+};
+
static const ARMSSEInfo armsse_variants[] = {
{
.name = TYPE_IOTKIT,
+ .sse_version = ARMSSE_IOTKIT,
.sram_banks = 1,
.num_cpus = 1,
.sys_version = 0x41743,
+ .iidr = 0,
.cpuwait_rst = 0,
- .sys_config_format = IoTKitFormat,
.has_mhus = false,
- .has_ppus = false,
.has_cachectrl = false,
.has_cpusecctrl = false,
.has_cpuid = false,
+ .has_cpu_pwrctrl = false,
+ .has_sse_counter = false,
.props = iotkit_properties,
+ .devinfo = iotkit_devices,
+ .irq_is_common = sse200_irq_is_common,
},
{
.name = TYPE_SSE200,
+ .sse_version = ARMSSE_SSE200,
.sram_banks = 4,
.num_cpus = 2,
.sys_version = 0x22041743,
+ .iidr = 0,
.cpuwait_rst = 2,
- .sys_config_format = SSE200Format,
.has_mhus = true,
- .has_ppus = true,
.has_cachectrl = true,
.has_cpusecctrl = true,
.has_cpuid = true,
+ .has_cpu_pwrctrl = false,
+ .has_sse_counter = false,
+ .props = armsse_properties,
+ .devinfo = sse200_devices,
+ .irq_is_common = sse200_irq_is_common,
+ },
+ {
+ .name = TYPE_SSE300,
+ .sse_version = ARMSSE_SSE300,
+ .sram_banks = 2,
+ .num_cpus = 1,
+ .sys_version = 0x7e00043b,
+ .iidr = 0x74a0043b,
+ .cpuwait_rst = 0,
+ .has_mhus = false,
+ .has_cachectrl = false,
+ .has_cpusecctrl = true,
+ .has_cpuid = true,
+ .has_cpu_pwrctrl = true,
+ .has_sse_counter = true,
.props = armsse_properties,
+ .devinfo = sse300_devices,
+ .irq_is_common = sse300_irq_is_common,
},
};
@@ -104,13 +548,13 @@ static uint32_t armsse_sys_config_value(ARMSSE *s, const ARMSSEInfo *info)
/* Return the SYS_CONFIG value for this SSE */
uint32_t sys_config;
- switch (info->sys_config_format) {
- case IoTKitFormat:
+ switch (info->sse_version) {
+ case ARMSSE_IOTKIT:
sys_config = 0;
sys_config = deposit32(sys_config, 0, 4, info->sram_banks);
sys_config = deposit32(sys_config, 4, 4, s->sram_addr_width - 12);
break;
- case SSE200Format:
+ case ARMSSE_SSE200:
sys_config = 0;
sys_config = deposit32(sys_config, 0, 4, info->sram_banks);
sys_config = deposit32(sys_config, 4, 5, s->sram_addr_width);
@@ -121,6 +565,12 @@ static uint32_t armsse_sys_config_value(ARMSSE *s, const ARMSSEInfo *info)
sys_config = deposit32(sys_config, 28, 4, 2);
}
break;
+ case ARMSSE_SSE300:
+ sys_config = 0;
+ sys_config = deposit32(sys_config, 0, 4, info->sram_banks);
+ sys_config = deposit32(sys_config, 4, 5, s->sram_addr_width);
+ sys_config = deposit32(sys_config, 16, 3, 3); /* CPU0 = Cortex-M55 */
+ break;
default:
g_assert_not_reached();
}
@@ -130,21 +580,6 @@ static uint32_t armsse_sys_config_value(ARMSSE *s, const ARMSSEInfo *info)
/* Clock frequency in HZ of the 32KHz "slow clock" */
#define S32KCLK (32 * 1000)
-/* Is internal IRQ n shared between CPUs in a multi-core SSE ? */
-static bool irq_is_common[32] = {
- [0 ... 5] = true,
- /* 6, 7: per-CPU MHU interrupts */
- [8 ... 12] = true,
- /* 13: per-CPU icache interrupt */
- /* 14: reserved */
- [15 ... 20] = true,
- /* 21: reserved */
- [22 ... 26] = true,
- /* 27: reserved */
- /* 28, 29: per-CPU CTI interrupts */
- /* 30, 31: reserved */
-};
-
/*
* Create an alias region in @container of @size bytes starting at @base
* which mirrors the memory starting at @orig.
@@ -230,9 +665,10 @@ static void armsse_forward_sec_resp_cfg(ARMSSE *s)
qdev_connect_gpio_out(dev_splitter, 2, s->sec_resp_cfg_in);
}
-static void armsse_mainclk_update(void *opaque)
+static void armsse_mainclk_update(void *opaque, ClockEvent event)
{
ARMSSE *s = ARM_SSE(opaque);
+
/*
* Set system_clock_scale from our Clock input; this is what
* controls the tick rate of the CPU SysTick timer.
@@ -245,14 +681,15 @@ static void armsse_init(Object *obj)
ARMSSE *s = ARM_SSE(obj);
ARMSSEClass *asc = ARM_SSE_GET_CLASS(obj);
const ARMSSEInfo *info = asc->info;
+ const ARMSSEDeviceInfo *devinfo;
int i;
assert(info->sram_banks <= MAX_SRAM_BANKS);
assert(info->num_cpus <= SSE_MAX_CPUS);
s->mainclk = qdev_init_clock_in(DEVICE(s), "MAINCLK",
- armsse_mainclk_update, s);
- s->s32kclk = qdev_init_clock_in(DEVICE(s), "S32KCLK", NULL, NULL);
+ armsse_mainclk_update, s, ClockUpdate);
+ s->s32kclk = qdev_init_clock_in(DEVICE(s), "S32KCLK", NULL, NULL, 0);
memory_region_init(&s->container, obj, "armsse-container", UINT64_MAX);
@@ -285,9 +722,52 @@ static void armsse_init(Object *obj)
}
}
+ for (devinfo = info->devinfo; devinfo->name; devinfo++) {
+ assert(devinfo->ppc == NO_PPC || devinfo->ppc < ARRAY_SIZE(s->apb_ppc));
+ if (!strcmp(devinfo->type, TYPE_CMSDK_APB_TIMER)) {
+ assert(devinfo->index < ARRAY_SIZE(s->timer));
+ object_initialize_child(obj, devinfo->name,
+ &s->timer[devinfo->index],
+ TYPE_CMSDK_APB_TIMER);
+ } else if (!strcmp(devinfo->type, TYPE_CMSDK_APB_DUALTIMER)) {
+ assert(devinfo->index == 0);
+ object_initialize_child(obj, devinfo->name, &s->dualtimer,
+ TYPE_CMSDK_APB_DUALTIMER);
+ } else if (!strcmp(devinfo->type, TYPE_SSE_TIMER)) {
+ assert(devinfo->index < ARRAY_SIZE(s->sse_timer));
+ object_initialize_child(obj, devinfo->name,
+ &s->sse_timer[devinfo->index],
+ TYPE_SSE_TIMER);
+ } else if (!strcmp(devinfo->type, TYPE_CMSDK_APB_WATCHDOG)) {
+ assert(devinfo->index < ARRAY_SIZE(s->cmsdk_watchdog));
+ object_initialize_child(obj, devinfo->name,
+ &s->cmsdk_watchdog[devinfo->index],
+ TYPE_CMSDK_APB_WATCHDOG);
+ } else if (!strcmp(devinfo->type, TYPE_IOTKIT_SYSINFO)) {
+ assert(devinfo->index == 0);
+ object_initialize_child(obj, devinfo->name, &s->sysinfo,
+ TYPE_IOTKIT_SYSINFO);
+ } else if (!strcmp(devinfo->type, TYPE_IOTKIT_SYSCTL)) {
+ assert(devinfo->index == 0);
+ object_initialize_child(obj, devinfo->name, &s->sysctl,
+ TYPE_IOTKIT_SYSCTL);
+ } else if (!strcmp(devinfo->type, TYPE_UNIMPLEMENTED_DEVICE)) {
+ assert(devinfo->index < ARRAY_SIZE(s->unimp));
+ object_initialize_child(obj, devinfo->name,
+ &s->unimp[devinfo->index],
+ TYPE_UNIMPLEMENTED_DEVICE);
+ } else {
+ g_assert_not_reached();
+ }
+ }
+
object_initialize_child(obj, "secctl", &s->secctl, TYPE_IOTKIT_SECCTL);
- object_initialize_child(obj, "apb-ppc0", &s->apb_ppc0, TYPE_TZ_PPC);
- object_initialize_child(obj, "apb-ppc1", &s->apb_ppc1, TYPE_TZ_PPC);
+
+ for (i = 0; i < ARRAY_SIZE(s->apb_ppc); i++) {
+ g_autofree char *name = g_strdup_printf("apb-ppc%d", i);
+ object_initialize_child(obj, name, &s->apb_ppc[i], TYPE_TZ_PPC);
+ }
+
for (i = 0; i < info->sram_banks; i++) {
char *name = g_strdup_printf("mpc%d", i);
object_initialize_child(obj, name, &s->mpc[i], TYPE_TZ_MPC);
@@ -303,46 +783,11 @@ static void armsse_init(Object *obj)
object_initialize_child(obj, name, splitter, TYPE_SPLIT_IRQ);
g_free(name);
}
- object_initialize_child(obj, "timer0", &s->timer0, TYPE_CMSDK_APB_TIMER);
- object_initialize_child(obj, "timer1", &s->timer1, TYPE_CMSDK_APB_TIMER);
- object_initialize_child(obj, "s32ktimer", &s->s32ktimer,
- TYPE_CMSDK_APB_TIMER);
- object_initialize_child(obj, "dualtimer", &s->dualtimer,
- TYPE_CMSDK_APB_DUALTIMER);
- object_initialize_child(obj, "s32kwatchdog", &s->s32kwatchdog,
- TYPE_CMSDK_APB_WATCHDOG);
- object_initialize_child(obj, "nswatchdog", &s->nswatchdog,
- TYPE_CMSDK_APB_WATCHDOG);
- object_initialize_child(obj, "swatchdog", &s->swatchdog,
- TYPE_CMSDK_APB_WATCHDOG);
- object_initialize_child(obj, "armsse-sysctl", &s->sysctl,
- TYPE_IOTKIT_SYSCTL);
- object_initialize_child(obj, "armsse-sysinfo", &s->sysinfo,
- TYPE_IOTKIT_SYSINFO);
+
if (info->has_mhus) {
object_initialize_child(obj, "mhu0", &s->mhu[0], TYPE_ARMSSE_MHU);
object_initialize_child(obj, "mhu1", &s->mhu[1], TYPE_ARMSSE_MHU);
}
- if (info->has_ppus) {
- for (i = 0; i < info->num_cpus; i++) {
- char *name = g_strdup_printf("CPU%dCORE_PPU", i);
- int ppuidx = CPU0CORE_PPU + i;
-
- object_initialize_child(obj, name, &s->ppu[ppuidx],
- TYPE_UNIMPLEMENTED_DEVICE);
- g_free(name);
- }
- object_initialize_child(obj, "DBG_PPU", &s->ppu[DBG_PPU],
- TYPE_UNIMPLEMENTED_DEVICE);
- for (i = 0; i < info->sram_banks; i++) {
- char *name = g_strdup_printf("RAM%d_PPU", i);
- int ppuidx = RAM0_PPU + i;
-
- object_initialize_child(obj, name, &s->ppu[ppuidx],
- TYPE_UNIMPLEMENTED_DEVICE);
- g_free(name);
- }
- }
if (info->has_cachectrl) {
for (i = 0; i < info->num_cpus; i++) {
char *name = g_strdup_printf("cachectrl%d", i);
@@ -370,6 +815,20 @@ static void armsse_init(Object *obj)
g_free(name);
}
}
+ if (info->has_cpu_pwrctrl) {
+ for (i = 0; i < info->num_cpus; i++) {
+ char *name = g_strdup_printf("cpu_pwrctrl%d", i);
+
+ object_initialize_child(obj, name, &s->cpu_pwrctrl[i],
+ TYPE_ARMSSE_CPU_PWRCTRL);
+ g_free(name);
+ }
+ }
+ if (info->has_sse_counter) {
+ object_initialize_child(obj, "sse-counter", &s->sse_counter,
+ TYPE_SSE_COUNTER);
+ }
+
object_initialize_child(obj, "nmi-orgate", &s->nmi_orgate, TYPE_OR_IRQ);
object_initialize_child(obj, "ppc-irq-orgate", &s->ppc_irq_orgate,
TYPE_OR_IRQ);
@@ -384,7 +843,7 @@ static void armsse_init(Object *obj)
}
if (info->num_cpus > 1) {
for (i = 0; i < ARRAY_SIZE(s->cpu_irq_splitter); i++) {
- if (irq_is_common[i]) {
+ if (info->irq_is_common[i]) {
char *name = g_strdup_printf("cpu-irq-splitter%d", i);
SplitIRQ *splitter = &s->cpu_irq_splitter[i];
@@ -417,7 +876,7 @@ static qemu_irq armsse_get_common_irq_in(ARMSSE *s, int irqno)
ARMSSEClass *asc = ARM_SSE_GET_CLASS(s);
const ARMSSEInfo *info = asc->info;
- assert(irq_is_common[irqno]);
+ assert(info->irq_is_common[irqno]);
if (info->num_cpus == 1) {
/* Only one CPU -- just connect directly to it */
@@ -428,22 +887,12 @@ static qemu_irq armsse_get_common_irq_in(ARMSSE *s, int irqno)
}
}
-static void map_ppu(ARMSSE *s, int ppuidx, const char *name, hwaddr addr)
-{
- /* Map a PPU unimplemented device stub */
- DeviceState *dev = DEVICE(&s->ppu[ppuidx]);
-
- qdev_prop_set_string(dev, "name", name);
- qdev_prop_set_uint64(dev, "size", 0x1000);
- sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal);
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->ppu[ppuidx]), 0, addr);
-}
-
static void armsse_realize(DeviceState *dev, Error **errp)
{
ARMSSE *s = ARM_SSE(dev);
ARMSSEClass *asc = ARM_SSE_GET_CLASS(dev);
const ARMSSEInfo *info = asc->info;
+ const ARMSSEDeviceInfo *devinfo;
int i;
MemoryRegion *mr;
Error *err = NULL;
@@ -522,7 +971,7 @@ static void armsse_realize(DeviceState *dev, Error **errp)
int j;
char *gpioname;
- qdev_prop_set_uint32(cpudev, "num-irq", s->exp_numirq + 32);
+ qdev_prop_set_uint32(cpudev, "num-irq", s->exp_numirq + NUM_SSE_IRQS);
/*
* In real hardware the initial Secure VTOR is set from the INITSVTOR*
* registers in the IoT Kit System Control Register block. In QEMU
@@ -593,7 +1042,7 @@ static void armsse_realize(DeviceState *dev, Error **errp)
/* Connect EXP_IRQ/EXP_CPUn_IRQ GPIOs to the NVIC's lines 32 and up */
s->exp_irqs[i] = g_new(qemu_irq, s->exp_numirq);
for (j = 0; j < s->exp_numirq; j++) {
- s->exp_irqs[i][j] = qdev_get_gpio_in(cpudev, j + 32);
+ s->exp_irqs[i][j] = qdev_get_gpio_in(cpudev, j + NUM_SSE_IRQS);
}
if (i == 0) {
gpioname = g_strdup("EXP_IRQ");
@@ -609,7 +1058,7 @@ static void armsse_realize(DeviceState *dev, Error **errp)
/* Wire up the splitters that connect common IRQs to all CPUs */
if (info->num_cpus > 1) {
for (i = 0; i < ARRAY_SIZE(s->cpu_irq_splitter); i++) {
- if (irq_is_common[i]) {
+ if (info->irq_is_common[i]) {
Object *splitter = OBJECT(&s->cpu_irq_splitter[i]);
DeviceState *devs = DEVICE(splitter);
int cpunum;
@@ -649,6 +1098,8 @@ static void armsse_realize(DeviceState *dev, Error **errp)
}
/* Security controller */
+ object_property_set_int(OBJECT(&s->secctl), "sse-version",
+ info->sse_version, &error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->secctl), errp)) {
return;
}
@@ -715,6 +1166,36 @@ static void armsse_realize(DeviceState *dev, Error **errp)
qdev_connect_gpio_out(DEVICE(&s->mpc_irq_orgate), 0,
armsse_get_common_irq_in(s, 9));
+ /* This OR gate wires together outputs from the secure watchdogs to NMI */
+ if (!object_property_set_int(OBJECT(&s->nmi_orgate), "num-lines", 2,
+ errp)) {
+ return;
+ }
+ if (!qdev_realize(DEVICE(&s->nmi_orgate), NULL, errp)) {
+ return;
+ }
+ qdev_connect_gpio_out(DEVICE(&s->nmi_orgate), 0,
+ qdev_get_gpio_in_named(DEVICE(&s->armv7m), "NMI", 0));
+
+ /* The SSE-300 has a System Counter / System Timestamp Generator */
+ if (info->has_sse_counter) {
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&s->sse_counter);
+
+ qdev_connect_clock_in(DEVICE(sbd), "CLK", s->mainclk);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ /*
+ * The control frame is only in the Secure region;
+ * the status frame is in the NS region (and visible in the
+ * S region via the alias mapping).
+ */
+ memory_region_add_subregion(&s->container, 0x58100000,
+ sysbus_mmio_get_region(sbd, 0));
+ memory_region_add_subregion(&s->container, 0x48101000,
+ sysbus_mmio_get_region(sbd, 1));
+ }
+
/* Devices behind APB PPC0:
* 0x40000000: timer0
* 0x40001000: timer1
@@ -725,35 +1206,127 @@ static void armsse_realize(DeviceState *dev, Error **errp)
* it to the appropriate PPC port; then we can realize the PPC and
* map its upstream ends to the right place in the container.
*/
- qdev_connect_clock_in(DEVICE(&s->timer0), "pclk", s->mainclk);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer0), errp)) {
- return;
- }
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer0), 0,
- armsse_get_common_irq_in(s, 3));
- mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->timer0), 0);
- object_property_set_link(OBJECT(&s->apb_ppc0), "port[0]", OBJECT(mr),
- &error_abort);
-
- qdev_connect_clock_in(DEVICE(&s->timer1), "pclk", s->mainclk);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer1), errp)) {
- return;
- }
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer1), 0,
- armsse_get_common_irq_in(s, 4));
- mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->timer1), 0);
- object_property_set_link(OBJECT(&s->apb_ppc0), "port[1]", OBJECT(mr),
- &error_abort);
-
- qdev_connect_clock_in(DEVICE(&s->dualtimer), "TIMCLK", s->mainclk);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->dualtimer), errp)) {
- return;
+ for (devinfo = info->devinfo; devinfo->name; devinfo++) {
+ SysBusDevice *sbd;
+ qemu_irq irq;
+
+ if (!strcmp(devinfo->type, TYPE_CMSDK_APB_TIMER)) {
+ sbd = SYS_BUS_DEVICE(&s->timer[devinfo->index]);
+
+ qdev_connect_clock_in(DEVICE(sbd), "pclk",
+ devinfo->slowclk ? s->s32kclk : s->mainclk);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ mr = sysbus_mmio_get_region(sbd, 0);
+ } else if (!strcmp(devinfo->type, TYPE_CMSDK_APB_DUALTIMER)) {
+ sbd = SYS_BUS_DEVICE(&s->dualtimer);
+
+ qdev_connect_clock_in(DEVICE(sbd), "TIMCLK", s->mainclk);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ mr = sysbus_mmio_get_region(sbd, 0);
+ } else if (!strcmp(devinfo->type, TYPE_SSE_TIMER)) {
+ sbd = SYS_BUS_DEVICE(&s->sse_timer[devinfo->index]);
+
+ assert(info->has_sse_counter);
+ object_property_set_link(OBJECT(sbd), "counter",
+ OBJECT(&s->sse_counter), &error_abort);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ mr = sysbus_mmio_get_region(sbd, 0);
+ } else if (!strcmp(devinfo->type, TYPE_CMSDK_APB_WATCHDOG)) {
+ sbd = SYS_BUS_DEVICE(&s->cmsdk_watchdog[devinfo->index]);
+
+ qdev_connect_clock_in(DEVICE(sbd), "WDOGCLK",
+ devinfo->slowclk ? s->s32kclk : s->mainclk);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ mr = sysbus_mmio_get_region(sbd, 0);
+ } else if (!strcmp(devinfo->type, TYPE_IOTKIT_SYSINFO)) {
+ sbd = SYS_BUS_DEVICE(&s->sysinfo);
+
+ object_property_set_int(OBJECT(&s->sysinfo), "SYS_VERSION",
+ info->sys_version, &error_abort);
+ object_property_set_int(OBJECT(&s->sysinfo), "SYS_CONFIG",
+ armsse_sys_config_value(s, info),
+ &error_abort);
+ object_property_set_int(OBJECT(&s->sysinfo), "sse-version",
+ info->sse_version, &error_abort);
+ object_property_set_int(OBJECT(&s->sysinfo), "IIDR",
+ info->iidr, &error_abort);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ mr = sysbus_mmio_get_region(sbd, 0);
+ } else if (!strcmp(devinfo->type, TYPE_IOTKIT_SYSCTL)) {
+ /* System control registers */
+ sbd = SYS_BUS_DEVICE(&s->sysctl);
+
+ object_property_set_int(OBJECT(&s->sysctl), "sse-version",
+ info->sse_version, &error_abort);
+ object_property_set_int(OBJECT(&s->sysctl), "CPUWAIT_RST",
+ info->cpuwait_rst, &error_abort);
+ object_property_set_int(OBJECT(&s->sysctl), "INITSVTOR0_RST",
+ s->init_svtor, &error_abort);
+ object_property_set_int(OBJECT(&s->sysctl), "INITSVTOR1_RST",
+ s->init_svtor, &error_abort);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ mr = sysbus_mmio_get_region(sbd, 0);
+ } else if (!strcmp(devinfo->type, TYPE_UNIMPLEMENTED_DEVICE)) {
+ sbd = SYS_BUS_DEVICE(&s->unimp[devinfo->index]);
+
+ qdev_prop_set_string(DEVICE(sbd), "name", devinfo->name);
+ qdev_prop_set_uint64(DEVICE(sbd), "size", devinfo->size);
+ if (!sysbus_realize(sbd, errp)) {
+ return;
+ }
+ mr = sysbus_mmio_get_region(sbd, 0);
+ } else {
+ g_assert_not_reached();
+ }
+
+ switch (devinfo->irq) {
+ case NO_IRQ:
+ irq = NULL;
+ break;
+ case 0 ... NUM_SSE_IRQS - 1:
+ irq = armsse_get_common_irq_in(s, devinfo->irq);
+ break;
+ case NMI_0:
+ case NMI_1:
+ irq = qdev_get_gpio_in(DEVICE(&s->nmi_orgate),
+ devinfo->irq - NMI_0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (irq) {
+ sysbus_connect_irq(sbd, 0, irq);
+ }
+
+ /*
+ * Devices connected to a PPC are connected to the port here;
+ * we will map the upstream end of that port to the right address
+ * in the container later after the PPC has been realized.
+ * Devices not connected to a PPC can be mapped immediately.
+ */
+ if (devinfo->ppc != NO_PPC) {
+ TZPPC *ppc = &s->apb_ppc[devinfo->ppc];
+ g_autofree char *portname = g_strdup_printf("port[%d]",
+ devinfo->ppc_port);
+ object_property_set_link(OBJECT(ppc), portname, OBJECT(mr),
+ &error_abort);
+ } else {
+ memory_region_add_subregion(&s->container, devinfo->addr, mr);
+ }
}
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->dualtimer), 0,
- armsse_get_common_irq_in(s, 5));
- mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dualtimer), 0);
- object_property_set_link(OBJECT(&s->apb_ppc0), "port[2]", OBJECT(mr),
- &error_abort);
if (info->has_mhus) {
/*
@@ -775,7 +1348,7 @@ static void armsse_realize(DeviceState *dev, Error **errp)
}
port = g_strdup_printf("port[%d]", i + 3);
mr = sysbus_mmio_get_region(mhu_sbd, 0);
- object_property_set_link(OBJECT(&s->apb_ppc0), port, OBJECT(mr),
+ object_property_set_link(OBJECT(&s->apb_ppc[0]), port, OBJECT(mr),
&error_abort);
g_free(port);
@@ -795,19 +1368,13 @@ static void armsse_realize(DeviceState *dev, Error **errp)
}
}
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->apb_ppc0), errp)) {
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->apb_ppc[0]), errp)) {
return;
}
- sbd_apb_ppc0 = SYS_BUS_DEVICE(&s->apb_ppc0);
- dev_apb_ppc0 = DEVICE(&s->apb_ppc0);
+ sbd_apb_ppc0 = SYS_BUS_DEVICE(&s->apb_ppc[0]);
+ dev_apb_ppc0 = DEVICE(&s->apb_ppc[0]);
- mr = sysbus_mmio_get_region(sbd_apb_ppc0, 0);
- memory_region_add_subregion(&s->container, 0x40000000, mr);
- mr = sysbus_mmio_get_region(sbd_apb_ppc0, 1);
- memory_region_add_subregion(&s->container, 0x40001000, mr);
- mr = sysbus_mmio_get_region(sbd_apb_ppc0, 2);
- memory_region_add_subregion(&s->container, 0x40002000, mr);
if (info->has_mhus) {
mr = sysbus_mmio_get_region(sbd_apb_ppc0, 3);
memory_region_add_subregion(&s->container, 0x40003000, mr);
@@ -852,6 +1419,8 @@ static void armsse_realize(DeviceState *dev, Error **errp)
* 0x50010000: L1 icache control registers
* 0x50011000: CPUSECCTRL (CPU local security control registers)
* 0x4001f000 and 0x5001f000: CPU_IDENTITY register block
+ * The SSE-300 has an extra:
+ * 0x40012000 and 0x50012000: CPU_PWRCTRL register block
*/
if (info->has_cachectrl) {
for (i = 0; i < info->num_cpus; i++) {
@@ -898,28 +1467,24 @@ static void armsse_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(&s->cpu_container[i], 0x4001F000, mr);
}
}
+ if (info->has_cpu_pwrctrl) {
+ for (i = 0; i < info->num_cpus; i++) {
+ MemoryRegion *mr;
- /* 0x40020000 .. 0x4002ffff : ARMSSE system control peripheral region */
- /* Devices behind APB PPC1:
- * 0x4002f000: S32K timer
- */
- qdev_connect_clock_in(DEVICE(&s->s32ktimer), "pclk", s->s32kclk);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->s32ktimer), errp)) {
- return;
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpu_pwrctrl[i]), errp)) {
+ return;
+ }
+
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cpu_pwrctrl[i]), 0);
+ memory_region_add_subregion(&s->cpu_container[i], 0x40012000, mr);
+ }
}
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->s32ktimer), 0,
- armsse_get_common_irq_in(s, 2));
- mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->s32ktimer), 0);
- object_property_set_link(OBJECT(&s->apb_ppc1), "port[0]", OBJECT(mr),
- &error_abort);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->apb_ppc1), errp)) {
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->apb_ppc[1]), errp)) {
return;
}
- mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->apb_ppc1), 0);
- memory_region_add_subregion(&s->container, 0x4002f000, mr);
- dev_apb_ppc1 = DEVICE(&s->apb_ppc1);
+ dev_apb_ppc1 = DEVICE(&s->apb_ppc[1]);
qdev_connect_gpio_out_named(dev_secctl, "apb_ppc1_nonsec", 0,
qdev_get_gpio_in_named(dev_apb_ppc1,
"cfg_nonsec", 0));
@@ -936,92 +1501,23 @@ static void armsse_realize(DeviceState *dev, Error **errp)
qdev_get_gpio_in_named(dev_apb_ppc1,
"cfg_sec_resp", 0));
- if (!object_property_set_int(OBJECT(&s->sysinfo), "SYS_VERSION",
- info->sys_version, errp)) {
- return;
- }
- if (!object_property_set_int(OBJECT(&s->sysinfo), "SYS_CONFIG",
- armsse_sys_config_value(s, info), errp)) {
- return;
- }
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->sysinfo), errp)) {
- return;
- }
- /* System information registers */
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->sysinfo), 0, 0x40020000);
- /* System control registers */
- object_property_set_int(OBJECT(&s->sysctl), "SYS_VERSION",
- info->sys_version, &error_abort);
- object_property_set_int(OBJECT(&s->sysctl), "CPUWAIT_RST",
- info->cpuwait_rst, &error_abort);
- object_property_set_int(OBJECT(&s->sysctl), "INITSVTOR0_RST",
- s->init_svtor, &error_abort);
- object_property_set_int(OBJECT(&s->sysctl), "INITSVTOR1_RST",
- s->init_svtor, &error_abort);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->sysctl), errp)) {
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->sysctl), 0, 0x50021000);
-
- if (info->has_ppus) {
- /* CPUnCORE_PPU for each CPU */
- for (i = 0; i < info->num_cpus; i++) {
- char *name = g_strdup_printf("CPU%dCORE_PPU", i);
-
- map_ppu(s, CPU0CORE_PPU + i, name, 0x50023000 + i * 0x2000);
- /*
- * We don't support CPU debug so don't create the
- * CPU0DEBUG_PPU at 0x50024000 and 0x50026000.
- */
- g_free(name);
- }
- map_ppu(s, DBG_PPU, "DBG_PPU", 0x50029000);
-
- for (i = 0; i < info->sram_banks; i++) {
- char *name = g_strdup_printf("RAM%d_PPU", i);
+ /*
+ * Now that both PPCs are realized, we can map the upstream ends of
+ * the ports which correspond to entries in the devinfo array.
+ * The ports which are connected to non-devinfo devices have
+ * already been mapped.
+ */
+ for (devinfo = info->devinfo; devinfo->name; devinfo++) {
+ SysBusDevice *ppc_sbd;
- map_ppu(s, RAM0_PPU + i, name, 0x5002a000 + i * 0x1000);
- g_free(name);
+ if (devinfo->ppc == NO_PPC) {
+ continue;
}
+ ppc_sbd = SYS_BUS_DEVICE(&s->apb_ppc[devinfo->ppc]);
+ mr = sysbus_mmio_get_region(ppc_sbd, devinfo->ppc_port);
+ memory_region_add_subregion(&s->container, devinfo->addr, mr);
}
- /* This OR gate wires together outputs from the secure watchdogs to NMI */
- if (!object_property_set_int(OBJECT(&s->nmi_orgate), "num-lines", 2,
- errp)) {
- return;
- }
- if (!qdev_realize(DEVICE(&s->nmi_orgate), NULL, errp)) {
- return;
- }
- qdev_connect_gpio_out(DEVICE(&s->nmi_orgate), 0,
- qdev_get_gpio_in_named(DEVICE(&s->armv7m), "NMI", 0));
-
- qdev_connect_clock_in(DEVICE(&s->s32kwatchdog), "WDOGCLK", s->s32kclk);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->s32kwatchdog), errp)) {
- return;
- }
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->s32kwatchdog), 0,
- qdev_get_gpio_in(DEVICE(&s->nmi_orgate), 0));
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->s32kwatchdog), 0, 0x5002e000);
-
- /* 0x40080000 .. 0x4008ffff : ARMSSE second Base peripheral region */
-
- qdev_connect_clock_in(DEVICE(&s->nswatchdog), "WDOGCLK", s->mainclk);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->nswatchdog), errp)) {
- return;
- }
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->nswatchdog), 0,
- armsse_get_common_irq_in(s, 1));
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->nswatchdog), 0, 0x40081000);
-
- qdev_connect_clock_in(DEVICE(&s->swatchdog), "WDOGCLK", s->mainclk);
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->swatchdog), errp)) {
- return;
- }
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->swatchdog), 0,
- qdev_get_gpio_in(DEVICE(&s->nmi_orgate), 1));
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->swatchdog), 0, 0x50081000);
-
for (i = 0; i < ARRAY_SIZE(s->ppc_irq_splitter); i++) {
Object *splitter = OBJECT(&s->ppc_irq_splitter[i]);
@@ -1052,7 +1548,7 @@ static void armsse_realize(DeviceState *dev, Error **errp)
DeviceState *devs = DEVICE(&s->ppc_irq_splitter[i]);
char *gpioname = g_strdup_printf("apb_ppc%d_irq_status",
i - NUM_EXTERNAL_PPCS);
- TZPPC *ppc = (i == NUM_EXTERNAL_PPCS) ? &s->apb_ppc0 : &s->apb_ppc1;
+ TZPPC *ppc = &s->apb_ppc[i - NUM_EXTERNAL_PPCS];
qdev_connect_gpio_out(devs, 0,
qdev_get_gpio_in_named(dev_secctl, gpioname, 0));
@@ -1120,7 +1616,7 @@ static void armsse_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->container);
/* Set initial system_clock_scale from MAINCLK */
- armsse_mainclk_update(s);
+ armsse_mainclk_update(s, ClockUpdate);
}
static void armsse_idau_check(IDAUInterface *ii, uint32_t address,
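The armsse.c changes above replace the old per-device realize boilerplate with a loop over a per-SoC devinfo array. The ARMSSEDeviceInfo definition and the actual tables live in an earlier part of this patch and are not shown here; the sketch below is only inferred from the fields the loop dereferences (name, type, index, addr, size, ppc, ppc_port, irq), and the entry at the end is purely hypothetical.

/*
 * Sketch only: inferred from the accesses in armsse_realize() above.
 * The real struct (defined earlier in this patch) may have more fields.
 */
typedef struct ARMSSEDeviceInfo {
    const char *name;   /* name to give the sysbus device */
    const char *type;   /* QOM type name */
    unsigned int index; /* index into the matching device array in ARMSSE */
    hwaddr addr;        /* base address in the SSE container */
    hwaddr size;        /* region size (used for unimplemented-device stubs) */
    int ppc;            /* PPC this device sits behind, or NO_PPC */
    int ppc_port;       /* port number on that PPC */
    int irq;            /* NO_IRQ, an SSE IRQ number, or NMI_0/NMI_1 */
} ARMSSEDeviceInfo;

/* Hypothetical table entry: an unimplemented-device stub behind APB PPC 0 */
static const ARMSSEDeviceInfo example_devinfo = {
    .name = "example-stub",
    .type = TYPE_UNIMPLEMENTED_DEVICE,
    .index = 0,
    .addr = 0x40002000,
    .size = 0x1000,
    .ppc = 0,
    .ppc_port = 2,
    .irq = NO_IRQ,
};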
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index 8224d4ade9..6dd10d8470 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -16,7 +16,6 @@
#include "hw/loader.h"
#include "hw/qdev-properties.h"
#include "elf.h"
-#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c
index bf31ca351f..bc87e754a3 100644
--- a/hw/arm/aspeed_ast2600.c
+++ b/hw/arm/aspeed_ast2600.c
@@ -65,7 +65,7 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = {
#define ASPEED_A7MPCORE_ADDR 0x40460000
-#define ASPEED_SOC_AST2600_MAX_IRQ 128
+#define AST2600_MAX_IRQ 197
/* Shared Peripheral Interrupt values below are offset by -32 from datasheet */
static const int aspeed_soc_ast2600_irqmap[] = {
@@ -98,13 +98,13 @@ static const int aspeed_soc_ast2600_irqmap[] = {
[ASPEED_DEV_WDT] = 24,
[ASPEED_DEV_PWM] = 44,
[ASPEED_DEV_LPC] = 35,
- [ASPEED_DEV_IBT] = 35, /* LPC */
+ [ASPEED_DEV_IBT] = 143,
[ASPEED_DEV_I2C] = 110, /* 110 -> 125 */
[ASPEED_DEV_ETH1] = 2,
[ASPEED_DEV_ETH2] = 3,
[ASPEED_DEV_ETH3] = 32,
[ASPEED_DEV_ETH4] = 33,
-
+ [ASPEED_DEV_KCS] = 138, /* 138 -> 142 */
};
static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl)
@@ -211,6 +211,8 @@ static void aspeed_soc_ast2600_init(Object *obj)
object_initialize_child(obj, "emmc-controller.sdhci", &s->emmc.slots[0],
TYPE_SYSBUS_SDHCI);
+
+ object_initialize_child(obj, "lpc", &s->lpc, TYPE_ASPEED_LPC);
}
/*
@@ -241,8 +243,6 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
/* CPU */
for (i = 0; i < sc->num_cpus; i++) {
- object_property_set_int(OBJECT(&s->cpu[i]), "psci-conduit",
- QEMU_PSCI_CONDUIT_SMC, &error_abort);
if (sc->num_cpus > 1) {
object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar",
ASPEED_A7MPCORE_ADDR, &error_abort);
@@ -253,11 +253,6 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 1125000000,
&error_abort);
- /*
- * TODO: the secondary CPUs are started and a boot helper
- * is needed when using -kernel
- */
-
if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) {
return;
}
@@ -267,7 +262,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", sc->num_cpus,
&error_abort);
object_property_set_int(OBJECT(&s->a7mpcore), "num-irq",
- ASPEED_SOC_AST2600_MAX_IRQ + GIC_INTERNAL,
+ ROUND_UP(AST2600_MAX_IRQ + GIC_INTERNAL, 32),
&error_abort);
sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort);
@@ -469,6 +464,40 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->emmc), 0, sc->memmap[ASPEED_DEV_EMMC]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc), 0,
aspeed_soc_get_irq(s, ASPEED_DEV_EMMC));
+
+ /* LPC */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]);
+
+ /* Connect the LPC IRQ to the GIC. It is otherwise unused. */
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_LPC));
+
+ /*
+ * On the AST2600 LPC subdevice IRQs are connected straight to the GIC.
+ *
+ * LPC subdevice IRQ sources are offset from 1 because the LPC model caters
+ * to the AST2400 and AST2500. SoCs before the AST2600 have one LPC IRQ
+ * shared across the subdevices, and the shared IRQ output to the VIC is at
+ * offset 0.
+ */
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1,
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_1));
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2,
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_2));
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3,
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_3));
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4,
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_4));
}
static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data)
diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c
index 7eefd54ac0..057d053c84 100644
--- a/hw/arm/aspeed_soc.c
+++ b/hw/arm/aspeed_soc.c
@@ -112,7 +112,6 @@ static const int aspeed_soc_ast2400_irqmap[] = {
[ASPEED_DEV_WDT] = 27,
[ASPEED_DEV_PWM] = 28,
[ASPEED_DEV_LPC] = 8,
- [ASPEED_DEV_IBT] = 8, /* LPC */
[ASPEED_DEV_I2C] = 12,
[ASPEED_DEV_ETH1] = 2,
[ASPEED_DEV_ETH2] = 3,
@@ -211,6 +210,8 @@ static void aspeed_soc_init(Object *obj)
object_initialize_child(obj, "sdhci[*]", &s->sdhci.slots[i],
TYPE_SYSBUS_SDHCI);
}
+
+ object_initialize_child(obj, "lpc", &s->lpc, TYPE_ASPEED_LPC);
}
static void aspeed_soc_realize(DeviceState *dev, Error **errp)
@@ -393,6 +394,37 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
sc->memmap[ASPEED_DEV_SDHCI]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0,
aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI));
+
+ /* LPC */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]);
+
+ /* Connect the LPC IRQ to the VIC */
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0,
+ aspeed_soc_get_irq(s, ASPEED_DEV_LPC));
+
+ /*
+ * On the AST2400 and AST2500 the one LPC IRQ is shared between all of the
+ * subdevices. Connect the LPC subdevice IRQs to the LPC controller IRQ (by
+ * contrast, on the AST2600, the subdevice IRQs are connected straight to
+ * the GIC).
+ *
+ * LPC subdevice IRQ sources are offset from 1 because the shared IRQ output
+ * to the VIC is at offset 0.
+ */
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1,
+ qdev_get_gpio_in(DEVICE(&s->lpc), aspeed_lpc_kcs_1));
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2,
+ qdev_get_gpio_in(DEVICE(&s->lpc), aspeed_lpc_kcs_2));
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3,
+ qdev_get_gpio_in(DEVICE(&s->lpc), aspeed_lpc_kcs_3));
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4,
+ qdev_get_gpio_in(DEVICE(&s->lpc), aspeed_lpc_kcs_4));
}
static Property aspeed_soc_properties[] = {
DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION,
diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c
index 6bc643651b..8454b65458 100644
--- a/hw/arm/mainstone.c
+++ b/hw/arm/mainstone.c
@@ -22,7 +22,6 @@
#include "hw/block/flash.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
-#include "sysemu/qtest.h"
#include "cpu.h"
/* Device addresses */
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index 72da8cb1a1..3fbe3d29f9 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -17,6 +17,7 @@
* "mps2-an505" -- Cortex-M33 as documented in ARM Application Note AN505
* "mps2-an521" -- Dual Cortex-M33 as documented in Application Note AN521
* "mps2-an524" -- Dual Cortex-M33 as documented in Application Note AN524
+ * "mps2-an547" -- Single Cortex-M55 as documented in Application Note AN547
*
* Links to the TRM for the board itself and to the various Application
* Notes which document the FPGA images can be found here:
@@ -30,6 +31,8 @@
* https://developer.arm.com/documentation/dai0521/latest/
* Application Note AN524:
* https://developer.arm.com/documentation/dai0524/latest/
+ * Application Note AN547:
+ * https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/DAI0547B_SSE300_PLUS_U55_FPGA_for_mps3.pdf
*
* The AN505 defers to the Cortex-M33 processor ARMv8M IoT Kit FVP User Guide
* (ARM ECM0601256) for the details of some of the device layout:
@@ -37,6 +40,8 @@
* Similarly, the AN521 and AN524 use the SSE-200, and the SSE-200 TRM defines
* most of the device layout:
* https://developer.arm.com/documentation/101104/latest/
+ * and the AN547 uses the SSE-300, whose layout is in the SSE-300 TRM:
+ * https://developer.arm.com/documentation/101773/latest/
*/
#include "qemu/osdep.h"
@@ -68,13 +73,14 @@
#include "hw/qdev-clock.h"
#include "qom/object.h"
-#define MPS2TZ_NUMIRQ_MAX 95
-#define MPS2TZ_RAM_MAX 4
+#define MPS2TZ_NUMIRQ_MAX 96
+#define MPS2TZ_RAM_MAX 5
typedef enum MPS2TZFPGAType {
FPGA_AN505,
FPGA_AN521,
FPGA_AN524,
+ FPGA_AN547,
} MPS2TZFPGAType;
/*
@@ -106,11 +112,15 @@ struct MPS2TZMachineClass {
MPS2TZFPGAType fpga_type;
uint32_t scc_id;
uint32_t sysclk_frq; /* Main SYSCLK frequency in Hz */
+ uint32_t apb_periph_frq; /* APB peripheral frequency in Hz */
uint32_t len_oscclk;
const uint32_t *oscclk;
uint32_t fpgaio_num_leds; /* Number of LEDs in FPGAIO LED0 register */
bool fpgaio_has_switches; /* Does FPGAIO have SWITCH register? */
+ bool fpgaio_has_dbgctrl; /* Does FPGAIO have DBGCTRL register? */
int numirq; /* Number of external interrupts */
+ int uart_overflow_irq; /* number of the combined UART overflow IRQ */
+ uint32_t init_svtor; /* init-svtor setting for SSE */
const RAMInfo *raminfo;
const char *armsse_type;
};
@@ -149,6 +159,7 @@ struct MPS2TZMachineState {
#define TYPE_MPS2TZ_AN505_MACHINE MACHINE_TYPE_NAME("mps2-an505")
#define TYPE_MPS2TZ_AN521_MACHINE MACHINE_TYPE_NAME("mps2-an521")
#define TYPE_MPS3TZ_AN524_MACHINE MACHINE_TYPE_NAME("mps3-an524")
+#define TYPE_MPS3TZ_AN547_MACHINE MACHINE_TYPE_NAME("mps3-an547")
OBJECT_DECLARE_TYPE(MPS2TZMachineState, MPS2TZMachineClass, MPS2TZ_MACHINE)
@@ -248,6 +259,49 @@ static const RAMInfo an524_raminfo[] = { {
},
};
+static const RAMInfo an547_raminfo[] = { {
+ .name = "itcm",
+ .base = 0x00000000,
+ .size = 512 * KiB,
+ .mpc = -1,
+ .mrindex = 0,
+ }, {
+ .name = "sram",
+ .base = 0x01000000,
+ .size = 2 * MiB,
+ .mpc = 0,
+ .mrindex = 1,
+ }, {
+ .name = "dtcm",
+ .base = 0x20000000,
+ .size = 4 * 128 * KiB,
+ .mpc = -1,
+ .mrindex = 2,
+ }, {
+ .name = "sram 2",
+ .base = 0x21000000,
+ .size = 4 * MiB,
+ .mpc = -1,
+ .mrindex = 3,
+ }, {
+ /* We don't model QSPI flash yet; for now expose it as simple ROM */
+ .name = "QSPI",
+ .base = 0x28000000,
+ .size = 8 * MiB,
+ .mpc = 1,
+ .mrindex = 4,
+ .flags = IS_ROM,
+ }, {
+ .name = "DDR",
+ .base = 0x60000000,
+ .size = MPS3_DDR_SIZE,
+ .mpc = 2,
+ .mrindex = -1,
+ }, {
+ .name = NULL,
+ },
+};
+
static const RAMInfo *find_raminfo_for_mpc(MPS2TZMachineState *mms, int mpc)
{
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
@@ -377,7 +431,7 @@ static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque,
object_initialize_child(OBJECT(mms), name, uart, TYPE_CMSDK_APB_UART);
qdev_prop_set_chr(DEVICE(uart), "chardev", serial_hd(i));
- qdev_prop_set_uint32(DEVICE(uart), "pclk-frq", mmc->sysclk_frq);
+ qdev_prop_set_uint32(DEVICE(uart), "pclk-frq", mmc->apb_periph_frq);
sysbus_realize(SYS_BUS_DEVICE(uart), &error_fatal);
s = SYS_BUS_DEVICE(uart);
sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqs[0]));
@@ -421,6 +475,7 @@ static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque,
object_initialize_child(OBJECT(mms), "fpgaio", fpgaio, TYPE_MPS2_FPGAIO);
qdev_prop_set_uint32(DEVICE(fpgaio), "num-leds", mmc->fpgaio_num_leds);
qdev_prop_set_bit(DEVICE(fpgaio), "has-switches", mmc->fpgaio_has_switches);
+ qdev_prop_set_bit(DEVICE(fpgaio), "has-dbgctrl", mmc->fpgaio_has_dbgctrl);
sysbus_realize(SYS_BUS_DEVICE(fpgaio), &error_fatal);
return sysbus_mmio_get_region(SYS_BUS_DEVICE(fpgaio), 0);
}
@@ -696,6 +751,7 @@ static void mps2tz_common_init(MachineState *machine)
object_property_set_link(OBJECT(&mms->iotkit), "memory",
OBJECT(system_memory), &error_abort);
qdev_prop_set_uint32(iotkitdev, "EXP_NUMIRQ", mmc->numirq);
+ qdev_prop_set_uint32(iotkitdev, "init-svtor", mmc->init_svtor);
qdev_connect_clock_in(iotkitdev, "MAINCLK", mms->sysclk);
qdev_connect_clock_in(iotkitdev, "S32KCLK", mms->s32kclk);
sysbus_realize(SYS_BUS_DEVICE(&mms->iotkit), &error_fatal);
@@ -770,7 +826,7 @@ static void mps2tz_common_init(MachineState *machine)
&error_fatal);
qdev_realize(DEVICE(&mms->uart_irq_orgate), NULL, &error_fatal);
qdev_connect_gpio_out(DEVICE(&mms->uart_irq_orgate), 0,
- get_sse_irq_in(mms, 47));
+ get_sse_irq_in(mms, mmc->uart_overflow_irq));
/* Most of the devices in the FPGA are behind Peripheral Protection
* Controllers. The required order for initializing things is:
@@ -887,6 +943,55 @@ static void mps2tz_common_init(MachineState *machine)
},
};
+ const PPCInfo an547_ppcs[] = { {
+ .name = "apb_ppcexp0",
+ .ports = {
+ { "ssram-mpc", make_mpc, &mms->mpc[0], 0x57000000, 0x1000 },
+ { "qspi-mpc", make_mpc, &mms->mpc[1], 0x57001000, 0x1000 },
+ { "ddr-mpc", make_mpc, &mms->mpc[2], 0x57002000, 0x1000 },
+ },
+ }, {
+ .name = "apb_ppcexp1",
+ .ports = {
+ { "i2c0", make_i2c, &mms->i2c[0], 0x49200000, 0x1000 },
+ { "i2c1", make_i2c, &mms->i2c[1], 0x49201000, 0x1000 },
+ { "spi0", make_spi, &mms->spi[0], 0x49202000, 0x1000, { 53 } },
+ { "spi1", make_spi, &mms->spi[1], 0x49203000, 0x1000, { 54 } },
+ { "spi2", make_spi, &mms->spi[2], 0x49204000, 0x1000, { 55 } },
+ { "i2c2", make_i2c, &mms->i2c[2], 0x49205000, 0x1000 },
+ { "i2c3", make_i2c, &mms->i2c[3], 0x49206000, 0x1000 },
+ { /* port 7 reserved */ },
+ { "i2c4", make_i2c, &mms->i2c[4], 0x49208000, 0x1000 },
+ },
+ }, {
+ .name = "apb_ppcexp2",
+ .ports = {
+ { "scc", make_scc, &mms->scc, 0x49300000, 0x1000 },
+ { "i2s-audio", make_unimp_dev, &mms->i2s_audio, 0x49301000, 0x1000 },
+ { "fpgaio", make_fpgaio, &mms->fpgaio, 0x49302000, 0x1000 },
+ { "uart0", make_uart, &mms->uart[0], 0x49303000, 0x1000, { 33, 34, 43 } },
+ { "uart1", make_uart, &mms->uart[1], 0x49304000, 0x1000, { 35, 36, 44 } },
+ { "uart2", make_uart, &mms->uart[2], 0x49305000, 0x1000, { 37, 38, 45 } },
+ { "uart3", make_uart, &mms->uart[3], 0x49306000, 0x1000, { 39, 40, 46 } },
+ { "uart4", make_uart, &mms->uart[4], 0x49307000, 0x1000, { 41, 42, 47 } },
+ { "uart5", make_uart, &mms->uart[5], 0x49308000, 0x1000, { 125, 126, 127 } },
+
+ { /* port 9 reserved */ },
+ { "clcd", make_unimp_dev, &mms->cldc, 0x4930a000, 0x1000 },
+ { "rtc", make_rtc, &mms->rtc, 0x4930b000, 0x1000 },
+ },
+ }, {
+ .name = "ahb_ppcexp0",
+ .ports = {
+ { "gpio0", make_unimp_dev, &mms->gpio[0], 0x41100000, 0x1000 },
+ { "gpio1", make_unimp_dev, &mms->gpio[1], 0x41101000, 0x1000 },
+ { "gpio2", make_unimp_dev, &mms->gpio[2], 0x41102000, 0x1000 },
+ { "gpio3", make_unimp_dev, &mms->gpio[3], 0x41103000, 0x1000 },
+ { "eth-usb", make_eth_usb, NULL, 0x41400000, 0x200000, { 49 } },
+ },
+ },
+ };
+
switch (mmc->fpga_type) {
case FPGA_AN505:
case FPGA_AN521:
@@ -897,6 +1002,10 @@ static void mps2tz_common_init(MachineState *machine)
ppcs = an524_ppcs;
num_ppcs = ARRAY_SIZE(an524_ppcs);
break;
+ case FPGA_AN547:
+ ppcs = an547_ppcs;
+ num_ppcs = ARRAY_SIZE(an547_ppcs);
+ break;
default:
g_assert_not_reached();
}
@@ -975,6 +1084,11 @@ static void mps2tz_common_init(MachineState *machine)
create_unimplemented_device("FPGA NS PC", 0x48007000, 0x1000);
+ if (mmc->fpga_type == FPGA_AN547) {
+ create_unimplemented_device("U55 timing adapter 0", 0x48102000, 0x1000);
+ create_unimplemented_device("U55 timing adapter 1", 0x48103000, 0x1000);
+ }
+
create_non_mpc_ram(mms);
armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
@@ -1041,11 +1155,15 @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data)
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33");
mmc->scc_id = 0x41045050;
mmc->sysclk_frq = 20 * 1000 * 1000; /* 20MHz */
+ mmc->apb_periph_frq = mmc->sysclk_frq;
mmc->oscclk = an505_oscclk;
mmc->len_oscclk = ARRAY_SIZE(an505_oscclk);
mmc->fpgaio_num_leds = 2;
mmc->fpgaio_has_switches = false;
+ mmc->fpgaio_has_dbgctrl = false;
mmc->numirq = 92;
+ mmc->uart_overflow_irq = 47;
+ mmc->init_svtor = 0x10000000;
mmc->raminfo = an505_raminfo;
mmc->armsse_type = TYPE_IOTKIT;
mps2tz_set_default_ram_info(mmc);
@@ -1064,11 +1182,15 @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data)
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33");
mmc->scc_id = 0x41045210;
mmc->sysclk_frq = 20 * 1000 * 1000; /* 20MHz */
+ mmc->apb_periph_frq = mmc->sysclk_frq;
mmc->oscclk = an505_oscclk; /* AN521 is the same as AN505 here */
mmc->len_oscclk = ARRAY_SIZE(an505_oscclk);
mmc->fpgaio_num_leds = 2;
mmc->fpgaio_has_switches = false;
+ mmc->fpgaio_has_dbgctrl = false;
mmc->numirq = 92;
+ mmc->uart_overflow_irq = 47;
+ mmc->init_svtor = 0x10000000;
mmc->raminfo = an505_raminfo; /* AN521 is the same as AN505 here */
mmc->armsse_type = TYPE_SSE200;
mps2tz_set_default_ram_info(mmc);
@@ -1087,16 +1209,47 @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data)
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33");
mmc->scc_id = 0x41045240;
mmc->sysclk_frq = 32 * 1000 * 1000; /* 32MHz */
+ mmc->apb_periph_frq = mmc->sysclk_frq;
mmc->oscclk = an524_oscclk;
mmc->len_oscclk = ARRAY_SIZE(an524_oscclk);
mmc->fpgaio_num_leds = 10;
mmc->fpgaio_has_switches = true;
+ mmc->fpgaio_has_dbgctrl = false;
mmc->numirq = 95;
+ mmc->uart_overflow_irq = 47;
+ mmc->init_svtor = 0x10000000;
mmc->raminfo = an524_raminfo;
mmc->armsse_type = TYPE_SSE200;
mps2tz_set_default_ram_info(mmc);
}
+static void mps3tz_an547_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc);
+
+ mc->desc = "ARM MPS3 with AN547 FPGA image for Cortex-M55";
+ mc->default_cpus = 1;
+ mc->min_cpus = mc->default_cpus;
+ mc->max_cpus = mc->default_cpus;
+ mmc->fpga_type = FPGA_AN547;
+ mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m55");
+ mmc->scc_id = 0x41055470;
+ mmc->sysclk_frq = 32 * 1000 * 1000; /* 32MHz */
+ mmc->apb_periph_frq = 25 * 1000 * 1000; /* 25MHz */
+ mmc->oscclk = an524_oscclk; /* same as AN524 */
+ mmc->len_oscclk = ARRAY_SIZE(an524_oscclk);
+ mmc->fpgaio_num_leds = 10;
+ mmc->fpgaio_has_switches = true;
+ mmc->fpgaio_has_dbgctrl = true;
+ mmc->numirq = 96;
+ mmc->uart_overflow_irq = 48;
+ mmc->init_svtor = 0x00000000;
+ mmc->raminfo = an547_raminfo;
+ mmc->armsse_type = TYPE_SSE300;
+ mps2tz_set_default_ram_info(mmc);
+}
+
static const TypeInfo mps2tz_info = {
.name = TYPE_MPS2TZ_MACHINE,
.parent = TYPE_MACHINE,
@@ -1128,12 +1281,19 @@ static const TypeInfo mps3tz_an524_info = {
.class_init = mps3tz_an524_class_init,
};
+static const TypeInfo mps3tz_an547_info = {
+ .name = TYPE_MPS3TZ_AN547_MACHINE,
+ .parent = TYPE_MPS2TZ_MACHINE,
+ .class_init = mps3tz_an547_class_init,
+};
+
static void mps2tz_machine_init(void)
{
type_register_static(&mps2tz_info);
type_register_static(&mps2tz_an505_info);
type_register_static(&mps2tz_an521_info);
type_register_static(&mps3tz_an524_info);
+ type_register_static(&mps3tz_an547_info);
}
type_init(mps2tz_machine_init);
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
index c9713638c5..a9db25eb99 100644
--- a/hw/arm/xlnx-zcu102.c
+++ b/hw/arm/xlnx-zcu102.c
@@ -22,7 +22,6 @@
#include "hw/boards.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
-#include "sysemu/qtest.h"
#include "sysemu/device_tree.h"
#include "qom/object.h"
#include "net/can_emu.h"
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 46030c1ef8..7f01284a5c 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -50,6 +50,7 @@
#define QSPI_ADDR 0xff0f0000
#define LQSPI_ADDR 0xc0000000
#define QSPI_IRQ 15
+#define QSPI_DMA_ADDR 0xff0f0800
#define DP_ADDR 0xfd4a0000
#define DP_IRQ 113
@@ -284,6 +285,8 @@ static void xlnx_zynqmp_init(Object *obj)
for (i = 0; i < XLNX_ZYNQMP_NUM_ADMA_CH; i++) {
object_initialize_child(obj, "adma[*]", &s->adma[i], TYPE_XLNX_ZDMA);
}
+
+ object_initialize_child(obj, "qspi-dma", &s->qspi_dma, TYPE_XLNX_CSU_DMA);
}
static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
@@ -301,11 +304,13 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
ram_size = memory_region_size(s->ddr_ram);
- /* Create the DDR Memory Regions. User friendly checks should happen at
+ /*
+ * Create the DDR Memory Regions. User friendly checks should happen at
* the board level
*/
if (ram_size > XLNX_ZYNQMP_MAX_LOW_RAM_SIZE) {
- /* The RAM size is above the maximum available for the low DDR.
+ /*
+ * The RAM size is above the maximum available for the low DDR.
* Create the high DDR memory region as well.
*/
assert(ram_size <= XLNX_ZYNQMP_MAX_RAM_SIZE);
@@ -521,7 +526,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
SysBusDevice *sbd = SYS_BUS_DEVICE(&s->sdhci[i]);
Object *sdhci = OBJECT(&s->sdhci[i]);
- /* Compatible with:
+ /*
+ * Compatible with:
* - SD Host Controller Specification Version 3.00
* - SDIO Specification Version 3.0
* - eMMC Specification Version 4.51
@@ -635,6 +641,15 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->adma[i]), 0,
gic_spi[adma_ch_intr[i]]);
}
+
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi_dma), errp)) {
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi_dma), 0, QSPI_DMA_ADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0, gic_spi[QSPI_IRQ]);
+ object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma",
+ OBJECT(&s->qspi_dma), errp);
}
static Property xlnx_zynqmp_props[] = {
diff --git a/hw/arm/z2.c b/hw/arm/z2.c
index 308c4da956..5099bd8380 100644
--- a/hw/arm/z2.c
+++ b/hw/arm/z2.c
@@ -24,7 +24,6 @@
#include "hw/audio/wm8750.h"
#include "audio/audio.h"
#include "exec/address-spaces.h"
-#include "sysemu/qtest.h"
#include "cpu.h"
#include "qom/object.h"
diff --git a/hw/block/Kconfig b/hw/block/Kconfig
index 2d17f481ad..4fcd152166 100644
--- a/hw/block/Kconfig
+++ b/hw/block/Kconfig
@@ -22,6 +22,9 @@ config ECC
config ONENAND
bool
+config TC58128
+ bool
+
config NVME_PCI
bool
default y if PCI_DEVICES
diff --git a/hw/block/meson.build b/hw/block/meson.build
index 602ca6c854..5492829155 100644
--- a/hw/block/meson.build
+++ b/hw/block/meson.build
@@ -12,8 +12,8 @@ softmmu_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c'))
softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c'))
softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c'))
softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c'))
-softmmu_ss.add(when: 'CONFIG_SH4', if_true: files('tc58128.c'))
-softmmu_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('nvme.c', 'nvme-ns.c'))
+softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c'))
+softmmu_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('nvme.c', 'nvme-ns.c', 'nvme-subsys.c'))
specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c'))
specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c'))
diff --git a/hw/block/nvme-ns.c b/hw/block/nvme-ns.c
index 93ac6e107a..eda6a0c003 100644
--- a/hw/block/nvme-ns.c
+++ b/hw/block/nvme-ns.c
@@ -63,6 +63,15 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp)
id_ns->npda = id_ns->npdg = npdg - 1;
+ if (nvme_ns_shared(ns)) {
+ id_ns->nmic |= NVME_NMIC_NS_SHARED;
+ }
+
+ /* Simple Copy command limits */
+ id_ns->mssrl = cpu_to_le16(ns->params.mssrl);
+ id_ns->mcl = cpu_to_le32(ns->params.mcl);
+ id_ns->msrc = ns->params.msrc;
+
return 0;
}
@@ -154,6 +163,18 @@ static int nvme_ns_zoned_check_calc_geometry(NvmeNamespace *ns, Error **errp)
return -1;
}
+ if (ns->params.max_active_zones) {
+ if (ns->params.max_open_zones > ns->params.max_active_zones) {
+ error_setg(errp, "max_open_zones (%u) exceeds max_active_zones (%u)",
+ ns->params.max_open_zones, ns->params.max_active_zones);
+ return -1;
+ }
+
+ if (!ns->params.max_open_zones) {
+ ns->params.max_open_zones = ns->params.max_active_zones;
+ }
+ }
+
if (ns->params.zd_extension_size) {
if (ns->params.zd_extension_size & 0x3f) {
error_setg(errp,
@@ -363,16 +384,27 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
return;
}
- if (nvme_register_namespace(n, ns, errp)) {
- return;
+ if (ns->subsys) {
+ if (nvme_subsys_register_ns(ns, errp)) {
+ return;
+ }
+ } else {
+ if (nvme_register_namespace(n, ns, errp)) {
+ return;
+ }
}
-
}
static Property nvme_ns_props[] = {
DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf),
+ DEFINE_PROP_LINK("subsys", NvmeNamespace, subsys, TYPE_NVME_SUBSYS,
+ NvmeSubsystem *),
+ DEFINE_PROP_BOOL("detached", NvmeNamespace, params.detached, false),
DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0),
DEFINE_PROP_UUID("uuid", NvmeNamespace, params.uuid),
+ DEFINE_PROP_UINT16("mssrl", NvmeNamespace, params.mssrl, 128),
+ DEFINE_PROP_UINT32("mcl", NvmeNamespace, params.mcl, 128),
+ DEFINE_PROP_UINT8("msrc", NvmeNamespace, params.msrc, 127),
DEFINE_PROP_BOOL("zoned", NvmeNamespace, params.zoned, false),
DEFINE_PROP_SIZE("zoned.zone_size", NvmeNamespace, params.zone_size_bs,
NVME_DEFAULT_ZONE_SIZE),
diff --git a/hw/block/nvme-ns.h b/hw/block/nvme-ns.h
index 293ac990e3..318d3aebe1 100644
--- a/hw/block/nvme-ns.h
+++ b/hw/block/nvme-ns.h
@@ -26,9 +26,14 @@ typedef struct NvmeZone {
} NvmeZone;
typedef struct NvmeNamespaceParams {
+ bool detached;
uint32_t nsid;
QemuUUID uuid;
+ uint16_t mssrl;
+ uint32_t mcl;
+ uint8_t msrc;
+
bool zoned;
bool cross_zone_read;
uint64_t zone_size_bs;
@@ -47,6 +52,9 @@ typedef struct NvmeNamespace {
const uint32_t *iocs;
uint8_t csi;
+ NvmeSubsystem *subsys;
+ QTAILQ_ENTRY(NvmeNamespace) entry;
+
NvmeIdNsZoned *id_ns_zoned;
NvmeZone *zone_array;
QTAILQ_HEAD(, NvmeZone) exp_open_zones;
@@ -77,6 +85,11 @@ static inline uint32_t nvme_nsid(NvmeNamespace *ns)
return -1;
}
+static inline bool nvme_ns_shared(NvmeNamespace *ns)
+{
+ return !!ns->subsys;
+}
+
static inline NvmeLBAF *nvme_ns_lbaf(NvmeNamespace *ns)
{
NvmeIdNs *id_ns = &ns->id_ns;
diff --git a/hw/block/nvme-subsys.c b/hw/block/nvme-subsys.c
new file mode 100644
index 0000000000..af4804a819
--- /dev/null
+++ b/hw/block/nvme-subsys.c
@@ -0,0 +1,116 @@
+/*
+ * QEMU NVM Express Subsystem: nvme-subsys
+ *
+ * Copyright (c) 2021 Minwoo Im <minwoo.im.dev@gmail.com>
+ *
+ * This code is licensed under the GNU GPL v2. Refer COPYING.
+ */
+
+#include "qemu/units.h"
+#include "qemu/osdep.h"
+#include "qemu/uuid.h"
+#include "qemu/iov.h"
+#include "qemu/cutils.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-core.h"
+#include "hw/block/block.h"
+#include "block/aio.h"
+#include "block/accounting.h"
+#include "sysemu/sysemu.h"
+#include "hw/pci/pci.h"
+#include "nvme.h"
+#include "nvme-subsys.h"
+
+int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
+{
+ NvmeSubsystem *subsys = n->subsys;
+ int cntlid;
+
+ for (cntlid = 0; cntlid < ARRAY_SIZE(subsys->ctrls); cntlid++) {
+ if (!subsys->ctrls[cntlid]) {
+ break;
+ }
+ }
+
+ if (cntlid == ARRAY_SIZE(subsys->ctrls)) {
+ error_setg(errp, "no more free controller id");
+ return -1;
+ }
+
+ subsys->ctrls[cntlid] = n;
+
+ return cntlid;
+}
+
+int nvme_subsys_register_ns(NvmeNamespace *ns, Error **errp)
+{
+ NvmeSubsystem *subsys = ns->subsys;
+ NvmeCtrl *n;
+ int i;
+
+ if (subsys->namespaces[nvme_nsid(ns)]) {
+ error_setg(errp, "namespace %d already registerd to subsy %s",
+ nvme_nsid(ns), subsys->parent_obj.id);
+ return -1;
+ }
+
+ subsys->namespaces[nvme_nsid(ns)] = ns;
+
+ for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) {
+ n = subsys->ctrls[i];
+
+ if (n && nvme_register_namespace(n, ns, errp)) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void nvme_subsys_setup(NvmeSubsystem *subsys)
+{
+ const char *nqn = subsys->params.nqn ?
+ subsys->params.nqn : subsys->parent_obj.id;
+
+ snprintf((char *)subsys->subnqn, sizeof(subsys->subnqn),
+ "nqn.2019-08.org.qemu:%s", nqn);
+}
+
+static void nvme_subsys_realize(DeviceState *dev, Error **errp)
+{
+ NvmeSubsystem *subsys = NVME_SUBSYS(dev);
+
+ nvme_subsys_setup(subsys);
+}
+
+static Property nvme_subsystem_props[] = {
+ DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void nvme_subsys_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+
+ dc->realize = nvme_subsys_realize;
+ dc->desc = "Virtual NVMe subsystem";
+
+ device_class_set_props(dc, nvme_subsystem_props);
+}
+
+static const TypeInfo nvme_subsys_info = {
+ .name = TYPE_NVME_SUBSYS,
+ .parent = TYPE_DEVICE,
+ .class_init = nvme_subsys_class_init,
+ .instance_size = sizeof(NvmeSubsystem),
+};
+
+static void nvme_subsys_register_types(void)
+{
+ type_register_static(&nvme_subsys_info);
+}
+
+type_init(nvme_subsys_register_types)
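nvme_subsys_register_ctrl() hands out the first free slot in ctrls[] as the controller id. The controller-side caller lives in hw/block/nvme.c and is not part of this excerpt; the following is a minimal sketch of the assumed usage, where the n->cntlid field name is an assumption.

/* Sketch of assumed controller-side usage; not part of this excerpt. */
static int nvme_init_subsys_example(NvmeCtrl *n, Error **errp)
{
    int cntlid;

    if (!n->subsys) {
        return 0;   /* controller not attached to any subsystem */
    }

    cntlid = nvme_subsys_register_ctrl(n, errp);
    if (cntlid < 0) {
        return -1;  /* errp already set: no more free controller ids */
    }

    n->cntlid = cntlid; /* assumed field holding the allocated controller id */
    return 0;
}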
diff --git a/hw/block/nvme-subsys.h b/hw/block/nvme-subsys.h
new file mode 100644
index 0000000000..fb66ae752a
--- /dev/null
+++ b/hw/block/nvme-subsys.h
@@ -0,0 +1,60 @@
+/*
+ * QEMU NVM Express Subsystem: nvme-subsys
+ *
+ * Copyright (c) 2021 Minwoo Im <minwoo.im.dev@gmail.com>
+ *
+ * This code is licensed under the GNU GPL v2. Refer COPYING.
+ */
+
+#ifndef NVME_SUBSYS_H
+#define NVME_SUBSYS_H
+
+#define TYPE_NVME_SUBSYS "nvme-subsys"
+#define NVME_SUBSYS(obj) \
+ OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
+
+#define NVME_SUBSYS_MAX_CTRLS 32
+#define NVME_SUBSYS_MAX_NAMESPACES 256
+
+typedef struct NvmeCtrl NvmeCtrl;
+typedef struct NvmeNamespace NvmeNamespace;
+typedef struct NvmeSubsystem {
+ DeviceState parent_obj;
+ uint8_t subnqn[256];
+
+ NvmeCtrl *ctrls[NVME_SUBSYS_MAX_CTRLS];
+ /* Allocated namespaces for this subsystem */
+ NvmeNamespace *namespaces[NVME_SUBSYS_MAX_NAMESPACES + 1];
+
+ struct {
+ char *nqn;
+ } params;
+} NvmeSubsystem;
+
+int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
+int nvme_subsys_register_ns(NvmeNamespace *ns, Error **errp);
+
+static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
+ uint32_t cntlid)
+{
+ if (!subsys) {
+ return NULL;
+ }
+
+ return subsys->ctrls[cntlid];
+}
+
+/*
+ * Return the allocated namespace with the specified nsid in the subsystem.
+ */
+static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
+ uint32_t nsid)
+{
+ if (!subsys) {
+ return NULL;
+ }
+
+ return subsys->namespaces[nsid];
+}
+
+#endif /* NVME_SUBSYS_H */
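Taken together with the parameters documented in the hw/block/nvme.c usage comment below, a subsystem with two controllers sharing a single namespace could be set up roughly as follows; the IDs, serials and image file name are placeholders, not values from this patch.

-drive file=nvm.img,if=none,id=nvm0
-device nvme-subsys,id=subsys0,nqn=subsys0
-device nvme,serial=deadbeef,id=nvme0,subsys=subsys0
-device nvme,serial=deadbeee,id=nvme1,subsys=subsys0
-device nvme-ns,drive=nvm0,nsid=1,subsys=subsys0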
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index fb83636abd..d439e44db8 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -17,14 +17,17 @@
/**
* Usage: add options:
* -drive file=<file>,if=none,id=<drive_id>
+ * -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
* -device nvme,serial=<serial>,id=<bus_name>, \
* cmb_size_mb=<cmb_size_mb[optional]>, \
* [pmrdev=<mem_backend_file_id>,] \
* max_ioqpairs=<N[optional]>, \
- * aerl=<N[optional]>, aer_max_queued=<N[optional]>, \
- * mdts=<N[optional]>,zoned.append_size_limit=<N[optional]> \
+ * aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
+ * mdts=<N[optional]>,zoned.zasl=<N[optional]>, \
+ * subsys=<subsys_id>
* -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
- * zoned=<true|false[optional]>
+ * zoned=<true|false[optional]>, \
+ * subsys=<subsys_id>,detached=<true|false[optional]>
*
* Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
* offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
@@ -38,9 +41,27 @@
*
* The PMR will use BAR 4/5 exclusively.
*
+ * To place controller(s) and namespace(s) in a subsystem, provide the
+ * nvme-subsys device as above.
+ *
+ * nvme subsystem device parameters
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * - `nqn`
+ * This parameter provides the `<nqn_id>` part of the string
+ * `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
+ * of subsystem controllers. Note that `<nqn_id>` should be unique per
+ * subsystem, but this is not enforced by QEMU. If not specified, it will
+ * default to the value of the `id` parameter (`<subsys_id>`).
*
* nvme device parameters
* ~~~~~~~~~~~~~~~~~~~~~~
+ * - `subsys`
+ * Specifying this parameter attaches the controller to the subsystem, and
+ * the SUBNQN field in the controller will report the NQN of the subsystem
+ * device. This also enables the multi-controller capability, reported in the
+ * CMIC (Controller Multi-path I/O and Namespace Sharing Capabilities) field
+ * of the Identify Controller data structure.
+ *
* - `aerl`
* The Asynchronous Event Request Limit (AERL). Indicates the maximum number
* of concurrently outstanding Asynchronous Event Request commands support
@@ -51,13 +72,31 @@
* completion when there are no outstanding AERs. When the maximum number of
* enqueued events are reached, subsequent events will be dropped.
*
- * - `zoned.append_size_limit`
- * The maximum I/O size in bytes that is allowed in Zone Append command.
- * The default is 128KiB. Since internally this this value is maintained as
- * ZASL = log2(<maximum append size> / <page size>), some values assigned
- * to this property may be rounded down and result in a lower maximum ZA
- * data size being in effect. By setting this property to 0, users can make
- * ZASL to be equal to MDTS. This property only affects zoned namespaces.
+ * - `mdts`
+ * Indicates the maximum data transfer size for a command that transfers data
+ * between host-accessible memory and the controller. The value is specified
+ * as a power of two (2^n) and is in units of the minimum memory page size
+ * (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
+ *
+ * - `zoned.zasl`
+ * Indicates the maximum data transfer size for the Zone Append command. Like
+ * `mdts`, the value is specified as a power of two (2^n) and is in units of
+ * the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
+ * defaulting to the value of `mdts`).
+ *
+ * nvme namespace device parameters
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * - `subsys`
+ * If given, the namespace will be attached to all controllers in the
+ * subsystem. Otherwise, `bus` must be given to attach this namespace to a
+ * specific controller as a non-shared namespace.
+ *
+ * - `detached`
+ * This parameter is only valid together with the `subsys` parameter. If left
+ * at the default value (`false/off`), the namespace will be attached to all
+ * controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
+ * namespace will be available in the subsystem but not attached to any
+ * controllers.
*
* Setting `zoned` to true selects Zoned Command Set at the namespace.
* In this case, the following namespace properties are available to configure
@@ -157,6 +196,7 @@ static const uint32_t nvme_cse_acs[256] = {
[NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
+ [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
};
static const uint32_t nvme_cse_iocs_none[256];
@@ -167,6 +207,7 @@ static const uint32_t nvme_cse_iocs_nvm[256] = {
[NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_READ] = NVME_CMD_EFF_CSUPP,
[NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
+ [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP,
};
@@ -176,6 +217,7 @@ static const uint32_t nvme_cse_iocs_zoned[256] = {
[NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_READ] = NVME_CMD_EFF_CSUPP,
[NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
+ [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP,
[NVME_CMD_ZONE_APPEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
@@ -407,15 +449,31 @@ static void nvme_req_clear(NvmeRequest *req)
req->status = NVME_SUCCESS;
}
-static void nvme_req_exit(NvmeRequest *req)
+static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
{
- if (req->qsg.sg) {
- qemu_sglist_destroy(&req->qsg);
+ if (dma) {
+ pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0);
+ sg->flags = NVME_SG_DMA;
+ } else {
+ qemu_iovec_init(&sg->iov, 0);
}
- if (req->iov.iov) {
- qemu_iovec_destroy(&req->iov);
+ sg->flags |= NVME_SG_ALLOC;
+}
+
+static inline void nvme_sg_unmap(NvmeSg *sg)
+{
+ if (!(sg->flags & NVME_SG_ALLOC)) {
+ return;
}
+
+ if (sg->flags & NVME_SG_DMA) {
+ qemu_sglist_destroy(&sg->qsg);
+ } else {
+ qemu_iovec_destroy(&sg->iov);
+ }
+
+ memset(sg, 0x0, sizeof(*sg));
}
static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
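nvme_sg_init() and nvme_sg_unmap() operate on the new NvmeSg type, whose declaration is in hw/block/nvme.h (changed by this patch but not shown in this excerpt). Below is a sketch consistent with the accesses above; the exact definition, in particular whether qsg and iov share storage, may differ.

/* Sketch inferred from nvme_sg_init()/nvme_sg_unmap(); may differ from nvme.h. */
typedef enum NvmeSgFlags {
    NVME_SG_ALLOC = 1 << 0, /* one of qsg/iov has been initialized */
    NVME_SG_DMA   = 1 << 1, /* qsg is in use (DMA-able memory), else iov */
} NvmeSgFlags;

typedef struct NvmeSg {
    int flags;
    union {
        QEMUSGList   qsg; /* used when the data is in DMA-able guest memory */
        QEMUIOVector iov; /* used when the data is in the CMB or PMR */
    };
} NvmeSg;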
@@ -452,8 +510,7 @@ static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
return NVME_SUCCESS;
}
-static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
- hwaddr addr, size_t len)
+static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len)
{
bool cmb = false, pmr = false;
@@ -470,40 +527,33 @@ static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
}
if (cmb || pmr) {
- if (qsg && qsg->sg) {
+ if (sg->flags & NVME_SG_DMA) {
return NVME_INVALID_USE_OF_CMB | NVME_DNR;
}
- assert(iov);
-
- if (!iov->iov) {
- qemu_iovec_init(iov, 1);
- }
-
if (cmb) {
- return nvme_map_addr_cmb(n, iov, addr, len);
+ return nvme_map_addr_cmb(n, &sg->iov, addr, len);
} else {
- return nvme_map_addr_pmr(n, iov, addr, len);
+ return nvme_map_addr_pmr(n, &sg->iov, addr, len);
}
}
- if (iov && iov->iov) {
+ if (!(sg->flags & NVME_SG_DMA)) {
return NVME_INVALID_USE_OF_CMB | NVME_DNR;
}
- assert(qsg);
-
- if (!qsg->sg) {
- pci_dma_sglist_init(qsg, &n->parent_obj, 1);
- }
-
- qemu_sglist_add(qsg, addr, len);
+ qemu_sglist_add(&sg->qsg, addr, len);
return NVME_SUCCESS;
}
-static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
- uint32_t len, NvmeRequest *req)
+static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
+{
+ return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr));
+}
+
+static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
+ uint64_t prp2, uint32_t len)
{
hwaddr trans_len = n->page_size - (prp1 % n->page_size);
trans_len = MIN(len, trans_len);
@@ -511,20 +561,13 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
uint16_t status;
int ret;
- QEMUSGList *qsg = &req->qsg;
- QEMUIOVector *iov = &req->iov;
-
trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);
- if (nvme_addr_is_cmb(n, prp1) || (nvme_addr_is_pmr(n, prp1))) {
- qemu_iovec_init(iov, num_prps);
- } else {
- pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
- }
+ nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1));
- status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
+ status = nvme_map_addr(n, sg, prp1, trans_len);
if (status) {
- return status;
+ goto unmap;
}
len -= trans_len;
@@ -539,7 +582,8 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
if (ret) {
trace_pci_nvme_err_addr_read(prp2);
- return NVME_DATA_TRAS_ERROR;
+ status = NVME_DATA_TRAS_ERROR;
+ goto unmap;
}
while (len != 0) {
uint64_t prp_ent = le64_to_cpu(prp_list[i]);
@@ -547,7 +591,8 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
if (i == n->max_prp_ents - 1 && len > n->page_size) {
if (unlikely(prp_ent & (n->page_size - 1))) {
trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
- return NVME_INVALID_PRP_OFFSET | NVME_DNR;
+ status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
+ goto unmap;
}
i = 0;
@@ -557,20 +602,22 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
prp_trans);
if (ret) {
trace_pci_nvme_err_addr_read(prp_ent);
- return NVME_DATA_TRAS_ERROR;
+ status = NVME_DATA_TRAS_ERROR;
+ goto unmap;
}
prp_ent = le64_to_cpu(prp_list[i]);
}
if (unlikely(prp_ent & (n->page_size - 1))) {
trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
- return NVME_INVALID_PRP_OFFSET | NVME_DNR;
+ status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
+ goto unmap;
}
trans_len = MIN(len, n->page_size);
- status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
+ status = nvme_map_addr(n, sg, prp_ent, trans_len);
if (status) {
- return status;
+ goto unmap;
}
len -= trans_len;
@@ -579,26 +626,30 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
} else {
if (unlikely(prp2 & (n->page_size - 1))) {
trace_pci_nvme_err_invalid_prp2_align(prp2);
- return NVME_INVALID_PRP_OFFSET | NVME_DNR;
+ status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
+ goto unmap;
}
- status = nvme_map_addr(n, qsg, iov, prp2, len);
+ status = nvme_map_addr(n, sg, prp2, len);
if (status) {
- return status;
+ goto unmap;
}
}
}
return NVME_SUCCESS;
+
+unmap:
+ nvme_sg_unmap(sg);
+ return status;
}
/*
* Map 'nsgld' data descriptors from 'segment'. The function will subtract the
* number of bytes mapped in len.
*/
-static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
- QEMUIOVector *iov,
+static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
NvmeSglDescriptor *segment, uint64_t nsgld,
- size_t *len, NvmeRequest *req)
+ size_t *len, NvmeCmd *cmd)
{
dma_addr_t addr, trans_len;
uint32_t dlen;
@@ -609,7 +660,7 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
switch (type) {
case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
- if (req->cmd.opcode == NVME_CMD_WRITE) {
+ if (cmd->opcode == NVME_CMD_WRITE) {
continue;
}
case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
@@ -638,7 +689,7 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
break;
}
- trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req));
+ trace_pci_nvme_err_invalid_sgl_excess_length(dlen);
return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
}
@@ -654,7 +705,7 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
}
- status = nvme_map_addr(n, qsg, iov, addr, trans_len);
+ status = nvme_map_addr(n, sg, addr, trans_len);
if (status) {
return status;
}
@@ -666,9 +717,8 @@ next:
return NVME_SUCCESS;
}
-static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
- NvmeSglDescriptor sgl, size_t len,
- NvmeRequest *req)
+static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
+ size_t len, NvmeCmd *cmd)
{
/*
* Read the segment in chunks of 256 descriptors (one 4k page) to avoid
@@ -689,14 +739,16 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
sgld = &sgl;
addr = le64_to_cpu(sgl.addr);
- trace_pci_nvme_map_sgl(nvme_cid(req), NVME_SGL_TYPE(sgl.type), len);
+ trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len);
+
+ nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr));
/*
* If the entire transfer can be described with a single data block it can
* be mapped directly.
*/
if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
- status = nvme_map_sgl_data(n, qsg, iov, sgld, 1, &len, req);
+ status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd);
if (status) {
goto unmap;
}
@@ -734,8 +786,8 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
goto unmap;
}
- status = nvme_map_sgl_data(n, qsg, iov, segment, SEG_CHUNK_SIZE,
- &len, req);
+ status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE,
+ &len, cmd);
if (status) {
goto unmap;
}
@@ -761,7 +813,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
switch (NVME_SGL_TYPE(last_sgld->type)) {
case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
- status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld, &len, req);
+ status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd);
if (status) {
goto unmap;
}
@@ -788,7 +840,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
* Do not map the last descriptor; it will be a Segment or Last Segment
* descriptor and is handled by the next iteration.
*/
- status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld - 1, &len, req);
+ status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd);
if (status) {
goto unmap;
}
@@ -804,83 +856,120 @@ out:
return NVME_SUCCESS;
unmap:
- if (iov->iov) {
- qemu_iovec_destroy(iov);
- }
-
- if (qsg->sg) {
- qemu_sglist_destroy(qsg);
- }
-
+ nvme_sg_unmap(sg);
return status;
}
-static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
+static uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
+ NvmeCmd *cmd)
{
uint64_t prp1, prp2;
- switch (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
+ switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) {
case NVME_PSDT_PRP:
- prp1 = le64_to_cpu(req->cmd.dptr.prp1);
- prp2 = le64_to_cpu(req->cmd.dptr.prp2);
+ prp1 = le64_to_cpu(cmd->dptr.prp1);
+ prp2 = le64_to_cpu(cmd->dptr.prp2);
- return nvme_map_prp(n, prp1, prp2, len, req);
+ return nvme_map_prp(n, sg, prp1, prp2, len);
case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
case NVME_PSDT_SGL_MPTR_SGL:
- /* SGLs shall not be used for Admin commands in NVMe over PCIe */
- if (!req->sq->sqid) {
- return NVME_INVALID_FIELD | NVME_DNR;
- }
-
- return nvme_map_sgl(n, &req->qsg, &req->iov, req->cmd.dptr.sgl, len,
- req);
+ return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd);
default:
return NVME_INVALID_FIELD;
}
}
-static uint16_t nvme_dma(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
- DMADirection dir, NvmeRequest *req)
-{
- uint16_t status = NVME_SUCCESS;
+typedef enum NvmeTxDirection {
+ NVME_TX_DIRECTION_TO_DEVICE = 0,
+ NVME_TX_DIRECTION_FROM_DEVICE = 1,
+} NvmeTxDirection;
- status = nvme_map_dptr(n, len, req);
- if (status) {
- return status;
- }
-
- /* assert that only one of qsg and iov carries data */
- assert((req->qsg.nsg > 0) != (req->iov.niov > 0));
+static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len,
+ NvmeTxDirection dir)
+{
+ assert(sg->flags & NVME_SG_ALLOC);
- if (req->qsg.nsg > 0) {
+ if (sg->flags & NVME_SG_DMA) {
uint64_t residual;
- if (dir == DMA_DIRECTION_TO_DEVICE) {
- residual = dma_buf_write(ptr, len, &req->qsg);
+ if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
+ residual = dma_buf_write(ptr, len, &sg->qsg);
} else {
- residual = dma_buf_read(ptr, len, &req->qsg);
+ residual = dma_buf_read(ptr, len, &sg->qsg);
}
if (unlikely(residual)) {
trace_pci_nvme_err_invalid_dma();
- status = NVME_INVALID_FIELD | NVME_DNR;
+ return NVME_INVALID_FIELD | NVME_DNR;
}
} else {
size_t bytes;
- if (dir == DMA_DIRECTION_TO_DEVICE) {
- bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
+ if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
+ bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len);
} else {
- bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
+ bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len);
}
if (unlikely(bytes != len)) {
trace_pci_nvme_err_invalid_dma();
- status = NVME_INVALID_FIELD | NVME_DNR;
+ return NVME_INVALID_FIELD | NVME_DNR;
}
}
- return status;
+ return NVME_SUCCESS;
+}
+
+static inline uint16_t nvme_c2h(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+ NvmeRequest *req)
+{
+ uint16_t status;
+
+ status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
+ if (status) {
+ return status;
+ }
+
+ return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE);
+}
+
+static inline uint16_t nvme_h2c(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+ NvmeRequest *req)
+{
+ uint16_t status;
+
+ status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
+ if (status) {
+ return status;
+ }
+
+ return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE);
+}
+
+static inline void nvme_blk_read(BlockBackend *blk, int64_t offset,
+ BlockCompletionFunc *cb, NvmeRequest *req)
+{
+ assert(req->sg.flags & NVME_SG_ALLOC);
+
+ if (req->sg.flags & NVME_SG_DMA) {
+ req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
+ cb, req);
+ } else {
+ req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req);
+ }
+}
+
+static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
+ BlockCompletionFunc *cb, NvmeRequest *req)
+{
+ assert(req->sg.flags & NVME_SG_ALLOC);
+
+ if (req->sg.flags & NVME_SG_DMA) {
+ req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
+ cb, req);
+ } else {
+ req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req);
+ }
}
static void nvme_post_cqes(void *opaque)
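nvme_c2h() and nvme_h2c() fold the old map-then-nvme_dma() sequence into single helpers for controller-to-host and host-to-controller transfers. The real call sites are in later parts of nvme.c that are not shown here; the sketch below shows the kind of caller they are meant for, where the function name is illustrative and the n->id_ctrl field is an assumption.

/* Illustrative only: return a controller data structure to the host. */
static uint16_t nvme_identify_ctrl_example(NvmeCtrl *n, NvmeRequest *req)
{
    /* Maps the command's DPTR and copies controller-to-host. */
    return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);
}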
@@ -913,7 +1002,7 @@ static void nvme_post_cqes(void *opaque)
}
QTAILQ_REMOVE(&cq->req_list, req, entry);
nvme_inc_cq_tail(cq);
- nvme_req_exit(req);
+ nvme_sg_unmap(&req->sg);
QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
}
if (cq->tail != cq->head) {
@@ -1048,6 +1137,7 @@ static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
uint8_t mdts = n->params.mdts;
if (mdts && len > n->page_size << mdts) {
+ trace_pci_nvme_err_mdts(len);
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -1129,7 +1219,7 @@ static void nvme_aio_err(NvmeRequest *req, int ret)
break;
}
- trace_pci_nvme_err_aio(nvme_cid(req), strerror(ret), status);
+ trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status);
error_setg_errno(&local_err, -ret, "aio failed");
error_report_err(local_err);
@@ -1185,9 +1275,8 @@ static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone)
return NVME_INTERNAL_DEV_ERROR;
}
-static uint16_t nvme_check_zone_write(NvmeCtrl *n, NvmeNamespace *ns,
- NvmeZone *zone, uint64_t slba,
- uint32_t nlb)
+static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone,
+ uint64_t slba, uint32_t nlb)
{
uint64_t zcap = nvme_zone_wr_boundary(zone);
uint16_t status;
@@ -1212,8 +1301,6 @@ static uint16_t nvme_check_zone_write(NvmeCtrl *n, NvmeNamespace *ns,
static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone)
{
- uint16_t status;
-
switch (nvme_get_zone_state(zone)) {
case NVME_ZONE_STATE_EMPTY:
case NVME_ZONE_STATE_IMPLICITLY_OPEN:
@@ -1221,16 +1308,15 @@ static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone)
case NVME_ZONE_STATE_FULL:
case NVME_ZONE_STATE_CLOSED:
case NVME_ZONE_STATE_READ_ONLY:
- status = NVME_SUCCESS;
- break;
+ return NVME_SUCCESS;
case NVME_ZONE_STATE_OFFLINE:
- status = NVME_ZONE_OFFLINE;
- break;
+ trace_pci_nvme_err_zone_is_offline(zone->d.zslba);
+ return NVME_ZONE_OFFLINE;
default:
assert(false);
}
- return status;
+ return NVME_INTERNAL_DEV_ERROR;
}
static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba,
@@ -1265,7 +1351,45 @@ static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba,
return status;
}
-static void nvme_auto_transition_zone(NvmeNamespace *ns)
+static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone)
+{
+ switch (nvme_get_zone_state(zone)) {
+ case NVME_ZONE_STATE_FULL:
+ return NVME_SUCCESS;
+
+ case NVME_ZONE_STATE_IMPLICITLY_OPEN:
+ case NVME_ZONE_STATE_EXPLICITLY_OPEN:
+ nvme_aor_dec_open(ns);
+ /* fallthrough */
+ case NVME_ZONE_STATE_CLOSED:
+ nvme_aor_dec_active(ns);
+ /* fallthrough */
+ case NVME_ZONE_STATE_EMPTY:
+ nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL);
+ return NVME_SUCCESS;
+
+ default:
+ return NVME_ZONE_INVAL_TRANSITION;
+ }
+}
+
+static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone)
+{
+ switch (nvme_get_zone_state(zone)) {
+ case NVME_ZONE_STATE_EXPLICITLY_OPEN:
+ case NVME_ZONE_STATE_IMPLICITLY_OPEN:
+ nvme_aor_dec_open(ns);
+ nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
+ /* fall through */
+ case NVME_ZONE_STATE_CLOSED:
+ return NVME_SUCCESS;
+
+ default:
+ return NVME_ZONE_INVAL_TRANSITION;
+ }
+}
+
+static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns)
{
NvmeZone *zone;
@@ -1277,85 +1401,92 @@ static void nvme_auto_transition_zone(NvmeNamespace *ns)
* Automatically close this implicitly open zone.
*/
QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
- nvme_aor_dec_open(ns);
- nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
+ nvme_zrm_close(ns, zone);
}
}
}
-static uint16_t nvme_auto_open_zone(NvmeNamespace *ns, NvmeZone *zone)
+static uint16_t __nvme_zrm_open(NvmeNamespace *ns, NvmeZone *zone,
+ bool implicit)
{
- uint16_t status = NVME_SUCCESS;
- uint8_t zs = nvme_get_zone_state(zone);
+ int act = 0;
+ uint16_t status;
- if (zs == NVME_ZONE_STATE_EMPTY) {
- nvme_auto_transition_zone(ns);
- status = nvme_aor_check(ns, 1, 1);
- } else if (zs == NVME_ZONE_STATE_CLOSED) {
- nvme_auto_transition_zone(ns);
- status = nvme_aor_check(ns, 0, 1);
- }
+ switch (nvme_get_zone_state(zone)) {
+ case NVME_ZONE_STATE_EMPTY:
+ act = 1;
- return status;
+ /* fallthrough */
+
+ case NVME_ZONE_STATE_CLOSED:
+ nvme_zrm_auto_transition_zone(ns);
+ status = nvme_aor_check(ns, act, 1);
+ if (status) {
+ return status;
+ }
+
+ if (act) {
+ nvme_aor_inc_active(ns);
+ }
+
+ nvme_aor_inc_open(ns);
+
+ if (implicit) {
+ nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
+ return NVME_SUCCESS;
+ }
+
+ /* fallthrough */
+
+ case NVME_ZONE_STATE_IMPLICITLY_OPEN:
+ if (implicit) {
+ return NVME_SUCCESS;
+ }
+
+ nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN);
+
+ /* fallthrough */
+
+ case NVME_ZONE_STATE_EXPLICITLY_OPEN:
+ return NVME_SUCCESS;
+
+ default:
+ return NVME_ZONE_INVAL_TRANSITION;
+ }
}
-static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req,
- bool failed)
+static inline uint16_t nvme_zrm_auto(NvmeNamespace *ns, NvmeZone *zone)
{
- NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
- NvmeZone *zone;
- NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe;
- uint64_t slba;
- uint32_t nlb;
+ return __nvme_zrm_open(ns, zone, true);
+}
- slba = le64_to_cpu(rw->slba);
- nlb = le16_to_cpu(rw->nlb) + 1;
- zone = nvme_get_zone_by_slba(ns, slba);
+static inline uint16_t nvme_zrm_open(NvmeNamespace *ns, NvmeZone *zone)
+{
+ return __nvme_zrm_open(ns, zone, false);
+}
+static void __nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone,
+ uint32_t nlb)
+{
zone->d.wp += nlb;
- if (failed) {
- res->slba = 0;
- }
-
if (zone->d.wp == nvme_zone_wr_boundary(zone)) {
- switch (nvme_get_zone_state(zone)) {
- case NVME_ZONE_STATE_IMPLICITLY_OPEN:
- case NVME_ZONE_STATE_EXPLICITLY_OPEN:
- nvme_aor_dec_open(ns);
- /* fall through */
- case NVME_ZONE_STATE_CLOSED:
- nvme_aor_dec_active(ns);
- /* fall through */
- case NVME_ZONE_STATE_EMPTY:
- nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL);
- /* fall through */
- case NVME_ZONE_STATE_FULL:
- break;
- default:
- assert(false);
- }
+ nvme_zrm_finish(ns, zone);
}
}
-static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone,
- uint32_t nlb)
+static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req)
{
- uint8_t zs;
+ NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
+ NvmeZone *zone;
+ uint64_t slba;
+ uint32_t nlb;
- zone->w_ptr += nlb;
+ slba = le64_to_cpu(rw->slba);
+ nlb = le16_to_cpu(rw->nlb) + 1;
+ zone = nvme_get_zone_by_slba(ns, slba);
- if (zone->w_ptr < nvme_zone_wr_boundary(zone)) {
- zs = nvme_get_zone_state(zone);
- switch (zs) {
- case NVME_ZONE_STATE_EMPTY:
- nvme_aor_inc_active(ns);
- /* fall through */
- case NVME_ZONE_STATE_CLOSED:
- nvme_aor_inc_open(ns);
- nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
- }
- }
+ __nvme_advance_zone_wp(ns, zone, nlb);
}
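/*
 * A minimal, self-contained sketch of the zone resource accounting that the
 * zrm helpers above centralize (the zns_* names and types are illustrative,
 * not the QEMU structures): opening a zone may consume an "active" and an
 * "open" resource, and finishing it releases whatever it held.
 */
#include <stdbool.h>

enum zns_state { ZNS_EMPTY, ZNS_CLOSED, ZNS_OPEN, ZNS_FULL };

struct zns_ns {
    int nr_active, nr_open;
    int max_active, max_open;
};

static bool zns_open(struct zns_ns *ns, enum zns_state *zs)
{
    int act = (*zs == ZNS_EMPTY);       /* empty zones also become active */

    if (*zs == ZNS_OPEN) {
        return true;                    /* already open; nothing to account */
    }
    if (ns->nr_active + act > ns->max_active ||
        ns->nr_open + 1 > ns->max_open) {
        return false;                   /* would exceed a zone resource */
    }
    ns->nr_active += act;
    ns->nr_open += 1;
    *zs = ZNS_OPEN;
    return true;
}

static void zns_finish(struct zns_ns *ns, enum zns_state *zs)
{
    if (*zs == ZNS_OPEN) {
        ns->nr_open--;
        ns->nr_active--;
    } else if (*zs == ZNS_CLOSED) {
        ns->nr_active--;
    }
    *zs = ZNS_FULL;                     /* full zones hold no resources */
}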
static inline bool nvme_is_write(NvmeRequest *req)
@@ -1379,9 +1510,37 @@ static void nvme_rw_cb(void *opaque, int ret)
trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk));
if (ns->params.zoned && nvme_is_write(req)) {
- nvme_finalize_zoned_write(ns, req, ret != 0);
+ nvme_finalize_zoned_write(ns, req);
+ }
+
+ if (!ret) {
+ block_acct_done(stats, acct);
+ } else {
+ block_acct_failed(stats, acct);
+ nvme_aio_err(req, ret);
}
+ nvme_enqueue_req_completion(nvme_cq(req), req);
+}
+
+struct nvme_aio_flush_ctx {
+ NvmeRequest *req;
+ NvmeNamespace *ns;
+ BlockAcctCookie acct;
+};
+
+static void nvme_aio_flush_cb(void *opaque, int ret)
+{
+ struct nvme_aio_flush_ctx *ctx = opaque;
+ NvmeRequest *req = ctx->req;
+ uintptr_t *num_flushes = (uintptr_t *)&req->opaque;
+
+ BlockBackend *blk = ctx->ns->blkconf.blk;
+ BlockAcctCookie *acct = &ctx->acct;
+ BlockAcctStats *stats = blk_get_stats(blk);
+
+ trace_pci_nvme_aio_flush_cb(nvme_cid(req), blk_name(blk));
+
if (!ret) {
block_acct_done(stats, acct);
} else {
@@ -1389,6 +1548,13 @@ static void nvme_rw_cb(void *opaque, int ret)
nvme_aio_err(req, ret);
}
+ (*num_flushes)--;
+ g_free(ctx);
+
+ if (*num_flushes) {
+ return;
+ }
+
nvme_enqueue_req_completion(nvme_cq(req), req);
}
@@ -1459,10 +1625,139 @@ static void nvme_aio_zone_reset_cb(void *opaque, int ret)
nvme_enqueue_req_completion(nvme_cq(req), req);
}
+struct nvme_copy_ctx {
+ int copies;
+ uint8_t *bounce;
+ uint32_t nlb;
+};
+
+struct nvme_copy_in_ctx {
+ NvmeRequest *req;
+ QEMUIOVector iov;
+};
+
+static void nvme_copy_cb(void *opaque, int ret)
+{
+ NvmeRequest *req = opaque;
+ NvmeNamespace *ns = req->ns;
+ struct nvme_copy_ctx *ctx = req->opaque;
+
+ trace_pci_nvme_copy_cb(nvme_cid(req));
+
+ if (ns->params.zoned) {
+ NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+ uint64_t sdlba = le64_to_cpu(copy->sdlba);
+ NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
+
+ __nvme_advance_zone_wp(ns, zone, ctx->nlb);
+ }
+
+ if (!ret) {
+ block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
+ } else {
+ block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
+ nvme_aio_err(req, ret);
+ }
+
+ g_free(ctx->bounce);
+ g_free(ctx);
+
+ nvme_enqueue_req_completion(nvme_cq(req), req);
+}
+
+static void nvme_copy_in_complete(NvmeRequest *req)
+{
+ NvmeNamespace *ns = req->ns;
+ NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+ struct nvme_copy_ctx *ctx = req->opaque;
+ uint64_t sdlba = le64_to_cpu(copy->sdlba);
+ uint16_t status;
+
+ trace_pci_nvme_copy_in_complete(nvme_cid(req));
+
+ block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
+
+ status = nvme_check_bounds(ns, sdlba, ctx->nlb);
+ if (status) {
+ trace_pci_nvme_err_invalid_lba_range(sdlba, ctx->nlb, ns->id_ns.nsze);
+ goto invalid;
+ }
+
+ if (ns->params.zoned) {
+ NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
+
+ status = nvme_check_zone_write(ns, zone, sdlba, ctx->nlb);
+ if (status) {
+ goto invalid;
+ }
+
+ status = nvme_zrm_auto(ns, zone);
+ if (status) {
+ goto invalid;
+ }
+
+ zone->w_ptr += ctx->nlb;
+ }
+
+ qemu_iovec_init(&req->sg.iov, 1);
+ qemu_iovec_add(&req->sg.iov, ctx->bounce, nvme_l2b(ns, ctx->nlb));
+
+ block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
+ BLOCK_ACCT_WRITE);
+
+ req->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, sdlba),
+ &req->sg.iov, 0, nvme_copy_cb, req);
+
+ return;
+
+invalid:
+ req->status = status;
+
+ g_free(ctx->bounce);
+ g_free(ctx);
+
+ nvme_enqueue_req_completion(nvme_cq(req), req);
+}
+
+static void nvme_aio_copy_in_cb(void *opaque, int ret)
+{
+ struct nvme_copy_in_ctx *in_ctx = opaque;
+ NvmeRequest *req = in_ctx->req;
+ NvmeNamespace *ns = req->ns;
+ struct nvme_copy_ctx *ctx = req->opaque;
+
+ qemu_iovec_destroy(&in_ctx->iov);
+ g_free(in_ctx);
+
+ trace_pci_nvme_aio_copy_in_cb(nvme_cid(req));
+
+ if (ret) {
+ nvme_aio_err(req, ret);
+ }
+
+ ctx->copies--;
+
+ if (ctx->copies) {
+ return;
+ }
+
+ if (req->status) {
+ block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
+
+ g_free(ctx->bounce);
+ g_free(ctx);
+
+ nvme_enqueue_req_completion(nvme_cq(req), req);
+
+ return;
+ }
+
+ nvme_copy_in_complete(req);
+}
+
struct nvme_compare_ctx {
QEMUIOVector iov;
uint8_t *bounce;
- size_t len;
};
static void nvme_compare_cb(void *opaque, int ret)
@@ -1483,16 +1778,15 @@ static void nvme_compare_cb(void *opaque, int ret)
goto out;
}
- buf = g_malloc(ctx->len);
+ buf = g_malloc(ctx->iov.size);
- status = nvme_dma(nvme_ctrl(req), buf, ctx->len, DMA_DIRECTION_TO_DEVICE,
- req);
+ status = nvme_h2c(nvme_ctrl(req), buf, ctx->iov.size, req);
if (status) {
req->status = status;
goto out;
}
- if (memcmp(buf, ctx->bounce, ctx->len)) {
+ if (memcmp(buf, ctx->bounce, ctx->iov.size)) {
req->status = NVME_CMP_FAILURE;
}
@@ -1522,8 +1816,7 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
NvmeDsmRange range[nr];
uintptr_t *discards = (uintptr_t *)&req->opaque;
- status = nvme_dma(n, (uint8_t *)range, sizeof(range),
- DMA_DIRECTION_TO_DEVICE, req);
+ status = nvme_h2c(n, (uint8_t *)range, sizeof(range), req);
if (status) {
return status;
}
@@ -1548,6 +1841,10 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_dsm_deallocate(nvme_cid(req), nvme_nsid(ns), slba,
nlb);
+ if (nlb > n->dmrsl) {
+ trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl);
+ }
+
offset = nvme_l2b(ns, slba);
len = nvme_l2b(ns, nlb);
@@ -1577,6 +1874,121 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
return status;
}
+static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeNamespace *ns = req->ns;
+ NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+ g_autofree NvmeCopySourceRange *range = NULL;
+
+ uint16_t nr = copy->nr + 1;
+ uint8_t format = copy->control[0] & 0xf;
+ uint32_t nlb = 0;
+
+ uint8_t *bounce = NULL, *bouncep = NULL;
+ struct nvme_copy_ctx *ctx;
+ uint16_t status;
+ int i;
+
+ trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format);
+
+ if (!(n->id_ctrl.ocfs & (1 << format))) {
+ trace_pci_nvme_err_copy_invalid_format(format);
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (nr > ns->id_ns.msrc + 1) {
+ return NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ }
+
+ range = g_new(NvmeCopySourceRange, nr);
+
+ status = nvme_h2c(n, (uint8_t *)range, nr * sizeof(NvmeCopySourceRange),
+ req);
+ if (status) {
+ return status;
+ }
+
+ for (i = 0; i < nr; i++) {
+ uint64_t slba = le64_to_cpu(range[i].slba);
+ uint32_t _nlb = le16_to_cpu(range[i].nlb) + 1;
+
+ if (_nlb > le16_to_cpu(ns->id_ns.mssrl)) {
+ return NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ }
+
+ status = nvme_check_bounds(ns, slba, _nlb);
+ if (status) {
+ trace_pci_nvme_err_invalid_lba_range(slba, _nlb, ns->id_ns.nsze);
+ return status;
+ }
+
+ if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
+ status = nvme_check_dulbe(ns, slba, _nlb);
+ if (status) {
+ return status;
+ }
+ }
+
+ if (ns->params.zoned) {
+ status = nvme_check_zone_read(ns, slba, _nlb);
+ if (status) {
+ return status;
+ }
+ }
+
+ nlb += _nlb;
+ }
+
+ if (nlb > le32_to_cpu(ns->id_ns.mcl)) {
+ return NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ }
+
+ bounce = bouncep = g_malloc(nvme_l2b(ns, nlb));
+
+ block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
+ BLOCK_ACCT_READ);
+
+ ctx = g_new(struct nvme_copy_ctx, 1);
+
+ ctx->bounce = bounce;
+ ctx->nlb = nlb;
+ ctx->copies = 1;
+
+ req->opaque = ctx;
+
+ for (i = 0; i < nr; i++) {
+ uint64_t slba = le64_to_cpu(range[i].slba);
+ uint32_t nlb = le16_to_cpu(range[i].nlb) + 1;
+
+ size_t len = nvme_l2b(ns, nlb);
+ int64_t offset = nvme_l2b(ns, slba);
+
+ trace_pci_nvme_copy_source_range(slba, nlb);
+
+ struct nvme_copy_in_ctx *in_ctx = g_new(struct nvme_copy_in_ctx, 1);
+ in_ctx->req = req;
+
+ qemu_iovec_init(&in_ctx->iov, 1);
+ qemu_iovec_add(&in_ctx->iov, bouncep, len);
+
+ ctx->copies++;
+
+ blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0,
+ nvme_aio_copy_in_cb, in_ctx);
+
+ bouncep += len;
+ }
+
+ /* account for the 1-initialization */
+ ctx->copies--;
+
+ if (!ctx->copies) {
+ nvme_copy_in_complete(req);
+ }
+
+ return NVME_NO_COMPLETE;
+}
+
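/*
 * The Copy implementation above gathers every source range into a single
 * bounce buffer and then issues one contiguous write at the destination. A
 * synchronous sketch of that gather-then-write strategy using plain POSIX
 * I/O (the file descriptor, range layout and block size are illustrative
 * assumptions; the device does the same thing asynchronously with
 * blk_aio_preadv/blk_aio_pwritev):
 */
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

struct src_range { uint64_t slba; uint32_t nlb; };

static int copy_ranges(int fd, const struct src_range *r, int nr,
                       uint64_t dlba, size_t blksz)
{
    size_t total = 0, off = 0;

    for (int i = 0; i < nr; i++) {
        total += (size_t)r[i].nlb * blksz;
    }

    uint8_t *bounce = malloc(total);
    if (!bounce) {
        return -1;
    }

    /* gather: read each source range into the bounce buffer */
    for (int i = 0; i < nr; i++) {
        size_t len = (size_t)r[i].nlb * blksz;
        if (pread(fd, bounce + off, len, r[i].slba * blksz) != (ssize_t)len) {
            free(bounce);
            return -1;
        }
        off += len;
    }

    /* one contiguous write at the destination LBA */
    ssize_t n = pwrite(fd, bounce, total, dlba * blksz);
    free(bounce);
    return n == (ssize_t)total ? 0 : -1;
}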
static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
{
NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
@@ -1594,7 +2006,6 @@ static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
status = nvme_check_mdts(n, len);
if (status) {
- trace_pci_nvme_err_mdts(nvme_cid(req), len);
return status;
}
@@ -1615,7 +2026,6 @@ static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
ctx = g_new(struct nvme_compare_ctx, 1);
ctx->bounce = bounce;
- ctx->len = len;
req->opaque = ctx;
@@ -1630,10 +2040,56 @@ static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
- block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
- BLOCK_ACCT_FLUSH);
- req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_rw_cb, req);
- return NVME_NO_COMPLETE;
+ uint32_t nsid = le32_to_cpu(req->cmd.nsid);
+ uintptr_t *num_flushes = (uintptr_t *)&req->opaque;
+ uint16_t status;
+ struct nvme_aio_flush_ctx *ctx;
+ NvmeNamespace *ns;
+
+ trace_pci_nvme_flush(nvme_cid(req), nsid);
+
+ if (nsid != NVME_NSID_BROADCAST) {
+ req->ns = nvme_ns(n, nsid);
+ if (unlikely(!req->ns)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_rw_cb, req);
+ return NVME_NO_COMPLETE;
+ }
+
+ /* 1-initialize; see comment in nvme_dsm */
+ *num_flushes = 1;
+
+ for (int i = 1; i <= n->num_namespaces; i++) {
+ ns = nvme_ns(n, i);
+ if (!ns) {
+ continue;
+ }
+
+ ctx = g_new(struct nvme_aio_flush_ctx, 1);
+ ctx->req = req;
+ ctx->ns = ns;
+
+ (*num_flushes)++;
+
+ block_acct_start(blk_get_stats(ns->blkconf.blk), &ctx->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ blk_aio_flush(ns->blkconf.blk, nvme_aio_flush_cb, ctx);
+ }
+
+ /* account for the 1-initialization */
+ (*num_flushes)--;
+
+ if (*num_flushes) {
+ status = NVME_NO_COMPLETE;
+ } else {
+ status = req->status;
+ }
+
+ return status;
}
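/*
 * The broadcast Flush above, like nvme_dsm and nvme_copy, counts outstanding
 * AIOs starting from 1 so that a callback completing while the submission
 * loop is still running cannot finish the request early; the final decrement
 * drops that initial reference. A minimal standalone sketch of the pattern
 * (the fanout/submit_aio names are illustrative, and the stub backend
 * completes synchronously to model the worst case):
 */
#include <stdio.h>

struct fanout {
    unsigned pending;
    int done;
};

static void fanout_cb(void *opaque, int ret)
{
    struct fanout *f = opaque;

    (void)ret;
    if (--f->pending == 0) {
        f->done = 1;                     /* all submissions have completed */
    }
}

static void submit_aio(void (*cb)(void *, int), void *opaque)
{
    cb(opaque, 0);                       /* stub: completes immediately */
}

int main(void)
{
    struct fanout f = { .pending = 1 };  /* 1-initialize */

    for (int i = 0; i < 3; i++) {
        f.pending++;
        submit_aio(fanout_cb, &f);       /* may complete before we return */
    }

    if (--f.pending == 0) {              /* drop the initial reference */
        f.done = 1;
    }

    printf("done=%d pending=%u\n", f.done, f.pending);
    return 0;
}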
static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
@@ -1651,7 +2107,6 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
status = nvme_check_mdts(n, data_size);
if (status) {
- trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
goto invalid;
}
@@ -1669,7 +2124,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
}
}
- status = nvme_map_dptr(n, data_size, req);
+ status = nvme_map_dptr(n, &req->sg, data_size, &req->cmd);
if (status) {
goto invalid;
}
@@ -1685,13 +2140,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
block_acct_start(blk_get_stats(blk), &req->acct, data_size,
BLOCK_ACCT_READ);
- if (req->qsg.sg) {
- req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
- } else {
- req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
- nvme_rw_cb, req);
- }
+ nvme_blk_read(blk, data_offset, nvme_rw_cb, req);
return NVME_NO_COMPLETE;
invalid:
@@ -1719,7 +2168,6 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
if (!wrz) {
status = nvme_check_mdts(n, data_size);
if (status) {
- trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
goto invalid;
}
}
@@ -1740,48 +2188,40 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
goto invalid;
}
- if (nvme_l2b(ns, nlb) > (n->page_size << n->zasl)) {
- trace_pci_nvme_err_append_too_large(slba, nlb, n->zasl);
- status = NVME_INVALID_FIELD;
- goto invalid;
+ if (n->params.zasl && data_size > n->page_size << n->params.zasl) {
+ trace_pci_nvme_err_zasl(data_size);
+ return NVME_INVALID_FIELD | NVME_DNR;
}
slba = zone->w_ptr;
res->slba = cpu_to_le64(slba);
}
- status = nvme_check_zone_write(n, ns, zone, slba, nlb);
+ status = nvme_check_zone_write(ns, zone, slba, nlb);
if (status) {
goto invalid;
}
- status = nvme_auto_open_zone(ns, zone);
+ status = nvme_zrm_auto(ns, zone);
if (status) {
goto invalid;
}
- nvme_advance_zone_wp(ns, zone, nlb);
+ zone->w_ptr += nlb;
}
data_offset = nvme_l2b(ns, slba);
if (!wrz) {
- status = nvme_map_dptr(n, data_size, req);
+ status = nvme_map_dptr(n, &req->sg, data_size, &req->cmd);
if (status) {
goto invalid;
}
block_acct_start(blk_get_stats(blk), &req->acct, data_size,
BLOCK_ACCT_WRITE);
- if (req->qsg.sg) {
- req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
- } else {
- req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
- nvme_rw_cb, req);
- }
+ nvme_blk_write(blk, data_offset, nvme_rw_cb, req);
} else {
- block_acct_start(blk_get_stats(blk), &req->acct, 0, BLOCK_ACCT_WRITE);
req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
req);
@@ -1846,73 +2286,19 @@ enum NvmeZoneProcessingMask {
static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone,
NvmeZoneState state, NvmeRequest *req)
{
- uint16_t status;
-
- switch (state) {
- case NVME_ZONE_STATE_EMPTY:
- status = nvme_aor_check(ns, 1, 0);
- if (status) {
- return status;
- }
- nvme_aor_inc_active(ns);
- /* fall through */
- case NVME_ZONE_STATE_CLOSED:
- status = nvme_aor_check(ns, 0, 1);
- if (status) {
- if (state == NVME_ZONE_STATE_EMPTY) {
- nvme_aor_dec_active(ns);
- }
- return status;
- }
- nvme_aor_inc_open(ns);
- /* fall through */
- case NVME_ZONE_STATE_IMPLICITLY_OPEN:
- nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN);
- /* fall through */
- case NVME_ZONE_STATE_EXPLICITLY_OPEN:
- return NVME_SUCCESS;
- default:
- return NVME_ZONE_INVAL_TRANSITION;
- }
+ return nvme_zrm_open(ns, zone);
}
static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone,
NvmeZoneState state, NvmeRequest *req)
{
- switch (state) {
- case NVME_ZONE_STATE_EXPLICITLY_OPEN:
- case NVME_ZONE_STATE_IMPLICITLY_OPEN:
- nvme_aor_dec_open(ns);
- nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
- /* fall through */
- case NVME_ZONE_STATE_CLOSED:
- return NVME_SUCCESS;
- default:
- return NVME_ZONE_INVAL_TRANSITION;
- }
+ return nvme_zrm_close(ns, zone);
}
static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
NvmeZoneState state, NvmeRequest *req)
{
- switch (state) {
- case NVME_ZONE_STATE_EXPLICITLY_OPEN:
- case NVME_ZONE_STATE_IMPLICITLY_OPEN:
- nvme_aor_dec_open(ns);
- /* fall through */
- case NVME_ZONE_STATE_CLOSED:
- nvme_aor_dec_active(ns);
- /* fall through */
- case NVME_ZONE_STATE_EMPTY:
- zone->w_ptr = nvme_zone_wr_boundary(zone);
- zone->d.wp = zone->w_ptr;
- nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL);
- /* fall through */
- case NVME_ZONE_STATE_FULL:
- return NVME_SUCCESS;
- default:
- return NVME_ZONE_INVAL_TRANSITION;
- }
+ return nvme_zrm_finish(ns, zone);
}
static uint16_t nvme_reset_zone(NvmeNamespace *ns, NvmeZone *zone,
@@ -2168,8 +2554,7 @@ static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
}
zd_ext = nvme_get_zd_extension(ns, zone_idx);
- status = nvme_dma(n, zd_ext, ns->params.zd_extension_size,
- DMA_DIRECTION_TO_DEVICE, req);
+ status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req);
if (status) {
trace_pci_nvme_err_zd_extension_map_error(zone_idx);
return status;
@@ -2267,7 +2652,6 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
status = nvme_check_mdts(n, data_size);
if (status) {
- trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
return status;
}
@@ -2324,8 +2708,7 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
}
}
- status = nvme_dma(n, (uint8_t *)buf, data_size,
- DMA_DIRECTION_FROM_DEVICE, req);
+ status = nvme_c2h(n, (uint8_t *)buf, data_size, req);
g_free(buf);
@@ -2343,6 +2726,29 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_NSID | NVME_DNR;
}
+ /*
+ * In the base NVM command set, Flush may apply to all namespaces
+ * (indicated by NSID being set to 0xFFFFFFFF). But if that feature is used
+ * along with TP 4056 (Namespace Types), the semantics become ambiguous.
+ *
+ * If NSID is indeed set to 0xFFFFFFFF, we simply cannot associate the
+ * opcode with a specific command since we cannot determine a unique I/O
+ * command set. Another command set could give the 0x0 opcode completely
+ * different semantics - does an NSID of 0xFFFFFFFF then mean "for all
+ * namespaces, apply whatever command set specific command uses the 0x0
+ * opcode", or "for all namespaces, apply whatever command uses the 0x0
+ * opcode if, and only if, it allows NSID to be 0xFFFFFFFF"?
+ *
+ * Anyway (and luckily), for now, we do not care about this since the
+ * device only supports namespace types that include the NVM Flush command
+ * (NVM and Zoned), so always do an NVM Flush.
+ */
+ if (req->cmd.opcode == NVME_CMD_FLUSH) {
+ return nvme_flush(n, req);
+ }
+
req->ns = nvme_ns(n, nsid);
if (unlikely(!req->ns)) {
return NVME_INVALID_FIELD | NVME_DNR;
@@ -2354,8 +2760,6 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
}
switch (req->cmd.opcode) {
- case NVME_CMD_FLUSH:
- return nvme_flush(n, req);
case NVME_CMD_WRITE_ZEROES:
return nvme_write_zeroes(n, req);
case NVME_CMD_ZONE_APPEND:
@@ -2368,6 +2772,8 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
return nvme_compare(n, req);
case NVME_CMD_DSM:
return nvme_dsm(n, req);
+ case NVME_CMD_COPY:
+ return nvme_copy(n, req);
case NVME_CMD_ZONE_MGMT_SEND:
return nvme_zone_mgmt_send(n, req);
case NVME_CMD_ZONE_MGMT_RECV:
@@ -2568,8 +2974,7 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
nvme_clear_events(n, NVME_AER_TYPE_SMART);
}
- return nvme_dma(n, (uint8_t *) &smart + off, trans_len,
- DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
}
static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
@@ -2587,8 +2992,7 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
trans_len = MIN(sizeof(fw_log) - off, buf_len);
- return nvme_dma(n, (uint8_t *) &fw_log + off, trans_len,
- DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req);
}
static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
@@ -2608,8 +3012,49 @@ static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
memset(&errlog, 0x0, sizeof(errlog));
trans_len = MIN(sizeof(errlog) - off, buf_len);
- return nvme_dma(n, (uint8_t *)&errlog, trans_len,
- DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req);
+}
+
+static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
+ uint64_t off, NvmeRequest *req)
+{
+ uint32_t nslist[1024];
+ uint32_t trans_len;
+ int i = 0;
+ uint32_t nsid;
+
+ memset(nslist, 0x0, sizeof(nslist));
+ trans_len = MIN(sizeof(nslist) - off, buf_len);
+
+ while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) !=
+ NVME_CHANGED_NSID_SIZE) {
+ /*
+ * If more than 1024 namespaces have changed, the spec requires the first
+ * entry in the log page to be set to 0xffffffff and the others to 0.
+ */
+ if (i == ARRAY_SIZE(nslist)) {
+ memset(nslist, 0x0, sizeof(nslist));
+ nslist[0] = 0xffffffff;
+ break;
+ }
+
+ nslist[i++] = nsid;
+ clear_bit(nsid, n->changed_nsids);
+ }
+
+ /*
+ * Clear the remaining bits in the changed-namespaces bitmap when the loop
+ * exited early because more than 1024 namespaces have changed.
+ */
+ if (nslist[0] == 0xffffffff) {
+ bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE);
+ }
+
+ if (!rae) {
+ nvme_clear_events(n, NVME_AER_TYPE_NOTICE);
+ }
+
+ return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req);
}
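/*
 * On the host side, the Changed Namespace List log page is 1024 32-bit
 * entries; a first entry of 0xffffffff means that more than 1024 namespaces
 * changed and everything should be rescanned, and a zero entry terminates
 * the list. A small sketch of interpreting the returned buffer (endianness
 * conversion omitted for brevity):
 */
#include <stdint.h>
#include <stdio.h>

static void parse_changed_nslist(const uint32_t nslist[1024])
{
    if (nslist[0] == 0xffffffff) {
        printf("overflow: rescan all namespaces\n");
        return;
    }

    for (int i = 0; i < 1024 && nslist[i]; i++) {
        printf("namespace %u changed\n", nslist[i]);
    }
}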
static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
@@ -2649,8 +3094,7 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
trans_len = MIN(sizeof(log) - off, buf_len);
- return nvme_dma(n, ((uint8_t *)&log) + off, trans_len,
- DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
}
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
@@ -2686,7 +3130,6 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
status = nvme_check_mdts(n, len);
if (status) {
- trace_pci_nvme_err_mdts(nvme_cid(req), len);
return status;
}
@@ -2697,6 +3140,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
return nvme_smart_info(n, rae, len, off, req);
case NVME_LOG_FW_SLOT_INFO:
return nvme_fw_log_info(n, len, off, req);
+ case NVME_LOG_CHANGED_NSLIST:
+ return nvme_changed_nslist(n, rae, len, off, req);
case NVME_LOG_CMD_EFFECTS:
return nvme_cmd_effects(n, csi, len, off, req);
default:
@@ -2819,7 +3264,7 @@ static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
{
uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
- return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, id, sizeof(id), req);
}
static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns)
@@ -2836,31 +3281,33 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
trace_pci_nvme_identify_ctrl();
- return nvme_dma(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
- DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);
}
static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
{
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
- NvmeIdCtrlZoned id = {};
+ uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
trace_pci_nvme_identify_ctrl_csi(c->csi);
- if (c->csi == NVME_CSI_NVM) {
- return nvme_rpt_empty_id_struct(n, req);
- } else if (c->csi == NVME_CSI_ZONED) {
- if (n->params.zasl_bs) {
- id.zasl = n->zasl;
- }
- return nvme_dma(n, (uint8_t *)&id, sizeof(id),
- DMA_DIRECTION_FROM_DEVICE, req);
+ switch (c->csi) {
+ case NVME_CSI_NVM:
+ ((NvmeIdCtrlNvm *)&id)->dmrsl = cpu_to_le32(n->dmrsl);
+ break;
+
+ case NVME_CSI_ZONED:
+ ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl;
+ break;
+
+ default:
+ return NVME_INVALID_FIELD | NVME_DNR;
}
- return NVME_INVALID_FIELD | NVME_DNR;
+ return nvme_c2h(n, id, sizeof(id), req);
}
-static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -2874,18 +3321,64 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
- return nvme_rpt_empty_id_struct(n, req);
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, nsid);
+ if (!ns) {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
+ } else {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
}
if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
- return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs),
- DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req);
}
return NVME_INVALID_CMD_SET | NVME_DNR;
}
-static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_ns_attached_list(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
+ uint16_t min_id = le16_to_cpu(c->ctrlid);
+ uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
+ uint16_t *ids = &list[1];
+ NvmeNamespace *ns;
+ NvmeCtrl *ctrl;
+ int cntlid, nr_ids = 0;
+
+ trace_pci_nvme_identify_ns_attached_list(min_id);
+
+ if (c->nsid == NVME_NSID_BROADCAST) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ns = nvme_subsys_ns(n->subsys, c->nsid);
+ if (!ns) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) {
+ ctrl = nvme_subsys_ctrl(n->subsys, cntlid);
+ if (!ctrl) {
+ continue;
+ }
+
+ if (!nvme_ns_is_attached(ctrl, ns)) {
+ continue;
+ }
+
+ ids[nr_ids++] = cntlid;
+ }
+
+ list[0] = nr_ids;
+
+ return nvme_c2h(n, (uint8_t *)list, sizeof(list), req);
+}
+
+static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -2899,20 +3392,28 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
- return nvme_rpt_empty_id_struct(n, req);
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, nsid);
+ if (!ns) {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
+ } else {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
}
if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
return nvme_rpt_empty_id_struct(n, req);
} else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
- return nvme_dma(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
- DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
+ req);
}
return NVME_INVALID_FIELD | NVME_DNR;
}
-static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -2937,7 +3438,14 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
for (i = 1; i <= n->num_namespaces; i++) {
ns = nvme_ns(n, i);
if (!ns) {
- continue;
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, i);
+ if (!ns) {
+ continue;
+ }
+ } else {
+ continue;
+ }
}
if (ns->params.nsid <= min_nsid) {
continue;
@@ -2948,10 +3456,11 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
}
}
- return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, list, data_len, req);
}
-static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -2977,7 +3486,14 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
for (i = 1; i <= n->num_namespaces; i++) {
ns = nvme_ns(n, i);
if (!ns) {
- continue;
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, i);
+ if (!ns) {
+ continue;
+ }
+ } else {
+ continue;
+ }
}
if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {
continue;
@@ -2988,7 +3504,7 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
}
}
- return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, list, data_len, req);
}
static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
@@ -3035,7 +3551,7 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
ns_descrs->csi.hdr.nidl = NVME_NIDL_CSI;
ns_descrs->csi.v = ns->csi;
- return nvme_dma(n, list, sizeof(list), DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, list, sizeof(list), req);
}
static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
@@ -3048,34 +3564,39 @@ static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
NVME_SET_CSI(*list, NVME_CSI_NVM);
NVME_SET_CSI(*list, NVME_CSI_ZONED);
- return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, list, data_len, req);
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
- switch (le32_to_cpu(c->cns)) {
+ trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid),
+ c->csi);
+
+ switch (c->cns) {
case NVME_ID_CNS_NS:
- /* fall through */
+ return nvme_identify_ns(n, req, true);
case NVME_ID_CNS_NS_PRESENT:
- return nvme_identify_ns(n, req);
+ return nvme_identify_ns(n, req, false);
+ case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST:
+ return nvme_identify_ns_attached_list(n, req);
case NVME_ID_CNS_CS_NS:
- /* fall through */
+ return nvme_identify_ns_csi(n, req, true);
case NVME_ID_CNS_CS_NS_PRESENT:
- return nvme_identify_ns_csi(n, req);
+ return nvme_identify_ns_csi(n, req, false);
case NVME_ID_CNS_CTRL:
return nvme_identify_ctrl(n, req);
case NVME_ID_CNS_CS_CTRL:
return nvme_identify_ctrl_csi(n, req);
case NVME_ID_CNS_NS_ACTIVE_LIST:
- /* fall through */
+ return nvme_identify_nslist(n, req, true);
case NVME_ID_CNS_NS_PRESENT_LIST:
- return nvme_identify_nslist(n, req);
+ return nvme_identify_nslist(n, req, false);
case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
- /* fall through */
+ return nvme_identify_nslist_csi(n, req, true);
case NVME_ID_CNS_CS_NS_PRESENT_LIST:
- return nvme_identify_nslist_csi(n, req);
+ return nvme_identify_nslist_csi(n, req, false);
case NVME_ID_CNS_NS_DESCR_LIST:
return nvme_identify_ns_descr_list(n, req);
case NVME_ID_CNS_IO_COMMAND_SET:
@@ -3137,8 +3658,7 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
uint64_t timestamp = nvme_get_timestamp(n);
- return nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
- DMA_DIRECTION_FROM_DEVICE, req);
+ return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
@@ -3299,8 +3819,7 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
uint16_t ret;
uint64_t timestamp;
- ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
- DMA_DIRECTION_TO_DEVICE, req);
+ ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
if (ret) {
return ret;
}
@@ -3472,6 +3991,71 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
+static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns);
+static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeNamespace *ns;
+ NvmeCtrl *ctrl;
+ uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
+ uint32_t nsid = le32_to_cpu(req->cmd.nsid);
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ bool attach = !(dw10 & 0xf);
+ uint16_t *nr_ids = &list[0];
+ uint16_t *ids = &list[1];
+ uint16_t ret;
+ int i;
+
+ trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);
+
+ ns = nvme_subsys_ns(n->subsys, nsid);
+ if (!ns) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ret = nvme_h2c(n, (uint8_t *)list, 4096, req);
+ if (ret) {
+ return ret;
+ }
+
+ if (!*nr_ids) {
+ return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
+ }
+
+ for (i = 0; i < *nr_ids; i++) {
+ ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
+ if (!ctrl) {
+ return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
+ }
+
+ if (attach) {
+ if (nvme_ns_is_attached(ctrl, ns)) {
+ return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
+ }
+
+ nvme_ns_attach(ctrl, ns);
+ __nvme_select_ns_iocs(ctrl, ns);
+ } else {
+ if (!nvme_ns_is_attached(ctrl, ns)) {
+ return NVME_NS_NOT_ATTACHED | NVME_DNR;
+ }
+
+ nvme_ns_detach(ctrl, ns);
+ }
+
+ /*
+ * Add the namespace ID to the changed namespace ID list so that the
+ * event can later be cleared via the Get Log Page command.
+ */
+ if (!test_and_set_bit(nsid, ctrl->changed_nsids)) {
+ nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE,
+ NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED,
+ NVME_LOG_CHANGED_NSLIST);
+ }
+ }
+
+ return NVME_SUCCESS;
+}
+
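/*
 * Both the attach/detach handler above and the Identify "attached controller
 * list" use the NVMe controller list format: a 4096 byte buffer holding a
 * 16-bit entry count followed by that many 16-bit controller identifiers
 * (at most 2047). A small sketch of building such a buffer (endianness
 * conversion omitted for brevity):
 */
#include <stdint.h>
#include <string.h>

static void build_ctrl_list(uint16_t buf[2048],
                            const uint16_t *cntlids, uint16_t nr)
{
    memset(buf, 0, 2048 * sizeof(uint16_t));
    buf[0] = nr;                              /* number of identifiers */
    memcpy(&buf[1], cntlids, nr * sizeof(uint16_t));
}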
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
@@ -3482,6 +4066,11 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_OPCODE | NVME_DNR;
}
+ /* SGLs shall not be used for Admin commands in NVMe over PCIe */
+ if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
switch (req->cmd.opcode) {
case NVME_ADM_CMD_DELETE_SQ:
return nvme_del_sq(n, req);
@@ -3503,6 +4092,8 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
return nvme_get_feature(n, req);
case NVME_ADM_CMD_ASYNC_EV_REQ:
return nvme_aer(n, req);
+ case NVME_ADM_CMD_NS_ATTACHMENT:
+ return nvme_ns_attachment(n, req);
default:
assert(false);
}
@@ -3604,6 +4195,25 @@ static void nvme_ctrl_shutdown(NvmeCtrl *n)
}
}
+static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns)
+{
+ ns->iocs = nvme_cse_iocs_none;
+ switch (ns->csi) {
+ case NVME_CSI_NVM:
+ if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) {
+ ns->iocs = nvme_cse_iocs_nvm;
+ }
+ break;
+ case NVME_CSI_ZONED:
+ if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_CSI) {
+ ns->iocs = nvme_cse_iocs_zoned;
+ } else if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_NVM) {
+ ns->iocs = nvme_cse_iocs_nvm;
+ }
+ break;
+ }
+}
+
static void nvme_select_ns_iocs(NvmeCtrl *n)
{
NvmeNamespace *ns;
@@ -3614,21 +4224,8 @@ static void nvme_select_ns_iocs(NvmeCtrl *n)
if (!ns) {
continue;
}
- ns->iocs = nvme_cse_iocs_none;
- switch (ns->csi) {
- case NVME_CSI_NVM:
- if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) {
- ns->iocs = nvme_cse_iocs_nvm;
- }
- break;
- case NVME_CSI_ZONED:
- if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_CSI) {
- ns->iocs = nvme_cse_iocs_zoned;
- } else if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_NVM) {
- ns->iocs = nvme_cse_iocs_nvm;
- }
- break;
- }
+
+ __nvme_select_ns_iocs(n, ns);
}
}
@@ -3726,17 +4323,6 @@ static int nvme_start_ctrl(NvmeCtrl *n)
nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
NVME_AQA_ASQS(n->bar.aqa) + 1);
- if (!n->params.zasl_bs) {
- n->zasl = n->params.mdts;
- } else {
- if (n->params.zasl_bs < n->page_size) {
- trace_pci_nvme_err_startfail_zasl_too_small(n->params.zasl_bs,
- n->page_size);
- return -1;
- }
- n->zasl = 31 - clz32(n->params.zasl_bs / n->page_size);
- }
-
nvme_set_timestamp(n, 0ULL);
QTAILQ_INIT(&n->aer_queue);
@@ -4245,11 +4831,10 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
host_memory_backend_set_mapped(n->pmr.dev, true);
}
- if (n->params.zasl_bs) {
- if (!is_power_of_2(n->params.zasl_bs)) {
- error_setg(errp, "zone append size limit has to be a power of 2");
- return;
- }
+ if (n->params.zasl > n->params.mdts) {
+ error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less "
+ "than or equal to mdts (Maximum Data Transfer Size)");
+ return;
}
}
@@ -4267,6 +4852,20 @@ static void nvme_init_state(NvmeCtrl *n)
n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
+static int nvme_attach_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
+{
+ if (nvme_ns_is_attached(n, ns)) {
+ error_setg(errp,
+ "namespace %d is already attached to controller %d",
+ nvme_nsid(ns), n->cntlid);
+ return -1;
+ }
+
+ nvme_ns_attach(n, ns);
+
+ return 0;
+}
+
int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
uint32_t nsid = nvme_nsid(ns);
@@ -4298,7 +4897,26 @@ int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
trace_pci_nvme_register_namespace(nsid);
- n->namespaces[nsid - 1] = ns;
+ /*
+ * If subsys is not given, the namespace is always attached to the controller
+ * because there's no subsystem to manage namespace allocation.
+ */
+ if (!n->subsys) {
+ if (ns->params.detached) {
+ error_setg(errp,
+ "detached needs nvme-subsys specified nvme or nvme-ns");
+ return -1;
+ }
+
+ return nvme_attach_namespace(n, ns, errp);
+ } else {
+ if (!ns->params.detached) {
+ return nvme_attach_namespace(n, ns, errp);
+ }
+ }
+
+ n->dmrsl = MIN_NON_ZERO(n->dmrsl,
+ BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
return 0;
}
@@ -4405,24 +5023,49 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
return 0;
}
+static void nvme_init_subnqn(NvmeCtrl *n)
+{
+ NvmeSubsystem *subsys = n->subsys;
+ NvmeIdCtrl *id = &n->id_ctrl;
+
+ if (!subsys) {
+ snprintf((char *)id->subnqn, sizeof(id->subnqn),
+ "nqn.2019-08.org.qemu:%s", n->params.serial);
+ } else {
+ pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn);
+ }
+}
+
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
NvmeIdCtrl *id = &n->id_ctrl;
uint8_t *pci_conf = pci_dev->config;
- char *subnqn;
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
+
+ id->cntlid = cpu_to_le16(n->cntlid);
+
+ id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);
+
id->rab = 6;
- id->ieee[0] = 0x00;
- id->ieee[1] = 0x02;
- id->ieee[2] = 0xb3;
+
+ if (n->params.use_intel_id) {
+ id->ieee[0] = 0xb3;
+ id->ieee[1] = 0x02;
+ id->ieee[2] = 0x00;
+ } else {
+ id->ieee[0] = 0x00;
+ id->ieee[1] = 0x54;
+ id->ieee[2] = 0x52;
+ }
+
id->mdts = n->params.mdts;
id->ver = cpu_to_le32(NVME_SPEC_VER);
- id->oacs = cpu_to_le16(0);
+ id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT);
id->cntrltype = 0x1;
/*
@@ -4450,20 +5093,31 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->nn = cpu_to_le32(n->num_namespaces);
id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
NVME_ONCS_FEATURES | NVME_ONCS_DSM |
- NVME_ONCS_COMPARE);
+ NVME_ONCS_COMPARE | NVME_ONCS_COPY);
- id->vwc = (0x2 << 1) | 0x1;
+ /*
+ * NOTE: If this device ever supports a command set that does NOT use 0x0
+ * as a Flush-equivalent operation, support for the broadcast NSID in Flush
+ * should probably be removed.
+ *
+ * See comment in nvme_io_cmd.
+ */
+ id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;
+
+ id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0);
id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
NVME_CTRL_SGLS_BITBUCKET);
- subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
- strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
- g_free(subnqn);
+ nvme_init_subnqn(n);
id->psd[0].mp = cpu_to_le16(0x9c4);
id->psd[0].enlat = cpu_to_le32(0x10);
id->psd[0].exlat = cpu_to_le32(0x4);
+ if (n->subsys) {
+ id->cmic |= NVME_CMIC_MULTI_CTRL;
+ }
+
NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
NVME_CAP_SET_CQR(n->bar.cap, 1);
NVME_CAP_SET_TO(n->bar.cap, 0xf);
@@ -4478,6 +5132,24 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
n->bar.intmc = n->bar.intms = 0;
}
+static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
+{
+ int cntlid;
+
+ if (!n->subsys) {
+ return 0;
+ }
+
+ cntlid = nvme_subsys_register_ctrl(n, errp);
+ if (cntlid < 0) {
+ return -1;
+ }
+
+ n->cntlid = cntlid;
+
+ return 0;
+}
+
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
NvmeCtrl *n = NVME(pci_dev);
@@ -4498,6 +5170,10 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
return;
}
+ if (nvme_init_subsys(n, errp)) {
+ error_propagate(errp, local_err);
+ return;
+ }
nvme_init_ctrl(n, pci_dev);
/* setup a namespace if the controller drive property was given */
@@ -4550,6 +5226,8 @@ static Property nvme_props[] = {
DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
HostMemoryBackend *),
+ DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS,
+ NvmeSubsystem *),
DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
@@ -4560,8 +5238,7 @@ static Property nvme_props[] = {
DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
- DEFINE_PROP_SIZE32("zoned.append_size_limit", NvmeCtrl, params.zasl_bs,
- NVME_DEFAULT_MAX_ZA_SIZE),
+ DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index dee6092bd4..4955d649c7 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -2,6 +2,7 @@
#define HW_NVME_H
#include "block/nvme.h"
+#include "nvme-subsys.h"
#include "nvme-ns.h"
#define NVME_MAX_NAMESPACES 256
@@ -9,6 +10,12 @@
#define NVME_DEFAULT_ZONE_SIZE (128 * MiB)
#define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB)
+/*
+ * The subsystem's list of allocated namespaces must be at least as large as
+ * a controller's list of attached namespaces.
+ */
+QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_SUBSYS_MAX_NAMESPACES);
+
typedef struct NvmeParams {
char *serial;
uint32_t num_queues; /* deprecated since 5.1 */
@@ -19,7 +26,7 @@ typedef struct NvmeParams {
uint32_t aer_max_queued;
uint8_t mdts;
bool use_intel_id;
- uint32_t zasl_bs;
+ uint8_t zasl;
bool legacy_cmb;
} NvmeParams;
@@ -28,6 +35,20 @@ typedef struct NvmeAsyncEvent {
NvmeAerResult result;
} NvmeAsyncEvent;
+enum {
+ NVME_SG_ALLOC = 1 << 0,
+ NVME_SG_DMA = 1 << 1,
+};
+
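+/*
+ * NVME_SG_ALLOC marks the scatter/gather state as initialized; NVME_SG_DMA
+ * selects the QEMUSGList member of the union below (guest DMA addresses),
+ * otherwise the QEMUIOVector member (host pointers, e.g. into the CMB) is
+ * in use.
+ */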
+typedef struct NvmeSg {
+ int flags;
+
+ union {
+ QEMUSGList qsg;
+ QEMUIOVector iov;
+ };
+} NvmeSg;
+
typedef struct NvmeRequest {
struct NvmeSQueue *sq;
struct NvmeNamespace *ns;
@@ -37,8 +58,7 @@ typedef struct NvmeRequest {
NvmeCqe cqe;
NvmeCmd cmd;
BlockAcctCookie acct;
- QEMUSGList qsg;
- QEMUIOVector iov;
+ NvmeSg sg;
QTAILQ_ENTRY(NvmeRequest)entry;
} NvmeRequest;
@@ -68,6 +88,7 @@ static inline const char *nvme_io_opc_str(uint8_t opc)
case NVME_CMD_COMPARE: return "NVME_NVM_CMD_COMPARE";
case NVME_CMD_WRITE_ZEROES: return "NVME_NVM_CMD_WRITE_ZEROES";
case NVME_CMD_DSM: return "NVME_NVM_CMD_DSM";
+ case NVME_CMD_COPY: return "NVME_NVM_CMD_COPY";
case NVME_CMD_ZONE_MGMT_SEND: return "NVME_ZONED_CMD_MGMT_SEND";
case NVME_CMD_ZONE_MGMT_RECV: return "NVME_ZONED_CMD_MGMT_RECV";
case NVME_CMD_ZONE_APPEND: return "NVME_ZONED_CMD_ZONE_APPEND";
@@ -133,6 +154,7 @@ typedef struct NvmeCtrl {
NvmeBus bus;
BlockConf conf;
+ uint16_t cntlid;
bool qs_created;
uint32_t page_size;
uint16_t page_bits;
@@ -168,9 +190,19 @@ typedef struct NvmeCtrl {
QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue;
int aer_queued;
- uint8_t zasl;
+ uint32_t dmrsl;
+
+ /* Namespace IDs start at 1, so the bitmap must be 1-based */
+#define NVME_CHANGED_NSID_SIZE (NVME_MAX_NAMESPACES + 1)
+ DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE);
+
+ NvmeSubsystem *subsys;
NvmeNamespace namespace;
+ /*
+ * Namespaces attached to this controller. If subsys is not given, all
+ * namespaces in this list are always attached.
+ */
NvmeNamespace *namespaces[NVME_MAX_NAMESPACES];
NvmeSQueue **sq;
NvmeCQueue **cq;
@@ -189,6 +221,29 @@ static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid)
return n->namespaces[nsid - 1];
}
+static inline bool nvme_ns_is_attached(NvmeCtrl *n, NvmeNamespace *ns)
+{
+ int nsid;
+
+ for (nsid = 1; nsid <= n->num_namespaces; nsid++) {
+ if (nvme_ns(n, nsid) == ns) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static inline void nvme_ns_attach(NvmeCtrl *n, NvmeNamespace *ns)
+{
+ n->namespaces[nvme_nsid(ns) - 1] = ns;
+}
+
+static inline void nvme_ns_detach(NvmeCtrl *n, NvmeNamespace *ns)
+{
+ n->namespaces[nvme_nsid(ns) - 1] = NULL;
+}
+
static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
{
NvmeSQueue *sq = req->sq;
diff --git a/hw/block/tc58128.c b/hw/block/tc58128.c
index 9888f01ac6..bfc27ad899 100644
--- a/hw/block/tc58128.c
+++ b/hw/block/tc58128.c
@@ -1,3 +1,29 @@
+/*
+ * TC58128 NAND EEPROM emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * SPDX-License-Identifier: MIT
+ */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/sh4/sh.h"
diff --git a/hw/block/trace-events b/hw/block/trace-events
index d32475c398..ef06d2ea74 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -37,26 +37,36 @@ pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2
pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
pci_nvme_map_prp(uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2, int num_prps) "trans_len %"PRIu64" len %"PRIu32" prp1 0x%"PRIx64" prp2 0x%"PRIx64" num_prps %d"
-pci_nvme_map_sgl(uint16_t cid, uint8_t typ, uint64_t len) "cid %"PRIu16" type 0x%"PRIx8" len %"PRIu64""
+pci_nvme_map_sgl(uint8_t typ, uint64_t len) "type 0x%"PRIx8" len %"PRIu64""
pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'"
pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'"
+pci_nvme_flush(uint16_t cid, uint32_t nsid) "cid %"PRIu16" nsid %"PRIu32""
pci_nvme_read(uint16_t cid, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
pci_nvme_write(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
pci_nvme_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'"
+pci_nvme_copy(uint16_t cid, uint32_t nsid, uint16_t nr, uint8_t format) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu16" format 0x%"PRIx8""
+pci_nvme_copy_source_range(uint64_t slba, uint32_t nlb) "slba 0x%"PRIx64" nlb %"PRIu32""
+pci_nvme_copy_in_complete(uint16_t cid) "cid %"PRIu16""
+pci_nvme_copy_cb(uint16_t cid) "cid %"PRIu16""
pci_nvme_block_status(int64_t offset, int64_t bytes, int64_t pnum, int ret, bool zeroed) "offset %"PRId64" bytes %"PRId64" pnum %"PRId64" ret 0x%x zeroed %d"
pci_nvme_dsm(uint16_t cid, uint32_t nsid, uint32_t nr, uint32_t attr) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu32" attr 0x%"PRIx32""
pci_nvme_dsm_deallocate(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba %"PRIu64" nlb %"PRIu32""
+pci_nvme_dsm_single_range_limit_exceeded(uint32_t nlb, uint32_t dmrsl) "nlb %"PRIu32" dmrsl %"PRIu32""
pci_nvme_compare(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" nlb %"PRIu32""
pci_nvme_compare_cb(uint16_t cid) "cid %"PRIu16""
pci_nvme_aio_discard_cb(uint16_t cid) "cid %"PRIu16""
+pci_nvme_aio_copy_in_cb(uint16_t cid) "cid %"PRIu16""
pci_nvme_aio_zone_reset_cb(uint16_t cid, uint64_t zslba) "cid %"PRIu16" zslba 0x%"PRIx64""
+pci_nvme_aio_flush_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'"
pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
pci_nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16""
+pci_nvme_identify(uint16_t cid, uint8_t cns, uint16_t ctrlid, uint8_t csi) "cid %"PRIu16" cns 0x%"PRIx8" ctrlid %"PRIu16" csi 0x%"PRIx8""
pci_nvme_identify_ctrl(void) "identify controller"
pci_nvme_identify_ctrl_csi(uint8_t csi) "identify controller, csi=0x%"PRIx8""
pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32""
+pci_nvme_identify_ns_attached_list(uint16_t cntid) "cntid=%"PRIu16""
pci_nvme_identify_ns_csi(uint32_t ns, uint8_t csi) "nsid=%"PRIu32", csi=0x%"PRIx8""
pci_nvme_identify_nslist(uint32_t ns) "nsid %"PRIu32""
pci_nvme_identify_nslist_csi(uint16_t ns, uint8_t csi) "nsid=%"PRIu16", csi=0x%"PRIx8""
@@ -75,6 +85,8 @@ pci_nvme_aer(uint16_t cid) "cid %"PRIu16""
pci_nvme_aer_aerl_exceeded(void) "aerl exceeded"
pci_nvme_aer_masked(uint8_t type, uint8_t mask) "type 0x%"PRIx8" mask 0x%"PRIx8""
pci_nvme_aer_post_cqe(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""
+pci_nvme_ns_attachment(uint16_t cid, uint8_t sel) "cid %"PRIu16", sel=0x%"PRIx8""
+pci_nvme_ns_attachment_attach(uint16_t cntlid, uint32_t nsid) "cntlid=0x%"PRIx16", nsid=0x%"PRIx32""
pci_nvme_enqueue_event(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""
pci_nvme_enqueue_event_noqueue(int queued) "queued %d"
pci_nvme_enqueue_event_masked(uint8_t typ) "type 0x%"PRIx8""
@@ -107,15 +119,17 @@ pci_nvme_clear_ns_close(uint32_t state, uint64_t slba) "zone state=%"PRIu32", sl
pci_nvme_clear_ns_reset(uint32_t state, uint64_t slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Empty state"
# nvme traces for error conditions
-pci_nvme_err_mdts(uint16_t cid, size_t len) "cid %"PRIu16" len %zu"
+pci_nvme_err_mdts(size_t len) "len %zu"
+pci_nvme_err_zasl(size_t len) "len %zu"
pci_nvme_err_req_status(uint16_t cid, uint32_t nsid, uint16_t status, uint8_t opc) "cid %"PRIu16" nsid %"PRIu32" status 0x%"PRIx16" opc 0x%"PRIx8""
pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64""
pci_nvme_err_addr_write(uint64_t addr) "addr 0x%"PRIx64""
pci_nvme_err_cfs(void) "controller fatal status"
pci_nvme_err_aio(uint16_t cid, const char *errname, uint16_t status) "cid %"PRIu16" err '%s' status 0x%"PRIx16""
+pci_nvme_err_copy_invalid_format(uint8_t format) "format 0x%"PRIx8""
pci_nvme_err_invalid_sgld(uint16_t cid, uint8_t typ) "cid %"PRIu16" type 0x%"PRIx8""
pci_nvme_err_invalid_num_sgld(uint16_t cid, uint8_t typ) "cid %"PRIu16" type 0x%"PRIx8""
-pci_nvme_err_invalid_sgl_excess_length(uint16_t cid) "cid %"PRIu16""
+pci_nvme_err_invalid_sgl_excess_length(uint32_t residual) "residual %"PRIu32""
pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is not page aligned: 0x%"PRIx64""
pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
@@ -136,7 +150,6 @@ pci_nvme_err_zone_boundary(uint64_t slba, uint32_t nlb, uint64_t zcap) "lba 0x%"
pci_nvme_err_zone_invalid_write(uint64_t slba, uint64_t wp) "lba 0x%"PRIx64" wp 0x%"PRIx64""
pci_nvme_err_zone_write_not_ok(uint64_t slba, uint32_t nlb, uint16_t status) "slba=%"PRIu64", nlb=%"PRIu32", status=0x%"PRIx16""
pci_nvme_err_zone_read_not_ok(uint64_t slba, uint32_t nlb, uint16_t status) "slba=%"PRIu64", nlb=%"PRIu32", status=0x%"PRIx16""
-pci_nvme_err_append_too_large(uint64_t slba, uint32_t nlb, uint8_t zasl) "slba=%"PRIu64", nlb=%"PRIu32", zasl=%"PRIu8""
pci_nvme_err_insuff_active_res(uint32_t max_active) "max_active=%"PRIu32" zone limit exceeded"
pci_nvme_err_insuff_open_res(uint32_t max_open) "max_open=%"PRIu32" zone limit exceeded"
pci_nvme_err_zd_extension_map_error(uint32_t zone_idx) "can't map descriptor extension for zone_idx=%"PRIu32""
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index da4fbf9084..b870a50e6b 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -54,6 +54,9 @@ static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
VHostUserBlk *s = VHOST_USER_BLK(vdev);
+ /* Our num_queues property overrides the value provided by the device backend */
+ virtio_stw_p(vdev, &s->blkcfg.num_queues, s->num_queues);
+
memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config));
}
@@ -491,10 +494,6 @@ reconnect:
goto reconnect;
}
- if (s->blkcfg.num_queues != s->num_queues) {
- s->blkcfg.num_queues = s->num_queues;
- }
-
return;
virtio_err:
diff --git a/hw/char/Kconfig b/hw/char/Kconfig
index 939bc44758..f6f4fffd1b 100644
--- a/hw/char/Kconfig
+++ b/hw/char/Kconfig
@@ -50,6 +50,9 @@ config SCLPCONSOLE
config TERMINAL3270
bool
+config SH_SCI
+ bool
+
config RENESAS_SCI
bool
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index c603e14012..ceb677bc5a 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -519,7 +519,7 @@ static void cadence_uart_realize(DeviceState *dev, Error **errp)
uart_event, NULL, s, NULL, true);
}
-static void cadence_uart_refclk_update(void *opaque)
+static void cadence_uart_refclk_update(void *opaque, ClockEvent event)
{
CadenceUARTState *s = opaque;
@@ -537,7 +537,7 @@ static void cadence_uart_init(Object *obj)
sysbus_init_irq(sbd, &s->irq);
s->refclk = qdev_init_clock_in(DEVICE(obj), "refclk",
- cadence_uart_refclk_update, s);
+ cadence_uart_refclk_update, s, ClockUpdate);
/* initialize the frequency in case the clock remains unconnected */
clock_set_hz(s->refclk, UART_DEFAULT_REF_CLK);
diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c
index 89f1182c9b..edcaa30ade 100644
--- a/hw/char/ibex_uart.c
+++ b/hw/char/ibex_uart.c
@@ -396,7 +396,7 @@ static void ibex_uart_write(void *opaque, hwaddr addr,
}
}
-static void ibex_uart_clk_update(void *opaque)
+static void ibex_uart_clk_update(void *opaque, ClockEvent event)
{
IbexUartState *s = opaque;
@@ -466,7 +466,7 @@ static void ibex_uart_init(Object *obj)
IbexUartState *s = IBEX_UART(obj);
s->f_clk = qdev_init_clock_in(DEVICE(obj), "f_clock",
- ibex_uart_clk_update, s);
+ ibex_uart_clk_update, s, ClockUpdate);
clock_set_hz(s->f_clk, IBEX_UART_CLOCK);
sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->tx_watermark);
diff --git a/hw/char/meson.build b/hw/char/meson.build
index 196ac91fa2..afe9a0af88 100644
--- a/hw/char/meson.build
+++ b/hw/char/meson.build
@@ -31,7 +31,7 @@ softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_uart.c'))
softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_aux.c'))
softmmu_ss.add(when: 'CONFIG_RENESAS_SCI', if_true: files('renesas_sci.c'))
softmmu_ss.add(when: 'CONFIG_SIFIVE_UART', if_true: files('sifive_uart.c'))
-softmmu_ss.add(when: 'CONFIG_SH4', if_true: files('sh_serial.c'))
+softmmu_ss.add(when: 'CONFIG_SH_SCI', if_true: files('sh_serial.c'))
softmmu_ss.add(when: 'CONFIG_STM32F2XX_USART', if_true: files('stm32f2xx_usart.c'))
softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_MMUART', if_true: files('mchp_pfsoc_mmuart.c'))
diff --git a/hw/char/pl011.c b/hw/char/pl011.c
index ea4a4e5235..c5621a195f 100644
--- a/hw/char/pl011.c
+++ b/hw/char/pl011.c
@@ -309,7 +309,7 @@ static void pl011_event(void *opaque, QEMUChrEvent event)
pl011_put_fifo(opaque, 0x400);
}
-static void pl011_clock_update(void *opaque)
+static void pl011_clock_update(void *opaque, ClockEvent event)
{
PL011State *s = PL011(opaque);
@@ -378,7 +378,8 @@ static void pl011_init(Object *obj)
sysbus_init_irq(sbd, &s->irq[i]);
}
- s->clk = qdev_init_clock_in(DEVICE(obj), "clk", pl011_clock_update, s);
+ s->clk = qdev_init_clock_in(DEVICE(obj), "clk", pl011_clock_update, s,
+ ClockUpdate);
s->read_trigger = 1;
s->ifl = 0x12;
diff --git a/hw/core/clock.c b/hw/core/clock.c
index 76b5f468b6..fc5a99683f 100644
--- a/hw/core/clock.c
+++ b/hw/core/clock.c
@@ -39,15 +39,17 @@ Clock *clock_new(Object *parent, const char *name)
return clk;
}
-void clock_set_callback(Clock *clk, ClockCallback *cb, void *opaque)
+void clock_set_callback(Clock *clk, ClockCallback *cb, void *opaque,
+ unsigned int events)
{
clk->callback = cb;
clk->callback_opaque = opaque;
+ clk->callback_events = events;
}
void clock_clear_callback(Clock *clk)
{
- clock_set_callback(clk, NULL, NULL);
+ clock_set_callback(clk, NULL, NULL, 0);
}
bool clock_set(Clock *clk, uint64_t period)
@@ -62,18 +64,32 @@ bool clock_set(Clock *clk, uint64_t period)
return true;
}
+static void clock_call_callback(Clock *clk, ClockEvent event)
+{
+ /*
+ * Call the Clock's callback for this event, if it has one and
+ * is interested in this event.
+ */
+ if (clk->callback && (clk->callback_events & event)) {
+ clk->callback(clk->callback_opaque, event);
+ }
+}
+
static void clock_propagate_period(Clock *clk, bool call_callbacks)
{
Clock *child;
QLIST_FOREACH(child, &clk->children, sibling) {
if (child->period != clk->period) {
+ if (call_callbacks) {
+ clock_call_callback(child, ClockPreUpdate);
+ }
child->period = clk->period;
trace_clock_update(CLOCK_PATH(child), CLOCK_PATH(clk),
CLOCK_PERIOD_TO_HZ(clk->period),
call_callbacks);
- if (call_callbacks && child->callback) {
- child->callback(child->callback_opaque);
+ if (call_callbacks) {
+ clock_call_callback(child, ClockUpdate);
}
clock_propagate_period(child, call_callbacks);
}
diff --git a/hw/core/qdev-clock.c b/hw/core/qdev-clock.c
index eb05f2a13c..117f4c6ea4 100644
--- a/hw/core/qdev-clock.c
+++ b/hw/core/qdev-clock.c
@@ -111,7 +111,8 @@ Clock *qdev_init_clock_out(DeviceState *dev, const char *name)
}
Clock *qdev_init_clock_in(DeviceState *dev, const char *name,
- ClockCallback *callback, void *opaque)
+ ClockCallback *callback, void *opaque,
+ unsigned int events)
{
NamedClockList *ncl;
@@ -120,7 +121,7 @@ Clock *qdev_init_clock_in(DeviceState *dev, const char *name,
ncl = qdev_init_clocklist(dev, name, false, NULL);
if (callback) {
- clock_set_callback(ncl->clock, callback, opaque);
+ clock_set_callback(ncl->clock, callback, opaque, events);
}
return ncl->clock;
}
@@ -137,7 +138,8 @@ void qdev_init_clocks(DeviceState *dev, const ClockPortInitArray clocks)
if (elem->is_output) {
*clkp = qdev_init_clock_out(dev, elem->name);
} else {
- *clkp = qdev_init_clock_in(dev, elem->name, elem->callback, dev);
+ *clkp = qdev_init_clock_in(dev, elem->name, elem->callback, dev,
+ elem->callback_events);
}
}
}
diff --git a/hw/dma/Kconfig b/hw/dma/Kconfig
index 5d6be1a7a7..98fbb1bb04 100644
--- a/hw/dma/Kconfig
+++ b/hw/dma/Kconfig
@@ -26,3 +26,7 @@ config STP2000
config SIFIVE_PDMA
bool
+
+config XLNX_CSU_DMA
+ bool
+ select REGISTER
diff --git a/hw/dma/meson.build b/hw/dma/meson.build
index 47b4a7cb47..5c78a4e05f 100644
--- a/hw/dma/meson.build
+++ b/hw/dma/meson.build
@@ -14,3 +14,4 @@ softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dma.c', 'soc_dma.c'))
softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_dma.c'))
softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_dma.c'))
softmmu_ss.add(when: 'CONFIG_SIFIVE_PDMA', if_true: files('sifive_pdma.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_CSU_DMA', if_true: files('xlnx_csu_dma.c'))
diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c
index b643b413c5..03bc500878 100644
--- a/hw/dma/sparc32_dma.c
+++ b/hw/dma/sparc32_dma.c
@@ -295,13 +295,13 @@ static void sparc32_espdma_device_init(Object *obj)
memory_region_init_io(&s->iomem, OBJECT(s), &dma_mem_ops, s,
"espdma-mmio", DMA_SIZE);
- object_initialize_child(obj, "esp", &es->esp, TYPE_ESP);
+ object_initialize_child(obj, "esp", &es->esp, TYPE_SYSBUS_ESP);
}
static void sparc32_espdma_device_realize(DeviceState *dev, Error **errp)
{
ESPDMADeviceState *es = SPARC32_ESPDMA_DEVICE(dev);
- SysBusESPState *sysbus = ESP(&es->esp);
+ SysBusESPState *sysbus = SYSBUS_ESP(&es->esp);
ESPState *esp = &sysbus->esp;
esp->dma_memory_read = espdma_memory_read;
diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c
new file mode 100644
index 0000000000..98324dadcd
--- /dev/null
+++ b/hw/dma/xlnx_csu_dma.c
@@ -0,0 +1,745 @@
+/*
+ * Xilinx Platform CSU Stream DMA emulation
+ *
+ * This implementation is based on
+ * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "sysemu/dma.h"
+#include "hw/ptimer.h"
+#include "hw/stream.h"
+#include "hw/register.h"
+#include "hw/dma/xlnx_csu_dma.h"
+
+/*
+ * Ref: UG1087 (v1.7) February 8, 2019
+ * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers.html
+ * CSUDMA Module section
+ */
+REG32(ADDR, 0x0)
+ FIELD(ADDR, ADDR, 2, 30) /* wo */
+REG32(SIZE, 0x4)
+ FIELD(SIZE, SIZE, 2, 27) /* wo */
+ FIELD(SIZE, LAST_WORD, 0, 1) /* rw, only exists in SRC */
+REG32(STATUS, 0x8)
+ FIELD(STATUS, DONE_CNT, 13, 3) /* wtc */
+ FIELD(STATUS, FIFO_LEVEL, 5, 8) /* ro */
+ FIELD(STATUS, OUTSTANDING, 1, 4) /* ro */
+ FIELD(STATUS, BUSY, 0, 1) /* ro */
+REG32(CTRL, 0xc)
+ FIELD(CTRL, FIFOTHRESH, 25, 7) /* rw, only exists in DST, reset 0x40 */
+ FIELD(CTRL, APB_ERR_RESP, 24, 1) /* rw */
+ FIELD(CTRL, ENDIANNESS, 23, 1) /* rw */
+ FIELD(CTRL, AXI_BRST_TYPE, 22, 1) /* rw */
+ FIELD(CTRL, TIMEOUT_VAL, 10, 12) /* rw, reset: 0xFFE */
+ FIELD(CTRL, FIFO_THRESH, 2, 8) /* rw, reset: 0x80 */
+ FIELD(CTRL, PAUSE_STRM, 1, 1) /* rw */
+ FIELD(CTRL, PAUSE_MEM, 0, 1) /* rw */
+REG32(CRC, 0x10)
+REG32(INT_STATUS, 0x14)
+ FIELD(INT_STATUS, FIFO_OVERFLOW, 7, 1) /* wtc */
+ FIELD(INT_STATUS, INVALID_APB, 6, 1) /* wtc */
+ FIELD(INT_STATUS, THRESH_HIT, 5, 1) /* wtc */
+ FIELD(INT_STATUS, TIMEOUT_MEM, 4, 1) /* wtc */
+ FIELD(INT_STATUS, TIMEOUT_STRM, 3, 1) /* wtc */
+ FIELD(INT_STATUS, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
+ FIELD(INT_STATUS, DONE, 1, 1) /* wtc */
+ FIELD(INT_STATUS, MEM_DONE, 0, 1) /* wtc */
+REG32(INT_ENABLE, 0x18)
+ FIELD(INT_ENABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
+ FIELD(INT_ENABLE, INVALID_APB, 6, 1) /* wtc */
+ FIELD(INT_ENABLE, THRESH_HIT, 5, 1) /* wtc */
+ FIELD(INT_ENABLE, TIMEOUT_MEM, 4, 1) /* wtc */
+ FIELD(INT_ENABLE, TIMEOUT_STRM, 3, 1) /* wtc */
+ FIELD(INT_ENABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
+ FIELD(INT_ENABLE, DONE, 1, 1) /* wtc */
+ FIELD(INT_ENABLE, MEM_DONE, 0, 1) /* wtc */
+REG32(INT_DISABLE, 0x1c)
+ FIELD(INT_DISABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
+ FIELD(INT_DISABLE, INVALID_APB, 6, 1) /* wtc */
+ FIELD(INT_DISABLE, THRESH_HIT, 5, 1) /* wtc */
+ FIELD(INT_DISABLE, TIMEOUT_MEM, 4, 1) /* wtc */
+ FIELD(INT_DISABLE, TIMEOUT_STRM, 3, 1) /* wtc */
+ FIELD(INT_DISABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
+ FIELD(INT_DISABLE, DONE, 1, 1) /* wtc */
+ FIELD(INT_DISABLE, MEM_DONE, 0, 1) /* wtc */
+REG32(INT_MASK, 0x20)
+ FIELD(INT_MASK, FIFO_OVERFLOW, 7, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, INVALID_APB, 6, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, THRESH_HIT, 5, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, TIMEOUT_MEM, 4, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, TIMEOUT_STRM, 3, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, AXI_BRESP_ERR, 2, 1) /* ro, reset: 0x1, SRC: AXI_RDERR */
+ FIELD(INT_MASK, DONE, 1, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, MEM_DONE, 0, 1) /* ro, reset: 0x1 */
+REG32(CTRL2, 0x24)
+ FIELD(CTRL2, ARCACHE, 24, 3) /* rw */
+ FIELD(CTRL2, ROUTE_BIT, 23, 1) /* rw */
+ FIELD(CTRL2, TIMEOUT_EN, 22, 1) /* rw */
+ FIELD(CTRL2, TIMEOUT_PRE, 4, 12) /* rw, reset: 0xFFF */
+ FIELD(CTRL2, MAX_OUTS_CMDS, 0, 4) /* rw, reset: 0x8 */
+REG32(ADDR_MSB, 0x28)
+ FIELD(ADDR_MSB, ADDR_MSB, 0, 17) /* wo */
+
+#define R_CTRL_TIMEOUT_VAL_RESET (0xFFE)
+#define R_CTRL_FIFO_THRESH_RESET (0x80)
+#define R_CTRL_FIFOTHRESH_RESET (0x40)
+
+#define R_CTRL2_TIMEOUT_PRE_RESET (0xFFF)
+#define R_CTRL2_MAX_OUTS_CMDS_RESET (0x8)
+
+#define XLNX_CSU_DMA_ERR_DEBUG (0)
+#define XLNX_CSU_DMA_INT_R_MASK (0xff)
+
+/* UG1807: Set the prescaler value for the timeout in clk (~2.5ns) cycles */
+#define XLNX_CSU_DMA_TIMER_FREQ (400 * 1000 * 1000)
+
+static bool xlnx_csu_dma_is_paused(XlnxCSUDMA *s)
+{
+ bool paused;
+
+ paused = !!(s->regs[R_CTRL] & R_CTRL_PAUSE_STRM_MASK);
+ paused |= !!(s->regs[R_CTRL] & R_CTRL_PAUSE_MEM_MASK);
+
+ return paused;
+}
+
+static bool xlnx_csu_dma_get_eop(XlnxCSUDMA *s)
+{
+ return s->r_size_last_word;
+}
+
+static bool xlnx_csu_dma_burst_is_fixed(XlnxCSUDMA *s)
+{
+ return !!(s->regs[R_CTRL] & R_CTRL_AXI_BRST_TYPE_MASK);
+}
+
+static bool xlnx_csu_dma_timeout_enabled(XlnxCSUDMA *s)
+{
+ return !!(s->regs[R_CTRL2] & R_CTRL2_TIMEOUT_EN_MASK);
+}
+
+static void xlnx_csu_dma_update_done_cnt(XlnxCSUDMA *s, int a)
+{
+ int cnt;
+
+ /* Adjust DONE_CNT by a (may be negative) */
+ cnt = ARRAY_FIELD_EX32(s->regs, STATUS, DONE_CNT) + a;
+ ARRAY_FIELD_DP32(s->regs, STATUS, DONE_CNT, cnt);
+}
+
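+/*
+ * Post-process a transferred buffer: the SRC channel accumulates a running
+ * checksum of the data into the CRC register (a simple sum of 32-bit words),
+ * and both channels byte-swap each word in place when the ENDIANNESS
+ * control bit is set.
+ */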
+static void xlnx_csu_dma_data_process(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
+{
+ uint32_t bswap;
+ uint32_t i;
+
+ bswap = s->regs[R_CTRL] & R_CTRL_ENDIANNESS_MASK;
+ if (s->is_dst && !bswap) {
+ /* Fast path: nothing to do for the DST channel when ENDIANNESS is clear */
+ return;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ uint8_t *b = &buf[i];
+ union {
+ uint8_t u8[4];
+ uint32_t u32;
+ } v = {
+ .u8 = { b[0], b[1], b[2], b[3] }
+ };
+
+ if (!s->is_dst) {
+ s->regs[R_CRC] += v.u32;
+ }
+ if (bswap) {
+ /*
+ * No point in using a bswap helper; we need to write back
+ * into a potentially unaligned pointer.
+ */
+ b[0] = v.u8[3];
+ b[1] = v.u8[2];
+ b[2] = v.u8[1];
+ b[3] = v.u8[0];
+ }
+ }
+}
+
+static void xlnx_csu_dma_update_irq(XlnxCSUDMA *s)
+{
+ qemu_set_irq(s->irq, !!(s->regs[R_INT_STATUS] & ~s->regs[R_INT_MASK]));
+}
+
+/* len is in bytes */
+static uint32_t xlnx_csu_dma_read(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
+{
+ hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+ MemTxResult result = MEMTX_OK;
+
+ if (xlnx_csu_dma_burst_is_fixed(s)) {
+ uint32_t i;
+
+ for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
+ uint32_t mlen = MIN(len - i, s->width);
+
+ result = address_space_rw(s->dma_as, addr, s->attr,
+ buf + i, mlen, false);
+ }
+ } else {
+ result = address_space_rw(s->dma_as, addr, s->attr, buf, len, false);
+ }
+
+ if (result == MEMTX_OK) {
+ xlnx_csu_dma_data_process(s, buf, len);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
+ " for mem read", __func__, addr);
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
+ xlnx_csu_dma_update_irq(s);
+ }
+ return len;
+}
+
+/* len is in bytes */
+static uint32_t xlnx_csu_dma_write(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
+{
+ hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+ MemTxResult result = MEMTX_OK;
+
+ xlnx_csu_dma_data_process(s, buf, len);
+ if (xlnx_csu_dma_burst_is_fixed(s)) {
+ uint32_t i;
+
+ for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
+ uint32_t mlen = MIN(len - i, s->width);
+
+ result = address_space_rw(s->dma_as, addr, s->attr,
+ buf, mlen, true);
+ buf += mlen;
+ }
+ } else {
+ result = address_space_rw(s->dma_as, addr, s->attr, buf, len, true);
+ }
+
+ if (result != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
+ " for mem write", __func__, addr);
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
+ xlnx_csu_dma_update_irq(s);
+ }
+ return len;
+}
+
+static void xlnx_csu_dma_done(XlnxCSUDMA *s)
+{
+ s->regs[R_STATUS] &= ~R_STATUS_BUSY_MASK;
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_DONE_MASK;
+
+ if (!s->is_dst) {
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_MEM_DONE_MASK;
+ }
+
+ xlnx_csu_dma_update_done_cnt(s, 1);
+}
+
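+/*
+ * Account for 'len' bytes of completed transfer: shrink SIZE, advance the
+ * 64-bit address (unless the burst type is fixed) and signal completion
+ * once the programmed size has been fully consumed.
+ */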
+static uint32_t xlnx_csu_dma_advance(XlnxCSUDMA *s, uint32_t len)
+{
+ uint32_t size = s->regs[R_SIZE];
+ hwaddr dst = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+
+ assert(len <= size);
+
+ size -= len;
+ s->regs[R_SIZE] = size;
+
+ if (!xlnx_csu_dma_burst_is_fixed(s)) {
+ dst += len;
+ s->regs[R_ADDR] = (uint32_t) dst;
+ s->regs[R_ADDR_MSB] = dst >> 32;
+ }
+
+ if (size == 0) {
+ xlnx_csu_dma_done(s);
+ }
+
+ return size;
+}
+
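+/*
+ * Pump the SRC (read) channel: read from memory and push the data to the
+ * attached stream sink until the programmed SIZE is exhausted, the channel
+ * is paused, or the sink applies backpressure. If the sink cannot accept
+ * more data and timeouts are enabled, arm the backpressure timer.
+ */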
+static void xlnx_csu_dma_src_notify(void *opaque)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
+ unsigned char buf[4 * 1024];
+ size_t rlen = 0;
+
+ ptimer_transaction_begin(s->src_timer);
+ /* Stop the backpressure timer */
+ ptimer_stop(s->src_timer);
+
+ while (s->regs[R_SIZE] && !xlnx_csu_dma_is_paused(s) &&
+ stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
+ uint32_t plen = MIN(s->regs[R_SIZE], sizeof buf);
+ bool eop = false;
+
+ /* Did we fit it all? */
+ if (s->regs[R_SIZE] == plen && xlnx_csu_dma_get_eop(s)) {
+ eop = true;
+ }
+
+ /* DMA transfer */
+ xlnx_csu_dma_read(s, buf, plen);
+ rlen = stream_push(s->tx_dev, buf, plen, eop);
+ xlnx_csu_dma_advance(s, rlen);
+ }
+
+ if (xlnx_csu_dma_timeout_enabled(s) && s->regs[R_SIZE] &&
+ !stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
+ uint32_t timeout = ARRAY_FIELD_EX32(s->regs, CTRL, TIMEOUT_VAL);
+ uint32_t div = ARRAY_FIELD_EX32(s->regs, CTRL2, TIMEOUT_PRE) + 1;
+ uint32_t freq = XLNX_CSU_DMA_TIMER_FREQ;
+
+ freq /= div;
+ ptimer_set_freq(s->src_timer, freq);
+ ptimer_set_count(s->src_timer, timeout);
+ ptimer_run(s->src_timer, 1);
+ }
+
+ ptimer_transaction_commit(s->src_timer);
+ xlnx_csu_dma_update_irq(s);
+}
+
+static uint64_t addr_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ /* Address is word aligned */
+ return val & R_ADDR_ADDR_MASK;
+}
+
+static uint64_t size_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ if (s->regs[R_SIZE] != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Starting DMA while already running.\n", __func__);
+ }
+
+ if (!s->is_dst) {
+ s->r_size_last_word = !!(val & R_SIZE_LAST_WORD_MASK);
+ }
+
+ /* Size is word aligned */
+ return val & R_SIZE_SIZE_MASK;
+}
+
+static uint64_t size_post_read(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ return val | s->r_size_last_word;
+}
+
+static void size_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ s->regs[R_STATUS] |= R_STATUS_BUSY_MASK;
+
+ /*
+ * Note that if SIZE is programmed to 0, and the DMA is started,
+ * the interrupts DONE and MEM_DONE will be asserted.
+ */
+ if (s->regs[R_SIZE] == 0) {
+ xlnx_csu_dma_done(s);
+ xlnx_csu_dma_update_irq(s);
+ return;
+ }
+
+ /* Writing SIZE is considered the last step in transfer configuration */
+ if (!s->is_dst) {
+ xlnx_csu_dma_src_notify(s);
+ } else {
+ if (s->notify) {
+ s->notify(s->notify_opaque);
+ }
+ }
+}
+
+static uint64_t status_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ return val & (R_STATUS_DONE_CNT_MASK | R_STATUS_BUSY_MASK);
+}
+
+static void ctrl_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ if (!s->is_dst) {
+ if (!xlnx_csu_dma_is_paused(s)) {
+ xlnx_csu_dma_src_notify(s);
+ }
+ } else {
+ if (!xlnx_csu_dma_is_paused(s) && s->notify) {
+ s->notify(s->notify_opaque);
+ }
+ }
+}
+
+static uint64_t int_status_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ /* DMA counter decrements when flag 'DONE' is cleared */
+ if ((val & s->regs[R_INT_STATUS] & R_INT_STATUS_DONE_MASK)) {
+ xlnx_csu_dma_update_done_cnt(s, -1);
+ }
+
+ return s->regs[R_INT_STATUS] & ~val;
+}
+
+static void int_status_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ xlnx_csu_dma_update_irq(s);
+}
+
+static uint64_t int_enable_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+ uint32_t v32 = val;
+
+ /*
+ * R_INT_ENABLE doesn't have its own state.
+ * It is used to indirectly modify R_INT_MASK.
+ *
+ * 1: Enable this interrupt field (the mask bit will be cleared to 0)
+ * 0: No effect
+ */
+ s->regs[R_INT_MASK] &= ~v32;
+ return 0;
+}
+
+static void int_enable_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ xlnx_csu_dma_update_irq(s);
+}
+
+static uint64_t int_disable_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+ uint32_t v32 = val;
+
+ /*
+ * R_INT_DISABLE doesn't have its own state.
+ * It is used to indirectly modify R_INT_MASK.
+ *
+ * 1: Disable this interrupt field (the mask bit will be set to 1)
+ * 0: No effect
+ */
+ s->regs[R_INT_MASK] |= v32;
+ return 0;
+}
+
+static void int_disable_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ xlnx_csu_dma_update_irq(s);
+}
+
+static uint64_t addr_msb_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ return val & R_ADDR_MSB_ADDR_MSB_MASK;
+}
+
+static const RegisterAccessInfo *xlnx_csu_dma_regs_info[] = {
+#define DMACH_REGINFO(NAME, snd) \
+ (const RegisterAccessInfo []) { \
+ { \
+ .name = #NAME "_ADDR", \
+ .addr = A_ADDR, \
+ .pre_write = addr_pre_write \
+ }, { \
+ .name = #NAME "_SIZE", \
+ .addr = A_SIZE, \
+ .pre_write = size_pre_write, \
+ .post_write = size_post_write, \
+ .post_read = size_post_read \
+ }, { \
+ .name = #NAME "_STATUS", \
+ .addr = A_STATUS, \
+ .pre_write = status_pre_write, \
+ .w1c = R_STATUS_DONE_CNT_MASK, \
+ .ro = (R_STATUS_BUSY_MASK \
+ | R_STATUS_FIFO_LEVEL_MASK \
+ | R_STATUS_OUTSTANDING_MASK) \
+ }, { \
+ .name = #NAME "_CTRL", \
+ .addr = A_CTRL, \
+ .post_write = ctrl_post_write, \
+ .reset = ((R_CTRL_TIMEOUT_VAL_RESET << R_CTRL_TIMEOUT_VAL_SHIFT) \
+ | (R_CTRL_FIFO_THRESH_RESET << R_CTRL_FIFO_THRESH_SHIFT)\
+ | (snd ? 0 : R_CTRL_FIFOTHRESH_RESET \
+ << R_CTRL_FIFOTHRESH_SHIFT)) \
+ }, { \
+ .name = #NAME "_CRC", \
+ .addr = A_CRC, \
+ }, { \
+ .name = #NAME "_INT_STATUS", \
+ .addr = A_INT_STATUS, \
+ .pre_write = int_status_pre_write, \
+ .post_write = int_status_post_write \
+ }, { \
+ .name = #NAME "_INT_ENABLE", \
+ .addr = A_INT_ENABLE, \
+ .pre_write = int_enable_pre_write, \
+ .post_write = int_enable_post_write \
+ }, { \
+ .name = #NAME "_INT_DISABLE", \
+ .addr = A_INT_DISABLE, \
+ .pre_write = int_disable_pre_write, \
+ .post_write = int_disable_post_write \
+ }, { \
+ .name = #NAME "_INT_MASK", \
+ .addr = A_INT_MASK, \
+ .ro = ~0, \
+ .reset = XLNX_CSU_DMA_INT_R_MASK \
+ }, { \
+ .name = #NAME "_CTRL2", \
+ .addr = A_CTRL2, \
+ .reset = ((R_CTRL2_TIMEOUT_PRE_RESET \
+ << R_CTRL2_TIMEOUT_PRE_SHIFT) \
+ | (R_CTRL2_MAX_OUTS_CMDS_RESET \
+ << R_CTRL2_MAX_OUTS_CMDS_SHIFT)) \
+ }, { \
+ .name = #NAME "_ADDR_MSB", \
+ .addr = A_ADDR_MSB, \
+ .pre_write = addr_msb_pre_write \
+ } \
+ }
+
+ DMACH_REGINFO(DMA_SRC, true),
+ DMACH_REGINFO(DMA_DST, false)
+};
+
+static const MemoryRegionOps xlnx_csu_dma_ops = {
+ .read = register_read_memory,
+ .write = register_write_memory,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static void xlnx_csu_dma_src_timeout_hit(void *opaque)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
+
+ /* Ignore the timeout if it is not enabled */
+ if (!xlnx_csu_dma_timeout_enabled(s)) {
+ return;
+ }
+
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_TIMEOUT_STRM_MASK;
+ xlnx_csu_dma_update_irq(s);
+}
+
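+/*
+ * StreamSink 'push' handler for the DST (write) channel: incoming stream
+ * data is written to memory at the programmed address, truncated to the
+ * word-aligned remainder of SIZE. Data arriving while the channel is paused
+ * (or with no word-aligned space left) is dropped and FIFO_OVERFLOW raised.
+ */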
+static size_t xlnx_csu_dma_stream_push(StreamSink *obj, uint8_t *buf,
+ size_t len, bool eop)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
+ uint32_t size = s->regs[R_SIZE];
+ uint32_t mlen = MIN(size, len) & (~3); /* Size is word aligned */
+
+ /* Only called when this is the DST channel */
+ assert(s->is_dst);
+
+ if (size == 0 || len <= 0) {
+ return 0;
+ }
+
+ if (len && (xlnx_csu_dma_is_paused(s) || mlen == 0)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "csu-dma: DST channel dropping %zd b of data.\n", len);
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_FIFO_OVERFLOW_MASK;
+ return len;
+ }
+
+ if (xlnx_csu_dma_write(s, buf, mlen) != mlen) {
+ return 0;
+ }
+
+ xlnx_csu_dma_advance(s, mlen);
+ xlnx_csu_dma_update_irq(s);
+
+ return mlen;
+}
+
+static bool xlnx_csu_dma_stream_can_push(StreamSink *obj,
+ StreamCanPushNotifyFn notify,
+ void *notify_opaque)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
+
+ if (s->regs[R_SIZE] != 0) {
+ return true;
+ } else {
+ s->notify = notify;
+ s->notify_opaque = notify_opaque;
+ return false;
+ }
+}
+
+static void xlnx_csu_dma_reset(DeviceState *dev)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+}
+
+static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
+ RegisterInfoArray *reg_array;
+
+ reg_array =
+ register_init_block32(dev, xlnx_csu_dma_regs_info[!!s->is_dst],
+ XLNX_CSU_DMA_R_MAX,
+ s->regs_info, s->regs,
+ &xlnx_csu_dma_ops,
+ XLNX_CSU_DMA_ERR_DEBUG,
+ XLNX_CSU_DMA_R_MAX * 4);
+ memory_region_add_subregion(&s->iomem,
+ 0x0,
+ &reg_array->mem);
+
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
+
+ if (!s->is_dst && !s->tx_dev) {
+ error_setg(errp, "zynqmp.csu-dma: Stream not connected");
+ return;
+ }
+
+ s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit,
+ s, PTIMER_POLICY_DEFAULT);
+
+ if (s->dma_mr) {
+ s->dma_as = g_malloc0(sizeof(AddressSpace));
+ address_space_init(s->dma_as, s->dma_mr, NULL);
+ } else {
+ s->dma_as = &address_space_memory;
+ }
+
+ s->attr = MEMTXATTRS_UNSPECIFIED;
+
+ s->r_size_last_word = 0;
+}
+
+static const VMStateDescription vmstate_xlnx_csu_dma = {
+ .name = TYPE_XLNX_CSU_DMA,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_PTIMER(src_timer, XlnxCSUDMA),
+ VMSTATE_UINT16(width, XlnxCSUDMA),
+ VMSTATE_BOOL(is_dst, XlnxCSUDMA),
+ VMSTATE_BOOL(r_size_last_word, XlnxCSUDMA),
+ VMSTATE_UINT32_ARRAY(regs, XlnxCSUDMA, XLNX_CSU_DMA_R_MAX),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static Property xlnx_csu_dma_properties[] = {
+ /*
+ * Ref PG021, Stream Data Width:
+ * Data width in bits of the AXI S2MM AXI4-Stream Data bus.
+ * This value must be equal to or less than the Memory Map Data Width.
+ * Valid values are 8, 16, 32, 64, 128, 512 and 1024.
+ * "dma-width" is the byte value of the "Stream Data Width".
+ */
+ DEFINE_PROP_UINT16("dma-width", XlnxCSUDMA, width, 4),
+ /*
+ * The CSU DMA is a simple two-channel DMA, allowing separate control of
+ * the SRC (read) channel and DST (write) channel. "is-dst" is used to mark
+ * which channel the device is connected to.
+ */
+ DEFINE_PROP_BOOL("is-dst", XlnxCSUDMA, is_dst, true),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
+
+ dc->reset = xlnx_csu_dma_reset;
+ dc->realize = xlnx_csu_dma_realize;
+ dc->vmsd = &vmstate_xlnx_csu_dma;
+ device_class_set_props(dc, xlnx_csu_dma_properties);
+
+ ssc->push = xlnx_csu_dma_stream_push;
+ ssc->can_push = xlnx_csu_dma_stream_can_push;
+}
+
+static void xlnx_csu_dma_init(Object *obj)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
+
+ memory_region_init(&s->iomem, obj, TYPE_XLNX_CSU_DMA,
+ XLNX_CSU_DMA_R_MAX * 4);
+
+ object_property_add_link(obj, "stream-connected-dma", TYPE_STREAM_SINK,
+ (Object **)&s->tx_dev,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+ object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
+ (Object **)&s->dma_mr,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+}
+
+static const TypeInfo xlnx_csu_dma_info = {
+ .name = TYPE_XLNX_CSU_DMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxCSUDMA),
+ .class_init = xlnx_csu_dma_class_init,
+ .instance_init = xlnx_csu_dma_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_STREAM_SINK },
+ { }
+ }
+};
+
+static void xlnx_csu_dma_register_types(void)
+{
+ type_register_static(&xlnx_csu_dma_info);
+}
+
+type_init(xlnx_csu_dma_register_types)
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 8aa85dec54..410db9ef96 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -58,7 +58,6 @@
#include "sysemu/numa.h"
#include "sysemu/kvm.h"
#include "sysemu/xen.h"
-#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "kvm/kvm_i386.h"
diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig
index c18d11142a..66bf0b90b4 100644
--- a/hw/intc/Kconfig
+++ b/hw/intc/Kconfig
@@ -53,6 +53,9 @@ config OMPIC
config PPC_UIC
bool
+config SH_INTC
+ bool
+
config RX_ICU
bool
diff --git a/hw/intc/meson.build b/hw/intc/meson.build
index 53cba11569..b3d9345a0d 100644
--- a/hw/intc/meson.build
+++ b/hw/intc/meson.build
@@ -47,7 +47,7 @@ specific_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_co
specific_ss.add(when: 'CONFIG_RX_ICU', if_true: files('rx_icu.c'))
specific_ss.add(when: 'CONFIG_S390_FLIC', if_true: files('s390_flic.c'))
specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c'))
-specific_ss.add(when: 'CONFIG_SH4', if_true: files('sh_intc.c'))
+specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_CLINT', if_true: files('sifive_clint.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c'))
diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c
index d4eca46767..4d2e866eec 100644
--- a/hw/m68k/q800.c
+++ b/hw/m68k/q800.c
@@ -350,8 +350,8 @@ static void q800_init(MachineState *machine)
/* SCSI */
- dev = qdev_new(TYPE_ESP);
- sysbus_esp = ESP(dev);
+ dev = qdev_new(TYPE_SYSBUS_ESP);
+ sysbus_esp = SYSBUS_ESP(dev);
esp = &sysbus_esp->esp;
esp->dma_memory_read = NULL;
esp->dma_memory_write = NULL;
diff --git a/hw/mips/cps.c b/hw/mips/cps.c
index 7a0d289efa..2b436700ce 100644
--- a/hw/mips/cps.c
+++ b/hw/mips/cps.c
@@ -39,7 +39,7 @@ static void mips_cps_init(Object *obj)
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
MIPSCPSState *s = MIPS_CPS(obj);
- s->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, NULL);
+ s->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, NULL, 0);
/*
* Cover entire address space as there do not seem to be any
* constraints for the base address of CPC and GIC.
diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c
index 83c8086062..1a0888a0fd 100644
--- a/hw/mips/jazz.c
+++ b/hw/mips/jazz.c
@@ -328,8 +328,8 @@ static void mips_jazz_init(MachineState *machine,
}
/* SCSI adapter */
- dev = qdev_new(TYPE_ESP);
- sysbus_esp = ESP(dev);
+ dev = qdev_new(TYPE_SYSBUS_ESP);
+ sysbus_esp = SYSBUS_ESP(dev);
esp = &sysbus_esp->esp;
esp->dma_memory_read = rc4030_dma_read;
esp->dma_memory_write = rc4030_dma_write;
diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig
index 19c216f3ef..5426b9b1a1 100644
--- a/hw/misc/Kconfig
+++ b/hw/misc/Kconfig
@@ -2,6 +2,15 @@ config APPLESMC
bool
depends on ISA_BUS
+config ARMSSE_CPUID
+ bool
+
+config ARMSSE_MHU
+ bool
+
+config ARMSSE_CPU_PWRCTRL
+ bool
+
config MAX111X
bool
diff --git a/hw/misc/armsse-cpu-pwrctrl.c b/hw/misc/armsse-cpu-pwrctrl.c
new file mode 100644
index 0000000000..42fc38879f
--- /dev/null
+++ b/hw/misc/armsse-cpu-pwrctrl.c
@@ -0,0 +1,149 @@
+/*
+ * Arm SSE CPU PWRCTRL register block
+ *
+ * Copyright (c) 2021 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "CPU<N>_PWRCTRL block" which is part of the
+ * Arm Corstone SSE-300 Example Subsystem and documented in
+ * https://developer.arm.com/documentation/101773/0000
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+#include "hw/sysbus.h"
+#include "hw/registerfields.h"
+#include "hw/misc/armsse-cpu-pwrctrl.h"
+
+REG32(CPUPWRCFG, 0x0)
+REG32(PID4, 0xfd0)
+REG32(PID5, 0xfd4)
+REG32(PID6, 0xfd8)
+REG32(PID7, 0xfdc)
+REG32(PID0, 0xfe0)
+REG32(PID1, 0xfe4)
+REG32(PID2, 0xfe8)
+REG32(PID3, 0xfec)
+REG32(CID0, 0xff0)
+REG32(CID1, 0xff4)
+REG32(CID2, 0xff8)
+REG32(CID3, 0xffc)
+
+/* PID/CID values */
+static const int cpu_pwrctrl_id[] = {
+ 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
+ 0x5a, 0xb8, 0x0b, 0x00, /* PID0..PID3 */
+ 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
+};
+
+static uint64_t pwrctrl_read(void *opaque, hwaddr offset, unsigned size)
+{
+ ARMSSECPUPwrCtrl *s = ARMSSE_CPU_PWRCTRL(opaque);
+ uint64_t r;
+
+ switch (offset) {
+ case A_CPUPWRCFG:
+ r = s->cpupwrcfg;
+ break;
+ case A_PID4 ... A_CID3:
+ r = cpu_pwrctrl_id[(offset - A_PID4) / 4];
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE CPU_PWRCTRL read: bad offset %x\n", (int)offset);
+ r = 0;
+ break;
+ }
+ trace_armsse_cpu_pwrctrl_read(offset, r, size);
+ return r;
+}
+
+static void pwrctrl_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ ARMSSECPUPwrCtrl *s = ARMSSE_CPU_PWRCTRL(opaque);
+
+ trace_armsse_cpu_pwrctrl_write(offset, value, size);
+
+ switch (offset) {
+ case A_CPUPWRCFG:
+ qemu_log_mask(LOG_UNIMP,
+ "SSE CPU_PWRCTRL: CPUPWRCFG unimplemented\n");
+ s->cpupwrcfg = value;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE CPU_PWRCTRL write: bad offset 0x%x\n", (int)offset);
+ break;
+ }
+}
+
+static const MemoryRegionOps pwrctrl_ops = {
+ .read = pwrctrl_read,
+ .write = pwrctrl_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static void pwrctrl_reset(DeviceState *dev)
+{
+ ARMSSECPUPwrCtrl *s = ARMSSE_CPU_PWRCTRL(dev);
+
+ s->cpupwrcfg = 0;
+}
+
+static const VMStateDescription pwrctrl_vmstate = {
+ .name = "armsse-cpu-pwrctrl",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(cpupwrcfg, ARMSSECPUPwrCtrl),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void pwrctrl_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ ARMSSECPUPwrCtrl *s = ARMSSE_CPU_PWRCTRL(obj);
+
+ memory_region_init_io(&s->iomem, obj, &pwrctrl_ops,
+ s, "armsse-cpu-pwrctrl", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static void pwrctrl_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = pwrctrl_reset;
+ dc->vmsd = &pwrctrl_vmstate;
+}
+
+static const TypeInfo pwrctrl_info = {
+ .name = TYPE_ARMSSE_CPU_PWRCTRL,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(ARMSSECPUPwrCtrl),
+ .instance_init = pwrctrl_init,
+ .class_init = pwrctrl_class_init,
+};
+
+static void pwrctrl_register_types(void)
+{
+ type_register_static(&pwrctrl_info);
+}
+
+type_init(pwrctrl_register_types);
diff --git a/hw/misc/aspeed_lpc.c b/hw/misc/aspeed_lpc.c
new file mode 100644
index 0000000000..2dddb27c35
--- /dev/null
+++ b/hw/misc/aspeed_lpc.c
@@ -0,0 +1,486 @@
+/*
+ * ASPEED LPC Controller
+ *
+ * Copyright (C) 2017-2018 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/misc/aspeed_lpc.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
+#define TO_REG(offset) ((offset) >> 2)
+
+#define HICR0 TO_REG(0x00)
+#define HICR0_LPC3E BIT(7)
+#define HICR0_LPC2E BIT(6)
+#define HICR0_LPC1E BIT(5)
+#define HICR1 TO_REG(0x04)
+#define HICR2 TO_REG(0x08)
+#define HICR2_IBFIE3 BIT(3)
+#define HICR2_IBFIE2 BIT(2)
+#define HICR2_IBFIE1 BIT(1)
+#define HICR3 TO_REG(0x0C)
+#define HICR4 TO_REG(0x10)
+#define HICR4_KCSENBL BIT(2)
+#define IDR1 TO_REG(0x24)
+#define IDR2 TO_REG(0x28)
+#define IDR3 TO_REG(0x2C)
+#define ODR1 TO_REG(0x30)
+#define ODR2 TO_REG(0x34)
+#define ODR3 TO_REG(0x38)
+#define STR1 TO_REG(0x3C)
+#define STR_OBF BIT(0)
+#define STR_IBF BIT(1)
+#define STR_CMD_DATA BIT(3)
+#define STR2 TO_REG(0x40)
+#define STR3 TO_REG(0x44)
+#define HICR5 TO_REG(0x80)
+#define HICR6 TO_REG(0x84)
+#define HICR7 TO_REG(0x88)
+#define HICR8 TO_REG(0x8C)
+#define HICRB TO_REG(0x100)
+#define HICRB_IBFIE4 BIT(1)
+#define HICRB_LPC4E BIT(0)
+#define IDR4 TO_REG(0x114)
+#define ODR4 TO_REG(0x118)
+#define STR4 TO_REG(0x11C)
+
+enum aspeed_kcs_channel_id {
+ kcs_channel_1 = 0,
+ kcs_channel_2,
+ kcs_channel_3,
+ kcs_channel_4,
+};
+
+static const enum aspeed_lpc_subdevice aspeed_kcs_subdevice_map[] = {
+ [kcs_channel_1] = aspeed_lpc_kcs_1,
+ [kcs_channel_2] = aspeed_lpc_kcs_2,
+ [kcs_channel_3] = aspeed_lpc_kcs_3,
+ [kcs_channel_4] = aspeed_lpc_kcs_4,
+};
+
+struct aspeed_kcs_channel {
+ enum aspeed_kcs_channel_id id;
+
+ int idr;
+ int odr;
+ int str;
+};
+
+static const struct aspeed_kcs_channel aspeed_kcs_channel_map[] = {
+ [kcs_channel_1] = {
+ .id = kcs_channel_1,
+ .idr = IDR1,
+ .odr = ODR1,
+ .str = STR1
+ },
+
+ [kcs_channel_2] = {
+ .id = kcs_channel_2,
+ .idr = IDR2,
+ .odr = ODR2,
+ .str = STR2
+ },
+
+ [kcs_channel_3] = {
+ .id = kcs_channel_3,
+ .idr = IDR3,
+ .odr = ODR3,
+ .str = STR3
+ },
+
+ [kcs_channel_4] = {
+ .id = kcs_channel_4,
+ .idr = IDR4,
+ .odr = ODR4,
+ .str = STR4
+ },
+};
+
+struct aspeed_kcs_register_data {
+ const char *name;
+ int reg;
+ const struct aspeed_kcs_channel *chan;
+};
+
+static const struct aspeed_kcs_register_data aspeed_kcs_registers[] = {
+ {
+ .name = "idr1",
+ .reg = IDR1,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_1],
+ },
+ {
+ .name = "odr1",
+ .reg = ODR1,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_1],
+ },
+ {
+ .name = "str1",
+ .reg = STR1,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_1],
+ },
+ {
+ .name = "idr2",
+ .reg = IDR2,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_2],
+ },
+ {
+ .name = "odr2",
+ .reg = ODR2,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_2],
+ },
+ {
+ .name = "str2",
+ .reg = STR2,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_2],
+ },
+ {
+ .name = "idr3",
+ .reg = IDR3,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_3],
+ },
+ {
+ .name = "odr3",
+ .reg = ODR3,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_3],
+ },
+ {
+ .name = "str3",
+ .reg = STR3,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_3],
+ },
+ {
+ .name = "idr4",
+ .reg = IDR4,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_4],
+ },
+ {
+ .name = "odr4",
+ .reg = ODR4,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_4],
+ },
+ {
+ .name = "str4",
+ .reg = STR4,
+ .chan = &aspeed_kcs_channel_map[kcs_channel_4],
+ },
+ { },
+};
+
+static const struct aspeed_kcs_register_data *
+aspeed_kcs_get_register_data_by_name(const char *name)
+{
+ const struct aspeed_kcs_register_data *pos = aspeed_kcs_registers;
+
+ while (pos->name) {
+ if (!strcmp(pos->name, name)) {
+ return pos;
+ }
+ pos++;
+ }
+
+ return NULL;
+}
+
+static const struct aspeed_kcs_channel *
+aspeed_kcs_get_channel_by_register(int reg)
+{
+ const struct aspeed_kcs_register_data *pos = aspeed_kcs_registers;
+
+ while (pos->name) {
+ if (pos->reg == reg) {
+ return pos->chan;
+ }
+ pos++;
+ }
+
+ return NULL;
+}
+
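+/*
+ * QOM property getter for the KCS data/status registers. Reading an ODR
+ * through this path also clears the OBF (output buffer full) flag in the
+ * matching STR, as a host-side read of the output data register would.
+ */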
+static void aspeed_kcs_get_register_property(Object *obj,
+ Visitor *v,
+ const char *name,
+ void *opaque,
+ Error **errp)
+{
+ const struct aspeed_kcs_register_data *data;
+ AspeedLPCState *s = ASPEED_LPC(obj);
+ uint32_t val;
+
+ data = aspeed_kcs_get_register_data_by_name(name);
+ if (!data) {
+ return;
+ }
+
+ if (!strncmp("odr", name, 3)) {
+ s->regs[data->chan->str] &= ~STR_OBF;
+ }
+
+ val = s->regs[data->reg];
+
+ visit_type_uint32(v, name, &val, errp);
+}
+
+static bool aspeed_kcs_channel_enabled(AspeedLPCState *s,
+ const struct aspeed_kcs_channel *channel)
+{
+ switch (channel->id) {
+ case kcs_channel_1: return s->regs[HICR0] & HICR0_LPC1E;
+ case kcs_channel_2: return s->regs[HICR0] & HICR0_LPC2E;
+ case kcs_channel_3:
+ return (s->regs[HICR0] & HICR0_LPC3E) &&
+ (s->regs[HICR4] & HICR4_KCSENBL);
+ case kcs_channel_4: return s->regs[HICRB] & HICRB_LPC4E;
+ default: return false;
+ }
+}
+
+static bool
+aspeed_kcs_channel_ibf_irq_enabled(AspeedLPCState *s,
+ const struct aspeed_kcs_channel *channel)
+{
+ if (!aspeed_kcs_channel_enabled(s, channel)) {
+ return false;
+ }
+
+ switch (channel->id) {
+ case kcs_channel_1: return s->regs[HICR2] & HICR2_IBFIE1;
+ case kcs_channel_2: return s->regs[HICR2] & HICR2_IBFIE2;
+ case kcs_channel_3: return s->regs[HICR2] & HICR2_IBFIE3;
+ case kcs_channel_4: return s->regs[HICRB] & HICRB_IBFIE4;
+ default: return false;
+ }
+}
+
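+/*
+ * QOM property setter for the KCS registers. STR is read-only through this
+ * path; writing an IDR sets the IBF (input buffer full) flag in the matching
+ * STR and raises the channel's subdevice IRQ if IBF interrupts are enabled.
+ */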
+static void aspeed_kcs_set_register_property(Object *obj,
+ Visitor *v,
+ const char *name,
+ void *opaque,
+ Error **errp)
+{
+ const struct aspeed_kcs_register_data *data;
+ AspeedLPCState *s = ASPEED_LPC(obj);
+ uint32_t val;
+
+ data = aspeed_kcs_get_register_data_by_name(name);
+ if (!data) {
+ return;
+ }
+
+ if (!visit_type_uint32(v, name, &val, errp)) {
+ return;
+ }
+
+ if (strncmp("str", name, 3)) {
+ s->regs[data->reg] = val;
+ }
+
+ if (!strncmp("idr", name, 3)) {
+ s->regs[data->chan->str] |= STR_IBF;
+ if (aspeed_kcs_channel_ibf_irq_enabled(s, data->chan)) {
+ enum aspeed_lpc_subdevice subdev;
+
+ subdev = aspeed_kcs_subdevice_map[data->chan->id];
+ qemu_irq_raise(s->subdevice_irqs[subdev]);
+ }
+ }
+}
+
+static void aspeed_lpc_set_irq(void *opaque, int irq, int level)
+{
+ AspeedLPCState *s = (AspeedLPCState *)opaque;
+
+ if (level) {
+ s->subdevice_irqs_pending |= BIT(irq);
+ } else {
+ s->subdevice_irqs_pending &= ~BIT(irq);
+ }
+
+ qemu_set_irq(s->irq, !!s->subdevice_irqs_pending);
+}
+
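+/*
+ * A guest read of an IDR completes the input handshake: IBF is cleared in
+ * the matching STR and the channel's subdevice IRQ is lowered if it was
+ * pending.
+ */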
+static uint64_t aspeed_lpc_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AspeedLPCState *s = ASPEED_LPC(opaque);
+ int reg = TO_REG(offset);
+
+ if (reg >= ARRAY_SIZE(s->regs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ return 0;
+ }
+
+ switch (reg) {
+ case IDR1:
+ case IDR2:
+ case IDR3:
+ case IDR4:
+ {
+ const struct aspeed_kcs_channel *channel;
+
+ channel = aspeed_kcs_get_channel_by_register(reg);
+ if (s->regs[channel->str] & STR_IBF) {
+ enum aspeed_lpc_subdevice subdev;
+
+ subdev = aspeed_kcs_subdevice_map[channel->id];
+ qemu_irq_lower(s->subdevice_irqs[subdev]);
+ }
+
+ s->regs[channel->str] &= ~STR_IBF;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return s->regs[reg];
+}
+
+static void aspeed_lpc_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned int size)
+{
+ AspeedLPCState *s = ASPEED_LPC(opaque);
+ int reg = TO_REG(offset);
+
+ if (reg >= ARRAY_SIZE(s->regs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ return;
+ }
+
+ switch (reg) {
+ case ODR1:
+ case ODR2:
+ case ODR3:
+ case ODR4:
+ s->regs[aspeed_kcs_get_channel_by_register(reg)->str] |= STR_OBF;
+ break;
+ default:
+ break;
+ }
+
+ s->regs[reg] = data;
+}
+
+static const MemoryRegionOps aspeed_lpc_ops = {
+ .read = aspeed_lpc_read,
+ .write = aspeed_lpc_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+static void aspeed_lpc_reset(DeviceState *dev)
+{
+ struct AspeedLPCState *s = ASPEED_LPC(dev);
+
+ s->subdevice_irqs_pending = 0;
+
+ memset(s->regs, 0, sizeof(s->regs));
+
+ s->regs[HICR7] = s->hicr7;
+}
+
+static void aspeed_lpc_realize(DeviceState *dev, Error **errp)
+{
+ AspeedLPCState *s = ASPEED_LPC(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ sysbus_init_irq(sbd, &s->irq);
+ sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_kcs_1]);
+ sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_kcs_2]);
+ sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_kcs_3]);
+ sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_kcs_4]);
+ sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_ibt]);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_lpc_ops, s,
+ TYPE_ASPEED_LPC, 0x1000);
+
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ qdev_init_gpio_in(dev, aspeed_lpc_set_irq, ASPEED_LPC_NR_SUBDEVS);
+}
+
+static void aspeed_lpc_init(Object *obj)
+{
+ object_property_add(obj, "idr1", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "odr1", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "str1", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "idr2", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "odr2", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "str2", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "idr3", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "odr3", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "str3", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "idr4", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "odr4", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+ object_property_add(obj, "str4", "uint32", aspeed_kcs_get_register_property,
+ aspeed_kcs_set_register_property, NULL, NULL);
+}
+
+static const VMStateDescription vmstate_aspeed_lpc = {
+ .name = TYPE_ASPEED_LPC,
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, AspeedLPCState, ASPEED_LPC_NR_REGS),
+ VMSTATE_UINT32(subdevice_irqs_pending, AspeedLPCState),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static Property aspeed_lpc_properties[] = {
+ DEFINE_PROP_UINT32("hicr7", AspeedLPCState, hicr7, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void aspeed_lpc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = aspeed_lpc_realize;
+ dc->reset = aspeed_lpc_reset;
+ dc->desc = "Aspeed LPC Controller";
+ dc->vmsd = &vmstate_aspeed_lpc;
+ device_class_set_props(dc, aspeed_lpc_properties);
+}
+
+static const TypeInfo aspeed_lpc_info = {
+ .name = TYPE_ASPEED_LPC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AspeedLPCState),
+ .class_init = aspeed_lpc_class_init,
+ .instance_init = aspeed_lpc_init,
+};
+
+static void aspeed_lpc_register_types(void)
+{
+ type_register_static(&aspeed_lpc_info);
+}
+
+type_init(aspeed_lpc_register_types);
diff --git a/hw/misc/bcm2835_cprman.c b/hw/misc/bcm2835_cprman.c
index 7e415a017c..75e6c574d4 100644
--- a/hw/misc/bcm2835_cprman.c
+++ b/hw/misc/bcm2835_cprman.c
@@ -107,7 +107,7 @@ static void pll_update(CprmanPllState *pll)
clock_update_hz(pll->out, freq);
}
-static void pll_xosc_update(void *opaque)
+static void pll_xosc_update(void *opaque, ClockEvent event)
{
pll_update(CPRMAN_PLL(opaque));
}
@@ -116,7 +116,8 @@ static void pll_init(Object *obj)
{
CprmanPllState *s = CPRMAN_PLL(obj);
- s->xosc_in = qdev_init_clock_in(DEVICE(s), "xosc-in", pll_xosc_update, s);
+ s->xosc_in = qdev_init_clock_in(DEVICE(s), "xosc-in", pll_xosc_update,
+ s, ClockUpdate);
s->out = qdev_init_clock_out(DEVICE(s), "out");
}
@@ -209,7 +210,7 @@ static void pll_update_all_channels(BCM2835CprmanState *s,
}
}
-static void pll_channel_pll_in_update(void *opaque)
+static void pll_channel_pll_in_update(void *opaque, ClockEvent event)
{
pll_channel_update(CPRMAN_PLL_CHANNEL(opaque));
}
@@ -219,7 +220,8 @@ static void pll_channel_init(Object *obj)
CprmanPllChannelState *s = CPRMAN_PLL_CHANNEL(obj);
s->pll_in = qdev_init_clock_in(DEVICE(s), "pll-in",
- pll_channel_pll_in_update, s);
+ pll_channel_pll_in_update, s,
+ ClockUpdate);
s->out = qdev_init_clock_out(DEVICE(s), "out");
}
@@ -303,7 +305,7 @@ static void clock_mux_update(CprmanClockMuxState *mux)
clock_update_hz(mux->out, freq);
}
-static void clock_mux_src_update(void *opaque)
+static void clock_mux_src_update(void *opaque, ClockEvent event)
{
CprmanClockMuxState **backref = opaque;
CprmanClockMuxState *s = *backref;
@@ -335,7 +337,8 @@ static void clock_mux_init(Object *obj)
s->backref[i] = s;
s->srcs[i] = qdev_init_clock_in(DEVICE(s), name,
clock_mux_src_update,
- &s->backref[i]);
+ &s->backref[i],
+ ClockUpdate);
g_free(name);
}
@@ -380,7 +383,7 @@ static void dsi0hsck_mux_update(CprmanDsi0HsckMuxState *s)
clock_update(s->out, clock_get(src));
}
-static void dsi0hsck_mux_in_update(void *opaque)
+static void dsi0hsck_mux_in_update(void *opaque, ClockEvent event)
{
dsi0hsck_mux_update(CPRMAN_DSI0HSCK_MUX(opaque));
}
@@ -390,8 +393,10 @@ static void dsi0hsck_mux_init(Object *obj)
CprmanDsi0HsckMuxState *s = CPRMAN_DSI0HSCK_MUX(obj);
DeviceState *dev = DEVICE(obj);
- s->plla_in = qdev_init_clock_in(dev, "plla-in", dsi0hsck_mux_in_update, s);
- s->plld_in = qdev_init_clock_in(dev, "plld-in", dsi0hsck_mux_in_update, s);
+ s->plla_in = qdev_init_clock_in(dev, "plla-in", dsi0hsck_mux_in_update,
+ s, ClockUpdate);
+ s->plld_in = qdev_init_clock_in(dev, "plld-in", dsi0hsck_mux_in_update,
+ s, ClockUpdate);
s->out = qdev_init_clock_out(DEVICE(s), "out");
}
diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c
index 9fdb82056a..7b41cfa8fc 100644
--- a/hw/misc/iotkit-secctl.c
+++ b/hw/misc/iotkit-secctl.c
@@ -19,6 +19,8 @@
#include "hw/registerfields.h"
#include "hw/irq.h"
#include "hw/misc/iotkit-secctl.h"
+#include "hw/arm/armsse-version.h"
+#include "hw/qdev-properties.h"
/* Registers in the secure privilege control block */
REG32(SECRESPCFG, 0x10)
@@ -95,6 +97,19 @@ static const uint8_t iotkit_secctl_ns_idregs[] = {
0x0d, 0xf0, 0x05, 0xb1,
};
+static const uint8_t iotkit_secctl_s_sse300_idregs[] = {
+ 0x04, 0x00, 0x00, 0x00,
+ 0x52, 0xb8, 0x2b, 0x00,
+ 0x0d, 0xf0, 0x05, 0xb1,
+};
+
+static const uint8_t iotkit_secctl_ns_sse300_idregs[] = {
+ 0x04, 0x00, 0x00, 0x00,
+ 0x53, 0xb8, 0x2b, 0x00,
+ 0x0d, 0xf0, 0x05, 0xb1,
+};
+
+
/* The register sets for the various PPCs (AHB internal, APB internal,
* AHB expansion, APB expansion) are all set up so that they are
* in 16-aligned blocks so offsets 0xN0, 0xN4, 0xN8, 0xNC are PPCs
@@ -213,7 +228,14 @@ static MemTxResult iotkit_secctl_s_read(void *opaque, hwaddr addr,
case A_CID1:
case A_CID2:
case A_CID3:
- r = iotkit_secctl_s_idregs[(offset - A_PID4) / 4];
+ switch (s->sse_version) {
+ case ARMSSE_SSE300:
+ r = iotkit_secctl_s_sse300_idregs[(offset - A_PID4) / 4];
+ break;
+ default:
+ r = iotkit_secctl_s_idregs[(offset - A_PID4) / 4];
+ break;
+ }
break;
case A_SECPPCINTCLR:
case A_SECMSCINTCLR:
@@ -473,7 +495,14 @@ static MemTxResult iotkit_secctl_ns_read(void *opaque, hwaddr addr,
case A_CID1:
case A_CID2:
case A_CID3:
- r = iotkit_secctl_ns_idregs[(offset - A_PID4) / 4];
+ switch (s->sse_version) {
+ case ARMSSE_SSE300:
+ r = iotkit_secctl_ns_sse300_idregs[(offset - A_PID4) / 4];
+ break;
+ default:
+ r = iotkit_secctl_ns_idregs[(offset - A_PID4) / 4];
+ break;
+ }
break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
@@ -710,6 +739,16 @@ static void iotkit_secctl_init(Object *obj)
sysbus_init_mmio(sbd, &s->ns_regs);
}
+static void iotkit_secctl_realize(DeviceState *dev, Error **errp)
+{
+ IoTKitSecCtl *s = IOTKIT_SECCTL(dev);
+
+ if (!armsse_version_valid(s->sse_version)) {
+ error_setg(errp, "invalid sse-version value %d", s->sse_version);
+ return;
+ }
+}
+
static const VMStateDescription iotkit_secctl_ppc_vmstate = {
.name = "iotkit-secctl-ppc",
.version_id = 1,
@@ -775,12 +814,19 @@ static const VMStateDescription iotkit_secctl_vmstate = {
},
};
+static Property iotkit_secctl_props[] = {
+ DEFINE_PROP_UINT32("sse-version", IoTKitSecCtl, sse_version, 0),
+ DEFINE_PROP_END_OF_LIST()
+};
+
static void iotkit_secctl_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &iotkit_secctl_vmstate;
dc->reset = iotkit_secctl_reset;
+ dc->realize = iotkit_secctl_realize;
+ device_class_set_props(dc, iotkit_secctl_props);
}
static const TypeInfo iotkit_secctl_info = {
diff --git a/hw/misc/iotkit-sysctl.c b/hw/misc/iotkit-sysctl.c
index 222511c4b0..9ee8fe8495 100644
--- a/hw/misc/iotkit-sysctl.c
+++ b/hw/misc/iotkit-sysctl.c
@@ -28,6 +28,7 @@
#include "hw/registerfields.h"
#include "hw/misc/iotkit-sysctl.h"
#include "hw/qdev-properties.h"
+#include "hw/arm/armsse-version.h"
#include "target/arm/arm-powerctl.h"
#include "target/arm/cpu.h"
@@ -44,16 +45,22 @@ REG32(SWRESET, 0x108)
FIELD(SWRESET, SWRESETREQ, 9, 1)
REG32(GRETREG, 0x10c)
REG32(INITSVTOR0, 0x110)
+ FIELD(INITSVTOR0, LOCK, 0, 1)
+ FIELD(INITSVTOR0, VTOR, 7, 25)
REG32(INITSVTOR1, 0x114)
REG32(CPUWAIT, 0x118)
REG32(NMI_ENABLE, 0x11c) /* BUSWAIT in IoTKit */
REG32(WICCTRL, 0x120)
REG32(EWCTRL, 0x124)
+REG32(PWRCTRL, 0x1fc)
+ FIELD(PWRCTRL, PPU_ACCESS_UNLOCK, 0, 1)
+ FIELD(PWRCTRL, PPU_ACCESS_FILTER, 1, 1)
REG32(PDCM_PD_SYS_SENSE, 0x200)
+REG32(PDCM_PD_CPU0_SENSE, 0x204)
REG32(PDCM_PD_SRAM0_SENSE, 0x20c)
REG32(PDCM_PD_SRAM1_SENSE, 0x210)
-REG32(PDCM_PD_SRAM2_SENSE, 0x214)
-REG32(PDCM_PD_SRAM3_SENSE, 0x218)
+REG32(PDCM_PD_SRAM2_SENSE, 0x214) /* PDCM_PD_VMR0_SENSE on SSE300 */
+REG32(PDCM_PD_SRAM3_SENSE, 0x218) /* PDCM_PD_VMR1_SENSE on SSE300 */
REG32(PID4, 0xfd0)
REG32(PID5, 0xfd4)
REG32(PID6, 0xfd8)
@@ -68,12 +75,19 @@ REG32(CID2, 0xff8)
REG32(CID3, 0xffc)
/* PID/CID values */
-static const int sysctl_id[] = {
+static const int iotkit_sysctl_id[] = {
0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
0x54, 0xb8, 0x0b, 0x00, /* PID0..PID3 */
0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
};
+/* Also used by the SSE300 */
+static const int sse200_sysctl_id[] = {
+ 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
+ 0x54, 0xb8, 0x1b, 0x00, /* PID0..PID3 */
+ 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
+};
+
/*
* Set the initial secure vector table offset address for the core.
* This will take effect when the CPU next resets.
@@ -100,28 +114,52 @@ static uint64_t iotkit_sysctl_read(void *opaque, hwaddr offset,
r = s->secure_debug;
break;
case A_SCSECCTRL:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ r = s->scsecctrl;
+ break;
+ default:
+ g_assert_not_reached();
}
- r = s->scsecctrl;
break;
case A_FCLK_DIV:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ r = s->fclk_div;
+ break;
+ default:
+ g_assert_not_reached();
}
- r = s->fclk_div;
break;
case A_SYSCLK_DIV:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ r = s->sysclk_div;
+ break;
+ default:
+ g_assert_not_reached();
}
- r = s->sysclk_div;
break;
case A_CLOCK_FORCE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ r = s->clock_force;
+ break;
+ default:
+ g_assert_not_reached();
}
- r = s->clock_force;
break;
case A_RESET_SYNDROME:
r = s->reset_syndrome;
@@ -136,63 +174,178 @@ static uint64_t iotkit_sysctl_read(void *opaque, hwaddr offset,
r = s->initsvtor0;
break;
case A_INITSVTOR1:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ goto bad_offset;
+ case ARMSSE_SSE200:
+ r = s->initsvtor1;
+ break;
+ case ARMSSE_SSE300:
goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- r = s->initsvtor1;
break;
case A_CPUWAIT:
- r = s->cpuwait;
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ r = s->cpuwait;
+ break;
+ case ARMSSE_SSE300:
+ /* In SSE300 this is reserved (for INITSVTOR2) */
+ goto bad_offset;
+ default:
+ g_assert_not_reached();
+ }
break;
case A_NMI_ENABLE:
- /* In IoTKit this is named BUSWAIT but is marked reserved, R/O, zero */
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ /* In IoTKit this is named BUSWAIT but marked reserved, R/O, zero */
r = 0;
break;
+ case ARMSSE_SSE200:
+ r = s->nmi_enable;
+ break;
+ case ARMSSE_SSE300:
+ /* In SSE300 this is reserved (for INITSVTOR3) */
+ goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- r = s->nmi_enable;
break;
case A_WICCTRL:
- r = s->wicctrl;
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ r = s->wicctrl;
+ break;
+ case ARMSSE_SSE300:
+ /* In SSE300 this offset is CPUWAIT */
+ r = s->cpuwait;
+ break;
+ default:
+ g_assert_not_reached();
+ }
break;
case A_EWCTRL:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ r = s->ewctrl;
+ break;
+ case ARMSSE_SSE300:
+ /* In SSE300 this offset is NMI_ENABLE */
+ r = s->nmi_enable;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case A_PWRCTRL:
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ goto bad_offset;
+ case ARMSSE_SSE300:
+ r = s->pwrctrl;
+ break;
+ default:
+ g_assert_not_reached();
}
- r = s->ewctrl;
break;
case A_PDCM_PD_SYS_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ r = s->pdcm_pd_sys_sense;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case A_PDCM_PD_CPU0_SENSE:
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ goto bad_offset;
+ case ARMSSE_SSE300:
+ r = s->pdcm_pd_cpu0_sense;
+ break;
+ default:
+ g_assert_not_reached();
}
- r = s->pdcm_pd_sys_sense;
break;
case A_PDCM_PD_SRAM0_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ r = s->pdcm_pd_sram0_sense;
+ break;
+ case ARMSSE_SSE300:
+ goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- r = s->pdcm_pd_sram0_sense;
break;
case A_PDCM_PD_SRAM1_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ goto bad_offset;
+ case ARMSSE_SSE200:
+ r = s->pdcm_pd_sram1_sense;
+ break;
+ case ARMSSE_SSE300:
goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- r = s->pdcm_pd_sram1_sense;
break;
case A_PDCM_PD_SRAM2_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ r = s->pdcm_pd_sram2_sense;
+ break;
+ case ARMSSE_SSE300:
+ r = s->pdcm_pd_vmr0_sense;
+ break;
+ default:
+ g_assert_not_reached();
}
- r = s->pdcm_pd_sram2_sense;
break;
case A_PDCM_PD_SRAM3_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ r = s->pdcm_pd_sram3_sense;
+ break;
+ case ARMSSE_SSE300:
+ r = s->pdcm_pd_vmr1_sense;
+ break;
+ default:
+ g_assert_not_reached();
}
- r = s->pdcm_pd_sram3_sense;
break;
case A_PID4 ... A_CID3:
- r = sysctl_id[(offset - A_PID4) / 4];
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ r = iotkit_sysctl_id[(offset - A_PID4) / 4];
+ break;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ r = sse200_sysctl_id[(offset - A_PID4) / 4];
+ break;
+ default:
+ g_assert_not_reached();
+ }
break;
case A_SECDBGSET:
case A_SECDBGCLR:
@@ -213,6 +366,21 @@ static uint64_t iotkit_sysctl_read(void *opaque, hwaddr offset,
return r;
}
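+/*
+ * Clearing a CPUWAIT bit that was previously set releases the corresponding
+ * core: power it on and reset it.
+ */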
+static void cpuwait_write(IoTKitSysCtl *s, uint32_t value)
+{
+ int num_cpus = (s->sse_version == ARMSSE_SSE300) ? 1 : 2;
+ int i;
+
+ for (i = 0; i < num_cpus; i++) {
+ uint32_t mask = 1 << i;
+ if ((s->cpuwait & mask) && !(value & mask)) {
+ /* Powering up CPU i */
+ arm_set_cpu_on_and_reset(i);
+ }
+ }
+ s->cpuwait = value;
+}
+
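For reference, a minimal standalone sketch (not part of this patch) of the 1-to-0 edge detection that cpuwait_write() applies to the CPUWAIT bits; the printf stands in for arm_set_cpu_on_and_reset():

#include <stdint.h>
#include <stdio.h>

/* A CPUWAIT bit falling from 1 to 0 releases the corresponding CPU. */
static void cpuwait_update(uint32_t old, uint32_t new_val, int num_cpus)
{
    int i;

    for (i = 0; i < num_cpus; i++) {
        uint32_t mask = 1u << i;

        if ((old & mask) && !(new_val & mask)) {
            printf("release CPU %d from reset\n", i);
        }
    }
}

int main(void)
{
    cpuwait_update(0x3, 0x1, 2); /* only bit 1 falls, so only CPU 1 starts */
    return 0;
}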
static void iotkit_sysctl_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
@@ -249,23 +417,53 @@ static void iotkit_sysctl_write(void *opaque, hwaddr offset,
s->gretreg = value;
break;
case A_INITSVTOR0:
- s->initsvtor0 = value;
- set_init_vtor(0, s->initsvtor0);
+ switch (s->sse_version) {
+ case ARMSSE_SSE300:
+ /* SSE300 has a LOCK bit which prevents further writes when set */
+ if (s->initsvtor0 & R_INITSVTOR0_LOCK_MASK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "IoTKit INITSVTOR0 write when register locked\n");
+ break;
+ }
+ s->initsvtor0 = value;
+ set_init_vtor(0, s->initsvtor0 & R_INITSVTOR0_VTOR_MASK);
+ break;
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ s->initsvtor0 = value;
+ set_init_vtor(0, s->initsvtor0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
break;
case A_CPUWAIT:
- if ((s->cpuwait & 1) && !(value & 1)) {
- /* Powering up CPU 0 */
- arm_set_cpu_on_and_reset(0);
- }
- if ((s->cpuwait & 2) && !(value & 2)) {
- /* Powering up CPU 1 */
- arm_set_cpu_on_and_reset(1);
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ cpuwait_write(s, value);
+ break;
+ case ARMSSE_SSE300:
+ /* In SSE300 this is reserved (for INITSVTOR2) */
+ goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- s->cpuwait = value;
break;
case A_WICCTRL:
- qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl WICCTRL unimplemented\n");
- s->wicctrl = value;
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl WICCTRL unimplemented\n");
+ s->wicctrl = value;
+ break;
+ case ARMSSE_SSE300:
+ /* In SSE300 this offset is CPUWAIT */
+ cpuwait_write(s, value);
+ break;
+ default:
+ g_assert_not_reached();
+ }
break;
case A_SECDBGSET:
/* write-1-to-set */
@@ -283,94 +481,214 @@ static void iotkit_sysctl_write(void *opaque, hwaddr offset,
}
break;
case A_SCSECCTRL:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl SCSECCTRL unimplemented\n");
+ s->scsecctrl = value;
+ break;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl SCSECCTRL unimplemented\n");
- s->scsecctrl = value;
break;
case A_FCLK_DIV:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl FCLK_DIV unimplemented\n");
+ s->fclk_div = value;
+ break;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl FCLK_DIV unimplemented\n");
- s->fclk_div = value;
break;
case A_SYSCLK_DIV:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl SYSCLK_DIV unimplemented\n");
+ s->sysclk_div = value;
+ break;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl SYSCLK_DIV unimplemented\n");
- s->sysclk_div = value;
break;
case A_CLOCK_FORCE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl CLOCK_FORCE unimplemented\n");
+ s->clock_force = value;
+ break;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl CLOCK_FORCE unimplemented\n");
- s->clock_force = value;
break;
case A_INITSVTOR1:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ s->initsvtor1 = value;
+ set_init_vtor(1, s->initsvtor1);
+ break;
+ case ARMSSE_SSE300:
+ goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- s->initsvtor1 = value;
- set_init_vtor(1, s->initsvtor1);
break;
case A_EWCTRL:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl EWCTRL unimplemented\n");
+ s->ewctrl = value;
+ break;
+ case ARMSSE_SSE300:
+ /* In SSE300 this offset is NMI_ENABLE */
+ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl NMI_ENABLE unimplemented\n");
+ s->nmi_enable = value;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case A_PWRCTRL:
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ goto bad_offset;
+ case ARMSSE_SSE300:
+ if (!(s->pwrctrl & R_PWRCTRL_PPU_ACCESS_UNLOCK_MASK)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "IoTKit PWRCTRL write when register locked\n");
+ break;
+ }
+ s->pwrctrl = value;
+ break;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl EWCTRL unimplemented\n");
- s->ewctrl = value;
break;
case A_PDCM_PD_SYS_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ goto bad_offset;
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ qemu_log_mask(LOG_UNIMP,
+ "IoTKit SysCtl PDCM_PD_SYS_SENSE unimplemented\n");
+ s->pdcm_pd_sys_sense = value;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case A_PDCM_PD_CPU0_SENSE:
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
goto bad_offset;
+ case ARMSSE_SSE300:
+ qemu_log_mask(LOG_UNIMP,
+ "IoTKit SysCtl PDCM_PD_CPU0_SENSE unimplemented\n");
+ s->pdcm_pd_cpu0_sense = value;
+ break;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP,
- "IoTKit SysCtl PDCM_PD_SYS_SENSE unimplemented\n");
- s->pdcm_pd_sys_sense = value;
break;
case A_PDCM_PD_SRAM0_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ goto bad_offset;
+ case ARMSSE_SSE200:
+ qemu_log_mask(LOG_UNIMP,
+ "IoTKit SysCtl PDCM_PD_SRAM0_SENSE unimplemented\n");
+ s->pdcm_pd_sram0_sense = value;
+ break;
+ case ARMSSE_SSE300:
goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP,
- "IoTKit SysCtl PDCM_PD_SRAM0_SENSE unimplemented\n");
- s->pdcm_pd_sram0_sense = value;
break;
case A_PDCM_PD_SRAM1_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
+ goto bad_offset;
+ case ARMSSE_SSE200:
+ qemu_log_mask(LOG_UNIMP,
+ "IoTKit SysCtl PDCM_PD_SRAM1_SENSE unimplemented\n");
+ s->pdcm_pd_sram1_sense = value;
+ break;
+ case ARMSSE_SSE300:
goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP,
- "IoTKit SysCtl PDCM_PD_SRAM1_SENSE unimplemented\n");
- s->pdcm_pd_sram1_sense = value;
break;
case A_PDCM_PD_SRAM2_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ qemu_log_mask(LOG_UNIMP,
+ "IoTKit SysCtl PDCM_PD_SRAM2_SENSE unimplemented\n");
+ s->pdcm_pd_sram2_sense = value;
+ break;
+ case ARMSSE_SSE300:
+ qemu_log_mask(LOG_UNIMP,
+ "IoTKit SysCtl PDCM_PD_VMR0_SENSE unimplemented\n");
+ s->pdcm_pd_vmr0_sense = value;
+ break;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP,
- "IoTKit SysCtl PDCM_PD_SRAM2_SENSE unimplemented\n");
- s->pdcm_pd_sram2_sense = value;
break;
case A_PDCM_PD_SRAM3_SENSE:
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto bad_offset;
+ case ARMSSE_SSE200:
+ qemu_log_mask(LOG_UNIMP,
+ "IoTKit SysCtl PDCM_PD_SRAM3_SENSE unimplemented\n");
+ s->pdcm_pd_sram3_sense = value;
+ break;
+ case ARMSSE_SSE300:
+ qemu_log_mask(LOG_UNIMP,
+ "IoTKit SysCtl PDCM_PD_VMR1_SENSE unimplemented\n");
+ s->pdcm_pd_vmr1_sense = value;
+ break;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP,
- "IoTKit SysCtl PDCM_PD_SRAM3_SENSE unimplemented\n");
- s->pdcm_pd_sram3_sense = value;
break;
case A_NMI_ENABLE:
/* In IoTKit this is BUSWAIT: reserved, R/O, zero */
- if (!s->is_sse200) {
+ switch (s->sse_version) {
+ case ARMSSE_IOTKIT:
goto ro_offset;
+ case ARMSSE_SSE200:
+ qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl NMI_ENABLE unimplemented\n");
+ s->nmi_enable = value;
+ break;
+ case ARMSSE_SSE300:
+ /* In SSE300 this is reserved (for INITSVTOR3) */
+ goto bad_offset;
+ default:
+ g_assert_not_reached();
}
- qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl NMI_ENABLE unimplemented\n");
- s->nmi_enable = value;
break;
case A_SECDBGSTAT:
case A_PID4 ... A_CID3:
@@ -417,11 +735,15 @@ static void iotkit_sysctl_reset(DeviceState *dev)
s->clock_force = 0;
s->nmi_enable = 0;
s->ewctrl = 0;
+ s->pwrctrl = 0x3;
s->pdcm_pd_sys_sense = 0x7f;
s->pdcm_pd_sram0_sense = 0;
s->pdcm_pd_sram1_sense = 0;
s->pdcm_pd_sram2_sense = 0;
s->pdcm_pd_sram3_sense = 0;
+ s->pdcm_pd_cpu0_sense = 0;
+ s->pdcm_pd_vmr0_sense = 0;
+ s->pdcm_pd_vmr1_sense = 0;
}
static void iotkit_sysctl_init(Object *obj)
@@ -438,17 +760,38 @@ static void iotkit_sysctl_realize(DeviceState *dev, Error **errp)
{
IoTKitSysCtl *s = IOTKIT_SYSCTL(dev);
- /* The top 4 bits of the SYS_VERSION register tell us if we're an SSE-200 */
- if (extract32(s->sys_version, 28, 4) == 2) {
- s->is_sse200 = true;
+ if (!armsse_version_valid(s->sse_version)) {
+ error_setg(errp, "invalid sse-version value %d", s->sse_version);
+ return;
}
}
+static bool sse300_needed(void *opaque)
+{
+ IoTKitSysCtl *s = IOTKIT_SYSCTL(opaque);
+
+ return s->sse_version == ARMSSE_SSE300;
+}
+
+static const VMStateDescription iotkit_sysctl_sse300_vmstate = {
+ .name = "iotkit-sysctl/sse-300",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = sse300_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(pwrctrl, IoTKitSysCtl),
+ VMSTATE_UINT32(pdcm_pd_cpu0_sense, IoTKitSysCtl),
+ VMSTATE_UINT32(pdcm_pd_vmr0_sense, IoTKitSysCtl),
+ VMSTATE_UINT32(pdcm_pd_vmr1_sense, IoTKitSysCtl),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool sse200_needed(void *opaque)
{
IoTKitSysCtl *s = IOTKIT_SYSCTL(opaque);
- return s->is_sse200;
+ return s->sse_version != ARMSSE_IOTKIT;
}
static const VMStateDescription iotkit_sysctl_sse200_vmstate = {
@@ -488,12 +831,13 @@ static const VMStateDescription iotkit_sysctl_vmstate = {
},
.subsections = (const VMStateDescription*[]) {
&iotkit_sysctl_sse200_vmstate,
+ &iotkit_sysctl_sse300_vmstate,
NULL
}
};
static Property iotkit_sysctl_props[] = {
- DEFINE_PROP_UINT32("SYS_VERSION", IoTKitSysCtl, sys_version, 0),
+ DEFINE_PROP_UINT32("sse-version", IoTKitSysCtl, sse_version, 0),
DEFINE_PROP_UINT32("CPUWAIT_RST", IoTKitSysCtl, cpuwait_rst, 0),
DEFINE_PROP_UINT32("INITSVTOR0_RST", IoTKitSysCtl, initsvtor0_rst,
0x10000000),
diff --git a/hw/misc/iotkit-sysinfo.c b/hw/misc/iotkit-sysinfo.c
index 52e70053df..aaa9305b2e 100644
--- a/hw/misc/iotkit-sysinfo.c
+++ b/hw/misc/iotkit-sysinfo.c
@@ -26,9 +26,12 @@
#include "hw/registerfields.h"
#include "hw/misc/iotkit-sysinfo.h"
#include "hw/qdev-properties.h"
+#include "hw/arm/armsse-version.h"
REG32(SYS_VERSION, 0x0)
REG32(SYS_CONFIG, 0x4)
+REG32(SYS_CONFIG1, 0x8)
+REG32(IIDR, 0xfc8)
REG32(PID4, 0xfd0)
REG32(PID5, 0xfd4)
REG32(PID6, 0xfd8)
@@ -49,6 +52,12 @@ static const int sysinfo_id[] = {
0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
};
+static const int sysinfo_sse300_id[] = {
+ 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
+ 0x58, 0xb8, 0x1b, 0x00, /* PID0..PID3 */
+ 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
+};
+
static uint64_t iotkit_sysinfo_read(void *opaque, hwaddr offset,
unsigned size)
{
@@ -63,10 +72,36 @@ static uint64_t iotkit_sysinfo_read(void *opaque, hwaddr offset,
case A_SYS_CONFIG:
r = s->sys_config;
break;
+ case A_SYS_CONFIG1:
+ switch (s->sse_version) {
+ case ARMSSE_SSE300:
+ return 0;
+ break;
+ default:
+ goto bad_read;
+ }
+ break;
+ case A_IIDR:
+ switch (s->sse_version) {
+ case ARMSSE_SSE300:
+ return s->iidr;
+ break;
+ default:
+ goto bad_read;
+ }
+ break;
case A_PID4 ... A_CID3:
- r = sysinfo_id[(offset - A_PID4) / 4];
+ switch (s->sse_version) {
+ case ARMSSE_SSE300:
+ r = sysinfo_sse300_id[(offset - A_PID4) / 4];
+ break;
+ default:
+ r = sysinfo_id[(offset - A_PID4) / 4];
+ break;
+ }
break;
default:
+ bad_read:
qemu_log_mask(LOG_GUEST_ERROR,
"IoTKit SysInfo read: bad offset %x\n", (int)offset);
r = 0;
@@ -99,6 +134,8 @@ static const MemoryRegionOps iotkit_sysinfo_ops = {
static Property iotkit_sysinfo_props[] = {
DEFINE_PROP_UINT32("SYS_VERSION", IoTKitSysInfo, sys_version, 0),
DEFINE_PROP_UINT32("SYS_CONFIG", IoTKitSysInfo, sys_config, 0),
+ DEFINE_PROP_UINT32("sse-version", IoTKitSysInfo, sse_version, 0),
+ DEFINE_PROP_UINT32("IIDR", IoTKitSysInfo, iidr, 0),
DEFINE_PROP_END_OF_LIST()
};
@@ -112,6 +149,16 @@ static void iotkit_sysinfo_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
}
+static void iotkit_sysinfo_realize(DeviceState *dev, Error **errp)
+{
+ IoTKitSysInfo *s = IOTKIT_SYSINFO(dev);
+
+ if (!armsse_version_valid(s->sse_version)) {
+ error_setg(errp, "invalid sse-version value %d", s->sse_version);
+ return;
+ }
+}
+
static void iotkit_sysinfo_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -120,7 +167,7 @@ static void iotkit_sysinfo_class_init(ObjectClass *klass, void *data)
* This device has no guest-modifiable state and so it
* does not need a reset function or VMState.
*/
-
+ dc->realize = iotkit_sysinfo_realize;
device_class_set_props(dc, iotkit_sysinfo_props);
}
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 603e992a7f..a1fa4878be 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -35,7 +35,6 @@
#include "qom/object_interfaces.h"
#include "chardev/char-fe.h"
#include "sysemu/hostmem.h"
-#include "sysemu/qtest.h"
#include "qapi/visitor.h"
#include "hw/misc/ivshmem.h"
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index 629283957f..00356cf12e 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -96,13 +96,19 @@ softmmu_ss.add(when: 'CONFIG_TZ_MSC', if_true: files('tz-msc.c'))
softmmu_ss.add(when: 'CONFIG_TZ_PPC', if_true: files('tz-ppc.c'))
softmmu_ss.add(when: 'CONFIG_IOTKIT_SECCTL', if_true: files('iotkit-secctl.c'))
softmmu_ss.add(when: 'CONFIG_IOTKIT_SYSINFO', if_true: files('iotkit-sysinfo.c'))
+softmmu_ss.add(when: 'CONFIG_ARMSSE_CPU_PWRCTRL', if_true: files('armsse-cpu-pwrctrl.c'))
softmmu_ss.add(when: 'CONFIG_ARMSSE_CPUID', if_true: files('armsse-cpuid.c'))
softmmu_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c'))
softmmu_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c'))
softmmu_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c'))
softmmu_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c'))
-softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_scu.c', 'aspeed_sdmc.c', 'aspeed_xdma.c'))
+softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files(
+ 'aspeed_lpc.c',
+ 'aspeed_scu.c',
+ 'aspeed_sdmc.c',
+ 'aspeed_xdma.c'))
+
softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-sysreg.c'))
softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_rng.c'))
diff --git a/hw/misc/mps2-fpgaio.c b/hw/misc/mps2-fpgaio.c
index f3db88ddcc..07b8cbdad2 100644
--- a/hw/misc/mps2-fpgaio.c
+++ b/hw/misc/mps2-fpgaio.c
@@ -29,6 +29,7 @@
#include "qemu/timer.h"
REG32(LED0, 0)
+REG32(DBGCTRL, 4)
REG32(BUTTON, 8)
REG32(CLK1HZ, 0x10)
REG32(CLK100HZ, 0x14)
@@ -129,6 +130,12 @@ static uint64_t mps2_fpgaio_read(void *opaque, hwaddr offset, unsigned size)
case A_LED0:
r = s->led0;
break;
+ case A_DBGCTRL:
+ if (!s->has_dbgctrl) {
+ goto bad_offset;
+ }
+ r = s->dbgctrl;
+ break;
case A_BUTTON:
/* User-pressable board buttons. We don't model that, so just return
* zeroes.
@@ -195,6 +202,14 @@ static void mps2_fpgaio_write(void *opaque, hwaddr offset, uint64_t value,
}
}
break;
+ case A_DBGCTRL:
+ if (!s->has_dbgctrl) {
+ goto bad_offset;
+ }
+ qemu_log_mask(LOG_UNIMP,
+ "MPS2 FPGAIO: DBGCTRL unimplemented\n");
+ s->dbgctrl = value;
+ break;
case A_PRESCALE:
resync_counter(s);
s->prescale = value;
@@ -225,6 +240,7 @@ static void mps2_fpgaio_write(void *opaque, hwaddr offset, uint64_t value,
s->pscntr = value;
break;
default:
+ bad_offset:
qemu_log_mask(LOG_GUEST_ERROR,
"MPS2 FPGAIO write: bad offset 0x%x\n", (int) offset);
break;
@@ -285,41 +301,22 @@ static void mps2_fpgaio_realize(DeviceState *dev, Error **errp)
}
}
-static bool mps2_fpgaio_counters_needed(void *opaque)
-{
- /* Currently vmstate.c insists all subsections have a 'needed' function */
- return true;
-}
-
-static const VMStateDescription mps2_fpgaio_counters_vmstate = {
- .name = "mps2-fpgaio/counters",
- .version_id = 2,
- .minimum_version_id = 2,
- .needed = mps2_fpgaio_counters_needed,
- .fields = (VMStateField[]) {
- VMSTATE_INT64(clk1hz_tick_offset, MPS2FPGAIO),
- VMSTATE_INT64(clk100hz_tick_offset, MPS2FPGAIO),
- VMSTATE_UINT32(counter, MPS2FPGAIO),
- VMSTATE_UINT32(pscntr, MPS2FPGAIO),
- VMSTATE_INT64(pscntr_sync_ticks, MPS2FPGAIO),
- VMSTATE_END_OF_LIST()
- }
-};
-
static const VMStateDescription mps2_fpgaio_vmstate = {
.name = "mps2-fpgaio",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (VMStateField[]) {
VMSTATE_UINT32(led0, MPS2FPGAIO),
VMSTATE_UINT32(prescale, MPS2FPGAIO),
VMSTATE_UINT32(misc, MPS2FPGAIO),
+ VMSTATE_UINT32(dbgctrl, MPS2FPGAIO),
+ VMSTATE_INT64(clk1hz_tick_offset, MPS2FPGAIO),
+ VMSTATE_INT64(clk100hz_tick_offset, MPS2FPGAIO),
+ VMSTATE_UINT32(counter, MPS2FPGAIO),
+ VMSTATE_UINT32(pscntr, MPS2FPGAIO),
+ VMSTATE_INT64(pscntr_sync_ticks, MPS2FPGAIO),
VMSTATE_END_OF_LIST()
},
- .subsections = (const VMStateDescription*[]) {
- &mps2_fpgaio_counters_vmstate,
- NULL
- }
};
static Property mps2_fpgaio_properties[] = {
@@ -328,6 +325,7 @@ static Property mps2_fpgaio_properties[] = {
/* Number of LEDs controlled by LED0 register */
DEFINE_PROP_UINT32("num-leds", MPS2FPGAIO, num_leds, 2),
DEFINE_PROP_BOOL("has-switches", MPS2FPGAIO, has_switches, false),
+ DEFINE_PROP_BOOL("has-dbgctrl", MPS2FPGAIO, has_dbgctrl, false),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/misc/mps2-scc.c b/hw/misc/mps2-scc.c
index 140a4b9ceb..c56aca86ad 100644
--- a/hw/misc/mps2-scc.c
+++ b/hw/misc/mps2-scc.c
@@ -110,14 +110,14 @@ static uint64_t mps2_scc_read(void *opaque, hwaddr offset, unsigned size)
r = s->cfg1;
break;
case A_CFG2:
- if (scc_partno(s) != 0x524) {
+ if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) {
/* CFG2 reserved on other boards */
goto bad_offset;
}
r = s->cfg2;
break;
case A_CFG3:
- if (scc_partno(s) == 0x524) {
+ if (scc_partno(s) == 0x524 || scc_partno(s) == 0x547) {
/* CFG3 reserved on AN524 */
goto bad_offset;
}
@@ -130,7 +130,7 @@ static uint64_t mps2_scc_read(void *opaque, hwaddr offset, unsigned size)
r = s->cfg4;
break;
case A_CFG5:
- if (scc_partno(s) != 0x524) {
+ if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) {
/* CFG5 reserved on other boards */
goto bad_offset;
}
@@ -185,7 +185,10 @@ static void mps2_scc_write(void *opaque, hwaddr offset, uint64_t value,
switch (offset) {
case A_CFG0:
- /* TODO on some boards bit 0 controls RAM remapping */
+ /*
+ * TODO on some boards bit 0 controls RAM remapping;
+ * on others bit 1 is CPU_WAIT.
+ */
s->cfg0 = value;
break;
case A_CFG1:
@@ -195,7 +198,7 @@ static void mps2_scc_write(void *opaque, hwaddr offset, uint64_t value,
}
break;
case A_CFG2:
- if (scc_partno(s) != 0x524) {
+ if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) {
/* CFG2 reserved on other boards */
goto bad_offset;
}
@@ -203,7 +206,7 @@ static void mps2_scc_write(void *opaque, hwaddr offset, uint64_t value,
s->cfg2 = value;
break;
case A_CFG5:
- if (scc_partno(s) != 0x524) {
+ if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) {
/* CFG5 reserved on other boards */
goto bad_offset;
}
diff --git a/hw/misc/npcm7xx_clk.c b/hw/misc/npcm7xx_clk.c
index 0bcae9ce95..a1ee67dc9a 100644
--- a/hw/misc/npcm7xx_clk.c
+++ b/hw/misc/npcm7xx_clk.c
@@ -586,15 +586,26 @@ static const DividerInitInfo divider_init_info_list[] = {
},
};
+static void npcm7xx_clk_update_pll_cb(void *opaque, ClockEvent event)
+{
+ npcm7xx_clk_update_pll(opaque);
+}
+
static void npcm7xx_clk_pll_init(Object *obj)
{
NPCM7xxClockPLLState *pll = NPCM7XX_CLOCK_PLL(obj);
pll->clock_in = qdev_init_clock_in(DEVICE(pll), "clock-in",
- npcm7xx_clk_update_pll, pll);
+ npcm7xx_clk_update_pll_cb, pll,
+ ClockUpdate);
pll->clock_out = qdev_init_clock_out(DEVICE(pll), "clock-out");
}
+static void npcm7xx_clk_update_sel_cb(void *opaque, ClockEvent event)
+{
+ npcm7xx_clk_update_sel(opaque);
+}
+
static void npcm7xx_clk_sel_init(Object *obj)
{
int i;
@@ -603,16 +614,23 @@ static void npcm7xx_clk_sel_init(Object *obj)
for (i = 0; i < NPCM7XX_CLK_SEL_MAX_INPUT; ++i) {
sel->clock_in[i] = qdev_init_clock_in(DEVICE(sel),
g_strdup_printf("clock-in[%d]", i),
- npcm7xx_clk_update_sel, sel);
+ npcm7xx_clk_update_sel_cb, sel, ClockUpdate);
}
sel->clock_out = qdev_init_clock_out(DEVICE(sel), "clock-out");
}
+
+static void npcm7xx_clk_update_divider_cb(void *opaque, ClockEvent event)
+{
+ npcm7xx_clk_update_divider(opaque);
+}
+
static void npcm7xx_clk_divider_init(Object *obj)
{
NPCM7xxClockDividerState *div = NPCM7XX_CLOCK_DIVIDER(obj);
div->clock_in = qdev_init_clock_in(DEVICE(div), "clock-in",
- npcm7xx_clk_update_divider, div);
+ npcm7xx_clk_update_divider_cb,
+ div, ClockUpdate);
div->clock_out = qdev_init_clock_out(DEVICE(div), "clock-out");
}
@@ -875,7 +893,7 @@ static void npcm7xx_clk_init_clock_hierarchy(NPCM7xxCLKState *s)
{
int i;
- s->clkref = qdev_init_clock_in(DEVICE(s), "clkref", NULL, NULL);
+ s->clkref = qdev_init_clock_in(DEVICE(s), "clkref", NULL, NULL, 0);
/* First pass: init all converter modules */
QEMU_BUILD_BUG_ON(ARRAY_SIZE(pll_init_info_list) != NPCM7XX_CLOCK_NR_PLLS);
diff --git a/hw/misc/npcm7xx_pwm.c b/hw/misc/npcm7xx_pwm.c
index dabcb6c0f9..ce192bb274 100644
--- a/hw/misc/npcm7xx_pwm.c
+++ b/hw/misc/npcm7xx_pwm.c
@@ -493,7 +493,7 @@ static void npcm7xx_pwm_init(Object *obj)
memory_region_init_io(&s->iomem, obj, &npcm7xx_pwm_ops, s,
TYPE_NPCM7XX_PWM, 4 * KiB);
sysbus_init_mmio(sbd, &s->iomem);
- s->clock = qdev_init_clock_in(DEVICE(s), "clock", NULL, NULL);
+ s->clock = qdev_init_clock_in(DEVICE(s), "clock", NULL, NULL, 0);
for (i = 0; i < NPCM7XX_PWM_PER_MODULE; ++i) {
object_property_add_uint32_ptr(obj, "freq[*]",
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
index d626b9d7a7..4b15db8ca4 100644
--- a/hw/misc/trace-events
+++ b/hw/misc/trace-events
@@ -186,6 +186,10 @@ iotkit_sysctl_read(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysCtl
iotkit_sysctl_write(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysCtl write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
iotkit_sysctl_reset(void) "IoTKit SysCtl: reset"
+# armsse-cpu-pwrctrl.c
+armsse_cpu_pwrctrl_read(uint64_t offset, uint64_t data, unsigned size) "SSE-300 CPU_PWRCTRL read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+armsse_cpu_pwrctrl_write(uint64_t offset, uint64_t data, unsigned size) "SSE-300 CPU_PWRCTRL write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+
# armsse-cpuid.c
armsse_cpuid_read(uint64_t offset, uint64_t data, unsigned size) "SSE-200 CPU_IDENTITY read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
armsse_cpuid_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 CPU_IDENTITY write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
index 66504a9d3a..c66d7db177 100644
--- a/hw/misc/zynq_slcr.c
+++ b/hw/misc/zynq_slcr.c
@@ -307,9 +307,10 @@ static void zynq_slcr_propagate_clocks(ZynqSLCRState *s)
clock_propagate(s->uart1_ref_clk);
}
-static void zynq_slcr_ps_clk_callback(void *opaque)
+static void zynq_slcr_ps_clk_callback(void *opaque, ClockEvent event)
{
ZynqSLCRState *s = (ZynqSLCRState *) opaque;
+
zynq_slcr_compute_clocks(s);
zynq_slcr_propagate_clocks(s);
}
@@ -576,7 +577,7 @@ static const MemoryRegionOps slcr_ops = {
};
static const ClockPortInitArray zynq_slcr_clocks = {
- QDEV_CLOCK_IN(ZynqSLCRState, ps_clk, zynq_slcr_ps_clk_callback),
+ QDEV_CLOCK_IN(ZynqSLCRState, ps_clk, zynq_slcr_ps_clk_callback, ClockUpdate),
QDEV_CLOCK_OUT(ZynqSLCRState, uart0_ref_clk),
QDEV_CLOCK_OUT(ZynqSLCRState, uart1_ref_clk),
QDEV_CLOCK_END
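For context, a brief sketch of the clock API these hunks migrate to: callbacks now receive a ClockEvent, and registration takes an event mask (ClockUpdate here). MyDeviceState, mydev_clk_update and the "clk-in" name are hypothetical placeholders; qdev_init_clock_in() and QDEV_CLOCK_IN() are the calls used in the patch itself:

#include "qemu/osdep.h"
#include "hw/qdev-core.h"
#include "hw/qdev-clock.h"

/* Hypothetical device; only the Clock input matters for this sketch. */
typedef struct MyDeviceState {
    DeviceState parent_obj;
    Clock *clk_in;
} MyDeviceState;

static void mydev_clk_update(void *opaque, ClockEvent event)
{
    MyDeviceState *s = opaque;

    /* recompute whatever is derived from the input clock period */
    (void)s;
}

static void mydev_init(Object *obj)
{
    MyDeviceState *s = (MyDeviceState *)obj;

    s->clk_in = qdev_init_clock_in(DEVICE(s), "clk-in",
                                   mydev_clk_update, s, ClockUpdate);
}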
diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig
index 8b8c763c28..2ccc96f02c 100644
--- a/hw/pci-host/Kconfig
+++ b/hw/pci-host/Kconfig
@@ -68,3 +68,7 @@ config PCI_POWERNV
config REMOTE_PCIHOST
bool
+
+config SH_PCI
+ bool
+ select PCI
diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build
index 1847c69905..87a896973e 100644
--- a/hw/pci-host/meson.build
+++ b/hw/pci-host/meson.build
@@ -10,6 +10,7 @@ pci_ss.add(when: 'CONFIG_PCI_I440FX', if_true: files('i440fx.c'))
pci_ss.add(when: 'CONFIG_PCI_SABRE', if_true: files('sabre.c'))
pci_ss.add(when: 'CONFIG_XEN_IGD_PASSTHROUGH', if_true: files('xen_igd_pt.c'))
pci_ss.add(when: 'CONFIG_REMOTE_PCIHOST', if_true: files('remote.c'))
+pci_ss.add(when: 'CONFIG_SH_PCI', if_true: files('sh_pci.c'))
# PPC devices
pci_ss.add(when: 'CONFIG_PREP_PCI', if_true: files('prep.c'))
diff --git a/hw/sh4/sh_pci.c b/hw/pci-host/sh_pci.c
index 734892f47c..734892f47c 100644
--- a/hw/sh4/sh_pci.c
+++ b/hw/pci-host/sh_pci.c
diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c
index b156bcb999..b7539aa721 100644
--- a/hw/ppc/ppc440_bamboo.c
+++ b/hw/ppc/ppc440_bamboo.c
@@ -30,7 +30,6 @@
#include "hw/ppc/ppc.h"
#include "ppc405.h"
#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "hw/sysbus.h"
#include "hw/intc/ppc-uic.h"
diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c
index 7e72f6e4a9..f1b1efdcef 100644
--- a/hw/ppc/prep.c
+++ b/hw/ppc/prep.c
@@ -45,7 +45,6 @@
#include "hw/qdev-properties.h"
#include "sysemu/arch_init.h"
#include "sysemu/kvm.h"
-#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "exec/address-spaces.h"
#include "trace.h"
diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c
index e459b43065..0c6baf77e8 100644
--- a/hw/ppc/sam460ex.c
+++ b/hw/ppc/sam460ex.c
@@ -30,7 +30,6 @@
#include "ppc405.h"
#include "hw/block/flash.h"
#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "hw/sysbus.h"
#include "hw/char/serial.h"
diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c
index 9341e9782a..9ea7ddd1e9 100644
--- a/hw/ppc/spapr_caps.c
+++ b/hw/ppc/spapr_caps.c
@@ -33,7 +33,6 @@
#include "cpu-models.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
-#include "sysemu/qtest.h"
#include "sysemu/tcg.h"
#include "hw/ppc/spapr.h"
diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c
index ecb34aaade..e0547b1740 100644
--- a/hw/ppc/spapr_pci_vfio.c
+++ b/hw/ppc/spapr_pci_vfio.c
@@ -25,7 +25,6 @@
#include "hw/pci/msix.h"
#include "hw/vfio/vfio.h"
#include "qemu/error-report.h"
-#include "sysemu/qtest.h"
bool spapr_phb_eeh_available(SpaprPhbState *sphb)
{
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index 3cc9421526..ef06e0362c 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -31,7 +31,6 @@
#include "sysemu/device_tree.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
-#include "sysemu/qtest.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c
index b26ff17767..cb421570da 100644
--- a/hw/ppc/virtex_ml507.c
+++ b/hw/ppc/virtex_ml507.c
@@ -31,7 +31,6 @@
#include "hw/char/serial.h"
#include "hw/block/flash.h"
#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "hw/boards.h"
#include "sysemu/device_tree.h"
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index ed4ca9808e..ec7cb2f707 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -40,7 +40,6 @@
#include "chardev/char.h"
#include "sysemu/arch_init.h"
#include "sysemu/device_tree.h"
-#include "sysemu/qtest.h"
#include "sysemu/sysemu.h"
static const MemMapEntry spike_memmap[] = {
diff --git a/hw/rx/rx62n.c b/hw/rx/rx62n.c
index 17ec73fc7b..9c34ce14de 100644
--- a/hw/rx/rx62n.c
+++ b/hw/rx/rx62n.c
@@ -29,7 +29,6 @@
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
#include "cpu.h"
#include "qom/object.h"
diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c
index 4d7c2cab56..c3d3dab05e 100644
--- a/hw/scsi/esp-pci.c
+++ b/hw/scsi/esp-pci.c
@@ -79,8 +79,10 @@ struct PCIESPState {
static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
{
+ ESPState *s = ESP(&pci->esp);
+
trace_esp_pci_dma_idle(val);
- esp_dma_enable(&pci->esp, 0, 0);
+ esp_dma_enable(s, 0, 0);
}
static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
@@ -91,14 +93,18 @@ static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
{
+ ESPState *s = ESP(&pci->esp);
+
trace_esp_pci_dma_abort(val);
- if (pci->esp.current_req) {
- scsi_req_cancel(pci->esp.current_req);
+ if (s->current_req) {
+ scsi_req_cancel(s->current_req);
}
}
static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
{
+ ESPState *s = ESP(&pci->esp);
+
trace_esp_pci_dma_start(val);
pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
@@ -109,7 +115,7 @@ static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
| DMA_STAT_DONE | DMA_STAT_ABORT
| DMA_STAT_ERROR | DMA_STAT_PWDN);
- esp_dma_enable(&pci->esp, 0, 1);
+ esp_dma_enable(s, 0, 1);
}
static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
@@ -155,11 +161,12 @@ static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
{
+ ESPState *s = ESP(&pci->esp);
uint32_t val;
val = pci->dma_regs[saddr];
if (saddr == DMA_STAT) {
- if (pci->esp.rregs[ESP_RSTAT] & STAT_INT) {
+ if (s->rregs[ESP_RSTAT] & STAT_INT) {
val |= DMA_STAT_SCSIINT;
}
if (!(pci->sbac & SBAC_STATUS)) {
@@ -176,6 +183,7 @@ static void esp_pci_io_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int size)
{
PCIESPState *pci = opaque;
+ ESPState *s = ESP(&pci->esp);
if (size < 4 || addr & 3) {
/* need to upgrade request: we only support 4-byte accesses */
@@ -183,7 +191,7 @@ static void esp_pci_io_write(void *opaque, hwaddr addr,
int shift;
if (addr < 0x40) {
- current = pci->esp.wregs[addr >> 2];
+ current = s->wregs[addr >> 2];
} else if (addr < 0x60) {
current = pci->dma_regs[(addr - 0x40) >> 2];
} else if (addr < 0x74) {
@@ -203,7 +211,7 @@ static void esp_pci_io_write(void *opaque, hwaddr addr,
if (addr < 0x40) {
/* SCSI core reg */
- esp_reg_write(&pci->esp, addr >> 2, val);
+ esp_reg_write(s, addr >> 2, val);
} else if (addr < 0x60) {
/* PCI DMA CCB */
esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
@@ -220,11 +228,12 @@ static uint64_t esp_pci_io_read(void *opaque, hwaddr addr,
unsigned int size)
{
PCIESPState *pci = opaque;
+ ESPState *s = ESP(&pci->esp);
uint32_t ret;
if (addr < 0x40) {
/* SCSI core reg */
- ret = esp_reg_read(&pci->esp, addr >> 2);
+ ret = esp_reg_read(s, addr >> 2);
} else if (addr < 0x60) {
/* PCI DMA CCB */
ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
@@ -306,7 +315,9 @@ static const MemoryRegionOps esp_pci_io_ops = {
static void esp_pci_hard_reset(DeviceState *dev)
{
PCIESPState *pci = PCI_ESP(dev);
- esp_hard_reset(&pci->esp);
+ ESPState *s = ESP(&pci->esp);
+
+ esp_hard_reset(s);
pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
| DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
pci->dma_regs[DMA_WBC] &= ~0xffff;
@@ -319,11 +330,12 @@ static void esp_pci_hard_reset(DeviceState *dev)
static const VMStateDescription vmstate_esp_pci_scsi = {
.name = "pciespscsi",
- .version_id = 1,
+ .version_id = 2,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PCIESPState),
VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
+ VMSTATE_UINT8_V(esp.mig_version_id, PCIESPState, 2),
VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
VMSTATE_END_OF_LIST()
}
@@ -353,9 +365,13 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
{
PCIESPState *pci = PCI_ESP(dev);
DeviceState *d = DEVICE(dev);
- ESPState *s = &pci->esp;
+ ESPState *s = ESP(&pci->esp);
uint8_t *pci_conf;
+ if (!qdev_realize(DEVICE(s), NULL, errp)) {
+ return;
+ }
+
pci_conf = dev->config;
/* Interrupt pin A */
@@ -374,11 +390,19 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
scsi_bus_new(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info, NULL);
}
-static void esp_pci_scsi_uninit(PCIDevice *d)
+static void esp_pci_scsi_exit(PCIDevice *d)
{
PCIESPState *pci = PCI_ESP(d);
+ ESPState *s = ESP(&pci->esp);
+
+ qemu_free_irq(s->irq);
+}
+
+static void esp_pci_init(Object *obj)
+{
+ PCIESPState *pci = PCI_ESP(obj);
- qemu_free_irq(pci->esp.irq);
+ object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP);
}
static void esp_pci_class_init(ObjectClass *klass, void *data)
@@ -387,7 +411,7 @@ static void esp_pci_class_init(ObjectClass *klass, void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->realize = esp_pci_scsi_realize;
- k->exit = esp_pci_scsi_uninit;
+ k->exit = esp_pci_scsi_exit;
k->vendor_id = PCI_VENDOR_ID_AMD;
k->device_id = PCI_DEVICE_ID_AMD_SCSI;
k->revision = 0x10;
@@ -401,6 +425,7 @@ static void esp_pci_class_init(ObjectClass *klass, void *data)
static const TypeInfo esp_pci_info = {
.name = TYPE_AM53C974_DEVICE,
.parent = TYPE_PCI_DEVICE,
+ .instance_init = esp_pci_init,
.instance_size = sizeof(PCIESPState),
.class_init = esp_pci_class_init,
.interfaces = (InterfaceInfo[]) {
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 93d9c9c7b9..507ab363bc 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -63,11 +63,13 @@ static void esp_lower_irq(ESPState *s)
static void esp_raise_drq(ESPState *s)
{
qemu_irq_raise(s->irq_data);
+ trace_esp_raise_drq();
}
static void esp_lower_drq(ESPState *s)
{
qemu_irq_lower(s->irq_data);
+ trace_esp_lower_drq();
}
void esp_dma_enable(ESPState *s, int irq, int level)
@@ -96,39 +98,112 @@ void esp_request_cancelled(SCSIRequest *req)
}
}
-static void set_pdma(ESPState *s, enum pdma_origin_id origin,
- uint32_t index, uint32_t len)
+static void esp_fifo_push(ESPState *s, uint8_t val)
{
- s->pdma_origin = origin;
- s->pdma_start = index;
- s->pdma_cur = index;
- s->pdma_len = len;
+ if (fifo8_num_used(&s->fifo) == ESP_FIFO_SZ) {
+ trace_esp_error_fifo_overrun();
+ return;
+ }
+
+ fifo8_push(&s->fifo, val);
+}
+
+static uint8_t esp_fifo_pop(ESPState *s)
+{
+ if (fifo8_is_empty(&s->fifo)) {
+ return 0;
+ }
+
+ return fifo8_pop(&s->fifo);
+}
+
+static void esp_cmdfifo_push(ESPState *s, uint8_t val)
+{
+ if (fifo8_num_used(&s->cmdfifo) == ESP_CMDFIFO_SZ) {
+ trace_esp_error_fifo_overrun();
+ return;
+ }
+
+ fifo8_push(&s->cmdfifo, val);
+}
+
+static uint8_t esp_cmdfifo_pop(ESPState *s)
+{
+ if (fifo8_is_empty(&s->cmdfifo)) {
+ return 0;
+ }
+
+ return fifo8_pop(&s->cmdfifo);
+}
+
+static uint32_t esp_get_tc(ESPState *s)
+{
+ uint32_t dmalen;
+
+ dmalen = s->rregs[ESP_TCLO];
+ dmalen |= s->rregs[ESP_TCMID] << 8;
+ dmalen |= s->rregs[ESP_TCHI] << 16;
+
+ return dmalen;
+}
+
+static void esp_set_tc(ESPState *s, uint32_t dmalen)
+{
+ s->rregs[ESP_TCLO] = dmalen;
+ s->rregs[ESP_TCMID] = dmalen >> 8;
+ s->rregs[ESP_TCHI] = dmalen >> 16;
+}
+
+static uint32_t esp_get_stc(ESPState *s)
+{
+ uint32_t dmalen;
+
+ dmalen = s->wregs[ESP_TCLO];
+ dmalen |= s->wregs[ESP_TCMID] << 8;
+ dmalen |= s->wregs[ESP_TCHI] << 16;
+
+ return dmalen;
+}
+
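As an aside, a self-contained sketch (not from the patch) of the 24-bit transfer counter packing that esp_get_tc()/esp_set_tc() implement across the TCLO/TCMID/TCHI byte registers:

#include <assert.h>
#include <stdint.h>

static uint32_t tc_get(const uint8_t regs[3])
{
    return regs[0] | (regs[1] << 8) | (regs[2] << 16);
}

static void tc_set(uint8_t regs[3], uint32_t len)
{
    regs[0] = len;          /* TCLO  */
    regs[1] = len >> 8;     /* TCMID */
    regs[2] = len >> 16;    /* TCHI  */
}

int main(void)
{
    uint8_t regs[3];

    tc_set(regs, 0x123456);
    assert(tc_get(regs) == 0x123456);
    return 0;
}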
+static uint8_t esp_pdma_read(ESPState *s)
+{
+ uint8_t val;
+
+ if (s->do_cmd) {
+ val = esp_cmdfifo_pop(s);
+ } else {
+ val = esp_fifo_pop(s);
+ }
+
+ return val;
}
-static uint8_t *get_pdma_buf(ESPState *s)
+static void esp_pdma_write(ESPState *s, uint8_t val)
{
- switch (s->pdma_origin) {
- case PDMA:
- return s->pdma_buf;
- case TI:
- return s->ti_buf;
- case CMD:
- return s->cmdbuf;
- case ASYNC:
- return s->async_buf;
+ uint32_t dmalen = esp_get_tc(s);
+
+ if (dmalen == 0) {
+ return;
+ }
+
+ if (s->do_cmd) {
+ esp_cmdfifo_push(s, val);
+ } else {
+ esp_fifo_push(s, val);
}
- return NULL;
+
+ dmalen--;
+ esp_set_tc(s, dmalen);
}
-static int get_cmd_cb(ESPState *s)
+static int esp_select(ESPState *s)
{
int target;
target = s->wregs[ESP_WBUSID] & BUSID_DID;
s->ti_size = 0;
- s->ti_rptr = 0;
- s->ti_wptr = 0;
+ fifo8_reset(&s->fifo);
if (s->current_req) {
/* Started a new command before the old one finished. Cancel it. */
@@ -140,148 +215,195 @@ static int get_cmd_cb(ESPState *s)
if (!s->current_dev) {
/* No such drive */
s->rregs[ESP_RSTAT] = 0;
- s->rregs[ESP_RINTR] = INTR_DC;
+ s->rregs[ESP_RINTR] |= INTR_DC;
s->rregs[ESP_RSEQ] = SEQ_0;
esp_raise_irq(s);
return -1;
}
+
+ /*
+ * Note that we deliberately don't raise the IRQ here: this will be done
+ * either in do_busid_cmd() for DATA OUT transfers or by the deferred
+ * IRQ mechanism in esp_transfer_data() for DATA IN transfers
+ */
+ s->rregs[ESP_RINTR] |= INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
return 0;
}
-static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
+static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
- uint32_t dmalen;
+ uint8_t buf[ESP_CMDFIFO_SZ];
+ uint32_t dmalen, n;
int target;
target = s->wregs[ESP_WBUSID] & BUSID_DID;
if (s->dma) {
- dmalen = s->rregs[ESP_TCLO];
- dmalen |= s->rregs[ESP_TCMID] << 8;
- dmalen |= s->rregs[ESP_TCHI] << 16;
- if (dmalen > buflen) {
+ dmalen = MIN(esp_get_tc(s), maxlen);
+ if (dmalen == 0) {
return 0;
}
if (s->dma_memory_read) {
s->dma_memory_read(s->dma_opaque, buf, dmalen);
+ fifo8_push_all(&s->cmdfifo, buf, dmalen);
} else {
- memcpy(s->pdma_buf, buf, dmalen);
- set_pdma(s, PDMA, 0, dmalen);
+ if (esp_select(s) < 0) {
+ fifo8_reset(&s->cmdfifo);
+ return -1;
+ }
esp_raise_drq(s);
+ fifo8_reset(&s->cmdfifo);
return 0;
}
} else {
- dmalen = s->ti_size;
- if (dmalen > TI_BUFSZ) {
+ dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
+ if (dmalen == 0) {
return 0;
}
- memcpy(buf, s->ti_buf, dmalen);
- buf[0] = buf[2] >> 5;
+ memcpy(buf, fifo8_pop_buf(&s->fifo, dmalen, &n), dmalen);
+ if (dmalen >= 3) {
+ buf[0] = buf[2] >> 5;
+ }
+ fifo8_push_all(&s->cmdfifo, buf, dmalen);
}
trace_esp_get_cmd(dmalen, target);
- if (get_cmd_cb(s) < 0) {
- return 0;
+ if (esp_select(s) < 0) {
+ fifo8_reset(&s->cmdfifo);
+ return -1;
}
return dmalen;
}
-static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
+static void do_busid_cmd(ESPState *s, uint8_t busid)
{
+ uint32_t n, cmdlen;
int32_t datalen;
int lun;
SCSIDevice *current_lun;
+ uint8_t *buf;
trace_esp_do_busid_cmd(busid);
lun = busid & 7;
+ cmdlen = fifo8_num_used(&s->cmdfifo);
+ buf = (uint8_t *)fifo8_pop_buf(&s->cmdfifo, cmdlen, &n);
+
current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
datalen = scsi_req_enqueue(s->current_req);
s->ti_size = datalen;
+ fifo8_reset(&s->cmdfifo);
if (datalen != 0) {
s->rregs[ESP_RSTAT] = STAT_TC;
- s->dma_left = 0;
- s->dma_counter = 0;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->ti_cmd = 0;
+ esp_set_tc(s, 0);
if (datalen > 0) {
+ /*
+ * Switch to DATA IN phase but wait until initial data xfer is
+ * complete before raising the command completion interrupt
+ */
+ s->data_in_ready = false;
s->rregs[ESP_RSTAT] |= STAT_DI;
} else {
s->rregs[ESP_RSTAT] |= STAT_DO;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
+ esp_raise_irq(s);
+ esp_lower_drq(s);
}
scsi_req_continue(s->current_req);
+ return;
}
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
- esp_raise_irq(s);
}
-static void do_cmd(ESPState *s, uint8_t *buf)
+static void do_cmd(ESPState *s)
{
- uint8_t busid = buf[0];
+ uint8_t busid = fifo8_pop(&s->cmdfifo);
+ uint32_t n;
+
+ s->cmdfifo_cdb_offset--;
+
+ /* Ignore extended messages for now */
+ if (s->cmdfifo_cdb_offset) {
+ fifo8_pop_buf(&s->cmdfifo, s->cmdfifo_cdb_offset, &n);
+ s->cmdfifo_cdb_offset = 0;
+ }
- do_busid_cmd(s, &buf[1], busid);
+ do_busid_cmd(s, busid);
}
static void satn_pdma_cb(ESPState *s)
{
- if (get_cmd_cb(s) < 0) {
- return;
- }
- if (s->pdma_cur != s->pdma_start) {
- do_cmd(s, get_pdma_buf(s) + s->pdma_start);
+ s->do_cmd = 0;
+ if (!fifo8_is_empty(&s->cmdfifo)) {
+ s->cmdfifo_cdb_offset = 1;
+ do_cmd(s);
}
}
static void handle_satn(ESPState *s)
{
- uint8_t buf[32];
- int len;
+ int32_t cmdlen;
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn;
return;
}
s->pdma_cb = satn_pdma_cb;
- len = get_cmd(s, buf, sizeof(buf));
- if (len)
- do_cmd(s, buf);
+ cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
+ if (cmdlen > 0) {
+ s->cmdfifo_cdb_offset = 1;
+ do_cmd(s);
+ } else if (cmdlen == 0) {
+ s->do_cmd = 1;
+ /* Target present, but no cmd yet - switch to command phase */
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RSTAT] = STAT_CD;
+ }
}
static void s_without_satn_pdma_cb(ESPState *s)
{
- if (get_cmd_cb(s) < 0) {
- return;
- }
- if (s->pdma_cur != s->pdma_start) {
- do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
+ uint32_t len;
+
+ s->do_cmd = 0;
+ len = fifo8_num_used(&s->cmdfifo);
+ if (len) {
+ s->cmdfifo_cdb_offset = 0;
+ do_busid_cmd(s, 0);
}
}
static void handle_s_without_atn(ESPState *s)
{
- uint8_t buf[32];
- int len;
+ int32_t cmdlen;
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_s_without_atn;
return;
}
s->pdma_cb = s_without_satn_pdma_cb;
- len = get_cmd(s, buf, sizeof(buf));
- if (len) {
- do_busid_cmd(s, buf, 0);
+ cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
+ if (cmdlen > 0) {
+ s->cmdfifo_cdb_offset = 0;
+ do_busid_cmd(s, 0);
+ } else if (cmdlen == 0) {
+ s->do_cmd = 1;
+ /* Target present, but no cmd yet - switch to command phase */
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RSTAT] = STAT_CD;
}
}
static void satn_stop_pdma_cb(ESPState *s)
{
- if (get_cmd_cb(s) < 0) {
- return;
- }
- s->cmdlen = s->pdma_cur - s->pdma_start;
- if (s->cmdlen) {
- trace_esp_handle_satn_stop(s->cmdlen);
+ s->do_cmd = 0;
+ if (!fifo8_is_empty(&s->cmdfifo)) {
+ trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
s->do_cmd = 1;
+ s->cmdfifo_cdb_offset = 1;
s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
s->rregs[ESP_RSEQ] = SEQ_CD;
esp_raise_irq(s);
}
@@ -289,51 +411,62 @@ static void satn_stop_pdma_cb(ESPState *s)
static void handle_satn_stop(ESPState *s)
{
+ int32_t cmdlen;
+
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn_stop;
return;
}
s->pdma_cb = satn_stop_pdma_cb;
- s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
- if (s->cmdlen) {
- trace_esp_handle_satn_stop(s->cmdlen);
+ cmdlen = get_cmd(s, 1);
+ if (cmdlen > 0) {
+ trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
s->do_cmd = 1;
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->cmdfifo_cdb_offset = 1;
+ s->rregs[ESP_RSTAT] = STAT_MO;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_MO;
esp_raise_irq(s);
+ } else if (cmdlen == 0) {
+ s->do_cmd = 1;
+ /* Target present, switch to message out phase */
+ s->rregs[ESP_RSEQ] = SEQ_MO;
+ s->rregs[ESP_RSTAT] = STAT_MO;
}
}
static void write_response_pdma_cb(ESPState *s)
{
s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
s->rregs[ESP_RSEQ] = SEQ_CD;
esp_raise_irq(s);
}
static void write_response(ESPState *s)
{
+ uint32_t n;
+
trace_esp_write_response(s->status);
- s->ti_buf[0] = s->status;
- s->ti_buf[1] = 0;
+
+ fifo8_reset(&s->fifo);
+ esp_fifo_push(s, s->status);
+ esp_fifo_push(s, 0);
+
if (s->dma) {
if (s->dma_memory_write) {
- s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
+ s->dma_memory_write(s->dma_opaque,
+ (uint8_t *)fifo8_pop_buf(&s->fifo, 2, &n), 2);
s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
s->rregs[ESP_RSEQ] = SEQ_CD;
} else {
- set_pdma(s, TI, 0, 2);
s->pdma_cb = write_response_pdma_cb;
esp_raise_drq(s);
return;
}
} else {
s->ti_size = 2;
- s->ti_rptr = 0;
- s->ti_wptr = 2;
s->rregs[ESP_RFLAGS] = 2;
}
esp_raise_irq(s);
@@ -342,77 +475,133 @@ static void write_response(ESPState *s)
static void esp_dma_done(ESPState *s)
{
s->rregs[ESP_RSTAT] |= STAT_TC;
- s->rregs[ESP_RINTR] = INTR_BS;
+ s->rregs[ESP_RINTR] |= INTR_BS;
s->rregs[ESP_RSEQ] = 0;
s->rregs[ESP_RFLAGS] = 0;
- s->rregs[ESP_TCLO] = 0;
- s->rregs[ESP_TCMID] = 0;
- s->rregs[ESP_TCHI] = 0;
+ esp_set_tc(s, 0);
esp_raise_irq(s);
}
static void do_dma_pdma_cb(ESPState *s)
{
- int to_device = (s->ti_size < 0);
- int len = s->pdma_cur - s->pdma_start;
+ int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ int len;
+ uint32_t n;
+
if (s->do_cmd) {
s->ti_size = 0;
- s->cmdlen = 0;
s->do_cmd = 0;
- do_cmd(s, s->cmdbuf);
+ do_cmd(s);
+ esp_lower_drq(s);
return;
}
- s->dma_left -= len;
- s->async_buf += len;
- s->async_len -= len;
+
if (to_device) {
- s->ti_size += len;
+ /* Copy FIFO data to device */
+ len = MIN(s->async_len, ESP_FIFO_SZ);
+ len = MIN(len, fifo8_num_used(&s->fifo));
+ memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
+ s->async_buf += n;
+ s->async_len -= n;
+ s->ti_size += n;
+
+ if (n < len) {
+ /* Unaligned accesses can cause FIFO wraparound */
+ len = len - n;
+ memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
+ s->async_buf += n;
+ s->async_len -= n;
+ s->ti_size += n;
+ }
+
+ if (s->async_len == 0) {
+ scsi_req_continue(s->current_req);
+ return;
+ }
+
+ if (esp_get_tc(s) == 0) {
+ esp_lower_drq(s);
+ esp_dma_done(s);
+ }
+
+ return;
} else {
- s->ti_size -= len;
- }
- if (s->async_len == 0) {
- scsi_req_continue(s->current_req);
- /*
- * If there is still data to be read from the device then
- * complete the DMA operation immediately. Otherwise defer
- * until the scsi layer has completed.
- */
- if (to_device || s->dma_left != 0 || s->ti_size == 0) {
+ if (s->async_len == 0) {
+ if (s->current_req) {
+ /* Defer until the scsi layer has completed */
+ scsi_req_continue(s->current_req);
+ s->data_in_ready = false;
+ }
return;
}
- }
- /* Partially filled a scsi buffer. Complete immediately. */
- esp_dma_done(s);
+ if (esp_get_tc(s) != 0) {
+ /* Copy device data to FIFO */
+ len = MIN(s->async_len, esp_get_tc(s));
+ len = MIN(len, fifo8_num_free(&s->fifo));
+ fifo8_push_all(&s->fifo, s->async_buf, len);
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size -= len;
+ esp_set_tc(s, esp_get_tc(s) - len);
+
+ if (esp_get_tc(s) == 0) {
+ /* Indicate transfer to FIFO is complete */
+ s->rregs[ESP_RSTAT] |= STAT_TC;
+ }
+ return;
+ }
+
+ /* Partially filled a scsi buffer. Complete immediately. */
+ esp_lower_drq(s);
+ esp_dma_done(s);
+ }
}
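A note on the double memcpy in the to_device path above: fifo8_pop_buf() returns a pointer to a contiguous chunk and may report fewer bytes than requested when the ring wraps, so the copy is repeated. A minimal sketch of that pattern, assuming QEMU's qemu/fifo8.h helpers (drain_fifo() itself is hypothetical):

#include "qemu/osdep.h"
#include "qemu/fifo8.h"

/* Drain up to 'want' bytes, repeating the pop when the ring wraps. */
static void drain_fifo(Fifo8 *fifo, uint8_t *dst, uint32_t want)
{
    uint32_t n;

    while (want && !fifo8_is_empty(fifo)) {
        const uint8_t *chunk = fifo8_pop_buf(fifo, want, &n);

        memcpy(dst, chunk, n);
        dst += n;
        want -= n;
    }
}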
static void esp_do_dma(ESPState *s)
{
- uint32_t len;
- int to_device;
+ uint32_t len, cmdlen;
+ int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ uint8_t buf[ESP_CMDFIFO_SZ];
- len = s->dma_left;
+ len = esp_get_tc(s);
if (s->do_cmd) {
/*
* handle_ti_cmd() case: esp_do_dma() is called only from
* handle_ti_cmd() with do_cmd set
*/
- trace_esp_do_dma(s->cmdlen, len);
- assert (s->cmdlen <= sizeof(s->cmdbuf) &&
- len <= sizeof(s->cmdbuf) - s->cmdlen);
+ cmdlen = fifo8_num_used(&s->cmdfifo);
+ trace_esp_do_dma(cmdlen, len);
if (s->dma_memory_read) {
- s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
+ s->dma_memory_read(s->dma_opaque, buf, len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
} else {
- set_pdma(s, CMD, s->cmdlen, len);
s->pdma_cb = do_dma_pdma_cb;
esp_raise_drq(s);
return;
}
- trace_esp_handle_ti_cmd(s->cmdlen);
+ trace_esp_handle_ti_cmd(cmdlen);
s->ti_size = 0;
- s->cmdlen = 0;
- s->do_cmd = 0;
- do_cmd(s, s->cmdbuf);
+ if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
+ /* No command received */
+ if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
+ return;
+ }
+
+ /* Command has been received */
+ s->do_cmd = 0;
+ do_cmd(s);
+ } else {
+ /*
+ * Extra message out bytes received: update cmdfifo_cdb_offset
+ * and then switch to command phase
+ */
+ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
+ s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+ }
return;
}
if (s->async_len == 0) {
@@ -422,12 +611,10 @@ static void esp_do_dma(ESPState *s)
if (len > s->async_len) {
len = s->async_len;
}
- to_device = (s->ti_size < 0);
if (to_device) {
if (s->dma_memory_read) {
s->dma_memory_read(s->dma_opaque, s->async_buf, len);
} else {
- set_pdma(s, ASYNC, 0, len);
s->pdma_cb = do_dma_pdma_cb;
esp_raise_drq(s);
return;
@@ -436,48 +623,145 @@ static void esp_do_dma(ESPState *s)
if (s->dma_memory_write) {
s->dma_memory_write(s->dma_opaque, s->async_buf, len);
} else {
- set_pdma(s, ASYNC, 0, len);
+ /* Adjust TC for any leftover data in the FIFO */
+ if (!fifo8_is_empty(&s->fifo)) {
+ esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
+ }
+
+ /* Copy device data to FIFO */
+ len = MIN(len, fifo8_num_free(&s->fifo));
+ fifo8_push_all(&s->fifo, s->async_buf, len);
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size -= len;
+
+ /*
+ * MacOS toolbox uses a TI length of 16 bytes for all commands, so
+ * commands shorter than this must be padded accordingly
+ */
+ if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
+ while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
+ esp_fifo_push(s, 0);
+ len++;
+ }
+ }
+
+ esp_set_tc(s, esp_get_tc(s) - len);
s->pdma_cb = do_dma_pdma_cb;
esp_raise_drq(s);
+
+ /* Indicate transfer to FIFO is complete */
+ s->rregs[ESP_RSTAT] |= STAT_TC;
return;
}
}
- s->dma_left -= len;
+ esp_set_tc(s, esp_get_tc(s) - len);
s->async_buf += len;
s->async_len -= len;
- if (to_device)
+ if (to_device) {
s->ti_size += len;
- else
+ } else {
s->ti_size -= len;
+ }
if (s->async_len == 0) {
scsi_req_continue(s->current_req);
- /* If there is still data to be read from the device then
- complete the DMA operation immediately. Otherwise defer
- until the scsi layer has completed. */
- if (to_device || s->dma_left != 0 || s->ti_size == 0) {
+ /*
+ * If there is still data to be read from the device then
+ * complete the DMA operation immediately. Otherwise defer
+ * until the scsi layer has completed.
+ */
+ if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
return;
}
}
/* Partially filled a scsi buffer. Complete immediately. */
esp_dma_done(s);
+ esp_lower_drq(s);
}
-static void esp_report_command_complete(ESPState *s, uint32_t status)
+static void esp_do_nodma(ESPState *s)
{
+ int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ uint32_t cmdlen, n;
+ int len;
+
+ if (s->do_cmd) {
+ cmdlen = fifo8_num_used(&s->cmdfifo);
+ trace_esp_handle_ti_cmd(cmdlen);
+ s->ti_size = 0;
+ if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
+ /* No command received */
+ if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
+ return;
+ }
+
+ /* Command has been received */
+ s->do_cmd = 0;
+ do_cmd(s);
+ } else {
+ /*
+ * Extra message out bytes received: update cmdfifo_cdb_offset
+ * and then switch to command phase
+ */
+ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
+ s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+ }
+ return;
+ }
+
+ if (s->async_len == 0) {
+ /* Defer until data is available. */
+ return;
+ }
+
+ if (to_device) {
+ len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
+ memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size += len;
+ } else {
+ len = MIN(s->ti_size, s->async_len);
+ len = MIN(len, fifo8_num_free(&s->fifo));
+ fifo8_push_all(&s->fifo, s->async_buf, len);
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size -= len;
+ }
+
+ if (s->async_len == 0) {
+ scsi_req_continue(s->current_req);
+
+ if (to_device || s->ti_size == 0) {
+ return;
+ }
+ }
+
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+}
+
+void esp_command_complete(SCSIRequest *req, size_t resid)
+{
+ ESPState *s = req->hba_private;
+
trace_esp_command_complete();
if (s->ti_size != 0) {
trace_esp_command_complete_unexpected();
}
s->ti_size = 0;
- s->dma_left = 0;
s->async_len = 0;
- if (status) {
+ if (req->status) {
trace_esp_command_complete_fail();
}
- s->status = status;
+ s->status = req->status;
s->rregs[ESP_RSTAT] = STAT_ST;
esp_dma_done(s);
+ esp_lower_drq(s);
if (s->current_req) {
scsi_req_unref(s->current_req);
s->current_req = NULL;
@@ -485,73 +769,83 @@ static void esp_report_command_complete(ESPState *s, uint32_t status)
}
}
-void esp_command_complete(SCSIRequest *req, size_t resid)
+void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
ESPState *s = req->hba_private;
+ int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ uint32_t dmalen = esp_get_tc(s);
- if (s->rregs[ESP_RSTAT] & STAT_INT) {
- /* Defer handling command complete until the previous
- * interrupt has been handled.
+ assert(!s->do_cmd);
+ trace_esp_transfer_data(dmalen, s->ti_size);
+ s->async_len = len;
+ s->async_buf = scsi_req_get_buf(req);
+
+ if (!to_device && !s->data_in_ready) {
+ /*
+ * Initial incoming data xfer is complete so raise command
+ * completion interrupt
*/
- trace_esp_command_complete_deferred();
- s->deferred_status = req->status;
- s->deferred_complete = true;
+ s->data_in_ready = true;
+ s->rregs[ESP_RSTAT] |= STAT_TC;
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+
+ /*
+ * If data is ready to transfer and the TI command has already
+ * been executed, start DMA immediately. Otherwise DMA will start
+ * when host sends the TI command
+ */
+ if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
+ esp_do_dma(s);
+ }
return;
}
- esp_report_command_complete(s, req->status);
-}
-void esp_transfer_data(SCSIRequest *req, uint32_t len)
-{
- ESPState *s = req->hba_private;
+ if (s->ti_cmd == 0) {
+ /*
+ * Always perform the initial transfer upon reception of the next TI
+ * command to ensure the DMA/non-DMA status of the command is correct.
+ * It is not possible to use s->dma directly in the section below as
+ * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
+ * async data transfer is delayed then s->dma is set incorrectly.
+ */
+ return;
+ }
- assert(!s->do_cmd);
- trace_esp_transfer_data(s->dma_left, s->ti_size);
- s->async_len = len;
- s->async_buf = scsi_req_get_buf(req);
- if (s->dma_left) {
- esp_do_dma(s);
- } else if (s->dma_counter != 0 && s->ti_size <= 0) {
- /* If this was the last part of a DMA transfer then the
- completion interrupt is deferred to here. */
- esp_dma_done(s);
+ if (s->ti_cmd & CMD_DMA) {
+ if (dmalen) {
+ esp_do_dma(s);
+ } else if (s->ti_size <= 0) {
+ /*
+ * If this was the last part of a DMA transfer then the
+ * completion interrupt is deferred to here.
+ */
+ esp_dma_done(s);
+ esp_lower_drq(s);
+ }
+ } else {
+ esp_do_nodma(s);
}
}
static void handle_ti(ESPState *s)
{
- uint32_t dmalen, minlen;
+ uint32_t dmalen;
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_ti;
return;
}
- dmalen = s->rregs[ESP_TCLO];
- dmalen |= s->rregs[ESP_TCMID] << 8;
- dmalen |= s->rregs[ESP_TCHI] << 16;
- if (dmalen==0) {
- dmalen=0x10000;
- }
- s->dma_counter = dmalen;
-
- if (s->do_cmd)
- minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
- else if (s->ti_size < 0)
- minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
- else
- minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
- trace_esp_handle_ti(minlen);
+ s->ti_cmd = s->rregs[ESP_CMD];
if (s->dma) {
- s->dma_left = minlen;
+ dmalen = esp_get_tc(s);
+ trace_esp_handle_ti(dmalen);
s->rregs[ESP_RSTAT] &= ~STAT_TC;
esp_do_dma(s);
- } else if (s->do_cmd) {
- trace_esp_handle_ti_cmd(s->cmdlen);
- s->ti_size = 0;
- s->cmdlen = 0;
- s->do_cmd = 0;
- do_cmd(s, s->cmdbuf);
+ } else {
+ trace_esp_handle_ti(s->ti_size);
+ esp_do_nodma(s);
}
}
@@ -561,8 +855,8 @@ void esp_hard_reset(ESPState *s)
memset(s->wregs, 0, ESP_REGS);
s->tchi_written = 0;
s->ti_size = 0;
- s->ti_rptr = 0;
- s->ti_wptr = 0;
+ fifo8_reset(&s->fifo);
+ fifo8_reset(&s->cmdfifo);
s->dma = 0;
s->do_cmd = 0;
s->dma_cb = NULL;
@@ -586,46 +880,50 @@ static void parent_esp_reset(ESPState *s, int irq, int level)
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
- uint32_t old_val;
+ uint32_t val;
- trace_esp_mem_readb(saddr, s->rregs[saddr]);
switch (saddr) {
case ESP_FIFO:
- if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
+ if (s->dma_memory_read && s->dma_memory_write &&
+ (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
/* Data out. */
qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
s->rregs[ESP_FIFO] = 0;
- } else if (s->ti_rptr < s->ti_wptr) {
- s->ti_size--;
- s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
- }
- if (s->ti_rptr == s->ti_wptr) {
- s->ti_rptr = 0;
- s->ti_wptr = 0;
+ } else {
+ s->rregs[ESP_FIFO] = esp_fifo_pop(s);
}
+ val = s->rregs[ESP_FIFO];
break;
case ESP_RINTR:
- /* Clear sequence step, interrupt register and all status bits
- except TC */
- old_val = s->rregs[ESP_RINTR];
+ /*
+ * Clear sequence step, interrupt register and all status bits
+ * except TC
+ */
+ val = s->rregs[ESP_RINTR];
s->rregs[ESP_RINTR] = 0;
s->rregs[ESP_RSTAT] &= ~STAT_TC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RSEQ] = SEQ_0;
esp_lower_irq(s);
- if (s->deferred_complete) {
- esp_report_command_complete(s, s->deferred_status);
- s->deferred_complete = false;
- }
- return old_val;
+ break;
case ESP_TCHI:
/* Return the unique id if the value has never been written */
if (!s->tchi_written) {
- return s->chip_id;
+ val = s->chip_id;
+ } else {
+ val = s->rregs[saddr];
}
+ break;
+ case ESP_RFLAGS:
+ /* Bottom 5 bits indicate number of bytes in FIFO */
+ val = fifo8_num_used(&s->fifo);
+ break;
default:
+ val = s->rregs[saddr];
break;
}
- return s->rregs[saddr];
+
+ trace_esp_mem_readb(saddr, val);
+ return val;
}
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
@@ -641,16 +939,15 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
break;
case ESP_FIFO:
if (s->do_cmd) {
- if (s->cmdlen < ESP_CMDBUF_SZ) {
- s->cmdbuf[s->cmdlen++] = val & 0xff;
- } else {
- trace_esp_error_fifo_overrun();
- }
- } else if (s->ti_wptr == TI_BUFSZ - 1) {
- trace_esp_error_fifo_overrun();
+ esp_cmdfifo_push(s, val);
} else {
- s->ti_size++;
- s->ti_buf[s->ti_wptr++] = val & 0xff;
+ esp_fifo_push(s, val);
+ }
+
+ /* Non-DMA transfers raise an interrupt after every byte */
+ if (s->rregs[ESP_CMD] == CMD_TI) {
+ s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
+ esp_raise_irq(s);
}
break;
case ESP_CMD:
@@ -658,22 +955,21 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
if (val & CMD_DMA) {
s->dma = 1;
/* Reload DMA counter. */
- s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
- s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
- s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
+ if (esp_get_stc(s) == 0) {
+ esp_set_tc(s, 0x10000);
+ } else {
+ esp_set_tc(s, esp_get_stc(s));
+ }
} else {
s->dma = 0;
}
- switch(val & CMD_CMD) {
+ switch (val & CMD_CMD) {
case CMD_NOP:
trace_esp_mem_writeb_cmd_nop(val);
break;
case CMD_FLUSH:
trace_esp_mem_writeb_cmd_flush(val);
- //s->ti_size = 0;
- s->rregs[ESP_RINTR] = INTR_FC;
- s->rregs[ESP_RSEQ] = 0;
- s->rregs[ESP_RFLAGS] = 0;
+ fifo8_reset(&s->fifo);
break;
case CMD_RESET:
trace_esp_mem_writeb_cmd_reset(val);
@@ -681,23 +977,24 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
break;
case CMD_BUSRESET:
trace_esp_mem_writeb_cmd_bus_reset(val);
- s->rregs[ESP_RINTR] = INTR_RST;
if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
+ s->rregs[ESP_RINTR] |= INTR_RST;
esp_raise_irq(s);
}
break;
case CMD_TI:
+ trace_esp_mem_writeb_cmd_ti(val);
handle_ti(s);
break;
case CMD_ICCS:
trace_esp_mem_writeb_cmd_iccs(val);
write_response(s);
- s->rregs[ESP_RINTR] = INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_FC;
s->rregs[ESP_RSTAT] |= STAT_MI;
break;
case CMD_MSGACC:
trace_esp_mem_writeb_cmd_msgacc(val);
- s->rregs[ESP_RINTR] = INTR_DC;
+ s->rregs[ESP_RINTR] |= INTR_DC;
s->rregs[ESP_RSEQ] = 0;
s->rregs[ESP_RFLAGS] = 0;
esp_raise_irq(s);
@@ -705,7 +1002,7 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
case CMD_PAD:
trace_esp_mem_writeb_cmd_pad(val);
s->rregs[ESP_RSTAT] = STAT_TC;
- s->rregs[ESP_RINTR] = INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_FC;
s->rregs[ESP_RSEQ] = 0;
break;
case CMD_SATN:
@@ -763,74 +1060,112 @@ static bool esp_mem_accepts(void *opaque, hwaddr addr,
return (size == 1) || (is_write && size == 4);
}
-static bool esp_pdma_needed(void *opaque)
+static bool esp_is_before_version_5(void *opaque, int version_id)
{
- ESPState *s = opaque;
- return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
- s->dma_enabled;
+ ESPState *s = ESP(opaque);
+
+ version_id = MIN(version_id, s->mig_version_id);
+ return version_id < 5;
}
-static const VMStateDescription vmstate_esp_pdma = {
- .name = "esp/pdma",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = esp_pdma_needed,
- .fields = (VMStateField[]) {
- VMSTATE_BUFFER(pdma_buf, ESPState),
- VMSTATE_INT32(pdma_origin, ESPState),
- VMSTATE_UINT32(pdma_len, ESPState),
- VMSTATE_UINT32(pdma_start, ESPState),
- VMSTATE_UINT32(pdma_cur, ESPState),
- VMSTATE_END_OF_LIST()
+static bool esp_is_version_5(void *opaque, int version_id)
+{
+ ESPState *s = ESP(opaque);
+
+ version_id = MIN(version_id, s->mig_version_id);
+ return version_id == 5;
+}
+
+static int esp_pre_save(void *opaque)
+{
+ ESPState *s = ESP(opaque);
+
+ s->mig_version_id = vmstate_esp.version_id;
+ return 0;
+}
+
+static int esp_post_load(void *opaque, int version_id)
+{
+ ESPState *s = ESP(opaque);
+ int len, i;
+
+ version_id = MIN(version_id, s->mig_version_id);
+
+ if (version_id < 5) {
+ esp_set_tc(s, s->mig_dma_left);
+
+ /* Migrate ti_buf to fifo */
+ len = s->mig_ti_wptr - s->mig_ti_rptr;
+ for (i = 0; i < len; i++) {
+ fifo8_push(&s->fifo, s->mig_ti_buf[i]);
+ }
+
+ /* Migrate cmdbuf to cmdfifo */
+ for (i = 0; i < s->mig_cmdlen; i++) {
+ fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
+ }
}
-};
+
+ s->mig_version_id = vmstate_esp.version_id;
+ return 0;
+}
const VMStateDescription vmstate_esp = {
- .name ="esp",
- .version_id = 4,
+ .name = "esp",
+ .version_id = 5,
.minimum_version_id = 3,
+ .pre_save = esp_pre_save,
+ .post_load = esp_post_load,
.fields = (VMStateField[]) {
VMSTATE_BUFFER(rregs, ESPState),
VMSTATE_BUFFER(wregs, ESPState),
VMSTATE_INT32(ti_size, ESPState),
- VMSTATE_UINT32(ti_rptr, ESPState),
- VMSTATE_UINT32(ti_wptr, ESPState),
- VMSTATE_BUFFER(ti_buf, ESPState),
+ VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
+ VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
+ VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
VMSTATE_UINT32(status, ESPState),
- VMSTATE_UINT32(deferred_status, ESPState),
- VMSTATE_BOOL(deferred_complete, ESPState),
+ VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
+ esp_is_before_version_5),
+ VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
+ esp_is_before_version_5),
VMSTATE_UINT32(dma, ESPState),
- VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
- VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
- VMSTATE_UINT32(cmdlen, ESPState),
+ VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
+ esp_is_before_version_5, 0, 16),
+ VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
+ esp_is_before_version_5, 16,
+ sizeof(typeof_field(ESPState, mig_cmdbuf))),
+ VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
VMSTATE_UINT32(do_cmd, ESPState),
- VMSTATE_UINT32(dma_left, ESPState),
+ VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
+ VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
+ VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
+ VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
+ VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
+ VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
VMSTATE_END_OF_LIST()
},
- .subsections = (const VMStateDescription * []) {
- &vmstate_esp_pdma,
- NULL
- }
};
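The _TEST field macros used above only put a field on the wire when their predicate returns true, which is how the legacy buffer and pointer fields keep migrating for version <= 4 streams while the Fifo8 state travels only in version 5 streams; esp_pre_save()/esp_post_load() additionally clamp against mig_version_id so the same predicates behave correctly when vmstate_esp is embedded in the sysbus wrapper's stream. A minimal, self-contained illustration of the pattern with a hypothetical device (not the ESP code itself):

    #include "qemu/osdep.h"
    #include "migration/vmstate.h"

    typedef struct DemoState {
        uint32_t legacy_counter;   /* what version 1 streams carried */
        uint32_t new_counter;      /* replacement introduced in version 2 */
    } DemoState;

    static bool demo_is_v1(void *opaque, int version_id)
    {
        return version_id < 2;
    }

    static bool demo_is_v2(void *opaque, int version_id)
    {
        return version_id >= 2;
    }

    static const VMStateDescription vmstate_demo = {
        .name = "demo",
        .version_id = 2,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            /* only sent/accepted when the stream version is 1 */
            VMSTATE_UINT32_TEST(legacy_counter, DemoState, demo_is_v1),
            /* only sent/accepted for version 2 streams */
            VMSTATE_UINT32_TEST(new_counter, DemoState, demo_is_v2),
            VMSTATE_END_OF_LIST()
        }
    };

A post_load hook would then convert legacy_counter into new_counter whenever demo_is_v1() fired, much as esp_post_load() above refills the FIFOs from mig_ti_buf and mig_cmdbuf.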
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int size)
{
SysBusESPState *sysbus = opaque;
+ ESPState *s = ESP(&sysbus->esp);
uint32_t saddr;
saddr = addr >> sysbus->it_shift;
- esp_reg_write(&sysbus->esp, saddr, val);
+ esp_reg_write(s, saddr, val);
}
static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
unsigned int size)
{
SysBusESPState *sysbus = opaque;
+ ESPState *s = ESP(&sysbus->esp);
uint32_t saddr;
saddr = addr >> sysbus->it_shift;
- return esp_reg_read(&sysbus->esp, saddr);
+ return esp_reg_read(s, saddr);
}
static const MemoryRegionOps sysbus_esp_mem_ops = {
@@ -844,36 +1179,23 @@ static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int size)
{
SysBusESPState *sysbus = opaque;
- ESPState *s = &sysbus->esp;
+ ESPState *s = ESP(&sysbus->esp);
uint32_t dmalen;
- uint8_t *buf = get_pdma_buf(s);
- dmalen = s->rregs[ESP_TCLO];
- dmalen |= s->rregs[ESP_TCMID] << 8;
- dmalen |= s->rregs[ESP_TCHI] << 16;
- if (dmalen == 0 || s->pdma_len == 0) {
- return;
- }
+ trace_esp_pdma_write(size);
+
switch (size) {
case 1:
- buf[s->pdma_cur++] = val;
- s->pdma_len--;
- dmalen--;
+ esp_pdma_write(s, val);
break;
case 2:
- buf[s->pdma_cur++] = val >> 8;
- buf[s->pdma_cur++] = val;
- s->pdma_len -= 2;
- dmalen -= 2;
+ esp_pdma_write(s, val >> 8);
+ esp_pdma_write(s, val);
break;
}
- s->rregs[ESP_TCLO] = dmalen & 0xff;
- s->rregs[ESP_TCMID] = dmalen >> 8;
- s->rregs[ESP_TCHI] = dmalen >> 16;
- if (s->pdma_len == 0 && s->pdma_cb) {
- esp_lower_drq(s);
+ dmalen = esp_get_tc(s);
+ if (dmalen == 0 || fifo8_num_free(&s->fifo) < 2) {
s->pdma_cb(s);
- s->pdma_cb = NULL;
}
}
@@ -881,29 +1203,22 @@ static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
unsigned int size)
{
SysBusESPState *sysbus = opaque;
- ESPState *s = &sysbus->esp;
- uint8_t *buf = get_pdma_buf(s);
+ ESPState *s = ESP(&sysbus->esp);
uint64_t val = 0;
- if (s->pdma_len == 0) {
- return 0;
- }
+ trace_esp_pdma_read(size);
+
switch (size) {
case 1:
- val = buf[s->pdma_cur++];
- s->pdma_len--;
+ val = esp_pdma_read(s);
break;
case 2:
- val = buf[s->pdma_cur++];
- val = (val << 8) | buf[s->pdma_cur++];
- s->pdma_len -= 2;
+ val = esp_pdma_read(s);
+ val = (val << 8) | esp_pdma_read(s);
break;
}
-
- if (s->pdma_len == 0 && s->pdma_cb) {
- esp_lower_drq(s);
+ if (fifo8_num_used(&s->fifo) < 2) {
s->pdma_cb(s);
- s->pdma_cb = NULL;
}
return val;
}
@@ -913,7 +1228,9 @@ static const MemoryRegionOps sysbus_esp_pdma_ops = {
.write = sysbus_esp_pdma_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.valid.min_access_size = 1,
- .valid.max_access_size = 2,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 2,
};
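Widening .valid.max_access_size to 4 while keeping .impl.max_access_size at 2 tells the memory core to accept 32-bit guest accesses to the PDMA region but to split them into the 16-bit (or 8-bit) accesses the handlers actually implement. A minimal sketch of that valid-versus-impl split, with hypothetical handler names:

    /* Handlers only ever see size == 1 or size == 2 (the .impl bounds) */
    static uint64_t demo_pdma_read(void *opaque, hwaddr addr, unsigned size)
    {
        return 0;
    }

    static void demo_pdma_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
    {
    }

    static const MemoryRegionOps demo_pdma_ops = {
        .read = demo_pdma_read,
        .write = demo_pdma_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        /* what the guest is allowed to issue */
        .valid.min_access_size = 1,
        .valid.max_access_size = 4,
        /* what the callbacks implement; the core splits accesses to match */
        .impl.min_access_size = 1,
        .impl.max_access_size = 2,
    };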
static const struct SCSIBusInfo esp_scsi_info = {
@@ -928,8 +1245,8 @@ static const struct SCSIBusInfo esp_scsi_info = {
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
- SysBusESPState *sysbus = ESP(opaque);
- ESPState *s = &sysbus->esp;
+ SysBusESPState *sysbus = SYSBUS_ESP(opaque);
+ ESPState *s = ESP(&sysbus->esp);
switch (irq) {
case 0:
@@ -944,8 +1261,12 @@ static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- SysBusESPState *sysbus = ESP(dev);
- ESPState *s = &sysbus->esp;
+ SysBusESPState *sysbus = SYSBUS_ESP(dev);
+ ESPState *s = ESP(&sysbus->esp);
+
+ if (!qdev_realize(DEVICE(s), NULL, errp)) {
+ return;
+ }
sysbus_init_irq(sbd, &s->irq);
sysbus_init_irq(sbd, &s->irq_data);
@@ -956,7 +1277,7 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp)
sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
sysbus_init_mmio(sbd, &sysbus->iomem);
memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
- sysbus, "esp-pdma", 2);
+ sysbus, "esp-pdma", 4);
sysbus_init_mmio(sbd, &sysbus->pdma);
qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
@@ -966,15 +1287,25 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp)
static void sysbus_esp_hard_reset(DeviceState *dev)
{
- SysBusESPState *sysbus = ESP(dev);
- esp_hard_reset(&sysbus->esp);
+ SysBusESPState *sysbus = SYSBUS_ESP(dev);
+ ESPState *s = ESP(&sysbus->esp);
+
+ esp_hard_reset(s);
+}
+
+static void sysbus_esp_init(Object *obj)
+{
+ SysBusESPState *sysbus = SYSBUS_ESP(obj);
+
+ object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
static const VMStateDescription vmstate_sysbus_esp_scsi = {
.name = "sysbusespscsi",
- .version_id = 1,
+ .version_id = 2,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
+ VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
VMSTATE_END_OF_LIST()
}
@@ -991,15 +1322,51 @@ static void sysbus_esp_class_init(ObjectClass *klass, void *data)
}
static const TypeInfo sysbus_esp_info = {
- .name = TYPE_ESP,
+ .name = TYPE_SYSBUS_ESP,
.parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = sysbus_esp_init,
.instance_size = sizeof(SysBusESPState),
.class_init = sysbus_esp_class_init,
};
+static void esp_finalize(Object *obj)
+{
+ ESPState *s = ESP(obj);
+
+ fifo8_destroy(&s->fifo);
+ fifo8_destroy(&s->cmdfifo);
+}
+
+static void esp_init(Object *obj)
+{
+ ESPState *s = ESP(obj);
+
+ fifo8_create(&s->fifo, ESP_FIFO_SZ);
+ fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
+}
+
+static void esp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ /* internal device for sysbusesp/pciespscsi, not user-creatable */
+ dc->user_creatable = false;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo esp_info = {
+ .name = TYPE_ESP,
+ .parent = TYPE_DEVICE,
+ .instance_init = esp_init,
+ .instance_finalize = esp_finalize,
+ .instance_size = sizeof(ESPState),
+ .class_init = esp_class_init,
+};
+
static void esp_register_types(void)
{
type_register_static(&sysbus_esp_info);
+ type_register_static(&esp_info);
}
type_init(esp_register_types)
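With this patch the ESP core becomes a QOM device in its own right (TYPE_ESP, parent TYPE_DEVICE) embedded inside the sysbus wrapper: sysbus_esp_init() creates it with object_initialize_child() and sysbus_esp_realize() realizes it with qdev_realize() before wiring up IRQs and MMIO. A reduced sketch of that embedded-child pattern, with hypothetical type and field names:

    /* Skeleton only; QOM boilerplate such as OBJECT_DECLARE_SIMPLE_TYPE and
     * the TypeInfo definitions are omitted. */
    typedef struct WrapperState {
        SysBusDevice parent_obj;
        ChildState child;                    /* embedded by value, not a pointer */
    } WrapperState;

    static void wrapper_init(Object *obj)
    {
        WrapperState *s = WRAPPER(obj);

        /* Gives the child a QOM path under the wrapper and ties their lifetimes */
        object_initialize_child(obj, "child", &s->child, TYPE_CHILD);
    }

    static void wrapper_realize(DeviceState *dev, Error **errp)
    {
        WrapperState *s = WRAPPER(dev);

        /* Realize the embedded child first and propagate any error */
        if (!qdev_realize(DEVICE(&s->child), NULL, errp)) {
            return;
        }
        /* ... map MMIO regions, connect IRQs, etc. ... */
    }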
diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events
index 9788661bfd..1c331fb189 100644
--- a/hw/scsi/trace-events
+++ b/hw/scsi/trace-events
@@ -159,8 +159,12 @@ esp_error_unhandled_command(uint32_t val) "unhandled command (0x%2.2x)"
esp_error_invalid_write(uint32_t val, uint32_t addr) "invalid write of 0x%02x at [0x%x]"
esp_raise_irq(void) "Raise IRQ"
esp_lower_irq(void) "Lower IRQ"
+esp_raise_drq(void) "Raise DREQ"
+esp_lower_drq(void) "Lower DREQ"
esp_dma_enable(void) "Raise enable"
esp_dma_disable(void) "Lower enable"
+esp_pdma_read(int size) "pDMA read %u bytes"
+esp_pdma_write(int size) "pDMA write %u bytes"
esp_get_cmd(uint32_t dmalen, int target) "len %d target %d"
esp_do_busid_cmd(uint8_t busid) "busid 0x%x"
esp_handle_satn_stop(uint32_t cmdlen) "cmdlen %d"
@@ -189,6 +193,7 @@ esp_mem_writeb_cmd_selatn(uint32_t val) "Select with ATN (0x%2.2x)"
esp_mem_writeb_cmd_selatns(uint32_t val) "Select with ATN & stop (0x%2.2x)"
esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (0x%2.2x)"
esp_mem_writeb_cmd_dissel(uint32_t val) "Disable selection (0x%2.2x)"
+esp_mem_writeb_cmd_ti(uint32_t val) "Transfer Information (0x%2.2x)"
# esp-pci.c
esp_pci_error_invalid_dma_direction(void) "invalid DMA transfer direction"
diff --git a/hw/sh4/Kconfig b/hw/sh4/Kconfig
index 4cbce3a0ed..ab733a3f76 100644
--- a/hw/sh4/Kconfig
+++ b/hw/sh4/Kconfig
@@ -9,16 +9,16 @@ config R2D
select USB_OHCI_PCI
select PCI
select SM501
- select SH4
+ select SH7750
+ select SH_PCI
config SHIX
bool
select SH7750
- select SH4
+ select TC58128
config SH7750
bool
-
-config SH4
- bool
- select PTIMER
+ select SH_INTC
+ select SH_SCI
+ select SH_TIMER
diff --git a/hw/sh4/meson.build b/hw/sh4/meson.build
index 303c0f4287..424d5674de 100644
--- a/hw/sh4/meson.build
+++ b/hw/sh4/meson.build
@@ -2,7 +2,6 @@ sh4_ss = ss.source_set()
sh4_ss.add(files(
'sh7750.c',
'sh7750_regnames.c',
- 'sh_pci.c'
))
sh4_ss.add(when: 'CONFIG_R2D', if_true: files('r2d.c'))
sh4_ss.add(when: 'CONFIG_SHIX', if_true: files('shix.c'))
diff --git a/hw/sh4/sh7750_regs.h b/hw/sh4/sh7750_regs.h
index 3e4554af31..ab073dadc7 100644
--- a/hw/sh4/sh7750_regs.h
+++ b/hw/sh4/sh7750_regs.h
@@ -10,8 +10,28 @@
* Victor V. Vengerov <vvv@oktet.ru>
*
* The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.com/license/LICENSE.
+ * found in this file hereafter or at http://www.rtems.com/license/LICENSE.
+ *
+ * LICENSE INFORMATION
+ *
+ * RTEMS is free software; you can redistribute it and/or modify it under
+ * terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version. RTEMS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details. You should have received
+ * a copy of the GNU General Public License along with RTEMS; see
+ * file COPYING. If not, write to the Free Software Foundation, 675
+ * Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * As a special exception, including RTEMS header files in a file,
+ * instantiating RTEMS generics or templates, or linking other files
+ * with RTEMS objects to produce an executable application, does not
+ * by itself cause the resulting executable application to be covered
+ * by the GNU General Public License. This exception does not
+ * however invalidate any other reasons why the executable file might be
+ * covered by the GNU Public License.
*
* @(#) sh7750_regs.h,v 1.2.4.1 2003/09/04 18:46:00 joel Exp
*/
diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c
index 38ca1e33c7..312e2afaf9 100644
--- a/hw/sparc/sun4m.c
+++ b/hw/sparc/sun4m.c
@@ -334,7 +334,7 @@ static void *sparc32_dma_init(hwaddr dma_base,
OBJECT(dma), "espdma"));
sysbus_connect_irq(SYS_BUS_DEVICE(espdma), 0, espdma_irq);
- esp = ESP(object_resolve_path_component(OBJECT(espdma), "esp"));
+ esp = SYSBUS_ESP(object_resolve_path_component(OBJECT(espdma), "esp"));
ledma = SPARC32_LEDMA_DEVICE(object_resolve_path_component(
OBJECT(dma), "ledma"));
diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c
index a897034601..1e9dba2039 100644
--- a/hw/ssi/xilinx_spips.c
+++ b/hw/ssi/xilinx_spips.c
@@ -176,7 +176,8 @@
FIELD(GQSPI_FIFO_CTRL, GENERIC_FIFO_RESET, 0, 1)
#define R_GQSPI_GFIFO_THRESH (0x150 / 4)
#define R_GQSPI_DATA_STS (0x15c / 4)
-/* We use the snapshot register to hold the core state for the currently
+/*
+ * We use the snapshot register to hold the core state for the currently
* or most recently executed command. So the generic fifo format is defined
* for the snapshot register
*/
@@ -194,13 +195,6 @@
#define R_GQSPI_MOD_ID (0x1fc / 4)
#define R_GQSPI_MOD_ID_RESET (0x10a0000)
-#define R_QSPIDMA_DST_CTRL (0x80c / 4)
-#define R_QSPIDMA_DST_CTRL_RESET (0x803ffa00)
-#define R_QSPIDMA_DST_I_MASK (0x820 / 4)
-#define R_QSPIDMA_DST_I_MASK_RESET (0xfe)
-#define R_QSPIDMA_DST_CTRL2 (0x824 / 4)
-#define R_QSPIDMA_DST_CTRL2_RESET (0x081bfff8)
-
/* size of TXRX FIFOs */
#define RXFF_A (128)
#define TXFF_A (128)
@@ -416,15 +410,13 @@ static void xlnx_zynqmp_qspips_reset(DeviceState *d)
s->regs[R_GQSPI_GPIO] = 1;
s->regs[R_GQSPI_LPBK_DLY_ADJ] = R_GQSPI_LPBK_DLY_ADJ_RESET;
s->regs[R_GQSPI_MOD_ID] = R_GQSPI_MOD_ID_RESET;
- s->regs[R_QSPIDMA_DST_CTRL] = R_QSPIDMA_DST_CTRL_RESET;
- s->regs[R_QSPIDMA_DST_I_MASK] = R_QSPIDMA_DST_I_MASK_RESET;
- s->regs[R_QSPIDMA_DST_CTRL2] = R_QSPIDMA_DST_CTRL2_RESET;
s->man_start_com_g = false;
s->gqspi_irqline = 0;
xlnx_zynqmp_qspips_update_ixr(s);
}
-/* N way (num) in place bit striper. Lay out row wise bits (MSB to LSB)
+/*
+ * N way (num) in place bit striper. Lay out row wise bits (MSB to LSB)
* column wise (from element 0 to N-1). num is the length of x, and dir
* reverses the direction of the transform. Best illustrated by example:
* Each digit in the below array is a single bit (num == 3):
@@ -637,8 +629,10 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
tx_rx[i] = tx;
}
} else {
- /* Extract a dummy byte and generate dummy cycles according to the
- * link state */
+ /*
+ * Extract a dummy byte and generate dummy cycles according to the
+ * link state
+ */
tx = fifo8_pop(&s->tx_fifo);
dummy_cycles = 8 / s->link_state;
}
@@ -721,8 +715,9 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
}
break;
case (SNOOP_ADDR):
- /* Address has been transmitted, transmit dummy cycles now if
- * needed */
+ /*
+ * Address has been transmitted, transmit dummy cycles now if needed
+ */
if (s->cmd_dummies < 0) {
s->snoop_state = SNOOP_NONE;
} else {
@@ -876,7 +871,7 @@ static void xlnx_zynqmp_qspips_notify(void *opaque)
}
static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
- unsigned size)
+ unsigned size)
{
XilinxSPIPS *s = opaque;
uint32_t mask = ~0;
@@ -970,7 +965,7 @@ static uint64_t xlnx_zynqmp_qspips_read(void *opaque,
}
static void xilinx_spips_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
+ uint64_t value, unsigned size)
{
int mask = ~0;
XilinxSPIPS *s = opaque;
@@ -1072,7 +1067,7 @@ static void xilinx_qspips_write(void *opaque, hwaddr addr,
}
static void xlnx_zynqmp_qspips_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned size)
+ uint64_t value, unsigned size)
{
XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(opaque);
uint32_t reg = addr / 4;
diff --git a/hw/timer/Kconfig b/hw/timer/Kconfig
index 8749edfb6a..bac2511715 100644
--- a/hw/timer/Kconfig
+++ b/hw/timer/Kconfig
@@ -36,11 +36,21 @@ config CMSDK_APB_DUALTIMER
bool
select PTIMER
+config SH_TIMER
+ bool
+ select PTIMER
+
config RENESAS_TMR
bool
config RENESAS_CMT
bool
+config SSE_COUNTER
+ bool
+
+config SSE_TIMER
+ bool
+
config AVR_TIMER16
bool
diff --git a/hw/timer/cmsdk-apb-dualtimer.c b/hw/timer/cmsdk-apb-dualtimer.c
index ef49f5852d..d4a509c798 100644
--- a/hw/timer/cmsdk-apb-dualtimer.c
+++ b/hw/timer/cmsdk-apb-dualtimer.c
@@ -449,7 +449,7 @@ static void cmsdk_apb_dualtimer_reset(DeviceState *dev)
s->timeritop = 0;
}
-static void cmsdk_apb_dualtimer_clk_update(void *opaque)
+static void cmsdk_apb_dualtimer_clk_update(void *opaque, ClockEvent event)
{
CMSDKAPBDualTimer *s = CMSDK_APB_DUALTIMER(opaque);
int i;
@@ -478,7 +478,8 @@ static void cmsdk_apb_dualtimer_init(Object *obj)
sysbus_init_irq(sbd, &s->timermod[i].timerint);
}
s->timclk = qdev_init_clock_in(DEVICE(s), "TIMCLK",
- cmsdk_apb_dualtimer_clk_update, s);
+ cmsdk_apb_dualtimer_clk_update, s,
+ ClockUpdate);
}
static void cmsdk_apb_dualtimer_realize(DeviceState *dev, Error **errp)
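These hunks track an API change in the clock framework: clock callbacks now take a ClockEvent argument, and qdev_init_clock_in() takes a mask saying which events the callback wants. A condensed sketch of the new shape, using a hypothetical device:

    /* Hypothetical consumer of the updated clock input API */
    static void demo_clk_update(void *opaque, ClockEvent event)
    {
        DemoState *s = DEMO(opaque);

        /* Only ClockUpdate was requested below, so that is the only
         * event this callback will ever be handed. */
        demo_recalculate(s);
    }

    static void demo_init(Object *obj)
    {
        DemoState *s = DEMO(obj);

        /* The trailing argument is the event mask; devices that only need
         * the clock value and no callback pass NULL, NULL, 0, as the
         * npcm7xx timer hunk further down does. */
        s->clk = qdev_init_clock_in(DEVICE(obj), "CLK",
                                    demo_clk_update, s, ClockUpdate);
    }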
diff --git a/hw/timer/cmsdk-apb-timer.c b/hw/timer/cmsdk-apb-timer.c
index ee51ce3369..68aa1a7636 100644
--- a/hw/timer/cmsdk-apb-timer.c
+++ b/hw/timer/cmsdk-apb-timer.c
@@ -204,7 +204,7 @@ static void cmsdk_apb_timer_reset(DeviceState *dev)
ptimer_transaction_commit(s->timer);
}
-static void cmsdk_apb_timer_clk_update(void *opaque)
+static void cmsdk_apb_timer_clk_update(void *opaque, ClockEvent event)
{
CMSDKAPBTimer *s = CMSDK_APB_TIMER(opaque);
@@ -223,7 +223,7 @@ static void cmsdk_apb_timer_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->timerint);
s->pclk = qdev_init_clock_in(DEVICE(s), "pclk",
- cmsdk_apb_timer_clk_update, s);
+ cmsdk_apb_timer_clk_update, s, ClockUpdate);
}
static void cmsdk_apb_timer_realize(DeviceState *dev, Error **errp)
diff --git a/hw/timer/meson.build b/hw/timer/meson.build
index be343f68fe..a429792b08 100644
--- a/hw/timer/meson.build
+++ b/hw/timer/meson.build
@@ -30,8 +30,10 @@ softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_synctimer.c'))
softmmu_ss.add(when: 'CONFIG_PUV3', if_true: files('puv3_ost.c'))
softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_timer.c'))
softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_systmr.c'))
-softmmu_ss.add(when: 'CONFIG_SH4', if_true: files('sh_timer.c'))
+softmmu_ss.add(when: 'CONFIG_SH_TIMER', if_true: files('sh_timer.c'))
softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_timer.c'))
+softmmu_ss.add(when: 'CONFIG_SSE_COUNTER', if_true: files('sse-counter.c'))
+softmmu_ss.add(when: 'CONFIG_SSE_TIMER', if_true: files('sse-timer.c'))
softmmu_ss.add(when: 'CONFIG_STM32F2XX_TIMER', if_true: files('stm32f2xx_timer.c'))
softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_timer.c'))
diff --git a/hw/timer/npcm7xx_timer.c b/hw/timer/npcm7xx_timer.c
index 36e2c07db2..32f5e021f8 100644
--- a/hw/timer/npcm7xx_timer.c
+++ b/hw/timer/npcm7xx_timer.c
@@ -138,8 +138,8 @@ static int64_t npcm7xx_timer_count_to_ns(NPCM7xxTimer *t, uint32_t count)
/* Convert a time interval in nanoseconds to a timer cycle count. */
static uint32_t npcm7xx_timer_ns_to_count(NPCM7xxTimer *t, int64_t ns)
{
- return ns / clock_ticks_to_ns(t->ctrl->clock,
- npcm7xx_tcsr_prescaler(t->tcsr));
+ return clock_ns_to_ticks(t->ctrl->clock, ns) /
+ npcm7xx_tcsr_prescaler(t->tcsr);
}
static uint32_t npcm7xx_watchdog_timer_prescaler(const NPCM7xxWatchdogTimer *t)
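The two orderings are algebraically equivalent, count = ns / (prescaler * T) = (ns / T) / prescaler for clock period T, but clock_ticks_to_ns() rounds the prescaled period to whole nanoseconds before the outer division, whereas clock_ns_to_ticks() performs the ns-to-tick division against the clock's full-precision period first. A rough numeric illustration, using the 32.32 fixed-point nanosecond representation QEMU clocks use internally and ignoring the exact rounding the real helpers apply:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 30 MHz clock: period 33.333... ns, stored as 32.32 fixed point */
        uint64_t period_32_32 = ((uint64_t)1000000000 << 32) / 30000000;
        uint32_t prescaler = 5;
        uint64_t interval_ns = 1000000;

        /* Old ordering: round "ns per prescaled tick" to whole ns, then divide */
        uint64_t ns_per_prescaled_tick = (prescaler * period_32_32) >> 32; /* 166 */
        uint64_t old_count = interval_ns / ns_per_prescaled_tick;          /* 6024 */

        /* New ordering: ns -> ticks at full precision, then apply the prescaler */
        uint64_t ticks = (interval_ns << 32) / period_32_32;               /* 30000 */
        uint64_t new_count = ticks / prescaler;                            /* 6000 */

        printf("old=%llu new=%llu (exact answer: 6000)\n",
               (unsigned long long)old_count, (unsigned long long)new_count);
        return 0;
    }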
@@ -627,7 +627,7 @@ static void npcm7xx_timer_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
qdev_init_gpio_out_named(dev, &w->reset_signal,
NPCM7XX_WATCHDOG_RESET_GPIO_OUT, 1);
- s->clock = qdev_init_clock_in(dev, "clock", NULL, NULL);
+ s->clock = qdev_init_clock_in(dev, "clock", NULL, NULL, 0);
}
static const VMStateDescription vmstate_npcm7xx_base_timer = {
diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c
index e03a8155b2..eed39917fe 100644
--- a/hw/timer/renesas_tmr.c
+++ b/hw/timer/renesas_tmr.c
@@ -46,8 +46,10 @@ REG8(TCCR, 10)
FIELD(TCCR, CSS, 3, 2)
FIELD(TCCR, TMRIS, 7, 1)
-#define INTERNAL 0x01
-#define CASCADING 0x03
+#define CSS_EXTERNAL 0x00
+#define CSS_INTERNAL 0x01
+#define CSS_INVALID 0x02
+#define CSS_CASCADING 0x03
#define CCLR_A 0x01
#define CCLR_B 0x02
@@ -72,7 +74,7 @@ static void update_events(RTMRState *tmr, int ch)
/* event not happened */
return ;
}
- if (FIELD_EX8(tmr->tccr[0], TCCR, CSS) == CASCADING) {
+ if (FIELD_EX8(tmr->tccr[0], TCCR, CSS) == CSS_CASCADING) {
/* cascading mode */
if (ch == 1) {
tmr->next[ch] = none;
@@ -130,23 +132,32 @@ static uint16_t read_tcnt(RTMRState *tmr, unsigned size, int ch)
if (delta > 0) {
tmr->tick = now;
- if (FIELD_EX8(tmr->tccr[1], TCCR, CSS) == INTERNAL) {
+ switch (FIELD_EX8(tmr->tccr[1], TCCR, CSS)) {
+ case CSS_INTERNAL:
/* timer1 count update */
elapsed = elapsed_time(tmr, 1, delta);
if (elapsed >= 0x100) {
ovf = elapsed >> 8;
}
tcnt[1] = tmr->tcnt[1] + (elapsed & 0xff);
+ break;
+ case CSS_INVALID: /* guest error to have set this */
+ case CSS_EXTERNAL: /* QEMU doesn't implement these */
+ case CSS_CASCADING:
+ tcnt[1] = tmr->tcnt[1];
+ break;
}
switch (FIELD_EX8(tmr->tccr[0], TCCR, CSS)) {
- case INTERNAL:
+ case CSS_INTERNAL:
elapsed = elapsed_time(tmr, 0, delta);
tcnt[0] = tmr->tcnt[0] + elapsed;
break;
- case CASCADING:
- if (ovf > 0) {
- tcnt[0] = tmr->tcnt[0] + ovf;
- }
+ case CSS_CASCADING:
+ tcnt[0] = tmr->tcnt[0] + ovf;
+ break;
+ case CSS_INVALID: /* guest error to have set this */
+ case CSS_EXTERNAL: /* QEMU doesn't implement this */
+ tcnt[0] = tmr->tcnt[0];
break;
}
} else {
@@ -330,7 +341,7 @@ static uint16_t issue_event(RTMRState *tmr, int ch, int sz,
qemu_irq_pulse(tmr->cmia[ch]);
}
if (sz == 8 && ch == 0 &&
- FIELD_EX8(tmr->tccr[1], TCCR, CSS) == CASCADING) {
+ FIELD_EX8(tmr->tccr[1], TCCR, CSS) == CSS_CASCADING) {
tmr->tcnt[1]++;
timer_events(tmr, 1);
}
@@ -362,7 +373,7 @@ static void timer_events(RTMRState *tmr, int ch)
uint16_t tcnt;
tmr->tcnt[ch] = read_tcnt(tmr, 1, ch);
- if (FIELD_EX8(tmr->tccr[0], TCCR, CSS) != CASCADING) {
+ if (FIELD_EX8(tmr->tccr[0], TCCR, CSS) != CSS_CASCADING) {
tmr->tcnt[ch] = issue_event(tmr, ch, 8,
tmr->tcnt[ch],
tmr->tcora[ch],
diff --git a/hw/timer/sse-counter.c b/hw/timer/sse-counter.c
new file mode 100644
index 0000000000..0384051f15
--- /dev/null
+++ b/hw/timer/sse-counter.c
@@ -0,0 +1,474 @@
+/*
+ * Arm SSE Subsystem System Counter
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "System counter" which is documented in
+ * the Arm SSE-123 Example Subsystem Technical Reference Manual:
+ * https://developer.arm.com/documentation/101370/latest/
+ *
+ * The system counter is a non-stop 64-bit up-counter. It provides
+ * this count value to other devices like the SSE system timer,
+ * which are driven by this system timestamp rather than directly
+ * from a clock. Internally to the counter the count is actually
+ * 88-bit precision (64.24 fixed point), with a programmable scale factor.
+ *
+ * The hardware has the optional feature that it supports dynamic
+ * clock switching, where two clock inputs are connected, and which
+ * one is used is selected via a CLKSEL input signal. Since the
+ * users of this device in QEMU don't use this feature, we only model
+ * the HWCLKSW=0 configuration.
+ */
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/timer.h"
+#include "qapi/error.h"
+#include "trace.h"
+#include "hw/timer/sse-counter.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "hw/registerfields.h"
+#include "hw/clock.h"
+#include "hw/qdev-clock.h"
+#include "migration/vmstate.h"
+
+/* Registers in the control frame */
+REG32(CNTCR, 0x0)
+ FIELD(CNTCR, EN, 0, 1)
+ FIELD(CNTCR, HDBG, 1, 1)
+ FIELD(CNTCR, SCEN, 2, 1)
+ FIELD(CNTCR, INTRMASK, 3, 1)
+ FIELD(CNTCR, PSLVERRDIS, 4, 1)
+ FIELD(CNTCR, INTRCLR, 5, 1)
+/*
+ * Although CNTCR defines interrupt-related bits, the counter doesn't
+ * appear to actually have an interrupt output. So INTRCLR is
+ * effectively a RAZ/WI bit, as are the reserved bits [31:6].
+ */
+#define CNTCR_VALID_MASK (R_CNTCR_EN_MASK | R_CNTCR_HDBG_MASK | \
+ R_CNTCR_SCEN_MASK | R_CNTCR_INTRMASK_MASK | \
+ R_CNTCR_PSLVERRDIS_MASK)
+REG32(CNTSR, 0x4)
+REG32(CNTCV_LO, 0x8)
+REG32(CNTCV_HI, 0xc)
+REG32(CNTSCR, 0x10) /* Aliased with CNTSCR0 */
+REG32(CNTID, 0x1c)
+ FIELD(CNTID, CNTSC, 0, 4)
+ FIELD(CNTID, CNTCS, 16, 1)
+ FIELD(CNTID, CNTSELCLK, 17, 2)
+ FIELD(CNTID, CNTSCR_OVR, 19, 1)
+REG32(CNTSCR0, 0xd0)
+REG32(CNTSCR1, 0xd4)
+
+/* Registers in the status frame */
+REG32(STATUS_CNTCV_LO, 0x0)
+REG32(STATUS_CNTCV_HI, 0x4)
+
+/* Standard ID registers, present in both frames */
+REG32(PID4, 0xFD0)
+REG32(PID5, 0xFD4)
+REG32(PID6, 0xFD8)
+REG32(PID7, 0xFDC)
+REG32(PID0, 0xFE0)
+REG32(PID1, 0xFE4)
+REG32(PID2, 0xFE8)
+REG32(PID3, 0xFEC)
+REG32(CID0, 0xFF0)
+REG32(CID1, 0xFF4)
+REG32(CID2, 0xFF8)
+REG32(CID3, 0xFFC)
+
+/* PID/CID values */
+static const int control_id[] = {
+ 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
+ 0xba, 0xb0, 0x0b, 0x00, /* PID0..PID3 */
+ 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
+};
+
+static const int status_id[] = {
+ 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
+ 0xbb, 0xb0, 0x0b, 0x00, /* PID0..PID3 */
+ 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
+};
+
+static void sse_counter_notify_users(SSECounter *s)
+{
+ /*
+ * Notify users of the count timestamp that they may
+ * need to recalculate.
+ */
+ notifier_list_notify(&s->notifier_list, NULL);
+}
+
+static bool sse_counter_enabled(SSECounter *s)
+{
+ return (s->cntcr & R_CNTCR_EN_MASK) != 0;
+}
+
+uint64_t sse_counter_tick_to_time(SSECounter *s, uint64_t tick)
+{
+ if (!sse_counter_enabled(s)) {
+ return UINT64_MAX;
+ }
+
+ tick -= s->ticks_then;
+
+ if (s->cntcr & R_CNTCR_SCEN_MASK) {
+ /* Adjust the tick count to account for the scale factor */
+ tick = muldiv64(tick, 0x01000000, s->cntscr0);
+ }
+
+ return s->ns_then + clock_ticks_to_ns(s->clk, tick);
+}
+
+void sse_counter_register_consumer(SSECounter *s, Notifier *notifier)
+{
+ /*
+ * For the moment we assume that both we and the devices
+ * which consume us last for the life of the simulation,
+ * and so there is no mechanism for removing a notifier.
+ */
+ notifier_list_add(&s->notifier_list, notifier);
+}
+
+uint64_t sse_counter_for_timestamp(SSECounter *s, uint64_t now)
+{
+ /* Return the CNTCV value for a particular timestamp (clock ns value). */
+ uint64_t ticks;
+
+ if (!sse_counter_enabled(s)) {
+ /* Counter is disabled and does not increment */
+ return s->ticks_then;
+ }
+
+ ticks = clock_ns_to_ticks(s->clk, now - s->ns_then);
+ if (s->cntcr & R_CNTCR_SCEN_MASK) {
+ /*
+ * Scaling is enabled. The CNTSCR value is the amount added to
+ * the underlying 88-bit counter for every tick of the
+ * underlying clock; CNTCV is the top 64 bits of that full
+ * 88-bit value. Multiplying the tick count by CNTSCR tells us
+ * how much the full 88-bit counter has moved on; we then
+ * divide that by 0x01000000 to find out how much the 64-bit
+ * visible portion has advanced. muldiv64() gives us the
+ * necessary at-least-88-bit precision for the intermediate
+ * result.
+ */
+ ticks = muldiv64(ticks, s->cntscr0, 0x01000000);
+ }
+ return s->ticks_then + ticks;
+}
+
+static uint64_t sse_cntcv(SSECounter *s)
+{
+ /* Return the CNTCV value for the current time */
+ return sse_counter_for_timestamp(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+}
+
+static void sse_write_cntcv(SSECounter *s, uint32_t value, unsigned startbit)
+{
+ /*
+ * Write one 32-bit half of the counter value; startbit is the
+ * bit position of this half in the 64-bit word, either 0 or 32.
+ */
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint64_t cntcv = sse_counter_for_timestamp(s, now);
+
+ cntcv = deposit64(cntcv, startbit, 32, value);
+ s->ticks_then = cntcv;
+ s->ns_then = now;
+ sse_counter_notify_users(s);
+}
+
+static uint64_t sse_counter_control_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ SSECounter *s = SSE_COUNTER(opaque);
+ uint64_t r;
+
+ switch (offset) {
+ case A_CNTCR:
+ r = s->cntcr;
+ break;
+ case A_CNTSR:
+ /*
+ * The only bit here is DBGH, indicating that the counter has been
+ * halted via the Halt-on-Debug signal. We don't implement halting
+ * debug, so the whole register always reads as zero.
+ */
+ r = 0;
+ break;
+ case A_CNTCV_LO:
+ r = extract64(sse_cntcv(s), 0, 32);
+ break;
+ case A_CNTCV_HI:
+ r = extract64(sse_cntcv(s), 32, 32);
+ break;
+ case A_CNTID:
+ /*
+ * For our implementation:
+ * - CNTSCR can only be written when CNTCR.EN == 0
+ * - HWCLKSW=0, so selected clock is always CLK0
+ * - counter scaling is implemented
+ */
+ r = (1 << R_CNTID_CNTSELCLK_SHIFT) | (1 << R_CNTID_CNTSC_SHIFT);
+ break;
+ case A_CNTSCR:
+ case A_CNTSCR0:
+ r = s->cntscr0;
+ break;
+ case A_CNTSCR1:
+ /* If HWCLKSW == 0, CNTSCR1 is RAZ/WI */
+ r = 0;
+ break;
+ case A_PID4 ... A_CID3:
+ r = control_id[(offset - A_PID4) / 4];
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Counter control frame read: bad offset 0x%x",
+ (unsigned)offset);
+ r = 0;
+ break;
+ }
+
+ trace_sse_counter_control_read(offset, r, size);
+ return r;
+}
+
+static void sse_counter_control_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ SSECounter *s = SSE_COUNTER(opaque);
+
+ trace_sse_counter_control_write(offset, value, size);
+
+ switch (offset) {
+ case A_CNTCR:
+ /*
+ * Although CNTCR defines interrupt-related bits, the counter doesn't
+ * appear to actually have an interrupt output. So INTRCLR is
+ * effectively a RAZ/WI bit, as are the reserved bits [31:6].
+ * The documentation does not explicitly say so, but we assume
+ * that changing the scale factor while the counter is enabled
+ * by toggling CNTCR.SCEN has the same behaviour (making the counter
+ * value UNKNOWN) as changing it by writing to CNTSCR, and so we
+ * don't need to try to recalculate for that case.
+ */
+ value &= CNTCR_VALID_MASK;
+ if ((value ^ s->cntcr) & R_CNTCR_EN_MASK) {
+ /*
+ * Whether the counter is being enabled or disabled, the
+ * required action is the same: sync the (ns_then, ticks_then)
+ * tuple.
+ */
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ s->ticks_then = sse_counter_for_timestamp(s, now);
+ s->ns_then = now;
+ sse_counter_notify_users(s);
+ }
+ s->cntcr = value;
+ break;
+ case A_CNTCV_LO:
+ sse_write_cntcv(s, value, 0);
+ break;
+ case A_CNTCV_HI:
+ sse_write_cntcv(s, value, 32);
+ break;
+ case A_CNTSCR:
+ case A_CNTSCR0:
+ /*
+ * If the scale registers are changed when the counter is enabled,
+ * the count value becomes UNKNOWN. So we don't try to recalculate
+ * anything here but only do it on a write to CNTCR.EN.
+ */
+ s->cntscr0 = value;
+ break;
+ case A_CNTSCR1:
+ /* If HWCLKSW == 0, CNTSCR1 is RAZ/WI */
+ break;
+ case A_CNTSR:
+ case A_CNTID:
+ case A_PID4 ... A_CID3:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Counter control frame: write to RO offset 0x%x\n",
+ (unsigned)offset);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Counter control frame: write to bad offset 0x%x\n",
+ (unsigned)offset);
+ break;
+ }
+}
+
+static uint64_t sse_counter_status_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ SSECounter *s = SSE_COUNTER(opaque);
+ uint64_t r;
+
+ switch (offset) {
+ case A_STATUS_CNTCV_LO:
+ r = extract64(sse_cntcv(s), 0, 32);
+ break;
+ case A_STATUS_CNTCV_HI:
+ r = extract64(sse_cntcv(s), 32, 32);
+ break;
+ case A_PID4 ... A_CID3:
+ r = status_id[(offset - A_PID4) / 4];
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Counter status frame read: bad offset 0x%x",
+ (unsigned)offset);
+ r = 0;
+ break;
+ }
+
+ trace_sse_counter_status_read(offset, r, size);
+ return r;
+}
+
+static void sse_counter_status_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ trace_sse_counter_status_write(offset, value, size);
+
+ switch (offset) {
+ case A_STATUS_CNTCV_LO:
+ case A_STATUS_CNTCV_HI:
+ case A_PID4 ... A_CID3:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Counter status frame: write to RO offset 0x%x\n",
+ (unsigned)offset);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Counter status frame: write to bad offset 0x%x\n",
+ (unsigned)offset);
+ break;
+ }
+}
+
+static const MemoryRegionOps sse_counter_control_ops = {
+ .read = sse_counter_control_read,
+ .write = sse_counter_control_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static const MemoryRegionOps sse_counter_status_ops = {
+ .read = sse_counter_status_read,
+ .write = sse_counter_status_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static void sse_counter_reset(DeviceState *dev)
+{
+ SSECounter *s = SSE_COUNTER(dev);
+
+ trace_sse_counter_reset();
+
+ s->cntcr = 0;
+ s->cntscr0 = 0x01000000;
+ s->ns_then = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ s->ticks_then = 0;
+}
+
+static void sse_clk_callback(void *opaque, ClockEvent event)
+{
+ SSECounter *s = SSE_COUNTER(opaque);
+ uint64_t now;
+
+ switch (event) {
+ case ClockPreUpdate:
+ /*
+ * Before the clock period updates, set (ticks_then, ns_then)
+ * to the current time and tick count (as calculated with
+ * the old clock period).
+ */
+ if (sse_counter_enabled(s)) {
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ s->ticks_then = sse_counter_for_timestamp(s, now);
+ s->ns_then = now;
+ }
+ break;
+ case ClockUpdate:
+ sse_counter_notify_users(s);
+ break;
+ default:
+ break;
+ }
+}
+
+static void sse_counter_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ SSECounter *s = SSE_COUNTER(obj);
+
+ notifier_list_init(&s->notifier_list);
+
+ s->clk = qdev_init_clock_in(DEVICE(obj), "CLK", sse_clk_callback, s,
+ ClockPreUpdate | ClockUpdate);
+ memory_region_init_io(&s->control_mr, obj, &sse_counter_control_ops,
+ s, "sse-counter-control", 0x1000);
+ memory_region_init_io(&s->status_mr, obj, &sse_counter_status_ops,
+ s, "sse-counter-status", 0x1000);
+ sysbus_init_mmio(sbd, &s->control_mr);
+ sysbus_init_mmio(sbd, &s->status_mr);
+}
+
+static void sse_counter_realize(DeviceState *dev, Error **errp)
+{
+ SSECounter *s = SSE_COUNTER(dev);
+
+ if (!clock_has_source(s->clk)) {
+ error_setg(errp, "SSE system counter: CLK must be connected");
+ return;
+ }
+}
+
+static const VMStateDescription sse_counter_vmstate = {
+ .name = "sse-counter",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_CLOCK(clk, SSECounter),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void sse_counter_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = sse_counter_realize;
+ dc->vmsd = &sse_counter_vmstate;
+ dc->reset = sse_counter_reset;
+}
+
+static const TypeInfo sse_counter_info = {
+ .name = TYPE_SSE_COUNTER,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SSECounter),
+ .instance_init = sse_counter_init,
+ .class_init = sse_counter_class_init,
+};
+
+static void sse_counter_register_types(void)
+{
+ type_register_static(&sse_counter_info);
+}
+
+type_init(sse_counter_register_types);
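To make the 64.24 fixed-point arithmetic in this file concrete: CNTSCR0 is the amount added to the 88-bit internal counter per tick of the input clock, with 24 fractional bits, so 0x01000000 means 1.0 and 0x00800000 means 0.5. A standalone illustration of the two conversions, with muldiv64() replaced by an equivalent 128-bit expression (this is not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Same idea as QEMU's muldiv64(): (a * b) / c with a 128-bit intermediate */
    static uint64_t muldiv64_sketch(uint64_t a, uint32_t b, uint32_t c)
    {
        return (uint64_t)(((unsigned __int128)a * b) / c);
    }

    int main(void)
    {
        uint64_t ticks = 1000;            /* elapsed input clock ticks */
        uint32_t cntscr0 = 0x00800000;    /* scale factor 0.5 in 64.24 fixed point */

        /* Visible CNTCV increment (sse_counter_for_timestamp() direction) */
        uint64_t cntcv = muldiv64_sketch(ticks, cntscr0, 0x01000000);

        /* Back from a CNTCV delta to clock ticks (sse_counter_tick_to_time()) */
        uint64_t back = muldiv64_sketch(cntcv, 0x01000000, cntscr0);

        printf("%llu ticks -> CNTCV +%llu -> %llu ticks\n",
               (unsigned long long)ticks, (unsigned long long)cntcv,
               (unsigned long long)back);
        return 0;
    }

With scaling disabled (CNTCR.SCEN clear) the muldiv64() calls are skipped entirely and CNTCV simply advances one-for-one with the input clock.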
diff --git a/hw/timer/sse-timer.c b/hw/timer/sse-timer.c
new file mode 100644
index 0000000000..8dbe6ac651
--- /dev/null
+++ b/hw/timer/sse-timer.c
@@ -0,0 +1,470 @@
+/*
+ * Arm SSE Subsystem System Timer
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "System timer" which is documented in
+ * the Arm SSE-123 Example Subsystem Technical Reference Manual:
+ * https://developer.arm.com/documentation/101370/latest/
+ *
+ * The timer is based around a simple 64-bit incrementing counter
+ * (readable from CNTPCT_HI/LO). The timer fires when
+ * Counter - CompareValue >= 0.
+ * The CompareValue is guest-writable, via CNTP_CVAL_HI/LO.
+ * CNTP_TVAL is an alternative view of the CompareValue defined by
+ * TimerValue = CompareValue[31:0] - Counter[31:0]
+ * which can be both read and written.
+ * This part is similar to the generic timer in an Arm A-class CPU.
+ *
+ * The timer also has a separate auto-increment timer. When this
+ * timer is enabled, then the AutoIncrValue is set to:
+ * AutoIncrValue = Reload + Counter
+ * and this timer fires when
+ * Counter - AutoIncrValue >= 0
+ * at which point, an interrupt is generated and the new AutoIncrValue
+ * is calculated.
+ * When the auto-increment timer is enabled, interrupt generation
+ * via the compare/timervalue registers is disabled.
+ */
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/timer.h"
+#include "qapi/error.h"
+#include "trace.h"
+#include "hw/timer/sse-timer.h"
+#include "hw/timer/sse-counter.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "hw/registerfields.h"
+#include "hw/clock.h"
+#include "hw/qdev-clock.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
+REG32(CNTPCT_LO, 0x0)
+REG32(CNTPCT_HI, 0x4)
+REG32(CNTFRQ, 0x10)
+REG32(CNTP_CVAL_LO, 0x20)
+REG32(CNTP_CVAL_HI, 0x24)
+REG32(CNTP_TVAL, 0x28)
+REG32(CNTP_CTL, 0x2c)
+ FIELD(CNTP_CTL, ENABLE, 0, 1)
+ FIELD(CNTP_CTL, IMASK, 1, 1)
+ FIELD(CNTP_CTL, ISTATUS, 2, 1)
+REG32(CNTP_AIVAL_LO, 0x40)
+REG32(CNTP_AIVAL_HI, 0x44)
+REG32(CNTP_AIVAL_RELOAD, 0x48)
+REG32(CNTP_AIVAL_CTL, 0x4c)
+ FIELD(CNTP_AIVAL_CTL, EN, 0, 1)
+ FIELD(CNTP_AIVAL_CTL, CLR, 1, 1)
+REG32(CNTP_CFG, 0x50)
+ FIELD(CNTP_CFG, AIVAL, 0, 4)
+#define R_CNTP_CFG_AIVAL_IMPLEMENTED 1
+REG32(PID4, 0xFD0)
+REG32(PID5, 0xFD4)
+REG32(PID6, 0xFD8)
+REG32(PID7, 0xFDC)
+REG32(PID0, 0xFE0)
+REG32(PID1, 0xFE4)
+REG32(PID2, 0xFE8)
+REG32(PID3, 0xFEC)
+REG32(CID0, 0xFF0)
+REG32(CID1, 0xFF4)
+REG32(CID2, 0xFF8)
+REG32(CID3, 0xFFC)
+
+/* PID/CID values */
+static const int timer_id[] = {
+ 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
+ 0xb7, 0xb0, 0x0b, 0x00, /* PID0..PID3 */
+ 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
+};
+
+static bool sse_is_autoinc(SSETimer *s)
+{
+ return (s->cntp_aival_ctl & R_CNTP_AIVAL_CTL_EN_MASK) != 0;
+}
+
+static bool sse_enabled(SSETimer *s)
+{
+ return (s->cntp_ctl & R_CNTP_CTL_ENABLE_MASK) != 0;
+}
+
+static uint64_t sse_cntpct(SSETimer *s)
+{
+ /* Return the CNTPCT value for the current time */
+ return sse_counter_for_timestamp(s->counter,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+}
+
+static bool sse_timer_status(SSETimer *s)
+{
+ /*
+ * Return true if timer condition is met. This is used for both
+ * the CNTP_CTL.ISTATUS bit and for whether (unless masked) we
+ * assert our IRQ.
+ * The documentation is unclear about the behaviour of ISTATUS when
+ * in autoincrement mode; we assume that it follows CNTP_AIVAL_CTL.CLR
+ * (ie whether the autoincrement timer is asserting the interrupt).
+ */
+ if (!sse_enabled(s)) {
+ return false;
+ }
+
+ if (sse_is_autoinc(s)) {
+ return s->cntp_aival_ctl & R_CNTP_AIVAL_CTL_CLR_MASK;
+ } else {
+ return sse_cntpct(s) >= s->cntp_cval;
+ }
+}
+
+static void sse_update_irq(SSETimer *s)
+{
+ bool irqstate = (!(s->cntp_ctl & R_CNTP_CTL_IMASK_MASK) &&
+ sse_timer_status(s));
+
+ qemu_set_irq(s->irq, irqstate);
+}
+
+static void sse_set_timer(SSETimer *s, uint64_t nexttick)
+{
+ /* Set the timer to expire at nexttick */
+ uint64_t expiry = sse_counter_tick_to_time(s->counter, nexttick);
+
+ if (expiry <= INT64_MAX) {
+ timer_mod_ns(&s->timer, expiry);
+ } else {
+ /*
+ * nexttick is so far in the future that it would overflow the
+ * signed 64-bit range of a QEMUTimer. Since timer_mod_ns()
+ * expiry times are absolute, not relative, we are never going
+ * to be able to set the timer to this value, so we must just
+ * assume that guest execution can never run so long that it
+ * reaches the theoretical point when the timer fires.
+ * This is also the code path for "counter is not running",
+ * which is signalled by expiry == UINT64_MAX.
+ */
+ timer_del(&s->timer);
+ }
+}
+
+static void sse_recalc_timer(SSETimer *s)
+{
+ /* Recalculate the normal timer */
+ uint64_t count, nexttick;
+
+ if (sse_is_autoinc(s)) {
+ return;
+ }
+
+ if (!sse_enabled(s)) {
+ timer_del(&s->timer);
+ return;
+ }
+
+ count = sse_cntpct(s);
+
+ if (count >= s->cntp_cval) {
+ /*
+ * Timer condition already met. In theory we have a transition when
+ * the count rolls back over to 0, but that is so far in the future
+ * that it is not representable as a timer_mod() expiry, so in
+ * fact sse_set_timer() will always just delete the timer.
+ */
+ nexttick = UINT64_MAX;
+ } else {
+ /* Next transition is when count hits cval */
+ nexttick = s->cntp_cval;
+ }
+ sse_set_timer(s, nexttick);
+ sse_update_irq(s);
+}
+
+static void sse_autoinc(SSETimer *s)
+{
+ /* Auto-increment the AIVAL, and set the timer accordingly */
+ s->cntp_aival = sse_cntpct(s) + s->cntp_aival_reload;
+ sse_set_timer(s, s->cntp_aival);
+}
+
+static void sse_timer_cb(void *opaque)
+{
+ SSETimer *s = SSE_TIMER(opaque);
+
+ if (sse_is_autoinc(s)) {
+ uint64_t count = sse_cntpct(s);
+
+ if (count >= s->cntp_aival) {
+ /* Timer condition met, set CLR and do another autoinc */
+ s->cntp_aival_ctl |= R_CNTP_AIVAL_CTL_CLR_MASK;
+ s->cntp_aival = count + s->cntp_aival_reload;
+ }
+ sse_set_timer(s, s->cntp_aival);
+ sse_update_irq(s);
+ } else {
+ sse_recalc_timer(s);
+ }
+}
+
+static uint64_t sse_timer_read(void *opaque, hwaddr offset, unsigned size)
+{
+ SSETimer *s = SSE_TIMER(opaque);
+ uint64_t r;
+
+ switch (offset) {
+ case A_CNTPCT_LO:
+ r = extract64(sse_cntpct(s), 0, 32);
+ break;
+ case A_CNTPCT_HI:
+ r = extract64(sse_cntpct(s), 32, 32);
+ break;
+ case A_CNTFRQ:
+ r = s->cntfrq;
+ break;
+ case A_CNTP_CVAL_LO:
+ r = extract64(s->cntp_cval, 0, 32);
+ break;
+ case A_CNTP_CVAL_HI:
+ r = extract64(s->cntp_cval, 32, 32);
+ break;
+ case A_CNTP_TVAL:
+ r = extract64(s->cntp_cval - sse_cntpct(s), 0, 32);
+ break;
+ case A_CNTP_CTL:
+ r = s->cntp_ctl;
+ if (sse_timer_status(s)) {
+ r |= R_CNTP_CTL_ISTATUS_MASK;
+ }
+ break;
+ case A_CNTP_AIVAL_LO:
+ r = extract64(s->cntp_aival, 0, 32);
+ break;
+ case A_CNTP_AIVAL_HI:
+ r = extract64(s->cntp_aival, 32, 32);
+ break;
+ case A_CNTP_AIVAL_RELOAD:
+ r = s->cntp_aival_reload;
+ break;
+ case A_CNTP_AIVAL_CTL:
+ /*
+ * All the bits of AIVAL_CTL are documented as WO, but this is probably
+ * a documentation error. We implement them as readable.
+ */
+ r = s->cntp_aival_ctl;
+ break;
+ case A_CNTP_CFG:
+ r = R_CNTP_CFG_AIVAL_IMPLEMENTED << R_CNTP_CFG_AIVAL_SHIFT;
+ break;
+ case A_PID4 ... A_CID3:
+ r = timer_id[(offset - A_PID4) / 4];
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Timer read: bad offset 0x%x",
+ (unsigned) offset);
+ r = 0;
+ break;
+ }
+
+ trace_sse_timer_read(offset, r, size);
+ return r;
+}
+
+static void sse_timer_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ SSETimer *s = SSE_TIMER(opaque);
+
+ trace_sse_timer_write(offset, value, size);
+
+ switch (offset) {
+ case A_CNTFRQ:
+ s->cntfrq = value;
+ break;
+ case A_CNTP_CVAL_LO:
+ s->cntp_cval = deposit64(s->cntp_cval, 0, 32, value);
+ sse_recalc_timer(s);
+ break;
+ case A_CNTP_CVAL_HI:
+ s->cntp_cval = deposit64(s->cntp_cval, 32, 32, value);
+ sse_recalc_timer(s);
+ break;
+ case A_CNTP_TVAL:
+ s->cntp_cval = sse_cntpct(s) + sextract64(value, 0, 32);
+ sse_recalc_timer(s);
+ break;
+ case A_CNTP_CTL:
+ {
+ uint32_t old_ctl = s->cntp_ctl;
+ value &= R_CNTP_CTL_ENABLE_MASK | R_CNTP_CTL_IMASK_MASK;
+ s->cntp_ctl = value;
+ if ((old_ctl ^ s->cntp_ctl) & R_CNTP_CTL_ENABLE_MASK) {
+ if (sse_enabled(s)) {
+ if (sse_is_autoinc(s)) {
+ sse_autoinc(s);
+ } else {
+ sse_recalc_timer(s);
+ }
+ }
+ }
+ sse_update_irq(s);
+ break;
+ }
+ case A_CNTP_AIVAL_RELOAD:
+ s->cntp_aival_reload = value;
+ break;
+ case A_CNTP_AIVAL_CTL:
+ {
+ uint32_t old_ctl = s->cntp_aival_ctl;
+
+ /* EN bit is writeable; CLR bit is write-0-to-clear, write-1-ignored */
+ s->cntp_aival_ctl &= ~R_CNTP_AIVAL_CTL_EN_MASK;
+ s->cntp_aival_ctl |= value & R_CNTP_AIVAL_CTL_EN_MASK;
+ if (!(value & R_CNTP_AIVAL_CTL_CLR_MASK)) {
+ s->cntp_aival_ctl &= ~R_CNTP_AIVAL_CTL_CLR_MASK;
+ }
+ if ((old_ctl ^ s->cntp_aival_ctl) & R_CNTP_AIVAL_CTL_EN_MASK) {
+ /* Auto-increment toggled on/off */
+ if (sse_enabled(s)) {
+ if (sse_is_autoinc(s)) {
+ sse_autoinc(s);
+ } else {
+ sse_recalc_timer(s);
+ }
+ }
+ }
+ sse_update_irq(s);
+ break;
+ }
+ case A_CNTPCT_LO:
+ case A_CNTPCT_HI:
+ case A_CNTP_CFG:
+ case A_CNTP_AIVAL_LO:
+ case A_CNTP_AIVAL_HI:
+ case A_PID4 ... A_CID3:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Timer write: write to RO offset 0x%x\n",
+ (unsigned)offset);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "SSE System Timer write: bad offset 0x%x\n",
+ (unsigned)offset);
+ break;
+ }
+}
+
+static const MemoryRegionOps sse_timer_ops = {
+ .read = sse_timer_read,
+ .write = sse_timer_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static void sse_timer_reset(DeviceState *dev)
+{
+ SSETimer *s = SSE_TIMER(dev);
+
+ trace_sse_timer_reset();
+
+ timer_del(&s->timer);
+ s->cntfrq = 0;
+ s->cntp_ctl = 0;
+ s->cntp_cval = 0;
+ s->cntp_aival = 0;
+ s->cntp_aival_ctl = 0;
+ s->cntp_aival_reload = 0;
+}
+
+static void sse_timer_counter_callback(Notifier *notifier, void *data)
+{
+ SSETimer *s = container_of(notifier, SSETimer, counter_notifier);
+
+ /* System counter told us we need to recalculate */
+ if (sse_enabled(s)) {
+ if (sse_is_autoinc(s)) {
+ sse_set_timer(s, s->cntp_aival);
+ } else {
+ sse_recalc_timer(s);
+ }
+ }
+}
+
+static void sse_timer_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ SSETimer *s = SSE_TIMER(obj);
+
+ memory_region_init_io(&s->iomem, obj, &sse_timer_ops,
+ s, "sse-timer", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq);
+}
+
+static void sse_timer_realize(DeviceState *dev, Error **errp)
+{
+ SSETimer *s = SSE_TIMER(dev);
+
+ if (!s->counter) {
+ error_setg(errp, "counter property was not set");
+ }
+
+ s->counter_notifier.notify = sse_timer_counter_callback;
+ sse_counter_register_consumer(s->counter, &s->counter_notifier);
+
+ timer_init_ns(&s->timer, QEMU_CLOCK_VIRTUAL, sse_timer_cb, s);
+}
+
+static const VMStateDescription sse_timer_vmstate = {
+ .name = "sse-timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_TIMER(timer, SSETimer),
+ VMSTATE_UINT32(cntfrq, SSETimer),
+ VMSTATE_UINT32(cntp_ctl, SSETimer),
+ VMSTATE_UINT64(cntp_cval, SSETimer),
+ VMSTATE_UINT64(cntp_aival, SSETimer),
+ VMSTATE_UINT32(cntp_aival_ctl, SSETimer),
+ VMSTATE_UINT32(cntp_aival_reload, SSETimer),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property sse_timer_properties[] = {
+ DEFINE_PROP_LINK("counter", SSETimer, counter, TYPE_SSE_COUNTER, SSECounter *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void sse_timer_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = sse_timer_realize;
+ dc->vmsd = &sse_timer_vmstate;
+ dc->reset = sse_timer_reset;
+ device_class_set_props(dc, sse_timer_properties);
+}
+
+static const TypeInfo sse_timer_info = {
+ .name = TYPE_SSE_TIMER,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SSETimer),
+ .instance_init = sse_timer_init,
+ .class_init = sse_timer_class_init,
+};
+
+static void sse_timer_register_types(void)
+{
+ type_register_static(&sse_timer_info);
+}
+
+type_init(sse_timer_register_types);
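Two relationships from the comment at the top of this file are worth making concrete: a CNTP_TVAL write sets CompareValue = Counter + sign-extended TVAL, so a TVAL of N fires roughly N counter ticks from now, and in auto-increment mode each expiry recomputes AIVAL = Counter + Reload. A small illustrative calculation (not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend the low 32 bits, as the CNTP_TVAL write path does */
    static int64_t sext32(uint32_t v)
    {
        return (int64_t)(int32_t)v;
    }

    int main(void)
    {
        uint64_t counter = 0x123456789ULL;      /* current CNTPCT */

        /* Writing TVAL = 0x1000 arms the timer 0x1000 ticks from now:
         * CVAL = Counter + sign-extend(TVAL) */
        uint64_t cval = counter + sext32(0x1000);
        printf("CVAL = 0x%llx, fires once CNTPCT >= CVAL\n",
               (unsigned long long)cval);

        /* Auto-increment mode: AIVAL = Counter + Reload, recomputed from the
         * counter value seen at each expiry */
        uint64_t reload = 500;
        uint64_t aival = counter + reload;
        uint64_t counter_at_expiry = aival;      /* illustrative */
        uint64_t next_aival = counter_at_expiry + reload;
        printf("AIVAL = 0x%llx, next AIVAL = 0x%llx\n",
               (unsigned long long)aival, (unsigned long long)next_aival);
        return 0;
    }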
diff --git a/hw/timer/trace-events b/hw/timer/trace-events
index 7a4326d956..f8b9db25c2 100644
--- a/hw/timer/trace-events
+++ b/hw/timer/trace-events
@@ -93,3 +93,15 @@ avr_timer16_interrupt_count(uint8_t cnt) "count: %u"
avr_timer16_interrupt_overflow(const char *reason) "overflow: %s"
avr_timer16_next_alarm(uint64_t delay_ns) "next alarm: %" PRIu64 " ns from now"
avr_timer16_clksrc_update(uint64_t freq_hz, uint64_t period_ns, uint64_t delay_s) "timer frequency: %" PRIu64 " Hz, period: %" PRIu64 " ns (%" PRId64 " us)"
+
+# sse_counter.c
+sse_counter_control_read(uint64_t offset, uint64_t data, unsigned size) "SSE system counter control frame read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_counter_control_write(uint64_t offset, uint64_t data, unsigned size) "SSE system counter control frame write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"

+sse_counter_status_read(uint64_t offset, uint64_t data, unsigned size) "SSE system counter status frame read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_counter_status_write(uint64_t offset, uint64_t data, unsigned size) "SSE system counter status frame write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_counter_reset(void) "SSE system counter: reset"
+
+# sse_timer.c
+sse_timer_read(uint64_t offset, uint64_t data, unsigned size) "SSE system timer read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_timer_write(uint64_t offset, uint64_t data, unsigned size) "SSE system timer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_timer_reset(void) "SSE system timer: reset"
diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c
index 302f171173..5a2cd46eb7 100644
--- a/hw/watchdog/cmsdk-apb-watchdog.c
+++ b/hw/watchdog/cmsdk-apb-watchdog.c
@@ -310,7 +310,7 @@ static void cmsdk_apb_watchdog_reset(DeviceState *dev)
ptimer_transaction_commit(s->timer);
}
-static void cmsdk_apb_watchdog_clk_update(void *opaque)
+static void cmsdk_apb_watchdog_clk_update(void *opaque, ClockEvent event)
{
CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque);
@@ -329,7 +329,8 @@ static void cmsdk_apb_watchdog_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->wdogint);
s->wdogclk = qdev_init_clock_in(DEVICE(s), "WDOGCLK",
- cmsdk_apb_watchdog_clk_update, s);
+ cmsdk_apb_watchdog_clk_update, s,
+ ClockUpdate);
s->is_luminary = false;
s->id = cmsdk_apb_watchdog_id;
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index 36e8da4fc2..f581cf9fd7 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -57,6 +57,8 @@ void bdrv_dirty_iter_free(BdrvDirtyBitmapIter *iter);
uint64_t bdrv_dirty_bitmap_serialization_size(const BdrvDirtyBitmap *bitmap,
uint64_t offset, uint64_t bytes);
uint64_t bdrv_dirty_bitmap_serialization_align(const BdrvDirtyBitmap *bitmap);
+uint64_t bdrv_dirty_bitmap_serialization_coverage(int serialized_chunk_size,
+ const BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_serialize_part(const BdrvDirtyBitmap *bitmap,
uint8_t *buf, uint64_t offset,
uint64_t bytes);
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 07cfc92936..372d0f2799 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -566,6 +566,7 @@ enum NvmeAdminCommands {
NVME_ADM_CMD_ASYNC_EV_REQ = 0x0c,
NVME_ADM_CMD_ACTIVATE_FW = 0x10,
NVME_ADM_CMD_DOWNLOAD_FW = 0x11,
+ NVME_ADM_CMD_NS_ATTACHMENT = 0x15,
NVME_ADM_CMD_FORMAT_NVM = 0x80,
NVME_ADM_CMD_SECURITY_SEND = 0x81,
NVME_ADM_CMD_SECURITY_RECV = 0x82,
@@ -579,6 +580,7 @@ enum NvmeIoCommands {
NVME_CMD_COMPARE = 0x05,
NVME_CMD_WRITE_ZEROES = 0x08,
NVME_CMD_DSM = 0x09,
+ NVME_CMD_COPY = 0x19,
NVME_CMD_ZONE_MGMT_SEND = 0x79,
NVME_CMD_ZONE_MGMT_RECV = 0x7a,
NVME_CMD_ZONE_APPEND = 0x7d,
@@ -724,9 +726,41 @@ typedef struct QEMU_PACKED NvmeDsmRange {
uint64_t slba;
} NvmeDsmRange;
+enum {
+ NVME_COPY_FORMAT_0 = 0x0,
+};
+
+typedef struct QEMU_PACKED NvmeCopyCmd {
+ uint8_t opcode;
+ uint8_t flags;
+ uint16_t cid;
+ uint32_t nsid;
+ uint32_t rsvd2[4];
+ NvmeCmdDptr dptr;
+ uint64_t sdlba;
+ uint8_t nr;
+ uint8_t control[3];
+ uint16_t rsvd13;
+ uint16_t dspec;
+ uint32_t reftag;
+ uint16_t apptag;
+ uint16_t appmask;
+} NvmeCopyCmd;
+
+typedef struct QEMU_PACKED NvmeCopySourceRange {
+ uint8_t rsvd0[8];
+ uint64_t slba;
+ uint16_t nlb;
+ uint8_t rsvd18[6];
+ uint32_t reftag;
+ uint16_t apptag;
+ uint16_t appmask;
+} NvmeCopySourceRange;
+
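The new structures describe the NVMe Simple Copy command: the data pointer references a list of NvmeCopySourceRange descriptors, nr is the number of ranges minus one (NVMe's usual 0's-based convention, which also applies to nlb inside each range), and sdlba gives the destination start LBA. A hedged sketch of how a two-range request might be laid out using only the structures added above; field values are arbitrary and little-endian conversion is elided:

    /* Illustrative only: fill in a two-range Simple Copy (descriptor format 0) */
    static void demo_build_copy(NvmeCopyCmd *cmd, NvmeCopySourceRange range[2])
    {
        range[0] = (NvmeCopySourceRange) {
            .slba = 0x1000,
            .nlb  = 7,          /* 0's based: 8 logical blocks */
        };
        range[1] = (NvmeCopySourceRange) {
            .slba = 0x2000,
            .nlb  = 15,         /* 0's based: 16 logical blocks */
        };

        *cmd = (NvmeCopyCmd) {
            .opcode = NVME_CMD_COPY,
            .nsid   = 1,
            .sdlba  = 0x8000,   /* destination start LBA */
            .nr     = 1,        /* 0's based: two source ranges */
            /* .dptr would be set up to point at 'range'; PRP/SGL handling,
             * cpu_to_le*() conversions and the control bytes are omitted */
        };
    }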
enum NvmeAsyncEventRequest {
NVME_AER_TYPE_ERROR = 0,
NVME_AER_TYPE_SMART = 1,
+ NVME_AER_TYPE_NOTICE = 2,
NVME_AER_TYPE_IO_SPECIFIC = 6,
NVME_AER_TYPE_VENDOR_SPECIFIC = 7,
NVME_AER_INFO_ERR_INVALID_DB_REGISTER = 0,
@@ -738,6 +772,7 @@ enum NvmeAsyncEventRequest {
NVME_AER_INFO_SMART_RELIABILITY = 0,
NVME_AER_INFO_SMART_TEMP_THRESH = 1,
NVME_AER_INFO_SMART_SPARE_THRESH = 2,
+ NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED = 0,
};
typedef struct QEMU_PACKED NvmeAerResult {
@@ -804,9 +839,13 @@ enum NvmeStatusCodes {
NVME_FEAT_NOT_CHANGEABLE = 0x010e,
NVME_FEAT_NOT_NS_SPEC = 0x010f,
NVME_FW_REQ_SUSYSTEM_RESET = 0x0110,
+ NVME_NS_ALREADY_ATTACHED = 0x0118,
+ NVME_NS_NOT_ATTACHED = 0x011A,
+ NVME_NS_CTRL_LIST_INVALID = 0x011C,
NVME_CONFLICTING_ATTRS = 0x0180,
NVME_INVALID_PROT_INFO = 0x0181,
NVME_WRITE_TO_RO = 0x0182,
+ NVME_CMD_SIZE_LIMIT = 0x0183,
NVME_ZONE_BOUNDARY_ERROR = 0x01b8,
NVME_ZONE_FULL = 0x01b9,
NVME_ZONE_READ_ONLY = 0x01ba,
@@ -903,6 +942,7 @@ enum NvmeLogIdentifier {
NVME_LOG_ERROR_INFO = 0x01,
NVME_LOG_SMART_INFO = 0x02,
NVME_LOG_FW_SLOT_INFO = 0x03,
+ NVME_LOG_CHANGED_NSLIST = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
};
@@ -918,6 +958,7 @@ typedef struct QEMU_PACKED NvmePSD {
uint8_t resv[16];
} NvmePSD;
+#define NVME_CONTROLLER_LIST_SIZE 2048
#define NVME_IDENTIFY_DATA_SIZE 4096
enum NvmeIdCns {
@@ -930,6 +971,7 @@ enum NvmeIdCns {
NVME_ID_CNS_CS_NS_ACTIVE_LIST = 0x07,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
+ NVME_ID_CNS_NS_ATTACHED_CTRL_LIST = 0x12,
NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a,
NVME_ID_CNS_CS_NS_PRESENT = 0x1b,
NVME_ID_CNS_IO_COMMAND_SET = 0x1c,
@@ -994,7 +1036,7 @@ typedef struct QEMU_PACKED NvmeIdCtrl {
uint8_t nvscc;
uint8_t rsvd531;
uint16_t acwu;
- uint8_t rsvd534[2];
+ uint16_t ocfs;
uint32_t sgls;
uint8_t rsvd540[228];
uint8_t subnqn[256];
@@ -1008,10 +1050,25 @@ typedef struct NvmeIdCtrlZoned {
uint8_t rsvd1[4095];
} NvmeIdCtrlZoned;
+typedef struct NvmeIdCtrlNvm {
+ uint8_t vsl;
+ uint8_t wzsl;
+ uint8_t wusl;
+ uint8_t dmrl;
+ uint32_t dmrsl;
+ uint64_t dmsl;
+ uint8_t rsvd16[4080];
+} NvmeIdCtrlNvm;
+
+enum NvmeIdCtrlOaes {
+ NVME_OAES_NS_ATTR = 1 << 8,
+};
+
enum NvmeIdCtrlOacs {
NVME_OACS_SECURITY = 1 << 0,
NVME_OACS_FORMAT = 1 << 1,
NVME_OACS_FW = 1 << 2,
+ NVME_OACS_NS_MGMT = 1 << 3,
};
enum NvmeIdCtrlOncs {
@@ -1022,6 +1079,19 @@ enum NvmeIdCtrlOncs {
NVME_ONCS_FEATURES = 1 << 4,
NVME_ONCS_RESRVATIONS = 1 << 5,
NVME_ONCS_TIMESTAMP = 1 << 6,
+ NVME_ONCS_COPY = 1 << 8,
+};
+
+enum NvmeIdCtrlOcfs {
+ NVME_OCFS_COPY_FORMAT_0 = 1 << 0,
+};
+
+enum NvmeIdctrlVwc {
+ NVME_VWC_PRESENT = 1 << 0,
+ NVME_VWC_NSID_BROADCAST_NO_SUPPORT = 0 << 1,
+ NVME_VWC_NSID_BROADCAST_RESERVED = 1 << 1,
+ NVME_VWC_NSID_BROADCAST_CTRL_SPEC = 2 << 1,
+ NVME_VWC_NSID_BROADCAST_SUPPORT = 3 << 1,
};
enum NvmeIdCtrlFrmw {
@@ -1034,6 +1104,10 @@ enum NvmeIdCtrlLpa {
NVME_LPA_EXTENDED = 1 << 2,
};
+enum NvmeIdCtrlCmic {
+ NVME_CMIC_MULTI_CTRL = 1 << 1,
+};
+
#define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
#define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf)
#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
@@ -1171,7 +1245,10 @@ typedef struct QEMU_PACKED NvmeIdNs {
uint16_t npdg;
uint16_t npda;
uint16_t nows;
- uint8_t rsvd74[30];
+ uint16_t mssrl;
+ uint32_t mcl;
+ uint8_t msrc;
+ uint8_t rsvd81[23];
uint8_t nguid[16];
uint64_t eui64;
NvmeLBAF lbaf[16];
@@ -1199,6 +1276,10 @@ enum NvmeNsIdentifierType {
NVME_NIDT_CSI = 0x04,
};
+enum NvmeIdNsNmic {
+ NVME_NMIC_NS_SHARED = 1 << 0,
+};
+
enum NvmeCsi {
NVME_CSI_NVM = 0x00,
NVME_CSI_ZONED = 0x02,
@@ -1323,6 +1404,7 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeZonedResult) != 8);
QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRange) != 32);
QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64);
@@ -1330,6 +1412,7 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopyCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512);
@@ -1337,6 +1420,7 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeEffectsLog) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlZoned) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlNvm) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4);
QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
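
Editor's note: a small sketch of walking the Copy command's source-range list using the structures added above. It assumes the NR field of NvmeCopyCmd and the NLB field of each NvmeCopySourceRange are 0's based (the NVMe convention) and that the on-wire fields are little-endian; the helper itself is hypothetical.

    /* Hypothetical: total number of logical blocks a Copy command moves. */
    static uint64_t total_copy_length(const NvmeCopyCmd *copy,
                                      const NvmeCopySourceRange *ranges)
    {
        int nr = copy->nr + 1;          /* assumed 0's based range count */
        uint64_t nlb_total = 0;

        for (int i = 0; i < nr; i++) {
            /* NLB is assumed 0's based; convert from little-endian */
            nlb_total += le16_to_cpu(ranges[i].nlb) + 1;
        }
        return nlb_total;
    }
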
diff --git a/include/elf.h b/include/elf.h
index f4fa3c1cd4..78237c9a87 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -206,8 +206,6 @@ typedef struct mips_elf_abiflags_v0 {
#define EM_AARCH64 183
-#define EM_TILEGX 191 /* TILE-Gx */
-
#define EM_MOXIE 223 /* Moxie processor family */
#define EM_MOXIE_OLD 0xFEED
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index b7b3c0ef12..6b036cae8f 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -448,9 +448,6 @@ struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
uint32_t flags; /* flags defining in which context the code was generated */
- uint16_t size; /* size of target code for this block (1 <=
- size <= TARGET_PAGE_SIZE) */
- uint16_t icount;
uint32_t cflags; /* compile flags */
#define CF_COUNT_MASK 0x00007fff
#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
@@ -460,12 +457,18 @@ struct TranslationBlock {
#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
-/* cflags' mask for hashing/comparison, basically ignore CF_INVALID */
-#define CF_HASH_MASK (~CF_INVALID)
/* Per-vCPU dynamic tracing state used to generate this TB */
uint32_t trace_vcpu_dstate;
+ /*
+ * Above fields used for comparing
+ */
+
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
+ uint16_t size;
+ uint16_t icount;
+
struct tb_tc tc;
/* first and second physical page containing code. The lower bit
@@ -510,8 +513,6 @@ struct TranslationBlock {
uintptr_t jmp_dest[2];
};
-extern bool parallel_cpus;
-
/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
@@ -519,10 +520,9 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
}
/* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(void)
+static inline uint32_t curr_cflags(CPUState *cpu)
{
- return (parallel_cpus ? CF_PARALLEL : 0)
- | (icount_enabled() ? CF_USE_ICOUNT : 0);
+ return cpu->tcg_cflags;
}
/* TranslationBlock invalidate API */
@@ -536,7 +536,7 @@ void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
- uint32_t cf_mask);
+ uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
/* GETPC is the true target of the return instruction that we'll execute. */
diff --git a/include/exec/poison.h b/include/exec/poison.h
index d7ae1f23e7..4cd3f8abb4 100644
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -10,6 +10,7 @@
#pragma GCC poison TARGET_ALPHA
#pragma GCC poison TARGET_ARM
#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_HEXAGON
#pragma GCC poison TARGET_HPPA
#pragma GCC poison TARGET_LM32
#pragma GCC poison TARGET_M68K
@@ -30,7 +31,6 @@
#pragma GCC poison TARGET_SH4
#pragma GCC poison TARGET_SPARC
#pragma GCC poison TARGET_SPARC64
-#pragma GCC poison TARGET_TILEGX
#pragma GCC poison TARGET_TRICORE
#pragma GCC poison TARGET_UNICORE32
#pragma GCC poison TARGET_XTENSA
@@ -73,6 +73,7 @@
#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
+#pragma GCC poison CONFIG_HEXAGON_DIS
#pragma GCC poison CONFIG_LM32_DIS
#pragma GCC poison CONFIG_M68K_DIS
#pragma GCC poison CONFIG_MICROBLAZE_DIS
diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h
index 9cf475bb03..29d61ceb34 100644
--- a/include/exec/tb-lookup.h
+++ b/include/exec/tb-lookup.h
@@ -17,30 +17,28 @@
#include "exec/tb-hash.h"
/* Might cause an exception, so have a longjmp destination ready */
-static inline TranslationBlock *
-tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
- uint32_t *flags, uint32_t cf_mask)
+static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
+ target_ulong cs_base,
+ uint32_t flags, uint32_t cflags)
{
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
uint32_t hash;
- cpu_get_tb_cpu_state(env, pc, cs_base, flags);
- hash = tb_jmp_cache_hash_func(*pc);
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
+ /* we should never be trying to look up an INVALID tb */
+ tcg_debug_assert(!(cflags & CF_INVALID));
- cf_mask &= ~CF_CLUSTER_MASK;
- cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
+ hash = tb_jmp_cache_hash_func(pc);
+ tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
if (likely(tb &&
- tb->pc == *pc &&
- tb->cs_base == *cs_base &&
- tb->flags == *flags &&
+ tb->pc == pc &&
+ tb->cs_base == cs_base &&
+ tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
- (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
+ tb_cflags(tb) == cflags)) {
return tb;
}
- tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
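
Editor's note: with the new signature, callers gather pc/cs_base/flags and the cflags themselves rather than having the helper do it. A minimal sketch of a hypothetical caller, using curr_cflags(cpu) from the exec-all.h change above:

    /* Hypothetical caller of the reworked tb_lookup(). */
    static TranslationBlock *lookup_current_tb(CPUState *cpu)
    {
        CPUArchState *env = (CPUArchState *)cpu->env_ptr;
        target_ulong pc, cs_base;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        return tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
    }
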
diff --git a/include/hw/arm/armsse-version.h b/include/hw/arm/armsse-version.h
new file mode 100644
index 0000000000..60780fa984
--- /dev/null
+++ b/include/hw/arm/armsse-version.h
@@ -0,0 +1,42 @@
+/*
+ * ARM SSE (Subsystems for Embedded): IoTKit, SSE-200
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+#ifndef ARMSSE_VERSION_H
+#define ARMSSE_VERSION_H
+
+
+/*
+ * Define an enumeration of the possible values of the sse-version
+ * property implemented by various sub-devices of the SSE, and
+ * a validation function that checks that a valid value has been passed.
+ * These are arbitrary QEMU-internal values (nobody should be creating
+ * the sub-devices of the SSE except for the SSE object itself), but
+ * we pick obvious numbers for the benefit of people debugging with gdb.
+ */
+enum {
+ ARMSSE_IOTKIT = 0,
+ ARMSSE_SSE200 = 200,
+ ARMSSE_SSE300 = 300,
+};
+
+static inline bool armsse_version_valid(uint32_t sse_version)
+{
+ switch (sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#endif
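
Editor's note: a sketch of how a sub-device might use armsse_version_valid() in its realize method. The device type, state struct and field names are illustrative only.

    /* Hypothetical sub-device realize using the shared validation helper. */
    static void my_sse_subdev_realize(DeviceState *dev, Error **errp)
    {
        MySSESubDev *s = MY_SSE_SUBDEV(dev);    /* hypothetical QOM type */

        if (!armsse_version_valid(s->sse_version)) {
            error_setg(errp, "invalid sse-version value %d", s->sse_version);
            return;
        }
        /* ... version-specific setup ... */
    }
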
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
index 09284ca75c..36592be62c 100644
--- a/include/hw/arm/armsse.h
+++ b/include/hw/arm/armsse.h
@@ -97,11 +97,14 @@
#include "hw/misc/tz-mpc.h"
#include "hw/timer/cmsdk-apb-timer.h"
#include "hw/timer/cmsdk-apb-dualtimer.h"
+#include "hw/timer/sse-counter.h"
+#include "hw/timer/sse-timer.h"
#include "hw/watchdog/cmsdk-apb-watchdog.h"
#include "hw/misc/iotkit-sysctl.h"
#include "hw/misc/iotkit-sysinfo.h"
#include "hw/misc/armsse-cpuid.h"
#include "hw/misc/armsse-mhu.h"
+#include "hw/misc/armsse-cpu-pwrctrl.h"
#include "hw/misc/unimp.h"
#include "hw/or-irq.h"
#include "hw/clock.h"
@@ -120,12 +123,14 @@ OBJECT_DECLARE_TYPE(ARMSSE, ARMSSEClass,
*/
#define TYPE_IOTKIT "iotkit"
#define TYPE_SSE200 "sse-200"
+#define TYPE_SSE300 "sse-300"
/* We have an IRQ splitter and an OR gate input for each external PPC
* and the 2 internal PPCs
*/
+#define NUM_INTERNAL_PPCS 2
#define NUM_EXTERNAL_PPCS (IOTS_NUM_AHB_EXP_PPC + IOTS_NUM_APB_EXP_PPC)
-#define NUM_PPCS (NUM_EXTERNAL_PPCS + 2)
+#define NUM_PPCS (NUM_EXTERNAL_PPCS + NUM_INTERNAL_PPCS)
#define MAX_SRAM_BANKS 4
#if MAX_SRAM_BANKS > IOTS_NUM_MPC
@@ -134,15 +139,10 @@ OBJECT_DECLARE_TYPE(ARMSSE, ARMSSEClass,
#define SSE_MAX_CPUS 2
-/* These define what each PPU in the ppu[] index is for */
-#define CPU0CORE_PPU 0
-#define CPU1CORE_PPU 1
-#define DBG_PPU 2
-#define RAM0_PPU 3
-#define RAM1_PPU 4
-#define RAM2_PPU 5
-#define RAM3_PPU 6
-#define NUM_PPUS 7
+#define NUM_PPUS 8
+
+/* Number of CPU IRQs used by the SSE itself */
+#define NUM_SSE_IRQS 32
struct ARMSSE {
/*< private >*/
@@ -152,12 +152,9 @@ struct ARMSSE {
ARMv7MState armv7m[SSE_MAX_CPUS];
CPUClusterState cluster[SSE_MAX_CPUS];
IoTKitSecCtl secctl;
- TZPPC apb_ppc0;
- TZPPC apb_ppc1;
+ TZPPC apb_ppc[NUM_INTERNAL_PPCS];
TZMPC mpc[IOTS_NUM_MPC];
- CMSDKAPBTimer timer0;
- CMSDKAPBTimer timer1;
- CMSDKAPBTimer s32ktimer;
+ CMSDKAPBTimer timer[3];
qemu_or_irq ppc_irq_orgate;
SplitIRQ sec_resp_splitter;
SplitIRQ ppc_irq_splitter[NUM_PPCS];
@@ -165,24 +162,27 @@ struct ARMSSE {
qemu_or_irq mpc_irq_orgate;
qemu_or_irq nmi_orgate;
- SplitIRQ cpu_irq_splitter[32];
+ SplitIRQ cpu_irq_splitter[NUM_SSE_IRQS];
CMSDKAPBDualTimer dualtimer;
- CMSDKAPBWatchdog s32kwatchdog;
- CMSDKAPBWatchdog nswatchdog;
- CMSDKAPBWatchdog swatchdog;
+ CMSDKAPBWatchdog cmsdk_watchdog[3];
+
+ SSECounter sse_counter;
+ SSETimer sse_timer[4];
IoTKitSysCtl sysctl;
IoTKitSysCtl sysinfo;
ARMSSEMHU mhu[2];
- UnimplementedDeviceState ppu[NUM_PPUS];
+ UnimplementedDeviceState unimp[NUM_PPUS];
UnimplementedDeviceState cachectrl[SSE_MAX_CPUS];
UnimplementedDeviceState cpusecctrl[SSE_MAX_CPUS];
ARMSSECPUID cpuid[SSE_MAX_CPUS];
+ ARMSSECPUPwrCtrl cpu_pwrctrl[SSE_MAX_CPUS];
+
/*
* 'container' holds all devices seen by all CPUs.
* 'cpu_container[i]' is the view that CPU i has: this has the
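
Editor's note: with the timers and watchdogs now held in arrays rather than named fields, the SoC code can initialize them in loops. A sketch with illustrative child names (not the exact code in armsse.c):

    int i;

    for (i = 0; i < ARRAY_SIZE(s->timer); i++) {
        g_autofree char *name = g_strdup_printf("timer%d", i);
        object_initialize_child(OBJECT(s), name, &s->timer[i],
                                TYPE_CMSDK_APB_TIMER);
    }
    for (i = 0; i < ARRAY_SIZE(s->cmsdk_watchdog); i++) {
        g_autofree char *name = g_strdup_printf("watchdog%d", i);
        object_initialize_child(OBJECT(s), name, &s->cmsdk_watchdog[i],
                                TYPE_CMSDK_APB_WATCHDOG);
    }
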
diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h
index 11cfe6e358..9359d6da33 100644
--- a/include/hw/arm/aspeed_soc.h
+++ b/include/hw/arm/aspeed_soc.h
@@ -28,6 +28,7 @@
#include "hw/sd/aspeed_sdhci.h"
#include "hw/usb/hcd-ehci.h"
#include "qom/object.h"
+#include "hw/misc/aspeed_lpc.h"
#define ASPEED_SPIS_NUM 2
#define ASPEED_EHCIS_NUM 2
@@ -61,6 +62,7 @@ struct AspeedSoCState {
AspeedGPIOState gpio_1_8v;
AspeedSDHCIState sdhci;
AspeedSDHCIState emmc;
+ AspeedLPCState lpc;
};
#define TYPE_ASPEED_SOC "aspeed-soc"
@@ -130,6 +132,7 @@ enum {
ASPEED_DEV_SDRAM,
ASPEED_DEV_XDMA,
ASPEED_DEV_EMMC,
+ ASPEED_DEV_KCS,
};
#endif /* ASPEED_SOC_H */
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index 0678b419a2..1676a84ec8 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -35,6 +35,7 @@
#include "target/arm/cpu.h"
#include "qom/object.h"
#include "net/can_emu.h"
+#include "hw/dma/xlnx_csu_dma.h"
#define TYPE_XLNX_ZYNQMP "xlnx,zynqmp"
OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
@@ -60,7 +61,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
#define XLNX_ZYNQMP_GIC_REGIONS 6
-/* ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
+/*
+ * ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
* and under-decodes the 64k region. This mirrors the 4k regions to every 4k
* aligned address in the 64k region. To implement each GIC region needs a
* number of memory region aliases.
@@ -107,6 +109,7 @@ struct XlnxZynqMPState {
XlnxZynqMPRTC rtc;
XlnxZDMA gdma[XLNX_ZYNQMP_NUM_GDMA_CH];
XlnxZDMA adma[XLNX_ZYNQMP_NUM_ADMA_CH];
+ XlnxCSUDMA qspi_dma;
char *boot_cpu;
ARMCPU *boot_cpu_ptr;
diff --git a/include/hw/clock.h b/include/hw/clock.h
index e5f45e2626..a7187eab95 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -22,7 +22,18 @@
#define TYPE_CLOCK "clock"
OBJECT_DECLARE_SIMPLE_TYPE(Clock, CLOCK)
-typedef void ClockCallback(void *opaque);
+/*
+ * Argument to ClockCallback functions indicating why the callback
+ * has been called. A mask of these values logically ORed together
+ * is used to specify which events are interesting when the callback
+ * is registered, so these values must all be different bit values.
+ */
+typedef enum ClockEvent {
+ ClockUpdate = 1, /* Clock period has just updated */
+ ClockPreUpdate = 2, /* Clock period is about to update */
+} ClockEvent;
+
+typedef void ClockCallback(void *opaque, ClockEvent event);
/*
* clock store a value representing the clock's period in 2^-32ns unit.
@@ -50,6 +61,7 @@ typedef void ClockCallback(void *opaque);
* @canonical_path: clock path string cache (used for trace purpose)
* @callback: called when clock changes
* @callback_opaque: argument for @callback
+ * @callback_events: mask of events when callback should be called
* @source: source (or parent in clock tree) of the clock
* @children: list of clocks connected to this one (it is their source)
* @sibling: structure used to form a clock list
@@ -67,6 +79,7 @@ struct Clock {
char *canonical_path;
ClockCallback *callback;
void *callback_opaque;
+ unsigned int callback_events;
/* Clocks are organized in a clock tree */
Clock *source;
@@ -114,10 +127,15 @@ Clock *clock_new(Object *parent, const char *name);
* @clk: the clock to register the callback into
* @cb: the callback function
* @opaque: the argument to the callback
+ * @events: the events the callback should be called for
+ * (logical OR of ClockEvent enum values)
*
* Register a callback called on every clock update.
+ * Note that a clock has only one callback: you cannot register
+ * different callback functions for different events.
*/
-void clock_set_callback(Clock *clk, ClockCallback *cb, void *opaque);
+void clock_set_callback(Clock *clk, ClockCallback *cb,
+ void *opaque, unsigned int events);
/**
* clock_clear_callback:
@@ -269,6 +287,47 @@ static inline uint64_t clock_ticks_to_ns(const Clock *clk, uint64_t ticks)
}
/**
+ * clock_ns_to_ticks:
+ * @clk: the clock to query
+ * @ns: duration in nanoseconds
+ *
+ * Returns the number of ticks this clock would make in the given
+ * number of nanoseconds. Because a clock can have a period which
+ * is not a whole number of nanoseconds, it is important to use this
+ * function rather than attempting to obtain a "period in nanoseconds"
+ * value and then dividing the duration by that value.
+ *
+ * If the clock is stopped (ie it has period zero), returns 0.
+ *
+ * For some inputs the result could overflow a 64-bit value (because
+ * the clock's period is short and the duration is long). In these
+ * cases we truncate the result to a 64-bit value. This is on the
+ * assumption that generally the result is going to be used to report
+ * a 32-bit or 64-bit guest register value, so wrapping either cannot
+ * happen or is the desired behaviour.
+ */
+static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
+{
+ /*
+ * ticks = duration_in_ns / period_in_ns
+ * = ns / (period / 2^32)
+ * = (ns * 2^32) / period
+ * The hi, lo inputs to divu128() are (ns << 32) as a 128 bit value.
+ */
+ uint64_t lo = ns << 32;
+ uint64_t hi = ns >> 32;
+ if (clk->period == 0) {
+ return 0;
+ }
+ /*
+ * Ignore divu128() return value as we've caught div-by-zero and don't
+ * need different behaviour for overflow.
+ */
+ divu128(&lo, &hi, clk->period);
+ return lo;
+}
+
+/**
* clock_is_enabled:
* @clk: a clock
*
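
Editor's note: a minimal sketch of a device using the new event-based callback registration and clock_ns_to_ticks(). MyDevState, its fields and the QOM plumbing are hypothetical; the clock API calls follow the declarations above.

    /* Hypothetical device state: Clock *clk, int64_t start_ns, ... */
    static void mydev_clk_update(void *opaque, ClockEvent event)
    {
        MyDevState *s = opaque;
        /* Only ClockUpdate was requested, so 'event' needs no check here. */
        /* ... re-read the clock period and reprogram any timers ... */
    }

    static uint64_t mydev_read_counter(MyDevState *s)
    {
        int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

        /* Ticks elapsed since the counter started; 0 if the clock is off. */
        return clock_ns_to_ticks(s->clk, now - s->start_ns);
    }

    static void mydev_init(MyDevState *s, Clock *clk)
    {
        s->clk = clk;
        clock_set_callback(s->clk, mydev_clk_update, s, ClockUpdate);
    }
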
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index c005d3dc2d..c68bc3ba8a 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -282,6 +282,7 @@ struct qemu_work_item;
* to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
* be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
* QOM parent.
+ * @tcg_cflags: Pre-computed cflags for this cpu.
* @nr_cores: Number of cores within this CPU package.
* @nr_threads: Number of threads within this CPU.
* @running: #true if CPU is currently running (lockless).
@@ -412,6 +413,7 @@ struct CPUState {
/* TODO Move common fields from CPUArchState here. */
int cpu_index;
int cluster_index;
+ uint32_t tcg_cflags;
uint32_t halted;
uint32_t can_do_io;
int32_t exception_index;
diff --git a/include/hw/dma/xlnx_csu_dma.h b/include/hw/dma/xlnx_csu_dma.h
new file mode 100644
index 0000000000..204d94c673
--- /dev/null
+++ b/include/hw/dma/xlnx_csu_dma.h
@@ -0,0 +1,52 @@
+/*
+ * Xilinx Platform CSU Stream DMA emulation
+ *
+ * This implementation is based on
+ * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XLNX_CSU_DMA_H
+#define XLNX_CSU_DMA_H
+
+#define TYPE_XLNX_CSU_DMA "xlnx.csu_dma"
+
+#define XLNX_CSU_DMA_R_MAX (0x2c / 4)
+
+typedef struct XlnxCSUDMA {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ MemTxAttrs attr;
+ MemoryRegion *dma_mr;
+ AddressSpace *dma_as;
+ qemu_irq irq;
+ StreamSink *tx_dev; /* Used as generic StreamSink */
+ ptimer_state *src_timer;
+
+ uint16_t width;
+ bool is_dst;
+ bool r_size_last_word;
+
+ StreamCanPushNotifyFn notify;
+ void *notify_opaque;
+
+ uint32_t regs[XLNX_CSU_DMA_R_MAX];
+ RegisterInfo regs_info[XLNX_CSU_DMA_R_MAX];
+} XlnxCSUDMA;
+
+#define XLNX_CSU_DMA(obj) \
+ OBJECT_CHECK(XlnxCSUDMA, (obj), TYPE_XLNX_CSU_DMA)
+
+#endif
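
Editor's note: a sketch of wiring the new DMA model into an SoC as a sysbus device. The MMIO address, IRQ number and parent state are purely illustrative, not the real ZynqMP memory map.

    /* Hypothetical SoC realize fragment. */
    object_initialize_child(OBJECT(s), "qspi-dma", &s->qspi_dma,
                            TYPE_XLNX_CSU_DMA);
    if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi_dma), errp)) {
        return;
    }
    sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi_dma), 0, 0xff0f0800 /* example */);
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0,
                       qdev_get_gpio_in(gic_dev, 15 /* example IRQ */));
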
diff --git a/include/hw/misc/armsse-cpu-pwrctrl.h b/include/hw/misc/armsse-cpu-pwrctrl.h
new file mode 100644
index 0000000000..51d45ede7d
--- /dev/null
+++ b/include/hw/misc/armsse-cpu-pwrctrl.h
@@ -0,0 +1,40 @@
+/*
+ * ARM SSE CPU PWRCTRL register block
+ *
+ * Copyright (c) 2021 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "CPU<N>_PWRCTRL block" which is part of the
+ * Arm Corstone SSE-300 Example Subsystem and documented in
+ * https://developer.arm.com/documentation/101773/0000
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: the register bank
+ */
+
+#ifndef HW_MISC_ARMSSE_CPU_PWRCTRL_H
+#define HW_MISC_ARMSSE_CPU_PWRCTRL_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_ARMSSE_CPU_PWRCTRL "armsse-cpu-pwrctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(ARMSSECPUPwrCtrl, ARMSSE_CPU_PWRCTRL)
+
+struct ARMSSECPUPwrCtrl {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+
+ uint32_t cpupwrcfg;
+};
+
+#endif
diff --git a/include/hw/misc/aspeed_lpc.h b/include/hw/misc/aspeed_lpc.h
new file mode 100644
index 0000000000..df418cfcd3
--- /dev/null
+++ b/include/hw/misc/aspeed_lpc.h
@@ -0,0 +1,47 @@
+/*
+ * ASPEED LPC Controller
+ *
+ * Copyright (C) 2017-2018 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef ASPEED_LPC_H
+#define ASPEED_LPC_H
+
+#include "hw/sysbus.h"
+
+#include <stdint.h>
+
+#define TYPE_ASPEED_LPC "aspeed.lpc"
+#define ASPEED_LPC(obj) OBJECT_CHECK(AspeedLPCState, (obj), TYPE_ASPEED_LPC)
+
+#define ASPEED_LPC_NR_REGS (0x260 >> 2)
+
+enum aspeed_lpc_subdevice {
+ aspeed_lpc_kcs_1 = 0,
+ aspeed_lpc_kcs_2,
+ aspeed_lpc_kcs_3,
+ aspeed_lpc_kcs_4,
+ aspeed_lpc_ibt,
+};
+
+#define ASPEED_LPC_NR_SUBDEVS 5
+
+typedef struct AspeedLPCState {
+ /* <private> */
+ SysBusDevice parent;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ qemu_irq subdevice_irqs[ASPEED_LPC_NR_SUBDEVS];
+ uint32_t subdevice_irqs_pending;
+
+ uint32_t regs[ASPEED_LPC_NR_REGS];
+ uint32_t hicr7;
+} AspeedLPCState;
+
+#endif /* _ASPEED_LPC_H_ */
diff --git a/include/hw/misc/iotkit-secctl.h b/include/hw/misc/iotkit-secctl.h
index 227d44abe4..79a3628320 100644
--- a/include/hw/misc/iotkit-secctl.h
+++ b/include/hw/misc/iotkit-secctl.h
@@ -120,6 +120,8 @@ struct IoTKitSecCtl {
IoTKitSecCtlPPC apb[IOTS_NUM_APB_PPC];
IoTKitSecCtlPPC apbexp[IOTS_NUM_APB_EXP_PPC];
IoTKitSecCtlPPC ahbexp[IOTS_NUM_APB_EXP_PPC];
+
+ uint32_t sse_version;
};
#endif
diff --git a/include/hw/misc/iotkit-sysctl.h b/include/hw/misc/iotkit-sysctl.h
index 2bc391138d..481e27f4db 100644
--- a/include/hw/misc/iotkit-sysctl.h
+++ b/include/hw/misc/iotkit-sysctl.h
@@ -17,9 +17,8 @@
* "system control register" blocks.
*
* QEMU interface:
- * + QOM property "SYS_VERSION": value of the SYS_VERSION register of the
- * system information block of the SSE
- * (used to identify whether to provide SSE-200-only registers)
+ * + QOM property "sse-version": indicates which SSE version this is part of
+ * (used to identify whether to provide SSE-200-only registers, etc)
* + sysbus MMIO region 0: the system information register bank
* + sysbus MMIO region 1: the system control register bank
*/
@@ -54,19 +53,21 @@ struct IoTKitSysCtl {
uint32_t initsvtor1;
uint32_t nmi_enable;
uint32_t ewctrl;
+ uint32_t pwrctrl;
uint32_t pdcm_pd_sys_sense;
uint32_t pdcm_pd_sram0_sense;
uint32_t pdcm_pd_sram1_sense;
uint32_t pdcm_pd_sram2_sense;
uint32_t pdcm_pd_sram3_sense;
+ uint32_t pdcm_pd_cpu0_sense;
+ uint32_t pdcm_pd_vmr0_sense;
+ uint32_t pdcm_pd_vmr1_sense;
/* Properties */
- uint32_t sys_version;
+ uint32_t sse_version;
uint32_t cpuwait_rst;
uint32_t initsvtor0_rst;
uint32_t initsvtor1_rst;
-
- bool is_sse200;
};
#endif
diff --git a/include/hw/misc/iotkit-sysinfo.h b/include/hw/misc/iotkit-sysinfo.h
index 055771d209..91c23f90d2 100644
--- a/include/hw/misc/iotkit-sysinfo.h
+++ b/include/hw/misc/iotkit-sysinfo.h
@@ -38,6 +38,8 @@ struct IoTKitSysInfo {
/* Properties */
uint32_t sys_version;
uint32_t sys_config;
+ uint32_t sse_version;
+ uint32_t iidr;
};
#endif
diff --git a/include/hw/misc/mps2-fpgaio.h b/include/hw/misc/mps2-fpgaio.h
index e04fd590b6..7b8bd604de 100644
--- a/include/hw/misc/mps2-fpgaio.h
+++ b/include/hw/misc/mps2-fpgaio.h
@@ -39,10 +39,12 @@ struct MPS2FPGAIO {
LEDState *led[MPS2FPGAIO_MAX_LEDS];
uint32_t num_leds;
bool has_switches;
+ bool has_dbgctrl;
uint32_t led0;
uint32_t prescale;
uint32_t misc;
+ uint32_t dbgctrl;
/* QEMU_CLOCK_VIRTUAL time at which counter and pscntr were last synced */
int64_t pscntr_sync_ticks;
diff --git a/include/hw/qdev-clock.h b/include/hw/qdev-clock.h
index 64ca4d266f..ffa0f7ba09 100644
--- a/include/hw/qdev-clock.h
+++ b/include/hw/qdev-clock.h
@@ -22,6 +22,8 @@
* @name: the name of the clock (can't be NULL).
* @callback: optional callback to be called on update or NULL.
* @opaque: argument for the callback
+ * @events: the events the callback should be called for
+ * (logical OR of ClockEvent enum values)
* @returns: a pointer to the newly added clock
*
* Add an input clock to device @dev as a clock named @name.
@@ -29,7 +31,8 @@
* The callback will be called with @opaque as opaque parameter.
*/
Clock *qdev_init_clock_in(DeviceState *dev, const char *name,
- ClockCallback *callback, void *opaque);
+ ClockCallback *callback, void *opaque,
+ unsigned int events);
/**
* qdev_init_clock_out:
@@ -105,6 +108,7 @@ void qdev_finalize_clocklist(DeviceState *dev);
* @output: indicates whether the clock is input or output
* @callback: for inputs, optional callback to be called on clock's update
* with device as opaque
+ * @callback_events: mask of ClockEvent values for when callback is called
* @offset: optional offset to store the ClockIn or ClockOut pointer in device
* state structure (0 means unused)
*/
@@ -112,6 +116,7 @@ struct ClockPortInitElem {
const char *name;
bool is_output;
ClockCallback *callback;
+ unsigned int callback_events;
size_t offset;
};
@@ -119,10 +124,11 @@ struct ClockPortInitElem {
(offsetof(devstate, field) + \
type_check(Clock *, typeof_field(devstate, field)))
-#define QDEV_CLOCK(out_not_in, devstate, field, cb) { \
+#define QDEV_CLOCK(out_not_in, devstate, field, cb, cbevents) { \
.name = (stringify(field)), \
.is_output = out_not_in, \
.callback = cb, \
+ .callback_events = cbevents, \
.offset = clock_offset_value(devstate, field), \
}
@@ -133,14 +139,15 @@ struct ClockPortInitElem {
* @field: a field in @_devstate (must be Clock*)
* @callback: (for input only) callback (or NULL) to be called with the device
* state as argument
+ * @cbevents: (for input only) ClockEvent mask for when callback is called
*
* The name of the clock will be derived from @field
*/
-#define QDEV_CLOCK_IN(devstate, field, callback) \
- QDEV_CLOCK(false, devstate, field, callback)
+#define QDEV_CLOCK_IN(devstate, field, callback, cbevents) \
+ QDEV_CLOCK(false, devstate, field, callback, cbevents)
#define QDEV_CLOCK_OUT(devstate, field) \
- QDEV_CLOCK(true, devstate, field, NULL)
+ QDEV_CLOCK(true, devstate, field, NULL, 0)
#define QDEV_CLOCK_END { .name = NULL }
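
Editor's note: a sketch of a clock port table using the extra 'cbevents' argument introduced above. MyDevState, MYDEV and mydev_clk_update are hypothetical names.

    /* Hypothetical device declaring one input and one output clock. */
    static const ClockPortInitArray mydev_clocks = {
        QDEV_CLOCK_IN(MyDevState, clk, mydev_clk_update, ClockUpdate),
        QDEV_CLOCK_OUT(MyDevState, clk_out),
        QDEV_CLOCK_END
    };

    static void mydev_init(Object *obj)
    {
        MyDevState *s = MYDEV(obj);
        qdev_init_clocks(DEVICE(s), mydev_clocks);
    }
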
diff --git a/include/hw/scsi/esp.h b/include/hw/scsi/esp.h
index d8a6263c13..95088490aa 100644
--- a/include/hw/scsi/esp.h
+++ b/include/hw/scsi/esp.h
@@ -3,6 +3,7 @@
#include "hw/scsi/scsi.h"
#include "hw/sysbus.h"
+#include "qemu/fifo8.h"
#include "qom/object.h"
/* esp.c */
@@ -10,19 +11,17 @@
typedef void (*ESPDMAMemoryReadWriteFunc)(void *opaque, uint8_t *buf, int len);
#define ESP_REGS 16
-#define TI_BUFSZ 16
-#define ESP_CMDBUF_SZ 32
+#define ESP_FIFO_SZ 16
+#define ESP_CMDFIFO_SZ 32
typedef struct ESPState ESPState;
-enum pdma_origin_id {
- PDMA,
- TI,
- CMD,
- ASYNC,
-};
+#define TYPE_ESP "esp"
+OBJECT_DECLARE_SIMPLE_TYPE(ESPState, ESP)
struct ESPState {
+ DeviceState parent_obj;
+
uint8_t rregs[ESP_REGS];
uint8_t wregs[ESP_REGS];
qemu_irq irq;
@@ -30,24 +29,18 @@ struct ESPState {
uint8_t chip_id;
bool tchi_written;
int32_t ti_size;
- uint32_t ti_rptr, ti_wptr;
uint32_t status;
- uint32_t deferred_status;
- bool deferred_complete;
uint32_t dma;
- uint8_t ti_buf[TI_BUFSZ];
+ Fifo8 fifo;
SCSIBus bus;
SCSIDevice *current_dev;
SCSIRequest *current_req;
- uint8_t cmdbuf[ESP_CMDBUF_SZ];
- uint32_t cmdlen;
+ Fifo8 cmdfifo;
+ uint8_t cmdfifo_cdb_offset;
uint32_t do_cmd;
- /* The amount of data left in the current DMA transfer. */
- uint32_t dma_left;
- /* The size of the current DMA transfer. Zero if no transfer is in
- progress. */
- uint32_t dma_counter;
+ bool data_in_ready;
+ uint8_t ti_cmd;
int dma_enabled;
uint32_t async_len;
@@ -57,16 +50,22 @@ struct ESPState {
ESPDMAMemoryReadWriteFunc dma_memory_write;
void *dma_opaque;
void (*dma_cb)(ESPState *s);
- uint8_t pdma_buf[32];
- int pdma_origin;
- uint32_t pdma_len;
- uint32_t pdma_start;
- uint32_t pdma_cur;
void (*pdma_cb)(ESPState *s);
+
+ uint8_t mig_version_id;
+
+ /* Legacy fields for vmstate_esp version < 5 */
+ uint32_t mig_dma_left;
+ uint32_t mig_deferred_status;
+ bool mig_deferred_complete;
+ uint32_t mig_ti_rptr, mig_ti_wptr;
+ uint8_t mig_ti_buf[ESP_FIFO_SZ];
+ uint8_t mig_cmdbuf[ESP_CMDFIFO_SZ];
+ uint32_t mig_cmdlen;
};
-#define TYPE_ESP "esp"
-OBJECT_DECLARE_SIMPLE_TYPE(SysBusESPState, ESP)
+#define TYPE_SYSBUS_ESP "sysbus-esp"
+OBJECT_DECLARE_SIMPLE_TYPE(SysBusESPState, SYSBUS_ESP)
struct SysBusESPState {
/*< private >*/
@@ -142,6 +141,7 @@ struct SysBusESPState {
#define INTR_RST 0x80
#define SEQ_0 0x0
+#define SEQ_MO 0x1
#define SEQ_CD 0x4
#define CFG1_RESREPT 0x40
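
Editor's note: the ESP rework replaces the hand-rolled ti_buf/cmdbuf pointers with the generic Fifo8 type. A short, self-contained illustration of that API (not code from esp.c):

    #include "qemu/fifo8.h"

    static void fifo_demo(void)
    {
        Fifo8 fifo;

        fifo8_create(&fifo, ESP_FIFO_SZ);   /* allocates a 16-byte FIFO */
        fifo8_push(&fifo, 0xc0);
        fifo8_push(&fifo, 0xde);

        while (!fifo8_is_empty(&fifo)) {
            uint8_t byte = fifo8_pop(&fifo);
            (void)byte;                     /* consume in FIFO order */
        }
        fifo8_destroy(&fifo);
    }
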
diff --git a/include/hw/sh4/sh.h b/include/hw/sh4/sh.h
index 93f464bf4c..becb596979 100644
--- a/include/hw/sh4/sh.h
+++ b/include/hw/sh4/sh.h
@@ -1,6 +1,31 @@
-#ifndef QEMU_SH_H
-#define QEMU_SH_H
-/* Definitions for SH board emulation. */
+/*
+ * Definitions for SH board emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef QEMU_HW_SH_H
+#define QEMU_HW_SH_H
#include "hw/sh4/sh_intc.h"
#include "target/sh4/cpu-qom.h"
diff --git a/include/hw/ssi/xilinx_spips.h b/include/hw/ssi/xilinx_spips.h
index 3eae73480e..06bfd18312 100644
--- a/include/hw/ssi/xilinx_spips.h
+++ b/include/hw/ssi/xilinx_spips.h
@@ -34,7 +34,7 @@
typedef struct XilinxSPIPS XilinxSPIPS;
#define XLNX_SPIPS_R_MAX (0x100 / 4)
-#define XLNX_ZYNQMP_SPIPS_R_MAX (0x830 / 4)
+#define XLNX_ZYNQMP_SPIPS_R_MAX (0x200 / 4)
/* Bite off 4k chunks at a time */
#define LQSPI_CACHE_SIZE 1024
diff --git a/include/hw/timer/sse-counter.h b/include/hw/timer/sse-counter.h
new file mode 100644
index 0000000000..b433e58d37
--- /dev/null
+++ b/include/hw/timer/sse-counter.h
@@ -0,0 +1,105 @@
+/*
+ * Arm SSE Subsystem System Counter
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "System counter" which is documented in
+ * the Arm SSE-123 Example Subsystem Technical Reference Manual:
+ * https://developer.arm.com/documentation/101370/latest/
+ *
+ * QEMU interface:
+ * + Clock input "CLK": clock
+ * + sysbus MMIO region 0: the control register frame
+ * + sysbus MMIO region 1: the status register frame
+ *
+ * Consumers of the system counter's timestamp, such as the SSE
+ * System Timer device, can also use the APIs sse_counter_for_timestamp(),
+ * sse_counter_tick_to_time() and sse_counter_register_consumer() to
+ * interact with an instance of the System Counter. Generally the
+ * consumer device should have a QOM link property which the board
+ * code can set to the appropriate instance of the system counter.
+ */
+
+#ifndef SSE_COUNTER_H
+#define SSE_COUNTER_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "qemu/notify.h"
+
+#define TYPE_SSE_COUNTER "sse-counter"
+OBJECT_DECLARE_SIMPLE_TYPE(SSECounter, SSE_COUNTER)
+
+struct SSECounter {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion control_mr;
+ MemoryRegion status_mr;
+ Clock *clk;
+ NotifierList notifier_list;
+
+ uint32_t cntcr;
+ uint32_t cntscr0;
+
+ /*
+ * These are used for handling clock frequency changes: they are a
+ * tuple of (QEMU_CLOCK_VIRTUAL timestamp, CNTCV at that time),
+ * taken when the clock frequency changes. sse_cntcv() needs them
+ * to calculate the current CNTCV.
+ */
+ uint64_t ns_then;
+ uint64_t ticks_then;
+};
+
+/*
+ * These functions are the interface by which a consumer of
+ * the system timestamp (such as the SSE system timer device)
+ * can communicate with the SSECounter.
+ */
+
+/**
+ * sse_counter_for_timestamp:
+ * @counter: SSECounter
+ * @ns: timestamp of QEMU_CLOCK_VIRTUAL in nanoseconds
+ *
+ * Returns the value of the timestamp counter at the specified
+ * point in time (assuming that no changes to scale factor, enable, etc
+ * happen in the meantime).
+ */
+uint64_t sse_counter_for_timestamp(SSECounter *counter, uint64_t ns);
+
+/**
+ * sse_counter_tick_to_time:
+ * @counter: SSECounter
+ * @tick: tick value
+ *
+ * Returns the time (a QEMU_CLOCK_VIRTUAL timestamp in nanoseconds)
+ * when the timestamp counter will reach the specified tick count.
+ * If the counter is not currently running, returns UINT64_MAX.
+ */
+uint64_t sse_counter_tick_to_time(SSECounter *counter, uint64_t tick);
+
+/**
+ * sse_counter_register_consumer:
+ * @counter: SSECounter
+ * @notifier: Notifier which is notified on counter changes
+ *
+ * Registers @notifier with the SSECounter. When the counter's
+ * configuration changes in a way that might invalidate information
+ * previously returned via sse_counter_for_timestamp() or
+ * sse_counter_tick_to_time(), the notifier will be called.
+ * Devices which consume the timestamp counter can use this as
+ * a cue to recalculate timer events.
+ */
+void sse_counter_register_consumer(SSECounter *counter, Notifier *notifier);
+
+#endif
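
Editor's note: a sketch of a hypothetical consumer of the counter (for example a timer model), following the API contract documented above: convert a target tick count into a QEMU_CLOCK_VIRTUAL deadline and re-arm when the counter's configuration changes. MyTimerState and its fields are illustrative.

    /* Hypothetical state: SSECounter *counter; uint64_t target_tick;
     * QEMUTimer timer; Notifier counter_notifier; */
    static void mytimer_recalc(MyTimerState *s)
    {
        uint64_t deadline = sse_counter_tick_to_time(s->counter, s->target_tick);

        if (deadline == UINT64_MAX) {
            timer_del(&s->timer);            /* counter not running */
        } else {
            timer_mod_ns(&s->timer, deadline);
        }
    }

    static void mytimer_counter_changed(Notifier *n, void *data)
    {
        MyTimerState *s = container_of(n, MyTimerState, counter_notifier);
        mytimer_recalc(s);
    }

    /* At init time: */
    /*   s->counter_notifier.notify = mytimer_counter_changed;          */
    /*   sse_counter_register_consumer(s->counter, &s->counter_notifier); */
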
diff --git a/include/hw/timer/sse-timer.h b/include/hw/timer/sse-timer.h
new file mode 100644
index 0000000000..b4ee8e7f6c
--- /dev/null
+++ b/include/hw/timer/sse-timer.h
@@ -0,0 +1,53 @@
+/*
+ * Arm SSE Subsystem System Timer
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "System timer" which is documented in
+ * the Arm SSE-123 Example Subsystem Technical Reference Manual:
+ * https://developer.arm.com/documentation/101370/latest/
+ *
+ * QEMU interface:
+ * + QOM property "counter": link property to be set to the
+ * TYPE_SSE_COUNTER timestamp counter device this timer runs off
+ * + sysbus MMIO region 0: the register bank
+ * + sysbus IRQ 0: timer interrupt
+ */
+
+#ifndef SSE_TIMER_H
+#define SSE_TIMER_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "hw/timer/sse-counter.h"
+
+#define TYPE_SSE_TIMER "sse-timer"
+OBJECT_DECLARE_SIMPLE_TYPE(SSETimer, SSE_TIMER)
+
+struct SSETimer {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ qemu_irq irq;
+ SSECounter *counter;
+ QEMUTimer timer;
+ Notifier counter_notifier;
+
+ uint32_t cntfrq;
+ uint32_t cntp_ctl;
+ uint64_t cntp_cval;
+ uint64_t cntp_aival;
+ uint32_t cntp_aival_ctl;
+ uint32_t cntp_aival_reload;
+};
+
+#endif
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index bab4237e90..140a971632 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1387,29 +1387,6 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
#endif /* TARGET_S390X */
-#ifdef TARGET_TILEGX
-
-/* 42 bits real used address, a half for user mode */
-#define ELF_START_MMAP (0x00000020000000000ULL)
-
-#define elf_check_arch(x) ((x) == EM_TILEGX)
-
-#define ELF_CLASS ELFCLASS64
-#define ELF_DATA ELFDATA2LSB
-#define ELF_ARCH EM_TILEGX
-
-static inline void init_thread(struct target_pt_regs *regs,
- struct image_info *infop)
-{
- regs->pc = infop->entry;
- regs->sp = infop->start_stack;
-
-}
-
-#define ELF_EXEC_PAGESIZE 65536 /* TILE-Gx page size is 64KB */
-
-#endif /* TARGET_TILEGX */
-
#ifdef TARGET_RISCV
#define ELF_START_MMAP 0x80000000
diff --git a/linux-user/main.c b/linux-user/main.c
index 81f48ff54e..4f4746dce8 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -205,6 +205,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
/* Reset non arch specific state */
cpu_reset(new_cpu);
+ new_cpu->tcg_cflags = cpu->tcg_cflags;
memcpy(new_env, env, sizeof(CPUArchState));
/* Clone all break/watchpoints.
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
index cc89a48ff8..29c1ee30e6 100644
--- a/linux-user/sh4/signal.c
+++ b/linux-user/sh4/signal.c
@@ -82,9 +82,11 @@ static abi_ulong get_sigframe(struct target_sigaction *ka,
return (sp - frame_size) & -8ul;
}
-/* Notice when we're in the middle of a gUSA region and reset.
- Note that this will only occur for !parallel_cpus, as we will
- translate such sequences differently in a parallel context. */
+/*
+ * Notice when we're in the middle of a gUSA region and reset.
+ * Note that this will only occur when #CF_PARALLEL is unset, as we
+ * will translate such sequences differently in a parallel context.
+ */
static void unwind_gusa(CPUSH4State *regs)
{
/* If the stack pointer is sufficiently negative, and we haven't
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 389ec09764..9522f603aa 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -6481,6 +6481,16 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
/* Grab a mutex so that thread setup appears atomic. */
pthread_mutex_lock(&clone_lock);
+ /*
+ * If this is our first additional thread, we need to ensure we
+ * generate code for parallel execution and flush old translations.
+ * Do this now so that the copy gets CF_PARALLEL too.
+ */
+ if (!(cpu->tcg_cflags & CF_PARALLEL)) {
+ cpu->tcg_cflags |= CF_PARALLEL;
+ tb_flush(cpu);
+ }
+
/* we create a new CPU instance. */
new_env = cpu_copy(env);
/* Init regs that differ from the parent. */
@@ -6521,14 +6531,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
cpu->random_seed = qemu_guest_random_seed_thread_part1();
- /* If this is our first additional thread, we need to ensure we
- * generate code for parallel execution and flush old translations.
- */
- if (!parallel_cpus) {
- parallel_cpus = true;
- tb_flush(cpu);
- }
-
ret = pthread_create(&info.thread, &attr, clone_func, &info);
/* TODO: Free new CPU state if thread creation failed. */
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 6823d8646c..25be414727 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -72,8 +72,7 @@
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
|| defined(TARGET_M68K) || defined(TARGET_CRIS) \
- || defined(TARGET_S390X) \
- || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
+ || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \
|| defined(TARGET_NIOS2) || defined(TARGET_RISCV) \
|| defined(TARGET_XTENSA)
@@ -691,10 +690,6 @@ typedef struct target_siginfo {
#define TARGET_ILL_PRVREG (6) /* privileged register */
#define TARGET_ILL_COPROC (7) /* coprocessor error */
#define TARGET_ILL_BADSTK (8) /* internal stack error */
-#ifdef TARGET_TILEGX
-#define TARGET_ILL_DBLFLT (9) /* double fault */
-#define TARGET_ILL_HARDWALL (10) /* user networks hardwall violation */
-#endif
/*
* SIGFPE si_codes
@@ -2149,8 +2144,7 @@ struct target_stat64 {
abi_ulong __unused5;
};
-#elif defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) || \
- defined(TARGET_NIOS2) || defined(TARGET_RISCV)
+#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || defined(TARGET_RISCV)
/* These are the asm-generic versions of the stat and stat64 structures */
diff --git a/linux-user/tilegx/cpu_loop.c b/linux-user/tilegx/cpu_loop.c
deleted file mode 100644
index 490a8f38e5..0000000000
--- a/linux-user/tilegx/cpu_loop.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * qemu user cpu loop
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "qemu.h"
-#include "cpu_loop-common.h"
-
-static void gen_sigill_reg(CPUTLGState *env)
-{
- target_siginfo_t info;
-
- info.si_signo = TARGET_SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_PRVREG;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
-}
-
-static void do_signal(CPUTLGState *env, int signo, int sigcode)
-{
- target_siginfo_t info;
-
- info.si_signo = signo;
- info.si_errno = 0;
- info._sifields._sigfault._addr = env->pc;
-
- if (signo == TARGET_SIGSEGV) {
- /* The passed in sigcode is a dummy; check for a page mapping
- and pass either MAPERR or ACCERR. */
- target_ulong addr = env->excaddr;
- info._sifields._sigfault._addr = addr;
- if (page_check_range(addr, 1, PAGE_VALID) < 0) {
- sigcode = TARGET_SEGV_MAPERR;
- } else {
- sigcode = TARGET_SEGV_ACCERR;
- }
- }
- info.si_code = sigcode;
-
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
-}
-
-static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
-{
- env->excaddr = addr;
- do_signal(env, TARGET_SIGSEGV, 0);
-}
-
-static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
-{
- if (unlikely(reg >= TILEGX_R_COUNT)) {
- switch (reg) {
- case TILEGX_R_SN:
- case TILEGX_R_ZERO:
- return;
- case TILEGX_R_IDN0:
- case TILEGX_R_IDN1:
- case TILEGX_R_UDN0:
- case TILEGX_R_UDN1:
- case TILEGX_R_UDN2:
- case TILEGX_R_UDN3:
- gen_sigill_reg(env);
- return;
- default:
- g_assert_not_reached();
- }
- }
- env->regs[reg] = val;
-}
-
-/*
- * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
- * memory at the address held in the first source register. If the values are
- * not equal, then no memory operation is performed. If the values are equal,
- * the 8-byte quantity from the second source register is written into memory
- * at the address held in the first source register. In either case, the result
- * of the instruction is the value read from memory. The compare and write to
- * memory are atomic and thus can be used for synchronization purposes. This
- * instruction only operates for addresses aligned to a 8-byte boundary.
- * Unaligned memory access causes an Unaligned Data Reference interrupt.
- *
- * Functional Description (64-bit)
- * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
- * rf[Dest] = memVal;
- * if (memVal == SPR[CmpValueSPR])
- * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
- *
- * Functional Description (32-bit)
- * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
- * rf[Dest] = memVal;
- * if (memVal == signExtend32 (SPR[CmpValueSPR]))
- * memoryWriteWord (rf[SrcA], rf[SrcB]);
- *
- *
- * This function also processes exch and exch4 which need not process SPR.
- */
-static void do_exch(CPUTLGState *env, bool quad, bool cmp)
-{
- target_ulong addr;
- target_long val, sprval;
-
- start_exclusive();
-
- addr = env->atomic_srca;
- if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
- goto sigsegv_maperr;
- }
-
- if (cmp) {
- if (quad) {
- sprval = env->spregs[TILEGX_SPR_CMPEXCH];
- } else {
- sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
- }
- }
-
- if (!cmp || val == sprval) {
- target_long valb = env->atomic_srcb;
- if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
- goto sigsegv_maperr;
- }
- }
-
- set_regval(env, env->atomic_dstr, val);
- end_exclusive();
- return;
-
- sigsegv_maperr:
- end_exclusive();
- gen_sigsegv_maperr(env, addr);
-}
-
-static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
-{
- int8_t write = 1;
- target_ulong addr;
- target_long val, valb;
-
- start_exclusive();
-
- addr = env->atomic_srca;
- valb = env->atomic_srcb;
- if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
- goto sigsegv_maperr;
- }
-
- switch (trapnr) {
- case TILEGX_EXCP_OPCODE_FETCHADD:
- case TILEGX_EXCP_OPCODE_FETCHADD4:
- valb += val;
- break;
- case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
- valb += val;
- if (valb < 0) {
- write = 0;
- }
- break;
- case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
- valb += val;
- if ((int32_t)valb < 0) {
- write = 0;
- }
- break;
- case TILEGX_EXCP_OPCODE_FETCHAND:
- case TILEGX_EXCP_OPCODE_FETCHAND4:
- valb &= val;
- break;
- case TILEGX_EXCP_OPCODE_FETCHOR:
- case TILEGX_EXCP_OPCODE_FETCHOR4:
- valb |= val;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (write) {
- if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
- goto sigsegv_maperr;
- }
- }
-
- set_regval(env, env->atomic_dstr, val);
- end_exclusive();
- return;
-
- sigsegv_maperr:
- end_exclusive();
- gen_sigsegv_maperr(env, addr);
-}
-
-void cpu_loop(CPUTLGState *env)
-{
- CPUState *cs = env_cpu(env);
- int trapnr;
-
- while (1) {
- cpu_exec_start(cs);
- trapnr = cpu_exec(cs);
- cpu_exec_end(cs);
- process_queued_cpu_work(cs);
-
- switch (trapnr) {
- case TILEGX_EXCP_SYSCALL:
- {
- abi_ulong ret = do_syscall(env, env->regs[TILEGX_R_NR],
- env->regs[0], env->regs[1],
- env->regs[2], env->regs[3],
- env->regs[4], env->regs[5],
- env->regs[6], env->regs[7]);
- if (ret == -TARGET_ERESTARTSYS) {
- env->pc -= 8;
- } else if (ret != -TARGET_QEMU_ESIGRETURN) {
- env->regs[TILEGX_R_RE] = ret;
- env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(ret) ? -ret : 0;
- }
- break;
- }
- case TILEGX_EXCP_OPCODE_EXCH:
- do_exch(env, true, false);
- break;
- case TILEGX_EXCP_OPCODE_EXCH4:
- do_exch(env, false, false);
- break;
- case TILEGX_EXCP_OPCODE_CMPEXCH:
- do_exch(env, true, true);
- break;
- case TILEGX_EXCP_OPCODE_CMPEXCH4:
- do_exch(env, false, true);
- break;
- case TILEGX_EXCP_OPCODE_FETCHADD:
- case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
- case TILEGX_EXCP_OPCODE_FETCHAND:
- case TILEGX_EXCP_OPCODE_FETCHOR:
- do_fetch(env, trapnr, true);
- break;
- case TILEGX_EXCP_OPCODE_FETCHADD4:
- case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
- case TILEGX_EXCP_OPCODE_FETCHAND4:
- case TILEGX_EXCP_OPCODE_FETCHOR4:
- do_fetch(env, trapnr, false);
- break;
- case TILEGX_EXCP_SIGNAL:
- do_signal(env, env->signo, env->sigcode);
- break;
- case TILEGX_EXCP_REG_IDN_ACCESS:
- case TILEGX_EXCP_REG_UDN_ACCESS:
- gen_sigill_reg(env);
- break;
- case EXCP_ATOMIC:
- cpu_exec_step_atomic(cs);
- break;
- default:
- fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
- g_assert_not_reached();
- }
- process_pending_signals(env);
- }
-}
-
-void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
-{
- int i;
- for (i = 0; i < TILEGX_R_COUNT; i++) {
- env->regs[i] = regs->regs[i];
- }
- for (i = 0; i < TILEGX_SPR_COUNT; i++) {
- env->spregs[i] = 0;
- }
- env->pc = regs->pc;
-}
diff --git a/linux-user/tilegx/signal.c b/linux-user/tilegx/signal.c
deleted file mode 100644
index c5a1c7161d..0000000000
--- a/linux-user/tilegx/signal.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Emulation of Linux signals
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-#include "qemu.h"
-#include "signal-common.h"
-#include "linux-user/trace.h"
-
-struct target_sigcontext {
- union {
- /* General-purpose registers. */
- abi_ulong gregs[56];
- struct {
- abi_ulong __gregs[53];
- abi_ulong tp; /* Aliases gregs[TREG_TP]. */
- abi_ulong sp; /* Aliases gregs[TREG_SP]. */
- abi_ulong lr; /* Aliases gregs[TREG_LR]. */
- };
- };
- abi_ulong pc; /* Program counter. */
- abi_ulong ics; /* In Interrupt Critical Section? */
- abi_ulong faultnum; /* Fault number. */
- abi_ulong pad[5];
-};
-
-struct target_ucontext {
- abi_ulong tuc_flags;
- abi_ulong tuc_link;
- target_stack_t tuc_stack;
- struct target_sigcontext tuc_mcontext;
- target_sigset_t tuc_sigmask; /* mask last for extensibility */
-};
-
-struct target_rt_sigframe {
- unsigned char save_area[16]; /* caller save area */
- struct target_siginfo info;
- struct target_ucontext uc;
- abi_ulong retcode[2];
-};
-
-#define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
-#define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
-
-
-static void setup_sigcontext(struct target_sigcontext *sc,
- CPUArchState *env, int signo)
-{
- int i;
-
- for (i = 0; i < TILEGX_R_COUNT; ++i) {
- __put_user(env->regs[i], &sc->gregs[i]);
- }
-
- __put_user(env->pc, &sc->pc);
- __put_user(0, &sc->ics);
- __put_user(signo, &sc->faultnum);
-}
-
-static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
-{
- int i;
-
- for (i = 0; i < TILEGX_R_COUNT; ++i) {
- __get_user(env->regs[i], &sc->gregs[i]);
- }
-
- __get_user(env->pc, &sc->pc);
-}
-
-static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
- size_t frame_size)
-{
- unsigned long sp = get_sp_from_cpustate(env);
-
- if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
- return -1UL;
- }
-
- sp = target_sigsp(sp, ka) - frame_size;
- sp &= -16UL;
- return sp;
-}
-
-void setup_rt_frame(int sig, struct target_sigaction *ka,
- target_siginfo_t *info,
- target_sigset_t *set, CPUArchState *env)
-{
- abi_ulong frame_addr;
- struct target_rt_sigframe *frame;
- unsigned long restorer;
-
- frame_addr = get_sigframe(ka, env, sizeof(*frame));
- trace_user_setup_rt_frame(env, frame_addr);
- if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
- goto give_sigsegv;
- }
-
- /* Always write at least the signal number for the stack backtracer. */
- if (ka->sa_flags & TARGET_SA_SIGINFO) {
- /* At sigreturn time, restore the callee-save registers too. */
- tswap_siginfo(&frame->info, info);
- /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
- } else {
- __put_user(info->si_signo, &frame->info.si_signo);
- }
-
- /* Create the ucontext. */
- __put_user(0, &frame->uc.tuc_flags);
- __put_user(0, &frame->uc.tuc_link);
- target_save_altstack(&frame->uc.tuc_stack, env);
- setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
-
- if (ka->sa_flags & TARGET_SA_RESTORER) {
- restorer = (unsigned long) ka->sa_restorer;
- } else {
- __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
- __put_user(INSN_SWINT1, &frame->retcode[1]);
- restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
- }
- env->pc = (unsigned long) ka->_sa_handler;
- env->regs[TILEGX_R_SP] = (unsigned long) frame;
- env->regs[TILEGX_R_LR] = restorer;
- env->regs[0] = (unsigned long) sig;
- env->regs[1] = (unsigned long) &frame->info;
- env->regs[2] = (unsigned long) &frame->uc;
- /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
-
- unlock_user_struct(frame, frame_addr, 1);
- return;
-
-give_sigsegv:
- force_sigsegv(sig);
-}
-
-long do_rt_sigreturn(CPUTLGState *env)
-{
- abi_ulong frame_addr = env->regs[TILEGX_R_SP];
- struct target_rt_sigframe *frame;
- sigset_t set;
-
- trace_user_do_rt_sigreturn(env, frame_addr);
- if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
- goto badframe;
- }
- target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- set_sigmask(&set);
-
- restore_sigcontext(env, &frame->uc.tuc_mcontext);
- if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
- uc.tuc_stack),
- 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
- goto badframe;
- }
-
- unlock_user_struct(frame, frame_addr, 0);
- return -TARGET_QEMU_ESIGRETURN;
-
-
- badframe:
- unlock_user_struct(frame, frame_addr, 0);
- force_sig(TARGET_SIGSEGV);
- return -TARGET_QEMU_ESIGRETURN;
-}
diff --git a/linux-user/tilegx/sockbits.h b/linux-user/tilegx/sockbits.h
deleted file mode 100644
index 0e4c8f012d..0000000000
--- a/linux-user/tilegx/sockbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../generic/sockbits.h"
diff --git a/linux-user/tilegx/syscall_nr.h b/linux-user/tilegx/syscall_nr.h
deleted file mode 100644
index c104b94230..0000000000
--- a/linux-user/tilegx/syscall_nr.h
+++ /dev/null
@@ -1,327 +0,0 @@
-#ifndef TILEGX_SYSCALL_NR_H
-#define TILEGX_SYSCALL_NR_H
-
-/*
- * Copy from linux kernel asm-generic/unistd.h, which tilegx uses.
- */
-#define TARGET_NR_io_setup 0
-#define TARGET_NR_io_destroy 1
-#define TARGET_NR_io_submit 2
-#define TARGET_NR_io_cancel 3
-#define TARGET_NR_io_getevents 4
-#define TARGET_NR_setxattr 5
-#define TARGET_NR_lsetxattr 6
-#define TARGET_NR_fsetxattr 7
-#define TARGET_NR_getxattr 8
-#define TARGET_NR_lgetxattr 9
-#define TARGET_NR_fgetxattr 10
-#define TARGET_NR_listxattr 11
-#define TARGET_NR_llistxattr 12
-#define TARGET_NR_flistxattr 13
-#define TARGET_NR_removexattr 14
-#define TARGET_NR_lremovexattr 15
-#define TARGET_NR_fremovexattr 16
-#define TARGET_NR_getcwd 17
-#define TARGET_NR_lookup_dcookie 18
-#define TARGET_NR_eventfd2 19
-#define TARGET_NR_epoll_create1 20
-#define TARGET_NR_epoll_ctl 21
-#define TARGET_NR_epoll_pwait 22
-#define TARGET_NR_dup 23
-#define TARGET_NR_dup3 24
-#define TARGET_NR_fcntl 25
-#define TARGET_NR_inotify_init1 26
-#define TARGET_NR_inotify_add_watch 27
-#define TARGET_NR_inotify_rm_watch 28
-#define TARGET_NR_ioctl 29
-#define TARGET_NR_ioprio_set 30
-#define TARGET_NR_ioprio_get 31
-#define TARGET_NR_flock 32
-#define TARGET_NR_mknodat 33
-#define TARGET_NR_mkdirat 34
-#define TARGET_NR_unlinkat 35
-#define TARGET_NR_symlinkat 36
-#define TARGET_NR_linkat 37
-#define TARGET_NR_renameat 38
-#define TARGET_NR_umount2 39
-#define TARGET_NR_mount 40
-#define TARGET_NR_pivot_root 41
-#define TARGET_NR_nfsservctl 42
-#define TARGET_NR_statfs 43
-#define TARGET_NR_fstatfs 44
-#define TARGET_NR_truncate 45
-#define TARGET_NR_ftruncate 46
-#define TARGET_NR_fallocate 47
-#define TARGET_NR_faccessat 48
-#define TARGET_NR_chdir 49
-#define TARGET_NR_fchdir 50
-#define TARGET_NR_chroot 51
-#define TARGET_NR_fchmod 52
-#define TARGET_NR_fchmodat 53
-#define TARGET_NR_fchownat 54
-#define TARGET_NR_fchown 55
-#define TARGET_NR_openat 56
-#define TARGET_NR_close 57
-#define TARGET_NR_vhangup 58
-#define TARGET_NR_pipe2 59
-#define TARGET_NR_quotactl 60
-#define TARGET_NR_getdents64 61
-#define TARGET_NR_lseek 62
-#define TARGET_NR_read 63
-#define TARGET_NR_write 64
-#define TARGET_NR_readv 65
-#define TARGET_NR_writev 66
-#define TARGET_NR_pread64 67
-#define TARGET_NR_pwrite64 68
-#define TARGET_NR_preadv 69
-#define TARGET_NR_pwritev 70
-#define TARGET_NR_sendfile 71
-#define TARGET_NR_pselect6 72
-#define TARGET_NR_ppoll 73
-#define TARGET_NR_signalfd4 74
-#define TARGET_NR_vmsplice 75
-#define TARGET_NR_splice 76
-#define TARGET_NR_tee 77
-#define TARGET_NR_readlinkat 78
-#define TARGET_NR_fstatat64 79 /* let syscall.c known */
-#define TARGET_NR_fstat 80
-#define TARGET_NR_sync 81
-#define TARGET_NR_fsync 82
-#define TARGET_NR_fdatasync 83
-#define TARGET_NR_sync_file_range 84 /* For tilegx, no range2 */
-#define TARGET_NR_timerfd_create 85
-#define TARGET_NR_timerfd_settime 86
-#define TARGET_NR_timerfd_gettime 87
-#define TARGET_NR_utimensat 88
-#define TARGET_NR_acct 89
-#define TARGET_NR_capget 90
-#define TARGET_NR_capset 91
-#define TARGET_NR_personality 92
-#define TARGET_NR_exit 93
-#define TARGET_NR_exit_group 94
-#define TARGET_NR_waitid 95
-#define TARGET_NR_set_tid_address 96
-#define TARGET_NR_unshare 97
-#define TARGET_NR_futex 98
-#define TARGET_NR_set_robust_list 99
-#define TARGET_NR_get_robust_list 100
-#define TARGET_NR_nanosleep 101
-#define TARGET_NR_getitimer 102
-#define TARGET_NR_setitimer 103
-#define TARGET_NR_kexec_load 104
-#define TARGET_NR_init_module 105
-#define TARGET_NR_delete_module 106
-#define TARGET_NR_timer_create 107
-#define TARGET_NR_timer_gettime 108
-#define TARGET_NR_timer_getoverrun 109
-#define TARGET_NR_timer_settime 110
-#define TARGET_NR_timer_delete 111
-#define TARGET_NR_clock_settime 112
-#define TARGET_NR_clock_gettime 113
-#define TARGET_NR_clock_getres 114
-#define TARGET_NR_clock_nanosleep 115
-#define TARGET_NR_syslog 116
-#define TARGET_NR_ptrace 117
-#define TARGET_NR_sched_setparam 118
-#define TARGET_NR_sched_setscheduler 119
-#define TARGET_NR_sched_getscheduler 120
-#define TARGET_NR_sched_getparam 121
-#define TARGET_NR_sched_setaffinity 122
-#define TARGET_NR_sched_getaffinity 123
-#define TARGET_NR_sched_yield 124
-#define TARGET_NR_sched_get_priority_max 125
-#define TARGET_NR_sched_get_priority_min 126
-#define TARGET_NR_sched_rr_get_interval 127
-#define TARGET_NR_restart_syscall 128
-#define TARGET_NR_kill 129
-#define TARGET_NR_tkill 130
-#define TARGET_NR_tgkill 131
-#define TARGET_NR_sigaltstack 132
-#define TARGET_NR_rt_sigsuspend 133
-#define TARGET_NR_rt_sigaction 134
-#define TARGET_NR_rt_sigprocmask 135
-#define TARGET_NR_rt_sigpending 136
-#define TARGET_NR_rt_sigtimedwait 137
-#define TARGET_NR_rt_sigqueueinfo 138
-#define TARGET_NR_rt_sigreturn 139
-#define TARGET_NR_setpriority 140
-#define TARGET_NR_getpriority 141
-#define TARGET_NR_reboot 142
-#define TARGET_NR_setregid 143
-#define TARGET_NR_setgid 144
-#define TARGET_NR_setreuid 145
-#define TARGET_NR_setuid 146
-#define TARGET_NR_setresuid 147
-#define TARGET_NR_getresuid 148
-#define TARGET_NR_setresgid 149
-#define TARGET_NR_getresgid 150
-#define TARGET_NR_setfsuid 151
-#define TARGET_NR_setfsgid 152
-#define TARGET_NR_times 153
-#define TARGET_NR_setpgid 154
-#define TARGET_NR_getpgid 155
-#define TARGET_NR_getsid 156
-#define TARGET_NR_setsid 157
-#define TARGET_NR_getgroups 158
-#define TARGET_NR_setgroups 159
-#define TARGET_NR_uname 160
-#define TARGET_NR_sethostname 161
-#define TARGET_NR_setdomainname 162
-#define TARGET_NR_getrlimit 163
-#define TARGET_NR_setrlimit 164
-#define TARGET_NR_getrusage 165
-#define TARGET_NR_umask 166
-#define TARGET_NR_prctl 167
-#define TARGET_NR_getcpu 168
-#define TARGET_NR_gettimeofday 169
-#define TARGET_NR_settimeofday 170
-#define TARGET_NR_adjtimex 171
-#define TARGET_NR_getpid 172
-#define TARGET_NR_getppid 173
-#define TARGET_NR_getuid 174
-#define TARGET_NR_geteuid 175
-#define TARGET_NR_getgid 176
-#define TARGET_NR_getegid 177
-#define TARGET_NR_gettid 178
-#define TARGET_NR_sysinfo 179
-#define TARGET_NR_mq_open 180
-#define TARGET_NR_mq_unlink 181
-#define TARGET_NR_mq_timedsend 182
-#define TARGET_NR_mq_timedreceive 183
-#define TARGET_NR_mq_notify 184
-#define TARGET_NR_mq_getsetattr 185
-#define TARGET_NR_msgget 186
-#define TARGET_NR_msgctl 187
-#define TARGET_NR_msgrcv 188
-#define TARGET_NR_msgsnd 189
-#define TARGET_NR_semget 190
-#define TARGET_NR_semctl 191
-#define TARGET_NR_semtimedop 192
-#define TARGET_NR_semop 193
-#define TARGET_NR_shmget 194
-#define TARGET_NR_shmctl 195
-#define TARGET_NR_shmat 196
-#define TARGET_NR_shmdt 197
-#define TARGET_NR_socket 198
-#define TARGET_NR_socketpair 199
-#define TARGET_NR_bind 200
-#define TARGET_NR_listen 201
-#define TARGET_NR_accept 202
-#define TARGET_NR_connect 203
-#define TARGET_NR_getsockname 204
-#define TARGET_NR_getpeername 205
-#define TARGET_NR_sendto 206
-#define TARGET_NR_recvfrom 207
-#define TARGET_NR_setsockopt 208
-#define TARGET_NR_getsockopt 209
-#define TARGET_NR_shutdown 210
-#define TARGET_NR_sendmsg 211
-#define TARGET_NR_recvmsg 212
-#define TARGET_NR_readahead 213
-#define TARGET_NR_brk 214
-#define TARGET_NR_munmap 215
-#define TARGET_NR_mremap 216
-#define TARGET_NR_add_key 217
-#define TARGET_NR_request_key 218
-#define TARGET_NR_keyctl 219
-#define TARGET_NR_clone 220
-#define TARGET_NR_execve 221
-#define TARGET_NR_mmap 222
-#define TARGET_NR_fadvise64 223
-#define TARGET_NR_swapon 224
-#define TARGET_NR_swapoff 225
-#define TARGET_NR_mprotect 226
-#define TARGET_NR_msync 227
-#define TARGET_NR_mlock 228
-#define TARGET_NR_munlock 229
-#define TARGET_NR_mlockall 230
-#define TARGET_NR_munlockall 231
-#define TARGET_NR_mincore 232
-#define TARGET_NR_madvise 233
-#define TARGET_NR_remap_file_pages 234
-#define TARGET_NR_mbind 235
-#define TARGET_NR_get_mempolicy 236
-#define TARGET_NR_set_mempolicy 237
-#define TARGET_NR_migrate_pages 238
-#define TARGET_NR_move_pages 239
-#define TARGET_NR_rt_tgsigqueueinfo 240
-#define TARGET_NR_perf_event_open 241
-#define TARGET_NR_accept4 242
-#define TARGET_NR_recvmmsg 243
-
-#define TARGET_NR_arch_specific_syscall 244
-#define TARGET_NR_cacheflush 245 /* tilegx own syscall */
-
-#define TARGET_NR_wait4 260
-#define TARGET_NR_prlimit64 261
-#define TARGET_NR_fanotify_init 262
-#define TARGET_NR_fanotify_mark 263
-#define TARGET_NR_name_to_handle_at 264
-#define TARGET_NR_open_by_handle_at 265
-#define TARGET_NR_clock_adjtime 266
-#define TARGET_NR_syncfs 267
-#define TARGET_NR_setns 268
-#define TARGET_NR_sendmmsg 269
-#define TARGET_NR_process_vm_readv 270
-#define TARGET_NR_process_vm_writev 271
-#define TARGET_NR_kcmp 272
-#define TARGET_NR_finit_module 273
-#define TARGET_NR_sched_setattr 274
-#define TARGET_NR_sched_getattr 275
-#define TARGET_NR_renameat2 276
-#define TARGET_NR_seccomp 277
-#define TARGET_NR_getrandom 278
-#define TARGET_NR_memfd_create 279
-#define TARGET_NR_bpf 280
-#define TARGET_NR_execveat 281
-#define TARGET_NR_userfaultfd 282
-#define TARGET_NR_membarrier 283
-#define TARGET_NR_mlock2 284
-#define TARGET_NR_copy_file_range 285
-
-#define TARGET_NR_open 1024
-#define TARGET_NR_link 1025
-#define TARGET_NR_unlink 1026
-#define TARGET_NR_mknod 1027
-#define TARGET_NR_chmod 1028
-#define TARGET_NR_chown 1029
-#define TARGET_NR_mkdir 1030
-#define TARGET_NR_rmdir 1031
-#define TARGET_NR_lchown 1032
-#define TARGET_NR_access 1033
-#define TARGET_NR_rename 1034
-#define TARGET_NR_readlink 1035
-#define TARGET_NR_symlink 1036
-#define TARGET_NR_utimes 1037
-#define TARGET_NR_stat64 1038 /* let syscall.c known */
-#define TARGET_NR_lstat 1039
-
-#define TARGET_NR_pipe 1040
-#define TARGET_NR_dup2 1041
-#define TARGET_NR_epoll_create 1042
-#define TARGET_NR_inotify_init 1043
-#define TARGET_NR_eventfd 1044
-#define TARGET_NR_signalfd 1045
-
-#define TARGET_NR_alarm 1059
-#define TARGET_NR_getpgrp 1060
-#define TARGET_NR_pause 1061
-#define TARGET_NR_time 1062
-#define TARGET_NR_utime 1063
-#define TARGET_NR_creat 1064
-#define TARGET_NR_getdents 1065
-#define TARGET_NR_futimesat 1066
-#define TARGET_NR_poll 1068
-#define TARGET_NR_epoll_wait 1069
-#define TARGET_NR_ustat 1070
-#define TARGET_NR_vfork 1071
-#define TARGET_NR_oldwait4 1072
-#define TARGET_NR_recv 1073
-#define TARGET_NR_send 1074
-#define TARGET_NR_bdflush 1075
-#define TARGET_NR_umount 1076
-#define TARGET_NR_uselib 1077
-#define TARGET_NR__sysctl 1078
-#define TARGET_NR_fork 1079
-
-#endif
diff --git a/linux-user/tilegx/target_cpu.h b/linux-user/tilegx/target_cpu.h
deleted file mode 100644
index 5fa9e2a9a4..0000000000
--- a/linux-user/tilegx/target_cpu.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * TILE-Gx specific CPU ABI and functions for linux-user
- *
- * Copyright (c) 2015 Chen Gang
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef TILEGX_TARGET_CPU_H
-#define TILEGX_TARGET_CPU_H
-
-static inline void cpu_clone_regs_child(CPUTLGState *env, target_ulong newsp,
- unsigned flags)
-{
- if (newsp) {
- env->regs[TILEGX_R_SP] = newsp;
- }
- env->regs[TILEGX_R_RE] = 0;
-}
-
-static inline void cpu_clone_regs_parent(CPUTLGState *env, unsigned flags)
-{
-}
-
-static inline void cpu_set_tls(CPUTLGState *env, target_ulong newtls)
-{
- env->regs[TILEGX_R_TP] = newtls;
-}
-
-static inline abi_ulong get_sp_from_cpustate(CPUTLGState *state)
-{
- return state->regs[TILEGX_R_SP];
-}
-#endif
diff --git a/linux-user/tilegx/target_elf.h b/linux-user/tilegx/target_elf.h
deleted file mode 100644
index 7197bb0005..0000000000
--- a/linux-user/tilegx/target_elf.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation, or (at your option) any
- * later version. See the COPYING file in the top-level directory.
- */
-
-#ifndef TILEGX_TARGET_ELF_H
-#define TILEGX_TARGET_ELF_H
-static inline const char *cpu_get_model(uint32_t eflags)
-{
- return "any";
-}
-#endif
diff --git a/linux-user/tilegx/target_fcntl.h b/linux-user/tilegx/target_fcntl.h
deleted file mode 100644
index 5ed7438459..0000000000
--- a/linux-user/tilegx/target_fcntl.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation, or (at your option) any
- * later version. See the COPYING file in the top-level directory.
- */
-
-#ifndef TILEGX_TARGET_FCNTL_H
-#define TILEGX_TARGET_FCNTL_H
-#include "../generic/fcntl.h"
-#endif
diff --git a/linux-user/tilegx/target_signal.h b/linux-user/tilegx/target_signal.h
deleted file mode 100644
index 655be13009..0000000000
--- a/linux-user/tilegx/target_signal.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef TILEGX_TARGET_SIGNAL_H
-#define TILEGX_TARGET_SIGNAL_H
-
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
-#include "../generic/signal.h"
-
-#endif /* TILEGX_TARGET_SIGNAL_H */
diff --git a/linux-user/tilegx/target_structs.h b/linux-user/tilegx/target_structs.h
deleted file mode 100644
index 1df000cc96..0000000000
--- a/linux-user/tilegx/target_structs.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * TILE-Gx specific structures for linux-user
- *
- * Copyright (c) 2015 Chen Gang
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef TILEGX_TARGET_STRUCTS_H
-#define TILEGX_TARGET_STRUCTS_H
-
-struct target_ipc_perm {
- abi_int __key; /* Key. */
- abi_uint uid; /* Owner's user ID. */
- abi_uint gid; /* Owner's group ID. */
- abi_uint cuid; /* Creator's user ID. */
- abi_uint cgid; /* Creator's group ID. */
- abi_uint mode; /* Read/write permission. */
- abi_ushort __seq; /* Sequence number. */
-};
-
-struct target_shmid_ds {
- struct target_ipc_perm shm_perm; /* operation permission struct */
- abi_long shm_segsz; /* size of segment in bytes */
- abi_ulong shm_atime; /* time of last shmat() */
- abi_ulong shm_dtime; /* time of last shmdt() */
- abi_ulong shm_ctime; /* time of last change by shmctl() */
- abi_int shm_cpid; /* pid of creator */
- abi_int shm_lpid; /* pid of last shmop */
- abi_ushort shm_nattch; /* number of current attaches */
- abi_ushort shm_unused; /* compatibility */
- abi_ulong __unused4;
- abi_ulong __unused5;
-};
-
-#endif
diff --git a/linux-user/tilegx/target_syscall.h b/linux-user/tilegx/target_syscall.h
deleted file mode 100644
index 8e9db734b8..0000000000
--- a/linux-user/tilegx/target_syscall.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef TILEGX_TARGET_SYSCALL_H
-#define TILEGX_TARGET_SYSCALL_H
-
-#define UNAME_MACHINE "tilegx"
-#define UNAME_MINIMUM_RELEASE "3.19"
-
-#define MMAP_SHIFT TARGET_PAGE_BITS
-
-#define TILEGX_IS_ERRNO(ret) \
- ((ret) > 0xfffffffffffff000ULL) /* errno is 0 -- 4096 */
-
-typedef uint64_t tilegx_reg_t;
-
-struct target_pt_regs {
-
- union {
- /* Saved main processor registers; 56..63 are special. */
- tilegx_reg_t regs[56];
- struct {
- tilegx_reg_t __regs[53];
- tilegx_reg_t tp; /* aliases regs[TREG_TP] */
- tilegx_reg_t sp; /* aliases regs[TREG_SP] */
- tilegx_reg_t lr; /* aliases regs[TREG_LR] */
- };
- };
-
- /* Saved special registers. */
- tilegx_reg_t pc; /* stored in EX_CONTEXT_K_0 */
- tilegx_reg_t ex1; /* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
- tilegx_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */
- tilegx_reg_t orig_r0; /* r0 at syscall entry, else zero */
- tilegx_reg_t flags; /* flags (see below) */
- tilegx_reg_t cmpexch; /* value of CMPEXCH_VALUE SPR at interrupt */
- tilegx_reg_t pad[2];
-};
-
-#define TARGET_MCL_CURRENT 1
-#define TARGET_MCL_FUTURE 2
-#define TARGET_MCL_ONFAULT 4
-
-/* For faultnum */
-#define TARGET_INT_SWINT_1 14
-
-#endif
diff --git a/linux-user/tilegx/termbits.h b/linux-user/tilegx/termbits.h
deleted file mode 100644
index b1d4f4fedb..0000000000
--- a/linux-user/tilegx/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../generic/termbits.h"
diff --git a/net/net.c b/net/net.c
index fb7b7dcc25..6002ba50db 100644
--- a/net/net.c
+++ b/net/net.c
@@ -50,7 +50,6 @@
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "sysemu/sysemu.h"
-#include "sysemu/qtest.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "net/filter.h"
diff --git a/scripts/ci/gitlab-pipeline-status b/scripts/ci/gitlab-pipeline-status
index 78e72f6008..924db327ff 100755
--- a/scripts/ci/gitlab-pipeline-status
+++ b/scripts/ci/gitlab-pipeline-status
@@ -48,24 +48,35 @@ def get_local_branch_commit(branch):
return result
-def get_pipeline_status(project_id, commit_sha1):
+def get_json_http_response(url):
"""
- Returns the JSON content of the pipeline status API response
+ Returns the JSON content of an HTTP GET request to gitlab.com
"""
- url = '/api/v4/projects/{}/pipelines?sha={}'.format(project_id,
- commit_sha1)
connection = http.client.HTTPSConnection('gitlab.com')
connection.request('GET', url=url)
response = connection.getresponse()
if response.code != http.HTTPStatus.OK:
- raise CommunicationFailure("Failed to receive a successful response")
- json_response = json.loads(response.read())
+ msg = "Received unsuccessful response: %s (%s)" % (response.code,
+ response.reason)
+ raise CommunicationFailure(msg)
+ return json.loads(response.read())
+
+
+def get_pipeline_status(project_id, commit_sha1):
+ """
+ Returns the JSON content of the pipeline status API response
+ """
+ url = '/api/v4/projects/{}/pipelines?sha={}'.format(project_id,
+ commit_sha1)
+ json_response = get_json_http_response(url)
# As far as I can tell, there should be only one pipeline for the same
# project + commit. If this assumption is false, we can add further
# filters to the url, such as username, and order_by.
if not json_response:
- raise NoPipelineFound("No pipeline found")
+ msg = "No pipeline found for project %s and commit %s" % (project_id,
+ commit_sha1)
+ raise NoPipelineFound(msg)
return json_response[0]
diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py
index cbbcba100d..ee072c0502 100644
--- a/scripts/mtest2make.py
+++ b/scripts/mtest2make.py
@@ -21,7 +21,7 @@ print('''
SPEED = quick
# $1 = environment, $2 = test command, $3 = test name, $4 = dir
-.test-human-tap = $1 $(if $4,(cd $4 && $2),$2) < /dev/null | ./scripts/tap-driver.pl --test-name="$3" $(if $(V),,--show-failures-only)
+.test-human-tap = $1 $(if $4,(cd $4 && $2),$2) -m $(SPEED) < /dev/null | ./scripts/tap-driver.pl --test-name="$3" $(if $(V),,--show-failures-only)
.test-human-exitcode = $1 $(PYTHON) scripts/test-driver.py $(if $4,-C$4) $(if $(V),--verbose) -- $2 < /dev/null
.test-tap-tap = $1 $(if $4,(cd $4 && $2),$2) < /dev/null | sed "s/^[a-z][a-z]* [0-9]*/& $3/" || true
.test-tap-exitcode = printf "%s\\n" 1..1 "`$1 $(if $4,(cd $4 && $2),$2) < /dev/null > /dev/null || echo "not "`ok 1 $3"
diff --git a/softmmu/cpu-timers.c b/softmmu/cpu-timers.c
index 1eb7c675c1..cd38595245 100644
--- a/softmmu/cpu-timers.c
+++ b/softmmu/cpu-timers.c
@@ -30,7 +30,6 @@
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "sysemu/cpus.h"
-#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/seqlock.h"
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 19e0aa9836..7e8b0fab89 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -937,7 +937,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
cpu_loop_exit_restore(cpu, ra);
} else {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
mmap_unlock();
if (ra) {
cpu_restore_state(cpu, ra, true);
diff --git a/storage-daemon/qemu-storage-daemon.c b/storage-daemon/qemu-storage-daemon.c
index 9021a46b3a..23756fc8e5 100644
--- a/storage-daemon/qemu-storage-daemon.c
+++ b/storage-daemon/qemu-storage-daemon.c
@@ -59,6 +59,7 @@
#include "sysemu/runstate.h"
#include "trace/control.h"
+static const char *pid_file;
static volatile bool exit_requested = false;
void qemu_system_killed(int signal, pid_t pid)
@@ -115,6 +116,8 @@ static void help(void)
" See the qemu(1) man page for documentation of the\n"
" objects that can be added.\n"
"\n"
+" --pidfile <path> write process ID to a file after startup\n"
+"\n"
QEMU_HELP_BOTTOM "\n",
error_get_progname());
}
@@ -126,6 +129,7 @@ enum {
OPTION_MONITOR,
OPTION_NBD_SERVER,
OPTION_OBJECT,
+ OPTION_PIDFILE,
};
extern QemuOptsList qemu_chardev_opts;
@@ -152,6 +156,20 @@ static void init_qmp_commands(void)
qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG);
}
+static int getopt_set_loc(int argc, char **argv, const char *optstring,
+ const struct option *longopts)
+{
+ int c, save_index;
+
+ optarg = NULL;
+ save_index = optind;
+ c = getopt_long(argc, argv, optstring, longopts, NULL);
+ if (optarg) {
+ loc_set_cmdline(argv, save_index, MAX(1, optind - save_index));
+ }
+ return c;
+}
+
static void process_options(int argc, char *argv[])
{
int c;
@@ -164,6 +182,7 @@ static void process_options(int argc, char *argv[])
{"monitor", required_argument, NULL, OPTION_MONITOR},
{"nbd-server", required_argument, NULL, OPTION_NBD_SERVER},
{"object", required_argument, NULL, OPTION_OBJECT},
+ {"pidfile", required_argument, NULL, OPTION_PIDFILE},
{"trace", required_argument, NULL, 'T'},
{"version", no_argument, NULL, 'V'},
{0, 0, 0, 0}
@@ -174,7 +193,7 @@ static void process_options(int argc, char *argv[])
* they are given on the command lines. This means that things must be
* defined first before they can be referenced in another option.
*/
- while ((c = getopt_long(argc, argv, "hT:V", long_options, NULL)) != -1) {
+ while ((c = getopt_set_loc(argc, argv, "-hT:V", long_options)) != -1) {
switch (c) {
case '?':
exit(EXIT_FAILURE);
@@ -275,14 +294,38 @@ static void process_options(int argc, char *argv[])
qobject_unref(args);
break;
}
+ case OPTION_PIDFILE:
+ pid_file = optarg;
+ break;
+ case 1:
+ error_report("Unexpected argument");
+ exit(EXIT_FAILURE);
default:
g_assert_not_reached();
}
}
- if (optind != argc) {
- error_report("Unexpected argument: %s", argv[optind]);
+ loc_set_none();
+}
+
+static void pid_file_cleanup(void)
+{
+ unlink(pid_file);
+}
+
+static void pid_file_init(void)
+{
+ Error *err = NULL;
+
+ if (!pid_file) {
+ return;
+ }
+
+ if (!qemu_write_pidfile(pid_file, &err)) {
+ error_reportf_err(err, "cannot create PID file: ");
exit(EXIT_FAILURE);
}
+
+ atexit(pid_file_cleanup);
}
int main(int argc, char *argv[])
@@ -312,6 +355,13 @@ int main(int argc, char *argv[])
qemu_init_main_loop(&error_fatal);
process_options(argc, argv);
+ /*
+ * Write the pid file after creating chardevs, exports, and NBD servers but
+ * before accepting connections. This ordering is documented. Do not change
+ * it.
+ */
+ pid_file_init();
+
while (!exit_requested) {
main_loop_wait(false);
}
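
The --pidfile support added above follows the usual daemon pattern: the PID file is written only after the configured chardevs, exports and NBD servers exist, and an atexit() handler removes it again on normal exit. A minimal standalone sketch of that write-then-cleanup pattern, using only the C standard library (the helper names below are illustrative, not the daemon's own):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static const char *pidfile_path;   /* illustrative counterpart of pid_file above */

static void pidfile_remove(void)
{
    if (pidfile_path) {
        unlink(pidfile_path);      /* best-effort cleanup, like pid_file_cleanup() */
    }
}

static int pidfile_write(const char *path)
{
    FILE *f = fopen(path, "w");

    if (!f) {
        return -1;
    }
    fprintf(f, "%ld\n", (long)getpid());
    fclose(f);
    pidfile_path = path;
    atexit(pidfile_remove);        /* mirrors atexit(pid_file_cleanup) in the patch */
    return 0;
}

int main(void)
{
    if (pidfile_write("/tmp/example.pid") < 0) {
        perror("pidfile_write");
        return EXIT_FAILURE;
    }
    /* ... the main loop would run here ... */
    return EXIT_SUCCESS;           /* the atexit handler unlinks the file */
}
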
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 6facb66f4d..ae04884408 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1922,331 +1922,6 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
return oc;
}
-/* CPU models. These are not needed for the AArch64 linux-user build. */
-#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
-
-static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
- { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- REGINFO_SENTINEL
-};
-
-static void cortex_a8_initfn(Object *obj)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- cpu->dtb_compatible = "arm,cortex-a8";
- set_feature(&cpu->env, ARM_FEATURE_V7);
- set_feature(&cpu->env, ARM_FEATURE_NEON);
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
- set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
- set_feature(&cpu->env, ARM_FEATURE_EL3);
- cpu->midr = 0x410fc080;
- cpu->reset_fpsid = 0x410330c0;
- cpu->isar.mvfr0 = 0x11110222;
- cpu->isar.mvfr1 = 0x00011111;
- cpu->ctr = 0x82048004;
- cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x1031;
- cpu->isar.id_pfr1 = 0x11;
- cpu->isar.id_dfr0 = 0x400;
- cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x31100003;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01202000;
- cpu->isar.id_mmfr3 = 0x11;
- cpu->isar.id_isar0 = 0x00101111;
- cpu->isar.id_isar1 = 0x12112111;
- cpu->isar.id_isar2 = 0x21232031;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x00111142;
- cpu->isar.dbgdidr = 0x15141000;
- cpu->clidr = (1 << 27) | (2 << 24) | 3;
- cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
- cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
- cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
- cpu->reset_auxcr = 2;
- define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
-}
-
-static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
- /*
- * power_control should be set to maximum latency. Again,
- * default to 0 and set by private hook
- */
- { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .resetvalue = 0,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
- { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .resetvalue = 0,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
- { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .resetvalue = 0,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
- { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
- /* TLB lockdown control */
- { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
- .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
- { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
- .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
- { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
- .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
- { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
- .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
- { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
- .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
- REGINFO_SENTINEL
-};
-
-static void cortex_a9_initfn(Object *obj)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- cpu->dtb_compatible = "arm,cortex-a9";
- set_feature(&cpu->env, ARM_FEATURE_V7);
- set_feature(&cpu->env, ARM_FEATURE_NEON);
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
- set_feature(&cpu->env, ARM_FEATURE_EL3);
- /*
- * Note that A9 supports the MP extensions even for
- * A9UP and single-core A9MP (which are both different
- * and valid configurations; we don't model A9UP).
- */
- set_feature(&cpu->env, ARM_FEATURE_V7MP);
- set_feature(&cpu->env, ARM_FEATURE_CBAR);
- cpu->midr = 0x410fc090;
- cpu->reset_fpsid = 0x41033090;
- cpu->isar.mvfr0 = 0x11110222;
- cpu->isar.mvfr1 = 0x01111111;
- cpu->ctr = 0x80038003;
- cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x1031;
- cpu->isar.id_pfr1 = 0x11;
- cpu->isar.id_dfr0 = 0x000;
- cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x00100103;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01230000;
- cpu->isar.id_mmfr3 = 0x00002111;
- cpu->isar.id_isar0 = 0x00101111;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x00111142;
- cpu->isar.dbgdidr = 0x35141000;
- cpu->clidr = (1 << 27) | (1 << 24) | 3;
- cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
- cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
- define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
-}
-
-#ifndef CONFIG_USER_ONLY
-static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- MachineState *ms = MACHINE(qdev_get_machine());
-
- /*
- * Linux wants the number of processors from here.
- * Might as well set the interrupt-controller bit too.
- */
- return ((ms->smp.cpus - 1) << 24) | (1 << 23);
-}
-#endif
-
-static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
-#ifndef CONFIG_USER_ONLY
- { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
- .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
- .writefn = arm_cp_write_ignore, },
-#endif
- { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- REGINFO_SENTINEL
-};
-
-static void cortex_a7_initfn(Object *obj)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- cpu->dtb_compatible = "arm,cortex-a7";
- set_feature(&cpu->env, ARM_FEATURE_V7VE);
- set_feature(&cpu->env, ARM_FEATURE_NEON);
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
- set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
- set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
- set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
- set_feature(&cpu->env, ARM_FEATURE_EL2);
- set_feature(&cpu->env, ARM_FEATURE_EL3);
- set_feature(&cpu->env, ARM_FEATURE_PMU);
- cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
- cpu->midr = 0x410fc075;
- cpu->reset_fpsid = 0x41023075;
- cpu->isar.mvfr0 = 0x10110222;
- cpu->isar.mvfr1 = 0x11111111;
- cpu->ctr = 0x84448003;
- cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x00001131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x02010555;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01240000;
- cpu->isar.id_mmfr3 = 0x02102211;
- /*
- * a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
- * table 4-41 gives 0x02101110, which includes the arm div insns.
- */
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x10011142;
- cpu->isar.dbgdidr = 0x3515f005;
- cpu->clidr = 0x0a200023;
- cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
- cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
- cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
- define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
-}
-
-static void cortex_a15_initfn(Object *obj)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- cpu->dtb_compatible = "arm,cortex-a15";
- set_feature(&cpu->env, ARM_FEATURE_V7VE);
- set_feature(&cpu->env, ARM_FEATURE_NEON);
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
- set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
- set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
- set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
- set_feature(&cpu->env, ARM_FEATURE_EL2);
- set_feature(&cpu->env, ARM_FEATURE_EL3);
- set_feature(&cpu->env, ARM_FEATURE_PMU);
- cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
- cpu->midr = 0x412fc0f1;
- cpu->reset_fpsid = 0x410430f0;
- cpu->isar.mvfr0 = 0x10110222;
- cpu->isar.mvfr1 = 0x11111111;
- cpu->ctr = 0x8444c004;
- cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x00001131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x02010555;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01240000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x10011142;
- cpu->isar.dbgdidr = 0x3515f021;
- cpu->clidr = 0x0a200023;
- cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
- cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
- cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
- define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
-}
-
-#ifndef TARGET_AARCH64
-/*
- * -cpu max: a CPU with as many features enabled as our emulation supports.
- * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
- * this only needs to handle 32 bits, and need not care about KVM.
- */
-static void arm_max_initfn(Object *obj)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- cortex_a15_initfn(obj);
-
- /* old-style VFP short-vector support */
- cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
-
-#ifdef CONFIG_USER_ONLY
- /*
- * We don't set these in system emulation mode for the moment,
- * since we don't correctly set (all of) the ID registers to
- * advertise them.
- */
- set_feature(&cpu->env, ARM_FEATURE_V8);
- {
- uint32_t t;
-
- t = cpu->isar.id_isar5;
- t = FIELD_DP32(t, ID_ISAR5, AES, 2);
- t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
- t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
- t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
- t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
- t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
- cpu->isar.id_isar5 = t;
-
- t = cpu->isar.id_isar6;
- t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
- t = FIELD_DP32(t, ID_ISAR6, DP, 1);
- t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
- t = FIELD_DP32(t, ID_ISAR6, SB, 1);
- t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
- cpu->isar.id_isar6 = t;
-
- t = cpu->isar.mvfr1;
- t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
- t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
- cpu->isar.mvfr1 = t;
-
- t = cpu->isar.mvfr2;
- t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
- t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
- cpu->isar.mvfr2 = t;
-
- t = cpu->isar.id_mmfr3;
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
- cpu->isar.id_mmfr3 = t;
-
- t = cpu->isar.id_mmfr4;
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
- t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
- t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
- t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
- cpu->isar.id_mmfr4 = t;
-
- t = cpu->isar.id_pfr0;
- t = FIELD_DP32(t, ID_PFR0, DIT, 1);
- cpu->isar.id_pfr0 = t;
-
- t = cpu->isar.id_pfr2;
- t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
- cpu->isar.id_pfr2 = t;
- }
-#endif
-}
-#endif
-
-#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */
-
-static const ARMCPUInfo arm_cpus[] = {
-#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
- { .name = "cortex-a7", .initfn = cortex_a7_initfn },
- { .name = "cortex-a8", .initfn = cortex_a8_initfn },
- { .name = "cortex-a9", .initfn = cortex_a9_initfn },
- { .name = "cortex-a15", .initfn = cortex_a15_initfn },
-#ifndef TARGET_AARCH64
- { .name = "max", .initfn = arm_max_initfn },
-#endif
-#ifdef CONFIG_USER_ONLY
- { .name = "any", .initfn = arm_max_initfn },
-#endif
-#endif
-};
-
static Property arm_cpu_properties[] = {
DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
@@ -2390,21 +2065,11 @@ static const TypeInfo arm_cpu_type_info = {
static void arm_cpu_register_types(void)
{
- const size_t cpu_count = ARRAY_SIZE(arm_cpus);
-
type_register_static(&arm_cpu_type_info);
#ifdef CONFIG_KVM
type_register_static(&host_arm_cpu_type_info);
#endif
-
- if (cpu_count) {
- size_t i;
-
- for (i = 0; i < cpu_count; ++i) {
- arm_cpu_register(&arm_cpus[i]);
- }
- }
}
type_init(arm_cpu_register_types)
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index fb07a33693..046e476f65 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -15,6 +15,9 @@
#endif /* CONFIG_TCG */
#include "internals.h"
#include "target/arm/idau.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/boards.h"
+#endif
/* CPU models. These are not needed for the AArch64 linux-user build. */
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
@@ -255,6 +258,236 @@ static void arm11mpcore_initfn(Object *obj)
cpu->reset_auxcr = 1;
}
+static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
+ { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a8_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a8";
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ cpu->midr = 0x410fc080;
+ cpu->reset_fpsid = 0x410330c0;
+ cpu->isar.mvfr0 = 0x11110222;
+ cpu->isar.mvfr1 = 0x00011111;
+ cpu->ctr = 0x82048004;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->isar.id_pfr0 = 0x1031;
+ cpu->isar.id_pfr1 = 0x11;
+ cpu->isar.id_dfr0 = 0x400;
+ cpu->id_afr0 = 0;
+ cpu->isar.id_mmfr0 = 0x31100003;
+ cpu->isar.id_mmfr1 = 0x20000000;
+ cpu->isar.id_mmfr2 = 0x01202000;
+ cpu->isar.id_mmfr3 = 0x11;
+ cpu->isar.id_isar0 = 0x00101111;
+ cpu->isar.id_isar1 = 0x12112111;
+ cpu->isar.id_isar2 = 0x21232031;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x00111142;
+ cpu->isar.dbgdidr = 0x15141000;
+ cpu->clidr = (1 << 27) | (2 << 24) | 3;
+ cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
+ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
+ cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
+ cpu->reset_auxcr = 2;
+ define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
+}
+
+static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
+ /*
+ * power_control should be set to maximum latency. Again,
+ * default to 0 and set by private hook
+ */
+ { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
+ { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
+ { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
+ { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ /* TLB lockdown control */
+ { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
+ .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
+ .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
+ { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a9_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a9";
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ /*
+ * Note that A9 supports the MP extensions even for
+ * A9UP and single-core A9MP (which are both different
+ * and valid configurations; we don't model A9UP).
+ */
+ set_feature(&cpu->env, ARM_FEATURE_V7MP);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR);
+ cpu->midr = 0x410fc090;
+ cpu->reset_fpsid = 0x41033090;
+ cpu->isar.mvfr0 = 0x11110222;
+ cpu->isar.mvfr1 = 0x01111111;
+ cpu->ctr = 0x80038003;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->isar.id_pfr0 = 0x1031;
+ cpu->isar.id_pfr1 = 0x11;
+ cpu->isar.id_dfr0 = 0x000;
+ cpu->id_afr0 = 0;
+ cpu->isar.id_mmfr0 = 0x00100103;
+ cpu->isar.id_mmfr1 = 0x20000000;
+ cpu->isar.id_mmfr2 = 0x01230000;
+ cpu->isar.id_mmfr3 = 0x00002111;
+ cpu->isar.id_isar0 = 0x00101111;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x00111142;
+ cpu->isar.dbgdidr = 0x35141000;
+ cpu->clidr = (1 << 27) | (1 << 24) | 3;
+ cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
+ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
+ define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
+}
+
+#ifndef CONFIG_USER_ONLY
+static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+
+ /*
+ * Linux wants the number of processors from here.
+ * Might as well set the interrupt-controller bit too.
+ */
+ return ((ms->smp.cpus - 1) << 24) | (1 << 23);
+}
+#endif
+
+static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
+#ifndef CONFIG_USER_ONLY
+ { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
+ .writefn = arm_cp_write_ignore, },
+#endif
+ { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a7_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a7";
+ set_feature(&cpu->env, ARM_FEATURE_V7VE);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
+ cpu->midr = 0x410fc075;
+ cpu->reset_fpsid = 0x41023075;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x11111111;
+ cpu->ctr = 0x84448003;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->isar.id_pfr0 = 0x00001131;
+ cpu->isar.id_pfr1 = 0x00011011;
+ cpu->isar.id_dfr0 = 0x02010555;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x10101105;
+ cpu->isar.id_mmfr1 = 0x40000000;
+ cpu->isar.id_mmfr2 = 0x01240000;
+ cpu->isar.id_mmfr3 = 0x02102211;
+ /*
+ * a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
+ * table 4-41 gives 0x02101110, which includes the arm div insns.
+ */
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x10011142;
+ cpu->isar.dbgdidr = 0x3515f005;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
+ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
+ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
+ define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
+}
+
+static void cortex_a15_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a15";
+ set_feature(&cpu->env, ARM_FEATURE_V7VE);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
+ cpu->midr = 0x412fc0f1;
+ cpu->reset_fpsid = 0x410430f0;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x11111111;
+ cpu->ctr = 0x8444c004;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->isar.id_pfr0 = 0x00001131;
+ cpu->isar.id_pfr1 = 0x00011011;
+ cpu->isar.id_dfr0 = 0x02010555;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x10201105;
+ cpu->isar.id_mmfr1 = 0x20000000;
+ cpu->isar.id_mmfr2 = 0x01240000;
+ cpu->isar.id_mmfr3 = 0x02102211;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x10011142;
+ cpu->isar.dbgdidr = 0x3515f021;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
+ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
+ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
+ define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
+}
+
static void cortex_m0_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -695,6 +928,81 @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
cc->gdb_core_xml_file = "arm-m-profile.xml";
}
+#ifndef TARGET_AARCH64
+/*
+ * -cpu max: a CPU with as many features enabled as our emulation supports.
+ * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
+ * this only needs to handle 32 bits, and need not care about KVM.
+ */
+static void arm_max_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cortex_a15_initfn(obj);
+
+ /* old-style VFP short-vector support */
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
+
+#ifdef CONFIG_USER_ONLY
+ /*
+ * We don't set these in system emulation mode for the moment,
+ * since we don't correctly set (all of) the ID registers to
+ * advertise them.
+ */
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ {
+ uint32_t t;
+
+ t = cpu->isar.id_isar5;
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2);
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
+ t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
+ cpu->isar.id_isar5 = t;
+
+ t = cpu->isar.id_isar6;
+ t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1);
+ t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
+ t = FIELD_DP32(t, ID_ISAR6, SB, 1);
+ t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
+ cpu->isar.id_isar6 = t;
+
+ t = cpu->isar.mvfr1;
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
+ cpu->isar.mvfr1 = t;
+
+ t = cpu->isar.mvfr2;
+ t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
+ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
+ cpu->isar.mvfr2 = t;
+
+ t = cpu->isar.id_mmfr3;
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
+ cpu->isar.id_mmfr3 = t;
+
+ t = cpu->isar.id_mmfr4;
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
+ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
+ cpu->isar.id_mmfr4 = t;
+
+ t = cpu->isar.id_pfr0;
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1);
+ cpu->isar.id_pfr0 = t;
+
+ t = cpu->isar.id_pfr2;
+ t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
+ cpu->isar.id_pfr2 = t;
+ }
+#endif /* CONFIG_USER_ONLY */
+}
+#endif /* !TARGET_AARCH64 */
+
static const ARMCPUInfo arm_tcg_cpus[] = {
{ .name = "arm926", .initfn = arm926_initfn },
{ .name = "arm946", .initfn = arm946_initfn },
@@ -708,6 +1016,10 @@ static const ARMCPUInfo arm_tcg_cpus[] = {
{ .name = "arm1136", .initfn = arm1136_initfn },
{ .name = "arm1176", .initfn = arm1176_initfn },
{ .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
+ { .name = "cortex-a7", .initfn = cortex_a7_initfn },
+ { .name = "cortex-a8", .initfn = cortex_a8_initfn },
+ { .name = "cortex-a9", .initfn = cortex_a9_initfn },
+ { .name = "cortex-a15", .initfn = cortex_a15_initfn },
{ .name = "cortex-m0", .initfn = cortex_m0_initfn,
.class_init = arm_v7m_class_init },
{ .name = "cortex-m3", .initfn = cortex_m3_initfn,
@@ -738,6 +1050,12 @@ static const ARMCPUInfo arm_tcg_cpus[] = {
{ .name = "pxa270-b1", .initfn = pxa270b1_initfn },
{ .name = "pxa270-c0", .initfn = pxa270c0_initfn },
{ .name = "pxa270-c5", .initfn = pxa270c5_initfn },
+#ifndef TARGET_AARCH64
+ { .name = "max", .initfn = arm_max_initfn },
+#endif
+#ifdef CONFIG_USER_ONLY
+ { .name = "any", .initfn = arm_max_initfn },
+#endif
};
static const TypeInfo idau_interface_type_info = {
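
The hunks above move the Cortex-A7/A8/A9/A15 models and the 32-bit "max"/"any" models into the TCG-only table: each model is a single { .name, .initfn } entry in arm_tcg_cpus[], and the whole array is registered in a loop at type-init time. A self-contained sketch of that table-driven registration idiom (the type and function names here are invented for illustration):

#include <stddef.h>
#include <stdio.h>

typedef struct {
    const char *name;
    void (*initfn)(void);
} DemoCpuInfo;                       /* stand-in for ARMCPUInfo */

static void demo_a_init(void) { puts("init demo-a"); }
static void demo_b_init(void) { puts("init demo-b"); }

static const DemoCpuInfo demo_cpus[] = {
    { .name = "demo-a", .initfn = demo_a_init },
    { .name = "demo-b", .initfn = demo_b_init },
};

static void demo_register_cpus(void)
{
    size_t i;

    for (i = 0; i < sizeof(demo_cpus) / sizeof(demo_cpus[0]); i++) {
        /* a real implementation would register a QOM type per entry */
        printf("registering %s\n", demo_cpus[i].name);
        demo_cpus[i].initfn();
    }
}

int main(void)
{
    demo_register_cpus();
    return 0;
}
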
diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index 78c4efb5cb..cfcb8173ba 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -459,7 +459,7 @@ static inline void gen_logical_not(TCGv dest, TCGv src)
: (fCAST##REGSTYPE##s(SRC) >> (SHAMT)))
#define fASHIFTR(SRC, SHAMT, REGSTYPE) (fCAST##REGSTYPE##s(SRC) >> (SHAMT))
#define fLSHIFTR(SRC, SHAMT, REGSTYPE) \
- (((SHAMT) >= 64) ? 0 : (fCAST##REGSTYPE##u(SRC) >> (SHAMT)))
+ (((SHAMT) >= (sizeof(SRC) * 8)) ? 0 : (fCAST##REGSTYPE##u(SRC) >> (SHAMT)))
#define fROTL(SRC, SHAMT, REGSTYPE) \
(((SHAMT) == 0) ? (SRC) : ((fCAST##REGSTYPE##u(SRC) << (SHAMT)) | \
((fCAST##REGSTYPE##u(SRC) >> \
@@ -469,7 +469,7 @@ static inline void gen_logical_not(TCGv dest, TCGv src)
((fCAST##REGSTYPE##u(SRC) << \
((sizeof(SRC) * 8) - (SHAMT))))))
#define fASHIFTL(SRC, SHAMT, REGSTYPE) \
- (((SHAMT) >= 64) ? 0 : (fCAST##REGSTYPE##s(SRC) << (SHAMT)))
+ (((SHAMT) >= (sizeof(SRC) * 8)) ? 0 : (fCAST##REGSTYPE##s(SRC) << (SHAMT)))
#ifdef QEMU_GENERATE
#define fLOAD(NUM, SIZE, SIGN, EA, DST) MEM_LOAD##SIZE##SIGN(DST, EA)
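
The fLSHIFTR and fASHIFTL guards above now compare the shift amount against the operand's real width instead of a hard-coded 64: in C, a shift count at or beyond the width of the operand is undefined behaviour, so the macros return 0 explicitly once the count reaches sizeof(SRC) * 8. A small self-contained illustration of the same guard (the macro name is invented here):

#include <stdint.h>
#include <stdio.h>

/* Logical shift right that yields 0 instead of undefined behaviour
 * once the count reaches the operand's width. */
#define SAFE_LSHIFTR(val, shamt) \
    (((shamt) >= (sizeof(val) * 8)) ? 0 : ((val) >> (shamt)))

int main(void)
{
    uint32_t w = 0x80000000u;
    unsigned shamt = 32;                    /* equal to the width of 'w' */

    printf("0x%x\n", SAFE_LSHIFTR(w, shamt));      /* prints 0x0 */
    printf("0x%x\n", SAFE_LSHIFTR(w, shamt - 1));  /* prints 0x1 */
    return 0;
}
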
diff --git a/target/hexagon/opcodes.c b/target/hexagon/opcodes.c
index 4eef5fc40f..35d790cdd5 100644
--- a/target/hexagon/opcodes.c
+++ b/target/hexagon/opcodes.c
@@ -82,6 +82,7 @@ static void init_attribs(int tag, ...)
while ((attr = va_arg(ap, int)) != 0) {
set_bit(attr, opcode_attribs[tag]);
}
+ va_end(ap);
}
const OpcodeEncoding opcode_encodings[] = {
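
The added va_end(ap) closes the va_start()/va_end() pair in init_attribs(): C requires every va_start() to be matched by a va_end() in the same function before it returns. A minimal example of the pairing, using the same 0-terminated argument convention as init_attribs():

#include <stdarg.h>
#include <stdio.h>

/* Sum a 0-terminated list of ints; va_start() is always paired with va_end(). */
static int sum_until_zero(int first, ...)
{
    va_list ap;
    int total = first;
    int v;

    va_start(ap, first);
    while ((v = va_arg(ap, int)) != 0) {
        total += v;
    }
    va_end(ap);            /* the call the patch adds to init_attribs() */
    return total;
}

int main(void)
{
    printf("%d\n", sum_until_zero(1, 2, 3, 0));    /* prints 6 */
    return 0;
}
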
diff --git a/target/meson.build b/target/meson.build
index c35c1e9d34..0e2c4b69cb 100644
--- a/target/meson.build
+++ b/target/meson.build
@@ -18,7 +18,6 @@ subdir('rx')
subdir('s390x')
subdir('sh4')
subdir('sparc')
-subdir('tilegx')
subdir('tricore')
subdir('unicore32')
subdir('xtensa')
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index bf70c77295..f6ef09c9e2 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -653,7 +653,7 @@ static void mips_cpu_initfn(Object *obj)
MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(obj);
cpu_set_cpustate_pointers(cpu);
- cpu->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, cpu);
+ cpu->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, cpu, 0);
env->cpu_model = mcc->cpu_def;
}
diff --git a/target/ppc/translate_init.c.inc b/target/ppc/translate_init.c.inc
index e7324e85cd..108ff2be2b 100644
--- a/target/ppc/translate_init.c.inc
+++ b/target/ppc/translate_init.c.inc
@@ -37,7 +37,6 @@
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "mmu-book3s-v3.h"
-#include "sysemu/qtest.h"
#include "qemu/cutils.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 714e3b5641..01c4344082 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -271,17 +271,6 @@ typedef SuperHCPU ArchCPU;
#include "exec/cpu-all.h"
-/* Memory access type */
-enum {
- /* Privilege */
- ACCESS_PRIV = 0x01,
- /* Direction */
- ACCESS_WRITE = 0x02,
- /* Type of instruction */
- ACCESS_CODE = 0x10,
- ACCESS_INT = 0x20
-};
-
/* MMU control register */
#define MMUCR 0x1F000010
#define MMUCR_AT (1<<0)
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 408478ce5d..bd8e034f17 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -330,22 +330,22 @@ static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid
MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
- int *prot, target_ulong address,
- int rw, int access_type)
+ int *prot, target_ulong address,
+ MMUAccessType access_type)
{
int use_asid, n;
tlb_t *matching = NULL;
use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
- if (rw == 2) {
+ if (access_type == MMU_INST_FETCH) {
n = find_itlb_entry(env, address, use_asid);
- if (n >= 0) {
- matching = &env->itlb[n];
+ if (n >= 0) {
+ matching = &env->itlb[n];
if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
- n = MMU_ITLB_VIOLATION;
+ n = MMU_ITLB_VIOLATION;
} else {
- *prot = PAGE_EXEC;
+ *prot = PAGE_EXEC;
}
} else {
n = find_utlb_entry(env, address, use_asid);
@@ -365,17 +365,17 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
} else if (n == MMU_DTLB_MISS) {
n = MMU_ITLB_MISS;
}
- }
+ }
} else {
- n = find_utlb_entry(env, address, use_asid);
- if (n >= 0) {
- matching = &env->utlb[n];
+ n = find_utlb_entry(env, address, use_asid);
+ if (n >= 0) {
+ matching = &env->utlb[n];
if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
- n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
- MMU_DTLB_VIOLATION_READ;
- } else if ((rw == 1) && !(matching->pr & 1)) {
+ n = (access_type == MMU_DATA_STORE)
+ ? MMU_DTLB_VIOLATION_WRITE : MMU_DTLB_VIOLATION_READ;
+ } else if ((access_type == MMU_DATA_STORE) && !(matching->pr & 1)) {
n = MMU_DTLB_VIOLATION_WRITE;
- } else if ((rw == 1) && !matching->d) {
+ } else if ((access_type == MMU_DATA_STORE) && !matching->d) {
n = MMU_DTLB_INITIAL_WRITE;
} else {
*prot = PAGE_READ;
@@ -383,56 +383,56 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
*prot |= PAGE_WRITE;
}
}
- } else if (n == MMU_DTLB_MISS) {
- n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
- MMU_DTLB_MISS_READ;
- }
+ } else if (n == MMU_DTLB_MISS) {
+ n = (access_type == MMU_DATA_STORE)
+ ? MMU_DTLB_MISS_WRITE : MMU_DTLB_MISS_READ;
+ }
}
if (n >= 0) {
- n = MMU_OK;
- *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
- (address & (matching->size - 1));
+ n = MMU_OK;
+ *physical = ((matching->ppn << 10) & ~(matching->size - 1))
+ | (address & (matching->size - 1));
}
return n;
}
static int get_physical_address(CPUSH4State * env, target_ulong * physical,
int *prot, target_ulong address,
- int rw, int access_type)
+ MMUAccessType access_type)
{
/* P1, P2 and P4 areas do not use translation */
- if ((address >= 0x80000000 && address < 0xc0000000) ||
- address >= 0xe0000000) {
+ if ((address >= 0x80000000 && address < 0xc0000000) || address >= 0xe0000000) {
if (!(env->sr & (1u << SR_MD))
- && (address < 0xe0000000 || address >= 0xe4000000)) {
- /* Unauthorized access in user mode (only store queues are available) */
+ && (address < 0xe0000000 || address >= 0xe4000000)) {
+ /* Unauthorized access in user mode (only store queues are available) */
qemu_log_mask(LOG_GUEST_ERROR, "Unauthorized access\n");
- if (rw == 0)
- return MMU_DADDR_ERROR_READ;
- else if (rw == 1)
- return MMU_DADDR_ERROR_WRITE;
- else
- return MMU_IADDR_ERROR;
- }
- if (address >= 0x80000000 && address < 0xc0000000) {
- /* Mask upper 3 bits for P1 and P2 areas */
- *physical = address & 0x1fffffff;
- } else {
- *physical = address;
- }
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return MMU_OK;
+ if (access_type == MMU_DATA_LOAD) {
+ return MMU_DADDR_ERROR_READ;
+ } else if (access_type == MMU_DATA_STORE) {
+ return MMU_DADDR_ERROR_WRITE;
+ } else {
+ return MMU_IADDR_ERROR;
+ }
+ }
+ if (address >= 0x80000000 && address < 0xc0000000) {
+ /* Mask upper 3 bits for P1 and P2 areas */
+ *physical = address & 0x1fffffff;
+ } else {
+ *physical = address;
+ }
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return MMU_OK;
}
/* If MMU is disabled, return the corresponding physical page */
if (!(env->mmucr & MMUCR_AT)) {
- *physical = address & 0x1FFFFFFF;
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return MMU_OK;
+ *physical = address & 0x1FFFFFFF;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return MMU_OK;
}
/* We need to resort to the MMU */
- return get_mmu_address(env, physical, prot, address, rw, access_type);
+ return get_mmu_address(env, physical, prot, address, access_type);
}
hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
@@ -441,7 +441,8 @@ hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
target_ulong physical;
int prot;
- get_physical_address(&cpu->env, &physical, &prot, addr, 0, 0);
+ get_physical_address(&cpu->env, &physical, &prot, addr, MMU_DATA_LOAD);
+
return physical;
}
@@ -813,11 +814,9 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMU_DTLB_VIOLATION_READ);
#else
target_ulong physical;
- int prot, sh_access_type;
+ int prot;
- sh_access_type = ACCESS_INT;
- ret = get_physical_address(env, &physical, &prot, address,
- access_type, sh_access_type);
+ ret = get_physical_address(env, &physical, &prot, address, access_type);
if (ret == MMU_OK) {
address &= TARGET_PAGE_MASK;
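
The SH4 conversion above replaces the old integer rw argument with MMUAccessType, so each branch names its intent instead of encoding it as 0 (read), 1 (write) or 2 (instruction fetch). A self-contained sketch of the same readability change, with a locally declared stand-in enum so it compiles on its own:

#include <stdio.h>

/* Local stand-in for QEMU's MMUAccessType so the sketch is self-contained. */
typedef enum {
    DEMO_MMU_DATA_LOAD,
    DEMO_MMU_DATA_STORE,
    DEMO_MMU_INST_FETCH,
} DemoMMUAccessType;

/* Old style: branch on magic numbers (rw == 0/1/2).
 * New style: branch on the named access type. */
static const char *describe_access(DemoMMUAccessType access_type)
{
    switch (access_type) {
    case DEMO_MMU_INST_FETCH:
        return "instruction fetch (previously rw == 2)";
    case DEMO_MMU_DATA_STORE:
        return "data store (previously rw == 1)";
    default:
        return "data load (previously rw == 0)";
    }
}

int main(void)
{
    printf("%s\n", describe_access(DEMO_MMU_INST_FETCH));
    return 0;
}
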
diff --git a/target/tilegx/cpu-param.h b/target/tilegx/cpu-param.h
deleted file mode 100644
index 80a341cbb7..0000000000
--- a/target/tilegx/cpu-param.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * TILE-Gx cpu parameters for qemu.
- *
- * Copyright (c) 2015 Chen Gang
- * SPDX-License-Identifier: LGPL-2.0+
- */
-
-#ifndef TILEGX_CPU_PARAM_H
-#define TILEGX_CPU_PARAM_H 1
-
-#define TARGET_LONG_BITS 64
-#define TARGET_PAGE_BITS 16 /* TILE-Gx uses 64KB page size */
-#define TARGET_PHYS_ADDR_SPACE_BITS 42
-#define TARGET_VIRT_ADDR_SPACE_BITS 64
-#define NB_MMU_MODES 1
-
-#endif
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
deleted file mode 100644
index d969c2f133..0000000000
--- a/target/tilegx/cpu.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * QEMU TILE-Gx CPU
- *
- * Copyright (c) 2015 Chen Gang
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see
- * <http://www.gnu.org/licenses/lgpl-2.1.html>
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "cpu.h"
-#include "qemu/module.h"
-#include "linux-user/syscall_defs.h"
-#include "qemu/qemu-print.h"
-#include "exec/exec-all.h"
-
-static void tilegx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- static const char * const reg_names[TILEGX_R_COUNT] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
- "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
- "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
- "r48", "r49", "r50", "r51", "bp", "tp", "sp", "lr"
- };
-
- TileGXCPU *cpu = TILEGX_CPU(cs);
- CPUTLGState *env = &cpu->env;
- int i;
-
- for (i = 0; i < TILEGX_R_COUNT; i++) {
- qemu_fprintf(f, "%-4s" TARGET_FMT_lx "%s",
- reg_names[i], env->regs[i],
- (i % 4) == 3 ? "\n" : " ");
- }
- qemu_fprintf(f, "PC " TARGET_FMT_lx " CEX " TARGET_FMT_lx "\n\n",
- env->pc, env->spregs[TILEGX_SPR_CMPEXCH]);
-}
-
-static ObjectClass *tilegx_cpu_class_by_name(const char *cpu_model)
-{
- return object_class_by_name(TYPE_TILEGX_CPU);
-}
-
-static void tilegx_cpu_set_pc(CPUState *cs, vaddr value)
-{
- TileGXCPU *cpu = TILEGX_CPU(cs);
-
- cpu->env.pc = value;
-}
-
-static bool tilegx_cpu_has_work(CPUState *cs)
-{
- return true;
-}
-
-static void tilegx_cpu_reset(DeviceState *dev)
-{
- CPUState *s = CPU(dev);
- TileGXCPU *cpu = TILEGX_CPU(s);
- TileGXCPUClass *tcc = TILEGX_CPU_GET_CLASS(cpu);
- CPUTLGState *env = &cpu->env;
-
- tcc->parent_reset(dev);
-
- memset(env, 0, offsetof(CPUTLGState, end_reset_fields));
-}
-
-static void tilegx_cpu_realizefn(DeviceState *dev, Error **errp)
-{
- CPUState *cs = CPU(dev);
- TileGXCPUClass *tcc = TILEGX_CPU_GET_CLASS(dev);
- Error *local_err = NULL;
-
- cpu_exec_realizefn(cs, &local_err);
- if (local_err != NULL) {
- error_propagate(errp, local_err);
- return;
- }
-
- cpu_reset(cs);
- qemu_init_vcpu(cs);
-
- tcc->parent_realize(dev, errp);
-}
-
-static void tilegx_cpu_initfn(Object *obj)
-{
- TileGXCPU *cpu = TILEGX_CPU(obj);
-
- cpu_set_cpustate_pointers(cpu);
-}
-
-static void tilegx_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = -1;
-}
-
-static bool tilegx_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- TileGXCPU *cpu = TILEGX_CPU(cs);
-
- /* The sigcode field will be filled in by do_signal in main.c. */
- cs->exception_index = TILEGX_EXCP_SIGNAL;
- cpu->env.excaddr = address;
- cpu->env.signo = TARGET_SIGSEGV;
- cpu->env.sigcode = 0;
-
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- tilegx_cpu_do_interrupt(cs);
- return true;
- }
- return false;
-}
-
-#include "hw/core/tcg-cpu-ops.h"
-
-static struct TCGCPUOps tilegx_tcg_ops = {
- .initialize = tilegx_tcg_init,
- .cpu_exec_interrupt = tilegx_cpu_exec_interrupt,
- .tlb_fill = tilegx_cpu_tlb_fill,
-
-#ifndef CONFIG_USER_ONLY
- .do_interrupt = tilegx_cpu_do_interrupt,
-#endif /* !CONFIG_USER_ONLY */
-};
-
-static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- CPUClass *cc = CPU_CLASS(oc);
- TileGXCPUClass *tcc = TILEGX_CPU_CLASS(oc);
-
- device_class_set_parent_realize(dc, tilegx_cpu_realizefn,
- &tcc->parent_realize);
-
- device_class_set_parent_reset(dc, tilegx_cpu_reset, &tcc->parent_reset);
-
- cc->class_by_name = tilegx_cpu_class_by_name;
- cc->has_work = tilegx_cpu_has_work;
- cc->dump_state = tilegx_cpu_dump_state;
- cc->set_pc = tilegx_cpu_set_pc;
- cc->gdb_num_core_regs = 0;
- cc->tcg_ops = &tilegx_tcg_ops;
-}
-
-static const TypeInfo tilegx_cpu_type_info = {
- .name = TYPE_TILEGX_CPU,
- .parent = TYPE_CPU,
- .instance_size = sizeof(TileGXCPU),
- .instance_init = tilegx_cpu_initfn,
- .class_size = sizeof(TileGXCPUClass),
- .class_init = tilegx_cpu_class_init,
-};
-
-static void tilegx_cpu_register_types(void)
-{
- type_register_static(&tilegx_cpu_type_info);
-}
-
-type_init(tilegx_cpu_register_types)
diff --git a/target/tilegx/cpu.h b/target/tilegx/cpu.h
deleted file mode 100644
index 7d8e44d12e..0000000000
--- a/target/tilegx/cpu.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * TILE-Gx virtual CPU header
- *
- * Copyright (c) 2015 Chen Gang
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TILEGX_CPU_H
-#define TILEGX_CPU_H
-
-#include "exec/cpu-defs.h"
-#include "qom/object.h"
-
-/* TILE-Gx common register alias */
-#define TILEGX_R_RE 0 /* 0 register, for function/syscall return value */
-#define TILEGX_R_ERR 1 /* 1 register, for syscall errno flag */
-#define TILEGX_R_NR 10 /* 10 register, for syscall number */
-#define TILEGX_R_BP 52 /* 52 register, optional frame pointer */
-#define TILEGX_R_TP 53 /* TP register, thread local storage data */
-#define TILEGX_R_SP 54 /* SP register, stack pointer */
-#define TILEGX_R_LR 55 /* LR register, may save pc, but it is not pc */
-#define TILEGX_R_COUNT 56 /* Only 56 registers are really useful */
-#define TILEGX_R_SN 56 /* SN register, obsoleted, it likes zero register */
-#define TILEGX_R_IDN0 57 /* IDN0 register, cause IDN_ACCESS exception */
-#define TILEGX_R_IDN1 58 /* IDN1 register, cause IDN_ACCESS exception */
-#define TILEGX_R_UDN0 59 /* UDN0 register, cause UDN_ACCESS exception */
-#define TILEGX_R_UDN1 60 /* UDN1 register, cause UDN_ACCESS exception */
-#define TILEGX_R_UDN2 61 /* UDN2 register, cause UDN_ACCESS exception */
-#define TILEGX_R_UDN3 62 /* UDN3 register, cause UDN_ACCESS exception */
-#define TILEGX_R_ZERO 63 /* Zero register, always zero */
-#define TILEGX_R_NOREG 255 /* Invalid register value */
-
-/* TILE-Gx special registers used by outside */
-enum {
- TILEGX_SPR_CMPEXCH = 0,
- TILEGX_SPR_CRITICAL_SEC = 1,
- TILEGX_SPR_SIM_CONTROL = 2,
- TILEGX_SPR_EX_CONTEXT_0_0 = 3,
- TILEGX_SPR_EX_CONTEXT_0_1 = 4,
- TILEGX_SPR_COUNT
-};
-
-/* Exception numbers */
-typedef enum {
- TILEGX_EXCP_NONE = 0,
- TILEGX_EXCP_SYSCALL = 1,
- TILEGX_EXCP_SIGNAL = 2,
- TILEGX_EXCP_OPCODE_UNKNOWN = 0x101,
- TILEGX_EXCP_OPCODE_UNIMPLEMENTED = 0x102,
- TILEGX_EXCP_OPCODE_CMPEXCH = 0x103,
- TILEGX_EXCP_OPCODE_CMPEXCH4 = 0x104,
- TILEGX_EXCP_OPCODE_EXCH = 0x105,
- TILEGX_EXCP_OPCODE_EXCH4 = 0x106,
- TILEGX_EXCP_OPCODE_FETCHADD = 0x107,
- TILEGX_EXCP_OPCODE_FETCHADD4 = 0x108,
- TILEGX_EXCP_OPCODE_FETCHADDGEZ = 0x109,
- TILEGX_EXCP_OPCODE_FETCHADDGEZ4 = 0x10a,
- TILEGX_EXCP_OPCODE_FETCHAND = 0x10b,
- TILEGX_EXCP_OPCODE_FETCHAND4 = 0x10c,
- TILEGX_EXCP_OPCODE_FETCHOR = 0x10d,
- TILEGX_EXCP_OPCODE_FETCHOR4 = 0x10e,
- TILEGX_EXCP_REG_IDN_ACCESS = 0x181,
- TILEGX_EXCP_REG_UDN_ACCESS = 0x182,
- TILEGX_EXCP_UNALIGNMENT = 0x201,
- TILEGX_EXCP_DBUG_BREAK = 0x301
-} TileExcp;
-
-typedef struct CPUTLGState {
- uint64_t regs[TILEGX_R_COUNT]; /* Common used registers by outside */
- uint64_t spregs[TILEGX_SPR_COUNT]; /* Special used registers by outside */
- uint64_t pc; /* Current pc */
-
-#if defined(CONFIG_USER_ONLY)
- uint64_t excaddr; /* exception address */
- uint64_t atomic_srca; /* Arguments to atomic "exceptions" */
- uint64_t atomic_srcb;
- uint32_t atomic_dstr;
- uint32_t signo; /* Signal number */
- uint32_t sigcode; /* Signal code */
-#endif
-
- /* Fields up to this point are cleared by a CPU reset */
- struct {} end_reset_fields;
-} CPUTLGState;
-
-#include "hw/core/cpu.h"
-
-#define TYPE_TILEGX_CPU "tilegx-cpu"
-
-OBJECT_DECLARE_TYPE(TileGXCPU, TileGXCPUClass,
- TILEGX_CPU)
-
-/**
- * TileGXCPUClass:
- * @parent_realize: The parent class' realize handler.
- * @parent_reset: The parent class' reset handler.
- *
- * A Tile-Gx CPU model.
- */
-struct TileGXCPUClass {
- /*< private >*/
- CPUClass parent_class;
- /*< public >*/
-
- DeviceRealize parent_realize;
- DeviceReset parent_reset;
-};
-
-/**
- * TileGXCPU:
- * @env: #CPUTLGState
- *
- * A Tile-GX CPU.
- */
-struct TileGXCPU {
- /*< private >*/
- CPUState parent_obj;
- /*< public >*/
-
- CPUNegativeOffsetState neg;
- CPUTLGState env;
-};
-
-
-/* TILE-Gx memory attributes */
-#define MMU_USER_IDX 0 /* Current memory operation is in user mode */
-
-typedef CPUTLGState CPUArchState;
-typedef TileGXCPU ArchCPU;
-
-#include "exec/cpu-all.h"
-
-void tilegx_tcg_init(void);
-int cpu_tilegx_signal_handler(int host_signum, void *pinfo, void *puc);
-
-#define CPU_RESOLVING_TYPE TYPE_TILEGX_CPU
-
-#define cpu_signal_handler cpu_tilegx_signal_handler
-
-static inline void cpu_get_tb_cpu_state(CPUTLGState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = 0;
-}
-
-#endif
diff --git a/target/tilegx/helper.c b/target/tilegx/helper.c
deleted file mode 100644
index c006bf7454..0000000000
--- a/target/tilegx/helper.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * QEMU TILE-Gx helpers
- *
- * Copyright (c) 2015 Chen Gang
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see
- * <http://www.gnu.org/licenses/lgpl-2.1.html>
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
-#include <zlib.h> /* For crc32 */
-#include "syscall_defs.h"
-
-void helper_exception(CPUTLGState *env, uint32_t excp)
-{
- CPUState *cs = env_cpu(env);
-
- cs->exception_index = excp;
- cpu_loop_exit(cs);
-}
-
-void helper_ext01_ics(CPUTLGState *env)
-{
- uint64_t val = env->spregs[TILEGX_SPR_EX_CONTEXT_0_1];
-
- switch (val) {
- case 0:
- case 1:
- env->spregs[TILEGX_SPR_CRITICAL_SEC] = val;
- break;
- default:
-#if defined(CONFIG_USER_ONLY)
- env->signo = TARGET_SIGILL;
- env->sigcode = TARGET_ILL_ILLOPC;
- helper_exception(env, TILEGX_EXCP_SIGNAL);
-#else
- helper_exception(env, TILEGX_EXCP_OPCODE_UNIMPLEMENTED);
-#endif
- break;
- }
-}
-
-uint64_t helper_revbits(uint64_t arg)
-{
- return revbit64(arg);
-}
-
-/*
- * Functional Description
- * uint64_t a = rf[SrcA];
- * uint64_t b = rf[SrcB];
- * uint64_t d = rf[Dest];
- * uint64_t output = 0;
- * unsigned int counter;
- * for (counter = 0; counter < (WORD_SIZE / BYTE_SIZE); counter++)
- * {
- * int sel = getByte (b, counter) & 0xf;
- * uint8_t byte = (sel < 8) ? getByte (d, sel) : getByte (a, (sel - 8));
- * output = setByte (output, counter, byte);
- * }
- * rf[Dest] = output;
- */
-uint64_t helper_shufflebytes(uint64_t dest, uint64_t srca, uint64_t srcb)
-{
- uint64_t vdst = 0;
- int count;
-
- for (count = 0; count < 64; count += 8) {
- uint64_t sel = srcb >> count;
- uint64_t src = (sel & 8) ? srca : dest;
- vdst |= extract64(src, (sel & 7) * 8, 8) << count;
- }
-
- return vdst;
-}
-
-uint64_t helper_crc32_8(uint64_t accum, uint64_t input)
-{
- uint8_t buf = input;
-
- /* zlib crc32 converts the accumulator and output to one's complement. */
- return crc32(accum ^ 0xffffffff, &buf, 1) ^ 0xffffffff;
-}
-
-uint64_t helper_crc32_32(uint64_t accum, uint64_t input)
-{
- uint8_t buf[4];
-
- stl_le_p(buf, input);
-
- /* zlib crc32 converts the accumulator and output to one's complement. */
- return crc32(accum ^ 0xffffffff, buf, 4) ^ 0xffffffff;
-}
-
-uint64_t helper_cmula(uint64_t srcd, uint64_t srca, uint64_t srcb)
-{
- uint32_t reala = (int16_t)srca;
- uint32_t imaga = (int16_t)(srca >> 16);
- uint32_t realb = (int16_t)srcb;
- uint32_t imagb = (int16_t)(srcb >> 16);
- uint32_t reald = srcd;
- uint32_t imagd = srcd >> 32;
- uint32_t realr = reala * realb - imaga * imagb + reald;
- uint32_t imagr = reala * imagb + imaga * realb + imagd;
-
- return deposit64(realr, 32, 32, imagr);
-}
-
-uint64_t helper_cmulaf(uint64_t srcd, uint64_t srca, uint64_t srcb)
-{
- uint32_t reala = (int16_t)srca;
- uint32_t imaga = (int16_t)(srca >> 16);
- uint32_t realb = (int16_t)srcb;
- uint32_t imagb = (int16_t)(srcb >> 16);
- uint32_t reald = (int16_t)srcd;
- uint32_t imagd = (int16_t)(srcd >> 16);
- int32_t realr = reala * realb - imaga * imagb;
- int32_t imagr = reala * imagb + imaga * realb;
-
- return deposit32((realr >> 15) + reald, 16, 16, (imagr >> 15) + imagd);
-}
-
-uint64_t helper_cmul2(uint64_t srca, uint64_t srcb, int shift, int round)
-{
- uint32_t reala = (int16_t)srca;
- uint32_t imaga = (int16_t)(srca >> 16);
- uint32_t realb = (int16_t)srcb;
- uint32_t imagb = (int16_t)(srcb >> 16);
- int32_t realr = reala * realb - imaga * imagb + round;
- int32_t imagr = reala * imagb + imaga * realb + round;
-
- return deposit32(realr >> shift, 16, 16, imagr >> shift);
-}
diff --git a/target/tilegx/helper.h b/target/tilegx/helper.h
deleted file mode 100644
index 16745c266f..0000000000
--- a/target/tilegx/helper.h
+++ /dev/null
@@ -1,23 +0,0 @@
-DEF_HELPER_2(exception, noreturn, env, i32)
-DEF_HELPER_1(ext01_ics, void, env)
-DEF_HELPER_FLAGS_1(revbits, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_3(shufflebytes, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
-DEF_HELPER_FLAGS_2(crc32_8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(crc32_32, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_3(cmula, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
-DEF_HELPER_FLAGS_3(cmulaf, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
-DEF_HELPER_FLAGS_4(cmul2, TCG_CALL_NO_RWG_SE, i64, i64, i64, int, int)
-
-DEF_HELPER_FLAGS_2(v1int_h, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v1int_l, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v2int_h, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v2int_l, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-
-DEF_HELPER_FLAGS_2(v1multu, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v2mults, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v1shl, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v1shru, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v1shrs, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v2shl, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v2shru, TCG_CALL_NO_RWG_SE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(v2shrs, TCG_CALL_NO_RWG_SE, i64, i64, i64)
diff --git a/target/tilegx/meson.build b/target/tilegx/meson.build
deleted file mode 100644
index 678590439c..0000000000
--- a/target/tilegx/meson.build
+++ /dev/null
@@ -1,13 +0,0 @@
-tilegx_ss = ss.source_set()
-tilegx_ss.add(files(
- 'cpu.c',
- 'helper.c',
- 'simd_helper.c',
- 'translate.c',
-))
-tilegx_ss.add(zlib)
-
-tilegx_softmmu_ss = ss.source_set()
-
-target_arch += {'tilegx': tilegx_ss}
-target_softmmu_arch += {'tilegx': tilegx_softmmu_ss}
diff --git a/target/tilegx/opcode_tilegx.h b/target/tilegx/opcode_tilegx.h
deleted file mode 100644
index 55376be4cf..0000000000
--- a/target/tilegx/opcode_tilegx.h
+++ /dev/null
@@ -1,1406 +0,0 @@
-/* TILE-Gx opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- *
- *
- *
- *
- */
-
-#ifndef OPCODE_TILEGX_H
-#define OPCODE_TILEGX_H
-
-#ifndef __ASSEMBLER__
-
-typedef uint64_t tilegx_bundle_bits;
-
-/* These are the bits that determine if a bundle is in the X encoding. */
-#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62)
-
-enum
-{
- /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
- TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
-
- /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
- TILEGX_NUM_PIPELINE_ENCODINGS = 5,
-
- /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */
- TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
-
- /* Instructions take this many bytes. */
- TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES,
-
- /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */
- TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
-
- /* Bundles should be aligned modulo this number of bytes. */
- TILEGX_BUNDLE_ALIGNMENT_IN_BYTES =
- (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
-
- /* Number of registers (some are magic, such as network I/O). */
- TILEGX_NUM_REGISTERS = 64,
-};
-
-/* Make a few "tile_" variables to simplify common code between
- architectures. */
-
-typedef tilegx_bundle_bits tile_bundle_bits;
-#define TILE_BUNDLE_SIZE_IN_BYTES TILEGX_BUNDLE_SIZE_IN_BYTES
-#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
-#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
- TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
-#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE
-
-/* 64-bit pattern for a { bpt ; nop } bundle. */
-#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
-
-static inline unsigned int
-get_BFEnd_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static inline unsigned int
-get_BFOpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 24)) & 0xf);
-}
-
-static inline unsigned int
-get_BFStart_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3f);
-}
-
-static inline unsigned int
-get_BrOff_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x0000003f) |
- (((unsigned int)(n >> 37)) & 0x0001ffc0);
-}
-
-static inline unsigned int
-get_BrType_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 54)) & 0x1f);
-}
-
-static inline unsigned int
-get_Dest_Imm8_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x0000003f) |
- (((unsigned int)(n >> 43)) & 0x000000c0);
-}
-
-static inline unsigned int
-get_Dest_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3f);
-}
-
-static inline unsigned int
-get_Dest_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x3f);
-}
-
-static inline unsigned int
-get_Dest_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3f);
-}
-
-static inline unsigned int
-get_Dest_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x3f);
-}
-
-static inline unsigned int
-get_Imm16_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xffff);
-}
-
-static inline unsigned int
-get_Imm16_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xffff);
-}
-
-static inline unsigned int
-get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 20)) & 0xff);
-}
-
-static inline unsigned int
-get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 51)) & 0xff);
-}
-
-static inline unsigned int
-get_Imm8_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xff);
-}
-
-static inline unsigned int
-get_Imm8_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xff);
-}
-
-static inline unsigned int
-get_Imm8_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xff);
-}
-
-static inline unsigned int
-get_Imm8_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xff);
-}
-
-static inline unsigned int
-get_JumpOff_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x7ffffff);
-}
-
-static inline unsigned int
-get_JumpOpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 58)) & 0x1);
-}
-
-static inline unsigned int
-get_MF_Imm14_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x3fff);
-}
-
-static inline unsigned int
-get_MT_Imm14_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x0000003f) |
- (((unsigned int)(n >> 37)) & 0x00003fc0);
-}
-
-static inline unsigned int
-get_Mode(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 62)) & 0x3);
-}
-
-static inline unsigned int
-get_Opcode_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 28)) & 0x7);
-}
-
-static inline unsigned int
-get_Opcode_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 59)) & 0x7);
-}
-
-static inline unsigned int
-get_Opcode_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 27)) & 0xf);
-}
-
-static inline unsigned int
-get_Opcode_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 58)) & 0xf);
-}
-
-static inline unsigned int
-get_Opcode_Y2(tilegx_bundle_bits n)
-{
- return (((n >> 26)) & 0x00000001) |
- (((unsigned int)(n >> 56)) & 0x00000002);
-}
-
-static inline unsigned int
-get_RRROpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3ff);
-}
-
-static inline unsigned int
-get_RRROpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3ff);
-}
-
-static inline unsigned int
-get_RRROpcodeExtension_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3);
-}
-
-static inline unsigned int
-get_RRROpcodeExtension_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3);
-}
-
-static inline unsigned int
-get_ShAmt_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static inline unsigned int
-get_ShAmt_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static inline unsigned int
-get_ShAmt_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static inline unsigned int
-get_ShAmt_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static inline unsigned int
-get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3ff);
-}
-
-static inline unsigned int
-get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3ff);
-}
-
-static inline unsigned int
-get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3);
-}
-
-static inline unsigned int
-get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3);
-}
-
-static inline unsigned int
-get_SrcA_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 6)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcA_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcA_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 6)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcA_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcA_Y2(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 20)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcBDest_Y2(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 51)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcB_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcB_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcB_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static inline unsigned int
-get_SrcB_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static inline unsigned int
-get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static inline unsigned int
-get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static inline unsigned int
-get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static inline unsigned int
-get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-
-static inline int
-sign_extend(int n, int num_bits)
-{
- int shift = (int)(sizeof(int) * 8 - num_bits);
- return (n << shift) >> shift;
-}
-
-
-
-static inline tilegx_bundle_bits
-create_BFEnd_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_BFOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xf) << 24);
-}
-
-static inline tilegx_bundle_bits
-create_BFStart_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 18);
-}
-
-static inline tilegx_bundle_bits
-create_BrOff_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
- (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37);
-}
-
-static inline tilegx_bundle_bits
-create_BrType_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x1f)) << 54);
-}
-
-static inline tilegx_bundle_bits
-create_Dest_Imm8_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
- (((tilegx_bundle_bits)(n & 0x000000c0)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_Dest_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 0);
-}
-
-static inline tilegx_bundle_bits
-create_Dest_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
-}
-
-static inline tilegx_bundle_bits
-create_Dest_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 0);
-}
-
-static inline tilegx_bundle_bits
-create_Dest_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
-}
-
-static inline tilegx_bundle_bits
-create_Imm16_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xffff) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_Imm16_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xffff)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_Imm8OpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 20);
-}
-
-static inline tilegx_bundle_bits
-create_Imm8OpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xff)) << 51);
-}
-
-static inline tilegx_bundle_bits
-create_Imm8_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_Imm8_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xff)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_Imm8_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_Imm8_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xff)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_JumpOff_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31);
-}
-
-static inline tilegx_bundle_bits
-create_JumpOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x1)) << 58);
-}
-
-static inline tilegx_bundle_bits
-create_MF_Imm14_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3fff)) << 37);
-}
-
-static inline tilegx_bundle_bits
-create_MT_Imm14_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
- (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37);
-}
-
-static inline tilegx_bundle_bits
-create_Mode(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3)) << 62);
-}
-
-static inline tilegx_bundle_bits
-create_Opcode_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x7) << 28);
-}
-
-static inline tilegx_bundle_bits
-create_Opcode_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x7)) << 59);
-}
-
-static inline tilegx_bundle_bits
-create_Opcode_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xf) << 27);
-}
-
-static inline tilegx_bundle_bits
-create_Opcode_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xf)) << 58);
-}
-
-static inline tilegx_bundle_bits
-create_Opcode_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x00000001) << 26) |
- (((tilegx_bundle_bits)(n & 0x00000002)) << 56);
-}
-
-static inline tilegx_bundle_bits
-create_RRROpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3ff) << 18);
-}
-
-static inline tilegx_bundle_bits
-create_RRROpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
-}
-
-static inline tilegx_bundle_bits
-create_RRROpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3) << 18);
-}
-
-static inline tilegx_bundle_bits
-create_RRROpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3)) << 49);
-}
-
-static inline tilegx_bundle_bits
-create_ShAmt_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_ShAmt_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_ShAmt_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_ShAmt_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_ShiftOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3ff) << 18);
-}
-
-static inline tilegx_bundle_bits
-create_ShiftOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
-}
-
-static inline tilegx_bundle_bits
-create_ShiftOpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3) << 18);
-}
-
-static inline tilegx_bundle_bits
-create_ShiftOpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3)) << 49);
-}
-
-static inline tilegx_bundle_bits
-create_SrcA_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 6);
-}
-
-static inline tilegx_bundle_bits
-create_SrcA_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
-}
-
-static inline tilegx_bundle_bits
-create_SrcA_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 6);
-}
-
-static inline tilegx_bundle_bits
-create_SrcA_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
-}
-
-static inline tilegx_bundle_bits
-create_SrcA_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 20);
-}
-
-static inline tilegx_bundle_bits
-create_SrcBDest_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 51);
-}
-
-static inline tilegx_bundle_bits
-create_SrcB_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_SrcB_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_SrcB_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_SrcB_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_UnaryOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_UnaryOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static inline tilegx_bundle_bits
-create_UnaryOpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static inline tilegx_bundle_bits
-create_UnaryOpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-
-enum
-{
- ADDI_IMM8_OPCODE_X0 = 1,
- ADDI_IMM8_OPCODE_X1 = 1,
- ADDI_OPCODE_Y0 = 0,
- ADDI_OPCODE_Y1 = 1,
- ADDLI_OPCODE_X0 = 1,
- ADDLI_OPCODE_X1 = 0,
- ADDXI_IMM8_OPCODE_X0 = 2,
- ADDXI_IMM8_OPCODE_X1 = 2,
- ADDXI_OPCODE_Y0 = 1,
- ADDXI_OPCODE_Y1 = 2,
- ADDXLI_OPCODE_X0 = 2,
- ADDXLI_OPCODE_X1 = 1,
- ADDXSC_RRR_0_OPCODE_X0 = 1,
- ADDXSC_RRR_0_OPCODE_X1 = 1,
- ADDX_RRR_0_OPCODE_X0 = 2,
- ADDX_RRR_0_OPCODE_X1 = 2,
- ADDX_RRR_0_OPCODE_Y0 = 0,
- ADDX_RRR_0_OPCODE_Y1 = 0,
- ADD_RRR_0_OPCODE_X0 = 3,
- ADD_RRR_0_OPCODE_X1 = 3,
- ADD_RRR_0_OPCODE_Y0 = 1,
- ADD_RRR_0_OPCODE_Y1 = 1,
- ANDI_IMM8_OPCODE_X0 = 3,
- ANDI_IMM8_OPCODE_X1 = 3,
- ANDI_OPCODE_Y0 = 2,
- ANDI_OPCODE_Y1 = 3,
- AND_RRR_0_OPCODE_X0 = 4,
- AND_RRR_0_OPCODE_X1 = 4,
- AND_RRR_5_OPCODE_Y0 = 0,
- AND_RRR_5_OPCODE_Y1 = 0,
- BEQZT_BRANCH_OPCODE_X1 = 16,
- BEQZ_BRANCH_OPCODE_X1 = 17,
- BFEXTS_BF_OPCODE_X0 = 4,
- BFEXTU_BF_OPCODE_X0 = 5,
- BFINS_BF_OPCODE_X0 = 6,
- BF_OPCODE_X0 = 3,
- BGEZT_BRANCH_OPCODE_X1 = 18,
- BGEZ_BRANCH_OPCODE_X1 = 19,
- BGTZT_BRANCH_OPCODE_X1 = 20,
- BGTZ_BRANCH_OPCODE_X1 = 21,
- BLBCT_BRANCH_OPCODE_X1 = 22,
- BLBC_BRANCH_OPCODE_X1 = 23,
- BLBST_BRANCH_OPCODE_X1 = 24,
- BLBS_BRANCH_OPCODE_X1 = 25,
- BLEZT_BRANCH_OPCODE_X1 = 26,
- BLEZ_BRANCH_OPCODE_X1 = 27,
- BLTZT_BRANCH_OPCODE_X1 = 28,
- BLTZ_BRANCH_OPCODE_X1 = 29,
- BNEZT_BRANCH_OPCODE_X1 = 30,
- BNEZ_BRANCH_OPCODE_X1 = 31,
- BRANCH_OPCODE_X1 = 2,
- CMOVEQZ_RRR_0_OPCODE_X0 = 5,
- CMOVEQZ_RRR_4_OPCODE_Y0 = 0,
- CMOVNEZ_RRR_0_OPCODE_X0 = 6,
- CMOVNEZ_RRR_4_OPCODE_Y0 = 1,
- CMPEQI_IMM8_OPCODE_X0 = 4,
- CMPEQI_IMM8_OPCODE_X1 = 4,
- CMPEQI_OPCODE_Y0 = 3,
- CMPEQI_OPCODE_Y1 = 4,
- CMPEQ_RRR_0_OPCODE_X0 = 7,
- CMPEQ_RRR_0_OPCODE_X1 = 5,
- CMPEQ_RRR_3_OPCODE_Y0 = 0,
- CMPEQ_RRR_3_OPCODE_Y1 = 2,
- CMPEXCH4_RRR_0_OPCODE_X1 = 6,
- CMPEXCH_RRR_0_OPCODE_X1 = 7,
- CMPLES_RRR_0_OPCODE_X0 = 8,
- CMPLES_RRR_0_OPCODE_X1 = 8,
- CMPLES_RRR_2_OPCODE_Y0 = 0,
- CMPLES_RRR_2_OPCODE_Y1 = 0,
- CMPLEU_RRR_0_OPCODE_X0 = 9,
- CMPLEU_RRR_0_OPCODE_X1 = 9,
- CMPLEU_RRR_2_OPCODE_Y0 = 1,
- CMPLEU_RRR_2_OPCODE_Y1 = 1,
- CMPLTSI_IMM8_OPCODE_X0 = 5,
- CMPLTSI_IMM8_OPCODE_X1 = 5,
- CMPLTSI_OPCODE_Y0 = 4,
- CMPLTSI_OPCODE_Y1 = 5,
- CMPLTS_RRR_0_OPCODE_X0 = 10,
- CMPLTS_RRR_0_OPCODE_X1 = 10,
- CMPLTS_RRR_2_OPCODE_Y0 = 2,
- CMPLTS_RRR_2_OPCODE_Y1 = 2,
- CMPLTUI_IMM8_OPCODE_X0 = 6,
- CMPLTUI_IMM8_OPCODE_X1 = 6,
- CMPLTU_RRR_0_OPCODE_X0 = 11,
- CMPLTU_RRR_0_OPCODE_X1 = 11,
- CMPLTU_RRR_2_OPCODE_Y0 = 3,
- CMPLTU_RRR_2_OPCODE_Y1 = 3,
- CMPNE_RRR_0_OPCODE_X0 = 12,
- CMPNE_RRR_0_OPCODE_X1 = 12,
- CMPNE_RRR_3_OPCODE_Y0 = 1,
- CMPNE_RRR_3_OPCODE_Y1 = 3,
- CMULAF_RRR_0_OPCODE_X0 = 13,
- CMULA_RRR_0_OPCODE_X0 = 14,
- CMULFR_RRR_0_OPCODE_X0 = 15,
- CMULF_RRR_0_OPCODE_X0 = 16,
- CMULHR_RRR_0_OPCODE_X0 = 17,
- CMULH_RRR_0_OPCODE_X0 = 18,
- CMUL_RRR_0_OPCODE_X0 = 19,
- CNTLZ_UNARY_OPCODE_X0 = 1,
- CNTLZ_UNARY_OPCODE_Y0 = 1,
- CNTTZ_UNARY_OPCODE_X0 = 2,
- CNTTZ_UNARY_OPCODE_Y0 = 2,
- CRC32_32_RRR_0_OPCODE_X0 = 20,
- CRC32_8_RRR_0_OPCODE_X0 = 21,
- DBLALIGN2_RRR_0_OPCODE_X0 = 22,
- DBLALIGN2_RRR_0_OPCODE_X1 = 13,
- DBLALIGN4_RRR_0_OPCODE_X0 = 23,
- DBLALIGN4_RRR_0_OPCODE_X1 = 14,
- DBLALIGN6_RRR_0_OPCODE_X0 = 24,
- DBLALIGN6_RRR_0_OPCODE_X1 = 15,
- DBLALIGN_RRR_0_OPCODE_X0 = 25,
- DRAIN_UNARY_OPCODE_X1 = 1,
- DTLBPR_UNARY_OPCODE_X1 = 2,
- EXCH4_RRR_0_OPCODE_X1 = 16,
- EXCH_RRR_0_OPCODE_X1 = 17,
- FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26,
- FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27,
- FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28,
- FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29,
- FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30,
- FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31,
- FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32,
- FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33,
- FETCHADD4_RRR_0_OPCODE_X1 = 18,
- FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19,
- FETCHADDGEZ_RRR_0_OPCODE_X1 = 20,
- FETCHADD_RRR_0_OPCODE_X1 = 21,
- FETCHAND4_RRR_0_OPCODE_X1 = 22,
- FETCHAND_RRR_0_OPCODE_X1 = 23,
- FETCHOR4_RRR_0_OPCODE_X1 = 24,
- FETCHOR_RRR_0_OPCODE_X1 = 25,
- FINV_UNARY_OPCODE_X1 = 3,
- FLUSHWB_UNARY_OPCODE_X1 = 4,
- FLUSH_UNARY_OPCODE_X1 = 5,
- FNOP_UNARY_OPCODE_X0 = 3,
- FNOP_UNARY_OPCODE_X1 = 6,
- FNOP_UNARY_OPCODE_Y0 = 3,
- FNOP_UNARY_OPCODE_Y1 = 8,
- FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34,
- FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35,
- FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36,
- FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37,
- FSINGLE_PACK1_UNARY_OPCODE_X0 = 4,
- FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4,
- FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38,
- FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39,
- ICOH_UNARY_OPCODE_X1 = 7,
- ILL_UNARY_OPCODE_X1 = 8,
- ILL_UNARY_OPCODE_Y1 = 9,
- IMM8_OPCODE_X0 = 4,
- IMM8_OPCODE_X1 = 3,
- INV_UNARY_OPCODE_X1 = 9,
- IRET_UNARY_OPCODE_X1 = 10,
- JALRP_UNARY_OPCODE_X1 = 11,
- JALRP_UNARY_OPCODE_Y1 = 10,
- JALR_UNARY_OPCODE_X1 = 12,
- JALR_UNARY_OPCODE_Y1 = 11,
- JAL_JUMP_OPCODE_X1 = 0,
- JRP_UNARY_OPCODE_X1 = 13,
- JRP_UNARY_OPCODE_Y1 = 12,
- JR_UNARY_OPCODE_X1 = 14,
- JR_UNARY_OPCODE_Y1 = 13,
- JUMP_OPCODE_X1 = 4,
- J_JUMP_OPCODE_X1 = 1,
- LD1S_ADD_IMM8_OPCODE_X1 = 7,
- LD1S_OPCODE_Y2 = 0,
- LD1S_UNARY_OPCODE_X1 = 15,
- LD1U_ADD_IMM8_OPCODE_X1 = 8,
- LD1U_OPCODE_Y2 = 1,
- LD1U_UNARY_OPCODE_X1 = 16,
- LD2S_ADD_IMM8_OPCODE_X1 = 9,
- LD2S_OPCODE_Y2 = 2,
- LD2S_UNARY_OPCODE_X1 = 17,
- LD2U_ADD_IMM8_OPCODE_X1 = 10,
- LD2U_OPCODE_Y2 = 3,
- LD2U_UNARY_OPCODE_X1 = 18,
- LD4S_ADD_IMM8_OPCODE_X1 = 11,
- LD4S_OPCODE_Y2 = 1,
- LD4S_UNARY_OPCODE_X1 = 19,
- LD4U_ADD_IMM8_OPCODE_X1 = 12,
- LD4U_OPCODE_Y2 = 2,
- LD4U_UNARY_OPCODE_X1 = 20,
- LDNA_UNARY_OPCODE_X1 = 21,
- LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
- LDNT1S_UNARY_OPCODE_X1 = 22,
- LDNT1U_ADD_IMM8_OPCODE_X1 = 14,
- LDNT1U_UNARY_OPCODE_X1 = 23,
- LDNT2S_ADD_IMM8_OPCODE_X1 = 15,
- LDNT2S_UNARY_OPCODE_X1 = 24,
- LDNT2U_ADD_IMM8_OPCODE_X1 = 16,
- LDNT2U_UNARY_OPCODE_X1 = 25,
- LDNT4S_ADD_IMM8_OPCODE_X1 = 17,
- LDNT4S_UNARY_OPCODE_X1 = 26,
- LDNT4U_ADD_IMM8_OPCODE_X1 = 18,
- LDNT4U_UNARY_OPCODE_X1 = 27,
- LDNT_ADD_IMM8_OPCODE_X1 = 19,
- LDNT_UNARY_OPCODE_X1 = 28,
- LD_ADD_IMM8_OPCODE_X1 = 20,
- LD_OPCODE_Y2 = 3,
- LD_UNARY_OPCODE_X1 = 29,
- LNK_UNARY_OPCODE_X1 = 30,
- LNK_UNARY_OPCODE_Y1 = 14,
- LDNA_ADD_IMM8_OPCODE_X1 = 21,
- MFSPR_IMM8_OPCODE_X1 = 22,
- MF_UNARY_OPCODE_X1 = 31,
- MM_BF_OPCODE_X0 = 7,
- MNZ_RRR_0_OPCODE_X0 = 40,
- MNZ_RRR_0_OPCODE_X1 = 26,
- MNZ_RRR_4_OPCODE_Y0 = 2,
- MNZ_RRR_4_OPCODE_Y1 = 2,
- MODE_OPCODE_YA2 = 1,
- MODE_OPCODE_YB2 = 2,
- MODE_OPCODE_YC2 = 3,
- MTSPR_IMM8_OPCODE_X1 = 23,
- MULAX_RRR_0_OPCODE_X0 = 41,
- MULAX_RRR_3_OPCODE_Y0 = 2,
- MULA_HS_HS_RRR_0_OPCODE_X0 = 42,
- MULA_HS_HS_RRR_9_OPCODE_Y0 = 0,
- MULA_HS_HU_RRR_0_OPCODE_X0 = 43,
- MULA_HS_LS_RRR_0_OPCODE_X0 = 44,
- MULA_HS_LU_RRR_0_OPCODE_X0 = 45,
- MULA_HU_HU_RRR_0_OPCODE_X0 = 46,
- MULA_HU_HU_RRR_9_OPCODE_Y0 = 1,
- MULA_HU_LS_RRR_0_OPCODE_X0 = 47,
- MULA_HU_LU_RRR_0_OPCODE_X0 = 48,
- MULA_LS_LS_RRR_0_OPCODE_X0 = 49,
- MULA_LS_LS_RRR_9_OPCODE_Y0 = 2,
- MULA_LS_LU_RRR_0_OPCODE_X0 = 50,
- MULA_LU_LU_RRR_0_OPCODE_X0 = 51,
- MULA_LU_LU_RRR_9_OPCODE_Y0 = 3,
- MULX_RRR_0_OPCODE_X0 = 52,
- MULX_RRR_3_OPCODE_Y0 = 3,
- MUL_HS_HS_RRR_0_OPCODE_X0 = 53,
- MUL_HS_HS_RRR_8_OPCODE_Y0 = 0,
- MUL_HS_HU_RRR_0_OPCODE_X0 = 54,
- MUL_HS_LS_RRR_0_OPCODE_X0 = 55,
- MUL_HS_LU_RRR_0_OPCODE_X0 = 56,
- MUL_HU_HU_RRR_0_OPCODE_X0 = 57,
- MUL_HU_HU_RRR_8_OPCODE_Y0 = 1,
- MUL_HU_LS_RRR_0_OPCODE_X0 = 58,
- MUL_HU_LU_RRR_0_OPCODE_X0 = 59,
- MUL_LS_LS_RRR_0_OPCODE_X0 = 60,
- MUL_LS_LS_RRR_8_OPCODE_Y0 = 2,
- MUL_LS_LU_RRR_0_OPCODE_X0 = 61,
- MUL_LU_LU_RRR_0_OPCODE_X0 = 62,
- MUL_LU_LU_RRR_8_OPCODE_Y0 = 3,
- MZ_RRR_0_OPCODE_X0 = 63,
- MZ_RRR_0_OPCODE_X1 = 27,
- MZ_RRR_4_OPCODE_Y0 = 3,
- MZ_RRR_4_OPCODE_Y1 = 3,
- NAP_UNARY_OPCODE_X1 = 32,
- NOP_UNARY_OPCODE_X0 = 5,
- NOP_UNARY_OPCODE_X1 = 33,
- NOP_UNARY_OPCODE_Y0 = 5,
- NOP_UNARY_OPCODE_Y1 = 15,
- NOR_RRR_0_OPCODE_X0 = 64,
- NOR_RRR_0_OPCODE_X1 = 28,
- NOR_RRR_5_OPCODE_Y0 = 1,
- NOR_RRR_5_OPCODE_Y1 = 1,
- ORI_IMM8_OPCODE_X0 = 7,
- ORI_IMM8_OPCODE_X1 = 24,
- OR_RRR_0_OPCODE_X0 = 65,
- OR_RRR_0_OPCODE_X1 = 29,
- OR_RRR_5_OPCODE_Y0 = 2,
- OR_RRR_5_OPCODE_Y1 = 2,
- PCNT_UNARY_OPCODE_X0 = 6,
- PCNT_UNARY_OPCODE_Y0 = 6,
- REVBITS_UNARY_OPCODE_X0 = 7,
- REVBITS_UNARY_OPCODE_Y0 = 7,
- REVBYTES_UNARY_OPCODE_X0 = 8,
- REVBYTES_UNARY_OPCODE_Y0 = 8,
- ROTLI_SHIFT_OPCODE_X0 = 1,
- ROTLI_SHIFT_OPCODE_X1 = 1,
- ROTLI_SHIFT_OPCODE_Y0 = 0,
- ROTLI_SHIFT_OPCODE_Y1 = 0,
- ROTL_RRR_0_OPCODE_X0 = 66,
- ROTL_RRR_0_OPCODE_X1 = 30,
- ROTL_RRR_6_OPCODE_Y0 = 0,
- ROTL_RRR_6_OPCODE_Y1 = 0,
- RRR_0_OPCODE_X0 = 5,
- RRR_0_OPCODE_X1 = 5,
- RRR_0_OPCODE_Y0 = 5,
- RRR_0_OPCODE_Y1 = 6,
- RRR_1_OPCODE_Y0 = 6,
- RRR_1_OPCODE_Y1 = 7,
- RRR_2_OPCODE_Y0 = 7,
- RRR_2_OPCODE_Y1 = 8,
- RRR_3_OPCODE_Y0 = 8,
- RRR_3_OPCODE_Y1 = 9,
- RRR_4_OPCODE_Y0 = 9,
- RRR_4_OPCODE_Y1 = 10,
- RRR_5_OPCODE_Y0 = 10,
- RRR_5_OPCODE_Y1 = 11,
- RRR_6_OPCODE_Y0 = 11,
- RRR_6_OPCODE_Y1 = 12,
- RRR_7_OPCODE_Y0 = 12,
- RRR_7_OPCODE_Y1 = 13,
- RRR_8_OPCODE_Y0 = 13,
- RRR_9_OPCODE_Y0 = 14,
- SHIFT_OPCODE_X0 = 6,
- SHIFT_OPCODE_X1 = 6,
- SHIFT_OPCODE_Y0 = 15,
- SHIFT_OPCODE_Y1 = 14,
- SHL16INSLI_OPCODE_X0 = 7,
- SHL16INSLI_OPCODE_X1 = 7,
- SHL1ADDX_RRR_0_OPCODE_X0 = 67,
- SHL1ADDX_RRR_0_OPCODE_X1 = 31,
- SHL1ADDX_RRR_7_OPCODE_Y0 = 1,
- SHL1ADDX_RRR_7_OPCODE_Y1 = 1,
- SHL1ADD_RRR_0_OPCODE_X0 = 68,
- SHL1ADD_RRR_0_OPCODE_X1 = 32,
- SHL1ADD_RRR_1_OPCODE_Y0 = 0,
- SHL1ADD_RRR_1_OPCODE_Y1 = 0,
- SHL2ADDX_RRR_0_OPCODE_X0 = 69,
- SHL2ADDX_RRR_0_OPCODE_X1 = 33,
- SHL2ADDX_RRR_7_OPCODE_Y0 = 2,
- SHL2ADDX_RRR_7_OPCODE_Y1 = 2,
- SHL2ADD_RRR_0_OPCODE_X0 = 70,
- SHL2ADD_RRR_0_OPCODE_X1 = 34,
- SHL2ADD_RRR_1_OPCODE_Y0 = 1,
- SHL2ADD_RRR_1_OPCODE_Y1 = 1,
- SHL3ADDX_RRR_0_OPCODE_X0 = 71,
- SHL3ADDX_RRR_0_OPCODE_X1 = 35,
- SHL3ADDX_RRR_7_OPCODE_Y0 = 3,
- SHL3ADDX_RRR_7_OPCODE_Y1 = 3,
- SHL3ADD_RRR_0_OPCODE_X0 = 72,
- SHL3ADD_RRR_0_OPCODE_X1 = 36,
- SHL3ADD_RRR_1_OPCODE_Y0 = 2,
- SHL3ADD_RRR_1_OPCODE_Y1 = 2,
- SHLI_SHIFT_OPCODE_X0 = 2,
- SHLI_SHIFT_OPCODE_X1 = 2,
- SHLI_SHIFT_OPCODE_Y0 = 1,
- SHLI_SHIFT_OPCODE_Y1 = 1,
- SHLXI_SHIFT_OPCODE_X0 = 3,
- SHLXI_SHIFT_OPCODE_X1 = 3,
- SHLX_RRR_0_OPCODE_X0 = 73,
- SHLX_RRR_0_OPCODE_X1 = 37,
- SHL_RRR_0_OPCODE_X0 = 74,
- SHL_RRR_0_OPCODE_X1 = 38,
- SHL_RRR_6_OPCODE_Y0 = 1,
- SHL_RRR_6_OPCODE_Y1 = 1,
- SHRSI_SHIFT_OPCODE_X0 = 4,
- SHRSI_SHIFT_OPCODE_X1 = 4,
- SHRSI_SHIFT_OPCODE_Y0 = 2,
- SHRSI_SHIFT_OPCODE_Y1 = 2,
- SHRS_RRR_0_OPCODE_X0 = 75,
- SHRS_RRR_0_OPCODE_X1 = 39,
- SHRS_RRR_6_OPCODE_Y0 = 2,
- SHRS_RRR_6_OPCODE_Y1 = 2,
- SHRUI_SHIFT_OPCODE_X0 = 5,
- SHRUI_SHIFT_OPCODE_X1 = 5,
- SHRUI_SHIFT_OPCODE_Y0 = 3,
- SHRUI_SHIFT_OPCODE_Y1 = 3,
- SHRUXI_SHIFT_OPCODE_X0 = 6,
- SHRUXI_SHIFT_OPCODE_X1 = 6,
- SHRUX_RRR_0_OPCODE_X0 = 76,
- SHRUX_RRR_0_OPCODE_X1 = 40,
- SHRU_RRR_0_OPCODE_X0 = 77,
- SHRU_RRR_0_OPCODE_X1 = 41,
- SHRU_RRR_6_OPCODE_Y0 = 3,
- SHRU_RRR_6_OPCODE_Y1 = 3,
- SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78,
- ST1_ADD_IMM8_OPCODE_X1 = 25,
- ST1_OPCODE_Y2 = 0,
- ST1_RRR_0_OPCODE_X1 = 42,
- ST2_ADD_IMM8_OPCODE_X1 = 26,
- ST2_OPCODE_Y2 = 1,
- ST2_RRR_0_OPCODE_X1 = 43,
- ST4_ADD_IMM8_OPCODE_X1 = 27,
- ST4_OPCODE_Y2 = 2,
- ST4_RRR_0_OPCODE_X1 = 44,
- STNT1_ADD_IMM8_OPCODE_X1 = 28,
- STNT1_RRR_0_OPCODE_X1 = 45,
- STNT2_ADD_IMM8_OPCODE_X1 = 29,
- STNT2_RRR_0_OPCODE_X1 = 46,
- STNT4_ADD_IMM8_OPCODE_X1 = 30,
- STNT4_RRR_0_OPCODE_X1 = 47,
- STNT_ADD_IMM8_OPCODE_X1 = 31,
- STNT_RRR_0_OPCODE_X1 = 48,
- ST_ADD_IMM8_OPCODE_X1 = 32,
- ST_OPCODE_Y2 = 3,
- ST_RRR_0_OPCODE_X1 = 49,
- SUBXSC_RRR_0_OPCODE_X0 = 79,
- SUBXSC_RRR_0_OPCODE_X1 = 50,
- SUBX_RRR_0_OPCODE_X0 = 80,
- SUBX_RRR_0_OPCODE_X1 = 51,
- SUBX_RRR_0_OPCODE_Y0 = 2,
- SUBX_RRR_0_OPCODE_Y1 = 2,
- SUB_RRR_0_OPCODE_X0 = 81,
- SUB_RRR_0_OPCODE_X1 = 52,
- SUB_RRR_0_OPCODE_Y0 = 3,
- SUB_RRR_0_OPCODE_Y1 = 3,
- SWINT0_UNARY_OPCODE_X1 = 34,
- SWINT1_UNARY_OPCODE_X1 = 35,
- SWINT2_UNARY_OPCODE_X1 = 36,
- SWINT3_UNARY_OPCODE_X1 = 37,
- TBLIDXB0_UNARY_OPCODE_X0 = 9,
- TBLIDXB0_UNARY_OPCODE_Y0 = 9,
- TBLIDXB1_UNARY_OPCODE_X0 = 10,
- TBLIDXB1_UNARY_OPCODE_Y0 = 10,
- TBLIDXB2_UNARY_OPCODE_X0 = 11,
- TBLIDXB2_UNARY_OPCODE_Y0 = 11,
- TBLIDXB3_UNARY_OPCODE_X0 = 12,
- TBLIDXB3_UNARY_OPCODE_Y0 = 12,
- UNARY_RRR_0_OPCODE_X0 = 82,
- UNARY_RRR_0_OPCODE_X1 = 53,
- UNARY_RRR_1_OPCODE_Y0 = 3,
- UNARY_RRR_1_OPCODE_Y1 = 3,
- V1ADDI_IMM8_OPCODE_X0 = 8,
- V1ADDI_IMM8_OPCODE_X1 = 33,
- V1ADDUC_RRR_0_OPCODE_X0 = 83,
- V1ADDUC_RRR_0_OPCODE_X1 = 54,
- V1ADD_RRR_0_OPCODE_X0 = 84,
- V1ADD_RRR_0_OPCODE_X1 = 55,
- V1ADIFFU_RRR_0_OPCODE_X0 = 85,
- V1AVGU_RRR_0_OPCODE_X0 = 86,
- V1CMPEQI_IMM8_OPCODE_X0 = 9,
- V1CMPEQI_IMM8_OPCODE_X1 = 34,
- V1CMPEQ_RRR_0_OPCODE_X0 = 87,
- V1CMPEQ_RRR_0_OPCODE_X1 = 56,
- V1CMPLES_RRR_0_OPCODE_X0 = 88,
- V1CMPLES_RRR_0_OPCODE_X1 = 57,
- V1CMPLEU_RRR_0_OPCODE_X0 = 89,
- V1CMPLEU_RRR_0_OPCODE_X1 = 58,
- V1CMPLTSI_IMM8_OPCODE_X0 = 10,
- V1CMPLTSI_IMM8_OPCODE_X1 = 35,
- V1CMPLTS_RRR_0_OPCODE_X0 = 90,
- V1CMPLTS_RRR_0_OPCODE_X1 = 59,
- V1CMPLTUI_IMM8_OPCODE_X0 = 11,
- V1CMPLTUI_IMM8_OPCODE_X1 = 36,
- V1CMPLTU_RRR_0_OPCODE_X0 = 91,
- V1CMPLTU_RRR_0_OPCODE_X1 = 60,
- V1CMPNE_RRR_0_OPCODE_X0 = 92,
- V1CMPNE_RRR_0_OPCODE_X1 = 61,
- V1DDOTPUA_RRR_0_OPCODE_X0 = 161,
- V1DDOTPUSA_RRR_0_OPCODE_X0 = 93,
- V1DDOTPUS_RRR_0_OPCODE_X0 = 94,
- V1DDOTPU_RRR_0_OPCODE_X0 = 162,
- V1DOTPA_RRR_0_OPCODE_X0 = 95,
- V1DOTPUA_RRR_0_OPCODE_X0 = 163,
- V1DOTPUSA_RRR_0_OPCODE_X0 = 96,
- V1DOTPUS_RRR_0_OPCODE_X0 = 97,
- V1DOTPU_RRR_0_OPCODE_X0 = 164,
- V1DOTP_RRR_0_OPCODE_X0 = 98,
- V1INT_H_RRR_0_OPCODE_X0 = 99,
- V1INT_H_RRR_0_OPCODE_X1 = 62,
- V1INT_L_RRR_0_OPCODE_X0 = 100,
- V1INT_L_RRR_0_OPCODE_X1 = 63,
- V1MAXUI_IMM8_OPCODE_X0 = 12,
- V1MAXUI_IMM8_OPCODE_X1 = 37,
- V1MAXU_RRR_0_OPCODE_X0 = 101,
- V1MAXU_RRR_0_OPCODE_X1 = 64,
- V1MINUI_IMM8_OPCODE_X0 = 13,
- V1MINUI_IMM8_OPCODE_X1 = 38,
- V1MINU_RRR_0_OPCODE_X0 = 102,
- V1MINU_RRR_0_OPCODE_X1 = 65,
- V1MNZ_RRR_0_OPCODE_X0 = 103,
- V1MNZ_RRR_0_OPCODE_X1 = 66,
- V1MULTU_RRR_0_OPCODE_X0 = 104,
- V1MULUS_RRR_0_OPCODE_X0 = 105,
- V1MULU_RRR_0_OPCODE_X0 = 106,
- V1MZ_RRR_0_OPCODE_X0 = 107,
- V1MZ_RRR_0_OPCODE_X1 = 67,
- V1SADAU_RRR_0_OPCODE_X0 = 108,
- V1SADU_RRR_0_OPCODE_X0 = 109,
- V1SHLI_SHIFT_OPCODE_X0 = 7,
- V1SHLI_SHIFT_OPCODE_X1 = 7,
- V1SHL_RRR_0_OPCODE_X0 = 110,
- V1SHL_RRR_0_OPCODE_X1 = 68,
- V1SHRSI_SHIFT_OPCODE_X0 = 8,
- V1SHRSI_SHIFT_OPCODE_X1 = 8,
- V1SHRS_RRR_0_OPCODE_X0 = 111,
- V1SHRS_RRR_0_OPCODE_X1 = 69,
- V1SHRUI_SHIFT_OPCODE_X0 = 9,
- V1SHRUI_SHIFT_OPCODE_X1 = 9,
- V1SHRU_RRR_0_OPCODE_X0 = 112,
- V1SHRU_RRR_0_OPCODE_X1 = 70,
- V1SUBUC_RRR_0_OPCODE_X0 = 113,
- V1SUBUC_RRR_0_OPCODE_X1 = 71,
- V1SUB_RRR_0_OPCODE_X0 = 114,
- V1SUB_RRR_0_OPCODE_X1 = 72,
- V2ADDI_IMM8_OPCODE_X0 = 14,
- V2ADDI_IMM8_OPCODE_X1 = 39,
- V2ADDSC_RRR_0_OPCODE_X0 = 115,
- V2ADDSC_RRR_0_OPCODE_X1 = 73,
- V2ADD_RRR_0_OPCODE_X0 = 116,
- V2ADD_RRR_0_OPCODE_X1 = 74,
- V2ADIFFS_RRR_0_OPCODE_X0 = 117,
- V2AVGS_RRR_0_OPCODE_X0 = 118,
- V2CMPEQI_IMM8_OPCODE_X0 = 15,
- V2CMPEQI_IMM8_OPCODE_X1 = 40,
- V2CMPEQ_RRR_0_OPCODE_X0 = 119,
- V2CMPEQ_RRR_0_OPCODE_X1 = 75,
- V2CMPLES_RRR_0_OPCODE_X0 = 120,
- V2CMPLES_RRR_0_OPCODE_X1 = 76,
- V2CMPLEU_RRR_0_OPCODE_X0 = 121,
- V2CMPLEU_RRR_0_OPCODE_X1 = 77,
- V2CMPLTSI_IMM8_OPCODE_X0 = 16,
- V2CMPLTSI_IMM8_OPCODE_X1 = 41,
- V2CMPLTS_RRR_0_OPCODE_X0 = 122,
- V2CMPLTS_RRR_0_OPCODE_X1 = 78,
- V2CMPLTUI_IMM8_OPCODE_X0 = 17,
- V2CMPLTUI_IMM8_OPCODE_X1 = 42,
- V2CMPLTU_RRR_0_OPCODE_X0 = 123,
- V2CMPLTU_RRR_0_OPCODE_X1 = 79,
- V2CMPNE_RRR_0_OPCODE_X0 = 124,
- V2CMPNE_RRR_0_OPCODE_X1 = 80,
- V2DOTPA_RRR_0_OPCODE_X0 = 125,
- V2DOTP_RRR_0_OPCODE_X0 = 126,
- V2INT_H_RRR_0_OPCODE_X0 = 127,
- V2INT_H_RRR_0_OPCODE_X1 = 81,
- V2INT_L_RRR_0_OPCODE_X0 = 128,
- V2INT_L_RRR_0_OPCODE_X1 = 82,
- V2MAXSI_IMM8_OPCODE_X0 = 18,
- V2MAXSI_IMM8_OPCODE_X1 = 43,
- V2MAXS_RRR_0_OPCODE_X0 = 129,
- V2MAXS_RRR_0_OPCODE_X1 = 83,
- V2MINSI_IMM8_OPCODE_X0 = 19,
- V2MINSI_IMM8_OPCODE_X1 = 44,
- V2MINS_RRR_0_OPCODE_X0 = 130,
- V2MINS_RRR_0_OPCODE_X1 = 84,
- V2MNZ_RRR_0_OPCODE_X0 = 131,
- V2MNZ_RRR_0_OPCODE_X1 = 85,
- V2MULFSC_RRR_0_OPCODE_X0 = 132,
- V2MULS_RRR_0_OPCODE_X0 = 133,
- V2MULTS_RRR_0_OPCODE_X0 = 134,
- V2MZ_RRR_0_OPCODE_X0 = 135,
- V2MZ_RRR_0_OPCODE_X1 = 86,
- V2PACKH_RRR_0_OPCODE_X0 = 136,
- V2PACKH_RRR_0_OPCODE_X1 = 87,
- V2PACKL_RRR_0_OPCODE_X0 = 137,
- V2PACKL_RRR_0_OPCODE_X1 = 88,
- V2PACKUC_RRR_0_OPCODE_X0 = 138,
- V2PACKUC_RRR_0_OPCODE_X1 = 89,
- V2SADAS_RRR_0_OPCODE_X0 = 139,
- V2SADAU_RRR_0_OPCODE_X0 = 140,
- V2SADS_RRR_0_OPCODE_X0 = 141,
- V2SADU_RRR_0_OPCODE_X0 = 142,
- V2SHLI_SHIFT_OPCODE_X0 = 10,
- V2SHLI_SHIFT_OPCODE_X1 = 10,
- V2SHLSC_RRR_0_OPCODE_X0 = 143,
- V2SHLSC_RRR_0_OPCODE_X1 = 90,
- V2SHL_RRR_0_OPCODE_X0 = 144,
- V2SHL_RRR_0_OPCODE_X1 = 91,
- V2SHRSI_SHIFT_OPCODE_X0 = 11,
- V2SHRSI_SHIFT_OPCODE_X1 = 11,
- V2SHRS_RRR_0_OPCODE_X0 = 145,
- V2SHRS_RRR_0_OPCODE_X1 = 92,
- V2SHRUI_SHIFT_OPCODE_X0 = 12,
- V2SHRUI_SHIFT_OPCODE_X1 = 12,
- V2SHRU_RRR_0_OPCODE_X0 = 146,
- V2SHRU_RRR_0_OPCODE_X1 = 93,
- V2SUBSC_RRR_0_OPCODE_X0 = 147,
- V2SUBSC_RRR_0_OPCODE_X1 = 94,
- V2SUB_RRR_0_OPCODE_X0 = 148,
- V2SUB_RRR_0_OPCODE_X1 = 95,
- V4ADDSC_RRR_0_OPCODE_X0 = 149,
- V4ADDSC_RRR_0_OPCODE_X1 = 96,
- V4ADD_RRR_0_OPCODE_X0 = 150,
- V4ADD_RRR_0_OPCODE_X1 = 97,
- V4INT_H_RRR_0_OPCODE_X0 = 151,
- V4INT_H_RRR_0_OPCODE_X1 = 98,
- V4INT_L_RRR_0_OPCODE_X0 = 152,
- V4INT_L_RRR_0_OPCODE_X1 = 99,
- V4PACKSC_RRR_0_OPCODE_X0 = 153,
- V4PACKSC_RRR_0_OPCODE_X1 = 100,
- V4SHLSC_RRR_0_OPCODE_X0 = 154,
- V4SHLSC_RRR_0_OPCODE_X1 = 101,
- V4SHL_RRR_0_OPCODE_X0 = 155,
- V4SHL_RRR_0_OPCODE_X1 = 102,
- V4SHRS_RRR_0_OPCODE_X0 = 156,
- V4SHRS_RRR_0_OPCODE_X1 = 103,
- V4SHRU_RRR_0_OPCODE_X0 = 157,
- V4SHRU_RRR_0_OPCODE_X1 = 104,
- V4SUBSC_RRR_0_OPCODE_X0 = 158,
- V4SUBSC_RRR_0_OPCODE_X1 = 105,
- V4SUB_RRR_0_OPCODE_X0 = 159,
- V4SUB_RRR_0_OPCODE_X1 = 106,
- WH64_UNARY_OPCODE_X1 = 38,
- XORI_IMM8_OPCODE_X0 = 20,
- XORI_IMM8_OPCODE_X1 = 45,
- XOR_RRR_0_OPCODE_X0 = 160,
- XOR_RRR_0_OPCODE_X1 = 107,
- XOR_RRR_5_OPCODE_Y0 = 3,
- XOR_RRR_5_OPCODE_Y1 = 3
-};
-
-
-#endif /* __ASSEMBLER__ */
-
-#endif /* OPCODE_TILEGX_H */
diff --git a/target/tilegx/simd_helper.c b/target/tilegx/simd_helper.c
deleted file mode 100644
index 0fdfad2fa9..0000000000
--- a/target/tilegx/simd_helper.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * QEMU TILE-Gx helpers
- *
- * Copyright (c) 2015 Chen Gang
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see
- * <http://www.gnu.org/licenses/lgpl-2.1.html>
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-
-
-/* Broadcast a value to all elements of a vector. */
-#define V1(X) (((X) & 0xff) * 0x0101010101010101ull)
-#define V2(X) (((X) & 0xffff) * 0x0001000100010001ull)
-
-
-uint64_t helper_v1multu(uint64_t a, uint64_t b)
-{
- uint64_t r = 0;
- int i;
-
- for (i = 0; i < 64; i += 8) {
- unsigned ae = extract64(a, i, 8);
- unsigned be = extract64(b, i, 8);
- r = deposit64(r, i, 8, ae * be);
- }
- return r;
-}
-
-uint64_t helper_v2mults(uint64_t a, uint64_t b)
-{
- uint64_t r = 0;
- int i;
-
- /* While the instruction talks about signed inputs, with a
- truncated result the sign of the inputs doesn't matter. */
- for (i = 0; i < 64; i += 16) {
- unsigned ae = extract64(a, i, 16);
- unsigned be = extract64(b, i, 16);
- r = deposit64(r, i, 16, ae * be);
- }
- return r;
-}
-
-uint64_t helper_v1shl(uint64_t a, uint64_t b)
-{
- uint64_t m;
-
- b &= 7;
- m = V1(0xff >> b);
- return (a & m) << b;
-}
-
-uint64_t helper_v2shl(uint64_t a, uint64_t b)
-{
- uint64_t m;
-
- b &= 15;
- m = V2(0xffff >> b);
- return (a & m) << b;
-}
-
-uint64_t helper_v1shru(uint64_t a, uint64_t b)
-{
- uint64_t m;
-
- b &= 7;
- m = V1(0xff << b);
- return (a & m) >> b;
-}
-
-uint64_t helper_v2shru(uint64_t a, uint64_t b)
-{
- uint64_t m;
-
- b &= 15;
- m = V2(0xffff << b);
- return (a & m) >> b;
-}
-
-uint64_t helper_v1shrs(uint64_t a, uint64_t b)
-{
- uint64_t r = 0;
- int i;
-
- b &= 7;
- for (i = 0; i < 64; i += 8) {
- r = deposit64(r, i, 8, sextract64(a, i + b, 8 - b));
- }
- return r;
-}
-
-uint64_t helper_v2shrs(uint64_t a, uint64_t b)
-{
- uint64_t r = 0;
- int i;
-
- b &= 15;
- for (i = 0; i < 64; i += 16) {
- r = deposit64(r, i, 16, sextract64(a, i + b, 16 - b));
- }
- return r;
-}
-
-uint64_t helper_v1int_h(uint64_t a, uint64_t b)
-{
- uint64_t r = 0;
- int i;
-
- for (i = 0; i < 32; i += 8) {
- r = deposit64(r, 2 * i + 8, 8, extract64(a, i + 32, 8));
- r = deposit64(r, 2 * i, 8, extract64(b, i + 32, 8));
- }
- return r;
-}
-
-uint64_t helper_v1int_l(uint64_t a, uint64_t b)
-{
- uint64_t r = 0;
- int i;
-
- for (i = 0; i < 32; i += 8) {
- r = deposit64(r, 2 * i + 8, 8, extract64(a, i, 8));
- r = deposit64(r, 2 * i, 8, extract64(b, i, 8));
- }
- return r;
-}
-
-uint64_t helper_v2int_h(uint64_t a, uint64_t b)
-{
- uint64_t r = 0;
- int i;
-
- for (i = 0; i < 32; i += 16) {
- r = deposit64(r, 2 * i + 16, 16, extract64(a, i + 32, 16));
- r = deposit64(r, 2 * i, 16, extract64(b, i + 32, 16));
- }
- return r;
-}
-
-uint64_t helper_v2int_l(uint64_t a, uint64_t b)
-{
- uint64_t r = 0;
- int i;
-
- for (i = 0; i < 32; i += 16) {
- r = deposit64(r, 2 * i + 16, 16, extract64(a, i, 16));
- r = deposit64(r, 2 * i, 16, extract64(b, i, 16));
- }
- return r;
-}
diff --git a/target/tilegx/spr_def_64.h b/target/tilegx/spr_def_64.h
deleted file mode 100644
index d3c0cc26d8..0000000000
--- a/target/tilegx/spr_def_64.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef ARCH_SPR_DEF_64_H
-#define ARCH_SPR_DEF_64_H
-
-#define SPR_AUX_PERF_COUNT_0 0x2105
-#define SPR_AUX_PERF_COUNT_1 0x2106
-#define SPR_AUX_PERF_COUNT_CTL 0x2107
-#define SPR_AUX_PERF_COUNT_STS 0x2108
-#define SPR_CMPEXCH_VALUE 0x2780
-#define SPR_CYCLE 0x2781
-#define SPR_DONE 0x2705
-#define SPR_DSTREAM_PF 0x2706
-#define SPR_EVENT_BEGIN 0x2782
-#define SPR_EVENT_END 0x2783
-#define SPR_EX_CONTEXT_0_0 0x2580
-#define SPR_EX_CONTEXT_0_1 0x2581
-#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
-#define SPR_EX_CONTEXT_1_0 0x2480
-#define SPR_EX_CONTEXT_1_1 0x2481
-#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
-#define SPR_EX_CONTEXT_2_0 0x2380
-#define SPR_EX_CONTEXT_2_1 0x2381
-#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
-#define SPR_FAIL 0x2707
-#define SPR_IDN_AVAIL_EN 0x1a05
-#define SPR_IDN_DATA_AVAIL 0x0a80
-#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
-#define SPR_IDN_DEMUX_COUNT_0 0x0a05
-#define SPR_IDN_DEMUX_COUNT_1 0x0a06
-#define SPR_IDN_DIRECTION_PROTECT 0x1405
-#define SPR_IDN_PENDING 0x0a08
-#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
-#define SPR_INTCTRL_0_STATUS 0x2505
-#define SPR_INTCTRL_1_STATUS 0x2405
-#define SPR_INTCTRL_2_STATUS 0x2305
-#define SPR_INTERRUPT_CRITICAL_SECTION 0x2708
-#define SPR_INTERRUPT_MASK_0 0x2506
-#define SPR_INTERRUPT_MASK_1 0x2406
-#define SPR_INTERRUPT_MASK_2 0x2306
-#define SPR_INTERRUPT_MASK_RESET_0 0x2507
-#define SPR_INTERRUPT_MASK_RESET_1 0x2407
-#define SPR_INTERRUPT_MASK_RESET_2 0x2307
-#define SPR_INTERRUPT_MASK_SET_0 0x2508
-#define SPR_INTERRUPT_MASK_SET_1 0x2408
-#define SPR_INTERRUPT_MASK_SET_2 0x2308
-#define SPR_INTERRUPT_VECTOR_BASE_0 0x2509
-#define SPR_INTERRUPT_VECTOR_BASE_1 0x2409
-#define SPR_INTERRUPT_VECTOR_BASE_2 0x2309
-#define SPR_INTERRUPT_VECTOR_BASE_3 0x2209
-#define SPR_IPI_EVENT_0 0x1f05
-#define SPR_IPI_EVENT_1 0x1e05
-#define SPR_IPI_EVENT_2 0x1d05
-#define SPR_IPI_EVENT_RESET_0 0x1f06
-#define SPR_IPI_EVENT_RESET_1 0x1e06
-#define SPR_IPI_EVENT_RESET_2 0x1d06
-#define SPR_IPI_EVENT_SET_0 0x1f07
-#define SPR_IPI_EVENT_SET_1 0x1e07
-#define SPR_IPI_EVENT_SET_2 0x1d07
-#define SPR_IPI_MASK_0 0x1f08
-#define SPR_IPI_MASK_1 0x1e08
-#define SPR_IPI_MASK_2 0x1d08
-#define SPR_IPI_MASK_RESET_0 0x1f09
-#define SPR_IPI_MASK_RESET_1 0x1e09
-#define SPR_IPI_MASK_RESET_2 0x1d09
-#define SPR_IPI_MASK_SET_0 0x1f0a
-#define SPR_IPI_MASK_SET_1 0x1e0a
-#define SPR_IPI_MASK_SET_2 0x1d0a
-#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
-#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
-#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
-#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
-#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
-#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
-#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
-#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
-#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
-#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
-#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
-#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
-#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
-#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
-#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
-#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
-#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
-#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
-#define SPR_MPL_IDN_TIMER_SET_0 0x1800
-#define SPR_MPL_IDN_TIMER_SET_1 0x1801
-#define SPR_MPL_IDN_TIMER_SET_2 0x1802
-#define SPR_MPL_INTCTRL_0_SET_0 0x2500
-#define SPR_MPL_INTCTRL_0_SET_1 0x2501
-#define SPR_MPL_INTCTRL_0_SET_2 0x2502
-#define SPR_MPL_INTCTRL_1_SET_0 0x2400
-#define SPR_MPL_INTCTRL_1_SET_1 0x2401
-#define SPR_MPL_INTCTRL_1_SET_2 0x2402
-#define SPR_MPL_INTCTRL_2_SET_0 0x2300
-#define SPR_MPL_INTCTRL_2_SET_1 0x2301
-#define SPR_MPL_INTCTRL_2_SET_2 0x2302
-#define SPR_MPL_IPI_0 0x1f04
-#define SPR_MPL_IPI_0_SET_0 0x1f00
-#define SPR_MPL_IPI_0_SET_1 0x1f01
-#define SPR_MPL_IPI_0_SET_2 0x1f02
-#define SPR_MPL_IPI_1 0x1e04
-#define SPR_MPL_IPI_1_SET_0 0x1e00
-#define SPR_MPL_IPI_1_SET_1 0x1e01
-#define SPR_MPL_IPI_1_SET_2 0x1e02
-#define SPR_MPL_IPI_2 0x1d04
-#define SPR_MPL_IPI_2_SET_0 0x1d00
-#define SPR_MPL_IPI_2_SET_1 0x1d01
-#define SPR_MPL_IPI_2_SET_2 0x1d02
-#define SPR_MPL_PERF_COUNT_SET_0 0x2000
-#define SPR_MPL_PERF_COUNT_SET_1 0x2001
-#define SPR_MPL_PERF_COUNT_SET_2 0x2002
-#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
-#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
-#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
-#define SPR_MPL_UDN_AVAIL_SET_0 0x1b00
-#define SPR_MPL_UDN_AVAIL_SET_1 0x1b01
-#define SPR_MPL_UDN_AVAIL_SET_2 0x1b02
-#define SPR_MPL_UDN_COMPLETE_SET_0 0x0600
-#define SPR_MPL_UDN_COMPLETE_SET_1 0x0601
-#define SPR_MPL_UDN_COMPLETE_SET_2 0x0602
-#define SPR_MPL_UDN_FIREWALL_SET_0 0x1500
-#define SPR_MPL_UDN_FIREWALL_SET_1 0x1501
-#define SPR_MPL_UDN_FIREWALL_SET_2 0x1502
-#define SPR_MPL_UDN_TIMER_SET_0 0x1900
-#define SPR_MPL_UDN_TIMER_SET_1 0x1901
-#define SPR_MPL_UDN_TIMER_SET_2 0x1902
-#define SPR_MPL_WORLD_ACCESS_SET_0 0x2700
-#define SPR_MPL_WORLD_ACCESS_SET_1 0x2701
-#define SPR_MPL_WORLD_ACCESS_SET_2 0x2702
-#define SPR_PASS 0x2709
-#define SPR_PERF_COUNT_0 0x2005
-#define SPR_PERF_COUNT_1 0x2006
-#define SPR_PERF_COUNT_CTL 0x2007
-#define SPR_PERF_COUNT_DN_CTL 0x2008
-#define SPR_PERF_COUNT_STS 0x2009
-#define SPR_PROC_STATUS 0x2784
-#define SPR_SIM_CONTROL 0x2785
-#define SPR_SINGLE_STEP_CONTROL_0 0x0405
-#define SPR_SINGLE_STEP_CONTROL_0__CANCELED_MASK 0x1
-#define SPR_SINGLE_STEP_CONTROL_0__INHIBIT_MASK 0x2
-#define SPR_SINGLE_STEP_CONTROL_1 0x0305
-#define SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK 0x1
-#define SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK 0x2
-#define SPR_SINGLE_STEP_CONTROL_2 0x0205
-#define SPR_SINGLE_STEP_CONTROL_2__CANCELED_MASK 0x1
-#define SPR_SINGLE_STEP_CONTROL_2__INHIBIT_MASK 0x2
-#define SPR_SINGLE_STEP_EN_0_0 0x250a
-#define SPR_SINGLE_STEP_EN_0_1 0x240a
-#define SPR_SINGLE_STEP_EN_0_2 0x230a
-#define SPR_SINGLE_STEP_EN_1_0 0x250b
-#define SPR_SINGLE_STEP_EN_1_1 0x240b
-#define SPR_SINGLE_STEP_EN_1_2 0x230b
-#define SPR_SINGLE_STEP_EN_2_0 0x250c
-#define SPR_SINGLE_STEP_EN_2_1 0x240c
-#define SPR_SINGLE_STEP_EN_2_2 0x230c
-#define SPR_SYSTEM_SAVE_0_0 0x2582
-#define SPR_SYSTEM_SAVE_0_1 0x2583
-#define SPR_SYSTEM_SAVE_0_2 0x2584
-#define SPR_SYSTEM_SAVE_0_3 0x2585
-#define SPR_SYSTEM_SAVE_1_0 0x2482
-#define SPR_SYSTEM_SAVE_1_1 0x2483
-#define SPR_SYSTEM_SAVE_1_2 0x2484
-#define SPR_SYSTEM_SAVE_1_3 0x2485
-#define SPR_SYSTEM_SAVE_2_0 0x2382
-#define SPR_SYSTEM_SAVE_2_1 0x2383
-#define SPR_SYSTEM_SAVE_2_2 0x2384
-#define SPR_SYSTEM_SAVE_2_3 0x2385
-#define SPR_TILE_COORD 0x270b
-#define SPR_TILE_RTF_HWM 0x270c
-#define SPR_TILE_TIMER_CONTROL 0x1605
-#define SPR_UDN_AVAIL_EN 0x1b05
-#define SPR_UDN_DATA_AVAIL 0x0b80
-#define SPR_UDN_DEADLOCK_TIMEOUT 0x1906
-#define SPR_UDN_DEMUX_COUNT_0 0x0b05
-#define SPR_UDN_DEMUX_COUNT_1 0x0b06
-#define SPR_UDN_DEMUX_COUNT_2 0x0b07
-#define SPR_UDN_DEMUX_COUNT_3 0x0b08
-#define SPR_UDN_DIRECTION_PROTECT 0x1505
-#define SPR_UDN_PENDING 0x0b0a
-#define SPR_WATCH_MASK 0x200a
-#define SPR_WATCH_VAL 0x200b
-
-#endif
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
deleted file mode 100644
index 65f1c91f4f..0000000000
--- a/target/tilegx/translate.c
+++ /dev/null
@@ -1,2437 +0,0 @@
-/*
- * QEMU TILE-Gx CPU
- *
- * Copyright (c) 2015 Chen Gang
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see
- * <http://www.gnu.org/licenses/lgpl-2.1.html>
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "qemu/log.h"
-#include "exec/log.h"
-#include "disas/disas.h"
-#include "exec/exec-all.h"
-#include "tcg/tcg-op.h"
-#include "exec/cpu_ldst.h"
-#include "linux-user/syscall_defs.h"
-
-#include "opcode_tilegx.h"
-#include "spr_def_64.h"
-
-#define FMT64X "%016" PRIx64
-
-static TCGv cpu_pc;
-static TCGv cpu_regs[TILEGX_R_COUNT];
-
-static const char * const reg_names[64] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
- "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
- "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
- "r48", "r49", "r50", "r51", "bp", "tp", "sp", "lr",
- "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn2", "zero"
-};
-
-/* Modified registers are cached in temporaries until the end of the bundle. */
-typedef struct {
- unsigned reg;
- TCGv val;
-} DisasContextTemp;
-
-#define MAX_WRITEBACK 4
-
-/* This is the state at translation time. */
-typedef struct {
- uint64_t pc; /* Current pc */
-
- TCGv zero; /* For zero register */
-
- DisasContextTemp wb[MAX_WRITEBACK];
- int num_wb;
- int mmuidx;
- bool exit_tb;
- TileExcp atomic_excp;
-
- struct {
- TCGCond cond; /* branch condition */
- TCGv dest; /* branch destination */
- TCGv val1; /* value to be compared against zero, for cond */
- } jmp; /* Jump object, at most one per TB */
-} DisasContext;
-
-#include "exec/gen-icount.h"
-
-/* Differentiate the various pipe encodings. */
-#define TY_X0 0
-#define TY_X1 1
-#define TY_Y0 2
-#define TY_Y1 3
-
-/* Remerge the base opcode and extension fields for switching.
- The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
- Y2 opcode field is 2 bits. */
-#define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)
-
-/* Similar, but for Y2 only. */
-#define OEY2(OP, MODE) (OP + MODE * 4)
-
-/* Similar, but make sure opcode names match up. */
-#define OE_RR_X0(E) OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
-#define OE_RR_X1(E) OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
-#define OE_RR_Y0(E) OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
-#define OE_RR_Y1(E) OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
-#define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
-#define OE_IM(E,XY) OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
-#define OE_SH(E,XY) OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)
-
-#define V1_IMM(X) (((X) & 0xff) * 0x0101010101010101ull)
-#define V2_IMM(X) (((X) & 0xffff) * 0x0001000100010001ull)
-
-
-static void gen_exception(DisasContext *dc, TileExcp num)
-{
- TCGv_i32 tmp;
-
- tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
-
- tmp = tcg_const_i32(num);
- gen_helper_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
- dc->exit_tb = true;
-}
-
-static bool check_gr(DisasContext *dc, uint8_t reg)
-{
- if (likely(reg < TILEGX_R_COUNT)) {
- return true;
- }
-
- switch (reg) {
- case TILEGX_R_SN:
- case TILEGX_R_ZERO:
- break;
- case TILEGX_R_IDN0:
- case TILEGX_R_IDN1:
- gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS);
- break;
- case TILEGX_R_UDN0:
- case TILEGX_R_UDN1:
- case TILEGX_R_UDN2:
- case TILEGX_R_UDN3:
- gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS);
- break;
- default:
- g_assert_not_reached();
- }
- return false;
-}
-
-static TCGv load_zero(DisasContext *dc)
-{
- if (!dc->zero) {
- dc->zero = tcg_const_i64(0);
- }
- return dc->zero;
-}
-
-static TCGv load_gr(DisasContext *dc, unsigned reg)
-{
- if (check_gr(dc, reg)) {
- return cpu_regs[reg];
- }
- return load_zero(dc);
-}
-
-static TCGv dest_gr(DisasContext *dc, unsigned reg)
-{
- int n;
-
- /* Skip the result, mark the exception if necessary, and continue */
- check_gr(dc, reg);
-
- n = dc->num_wb++;
- dc->wb[n].reg = reg;
- return dc->wb[n].val = tcg_temp_new_i64();
-}
-
-static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb,
- void (*operate)(TCGv, TCGv, TCGv))
-{
- TCGv t0 = tcg_temp_new();
-
- tcg_gen_ext32s_tl(tdest, tsrca);
- tcg_gen_ext32s_tl(t0, tsrcb);
- operate(tdest, tdest, t0);
-
- tcg_gen_movi_tl(t0, 0x7fffffff);
- tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest);
- tcg_gen_movi_tl(t0, -0x80000000LL);
- tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest);
-
- tcg_temp_free(t0);
-}
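/* Scalar sketch, not part of the removed file and with an invented name,
 * of the saturation pattern generated above: add the sign-extended 32-bit
 * inputs in 64-bit arithmetic, then clamp into the int32_t range. */
#include <stdint.h>

static int64_t addxsc_model(int64_t a, int64_t b)
{
    int64_t r = (int64_t)(int32_t)a + (int64_t)(int32_t)b;  /* cannot overflow in 64 bits */

    if (r > INT32_MAX) {
        r = INT32_MAX;
    } else if (r < INT32_MIN) {
        r = INT32_MIN;
    }
    return r;
}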
-
-static void gen_atomic_excp(DisasContext *dc, unsigned dest, TCGv tdest,
- TCGv tsrca, TCGv tsrcb, TileExcp excp)
-{
-#ifdef CONFIG_USER_ONLY
- TCGv_i32 t;
-
- tcg_gen_st_tl(tsrca, cpu_env, offsetof(CPUTLGState, atomic_srca));
- tcg_gen_st_tl(tsrcb, cpu_env, offsetof(CPUTLGState, atomic_srcb));
- t = tcg_const_i32(dest);
- tcg_gen_st_i32(t, cpu_env, offsetof(CPUTLGState, atomic_dstr));
- tcg_temp_free_i32(t);
-
- /* We're going to write the real result in the exception. But in
- the meantime we've already created a writeback register, and
- we don't want that to remain uninitialized. */
- tcg_gen_movi_tl(tdest, 0);
-
- /* Note that we need to delay issuing the exception that implements
- the atomic operation until after writing back the results of the
- instruction occupying the X0 pipe. */
- dc->atomic_excp = excp;
-#else
- gen_exception(dc, TILEGX_EXCP_OPCODE_UNIMPLEMENTED);
-#endif
-}
-
-/* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
- specified by the bottom 3 bits of TSRCB, and set TDEST to the
- low 64 bits of the resulting value. */
-static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb)
-{
- TCGv t0 = tcg_temp_new();
-
- tcg_gen_andi_tl(t0, tsrcb, 7);
- tcg_gen_shli_tl(t0, t0, 3);
- tcg_gen_shr_tl(tdest, tsrcd, t0);
-
- /* We want to do "t0 = tsrca << (64 - t0)". Two's complement
- arithmetic on a 6-bit field tells us that 64 - t0 is equal
- to (t0 ^ 63) + 1. So we can do the shift in two parts,
- neither of which will be an invalid shift by 64. */
- tcg_gen_xori_tl(t0, t0, 63);
- tcg_gen_shl_tl(t0, tsrca, t0);
- tcg_gen_shli_tl(t0, t0, 1);
- tcg_gen_or_tl(tdest, tdest, t0);
-
- tcg_temp_free(t0);
-}
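/* Host-side sketch, not part of the removed file and with an invented
 * name, of the two-step shift above: "hi << (64 - n)" is computed as
 * "(hi << (n ^ 63)) << 1" so an invalid shift count of 64 can never occur
 * when n == 0. */
#include <stdint.h>

static uint64_t dblalign_model(uint64_t lo, uint64_t hi, uint64_t bytes)
{
    unsigned n = (bytes & 7) * 8;       /* shift count in bits, 0..56 */
    uint64_t r = lo >> n;

    r |= (hi << (n ^ 63)) << 1;         /* (n ^ 63) + 1 == 64 - n */
    return r;
}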
-
-/* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
- right shift is an immediate. */
-static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr)
-{
- TCGv t0 = tcg_temp_new();
-
- tcg_gen_shri_tl(t0, tsrcb, shr);
- tcg_gen_shli_tl(tdest, tsrca, 64 - shr);
- tcg_gen_or_tl(tdest, tdest, t0);
-
- tcg_temp_free(t0);
-}
-
-typedef enum {
- LU, LS, HU, HS
-} MulHalf;
-
-static void gen_ext_half(TCGv d, TCGv s, MulHalf h)
-{
- switch (h) {
- case LU:
- tcg_gen_ext32u_tl(d, s);
- break;
- case LS:
- tcg_gen_ext32s_tl(d, s);
- break;
- case HU:
- tcg_gen_shri_tl(d, s, 32);
- break;
- case HS:
- tcg_gen_sari_tl(d, s, 32);
- break;
- }
-}
-
-static void gen_mul_half(TCGv tdest, TCGv tsrca, TCGv tsrcb,
- MulHalf ha, MulHalf hb)
-{
- TCGv t = tcg_temp_new();
- gen_ext_half(t, tsrca, ha);
- gen_ext_half(tdest, tsrcb, hb);
- tcg_gen_mul_tl(tdest, tdest, t);
- tcg_temp_free(t);
-}
-
-static void gen_cmul2(TCGv tdest, TCGv tsrca, TCGv tsrcb, int sh, int rd)
-{
- TCGv_i32 tsh = tcg_const_i32(sh);
- TCGv_i32 trd = tcg_const_i32(rd);
- gen_helper_cmul2(tdest, tsrca, tsrcb, tsh, trd);
- tcg_temp_free_i32(tsh);
- tcg_temp_free_i32(trd);
-}
-
-static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
- unsigned srcb, MemOp memop, const char *name)
-{
- if (dest) {
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-
- tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca),
- dc->mmuidx, memop);
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name,
- reg_names[srca], reg_names[srcb]);
- return TILEGX_EXCP_NONE;
-}
-
-static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb,
- int imm, MemOp memop, const char *name)
-{
- TCGv tsrca = load_gr(dc, srca);
- TCGv tsrcb = load_gr(dc, srcb);
-
- tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop);
- tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name,
- reg_names[srca], reg_names[srcb], imm);
- return TILEGX_EXCP_NONE;
-}
-
-/* Equality comparison with zero can be done quickly and efficiently. */
-static void gen_v1cmpeq0(TCGv v)
-{
- TCGv m = tcg_const_tl(V1_IMM(0x7f));
- TCGv c = tcg_temp_new();
-
- /* ~(((v & m) + m) | m | v). Sets the msb for each byte == 0. */
- tcg_gen_and_tl(c, v, m);
- tcg_gen_add_tl(c, c, m);
- tcg_gen_or_tl(c, c, m);
- tcg_gen_nor_tl(c, c, v);
- tcg_temp_free(m);
-
- /* Shift the msb down to form the lsb boolean result. */
- tcg_gen_shri_tl(v, c, 7);
- tcg_temp_free(c);
-}
-
-static void gen_v1cmpne0(TCGv v)
-{
- TCGv m = tcg_const_tl(V1_IMM(0x7f));
- TCGv c = tcg_temp_new();
-
- /* (((v & m) + m) | v) & ~m. Sets the msb for each byte != 0. */
- tcg_gen_and_tl(c, v, m);
- tcg_gen_add_tl(c, c, m);
- tcg_gen_or_tl(c, c, v);
- tcg_gen_andc_tl(c, c, m);
- tcg_temp_free(m);
-
- /* Shift the msb down to form the lsb boolean result. */
- tcg_gen_shri_tl(v, c, 7);
- tcg_temp_free(c);
-}
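/* Scalar sketch, not part of the removed file and with invented names, of
 * the byte-wise compare trick above: every zero byte of v produces 0x80 in
 * c, and because c has no other bits set, a single word-wide shift by 7
 * turns each 0x80 into a per-byte 0x01 boolean. */
#include <assert.h>
#include <stdint.h>

static uint64_t v1cmpeq0_model(uint64_t v)
{
    uint64_t m = 0x7f7f7f7f7f7f7f7full;
    uint64_t c = ~(((v & m) + m) | m | v);  /* msb set in each zero byte */

    return c >> 7;                          /* lsb set in each zero byte */
}

static void v1cmpeq0_check(void)
{
    assert(v1cmpeq0_model(0) == 0x0101010101010101ull);
    assert(v1cmpeq0_model(0x00ff000180000000ull) == 0x0100010000010101ull);
}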
-
-/* Vector addition can be performed via arithmetic plus masking. It is
- efficient this way only for 4 or more elements. */
-static void gen_v12add(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign)
-{
- TCGv tmask = tcg_const_tl(~sign);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
-
- /* ((a & ~sign) + (b & ~sign)) ^ ((a ^ b) & sign). */
- tcg_gen_and_tl(t0, tsrca, tmask);
- tcg_gen_and_tl(t1, tsrcb, tmask);
- tcg_gen_add_tl(tdest, t0, t1);
- tcg_gen_xor_tl(t0, tsrca, tsrcb);
- tcg_gen_andc_tl(t0, t0, tmask);
- tcg_gen_xor_tl(tdest, tdest, t0);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
- tcg_temp_free(tmask);
-}
-
-/* Similarly for vector subtraction. */
-static void gen_v12sub(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign)
-{
- TCGv tsign = tcg_const_tl(sign);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
-
- /* ((a | sign) - (b & ~sign)) ^ ((a ^ ~b) & sign). */
- tcg_gen_or_tl(t0, tsrca, tsign);
- tcg_gen_andc_tl(t1, tsrcb, tsign);
- tcg_gen_sub_tl(tdest, t0, t1);
- tcg_gen_eqv_tl(t0, tsrca, tsrcb);
- tcg_gen_and_tl(t0, t0, tsign);
- tcg_gen_xor_tl(tdest, tdest, t0);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
- tcg_temp_free(tsign);
-}
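/* Scalar sketch, not part of the removed file and with invented names, of
 * the lane-masked addition above: the low bits of each lane are added with
 * one ordinary 64-bit addition, and the lane msbs are recomputed with xor,
 * so carries can never propagate across a lane boundary. */
#include <assert.h>
#include <stdint.h>

static uint64_t v1add_model(uint64_t a, uint64_t b)
{
    uint64_t sign = 0x8080808080808080ull;

    return ((a & ~sign) + (b & ~sign)) ^ ((a ^ b) & sign);
}

static void v1add_check(void)
{
    assert(v1add_model(0x00000000000000ffull, 1) == 0);           /* 0xff + 1 wraps per byte */
    assert(v1add_model(0x0000000000007f01ull, 0x0101) == 0x8002);
}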
-
-static void gen_v4sh(TCGv d64, TCGv a64, TCGv b64,
- void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 al = tcg_temp_new_i32();
- TCGv_i32 ah = tcg_temp_new_i32();
- TCGv_i32 bl = tcg_temp_new_i32();
-
- tcg_gen_extr_i64_i32(al, ah, a64);
- tcg_gen_extrl_i64_i32(bl, b64);
- tcg_gen_andi_i32(bl, bl, 31);
- generate(al, al, bl);
- generate(ah, ah, bl);
- tcg_gen_concat_i32_i64(d64, al, ah);
-
- tcg_temp_free_i32(al);
- tcg_temp_free_i32(ah);
- tcg_temp_free_i32(bl);
-}
-
-static void gen_v4op(TCGv d64, TCGv a64, TCGv b64,
- void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 al = tcg_temp_new_i32();
- TCGv_i32 ah = tcg_temp_new_i32();
- TCGv_i32 bl = tcg_temp_new_i32();
- TCGv_i32 bh = tcg_temp_new_i32();
-
- tcg_gen_extr_i64_i32(al, ah, a64);
- tcg_gen_extr_i64_i32(bl, bh, b64);
- generate(al, al, bl);
- generate(ah, ah, bh);
- tcg_gen_concat_i32_i64(d64, al, ah);
-
- tcg_temp_free_i32(al);
- tcg_temp_free_i32(ah);
- tcg_temp_free_i32(bl);
- tcg_temp_free_i32(bh);
-}
-
-static TileExcp gen_signal(DisasContext *dc, int signo, int sigcode,
- const char *mnemonic)
-{
- TCGv_i32 t0 = tcg_const_i32(signo);
- TCGv_i32 t1 = tcg_const_i32(sigcode);
-
- tcg_gen_st_i32(t0, cpu_env, offsetof(CPUTLGState, signo));
- tcg_gen_st_i32(t1, cpu_env, offsetof(CPUTLGState, sigcode));
-
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t0);
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
- return TILEGX_EXCP_SIGNAL;
-}
-
-static bool parse_from_addli(uint64_t bundle, int *signo, int *sigcode)
-{
- int imm;
-
- if ((get_Opcode_X0(bundle) != ADDLI_OPCODE_X0)
- || (get_Dest_X0(bundle) != TILEGX_R_ZERO)
- || (get_SrcA_X0(bundle) != TILEGX_R_ZERO)) {
- return false;
- }
-
- imm = get_Imm16_X0(bundle);
- *signo = imm & 0x3f;
- *sigcode = (imm >> 6) & 0xf;
-
- /* ??? The linux kernel validates both signo and the sigcode vs the
- known max for each signal. Don't bother here. */
- return true;
-}
-
-static TileExcp gen_specill(DisasContext *dc, unsigned dest, unsigned srca,
- uint64_t bundle)
-{
- const char *mnemonic;
- int signo;
- int sigcode;
-
- if (dest == 0x1c && srca == 0x25) {
- signo = TARGET_SIGTRAP;
- sigcode = TARGET_TRAP_BRKPT;
- mnemonic = "bpt";
- } else if (dest == 0x1d && srca == 0x25
- && parse_from_addli(bundle, &signo, &sigcode)) {
- mnemonic = "raise";
- } else {
- signo = TARGET_SIGILL;
- sigcode = TARGET_ILL_ILLOPC;
- mnemonic = "ill";
- }
-
- return gen_signal(dc, signo, sigcode, mnemonic);
-}
-
-static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
- unsigned dest, unsigned srca, uint64_t bundle)
-{
- TCGv tdest, tsrca;
- const char *mnemonic;
- MemOp memop;
- TileExcp ret = TILEGX_EXCP_NONE;
- bool prefetch_nofault = false;
-
- /* Eliminate instructions with no output before doing anything else. */
- switch (opext) {
- case OE_RR_Y0(NOP):
- case OE_RR_Y1(NOP):
- case OE_RR_X0(NOP):
- case OE_RR_X1(NOP):
- mnemonic = "nop";
- goto done0;
- case OE_RR_Y0(FNOP):
- case OE_RR_Y1(FNOP):
- case OE_RR_X0(FNOP):
- case OE_RR_X1(FNOP):
- mnemonic = "fnop";
- goto done0;
- case OE_RR_X1(DRAIN):
- mnemonic = "drain";
- goto done0;
- case OE_RR_X1(FLUSHWB):
- mnemonic = "flushwb";
- goto done0;
- case OE_RR_X1(ILL):
- return gen_specill(dc, dest, srca, bundle);
- case OE_RR_Y1(ILL):
- return gen_signal(dc, TARGET_SIGILL, TARGET_ILL_ILLOPC, "ill");
- case OE_RR_X1(MF):
- mnemonic = "mf";
- goto done0;
- case OE_RR_X1(NAP):
- /* ??? This should yield, especially in system mode. */
- mnemonic = "nap";
- goto done0;
- case OE_RR_X1(IRET):
- gen_helper_ext01_ics(cpu_env);
- dc->jmp.cond = TCG_COND_ALWAYS;
- dc->jmp.dest = tcg_temp_new();
- tcg_gen_ld_tl(dc->jmp.dest, cpu_env,
- offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_0]));
- tcg_gen_andi_tl(dc->jmp.dest, dc->jmp.dest, ~7);
- mnemonic = "iret";
- goto done0;
- case OE_RR_X1(SWINT0):
- case OE_RR_X1(SWINT2):
- case OE_RR_X1(SWINT3):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RR_X1(SWINT1):
- ret = TILEGX_EXCP_SYSCALL;
- mnemonic = "swint1";
- done0:
- if (srca || dest) {
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
- return ret;
-
- case OE_RR_X1(DTLBPR):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RR_X1(FINV):
- mnemonic = "finv";
- goto done1;
- case OE_RR_X1(FLUSH):
- mnemonic = "flush";
- goto done1;
- case OE_RR_X1(ICOH):
- mnemonic = "icoh";
- goto done1;
- case OE_RR_X1(INV):
- mnemonic = "inv";
- goto done1;
- case OE_RR_X1(WH64):
- mnemonic = "wh64";
- goto done1;
- case OE_RR_X1(JRP):
- case OE_RR_Y1(JRP):
- mnemonic = "jrp";
- goto do_jr;
- case OE_RR_X1(JR):
- case OE_RR_Y1(JR):
- mnemonic = "jr";
- goto do_jr;
- case OE_RR_X1(JALRP):
- case OE_RR_Y1(JALRP):
- mnemonic = "jalrp";
- goto do_jalr;
- case OE_RR_X1(JALR):
- case OE_RR_Y1(JALR):
- mnemonic = "jalr";
- do_jalr:
- tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
- dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
- do_jr:
- dc->jmp.cond = TCG_COND_ALWAYS;
- dc->jmp.dest = tcg_temp_new();
- tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7);
- done1:
- if (dest) {
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]);
- return ret;
- }
-
- tdest = dest_gr(dc, dest);
- tsrca = load_gr(dc, srca);
-
- switch (opext) {
- case OE_RR_X0(CNTLZ):
- case OE_RR_Y0(CNTLZ):
- tcg_gen_clzi_tl(tdest, tsrca, TARGET_LONG_BITS);
- mnemonic = "cntlz";
- break;
- case OE_RR_X0(CNTTZ):
- case OE_RR_Y0(CNTTZ):
- tcg_gen_ctzi_tl(tdest, tsrca, TARGET_LONG_BITS);
- mnemonic = "cnttz";
- break;
- case OE_RR_X0(FSINGLE_PACK1):
- case OE_RR_Y0(FSINGLE_PACK1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RR_X1(LD1S):
- memop = MO_SB;
- mnemonic = "ld1s"; /* prefetch_l1_fault */
- goto do_load;
- case OE_RR_X1(LD1U):
- memop = MO_UB;
- mnemonic = "ld1u"; /* prefetch, prefetch_l1 */
- prefetch_nofault = (dest == TILEGX_R_ZERO);
- goto do_load;
- case OE_RR_X1(LD2S):
- memop = MO_TESW;
- mnemonic = "ld2s"; /* prefetch_l2_fault */
- goto do_load;
- case OE_RR_X1(LD2U):
- memop = MO_TEUW;
- mnemonic = "ld2u"; /* prefetch_l2 */
- prefetch_nofault = (dest == TILEGX_R_ZERO);
- goto do_load;
- case OE_RR_X1(LD4S):
- memop = MO_TESL;
- mnemonic = "ld4s"; /* prefetch_l3_fault */
- goto do_load;
- case OE_RR_X1(LD4U):
- memop = MO_TEUL;
- mnemonic = "ld4u"; /* prefetch_l3 */
- prefetch_nofault = (dest == TILEGX_R_ZERO);
- goto do_load;
- case OE_RR_X1(LDNT1S):
- memop = MO_SB;
- mnemonic = "ldnt1s";
- goto do_load;
- case OE_RR_X1(LDNT1U):
- memop = MO_UB;
- mnemonic = "ldnt1u";
- goto do_load;
- case OE_RR_X1(LDNT2S):
- memop = MO_TESW;
- mnemonic = "ldnt2s";
- goto do_load;
- case OE_RR_X1(LDNT2U):
- memop = MO_TEUW;
- mnemonic = "ldnt2u";
- goto do_load;
- case OE_RR_X1(LDNT4S):
- memop = MO_TESL;
- mnemonic = "ldnt4s";
- goto do_load;
- case OE_RR_X1(LDNT4U):
- memop = MO_TEUL;
- mnemonic = "ldnt4u";
- goto do_load;
- case OE_RR_X1(LDNT):
- memop = MO_TEQ;
- mnemonic = "ldnt";
- goto do_load;
- case OE_RR_X1(LD):
- memop = MO_TEQ;
- mnemonic = "ld";
- do_load:
- if (!prefetch_nofault) {
- tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
- }
- break;
- case OE_RR_X1(LDNA):
- tcg_gen_andi_tl(tdest, tsrca, ~7);
- tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
- mnemonic = "ldna";
- break;
- case OE_RR_X1(LNK):
- case OE_RR_Y1(LNK):
- if (srca) {
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
- tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
- mnemonic = "lnk";
- break;
- case OE_RR_X0(PCNT):
- case OE_RR_Y0(PCNT):
- tcg_gen_ctpop_tl(tdest, tsrca);
- mnemonic = "pcnt";
- break;
- case OE_RR_X0(REVBITS):
- case OE_RR_Y0(REVBITS):
- gen_helper_revbits(tdest, tsrca);
- mnemonic = "revbits";
- break;
- case OE_RR_X0(REVBYTES):
- case OE_RR_Y0(REVBYTES):
- tcg_gen_bswap64_tl(tdest, tsrca);
- mnemonic = "revbytes";
- break;
- case OE_RR_X0(TBLIDXB0):
- case OE_RR_Y0(TBLIDXB0):
- tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tsrca, 2, 8);
- mnemonic = "tblidxb0";
- break;
- case OE_RR_X0(TBLIDXB1):
- case OE_RR_Y0(TBLIDXB1):
- tcg_gen_shri_tl(tdest, tsrca, 8);
- tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
- mnemonic = "tblidxb1";
- break;
- case OE_RR_X0(TBLIDXB2):
- case OE_RR_Y0(TBLIDXB2):
- tcg_gen_shri_tl(tdest, tsrca, 16);
- tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
- mnemonic = "tblidxb2";
- break;
- case OE_RR_X0(TBLIDXB3):
- case OE_RR_Y0(TBLIDXB3):
- tcg_gen_shri_tl(tdest, tsrca, 24);
- tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
- mnemonic = "tblidxb3";
- break;
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
- reg_names[dest], reg_names[srca]);
- return ret;
-}
-
-static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext,
- unsigned dest, unsigned srca, unsigned srcb)
-{
- TCGv tdest = dest_gr(dc, dest);
- TCGv tsrca = load_gr(dc, srca);
- TCGv tsrcb = load_gr(dc, srcb);
- TCGv t0;
- const char *mnemonic;
-
- switch (opext) {
- case OE_RRR(ADDXSC, 0, X0):
- case OE_RRR(ADDXSC, 0, X1):
- gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_add_tl);
- mnemonic = "addxsc";
- break;
- case OE_RRR(ADDX, 0, X0):
- case OE_RRR(ADDX, 0, X1):
- case OE_RRR(ADDX, 0, Y0):
- case OE_RRR(ADDX, 0, Y1):
- tcg_gen_add_tl(tdest, tsrca, tsrcb);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "addx";
- break;
- case OE_RRR(ADD, 0, X0):
- case OE_RRR(ADD, 0, X1):
- case OE_RRR(ADD, 0, Y0):
- case OE_RRR(ADD, 0, Y1):
- tcg_gen_add_tl(tdest, tsrca, tsrcb);
- mnemonic = "add";
- break;
- case OE_RRR(AND, 0, X0):
- case OE_RRR(AND, 0, X1):
- case OE_RRR(AND, 5, Y0):
- case OE_RRR(AND, 5, Y1):
- tcg_gen_and_tl(tdest, tsrca, tsrcb);
- mnemonic = "and";
- break;
- case OE_RRR(CMOVEQZ, 0, X0):
- case OE_RRR(CMOVEQZ, 4, Y0):
- tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, load_zero(dc),
- tsrcb, load_gr(dc, dest));
- mnemonic = "cmoveqz";
- break;
- case OE_RRR(CMOVNEZ, 0, X0):
- case OE_RRR(CMOVNEZ, 4, Y0):
- tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, load_zero(dc),
- tsrcb, load_gr(dc, dest));
- mnemonic = "cmovnez";
- break;
- case OE_RRR(CMPEQ, 0, X0):
- case OE_RRR(CMPEQ, 0, X1):
- case OE_RRR(CMPEQ, 3, Y0):
- case OE_RRR(CMPEQ, 3, Y1):
- tcg_gen_setcond_tl(TCG_COND_EQ, tdest, tsrca, tsrcb);
- mnemonic = "cmpeq";
- break;
- case OE_RRR(CMPEXCH4, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_CMPEXCH4);
- mnemonic = "cmpexch4";
- break;
- case OE_RRR(CMPEXCH, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_CMPEXCH);
- mnemonic = "cmpexch";
- break;
- case OE_RRR(CMPLES, 0, X0):
- case OE_RRR(CMPLES, 0, X1):
- case OE_RRR(CMPLES, 2, Y0):
- case OE_RRR(CMPLES, 2, Y1):
- tcg_gen_setcond_tl(TCG_COND_LE, tdest, tsrca, tsrcb);
- mnemonic = "cmples";
- break;
- case OE_RRR(CMPLEU, 0, X0):
- case OE_RRR(CMPLEU, 0, X1):
- case OE_RRR(CMPLEU, 2, Y0):
- case OE_RRR(CMPLEU, 2, Y1):
- tcg_gen_setcond_tl(TCG_COND_LEU, tdest, tsrca, tsrcb);
- mnemonic = "cmpleu";
- break;
- case OE_RRR(CMPLTS, 0, X0):
- case OE_RRR(CMPLTS, 0, X1):
- case OE_RRR(CMPLTS, 2, Y0):
- case OE_RRR(CMPLTS, 2, Y1):
- tcg_gen_setcond_tl(TCG_COND_LT, tdest, tsrca, tsrcb);
- mnemonic = "cmplts";
- break;
- case OE_RRR(CMPLTU, 0, X0):
- case OE_RRR(CMPLTU, 0, X1):
- case OE_RRR(CMPLTU, 2, Y0):
- case OE_RRR(CMPLTU, 2, Y1):
- tcg_gen_setcond_tl(TCG_COND_LTU, tdest, tsrca, tsrcb);
- mnemonic = "cmpltu";
- break;
- case OE_RRR(CMPNE, 0, X0):
- case OE_RRR(CMPNE, 0, X1):
- case OE_RRR(CMPNE, 3, Y0):
- case OE_RRR(CMPNE, 3, Y1):
- tcg_gen_setcond_tl(TCG_COND_NE, tdest, tsrca, tsrcb);
- mnemonic = "cmpne";
- break;
- case OE_RRR(CMULAF, 0, X0):
- gen_helper_cmulaf(tdest, load_gr(dc, dest), tsrca, tsrcb);
- mnemonic = "cmulaf";
- break;
- case OE_RRR(CMULA, 0, X0):
- gen_helper_cmula(tdest, load_gr(dc, dest), tsrca, tsrcb);
- mnemonic = "cmula";
- break;
- case OE_RRR(CMULFR, 0, X0):
- gen_cmul2(tdest, tsrca, tsrcb, 15, 1 << 14);
- mnemonic = "cmulfr";
- break;
- case OE_RRR(CMULF, 0, X0):
- gen_cmul2(tdest, tsrca, tsrcb, 15, 0);
- mnemonic = "cmulf";
- break;
- case OE_RRR(CMULHR, 0, X0):
- gen_cmul2(tdest, tsrca, tsrcb, 16, 1 << 15);
- mnemonic = "cmulhr";
- break;
- case OE_RRR(CMULH, 0, X0):
- gen_cmul2(tdest, tsrca, tsrcb, 16, 0);
- mnemonic = "cmulh";
- break;
- case OE_RRR(CMUL, 0, X0):
- gen_helper_cmula(tdest, load_zero(dc), tsrca, tsrcb);
- mnemonic = "cmul";
- break;
- case OE_RRR(CRC32_32, 0, X0):
- gen_helper_crc32_32(tdest, tsrca, tsrcb);
- mnemonic = "crc32_32";
- break;
- case OE_RRR(CRC32_8, 0, X0):
- gen_helper_crc32_8(tdest, tsrca, tsrcb);
- mnemonic = "crc32_8";
- break;
- case OE_RRR(DBLALIGN2, 0, X0):
- case OE_RRR(DBLALIGN2, 0, X1):
- gen_dblaligni(tdest, tsrca, tsrcb, 16);
- mnemonic = "dblalign2";
- break;
- case OE_RRR(DBLALIGN4, 0, X0):
- case OE_RRR(DBLALIGN4, 0, X1):
- gen_dblaligni(tdest, tsrca, tsrcb, 32);
- mnemonic = "dblalign4";
- break;
- case OE_RRR(DBLALIGN6, 0, X0):
- case OE_RRR(DBLALIGN6, 0, X1):
- gen_dblaligni(tdest, tsrca, tsrcb, 48);
- mnemonic = "dblalign6";
- break;
- case OE_RRR(DBLALIGN, 0, X0):
- gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb);
- mnemonic = "dblalign";
- break;
- case OE_RRR(EXCH4, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_EXCH4);
- mnemonic = "exch4";
- break;
- case OE_RRR(EXCH, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_EXCH);
- mnemonic = "exch";
- break;
- case OE_RRR(FDOUBLE_ADDSUB, 0, X0):
- case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0):
- case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0):
- case OE_RRR(FDOUBLE_PACK1, 0, X0):
- case OE_RRR(FDOUBLE_PACK2, 0, X0):
- case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0):
- case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0):
- case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(FETCHADD4, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_FETCHADD4);
- mnemonic = "fetchadd4";
- break;
- case OE_RRR(FETCHADDGEZ4, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_FETCHADDGEZ4);
- mnemonic = "fetchaddgez4";
- break;
- case OE_RRR(FETCHADDGEZ, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_FETCHADDGEZ);
- mnemonic = "fetchaddgez";
- break;
- case OE_RRR(FETCHADD, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_FETCHADD);
- mnemonic = "fetchadd";
- break;
- case OE_RRR(FETCHAND4, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_FETCHAND4);
- mnemonic = "fetchand4";
- break;
- case OE_RRR(FETCHAND, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_FETCHAND);
- mnemonic = "fetchand";
- break;
- case OE_RRR(FETCHOR4, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_FETCHOR4);
- mnemonic = "fetchor4";
- break;
- case OE_RRR(FETCHOR, 0, X1):
- gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
- TILEGX_EXCP_OPCODE_FETCHOR);
- mnemonic = "fetchor";
- break;
- case OE_RRR(FSINGLE_ADD1, 0, X0):
- case OE_RRR(FSINGLE_ADDSUB2, 0, X0):
- case OE_RRR(FSINGLE_MUL1, 0, X0):
- case OE_RRR(FSINGLE_MUL2, 0, X0):
- case OE_RRR(FSINGLE_PACK2, 0, X0):
- case OE_RRR(FSINGLE_SUB1, 0, X0):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(MNZ, 0, X0):
- case OE_RRR(MNZ, 0, X1):
- case OE_RRR(MNZ, 4, Y0):
- case OE_RRR(MNZ, 4, Y1):
- t0 = load_zero(dc);
- tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, t0, tsrcb, t0);
- mnemonic = "mnz";
- break;
- case OE_RRR(MULAX, 0, X0):
- case OE_RRR(MULAX, 3, Y0):
- tcg_gen_mul_tl(tdest, tsrca, tsrcb);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "mulax";
- break;
- case OE_RRR(MULA_HS_HS, 0, X0):
- case OE_RRR(MULA_HS_HS, 9, Y0):
- gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_hs_hs";
- break;
- case OE_RRR(MULA_HS_HU, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_hs_hu";
- break;
- case OE_RRR(MULA_HS_LS, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_hs_ls";
- break;
- case OE_RRR(MULA_HS_LU, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_hs_lu";
- break;
- case OE_RRR(MULA_HU_HU, 0, X0):
- case OE_RRR(MULA_HU_HU, 9, Y0):
- gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_hu_hu";
- break;
- case OE_RRR(MULA_HU_LS, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_hu_ls";
- break;
- case OE_RRR(MULA_HU_LU, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_hu_lu";
- break;
- case OE_RRR(MULA_LS_LS, 0, X0):
- case OE_RRR(MULA_LS_LS, 9, Y0):
- gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_ls_ls";
- break;
- case OE_RRR(MULA_LS_LU, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_ls_lu";
- break;
- case OE_RRR(MULA_LU_LU, 0, X0):
- case OE_RRR(MULA_LU_LU, 9, Y0):
- gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
- tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
- mnemonic = "mula_lu_lu";
- break;
- case OE_RRR(MULX, 0, X0):
- case OE_RRR(MULX, 3, Y0):
- tcg_gen_mul_tl(tdest, tsrca, tsrcb);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "mulx";
- break;
- case OE_RRR(MUL_HS_HS, 0, X0):
- case OE_RRR(MUL_HS_HS, 8, Y0):
- gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
- mnemonic = "mul_hs_hs";
- break;
- case OE_RRR(MUL_HS_HU, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
- mnemonic = "mul_hs_hu";
- break;
- case OE_RRR(MUL_HS_LS, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
- mnemonic = "mul_hs_ls";
- break;
- case OE_RRR(MUL_HS_LU, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
- mnemonic = "mul_hs_lu";
- break;
- case OE_RRR(MUL_HU_HU, 0, X0):
- case OE_RRR(MUL_HU_HU, 8, Y0):
- gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
- mnemonic = "mul_hu_hu";
- break;
- case OE_RRR(MUL_HU_LS, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
- mnemonic = "mul_hu_ls";
- break;
- case OE_RRR(MUL_HU_LU, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
- mnemonic = "mul_hu_lu";
- break;
- case OE_RRR(MUL_LS_LS, 0, X0):
- case OE_RRR(MUL_LS_LS, 8, Y0):
- gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
- mnemonic = "mul_ls_ls";
- break;
- case OE_RRR(MUL_LS_LU, 0, X0):
- gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
- mnemonic = "mul_ls_lu";
- break;
- case OE_RRR(MUL_LU_LU, 0, X0):
- case OE_RRR(MUL_LU_LU, 8, Y0):
- gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
- mnemonic = "mul_lu_lu";
- break;
- case OE_RRR(MZ, 0, X0):
- case OE_RRR(MZ, 0, X1):
- case OE_RRR(MZ, 4, Y0):
- case OE_RRR(MZ, 4, Y1):
- t0 = load_zero(dc);
- tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, t0, tsrcb, t0);
- mnemonic = "mz";
- break;
- case OE_RRR(NOR, 0, X0):
- case OE_RRR(NOR, 0, X1):
- case OE_RRR(NOR, 5, Y0):
- case OE_RRR(NOR, 5, Y1):
- tcg_gen_nor_tl(tdest, tsrca, tsrcb);
- mnemonic = "nor";
- break;
- case OE_RRR(OR, 0, X0):
- case OE_RRR(OR, 0, X1):
- case OE_RRR(OR, 5, Y0):
- case OE_RRR(OR, 5, Y1):
- tcg_gen_or_tl(tdest, tsrca, tsrcb);
- mnemonic = "or";
- break;
- case OE_RRR(ROTL, 0, X0):
- case OE_RRR(ROTL, 0, X1):
- case OE_RRR(ROTL, 6, Y0):
- case OE_RRR(ROTL, 6, Y1):
- tcg_gen_andi_tl(tdest, tsrcb, 63);
- tcg_gen_rotl_tl(tdest, tsrca, tdest);
- mnemonic = "rotl";
- break;
- case OE_RRR(SHL1ADDX, 0, X0):
- case OE_RRR(SHL1ADDX, 0, X1):
- case OE_RRR(SHL1ADDX, 7, Y0):
- case OE_RRR(SHL1ADDX, 7, Y1):
- tcg_gen_shli_tl(tdest, tsrca, 1);
- tcg_gen_add_tl(tdest, tdest, tsrcb);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "shl1addx";
- break;
- case OE_RRR(SHL1ADD, 0, X0):
- case OE_RRR(SHL1ADD, 0, X1):
- case OE_RRR(SHL1ADD, 1, Y0):
- case OE_RRR(SHL1ADD, 1, Y1):
- tcg_gen_shli_tl(tdest, tsrca, 1);
- tcg_gen_add_tl(tdest, tdest, tsrcb);
- mnemonic = "shl1add";
- break;
- case OE_RRR(SHL2ADDX, 0, X0):
- case OE_RRR(SHL2ADDX, 0, X1):
- case OE_RRR(SHL2ADDX, 7, Y0):
- case OE_RRR(SHL2ADDX, 7, Y1):
- tcg_gen_shli_tl(tdest, tsrca, 2);
- tcg_gen_add_tl(tdest, tdest, tsrcb);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "shl2addx";
- break;
- case OE_RRR(SHL2ADD, 0, X0):
- case OE_RRR(SHL2ADD, 0, X1):
- case OE_RRR(SHL2ADD, 1, Y0):
- case OE_RRR(SHL2ADD, 1, Y1):
- tcg_gen_shli_tl(tdest, tsrca, 2);
- tcg_gen_add_tl(tdest, tdest, tsrcb);
- mnemonic = "shl2add";
- break;
- case OE_RRR(SHL3ADDX, 0, X0):
- case OE_RRR(SHL3ADDX, 0, X1):
- case OE_RRR(SHL3ADDX, 7, Y0):
- case OE_RRR(SHL3ADDX, 7, Y1):
- tcg_gen_shli_tl(tdest, tsrca, 3);
- tcg_gen_add_tl(tdest, tdest, tsrcb);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "shl3addx";
- break;
- case OE_RRR(SHL3ADD, 0, X0):
- case OE_RRR(SHL3ADD, 0, X1):
- case OE_RRR(SHL3ADD, 1, Y0):
- case OE_RRR(SHL3ADD, 1, Y1):
- tcg_gen_shli_tl(tdest, tsrca, 3);
- tcg_gen_add_tl(tdest, tdest, tsrcb);
- mnemonic = "shl3add";
- break;
- case OE_RRR(SHLX, 0, X0):
- case OE_RRR(SHLX, 0, X1):
- tcg_gen_andi_tl(tdest, tsrcb, 31);
- tcg_gen_shl_tl(tdest, tsrca, tdest);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "shlx";
- break;
- case OE_RRR(SHL, 0, X0):
- case OE_RRR(SHL, 0, X1):
- case OE_RRR(SHL, 6, Y0):
- case OE_RRR(SHL, 6, Y1):
- tcg_gen_andi_tl(tdest, tsrcb, 63);
- tcg_gen_shl_tl(tdest, tsrca, tdest);
- mnemonic = "shl";
- break;
- case OE_RRR(SHRS, 0, X0):
- case OE_RRR(SHRS, 0, X1):
- case OE_RRR(SHRS, 6, Y0):
- case OE_RRR(SHRS, 6, Y1):
- tcg_gen_andi_tl(tdest, tsrcb, 63);
- tcg_gen_sar_tl(tdest, tsrca, tdest);
- mnemonic = "shrs";
- break;
- case OE_RRR(SHRUX, 0, X0):
- case OE_RRR(SHRUX, 0, X1):
- t0 = tcg_temp_new();
- tcg_gen_andi_tl(t0, tsrcb, 31);
- tcg_gen_ext32u_tl(tdest, tsrca);
- tcg_gen_shr_tl(tdest, tdest, t0);
- tcg_gen_ext32s_tl(tdest, tdest);
- tcg_temp_free(t0);
- mnemonic = "shrux";
- break;
- case OE_RRR(SHRU, 0, X0):
- case OE_RRR(SHRU, 0, X1):
- case OE_RRR(SHRU, 6, Y0):
- case OE_RRR(SHRU, 6, Y1):
- tcg_gen_andi_tl(tdest, tsrcb, 63);
- tcg_gen_shr_tl(tdest, tsrca, tdest);
- mnemonic = "shru";
- break;
- case OE_RRR(SHUFFLEBYTES, 0, X0):
- gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrcb);
- mnemonic = "shufflebytes";
- break;
- case OE_RRR(SUBXSC, 0, X0):
- case OE_RRR(SUBXSC, 0, X1):
- gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
- mnemonic = "subxsc";
- break;
- case OE_RRR(SUBX, 0, X0):
- case OE_RRR(SUBX, 0, X1):
- case OE_RRR(SUBX, 0, Y0):
- case OE_RRR(SUBX, 0, Y1):
- tcg_gen_sub_tl(tdest, tsrca, tsrcb);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "subx";
- break;
- case OE_RRR(SUB, 0, X0):
- case OE_RRR(SUB, 0, X1):
- case OE_RRR(SUB, 0, Y0):
- case OE_RRR(SUB, 0, Y1):
- tcg_gen_sub_tl(tdest, tsrca, tsrcb);
- mnemonic = "sub";
- break;
- case OE_RRR(V1ADDUC, 0, X0):
- case OE_RRR(V1ADDUC, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V1ADD, 0, X0):
- case OE_RRR(V1ADD, 0, X1):
- gen_v12add(tdest, tsrca, tsrcb, V1_IMM(0x80));
- mnemonic = "v1add";
- break;
- case OE_RRR(V1ADIFFU, 0, X0):
- case OE_RRR(V1AVGU, 0, X0):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V1CMPEQ, 0, X0):
- case OE_RRR(V1CMPEQ, 0, X1):
- tcg_gen_xor_tl(tdest, tsrca, tsrcb);
- gen_v1cmpeq0(tdest);
- mnemonic = "v1cmpeq";
- break;
- case OE_RRR(V1CMPLES, 0, X0):
- case OE_RRR(V1CMPLES, 0, X1):
- case OE_RRR(V1CMPLEU, 0, X0):
- case OE_RRR(V1CMPLEU, 0, X1):
- case OE_RRR(V1CMPLTS, 0, X0):
- case OE_RRR(V1CMPLTS, 0, X1):
- case OE_RRR(V1CMPLTU, 0, X0):
- case OE_RRR(V1CMPLTU, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V1CMPNE, 0, X0):
- case OE_RRR(V1CMPNE, 0, X1):
- tcg_gen_xor_tl(tdest, tsrca, tsrcb);
- gen_v1cmpne0(tdest);
- mnemonic = "v1cmpne";
- break;
- case OE_RRR(V1DDOTPUA, 0, X0):
- case OE_RRR(V1DDOTPUSA, 0, X0):
- case OE_RRR(V1DDOTPUS, 0, X0):
- case OE_RRR(V1DDOTPU, 0, X0):
- case OE_RRR(V1DOTPA, 0, X0):
- case OE_RRR(V1DOTPUA, 0, X0):
- case OE_RRR(V1DOTPUSA, 0, X0):
- case OE_RRR(V1DOTPUS, 0, X0):
- case OE_RRR(V1DOTPU, 0, X0):
- case OE_RRR(V1DOTP, 0, X0):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V1INT_H, 0, X0):
- case OE_RRR(V1INT_H, 0, X1):
- gen_helper_v1int_h(tdest, tsrca, tsrcb);
- mnemonic = "v1int_h";
- break;
- case OE_RRR(V1INT_L, 0, X0):
- case OE_RRR(V1INT_L, 0, X1):
- gen_helper_v1int_l(tdest, tsrca, tsrcb);
- mnemonic = "v1int_l";
- break;
- case OE_RRR(V1MAXU, 0, X0):
- case OE_RRR(V1MAXU, 0, X1):
- case OE_RRR(V1MINU, 0, X0):
- case OE_RRR(V1MINU, 0, X1):
- case OE_RRR(V1MNZ, 0, X0):
- case OE_RRR(V1MNZ, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V1MULTU, 0, X0):
- gen_helper_v1multu(tdest, tsrca, tsrcb);
- mnemonic = "v1multu";
- break;
- case OE_RRR(V1MULUS, 0, X0):
- case OE_RRR(V1MULU, 0, X0):
- case OE_RRR(V1MZ, 0, X0):
- case OE_RRR(V1MZ, 0, X1):
- case OE_RRR(V1SADAU, 0, X0):
- case OE_RRR(V1SADU, 0, X0):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V1SHL, 0, X0):
- case OE_RRR(V1SHL, 0, X1):
- gen_helper_v1shl(tdest, tsrca, tsrcb);
- mnemonic = "v1shl";
- break;
- case OE_RRR(V1SHRS, 0, X0):
- case OE_RRR(V1SHRS, 0, X1):
- gen_helper_v1shrs(tdest, tsrca, tsrcb);
- mnemonic = "v1shrs";
- break;
- case OE_RRR(V1SHRU, 0, X0):
- case OE_RRR(V1SHRU, 0, X1):
- gen_helper_v1shru(tdest, tsrca, tsrcb);
- mnemonic = "v1shru";
- break;
- case OE_RRR(V1SUBUC, 0, X0):
- case OE_RRR(V1SUBUC, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V1SUB, 0, X0):
- case OE_RRR(V1SUB, 0, X1):
- gen_v12sub(tdest, tsrca, tsrcb, V1_IMM(0x80));
- mnemonic = "v1sub";
- break;
- case OE_RRR(V2ADDSC, 0, X0):
- case OE_RRR(V2ADDSC, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V2ADD, 0, X0):
- case OE_RRR(V2ADD, 0, X1):
- gen_v12add(tdest, tsrca, tsrcb, V2_IMM(0x8000));
- mnemonic = "v2add";
- break;
- case OE_RRR(V2ADIFFS, 0, X0):
- case OE_RRR(V2AVGS, 0, X0):
- case OE_RRR(V2CMPEQ, 0, X0):
- case OE_RRR(V2CMPEQ, 0, X1):
- case OE_RRR(V2CMPLES, 0, X0):
- case OE_RRR(V2CMPLES, 0, X1):
- case OE_RRR(V2CMPLEU, 0, X0):
- case OE_RRR(V2CMPLEU, 0, X1):
- case OE_RRR(V2CMPLTS, 0, X0):
- case OE_RRR(V2CMPLTS, 0, X1):
- case OE_RRR(V2CMPLTU, 0, X0):
- case OE_RRR(V2CMPLTU, 0, X1):
- case OE_RRR(V2CMPNE, 0, X0):
- case OE_RRR(V2CMPNE, 0, X1):
- case OE_RRR(V2DOTPA, 0, X0):
- case OE_RRR(V2DOTP, 0, X0):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V2INT_H, 0, X0):
- case OE_RRR(V2INT_H, 0, X1):
- gen_helper_v2int_h(tdest, tsrca, tsrcb);
- mnemonic = "v2int_h";
- break;
- case OE_RRR(V2INT_L, 0, X0):
- case OE_RRR(V2INT_L, 0, X1):
- gen_helper_v2int_l(tdest, tsrca, tsrcb);
- mnemonic = "v2int_l";
- break;
- case OE_RRR(V2MAXS, 0, X0):
- case OE_RRR(V2MAXS, 0, X1):
- case OE_RRR(V2MINS, 0, X0):
- case OE_RRR(V2MINS, 0, X1):
- case OE_RRR(V2MNZ, 0, X0):
- case OE_RRR(V2MNZ, 0, X1):
- case OE_RRR(V2MULFSC, 0, X0):
- case OE_RRR(V2MULS, 0, X0):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V2MULTS, 0, X0):
- gen_helper_v2mults(tdest, tsrca, tsrcb);
- mnemonic = "v2mults";
- break;
- case OE_RRR(V2MZ, 0, X0):
- case OE_RRR(V2MZ, 0, X1):
- case OE_RRR(V2PACKH, 0, X0):
- case OE_RRR(V2PACKH, 0, X1):
- case OE_RRR(V2PACKL, 0, X0):
- case OE_RRR(V2PACKL, 0, X1):
- case OE_RRR(V2PACKUC, 0, X0):
- case OE_RRR(V2PACKUC, 0, X1):
- case OE_RRR(V2SADAS, 0, X0):
- case OE_RRR(V2SADAU, 0, X0):
- case OE_RRR(V2SADS, 0, X0):
- case OE_RRR(V2SADU, 0, X0):
- case OE_RRR(V2SHLSC, 0, X0):
- case OE_RRR(V2SHLSC, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V2SHL, 0, X0):
- case OE_RRR(V2SHL, 0, X1):
- gen_helper_v2shl(tdest, tsrca, tsrcb);
- mnemonic = "v2shl";
- break;
- case OE_RRR(V2SHRS, 0, X0):
- case OE_RRR(V2SHRS, 0, X1):
- gen_helper_v2shrs(tdest, tsrca, tsrcb);
- mnemonic = "v2shrs";
- break;
- case OE_RRR(V2SHRU, 0, X0):
- case OE_RRR(V2SHRU, 0, X1):
- gen_helper_v2shru(tdest, tsrca, tsrcb);
- mnemonic = "v2shru";
- break;
- case OE_RRR(V2SUBSC, 0, X0):
- case OE_RRR(V2SUBSC, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V2SUB, 0, X0):
- case OE_RRR(V2SUB, 0, X1):
- gen_v12sub(tdest, tsrca, tsrcb, V2_IMM(0x8000));
- mnemonic = "v2sub";
- break;
- case OE_RRR(V4ADDSC, 0, X0):
- case OE_RRR(V4ADDSC, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V4ADD, 0, X0):
- case OE_RRR(V4ADD, 0, X1):
- gen_v4op(tdest, tsrca, tsrcb, tcg_gen_add_i32);
- mnemonic = "v4add";
- break;
- case OE_RRR(V4INT_H, 0, X0):
- case OE_RRR(V4INT_H, 0, X1):
- tcg_gen_shri_tl(tdest, tsrcb, 32);
- tcg_gen_deposit_tl(tdest, tsrca, tdest, 0, 32);
- mnemonic = "v4int_h";
- break;
- case OE_RRR(V4INT_L, 0, X0):
- case OE_RRR(V4INT_L, 0, X1):
- tcg_gen_deposit_tl(tdest, tsrcb, tsrca, 32, 32);
- mnemonic = "v4int_l";
- break;
- case OE_RRR(V4PACKSC, 0, X0):
- case OE_RRR(V4PACKSC, 0, X1):
- case OE_RRR(V4SHLSC, 0, X0):
- case OE_RRR(V4SHLSC, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V4SHL, 0, X0):
- case OE_RRR(V4SHL, 0, X1):
- gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shl_i32);
- mnemonic = "v4shl";
- break;
- case OE_RRR(V4SHRS, 0, X0):
- case OE_RRR(V4SHRS, 0, X1):
- gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_sar_i32);
- mnemonic = "v4shrs";
- break;
- case OE_RRR(V4SHRU, 0, X0):
- case OE_RRR(V4SHRU, 0, X1):
- gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shr_i32);
- mnemonic = "v4shru";
- break;
- case OE_RRR(V4SUBSC, 0, X0):
- case OE_RRR(V4SUBSC, 0, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_RRR(V4SUB, 0, X0):
- case OE_RRR(V4SUB, 0, X1):
- gen_v4op(tdest, tsrca, tsrcb, tcg_gen_sub_i32);
- mnemonic = "v2sub";
- break;
- case OE_RRR(XOR, 0, X0):
- case OE_RRR(XOR, 0, X1):
- case OE_RRR(XOR, 5, Y0):
- case OE_RRR(XOR, 5, Y1):
- tcg_gen_xor_tl(tdest, tsrca, tsrcb);
- mnemonic = "xor";
- break;
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic,
- reg_names[dest], reg_names[srca], reg_names[srcb]);
- return TILEGX_EXCP_NONE;
-}
-
-static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
- unsigned dest, unsigned srca, int imm)
-{
- TCGv tdest = dest_gr(dc, dest);
- TCGv tsrca = load_gr(dc, srca);
- bool prefetch_nofault = false;
- const char *mnemonic;
- MemOp memop;
- int i2, i3;
- TCGv t0;
-
- switch (opext) {
- case OE(ADDI_OPCODE_Y0, 0, Y0):
- case OE(ADDI_OPCODE_Y1, 0, Y1):
- case OE_IM(ADDI, X0):
- case OE_IM(ADDI, X1):
- tcg_gen_addi_tl(tdest, tsrca, imm);
- mnemonic = "addi";
- break;
- case OE(ADDXI_OPCODE_Y0, 0, Y0):
- case OE(ADDXI_OPCODE_Y1, 0, Y1):
- case OE_IM(ADDXI, X0):
- case OE_IM(ADDXI, X1):
- tcg_gen_addi_tl(tdest, tsrca, imm);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "addxi";
- break;
- case OE(ANDI_OPCODE_Y0, 0, Y0):
- case OE(ANDI_OPCODE_Y1, 0, Y1):
- case OE_IM(ANDI, X0):
- case OE_IM(ANDI, X1):
- tcg_gen_andi_tl(tdest, tsrca, imm);
- mnemonic = "andi";
- break;
- case OE(CMPEQI_OPCODE_Y0, 0, Y0):
- case OE(CMPEQI_OPCODE_Y1, 0, Y1):
- case OE_IM(CMPEQI, X0):
- case OE_IM(CMPEQI, X1):
- tcg_gen_setcondi_tl(TCG_COND_EQ, tdest, tsrca, imm);
- mnemonic = "cmpeqi";
- break;
- case OE(CMPLTSI_OPCODE_Y0, 0, Y0):
- case OE(CMPLTSI_OPCODE_Y1, 0, Y1):
- case OE_IM(CMPLTSI, X0):
- case OE_IM(CMPLTSI, X1):
- tcg_gen_setcondi_tl(TCG_COND_LT, tdest, tsrca, imm);
- mnemonic = "cmpltsi";
- break;
- case OE_IM(CMPLTUI, X0):
- case OE_IM(CMPLTUI, X1):
- tcg_gen_setcondi_tl(TCG_COND_LTU, tdest, tsrca, imm);
- mnemonic = "cmpltui";
- break;
- case OE_IM(LD1S_ADD, X1):
- memop = MO_SB;
- mnemonic = "ld1s_add"; /* prefetch_add_l1_fault */
- goto do_load_add;
- case OE_IM(LD1U_ADD, X1):
- memop = MO_UB;
- mnemonic = "ld1u_add"; /* prefetch_add_l1 */
- prefetch_nofault = (dest == TILEGX_R_ZERO);
- goto do_load_add;
- case OE_IM(LD2S_ADD, X1):
- memop = MO_TESW;
- mnemonic = "ld2s_add"; /* prefetch_add_l2_fault */
- goto do_load_add;
- case OE_IM(LD2U_ADD, X1):
- memop = MO_TEUW;
- mnemonic = "ld2u_add"; /* prefetch_add_l2 */
- prefetch_nofault = (dest == TILEGX_R_ZERO);
- goto do_load_add;
- case OE_IM(LD4S_ADD, X1):
- memop = MO_TESL;
- mnemonic = "ld4s_add"; /* prefetch_add_l3_fault */
- goto do_load_add;
- case OE_IM(LD4U_ADD, X1):
- memop = MO_TEUL;
- mnemonic = "ld4u_add"; /* prefetch_add_l3 */
- prefetch_nofault = (dest == TILEGX_R_ZERO);
- goto do_load_add;
- case OE_IM(LDNT1S_ADD, X1):
- memop = MO_SB;
- mnemonic = "ldnt1s_add";
- goto do_load_add;
- case OE_IM(LDNT1U_ADD, X1):
- memop = MO_UB;
- mnemonic = "ldnt1u_add";
- goto do_load_add;
- case OE_IM(LDNT2S_ADD, X1):
- memop = MO_TESW;
- mnemonic = "ldnt2s_add";
- goto do_load_add;
- case OE_IM(LDNT2U_ADD, X1):
- memop = MO_TEUW;
- mnemonic = "ldnt2u_add";
- goto do_load_add;
- case OE_IM(LDNT4S_ADD, X1):
- memop = MO_TESL;
- mnemonic = "ldnt4s_add";
- goto do_load_add;
- case OE_IM(LDNT4U_ADD, X1):
- memop = MO_TEUL;
- mnemonic = "ldnt4u_add";
- goto do_load_add;
- case OE_IM(LDNT_ADD, X1):
- memop = MO_TEQ;
- mnemonic = "ldnt_add";
- goto do_load_add;
- case OE_IM(LD_ADD, X1):
- memop = MO_TEQ;
- mnemonic = "ld_add";
- do_load_add:
- if (!prefetch_nofault) {
- tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
- }
- tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
- break;
- case OE_IM(LDNA_ADD, X1):
- tcg_gen_andi_tl(tdest, tsrca, ~7);
- tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
- tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
- mnemonic = "ldna_add";
- break;
- case OE_IM(ORI, X0):
- case OE_IM(ORI, X1):
- tcg_gen_ori_tl(tdest, tsrca, imm);
- mnemonic = "ori";
- break;
- case OE_IM(V1ADDI, X0):
- case OE_IM(V1ADDI, X1):
- t0 = tcg_const_tl(V1_IMM(imm));
- gen_v12add(tdest, tsrca, t0, V1_IMM(0x80));
- tcg_temp_free(t0);
- mnemonic = "v1addi";
- break;
- case OE_IM(V1CMPEQI, X0):
- case OE_IM(V1CMPEQI, X1):
- tcg_gen_xori_tl(tdest, tsrca, V1_IMM(imm));
- gen_v1cmpeq0(tdest);
- mnemonic = "v1cmpeqi";
- break;
- case OE_IM(V1CMPLTSI, X0):
- case OE_IM(V1CMPLTSI, X1):
- case OE_IM(V1CMPLTUI, X0):
- case OE_IM(V1CMPLTUI, X1):
- case OE_IM(V1MAXUI, X0):
- case OE_IM(V1MAXUI, X1):
- case OE_IM(V1MINUI, X0):
- case OE_IM(V1MINUI, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_IM(V2ADDI, X0):
- case OE_IM(V2ADDI, X1):
- t0 = tcg_const_tl(V2_IMM(imm));
- gen_v12add(tdest, tsrca, t0, V2_IMM(0x8000));
- tcg_temp_free(t0);
- mnemonic = "v2addi";
- break;
- case OE_IM(V2CMPEQI, X0):
- case OE_IM(V2CMPEQI, X1):
- case OE_IM(V2CMPLTSI, X0):
- case OE_IM(V2CMPLTSI, X1):
- case OE_IM(V2CMPLTUI, X0):
- case OE_IM(V2CMPLTUI, X1):
- case OE_IM(V2MAXSI, X0):
- case OE_IM(V2MAXSI, X1):
- case OE_IM(V2MINSI, X0):
- case OE_IM(V2MINSI, X1):
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- case OE_IM(XORI, X0):
- case OE_IM(XORI, X1):
- tcg_gen_xori_tl(tdest, tsrca, imm);
- mnemonic = "xori";
- break;
-
- case OE_SH(ROTLI, X0):
- case OE_SH(ROTLI, X1):
- case OE_SH(ROTLI, Y0):
- case OE_SH(ROTLI, Y1):
- tcg_gen_rotli_tl(tdest, tsrca, imm);
- mnemonic = "rotli";
- break;
- case OE_SH(SHLI, X0):
- case OE_SH(SHLI, X1):
- case OE_SH(SHLI, Y0):
- case OE_SH(SHLI, Y1):
- tcg_gen_shli_tl(tdest, tsrca, imm);
- mnemonic = "shli";
- break;
- case OE_SH(SHLXI, X0):
- case OE_SH(SHLXI, X1):
- tcg_gen_shli_tl(tdest, tsrca, imm & 31);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "shlxi";
- break;
- case OE_SH(SHRSI, X0):
- case OE_SH(SHRSI, X1):
- case OE_SH(SHRSI, Y0):
- case OE_SH(SHRSI, Y1):
- tcg_gen_sari_tl(tdest, tsrca, imm);
- mnemonic = "shrsi";
- break;
- case OE_SH(SHRUI, X0):
- case OE_SH(SHRUI, X1):
- case OE_SH(SHRUI, Y0):
- case OE_SH(SHRUI, Y1):
- tcg_gen_shri_tl(tdest, tsrca, imm);
- mnemonic = "shrui";
- break;
- case OE_SH(SHRUXI, X0):
- case OE_SH(SHRUXI, X1):
- if ((imm & 31) == 0) {
- tcg_gen_ext32s_tl(tdest, tsrca);
- } else {
- tcg_gen_ext32u_tl(tdest, tsrca);
- tcg_gen_shri_tl(tdest, tdest, imm & 31);
- }
- mnemonic = "shlxi";
- break;
- case OE_SH(V1SHLI, X0):
- case OE_SH(V1SHLI, X1):
- i2 = imm & 7;
- i3 = 0xff >> i2;
- tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
- tcg_gen_shli_tl(tdest, tdest, i2);
- mnemonic = "v1shli";
- break;
- case OE_SH(V1SHRSI, X0):
- case OE_SH(V1SHRSI, X1):
- t0 = tcg_const_tl(imm & 7);
- gen_helper_v1shrs(tdest, tsrca, t0);
- tcg_temp_free(t0);
- mnemonic = "v1shrsi";
- break;
- case OE_SH(V1SHRUI, X0):
- case OE_SH(V1SHRUI, X1):
- i2 = imm & 7;
- i3 = (0xff << i2) & 0xff;
- tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
- tcg_gen_shri_tl(tdest, tdest, i2);
- mnemonic = "v1shrui";
- break;
- case OE_SH(V2SHLI, X0):
- case OE_SH(V2SHLI, X1):
- i2 = imm & 15;
- i3 = 0xffff >> i2;
- tcg_gen_andi_tl(tdest, tsrca, V2_IMM(i3));
- tcg_gen_shli_tl(tdest, tdest, i2);
- mnemonic = "v2shli";
- break;
- case OE_SH(V2SHRSI, X0):
- case OE_SH(V2SHRSI, X1):
- t0 = tcg_const_tl(imm & 15);
- gen_helper_v2shrs(tdest, tsrca, t0);
- tcg_temp_free(t0);
- mnemonic = "v2shrsi";
- break;
- case OE_SH(V2SHRUI, X0):
- case OE_SH(V2SHRUI, X1):
- i2 = imm & 15;
- i3 = (0xffff << i2) & 0xffff;
- tcg_gen_andi_tl(tdest, tsrca, V2_IMM(i3));
- tcg_gen_shri_tl(tdest, tdest, i2);
- mnemonic = "v2shrui";
- break;
-
- case OE(ADDLI_OPCODE_X0, 0, X0):
- case OE(ADDLI_OPCODE_X1, 0, X1):
- tcg_gen_addi_tl(tdest, tsrca, imm);
- mnemonic = "addli";
- break;
- case OE(ADDXLI_OPCODE_X0, 0, X0):
- case OE(ADDXLI_OPCODE_X1, 0, X1):
- tcg_gen_addi_tl(tdest, tsrca, imm);
- tcg_gen_ext32s_tl(tdest, tdest);
- mnemonic = "addxli";
- break;
- case OE(SHL16INSLI_OPCODE_X0, 0, X0):
- case OE(SHL16INSLI_OPCODE_X1, 0, X1):
- tcg_gen_shli_tl(tdest, tsrca, 16);
- tcg_gen_ori_tl(tdest, tdest, imm & 0xffff);
- mnemonic = "shl16insli";
- break;
-
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic,
- reg_names[dest], reg_names[srca], imm);
- return TILEGX_EXCP_NONE;
-}
-
-static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext,
- unsigned dest, unsigned srca,
- unsigned bfs, unsigned bfe)
-{
- TCGv tdest = dest_gr(dc, dest);
- TCGv tsrca = load_gr(dc, srca);
- TCGv tsrcd;
- int len;
- const char *mnemonic;
-
- /* The bitfield is either between E and S inclusive,
- or up from S and down from E inclusive. */
- if (bfs <= bfe) {
- len = bfe - bfs + 1;
- } else {
- len = (64 - bfs) + (bfe + 1);
- }
-
- switch (ext) {
- case BFEXTU_BF_OPCODE_X0:
- if (bfs == 0 && bfe == 7) {
- tcg_gen_ext8u_tl(tdest, tsrca);
- } else if (bfs == 0 && bfe == 15) {
- tcg_gen_ext16u_tl(tdest, tsrca);
- } else if (bfs == 0 && bfe == 31) {
- tcg_gen_ext32u_tl(tdest, tsrca);
- } else {
- int rol = 63 - bfe;
- if (bfs <= bfe) {
- tcg_gen_shli_tl(tdest, tsrca, rol);
- } else {
- tcg_gen_rotli_tl(tdest, tsrca, rol);
- }
- tcg_gen_shri_tl(tdest, tdest, (bfs + rol) & 63);
- }
- mnemonic = "bfextu";
- break;
-
- case BFEXTS_BF_OPCODE_X0:
- if (bfs == 0 && bfe == 7) {
- tcg_gen_ext8s_tl(tdest, tsrca);
- } else if (bfs == 0 && bfe == 15) {
- tcg_gen_ext16s_tl(tdest, tsrca);
- } else if (bfs == 0 && bfe == 31) {
- tcg_gen_ext32s_tl(tdest, tsrca);
- } else {
- int rol = 63 - bfe;
- if (bfs <= bfe) {
- tcg_gen_shli_tl(tdest, tsrca, rol);
- } else {
- tcg_gen_rotli_tl(tdest, tsrca, rol);
- }
- tcg_gen_sari_tl(tdest, tdest, (bfs + rol) & 63);
- }
- mnemonic = "bfexts";
- break;
-
- case BFINS_BF_OPCODE_X0:
- tsrcd = load_gr(dc, dest);
- if (bfs <= bfe) {
- tcg_gen_deposit_tl(tdest, tsrcd, tsrca, bfs, len);
- } else {
- tcg_gen_rotri_tl(tdest, tsrcd, bfs);
- tcg_gen_deposit_tl(tdest, tdest, tsrca, 0, len);
- tcg_gen_rotli_tl(tdest, tdest, bfs);
- }
- mnemonic = "bfins";
- break;
-
- case MM_BF_OPCODE_X0:
- tsrcd = load_gr(dc, dest);
- if (bfs == 0) {
- tcg_gen_deposit_tl(tdest, tsrca, tsrcd, 0, len);
- } else {
- uint64_t mask = len == 64 ? -1 : rol64((1ULL << len) - 1, bfs);
- TCGv tmp = tcg_const_tl(mask);
-
- tcg_gen_and_tl(tdest, tsrcd, tmp);
- tcg_gen_andc_tl(tmp, tsrca, tmp);
- tcg_gen_or_tl(tdest, tdest, tmp);
- tcg_temp_free(tmp);
- }
- mnemonic = "mm";
- break;
-
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic,
- reg_names[dest], reg_names[srca], bfs, bfe);
- return TILEGX_EXCP_NONE;
-}
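
For the general BFEXTU case the translator moves the most significant bit of the field up to bit 63 (a shift when the field does not wrap, a rotate when it does) and then shifts right to drop everything below it; BFEXTS is identical except that the final shift is arithmetic. The unsigned variant as plain C, separate from the TCG code above:

#include <stdint.h>

/* Extract the bitfield running from bit bfs to bit bfe inclusive
 * (wrapping past bit 63 when bfs > bfe), zero-extended. */
static uint64_t bfextu(uint64_t x, unsigned bfs, unsigned bfe)
{
    unsigned rol = 63 - bfe;                 /* bring bit bfe up to bit 63 */
    uint64_t t;

    if (bfs <= bfe) {
        t = x << rol;                        /* non-wrapping: plain shift */
    } else {
        /* Wrapping field: bfe < bfs <= 63, so rol is at least 1 here. */
        t = (x << rol) | (x >> (64 - rol));  /* rotate left by rol */
    }
    return t >> ((bfs + rol) & 63);          /* drop the bits below the field */
}
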
-
-static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext,
- unsigned srca, int off)
-{
- target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
- const char *mnemonic;
-
- dc->jmp.dest = tcg_const_tl(tgt);
- dc->jmp.val1 = tcg_temp_new();
- tcg_gen_mov_tl(dc->jmp.val1, load_gr(dc, srca));
-
- /* Note that the "predict taken" opcodes have bit 0 clear.
- Therefore, fold the two cases together by setting bit 0. */
- switch (ext | 1) {
- case BEQZ_BRANCH_OPCODE_X1:
- dc->jmp.cond = TCG_COND_EQ;
- mnemonic = "beqz";
- break;
- case BNEZ_BRANCH_OPCODE_X1:
- dc->jmp.cond = TCG_COND_NE;
- mnemonic = "bnez";
- break;
- case BGEZ_BRANCH_OPCODE_X1:
- dc->jmp.cond = TCG_COND_GE;
- mnemonic = "bgez";
- break;
- case BGTZ_BRANCH_OPCODE_X1:
- dc->jmp.cond = TCG_COND_GT;
- mnemonic = "bgtz";
- break;
- case BLEZ_BRANCH_OPCODE_X1:
- dc->jmp.cond = TCG_COND_LE;
- mnemonic = "blez";
- break;
- case BLTZ_BRANCH_OPCODE_X1:
- dc->jmp.cond = TCG_COND_LT;
- mnemonic = "bltz";
- break;
- case BLBC_BRANCH_OPCODE_X1:
- dc->jmp.cond = TCG_COND_EQ;
- tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
- mnemonic = "blbc";
- break;
- case BLBS_BRANCH_OPCODE_X1:
- dc->jmp.cond = TCG_COND_NE;
- tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
- mnemonic = "blbs";
- break;
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
- qemu_log("%s%s %s, " TARGET_FMT_lx " <%s>",
- mnemonic, ext & 1 ? "" : "t",
- reg_names[srca], tgt, lookup_symbol(tgt));
- }
- return TILEGX_EXCP_NONE;
-}
-
-static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off)
-{
- target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
- const char *mnemonic = "j";
-
- /* The extension field is 1 bit, therefore we only have JAL and J. */
- if (ext == JAL_JUMP_OPCODE_X1) {
- tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
- dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
- mnemonic = "jal";
- }
- dc->jmp.cond = TCG_COND_ALWAYS;
- dc->jmp.dest = tcg_const_tl(tgt);
-
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
- qemu_log("%s " TARGET_FMT_lx " <%s>",
- mnemonic, tgt, lookup_symbol(tgt));
- }
- return TILEGX_EXCP_NONE;
-}
-
-typedef struct {
- const char *name;
- intptr_t offset;
- void (*get)(TCGv, TCGv_ptr);
- void (*put)(TCGv_ptr, TCGv);
-} TileSPR;
-
-static const TileSPR *find_spr(unsigned spr)
-{
- /* Allow the compiler to construct the binary search tree. */
-#define D(N, O, G, P) \
- case SPR_##N: { static const TileSPR x = { #N, O, G, P }; return &x; }
-
- switch (spr) {
- D(CMPEXCH_VALUE,
- offsetof(CPUTLGState, spregs[TILEGX_SPR_CMPEXCH]), 0, 0)
- D(INTERRUPT_CRITICAL_SECTION,
- offsetof(CPUTLGState, spregs[TILEGX_SPR_CRITICAL_SEC]), 0, 0)
- D(SIM_CONTROL,
- offsetof(CPUTLGState, spregs[TILEGX_SPR_SIM_CONTROL]), 0, 0)
- D(EX_CONTEXT_0_0,
- offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_0]), 0, 0)
- D(EX_CONTEXT_0_1,
- offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_1]), 0, 0)
- }
-
-#undef D
-
- qemu_log_mask(LOG_UNIMP, "UNIMP SPR %u\n", spr);
- return NULL;
-}
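
find_spr() leans on the comment above it: the switch statement itself is the lookup structure, and each case returns the address of a function-local static const descriptor, so no table has to be built at run time. The same pattern in isolation, with hypothetical register numbers used purely for illustration:

#include <stdio.h>
#include <stddef.h>

typedef struct {
    const char *name;
    int offset;
} RegDesc;

/* Hypothetical special-register numbers, not taken from any real target. */
enum { SPR_FOO = 0x2780, SPR_BAR = 0x2781 };

static const RegDesc *find_desc(unsigned spr)
{
    /* One static const object per case; the compiler turns the switch
     * into the search tree or jump table. */
#define D(N, O) \
    case SPR_##N: { static const RegDesc d = { #N, O }; return &d; }

    switch (spr) {
    D(FOO, 0)
    D(BAR, 8)
    }
#undef D

    return NULL;                    /* unknown register */
}

int main(void)
{
    const RegDesc *d = find_desc(SPR_BAR);
    printf("%s at offset %d\n", d ? d->name : "<none>", d ? d->offset : -1);
    return 0;
}
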
-
-static TileExcp gen_mtspr_x1(DisasContext *dc, unsigned spr, unsigned srca)
-{
- const TileSPR *def = find_spr(spr);
- TCGv tsrca;
-
- if (def == NULL) {
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr spr[%u], %s", spr, reg_names[srca]);
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- }
-
- tsrca = load_gr(dc, srca);
- if (def->put) {
- def->put(cpu_env, tsrca);
- } else {
- tcg_gen_st_tl(tsrca, cpu_env, def->offset);
- }
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr %s, %s", def->name, reg_names[srca]);
- return TILEGX_EXCP_NONE;
-}
-
-static TileExcp gen_mfspr_x1(DisasContext *dc, unsigned dest, unsigned spr)
-{
- const TileSPR *def = find_spr(spr);
- TCGv tdest;
-
- if (def == NULL) {
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr %s, spr[%u]", reg_names[dest], spr);
- return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
- }
-
- tdest = dest_gr(dc, dest);
- if (def->get) {
- def->get(tdest, cpu_env);
- } else {
- tcg_gen_ld_tl(tdest, cpu_env, def->offset);
- }
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, %s", reg_names[dest], def->name);
- return TILEGX_EXCP_NONE;
-}
-
-static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
-{
- unsigned opc = get_Opcode_Y0(bundle);
- unsigned ext = get_RRROpcodeExtension_Y0(bundle);
- unsigned dest = get_Dest_Y0(bundle);
- unsigned srca = get_SrcA_Y0(bundle);
- unsigned srcb;
- int imm;
-
- switch (opc) {
- case RRR_1_OPCODE_Y0:
- if (ext == UNARY_RRR_1_OPCODE_Y0) {
- ext = get_UnaryOpcodeExtension_Y0(bundle);
- return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca, bundle);
- }
- /* fallthru */
- case RRR_0_OPCODE_Y0:
- case RRR_2_OPCODE_Y0:
- case RRR_3_OPCODE_Y0:
- case RRR_4_OPCODE_Y0:
- case RRR_5_OPCODE_Y0:
- case RRR_6_OPCODE_Y0:
- case RRR_7_OPCODE_Y0:
- case RRR_8_OPCODE_Y0:
- case RRR_9_OPCODE_Y0:
- srcb = get_SrcB_Y0(bundle);
- return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);
-
- case SHIFT_OPCODE_Y0:
- ext = get_ShiftOpcodeExtension_Y0(bundle);
- imm = get_ShAmt_Y0(bundle);
- return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);
-
- case ADDI_OPCODE_Y0:
- case ADDXI_OPCODE_Y0:
- case ANDI_OPCODE_Y0:
- case CMPEQI_OPCODE_Y0:
- case CMPLTSI_OPCODE_Y0:
- imm = (int8_t)get_Imm8_Y0(bundle);
- return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);
-
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-}
-
-static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
-{
- unsigned opc = get_Opcode_Y1(bundle);
- unsigned ext = get_RRROpcodeExtension_Y1(bundle);
- unsigned dest = get_Dest_Y1(bundle);
- unsigned srca = get_SrcA_Y1(bundle);
- unsigned srcb;
- int imm;
-
- switch (get_Opcode_Y1(bundle)) {
- case RRR_1_OPCODE_Y1:
- if (ext == UNARY_RRR_1_OPCODE_Y0) {
- ext = get_UnaryOpcodeExtension_Y1(bundle);
- return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca, bundle);
- }
- /* fallthru */
- case RRR_0_OPCODE_Y1:
- case RRR_2_OPCODE_Y1:
- case RRR_3_OPCODE_Y1:
- case RRR_4_OPCODE_Y1:
- case RRR_5_OPCODE_Y1:
- case RRR_6_OPCODE_Y1:
- case RRR_7_OPCODE_Y1:
- srcb = get_SrcB_Y1(bundle);
- return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb);
-
- case SHIFT_OPCODE_Y1:
- ext = get_ShiftOpcodeExtension_Y1(bundle);
- imm = get_ShAmt_Y1(bundle);
- return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm);
-
- case ADDI_OPCODE_Y1:
- case ADDXI_OPCODE_Y1:
- case ANDI_OPCODE_Y1:
- case CMPEQI_OPCODE_Y1:
- case CMPLTSI_OPCODE_Y1:
- imm = (int8_t)get_Imm8_Y1(bundle);
- return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm);
-
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-}
-
-static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
-{
- unsigned mode = get_Mode(bundle);
- unsigned opc = get_Opcode_Y2(bundle);
- unsigned srca = get_SrcA_Y2(bundle);
- unsigned srcbdest = get_SrcBDest_Y2(bundle);
- const char *mnemonic;
- MemOp memop;
- bool prefetch_nofault = false;
-
- switch (OEY2(opc, mode)) {
- case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2):
- memop = MO_SB;
- mnemonic = "ld1s"; /* prefetch_l1_fault */
- goto do_load;
- case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2):
- memop = MO_UB;
- mnemonic = "ld1u"; /* prefetch, prefetch_l1 */
- prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
- goto do_load;
- case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2):
- memop = MO_TESW;
- mnemonic = "ld2s"; /* prefetch_l2_fault */
- goto do_load;
- case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2):
- memop = MO_TEUW;
- mnemonic = "ld2u"; /* prefetch_l2 */
- prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
- goto do_load;
- case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2):
- memop = MO_TESL;
- mnemonic = "ld4s"; /* prefetch_l3_fault */
- goto do_load;
- case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2):
- memop = MO_TEUL;
- mnemonic = "ld4u"; /* prefetch_l3 */
- prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
- goto do_load;
- case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2):
- memop = MO_TEQ;
- mnemonic = "ld";
- do_load:
- if (!prefetch_nofault) {
- tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca),
- dc->mmuidx, memop);
- }
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
- reg_names[srcbdest], reg_names[srca]);
- return TILEGX_EXCP_NONE;
-
- case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2):
- return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1");
- case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2):
- return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2");
- case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2):
- return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4");
- case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2):
- return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st");
-
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-}
-
-static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle)
-{
- unsigned opc = get_Opcode_X0(bundle);
- unsigned dest = get_Dest_X0(bundle);
- unsigned srca = get_SrcA_X0(bundle);
- unsigned ext, srcb, bfs, bfe;
- int imm;
-
- switch (opc) {
- case RRR_0_OPCODE_X0:
- ext = get_RRROpcodeExtension_X0(bundle);
- if (ext == UNARY_RRR_0_OPCODE_X0) {
- ext = get_UnaryOpcodeExtension_X0(bundle);
- return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca, bundle);
- }
- srcb = get_SrcB_X0(bundle);
- return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb);
-
- case SHIFT_OPCODE_X0:
- ext = get_ShiftOpcodeExtension_X0(bundle);
- imm = get_ShAmt_X0(bundle);
- return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);
-
- case IMM8_OPCODE_X0:
- ext = get_Imm8OpcodeExtension_X0(bundle);
- imm = (int8_t)get_Imm8_X0(bundle);
- return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);
-
- case BF_OPCODE_X0:
- ext = get_BFOpcodeExtension_X0(bundle);
- bfs = get_BFStart_X0(bundle);
- bfe = get_BFEnd_X0(bundle);
- return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe);
-
- case ADDLI_OPCODE_X0:
- case SHL16INSLI_OPCODE_X0:
- case ADDXLI_OPCODE_X0:
- imm = (int16_t)get_Imm16_X0(bundle);
- return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm);
-
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-}
-
-static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle)
-{
- unsigned opc = get_Opcode_X1(bundle);
- unsigned dest = get_Dest_X1(bundle);
- unsigned srca = get_SrcA_X1(bundle);
- unsigned ext, srcb;
- int imm;
-
- switch (opc) {
- case RRR_0_OPCODE_X1:
- ext = get_RRROpcodeExtension_X1(bundle);
- srcb = get_SrcB_X1(bundle);
- switch (ext) {
- case UNARY_RRR_0_OPCODE_X1:
- ext = get_UnaryOpcodeExtension_X1(bundle);
- return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca, bundle);
- case ST1_RRR_0_OPCODE_X1:
- return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1");
- case ST2_RRR_0_OPCODE_X1:
- return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2");
- case ST4_RRR_0_OPCODE_X1:
- return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4");
- case STNT1_RRR_0_OPCODE_X1:
- return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1");
- case STNT2_RRR_0_OPCODE_X1:
- return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2");
- case STNT4_RRR_0_OPCODE_X1:
- return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4");
- case STNT_RRR_0_OPCODE_X1:
- return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt");
- case ST_RRR_0_OPCODE_X1:
- return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st");
- }
- return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb);
-
- case SHIFT_OPCODE_X1:
- ext = get_ShiftOpcodeExtension_X1(bundle);
- imm = get_ShAmt_X1(bundle);
- return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);
-
- case IMM8_OPCODE_X1:
- ext = get_Imm8OpcodeExtension_X1(bundle);
- imm = (int8_t)get_Dest_Imm8_X1(bundle);
- srcb = get_SrcB_X1(bundle);
- switch (ext) {
- case ST1_ADD_IMM8_OPCODE_X1:
- return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add");
- case ST2_ADD_IMM8_OPCODE_X1:
- return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add");
- case ST4_ADD_IMM8_OPCODE_X1:
- return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add");
- case STNT1_ADD_IMM8_OPCODE_X1:
- return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add");
- case STNT2_ADD_IMM8_OPCODE_X1:
- return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add");
- case STNT4_ADD_IMM8_OPCODE_X1:
- return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add");
- case STNT_ADD_IMM8_OPCODE_X1:
- return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add");
- case ST_ADD_IMM8_OPCODE_X1:
- return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add");
- case MFSPR_IMM8_OPCODE_X1:
- return gen_mfspr_x1(dc, dest, get_MF_Imm14_X1(bundle));
- case MTSPR_IMM8_OPCODE_X1:
- return gen_mtspr_x1(dc, get_MT_Imm14_X1(bundle), srca);
- }
- imm = (int8_t)get_Imm8_X1(bundle);
- return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);
-
- case BRANCH_OPCODE_X1:
- ext = get_BrType_X1(bundle);
- imm = sextract32(get_BrOff_X1(bundle), 0, 17);
- return gen_branch_opcode_x1(dc, ext, srca, imm);
-
- case JUMP_OPCODE_X1:
- ext = get_JumpOpcodeExtension_X1(bundle);
- imm = sextract32(get_JumpOff_X1(bundle), 0, 27);
- return gen_jump_opcode_x1(dc, ext, imm);
-
- case ADDLI_OPCODE_X1:
- case SHL16INSLI_OPCODE_X1:
- case ADDXLI_OPCODE_X1:
- imm = (int16_t)get_Imm16_X1(bundle);
- return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm);
-
- default:
- return TILEGX_EXCP_OPCODE_UNKNOWN;
- }
-}
-
-static void notice_excp(DisasContext *dc, uint64_t bundle,
- const char *type, TileExcp excp)
-{
- if (likely(excp == TILEGX_EXCP_NONE)) {
- return;
- }
- gen_exception(dc, excp);
- switch (excp) {
- case TILEGX_EXCP_OPCODE_UNIMPLEMENTED:
- qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle);
- break;
- case TILEGX_EXCP_OPCODE_UNKNOWN:
- qemu_log_mask(LOG_UNIMP, "UNKNOWN %s, [" FMT64X "]\n", type, bundle);
- break;
- default:
- break;
- }
-}
-
-static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
- DisasContextTemp *wb = &dc->wb[i];
- wb->reg = TILEGX_R_NOREG;
- wb->val = NULL;
- }
- dc->num_wb = 0;
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " %" PRIx64 ": { ", dc->pc);
- if (get_Mode(bundle)) {
- notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
- notice_excp(dc, bundle, "y1", decode_y1(dc, bundle));
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
- notice_excp(dc, bundle, "y2", decode_y2(dc, bundle));
- } else {
- notice_excp(dc, bundle, "x0", decode_x0(dc, bundle));
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
- notice_excp(dc, bundle, "x1", decode_x1(dc, bundle));
- }
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n");
-
- for (i = dc->num_wb - 1; i >= 0; --i) {
- DisasContextTemp *wb = &dc->wb[i];
- if (wb->reg < TILEGX_R_COUNT) {
- tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val);
- }
- tcg_temp_free_i64(wb->val);
- }
-
- if (dc->jmp.cond != TCG_COND_NEVER) {
- if (dc->jmp.cond == TCG_COND_ALWAYS) {
- tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
- } else {
- TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
- tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
- dc->jmp.val1, load_zero(dc),
- dc->jmp.dest, next);
- tcg_temp_free_i64(dc->jmp.val1);
- tcg_temp_free_i64(next);
- }
- tcg_temp_free_i64(dc->jmp.dest);
- tcg_gen_exit_tb(NULL, 0);
- dc->exit_tb = true;
- } else if (dc->atomic_excp != TILEGX_EXCP_NONE) {
- gen_exception(dc, dc->atomic_excp);
- }
-}
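
translate_one_bundle() buffers the register writes of a bundle in dc->wb[] and only copies them into the real registers once all pipes have been translated, so every pipe observes the register values from before the bundle. An interpreter-style sketch of that deferred writeback, independent of TCG:

#include <stdint.h>
#include <stddef.h>

#define NREGS  64
#define MAX_WB  8                  /* generous upper bound for one bundle */

typedef struct {
    int      reg;
    uint64_t val;
} Writeback;

typedef struct {
    uint64_t  regs[NREGS];
    Writeback wb[MAX_WB];
    size_t    num_wb;
} Cpu;

/* Record a write without making it visible to the other pipes. */
static void queue_write(Cpu *cpu, int reg, uint64_t val)
{
    cpu->wb[cpu->num_wb].reg = reg;
    cpu->wb[cpu->num_wb].val = val;
    cpu->num_wb++;
}

/* Commit all queued writes once the whole bundle has been handled. */
static void commit_bundle(Cpu *cpu)
{
    for (size_t i = 0; i < cpu->num_wb; i++) {
        cpu->regs[cpu->wb[i].reg] = cpu->wb[i].val;
    }
    cpu->num_wb = 0;
}
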
-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
-{
- CPUTLGState *env = cs->env_ptr;
- DisasContext ctx;
- DisasContext *dc = &ctx;
- uint64_t pc_start = tb->pc;
- uint64_t page_start = pc_start & TARGET_PAGE_MASK;
- int num_insns = 0;
-
- dc->pc = pc_start;
- dc->mmuidx = 0;
- dc->exit_tb = false;
- dc->atomic_excp = TILEGX_EXCP_NONE;
- dc->jmp.cond = TCG_COND_NEVER;
- dc->jmp.dest = NULL;
- dc->jmp.val1 = NULL;
- dc->zero = NULL;
-
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
- qemu_log("IN: %s\n", lookup_symbol(pc_start));
- }
- gen_tb_start(tb);
-
- while (1) {
- tcg_gen_insn_start(dc->pc);
- num_insns++;
-
- translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));
-
- if (dc->exit_tb) {
- /* PC updated and EXIT_TB/GOTO_TB/exception emitted. */
- break;
- }
- dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
- if (num_insns >= max_insns
- || (dc->pc - page_start >= TARGET_PAGE_SIZE)
- || tcg_op_buf_full()) {
- /* Ending the TB due to TB size or page boundary. Set PC. */
- tcg_gen_movi_tl(cpu_pc, dc->pc);
- tcg_gen_exit_tb(NULL, 0);
- break;
- }
- }
-
- gen_tb_end(tb, num_insns);
- tb->size = dc->pc - pc_start;
- tb->icount = num_insns;
-}
-
-void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb,
- target_ulong *data)
-{
- env->pc = data[0];
-}
-
-void tilegx_tcg_init(void)
-{
- int i;
-
- cpu_pc = tcg_global_mem_new_i64(cpu_env, offsetof(CPUTLGState, pc), "pc");
- for (i = 0; i < TILEGX_R_COUNT; i++) {
- cpu_regs[i] = tcg_global_mem_new_i64(cpu_env,
- offsetof(CPUTLGState, regs[i]),
- reg_names[i]);
- }
-}
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 1376cdc404..fcaa5aface 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -519,6 +519,39 @@ typedef enum {
I3606_BIC = 0x2f001400,
I3606_ORR = 0x0f001400,
+ /* AdvSIMD scalar shift by immediate */
+ I3609_SSHR = 0x5f000400,
+ I3609_SSRA = 0x5f001400,
+ I3609_SHL = 0x5f005400,
+ I3609_USHR = 0x7f000400,
+ I3609_USRA = 0x7f001400,
+ I3609_SLI = 0x7f005400,
+
+ /* AdvSIMD scalar three same */
+ I3611_SQADD = 0x5e200c00,
+ I3611_SQSUB = 0x5e202c00,
+ I3611_CMGT = 0x5e203400,
+ I3611_CMGE = 0x5e203c00,
+ I3611_SSHL = 0x5e204400,
+ I3611_ADD = 0x5e208400,
+ I3611_CMTST = 0x5e208c00,
+ I3611_UQADD = 0x7e200c00,
+ I3611_UQSUB = 0x7e202c00,
+ I3611_CMHI = 0x7e203400,
+ I3611_CMHS = 0x7e203c00,
+ I3611_USHL = 0x7e204400,
+ I3611_SUB = 0x7e208400,
+ I3611_CMEQ = 0x7e208c00,
+
+ /* AdvSIMD scalar two-reg misc */
+ I3612_CMGT0 = 0x5e208800,
+ I3612_CMEQ0 = 0x5e209800,
+ I3612_CMLT0 = 0x5e20a800,
+ I3612_ABS = 0x5e20b800,
+ I3612_CMGE0 = 0x7e208800,
+ I3612_CMLE0 = 0x7e209800,
+ I3612_NEG = 0x7e20b800,
+
/* AdvSIMD shift by immediate */
I3614_SSHR = 0x0f000400,
I3614_SSRA = 0x0f001400,
@@ -561,7 +594,7 @@ typedef enum {
I3617_CMEQ0 = 0x0e209800,
I3617_CMLT0 = 0x0e20a800,
I3617_CMGE0 = 0x2e208800,
- I3617_CMLE0 = 0x2e20a800,
+ I3617_CMLE0 = 0x2e209800,
I3617_NOT = 0x2e205800,
I3617_ABS = 0x0e20b800,
I3617_NEG = 0x2e20b800,
@@ -735,6 +768,25 @@ static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q,
| (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5);
}
+static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn,
+ TCGReg rd, TCGReg rn, unsigned immhb)
+{
+ tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn,
+ unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
+{
+ tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16
+ | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn,
+ unsigned size, TCGReg rd, TCGReg rn)
+{
+ tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
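
The three new emitters follow the backend's usual encoding scheme: each AArch64Insn constant is the instruction word with its variable fields zeroed, and the emitter ORs the size and register numbers into fixed positions. A standalone version of the "scalar three same" packing used by tcg_out_insn_3611, with the field positions taken from the code above rather than re-derived from the architecture manual:

#include <stdint.h>

/* Base opcodes with size/Rm/Rn/Rd left as zero (values as defined above). */
enum {
    I3611_ADD = 0x5e208400,
    I3611_SUB = 0x7e208400,
};

/* size goes in bits 23:22, Rm in 20:16, Rn in 9:5, Rd in 4:0. */
static uint32_t encode_3611(uint32_t base, unsigned size,
                            unsigned rd, unsigned rn, unsigned rm)
{
    return base | (size << 22) | ((rm & 0x1f) << 16)
                | ((rn & 0x1f) << 5) | (rd & 0x1f);
}

With size 3, which is what the MO_64 scalar path later in this patch passes as vece, encode_3611(I3611_ADD, 3, 0, 1, 2) should match the word emitted for add d0, d1, d2, assuming the layout above is right.
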
static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q,
TCGReg rd, TCGReg rn, unsigned immhb)
{
@@ -1410,10 +1462,10 @@ static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
}
}
-static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
- TCGReg rh, TCGReg al, TCGReg ah,
- tcg_target_long bl, tcg_target_long bh,
- bool const_bl, bool const_bh, bool sub)
+static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
+ TCGReg rh, TCGReg al, TCGReg ah,
+ tcg_target_long bl, tcg_target_long bh,
+ bool const_bl, bool const_bh, bool sub)
{
TCGReg orig_rl = rl;
AArch64Insn insn;
@@ -1423,11 +1475,13 @@ static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
}
if (const_bl) {
- insn = I3401_ADDSI;
- if ((bl < 0) ^ sub) {
- insn = I3401_SUBSI;
+ if (bl < 0) {
bl = -bl;
+ insn = sub ? I3401_ADDSI : I3401_SUBSI;
+ } else {
+ insn = sub ? I3401_SUBSI : I3401_ADDSI;
}
+
if (unlikely(al == TCG_REG_XZR)) {
/* ??? We want to allow al to be zero for the benefit of
negation via subtraction. However, that leaves open the
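
The rewritten constant-operand path in tcg_out_addsub2 folds a negative low immediate into the opposite flag-setting operation, so the encoder only ever sees a non-negative immediate. The same decision in a self-contained helper (the corner case of negating the most negative value is ignored in this sketch):

#include <stdbool.h>
#include <stdint.h>

typedef enum { OP_ADDS, OP_SUBS } FlagOp;

/* Normalise (imm, sub) so that *imm is non-negative, flipping between
 * add-with-flags and subtract-with-flags when the sign is folded in. */
static FlagOp pick_flag_op(int64_t *imm, bool sub)
{
    if (*imm < 0) {
        *imm = -*imm;
        return sub ? OP_ADDS : OP_SUBS;
    }
    return sub ? OP_SUBS : OP_ADDS;
}
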
@@ -2234,23 +2288,38 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
const TCGArg *args, const int *const_args)
{
- static const AArch64Insn cmp_insn[16] = {
+ static const AArch64Insn cmp_vec_insn[16] = {
[TCG_COND_EQ] = I3616_CMEQ,
[TCG_COND_GT] = I3616_CMGT,
[TCG_COND_GE] = I3616_CMGE,
[TCG_COND_GTU] = I3616_CMHI,
[TCG_COND_GEU] = I3616_CMHS,
};
- static const AArch64Insn cmp0_insn[16] = {
+ static const AArch64Insn cmp_scalar_insn[16] = {
+ [TCG_COND_EQ] = I3611_CMEQ,
+ [TCG_COND_GT] = I3611_CMGT,
+ [TCG_COND_GE] = I3611_CMGE,
+ [TCG_COND_GTU] = I3611_CMHI,
+ [TCG_COND_GEU] = I3611_CMHS,
+ };
+ static const AArch64Insn cmp0_vec_insn[16] = {
[TCG_COND_EQ] = I3617_CMEQ0,
[TCG_COND_GT] = I3617_CMGT0,
[TCG_COND_GE] = I3617_CMGE0,
[TCG_COND_LT] = I3617_CMLT0,
[TCG_COND_LE] = I3617_CMLE0,
};
+ static const AArch64Insn cmp0_scalar_insn[16] = {
+ [TCG_COND_EQ] = I3612_CMEQ0,
+ [TCG_COND_GT] = I3612_CMGT0,
+ [TCG_COND_GE] = I3612_CMGE0,
+ [TCG_COND_LT] = I3612_CMLT0,
+ [TCG_COND_LE] = I3612_CMLE0,
+ };
TCGType type = vecl + TCG_TYPE_V64;
unsigned is_q = vecl;
+ bool is_scalar = !is_q && vece == MO_64;
TCGArg a0, a1, a2, a3;
int cmode, imm8;
@@ -2269,19 +2338,35 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
case INDEX_op_add_vec:
- tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_sub_vec:
- tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_mul_vec:
tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2);
break;
case INDEX_op_neg_vec:
- tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3612, NEG, vece, a0, a1);
+ } else {
+ tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
+ }
break;
case INDEX_op_abs_vec:
- tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3612, ABS, vece, a0, a1);
+ } else {
+ tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+ }
break;
case INDEX_op_and_vec:
if (const_args[2]) {
@@ -2335,16 +2420,32 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2);
break;
case INDEX_op_ssadd_vec:
- tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_sssub_vec:
- tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_usadd_vec:
- tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_ussub_vec:
- tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_smax_vec:
tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2);
@@ -2362,22 +2463,46 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
break;
case INDEX_op_shli_vec:
- tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece));
+ } else {
+ tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
+ }
break;
case INDEX_op_shri_vec:
- tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2);
+ } else {
+ tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
+ }
break;
case INDEX_op_sari_vec:
- tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2);
+ } else {
+ tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
+ }
break;
case INDEX_op_aa64_sli_vec:
- tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece));
+ } else {
+ tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+ }
break;
case INDEX_op_shlv_vec:
- tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_aa64_sshl_vec:
- tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_cmp_vec:
{
@@ -2386,30 +2511,58 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
if (cond == TCG_COND_NE) {
if (const_args[2]) {
- tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1);
+ } else {
+ tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
+ }
} else {
- tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
+ }
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
}
} else {
if (const_args[2]) {
- insn = cmp0_insn[cond];
- if (insn) {
- tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
- break;
+ if (is_scalar) {
+ insn = cmp0_scalar_insn[cond];
+ if (insn) {
+ tcg_out_insn_3612(s, insn, vece, a0, a1);
+ break;
+ }
+ } else {
+ insn = cmp0_vec_insn[cond];
+ if (insn) {
+ tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
+ break;
+ }
}
tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
a2 = TCG_VEC_TMP;
}
- insn = cmp_insn[cond];
- if (insn == 0) {
- TCGArg t;
- t = a1, a1 = a2, a2 = t;
- cond = tcg_swap_cond(cond);
- insn = cmp_insn[cond];
- tcg_debug_assert(insn != 0);
+ if (is_scalar) {
+ insn = cmp_scalar_insn[cond];
+ if (insn == 0) {
+ TCGArg t;
+ t = a1, a1 = a2, a2 = t;
+ cond = tcg_swap_cond(cond);
+ insn = cmp_scalar_insn[cond];
+ tcg_debug_assert(insn != 0);
+ }
+ tcg_out_insn_3611(s, insn, vece, a0, a1, a2);
+ } else {
+ insn = cmp_vec_insn[cond];
+ if (insn == 0) {
+ TCGArg t;
+ t = a1, a1 = a2, a2 = t;
+ cond = tcg_swap_cond(cond);
+ insn = cmp_vec_insn[cond];
+ tcg_debug_assert(insn != 0);
+ }
+ tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
}
- tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
}
}
break;
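
Both the vector and the new scalar cmp paths pick the compare instruction from a table indexed by the condition; conditions without a direct register form, such as LT and LE, are handled by swapping the operands and retrying with the mirrored condition via tcg_swap_cond(). The selection logic in a self-contained form, with a local condition enum standing in for TCGCond:

#include <assert.h>

typedef enum { C_EQ, C_GT, C_GE, C_LT, C_LE, C_NCONDS } Cond;
typedef enum { I_NONE = 0, I_CMEQ, I_CMGT, I_CMGE } CmpInsn;

/* Conditions with a direct register-register compare; LT/LE are absent. */
static const CmpInsn cmp_insn[C_NCONDS] = {
    [C_EQ] = I_CMEQ, [C_GT] = I_CMGT, [C_GE] = I_CMGE,
};

/* Mirror of the condition when the two operands are exchanged. */
static Cond swap_cond(Cond c)
{
    switch (c) {
    case C_GT: return C_LT;
    case C_GE: return C_LE;
    case C_LT: return C_GT;
    case C_LE: return C_GE;
    default:   return c;            /* EQ is symmetric */
    }
}

/* Returns the instruction to emit; *a and *b may be swapped to make it fit. */
static CmpInsn pick_cmp(Cond c, int *a, int *b)
{
    CmpInsn insn = cmp_insn[c];
    if (insn == I_NONE) {
        int t = *a; *a = *b; *b = t;
        insn = cmp_insn[swap_cond(c)];
        assert(insn != I_NONE);
    }
    return insn;
}
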
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 63a12b197b..2991112829 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -346,6 +346,12 @@ static void set_jmp_reset_offset(TCGContext *s, int which)
s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}
+/* Signal overflow, starting over with fewer guest insns. */
+static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s)
+{
+ siglongjmp(s->jmp_trans, -2);
+}
+
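
tcg_raise_tb_overflow() wraps the existing escape hatch: running out of temporaries siglongjmps back to the sigsetjmp around code generation, and, as the comment says, the caller retries the TB with fewer guest instructions. A toy version of that restart-on-overflow control flow, not QEMU's actual retry loop:

#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf jmp_trans;

/* Abandon the current attempt; -2 mirrors the "too big, retry" code above. */
static void raise_overflow(void)
{
    siglongjmp(jmp_trans, -2);
}

static void translate(int max_insns)
{
    for (int i = 0; i < max_insns; i++) {
        if (i == 40) {                  /* pretend resources ran out here */
            raise_overflow();
        }
    }
    printf("translated %d insns\n", max_insns);
}

int main(void)
{
    volatile int max_insns = 64;

restart:
    if (sigsetjmp(jmp_trans, 0) != 0) {
        /* Came back through raise_overflow(): retry with a smaller block. */
        max_insns /= 2;
        goto restart;
    }
    translate(max_insns);
    return 0;
}
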
#define C_PFX1(P, A) P##A
#define C_PFX2(P, A, B) P##A##_##B
#define C_PFX3(P, A, B, C) P##A##_##B##_##C
@@ -507,11 +513,21 @@ static void tcg_region_trees_init(void)
}
}
-static struct tcg_region_tree *tc_ptr_to_region_tree(const void *cp)
+static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
- void *p = tcg_splitwx_to_rw(cp);
size_t region_idx;
+ /*
+ * Like tcg_splitwx_to_rw, with no assert. The pc may come from
+ * a signal handler over which the caller has no control.
+ */
+ if (!in_code_gen_buffer(p)) {
+ p -= tcg_splitwx_diff;
+ if (!in_code_gen_buffer(p)) {
+ return NULL;
+ }
+ }
+
if (p < region.start_aligned) {
region_idx = 0;
} else {
@@ -530,6 +546,7 @@ void tcg_tb_insert(TranslationBlock *tb)
{
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
+ g_assert(rt != NULL);
qemu_mutex_lock(&rt->lock);
g_tree_insert(rt->tree, &tb->tc, tb);
qemu_mutex_unlock(&rt->lock);
@@ -539,6 +556,7 @@ void tcg_tb_remove(TranslationBlock *tb)
{
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
+ g_assert(rt != NULL);
qemu_mutex_lock(&rt->lock);
g_tree_remove(rt->tree, &tb->tc);
qemu_mutex_unlock(&rt->lock);
@@ -555,6 +573,10 @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
TranslationBlock *tb;
struct tb_tc s = { .ptr = (void *)tc_ptr };
+ if (rt == NULL) {
+ return NULL;
+ }
+
qemu_mutex_lock(&rt->lock);
tb = g_tree_lookup(rt->tree, &s);
qemu_mutex_unlock(&rt->lock);
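
tc_ptr_to_region_tree() now accepts either the read-execute or the read-write alias of a code pointer and returns NULL for anything outside the code_gen buffer, which tcg_tb_lookup() passes through while insert/remove keep a g_assert(). A simplified model of just the normalisation step, with plain variables standing in for in_code_gen_buffer() and tcg_splitwx_diff:

#include <stdint.h>
#include <stddef.h>

/* Stand-ins for the real buffer description. */
static uintptr_t buf_rw_start, buf_size;   /* RW mapping of the code buffer */
static ptrdiff_t splitwx_diff;             /* RX alias = RW address + diff  */

static int in_buffer(uintptr_t p)
{
    return p - buf_rw_start < buf_size;    /* single unsigned range check */
}

/* Map an RX or RW code pointer to its RW form, or return 0 when it points
 * outside the buffer entirely (e.g. a stray pc taken in a signal handler). */
static uintptr_t normalize_code_ptr(uintptr_t p)
{
    if (!in_buffer(p)) {
        p -= splitwx_diff;
        if (!in_buffer(p)) {
            return 0;
        }
    }
    return p;
}
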
@@ -1310,8 +1332,7 @@ static TCGTemp *tcg_temp_alloc(TCGContext *s)
int n = s->nb_temps++;
if (n >= TCG_MAX_TEMPS) {
- /* Signal overflow, starting over with fewer guest insns. */
- siglongjmp(s->jmp_trans, -2);
+ tcg_raise_tb_overflow(s);
}
return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
diff --git a/tcg/tci.c b/tcg/tci.c
index fb3c97aaf1..3ccd30c39c 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -57,49 +57,6 @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
return regs[index];
}
-#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
-static int8_t tci_read_reg8s(const tcg_target_ulong *regs, TCGReg index)
-{
- return (int8_t)tci_read_reg(regs, index);
-}
-#endif
-
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
-static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index)
-{
- return (int16_t)tci_read_reg(regs, index);
-}
-#endif
-
-#if TCG_TARGET_REG_BITS == 64
-static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index)
-{
- return (int32_t)tci_read_reg(regs, index);
-}
-#endif
-
-static uint8_t tci_read_reg8(const tcg_target_ulong *regs, TCGReg index)
-{
- return (uint8_t)tci_read_reg(regs, index);
-}
-
-static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index)
-{
- return (uint16_t)tci_read_reg(regs, index);
-}
-
-static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index)
-{
- return (uint32_t)tci_read_reg(regs, index);
-}
-
-#if TCG_TARGET_REG_BITS == 64
-static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index)
-{
- return tci_read_reg(regs, index);
-}
-#endif
-
static void
tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
{
@@ -169,78 +126,20 @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
return value;
}
-/* Read indexed register (8 bit) from bytecode. */
-static uint8_t tci_read_r8(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
-{
- uint8_t value = tci_read_reg8(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-
-#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
-/* Read indexed register (8 bit signed) from bytecode. */
-static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
-{
- int8_t value = tci_read_reg8s(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-#endif
-
-/* Read indexed register (16 bit) from bytecode. */
-static uint16_t tci_read_r16(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- uint16_t value = tci_read_reg16(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
-/* Read indexed register (16 bit signed) from bytecode. */
-static int16_t tci_read_r16s(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- int16_t value = tci_read_reg16s(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-#endif
-
-/* Read indexed register (32 bit) from bytecode. */
-static uint32_t tci_read_r32(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- uint32_t value = tci_read_reg32(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-
#if TCG_TARGET_REG_BITS == 32
/* Read two indexed registers (2 * 32 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
const uint8_t **tb_ptr)
{
- uint32_t low = tci_read_r32(regs, tb_ptr);
- return tci_uint64(tci_read_r32(regs, tb_ptr), low);
+ uint32_t low = tci_read_r(regs, tb_ptr);
+ return tci_uint64(tci_read_r(regs, tb_ptr), low);
}
#elif TCG_TARGET_REG_BITS == 64
-/* Read indexed register (32 bit signed) from bytecode. */
-static int32_t tci_read_r32s(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- int32_t value = tci_read_reg32s(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-
/* Read indexed register (64 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
const uint8_t **tb_ptr)
{
- uint64_t value = tci_read_reg64(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
+ return tci_read_r(regs, tb_ptr);
}
#endif
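
With the width-specific readers gone, every operand is loaded with tci_read_r() at full register width and narrowed by a cast exactly where the operation needs it, as in the "(uint32_t)t1 >> (t2 & 31)" form used for shr_i32 further down. The pattern in miniature:

#include <stdint.h>

typedef uintptr_t tci_reg_t;            /* stand-in for tcg_target_ulong */

/* Fetch the next register operand from the bytecode stream. */
static tci_reg_t read_r(const tci_reg_t *regs, const uint8_t **pc)
{
    tci_reg_t v = regs[**pc];
    *pc += 1;
    return v;
}

/* 32-bit logical shift right: full-width reads, a cast at the use site. */
static tci_reg_t do_shr_i32(const tci_reg_t *regs, const uint8_t **pc)
{
    tci_reg_t t1 = read_r(regs, pc);
    tci_reg_t t2 = read_r(regs, pc);
    return (uint32_t)t1 >> (t2 & 31);
}
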
@@ -346,51 +245,34 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
return result;
}
-#ifdef CONFIG_SOFTMMU
-# define qemu_ld_ub \
- helper_ret_ldub_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leuw \
- helper_le_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leul \
- helper_le_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leq \
- helper_le_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beuw \
- helper_be_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beul \
- helper_be_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beq \
- helper_be_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_st_b(X) \
- helper_ret_stb_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_lew(X) \
- helper_le_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_lel(X) \
- helper_le_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_leq(X) \
- helper_le_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_bew(X) \
- helper_be_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_bel(X) \
- helper_be_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_beq(X) \
- helper_be_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-#else
-# define qemu_ld_ub ldub_p(g2h(taddr))
-# define qemu_ld_leuw lduw_le_p(g2h(taddr))
-# define qemu_ld_leul (uint32_t)ldl_le_p(g2h(taddr))
-# define qemu_ld_leq ldq_le_p(g2h(taddr))
-# define qemu_ld_beuw lduw_be_p(g2h(taddr))
-# define qemu_ld_beul (uint32_t)ldl_be_p(g2h(taddr))
-# define qemu_ld_beq ldq_be_p(g2h(taddr))
-# define qemu_st_b(X) stb_p(g2h(taddr), X)
-# define qemu_st_lew(X) stw_le_p(g2h(taddr), X)
-# define qemu_st_lel(X) stl_le_p(g2h(taddr), X)
-# define qemu_st_leq(X) stq_le_p(g2h(taddr), X)
-# define qemu_st_bew(X) stw_be_p(g2h(taddr), X)
-# define qemu_st_bel(X) stl_be_p(g2h(taddr), X)
-# define qemu_st_beq(X) stq_be_p(g2h(taddr), X)
-#endif
+#define qemu_ld_ub \
+ cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leuw \
+ cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leul \
+ cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leq \
+ cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beuw \
+ cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beul \
+ cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beq \
+ cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_b(X) \
+ cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_lew(X) \
+ cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_lel(X) \
+ cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_leq(X) \
+ cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_bew(X) \
+ cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_bel(X) \
+ cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_beq(X) \
+ cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
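
The unified load/store macros forward every TCI guest access to the common cpu_*_mmuidx_ra() accessors; the oi operand carries the memory-op details together with the mmu index, and tb_ptr serves as the return address so faults can be attributed to the right TB. A toy packing of such an operand, with a made-up bit layout rather than QEMU's actual encoding:

#include <stdint.h>

typedef uint32_t ToyMemOpIdx;            /* illustrative type only */

/* Pack a memory-op descriptor and an mmu index into one operand. */
static ToyMemOpIdx toy_make_oi(unsigned memop, unsigned mmu_idx)
{
    return (memop << 4) | (mmu_idx & 0xf);
}

static unsigned toy_get_memop(ToyMemOpIdx oi)  { return oi >> 4; }
static unsigned toy_get_mmuidx(ToyMemOpIdx oi) { return oi & 0xf; }
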
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
@@ -483,8 +365,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
continue;
case INDEX_op_setcond_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
condition = *tb_ptr++;
tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
break;
@@ -499,15 +381,15 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
#elif TCG_TARGET_REG_BITS == 64
case INDEX_op_setcond_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
condition = *tb_ptr++;
tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
break;
#endif
- case INDEX_op_mov_i32:
+ CASE_32_64(mov)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1);
break;
case INDEX_op_tci_movi_i32:
@@ -550,127 +432,130 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
break;
CASE_32_64(st8)
- t0 = tci_read_r8(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
*(uint8_t *)(t1 + t2) = t0;
break;
CASE_32_64(st16)
- t0 = tci_read_r16(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
*(uint16_t *)(t1 + t2) = t0;
break;
case INDEX_op_st_i32:
CASE_64(st32)
- t0 = tci_read_r32(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
*(uint32_t *)(t1 + t2) = t0;
break;
- /* Arithmetic operations (32 bit). */
+ /* Arithmetic operations (mixed 32/64 bit). */
- case INDEX_op_add_i32:
+ CASE_32_64(add)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 + t2);
break;
- case INDEX_op_sub_i32:
+ CASE_32_64(sub)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 - t2);
break;
- case INDEX_op_mul_i32:
+ CASE_32_64(mul)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 * t2);
break;
- case INDEX_op_div_i32:
+ CASE_32_64(and)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 & t2);
break;
- case INDEX_op_divu_i32:
+ CASE_32_64(or)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 / t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 | t2);
break;
- case INDEX_op_rem_i32:
+ CASE_32_64(xor)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 ^ t2);
break;
- case INDEX_op_remu_i32:
+
+ /* Arithmetic operations (32 bit). */
+
+ case INDEX_op_div_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 % t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
break;
- case INDEX_op_and_i32:
+ case INDEX_op_divu_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 & t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1 / (uint32_t)t2);
break;
- case INDEX_op_or_i32:
+ case INDEX_op_rem_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 | t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
break;
- case INDEX_op_xor_i32:
+ case INDEX_op_remu_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 ^ t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2);
break;
/* Shift/rotate operations (32 bit). */
case INDEX_op_shl_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 << (t2 & 31));
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1 << (t2 & 31));
break;
case INDEX_op_shr_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 >> (t2 & 31));
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1 >> (t2 & 31));
break;
case INDEX_op_sar_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31)));
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int32_t)t1 >> (t2 & 31));
break;
#if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, rol32(t1, t2 & 31));
break;
case INDEX_op_rotr_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, ror32(t1, t2 & 31));
break;
#endif
#if TCG_TARGET_HAS_deposit_i32
case INDEX_op_deposit_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tmp16 = *tb_ptr++;
tmp8 = *tb_ptr++;
tmp32 = (((1 << tmp8) - 1) << tmp16);
@@ -678,8 +563,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
break;
#endif
case INDEX_op_brcond_i32:
- t0 = tci_read_r32(regs, &tb_ptr);
- t1 = tci_read_r32(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
condition = *tb_ptr++;
label = tci_read_label(&tb_ptr);
if (tci_compare32(t0, t1, condition)) {
@@ -717,73 +602,68 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_mulu2_i32:
t0 = *tb_ptr++;
t1 = *tb_ptr++;
- t2 = tci_read_r32(regs, &tb_ptr);
- tmp64 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg64(regs, t1, t0, t2 * tmp64);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tmp64 = (uint32_t)tci_read_r(regs, &tb_ptr);
+ tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
break;
#endif /* TCG_TARGET_REG_BITS == 32 */
-#if TCG_TARGET_HAS_ext8s_i32
- case INDEX_op_ext8s_i32:
+#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
+ CASE_32_64(ext8s)
t0 = *tb_ptr++;
- t1 = tci_read_r8s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int8_t)t1);
break;
#endif
-#if TCG_TARGET_HAS_ext16s_i32
- case INDEX_op_ext16s_i32:
+#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
+ CASE_32_64(ext16s)
t0 = *tb_ptr++;
- t1 = tci_read_r16s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int16_t)t1);
break;
#endif
-#if TCG_TARGET_HAS_ext8u_i32
- case INDEX_op_ext8u_i32:
+#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
+ CASE_32_64(ext8u)
t0 = *tb_ptr++;
- t1 = tci_read_r8(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint8_t)t1);
break;
#endif
-#if TCG_TARGET_HAS_ext16u_i32
- case INDEX_op_ext16u_i32:
+#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
+ CASE_32_64(ext16u)
t0 = *tb_ptr++;
- t1 = tci_read_r16(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint16_t)t1);
break;
#endif
-#if TCG_TARGET_HAS_bswap16_i32
- case INDEX_op_bswap16_i32:
+#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
+ CASE_32_64(bswap16)
t0 = *tb_ptr++;
- t1 = tci_read_r16(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, bswap16(t1));
break;
#endif
-#if TCG_TARGET_HAS_bswap32_i32
- case INDEX_op_bswap32_i32:
+#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
+ CASE_32_64(bswap32)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, bswap32(t1));
break;
#endif
-#if TCG_TARGET_HAS_not_i32
- case INDEX_op_not_i32:
+#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
+ CASE_32_64(not)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, ~t1);
break;
#endif
-#if TCG_TARGET_HAS_neg_i32
- case INDEX_op_neg_i32:
+#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
+ CASE_32_64(neg)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, -t1);
break;
#endif
#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_mov_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
case INDEX_op_tci_movi_i64:
t0 = *tb_ptr++;
t1 = tci_read_i64(&tb_ptr);
@@ -805,7 +685,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
break;
case INDEX_op_st_i64:
- t0 = tci_read_r64(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
*(uint64_t *)(t1 + t2) = t0;
@@ -813,106 +693,70 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
/* Arithmetic operations (64 bit). */
- case INDEX_op_add_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 + t2);
- break;
- case INDEX_op_sub_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 - t2);
- break;
- case INDEX_op_mul_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 * t2);
- break;
case INDEX_op_div_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2);
break;
case INDEX_op_divu_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2);
break;
case INDEX_op_rem_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2);
break;
case INDEX_op_remu_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
break;
- case INDEX_op_and_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 & t2);
- break;
- case INDEX_op_or_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 | t2);
- break;
- case INDEX_op_xor_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 ^ t2);
- break;
/* Shift/rotate operations (64 bit). */
case INDEX_op_shl_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 << (t2 & 63));
break;
case INDEX_op_shr_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 >> (t2 & 63));
break;
case INDEX_op_sar_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63)));
break;
#if TCG_TARGET_HAS_rot_i64
case INDEX_op_rotl_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, rol64(t1, t2 & 63));
break;
case INDEX_op_rotr_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, ror64(t1, t2 & 63));
break;
#endif
#if TCG_TARGET_HAS_deposit_i64
case INDEX_op_deposit_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tmp16 = *tb_ptr++;
tmp8 = *tb_ptr++;
tmp64 = (((1ULL << tmp8) - 1) << tmp16);
@@ -920,8 +764,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
break;
#endif
case INDEX_op_brcond_i64:
- t0 = tci_read_r64(regs, &tb_ptr);
- t1 = tci_read_r64(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
condition = *tb_ptr++;
label = tci_read_label(&tb_ptr);
if (tci_compare64(t0, t1, condition)) {
@@ -930,85 +774,29 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
continue;
}
break;
-#if TCG_TARGET_HAS_ext8u_i64
- case INDEX_op_ext8u_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r8(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#endif
-#if TCG_TARGET_HAS_ext8s_i64
- case INDEX_op_ext8s_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r8s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#endif
-#if TCG_TARGET_HAS_ext16s_i64
- case INDEX_op_ext16s_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r16s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#endif
-#if TCG_TARGET_HAS_ext16u_i64
- case INDEX_op_ext16u_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r16(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#endif
#if TCG_TARGET_HAS_ext32s_i64
case INDEX_op_ext32s_i64:
#endif
case INDEX_op_ext_i32_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r32s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int32_t)t1);
break;
#if TCG_TARGET_HAS_ext32u_i64
case INDEX_op_ext32u_i64:
#endif
case INDEX_op_extu_i32_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#if TCG_TARGET_HAS_bswap16_i64
- case INDEX_op_bswap16_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r16(regs, &tb_ptr);
- tci_write_reg(regs, t0, bswap16(t1));
- break;
-#endif
-#if TCG_TARGET_HAS_bswap32_i64
- case INDEX_op_bswap32_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, bswap32(t1));
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1);
break;
-#endif
#if TCG_TARGET_HAS_bswap64_i64
case INDEX_op_bswap64_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, bswap64(t1));
break;
#endif
-#if TCG_TARGET_HAS_not_i64
- case INDEX_op_not_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, ~t1);
- break;
-#endif
-#if TCG_TARGET_HAS_neg_i64
- case INDEX_op_neg_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, -t1);
- break;
-#endif
#endif /* TCG_TARGET_REG_BITS == 64 */
/* QEMU specific operations. */
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index feac4659cc..c79f9c32d8 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -380,6 +380,18 @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
+#if TCG_TARGET_REG_BITS == 64
+# define CASE_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i64): \
+ case glue(glue(INDEX_op_, x), _i32):
+# define CASE_64(x) \
+ case glue(glue(INDEX_op_, x), _i64):
+#else
+# define CASE_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i32):
+# define CASE_64(x)
+#endif
+
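
CASE_32_64()/CASE_64() let one case body stand for both the _i32 and _i64 opcodes, with the 64-bit labels disappearing on a 32-bit host. A compact model of the same trick, using direct token pasting instead of the glue() helpers:

#include <stdio.h>

#define REG_BITS 64                 /* set to 32 to drop the _i64 labels */

#if REG_BITS == 64
# define CASE_32_64(x)  case OP_##x##_i64: case OP_##x##_i32:
# define CASE_64(x)     case OP_##x##_i64:
#else
# define CASE_32_64(x)  case OP_##x##_i32:
# define CASE_64(x)
#endif

enum { OP_add_i32, OP_add_i64, OP_ld32u_i64 };

static const char *classify(int op)
{
    switch (op) {
    CASE_32_64(add)                 /* add_i32 and, on 64-bit hosts, add_i64 */
        return "arithmetic";
    CASE_64(ld32u)                  /* only exists as a 64-bit opcode */
        return "load";
    default:
        return "other";
    }
}

int main(void)
{
    printf("%s\n", classify(OP_add_i64));
    return 0;
}
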
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
const int *const_args)
{
@@ -391,6 +403,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_exit_tb:
tcg_out64(s, args[0]);
break;
+
case INDEX_op_goto_tb:
if (s->tb_jmp_insn_offset) {
/* Direct jump method. */
@@ -404,15 +417,18 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
set_jmp_reset_offset(s, args[0]);
break;
+
case INDEX_op_br:
tci_out_label(s, arg_label(args[0]));
break;
- case INDEX_op_setcond_i32:
+
+ CASE_32_64(setcond)
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out_r(s, args[2]);
tcg_out8(s, args[3]); /* condition */
break;
+
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
/* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
@@ -423,95 +439,54 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out_r(s, args[4]);
tcg_out8(s, args[5]); /* condition */
break;
-#elif TCG_TARGET_REG_BITS == 64
- case INDEX_op_setcond_i64:
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_out8(s, args[3]); /* condition */
- break;
#endif
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
+
+ CASE_32_64(ld8u)
+ CASE_32_64(ld8s)
+ CASE_32_64(ld16u)
+ CASE_32_64(ld16s)
case INDEX_op_ld_i32:
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
+ CASE_64(ld32u)
+ CASE_64(ld32s)
+ CASE_64(ld)
+ CASE_32_64(st8)
+ CASE_32_64(st16)
case INDEX_op_st_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
+ CASE_64(st32)
+ CASE_64(st)
stack_bounds_check(args[1], args[2]);
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_debug_assert(args[2] == (int32_t)args[2]);
tcg_out32(s, args[2]);
break;
- case INDEX_op_add_i32:
- case INDEX_op_sub_i32:
- case INDEX_op_mul_i32:
- case INDEX_op_and_i32:
- case INDEX_op_andc_i32: /* Optional (TCG_TARGET_HAS_andc_i32). */
- case INDEX_op_eqv_i32: /* Optional (TCG_TARGET_HAS_eqv_i32). */
- case INDEX_op_nand_i32: /* Optional (TCG_TARGET_HAS_nand_i32). */
- case INDEX_op_nor_i32: /* Optional (TCG_TARGET_HAS_nor_i32). */
- case INDEX_op_or_i32:
- case INDEX_op_orc_i32: /* Optional (TCG_TARGET_HAS_orc_i32). */
- case INDEX_op_xor_i32:
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- case INDEX_op_rotl_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */
- case INDEX_op_rotr_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- break;
- case INDEX_op_deposit_i32: /* Optional (TCG_TARGET_HAS_deposit_i32). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_debug_assert(args[3] <= UINT8_MAX);
- tcg_out8(s, args[3]);
- tcg_debug_assert(args[4] <= UINT8_MAX);
- tcg_out8(s, args[4]);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_add_i64:
- case INDEX_op_sub_i64:
- case INDEX_op_mul_i64:
- case INDEX_op_and_i64:
- case INDEX_op_andc_i64: /* Optional (TCG_TARGET_HAS_andc_i64). */
- case INDEX_op_eqv_i64: /* Optional (TCG_TARGET_HAS_eqv_i64). */
- case INDEX_op_nand_i64: /* Optional (TCG_TARGET_HAS_nand_i64). */
- case INDEX_op_nor_i64: /* Optional (TCG_TARGET_HAS_nor_i64). */
- case INDEX_op_or_i64:
- case INDEX_op_orc_i64: /* Optional (TCG_TARGET_HAS_orc_i64). */
- case INDEX_op_xor_i64:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
- case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
- case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
- case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
- case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
- case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
+ CASE_32_64(add)
+ CASE_32_64(sub)
+ CASE_32_64(mul)
+ CASE_32_64(and)
+ CASE_32_64(or)
+ CASE_32_64(xor)
+ CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */
+ CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */
+ CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */
+ CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
+ CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */
+ CASE_32_64(shl)
+ CASE_32_64(shr)
+ CASE_32_64(sar)
+ CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
+ CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
+ CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out_r(s, args[2]);
break;
- case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */
+
+ CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out_r(s, args[2]);
@@ -520,45 +495,31 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_debug_assert(args[4] <= UINT8_MAX);
tcg_out8(s, args[4]);
break;
- case INDEX_op_brcond_i64:
+
+ CASE_32_64(brcond)
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out8(s, args[2]); /* condition */
tci_out_label(s, arg_label(args[3]));
break;
- case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
- case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
- case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
- case INDEX_op_not_i64: /* Optional (TCG_TARGET_HAS_not_i64). */
- case INDEX_op_neg_i64: /* Optional (TCG_TARGET_HAS_neg_i64). */
- case INDEX_op_ext8s_i64: /* Optional (TCG_TARGET_HAS_ext8s_i64). */
- case INDEX_op_ext8u_i64: /* Optional (TCG_TARGET_HAS_ext8u_i64). */
- case INDEX_op_ext16s_i64: /* Optional (TCG_TARGET_HAS_ext16s_i64). */
- case INDEX_op_ext16u_i64: /* Optional (TCG_TARGET_HAS_ext16u_i64). */
- case INDEX_op_ext32s_i64: /* Optional (TCG_TARGET_HAS_ext32s_i64). */
- case INDEX_op_ext32u_i64: /* Optional (TCG_TARGET_HAS_ext32u_i64). */
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
-#endif /* TCG_TARGET_REG_BITS == 64 */
- case INDEX_op_neg_i32: /* Optional (TCG_TARGET_HAS_neg_i32). */
- case INDEX_op_not_i32: /* Optional (TCG_TARGET_HAS_not_i32). */
- case INDEX_op_ext8s_i32: /* Optional (TCG_TARGET_HAS_ext8s_i32). */
- case INDEX_op_ext16s_i32: /* Optional (TCG_TARGET_HAS_ext16s_i32). */
- case INDEX_op_ext8u_i32: /* Optional (TCG_TARGET_HAS_ext8u_i32). */
- case INDEX_op_ext16u_i32: /* Optional (TCG_TARGET_HAS_ext16u_i32). */
- case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
- case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- break;
- case INDEX_op_div_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
- case INDEX_op_divu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
- case INDEX_op_rem_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
- case INDEX_op_remu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
+
+ CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
+ CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
+ CASE_32_64(ext8s) /* Optional (TCG_TARGET_HAS_ext8s_*). */
+ CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */
+ CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */
+ CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */
+ CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */
+ CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */
+ CASE_64(ext_i32)
+ CASE_64(extu_i32)
+ CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */
+ CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */
+ CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
break;
+
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
@@ -584,31 +545,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out_r(s, args[3]);
break;
#endif
- case INDEX_op_brcond_i32:
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out8(s, args[2]); /* condition */
- tci_out_label(s, arg_label(args[3]));
- break;
+
case INDEX_op_qemu_ld_i32:
- tcg_out_r(s, *args++);
- tcg_out_r(s, *args++);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- tcg_out_r(s, *args++);
- }
- tcg_out_i(s, *args++);
- break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_r(s, *args++);
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_r(s, *args++);
- }
- tcg_out_r(s, *args++);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- tcg_out_r(s, *args++);
- }
- tcg_out_i(s, *args++);
- break;
case INDEX_op_qemu_st_i32:
tcg_out_r(s, *args++);
tcg_out_r(s, *args++);
@@ -617,6 +555,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
tcg_out_i(s, *args++);
break;
+
+ case INDEX_op_qemu_ld_i64:
case INDEX_op_qemu_st_i64:
tcg_out_r(s, *args++);
if (TCG_TARGET_REG_BITS == 32) {
@@ -628,8 +568,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
tcg_out_i(s, *args++);
break;
+
case INDEX_op_mb:
break;
+
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
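To make the switch rewrite above easier to review, here is how the new CASE_32_64()/CASE_64() helpers expand; this is a sketch that follows directly from the definitions added at the top of the file (glue() is QEMU's token-pasting macro):

    /* With TCG_TARGET_REG_BITS == 64 */
    CASE_32_64(add)   /* -> case INDEX_op_add_i64: case INDEX_op_add_i32: */
    CASE_64(ld32u)    /* -> case INDEX_op_ld32u_i64:                      */

    /* With TCG_TARGET_REG_BITS == 32 */
    CASE_32_64(add)   /* -> case INDEX_op_add_i32:                        */
    CASE_64(ld32u)    /* -> (nothing)                                     */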
diff --git a/tests/Makefile.include b/tests/Makefile.include
index d34254fb29..799e47169c 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -92,7 +92,7 @@ TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results

# Any number of comma-separated loggers are accepted. For more
# information please refer to "avocado --help".
AVOCADO_SHOW=app
-AVOCADO_TAGS=$(patsubst %-softmmu,-t arch:%, $(filter %-softmmu,$(TARGET_DIRS)))
+AVOCADO_TAGS=$(patsubst %-softmmu,-t arch:%, $(filter %-softmmu,$(TARGETS)))
$(TESTS_VENV_DIR): $(TESTS_VENV_REQ)
$(call quiet-command, \
@@ -109,7 +109,8 @@ $(TESTS_RESULTS_DIR):
check-venv: $(TESTS_VENV_DIR)
-FEDORA_31_ARCHES_CANDIDATES=$(patsubst ppc64,ppc64le,$(TARGETS))
+FEDORA_31_ARCHES_TARGETS=$(patsubst %-softmmu,%, $(filter %-softmmu,$(TARGETS)))
+FEDORA_31_ARCHES_CANDIDATES=$(patsubst ppc64,ppc64le,$(FEDORA_31_ARCHES_TARGETS))
FEDORA_31_ARCHES := x86_64 aarch64 ppc64le s390x
FEDORA_31_DOWNLOAD=$(filter $(FEDORA_31_ARCHES),$(FEDORA_31_ARCHES_CANDIDATES))
diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030
index 12aa9ed37e..5fb65b4bef 100755
--- a/tests/qemu-iotests/030
+++ b/tests/qemu-iotests/030
@@ -153,7 +153,7 @@ class TestSingleDrive(iotests.QMPTestCase):
def test_device_not_found(self):
result = self.vm.qmp('block-stream', device='nonexistent')
self.assert_qmp(result, 'error/desc',
- 'Cannot find device=nonexistent nor node_name=nonexistent')
+ 'Cannot find device=\'nonexistent\' nor node-name=\'nonexistent\'')
def test_job_id_missing(self):
result = self.vm.qmp('block-stream', device='mid')
@@ -507,7 +507,7 @@ class TestParallelOps(iotests.QMPTestCase):
# Error: the base node does not exist
result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream')
self.assert_qmp(result, 'error/desc',
- 'Cannot find device= nor node_name=none')
+ 'Cannot find device=\'\' nor node-name=\'none\'')
# Error: the base node is not a backing file of the top node
result = self.vm.qmp('block-stream', device='node4', base_node='node6', job_id='stream')
diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040
index 7ebc9ed825..336ff7c4f2 100755
--- a/tests/qemu-iotests/040
+++ b/tests/qemu-iotests/040
@@ -175,13 +175,13 @@ class TestSingleDrive(ImageCommitTestCase):
self.assert_no_active_block_jobs()
result = self.vm.qmp('block-commit', device='drive0', top_node='badfile', base_node='base')
self.assert_qmp(result, 'error/class', 'GenericError')
- self.assert_qmp(result, 'error/desc', "Cannot find device= nor node_name=badfile")
+ self.assert_qmp(result, 'error/desc', "Cannot find device='' nor node-name='badfile'")
def test_base_node_invalid(self):
self.assert_no_active_block_jobs()
result = self.vm.qmp('block-commit', device='drive0', top_node='mid', base_node='badfile')
self.assert_qmp(result, 'error/class', 'GenericError')
- self.assert_qmp(result, 'error/desc', "Cannot find device= nor node_name=badfile")
+ self.assert_qmp(result, 'error/desc', "Cannot find device='' nor node-name='badfile'")
def test_top_path_and_node(self):
self.assert_no_active_block_jobs()
diff --git a/tests/qemu-iotests/051.pc.out b/tests/qemu-iotests/051.pc.out
index f707471fb0..f570610f64 100644
--- a/tests/qemu-iotests/051.pc.out
+++ b/tests/qemu-iotests/051.pc.out
@@ -61,13 +61,13 @@ QEMU X.Y.Z monitor - type 'help' for more information
(qemu) quit
Testing: -drive file=TEST_DIR/t.qcow2,node-name=123foo
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,node-name=123foo: Invalid node name
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,node-name=123foo: Invalid node-name: '123foo'
Testing: -drive file=TEST_DIR/t.qcow2,node-name=_foo
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,node-name=_foo: Invalid node name
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,node-name=_foo: Invalid node-name: '_foo'
Testing: -drive file=TEST_DIR/t.qcow2,node-name=foo#12
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,node-name=foo#12: Invalid node name
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,node-name=foo#12: Invalid node-name: 'foo#12'
=== Device without drive ===
diff --git a/tests/qemu-iotests/081.out b/tests/qemu-iotests/081.out
index 1974262fac..615c083549 100644
--- a/tests/qemu-iotests/081.out
+++ b/tests/qemu-iotests/081.out
@@ -140,7 +140,7 @@ Testing:
QMP_VERSION
{"return": {}}
{"error": {"class": "GenericError", "desc": "blkverify=on can only be set if there are exactly two files and vote-threshold is 2"}}
-{"error": {"class": "GenericError", "desc": "Cannot find device=drive0-quorum nor node_name=drive0-quorum"}}
+{"error": {"class": "GenericError", "desc": "Cannot find device='drive0-quorum' nor node-name='drive0-quorum'"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
diff --git a/tests/qemu-iotests/085.out b/tests/qemu-iotests/085.out
index 32a193f2c2..1d4c565b6d 100644
--- a/tests/qemu-iotests/085.out
+++ b/tests/qemu-iotests/085.out
@@ -24,7 +24,7 @@ Formatting 'TEST_DIR/1-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended
{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT',
'format': 'IMGFMT' } }
-{"error": {"class": "GenericError", "desc": "Cannot find device= nor node_name="}}
+{"error": {"class": "GenericError", "desc": "Cannot find device='' nor node-name=''"}}
=== Invalid command - missing snapshot-file ===
@@ -222,10 +222,10 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node': 'virtio0',
'overlay':'snap_14' } }
-{"error": {"class": "GenericError", "desc": "Cannot find device=snap_14 nor node_name=snap_14"}}
+{"error": {"class": "GenericError", "desc": "Cannot find device='snap_14' nor node-name='snap_14'"}}
{ 'execute': 'blockdev-snapshot',
'arguments': { 'node':'nodevice',
'overlay':'snap_13' }
}
-{"error": {"class": "GenericError", "desc": "Cannot find device=nodevice nor node_name=nodevice"}}
+{"error": {"class": "GenericError", "desc": "Cannot find device='nodevice' nor node-name='nodevice'"}}
*** done
diff --git a/tests/qemu-iotests/087 b/tests/qemu-iotests/087
index edd43f1a28..d8e0e384cd 100755
--- a/tests/qemu-iotests/087
+++ b/tests/qemu-iotests/087
@@ -143,9 +143,7 @@ run_qemu <<EOF
"arguments": {
"qom-type": "secret",
"id": "sec0",
- "props": {
- "data": "123456"
- }
+ "data": "123456"
}
}
{ "execute": "blockdev-add",
@@ -176,9 +174,7 @@ run_qemu <<EOF
"arguments": {
"qom-type": "secret",
"id": "sec0",
- "props": {
- "data": "123456"
- }
+ "data": "123456"
}
}
{ "execute": "blockdev-add",
diff --git a/tests/qemu-iotests/087.out b/tests/qemu-iotests/087.out
index b61ba638af..e1c23a6983 100644
--- a/tests/qemu-iotests/087.out
+++ b/tests/qemu-iotests/087.out
@@ -17,7 +17,7 @@ Testing: -drive driver=IMGFMT,id=disk,node-name=test-node,file=TEST_DIR/t.IMGFMT
QMP_VERSION
{"return": {}}
{"error": {"class": "GenericError", "desc": "node-name=disk is conflicting with a device id"}}
-{"error": {"class": "GenericError", "desc": "Duplicate node name"}}
+{"error": {"class": "GenericError", "desc": "Duplicate nodes with node-name='test-node'"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
diff --git a/tests/qemu-iotests/184 b/tests/qemu-iotests/184
index 513d167098..e4cbcd8634 100755
--- a/tests/qemu-iotests/184
+++ b/tests/qemu-iotests/184
@@ -67,10 +67,8 @@ run_qemu <<EOF
"arguments": {
"qom-type": "throttle-group",
"id": "group0",
- "props": {
- "limits" : {
- "iops-total": 1000
- }
+ "limits" : {
+ "iops-total": 1000
}
}
}
@@ -96,10 +94,8 @@ run_qemu <<EOF
"arguments": {
"qom-type": "throttle-group",
"id": "group0",
- "props" : {
- "limits": {
- "iops-total": 1000
- }
+ "limits": {
+ "iops-total": 1000
}
}
}
@@ -136,10 +132,8 @@ run_qemu <<EOF
"arguments": {
"qom-type": "throttle-group",
"id": "group0",
- "props" : {
- "limits": {
- "iops-total": 1000
- }
+ "limits": {
+ "iops-total": 1000
}
}
}
diff --git a/tests/qemu-iotests/206.out b/tests/qemu-iotests/206.out
index 5dd589d14e..b68c443867 100644
--- a/tests/qemu-iotests/206.out
+++ b/tests/qemu-iotests/206.out
@@ -155,7 +155,7 @@ Format specific information:
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "qcow2", "file": "this doesn't exist", "size": 33554432}}}
{"return": {}}
-Job failed: Cannot find device=this doesn't exist nor node_name=this doesn't exist
+Job failed: Cannot find device='this doesn't exist' nor node-name='this doesn't exist'
{"execute": "job-dismiss", "arguments": {"id": "job0"}}
{"return": {}}
diff --git a/tests/qemu-iotests/210.out b/tests/qemu-iotests/210.out
index 2e9fc596eb..55c0844370 100644
--- a/tests/qemu-iotests/210.out
+++ b/tests/qemu-iotests/210.out
@@ -108,7 +108,7 @@ Format specific information:
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "luks", "file": "this doesn't exist", "size": 67108864}}}
{"return": {}}
-Job failed: Cannot find device=this doesn't exist nor node_name=this doesn't exist
+Job failed: Cannot find device='this doesn't exist' nor node-name='this doesn't exist'
{"execute": "job-dismiss", "arguments": {"id": "job0"}}
{"return": {}}
diff --git a/tests/qemu-iotests/211.out b/tests/qemu-iotests/211.out
index b83384deea..3bc092a8a8 100644
--- a/tests/qemu-iotests/211.out
+++ b/tests/qemu-iotests/211.out
@@ -62,7 +62,7 @@ cluster_size: 1048576
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "vdi", "file": "this doesn't exist", "size": 33554432}}}
{"return": {}}
-Job failed: Cannot find device=this doesn't exist nor node_name=this doesn't exist
+Job failed: Cannot find device='this doesn't exist' nor node-name='this doesn't exist'
{"execute": "job-dismiss", "arguments": {"id": "job0"}}
{"return": {}}
diff --git a/tests/qemu-iotests/212.out b/tests/qemu-iotests/212.out
index 1538d679be..8102033488 100644
--- a/tests/qemu-iotests/212.out
+++ b/tests/qemu-iotests/212.out
@@ -52,7 +52,7 @@ virtual size: 32 MiB (33554432 bytes)
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "parallels", "file": "this doesn't exist", "size": 33554432}}}
{"return": {}}
-Job failed: Cannot find device=this doesn't exist nor node_name=this doesn't exist
+Job failed: Cannot find device='this doesn't exist' nor node-name='this doesn't exist'
{"execute": "job-dismiss", "arguments": {"id": "job0"}}
{"return": {}}
diff --git a/tests/qemu-iotests/213.out b/tests/qemu-iotests/213.out
index be4ae85180..3cdce4d790 100644
--- a/tests/qemu-iotests/213.out
+++ b/tests/qemu-iotests/213.out
@@ -55,7 +55,7 @@ cluster_size: 268435456
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "vhdx", "file": "this doesn't exist", "size": 33554432}}}
{"return": {}}
-Job failed: Cannot find device=this doesn't exist nor node_name=this doesn't exist
+Job failed: Cannot find device='this doesn't exist' nor node-name='this doesn't exist'
{"execute": "job-dismiss", "arguments": {"id": "job0"}}
{"return": {}}
diff --git a/tests/qemu-iotests/218 b/tests/qemu-iotests/218
index ae7c4fb187..325d8244fb 100755
--- a/tests/qemu-iotests/218
+++ b/tests/qemu-iotests/218
@@ -152,7 +152,7 @@ with iotests.VM() as vm, \
vm.launch()
ret = vm.qmp('object-add', qom_type='throttle-group', id='tg',
- props={'x-bps-read': 4096})
+ limits={'bps-read': 4096})
assert ret['return'] == {}
ret = vm.qmp('blockdev-add',
diff --git a/tests/qemu-iotests/223.out b/tests/qemu-iotests/223.out
index bbc85289e3..083b62d053 100644
--- a/tests/qemu-iotests/223.out
+++ b/tests/qemu-iotests/223.out
@@ -53,7 +53,7 @@ exports available: 0
{"return": {}}
{"execute":"nbd-server-add",
"arguments":{"device":"nosuch"}}
-{"error": {"class": "GenericError", "desc": "Cannot find device=nosuch nor node_name=nosuch"}}
+{"error": {"class": "GenericError", "desc": "Cannot find device='nosuch' nor node-name='nosuch'"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n"}}
{"error": {"class": "GenericError", "desc": "Block export id 'n' is already in use"}}
@@ -154,7 +154,7 @@ exports available: 0
{"return": {}}
{"execute":"nbd-server-add",
"arguments":{"device":"nosuch"}}
-{"error": {"class": "GenericError", "desc": "Cannot find device=nosuch nor node_name=nosuch"}}
+{"error": {"class": "GenericError", "desc": "Cannot find device='nosuch' nor node-name='nosuch'"}}
{"execute":"nbd-server-add",
"arguments":{"device":"n"}}
{"error": {"class": "GenericError", "desc": "Block export id 'n' is already in use"}}
diff --git a/tests/qemu-iotests/235 b/tests/qemu-iotests/235
index 20d16dbf38..8aed45f9a7 100755
--- a/tests/qemu-iotests/235
+++ b/tests/qemu-iotests/235
@@ -57,7 +57,7 @@ vm.add_args('-drive', 'id=src,file=' + disk)
vm.launch()
log(vm.qmp('object-add', qom_type='throttle-group', id='tg0',
- props={ 'x-bps-total': size }))
+ limits={'bps-total': size}))
log(vm.qmp('blockdev-add',
**{ 'node-name': 'target',
diff --git a/tests/qemu-iotests/237.out b/tests/qemu-iotests/237.out
index a8c800bfad..aa94986803 100644
--- a/tests/qemu-iotests/237.out
+++ b/tests/qemu-iotests/237.out
@@ -85,7 +85,7 @@ Format specific information:
{"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "vmdk", "file": "this doesn't exist", "size": 33554432}}}
{"return": {}}
-Job failed: Cannot find device=this doesn't exist nor node_name=this doesn't exist
+Job failed: Cannot find device='this doesn't exist' nor node-name='this doesn't exist'
{"execute": "job-dismiss", "arguments": {"id": "job0"}}
{"return": {}}
diff --git a/tests/qemu-iotests/245 b/tests/qemu-iotests/245
index cfdeb902be..11104b9208 100755
--- a/tests/qemu-iotests/245
+++ b/tests/qemu-iotests/245
@@ -140,8 +140,8 @@ class TestBlockdevReopen(iotests.QMPTestCase):
self.reopen(opts, {'file': 'hd0-file'})
# We cannot change any of these
- self.reopen(opts, {'node-name': 'not-found'}, "Cannot find node named 'not-found'")
- self.reopen(opts, {'node-name': ''}, "Cannot find node named ''")
+ self.reopen(opts, {'node-name': 'not-found'}, "Failed to find node with node-name='not-found'")
+ self.reopen(opts, {'node-name': ''}, "Failed to find node with node-name=''")
self.reopen(opts, {'node-name': None}, "Invalid parameter type for 'node-name', expected: string")
self.reopen(opts, {'driver': 'raw'}, "Cannot change the option 'driver'")
self.reopen(opts, {'driver': ''}, "Invalid parameter ''")
@@ -158,7 +158,7 @@ class TestBlockdevReopen(iotests.QMPTestCase):
# node-name is optional in BlockdevOptions, but x-blockdev-reopen needs it
del opts['node-name']
- self.reopen(opts, {}, "Node name not specified")
+ self.reopen(opts, {}, "node-name not specified")
# Check that nothing has changed
self.check_node_graph(original_graph)
@@ -187,8 +187,8 @@ class TestBlockdevReopen(iotests.QMPTestCase):
self.reopen(opts, {'backing': backing_node_name})
# We can't use a non-existing or empty (non-NULL) node as the backing image
- self.reopen(opts, {'backing': 'not-found'}, "Cannot find device= nor node_name=not-found")
- self.reopen(opts, {'backing': ''}, "Cannot find device= nor node_name=")
+ self.reopen(opts, {'backing': 'not-found'}, "Cannot find device=\'\' nor node-name=\'not-found\'")
+ self.reopen(opts, {'backing': ''}, "Cannot find device=\'\' nor node-name=\'\'")
# We can reopen the image just fine if we specify the backing options
opts['backing'] = {'driver': iotests.imgfmt,
@@ -644,12 +644,12 @@ class TestBlockdevReopen(iotests.QMPTestCase):
###### throttle ######
######################
opts = { 'qom-type': 'throttle-group', 'id': 'group0',
- 'props': { 'limits': { 'iops-total': 1000 } } }
+ 'limits': { 'iops-total': 1000 } }
result = self.vm.qmp('object-add', conv_keys = False, **opts)
self.assert_qmp(result, 'return', {})
opts = { 'qom-type': 'throttle-group', 'id': 'group1',
- 'props': { 'limits': { 'iops-total': 2000 } } }
+ 'limits': { 'iops-total': 2000 } }
result = self.vm.qmp('object-add', conv_keys = False, **opts)
self.assert_qmp(result, 'return', {})
diff --git a/tests/qemu-iotests/249.out b/tests/qemu-iotests/249.out
index 92ec81db03..d2bf9be85e 100644
--- a/tests/qemu-iotests/249.out
+++ b/tests/qemu-iotests/249.out
@@ -18,7 +18,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
'filter-node-name': '1234'}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
-{"error": {"class": "GenericError", "desc": "Invalid node name"}}
+{"error": {"class": "GenericError", "desc": "Invalid node-name: '1234'"}}
=== Send a write command to a drive opened in read-only mode (2)
diff --git a/tests/qemu-iotests/258 b/tests/qemu-iotests/258
index 9a2d33ae5e..a6618208a8 100755
--- a/tests/qemu-iotests/258
+++ b/tests/qemu-iotests/258
@@ -103,9 +103,9 @@ def test_concurrent_finish(write_to_stream_node):
vm.qmp_log('object-add',
qom_type='throttle-group',
id='tg',
- props={
- 'x-iops-write': 1,
- 'x-iops-write-max': 1
+ limits={
+ 'iops-write': 1,
+ 'iops-write-max': 1
})
vm.qmp_log('blockdev-add',
diff --git a/tests/qemu-iotests/258.out b/tests/qemu-iotests/258.out
index ce6e9ba3e5..c3a003d3e3 100644
--- a/tests/qemu-iotests/258.out
+++ b/tests/qemu-iotests/258.out
@@ -2,7 +2,7 @@ Running tests:
=== Commit and stream finish concurrently (letting stream write) ===
-{"execute": "object-add", "arguments": {"id": "tg", "props": {"x-iops-write": 1, "x-iops-write-max": 1}, "qom-type": "throttle-group"}}
+{"execute": "object-add", "arguments": {"id": "tg", "limits": {"iops-write": 1, "iops-write-max": 1}, "qom-type": "throttle-group"}}
{"return": {}}
{"execute": "blockdev-add", "arguments": {"backing": {"backing": {"backing": {"backing": {"driver": "raw", "file": {"driver": "file", "filename": "TEST_DIR/PID-node0.img"}, "node-name": "node0"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node1.img"}, "node-name": "node1"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node2.img"}, "node-name": "node2"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node3.img"}, "node-name": "node3"}, "driver": "IMGFMT", "file": {"driver": "throttle", "file": {"driver": "file", "filename": "TEST_DIR/PID-node4.img"}, "throttle-group": "tg"}, "node-name": "node4"}}
{"return": {}}
@@ -18,7 +18,7 @@ Running tests:
=== Commit and stream finish concurrently (letting commit write) ===
-{"execute": "object-add", "arguments": {"id": "tg", "props": {"x-iops-write": 1, "x-iops-write-max": 1}, "qom-type": "throttle-group"}}
+{"execute": "object-add", "arguments": {"id": "tg", "limits": {"iops-write": 1, "iops-write-max": 1}, "qom-type": "throttle-group"}}
{"return": {}}
{"execute": "blockdev-add", "arguments": {"backing": {"backing": {"backing": {"backing": {"driver": "raw", "file": {"driver": "throttle", "file": {"driver": "file", "filename": "TEST_DIR/PID-node0.img"}, "throttle-group": "tg"}, "node-name": "node0"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node1.img"}, "node-name": "node1"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node2.img"}, "node-name": "node2"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node3.img"}, "node-name": "node3"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node4.img"}, "node-name": "node4"}}
{"return": {}}
diff --git a/tests/qemu-iotests/283 b/tests/qemu-iotests/283
index 79643e375b..010c22f0a2 100755
--- a/tests/qemu-iotests/283
+++ b/tests/qemu-iotests/283
@@ -97,3 +97,56 @@ vm.qmp_log('blockdev-add', **{
vm.qmp_log('blockdev-backup', sync='full', device='source', target='target')
vm.shutdown()
+
+
+print('\n=== backup-top should be gone after job-finalize ===\n')
+
+# Check that the backup-top node is gone after job-finalize.
+#
+# During finalization, the node becomes inactive and can no longer
+# function. If it is still present, new parents might be attached, and
+# there would be no meaningful way to handle their I/O requests.
+
+vm = iotests.VM()
+vm.launch()
+
+vm.qmp_log('blockdev-add', **{
+ 'node-name': 'source',
+ 'driver': 'null-co',
+})
+
+vm.qmp_log('blockdev-add', **{
+ 'node-name': 'target',
+ 'driver': 'null-co',
+})
+
+vm.qmp_log('blockdev-backup',
+ job_id='backup',
+ device='source',
+ target='target',
+ sync='full',
+ filter_node_name='backup-filter',
+ auto_finalize=False,
+ auto_dismiss=False)
+
+vm.event_wait('BLOCK_JOB_PENDING', 5.0)
+
+# The backup-top filter should still be present prior to finalization
+assert vm.node_info('backup-filter') is not None
+
+vm.qmp_log('job-finalize', id='backup')
+vm.event_wait('BLOCK_JOB_COMPLETED', 5.0)
+
+# The filter should be gone now. Check that by trying to access it
+# with qemu-io (which will most likely crash qemu if it is still
+# there).
+vm.qmp_log('human-monitor-command',
+ command_line='qemu-io backup-filter "write 0 1M"')
+
+# (Also, do an explicit check.)
+assert vm.node_info('backup-filter') is None
+
+vm.qmp_log('job-dismiss', id='backup')
+vm.event_wait('JOB_STATUS_CHANGE', 5.0, {'data': {'status': 'null'}})
+
+vm.shutdown()
diff --git a/tests/qemu-iotests/283.out b/tests/qemu-iotests/283.out
index d8cff22cc1..37c35058ae 100644
--- a/tests/qemu-iotests/283.out
+++ b/tests/qemu-iotests/283.out
@@ -6,3 +6,18 @@
{"return": {}}
{"execute": "blockdev-backup", "arguments": {"device": "source", "sync": "full", "target": "target"}}
{"error": {"class": "GenericError", "desc": "Cannot set permissions for backup-top filter: Conflicts with use by other as 'image', which uses 'write' on base"}}
+
+=== backup-top should be gone after job-finalize ===
+
+{"execute": "blockdev-add", "arguments": {"driver": "null-co", "node-name": "source"}}
+{"return": {}}
+{"execute": "blockdev-add", "arguments": {"driver": "null-co", "node-name": "target"}}
+{"return": {}}
+{"execute": "blockdev-backup", "arguments": {"auto-dismiss": false, "auto-finalize": false, "device": "source", "filter-node-name": "backup-filter", "job-id": "backup", "sync": "full", "target": "target"}}
+{"return": {}}
+{"execute": "job-finalize", "arguments": {"id": "backup"}}
+{"return": {}}
+{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io backup-filter \"write 0 1M\""}}
+{"return": "Error: Cannot find device='' nor node-name='backup-filter'\r\n"}
+{"execute": "job-dismiss", "arguments": {"id": "backup"}}
+{"return": {}}
diff --git a/tests/qemu-iotests/295 b/tests/qemu-iotests/295
index 01a6c0b31f..270ad3999f 100755
--- a/tests/qemu-iotests/295
+++ b/tests/qemu-iotests/295
@@ -43,7 +43,7 @@ class Secret:
def to_qmp_object(self):
return { "qom_type" : "secret", "id": self.id(),
- "props": { "data": self.secret() } }
+ "data": self.secret() }
################################################################################
class EncryptionSetupTestCase(iotests.QMPTestCase):
diff --git a/tests/qemu-iotests/296 b/tests/qemu-iotests/296
index 0bc3c6c7d7..7c65e987a1 100755
--- a/tests/qemu-iotests/296
+++ b/tests/qemu-iotests/296
@@ -43,7 +43,7 @@ class Secret:
def to_qmp_object(self):
return { "qom_type" : "secret", "id": self.id(),
- "props": { "data": self.secret() } }
+ "data": self.secret() }
################################################################################
diff --git a/tests/qemu-iotests/300 b/tests/qemu-iotests/300
index 63036f6a6e..b475a92c47 100755
--- a/tests/qemu-iotests/300
+++ b/tests/qemu-iotests/300
@@ -22,7 +22,7 @@
import os
import random
import re
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Optional
import iotests
@@ -30,7 +30,7 @@ import iotests
# pylint: disable=wrong-import-order
import qemu
-BlockBitmapMapping = List[Dict[str, Union[str, List[Dict[str, str]]]]]
+BlockBitmapMapping = List[Dict[str, object]]
mig_sock = os.path.join(iotests.sock_dir, 'mig_sock')
@@ -189,8 +189,8 @@ class TestAliasMigration(TestDirtyBitmapMigration):
# Check for error message on the destination
if self.src_node_name != self.dst_node_name:
self.verify_dest_error(f"Cannot find "
- f"device={self.src_node_name} nor "
- f"node_name={self.src_node_name}")
+ f"device='{self.src_node_name}' nor "
+ f"node-name='{self.src_node_name}'")
else:
self.verify_dest_error(None)
@@ -602,7 +602,8 @@ class TestCrossAliasMigration(TestDirtyBitmapMigration):
class TestAliasTransformMigration(TestDirtyBitmapMigration):
"""
- Tests the 'transform' option which modifies bitmap persistence on migration.
+ Tests the 'transform' option which modifies bitmap persistence on
+ migration.
"""
src_node_name = 'node-a'
@@ -674,7 +675,8 @@ class TestAliasTransformMigration(TestDirtyBitmapMigration):
bitmaps = self.vm_b.query_bitmaps()
for node in bitmaps:
- bitmaps[node] = sorted(((bmap['name'], bmap['persistent']) for bmap in bitmaps[node]))
+ bitmaps[node] = sorted(((bmap['name'], bmap['persistent'])
+ for bmap in bitmaps[node]))
self.assertEqual(bitmaps,
{'node-a': [('bmap-a', True), ('bmap-b', False)],
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 4e758308f2..90d0b62523 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -17,6 +17,7 @@
#
import atexit
+import bz2
from collections import OrderedDict
import faulthandler
import io
@@ -24,6 +25,7 @@ import json
import logging
import os
import re
+import shutil
import signal
import struct
import subprocess
@@ -96,6 +98,14 @@ luks_default_secret_object = 'secret,id=keysec0,data=' + \
os.environ.get('IMGKEYSECRET', '')
luks_default_key_secret_opt = 'key-secret=keysec0'
+sample_img_dir = os.environ['SAMPLE_IMG_DIR']
+
+
+def unarchive_sample_image(sample, fname):
+ sample_fname = os.path.join(sample_img_dir, sample + '.bz2')
+ with bz2.open(sample_fname) as f_in, open(fname, 'wb') as f_out:
+ shutil.copyfileobj(f_in, f_out)
+
def qemu_tool_pipe_and_status(tool: str, args: Sequence[str],
connect_stderr: bool = True) -> Tuple[str, int]:
diff --git a/tests/qemu-iotests/sample_images/parallels-with-bitmap.bz2 b/tests/qemu-iotests/sample_images/parallels-with-bitmap.bz2
new file mode 100644
index 0000000000..54892fd4d0
--- /dev/null
+++ b/tests/qemu-iotests/sample_images/parallels-with-bitmap.bz2
Binary files differ
diff --git a/tests/qemu-iotests/sample_images/parallels-with-bitmap.sh b/tests/qemu-iotests/sample_images/parallels-with-bitmap.sh
new file mode 100755
index 0000000000..30615aa6bd
--- /dev/null
+++ b/tests/qemu-iotests/sample_images/parallels-with-bitmap.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Test parallels load bitmap
+#
+# Copyright (c) 2021 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+CT=parallels-with-bitmap-ct
+DIR=$PWD/parallels-with-bitmap-dir
+IMG=$DIR/root.hds
+XML=$DIR/DiskDescriptor.xml
+TARGET=parallels-with-bitmap.bz2
+
+rm -rf $DIR
+
+prlctl create $CT --vmtype ct
+prlctl set $CT --device-add hdd --image $DIR --recreate --size 2G
+
+# cleanup the image
+qemu-img create -f parallels $IMG 64G
+
+# create bitmap
+prlctl backup $CT
+
+prlctl set $CT --device-del hdd1
+prlctl destroy $CT
+
+dev=$(ploop mount $XML | sed -n 's/^Adding delta dev=\(\/dev\/ploop[0-9]\+\).*/\1/p')
+dd if=/dev/zero of=$dev bs=64K seek=5 count=2 oflag=direct
+dd if=/dev/zero of=$dev bs=64K seek=30 count=1 oflag=direct
+dd if=/dev/zero of=$dev bs=64K seek=10 count=3 oflag=direct
+ploop umount $XML # bitmap name will be in the output
+
+bzip2 -z $IMG
+
+mv $IMG.bz2 $TARGET
+
+rm -rf $DIR
diff --git a/tests/qemu-iotests/tests/parallels-read-bitmap b/tests/qemu-iotests/tests/parallels-read-bitmap
new file mode 100755
index 0000000000..af6b9c5db3
--- /dev/null
+++ b/tests/qemu-iotests/tests/parallels-read-bitmap
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+#
+# Test parallels load bitmap
+#
+# Copyright (c) 2021 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+import iotests
+from iotests import qemu_nbd_popen, qemu_img_pipe, log, file_path
+
+iotests.script_initialize(supported_fmts=['parallels'])
+
+nbd_sock = file_path('nbd-sock', base_dir=iotests.sock_dir)
+disk = iotests.file_path('disk')
+bitmap = 'e4f2eed0-37fe-4539-b50b-85d2e7fd235f'
+nbd_opts = f'driver=nbd,server.type=unix,server.path={nbd_sock}' \
+ f',x-dirty-bitmap=qemu:dirty-bitmap:{bitmap}'
+
+
+iotests.unarchive_sample_image('parallels-with-bitmap', disk)
+
+
+with qemu_nbd_popen('--read-only', f'--socket={nbd_sock}',
+ f'--bitmap={bitmap}', '-f', iotests.imgfmt, disk):
+ out = qemu_img_pipe('map', '--output=json', '--image-opts', nbd_opts)
+ chunks = json.loads(out)
+ cluster = 64 * 1024
+
+ log('dirty clusters (cluster size is 64K):')
+ for c in chunks:
+ assert c['start'] % cluster == 0
+ assert c['length'] % cluster == 0
+ if c['data']:
+ continue
+
+ a = c['start'] // cluster
+ b = (c['start'] + c['length']) // cluster
+ if b - a > 1:
+ log(f'{a}-{b-1}')
+ else:
+ log(a)
diff --git a/tests/qemu-iotests/tests/parallels-read-bitmap.out b/tests/qemu-iotests/tests/parallels-read-bitmap.out
new file mode 100644
index 0000000000..e8f6bc9e96
--- /dev/null
+++ b/tests/qemu-iotests/tests/parallels-read-bitmap.out
@@ -0,0 +1,6 @@
+Start NBD server
+dirty clusters (cluster size is 64K):
+5-6
+10-12
+30
+Kill NBD server
diff --git a/tests/qtest/libqos/libqtest.h b/tests/qtest/libqos/libqtest.h
index 724f65aa94..a68dcd79d4 100644
--- a/tests/qtest/libqos/libqtest.h
+++ b/tests/qtest/libqos/libqtest.h
@@ -75,6 +75,17 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args);
QTestState *qtest_init_with_serial(const char *extra_args, int *sock_fd);
/**
+ * qtest_kill_qemu:
+ * @s: #QTestState instance to operate on.
+ *
+ * Kill the QEMU process and wait for it to terminate. It is safe to call this
+ * function multiple times. Normally qtest_quit() is used instead because it
+ * also frees QTestState. Use qtest_kill_qemu() when you just want to kill QEMU
+ * and qtest_quit() will be called later.
+ */
+void qtest_kill_qemu(QTestState *s);
+
+/**
* qtest_quit:
* @s: #QTestState instance to operate on.
*
@@ -133,6 +144,14 @@ void qtest_qmp_send_raw(QTestState *s, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
/**
+ * qtest_socket_server:
+ * @socket_path: the UNIX domain socket path
+ *
+ * Create and return a listen socket file descriptor, or abort on failure.
+ */
+int qtest_socket_server(const char *socket_path);
+
+/**
* qtest_vqmp_fds:
* @s: #QTestState instance to operate on.
* @fds: array of file descriptors
@@ -630,9 +649,27 @@ void qtest_add_data_func_full(const char *str, void *data,
g_free(path); \
} while (0)
+/**
+ * qtest_add_abrt_handler:
+ * @fn: Handler function
+ * @data: Argument that is passed to the handler
+ *
+ * Add a handler function that is invoked on SIGABRT. This can be used to
+ * terminate processes and perform other cleanup. The handler can be removed
+ * with qtest_remove_abrt_handler().
+ */
void qtest_add_abrt_handler(GHookFunc fn, const void *data);
/**
+ * qtest_remove_abrt_handler:
+ * @data: Argument previously passed to qtest_add_abrt_handler()
+ *
+ * Remove an abrt handler that was previously added with
+ * qtest_add_abrt_handler().
+ */
+void qtest_remove_abrt_handler(void *data);
+
+/**
* qtest_qmp_assert_success:
* @qts: QTestState instance to operate on
* @fmt: QMP message to send to qemu, formatted like
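The libqtest.h additions above document three helpers that previously lacked comments. A short usage sketch under the stated semantics; the handler and test body below are hypothetical and only show the intended call order:

    static void kill_qemu_on_abrt(void *data)
    {
        QTestState *qts = data;
        qtest_kill_qemu(qts);            /* safe to call more than once */
    }

    static void my_test(void)
    {
        QTestState *qts = qtest_init("-machine none");

        qtest_add_abrt_handler(kill_qemu_on_abrt, qts);
        /* ... exercise the device under test ... */
        qtest_remove_abrt_handler(qts);  /* matched by the data pointer */

        qtest_quit(qts);                 /* also frees the QTestState */
    }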
diff --git a/tests/qtest/libqos/qgraph.h b/tests/qtest/libqos/qgraph.h
index 07a32535f1..54672350c8 100644
--- a/tests/qtest/libqos/qgraph.h
+++ b/tests/qtest/libqos/qgraph.h
@@ -29,7 +29,6 @@
typedef struct QOSGraphObject QOSGraphObject;
typedef struct QOSGraphNode QOSGraphNode;
typedef struct QOSGraphEdge QOSGraphEdge;
-typedef struct QOSGraphNodeOptions QOSGraphNodeOptions;
typedef struct QOSGraphEdgeOptions QOSGraphEdgeOptions;
typedef struct QOSGraphTestOptions QOSGraphTestOptions;
@@ -49,340 +48,94 @@ typedef void (*QOSStartFunct) (QOSGraphObject *object);
typedef void *(*QOSBeforeTest) (GString *cmd_line, void *arg);
/**
- * SECTION: qgraph.h
- * @title: Qtest Driver Framework
- * @short_description: interfaces to organize drivers and tests
- * as nodes in a graph
- *
- * This Qgraph API provides all basic functions to create a graph
- * and instantiate nodes representing machines, drivers and tests
- * representing their relations with CONSUMES, PRODUCES, and CONTAINS
- * edges.
- *
- * The idea is to have a framework where each test asks for a specific
- * driver, and the framework takes care of allocating the proper devices
- * required and passing the correct command line arguments to QEMU.
- *
- * A node can be of four types:
- * - QNODE_MACHINE: for example "arm/raspi2"
- * - QNODE_DRIVER: for example "generic-sdhci"
- * - QNODE_INTERFACE: for example "sdhci" (interface for all "-sdhci" drivers)
- * an interface is not explicitly created, it will be auto-
- * matically instantiated when a node consumes or produces
- * it.
- * - QNODE_TEST: for example "sdhci-test", consumes an interface and tests
- * the functions provided
- *
- * Notes for the nodes:
- * - QNODE_MACHINE: each machine struct must have a QGuestAllocator and
- * implement get_driver to return the allocator passing
- * "memory". The function can also return NULL if the
- * allocator is not set.
- * - QNODE_DRIVER: driver names must be unique, and machines and nodes
- * planned to be "consumed" by other nodes must match QEMU
- * drivers name, otherwise they won't be discovered
- *
- * An edge relation between two nodes (drivers or machines) X and Y can be:
- * - X CONSUMES Y: Y can be plugged into X
- * - X PRODUCES Y: X provides the interface Y
- * - X CONTAINS Y: Y is part of X component
- *
- * Basic framework steps are the following:
- * - All nodes and edges are created in their respective
- * machine/driver/test files
- * - The framework starts QEMU and asks for a list of available devices
- * and machines (note that only machines and "consumed" nodes are mapped
- * 1:1 with QEMU devices)
- * - The framework walks the graph starting from the available machines and
- * performs a Depth First Search for tests
- * - Once a test is found, the path is walked again and all drivers are
- * allocated accordingly and the final interface is passed to the test
- * - The test is executed
- * - Unused objects are cleaned and the path discovery is continued
- *
- * Depending on the QEMU binary used, only some drivers/machines will be
- * available and only test that are reached by them will be executed.
- *
- * <example>
- * <title>Creating new driver an its interface</title>
- * <programlisting>
- #include "qgraph.h"
-
- struct My_driver {
- QOSGraphObject obj;
- Node_produced prod;
- Node_contained cont;
- }
-
- static void my_destructor(QOSGraphObject *obj)
- {
- g_free(obj);
- }
-
- static void my_get_driver(void *object, const char *interface) {
- My_driver *dev = object;
- if (!g_strcmp0(interface, "my_interface")) {
- return &dev->prod;
- }
- abort();
- }
-
- static void my_get_device(void *object, const char *device) {
- My_driver *dev = object;
- if (!g_strcmp0(device, "my_driver_contained")) {
- return &dev->cont;
- }
- abort();
- }
-
- static void *my_driver_constructor(void *node_consumed,
- QOSGraphObject *alloc)
- {
- My_driver dev = g_new(My_driver, 1);
- // get the node pointed by the produce edge
- dev->obj.get_driver = my_get_driver;
- // get the node pointed by the contains
- dev->obj.get_device = my_get_device;
- // free the object
- dev->obj.destructor = my_destructor;
- do_something_with_node_consumed(node_consumed);
- // set all fields of contained device
- init_contained_device(&dev->cont);
- return &dev->obj;
- }
-
- static void register_my_driver(void)
- {
- qos_node_create_driver("my_driver", my_driver_constructor);
- // contained drivers don't need a constructor,
- // they will be init by the parent.
- qos_node_create_driver("my_driver_contained", NULL);
-
- // For the sake of this example, assume machine x86_64/pc contains
- // "other_node".
- // This relation, along with the machine and "other_node" creation,
- // should be defined in the x86_64_pc-machine.c file.
- // "my_driver" will then consume "other_node"
- qos_node_contains("my_driver", "my_driver_contained");
- qos_node_produces("my_driver", "my_interface");
- qos_node_consumes("my_driver", "other_node");
- }
- * </programlisting>
- * </example>
- *
- * In the above example, all possible types of relations are created:
- * node "my_driver" consumes, contains and produces other nodes.
- * more specifically:
- * x86_64/pc -->contains--> other_node <--consumes-- my_driver
- * |
- * my_driver_contained <--contains--+
- * |
- * my_interface <--produces--+
- *
- * or inverting the consumes edge in consumed_by:
- *
- * x86_64/pc -->contains--> other_node --consumed_by--> my_driver
- * |
- * my_driver_contained <--contains--+
- * |
- * my_interface <--produces--+
- *
- * <example>
- * <title>Creating new test</title>
- * <programlisting>
- * #include "qgraph.h"
- *
- * static void my_test_function(void *obj, void *data)
- * {
- * Node_produced *interface_to_test = obj;
- * // test interface_to_test
- * }
- *
- * static void register_my_test(void)
- * {
- * qos_add_test("my_interface", "my_test", my_test_function);
- * }
- *
- * libqos_init(register_my_test);
- *
- * </programlisting>
- * </example>
- *
- * Here a new test is created, consuming "my_interface" node
- * and creating a valid path from a machine to a test.
- * Final graph will be like this:
- * x86_64/pc -->contains--> other_node <--consumes-- my_driver
- * |
- * my_driver_contained <--contains--+
- * |
- * my_test --consumes--> my_interface <--produces--+
- *
- * or inverting the consumes edge in consumed_by:
- *
- * x86_64/pc -->contains--> other_node --consumed_by--> my_driver
- * |
- * my_driver_contained <--contains--+
- * |
- * my_test <--consumed_by-- my_interface <--produces--+
- *
- * Assuming there the binary is
- * QTEST_QEMU_BINARY=./qemu-system-x86_64
- * a valid test path will be:
- * "/x86_64/pc/other_node/my_driver/my_interface/my_test".
- *
- * Additional examples are also in test-qgraph.c
- *
- * Command line:
- * Command line is built by using node names and optional arguments
- * passed by the user when building the edges.
- *
- * There are three types of command line arguments:
- * - in node : created from the node name. For example, machines will
- * have "-M <machine>" to its command line, while devices
- * "-device <device>". It is automatically done by the
- * framework.
- * - after node : added as additional argument to the node name.
- * This argument is added optionally when creating edges,
- * by setting the parameter @after_cmd_line and
- * @extra_edge_opts in #QOSGraphEdgeOptions.
- * The framework automatically adds
- * a comma before @extra_edge_opts,
- * because it is going to add attributes
- * after the destination node pointed by
- * the edge containing these options, and automatically
- * adds a space before @after_cmd_line, because it
- * adds an additional device, not an attribute.
- * - before node : added as additional argument to the node name.
- * This argument is added optionally when creating edges,
- * by setting the parameter @before_cmd_line in
- * #QOSGraphEdgeOptions. This attribute
- * is going to add attributes before the destination node
- * pointed by the edge containing these options. It is
- * helpful to commands that are not node-representable,
- * such as "-fdsev" or "-netdev".
- *
- * While adding command line in edges is always used, not all nodes names are
- * used in every path walk: this is because the contained or produced ones
- * are already added by QEMU, so only nodes that "consumes" will be used to
- * build the command line. Also, nodes that will have { "abstract" : true }
- * as QMP attribute will loose their command line, since they are not proper
- * devices to be added in QEMU.
- *
- * Example:
- *
- QOSGraphEdgeOptions opts = {
- .arg = NULL,
- .size_arg = 0,
- .after_cmd_line = "-device other",
- .before_cmd_line = "-netdev something",
- .extra_edge_opts = "addr=04.0",
- };
- QOSGraphNode * node = qos_node_create_driver("my_node", constructor);
- qos_node_consumes_args("my_node", "interface", &opts);
- *
- * Will produce the following command line:
- * "-netdev something -device my_node,addr=04.0 -device other"
- */
-
-/**
- * Edge options to be passed to the contains/consumes *_args function.
+ * struct QOSGraphEdgeOptions:
+ * Edge options to be passed to the contains/consumes \*_args function.
+ * @arg: optional arg that will be used by dest edge
+ * @size_arg: @arg size that will be used by dest edge
+ * @extra_device_opts: optional additional command line for dest
+ * edge, used to add additional attributes
+ * *after* the node command line; the
+ * framework automatically prepends ","
+ * to this argument.
+ * @before_cmd_line: optional additional command line for dest
+ * edge, used to add additional attributes
+ * *before* the node command line, usually
+ * other non-node represented commands,
+ * like "-fdsev synt"
+ * @after_cmd_line: optional extra command line to be added
+ * after the device command. This option
+ * is used to add the command lines of other
+ * devices that depend on the current node;
+ * the framework automatically prepends " "
+ * to this argument.
+ * @edge_name: optional edge to differentiate multiple
+ * devices with same node name
*/
struct QOSGraphEdgeOptions {
- void *arg; /*
- * optional arg that will be used by
- * dest edge
- */
- uint32_t size_arg; /*
- * optional arg size that will be used by
- * dest edge
- */
- const char *extra_device_opts;/*
- *optional additional command line for dest
- * edge, used to add additional attributes
- * *after* the node command line, the
- * framework automatically prepends ","
- * to this argument.
- */
- const char *before_cmd_line; /*
- * optional additional command line for dest
- * edge, used to add additional attributes
- * *before* the node command line, usually
- * other non-node represented commands,
- * like "-fdsev synt"
- */
- const char *after_cmd_line; /*
- * optional extra command line to be added
- * after the device command. This option
- * is used to add other devices
- * command line that depend on current node.
- * Automatically prepends " " to this
- * argument
- */
- const char *edge_name; /*
- * optional edge to differentiate multiple
- * devices with same node name
- */
+ void *arg;
+ uint32_t size_arg;
+ const char *extra_device_opts;
+ const char *before_cmd_line;
+ const char *after_cmd_line;
+ const char *edge_name;
};
/**
+ * struct QOSGraphTestOptions:
* Test options to be passed to the test functions.
+ * @edge: edge arguments that will be used by the test.
+ * Note that the test *does not* use edge_name,
+ * and instead uses arg and size_arg as the
+ * data argument for its test function.
+ * @arg: if @before is non-NULL, pass @arg there.
+ * Otherwise pass it to the test function.
+ * @before: executed before the test. Used to add
+ * additional parameters to the command line
+ * and modify the argument to the test function.
+ * @subprocess: run the test in a subprocess.
*/
struct QOSGraphTestOptions {
- QOSGraphEdgeOptions edge; /* edge arguments that will be used by test.
- * Note that test *does not* use edge_name,
- * and uses instead arg and size_arg as
- * data arg for its test function.
- */
- void *arg; /* passed to the .before function, or to the
- * test function if there is no .before
- * function
- */
- QOSBeforeTest before; /* executed before the test. Can add
- * additional parameters to the command line
- * and modify the argument to the test function.
- */
- bool subprocess; /* run the test in a subprocess */
+ QOSGraphEdgeOptions edge;
+ void *arg;
+ QOSBeforeTest before;
+ bool subprocess;
};
/**
+ * struct QOSGraphObject:
* Each driver, test or machine of this framework will have a
* QOSGraphObject as first field.
*
* This set of functions offered by QOSGraphObject are executed
* in different stages of the framework:
- * - get_driver / get_device : Once a machine-to-test path has been
- * found, the framework traverses it again and allocates all the
- * nodes, using the provided constructor. To satisfy their relations,
- * i.e. for produces or contains, where a struct constructor needs
- * an external parameter represented by the previous node,
- * the framework will call get_device (for contains) or
- * get_driver (for produces), depending on the edge type, passing
- * them the name of the next node to be taken and getting from them
- * the corresponding pointer to the actual structure of the next node to
- * be used in the path.
- *
- * - start_hw: This function is executed after all the path objects
- * have been allocated, but before the test is run. It starts the hw, setting
- * the initial configurations (*_device_enable) and making it ready for the
- * test.
- *
- * - destructor: Opposite to the node constructor, destroys the object.
- * This function is called after the test has been executed, and performs
- * a complete cleanup of each node allocated field. In case no constructor
- * is provided, no destructor will be called.
- *
+ * @get_driver: see @get_device
+ * @get_device: Once a machine-to-test path has been
+ * found, the framework traverses it again and allocates all the
+ * nodes, using the provided constructor. To satisfy their
+ * relations, i.e. for produces or contains, where a struct
+ * constructor needs an external parameter represented by the
+ * previous node, the framework will call
+ * @get_device (for contains) or @get_driver (for produces),
+ * depending on the edge type, passing them the name of the next
+ * node to be taken and getting from them the corresponding
+ * pointer to the actual structure of the next node to
+ * be used in the path.
+ * @start_hw: This function is executed after all the path objects
+ * have been allocated, but before the test is run. It starts the
+ * hw, setting the initial configurations (\*_device_enable) and
+ * making it ready for the test.
+ * @destructor: Opposite to the node constructor, destroys the object.
+ * This function is called after the test has been executed, and
+ * performs a complete cleanup of each node allocated field.
+ * In case no constructor is provided, no destructor will be
+ * called.
+ * @free: free the memory associated to the QOSGraphObject and its contained
+ * children
*/
struct QOSGraphObject {
- /* for produces edges, returns void * */
QOSGetDriver get_driver;
- /* for contains edges, returns a QOSGraphObject * */
QOSGetDevice get_device;
- /* start the hw, get ready for the test */
QOSStartFunct start_hw;
- /* destroy this QOSGraphObject */
QOSDestructorFunc destructor;
- /* free the memory associated to the QOSGraphObject and its contained
- * children */
GDestroyNotify free;
};
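Since the long block comment is dropped from the header above, the command-line example it carried is worth restating next to the slimmed-down struct. This is the same illustration the removed text gave, adjusted only to use the field name from the struct definition above (extra_device_opts; the old text wrote extra_edge_opts):

    QOSGraphEdgeOptions opts = {
        .before_cmd_line = "-netdev something",
        .extra_device_opts = "addr=04.0",
        .after_cmd_line = "-device other",
    };
    QOSGraphNode *node = qos_node_create_driver("my_node", constructor);
    qos_node_consumes_args("my_node", "interface", &opts);

    /* Will produce the following command line:
     *   "-netdev something -device my_node,addr=04.0 -device other"
     */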
@@ -399,24 +152,30 @@ void qos_graph_init(void);
void qos_graph_destroy(void);
/**
- * qos_node_destroy(): removes and frees a node from the,
+ * qos_node_destroy(): removes and frees a node from the
* nodes hash table.
+ * @key: Name of the node
*/
void qos_node_destroy(void *key);
/**
- * qos_edge_destroy(): removes and frees an edge from the,
+ * qos_edge_destroy(): removes and frees an edge from the
* edges hash table.
+ * @key: Name of the node
*/
void qos_edge_destroy(void *key);
/**
* qos_add_test(): adds a test node @name to the nodes hash table.
+ * @name: Name of the test
+ * @interface: Name of the interface node it consumes
+ * @test_func: Actual test to perform
+ * @opts: Facultative options (see %QOSGraphTestOptions)
*
* The test will consume a @interface node, and once the
* graph walking algorithm has found it, the @test_func will be
* executed. It also has the possibility to
- * add an optional @opts (see %QOSGraphNodeOptions).
+ * add an optional @opts (see %QOSGraphTestOptions).
*
* For tests, opts->edge.arg and size_arg represent the arg to pass
* to @test_func
@@ -428,6 +187,8 @@ void qos_add_test(const char *name, const char *interface,
/**
* qos_node_create_machine(): creates the machine @name and
* adds it to the node hash table.
+ * @name: Name of the machine
+ * @function: Machine constructor
*
* This node will be of type QNODE_MACHINE and have @function
* as constructor
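
For instance, registering a machine node is a single call; the constructor name below is illustrative and is assumed to follow the QOSCreateMachineFunc signature:

qos_node_create_machine("arm/raspi2", qos_create_machine_arm_raspi2);
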
@@ -438,6 +199,9 @@ void qos_node_create_machine(const char *name, QOSCreateMachineFunc function);
* qos_node_create_machine_args(): same as qos_node_create_machine,
* but with the possibility to add an optional ", @opts" after -M machine
* command line.
+ * @name: Name of the machine
+ * @function: Machine constructor
+ * @opts: Optional additional command line
*/
void qos_node_create_machine_args(const char *name,
QOSCreateMachineFunc function,
@@ -446,6 +210,8 @@ void qos_node_create_machine_args(const char *name,
/**
* qos_node_create_driver(): creates the driver @name and
* adds it to the node hash table.
+ * @name: Name of the driver
+ * @function: Driver constructor
*
* This node will be of type QNODE_DRIVER and have @function
* as constructor
@@ -453,17 +219,17 @@ void qos_node_create_machine_args(const char *name,
void qos_node_create_driver(const char *name, QOSCreateDriverFunc function);
/**
- * Behaves as qos_node_create_driver() with the extension of allowing to
- * specify a different node name vs. associated QEMU device name.
+ * qos_node_create_driver_named(): behaves like qos_node_create_driver(), but
+ * additionally allows the node name to differ from the name of the associated
+ * QEMU device.
+ * @name: Custom, unique name of the node to be created
+ * @qemu_name: Actual (official) QEMU driver name the node shall be
+ * associated with
+ * @function: Driver constructor
*
* Use this function instead of qos_node_create_driver() if you need to create
* several instances of the same QEMU device. You are free to choose a custom
* node name; however, the chosen node name must always be unique.
- *
- * @param name: custom, unique name of the node to be created
- * @param qemu_name: actual (official) QEMU driver name the node shall be
- * associated with
- * @param function: driver constructor
*/
void qos_node_create_driver_named(const char *name, const char *qemu_name,
QOSCreateDriverFunc function);
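
As a sketch, two graph nodes backed by the same QEMU device (reusing the hypothetical mydev_create constructor from the earlier example) could be registered as:

/* two distinct node names, both instantiating the QEMU "mydev" device */
qos_node_create_driver_named("mydev-a", "mydev", mydev_create);
qos_node_create_driver_named("mydev-b", "mydev", mydev_create);
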
@@ -472,6 +238,9 @@ void qos_node_create_driver_named(const char *name, const char *qemu_name,
* qos_node_contains(): creates one or more edges of type QEDGE_CONTAINS
* and adds them to the edge list mapped to @container in the
* edge hash table.
+ * @container: Source node that "contains"
+ * @contained: Destination node that "is contained"
+ * @opts: Optional arguments (see %QOSGraphEdgeOptions)
*
* The edges will have @container as source and @contained as destination.
*
@@ -483,14 +252,17 @@ void qos_node_create_driver_named(const char *name, const char *qemu_name,
* This function can be useful when there are multiple devices
* with the same node name contained in a machine or another node.
*
- * For example, if "arm/raspi2" contains 2 "generic-sdhci"
+ * For example, if ``arm/raspi2`` contains 2 ``generic-sdhci``
* devices, the right commands will be:
- * qos_node_create_machine("arm/raspi2");
- * qos_node_create_driver("generic-sdhci", constructor);
- * //assume rest of the fields are set NULL
- * QOSGraphEdgeOptions op1 = { .edge_name = "emmc" };
- * QOSGraphEdgeOptions op2 = { .edge_name = "sdcard" };
- * qos_node_contains("arm/raspi2", "generic-sdhci", &op1, &op2, NULL);
+ *
+ * .. code::
+ *
+ * qos_node_create_machine("arm/raspi2");
+ * qos_node_create_driver("generic-sdhci", constructor);
+ * // assume rest of the fields are set NULL
+ * QOSGraphEdgeOptions op1 = { .edge_name = "emmc" };
+ * QOSGraphEdgeOptions op2 = { .edge_name = "sdcard" };
+ * qos_node_contains("arm/raspi2", "generic-sdhci", &op1, &op2, NULL);
*
* Of course this also requires that the @container's get_device function
* implements a case for "emmc" and "sdcard".
@@ -505,6 +277,8 @@ void qos_node_contains(const char *container, const char *contained,
* qos_node_produces(): creates an edge of type QEDGE_PRODUCES and
* adds it to the edge list mapped to @producer in the
* edge hash table.
+ * @producer: Source node that "produces"
+ * @interface: Interface node that "is produced"
*
* This edge will have @producer as source and @interface as destination.
*/
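
Taken together, a typical node registration function wires the edges roughly as follows (a sketch modeled on existing qos tests; "mydev", "my-interface" and mydev_create are hypothetical, while "pci-bus" is the interface name PCI devices conventionally consume):

static void register_mydev(void)
{
    qos_node_create_driver("mydev", mydev_create);
    /* mydev sits on a PCI bus ... */
    qos_node_consumes("mydev", "pci-bus", NULL);
    /* ... and exposes an interface that tests can request */
    qos_node_produces("mydev", "my-interface");
}
libqos_init(register_mydev);
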
@@ -514,6 +288,9 @@ void qos_node_produces(const char *producer, const char *interface);
* qos_node_consumes(): creates an edge of type QEDGE_CONSUMED_BY and
* adds it to the edge list mapped to @interface in the
* edge hash table.
+ * @consumer: Node that "consumes"
+ * @interface: Interface node that "is consumed by"
+ * @opts: Optional arguments (see %QOSGraphEdgeOptions)
*
* This edge will have @interface as source and @consumer as destination.
* It also has the possibility to add an optional @opts
@@ -539,7 +316,7 @@ const char *qos_get_current_command_line(void);
/**
* qos_allocate_objects():
* @qts: The #QTestState that will be referred to by the machine object.
- * @alloc: Where to store the allocator for the machine object, or %NULL.
+ * @p_alloc: Where to store the allocator for the machine object, or %NULL.
*
* Allocate driver objects for the current test
* path, but relative to the QTestState @qts.
@@ -551,24 +328,27 @@ void *qos_allocate_objects(QTestState *qts, QGuestAllocator **p_alloc);
/**
* qos_object_destroy(): calls the destructor for @obj
+ * @obj: A #QOSGraphObject to destroy
*/
void qos_object_destroy(QOSGraphObject *obj);
/**
* qos_object_queue_destroy(): queue the destructor for @obj so that it is
* called at the end of the test
+ * @obj: A #QOSGraphObject to destroy
*/
void qos_object_queue_destroy(QOSGraphObject *obj);
/**
* qos_object_start_hw(): calls the start_hw function for @obj
+ * @obj: A #QOSGraphObject containing the start_hw function
*/
void qos_object_start_hw(QOSGraphObject *obj);
/**
* qos_machine_new(): instantiate a new machine node
- * @node: A machine node to be instantiated
- * @qts: The #QTestState that will be referred to by the machine object.
+ * @node: Machine node to be instantiated
+ * @qts: A #QTestState that will be referred to by the machine object.
*
* Returns a machine object.
*/
@@ -587,8 +367,8 @@ QOSGraphObject *qos_driver_new(QOSGraphNode *node, QOSGraphObject *parent,
QGuestAllocator *alloc, void *arg);
/**
- * Just for debugging purpose: prints all currently existing nodes and
- * edges to stdout.
+ * qos_dump_graph(): prints all currently existing nodes and
+ * edges to stdout. Just for debugging purposes.
*
* All qtests add themselves to the overall qos graph by calling qgraph
* functions that add device nodes and edges between the individual graph
diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
index fd043b0570..71e359efcd 100644
--- a/tests/qtest/libqtest.c
+++ b/tests/qtest/libqtest.c
@@ -81,24 +81,8 @@ static void qtest_client_set_rx_handler(QTestState *s, QTestRecvFn recv);
static int init_socket(const char *socket_path)
{
- struct sockaddr_un addr;
- int sock;
- int ret;
-
- sock = socket(PF_UNIX, SOCK_STREAM, 0);
- g_assert_cmpint(sock, !=, -1);
-
- addr.sun_family = AF_UNIX;
- snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", socket_path);
+ int sock = qtest_socket_server(socket_path);
qemu_set_cloexec(sock);
-
- do {
- ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
- } while (ret == -1 && errno == EINTR);
- g_assert_cmpint(ret, !=, -1);
- ret = listen(sock, 1);
- g_assert_cmpint(ret, !=, -1);
-
return sock;
}
@@ -149,7 +133,7 @@ void qtest_set_expected_status(QTestState *s, int status)
s->expected_status = status;
}
-static void kill_qemu(QTestState *s)
+void qtest_kill_qemu(QTestState *s)
{
pid_t pid = s->qemu_pid;
int wstatus;
@@ -159,6 +143,7 @@ static void kill_qemu(QTestState *s)
kill(pid, SIGTERM);
TFR(pid = waitpid(s->qemu_pid, &s->wstatus, 0));
assert(pid == s->qemu_pid);
+ s->qemu_pid = -1;
}
/*
@@ -185,7 +170,7 @@ static void kill_qemu(QTestState *s)
static void kill_qemu_hook_func(void *s)
{
- kill_qemu(s);
+ qtest_kill_qemu(s);
}
static void sigabrt_handler(int signo)
@@ -211,15 +196,30 @@ static void cleanup_sigabrt_handler(void)
sigaction(SIGABRT, &sigact_old, NULL);
}
+static bool hook_list_is_empty(GHookList *hook_list)
+{
+ GHook *hook = g_hook_first_valid(hook_list, TRUE);
+
+ if (!hook) {
+ return true;
+ }
+
+ g_hook_unref(hook_list, hook);
+ return false;
+}
+
void qtest_add_abrt_handler(GHookFunc fn, const void *data)
{
GHook *hook;
- /* Only install SIGABRT handler once */
if (!abrt_hooks.is_setup) {
g_hook_list_init(&abrt_hooks, sizeof(GHook));
}
- setup_sigabrt_handler();
+
+ /* Only install SIGABRT handler once */
+ if (hook_list_is_empty(&abrt_hooks)) {
+ setup_sigabrt_handler();
+ }
hook = g_hook_alloc(&abrt_hooks);
hook->func = fn;
@@ -228,6 +228,17 @@ void qtest_add_abrt_handler(GHookFunc fn, const void *data)
g_hook_prepend(&abrt_hooks, hook);
}
+void qtest_remove_abrt_handler(void *data)
+{
+ GHook *hook = g_hook_find_data(&abrt_hooks, TRUE, data);
+ g_hook_destroy_link(&abrt_hooks, hook);
+
+ /* Uninstall SIGABRT handler on last instance */
+ if (hook_list_is_empty(&abrt_hooks)) {
+ cleanup_sigabrt_handler();
+ }
+}
+
static const char *qtest_qemu_binary(void)
{
const char *qemu_bin;
@@ -384,12 +395,9 @@ QTestState *qtest_init_with_serial(const char *extra_args, int *sock_fd)
void qtest_quit(QTestState *s)
{
- g_hook_destroy_link(&abrt_hooks, g_hook_find_data(&abrt_hooks, TRUE, s));
-
- /* Uninstall SIGABRT handler on last instance */
- cleanup_sigabrt_handler();
+ qtest_remove_abrt_handler(s);
- kill_qemu(s);
+ qtest_kill_qemu(s);
close(s->fd);
close(s->qmp_fd);
g_string_free(s->rx, true);
@@ -638,6 +646,28 @@ QDict *qtest_qmp_receive_dict(QTestState *s)
return qmp_fd_receive(s->qmp_fd);
}
+int qtest_socket_server(const char *socket_path)
+{
+ struct sockaddr_un addr;
+ int sock;
+ int ret;
+
+ sock = socket(PF_UNIX, SOCK_STREAM, 0);
+ g_assert_cmpint(sock, !=, -1);
+
+ addr.sun_family = AF_UNIX;
+ snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", socket_path);
+
+ do {
+ ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+ } while (ret == -1 && errno == EINTR);
+ g_assert_cmpint(ret, !=, -1);
+ ret = listen(sock, 1);
+ g_assert_cmpint(ret, !=, -1);
+
+ return sock;
+}
+
/**
* Allow users to send a message without waiting for the reply,
* in the case that they choose to discard all replies up until
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 58efc46144..2688e1bfad 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -157,6 +157,7 @@ qtests_npcm7xx = \
'npcm7xx_watchdog_timer-test'] + \
(slirp.found() ? ['npcm7xx_emc-test'] : [])
qtests_arm = \
+ (config_all_devices.has_key('CONFIG_MPS2') ? ['sse-timer-test'] : []) + \
(config_all_devices.has_key('CONFIG_CMSDK_APB_DUALTIMER') ? ['cmsdk-apb-dualtimer-test'] : []) + \
(config_all_devices.has_key('CONFIG_CMSDK_APB_TIMER') ? ['cmsdk-apb-timer-test'] : []) + \
(config_all_devices.has_key('CONFIG_CMSDK_APB_WATCHDOG') ? ['cmsdk-apb-watchdog-test'] : []) + \
diff --git a/tests/qtest/sse-timer-test.c b/tests/qtest/sse-timer-test.c
new file mode 100644
index 0000000000..a65d7542d5
--- /dev/null
+++ b/tests/qtest/sse-timer-test.c
@@ -0,0 +1,240 @@
+/*
+ * QTest testcase for the SSE timer device
+ *
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest-single.h"
+
+/*
+ * SSE-123/SSE-300 timer in the mps3-an547 board, where it is driven
+ * at 32MHz, so 31.25ns per tick.
+ */
+#define TIMER_BASE 0x48000000
+
+/* PERIPHNSPPC0 register in the SSE-300 Secure Access Configuration block */
+#define PERIPHNSPPC0 (0x50080000 + 0x70)
+
+/* Base of the System Counter control frame */
+#define COUNTER_BASE 0x58100000
+
+/* SSE counter register offsets in the control frame */
+#define CNTCR 0
+#define CNTSR 0x4
+#define CNTCV_LO 0x8
+#define CNTCV_HI 0xc
+#define CNTSCR 0x10
+
+/* SSE timer register offsets */
+#define CNTPCT_LO 0
+#define CNTPCT_HI 4
+#define CNTFRQ 0x10
+#define CNTP_CVAL_LO 0x20
+#define CNTP_CVAL_HI 0x24
+#define CNTP_TVAL 0x28
+#define CNTP_CTL 0x2c
+#define CNTP_AIVAL_LO 0x40
+#define CNTP_AIVAL_HI 0x44
+#define CNTP_AIVAL_RELOAD 0x48
+#define CNTP_AIVAL_CTL 0x4c
+
+/* 4 ticks in nanoseconds (so we can work in integers) */
+#define FOUR_TICKS 125
+
+static void clock_step_ticks(uint64_t ticks)
+{
+ /*
+ * Advance the qtest clock by however many nanoseconds we
+ * need to move the timer forward the specified number of ticks.
+ * ticks must be a multiple of 4, so we get a whole number of ns.
+ */
+ assert(!(ticks & 3));
+ clock_step(FOUR_TICKS * (ticks >> 2));
+}
+
+static void reset_counter_and_timer(void)
+{
+ /*
+ * Reset the system counter and the timer between tests. This
+ * isn't a full reset, but it's sufficient for what the tests check.
+ */
+ writel(COUNTER_BASE + CNTCR, 0);
+ writel(TIMER_BASE + CNTP_CTL, 0);
+ writel(TIMER_BASE + CNTP_AIVAL_CTL, 0);
+ writel(COUNTER_BASE + CNTCV_LO, 0);
+ writel(COUNTER_BASE + CNTCV_HI, 0);
+}
+
+static void test_counter(void)
+{
+ /* Basic counter functionality test */
+
+ reset_counter_and_timer();
+ /* The counter should start disabled: check that it doesn't move */
+ clock_step_ticks(100);
+ g_assert_cmpuint(readl(COUNTER_BASE + CNTCV_LO), ==, 0);
+ g_assert_cmpuint(readl(COUNTER_BASE + CNTCV_HI), ==, 0);
+ /* Now enable it and check that it does count */
+ writel(COUNTER_BASE + CNTCR, 1);
+ clock_step_ticks(100);
+ g_assert_cmpuint(readl(COUNTER_BASE + CNTCV_LO), ==, 100);
+ g_assert_cmpuint(readl(COUNTER_BASE + CNTCV_HI), ==, 0);
+ /* Check the counter scaling functionality */
+ writel(COUNTER_BASE + CNTCR, 0);
+ writel(COUNTER_BASE + CNTSCR, 0x00100000); /* 1/16th normal speed */
+ writel(COUNTER_BASE + CNTCR, 5); /* EN, SCEN */
+ clock_step_ticks(160);
+ g_assert_cmpuint(readl(COUNTER_BASE + CNTCV_LO), ==, 110);
+ g_assert_cmpuint(readl(COUNTER_BASE + CNTCV_HI), ==, 0);
+}
+
+static void test_timer(void)
+{
+ /* Basic timer functionality test */
+
+ reset_counter_and_timer();
+ /*
+ * The timer is behind a Peripheral Protection Controller, and
+ * qtest accesses are always non-secure (no memory attributes),
+ * so we must program the PPC to accept NS transactions. TIMER0
+ * is on port 0 of PPC0, controlled by bit 0 of this register.
+ */
+ writel(PERIPHNSPPC0, 1);
+ /* We must enable the System Counter or the timer won't run. */
+ writel(COUNTER_BASE + CNTCR, 1);
+
+ /* Timer starts disabled and with a counter of 0 */
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 0);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTPCT_LO), ==, 0);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTPCT_HI), ==, 0);
+
+ /* Turn it on */
+ writel(TIMER_BASE + CNTP_CTL, 1);
+
+ /* Is the timer ticking? */
+ clock_step_ticks(100);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTPCT_LO), ==, 100);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTPCT_HI), ==, 0);
+
+ /* Set the CompareValue to 4000 ticks */
+ writel(TIMER_BASE + CNTP_CVAL_LO, 4000);
+ writel(TIMER_BASE + CNTP_CVAL_HI, 0);
+
+ /* Check TVAL view of the counter */
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_TVAL), ==, 3900);
+
+ /* Advance to the CompareValue mark and check ISTATUS is set */
+ clock_step_ticks(3900);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_TVAL), ==, 0);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 5);
+
+ /* Now exercise the auto-reload part of the timer */
+ writel(TIMER_BASE + CNTP_AIVAL_RELOAD, 200);
+ writel(TIMER_BASE + CNTP_AIVAL_CTL, 1);
+
+ /* Check AIVAL was reloaded and that ISTATUS is now clear */
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_AIVAL_LO), ==, 4200);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_AIVAL_HI), ==, 0);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 1);
+
+ /*
+ * Check that when we advance forward to the reload time the interrupt
+ * fires and the value reloads
+ */
+ clock_step_ticks(100);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 1);
+ clock_step_ticks(100);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 5);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_AIVAL_LO), ==, 4400);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_AIVAL_HI), ==, 0);
+
+ clock_step_ticks(100);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 5);
+ /* Check that writing 0 to CLR clears the interrupt */
+ writel(TIMER_BASE + CNTP_AIVAL_CTL, 1);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 1);
+ /* Check that when we move forward to the reload time it fires again */
+ clock_step_ticks(100);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 5);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_AIVAL_LO), ==, 4600);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_AIVAL_HI), ==, 0);
+
+ /*
+ * Step the clock far enough that we overflow the low half of the
+ * CNTPCT and AIVAL registers, and check that their high halves
+ * give the right values. We do the forward movement in
+ * non-autoinc mode because otherwise it takes forever as the
+ * timer has to emulate all the 'reload at t + N, t + 2N, etc'
+ * steps.
+ */
+ writel(TIMER_BASE + CNTP_AIVAL_CTL, 0);
+ clock_step_ticks(0x42ULL << 32);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTPCT_LO), ==, 4400);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTPCT_HI), ==, 0x42);
+
+ /* Turn on the autoinc again to check AIVAL_HI */
+ writel(TIMER_BASE + CNTP_AIVAL_CTL, 1);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_AIVAL_LO), ==, 4600);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_AIVAL_HI), ==, 0x42);
+}
+
+static void test_timer_scale_change(void)
+{
+ /*
+ * Test that the timer responds correctly to counter
+ * scaling changes while it has an active timer.
+ */
+ reset_counter_and_timer();
+ /* Give ourselves access to the timer, and enable the counter and timer */
+ writel(PERIPHNSPPC0, 1);
+ writel(COUNTER_BASE + CNTCR, 1);
+ writel(TIMER_BASE + CNTP_CTL, 1);
+ /* Set the CompareValue to 4000 ticks */
+ writel(TIMER_BASE + CNTP_CVAL_LO, 4000);
+ writel(TIMER_BASE + CNTP_CVAL_HI, 0);
+ /* Advance halfway and check ISTATUS is not set */
+ clock_step_ticks(2000);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 1);
+ /* Reprogram the counter to run at 1/16th speed */
+ writel(COUNTER_BASE + CNTCR, 0);
+ writel(COUNTER_BASE + CNTSCR, 0x00100000); /* 1/16th normal speed */
+ writel(COUNTER_BASE + CNTCR, 5); /* EN, SCEN */
+ /* Advance to where the timer would have fired and check it has not */
+ clock_step_ticks(2000);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 1);
+ /* Advance to where the timer must fire at the new clock rate */
+ clock_step_ticks(29996);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 1);
+ clock_step_ticks(4);
+ g_assert_cmpuint(readl(TIMER_BASE + CNTP_CTL), ==, 5);
+}
+
+int main(int argc, char **argv)
+{
+ int r;
+
+ g_test_init(&argc, &argv, NULL);
+
+ qtest_start("-machine mps3-an547");
+
+ qtest_add_func("/sse-timer/counter", test_counter);
+ qtest_add_func("/sse-timer/timer", test_timer);
+ qtest_add_func("/sse-timer/timer-scale-change", test_timer_scale_change);
+
+ r = g_test_run();
+
+ qtest_end();
+
+ return r;
+}
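
The tick and scaling arithmetic that the test relies on can be cross-checked with a few integer identities. This sketch assumes only what the test itself states: a 32 MHz counter (so 4 ticks are 125 ns) and CNTSCR acting as an 8.24 fixed-point increment in which 0x01000000 is full speed, consistent with the "1/16th normal speed" comments:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* 32 MHz -> 31.25 ns per tick, so 4 ticks are exactly 125 ns */
    assert(4 * 1000000000ULL / 32000000ULL == 125);

    /* CNTSCR = 0x00100000 read as 8.24 fixed point is 1/16 */
    uint64_t scale = 0x00100000;
    /* test_counter: 160 ticks at 1/16 speed add 10 counts (100 -> 110) */
    assert((160 * scale) >> 24 == 10);

    /*
     * test_timer_scale_change: 2000 + 2000/16 counts have elapsed when
     * the final check starts, so the remaining 1875 counts to reach
     * CVAL=4000 need 1875 * 16 = 30000 ticks (29996 + 4 in the test).
     */
    assert((4000 - 2000 - 2000 / 16) * 16 == 30000);
    return 0;
}
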
diff --git a/util/main-loop.c b/util/main-loop.c
index 6bfc7c46f5..5188ff6540 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -26,7 +26,6 @@
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/timer.h"
-#include "sysemu/qtest.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "qemu/main-loop.h"
diff --git a/util/qemu-timer.c b/util/qemu-timer.c
index 81c28af517..f36c75e594 100644
--- a/util/qemu-timer.c
+++ b/util/qemu-timer.c
@@ -29,7 +29,6 @@
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "sysemu/cpus.h"
-#include "sysemu/qtest.h"
#ifdef CONFIG_POSIX
#include <pthread.h>