-rw-r--r--   .mailmap                            7
-rw-r--r--   MAINTAINERS                        10
-rw-r--r--   Makefile                            7
-rw-r--r--   accel/kvm/kvm-all.c                 4
-rw-r--r--   balloon.c                          13
-rw-r--r--   docs/qemu-cpu-models.texi         484
-rw-r--r--   hw/vfio/ccw.c                       9
-rw-r--r--   hw/vfio/common.c                   51
-rw-r--r--   hw/vfio/pci.c                      26
-rw-r--r--   hw/vfio/trace-events                1
-rw-r--r--   hw/virtio/virtio-balloon.c          4
-rw-r--r--   include/elf.h                      24
-rw-r--r--   include/hw/vfio/vfio-common.h       2
-rw-r--r--   linux-user/mips/syscall_nr.h        9
-rw-r--r--   linux-user/mips64/syscall_nr.h     18
-rw-r--r--   linux-user/strace.c                14
-rw-r--r--   linux-user/syscall.c               29
-rw-r--r--   qemu-doc.texi                      19
-rw-r--r--   target/i386/cpu.c                 130
-rw-r--r--   target/i386/cpu.h                   6
-rw-r--r--   target/mips/cpu.h                 158
-rw-r--r--   target/mips/helper.c                4
-rw-r--r--   target/mips/machine.c               5
-rw-r--r--   target/mips/op_helper.c            12
-rw-r--r--   target/mips/translate.c           374
25 files changed, 1258 insertions, 162 deletions
diff --git a/.mailmap b/.mailmap
index 778a4d4e2c..2c2b9b1205 100644
--- a/.mailmap
+++ b/.mailmap
@@ -12,8 +12,11 @@ Fabrice Bellard <fabrice@bellard.org> bellard <bellard@c046a42c-6fe2-441c-8c8c-7
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Jocelyn Mayer <l_indien@magic.fr> j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162>
Paul Brook <paul@codesourcery.com> pbrook <pbrook@c046a42c-6fe2-441c-8c8c-71466251a162>
-Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
-Paul Burton <paul.burton@mips.com> <paul@archlinuxmips.org>
+Aleksandar Markovic <amarkovic@wavecomp.com> <aleksandar.markovic@mips.com>
+Aleksandar Markovic <amarkovic@wavecomp.com> <aleksandar.markovic@imgtec.com>
+Paul Burton <pburton@wavecomp.com> <paul.burton@mips.com>
+Paul Burton <pburton@wavecomp.com> <paul.burton@imgtec.com>
+Paul Burton <pburton@wavecomp.com> <paul@archlinuxmips.org>
Thiemo Seufer <ths@networkno.de> ths <ths@c046a42c-6fe2-441c-8c8c-71466251a162>
malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
diff --git a/MAINTAINERS b/MAINTAINERS
index c48d9271cf..70651f7da0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -187,7 +187,7 @@ F: disas/microblaze.c
MIPS
M: Aurelien Jarno <aurelien@aurel32.net>
-M: Aleksandar Markovic <aleksandar.markovic@mips.com>
+M: Aleksandar Markovic <amarkovic@wavecomp.com>
S: Maintained
F: target/mips/
F: hw/mips/
@@ -289,6 +289,7 @@ F: tests/tcg/i386/
F: tests/tcg/x86_64/
F: hw/i386/
F: disas/i386.c
+F: docs/qemu-cpu-models.texi
T: git git://github.com/ehabkost/qemu.git x86-next
Xtensa
@@ -718,7 +719,7 @@ S: Maintained
F: hw/mips/mips_malta.c
Mipssim
-M: Aleksandar Markovic <aleksandar.markovic@mips.com>
+M: Aleksandar Markovic <amarkovic@wavecomp.com>
S: Odd Fixes
F: hw/mips/mips_mipssim.c
F: hw/net/mipsnet.c
@@ -729,14 +730,15 @@ S: Maintained
F: hw/mips/mips_r4k.c
Fulong 2E
-M: Aleksandar Markovic <aleksandar.markovic@mips.com>
+M: Aleksandar Markovic <amarkovic@wavecomp.com>
S: Odd Fixes
F: hw/mips/mips_fulong2e.c
F: hw/isa/vt82c686.c
+F: include/hw/isa/vt82c686.h
Boston
-M: Paul Burton <paul.burton@mips.com>
+M: Paul Burton <pburton@wavecomp.com>
S: Maintained
F: hw/core/loader-fit.c
F: hw/mips/boston.c
diff --git a/Makefile b/Makefile
index 2da686be33..b7c6e57de6 100644
--- a/Makefile
+++ b/Makefile
@@ -357,6 +357,7 @@ DOCS=qemu-doc.html qemu-doc.txt qemu.1 qemu-img.1 qemu-nbd.8 qemu-ga.8
DOCS+=docs/interop/qemu-qmp-ref.html docs/interop/qemu-qmp-ref.txt docs/interop/qemu-qmp-ref.7
DOCS+=docs/interop/qemu-ga-ref.html docs/interop/qemu-ga-ref.txt docs/interop/qemu-ga-ref.7
DOCS+=docs/qemu-block-drivers.7
+DOCS+=docs/qemu-cpu-models.7
ifdef CONFIG_VIRTFS
DOCS+=fsdev/virtfs-proxy-helper.1
endif
@@ -778,6 +779,7 @@ distclean: clean
rm -f docs/interop/qemu-qmp-ref.pdf docs/interop/qemu-ga-ref.pdf
rm -f docs/interop/qemu-qmp-ref.html docs/interop/qemu-ga-ref.html
rm -f docs/qemu-block-drivers.7
+ rm -f docs/qemu-cpu-models.7
for d in $(TARGET_DIRS); do \
rm -rf $$d || exit 1 ; \
done
@@ -823,6 +825,7 @@ ifdef CONFIG_POSIX
$(INSTALL_DIR) "$(DESTDIR)$(mandir)/man7"
$(INSTALL_DATA) docs/interop/qemu-qmp-ref.7 "$(DESTDIR)$(mandir)/man7"
$(INSTALL_DATA) docs/qemu-block-drivers.7 "$(DESTDIR)$(mandir)/man7"
+ $(INSTALL_DATA) docs/qemu-cpu-models.7 "$(DESTDIR)$(mandir)/man7"
ifneq ($(TOOLS),)
$(INSTALL_DATA) qemu-img.1 "$(DESTDIR)$(mandir)/man1"
$(INSTALL_DIR) "$(DESTDIR)$(mandir)/man8"
@@ -965,6 +968,7 @@ fsdev/virtfs-proxy-helper.1: fsdev/virtfs-proxy-helper.texi
qemu-nbd.8: qemu-nbd.texi qemu-option-trace.texi
qemu-ga.8: qemu-ga.texi
docs/qemu-block-drivers.7: docs/qemu-block-drivers.texi
+docs/qemu-cpu-models.7: docs/qemu-cpu-models.texi
html: qemu-doc.html docs/interop/qemu-qmp-ref.html docs/interop/qemu-ga-ref.html
info: qemu-doc.info docs/interop/qemu-qmp-ref.info docs/interop/qemu-ga-ref.info
@@ -974,7 +978,8 @@ txt: qemu-doc.txt docs/interop/qemu-qmp-ref.txt docs/interop/qemu-ga-ref.txt
qemu-doc.html qemu-doc.info qemu-doc.pdf qemu-doc.txt: \
qemu-img.texi qemu-nbd.texi qemu-options.texi qemu-option-trace.texi \
qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
- qemu-monitor-info.texi docs/qemu-block-drivers.texi
+ qemu-monitor-info.texi docs/qemu-block-drivers.texi \
+ docs/qemu-cpu-models.texi
docs/interop/qemu-ga-ref.dvi docs/interop/qemu-ga-ref.html \
docs/interop/qemu-ga-ref.info docs/interop/qemu-ga-ref.pdf \
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index eb7db92a5e..38f468d8e2 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -39,6 +39,7 @@
#include "trace.h"
#include "hw/irq.h"
#include "sysemu/sev.h"
+#include "sysemu/balloon.h"
#include "hw/boards.h"
@@ -1698,6 +1699,9 @@ static int kvm_init(MachineState *ms)
s->many_ioeventfds = kvm_check_many_ioeventfds();
s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
+ if (!s->sync_mmu) {
+ qemu_balloon_inhibit(true);
+ }
return 0;
diff --git a/balloon.c b/balloon.c
index 6bf0a96813..9319879838 100644
--- a/balloon.c
+++ b/balloon.c
@@ -26,6 +26,7 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
+#include "qemu/atomic.h"
#include "exec/cpu-common.h"
#include "sysemu/kvm.h"
#include "sysemu/balloon.h"
@@ -37,16 +38,22 @@
static QEMUBalloonEvent *balloon_event_fn;
static QEMUBalloonStatus *balloon_stat_fn;
static void *balloon_opaque;
-static bool balloon_inhibited;
+static int balloon_inhibit_count;
bool qemu_balloon_is_inhibited(void)
{
- return balloon_inhibited;
+ return atomic_read(&balloon_inhibit_count) > 0;
}
void qemu_balloon_inhibit(bool state)
{
- balloon_inhibited = state;
+ if (state) {
+ atomic_inc(&balloon_inhibit_count);
+ } else {
+ atomic_dec(&balloon_inhibit_count);
+ }
+
+ assert(atomic_read(&balloon_inhibit_count) >= 0);
}
static bool have_balloon(Error **errp)
diff --git a/docs/qemu-cpu-models.texi b/docs/qemu-cpu-models.texi
new file mode 100644
index 0000000000..1935f98c63
--- /dev/null
+++ b/docs/qemu-cpu-models.texi
@@ -0,0 +1,484 @@
+@c man begin SYNOPSIS
+QEMU / KVM CPU model configuration
+@c man end
+
+@c man begin DESCRIPTION
+
+@menu
+* recommendations_cpu_models_x86:: Recommendations for KVM CPU model configuration on x86 hosts
+* cpu_model_syntax_apps:: Syntax for configuring CPU models
+@end menu
+
+QEMU / KVM virtualization supports two ways to configure CPU models
+
+@table @option
+
+@item Host passthrough
+
+This passes the host CPU's model, features, and stepping exactly to the
+guest. Note that KVM may filter out some host CPU model features if they
+cannot be supported with virtualization. Live migration is unsafe when this
+mode is used, as libvirt / QEMU cannot guarantee that a stable CPU is
+exposed to the guest across hosts. This is the recommended CPU model to use,
+provided live migration is not required.
+
+@item Named model
+
+QEMU comes with a number of predefined named CPU models that typically
+refer to specific generations of hardware released by Intel and AMD.
+These allow the guest VMs to have a degree of isolation from the host CPU,
+allowing greater flexibility in live migrating between hosts with differing
+hardware.
+@end table
+
+In both cases, it is possible to optionally add or remove individual CPU
+features, to alter what is presented to the guest by default.
+
+Libvirt supports a third way to configure CPU models known as "Host model".
+This uses the QEMU "Named model" feature, automatically picking a CPU model
+that is similar to the host CPU, and then adding extra features to approximate
+the host model as closely as possible. This does not guarantee the CPU family,
+stepping, etc. will precisely match the host CPU, as they would with "Host
+passthrough", but gives much of the benefit of passthrough, while making
+live migration safe.
+
+@node recommendations_cpu_models_x86
+@subsection Recommendations for KVM CPU model configuration on x86 hosts
+
+The information that follows provides recommendations for configuring
+CPU models on x86 hosts. The goals are to maximise performance, while
+protecting the guest OS against various CPU hardware flaws, and optionally
+enabling live migration between hosts with heterogeneous CPU models.
+
+@menu
+* preferred_cpu_models_intel_x86:: Preferred CPU models for Intel x86 hosts
+* important_cpu_features_intel_x86:: Important CPU features for Intel x86 hosts
+* preferred_cpu_models_amd_x86:: Preferred CPU models for AMD x86 hosts
+* important_cpu_features_amd_x86:: Important CPU features for AMD x86 hosts
+* default_cpu_models_x86:: Default x86 CPU models
+* other_non_recommended_cpu_models_x86:: Other non-recommended x86 CPUs
+@end menu
+
+@node preferred_cpu_models_intel_x86
+@subsubsection Preferred CPU models for Intel x86 hosts
+
+The following CPU models are preferred for use on Intel hosts. Administrators /
+applications are recommended to use the CPU model that matches the generation
+of the host CPUs in use. In a deployment with a mixture of host CPU models
+between machines, if live migration compatibility is required, use the newest
+CPU model that is compatible across all desired hosts.
+
+@table @option
+@item @code{Skylake-Server}
+@item @code{Skylake-Server-IBRS}
+
+Intel Xeon Processor (Skylake, 2016)
+
+
+@item @code{Skylake-Client}
+@item @code{Skylake-Client-IBRS}
+
+Intel Core Processor (Skylake, 2015)
+
+
+@item @code{Broadwell}
+@item @code{Broadwell-IBRS}
+@item @code{Broadwell-noTSX}
+@item @code{Broadwell-noTSX-IBRS}
+
+Intel Core Processor (Broadwell, 2014)
+
+
+@item @code{Haswell}
+@item @code{Haswell-IBRS}
+@item @code{Haswell-noTSX}
+@item @code{Haswell-noTSX-IBRS}
+
+Intel Core Processor (Haswell, 2013)
+
+
+@item @code{IvyBridge}
+@item @code{IvyBridge-IBRS}
+
+Intel Xeon E3-12xx v2 (Ivy Bridge, 2012)
+
+
+@item @code{SandyBridge}
+@item @code{SandyBridge-IBRS}
+
+Intel Xeon E312xx (Sandy Bridge, 2011)
+
+
+@item @code{Westmere}
+@item @code{Westmere-IBRS}
+
+Westmere E56xx/L56xx/X56xx (Nehalem-C, 2010)
+
+
+@item @code{Nehalem}
+@item @code{Nehalem-IBRS}
+
+Intel Core i7 9xx (Nehalem Class Core i7, 2008)
+
+
+@item @code{Penryn}
+
+Intel Core 2 Duo P9xxx (Penryn Class Core 2, 2007)
+
+
+@item @code{Conroe}
+
+Intel Celeron_4x0 (Conroe/Merom Class Core 2, 2006)
+
+@end table
+
+@node important_cpu_features_intel_x86
+@subsubsection Important CPU features for Intel x86 hosts
+
+The following are important CPU features that should be used on Intel x86
+hosts, when available in the host CPU. Some of them require explicit
+configuration to enable, as they are not included by default in some, or all,
+of the named CPU models listed above. In general all of these features are
+included if using "Host passthrough" or "Host model".
+
+
+@table @option
+
+@item @code{pcid}
+
+Recommended to mitigate the cost of the Meltdown (CVE-2017-5754) fix
+
+Included by default in Haswell, Broadwell & Skylake Intel CPU models.
+
+Should be explicitly turned on for Westmere, SandyBridge, and IvyBridge
+Intel CPU models. Note that some desktop/mobile Westmere CPUs cannot
+support this feature.
+
+
+@item @code{spec-ctrl}
+
+Required to enable the Spectre (CVE-2017-5753 and CVE-2017-5715) fix,
+in cases where retpolines are not sufficient.
+
+Included by default in Intel CPU models with -IBRS suffix.
+
+Must be explicitly turned on for Intel CPU models without -IBRS suffix.
+
+Requires the host CPU microcode to support this feature before it
+can be used for guest CPUs.
+
+
+@item @code{ssbd}
+
+Required to enable the CVE-2018-3639 fix
+
+Not included by default in any Intel CPU model.
+
+Must be explicitly turned on for all Intel CPU models.
+
+Requires the host CPU microcode to support this feature before it
+can be used for guest CPUs.
+
+
+@item @code{pdpe1gb}
+
+Recommended to allow guest OS to use 1GB size pages
+
+Not included by default in any Intel CPU model.
+
+Should be explicitly turned on for all Intel CPU models.
+
+Note that not all CPU hardware will support this feature.
+@end table
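+
+For illustration only (assuming a SandyBridge class host whose CPUs and
+microcode actually support these features), the features above could be
+enabled on top of the corresponding named model as follows:
+
+@example
+ $ qemu-system-x86_64 -cpu SandyBridge,+pcid,+spec-ctrl,+ssbd,+pdpe1gb
+@end example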
+
+
+@node preferred_cpu_models_amd_x86
+@subsubsection Preferred CPU models for AMD x86 hosts
+
+The following CPU models are preferred for use on AMD hosts. Administrators /
+applications are recommended to use the CPU model that matches the generation
+of the host CPUs in use. In a deployment with a mixture of host CPU models
+between machines, if live migration compatibility is required, use the newest
+CPU model that is compatible across all desired hosts.
+
+@table @option
+
+@item @code{EPYC}
+@item @code{EPYC-IBPB}
+
+AMD EPYC Processor (2017)
+
+
+@item @code{Opteron_G5}
+
+AMD Opteron 63xx class CPU (2012)
+
+
+@item @code{Opteron_G4}
+
+AMD Opteron 62xx class CPU (2011)
+
+
+@item @code{Opteron_G3}
+
+AMD Opteron 23xx (Gen 3 Class Opteron, 2009)
+
+
+@item @code{Opteron_G2}
+
+AMD Opteron 22xx (Gen 2 Class Opteron, 2006)
+
+
+@item @code{Opteron_G1}
+
+AMD Opteron 240 (Gen 1 Class Opteron, 2004)
+@end table
+
+@node important_cpu_features_amd_x86
+@subsubsection Important CPU features for AMD x86 hosts
+
+The following are important CPU features that should be used on AMD x86
+hosts, when available in the host CPU. Some of them require explicit
+configuration to enable, as they are not included by default in some, or all,
+of the named CPU models listed above. In general all of these features are
+included if using "Host passthrough" or "Host model".
+
+
+@table @option
+
+@item @code{ibpb}
+
+Required to enable the Spectre (CVE-2017-5753 and CVE-2017-5715) fix,
+in cases where retpolines are not sufficient.
+
+Included by default in AMD CPU models with -IBPB suffix.
+
+Must be explicitly turned on for AMD CPU models without -IBPB suffix.
+
+Requires the host CPU microcode to support this feature before it
+can be used for guest CPUs.
+
+
+@item @code{virt-ssbd}
+
+Required to enable the CVE-2018-3639 fix
+
+Not included by default in any AMD CPU model.
+
+Must be explicitly turned on for all AMD CPU models.
+
+This should be provided to guests, even if amd-ssbd is also
+provided, for maximum guest compatibility.
+
+Note that for some QEMU / libvirt versions, this must be force enabled
+when using "Host model", because this is a virtual feature that
+doesn't exist in the physical host CPUs.
+
+
+@item @code{amd-ssbd}
+
+Required to enable the CVE-2018-3639 fix
+
+Not included by default in any AMD CPU model.
+
+Must be explicitly turned on for all AMD CPU models.
+
+This provides higher performance than virt-ssbd, so it should be
+exposed to guests whenever available in the host. virt-ssbd
+should nonetheless also be exposed for maximum guest
+compatibility, as some kernels only know about virt-ssbd.
+
+
+@item @code{amd-no-ssb}
+
+Recommended to indicate the host is not vulnerable to CVE-2018-3639
+
+Not included by default in any AMD CPU model.
+
+Future hardware generations of CPUs will not be vulnerable to
+CVE-2018-3639, and thus the guest should be told not to enable
+its mitigations, by exposing amd-no-ssb. This is mutually
+exclusive with virt-ssbd and amd-ssbd.
+
+
+@item @code{pdpe1gb}
+
+Recommended to allow guest OS to use 1GB size pages
+
+Not included by default in any AMD CPU model.
+
+Should be explicitly turned on for all AMD CPU models.
+
+Note that not all CPU hardware will support this feature.
+@end table
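+
+As an illustration (assuming an EPYC host whose CPU microcode supports these
+features), they could be enabled on top of the named model as follows,
+optionally appending +amd-ssbd where the host supports it:
+
+@example
+ $ qemu-system-x86_64 -cpu EPYC,+ibpb,+virt-ssbd,+pdpe1gb
+@end example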
+
+
+@node default_cpu_models_x86
+@subsubsection Default x86 CPU models
+
+The default QEMU CPU models are designed such that they can run on all hosts.
+If an application does not wish to perform any host compatibility checks
+before launching guests, the default is guaranteed to work.
+
+The default CPU models will, however, leave the guest OS vulnerable to various
+CPU hardware flaws, so their use is strongly discouraged. Applications should
+follow the earlier guidance to set up a better CPU configuration, with host
+passthrough recommended if live migration is not needed.
+
+@table @option
+@item @code{qemu32}
+@item @code{qemu64}
+
+QEMU Virtual CPU version 2.5+ (32 & 64 bit variants)
+
+qemu64 is used for x86_64 guests and qemu32 is used for i686 guests, when no
+-cpu argument is given to QEMU, or no <cpu> is provided in libvirt XML.
+@end table
+
+
+@node other_non_recommended_cpu_models_x86
+@subsubsection Other non-recommended x86 CPUs
+
+The following CPU models are compatible with most AMD and Intel x86 hosts, but
+their usage is discouraged, as they expose a very limited featureset, which
+prevents guests from having optimal performance.
+
+@table @option
+
+@item @code{kvm32}
+@item @code{kvm64}
+
+Common KVM processor (32 & 64 bit variants)
+
+Legacy models just for historical compatibility with ancient QEMU versions.
+
+
+@item @code{486}
+@item @code{athlon}
+@item @code{phenom}
+@item @code{coreduo}
+@item @code{core2duo}
+@item @code{n270}
+@item @code{pentium}
+@item @code{pentium2}
+@item @code{pentium3}
+
+Various very old x86 CPU models, mostly predating the introduction of
+hardware-assisted virtualization; they should thus not be required for
+running virtual machines.
+@end table
+
+@node cpu_model_syntax_apps
+@subsection Syntax for configuring CPU models
+
+The examples below illustrate the approach to configuring the various
+CPU models / features in QEMU and libvirt.
+
+@menu
+* cpu_model_syntax_qemu:: QEMU command line
+* cpu_model_syntax_libvirt:: Libvirt guest XML
+@end menu
+
+@node cpu_model_syntax_qemu
+@subsubsection QEMU command line
+
+@table @option
+
+@item Host passthrough
+
+@example
+ $ qemu-system-x86_64 -cpu host
+@end example
+
+With feature customization:
+
+@example
+ $ qemu-system-x86_64 -cpu host,-vmx,...
+@end example
+
+@item Named CPU models
+
+@example
+ $ qemu-system-x86_64 -cpu Westmere
+@end example
+
+With feature customization:
+
+@example
+ $ qemu-system-x86_64 -cpu Westmere,+pcid,...
+@end example
+
+@end table
+
+@node cpu_model_syntax_libvirt
+@subsubsection Libvirt guest XML
+
+@table @option
+
+@item Host passthrough
+
+@example
+ <cpu mode='host-passthrough'/>
+@end example
+
+With feature customization:
+
+@example
+ <cpu mode='host-passthrough'>
+ <feature name="vmx" policy="disable"/>
+ ...
+ </cpu>
+@end example
+
+@item Host model
+
+@example
+ <cpu mode='host-model'/>
+@end example
+
+With feature customization:
+
+@example
+ <cpu mode='host-model'>
+ <feature name="vmx" policy="disable"/>
+ ...
+ </cpu>
+@end example
+
+@item Named model
+
+@example
+ <cpu mode='custom'>
+ <model name="Westmere"/>
+ </cpu>
+@end example
+
+With feature customization:
+
+@example
+ <cpu mode='custom'>
+ <model name="Westmere"/>
+ <feature name="pcid" policy="require"/>
+ ...
+ </cpu>
+@end example
+
+@end table
+
+@c man end
+
+@ignore
+
+@setfilename qemu-cpu-models
+@settitle QEMU / KVM CPU model configuration
+
+@c man begin SEEALSO
+The HTML documentation of QEMU for more precise information and Linux
+user mode emulator invocation.
+@c man end
+
+@c man begin AUTHOR
+Daniel P. Berrange
+@c man end
+
+@end ignore
diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c
index 351b305e1a..e96bbdc78b 100644
--- a/hw/vfio/ccw.c
+++ b/hw/vfio/ccw.c
@@ -349,6 +349,15 @@ static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev,
}
}
+ /*
+ * All vfio-ccw devices are believed to operate in a way compatible with
+ * memory ballooning, ie. pages pinned in the host are in the current
+ * working set of the guest driver and therefore never overlap with pages
+ * available to the guest balloon driver. This needs to be set before
+ * vfio_get_device() for vfio common to handle the balloon inhibitor.
+ */
+ vcdev->vdev.balloon_allowed = true;
+
if (vfio_get_device(group, vcdev->cdev.mdevid, &vcdev->vdev, errp)) {
goto out_err;
}
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index fb396cf00a..cd1f4af18a 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -32,6 +32,7 @@
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
+#include "sysemu/balloon.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"
@@ -1044,6 +1045,33 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
space = vfio_get_address_space(as);
+ /*
+ * VFIO is currently incompatible with memory ballooning insofar as the
+ * madvise to purge (zap) the page from QEMU's address space does not
+ * interact with the memory API and therefore leaves stale virtual to
+ * physical mappings in the IOMMU if the page was previously pinned. We
+ * therefore add a balloon inhibit for each group added to a container,
+ * whether the container is used individually or shared. This provides
+ * us with options to allow devices within a group to opt-in and allow
+ * ballooning, so long as it is done consistently for a group (for instance
+ * if the device is an mdev device where it is known that the host vendor
+ * driver will never pin pages outside of the working set of the guest
+ * driver, which would thus not be ballooning candidates).
+ *
+ * The first opportunity to induce pinning occurs here where we attempt to
+ * attach the group to existing containers within the AddressSpace. If any
+ * pages are already zapped from the virtual address space, such as from a
+ * previous ballooning opt-in, new pinning will cause valid mappings to be
+ * re-established. Likewise, when the overall MemoryListener for a new
+ * container is registered, a replay of mappings within the AddressSpace
+ * will occur, re-establishing any previously zapped pages as well.
+ *
+ * NB. Balloon inhibiting does not currently block operation of the
+ * balloon driver or revoke previously pinned pages, it only prevents
+ * calling madvise to modify the virtual mapping of ballooned pages.
+ */
+ qemu_balloon_inhibit(true);
+
QLIST_FOREACH(container, &space->containers, next) {
if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
group->container = container;
@@ -1232,6 +1260,7 @@ close_fd_exit:
close(fd);
put_space_exit:
+ qemu_balloon_inhibit(false);
vfio_put_address_space(space);
return ret;
@@ -1352,6 +1381,9 @@ void vfio_put_group(VFIOGroup *group)
return;
}
+ if (!group->balloon_allowed) {
+ qemu_balloon_inhibit(false);
+ }
vfio_kvm_device_del_group(group);
vfio_disconnect_container(group);
QLIST_REMOVE(group, next);
@@ -1387,6 +1419,25 @@ int vfio_get_device(VFIOGroup *group, const char *name,
return ret;
}
+ /*
+ * Clear the balloon inhibitor for this group if the driver knows the
+ * device operates compatibly with ballooning. Setting must be consistent
+ * per group, but since compatibility is really only possible with mdev
+ * currently, we expect singleton groups.
+ */
+ if (vbasedev->balloon_allowed != group->balloon_allowed) {
+ if (!QLIST_EMPTY(&group->device_list)) {
+ error_setg(errp,
+ "Inconsistent device balloon setting within group");
+ return -1;
+ }
+
+ if (!group->balloon_allowed) {
+ group->balloon_allowed = true;
+ qemu_balloon_inhibit(false);
+ }
+ }
+
vbasedev->fd = fd;
vbasedev->group = group;
QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 6cbb8fa054..056f3a887a 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -2804,12 +2804,13 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
VFIODevice *vbasedev_iter;
VFIOGroup *group;
- char *tmp, group_path[PATH_MAX], *group_name;
+ char *tmp, *subsys, group_path[PATH_MAX], *group_name;
Error *err = NULL;
ssize_t len;
struct stat st;
int groupid;
int i, ret;
+ bool is_mdev;
if (!vdev->vbasedev.sysfsdev) {
if (!(~vdev->host.domain || ~vdev->host.bus ||
@@ -2869,6 +2870,27 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
}
}
+ /*
+ * Mediated devices *might* operate compatibly with memory ballooning, but
+ * we cannot know for certain, it depends on whether the mdev vendor driver
+ * stays in sync with the active working set of the guest driver. Prevent
+ * the x-balloon-allowed option unless this is minimally an mdev device.
+ */
+ tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev);
+ subsys = realpath(tmp, NULL);
+ g_free(tmp);
+ is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
+ free(subsys);
+
+ trace_vfio_mdev(vdev->vbasedev.name, is_mdev);
+
+ if (vdev->vbasedev.balloon_allowed && !is_mdev) {
+ error_setg(errp, "x-balloon-allowed only potentially compatible "
+ "with mdev devices");
+ vfio_put_group(group);
+ goto error;
+ }
+
ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
if (ret) {
vfio_put_group(group);
@@ -3170,6 +3192,8 @@ static Property vfio_pci_dev_properties[] = {
DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
+ DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
+ vbasedev.balloon_allowed, false),
DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index d2a74952e3..a85e8662ea 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -39,6 +39,7 @@ vfio_pci_hot_reset_result(const char *name, const char *result) "%s hot reset: %
vfio_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device %s config:\n size: 0x%lx, offset: 0x%lx, flags: 0x%lx"
vfio_populate_device_get_irq_info_failure(void) "VFIO_DEVICE_GET_IRQ_INFO failure: %m"
vfio_realize(const char *name, int group_id) " (%s) group %d"
+vfio_mdev(const char *name, bool is_mdev) " (%s) is_mdev %d"
vfio_add_ext_cap_dropped(const char *name, uint16_t cap, uint16_t offset) "%s 0x%x@0x%x"
vfio_pci_reset(const char *name) " (%s)"
vfio_pci_reset_flr(const char *name) "%s FLR/VFIO_DEVICE_RESET"
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index 1f7a87f094..b5425080c5 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -21,7 +21,6 @@
#include "hw/mem/pc-dimm.h"
#include "sysemu/balloon.h"
#include "hw/virtio/virtio-balloon.h"
-#include "sysemu/kvm.h"
#include "exec/address-spaces.h"
#include "qapi/error.h"
#include "qapi/qapi-events-misc.h"
@@ -36,8 +35,7 @@
static void balloon_page(void *addr, int deflate)
{
- if (!qemu_balloon_is_inhibited() && (!kvm_enabled() ||
- kvm_has_sync_mmu())) {
+ if (!qemu_balloon_is_inhibited()) {
qemu_madvise(addr, BALLOON_PAGE_SIZE,
deflate ? QEMU_MADV_WILLNEED : QEMU_MADV_DONTNEED);
}
diff --git a/include/elf.h b/include/elf.h
index 934dbbd6b3..28a5a638e0 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -61,7 +61,29 @@ typedef int64_t Elf64_Sxword;
#define EF_MIPS_ABI 0x0000f000
#define EF_MIPS_FP64 0x00000200
#define EF_MIPS_NAN2008 0x00000400
-#define EF_MIPS_ARCH 0xf0000000
+
+/* MIPS machine variant */
+#define EF_MIPS_MACH_NONE 0x00000000 /* A standard MIPS implementation */
+#define EF_MIPS_MACH_3900 0x00810000 /* Toshiba R3900 */
+#define EF_MIPS_MACH_4010 0x00820000 /* LSI R4010 */
+#define EF_MIPS_MACH_4100 0x00830000 /* NEC VR4100 */
+#define EF_MIPS_MACH_4650 0x00850000 /* MIPS R4650 */
+#define EF_MIPS_MACH_4120 0x00870000 /* NEC VR4120 */
+#define EF_MIPS_MACH_4111 0x00880000 /* NEC VR4111/VR4181 */
+#define EF_MIPS_MACH_SB1 0x008a0000 /* Broadcom SB-1 */
+#define EF_MIPS_MACH_OCTEON 0x008b0000 /* Cavium Networks Octeon */
+#define EF_MIPS_MACH_XLR 0x008c0000 /* RMI Xlr */
+#define EF_MIPS_MACH_OCTEON2 0x008d0000 /* Cavium Networks Octeon2 */
+#define EF_MIPS_MACH_OCTEON3 0x008e0000 /* Cavium Networks Octeon3 */
+#define EF_MIPS_MACH_5400 0x00910000 /* NEC VR5400 */
+#define EF_MIPS_MACH_5900 0x00920000 /* MIPS R5900 */
+#define EF_MIPS_MACH_5500 0x00980000 /* NEC VR5500 */
+#define EF_MIPS_MACH_9000 0x00990000 /* PMC-Sierra's RM9000 */
+#define EF_MIPS_MACH_LS2E 0x00a00000 /* ST Microelectronics Loongson 2E */
+#define EF_MIPS_MACH_LS2F 0x00a10000 /* ST Microelectronics Loongson 2F */
+#define EF_MIPS_MACH_LS3A 0x00a20000 /* ST Microelectronics Loongson 3A */
+#define EF_MIPS_MACH 0x00ff0000 /* EF_MIPS_MACH_xxx selection mask */
+
/* These constants define the different elf file types */
#define ET_NONE 0
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index a9036929b2..15ea6c26fd 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -112,6 +112,7 @@ typedef struct VFIODevice {
bool reset_works;
bool needs_reset;
bool no_mmap;
+ bool balloon_allowed;
VFIODeviceOps *ops;
unsigned int num_irqs;
unsigned int num_regions;
@@ -131,6 +132,7 @@ typedef struct VFIOGroup {
QLIST_HEAD(, VFIODevice) device_list;
QLIST_ENTRY(VFIOGroup) next;
QLIST_ENTRY(VFIOGroup) container_next;
+ bool balloon_allowed;
} VFIOGroup;
typedef struct VFIODMABuf {
diff --git a/linux-user/mips/syscall_nr.h b/linux-user/mips/syscall_nr.h
index ced32806ea..e70adfc2fe 100644
--- a/linux-user/mips/syscall_nr.h
+++ b/linux-user/mips/syscall_nr.h
@@ -363,3 +363,12 @@
#define TARGET_NR_userfaultfd (TARGET_NR_Linux + 357)
#define TARGET_NR_membarrier (TARGET_NR_Linux + 358)
#define TARGET_NR_mlock2 (TARGET_NR_Linux + 359)
+#define TARGET_NR_copy_file_range (TARGET_NR_Linux + 360)
+#define TARGET_NR_preadv2 (TARGET_NR_Linux + 361)
+#define TARGET_NR_pwritev2 (TARGET_NR_Linux + 362)
+#define TARGET_NR_pkey_mprotect (TARGET_NR_Linux + 363)
+#define TARGET_NR_pkey_alloc (TARGET_NR_Linux + 364)
+#define TARGET_NR_pkey_free (TARGET_NR_Linux + 365)
+#define TARGET_NR_statx (TARGET_NR_Linux + 366)
+#define TARGET_NR_rseq (TARGET_NR_Linux + 367)
+#define TARGET_NR_io_pgetevents (TARGET_NR_Linux + 368)
diff --git a/linux-user/mips64/syscall_nr.h b/linux-user/mips64/syscall_nr.h
index 746cc267e9..ff218a9bf2 100644
--- a/linux-user/mips64/syscall_nr.h
+++ b/linux-user/mips64/syscall_nr.h
@@ -327,6 +327,15 @@
#define TARGET_NR_userfaultfd (TARGET_NR_Linux + 321)
#define TARGET_NR_membarrier (TARGET_NR_Linux + 322)
#define TARGET_NR_mlock2 (TARGET_NR_Linux + 323)
+#define TARGET_NR_copy_file_range (TARGET_NR_Linux + 324)
+#define TARGET_NR_preadv2 (TARGET_NR_Linux + 325)
+#define TARGET_NR_pwritev2 (TARGET_NR_Linux + 326)
+#define TARGET_NR_pkey_mprotect (TARGET_NR_Linux + 327)
+#define TARGET_NR_pkey_alloc (TARGET_NR_Linux + 328)
+#define TARGET_NR_pkey_free (TARGET_NR_Linux + 329)
+#define TARGET_NR_statx (TARGET_NR_Linux + 330)
+#define TARGET_NR_rseq (TARGET_NR_Linux + 331)
+#define TARGET_NR_io_pgetevents (TARGET_NR_Linux + 332)
#else
/*
@@ -653,4 +662,13 @@
#define TARGET_NR_userfaultfd (TARGET_NR_Linux + 317)
#define TARGET_NR_membarrier (TARGET_NR_Linux + 318)
#define TARGET_NR_mlock2 (TARGET_NR_Linux + 319)
+#define TARGET_NR_copy_file_range (TARGET_NR_Linux + 320)
+#define TARGET_NR_preadv2 (TARGET_NR_Linux + 321)
+#define TARGET_NR_pwritev2 (TARGET_NR_Linux + 322)
+#define TARGET_NR_pkey_mprotect (TARGET_NR_Linux + 323)
+#define TARGET_NR_pkey_alloc (TARGET_NR_Linux + 324)
+#define TARGET_NR_pkey_free (TARGET_NR_Linux + 325)
+#define TARGET_NR_statx (TARGET_NR_Linux + 326)
+#define TARGET_NR_rseq (TARGET_NR_Linux + 327)
+#define TARGET_NR_io_pgetevents (TARGET_NR_Linux + 328)
#endif
diff --git a/linux-user/strace.c b/linux-user/strace.c
index bd897a3f20..33f4a506a2 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -2304,7 +2304,19 @@ print_statfs(const struct syscallname *name,
print_pointer(arg1, 1);
print_syscall_epilogue(name);
}
-#define print_statfs64 print_statfs
+#endif
+
+#ifdef TARGET_NR_statfs64
+static void
+print_statfs64(const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ print_syscall_prologue(name);
+ print_string(arg0, 0);
+ print_pointer(arg1, 1);
+ print_syscall_epilogue(name);
+}
#endif
#ifdef TARGET_NR_symlink
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 5a4af76c03..bb42a225eb 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -7286,6 +7286,9 @@ static inline int target_to_host_mlockall_arg(int arg)
}
#endif
+#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
+ defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
+ defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
abi_ulong target_addr,
struct stat *host_st)
@@ -7348,6 +7351,7 @@ static inline abi_long host_to_target_stat64(void *cpu_env,
return 0;
}
+#endif
/* ??? Using host futex calls even when target atomic operations
are not really atomic probably breaks things. However implementing
@@ -7996,8 +8000,15 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
abi_long ret;
+#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
+ || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
+ || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
struct stat st;
+#endif
+#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
+ || defined(TARGET_NR_fstatfs)
struct statfs stfs;
+#endif
void *p;
#if defined(DEBUG_ERESTARTSYS)
@@ -8365,9 +8376,11 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_oldstat:
goto unimplemented;
#endif
+#ifdef TARGET_NR_lseek
case TARGET_NR_lseek:
ret = get_errno(lseek(arg1, arg2, arg3));
break;
+#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
/* Alpha specific */
case TARGET_NR_getxpid:
@@ -9251,6 +9264,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
ret = get_errno(sethostname(p, arg2));
unlock_user(p, arg1, 0);
break;
+#ifdef TARGET_NR_setrlimit
case TARGET_NR_setrlimit:
{
int resource = target_to_host_resource(arg1);
@@ -9264,6 +9278,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
ret = get_errno(setrlimit(resource, &rlim));
}
break;
+#endif
+#ifdef TARGET_NR_getrlimit
case TARGET_NR_getrlimit:
{
int resource = target_to_host_resource(arg1);
@@ -9280,6 +9296,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
}
}
break;
+#endif
case TARGET_NR_getrusage:
{
struct rusage rusage;
@@ -9644,15 +9661,19 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
ret = get_errno(munlockall());
break;
#endif
+#ifdef TARGET_NR_truncate
case TARGET_NR_truncate:
if (!(p = lock_user_string(arg1)))
goto efault;
ret = get_errno(truncate(p, arg2));
unlock_user(p, arg1, 0);
break;
+#endif
+#ifdef TARGET_NR_ftruncate
case TARGET_NR_ftruncate:
ret = get_errno(ftruncate(arg1, arg2));
break;
+#endif
case TARGET_NR_fchmod:
ret = get_errno(fchmod(arg1, arg2));
break;
@@ -9688,6 +9709,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_profil:
goto unimplemented;
#endif
+#ifdef TARGET_NR_statfs
case TARGET_NR_statfs:
if (!(p = lock_user_string(arg1)))
goto efault;
@@ -9719,9 +9741,12 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
unlock_user_struct(target_stfs, arg2, 1);
}
break;
+#endif
+#ifdef TARGET_NR_fstatfs
case TARGET_NR_fstatfs:
ret = get_errno(fstatfs(arg1, &stfs));
goto convert_statfs;
+#endif
#ifdef TARGET_NR_statfs64
case TARGET_NR_statfs64:
if (!(p = lock_user_string(arg1)))
@@ -9969,6 +9994,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
unlock_user(p, arg1, 0);
goto do_stat;
#endif
+#ifdef TARGET_NR_fstat
case TARGET_NR_fstat:
{
ret = get_errno(fstat(arg1, &st));
@@ -9998,6 +10024,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
}
}
break;
+#endif
#ifdef TARGET_NR_olduname
case TARGET_NR_olduname:
goto unimplemented;
@@ -11004,6 +11031,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break;
#ifdef CONFIG_SENDFILE
+#ifdef TARGET_NR_sendfile
case TARGET_NR_sendfile:
{
off_t *offp = NULL;
@@ -11024,6 +11052,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
}
break;
}
+#endif
#ifdef TARGET_NR_sendfile64
case TARGET_NR_sendfile64:
{
diff --git a/qemu-doc.texi b/qemu-doc.texi
index abfd2db546..f74542a0e9 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -135,6 +135,7 @@ accelerator is required to use more than one host CPU for emulation.
* pcsys_keys:: Keys in the graphical frontends
* mux_keys:: Keys in the character backend multiplexer
* pcsys_monitor:: QEMU Monitor
+* cpu_models:: CPU models
* disk_images:: Disk Images
* pcsys_network:: Network emulation
* pcsys_other_devs:: Other Devices
@@ -602,6 +603,11 @@ The monitor understands integers expressions for every integer
argument. You can use register names to get the value of specifics
CPU registers by prefixing them with @emph{$}.
+@node cpu_models
+@section CPU models
+
+@include docs/qemu-cpu-models.texi
+
@node disk_images
@section Disk Images
@@ -2693,8 +2699,17 @@ The binary format is detected automatically.
@command{qemu-microblaze} TODO.
@cindex user mode (MIPS)
-@command{qemu-mips} TODO.
-@command{qemu-mipsel} TODO.
+@command{qemu-mips} executes 32-bit big endian MIPS binaries (MIPS O32 ABI).
+
+@command{qemu-mipsel} executes 32-bit little endian MIPS binaries (MIPS O32 ABI).
+
+@command{qemu-mips64} executes 64-bit big endian MIPS binaries (MIPS N64 ABI).
+
+@command{qemu-mips64el} executes 64-bit little endian MIPS binaries (MIPS N64 ABI).
+
+@command{qemu-mipsn32} executes 32-bit big endian MIPS binaries (MIPS N32 ABI).
+
+@command{qemu-mipsn32el} executes 32-bit little endian MIPS binaries (MIPS N32 ABI).
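+
+For example, a 32-bit little endian MIPS O32 binary (here a hypothetical
+@file{./hello} built for that target) could be run with:
+
+@example
+ $ qemu-mipsel ./hello
+@end example
+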
@cindex user mode (NiosII)
@command{qemu-nios2} TODO.
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 723e02221e..4e4fe8fa8b 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -849,6 +849,12 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
.tcg_features = TCG_EXT3_FEATURES,
+ /*
+ * TOPOEXT is always allowed but can't be enabled blindly by
+ * "-cpu host", as it requires consistent cache topology info
+ * to be provided so it doesn't confuse guests.
+ */
+ .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
},
[FEAT_C000_0001_EDX] = {
.feat_names = {
@@ -868,7 +874,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.feat_names = {
"kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
"kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
- NULL, "kvm-pv-tlb-flush", NULL, NULL,
+ NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
@@ -997,15 +1003,16 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
+ NULL, NULL, "pconfig", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, "spec-ctrl", NULL,
- NULL, NULL, NULL, "ssbd",
+ NULL, "arch-capabilities", NULL, "ssbd",
},
.cpuid_eax = 7,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_EDX,
.tcg_features = TCG_7_0_EDX_FEATURES,
+ .unmigratable_flags = CPUID_7_0_EDX_ARCH_CAPABILITIES,
},
[FEAT_8000_0007_EDX] = {
.feat_names = {
@@ -1027,7 +1034,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.feat_names = {
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
+ NULL, "wbnoinvd", NULL, NULL,
"ibpb", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
@@ -2380,6 +2387,121 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model_id = "Intel Xeon Processor (Skylake, IBRS)",
},
{
+ .name = "Icelake-Client",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 126,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_WBNOINVD,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_INTEL_PT,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ /* Missing: XSAVES (not supported by some Linux versions,
+ * including v4.1 to v4.12).
+ * KVM doesn't yet expose any XSAVES state save component,
+ * and the only one defined in Skylake (processor tracing)
+ * probably will block migration anyway.
+ */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Icelake)",
+ },
+ {
+ .name = "Icelake-Server",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 134,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_WBNOINVD,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
+ CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_INTEL_PT,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_PCONFIG | CPUID_7_0_EDX_SPEC_CTRL |
+ CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ /* Missing: XSAVES (not supported by some Linux versions,
+ * including v4.1 to v4.12).
+ * KVM doesn't yet expose any XSAVES state save component,
+ * and the only one defined in Skylake (processor tracing)
+ * probably will block migration anyway.
+ */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon Processor (Icelake)",
+ },
+ {
.name = "KnightsMill",
.level = 0xd,
.vendor = CPUID_VENDOR_INTEL,
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c18863ec7a..9cad5812cd 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -354,6 +354,8 @@ typedef enum X86Seg {
#define MSR_TSC_ADJUST 0x0000003b
#define MSR_IA32_SPEC_CTRL 0x48
#define MSR_VIRT_SSBD 0xc001011f
+#define MSR_IA32_PRED_CMD 0x49
+#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define MSR_IA32_TSCDEADLINE 0x6e0
#define FEATURE_CONTROL_LOCKED (1<<0)
@@ -687,9 +689,13 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
+#define CPUID_7_0_EDX_PCONFIG (1U << 18) /* Platform Configuration */
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Speculation Control */
+#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /* Arch Capabilities */
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) /* Speculative Store Bypass Disable */
+#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) /* Write back and
+ do not invalidate cache */
#define CPUID_8000_0008_EBX_IBPB (1U << 12) /* Indirect Branch Prediction Barrier */
#define CPUID_XSAVE_XSAVEOPT (1U << 0)
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
index cfe1735e0e..009202cf64 100644
--- a/target/mips/cpu.h
+++ b/target/mips/cpu.h
@@ -323,6 +323,7 @@ struct CPUMIPSState {
target_ulong CP0_BadVAddr;
uint32_t CP0_BadInstr;
uint32_t CP0_BadInstrP;
+ uint32_t CP0_BadInstrX;
int32_t CP0_Count;
target_ulong CP0_EntryHi;
#define CP0EnHi_EHINV 10
@@ -388,26 +389,27 @@ struct CPUMIPSState {
target_ulong CP0_CMGCRBase;
int32_t CP0_Config0;
#define CP0C0_M 31
-#define CP0C0_K23 28
-#define CP0C0_KU 25
+#define CP0C0_K23 28 /* 30..28 */
+#define CP0C0_KU 25 /* 27..25 */
#define CP0C0_MDU 20
#define CP0C0_MM 18
#define CP0C0_BM 16
+#define CP0C0_Impl 16 /* 24..16 */
#define CP0C0_BE 15
-#define CP0C0_AT 13
-#define CP0C0_AR 10
-#define CP0C0_MT 7
+#define CP0C0_AT 13 /* 14..13 */
+#define CP0C0_AR 10 /* 12..10 */
+#define CP0C0_MT 7 /* 9..7 */
#define CP0C0_VI 3
-#define CP0C0_K0 0
+#define CP0C0_K0 0 /* 2..0 */
int32_t CP0_Config1;
#define CP0C1_M 31
-#define CP0C1_MMU 25
-#define CP0C1_IS 22
-#define CP0C1_IL 19
-#define CP0C1_IA 16
-#define CP0C1_DS 13
-#define CP0C1_DL 10
-#define CP0C1_DA 7
+#define CP0C1_MMU 25 /* 30..25 */
+#define CP0C1_IS 22 /* 24..22 */
+#define CP0C1_IL 19 /* 21..19 */
+#define CP0C1_IA 16 /* 18..16 */
+#define CP0C1_DS 13 /* 15..13 */
+#define CP0C1_DL 10 /* 12..10 */
+#define CP0C1_DA 7 /* 9..7 */
#define CP0C1_C2 6
#define CP0C1_MD 5
#define CP0C1_PC 4
@@ -417,67 +419,85 @@ struct CPUMIPSState {
#define CP0C1_FP 0
int32_t CP0_Config2;
#define CP0C2_M 31
-#define CP0C2_TU 28
-#define CP0C2_TS 24
-#define CP0C2_TL 20
-#define CP0C2_TA 16
-#define CP0C2_SU 12
-#define CP0C2_SS 8
-#define CP0C2_SL 4
-#define CP0C2_SA 0
+#define CP0C2_TU 28 /* 30..28 */
+#define CP0C2_TS 24 /* 27..24 */
+#define CP0C2_TL 20 /* 23..20 */
+#define CP0C2_TA 16 /* 19..16 */
+#define CP0C2_SU 12 /* 15..12 */
+#define CP0C2_SS 8 /* 11..8 */
+#define CP0C2_SL 4 /* 7..4 */
+#define CP0C2_SA 0 /* 3..0 */
int32_t CP0_Config3;
-#define CP0C3_M 31
-#define CP0C3_BPG 30
-#define CP0C3_CMGCR 29
-#define CP0C3_MSAP 28
-#define CP0C3_BP 27
-#define CP0C3_BI 26
-#define CP0C3_SC 25
-#define CP0C3_IPLW 21
-#define CP0C3_MMAR 18
-#define CP0C3_MCU 17
-#define CP0C3_ISA_ON_EXC 16
-#define CP0C3_ISA 14
-#define CP0C3_ULRI 13
-#define CP0C3_RXI 12
-#define CP0C3_DSP2P 11
-#define CP0C3_DSPP 10
-#define CP0C3_LPA 7
-#define CP0C3_VEIC 6
-#define CP0C3_VInt 5
-#define CP0C3_SP 4
-#define CP0C3_CDMM 3
-#define CP0C3_MT 2
-#define CP0C3_SM 1
-#define CP0C3_TL 0
+#define CP0C3_M 31
+#define CP0C3_BPG 30
+#define CP0C3_CMGCR 29
+#define CP0C3_MSAP 28
+#define CP0C3_BP 27
+#define CP0C3_BI 26
+#define CP0C3_SC 25
+#define CP0C3_PW 24
+#define CP0C3_VZ 23
+#define CP0C3_IPLW 21 /* 22..21 */
+#define CP0C3_MMAR 18 /* 20..18 */
+#define CP0C3_MCU 17
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ISA 14 /* 15..14 */
+#define CP0C3_ULRI 13
+#define CP0C3_RXI 12
+#define CP0C3_DSP2P 11
+#define CP0C3_DSPP 10
+#define CP0C3_CTXTC 9
+#define CP0C3_ITL 8
+#define CP0C3_LPA 7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP 4
+#define CP0C3_CDMM 3
+#define CP0C3_MT 2
+#define CP0C3_SM 1
+#define CP0C3_TL 0
int32_t CP0_Config4;
int32_t CP0_Config4_rw_bitmask;
-#define CP0C4_M 31
-#define CP0C4_IE 29
-#define CP0C4_AE 28
-#define CP0C4_KScrExist 16
-#define CP0C4_MMUExtDef 14
-#define CP0C4_FTLBPageSize 8
-#define CP0C4_FTLBWays 4
-#define CP0C4_FTLBSets 0
-#define CP0C4_MMUSizeExt 0
+#define CP0C4_M 31
+#define CP0C4_IE 29 /* 30..29 */
+#define CP0C4_AE 28
+#define CP0C4_VTLBSizeExt 24 /* 27..24 */
+#define CP0C4_KScrExist 16
+#define CP0C4_MMUExtDef 14
+#define CP0C4_FTLBPageSize 8 /* 12..8 */
+/* bit layout if MMUExtDef=1 */
+#define CP0C4_MMUSizeExt 0 /* 7..0 */
+/* bit layout if MMUExtDef=2 */
+#define CP0C4_FTLBWays 4 /* 7..4 */
+#define CP0C4_FTLBSets 0 /* 3..0 */
int32_t CP0_Config5;
int32_t CP0_Config5_rw_bitmask;
-#define CP0C5_M 31
-#define CP0C5_K 30
-#define CP0C5_CV 29
-#define CP0C5_EVA 28
-#define CP0C5_MSAEn 27
-#define CP0C5_XNP 13
-#define CP0C5_UFE 9
-#define CP0C5_FRE 8
-#define CP0C5_VP 7
-#define CP0C5_SBRI 6
-#define CP0C5_MVH 5
-#define CP0C5_LLB 4
-#define CP0C5_MRP 3
-#define CP0C5_UFR 2
-#define CP0C5_NFExists 0
+#define CP0C5_M 31
+#define CP0C5_K 30
+#define CP0C5_CV 29
+#define CP0C5_EVA 28
+#define CP0C5_MSAEn 27
+#define CP0C5_PMJ 23 /* 25..23 */
+#define CP0C5_WR2 22
+#define CP0C5_NMS 21
+#define CP0C5_ULS 20
+#define CP0C5_XPA 19
+#define CP0C5_CRCP 18
+#define CP0C5_MI 17
+#define CP0C5_GI 15 /* 16..15 */
+#define CP0C5_CA2 14
+#define CP0C5_XNP 13
+#define CP0C5_DEC 11
+#define CP0C5_L2C 10
+#define CP0C5_UFE 9
+#define CP0C5_FRE 8
+#define CP0C5_VP 7
+#define CP0C5_SBRI 6
+#define CP0C5_MVH 5
+#define CP0C5_LLB 4
+#define CP0C5_MRP 3
+#define CP0C5_UFR 2
+#define CP0C5_NFExists 0
int32_t CP0_Config6;
int32_t CP0_Config7;
uint64_t CP0_MAAR[MIPS_MAAR_MAX];
diff --git a/target/mips/helper.c b/target/mips/helper.c
index 8cf91ce339..e215af9a41 100644
--- a/target/mips/helper.c
+++ b/target/mips/helper.c
@@ -502,7 +502,9 @@ static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
break;
}
/* Raise exception */
- env->CP0_BadVAddr = address;
+ if (!(env->hflags & MIPS_HFLAG_DM)) {
+ env->CP0_BadVAddr = address;
+ }
env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
((address >> 9) & 0x007ffff0);
env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
diff --git a/target/mips/machine.c b/target/mips/machine.c
index 20100d5adb..5ba78acd6d 100644
--- a/target/mips/machine.c
+++ b/target/mips/machine.c
@@ -212,8 +212,8 @@ const VMStateDescription vmstate_tlb = {
const VMStateDescription vmstate_mips_cpu = {
.name = "cpu",
- .version_id = 10,
- .minimum_version_id = 10,
+ .version_id = 11,
+ .minimum_version_id = 11,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
/* Active TC */
@@ -266,6 +266,7 @@ const VMStateDescription vmstate_mips_cpu = {
VMSTATE_UINTTL(env.CP0_BadVAddr, MIPSCPU),
VMSTATE_UINT32(env.CP0_BadInstr, MIPSCPU),
VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstrX, MIPSCPU),
VMSTATE_INT32(env.CP0_Count, MIPSCPU),
VMSTATE_UINTTL(env.CP0_EntryHi, MIPSCPU),
VMSTATE_INT32(env.CP0_Compare, MIPSCPU),
diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c
index 41d3634289..0b2663b73a 100644
--- a/target/mips/op_helper.c
+++ b/target/mips/op_helper.c
@@ -271,7 +271,9 @@ static inline hwaddr do_translate_address(CPUMIPSState *env,
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
{ \
if (arg & almask) { \
- env->CP0_BadVAddr = arg; \
+ if (!(env->hflags & MIPS_HFLAG_DM)) { \
+ env->CP0_BadVAddr = arg; \
+ } \
do_raise_exception(env, EXCP_AdEL, GETPC()); \
} \
env->lladdr = do_translate_address(env, arg, 0, GETPC()); \
@@ -291,7 +293,9 @@ target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1, \
target_long tmp; \
\
if (arg2 & almask) { \
- env->CP0_BadVAddr = arg2; \
+ if (!(env->hflags & MIPS_HFLAG_DM)) { \
+ env->CP0_BadVAddr = arg2; \
+ } \
do_raise_exception(env, EXCP_AdES, GETPC()); \
} \
if (do_translate_address(env, arg2, 1, GETPC()) == env->lladdr) { \
@@ -2437,7 +2441,9 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
int error_code = 0;
int excp;
- env->CP0_BadVAddr = addr;
+ if (!(env->hflags & MIPS_HFLAG_DM)) {
+ env->CP0_BadVAddr = addr;
+ }
if (access_type == MMU_DATA_STORE) {
excp = EXCP_AdES;
diff --git a/target/mips/translate.c b/target/mips/translate.c
index 20b43c0337..bdd880bb77 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -902,8 +902,21 @@ enum {
OPC_MTTR = (0x0C << 21) | OPC_CP0,
OPC_WRPGPR = (0x0E << 21) | OPC_CP0,
OPC_C0 = (0x10 << 21) | OPC_CP0,
- OPC_C0_FIRST = (0x10 << 21) | OPC_CP0,
- OPC_C0_LAST = (0x1F << 21) | OPC_CP0,
+ OPC_C0_1 = (0x11 << 21) | OPC_CP0,
+ OPC_C0_2 = (0x12 << 21) | OPC_CP0,
+ OPC_C0_3 = (0x13 << 21) | OPC_CP0,
+ OPC_C0_4 = (0x14 << 21) | OPC_CP0,
+ OPC_C0_5 = (0x15 << 21) | OPC_CP0,
+ OPC_C0_6 = (0x16 << 21) | OPC_CP0,
+ OPC_C0_7 = (0x17 << 21) | OPC_CP0,
+ OPC_C0_8 = (0x18 << 21) | OPC_CP0,
+ OPC_C0_9 = (0x19 << 21) | OPC_CP0,
+ OPC_C0_A = (0x1A << 21) | OPC_CP0,
+ OPC_C0_B = (0x1B << 21) | OPC_CP0,
+ OPC_C0_C = (0x1C << 21) | OPC_CP0,
+ OPC_C0_D = (0x1D << 21) | OPC_CP0,
+ OPC_C0_E = (0x1E << 21) | OPC_CP0,
+ OPC_C0_F = (0x1F << 21) | OPC_CP0,
};
/* MFMC0 opcodes */
@@ -4884,12 +4897,11 @@ static void gen_mfhc0(DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char *rn = "invalid";
- CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
-
switch (reg) {
case 2:
switch (sel) {
case 0:
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
gen_mfhc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo0));
rn = "EntryLo0";
break;
@@ -4900,6 +4912,7 @@ static void gen_mfhc0(DisasContext *ctx, TCGv arg, int reg, int sel)
case 3:
switch (sel) {
case 0:
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
gen_mfhc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo1));
rn = "EntryLo1";
break;
@@ -4952,12 +4965,11 @@ static void gen_mthc0(DisasContext *ctx, TCGv arg, int reg, int sel)
const char *rn = "invalid";
uint64_t mask = ctx->PAMask >> 36;
- CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
-
switch (reg) {
case 2:
switch (sel) {
case 0:
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
tcg_gen_andi_tl(arg, arg, mask);
gen_mthc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo0));
rn = "EntryLo0";
@@ -4969,6 +4981,7 @@ static void gen_mthc0(DisasContext *ctx, TCGv arg, int reg, int sel)
case 3:
switch (sel) {
case 0:
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
tcg_gen_andi_tl(arg, arg, mask);
gen_mthc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo1));
rn = "EntryLo1";
@@ -5315,7 +5328,13 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP));
rn = "BadInstrP";
break;
- default:
+ case 3:
+ CP0_CHECK(ctx->bi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrX));
+ tcg_gen_andi_tl(arg, arg, ~0xffff);
+ rn = "BadInstrX";
+ break;
+ default:
goto cp0_unimplemented;
}
break;
@@ -5494,7 +5513,15 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 18:
switch (sel) {
- case 0 ... 7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
gen_helper_1e0i(mfc0_watchlo, arg, sel);
rn = "WatchLo";
break;
@@ -5504,7 +5531,15 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 19:
switch (sel) {
- case 0 ...7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
gen_helper_1e0i(mfc0_watchhi, arg, sel);
rn = "WatchHi";
break;
@@ -5630,7 +5665,10 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 27:
switch (sel) {
- case 0 ... 3:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
tcg_gen_movi_tl(arg, 0); /* unimplemented */
rn = "CacheErr";
break;
@@ -5701,7 +5739,12 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
- case 2 ... 7:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
CP0_CHECK(ctx->kscrexist & (1 << sel));
tcg_gen_ld_tl(arg, cpu_env,
offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
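
The KScratch arms keep their existing guard, CP0_CHECK(ctx->kscrexist & (1 << sel)) with the storage indexed as CP0_KScratch[sel-2], and only trade the "2 ... 7" range label for explicit cases. The guard itself is a small pattern worth noting: a per-selector presence bitmask, with the backing array offset by the first valid selector. A standalone sketch under those assumptions (struct and field names are illustrative):

    /* Selectors 2..7 map to scratch[0..5]; a bitmask says which exist. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cp0 {
        unsigned kscrexist;     /* bit n set => selector n is implemented */
        uint64_t kscratch[6];   /* backing store for selectors 2..7 */
    };

    static bool read_kscratch(const struct cp0 *c, int sel, uint64_t *out)
    {
        if (sel < 2 || sel > 7 || !(c->kscrexist & (1u << sel))) {
            return false;       /* would raise a reserved instruction */
        }
        *out = c->kscratch[sel - 2];
        return true;
    }

    int main(void)
    {
        struct cp0 c = { .kscrexist = (1u << 2) | (1u << 3) };
        uint64_t v;
        c.kscratch[0] = 0xdead;
        printf("%d\n", read_kscratch(&c, 2, &v));   /* 1 */
        printf("%d\n", read_kscratch(&c, 5, &v));   /* 0: not implemented */
        return 0;
    }
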
@@ -5984,6 +6027,10 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
/* ignored */
rn = "BadInstrP";
break;
+ case 3:
+ /* ignored */
+ rn = "BadInstrX";
+ break;
default:
goto cp0_unimplemented;
}
@@ -6167,7 +6214,15 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 18:
switch (sel) {
- case 0 ... 7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
gen_helper_0e1i(mtc0_watchlo, arg, sel);
rn = "WatchLo";
break;
@@ -6177,7 +6232,15 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 19:
switch (sel) {
- case 0 ... 7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
gen_helper_0e1i(mtc0_watchhi, arg, sel);
rn = "WatchHi";
break;
@@ -6315,7 +6378,10 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 27:
switch (sel) {
- case 0 ... 3:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
/* ignored */
rn = "CacheErr";
break;
@@ -6381,7 +6447,12 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
- case 2 ... 7:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
CP0_CHECK(ctx->kscrexist & (1 << sel));
tcg_gen_st_tl(arg, cpu_env,
offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
@@ -6667,6 +6738,12 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP));
rn = "BadInstrP";
break;
+ case 3:
+ CP0_CHECK(ctx->bi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrX));
+ tcg_gen_andi_tl(arg, arg, ~0xffff);
+ rn = "BadInstrX";
+ break;
default:
goto cp0_unimplemented;
}
@@ -6842,7 +6919,15 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 18:
switch (sel) {
- case 0 ... 7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
gen_helper_1e0i(dmfc0_watchlo, arg, sel);
rn = "WatchLo";
break;
@@ -6852,7 +6937,15 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 19:
switch (sel) {
- case 0 ... 7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
gen_helper_1e0i(mfc0_watchhi, arg, sel);
rn = "WatchHi";
break;
@@ -6975,7 +7068,10 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
case 27:
switch (sel) {
/* ignored */
- case 0 ... 3:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
tcg_gen_movi_tl(arg, 0); /* unimplemented */
rn = "CacheErr";
break;
@@ -7040,7 +7136,12 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
- case 2 ... 7:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
CP0_CHECK(ctx->kscrexist & (1 << sel));
tcg_gen_ld_tl(arg, cpu_env,
offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
@@ -7319,6 +7420,10 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
/* ignored */
rn = "BadInstrP";
break;
+ case 3:
+ /* ignored */
+ rn = "BadInstrX";
+ break;
default:
goto cp0_unimplemented;
}
@@ -7497,7 +7602,15 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 18:
switch (sel) {
- case 0 ... 7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
gen_helper_0e1i(mtc0_watchlo, arg, sel);
rn = "WatchLo";
break;
@@ -7507,7 +7620,15 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 19:
switch (sel) {
- case 0 ... 7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
gen_helper_0e1i(mtc0_watchhi, arg, sel);
rn = "WatchHi";
break;
@@ -7641,7 +7762,10 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
break;
case 27:
switch (sel) {
- case 0 ... 3:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
/* ignored */
rn = "CacheErr";
break;
@@ -7707,7 +7831,12 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
- case 2 ... 7:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
CP0_CHECK(ctx->kscrexist & (1 << sel));
tcg_gen_st_tl(arg, cpu_env,
offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
@@ -7843,7 +7972,14 @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd,
break;
case 16:
switch (sel) {
- case 0 ... 7:
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
gen_helper_mftc0_configx(t0, cpu_env, tcg_const_tl(sel));
break;
default:
@@ -12395,10 +12531,22 @@ enum {
/* PCREL Instructions perform PC-Relative address calculation. bits 20..16 */
enum {
ADDIUPC_00 = 0x00,
+ ADDIUPC_01 = 0x01,
+ ADDIUPC_02 = 0x02,
+ ADDIUPC_03 = 0x03,
+ ADDIUPC_04 = 0x04,
+ ADDIUPC_05 = 0x05,
+ ADDIUPC_06 = 0x06,
ADDIUPC_07 = 0x07,
AUIPC = 0x1e,
ALUIPC = 0x1f,
LWPC_08 = 0x08,
+ LWPC_09 = 0x09,
+ LWPC_0A = 0x0A,
+ LWPC_0B = 0x0B,
+ LWPC_0C = 0x0C,
+ LWPC_0D = 0x0D,
+ LWPC_0E = 0x0E,
LWPC_0F = 0x0F,
};
@@ -12833,12 +12981,16 @@ enum {
R6_LWM16 = 0x02,
R6_JRC16 = 0x03,
MOVEP = 0x04,
+ MOVEP_05 = 0x05,
+ MOVEP_06 = 0x06,
MOVEP_07 = 0x07,
R6_XOR16 = 0x08,
R6_OR16 = 0x09,
R6_SWM16 = 0x0a,
JALRC16 = 0x0b,
MOVEP_0C = 0x0c,
+ MOVEP_0D = 0x0d,
+ MOVEP_0E = 0x0e,
MOVEP_0F = 0x0f,
JRCADDIUSP = 0x13,
R6_BREAK16 = 0x1b,
@@ -13152,12 +13304,18 @@ static void gen_pool16c_r6_insn(DisasContext *ctx)
gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2);
} else {
/* JRC16 */
- int rs = extract32(ctx->opcode, 5, 5);
+ rs = extract32(ctx->opcode, 5, 5);
gen_compute_branch(ctx, OPC_JR, 2, rs, 0, 0, 0);
}
break;
- case MOVEP ... MOVEP_07:
- case MOVEP_0C ... MOVEP_0F:
+ case MOVEP:
+ case MOVEP_05:
+ case MOVEP_06:
+ case MOVEP_07:
+ case MOVEP_0C:
+ case MOVEP_0D:
+ case MOVEP_0E:
+ case MOVEP_0F:
{
int enc_dest = uMIPS_RD(ctx->opcode);
int enc_rt = uMIPS_RS2(ctx->opcode);
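
Besides expanding the MOVEP range, the pool16c hunk above removes a redundant declaration in the JRC16 arm: the inner "int rs" shadowed the rs already declared in the enclosing scope, so the extracted field now lands in the outer variable (the ADDIUPC hunk further down applies the same fix to "offset"). A minimal standalone sketch of why such shadowing is a trap:

    /* The inner declaration creates a second, short-lived rs; anything
       assigned to it is lost once the block ends. */
    #include <stdio.h>

    int main(void)
    {
        int rs = 0;                        /* outer variable, as in the decoder */
        {
            int rs = 31;                   /* shadows the outer rs */
            printf("inner rs = %d\n", rs); /* 31 */
        }
        printf("outer rs = %d\n", rs);     /* still 0 */
        return 0;
    }
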
@@ -14160,8 +14318,8 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
case SDP:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
- /* Fallthrough */
#endif
+ /* fall through */
case LWP:
case SWP:
gen_ldst_pair(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12));
@@ -14171,8 +14329,8 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
case SDM:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
- /* Fallthrough */
#endif
+ /* fall through */
case LWM32:
case SWM32:
gen_ldst_multiple(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12));
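
The two hunks above move the fall-through annotation from above the #endif to below it and switch to the lower-case spelling, so after preprocessing the comment sits immediately in front of the case label it documents, which is where tools such as GCC's -Wimplicit-fallthrough look for fall-through markers. A standalone sketch of an intentional, annotated fall-through:

    /* Deliberate fall-through, marked right before the next label so a
       compiler warning about implicit fall-through treats it as intended. */
    #include <stdio.h>

    static int decode(int op, int value)
    {
        switch (op) {
        case 2:
            value *= 2;
            /* fall through */
        case 1:
            value += 1;
            break;
        default:
            break;
        }
        return value;
    }

    int main(void)
    {
        printf("%d\n", decode(2, 10));   /* 21: doubled, then incremented */
        printf("%d\n", decode(1, 10));   /* 11 */
        return 0;
    }
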
@@ -15135,7 +15293,14 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
if (ctx->insn_flags & ISA_MIPS32R6) {
/* PCREL: ADDIUPC, AUIPC, ALUIPC, LWPC */
switch ((ctx->opcode >> 16) & 0x1f) {
- case ADDIUPC_00 ... ADDIUPC_07:
+ case ADDIUPC_00:
+ case ADDIUPC_01:
+ case ADDIUPC_02:
+ case ADDIUPC_03:
+ case ADDIUPC_04:
+ case ADDIUPC_05:
+ case ADDIUPC_06:
+ case ADDIUPC_07:
gen_pcrel(ctx, OPC_ADDIUPC, ctx->base.pc_next & ~0x3, rt);
break;
case AUIPC:
@@ -15144,7 +15309,14 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
case ALUIPC:
gen_pcrel(ctx, OPC_ALUIPC, ctx->base.pc_next, rt);
break;
- case LWPC_08 ... LWPC_0F:
+ case LWPC_08:
+ case LWPC_09:
+ case LWPC_0A:
+ case LWPC_0B:
+ case LWPC_0C:
+ case LWPC_0D:
+ case LWPC_0E:
+ case LWPC_0F:
gen_pcrel(ctx, R6_OPC_LWPC, ctx->base.pc_next & ~0x3, rt);
break;
default:
@@ -15154,7 +15326,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
} else {
/* ADDIUPC */
int reg = mmreg(ZIMM(ctx->opcode, 23, 3));
- int offset = SIMM(ctx->opcode, 0, 23) << 2;
+ offset = SIMM(ctx->opcode, 0, 23) << 2;
gen_addiupc(ctx, reg, offset, 0, 0);
}
@@ -17231,7 +17403,10 @@ static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx)
case OPC_LSA:
gen_lsa(ctx, op1, rd, rs, rt, extract32(ctx->opcode, 6, 2));
break;
- case OPC_MULT ... OPC_DIVU:
+ case OPC_MULT:
+ case OPC_MULTU:
+ case OPC_DIV:
+ case OPC_DIVU:
op2 = MASK_R6_MULDIV(ctx->opcode);
switch (op2) {
case R6_OPC_MUL:
@@ -17291,7 +17466,11 @@ static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx)
generate_exception_end(ctx, EXCP_RI);
}
break;
- case OPC_DMULT ... OPC_DDIVU:
+ case OPC_DMULT:
+ case OPC_DMULTU:
+ case OPC_DDIV:
+ case OPC_DDIVU:
+
op2 = MASK_R6_MULDIV(ctx->opcode);
switch (op2) {
case R6_OPC_DMUL:
@@ -17370,7 +17549,10 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx)
gen_muldiv(ctx, op1, 0, rs, rt);
break;
#if defined(TARGET_MIPS64)
- case OPC_DMULT ... OPC_DDIVU:
+ case OPC_DMULT:
+ case OPC_DMULTU:
+ case OPC_DDIV:
+ case OPC_DDIVU:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
gen_muldiv(ctx, op1, 0, rs, rt);
@@ -17437,7 +17619,10 @@ static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx)
break;
}
break;
- case OPC_ADD ... OPC_SUBU:
+ case OPC_ADD:
+ case OPC_ADDU:
+ case OPC_SUB:
+ case OPC_SUBU:
gen_arith(ctx, op1, rd, rs, rt);
break;
case OPC_SLLV: /* Shifts */
@@ -17473,7 +17658,11 @@ static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx)
case OPC_JALR:
gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4);
break;
- case OPC_TGE ... OPC_TEQ: /* Traps */
+ case OPC_TGE: /* Traps */
+ case OPC_TGEU:
+ case OPC_TLT:
+ case OPC_TLTU:
+ case OPC_TEQ:
case OPC_TNE:
check_insn(ctx, ISA_MIPS2);
gen_trap(ctx, op1, rs, rt, -1);
@@ -17549,7 +17738,10 @@ static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx)
break;
}
break;
- case OPC_DADD ... OPC_DSUBU:
+ case OPC_DADD:
+ case OPC_DADDU:
+ case OPC_DSUB:
+ case OPC_DSUBU:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
gen_arith(ctx, op1, rd, rs, rt);
@@ -17607,8 +17799,10 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx)
op1 = MASK_SPECIAL2(ctx->opcode);
switch (op1) {
- case OPC_MADD ... OPC_MADDU: /* Multiply and add/sub */
- case OPC_MSUB ... OPC_MSUBU:
+ case OPC_MADD: /* Multiply and add/sub */
+ case OPC_MADDU:
+ case OPC_MSUB:
+ case OPC_MSUBU:
check_insn(ctx, ISA_MIPS32);
gen_muldiv(ctx, op1, rd & 3, rs, rt);
break;
@@ -17705,7 +17899,8 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
}
op2 = MASK_BSHFL(ctx->opcode);
switch (op2) {
- case OPC_ALIGN ... OPC_ALIGN_END:
+ case OPC_ALIGN:
+ case OPC_ALIGN_END:
gen_align(ctx, OPC_ALIGN, rd, rs, rt, sa & 3);
break;
case OPC_BITSWAP:
@@ -17730,7 +17925,8 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
}
op2 = MASK_DBSHFL(ctx->opcode);
switch (op2) {
- case OPC_DALIGN ... OPC_DALIGN_END:
+ case OPC_DALIGN:
+ case OPC_DALIGN_END:
gen_align(ctx, OPC_DALIGN, rd, rs, rt, sa & 7);
break;
case OPC_DBITSWAP:
@@ -17759,9 +17955,12 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx)
op1 = MASK_SPECIAL3(ctx->opcode);
switch (op1) {
- case OPC_DIV_G_2E ... OPC_DIVU_G_2E:
- case OPC_MOD_G_2E ... OPC_MODU_G_2E:
- case OPC_MULT_G_2E ... OPC_MULTU_G_2E:
+ case OPC_DIV_G_2E:
+ case OPC_DIVU_G_2E:
+ case OPC_MOD_G_2E:
+ case OPC_MODU_G_2E:
+ case OPC_MULT_G_2E:
+ case OPC_MULTU_G_2E:
/* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have
* the same mask and op1. */
if ((ctx->insn_flags & ASE_DSPR2) && (op1 == OPC_MULT_G_2E)) {
@@ -18025,9 +18224,12 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx)
}
break;
#if defined(TARGET_MIPS64)
- case OPC_DDIV_G_2E ... OPC_DDIVU_G_2E:
- case OPC_DMULT_G_2E ... OPC_DMULTU_G_2E:
- case OPC_DMOD_G_2E ... OPC_DMODU_G_2E:
+ case OPC_DDIV_G_2E:
+ case OPC_DDIVU_G_2E:
+ case OPC_DMULT_G_2E:
+ case OPC_DMULTU_G_2E:
+ case OPC_DMOD_G_2E:
+ case OPC_DMODU_G_2E:
check_insn(ctx, INSN_LOONGSON2E);
gen_loongson_integer(ctx, op1, rd, rs, rt);
break;
@@ -18289,18 +18491,25 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
*/
if (ctx->eva) {
switch (op1) {
- case OPC_LWLE ... OPC_LWRE:
+ case OPC_LWLE:
+ case OPC_LWRE:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
/* fall through */
- case OPC_LBUE ... OPC_LHUE:
- case OPC_LBE ... OPC_LWE:
+ case OPC_LBUE:
+ case OPC_LHUE:
+ case OPC_LBE:
+ case OPC_LHE:
+ case OPC_LLE:
+ case OPC_LWE:
check_cp0_enabled(ctx);
gen_ld(ctx, op1, rt, rs, imm);
return;
- case OPC_SWLE ... OPC_SWRE:
+ case OPC_SWLE:
+ case OPC_SWRE:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
/* fall through */
- case OPC_SBE ... OPC_SHE:
+ case OPC_SBE:
+ case OPC_SHE:
case OPC_SWE:
check_cp0_enabled(ctx);
gen_st(ctx, op1, rt, rs, imm);
@@ -18332,7 +18541,8 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
case OPC_BSHFL:
op2 = MASK_BSHFL(ctx->opcode);
switch (op2) {
- case OPC_ALIGN ... OPC_ALIGN_END:
+ case OPC_ALIGN:
+ case OPC_ALIGN_END:
case OPC_BITSWAP:
check_insn(ctx, ISA_MIPS32R6);
decode_opc_special3_r6(env, ctx);
@@ -18344,8 +18554,12 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
}
break;
#if defined(TARGET_MIPS64)
- case OPC_DEXTM ... OPC_DEXT:
- case OPC_DINSM ... OPC_DINS:
+ case OPC_DEXTM:
+ case OPC_DEXTU:
+ case OPC_DEXT:
+ case OPC_DINSM:
+ case OPC_DINSU:
+ case OPC_DINS:
check_insn(ctx, ISA_MIPS64R2);
check_mips_64(ctx);
gen_bitops(ctx, op1, rt, rs, sa, rd);
@@ -18353,7 +18567,8 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
case OPC_DBSHFL:
op2 = MASK_DBSHFL(ctx->opcode);
switch (op2) {
- case OPC_DALIGN ... OPC_DALIGN_END:
+ case OPC_DALIGN:
+ case OPC_DALIGN_END:
case OPC_DBITSWAP:
check_insn(ctx, ISA_MIPS32R6);
decode_opc_special3_r6(env, ctx);
@@ -19584,7 +19799,12 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4);
}
break;
- case OPC_TGEI ... OPC_TEQI: /* REGIMM traps */
+ case OPC_TGEI: /* REGIMM traps */
+ case OPC_TGEIU:
+ case OPC_TLTI:
+ case OPC_TLTIU:
+ case OPC_TEQI:
+
case OPC_TNEI:
check_insn(ctx, ISA_MIPS2);
check_insn_opc_removed(ctx, ISA_MIPS32R6);
@@ -19647,7 +19867,22 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
gen_cp0(env, ctx, op1, rt, rd);
#endif /* !CONFIG_USER_ONLY */
break;
- case OPC_C0_FIRST ... OPC_C0_LAST:
+ case OPC_C0:
+ case OPC_C0_1:
+ case OPC_C0_2:
+ case OPC_C0_3:
+ case OPC_C0_4:
+ case OPC_C0_5:
+ case OPC_C0_6:
+ case OPC_C0_7:
+ case OPC_C0_8:
+ case OPC_C0_9:
+ case OPC_C0_A:
+ case OPC_C0_B:
+ case OPC_C0_C:
+ case OPC_C0_D:
+ case OPC_C0_E:
+ case OPC_C0_F:
#ifndef CONFIG_USER_ONLY
gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd);
#endif /* !CONFIG_USER_ONLY */
@@ -19759,7 +19994,8 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
case OPC_XORI:
gen_logic_imm(ctx, op, rt, rs, imm);
break;
- case OPC_J ... OPC_JAL: /* Jump */
+ case OPC_J: /* Jump */
+ case OPC_JAL:
offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
gen_compute_branch(ctx, op, 4, rs, rt, offset, 4);
break;
@@ -19826,15 +20062,20 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
case OPC_LWR:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
/* Fallthrough */
- case OPC_LB ... OPC_LH:
- case OPC_LW ... OPC_LHU:
+ case OPC_LB:
+ case OPC_LH:
+ case OPC_LW:
+ case OPC_LWPC:
+ case OPC_LBU:
+ case OPC_LHU:
gen_ld(ctx, op, rt, rs, imm);
break;
case OPC_SWL:
case OPC_SWR:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
/* fall through */
- case OPC_SB ... OPC_SH:
+ case OPC_SB:
+ case OPC_SH:
case OPC_SW:
gen_st(ctx, op, rt, rs, imm);
break;
@@ -19874,6 +20115,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
case OPC_MTHC1:
check_cp1_enabled(ctx);
check_insn(ctx, ISA_MIPS32R2);
+ /* fall through */
case OPC_MFC1:
case OPC_CFC1:
case OPC_MTC1:
@@ -20105,7 +20347,8 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
#if defined(TARGET_MIPS64)
/* MIPS64 opcodes */
- case OPC_LDL ... OPC_LDR:
+ case OPC_LDL:
+ case OPC_LDR:
case OPC_LLD:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
/* fall through */
@@ -20115,7 +20358,8 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
check_mips_64(ctx);
gen_ld(ctx, op, rt, rs, imm);
break;
- case OPC_SDL ... OPC_SDR:
+ case OPC_SDL:
+ case OPC_SDR:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
/* fall through */
case OPC_SD: