author     Peter Maydell <peter.maydell@linaro.org>   2016-02-09 19:34:46 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2016-02-09 19:34:46 +0000
commit     c9f19dff101e2c2cf3fa3967eceec2833e845e40 (patch)
tree       5bcc3ba8281fc7902d3c99bbbf1a7097384c711b
parent     f075c89f0a9cb31daf38892371d2822177505706 (diff)
parent     150dcd1aed6f9ebcf370dbb9b666e7d7c6d908e2 (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* switch to C11 atomics (Alex)
* Coverity fixes for IPMI (Corey), i386 (Paolo), qemu-char (Paolo)
* at long last, fail on wrong .pc files if -m32 is in use (Daniel)
* qemu-char regression fix (Daniel)
* SAS1068 device (Paolo)
* memory region docs improvements (Peter)
* target-i386 cleanups (Richard)
* qemu-nbd docs improvements (Sitsofe)
* thread-safe memory hotplug (Stefan)

# gpg: Signature made Tue 09 Feb 2016 16:09:30 GMT using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream: (33 commits)
  qemu-char, io: fix ordering of arguments for UDP socket creation
  MAINTAINERS: add all-match entry for qemu-devel@
  get_maintainer.pl: fall back to git if only lists are found
  target-i386: fix PSE36 mode
  docs/memory.txt: Improve list of different memory regions
  ipmi_bmc_sim: Add break to correct watchdog NMI check
  ipmi_bmc_sim: Fix off by one in check.
  ipmi: do not take/drop iothread lock
  target-i386: Deconstruct the cpu_T array
  target-i386: Tidy gen_add_A0_im
  target-i386: Rewrite leave
  target-i386: Rewrite gen_enter inline
  target-i386: Use gen_lea_v_seg in pusha/popa
  target-i386: Access segs via TCG registers
  target-i386: Use gen_lea_v_seg in stack subroutines
  target-i386: Use gen_lea_v_seg in gen_lea_modrm
  target-i386: Introduce mo_stacksize
  target-i386: Create gen_lea_v_seg
  char: fix repeated registration of tcp chardev I/O handlers
  kvm-all: trace: strerror fixup
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  MAINTAINERS               |    5
-rwxr-xr-x  configure                 |   24
-rw-r--r--  default-configs/pci.mak   |    1
-rw-r--r--  docs/memory.txt           |   26
-rw-r--r--  exec.c                    |   75
-rw-r--r--  hw/ipmi/ipmi.c            |    2
-rw-r--r--  hw/ipmi/ipmi_bmc_sim.c    |    4
-rw-r--r--  hw/scsi/Makefile.objs     |    1
-rw-r--r--  hw/scsi/mpi.h             | 1153
-rw-r--r--  hw/scsi/mptconfig.c       |  904
-rw-r--r--  hw/scsi/mptendian.c       |  204
-rw-r--r--  hw/scsi/mptsas.c          | 1441
-rw-r--r--  hw/scsi/mptsas.h          |  100
-rw-r--r--  hw/scsi/scsi-disk.c       |   23
-rw-r--r--  hw/scsi/scsi-generic.c    |   92
-rw-r--r--  include/exec/ram_addr.h   |  193
-rw-r--r--  include/hw/pci/pci_ids.h  |    1
-rw-r--r--  include/hw/scsi/scsi.h    |    3
-rw-r--r--  include/qemu/atomic.h     |  192
-rw-r--r--  io/channel-socket.c       |    2
-rw-r--r--  kvm-all.c                 |    4
-rw-r--r--  migration/ram.c           |    4
-rw-r--r--  nbd/server.c              |   20
-rw-r--r--  qemu-char.c               |   10
-rw-r--r--  qemu-nbd.texi             |   80
-rwxr-xr-x  scripts/get_maintainer.pl |    2
-rwxr-xr-x  scripts/kvm/kvm_stat      |   23
-rw-r--r--  target-i386/helper.c      |    4
-rw-r--r--  target-i386/helper.h      |    4
-rw-r--r--  target-i386/seg_helper.c  |   74
-rw-r--r--  target-i386/translate.c   | 1725
-rw-r--r--  trace-events              |   22
32 files changed, 5223 insertions, 1195 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 2d6ee1721d..02710f857c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -52,6 +52,11 @@ General Project Administration
------------------------------
M: Peter Maydell <peter.maydell@linaro.org>
+All patches CC here
+L: qemu-devel@nongnu.org
+F: *
+F: */
+
Responsible Disclosure, Reporting Security Issues
------------------------------
W: http://wiki.qemu.org/SecurityProcess
diff --git a/configure b/configure
index d4411a1314..c9cf1c91f5 100755
--- a/configure
+++ b/configure
@@ -3063,6 +3063,30 @@ for i in $glib_modules; do
fi
done
+# Sanity check that the current size_t matches the
+# size that glib thinks it should be. This catches
+# problems on multi-arch where people try to build
+# 32-bit QEMU while pointing at 64-bit glib headers
+cat > $TMPC <<EOF
+#include <glib.h>
+#include <unistd.h>
+
+#define QEMU_BUILD_BUG_ON(x) \
+ typedef char qemu_build_bug_on[(x)?-1:1] __attribute__((unused));
+
+int main(void) {
+ QEMU_BUILD_BUG_ON(sizeof(size_t) != GLIB_SIZEOF_SIZE_T);
+ return 0;
+}
+EOF
+
+if ! compile_prog "-Werror $CFLAGS" "$LIBS" ; then
+ error_exit "sizeof(size_t) doesn't match GLIB_SIZEOF_SIZE_T."\
+ "You probably need to set PKG_CONFIG_LIBDIR"\
+ "to point to the right pkg-config files for your"\
+ "build target"
+fi
+
# g_test_trap_subprocess added in 2.38. Used by some tests.
glib_subprocess=yes
if ! $pkg_config --atleast-version=2.38 glib-2.0; then
diff --git a/default-configs/pci.mak b/default-configs/pci.mak
index f250119e1b..4fa9a28ef6 100644
--- a/default-configs/pci.mak
+++ b/default-configs/pci.mak
@@ -15,6 +15,7 @@ CONFIG_ES1370=y
CONFIG_LSI_SCSI_PCI=y
CONFIG_VMW_PVSCSI_SCSI_PCI=y
CONFIG_MEGASAS_SCSI_PCI=y
+CONFIG_MPTSAS_SCSI_PCI=y
CONFIG_RTL8139_PCI=y
CONFIG_E1000_PCI=y
CONFIG_VMXNET3_PCI=y
diff --git a/docs/memory.txt b/docs/memory.txt
index 2ceb348942..8745f7603f 100644
--- a/docs/memory.txt
+++ b/docs/memory.txt
@@ -26,14 +26,28 @@ These represent memory as seen from the CPU or a device's viewpoint.
Types of regions
----------------
-There are four types of memory regions (all represented by a single C type
+There are multiple types of memory regions (all represented by a single C type
MemoryRegion):
- RAM: a RAM region is simply a range of host memory that can be made available
to the guest.
+ You typically initialize these with memory_region_init_ram(). Some special
+ purposes require the variants memory_region_init_resizeable_ram(),
+ memory_region_init_ram_from_file(), or memory_region_init_ram_ptr().
- MMIO: a range of guest memory that is implemented by host callbacks;
each read or write causes a callback to be called on the host.
+ You initialize these with memory_region_init_io(), passing it a MemoryRegionOps
+ structure describing the callbacks.
+
+- ROM: a ROM memory region works like RAM for reads (directly accessing
+ a region of host memory), but like MMIO for writes (invoking a callback).
+ You initialize these with memory_region_init_rom_device().
+
+- IOMMU region: an IOMMU region translates addresses of accesses made to it
+ and forwards them to some other target memory region. As the name suggests,
+ these are only needed for modelling an IOMMU, not for simple devices.
+ You initialize these with memory_region_init_iommu().
- container: a container simply includes other memory regions, each at
a different offset. Containers are useful for grouping several regions
@@ -45,12 +59,22 @@ MemoryRegion):
can overlay a subregion of RAM with MMIO or ROM, or a PCI controller
that does not prevent cards from claiming overlapping BARs.
+ You initialize a pure container with memory_region_init().
+
- alias: a subsection of another region. Aliases allow a region to be
split apart into discontiguous regions. Examples of uses are memory banks
used when the guest address space is smaller than the amount of RAM
addressed, or a memory controller that splits main memory to expose a "PCI
hole". Aliases may point to any type of region, including other aliases,
but an alias may not point back to itself, directly or indirectly.
+ You initialize these with memory_region_init_alias().
+
+- reservation region: a reservation region is primarily for debugging.
+ It claims I/O space that is not supposed to be handled by QEMU itself.
+ The typical use is to track parts of the address space which will be
+ handled by the host kernel when KVM is enabled.
+ You initialize these with memory_region_init_reservation(), or by
+ passing a NULL callback parameter to memory_region_init_io().
It is valid to add subregions to a region which is not a pure container
(that is, to an MMIO, RAM or ROM region). This means that the region
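
A minimal sketch of how the initializers named above fit together in a
device model; everything mydev_* below is illustrative, not part of the
patch:

    /* Illustrative only: a device exposing one RAM and one MMIO region. */
    static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
    {
        return 0;   /* a real device would decode addr and return state */
    }

    static void mydev_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
    {
        /* a real device would decode addr and update state */
    }

    static const MemoryRegionOps mydev_ops = {
        .read = mydev_read,
        .write = mydev_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    };

    static void mydev_init_regions(DeviceState *dev, MemoryRegion *ram,
                                   MemoryRegion *mmio)
    {
        memory_region_init_ram(ram, OBJECT(dev), "mydev.ram", 64 * 1024,
                               &error_abort);
        memory_region_init_io(mmio, OBJECT(dev), &mydev_ops, dev,
                              "mydev.mmio", 0x1000);
    }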
diff --git a/exec.c b/exec.c
index ab373604d7..7d67c11601 100644
--- a/exec.c
+++ b/exec.c
@@ -980,8 +980,9 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
+ DirtyMemoryBlocks *blocks;
unsigned long end, page;
- bool dirty;
+ bool dirty = false;
if (length == 0) {
return false;
@@ -989,8 +990,22 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
- page, end - page);
+
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
+ offset, num);
+ page += num;
+ }
+
+ rcu_read_unlock();
if (dirty && tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
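
The loop above visits the dirty bitmap one fixed-size block at a time,
clamping each run at a block boundary. The same index arithmetic in a
standalone sketch (block size shrunk for readability; not QEMU code):

    #include <stdio.h>

    #define BLOCK_SIZE 256   /* stand-in for DIRTY_MEMORY_BLOCK_SIZE */

    int main(void)
    {
        unsigned long page = 300, end = 700;   /* arbitrary page range */

        while (page < end) {
            unsigned long idx    = page / BLOCK_SIZE;   /* which block */
            unsigned long offset = page % BLOCK_SIZE;   /* bit in block */
            /* never let one run cross a block boundary */
            unsigned long num = (end - page < BLOCK_SIZE - offset)
                              ? end - page : BLOCK_SIZE - offset;
            printf("block %lu: bits %lu..%lu\n", idx, offset,
                   offset + num - 1);
            page += num;
        }
        return 0;
    }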
@@ -1504,6 +1519,47 @@ int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
return 0;
}
+/* Called with ram_list.mutex held */
+static void dirty_memory_extend(ram_addr_t old_ram_size,
+ ram_addr_t new_ram_size)
+{
+ ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
+ ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
+ int i;
+
+ /* Only need to extend if block count increased */
+ if (new_num_blocks <= old_num_blocks) {
+ return;
+ }
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ DirtyMemoryBlocks *old_blocks;
+ DirtyMemoryBlocks *new_blocks;
+ int j;
+
+ old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ new_blocks = g_malloc(sizeof(*new_blocks) +
+ sizeof(new_blocks->blocks[0]) * new_num_blocks);
+
+ if (old_num_blocks) {
+ memcpy(new_blocks->blocks, old_blocks->blocks,
+ old_num_blocks * sizeof(old_blocks->blocks[0]));
+ }
+
+ for (j = old_num_blocks; j < new_num_blocks; j++) {
+ new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
+ }
+
+ atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+
+ if (old_blocks) {
+ g_free_rcu(old_blocks, rcu);
+ }
+ }
+}
+
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
RAMBlock *block;
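
dirty_memory_extend() above is the standard RCU copy-update shape: build
the replacement off to the side, publish it with a single atomic store,
and free the old copy only after a grace period. A toy model of the same
shape using C11 atomics (which this pull request switches to); it
deliberately omits the deferred free that g_free_rcu() provides:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct Blocks { size_t n; int data[]; } Blocks;

    static _Atomic(Blocks *) shared;   /* readers load this pointer */

    static void extend(size_t new_n)
    {
        Blocks *old = atomic_load_explicit(&shared, memory_order_acquire);
        size_t old_n = old ? old->n : 0;
        if (new_n <= old_n) {
            return;   /* only grow, exactly like dirty_memory_extend() */
        }
        Blocks *new = malloc(sizeof(*new) + new_n * sizeof(int));
        new->n = new_n;
        if (old_n) {
            memcpy(new->data, old->data, old_n * sizeof(int));
        }
        memset(new->data + old_n, 0, (new_n - old_n) * sizeof(int));
        /* one atomic publish: readers see the old array or the new one,
         * never a half-built copy */
        atomic_store_explicit(&shared, new, memory_order_release);
        /* real code must defer free(old) until all readers are done */
    }

    int main(void)
    {
        extend(4);
        extend(8);
        printf("blocks: %zu\n", atomic_load(&shared)->n);
        return 0;
    }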
@@ -1543,6 +1599,7 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
(new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
if (new_ram_size > old_ram_size) {
migration_bitmap_extend(old_ram_size, new_ram_size);
+ dirty_memory_extend(old_ram_size, new_ram_size);
}
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
@@ -1568,18 +1625,6 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
ram_list.version++;
qemu_mutex_unlock_ramlist();
- new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
-
- if (new_ram_size > old_ram_size) {
- int i;
-
- /* ram_list.dirty_memory[] is protected by the iothread lock. */
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- ram_list.dirty_memory[i] =
- bitmap_zero_extend(ram_list.dirty_memory[i],
- old_ram_size, new_ram_size);
- }
- }
cpu_physical_memory_set_dirty_range(new_block->offset,
new_block->used_length,
DIRTY_CLIENTS_ALL);
diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c
index dfab272f9e..6adec1e990 100644
--- a/hw/ipmi/ipmi.c
+++ b/hw/ipmi/ipmi.c
@@ -51,9 +51,7 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
if (checkonly) {
return 0;
}
- qemu_mutex_lock_iothread();
qmp_inject_nmi(NULL);
- qemu_mutex_unlock_iothread();
return 0;
case IPMI_POWERCYCLE_CHASSIS:
diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c
index e1ad19b8db..f8b21761a2 100644
--- a/hw/ipmi/ipmi_bmc_sim.c
+++ b/hw/ipmi/ipmi_bmc_sim.c
@@ -559,7 +559,7 @@ static void ipmi_init_sensors_from_sdrs(IPMIBmcSim *s)
static int ipmi_register_netfn(IPMIBmcSim *s, unsigned int netfn,
const IPMINetfn *netfnd)
{
- if ((netfn & 1) || (netfn > MAX_NETFNS) || (s->netfns[netfn / 2])) {
+ if ((netfn & 1) || (netfn >= MAX_NETFNS) || (s->netfns[netfn / 2])) {
return -1;
}
s->netfns[netfn / 2] = netfnd;
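
The switch from '>' to '>=' matters because netfns[] is indexed by
netfn / 2 (even/odd function codes share a slot), so an array sized for
MAX_NETFNS codes has only MAX_NETFNS / 2 entries, and netfn == MAX_NETFNS
would write one element past the end. A sketch of the corrected bound
check, using a hypothetical MAX_NETFNS of 64:

    #define MAX_NETFNS 64                        /* illustrative value */

    static const void *netfns[MAX_NETFNS / 2];   /* valid indices 0..31 */

    static int register_netfn(unsigned int netfn, const void *fns)
    {
        /* netfn == 64 would index netfns[32], out of bounds: the old
         * '>' test let it through, the new '>=' test rejects it */
        if ((netfn & 1) || (netfn >= MAX_NETFNS) || netfns[netfn / 2]) {
            return -1;
        }
        netfns[netfn / 2] = fns;
        return 0;
    }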
@@ -1135,6 +1135,8 @@ static void set_watchdog_timer(IPMIBmcSim *ibs,
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
return;
}
+ break;
+
default:
/* We don't support PRE_SMI */
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
diff --git a/hw/scsi/Makefile.objs b/hw/scsi/Makefile.objs
index 40c79d34c9..5a2248be36 100644
--- a/hw/scsi/Makefile.objs
+++ b/hw/scsi/Makefile.objs
@@ -1,6 +1,7 @@
common-obj-y += scsi-disk.o
common-obj-y += scsi-generic.o scsi-bus.o
common-obj-$(CONFIG_LSI_SCSI_PCI) += lsi53c895a.o
+common-obj-$(CONFIG_MPTSAS_SCSI_PCI) += mptsas.o mptconfig.o mptendian.o
common-obj-$(CONFIG_MEGASAS_SCSI_PCI) += megasas.o
common-obj-$(CONFIG_VMW_PVSCSI_SCSI_PCI) += vmw_pvscsi.o
common-obj-$(CONFIG_ESP) += esp.o
diff --git a/hw/scsi/mpi.h b/hw/scsi/mpi.h
new file mode 100644
index 0000000000..0568e19503
--- /dev/null
+++ b/hw/scsi/mpi.h
@@ -0,0 +1,1153 @@
+/*-
+ * Based on FreeBSD sys/dev/mpt/mpilib headers.
+ *
+ * Copyright (c) 2000-2010, LSI Logic Corporation and its contributors.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon including
+ * a substantially similar Disclaimer requirement for further binary
+ * redistribution.
+ * 3. Neither the name of the LSI Logic Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
+ * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MPI_H
+#define MPI_H
+
+enum {
+ MPI_FUNCTION_SCSI_IO_REQUEST = 0x00,
+ MPI_FUNCTION_SCSI_TASK_MGMT = 0x01,
+ MPI_FUNCTION_IOC_INIT = 0x02,
+ MPI_FUNCTION_IOC_FACTS = 0x03,
+ MPI_FUNCTION_CONFIG = 0x04,
+ MPI_FUNCTION_PORT_FACTS = 0x05,
+ MPI_FUNCTION_PORT_ENABLE = 0x06,
+ MPI_FUNCTION_EVENT_NOTIFICATION = 0x07,
+ MPI_FUNCTION_EVENT_ACK = 0x08,
+ MPI_FUNCTION_FW_DOWNLOAD = 0x09,
+ MPI_FUNCTION_TARGET_CMD_BUFFER_POST = 0x0A,
+ MPI_FUNCTION_TARGET_ASSIST = 0x0B,
+ MPI_FUNCTION_TARGET_STATUS_SEND = 0x0C,
+ MPI_FUNCTION_TARGET_MODE_ABORT = 0x0D,
+ MPI_FUNCTION_FC_LINK_SRVC_BUF_POST = 0x0E,
+ MPI_FUNCTION_FC_LINK_SRVC_RSP = 0x0F,
+ MPI_FUNCTION_FC_EX_LINK_SRVC_SEND = 0x10,
+ MPI_FUNCTION_FC_ABORT = 0x11,
+ MPI_FUNCTION_FW_UPLOAD = 0x12,
+ MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND = 0x13,
+ MPI_FUNCTION_FC_PRIMITIVE_SEND = 0x14,
+
+ MPI_FUNCTION_RAID_ACTION = 0x15,
+ MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH = 0x16,
+
+ MPI_FUNCTION_TOOLBOX = 0x17,
+
+ MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR = 0x18,
+
+ MPI_FUNCTION_MAILBOX = 0x19,
+
+ MPI_FUNCTION_SMP_PASSTHROUGH = 0x1A,
+ MPI_FUNCTION_SAS_IO_UNIT_CONTROL = 0x1B,
+ MPI_FUNCTION_SATA_PASSTHROUGH = 0x1C,
+
+ MPI_FUNCTION_DIAG_BUFFER_POST = 0x1D,
+ MPI_FUNCTION_DIAG_RELEASE = 0x1E,
+
+ MPI_FUNCTION_SCSI_IO_32 = 0x1F,
+
+ MPI_FUNCTION_LAN_SEND = 0x20,
+ MPI_FUNCTION_LAN_RECEIVE = 0x21,
+ MPI_FUNCTION_LAN_RESET = 0x22,
+
+ MPI_FUNCTION_TARGET_ASSIST_EXTENDED = 0x23,
+ MPI_FUNCTION_TARGET_CMD_BUF_BASE_POST = 0x24,
+ MPI_FUNCTION_TARGET_CMD_BUF_LIST_POST = 0x25,
+
+ MPI_FUNCTION_INBAND_BUFFER_POST = 0x28,
+ MPI_FUNCTION_INBAND_SEND = 0x29,
+ MPI_FUNCTION_INBAND_RSP = 0x2A,
+ MPI_FUNCTION_INBAND_ABORT = 0x2B,
+
+ MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET = 0x40,
+ MPI_FUNCTION_IO_UNIT_RESET = 0x41,
+ MPI_FUNCTION_HANDSHAKE = 0x42,
+ MPI_FUNCTION_REPLY_FRAME_REMOVAL = 0x43,
+ MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL = 0x44,
+};
+
+/****************************************************************************/
+/* Registers */
+/****************************************************************************/
+
+enum {
+ MPI_IOC_STATE_RESET = 0x00000000,
+ MPI_IOC_STATE_READY = 0x10000000,
+ MPI_IOC_STATE_OPERATIONAL = 0x20000000,
+ MPI_IOC_STATE_FAULT = 0x40000000,
+
+ MPI_DOORBELL_OFFSET = 0x00000000,
+ MPI_DOORBELL_ACTIVE = 0x08000000, /* DoorbellUsed */
+ MPI_DOORBELL_WHO_INIT_MASK = 0x07000000,
+ MPI_DOORBELL_WHO_INIT_SHIFT = 24,
+ MPI_DOORBELL_FUNCTION_MASK = 0xFF000000,
+ MPI_DOORBELL_FUNCTION_SHIFT = 24,
+ MPI_DOORBELL_ADD_DWORDS_MASK = 0x00FF0000,
+ MPI_DOORBELL_ADD_DWORDS_SHIFT = 16,
+ MPI_DOORBELL_DATA_MASK = 0x0000FFFF,
+ MPI_DOORBELL_FUNCTION_SPECIFIC_MASK = 0x0000FFFF,
+
+ MPI_DB_HPBAC_VALUE_MASK = 0x0000F000,
+ MPI_DB_HPBAC_ENABLE_ACCESS = 0x01,
+ MPI_DB_HPBAC_DISABLE_ACCESS = 0x02,
+ MPI_DB_HPBAC_FREE_BUFFER = 0x03,
+
+ MPI_WRITE_SEQUENCE_OFFSET = 0x00000004,
+ MPI_WRSEQ_KEY_VALUE_MASK = 0x0000000F,
+ MPI_WRSEQ_1ST_KEY_VALUE = 0x04,
+ MPI_WRSEQ_2ND_KEY_VALUE = 0x0B,
+ MPI_WRSEQ_3RD_KEY_VALUE = 0x02,
+ MPI_WRSEQ_4TH_KEY_VALUE = 0x07,
+ MPI_WRSEQ_5TH_KEY_VALUE = 0x0D,
+
+ MPI_DIAGNOSTIC_OFFSET = 0x00000008,
+ MPI_DIAG_CLEAR_FLASH_BAD_SIG = 0x00000400,
+ MPI_DIAG_PREVENT_IOC_BOOT = 0x00000200,
+ MPI_DIAG_DRWE = 0x00000080,
+ MPI_DIAG_FLASH_BAD_SIG = 0x00000040,
+ MPI_DIAG_RESET_HISTORY = 0x00000020,
+ MPI_DIAG_RW_ENABLE = 0x00000010,
+ MPI_DIAG_RESET_ADAPTER = 0x00000004,
+ MPI_DIAG_DISABLE_ARM = 0x00000002,
+ MPI_DIAG_MEM_ENABLE = 0x00000001,
+
+ MPI_TEST_BASE_ADDRESS_OFFSET = 0x0000000C,
+
+ MPI_DIAG_RW_DATA_OFFSET = 0x00000010,
+
+ MPI_DIAG_RW_ADDRESS_OFFSET = 0x00000014,
+
+ MPI_HOST_INTERRUPT_STATUS_OFFSET = 0x00000030,
+ MPI_HIS_IOP_DOORBELL_STATUS = 0x80000000,
+ MPI_HIS_REPLY_MESSAGE_INTERRUPT = 0x00000008,
+ MPI_HIS_DOORBELL_INTERRUPT = 0x00000001,
+
+ MPI_HOST_INTERRUPT_MASK_OFFSET = 0x00000034,
+ MPI_HIM_RIM = 0x00000008,
+ MPI_HIM_DIM = 0x00000001,
+
+ MPI_REQUEST_QUEUE_OFFSET = 0x00000040,
+ MPI_REQUEST_POST_FIFO_OFFSET = 0x00000040,
+
+ MPI_REPLY_QUEUE_OFFSET = 0x00000044,
+ MPI_REPLY_POST_FIFO_OFFSET = 0x00000044,
+ MPI_REPLY_FREE_FIFO_OFFSET = 0x00000044,
+
+ MPI_HI_PRI_REQUEST_QUEUE_OFFSET = 0x00000048,
+};
+
+#define MPI_ADDRESS_REPLY_A_BIT 0x80000000
+
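A sketch of how a device emulation might split a doorbell write into its
fields with the MPI_DOORBELL_* masks and shifts above (the helper itself
is hypothetical, not part of this header):

    /* Hypothetical decoder for a 32-bit doorbell write. */
    static inline void decode_doorbell(uint32_t val, uint8_t *function,
                                       uint8_t *extra_dwords, uint16_t *data)
    {
        *function     = (val & MPI_DOORBELL_FUNCTION_MASK)
                            >> MPI_DOORBELL_FUNCTION_SHIFT;
        *extra_dwords = (val & MPI_DOORBELL_ADD_DWORDS_MASK)
                            >> MPI_DOORBELL_ADD_DWORDS_SHIFT;
        *data         =  val & MPI_DOORBELL_DATA_MASK;
    }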
+/****************************************************************************/
+/* Scatter/gather elements */
+/****************************************************************************/
+
+typedef struct MPISGEntry {
+ uint32_t FlagsLength;
+ union
+ {
+ uint32_t Address32;
+ uint64_t Address64;
+ } u;
+} QEMU_PACKED MPISGEntry;
+
+/* Flags field bit definitions */
+
+enum {
+ MPI_SGE_FLAGS_LAST_ELEMENT = 0x80000000,
+ MPI_SGE_FLAGS_END_OF_BUFFER = 0x40000000,
+ MPI_SGE_FLAGS_ELEMENT_TYPE_MASK = 0x30000000,
+ MPI_SGE_FLAGS_LOCAL_ADDRESS = 0x08000000,
+ MPI_SGE_FLAGS_DIRECTION = 0x04000000,
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING = 0x02000000,
+ MPI_SGE_FLAGS_END_OF_LIST = 0x01000000,
+
+ MPI_SGE_LENGTH_MASK = 0x00FFFFFF,
+ MPI_SGE_CHAIN_LENGTH_MASK = 0x0000FFFF,
+
+ MPI_SGE_FLAGS_TRANSACTION_ELEMENT = 0x00000000,
+ MPI_SGE_FLAGS_SIMPLE_ELEMENT = 0x10000000,
+ MPI_SGE_FLAGS_CHAIN_ELEMENT = 0x30000000,
+
+ /* Direction */
+
+ MPI_SGE_FLAGS_IOC_TO_HOST = 0x00000000,
+ MPI_SGE_FLAGS_HOST_TO_IOC = 0x04000000,
+
+ MPI_SGE_CHAIN_OFFSET_MASK = 0x00FF0000,
+};
+
+#define MPI_SGE_CHAIN_OFFSET_SHIFT 16
+
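A FlagsLength word packs the element's flags into the top bits and its
byte count into the low 24 bits; a few hypothetical accessors built from
the definitions above (bool as in stdbool.h):

    /* Hypothetical accessors for a simple SGE's FlagsLength word. */
    static inline uint32_t sge_length(uint32_t flags_length)
    {
        return flags_length & MPI_SGE_LENGTH_MASK;
    }

    static inline bool sge_uses_64bit_address(uint32_t flags_length)
    {
        return (flags_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) != 0;
    }

    static inline bool sge_ends_list(uint32_t flags_length)
    {
        return (flags_length & MPI_SGE_FLAGS_END_OF_LIST) != 0;
    }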
+/****************************************************************************/
+/* Standard message request header for all request messages */
+/****************************************************************************/
+
+typedef struct MPIRequestHeader {
+ uint8_t Reserved[2]; /* function specific */
+ uint8_t ChainOffset;
+ uint8_t Function;
+ uint8_t Reserved1[3]; /* function specific */
+ uint8_t MsgFlags;
+ uint32_t MsgContext;
+} QEMU_PACKED MPIRequestHeader;
+
+
+typedef struct MPIDefaultReply {
+ uint8_t Reserved[2]; /* function specific */
+ uint8_t MsgLength;
+ uint8_t Function;
+ uint8_t Reserved1[3]; /* function specific */
+ uint8_t MsgFlags;
+ uint32_t MsgContext;
+ uint8_t Reserved2[2]; /* function specific */
+ uint16_t IOCStatus;
+ uint32_t IOCLogInfo;
+} QEMU_PACKED MPIDefaultReply;
+
+/* MsgFlags definition for all replies */
+
+#define MPI_MSGFLAGS_CONTINUATION_REPLY (0x80)
+
+enum {
+
+ /************************************************************************/
+ /* Common IOCStatus values for all replies */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_SUCCESS = 0x0000,
+ MPI_IOCSTATUS_INVALID_FUNCTION = 0x0001,
+ MPI_IOCSTATUS_BUSY = 0x0002,
+ MPI_IOCSTATUS_INVALID_SGL = 0x0003,
+ MPI_IOCSTATUS_INTERNAL_ERROR = 0x0004,
+ MPI_IOCSTATUS_RESERVED = 0x0005,
+ MPI_IOCSTATUS_INSUFFICIENT_RESOURCES = 0x0006,
+ MPI_IOCSTATUS_INVALID_FIELD = 0x0007,
+ MPI_IOCSTATUS_INVALID_STATE = 0x0008,
+ MPI_IOCSTATUS_OP_STATE_NOT_SUPPORTED = 0x0009,
+
+ /************************************************************************/
+ /* Config IOCStatus values */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_CONFIG_INVALID_ACTION = 0x0020,
+ MPI_IOCSTATUS_CONFIG_INVALID_TYPE = 0x0021,
+ MPI_IOCSTATUS_CONFIG_INVALID_PAGE = 0x0022,
+ MPI_IOCSTATUS_CONFIG_INVALID_DATA = 0x0023,
+ MPI_IOCSTATUS_CONFIG_NO_DEFAULTS = 0x0024,
+ MPI_IOCSTATUS_CONFIG_CANT_COMMIT = 0x0025,
+
+ /************************************************************************/
+ /* SCSIIO Reply = SPI & FCP, initiator values */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_SCSI_RECOVERED_ERROR = 0x0040,
+ MPI_IOCSTATUS_SCSI_INVALID_BUS = 0x0041,
+ MPI_IOCSTATUS_SCSI_INVALID_TARGETID = 0x0042,
+ MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE = 0x0043,
+ MPI_IOCSTATUS_SCSI_DATA_OVERRUN = 0x0044,
+ MPI_IOCSTATUS_SCSI_DATA_UNDERRUN = 0x0045,
+ MPI_IOCSTATUS_SCSI_IO_DATA_ERROR = 0x0046,
+ MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR = 0x0047,
+ MPI_IOCSTATUS_SCSI_TASK_TERMINATED = 0x0048,
+ MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH = 0x0049,
+ MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED = 0x004A,
+ MPI_IOCSTATUS_SCSI_IOC_TERMINATED = 0x004B,
+ MPI_IOCSTATUS_SCSI_EXT_TERMINATED = 0x004C,
+
+ /************************************************************************/
+ /* For use by SCSI Initiator and SCSI Target end-to-end data protection*/
+ /************************************************************************/
+
+ MPI_IOCSTATUS_EEDP_GUARD_ERROR = 0x004D,
+ MPI_IOCSTATUS_EEDP_REF_TAG_ERROR = 0x004E,
+ MPI_IOCSTATUS_EEDP_APP_TAG_ERROR = 0x004F,
+
+ /************************************************************************/
+ /* SCSI Target values */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_TARGET_PRIORITY_IO = 0x0060,
+ MPI_IOCSTATUS_TARGET_INVALID_PORT = 0x0061,
+ MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX = 0x0062,
+ MPI_IOCSTATUS_TARGET_ABORTED = 0x0063,
+ MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE = 0x0064,
+ MPI_IOCSTATUS_TARGET_NO_CONNECTION = 0x0065,
+ MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH = 0x006A,
+ MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT = 0x006B,
+ MPI_IOCSTATUS_TARGET_DATA_OFFSET_ERROR = 0x006D,
+ MPI_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA = 0x006E,
+ MPI_IOCSTATUS_TARGET_IU_TOO_SHORT = 0x006F,
+ MPI_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT = 0x0070,
+ MPI_IOCSTATUS_TARGET_NAK_RECEIVED = 0x0071,
+
+ /************************************************************************/
+ /* Fibre Channel Direct Access values */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_FC_ABORTED = 0x0066,
+ MPI_IOCSTATUS_FC_RX_ID_INVALID = 0x0067,
+ MPI_IOCSTATUS_FC_DID_INVALID = 0x0068,
+ MPI_IOCSTATUS_FC_NODE_LOGGED_OUT = 0x0069,
+ MPI_IOCSTATUS_FC_EXCHANGE_CANCELED = 0x006C,
+
+ /************************************************************************/
+ /* LAN values */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND = 0x0080,
+ MPI_IOCSTATUS_LAN_DEVICE_FAILURE = 0x0081,
+ MPI_IOCSTATUS_LAN_TRANSMIT_ERROR = 0x0082,
+ MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED = 0x0083,
+ MPI_IOCSTATUS_LAN_RECEIVE_ERROR = 0x0084,
+ MPI_IOCSTATUS_LAN_RECEIVE_ABORTED = 0x0085,
+ MPI_IOCSTATUS_LAN_PARTIAL_PACKET = 0x0086,
+ MPI_IOCSTATUS_LAN_CANCELED = 0x0087,
+
+ /************************************************************************/
+ /* Serial Attached SCSI values */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED = 0x0090,
+ MPI_IOCSTATUS_SAS_SMP_DATA_OVERRUN = 0x0091,
+
+ /************************************************************************/
+ /* Inband values */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_INBAND_ABORTED = 0x0098,
+ MPI_IOCSTATUS_INBAND_NO_CONNECTION = 0x0099,
+
+ /************************************************************************/
+ /* Diagnostic Tools values */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_DIAGNOSTIC_RELEASED = 0x00A0,
+
+ /************************************************************************/
+ /* IOCStatus flag to indicate that log info is available */
+ /************************************************************************/
+
+ MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE = 0x8000,
+ MPI_IOCSTATUS_MASK = 0x7FFF,
+
+ /************************************************************************/
+ /* LogInfo Types */
+ /************************************************************************/
+
+ MPI_IOCLOGINFO_TYPE_MASK = 0xF0000000,
+ MPI_IOCLOGINFO_TYPE_SHIFT = 28,
+ MPI_IOCLOGINFO_TYPE_NONE = 0x0,
+ MPI_IOCLOGINFO_TYPE_SCSI = 0x1,
+ MPI_IOCLOGINFO_TYPE_FC = 0x2,
+ MPI_IOCLOGINFO_TYPE_SAS = 0x3,
+ MPI_IOCLOGINFO_TYPE_ISCSI = 0x4,
+ MPI_IOCLOGINFO_LOG_DATA_MASK = 0x0FFFFFFF,
+};
+
+/****************************************************************************/
+/* SCSI IO messages and associated structures */
+/****************************************************************************/
+
+typedef struct MPIMsgSCSIIORequest {
+ uint8_t TargetID; /* 00h */
+ uint8_t Bus; /* 01h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t CDBLength; /* 04h */
+ uint8_t SenseBufferLength; /* 05h */
+ uint8_t Reserved; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint8_t LUN[8]; /* 0Ch */
+ uint32_t Control; /* 14h */
+ uint8_t CDB[16]; /* 18h */
+ uint32_t DataLength; /* 28h */
+ uint32_t SenseBufferLowAddr; /* 2Ch */
+} QEMU_PACKED MPIMsgSCSIIORequest;
+
+/* SCSI IO MsgFlags bits */
+
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH (0x01)
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 (0x00)
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 (0x01)
+
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOCATION (0x02)
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_HOST (0x00)
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_IOC (0x02)
+
+#define MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR (0x04)
+
+/* SCSI IO LUN fields */
+
+#define MPI_SCSIIO_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI_SCSIIO_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI_SCSIIO_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI_SCSIIO_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI_SCSIIO_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI_SCSIIO_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+/* SCSI IO Control bits */
+
+#define MPI_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI_SCSIIO_CONTROL_READ (0x02000000)
+
+#define MPI_SCSIIO_CONTROL_ADDCDBLEN_MASK (0x3C000000)
+#define MPI_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI_SCSIIO_CONTROL_ACAQ (0x00000400)
+#define MPI_SCSIIO_CONTROL_UNTAGGED (0x00000500)
+#define MPI_SCSIIO_CONTROL_NO_DISCONNECT (0x00000700)
+
+#define MPI_SCSIIO_CONTROL_TASKMANAGE_MASK (0x00FF0000)
+#define MPI_SCSIIO_CONTROL_OBSOLETE (0x00800000)
+#define MPI_SCSIIO_CONTROL_CLEAR_ACA_RSV (0x00400000)
+#define MPI_SCSIIO_CONTROL_TARGET_RESET (0x00200000)
+#define MPI_SCSIIO_CONTROL_LUN_RESET_RSV (0x00100000)
+#define MPI_SCSIIO_CONTROL_RESERVED (0x00080000)
+#define MPI_SCSIIO_CONTROL_CLR_TASK_SET_RSV (0x00040000)
+#define MPI_SCSIIO_CONTROL_ABORT_TASK_SET (0x00020000)
+#define MPI_SCSIIO_CONTROL_RESERVED2 (0x00010000)
+
+/* SCSI IO reply structure */
+typedef struct MPIMsgSCSIIOReply
+{
+ uint8_t TargetID; /* 00h */
+ uint8_t Bus; /* 01h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t CDBLength; /* 04h */
+ uint8_t SenseBufferLength; /* 05h */
+ uint8_t Reserved; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint8_t SCSIStatus; /* 0Ch */
+ uint8_t SCSIState; /* 0Dh */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+ uint32_t TransferCount; /* 14h */
+ uint32_t SenseCount; /* 18h */
+ uint32_t ResponseInfo; /* 1Ch */
+ uint16_t TaskTag; /* 20h */
+ uint16_t Reserved1; /* 22h */
+} QEMU_PACKED MPIMsgSCSIIOReply;
+
+/* SCSI IO Reply SCSIStatus values (SAM-2 status codes) */
+
+#define MPI_SCSI_STATUS_SUCCESS (0x00)
+#define MPI_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI_SCSI_STATUS_BUSY (0x08)
+#define MPI_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI_SCSI_STATUS_COMMAND_TERMINATED (0x22)
+#define MPI_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI_SCSI_STATUS_ACA_ACTIVE (0x30)
+
+#define MPI_SCSI_STATUS_FCPEXT_DEVICE_LOGGED_OUT (0x80)
+#define MPI_SCSI_STATUS_FCPEXT_NO_LINK (0x81)
+#define MPI_SCSI_STATUS_FCPEXT_UNASSIGNED (0x82)
+
+
+/* SCSI IO Reply SCSIState values */
+
+#define MPI_SCSI_STATE_AUTOSENSE_VALID (0x01)
+#define MPI_SCSI_STATE_AUTOSENSE_FAILED (0x02)
+#define MPI_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI_SCSI_STATE_TERMINATED (0x08)
+#define MPI_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
+#define MPI_SCSI_STATE_QUEUE_TAG_REJECTED (0x20)
+
+/* SCSI IO Reply ResponseInfo values */
+/* (FCP-1 RSP_CODE values and SPI-3 Packetized Failure codes) */
+
+#define MPI_SCSI_RSP_INFO_FUNCTION_COMPLETE (0x00000000)
+#define MPI_SCSI_RSP_INFO_FCP_BURST_LEN_ERROR (0x01000000)
+#define MPI_SCSI_RSP_INFO_CMND_FIELDS_INVALID (0x02000000)
+#define MPI_SCSI_RSP_INFO_FCP_DATA_RO_ERROR (0x03000000)
+#define MPI_SCSI_RSP_INFO_TASK_MGMT_UNSUPPORTED (0x04000000)
+#define MPI_SCSI_RSP_INFO_TASK_MGMT_FAILED (0x05000000)
+#define MPI_SCSI_RSP_INFO_SPI_LQ_INVALID_TYPE (0x06000000)
+
+#define MPI_SCSI_TASKTAG_UNKNOWN (0xFFFF)
+
+
+/****************************************************************************/
+/* SCSI Task Management messages */
+/****************************************************************************/
+
+typedef struct MPIMsgSCSITaskMgmt {
+ uint8_t TargetID; /* 00h */
+ uint8_t Bus; /* 01h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved; /* 04h */
+ uint8_t TaskType; /* 05h */
+ uint8_t Reserved1; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint8_t LUN[8]; /* 0Ch */
+ uint32_t Reserved2[7]; /* 14h */
+ uint32_t TaskMsgContext; /* 30h */
+} QEMU_PACKED MPIMsgSCSITaskMgmt;
+
+enum {
+ /* TaskType values */
+
+ MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK = 0x01,
+ MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET = 0x02,
+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET = 0x03,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS = 0x04,
+ MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET = 0x05,
+ MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET = 0x06,
+ MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK = 0x07,
+ MPI_SCSITASKMGMT_TASKTYPE_CLR_ACA = 0x08,
+
+ /* MsgFlags bits */
+
+ MPI_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU = 0x01,
+
+ MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION = 0x00,
+ MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION = 0x02,
+ MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION = 0x04,
+
+ MPI_SCSITASKMGMT_MSGFLAGS_SOFT_RESET_OPTION = 0x08,
+};
+
+/* SCSI Task Management Reply */
+typedef struct MPIMsgSCSITaskMgmtReply {
+ uint8_t TargetID; /* 00h */
+ uint8_t Bus; /* 01h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t ResponseCode; /* 04h */
+ uint8_t TaskType; /* 05h */
+ uint8_t Reserved1; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint8_t Reserved2[2]; /* 0Ch */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+ uint32_t TerminationCount; /* 14h */
+} QEMU_PACKED MPIMsgSCSITaskMgmtReply;
+
+/* ResponseCode values */
+enum {
+ MPI_SCSITASKMGMT_RSP_TM_COMPLETE = 0x00,
+ MPI_SCSITASKMGMT_RSP_INVALID_FRAME = 0x02,
+ MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED = 0x04,
+ MPI_SCSITASKMGMT_RSP_TM_FAILED = 0x05,
+ MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED = 0x08,
+ MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN = 0x09,
+ MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC = 0x80,
+};
+
+/****************************************************************************/
+/* IOCInit message */
+/****************************************************************************/
+
+typedef struct MPIMsgIOCInit {
+ uint8_t WhoInit; /* 00h */
+ uint8_t Reserved; /* 01h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Flags; /* 04h */
+ uint8_t MaxDevices; /* 05h */
+ uint8_t MaxBuses; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint16_t ReplyFrameSize; /* 0Ch */
+ uint8_t Reserved1[2]; /* 0Eh */
+ uint32_t HostMfaHighAddr; /* 10h */
+ uint32_t SenseBufferHighAddr; /* 14h */
+ uint32_t ReplyFifoHostSignalingAddr; /* 18h */
+ MPISGEntry HostPageBufferSGE; /* 1Ch */
+ uint16_t MsgVersion; /* 28h */
+ uint16_t HeaderVersion; /* 2Ah */
+} QEMU_PACKED MPIMsgIOCInit;
+
+enum {
+ /* WhoInit values */
+
+ MPI_WHOINIT_NO_ONE = 0x00,
+ MPI_WHOINIT_SYSTEM_BIOS = 0x01,
+ MPI_WHOINIT_ROM_BIOS = 0x02,
+ MPI_WHOINIT_PCI_PEER = 0x03,
+ MPI_WHOINIT_HOST_DRIVER = 0x04,
+ MPI_WHOINIT_MANUFACTURER = 0x05,
+
+ /* Flags values */
+
+ MPI_IOCINIT_FLAGS_HOST_PAGE_BUFFER_PERSISTENT = 0x04,
+ MPI_IOCINIT_FLAGS_REPLY_FIFO_HOST_SIGNAL = 0x02,
+ MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE = 0x01,
+
+ /* MsgVersion */
+
+ MPI_IOCINIT_MSGVERSION_MAJOR_MASK = 0xFF00,
+ MPI_IOCINIT_MSGVERSION_MAJOR_SHIFT = 8,
+ MPI_IOCINIT_MSGVERSION_MINOR_MASK = 0x00FF,
+ MPI_IOCINIT_MSGVERSION_MINOR_SHIFT = 0,
+
+ /* HeaderVersion */
+
+ MPI_IOCINIT_HEADERVERSION_UNIT_MASK = 0xFF00,
+ MPI_IOCINIT_HEADERVERSION_UNIT_SHIFT = 8,
+ MPI_IOCINIT_HEADERVERSION_DEV_MASK = 0x00FF,
+ MPI_IOCINIT_HEADERVERSION_DEV_SHIFT = 0,
+};
+
+typedef struct MPIMsgIOCInitReply {
+ uint8_t WhoInit; /* 00h */
+ uint8_t Reserved; /* 01h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Flags; /* 04h */
+ uint8_t MaxDevices; /* 05h */
+ uint8_t MaxBuses; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint16_t Reserved2; /* 0Ch */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+} QEMU_PACKED MPIMsgIOCInitReply;
+
+
+
+/****************************************************************************/
+/* IOC Facts message */
+/****************************************************************************/
+
+typedef struct MPIMsgIOCFacts {
+ uint8_t Reserved[2]; /* 00h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved1[3]; /* 04h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+} QEMU_PACKED MPIMsgIOCFacts;
+
+/* IOC Facts Reply */
+typedef struct MPIMsgIOCFactsReply {
+ uint16_t MsgVersion; /* 00h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint16_t HeaderVersion; /* 04h */
+ uint8_t IOCNumber; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint16_t IOCExceptions; /* 0Ch */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+ uint8_t MaxChainDepth; /* 14h */
+ uint8_t WhoInit; /* 15h */
+ uint8_t BlockSize; /* 16h */
+ uint8_t Flags; /* 17h */
+ uint16_t ReplyQueueDepth; /* 18h */
+ uint16_t RequestFrameSize; /* 1Ah */
+ uint16_t Reserved_0101_FWVersion; /* 1Ch */ /* obsolete 16-bit FWVersion */
+ uint16_t ProductID; /* 1Eh */
+ uint32_t CurrentHostMfaHighAddr; /* 20h */
+ uint16_t GlobalCredits; /* 24h */
+ uint8_t NumberOfPorts; /* 26h */
+ uint8_t EventState; /* 27h */
+ uint32_t CurrentSenseBufferHighAddr; /* 28h */
+ uint16_t CurReplyFrameSize; /* 2Ch */
+ uint8_t MaxDevices; /* 2Eh */
+ uint8_t MaxBuses; /* 2Fh */
+ uint32_t FWImageSize; /* 30h */
+ uint32_t IOCCapabilities; /* 34h */
+ uint8_t FWVersionDev; /* 38h */
+ uint8_t FWVersionUnit; /* 39h */
+ uint8_t FWVersionMinor; /* 3ah */
+ uint8_t FWVersionMajor; /* 3bh */
+ uint16_t HighPriorityQueueDepth; /* 3Ch */
+ uint16_t Reserved2; /* 3Eh */
+ MPISGEntry HostPageBufferSGE; /* 40h */
+ uint32_t ReplyFifoHostSignalingAddr; /* 4Ch */
+} QEMU_PACKED MPIMsgIOCFactsReply;
+
+enum {
+ MPI_IOCFACTS_MSGVERSION_MAJOR_MASK = 0xFF00,
+ MPI_IOCFACTS_MSGVERSION_MAJOR_SHIFT = 8,
+ MPI_IOCFACTS_MSGVERSION_MINOR_MASK = 0x00FF,
+ MPI_IOCFACTS_MSGVERSION_MINOR_SHIFT = 0,
+
+ MPI_IOCFACTS_HDRVERSION_UNIT_MASK = 0xFF00,
+ MPI_IOCFACTS_HDRVERSION_UNIT_SHIFT = 8,
+ MPI_IOCFACTS_HDRVERSION_DEV_MASK = 0x00FF,
+ MPI_IOCFACTS_HDRVERSION_DEV_SHIFT = 0,
+
+ MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL = 0x0001,
+ MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID = 0x0002,
+ MPI_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL = 0x0004,
+ MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL = 0x0008,
+ MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED = 0x0010,
+
+ MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT = 0x01,
+ MPI_IOCFACTS_FLAGS_REPLY_FIFO_HOST_SIGNAL = 0x02,
+ MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT = 0x04,
+
+ MPI_IOCFACTS_EVENTSTATE_DISABLED = 0x00,
+ MPI_IOCFACTS_EVENTSTATE_ENABLED = 0x01,
+
+ MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q = 0x00000001,
+ MPI_IOCFACTS_CAPABILITY_REPLY_HOST_SIGNAL = 0x00000002,
+ MPI_IOCFACTS_CAPABILITY_QUEUE_FULL_HANDLING = 0x00000004,
+ MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER = 0x00000008,
+ MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER = 0x00000010,
+ MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER = 0x00000020,
+ MPI_IOCFACTS_CAPABILITY_EEDP = 0x00000040,
+ MPI_IOCFACTS_CAPABILITY_BIDIRECTIONAL = 0x00000080,
+ MPI_IOCFACTS_CAPABILITY_MULTICAST = 0x00000100,
+ MPI_IOCFACTS_CAPABILITY_SCSIIO32 = 0x00000200,
+ MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16 = 0x00000400,
+ MPI_IOCFACTS_CAPABILITY_TLR = 0x00000800,
+};
+
+/****************************************************************************/
+/* Port Facts message and Reply */
+/****************************************************************************/
+
+typedef struct MPIMsgPortFacts {
+ uint8_t Reserved[2]; /* 00h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved1[2]; /* 04h */
+ uint8_t PortNumber; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+} QEMU_PACKED MPIMsgPortFacts;
+
+typedef struct MPIMsgPortFactsReply {
+ uint16_t Reserved; /* 00h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint16_t Reserved1; /* 04h */
+ uint8_t PortNumber; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint16_t Reserved2; /* 0Ch */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+ uint8_t Reserved3; /* 14h */
+ uint8_t PortType; /* 15h */
+ uint16_t MaxDevices; /* 16h */
+ uint16_t PortSCSIID; /* 18h */
+ uint16_t ProtocolFlags; /* 1Ah */
+ uint16_t MaxPostedCmdBuffers; /* 1Ch */
+ uint16_t MaxPersistentIDs; /* 1Eh */
+ uint16_t MaxLanBuckets; /* 20h */
+ uint8_t MaxInitiators; /* 22h */
+ uint8_t Reserved4; /* 23h */
+ uint32_t Reserved5; /* 24h */
+} QEMU_PACKED MPIMsgPortFactsReply;
+
+
+enum {
+ /* PortTypes values */
+ MPI_PORTFACTS_PORTTYPE_INACTIVE = 0x00,
+ MPI_PORTFACTS_PORTTYPE_SCSI = 0x01,
+ MPI_PORTFACTS_PORTTYPE_FC = 0x10,
+ MPI_PORTFACTS_PORTTYPE_ISCSI = 0x20,
+ MPI_PORTFACTS_PORTTYPE_SAS = 0x30,
+
+ /* ProtocolFlags values */
+ MPI_PORTFACTS_PROTOCOL_LOGBUSADDR = 0x01,
+ MPI_PORTFACTS_PROTOCOL_LAN = 0x02,
+ MPI_PORTFACTS_PROTOCOL_TARGET = 0x04,
+ MPI_PORTFACTS_PROTOCOL_INITIATOR = 0x08,
+};
+
+
+/****************************************************************************/
+/* Port Enable Message */
+/****************************************************************************/
+
+typedef struct MPIMsgPortEnable {
+ uint8_t Reserved[2]; /* 00h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved1[2]; /* 04h */
+ uint8_t PortNumber; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+} QEMU_PACKED MPIMsgPortEnable;
+
+typedef struct MPIMsgPortEnableReply {
+ uint8_t Reserved[2]; /* 00h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved1[2]; /* 04h */
+ uint8_t PortNumber; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint16_t Reserved2; /* 0Ch */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+} QEMU_PACKED MPIMsgPortEnableReply;
+
+/****************************************************************************/
+/* Event Notification messages */
+/****************************************************************************/
+
+typedef struct MPIMsgEventNotify {
+ uint8_t Switch; /* 00h */
+ uint8_t Reserved; /* 01h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved1[3]; /* 04h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+} QEMU_PACKED MPIMsgEventNotify;
+
+/* Event Notification Reply */
+
+typedef struct MPIMsgEventNotifyReply {
+ uint16_t EventDataLength; /* 00h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved1[2]; /* 04h */
+ uint8_t AckRequired; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint8_t Reserved2[2]; /* 0Ch */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+ uint32_t Event; /* 14h */
+ uint32_t EventContext; /* 18h */
+ uint32_t Data[1]; /* 1Ch */
+} QEMU_PACKED MPIMsgEventNotifyReply;
+
+/* Event Acknowledge */
+
+typedef struct MPIMsgEventAck {
+ uint8_t Reserved[2]; /* 00h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved1[3]; /* 04h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint32_t Event; /* 0Ch */
+ uint32_t EventContext; /* 10h */
+} QEMU_PACKED MPIMsgEventAck;
+
+typedef struct MPIMsgEventAckReply {
+ uint8_t Reserved[2]; /* 00h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint8_t Reserved1[3]; /* 04h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint16_t Reserved2; /* 0Ch */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+} QEMU_PACKED MPIMsgEventAckReply;
+
+enum {
+ /* Switch */
+
+ MPI_EVENT_NOTIFICATION_SWITCH_OFF = 0x00,
+ MPI_EVENT_NOTIFICATION_SWITCH_ON = 0x01,
+
+ /* Event */
+
+ MPI_EVENT_NONE = 0x00000000,
+ MPI_EVENT_LOG_DATA = 0x00000001,
+ MPI_EVENT_STATE_CHANGE = 0x00000002,
+ MPI_EVENT_UNIT_ATTENTION = 0x00000003,
+ MPI_EVENT_IOC_BUS_RESET = 0x00000004,
+ MPI_EVENT_EXT_BUS_RESET = 0x00000005,
+ MPI_EVENT_RESCAN = 0x00000006,
+ MPI_EVENT_LINK_STATUS_CHANGE = 0x00000007,
+ MPI_EVENT_LOOP_STATE_CHANGE = 0x00000008,
+ MPI_EVENT_LOGOUT = 0x00000009,
+ MPI_EVENT_EVENT_CHANGE = 0x0000000A,
+ MPI_EVENT_INTEGRATED_RAID = 0x0000000B,
+ MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE = 0x0000000C,
+ MPI_EVENT_ON_BUS_TIMER_EXPIRED = 0x0000000D,
+ MPI_EVENT_QUEUE_FULL = 0x0000000E,
+ MPI_EVENT_SAS_DEVICE_STATUS_CHANGE = 0x0000000F,
+ MPI_EVENT_SAS_SES = 0x00000010,
+ MPI_EVENT_PERSISTENT_TABLE_FULL = 0x00000011,
+ MPI_EVENT_SAS_PHY_LINK_STATUS = 0x00000012,
+ MPI_EVENT_SAS_DISCOVERY_ERROR = 0x00000013,
+ MPI_EVENT_IR_RESYNC_UPDATE = 0x00000014,
+ MPI_EVENT_IR2 = 0x00000015,
+ MPI_EVENT_SAS_DISCOVERY = 0x00000016,
+ MPI_EVENT_SAS_BROADCAST_PRIMITIVE = 0x00000017,
+ MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE = 0x00000018,
+ MPI_EVENT_SAS_INIT_TABLE_OVERFLOW = 0x00000019,
+ MPI_EVENT_SAS_SMP_ERROR = 0x0000001A,
+ MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE = 0x0000001B,
+ MPI_EVENT_LOG_ENTRY_ADDED = 0x00000021,
+
+ /* AckRequired field values */
+
+ MPI_EVENT_NOTIFICATION_ACK_NOT_REQUIRED = 0x00,
+ MPI_EVENT_NOTIFICATION_ACK_REQUIRED = 0x01,
+};
+
+/****************************************************************************
+* Config Request Message
+****************************************************************************/
+
+typedef struct MPIMsgConfig {
+ uint8_t Action; /* 00h */
+ uint8_t Reserved; /* 01h */
+ uint8_t ChainOffset; /* 02h */
+ uint8_t Function; /* 03h */
+ uint16_t ExtPageLength; /* 04h */
+ uint8_t ExtPageType; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint8_t Reserved2[8]; /* 0Ch */
+ uint8_t PageVersion; /* 14h */
+ uint8_t PageLength; /* 15h */
+ uint8_t PageNumber; /* 16h */
+ uint8_t PageType; /* 17h */
+ uint32_t PageAddress; /* 18h */
+ MPISGEntry PageBufferSGE; /* 1Ch */
+} QEMU_PACKED MPIMsgConfig;
+
+/* Action field values */
+
+enum {
+ MPI_CONFIG_ACTION_PAGE_HEADER = 0x00,
+ MPI_CONFIG_ACTION_PAGE_READ_CURRENT = 0x01,
+ MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT = 0x02,
+ MPI_CONFIG_ACTION_PAGE_DEFAULT = 0x03,
+ MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM = 0x04,
+ MPI_CONFIG_ACTION_PAGE_READ_DEFAULT = 0x05,
+ MPI_CONFIG_ACTION_PAGE_READ_NVRAM = 0x06,
+};
+
+
+/* Config Reply Message */
+typedef struct MPIMsgConfigReply {
+ uint8_t Action; /* 00h */
+ uint8_t Reserved; /* 01h */
+ uint8_t MsgLength; /* 02h */
+ uint8_t Function; /* 03h */
+ uint16_t ExtPageLength; /* 04h */
+ uint8_t ExtPageType; /* 06h */
+ uint8_t MsgFlags; /* 07h */
+ uint32_t MsgContext; /* 08h */
+ uint8_t Reserved2[2]; /* 0Ch */
+ uint16_t IOCStatus; /* 0Eh */
+ uint32_t IOCLogInfo; /* 10h */
+ uint8_t PageVersion; /* 14h */
+ uint8_t PageLength; /* 15h */
+ uint8_t PageNumber; /* 16h */
+ uint8_t PageType; /* 17h */
+} QEMU_PACKED MPIMsgConfigReply;
+
+enum {
+ /* PageAddress field values */
+ MPI_CONFIG_PAGEATTR_READ_ONLY = 0x00,
+ MPI_CONFIG_PAGEATTR_CHANGEABLE = 0x10,
+ MPI_CONFIG_PAGEATTR_PERSISTENT = 0x20,
+ MPI_CONFIG_PAGEATTR_RO_PERSISTENT = 0x30,
+ MPI_CONFIG_PAGEATTR_MASK = 0xF0,
+
+ MPI_CONFIG_PAGETYPE_IO_UNIT = 0x00,
+ MPI_CONFIG_PAGETYPE_IOC = 0x01,
+ MPI_CONFIG_PAGETYPE_BIOS = 0x02,
+ MPI_CONFIG_PAGETYPE_SCSI_PORT = 0x03,
+ MPI_CONFIG_PAGETYPE_SCSI_DEVICE = 0x04,
+ MPI_CONFIG_PAGETYPE_FC_PORT = 0x05,
+ MPI_CONFIG_PAGETYPE_FC_DEVICE = 0x06,
+ MPI_CONFIG_PAGETYPE_LAN = 0x07,
+ MPI_CONFIG_PAGETYPE_RAID_VOLUME = 0x08,
+ MPI_CONFIG_PAGETYPE_MANUFACTURING = 0x09,
+ MPI_CONFIG_PAGETYPE_RAID_PHYSDISK = 0x0A,
+ MPI_CONFIG_PAGETYPE_INBAND = 0x0B,
+ MPI_CONFIG_PAGETYPE_EXTENDED = 0x0F,
+ MPI_CONFIG_PAGETYPE_MASK = 0x0F,
+
+ MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT = 0x10,
+ MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER = 0x11,
+ MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE = 0x12,
+ MPI_CONFIG_EXTPAGETYPE_SAS_PHY = 0x13,
+ MPI_CONFIG_EXTPAGETYPE_LOG = 0x14,
+ MPI_CONFIG_EXTPAGETYPE_ENCLOSURE = 0x15,
+
+ MPI_SCSI_PORT_PGAD_PORT_MASK = 0x000000FF,
+
+ MPI_SCSI_DEVICE_FORM_MASK = 0xF0000000,
+ MPI_SCSI_DEVICE_FORM_BUS_TID = 0x00000000,
+ MPI_SCSI_DEVICE_TARGET_ID_MASK = 0x000000FF,
+ MPI_SCSI_DEVICE_TARGET_ID_SHIFT = 0,
+ MPI_SCSI_DEVICE_BUS_MASK = 0x0000FF00,
+ MPI_SCSI_DEVICE_BUS_SHIFT = 8,
+ MPI_SCSI_DEVICE_FORM_TARGET_MODE = 0x10000000,
+ MPI_SCSI_DEVICE_TM_RESPOND_ID_MASK = 0x000000FF,
+ MPI_SCSI_DEVICE_TM_RESPOND_ID_SHIFT = 0,
+ MPI_SCSI_DEVICE_TM_BUS_MASK = 0x0000FF00,
+ MPI_SCSI_DEVICE_TM_BUS_SHIFT = 8,
+ MPI_SCSI_DEVICE_TM_INIT_ID_MASK = 0x00FF0000,
+ MPI_SCSI_DEVICE_TM_INIT_ID_SHIFT = 16,
+
+ MPI_FC_PORT_PGAD_PORT_MASK = 0xF0000000,
+ MPI_FC_PORT_PGAD_PORT_SHIFT = 28,
+ MPI_FC_PORT_PGAD_FORM_MASK = 0x0F000000,
+ MPI_FC_PORT_PGAD_FORM_INDEX = 0x01000000,
+ MPI_FC_PORT_PGAD_INDEX_MASK = 0x0000FFFF,
+ MPI_FC_PORT_PGAD_INDEX_SHIFT = 0,
+
+ MPI_FC_DEVICE_PGAD_PORT_MASK = 0xF0000000,
+ MPI_FC_DEVICE_PGAD_PORT_SHIFT = 28,
+ MPI_FC_DEVICE_PGAD_FORM_MASK = 0x0F000000,
+ MPI_FC_DEVICE_PGAD_FORM_NEXT_DID = 0x00000000,
+ MPI_FC_DEVICE_PGAD_ND_PORT_MASK = 0xF0000000,
+ MPI_FC_DEVICE_PGAD_ND_PORT_SHIFT = 28,
+ MPI_FC_DEVICE_PGAD_ND_DID_MASK = 0x00FFFFFF,
+ MPI_FC_DEVICE_PGAD_ND_DID_SHIFT = 0,
+ MPI_FC_DEVICE_PGAD_FORM_BUS_TID = 0x01000000,
+ MPI_FC_DEVICE_PGAD_BT_BUS_MASK = 0x0000FF00,
+ MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT = 8,
+ MPI_FC_DEVICE_PGAD_BT_TID_MASK = 0x000000FF,
+ MPI_FC_DEVICE_PGAD_BT_TID_SHIFT = 0,
+
+ MPI_PHYSDISK_PGAD_PHYSDISKNUM_MASK = 0x000000FF,
+ MPI_PHYSDISK_PGAD_PHYSDISKNUM_SHIFT = 0,
+
+ MPI_SAS_EXPAND_PGAD_FORM_MASK = 0xF0000000,
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT = 28,
+ MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE = 0x00000000,
+ MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM = 0x00000001,
+ MPI_SAS_EXPAND_PGAD_FORM_HANDLE = 0x00000002,
+ MPI_SAS_EXPAND_PGAD_GNH_MASK_HANDLE = 0x0000FFFF,
+ MPI_SAS_EXPAND_PGAD_GNH_SHIFT_HANDLE = 0,
+ MPI_SAS_EXPAND_PGAD_HPN_MASK_PHY = 0x00FF0000,
+ MPI_SAS_EXPAND_PGAD_HPN_SHIFT_PHY = 16,
+ MPI_SAS_EXPAND_PGAD_HPN_MASK_HANDLE = 0x0000FFFF,
+ MPI_SAS_EXPAND_PGAD_HPN_SHIFT_HANDLE = 0,
+ MPI_SAS_EXPAND_PGAD_H_MASK_HANDLE = 0x0000FFFF,
+ MPI_SAS_EXPAND_PGAD_H_SHIFT_HANDLE = 0,
+
+ MPI_SAS_DEVICE_PGAD_FORM_MASK = 0xF0000000,
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT = 28,
+ MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE = 0x00000000,
+ MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID = 0x00000001,
+ MPI_SAS_DEVICE_PGAD_FORM_HANDLE = 0x00000002,
+ MPI_SAS_DEVICE_PGAD_GNH_HANDLE_MASK = 0x0000FFFF,
+ MPI_SAS_DEVICE_PGAD_GNH_HANDLE_SHIFT = 0,
+ MPI_SAS_DEVICE_PGAD_BT_BUS_MASK = 0x0000FF00,
+ MPI_SAS_DEVICE_PGAD_BT_BUS_SHIFT = 8,
+ MPI_SAS_DEVICE_PGAD_BT_TID_MASK = 0x000000FF,
+ MPI_SAS_DEVICE_PGAD_BT_TID_SHIFT = 0,
+ MPI_SAS_DEVICE_PGAD_H_HANDLE_MASK = 0x0000FFFF,
+ MPI_SAS_DEVICE_PGAD_H_HANDLE_SHIFT = 0,
+
+ MPI_SAS_PHY_PGAD_FORM_MASK = 0xF0000000,
+ MPI_SAS_PHY_PGAD_FORM_SHIFT = 28,
+ MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER = 0x0,
+ MPI_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX = 0x1,
+ MPI_SAS_PHY_PGAD_PHY_NUMBER_MASK = 0x000000FF,
+ MPI_SAS_PHY_PGAD_PHY_NUMBER_SHIFT = 0,
+ MPI_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK = 0x0000FFFF,
+ MPI_SAS_PHY_PGAD_PHY_TBL_INDEX_SHIFT = 0,
+
+ MPI_SAS_ENCLOS_PGAD_FORM_MASK = 0xF0000000,
+ MPI_SAS_ENCLOS_PGAD_FORM_SHIFT = 28,
+ MPI_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE = 0x00000000,
+ MPI_SAS_ENCLOS_PGAD_FORM_HANDLE = 0x00000001,
+ MPI_SAS_ENCLOS_PGAD_GNH_HANDLE_MASK = 0x0000FFFF,
+ MPI_SAS_ENCLOS_PGAD_GNH_HANDLE_SHIFT = 0,
+ MPI_SAS_ENCLOS_PGAD_H_HANDLE_MASK = 0x0000FFFF,
+ MPI_SAS_ENCLOS_PGAD_H_HANDLE_SHIFT = 0,
+};
+
+/* Too many structs and definitions... see mptconfig.c for the few
+ * that are used.
+ */
+
+/****************************************************************************/
+/* Firmware Upload message and associated structures */
+/****************************************************************************/
+
+enum {
+ /* defines for using the ProductId field */
+ MPI_FW_HEADER_PID_TYPE_MASK = 0xF000,
+ MPI_FW_HEADER_PID_TYPE_SCSI = 0x0000,
+ MPI_FW_HEADER_PID_TYPE_FC = 0x1000,
+ MPI_FW_HEADER_PID_TYPE_SAS = 0x2000,
+
+ MPI_FW_HEADER_PID_PROD_MASK = 0x0F00,
+ MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI = 0x0100,
+ MPI_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI = 0x0200,
+ MPI_FW_HEADER_PID_PROD_TARGET_SCSI = 0x0300,
+ MPI_FW_HEADER_PID_PROD_IM_SCSI = 0x0400,
+ MPI_FW_HEADER_PID_PROD_IS_SCSI = 0x0500,
+ MPI_FW_HEADER_PID_PROD_CTX_SCSI = 0x0600,
+ MPI_FW_HEADER_PID_PROD_IR_SCSI = 0x0700,
+
+ MPI_FW_HEADER_PID_FAMILY_MASK = 0x00FF,
+
+ /* SCSI */
+ MPI_FW_HEADER_PID_FAMILY_1030A0_SCSI = 0x0001,
+ MPI_FW_HEADER_PID_FAMILY_1030B0_SCSI = 0x0002,
+ MPI_FW_HEADER_PID_FAMILY_1030B1_SCSI = 0x0003,
+ MPI_FW_HEADER_PID_FAMILY_1030C0_SCSI = 0x0004,
+ MPI_FW_HEADER_PID_FAMILY_1020A0_SCSI = 0x0005,
+ MPI_FW_HEADER_PID_FAMILY_1020B0_SCSI = 0x0006,
+ MPI_FW_HEADER_PID_FAMILY_1020B1_SCSI = 0x0007,
+ MPI_FW_HEADER_PID_FAMILY_1020C0_SCSI = 0x0008,
+ MPI_FW_HEADER_PID_FAMILY_1035A0_SCSI = 0x0009,
+ MPI_FW_HEADER_PID_FAMILY_1035B0_SCSI = 0x000A,
+ MPI_FW_HEADER_PID_FAMILY_1030TA0_SCSI = 0x000B,
+ MPI_FW_HEADER_PID_FAMILY_1020TA0_SCSI = 0x000C,
+
+ /* Fibre Channel */
+ MPI_FW_HEADER_PID_FAMILY_909_FC = 0x0000,
+ MPI_FW_HEADER_PID_FAMILY_919_FC = 0x0001, /* 919 and 929 */
+ MPI_FW_HEADER_PID_FAMILY_919X_FC = 0x0002, /* 919X and 929X */
+ MPI_FW_HEADER_PID_FAMILY_919XL_FC = 0x0003, /* 919XL and 929XL */
+ MPI_FW_HEADER_PID_FAMILY_939X_FC = 0x0004, /* 939X and 949X */
+ MPI_FW_HEADER_PID_FAMILY_959_FC = 0x0005,
+ MPI_FW_HEADER_PID_FAMILY_949E_FC = 0x0006,
+
+ /* SAS */
+ MPI_FW_HEADER_PID_FAMILY_1064_SAS = 0x0001,
+ MPI_FW_HEADER_PID_FAMILY_1068_SAS = 0x0002,
+ MPI_FW_HEADER_PID_FAMILY_1078_SAS = 0x0003,
+ MPI_FW_HEADER_PID_FAMILY_106xE_SAS = 0x0004, /* 1068E, 1066E, and 1064E */
+};
+
+#endif
diff --git a/hw/scsi/mptconfig.c b/hw/scsi/mptconfig.c
new file mode 100644
index 0000000000..d04982513a
--- /dev/null
+++ b/hw/scsi/mptconfig.c
@@ -0,0 +1,904 @@
+/*
+ * QEMU LSI SAS1068 Host Bus Adapter emulation - configuration pages
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Author: Paolo Bonzini
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ */
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "hw/scsi/scsi.h"
+
+#include "mptsas.h"
+#include "mpi.h"
+#include "trace.h"
+
+/* Generic functions for marshaling and unmarshaling. */
+
+#define repl1(x) x
+#define repl2(x) x x
+#define repl3(x) x x x
+#define repl4(x) x x x x
+#define repl5(x) x x x x x
+#define repl6(x) x x x x x x
+#define repl7(x) x x x x x x x
+#define repl8(x) x x x x x x x x
+
+#define repl(n, x) glue(repl, n)(x)
+
+typedef union PackValue {
+ uint64_t ll;
+ char *str;
+} PackValue;
+
+static size_t vfill(uint8_t *data, size_t size, const char *fmt, va_list ap)
+{
+ size_t ofs;
+ PackValue val;
+ const char *p;
+
+ ofs = 0;
+ p = fmt;
+ while (*p) {
+ memset(&val, 0, sizeof(val));
+ switch (*p) {
+ case '*':
+ p++;
+ break;
+ case 'b':
+ case 'w':
+ case 'l':
+ val.ll = va_arg(ap, int);
+ break;
+ case 'q':
+ val.ll = va_arg(ap, int64_t);
+ break;
+ case 's':
+ val.str = va_arg(ap, void *);
+ break;
+ }
+ switch (*p++) {
+ case 'b':
+ if (data) {
+ stb_p(data + ofs, val.ll);
+ }
+ ofs++;
+ break;
+ case 'w':
+ if (data) {
+ stw_le_p(data + ofs, val.ll);
+ }
+ ofs += 2;
+ break;
+ case 'l':
+ if (data) {
+ stl_le_p(data + ofs, val.ll);
+ }
+ ofs += 4;
+ break;
+ case 'q':
+ if (data) {
+ stq_le_p(data + ofs, val.ll);
+ }
+ ofs += 8;
+ break;
+ case 's':
+ {
+ int cnt = atoi(p);
+ if (data) {
+ if (val.str) {
+ strncpy((void *)data + ofs, val.str, cnt);
+ } else {
+ memset((void *)data + ofs, 0, cnt);
+ }
+ }
+ ofs += cnt;
+ break;
+ }
+ }
+ }
+
+ return ofs;
+}
+
+static size_t vpack(uint8_t **p_data, const char *fmt, va_list ap1)
+{
+ size_t size = 0;
+ uint8_t *data = NULL;
+
+ if (p_data) {
+ va_list ap2;
+
+ va_copy(ap2, ap1);
+ size = vfill(NULL, 0, fmt, ap2);
+ *p_data = data = g_malloc(size);
+ }
+ return vfill(data, size, fmt, ap1);
+}
+
+static size_t fill(uint8_t *data, size_t size, const char *fmt, ...)
+{
+ va_list ap;
+ size_t ret;
+
+ va_start(ap, fmt);
+ ret = vfill(data, size, fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+
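Each format character emits one little-endian field: 'b' a byte, 'w' a
16-bit word, 'l' a 32-bit dword, 'q' a 64-bit qword, and 'sNN' an NN-byte
NUL-padded string; a '*' prefix emits the next field as zero without
consuming an argument. A hypothetical call:

    /* One byte, one zeroed byte, one LE dword, then a 16-byte
     * NUL-padded string: 1 + 1 + 4 + 16 = 22 bytes in total. */
    uint8_t buf[22];
    size_t n = fill(buf, sizeof(buf), "b*bls16",
                    0x7f,         /* 'b' */
                    0x12345678,   /* 'l' */
                    "QEMU");      /* 's16', padded with zeros */
    assert(n == 22);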
+/* Functions to build the page header and fill in the length, always used
+ * through the macros.
+ */
+
+#define MPTSAS_CONFIG_PACK(number, type, version, fmt, ...) \
+ mptsas_config_pack(data, "b*bbb" fmt, version, number, type, \
+ ## __VA_ARGS__)
+
+static size_t mptsas_config_pack(uint8_t **data, const char *fmt, ...)
+{
+ va_list ap;
+ size_t ret;
+
+ va_start(ap, fmt);
+ ret = vpack(data, fmt, ap);
+ va_end(ap);
+
+ if (data) {
+ assert(ret < 256 && (ret % 4) == 0);
+ stb_p(*data + 1, ret / 4);
+ }
+ return ret;
+}
+
+#define MPTSAS_CONFIG_PACK_EXT(number, type, version, fmt, ...) \
+ mptsas_config_pack_ext(data, "b*bbb*wb*b" fmt, version, number, \
+ MPI_CONFIG_PAGETYPE_EXTENDED, type, ## __VA_ARGS__)
+
+static size_t mptsas_config_pack_ext(uint8_t **data, const char *fmt, ...)
+{
+ va_list ap;
+ size_t ret;
+
+ va_start(ap, fmt);
+ ret = vpack(data, fmt, ap);
+ va_end(ap);
+
+ if (data) {
+ assert(ret < 65536 && (ret % 4) == 0);
+ stw_le_p(*data + 4, ret / 4);
+ }
+ return ret;
+}
+
+/* Manufacturing pages */
+
+static
+size_t mptsas_config_manufacturing_0(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(0, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "s16s8s16s16s16",
+ "QEMU MPT Fusion",
+ "2.5",
+ "QEMU MPT Fusion",
+ "QEMU",
+ "0000111122223333");
+}
+
+static
+size_t mptsas_config_manufacturing_1(MPTSASState *s, uint8_t **data, int address)
+{
+ /* VPD - all zeros */
+ return MPTSAS_CONFIG_PACK(1, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "s256");
+}
+
+static
+size_t mptsas_config_manufacturing_2(MPTSASState *s, uint8_t **data, int address)
+{
+ PCIDeviceClass *pcic = PCI_DEVICE_GET_CLASS(s);
+ return MPTSAS_CONFIG_PACK(2, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "wb*b*l",
+ pcic->device_id, pcic->revision);
+}
+
+static
+size_t mptsas_config_manufacturing_3(MPTSASState *s, uint8_t **data, int address)
+{
+ PCIDeviceClass *pcic = PCI_DEVICE_GET_CLASS(s);
+ return MPTSAS_CONFIG_PACK(3, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "wb*b*l",
+ pcic->device_id, pcic->revision);
+}
+
+static
+size_t mptsas_config_manufacturing_4(MPTSASState *s, uint8_t **data, int address)
+{
+ /* All zeros */
+ return MPTSAS_CONFIG_PACK(4, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x05,
+ "*l*b*b*b*b*b*b*w*s56*l*l*l*l*l*l"
+ "*b*b*w*b*b*w*l*l");
+}
+
+static
+size_t mptsas_config_manufacturing_5(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(5, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x02,
+ "q*b*b*w*l*l", s->sas_addr);
+}
+
+static
+size_t mptsas_config_manufacturing_6(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(6, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "*l");
+}
+
+static
+size_t mptsas_config_manufacturing_7(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(7, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "*l*l*l*s16*b*b*w", MPTSAS_NUM_PORTS);
+}
+
+static
+size_t mptsas_config_manufacturing_8(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(8, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "*l");
+}
+
+static
+size_t mptsas_config_manufacturing_9(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(9, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "*l");
+}
+
+static
+size_t mptsas_config_manufacturing_10(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(10, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
+ "*l");
+}
+
+/* I/O unit pages */
+
+static
+size_t mptsas_config_io_unit_0(MPTSASState *s, uint8_t **data, int address)
+{
+ PCIDevice *pci = PCI_DEVICE(s);
+    uint64_t unique_value = 0x53504D554D4551LL; /* "QEMUMPS" + devfn */
+
+ unique_value |= (uint64_t)pci->devfn << 56;
+ return MPTSAS_CONFIG_PACK(0, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x00,
+ "q", unique_value);
+}
+
+static
+size_t mptsas_config_io_unit_1(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(1, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x02, "l",
+ 0x41 /* single function, RAID disabled */ );
+}
+
+static
+size_t mptsas_config_io_unit_2(MPTSASState *s, uint8_t **data, int address)
+{
+ PCIDevice *pci = PCI_DEVICE(s);
+ uint8_t devfn = pci->devfn;
+ return MPTSAS_CONFIG_PACK(2, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x02,
+ "llbbw*b*b*w*b*b*w*b*b*w*l",
+ 0, 0x100, 0 /* pci bus? */, devfn, 0);
+}
+
+static
+size_t mptsas_config_io_unit_3(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(3, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x01,
+ "*b*b*w*l");
+}
+
+static
+size_t mptsas_config_io_unit_4(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(4, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x00, "*l*l*q");
+}
+
+/* I/O controller pages */
+
+static
+size_t mptsas_config_ioc_0(MPTSASState *s, uint8_t **data, int address)
+{
+ PCIDeviceClass *pcic = PCI_DEVICE_GET_CLASS(s);
+
+ return MPTSAS_CONFIG_PACK(0, MPI_CONFIG_PAGETYPE_IOC, 0x01,
+ "*l*lwwb*b*b*blww",
+ pcic->vendor_id, pcic->device_id, pcic->revision,
+ pcic->subsystem_vendor_id,
+ pcic->subsystem_id);
+}
+
+static
+size_t mptsas_config_ioc_1(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(1, MPI_CONFIG_PAGETYPE_IOC, 0x03,
+ "*l*l*b*b*b*b");
+}
+
+static
+size_t mptsas_config_ioc_2(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(2, MPI_CONFIG_PAGETYPE_IOC, 0x04,
+ "*l*b*b*b*b");
+}
+
+static
+size_t mptsas_config_ioc_3(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(3, MPI_CONFIG_PAGETYPE_IOC, 0x00,
+ "*b*b*w");
+}
+
+static
+size_t mptsas_config_ioc_4(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(4, MPI_CONFIG_PAGETYPE_IOC, 0x00,
+ "*b*b*w");
+}
+
+static
+size_t mptsas_config_ioc_5(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(5, MPI_CONFIG_PAGETYPE_IOC, 0x00,
+ "*l*b*b*w");
+}
+
+static
+size_t mptsas_config_ioc_6(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK(6, MPI_CONFIG_PAGETYPE_IOC, 0x01,
+ "*l*b*b*b*b*b*b*b*b*b*b*w*l*l*l*l*b*b*w"
+ "*w*w*w*w*l*l*l");
+}
+
+/* SAS I/O unit pages (extended) */
+
+#define MPTSAS_CONFIG_SAS_IO_UNIT_0_SIZE 16
+
+#define MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION 0x02
+#define MPI_SAS_IOUNIT0_RATE_1_5 0x08
+#define MPI_SAS_IOUNIT0_RATE_3_0 0x09
+
+#define MPI_SAS_DEVICE_INFO_NO_DEVICE 0x00000000
+#define MPI_SAS_DEVICE_INFO_END_DEVICE 0x00000001
+#define MPI_SAS_DEVICE_INFO_SSP_TARGET 0x00000400
+
+#define MPI_SAS_DEVICE0_ASTATUS_NO_ERRORS 0x00
+
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT 0x0001
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED 0x0002
+#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT 0x0004
+
+
+
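+/* Handle assignment convention used throughout these pages: PHY i gets
+ * phy handle i + 1, and an attached device gets device handle
+ * i + 1 + MPTSAS_NUM_PORTS (0 when no device is present).
+ */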
+static SCSIDevice *mptsas_phy_get_device(MPTSASState *s, int i,
+ int *phy_handle, int *dev_handle)
+{
+ SCSIDevice *d = scsi_device_find(&s->bus, 0, i, 0);
+
+ if (phy_handle) {
+ *phy_handle = i + 1;
+ }
+ if (dev_handle) {
+ *dev_handle = d ? i + 1 + MPTSAS_NUM_PORTS : 0;
+ }
+ return d;
+}
+
+static
+size_t mptsas_config_sas_io_unit_0(MPTSASState *s, uint8_t **data, int address)
+{
+ size_t size = MPTSAS_CONFIG_PACK_EXT(0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x04,
+ "*w*wb*b*w"
+ repl(MPTSAS_NUM_PORTS, "*s16"),
+ MPTSAS_NUM_PORTS);
+
+ if (data) {
+ size_t ofs = size - MPTSAS_NUM_PORTS * MPTSAS_CONFIG_SAS_IO_UNIT_0_SIZE;
+ int i;
+
+ for (i = 0; i < MPTSAS_NUM_PORTS; i++) {
+ int phy_handle, dev_handle;
+ SCSIDevice *dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle);
+
+ fill(*data + ofs, MPTSAS_CONFIG_SAS_IO_UNIT_0_SIZE,
+ "bbbblwwl", i, 0, 0,
+ (dev
+ ? MPI_SAS_IOUNIT0_RATE_3_0
+ : MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION),
+ (dev
+ ? MPI_SAS_DEVICE_INFO_END_DEVICE | MPI_SAS_DEVICE_INFO_SSP_TARGET
+ : MPI_SAS_DEVICE_INFO_NO_DEVICE),
+ dev_handle,
+ dev_handle,
+ 0);
+ ofs += MPTSAS_CONFIG_SAS_IO_UNIT_0_SIZE;
+ }
+ assert(ofs == size);
+ }
+ return size;
+}
+
+#define MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE 12
+
+static
+size_t mptsas_config_sas_io_unit_1(MPTSASState *s, uint8_t **data, int address)
+{
+ size_t size = MPTSAS_CONFIG_PACK_EXT(1, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x07,
+ "*w*w*w*wb*b*b*b"
+ repl(MPTSAS_NUM_PORTS, "*s12"),
+ MPTSAS_NUM_PORTS);
+
+ if (data) {
+ size_t ofs = size - MPTSAS_NUM_PORTS * MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE;
+ int i;
+
+ for (i = 0; i < MPTSAS_NUM_PORTS; i++) {
+ SCSIDevice *dev = mptsas_phy_get_device(s, i, NULL, NULL);
+ fill(*data + ofs, MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE,
+ "bbbblww", i, 0, 0,
+ (MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5,
+ (dev
+ ? MPI_SAS_DEVICE_INFO_END_DEVICE | MPI_SAS_DEVICE_INFO_SSP_TARGET
+ : MPI_SAS_DEVICE_INFO_NO_DEVICE),
+ 0, 0);
+ ofs += MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE;
+ }
+ assert(ofs == size);
+ }
+ return size;
+}
+
+static
+size_t mptsas_config_sas_io_unit_2(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK_EXT(2, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x06,
+ "*b*b*w*w*w*b*b*w");
+}
+
+static
+size_t mptsas_config_sas_io_unit_3(MPTSASState *s, uint8_t **data, int address)
+{
+ return MPTSAS_CONFIG_PACK_EXT(3, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x06,
+ "*l*l*l*l*l*l*l*l*l");
+}
+
+/* SAS PHY pages (extended) */
+
+static int mptsas_phy_addr_get(MPTSASState *s, int address)
+{
+ int i;
+ if ((address >> MPI_SAS_PHY_PGAD_FORM_SHIFT) == 0) {
+ i = address & 255;
+ } else if ((address >> MPI_SAS_PHY_PGAD_FORM_SHIFT) == 1) {
+ i = address & 65535;
+ } else {
+ return -EINVAL;
+ }
+
+ if (i >= MPTSAS_NUM_PORTS) {
+ return -EINVAL;
+ }
+
+ return i;
+}
+
+static
+size_t mptsas_config_phy_0(MPTSASState *s, uint8_t **data, int address)
+{
+ int phy_handle = -1;
+ int dev_handle = -1;
+ int i = mptsas_phy_addr_get(s, address);
+ SCSIDevice *dev;
+
+ if (i < 0) {
+ trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 0);
+ return i;
+ }
+
+ dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle);
+ trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 0);
+
+ return MPTSAS_CONFIG_PACK_EXT(0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, 0x01,
+ "w*wqwb*blbb*b*b*l",
+ dev_handle, s->sas_addr, dev_handle, i,
+ (dev
+ ? MPI_SAS_DEVICE_INFO_END_DEVICE /* | MPI_SAS_DEVICE_INFO_SSP_TARGET?? */
+ : MPI_SAS_DEVICE_INFO_NO_DEVICE),
+ (MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5,
+ (MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5);
+}
+
+static
+size_t mptsas_config_phy_1(MPTSASState *s, uint8_t **data, int address)
+{
+ int phy_handle = -1;
+ int dev_handle = -1;
+ int i = mptsas_phy_addr_get(s, address);
+
+ if (i < 0) {
+ trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 1);
+ return i;
+ }
+
+ (void) mptsas_phy_get_device(s, i, &phy_handle, &dev_handle);
+ trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 1);
+
+ return MPTSAS_CONFIG_PACK_EXT(1, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, 0x01,
+ "*l*l*l*l*l");
+}
+
+/* SAS device pages (extended) */
+
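+/* Decode the PageAddress field into a PHY number.  Three forms are
+ * accepted: GET_NEXT_HANDLE scans forward from the given device handle
+ * for the next attached device, BUS_TARGET_ID looks up bus 0 by target
+ * ID, and HANDLE translates a device handle directly.
+ */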
+static int mptsas_device_addr_get(MPTSASState *s, int address)
+{
+ uint32_t handle, i;
+    uint32_t form = address >> MPI_SAS_DEVICE_PGAD_FORM_SHIFT;
+ if (form == MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE) {
+ handle = address & MPI_SAS_DEVICE_PGAD_GNH_HANDLE_MASK;
+ do {
+ if (handle == 65535) {
+ handle = MPTSAS_NUM_PORTS + 1;
+ } else {
+ ++handle;
+ }
+ i = handle - 1 - MPTSAS_NUM_PORTS;
+ } while (i < MPTSAS_NUM_PORTS && !scsi_device_find(&s->bus, 0, i, 0));
+
+ } else if (form == MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID) {
+ if (address & MPI_SAS_DEVICE_PGAD_BT_BUS_MASK) {
+ return -EINVAL;
+ }
+ i = address & MPI_SAS_DEVICE_PGAD_BT_TID_MASK;
+
+ } else if (form == MPI_SAS_DEVICE_PGAD_FORM_HANDLE) {
+ handle = address & MPI_SAS_DEVICE_PGAD_H_HANDLE_MASK;
+ i = handle - 1 - MPTSAS_NUM_PORTS;
+
+ } else {
+ return -EINVAL;
+ }
+
+ if (i >= MPTSAS_NUM_PORTS) {
+ return -EINVAL;
+ }
+
+ return i;
+}
+
+static
+size_t mptsas_config_sas_device_0(MPTSASState *s, uint8_t **data, int address)
+{
+ int phy_handle = -1;
+ int dev_handle = -1;
+ int i = mptsas_device_addr_get(s, address);
+ SCSIDevice *dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle);
+
+ trace_mptsas_config_sas_device(s, address, i, phy_handle, dev_handle, 0);
+ if (!dev) {
+ return -ENOENT;
+ }
+
+ return MPTSAS_CONFIG_PACK_EXT(0, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 0x05,
+ "*w*wqwbbwbblwb*b",
+ dev->wwn, phy_handle, i,
+ MPI_SAS_DEVICE0_ASTATUS_NO_ERRORS,
+ dev_handle, i, 0,
+ MPI_SAS_DEVICE_INFO_END_DEVICE | MPI_SAS_DEVICE_INFO_SSP_TARGET,
+ (MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT |
+ MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED |
+ MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT), i);
+}
+
+static
+size_t mptsas_config_sas_device_1(MPTSASState *s, uint8_t **data, int address)
+{
+ int phy_handle = -1;
+ int dev_handle = -1;
+ int i = mptsas_device_addr_get(s, address);
+ SCSIDevice *dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle);
+
+ trace_mptsas_config_sas_device(s, address, i, phy_handle, dev_handle, 1);
+ if (!dev) {
+ return -ENOENT;
+ }
+
+ return MPTSAS_CONFIG_PACK_EXT(1, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 0x00,
+ "*lq*lwbb*s20",
+ dev->wwn, dev_handle, i, 0);
+}
+
+static
+size_t mptsas_config_sas_device_2(MPTSASState *s, uint8_t **data, int address)
+{
+ int phy_handle = -1;
+ int dev_handle = -1;
+ int i = mptsas_device_addr_get(s, address);
+ SCSIDevice *dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle);
+
+ trace_mptsas_config_sas_device(s, address, i, phy_handle, dev_handle, 2);
+ if (!dev) {
+ return -ENOENT;
+ }
+
+ return MPTSAS_CONFIG_PACK_EXT(2, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 0x01,
+ "ql", dev->wwn, 0);
+}
+
+typedef struct MPTSASConfigPage {
+ uint8_t number;
+ uint8_t type;
+ size_t (*mpt_config_build)(MPTSASState *s, uint8_t **data, int address);
+} MPTSASConfigPage;
+
+static const MPTSASConfigPage mptsas_config_pages[] = {
+ {
+ 0, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_0,
+ }, {
+ 1, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_1,
+ }, {
+ 2, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_2,
+ }, {
+ 3, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_3,
+ }, {
+ 4, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_4,
+ }, {
+ 5, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_5,
+ }, {
+ 6, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_6,
+ }, {
+ 7, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_7,
+ }, {
+ 8, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_8,
+ }, {
+ 9, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_9,
+ }, {
+ 10, MPI_CONFIG_PAGETYPE_MANUFACTURING,
+ mptsas_config_manufacturing_10,
+ }, {
+ 0, MPI_CONFIG_PAGETYPE_IO_UNIT,
+ mptsas_config_io_unit_0,
+ }, {
+ 1, MPI_CONFIG_PAGETYPE_IO_UNIT,
+ mptsas_config_io_unit_1,
+ }, {
+ 2, MPI_CONFIG_PAGETYPE_IO_UNIT,
+ mptsas_config_io_unit_2,
+ }, {
+ 3, MPI_CONFIG_PAGETYPE_IO_UNIT,
+ mptsas_config_io_unit_3,
+ }, {
+ 4, MPI_CONFIG_PAGETYPE_IO_UNIT,
+ mptsas_config_io_unit_4,
+ }, {
+ 0, MPI_CONFIG_PAGETYPE_IOC,
+ mptsas_config_ioc_0,
+ }, {
+ 1, MPI_CONFIG_PAGETYPE_IOC,
+ mptsas_config_ioc_1,
+ }, {
+ 2, MPI_CONFIG_PAGETYPE_IOC,
+ mptsas_config_ioc_2,
+ }, {
+ 3, MPI_CONFIG_PAGETYPE_IOC,
+ mptsas_config_ioc_3,
+ }, {
+ 4, MPI_CONFIG_PAGETYPE_IOC,
+ mptsas_config_ioc_4,
+ }, {
+ 5, MPI_CONFIG_PAGETYPE_IOC,
+ mptsas_config_ioc_5,
+ }, {
+ 6, MPI_CONFIG_PAGETYPE_IOC,
+ mptsas_config_ioc_6,
+ }, {
+ 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
+ mptsas_config_sas_io_unit_0,
+ }, {
+ 1, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
+ mptsas_config_sas_io_unit_1,
+ }, {
+ 2, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
+ mptsas_config_sas_io_unit_2,
+ }, {
+ 3, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
+ mptsas_config_sas_io_unit_3,
+ }, {
+ 0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY,
+ mptsas_config_phy_0,
+ }, {
+ 1, MPI_CONFIG_EXTPAGETYPE_SAS_PHY,
+ mptsas_config_phy_1,
+ }, {
+ 0, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
+ mptsas_config_sas_device_0,
+ }, {
+ 1, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
+ mptsas_config_sas_device_1,
+ }, {
+ 2, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
+ mptsas_config_sas_device_2,
+ }
+};
+
+static const MPTSASConfigPage *mptsas_find_config_page(int type, int number)
+{
+ const MPTSASConfigPage *page;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mptsas_config_pages); i++) {
+ page = &mptsas_config_pages[i];
+ if (page->type == type && page->number == number) {
+ return page;
+ }
+ }
+
+ return NULL;
+}
+
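+/* Dispatch a CONFIG request: look up the page by (type, number), let the
+ * builder compute the length only (data == NULL) for HEADER-style actions,
+ * and DMA the full page through the request's SGE for read actions.
+ * Write actions are parsed but refused with MPI_IOCSTATUS_CONFIG_CANT_COMMIT.
+ */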
+void mptsas_process_config(MPTSASState *s, MPIMsgConfig *req)
+{
+ PCIDevice *pci = PCI_DEVICE(s);
+
+ MPIMsgConfigReply reply;
+ const MPTSASConfigPage *page;
+ size_t length;
+ uint8_t type;
+ uint8_t *data = NULL;
+ uint32_t flags_and_length;
+ uint32_t dmalen;
+ uint64_t pa;
+
+ mptsas_fix_config_endianness(req);
+
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply));
+
+ /* Copy common bits from the request into the reply. */
+ memset(&reply, 0, sizeof(reply));
+ reply.Action = req->Action;
+ reply.Function = req->Function;
+ reply.MsgContext = req->MsgContext;
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.PageType = req->PageType;
+ reply.PageNumber = req->PageNumber;
+ reply.PageLength = req->PageLength;
+ reply.PageVersion = req->PageVersion;
+
+ type = req->PageType & MPI_CONFIG_PAGETYPE_MASK;
+ if (type == MPI_CONFIG_PAGETYPE_EXTENDED) {
+ type = req->ExtPageType;
+ if (type <= MPI_CONFIG_PAGETYPE_MASK) {
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_TYPE;
+ goto out;
+ }
+
+ reply.ExtPageType = req->ExtPageType;
+ }
+
+ page = mptsas_find_config_page(type, req->PageNumber);
+
+    switch (req->Action) {
+ case MPI_CONFIG_ACTION_PAGE_DEFAULT:
+ case MPI_CONFIG_ACTION_PAGE_HEADER:
+ case MPI_CONFIG_ACTION_PAGE_READ_NVRAM:
+ case MPI_CONFIG_ACTION_PAGE_READ_CURRENT:
+ case MPI_CONFIG_ACTION_PAGE_READ_DEFAULT:
+ case MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT:
+ case MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM:
+ break;
+
+ default:
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_ACTION;
+ goto out;
+ }
+
+ if (!page) {
+ page = mptsas_find_config_page(type, 1);
+ if (page) {
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE;
+ } else {
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_TYPE;
+ }
+ goto out;
+ }
+
+ if (req->Action == MPI_CONFIG_ACTION_PAGE_DEFAULT ||
+ req->Action == MPI_CONFIG_ACTION_PAGE_HEADER) {
+ length = page->mpt_config_build(s, NULL, req->PageAddress);
+ if ((ssize_t)length < 0) {
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE;
+ goto out;
+ } else {
+ goto done;
+ }
+ }
+
+ if (req->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+ req->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
+ length = page->mpt_config_build(s, NULL, req->PageAddress);
+ if ((ssize_t)length < 0) {
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE;
+ } else {
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_CANT_COMMIT;
+ }
+ goto out;
+ }
+
+ flags_and_length = req->PageBufferSGE.FlagsLength;
+ dmalen = flags_and_length & MPI_SGE_LENGTH_MASK;
+ if (dmalen == 0) {
+ length = page->mpt_config_build(s, NULL, req->PageAddress);
+ if ((ssize_t)length < 0) {
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE;
+ goto out;
+ } else {
+ goto done;
+ }
+ }
+
+ if (flags_and_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
+ pa = req->PageBufferSGE.u.Address64;
+ } else {
+ pa = req->PageBufferSGE.u.Address32;
+ }
+
+ /* Only read actions left. */
+ length = page->mpt_config_build(s, &data, req->PageAddress);
+ if ((ssize_t)length < 0) {
+ reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE;
+ goto out;
+ } else {
+ assert(data[2] == page->number);
+ pci_dma_write(pci, pa, data, MIN(length, dmalen));
+ goto done;
+ }
+
+ abort();
+
+done:
+ if (type > MPI_CONFIG_PAGETYPE_MASK) {
+ reply.ExtPageLength = length / 4;
+ reply.ExtPageType = req->ExtPageType;
+ } else {
+ reply.PageLength = length / 4;
+ }
+
+out:
+ mptsas_fix_config_reply_endianness(&reply);
+ mptsas_reply(s, (MPIDefaultReply *)&reply);
+ g_free(data);
+}
diff --git a/hw/scsi/mptendian.c b/hw/scsi/mptendian.c
new file mode 100644
index 0000000000..b7fe2a2a36
--- /dev/null
+++ b/hw/scsi/mptendian.c
@@ -0,0 +1,204 @@
+/*
+ * QEMU LSI SAS1068 Host Bus Adapter emulation
+ * Endianness conversion for MPI data structures
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "sysemu/dma.h"
+#include "sysemu/block-backend.h"
+#include "hw/pci/msi.h"
+#include "qemu/iov.h"
+#include "hw/scsi/scsi.h"
+#include "block/scsi.h"
+#include "trace.h"
+
+#include "mptsas.h"
+#include "mpi.h"
+
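+/* Requests arrive little-endian: convert FlagsLength first, then the
+ * address whose width it selects.  Replies go the other way, so the
+ * flags must be inspected while still in host order.
+ */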
+static void mptsas_fix_sgentry_endianness(MPISGEntry *sge)
+{
+ le32_to_cpus(&sge->FlagsLength);
+ if (sge->FlagsLength & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
+ le64_to_cpus(&sge->u.Address64);
+ } else {
+ le32_to_cpus(&sge->u.Address32);
+ }
+}
+
+static void mptsas_fix_sgentry_endianness_reply(MPISGEntry *sge)
+{
+ if (sge->FlagsLength & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
+ cpu_to_le64s(&sge->u.Address64);
+ } else {
+ cpu_to_le32s(&sge->u.Address32);
+ }
+ cpu_to_le32s(&sge->FlagsLength);
+}
+
+void mptsas_fix_scsi_io_endianness(MPIMsgSCSIIORequest *req)
+{
+ le32_to_cpus(&req->MsgContext);
+ le32_to_cpus(&req->Control);
+ le32_to_cpus(&req->DataLength);
+ le32_to_cpus(&req->SenseBufferLowAddr);
+}
+
+void mptsas_fix_scsi_io_reply_endianness(MPIMsgSCSIIOReply *reply)
+{
+ cpu_to_le32s(&reply->MsgContext);
+ cpu_to_le16s(&reply->IOCStatus);
+ cpu_to_le32s(&reply->IOCLogInfo);
+ cpu_to_le32s(&reply->TransferCount);
+ cpu_to_le32s(&reply->SenseCount);
+ cpu_to_le32s(&reply->ResponseInfo);
+ cpu_to_le16s(&reply->TaskTag);
+}
+
+void mptsas_fix_scsi_task_mgmt_endianness(MPIMsgSCSITaskMgmt *req)
+{
+ le32_to_cpus(&req->MsgContext);
+ le32_to_cpus(&req->TaskMsgContext);
+}
+
+void mptsas_fix_scsi_task_mgmt_reply_endianness(MPIMsgSCSITaskMgmtReply *reply)
+{
+ cpu_to_le32s(&reply->MsgContext);
+ cpu_to_le16s(&reply->IOCStatus);
+ cpu_to_le32s(&reply->IOCLogInfo);
+ cpu_to_le32s(&reply->TerminationCount);
+}
+
+void mptsas_fix_ioc_init_endianness(MPIMsgIOCInit *req)
+{
+ le32_to_cpus(&req->MsgContext);
+ le16_to_cpus(&req->ReplyFrameSize);
+ le32_to_cpus(&req->HostMfaHighAddr);
+ le32_to_cpus(&req->SenseBufferHighAddr);
+ le32_to_cpus(&req->ReplyFifoHostSignalingAddr);
+ mptsas_fix_sgentry_endianness(&req->HostPageBufferSGE);
+ le16_to_cpus(&req->MsgVersion);
+ le16_to_cpus(&req->HeaderVersion);
+}
+
+void mptsas_fix_ioc_init_reply_endianness(MPIMsgIOCInitReply *reply)
+{
+ cpu_to_le32s(&reply->MsgContext);
+ cpu_to_le16s(&reply->IOCStatus);
+ cpu_to_le32s(&reply->IOCLogInfo);
+}
+
+void mptsas_fix_ioc_facts_endianness(MPIMsgIOCFacts *req)
+{
+ le32_to_cpus(&req->MsgContext);
+}
+
+void mptsas_fix_ioc_facts_reply_endianness(MPIMsgIOCFactsReply *reply)
+{
+ cpu_to_le16s(&reply->MsgVersion);
+ cpu_to_le16s(&reply->HeaderVersion);
+ cpu_to_le32s(&reply->MsgContext);
+ cpu_to_le16s(&reply->IOCExceptions);
+ cpu_to_le16s(&reply->IOCStatus);
+ cpu_to_le32s(&reply->IOCLogInfo);
+ cpu_to_le16s(&reply->ReplyQueueDepth);
+ cpu_to_le16s(&reply->RequestFrameSize);
+ cpu_to_le16s(&reply->ProductID);
+ cpu_to_le32s(&reply->CurrentHostMfaHighAddr);
+ cpu_to_le16s(&reply->GlobalCredits);
+ cpu_to_le32s(&reply->CurrentSenseBufferHighAddr);
+ cpu_to_le16s(&reply->CurReplyFrameSize);
+ cpu_to_le32s(&reply->FWImageSize);
+ cpu_to_le32s(&reply->IOCCapabilities);
+ cpu_to_le16s(&reply->HighPriorityQueueDepth);
+ mptsas_fix_sgentry_endianness_reply(&reply->HostPageBufferSGE);
+ cpu_to_le32s(&reply->ReplyFifoHostSignalingAddr);
+}
+
+void mptsas_fix_config_endianness(MPIMsgConfig *req)
+{
+ le16_to_cpus(&req->ExtPageLength);
+ le32_to_cpus(&req->MsgContext);
+ le32_to_cpus(&req->PageAddress);
+ mptsas_fix_sgentry_endianness(&req->PageBufferSGE);
+}
+
+void mptsas_fix_config_reply_endianness(MPIMsgConfigReply *reply)
+{
+ cpu_to_le16s(&reply->ExtPageLength);
+ cpu_to_le32s(&reply->MsgContext);
+ cpu_to_le16s(&reply->IOCStatus);
+ cpu_to_le32s(&reply->IOCLogInfo);
+}
+
+void mptsas_fix_port_facts_endianness(MPIMsgPortFacts *req)
+{
+ le32_to_cpus(&req->MsgContext);
+}
+
+void mptsas_fix_port_facts_reply_endianness(MPIMsgPortFactsReply *reply)
+{
+ cpu_to_le32s(&reply->MsgContext);
+ cpu_to_le16s(&reply->IOCStatus);
+ cpu_to_le32s(&reply->IOCLogInfo);
+ cpu_to_le16s(&reply->MaxDevices);
+ cpu_to_le16s(&reply->PortSCSIID);
+ cpu_to_le16s(&reply->ProtocolFlags);
+ cpu_to_le16s(&reply->MaxPostedCmdBuffers);
+ cpu_to_le16s(&reply->MaxPersistentIDs);
+ cpu_to_le16s(&reply->MaxLanBuckets);
+}
+
+void mptsas_fix_port_enable_endianness(MPIMsgPortEnable *req)
+{
+ le32_to_cpus(&req->MsgContext);
+}
+
+void mptsas_fix_port_enable_reply_endianness(MPIMsgPortEnableReply *reply)
+{
+ cpu_to_le32s(&reply->MsgContext);
+ cpu_to_le16s(&reply->IOCStatus);
+ cpu_to_le32s(&reply->IOCLogInfo);
+}
+
+void mptsas_fix_event_notification_endianness(MPIMsgEventNotify *req)
+{
+ le32_to_cpus(&req->MsgContext);
+}
+
+void mptsas_fix_event_notification_reply_endianness(MPIMsgEventNotifyReply *reply)
+{
+ int length = reply->EventDataLength;
+ int i;
+
+ cpu_to_le16s(&reply->EventDataLength);
+ cpu_to_le32s(&reply->MsgContext);
+ cpu_to_le16s(&reply->IOCStatus);
+ cpu_to_le32s(&reply->IOCLogInfo);
+ cpu_to_le32s(&reply->Event);
+ cpu_to_le32s(&reply->EventContext);
+
+ /* Really depends on the event kind. This will do for now. */
+ for (i = 0; i < length; i++) {
+ cpu_to_le32s(&reply->Data[i]);
+ }
+}
diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
new file mode 100644
index 0000000000..333cc1fb97
--- /dev/null
+++ b/hw/scsi/mptsas.c
@@ -0,0 +1,1441 @@
+/*
+ * QEMU LSI SAS1068 Host Bus Adapter emulation
+ * Based on the QEMU Megaraid emulator
+ *
+ * Copyright (c) 2009-2012 Hannes Reinecke, SUSE Labs
+ * Copyright (c) 2012 Verizon, Inc.
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors: Don Slutz, Paolo Bonzini
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "sysemu/dma.h"
+#include "sysemu/block-backend.h"
+#include "hw/pci/msi.h"
+#include "qemu/iov.h"
+#include "hw/scsi/scsi.h"
+#include "block/scsi.h"
+#include "trace.h"
+
+#include "mptsas.h"
+#include "mpi.h"
+
+#define NAA_LOCALLY_ASSIGNED_ID 0x3ULL
+#define IEEE_COMPANY_LOCALLY_ASSIGNED 0x525400
+
+#define TYPE_MPTSAS1068 "mptsas1068"
+
+#define MPT_SAS(obj) \
+ OBJECT_CHECK(MPTSASState, (obj), TYPE_MPTSAS1068)
+
+#define MPTSAS1068_PRODUCT_ID \
+ (MPI_FW_HEADER_PID_FAMILY_1068_SAS | \
+ MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI | \
+ MPI_FW_HEADER_PID_TYPE_SAS)
+
+struct MPTSASRequest {
+ MPIMsgSCSIIORequest scsi_io;
+ SCSIRequest *sreq;
+ QEMUSGList qsg;
+ MPTSASState *dev;
+
+ QTAILQ_ENTRY(MPTSASRequest) next;
+};
+
+static void mptsas_update_interrupt(MPTSASState *s)
+{
+ PCIDevice *pci = (PCIDevice *) s;
+ uint32_t state = s->intr_status & ~(s->intr_mask | MPI_HIS_IOP_DOORBELL_STATUS);
+
+ if (s->msi_in_use && msi_enabled(pci)) {
+ if (state) {
+ trace_mptsas_irq_msi(s);
+ msi_notify(pci, 0);
+ }
+ }
+
+ trace_mptsas_irq_intx(s, !!state);
+ pci_set_irq(pci, !!state);
+}
+
+static void mptsas_set_fault(MPTSASState *s, uint32_t code)
+{
+ if ((s->state & MPI_IOC_STATE_FAULT) == 0) {
+ s->state = MPI_IOC_STATE_FAULT | code;
+ }
+}
+
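+/* The request and reply FIFOs are simple ring buffers: head is the read
+ * index and tail the write index, both kept modulo the array size, with
+ * one slot left empty so that a full ring can be told from an empty one.
+ */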
+#define MPTSAS_FIFO_INVALID(s, name) \
+    ((s)->name##_head >= ARRAY_SIZE((s)->name) || \
+     (s)->name##_tail >= ARRAY_SIZE((s)->name))
+
+#define MPTSAS_FIFO_EMPTY(s, name) \
+ ((s)->name##_head == (s)->name##_tail)
+
+#define MPTSAS_FIFO_FULL(s, name) \
+ ((s)->name##_head == ((s)->name##_tail + 1) % ARRAY_SIZE((s)->name))
+
+#define MPTSAS_FIFO_GET(s, name) ({ \
+ uint32_t _val = (s)->name[(s)->name##_head++]; \
+ (s)->name##_head %= ARRAY_SIZE((s)->name); \
+ _val; \
+})
+
+#define MPTSAS_FIFO_PUT(s, name, val) do { \
+ (s)->name[(s)->name##_tail++] = (val); \
+ (s)->name##_tail %= ARRAY_SIZE((s)->name); \
+} while (0)
+
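+/* Post a full reply frame: take a free frame address from the reply_free
+ * FIFO, DMA the reply into it, and post the frame back on reply_post
+ * (shifted right by one, with MPI_ADDRESS_REPLY_A_BIT set to mark it as
+ * an address rather than a turbo reply).
+ */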
+static void mptsas_post_reply(MPTSASState *s, MPIDefaultReply *reply)
+{
+ PCIDevice *pci = (PCIDevice *) s;
+ uint32_t addr_lo;
+
+ if (MPTSAS_FIFO_EMPTY(s, reply_free) || MPTSAS_FIFO_FULL(s, reply_post)) {
+ mptsas_set_fault(s, MPI_IOCSTATUS_INSUFFICIENT_RESOURCES);
+ return;
+ }
+
+ addr_lo = MPTSAS_FIFO_GET(s, reply_free);
+
+ pci_dma_write(pci, addr_lo | s->host_mfa_high_addr, reply,
+ MIN(s->reply_frame_size, 4 * reply->MsgLength));
+
+ MPTSAS_FIFO_PUT(s, reply_post, MPI_ADDRESS_REPLY_A_BIT | (addr_lo >> 1));
+
+ s->intr_status |= MPI_HIS_REPLY_MESSAGE_INTERRUPT;
+ if (s->doorbell_state == DOORBELL_WRITE) {
+ s->doorbell_state = DOORBELL_NONE;
+ s->intr_status |= MPI_HIS_DOORBELL_INTERRUPT;
+ }
+ mptsas_update_interrupt(s);
+}
+
+void mptsas_reply(MPTSASState *s, MPIDefaultReply *reply)
+{
+ if (s->doorbell_state == DOORBELL_WRITE) {
+ /* The reply is sent out in 16 bit chunks, while the size
+ * in the reply is in 32 bit units.
+ */
+ s->doorbell_state = DOORBELL_READ;
+ s->doorbell_reply_idx = 0;
+ s->doorbell_reply_size = reply->MsgLength * 2;
+ memcpy(s->doorbell_reply, reply, s->doorbell_reply_size * 2);
+ s->intr_status |= MPI_HIS_DOORBELL_INTERRUPT;
+ mptsas_update_interrupt(s);
+ } else {
+ mptsas_post_reply(s, reply);
+ }
+}
+
+static void mptsas_turbo_reply(MPTSASState *s, uint32_t msgctx)
+{
+ if (MPTSAS_FIFO_FULL(s, reply_post)) {
+ mptsas_set_fault(s, MPI_IOCSTATUS_INSUFFICIENT_RESOURCES);
+ return;
+ }
+
+ /* The reply is just the message context ID (bit 31 = clear). */
+ MPTSAS_FIFO_PUT(s, reply_post, msgctx);
+
+ s->intr_status |= MPI_HIS_REPLY_MESSAGE_INTERRUPT;
+ mptsas_update_interrupt(s);
+}
+
+#define MPTSAS_MAX_REQUEST_SIZE 52
+
+static const int mpi_request_sizes[] = {
+ [MPI_FUNCTION_SCSI_IO_REQUEST] = sizeof(MPIMsgSCSIIORequest),
+ [MPI_FUNCTION_SCSI_TASK_MGMT] = sizeof(MPIMsgSCSITaskMgmt),
+ [MPI_FUNCTION_IOC_INIT] = sizeof(MPIMsgIOCInit),
+ [MPI_FUNCTION_IOC_FACTS] = sizeof(MPIMsgIOCFacts),
+ [MPI_FUNCTION_CONFIG] = sizeof(MPIMsgConfig),
+ [MPI_FUNCTION_PORT_FACTS] = sizeof(MPIMsgPortFacts),
+ [MPI_FUNCTION_PORT_ENABLE] = sizeof(MPIMsgPortEnable),
+ [MPI_FUNCTION_EVENT_NOTIFICATION] = sizeof(MPIMsgEventNotify),
+};
+
+static dma_addr_t mptsas_ld_sg_base(MPTSASState *s, uint32_t flags_and_length,
+ dma_addr_t *sgaddr)
+{
+ PCIDevice *pci = (PCIDevice *) s;
+ dma_addr_t addr;
+
+ if (flags_and_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
+ addr = ldq_le_pci_dma(pci, *sgaddr + 4);
+ *sgaddr += 12;
+ } else {
+ addr = ldl_le_pci_dma(pci, *sgaddr + 4);
+ *sgaddr += 8;
+ }
+ return addr;
+}
+
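+/* Walk the guest's MPI scatter/gather list and translate its simple
+ * elements into a QEMUSGList, stopping once DataLength bytes have been
+ * mapped.  The list may be continued through chain elements; ChainOffset
+ * (in 32-bit words) locates the next chain element within the current
+ * segment.
+ */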
+static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
+{
+ PCIDevice *pci = (PCIDevice *) s;
+ hwaddr next_chain_addr;
+ uint32_t left;
+ hwaddr sgaddr;
+ uint32_t chain_offset;
+
+ chain_offset = req->scsi_io.ChainOffset;
+ next_chain_addr = addr + chain_offset * sizeof(uint32_t);
+ sgaddr = addr + sizeof(MPIMsgSCSIIORequest);
+ pci_dma_sglist_init(&req->qsg, pci, 4);
+ left = req->scsi_io.DataLength;
+
+    for (;;) {
+ dma_addr_t addr, len;
+ uint32_t flags_and_length;
+
+ flags_and_length = ldl_le_pci_dma(pci, sgaddr);
+ len = flags_and_length & MPI_SGE_LENGTH_MASK;
+ if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK)
+ != MPI_SGE_FLAGS_SIMPLE_ELEMENT ||
+ (!len &&
+ !(flags_and_length & MPI_SGE_FLAGS_END_OF_LIST) &&
+ !(flags_and_length & MPI_SGE_FLAGS_END_OF_BUFFER))) {
+ return MPI_IOCSTATUS_INVALID_SGL;
+ }
+
+ len = MIN(len, left);
+ if (!len) {
+ /* We reached the desired transfer length, ignore extra
+ * elements of the s/g list.
+ */
+ break;
+ }
+
+ addr = mptsas_ld_sg_base(s, flags_and_length, &sgaddr);
+ qemu_sglist_add(&req->qsg, addr, len);
+ left -= len;
+
+ if (flags_and_length & MPI_SGE_FLAGS_END_OF_LIST) {
+ break;
+ }
+
+ if (flags_and_length & MPI_SGE_FLAGS_LAST_ELEMENT) {
+ if (!chain_offset) {
+ break;
+ }
+
+ flags_and_length = ldl_le_pci_dma(pci, next_chain_addr);
+ if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK)
+ != MPI_SGE_FLAGS_CHAIN_ELEMENT) {
+ return MPI_IOCSTATUS_INVALID_SGL;
+ }
+
+ sgaddr = mptsas_ld_sg_base(s, flags_and_length, &next_chain_addr);
+ chain_offset =
+ (flags_and_length & MPI_SGE_CHAIN_OFFSET_MASK) >> MPI_SGE_CHAIN_OFFSET_SHIFT;
+ next_chain_addr = sgaddr + chain_offset * sizeof(uint32_t);
+ }
+ }
+ return 0;
+}
+
+static void mptsas_free_request(MPTSASRequest *req)
+{
+ MPTSASState *s = req->dev;
+
+ if (req->sreq != NULL) {
+ req->sreq->hba_private = NULL;
+ scsi_req_unref(req->sreq);
+ req->sreq = NULL;
+ QTAILQ_REMOVE(&s->pending, req, next);
+ }
+ qemu_sglist_destroy(&req->qsg);
+ g_free(req);
+}
+
+static int mptsas_scsi_device_find(MPTSASState *s, int bus, int target,
+ uint8_t *lun, SCSIDevice **sdev)
+{
+ if (bus != 0) {
+ return MPI_IOCSTATUS_SCSI_INVALID_BUS;
+ }
+
+ if (target >= s->max_devices) {
+ return MPI_IOCSTATUS_SCSI_INVALID_TARGETID;
+ }
+
+ *sdev = scsi_device_find(&s->bus, bus, target, lun[1]);
+ if (!*sdev) {
+ return MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE;
+ }
+
+ return 0;
+}
+
+static int mptsas_process_scsi_io_request(MPTSASState *s,
+ MPIMsgSCSIIORequest *scsi_io,
+ hwaddr addr)
+{
+ MPTSASRequest *req;
+ MPIMsgSCSIIOReply reply;
+ SCSIDevice *sdev;
+ int status;
+
+ mptsas_fix_scsi_io_endianness(scsi_io);
+
+ trace_mptsas_process_scsi_io_request(s, scsi_io->Bus, scsi_io->TargetID,
+ scsi_io->LUN[1], scsi_io->DataLength);
+
+ status = mptsas_scsi_device_find(s, scsi_io->Bus, scsi_io->TargetID,
+ scsi_io->LUN, &sdev);
+ if (status) {
+ goto bad;
+ }
+
+    /* Zero the request so that mptsas_free_request() sees sreq == NULL
+     * if we bail out before scsi_req_new() below.
+     */
+    req = g_new0(MPTSASRequest, 1);
+ QTAILQ_INSERT_TAIL(&s->pending, req, next);
+ req->scsi_io = *scsi_io;
+ req->dev = s;
+
+ status = mptsas_build_sgl(s, req, addr);
+ if (status) {
+ goto free_bad;
+ }
+
+ if (req->qsg.size < scsi_io->DataLength) {
+ trace_mptsas_sgl_overflow(s, scsi_io->MsgContext, scsi_io->DataLength,
+ req->qsg.size);
+ status = MPI_IOCSTATUS_INVALID_SGL;
+ goto free_bad;
+ }
+
+ req->sreq = scsi_req_new(sdev, scsi_io->MsgContext,
+ scsi_io->LUN[1], scsi_io->CDB, req);
+
+ if (req->sreq->cmd.xfer > scsi_io->DataLength) {
+ goto overrun;
+ }
+ switch (scsi_io->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) {
+ case MPI_SCSIIO_CONTROL_NODATATRANSFER:
+ if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
+ goto overrun;
+ }
+ break;
+
+ case MPI_SCSIIO_CONTROL_WRITE:
+ if (req->sreq->cmd.mode != SCSI_XFER_TO_DEV) {
+ goto overrun;
+ }
+ break;
+
+ case MPI_SCSIIO_CONTROL_READ:
+ if (req->sreq->cmd.mode != SCSI_XFER_FROM_DEV) {
+ goto overrun;
+ }
+ break;
+ }
+
+ if (scsi_req_enqueue(req->sreq)) {
+ scsi_req_continue(req->sreq);
+ }
+ return 0;
+
+overrun:
+ trace_mptsas_scsi_overflow(s, scsi_io->MsgContext, req->sreq->cmd.xfer,
+ scsi_io->DataLength);
+ status = MPI_IOCSTATUS_SCSI_DATA_OVERRUN;
+free_bad:
+ mptsas_free_request(req);
+bad:
+ memset(&reply, 0, sizeof(reply));
+ reply.TargetID = scsi_io->TargetID;
+ reply.Bus = scsi_io->Bus;
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.Function = scsi_io->Function;
+ reply.CDBLength = scsi_io->CDBLength;
+ reply.SenseBufferLength = scsi_io->SenseBufferLength;
+ reply.MsgContext = scsi_io->MsgContext;
+ reply.SCSIState = MPI_SCSI_STATE_NO_SCSI_STATUS;
+ reply.IOCStatus = status;
+
+ mptsas_fix_scsi_io_reply_endianness(&reply);
+ mptsas_reply(s, (MPIDefaultReply *)&reply);
+
+ return 0;
+}
+
+typedef struct {
+ Notifier notifier;
+ MPTSASState *s;
+ MPIMsgSCSITaskMgmtReply *reply;
+} MPTSASCancelNotifier;
+
+static void mptsas_cancel_notify(Notifier *notifier, void *data)
+{
+ MPTSASCancelNotifier *n = container_of(notifier,
+ MPTSASCancelNotifier,
+ notifier);
+
+ /* Abusing IOCLogInfo to store the expected number of requests... */
+ if (++n->reply->TerminationCount == n->reply->IOCLogInfo) {
+ n->reply->IOCLogInfo = 0;
+ mptsas_fix_scsi_task_mgmt_reply_endianness(n->reply);
+ mptsas_post_reply(n->s, (MPIDefaultReply *)n->reply);
+ g_free(n->reply);
+ }
+ g_free(n);
+}
+
+static void mptsas_process_scsi_task_mgmt(MPTSASState *s, MPIMsgSCSITaskMgmt *req)
+{
+ MPIMsgSCSITaskMgmtReply reply;
+ MPIMsgSCSITaskMgmtReply *reply_async;
+ int status, count;
+ SCSIDevice *sdev;
+ SCSIRequest *r, *next;
+ BusChild *kid;
+
+ mptsas_fix_scsi_task_mgmt_endianness(req);
+
+ QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply));
+
+ memset(&reply, 0, sizeof(reply));
+ reply.TargetID = req->TargetID;
+ reply.Bus = req->Bus;
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.Function = req->Function;
+ reply.TaskType = req->TaskType;
+ reply.MsgContext = req->MsgContext;
+
+ switch (req->TaskType) {
+ case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ case MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ status = mptsas_scsi_device_find(s, req->Bus, req->TargetID,
+ req->LUN, &sdev);
+ if (status) {
+ reply.IOCStatus = status;
+ goto out;
+ }
+ if (sdev->lun != req->LUN[1]) {
+ reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN;
+ goto out;
+ }
+
+ QTAILQ_FOREACH_SAFE(r, &sdev->requests, next, next) {
+ MPTSASRequest *cmd_req = r->hba_private;
+ if (cmd_req && cmd_req->scsi_io.MsgContext == req->TaskMsgContext) {
+ break;
+ }
+ }
+ if (r) {
+ /*
+ * Assert that the request has not been completed yet, we
+ * check for it in the loop above.
+ */
+ assert(r->hba_private);
+ if (req->TaskType == MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+ /* "If the specified command is present in the task set, then
+ * return a service response set to FUNCTION SUCCEEDED".
+ */
+ reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED;
+ } else {
+ MPTSASCancelNotifier *notifier;
+
+ reply_async = g_memdup(&reply, sizeof(MPIMsgSCSITaskMgmtReply));
+ reply_async->IOCLogInfo = INT_MAX;
+
+ count = 1;
+ notifier = g_new(MPTSASCancelNotifier, 1);
+ notifier->s = s;
+ notifier->reply = reply_async;
+ notifier->notifier.notify = mptsas_cancel_notify;
+ scsi_req_cancel_async(r, &notifier->notifier);
+ goto reply_maybe_async;
+ }
+ }
+ break;
+
+ case MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
+ status = mptsas_scsi_device_find(s, req->Bus, req->TargetID,
+ req->LUN, &sdev);
+ if (status) {
+ reply.IOCStatus = status;
+ goto out;
+ }
+ if (sdev->lun != req->LUN[1]) {
+ reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN;
+ goto out;
+ }
+
+ reply_async = g_memdup(&reply, sizeof(MPIMsgSCSITaskMgmtReply));
+ reply_async->IOCLogInfo = INT_MAX;
+
+ count = 0;
+ QTAILQ_FOREACH_SAFE(r, &sdev->requests, next, next) {
+ if (r->hba_private) {
+ MPTSASCancelNotifier *notifier;
+
+ count++;
+ notifier = g_new(MPTSASCancelNotifier, 1);
+ notifier->s = s;
+ notifier->reply = reply_async;
+ notifier->notifier.notify = mptsas_cancel_notify;
+ scsi_req_cancel_async(r, &notifier->notifier);
+ }
+ }
+
+reply_maybe_async:
+ if (reply_async->TerminationCount < count) {
+ reply_async->IOCLogInfo = count;
+ return;
+        }
+        /* All cancellations completed synchronously, so the asynchronous
+         * copy of the reply is no longer needed.
+         */
+        g_free(reply_async);
+        reply.TerminationCount = count;
+ break;
+
+ case MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ status = mptsas_scsi_device_find(s, req->Bus, req->TargetID,
+ req->LUN, &sdev);
+ if (status) {
+ reply.IOCStatus = status;
+ goto out;
+ }
+ if (sdev->lun != req->LUN[1]) {
+ reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN;
+ goto out;
+ }
+ qdev_reset_all(&sdev->qdev);
+ break;
+
+ case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (req->Bus != 0) {
+ reply.IOCStatus = MPI_IOCSTATUS_SCSI_INVALID_BUS;
+ goto out;
+ }
+ if (req->TargetID > s->max_devices) {
+ reply.IOCStatus = MPI_IOCSTATUS_SCSI_INVALID_TARGETID;
+ goto out;
+ }
+
+ QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
+ sdev = SCSI_DEVICE(kid->child);
+ if (sdev->channel == 0 && sdev->id == req->TargetID) {
+ qdev_reset_all(kid->child);
+ }
+ }
+ break;
+
+ case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS:
+ qbus_reset_all(&s->bus.qbus);
+ break;
+
+ default:
+ reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED;
+ break;
+ }
+
+out:
+ mptsas_fix_scsi_task_mgmt_reply_endianness(&reply);
+ mptsas_post_reply(s, (MPIDefaultReply *)&reply);
+}
+
+static void mptsas_process_ioc_init(MPTSASState *s, MPIMsgIOCInit *req)
+{
+ MPIMsgIOCInitReply reply;
+
+ mptsas_fix_ioc_init_endianness(req);
+
+ QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply));
+
+ s->who_init = req->WhoInit;
+ s->reply_frame_size = req->ReplyFrameSize;
+ s->max_buses = req->MaxBuses;
+ s->max_devices = req->MaxDevices ? req->MaxDevices : 256;
+ s->host_mfa_high_addr = (hwaddr)req->HostMfaHighAddr << 32;
+ s->sense_buffer_high_addr = (hwaddr)req->SenseBufferHighAddr << 32;
+
+ if (s->state == MPI_IOC_STATE_READY) {
+ s->state = MPI_IOC_STATE_OPERATIONAL;
+ }
+
+ memset(&reply, 0, sizeof(reply));
+ reply.WhoInit = s->who_init;
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.Function = req->Function;
+ reply.MaxDevices = s->max_devices;
+ reply.MaxBuses = s->max_buses;
+ reply.MsgContext = req->MsgContext;
+
+ mptsas_fix_ioc_init_reply_endianness(&reply);
+ mptsas_reply(s, (MPIDefaultReply *)&reply);
+}
+
+static void mptsas_process_ioc_facts(MPTSASState *s,
+ MPIMsgIOCFacts *req)
+{
+ MPIMsgIOCFactsReply reply;
+
+ mptsas_fix_ioc_facts_endianness(req);
+
+ QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply));
+
+ memset(&reply, 0, sizeof(reply));
+ reply.MsgVersion = 0x0105;
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.Function = req->Function;
+ reply.MsgContext = req->MsgContext;
+ reply.MaxChainDepth = MPTSAS_MAXIMUM_CHAIN_DEPTH;
+ reply.WhoInit = s->who_init;
+ reply.BlockSize = MPTSAS_MAX_REQUEST_SIZE / sizeof(uint32_t);
+ reply.ReplyQueueDepth = ARRAY_SIZE(s->reply_post) - 1;
+ QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->reply_post) != ARRAY_SIZE(s->reply_free));
+
+ reply.RequestFrameSize = 128;
+ reply.ProductID = MPTSAS1068_PRODUCT_ID;
+ reply.CurrentHostMfaHighAddr = s->host_mfa_high_addr >> 32;
+ reply.GlobalCredits = ARRAY_SIZE(s->request_post) - 1;
+ reply.NumberOfPorts = MPTSAS_NUM_PORTS;
+ reply.CurrentSenseBufferHighAddr = s->sense_buffer_high_addr >> 32;
+ reply.CurReplyFrameSize = s->reply_frame_size;
+ reply.MaxDevices = s->max_devices;
+ reply.MaxBuses = s->max_buses;
+ reply.FWVersionDev = 0;
+ reply.FWVersionUnit = 0x92;
+ reply.FWVersionMinor = 0x32;
+ reply.FWVersionMajor = 0x1;
+
+ mptsas_fix_ioc_facts_reply_endianness(&reply);
+ mptsas_reply(s, (MPIDefaultReply *)&reply);
+}
+
+static void mptsas_process_port_facts(MPTSASState *s,
+ MPIMsgPortFacts *req)
+{
+ MPIMsgPortFactsReply reply;
+
+ mptsas_fix_port_facts_endianness(req);
+
+ QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply));
+
+ memset(&reply, 0, sizeof(reply));
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.Function = req->Function;
+ reply.PortNumber = req->PortNumber;
+ reply.MsgContext = req->MsgContext;
+
+ if (req->PortNumber < MPTSAS_NUM_PORTS) {
+ reply.PortType = MPI_PORTFACTS_PORTTYPE_SAS;
+ reply.MaxDevices = MPTSAS_NUM_PORTS;
+ reply.PortSCSIID = MPTSAS_NUM_PORTS;
+ reply.ProtocolFlags = MPI_PORTFACTS_PROTOCOL_LOGBUSADDR | MPI_PORTFACTS_PROTOCOL_INITIATOR;
+ }
+
+ mptsas_fix_port_facts_reply_endianness(&reply);
+ mptsas_reply(s, (MPIDefaultReply *)&reply);
+}
+
+static void mptsas_process_port_enable(MPTSASState *s,
+ MPIMsgPortEnable *req)
+{
+ MPIMsgPortEnableReply reply;
+
+ mptsas_fix_port_enable_endianness(req);
+
+ QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply));
+
+ memset(&reply, 0, sizeof(reply));
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.PortNumber = req->PortNumber;
+ reply.Function = req->Function;
+ reply.MsgContext = req->MsgContext;
+
+ mptsas_fix_port_enable_reply_endianness(&reply);
+ mptsas_reply(s, (MPIDefaultReply *)&reply);
+}
+
+static void mptsas_process_event_notification(MPTSASState *s,
+ MPIMsgEventNotify *req)
+{
+ MPIMsgEventNotifyReply reply;
+
+ mptsas_fix_event_notification_endianness(req);
+
+ QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req));
+ QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply));
+
+ /* Don't even bother storing whether event notification is enabled,
+ * since it is not accessible.
+ */
+
+ memset(&reply, 0, sizeof(reply));
+ reply.EventDataLength = sizeof(reply.Data) / 4;
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.Function = req->Function;
+
+ /* This is set because events are sent through the reply FIFOs. */
+ reply.MsgFlags = MPI_MSGFLAGS_CONTINUATION_REPLY;
+
+ reply.MsgContext = req->MsgContext;
+ reply.Event = MPI_EVENT_EVENT_CHANGE;
+ reply.Data[0] = !!req->Switch;
+
+ mptsas_fix_event_notification_reply_endianness(&reply);
+ mptsas_reply(s, (MPIDefaultReply *)&reply);
+}
+
+static void mptsas_process_message(MPTSASState *s, MPIRequestHeader *req)
+{
+ trace_mptsas_process_message(s, req->Function, req->MsgContext);
+ switch (req->Function) {
+ case MPI_FUNCTION_SCSI_TASK_MGMT:
+ mptsas_process_scsi_task_mgmt(s, (MPIMsgSCSITaskMgmt *)req);
+ break;
+
+ case MPI_FUNCTION_IOC_INIT:
+ mptsas_process_ioc_init(s, (MPIMsgIOCInit *)req);
+ break;
+
+ case MPI_FUNCTION_IOC_FACTS:
+ mptsas_process_ioc_facts(s, (MPIMsgIOCFacts *)req);
+ break;
+
+ case MPI_FUNCTION_PORT_FACTS:
+ mptsas_process_port_facts(s, (MPIMsgPortFacts *)req);
+ break;
+
+ case MPI_FUNCTION_PORT_ENABLE:
+ mptsas_process_port_enable(s, (MPIMsgPortEnable *)req);
+ break;
+
+ case MPI_FUNCTION_EVENT_NOTIFICATION:
+ mptsas_process_event_notification(s, (MPIMsgEventNotify *)req);
+ break;
+
+ case MPI_FUNCTION_CONFIG:
+ mptsas_process_config(s, (MPIMsgConfig *)req);
+ break;
+
+ default:
+ trace_mptsas_unhandled_cmd(s, req->Function, 0);
+ mptsas_set_fault(s, MPI_IOCSTATUS_INVALID_FUNCTION);
+ break;
+ }
+}
+
+static void mptsas_fetch_request(MPTSASState *s)
+{
+ PCIDevice *pci = (PCIDevice *) s;
+ char req[MPTSAS_MAX_REQUEST_SIZE];
+ MPIRequestHeader *hdr = (MPIRequestHeader *)req;
+ hwaddr addr;
+ int size;
+
+ if (s->state != MPI_IOC_STATE_OPERATIONAL) {
+ mptsas_set_fault(s, MPI_IOCSTATUS_INVALID_STATE);
+ return;
+ }
+
+ /* Read the message header from the guest first. */
+ addr = s->host_mfa_high_addr | MPTSAS_FIFO_GET(s, request_post);
+    pci_dma_read(pci, addr, req, sizeof(*hdr));
+
+ if (hdr->Function < ARRAY_SIZE(mpi_request_sizes) &&
+ mpi_request_sizes[hdr->Function]) {
+ /* Read the rest of the request based on the type. Do not
+ * reread everything, as that could cause a TOC/TOU mismatch
+ * and leak data from the QEMU stack.
+ */
+ size = mpi_request_sizes[hdr->Function];
+ assert(size <= MPTSAS_MAX_REQUEST_SIZE);
+        pci_dma_read(pci, addr + sizeof(*hdr), &req[sizeof(*hdr)],
+                     size - sizeof(*hdr));
+ }
+
+ if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
+ /* SCSI I/O requests are separate from mptsas_process_message
+ * because they cannot be sent through the doorbell yet.
+ */
+ mptsas_process_scsi_io_request(s, (MPIMsgSCSIIORequest *)req, addr);
+ } else {
+ mptsas_process_message(s, (MPIRequestHeader *)req);
+ }
+}
+
+static void mptsas_fetch_requests(void *opaque)
+{
+ MPTSASState *s = opaque;
+
+ while (!MPTSAS_FIFO_EMPTY(s, request_post)) {
+ mptsas_fetch_request(s);
+ }
+}
+
+static void mptsas_soft_reset(MPTSASState *s)
+{
+ uint32_t save_mask;
+
+ trace_mptsas_reset(s);
+
+ /* Temporarily disable interrupts */
+ save_mask = s->intr_mask;
+ s->intr_mask = MPI_HIM_DIM | MPI_HIM_RIM;
+ mptsas_update_interrupt(s);
+
+ qbus_reset_all(&s->bus.qbus);
+ s->intr_status = 0;
+ s->intr_mask = save_mask;
+
+ s->reply_free_tail = 0;
+ s->reply_free_head = 0;
+ s->reply_post_tail = 0;
+ s->reply_post_head = 0;
+ s->request_post_tail = 0;
+ s->request_post_head = 0;
+ qemu_bh_cancel(s->request_bh);
+
+ s->state = MPI_IOC_STATE_READY;
+}
+
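+/* Doorbell handshake: the guest writes MPI_FUNCTION_HANDSHAKE together
+ * with the message length in dwords, then feeds the message into the
+ * doorbell register 32 bits at a time; the reply is read back from the
+ * same register 16 bits per read (see mptsas_reply).
+ */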
+static uint32_t mptsas_doorbell_read(MPTSASState *s)
+{
+ uint32_t ret;
+
+    ret = (s->who_init << MPI_DOORBELL_WHO_INIT_SHIFT) &
+        MPI_DOORBELL_WHO_INIT_MASK;
+ ret |= s->state;
+ switch (s->doorbell_state) {
+ case DOORBELL_NONE:
+ break;
+
+ case DOORBELL_WRITE:
+ ret |= MPI_DOORBELL_ACTIVE;
+ break;
+
+ case DOORBELL_READ:
+ /* Get rid of the IOC fault code. */
+ ret &= ~MPI_DOORBELL_DATA_MASK;
+
+ assert(s->intr_status & MPI_HIS_DOORBELL_INTERRUPT);
+ assert(s->doorbell_reply_idx <= s->doorbell_reply_size);
+
+ ret |= MPI_DOORBELL_ACTIVE;
+ if (s->doorbell_reply_idx < s->doorbell_reply_size) {
+ /* For more information about this endian switch, see the
+ * commit message for commit 36b62ae ("fw_cfg: fix endianness in
+ * fw_cfg_data_mem_read() / _write()", 2015-01-16).
+ */
+ ret |= le16_to_cpu(s->doorbell_reply[s->doorbell_reply_idx++]);
+ }
+ break;
+
+ default:
+ abort();
+ }
+
+ return ret;
+}
+
+static void mptsas_doorbell_write(MPTSASState *s, uint32_t val)
+{
+ if (s->doorbell_state == DOORBELL_WRITE) {
+ if (s->doorbell_idx < s->doorbell_cnt) {
+ /* For more information about this endian switch, see the
+ * commit message for commit 36b62ae ("fw_cfg: fix endianness in
+ * fw_cfg_data_mem_read() / _write()", 2015-01-16).
+ */
+ s->doorbell_msg[s->doorbell_idx++] = cpu_to_le32(val);
+ if (s->doorbell_idx == s->doorbell_cnt) {
+ mptsas_process_message(s, (MPIRequestHeader *)s->doorbell_msg);
+ }
+ }
+ return;
+ }
+
+ switch ((val & MPI_DOORBELL_FUNCTION_MASK) >> MPI_DOORBELL_FUNCTION_SHIFT) {
+ case MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET:
+ mptsas_soft_reset(s);
+ break;
+ case MPI_FUNCTION_IO_UNIT_RESET:
+ break;
+ case MPI_FUNCTION_HANDSHAKE:
+ s->doorbell_state = DOORBELL_WRITE;
+ s->doorbell_idx = 0;
+ s->doorbell_cnt = (val & MPI_DOORBELL_ADD_DWORDS_MASK)
+ >> MPI_DOORBELL_ADD_DWORDS_SHIFT;
+ s->intr_status |= MPI_HIS_DOORBELL_INTERRUPT;
+ mptsas_update_interrupt(s);
+ break;
+ default:
+ trace_mptsas_unhandled_doorbell_cmd(s, val);
+ break;
+ }
+}
+
+static void mptsas_write_sequence_write(MPTSASState *s, uint32_t val)
+{
+ /* If the diagnostic register is enabled, any write to this register
+ * will disable it. Otherwise, the guest has to do a magic five-write
+ * sequence.
+ */
+ if (s->diagnostic & MPI_DIAG_DRWE) {
+ goto disable;
+ }
+
+ switch (s->diagnostic_idx) {
+ case 0:
+ if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_1ST_KEY_VALUE) {
+ goto disable;
+ }
+ break;
+ case 1:
+ if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_2ND_KEY_VALUE) {
+ goto disable;
+ }
+ break;
+ case 2:
+ if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_3RD_KEY_VALUE) {
+ goto disable;
+ }
+ break;
+ case 3:
+ if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_4TH_KEY_VALUE) {
+ goto disable;
+ }
+ break;
+ case 4:
+ if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_5TH_KEY_VALUE) {
+ goto disable;
+ }
+ /* Prepare Spaceball One for departure, and change the
+ * combination on my luggage!
+ */
+ s->diagnostic |= MPI_DIAG_DRWE;
+ break;
+ }
+ s->diagnostic_idx++;
+ return;
+
+disable:
+ s->diagnostic &= ~MPI_DIAG_DRWE;
+ s->diagnostic_idx = 0;
+}
+
+static int mptsas_hard_reset(MPTSASState *s)
+{
+ mptsas_soft_reset(s);
+
+ s->intr_mask = MPI_HIM_DIM | MPI_HIM_RIM;
+
+ s->host_mfa_high_addr = 0;
+ s->sense_buffer_high_addr = 0;
+ s->reply_frame_size = 0;
+ s->max_devices = MPTSAS_NUM_PORTS;
+ s->max_buses = 1;
+
+ return 0;
+}
+
+static void mptsas_interrupt_status_write(MPTSASState *s)
+{
+ switch (s->doorbell_state) {
+ case DOORBELL_NONE:
+ case DOORBELL_WRITE:
+ s->intr_status &= ~MPI_HIS_DOORBELL_INTERRUPT;
+ break;
+
+ case DOORBELL_READ:
+ /* The reply can be read continuously, so leave the interrupt up. */
+ assert(s->intr_status & MPI_HIS_DOORBELL_INTERRUPT);
+ if (s->doorbell_reply_idx == s->doorbell_reply_size) {
+ s->doorbell_state = DOORBELL_NONE;
+ }
+ break;
+
+ default:
+ abort();
+ }
+ mptsas_update_interrupt(s);
+}
+
+static uint32_t mptsas_reply_post_read(MPTSASState *s)
+{
+ uint32_t ret;
+
+ if (!MPTSAS_FIFO_EMPTY(s, reply_post)) {
+ ret = MPTSAS_FIFO_GET(s, reply_post);
+ } else {
+ ret = -1;
+ s->intr_status &= ~MPI_HIS_REPLY_MESSAGE_INTERRUPT;
+ mptsas_update_interrupt(s);
+ }
+
+ return ret;
+}
+
+static uint64_t mptsas_mmio_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MPTSASState *s = opaque;
+ uint32_t ret = 0;
+
+ switch (addr & ~3) {
+ case MPI_DOORBELL_OFFSET:
+ ret = mptsas_doorbell_read(s);
+ break;
+
+ case MPI_DIAGNOSTIC_OFFSET:
+ ret = s->diagnostic;
+ break;
+
+ case MPI_HOST_INTERRUPT_STATUS_OFFSET:
+ ret = s->intr_status;
+ break;
+
+ case MPI_HOST_INTERRUPT_MASK_OFFSET:
+ ret = s->intr_mask;
+ break;
+
+ case MPI_REPLY_POST_FIFO_OFFSET:
+ ret = mptsas_reply_post_read(s);
+ break;
+
+ default:
+ trace_mptsas_mmio_unhandled_read(s, addr);
+ break;
+ }
+ trace_mptsas_mmio_read(s, addr, ret);
+ return ret;
+}
+
+static void mptsas_mmio_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MPTSASState *s = opaque;
+
+ trace_mptsas_mmio_write(s, addr, val);
+ switch (addr) {
+ case MPI_DOORBELL_OFFSET:
+ mptsas_doorbell_write(s, val);
+ break;
+
+ case MPI_WRITE_SEQUENCE_OFFSET:
+ mptsas_write_sequence_write(s, val);
+ break;
+
+ case MPI_DIAGNOSTIC_OFFSET:
+ if (val & MPI_DIAG_RESET_ADAPTER) {
+ mptsas_hard_reset(s);
+ }
+ break;
+
+ case MPI_HOST_INTERRUPT_STATUS_OFFSET:
+ mptsas_interrupt_status_write(s);
+ break;
+
+ case MPI_HOST_INTERRUPT_MASK_OFFSET:
+ s->intr_mask = val & (MPI_HIM_RIM | MPI_HIM_DIM);
+ mptsas_update_interrupt(s);
+ break;
+
+ case MPI_REQUEST_POST_FIFO_OFFSET:
+ if (MPTSAS_FIFO_FULL(s, request_post)) {
+ mptsas_set_fault(s, MPI_IOCSTATUS_INSUFFICIENT_RESOURCES);
+ } else {
+ MPTSAS_FIFO_PUT(s, request_post, val & ~0x03);
+ qemu_bh_schedule(s->request_bh);
+ }
+ break;
+
+ case MPI_REPLY_FREE_FIFO_OFFSET:
+ if (MPTSAS_FIFO_FULL(s, reply_free)) {
+ mptsas_set_fault(s, MPI_IOCSTATUS_INSUFFICIENT_RESOURCES);
+ } else {
+ MPTSAS_FIFO_PUT(s, reply_free, val);
+ }
+ break;
+
+ default:
+ trace_mptsas_mmio_unhandled_write(s, addr, val);
+ break;
+ }
+}
+
+static const MemoryRegionOps mptsas_mmio_ops = {
+ .read = mptsas_mmio_read,
+ .write = mptsas_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static const MemoryRegionOps mptsas_port_ops = {
+ .read = mptsas_mmio_read,
+ .write = mptsas_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static uint64_t mptsas_diag_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MPTSASState *s = opaque;
+ trace_mptsas_diag_read(s, addr, 0);
+ return 0;
+}
+
+static void mptsas_diag_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MPTSASState *s = opaque;
+ trace_mptsas_diag_write(s, addr, val);
+}
+
+static const MemoryRegionOps mptsas_diag_ops = {
+ .read = mptsas_diag_read,
+ .write = mptsas_diag_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static QEMUSGList *mptsas_get_sg_list(SCSIRequest *sreq)
+{
+ MPTSASRequest *req = sreq->hba_private;
+
+ return &req->qsg;
+}
+
+static void mptsas_command_complete(SCSIRequest *sreq,
+ uint32_t status, size_t resid)
+{
+ MPTSASRequest *req = sreq->hba_private;
+ MPTSASState *s = req->dev;
+ uint8_t sense_buf[SCSI_SENSE_BUF_SIZE];
+ uint8_t sense_len;
+
+ hwaddr sense_buffer_addr = req->dev->sense_buffer_high_addr |
+ req->scsi_io.SenseBufferLowAddr;
+
+ trace_mptsas_command_complete(s, req->scsi_io.MsgContext, status, resid);
+
+ sense_len = scsi_req_get_sense(sreq, sense_buf, SCSI_SENSE_BUF_SIZE);
+ if (sense_len > 0) {
+ pci_dma_write(PCI_DEVICE(s), sense_buffer_addr, sense_buf,
+ MIN(req->scsi_io.SenseBufferLength, sense_len));
+ }
+
+ if (sreq->status != GOOD || resid ||
+ req->dev->doorbell_state == DOORBELL_WRITE) {
+ MPIMsgSCSIIOReply reply;
+
+ memset(&reply, 0, sizeof(reply));
+ reply.TargetID = req->scsi_io.TargetID;
+ reply.Bus = req->scsi_io.Bus;
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.Function = req->scsi_io.Function;
+ reply.CDBLength = req->scsi_io.CDBLength;
+ reply.SenseBufferLength = req->scsi_io.SenseBufferLength;
+ reply.MsgFlags = req->scsi_io.MsgFlags;
+ reply.MsgContext = req->scsi_io.MsgContext;
+ reply.SCSIStatus = sreq->status;
+ if (sreq->status == GOOD) {
+ reply.TransferCount = req->scsi_io.DataLength - resid;
+ if (resid) {
+ reply.IOCStatus = MPI_IOCSTATUS_SCSI_DATA_UNDERRUN;
+ }
+ } else {
+ reply.SCSIState = MPI_SCSI_STATE_AUTOSENSE_VALID;
+ reply.SenseCount = sense_len;
+ reply.IOCStatus = MPI_IOCSTATUS_SCSI_DATA_UNDERRUN;
+ }
+
+ mptsas_fix_scsi_io_reply_endianness(&reply);
+ mptsas_post_reply(req->dev, (MPIDefaultReply *)&reply);
+ } else {
+ mptsas_turbo_reply(req->dev, req->scsi_io.MsgContext);
+ }
+
+ mptsas_free_request(req);
+}
+
+static void mptsas_request_cancelled(SCSIRequest *sreq)
+{
+ MPTSASRequest *req = sreq->hba_private;
+ MPIMsgSCSIIOReply reply;
+
+ memset(&reply, 0, sizeof(reply));
+ reply.TargetID = req->scsi_io.TargetID;
+ reply.Bus = req->scsi_io.Bus;
+ reply.MsgLength = sizeof(reply) / 4;
+ reply.Function = req->scsi_io.Function;
+ reply.CDBLength = req->scsi_io.CDBLength;
+ reply.SenseBufferLength = req->scsi_io.SenseBufferLength;
+ reply.MsgFlags = req->scsi_io.MsgFlags;
+ reply.MsgContext = req->scsi_io.MsgContext;
+ reply.SCSIState = MPI_SCSI_STATE_NO_SCSI_STATUS;
+ reply.IOCStatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED;
+
+ mptsas_fix_scsi_io_reply_endianness(&reply);
+ mptsas_post_reply(req->dev, (MPIDefaultReply *)&reply);
+ mptsas_free_request(req);
+}
+
+static void mptsas_save_request(QEMUFile *f, SCSIRequest *sreq)
+{
+ MPTSASRequest *req = sreq->hba_private;
+ int i;
+
+ qemu_put_buffer(f, (unsigned char *)&req->scsi_io, sizeof(req->scsi_io));
+ qemu_put_be32(f, req->qsg.nsg);
+ for (i = 0; i < req->qsg.nsg; i++) {
+ qemu_put_be64(f, req->qsg.sg[i].base);
+ qemu_put_be64(f, req->qsg.sg[i].len);
+ }
+}
+
+static void *mptsas_load_request(QEMUFile *f, SCSIRequest *sreq)
+{
+ SCSIBus *bus = sreq->bus;
+ MPTSASState *s = container_of(bus, MPTSASState, bus);
+ PCIDevice *pci = PCI_DEVICE(s);
+ MPTSASRequest *req;
+ int i, n;
+
+ req = g_new(MPTSASRequest, 1);
+ qemu_get_buffer(f, (unsigned char *)&req->scsi_io, sizeof(req->scsi_io));
+
+ n = qemu_get_be32(f);
+ /* TODO: add a way for SCSIBusInfo's load_request to fail,
+ * and fail migration instead of asserting here.
+ * When we do, we might be able to re-enable NDEBUG below.
+ */
+#ifdef NDEBUG
+#error building with NDEBUG is not supported
+#endif
+ assert(n >= 0);
+
+ pci_dma_sglist_init(&req->qsg, pci, n);
+ for (i = 0; i < n; i++) {
+ uint64_t base = qemu_get_be64(f);
+ uint64_t len = qemu_get_be64(f);
+ qemu_sglist_add(&req->qsg, base, len);
+ }
+
+ scsi_req_ref(sreq);
+ req->sreq = sreq;
+ req->dev = s;
+
+ return req;
+}
+
+static const struct SCSIBusInfo mptsas_scsi_info = {
+ .tcq = true,
+ .max_target = MPTSAS_NUM_PORTS,
+ .max_lun = 1,
+
+ .get_sg_list = mptsas_get_sg_list,
+ .complete = mptsas_command_complete,
+ .cancel = mptsas_request_cancelled,
+ .save_request = mptsas_save_request,
+ .load_request = mptsas_load_request,
+};
+
+static void mptsas_scsi_init(PCIDevice *dev, Error **errp)
+{
+ DeviceState *d = DEVICE(dev);
+ MPTSASState *s = MPT_SAS(dev);
+
+ dev->config[PCI_LATENCY_TIMER] = 0;
+ dev->config[PCI_INTERRUPT_PIN] = 0x01;
+
+ memory_region_init_io(&s->mmio_io, OBJECT(s), &mptsas_mmio_ops, s,
+ "mptsas-mmio", 0x4000);
+ memory_region_init_io(&s->port_io, OBJECT(s), &mptsas_port_ops, s,
+ "mptsas-io", 256);
+ memory_region_init_io(&s->diag_io, OBJECT(s), &mptsas_diag_ops, s,
+ "mptsas-diag", 0x10000);
+
+ if (s->msi_available &&
+ msi_init(dev, 0, 1, true, false) >= 0) {
+ s->msi_in_use = true;
+ }
+
+ pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->port_io);
+ pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_32, &s->mmio_io);
+ pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_32, &s->diag_io);
+
+ if (!s->sas_addr) {
+ s->sas_addr = ((NAA_LOCALLY_ASSIGNED_ID << 24) |
+ IEEE_COMPANY_LOCALLY_ASSIGNED) << 36;
+ s->sas_addr |= (pci_bus_num(dev->bus) << 16);
+ s->sas_addr |= (PCI_SLOT(dev->devfn) << 8);
+ s->sas_addr |= PCI_FUNC(dev->devfn);
+ }
+ s->max_devices = MPTSAS_NUM_PORTS;
+
+ s->request_bh = qemu_bh_new(mptsas_fetch_requests, s);
+
+ QTAILQ_INIT(&s->pending);
+
+ scsi_bus_new(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info, NULL);
+ if (!d->hotplugged) {
+ scsi_bus_legacy_handle_cmdline(&s->bus, errp);
+ }
+}
+
+static void mptsas_scsi_uninit(PCIDevice *dev)
+{
+ MPTSASState *s = MPT_SAS(dev);
+
+ qemu_bh_delete(s->request_bh);
+ if (s->msi_in_use) {
+ msi_uninit(dev);
+ }
+}
+
+static void mptsas_reset(DeviceState *dev)
+{
+ MPTSASState *s = MPT_SAS(dev);
+
+ mptsas_hard_reset(s);
+}
+
+static int mptsas_post_load(void *opaque, int version_id)
+{
+ MPTSASState *s = opaque;
+
+ if (s->doorbell_idx > s->doorbell_cnt ||
+ s->doorbell_cnt > ARRAY_SIZE(s->doorbell_msg) ||
+ s->doorbell_reply_idx > s->doorbell_reply_size ||
+ s->doorbell_reply_size > ARRAY_SIZE(s->doorbell_reply) ||
+ MPTSAS_FIFO_INVALID(s, request_post) ||
+ MPTSAS_FIFO_INVALID(s, reply_post) ||
+ MPTSAS_FIFO_INVALID(s, reply_free) ||
+ s->diagnostic_idx > 4) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_mptsas = {
+ .name = "mptsas",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .post_load = mptsas_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, MPTSASState),
+ VMSTATE_BOOL(msi_in_use, MPTSASState),
+
+ VMSTATE_UINT32(state, MPTSASState),
+ VMSTATE_UINT8(who_init, MPTSASState),
+ VMSTATE_UINT8(doorbell_state, MPTSASState),
+ VMSTATE_UINT32_ARRAY(doorbell_msg, MPTSASState, 256),
+ VMSTATE_INT32(doorbell_idx, MPTSASState),
+ VMSTATE_INT32(doorbell_cnt, MPTSASState),
+
+ VMSTATE_UINT16_ARRAY(doorbell_reply, MPTSASState, 256),
+ VMSTATE_INT32(doorbell_reply_idx, MPTSASState),
+ VMSTATE_INT32(doorbell_reply_size, MPTSASState),
+
+ VMSTATE_UINT32(diagnostic, MPTSASState),
+ VMSTATE_UINT8(diagnostic_idx, MPTSASState),
+
+ VMSTATE_UINT32(intr_status, MPTSASState),
+ VMSTATE_UINT32(intr_mask, MPTSASState),
+
+ VMSTATE_UINT32_ARRAY(request_post, MPTSASState,
+ MPTSAS_REQUEST_QUEUE_DEPTH + 1),
+ VMSTATE_UINT16(request_post_head, MPTSASState),
+ VMSTATE_UINT16(request_post_tail, MPTSASState),
+
+ VMSTATE_UINT32_ARRAY(reply_post, MPTSASState,
+ MPTSAS_REPLY_QUEUE_DEPTH + 1),
+ VMSTATE_UINT16(reply_post_head, MPTSASState),
+ VMSTATE_UINT16(reply_post_tail, MPTSASState),
+
+ VMSTATE_UINT32_ARRAY(reply_free, MPTSASState,
+ MPTSAS_REPLY_QUEUE_DEPTH + 1),
+ VMSTATE_UINT16(reply_free_head, MPTSASState),
+ VMSTATE_UINT16(reply_free_tail, MPTSASState),
+
+ VMSTATE_UINT16(max_buses, MPTSASState),
+ VMSTATE_UINT16(max_devices, MPTSASState),
+ VMSTATE_UINT16(reply_frame_size, MPTSASState),
+ VMSTATE_UINT64(host_mfa_high_addr, MPTSASState),
+ VMSTATE_UINT64(sense_buffer_high_addr, MPTSASState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property mptsas_properties[] = {
+ DEFINE_PROP_UINT64("sas_address", MPTSASState, sas_addr, 0),
+ /* TODO: test MSI support under Windows */
+ DEFINE_PROP_BIT("msi", MPTSASState, msi_available, 0, true),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void mptsas1068_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
+
+ pc->realize = mptsas_scsi_init;
+ pc->exit = mptsas_scsi_uninit;
+ pc->romfile = 0;
+ pc->vendor_id = PCI_VENDOR_ID_LSI_LOGIC;
+ pc->device_id = PCI_DEVICE_ID_LSI_SAS1068;
+ pc->subsystem_vendor_id = PCI_VENDOR_ID_LSI_LOGIC;
+ pc->subsystem_id = 0x8000;
+ pc->class_id = PCI_CLASS_STORAGE_SCSI;
+ dc->props = mptsas_properties;
+ dc->reset = mptsas_reset;
+ dc->vmsd = &vmstate_mptsas;
+ dc->desc = "LSI SAS 1068";
+}
+
+static const TypeInfo mptsas_info = {
+ .name = TYPE_MPTSAS1068,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(MPTSASState),
+ .class_init = mptsas1068_class_init,
+};
+
+static void mptsas_register_types(void)
+{
+ type_register(&mptsas_info);
+}
+
+type_init(mptsas_register_types)
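
A note on the realize code above: when no "sas_address" property is supplied, the device derives a locally administered SAS address from its PCI location. The same arithmetic as a standalone sketch (the NAA and OUI constants are illustrative stand-ins for NAA_LOCALLY_ASSIGNED_ID and IEEE_COMPANY_LOCALLY_ASSIGNED, assumed here rather than copied from the patch):

    #include <stdint.h>

    /* NAA 0x3 marks a locally assigned identifier; bus/slot/function in
     * the low bytes keep two adapters in one VM distinct. */
    static uint64_t default_sas_addr(uint8_t bus, uint8_t slot, uint8_t fn)
    {
        uint64_t addr = ((0x3ULL << 24) | 0x525400) << 36;
        addr |= (uint64_t)bus << 16;
        addr |= (uint64_t)slot << 8;
        addr |= fn;
        return addr;
    }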
diff --git a/hw/scsi/mptsas.h b/hw/scsi/mptsas.h
new file mode 100644
index 0000000000..595f81fb5b
--- /dev/null
+++ b/hw/scsi/mptsas.h
@@ -0,0 +1,100 @@
+#ifndef MPTSAS_H
+#define MPTSAS_H
+
+#include "mpi.h"
+
+#define MPTSAS_NUM_PORTS 8
+#define MPTSAS_MAX_FRAMES 2048 /* Firmware limit at 65535 */
+
+#define MPTSAS_REQUEST_QUEUE_DEPTH 128
+#define MPTSAS_REPLY_QUEUE_DEPTH 128
+
+#define MPTSAS_MAXIMUM_CHAIN_DEPTH 0x22
+
+typedef struct MPTSASState MPTSASState;
+typedef struct MPTSASRequest MPTSASRequest;
+
+enum {
+ DOORBELL_NONE,
+ DOORBELL_WRITE,
+ DOORBELL_READ
+};
+
+struct MPTSASState {
+ PCIDevice dev;
+ MemoryRegion mmio_io;
+ MemoryRegion port_io;
+ MemoryRegion diag_io;
+ QEMUBH *request_bh;
+
+ uint32_t msi_available;
+ uint64_t sas_addr;
+
+ bool msi_in_use;
+
+ /* Doorbell register */
+ uint32_t state;
+ uint8_t who_init;
+ uint8_t doorbell_state;
+
+ /* Buffer for requests that are sent through the doorbell register. */
+ uint32_t doorbell_msg[256];
+ int doorbell_idx;
+ int doorbell_cnt;
+
+ uint16_t doorbell_reply[256];
+ int doorbell_reply_idx;
+ int doorbell_reply_size;
+
+ /* Other registers */
+ uint8_t diagnostic_idx;
+ uint32_t diagnostic;
+ uint32_t intr_mask;
+ uint32_t intr_status;
+
+ /* Request queues */
+ uint32_t request_post[MPTSAS_REQUEST_QUEUE_DEPTH + 1];
+ uint16_t request_post_head;
+ uint16_t request_post_tail;
+
+ uint32_t reply_post[MPTSAS_REPLY_QUEUE_DEPTH + 1];
+ uint16_t reply_post_head;
+ uint16_t reply_post_tail;
+
+ uint32_t reply_free[MPTSAS_REPLY_QUEUE_DEPTH + 1];
+ uint16_t reply_free_head;
+ uint16_t reply_free_tail;
+
+ /* IOC Facts */
+ hwaddr host_mfa_high_addr;
+ hwaddr sense_buffer_high_addr;
+ uint16_t max_devices;
+ uint16_t max_buses;
+ uint16_t reply_frame_size;
+
+ SCSIBus bus;
+ QTAILQ_HEAD(, MPTSASRequest) pending;
+};
+
+void mptsas_fix_scsi_io_endianness(MPIMsgSCSIIORequest *req);
+void mptsas_fix_scsi_io_reply_endianness(MPIMsgSCSIIOReply *reply);
+void mptsas_fix_scsi_task_mgmt_endianness(MPIMsgSCSITaskMgmt *req);
+void mptsas_fix_scsi_task_mgmt_reply_endianness(MPIMsgSCSITaskMgmtReply *reply);
+void mptsas_fix_ioc_init_endianness(MPIMsgIOCInit *req);
+void mptsas_fix_ioc_init_reply_endianness(MPIMsgIOCInitReply *reply);
+void mptsas_fix_ioc_facts_endianness(MPIMsgIOCFacts *req);
+void mptsas_fix_ioc_facts_reply_endianness(MPIMsgIOCFactsReply *reply);
+void mptsas_fix_config_endianness(MPIMsgConfig *req);
+void mptsas_fix_config_reply_endianness(MPIMsgConfigReply *reply);
+void mptsas_fix_port_facts_endianness(MPIMsgPortFacts *req);
+void mptsas_fix_port_facts_reply_endianness(MPIMsgPortFactsReply *reply);
+void mptsas_fix_port_enable_endianness(MPIMsgPortEnable *req);
+void mptsas_fix_port_enable_reply_endianness(MPIMsgPortEnableReply *reply);
+void mptsas_fix_event_notification_endianness(MPIMsgEventNotify *req);
+void mptsas_fix_event_notification_reply_endianness(MPIMsgEventNotifyReply *reply);
+
+void mptsas_reply(MPTSASState *s, MPIDefaultReply *reply);
+
+void mptsas_process_config(MPTSASState *s, MPIMsgConfig *req);
+
+#endif /* MPTSAS_H */
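
The three queues declared above use QUEUE_DEPTH + 1 slots plus head/tail indices: the classic ring-buffer layout that sacrifices one slot so "full" and "empty" stay distinguishable. A minimal sketch of the idiom (the general pattern, not the MPTSAS_FIFO_* macros themselves):

    #include <stdint.h>

    #define DEPTH 128

    typedef struct {
        uint32_t buf[DEPTH + 1];   /* one spare slot */
        uint16_t head, tail;       /* producer advances head, consumer advances tail */
    } Fifo;

    static int fifo_empty(const Fifo *f)
    {
        return f->head == f->tail;
    }

    static int fifo_full(const Fifo *f)
    {
        return (f->head + 1) % (DEPTH + 1) == f->tail;
    }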
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index dc79dc0c58..469aec2839 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -77,8 +77,6 @@ struct SCSIDiskState
bool media_changed;
bool media_event;
bool eject_request;
- uint64_t wwn;
- uint64_t port_wwn;
uint16_t port_index;
uint64_t max_unmap_size;
uint64_t max_io_size;
@@ -633,21 +631,21 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
memcpy(outbuf+buflen, str, id_len);
buflen += id_len;
- if (s->wwn) {
+ if (s->qdev.wwn) {
outbuf[buflen++] = 0x1; // Binary
outbuf[buflen++] = 0x3; // NAA
outbuf[buflen++] = 0; // reserved
outbuf[buflen++] = 8;
- stq_be_p(&outbuf[buflen], s->wwn);
+ stq_be_p(&outbuf[buflen], s->qdev.wwn);
buflen += 8;
}
- if (s->port_wwn) {
+ if (s->qdev.port_wwn) {
outbuf[buflen++] = 0x61; // SAS / Binary
outbuf[buflen++] = 0x93; // PIV / Target port / NAA
outbuf[buflen++] = 0; // reserved
outbuf[buflen++] = 8;
- stq_be_p(&outbuf[buflen], s->port_wwn);
+ stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
buflen += 8;
}
@@ -2575,6 +2573,7 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp)
s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
scsi_realize(&s->qdev, errp);
+ scsi_generic_read_device_identification(&s->qdev);
}
static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
@@ -2668,8 +2667,8 @@ static Property scsi_hd_properties[] = {
SCSI_DISK_F_REMOVABLE, false),
DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
SCSI_DISK_F_DPOFUA, false),
- DEFINE_PROP_UINT64("wwn", SCSIDiskState, wwn, 0),
- DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, port_wwn, 0),
+ DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
+ DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
DEFAULT_MAX_UNMAP_SIZE),
@@ -2718,8 +2717,8 @@ static const TypeInfo scsi_hd_info = {
static Property scsi_cd_properties[] = {
DEFINE_SCSI_DISK_PROPERTIES(),
- DEFINE_PROP_UINT64("wwn", SCSIDiskState, wwn, 0),
- DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, port_wwn, 0),
+ DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
+ DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
DEFAULT_MAX_IO_SIZE),
@@ -2783,8 +2782,8 @@ static Property scsi_disk_properties[] = {
SCSI_DISK_F_REMOVABLE, false),
DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
SCSI_DISK_F_DPOFUA, false),
- DEFINE_PROP_UINT64("wwn", SCSIDiskState, wwn, 0),
- DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, port_wwn, 0),
+ DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
+ DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
DEFAULT_MAX_UNMAP_SIZE),
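
Retargeting the wwn/port_wwn properties at the embedded qdev works because qdev property definitions locate the field by offset, and offsetof() accepts nested member designators. A reduced illustration (the types and macro below are hypothetical, not QEMU's):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint64_t wwn, port_wwn; } BaseDev;
    typedef struct { BaseDev qdev; uint16_t port_index; } DiskDev;

    /* a property definition ultimately reduces to a field offset */
    #define PROP_FIELD_OFFSET(state, field) offsetof(state, field)

    static const size_t wwn_offset = PROP_FIELD_OFFSET(DiskDev, qdev.wwn);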
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index 75a4127d3a..f8a1ff2cac 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -355,6 +355,96 @@ static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
}
}
+static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
+{
+ int i;
+
+ if ((p[1] & 0xF) == 3) {
+ /* NAA designator type */
+ if (p[3] != 8) {
+ return -EINVAL;
+ }
+ *p_wwn = ldq_be_p(p + 4);
+ return 0;
+ }
+
+ if ((p[1] & 0xF) == 8) {
+ /* SCSI name string designator type */
+ if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
+ return -EINVAL;
+ }
+ if (p[3] > 20 && p[24] != ',') {
+ return -EINVAL;
+ }
+ *p_wwn = 0;
+ for (i = 8; i < 24; i++) {
+ char c = toupper(p[i]);
+ c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
+ *p_wwn = (*p_wwn << 4) | c;
+ }
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void scsi_generic_read_device_identification(SCSIDevice *s)
+{
+ uint8_t cmd[6];
+ uint8_t buf[250];
+ uint8_t sensebuf[8];
+ sg_io_hdr_t io_header;
+ int ret;
+ int i, len;
+
+ memset(cmd, 0, sizeof(cmd));
+ memset(buf, 0, sizeof(buf));
+ cmd[0] = INQUIRY;
+ cmd[1] = 1;
+ cmd[2] = 0x83;
+ cmd[4] = sizeof(buf);
+
+ memset(&io_header, 0, sizeof(io_header));
+ io_header.interface_id = 'S';
+ io_header.dxfer_direction = SG_DXFER_FROM_DEV;
+ io_header.dxfer_len = sizeof(buf);
+ io_header.dxferp = buf;
+ io_header.cmdp = cmd;
+ io_header.cmd_len = sizeof(cmd);
+ io_header.mx_sb_len = sizeof(sensebuf);
+ io_header.sbp = sensebuf;
+ io_header.timeout = 6000; /* XXX */
+
+ ret = blk_ioctl(s->conf.blk, SG_IO, &io_header);
+ if (ret < 0 || io_header.driver_status || io_header.host_status) {
+ return;
+ }
+
+ len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
+ for (i = 0; i + 3 <= len; ) {
+ const uint8_t *p = &buf[i + 4];
+ uint64_t wwn;
+
+ if (i + (p[3] + 4) > len) {
+ break;
+ }
+
+ if ((p[1] & 0x10) == 0) {
+ /* Associated with the logical unit */
+ if (read_naa_id(p, &wwn) == 0) {
+ s->wwn = wwn;
+ }
+ } else if ((p[1] & 0x10) == 0x10) {
+ /* Associated with the target port */
+ if (read_naa_id(p, &wwn) == 0) {
+ s->port_wwn = wwn;
+ }
+ }
+
+ i += p[3] + 4;
+ }
+}
+
static int get_stream_blocksize(BlockBackend *blk)
{
uint8_t cmd[6];
@@ -458,6 +548,8 @@ static void scsi_generic_realize(SCSIDevice *s, Error **errp)
}
DPRINTF("block size %d\n", s->blocksize);
+
+ scsi_generic_read_device_identification(s);
}
const SCSIReqOps scsi_generic_req_ops = {
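
read_naa_id() above folds the 16 hex digits following "naa." into a 64-bit WWN; the terse `c -= ...` expression is a hex-digit-to-nibble conversion. The same conversion written long-hand, as a sketch:

    #include <ctype.h>
    #include <stdint.h>

    /* expects exactly 16 hex digits, e.g. the tail of "naa.5000c50015ea71ac" */
    static uint64_t parse_hex16(const uint8_t *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 16; i++) {
            int c = toupper(p[i]);
            v = (v << 4) | (uint64_t)(c <= '9' ? c - '0' : c - 'A' + 10);
        }
        return v;
    }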
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 606e277092..b1413a1286 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -49,13 +49,43 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
return (char *)block->host + offset;
}
+/* The dirty memory bitmap is split into fixed-size blocks to allow growth
+ * under RCU. The bitmap for a block can be accessed as follows:
+ *
+ * rcu_read_lock();
+ *
+ * DirtyMemoryBlocks *blocks =
+ * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ *
+ * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ * unsigned long *block = blocks->blocks[idx];
+ * ...access block bitmap...
+ *
+ * rcu_read_unlock();
+ *
+ * Remember to check for the end of the block when accessing a range of
+ * addresses. Move on to the next block if you reach the end.
+ *
+ * Organization into blocks allows dirty memory to grow (but not shrink) under
+ * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new
+ * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
+ * the same. Other threads can safely access existing blocks while dirty
+ * memory is being grown. When no threads are using the old DirtyMemoryBlocks
+ * anymore it is freed by RCU (but the underlying blocks stay because they are
+ * pointed to from the new DirtyMemoryBlocks).
+ */
+#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
+typedef struct {
+ struct rcu_head rcu;
+ unsigned long *blocks[];
+} DirtyMemoryBlocks;
+
typedef struct RAMList {
QemuMutex mutex;
- /* Protected by the iothread lock. */
- unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
RAMBlock *mru_block;
/* RCU-enabled, writes protected by the ramlist lock. */
QLIST_HEAD(, RAMBlock) blocks;
+ DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
uint32_t version;
} RAMList;
extern RAMList ram_list;
@@ -89,30 +119,70 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
- unsigned long end, page, next;
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ bool dirty = false;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- next = find_next_bit(ram_list.dirty_memory[client], end, page);
- return next < end;
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ if (find_next_bit(blocks->blocks[idx], offset + num, offset) < offset + num) {
+ dirty = true;
+ break;
+ }
+
+ page += num;
+ }
+
+ rcu_read_unlock();
+
+ return dirty;
}
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
- unsigned long end, page, next;
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ bool dirty = true;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);
- return next >= end;
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ if (find_next_zero_bit(blocks->blocks[idx], offset + num, offset) < offset + num) {
+ dirty = false;
+ break;
+ }
+
+ page += num;
+ }
+
+ rcu_read_unlock();
+
+ return dirty;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
@@ -154,28 +224,68 @@ static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
unsigned client)
{
+ unsigned long page, idx, offset;
+ DirtyMemoryBlocks *blocks;
+
assert(client < DIRTY_MEMORY_NUM);
- set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+
+ page = addr >> TARGET_PAGE_BITS;
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ set_bit_atomic(offset, blocks->blocks[idx]);
+
+ rcu_read_unlock();
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
+ DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
unsigned long end, page;
- unsigned long **d = ram_list.dirty_memory;
+ int i;
+
+ if (!mask && !xen_enabled()) {
+ return;
+ }
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
- }
- if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
+
+ rcu_read_lock();
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
}
- if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+ offset, num);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+ offset, num);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+ offset, num);
+ }
+
+ page += num;
}
+
+ rcu_read_unlock();
+
xen_modified_memory(start, length);
}
@@ -195,21 +305,41 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
/* start address is aligned at the start of a word? */
if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
(hpratio == 1)) {
+ unsigned long **blocks[DIRTY_MEMORY_NUM];
+ unsigned long idx;
+ unsigned long offset;
long k;
long nr = BITS_TO_LONGS(pages);
+ idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ rcu_read_lock();
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ }
+
for (k = 0; k < nr; k++) {
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
- unsigned long **d = ram_list.dirty_memory;
- atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
- atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
if (tcg_enabled()) {
- atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
}
}
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
}
+
+ rcu_read_unlock();
+
xen_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
@@ -261,18 +391,33 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
- unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
+ unsigned long * const *src;
+ unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ rcu_read_lock();
+
+ src = atomic_rcu_read(
+ &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
for (k = page; k < page + nr; k++) {
- if (src[k]) {
- unsigned long bits = atomic_xchg(&src[k], 0);
+ if (src[idx][offset]) {
+ unsigned long bits = atomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
num_dirty += ctpopl(new_dirty);
}
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
}
+
+ rcu_read_unlock();
} else {
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
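
All of the converted accessors share the same idx/offset/num arithmetic for walking a page range across fixed-size bitmap blocks. The loop skeleton in isolation (a sketch; DIRTY_MEMORY_BLOCK_SIZE is in units of pages, as defined above):

    #define DIRTY_MEMORY_BLOCK_SIZE (256 * 1024 * 8)

    /* walk [page, end) without ever crossing a block boundary in one step */
    static void walk_blocks(unsigned long page, unsigned long end)
    {
        while (page < end) {
            unsigned long idx    = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long rest   = DIRTY_MEMORY_BLOCK_SIZE - offset;
            unsigned long num    = end - page < rest ? end - page : rest;

            /* operate on bits [offset, offset + num) of block idx */
            (void)idx;

            page += num;
        }
    }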
diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h
index d98e6c915d..db85afa03e 100644
--- a/include/hw/pci/pci_ids.h
+++ b/include/hw/pci/pci_ids.h
@@ -64,6 +64,7 @@
#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
#define PCI_DEVICE_ID_LSI_53C810 0x0001
#define PCI_DEVICE_ID_LSI_53C895A 0x0012
+#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
#define PCI_DEVICE_ID_LSI_SAS1078 0x0060
#define PCI_DEVICE_ID_LSI_SAS0079 0x0079
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index 1915a7342e..29052f81a5 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -108,6 +108,8 @@ struct SCSIDevice
int blocksize;
int type;
uint64_t max_lba;
+ uint64_t wwn;
+ uint64_t port_wwn;
};
extern const VMStateDescription vmstate_scsi_device;
@@ -271,6 +273,7 @@ void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense);
void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense);
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense);
void scsi_device_unit_attention_reported(SCSIDevice *dev);
+void scsi_generic_read_device_identification(SCSIDevice *dev);
int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed);
SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int target, int lun);
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index bd2c075343..05b447c728 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -8,6 +8,8 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
+ * See docs/atomics.txt for discussion about the guarantees each
+ * atomic primitive is meant to provide.
*/
#ifndef __QEMU_ATOMIC_H
@@ -15,12 +17,130 @@
#include "qemu/compiler.h"
-/* For C11 atomic ops */
/* Compiler barrier */
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })
-#ifndef __ATOMIC_RELAXED
+#ifdef __ATOMIC_RELAXED
+/* For C11 atomic ops */
+
+/* Manual memory barriers
+ *
+ *__atomic_thread_fence does not include a compiler barrier; instead,
+ * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
+ * semantics. If smp_wmb() is a no-op, absence of the barrier means that
+ * the compiler is free to reorder stores on each side of the barrier.
+ * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
+ */
+
+#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
+#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
+#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
+
+#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
+
+/* Weak atomic operations prevent the compiler moving other
+ * loads/stores past the atomic operation load/store. However there is
+ * no explicit memory barrier for the processor.
+ */
+#define atomic_read(ptr) \
+ ({ \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
+ _val; \
+ })
+
+#define atomic_set(ptr, i) do { \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
+} while(0)
+
+/* Atomic RCU operations imply weak memory barriers */
+
+#define atomic_rcu_read(ptr) \
+ ({ \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
+ _val; \
+ })
+
+#define atomic_rcu_set(ptr, i) do { \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
+} while(0)
+
+/* atomic_mb_read/set semantics map to Java volatile variables. They are
+ * less expensive on some platforms (notably POWER & ARMv7) than fully
+ * sequentially consistent operations.
+ *
+ * As long as they are used as paired operations they are safe to
+ * use. See docs/atomics.txt for more discussion.
+ */
+
+#if defined(_ARCH_PPC)
+#define atomic_mb_read(ptr) \
+ ({ \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
+ smp_rmb(); \
+ _val; \
+ })
+
+#define atomic_mb_set(ptr, i) do { \
+ typeof(*ptr) _val = (i); \
+ smp_wmb(); \
+ __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
+ smp_mb(); \
+} while(0)
+#else
+#define atomic_mb_read(ptr) \
+ ({ \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST); \
+ _val; \
+ })
+
+#define atomic_mb_set(ptr, i) do { \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
+} while(0)
+#endif
+
+
+/* All the remaining operations are fully sequentially consistent */
+
+#define atomic_xchg(ptr, i) ({ \
+ typeof(*ptr) _new = (i), _old; \
+ __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
+ _old; \
+})
+
+/* Returns the eventual value, failed or not */
+#define atomic_cmpxchg(ptr, old, new) \
+ ({ \
+ typeof(*ptr) _old = (old), _new = (new); \
+ __atomic_compare_exchange(ptr, &_old, &_new, false, \
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
+ _old; \
+ })
+
+/* Provide shorter names for GCC atomic builtins, return old value */
+#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
+#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
+#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
+
+/* And even shorter names that return void. */
+#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
+#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
+#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+
+#else /* __ATOMIC_RELAXED */
/*
* We use GCC builtin if it's available, as that can use mfence on
@@ -85,8 +205,6 @@
#endif /* _ARCH_PPC */
-#endif /* C11 atomics */
-
/*
* For (host) platforms we don't have explicit barrier definitions
* for, we use the gcc __sync_synchronize() primitive to generate a
@@ -98,42 +216,22 @@
#endif
#ifndef smp_wmb
-#ifdef __ATOMIC_RELEASE
-/* __atomic_thread_fence does not include a compiler barrier; instead,
- * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
- * semantics. If smp_wmb() is a no-op, absence of the barrier means that
- * the compiler is free to reorder stores on each side of the barrier.
- * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
- */
-#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
-#else
#define smp_wmb() __sync_synchronize()
#endif
-#endif
#ifndef smp_rmb
-#ifdef __ATOMIC_ACQUIRE
-#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
-#else
#define smp_rmb() __sync_synchronize()
#endif
-#endif
#ifndef smp_read_barrier_depends
-#ifdef __ATOMIC_CONSUME
-#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
-#else
#define smp_read_barrier_depends() barrier()
#endif
-#endif
-#ifndef atomic_read
+/* These will only be atomic if the processor does the fetch or store
+ * in a single issue memory operation
+ */
#define atomic_read(ptr) (*(__typeof__(*ptr) volatile*) (ptr))
-#endif
-
-#ifndef atomic_set
#define atomic_set(ptr, i) ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))
-#endif
/**
* atomic_rcu_read - reads a RCU-protected pointer to a local variable
@@ -146,30 +244,18 @@
* Inserts memory barriers on architectures that require them (currently only
* Alpha) and documents which pointers are protected by RCU.
*
- * Unless the __ATOMIC_CONSUME memory order is available, atomic_rcu_read also
- * includes a compiler barrier to ensure that value-speculative optimizations
- * (e.g. VSS: Value Speculation Scheduling) does not perform the data read
- * before the pointer read by speculating the value of the pointer. On new
- * enough compilers, atomic_load takes care of such concern about
- * dependency-breaking optimizations.
+ * atomic_rcu_read also includes a compiler barrier to ensure that
+ * value-speculative optimizations (e.g. VSS: Value Speculation
+ * Scheduling) do not perform the data read before the pointer read
+ * by speculating the value of the pointer.
*
* Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
*/
-#ifndef atomic_rcu_read
-#ifdef __ATOMIC_CONSUME
-#define atomic_rcu_read(ptr) ({ \
- typeof(*ptr) _val; \
- __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
- _val; \
-})
-#else
#define atomic_rcu_read(ptr) ({ \
typeof(*ptr) _val = atomic_read(ptr); \
smp_read_barrier_depends(); \
_val; \
})
-#endif
-#endif
/**
* atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
@@ -182,19 +268,10 @@
*
* Should match atomic_rcu_read().
*/
-#ifndef atomic_rcu_set
-#ifdef __ATOMIC_RELEASE
-#define atomic_rcu_set(ptr, i) do { \
- typeof(*ptr) _val = (i); \
- __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
-} while(0)
-#else
#define atomic_rcu_set(ptr, i) do { \
smp_wmb(); \
atomic_set(ptr, i); \
} while (0)
-#endif
-#endif
/* These have the same semantics as Java volatile variables.
* See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
@@ -218,13 +295,11 @@
* (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
* Just always use the barriers manually by the rules above.
*/
-#ifndef atomic_mb_read
#define atomic_mb_read(ptr) ({ \
typeof(*ptr) _val = atomic_read(ptr); \
smp_rmb(); \
_val; \
})
-#endif
#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i) do { \
@@ -237,12 +312,6 @@
#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i) __sync_swap(ptr, i)
-#elif defined(__ATOMIC_SEQ_CST)
-#define atomic_xchg(ptr, i) ({ \
- typeof(*ptr) _new = (i), _old; \
- __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
- _old; \
-})
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
@@ -266,4 +335,5 @@
#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
-#endif
+#endif /* __ATOMIC_RELAXED */
+#endif /* __QEMU_ATOMIC_H */
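
The C11 branch's atomic_rcu_set()/atomic_rcu_read() pair amounts to a release/consume publication protocol. Stripped down to the raw builtins, the pattern looks like this (a sketch of the idiom, not QEMU code):

    struct Data { int a, b; };
    static struct Data *shared;

    /* writer: initialise fully, then publish with release semantics */
    static void publish(struct Data *d)
    {
        d->a = 1;
        d->b = 2;
        __atomic_store_n(&shared, d, __ATOMIC_RELEASE);
    }

    /* reader: consume ordering ensures loads through the returned
     * pointer observe the writer's initialisation */
    static struct Data *subscribe(void)
    {
        return __atomic_load_n(&shared, __ATOMIC_CONSUME);
    }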
diff --git a/io/channel-socket.c b/io/channel-socket.c
index 22d2fd67d4..bf66a78235 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -258,7 +258,7 @@ int qio_channel_socket_dgram_sync(QIOChannelSocket *ioc,
int fd;
trace_qio_channel_socket_dgram_sync(ioc, localAddr, remoteAddr);
- fd = socket_dgram(localAddr, remoteAddr, errp);
+ fd = socket_dgram(remoteAddr, localAddr, errp);
if (fd < 0) {
trace_qio_channel_socket_dgram_fail(ioc);
return -1;
diff --git a/kvm-all.c b/kvm-all.c
index 9cc9ba6ea6..a65e73fb1d 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -2361,7 +2361,7 @@ int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
reg.addr = (uintptr_t) source;
r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (r) {
- trace_kvm_failed_reg_set(id, strerror(r));
+ trace_kvm_failed_reg_set(id, strerror(-r));
}
return r;
}
@@ -2375,7 +2375,7 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
reg.addr = (uintptr_t) target;
r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (r) {
- trace_kvm_failed_reg_get(id, strerror(r));
+ trace_kvm_failed_reg_get(id, strerror(-r));
}
return r;
}
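
The kvm-all.c change is easy to misread: kvm_vcpu_ioctl() returns a negative errno value on failure, so strerror() needs the sign flipped. A minimal demonstration:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int fake_ioctl(void) { return -EINVAL; }  /* stand-in for the wrapper */

    int main(void)
    {
        int r = fake_ioctl();
        if (r) {
            printf("failed: %s\n", strerror(-r));    /* "Invalid argument" */
        }
        return 0;
    }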
diff --git a/migration/ram.c b/migration/ram.c
index 3cdfea4a5c..96c749face 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -609,7 +609,6 @@ static void migration_bitmap_sync_init(void)
iterations_prev = 0;
}
-/* Called with iothread lock held, to protect ram_list.dirty_memory[] */
static void migration_bitmap_sync(void)
{
RAMBlock *block;
@@ -1921,8 +1920,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
acct_clear();
}
- /* iothread lock needed for ram_list.dirty_memory[] */
- qemu_mutex_lock_iothread();
qemu_mutex_lock_ramlist();
rcu_read_lock();
bytes_transferred = 0;
@@ -1947,7 +1944,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
memory_global_dirty_log_start();
migration_bitmap_sync();
qemu_mutex_unlock_ramlist();
- qemu_mutex_unlock_iothread();
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
diff --git a/nbd/server.c b/nbd/server.c
index 43135306b4..dc1d66fa47 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -417,12 +417,12 @@ static coroutine_fn int nbd_negotiate(NBDClientNewData *data)
memcpy(buf, "NBDMAGIC", 8);
if (client->exp) {
assert ((client->exp->nbdflags & ~65535) == 0);
- cpu_to_be64w((uint64_t*)(buf + 8), NBD_CLIENT_MAGIC);
- cpu_to_be64w((uint64_t*)(buf + 16), client->exp->size);
- cpu_to_be16w((uint16_t*)(buf + 26), client->exp->nbdflags | myflags);
+ stq_be_p(buf + 8, NBD_CLIENT_MAGIC);
+ stq_be_p(buf + 16, client->exp->size);
+ stw_be_p(buf + 26, client->exp->nbdflags | myflags);
} else {
- cpu_to_be64w((uint64_t*)(buf + 8), NBD_OPTS_MAGIC);
- cpu_to_be16w((uint16_t *)(buf + 16), NBD_FLAG_FIXED_NEWSTYLE);
+ stq_be_p(buf + 8, NBD_OPTS_MAGIC);
+ stw_be_p(buf + 16, NBD_FLAG_FIXED_NEWSTYLE);
}
if (client->exp) {
@@ -442,8 +442,8 @@ static coroutine_fn int nbd_negotiate(NBDClientNewData *data)
}
assert ((client->exp->nbdflags & ~65535) == 0);
- cpu_to_be64w((uint64_t*)(buf + 18), client->exp->size);
- cpu_to_be16w((uint16_t*)(buf + 26), client->exp->nbdflags | myflags);
+ stq_be_p(buf + 18, client->exp->size);
+ stw_be_p(buf + 26, client->exp->nbdflags | myflags);
if (nbd_negotiate_write(csock, buf + 18,
sizeof(buf) - 18) != sizeof(buf) - 18) {
LOG("write failed");
@@ -528,9 +528,9 @@ static ssize_t nbd_send_reply(int csock, struct nbd_reply *reply)
[ 4 .. 7] error (0 == no error)
[ 8 .. 15] handle
*/
- cpu_to_be32w((uint32_t*)buf, NBD_REPLY_MAGIC);
- cpu_to_be32w((uint32_t*)(buf + 4), reply->error);
- cpu_to_be64w((uint64_t*)(buf + 8), reply->handle);
+ stl_be_p(buf, NBD_REPLY_MAGIC);
+ stl_be_p(buf + 4, reply->error);
+ stq_be_p(buf + 8, reply->handle);
TRACE("Sending response to client");
diff --git a/qemu-char.c b/qemu-char.c
index 927c47e503..2b2c56b87f 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -1171,6 +1171,7 @@ typedef struct {
int connected;
guint timer_tag;
guint open_tag;
+ int slave_fd;
} PtyCharDriver;
static void pty_chr_update_read_handler_locked(CharDriverState *chr);
@@ -1347,6 +1348,7 @@ static void pty_chr_close(struct CharDriverState *chr)
qemu_mutex_lock(&chr->chr_write_lock);
pty_chr_state(chr, 0);
+ close(s->slave_fd);
object_unref(OBJECT(s->ioc));
if (s->timer_tag) {
g_source_remove(s->timer_tag);
@@ -1374,7 +1376,6 @@ static CharDriverState *qemu_chr_open_pty(const char *id,
return NULL;
}
- close(slave_fd);
qemu_set_nonblock(master_fd);
chr = qemu_chr_alloc(common, errp);
@@ -1399,6 +1400,7 @@ static CharDriverState *qemu_chr_open_pty(const char *id,
chr->explicit_be_open = true;
s->ioc = QIO_CHANNEL(qio_channel_file_new_fd(master_fd));
+ s->slave_fd = slave_fd;
s->timer_tag = 0;
return chr;
@@ -2856,6 +2858,10 @@ static void tcp_chr_update_read_handler(CharDriverState *chr)
{
TCPCharDriver *s = chr->opaque;
+ if (!s->connected) {
+ return;
+ }
+
remove_fd_in_watch(chr);
if (s->ioc) {
chr->fd_in_tag = io_add_watch_poll(s->ioc,
@@ -4380,7 +4386,7 @@ static CharDriverState *qmp_chardev_open_udp(const char *id,
QIOChannelSocket *sioc = qio_channel_socket_new();
if (qio_channel_socket_dgram_sync(sioc,
- udp->remote, udp->local,
+ udp->local, udp->remote,
errp) < 0) {
object_unref(OBJECT(sioc));
return NULL;
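
The pty regression fix keeps the slave file descriptor open for the chardev's lifetime and closes it in pty_chr_close(): if nothing holds the slave side, reads on the master report hangup even before any client connects. The shape of the fix, sketched with the usual openpty() from <pty.h> (link with -lutil):

    #include <pty.h>

    static int backend_open(int *master, int *slave)
    {
        if (openpty(master, slave, NULL, NULL, NULL) < 0) {
            return -1;
        }
        /* keep *slave open; closing it here would make the master
         * side see HUP while no client is attached */
        return 0;
    }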
diff --git a/qemu-nbd.texi b/qemu-nbd.texi
index 46fd483eb8..0027841ecb 100644
--- a/qemu-nbd.texi
+++ b/qemu-nbd.texi
@@ -1,68 +1,78 @@
@example
@c man begin SYNOPSIS
-usage: qemu-nbd [OPTION]... @var{filename}
+@command{qemu-nbd} [OPTION]... @var{filename}
+
+@command{qemu-nbd} @option{-d} @var{dev}
@c man end
@end example
@c man begin DESCRIPTION
-Export QEMU disk image using NBD protocol.
+Export a QEMU disk image using the NBD protocol.
@c man end
@c man begin OPTIONS
+@var{filename} is a disk image filename.
+
+@var{dev} is an NBD device.
+
@table @option
-@item @var{filename}
- is a disk image filename
@item -p, --port=@var{port}
- port to listen on (default @samp{10809})
+The TCP port to listen on (default @samp{10809})
@item -o, --offset=@var{offset}
- offset into the image
+The offset into the image
@item -b, --bind=@var{iface}
- interface to bind to (default @samp{0.0.0.0})
+The interface to bind to (default @samp{0.0.0.0})
@item -k, --socket=@var{path}
- Use a unix socket with path @var{path}
-@item -f, --format=@var{format}
- Set image format as @var{format}
+Use a unix socket with path @var{path}
+@item -f, --format=@var{fmt}
+Force the use of the block driver for format @var{fmt} instead of
+auto-detecting
@item -r, --read-only
- export read-only
+Export the disk as read-only
@item -P, --partition=@var{num}
- only expose partition @var{num}
+Only expose partition @var{num}
@item -s, --snapshot
- use @var{filename} as an external snapshot, create a temporary
- file with backing_file=@var{filename}, redirect the write to
- the temporary one
+Use @var{filename} as an external snapshot, creating a temporary
+file with backing_file=@var{filename} and redirecting writes to
+the temporary file
@item -l, --load-snapshot=@var{snapshot_param}
- load an internal snapshot inside @var{filename} and export it
- as an read-only device, @var{snapshot_param} format is
- 'snapshot.id=[ID],snapshot.name=[NAME]' or '[ID_OR_NAME]'
+Load an internal snapshot inside @var{filename} and export it
+as a read-only device; the @var{snapshot_param} format is
+'snapshot.id=[ID],snapshot.name=[NAME]' or '[ID_OR_NAME]'
@item -n, --nocache
@itemx --cache=@var{cache}
- set cache mode to be used with the file. See the documentation of
- the emulator's @code{-drive cache=...} option for allowed values.
+The cache mode to be used with the file. See the documentation of
+the emulator's @code{-drive cache=...} option for allowed values.
@item --aio=@var{aio}
- choose asynchronous I/O mode between @samp{threads} (the default)
- and @samp{native} (Linux only).
+Set the asynchronous I/O mode to @samp{threads} (the default)
+or @samp{native} (Linux only).
@item --discard=@var{discard}
- toggles whether @dfn{discard} (also known as @dfn{trim} or @dfn{unmap})
- requests are ignored or passed to the filesystem. The default is no
- (@samp{--discard=ignore}).
+Control whether @dfn{discard} (also known as @dfn{trim} or @dfn{unmap})
+requests are ignored or passed to the filesystem. @var{discard} is one of
+@samp{ignore} (or @samp{off}), @samp{unmap} (or @samp{on}). The default is
+@samp{ignore}.
+@item --detect-zeroes=@var{detect-zeroes}
+Control the automatic conversion of plain zero writes by the OS to
+driver-specific optimized zero write commands. @var{detect-zeroes} is one of
+@samp{off}, @samp{on} or @samp{unmap}. @samp{unmap}
+converts a zero write to an unmap operation and can only be used if
+@var{discard} is set to @samp{unmap}. The default is @samp{off}.
@item -c, --connect=@var{dev}
- connect @var{filename} to NBD device @var{dev}
+Connect @var{filename} to NBD device @var{dev}
@item -d, --disconnect
- disconnect the specified device
+Disconnect the device @var{dev}
@item -e, --shared=@var{num}
- device can be shared by @var{num} clients (default @samp{1})
-@item -f, --format=@var{fmt}
- force block driver for format @var{fmt} instead of auto-detecting
+Allow up to @var{num} clients to share the device (default @samp{1})
@item -t, --persistent
- don't exit on the last connection
+Don't exit on the last connection
@item -v, --verbose
- display extra debugging information
+Display extra debugging information
@item -h, --help
- display this help and exit
+Display this help and exit
@item -V, --version
- output version information and exit
+Display version information and exit
@end table
@c man end
@@ -79,7 +89,7 @@ warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@c man end
@c man begin SEEALSO
-qemu-img(1)
+qemu(1), qemu-img(1)
@c man end
@end ignore
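
For readers of the revised page, the options compose in the obvious way; an illustrative invocation (file name and port chosen arbitrarily):

    qemu-nbd --format=raw --read-only --port=10810 disk.img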
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 7dacf32f43..8261bcb1ad 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -636,7 +636,7 @@ sub get_maintainers {
if ($email) {
if (! $interactive) {
- $email_git_fallback = 0 if @email_to > 0 || @list_to > 0 || $email_git || $email_git_blame;
+ $email_git_fallback = 0 if @email_to > 0 || $email_git || $email_git_blame;
if ($email_git_fallback) {
print STDERR "get_maintainer.pl: No maintainers found, printing recent contributors.\n";
print STDERR "get_maintainer.pl: Do not blindly cc: them on patches! Use common sense.\n";
diff --git a/scripts/kvm/kvm_stat b/scripts/kvm/kvm_stat
index d43e8f3e85..3cf1181750 100755
--- a/scripts/kvm/kvm_stat
+++ b/scripts/kvm/kvm_stat
@@ -22,6 +22,7 @@ import resource
import struct
import re
from collections import defaultdict
+from time import sleep
VMX_EXIT_REASONS = {
'EXCEPTION_NMI': 0,
@@ -778,7 +779,7 @@ def get_providers(options):
return providers
-def check_access():
+def check_access(options):
if not os.path.exists('/sys/kernel/debug'):
sys.stderr.write('Please enable CONFIG_DEBUG_FS in your kernel.')
sys.exit(1)
@@ -790,14 +791,24 @@ def check_access():
"Also ensure, that the kvm modules are loaded.\n")
sys.exit(1)
- if not os.path.exists(PATH_DEBUGFS_TRACING):
- sys.stderr.write("Please make {0} readable by the current user.\n"
- .format(PATH_DEBUGFS_TRACING))
- sys.exit(1)
+ if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints
+ or not options.debugfs):
+ sys.stderr.write("Please enable CONFIG_TRACING in your kernel "
+ "when using the option -t (default).\n"
+ "If it is enabled, make {0} readable by the "
+ "current user.\n")
+ if options.tracepoints:
+ sys.exit(1)
+
+ sys.stderr.write("Falling back to debugfs statistics!\n"
+ options.debugfs = True
+ sleep(5)
+
+ return options
def main():
- check_access()
options = get_options()
+ options = check_access(options)
providers = get_providers(options)
stats = Stats(providers, fields=options.fields)
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 81568c8b2b..3802ed9359 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -861,7 +861,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
* Leave bits 20-13 in place for setting accessed/dirty bits below.
*/
- pte = pde | ((pde & 0x1fe000) << (32 - 13));
+ pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
rsvd_mask = 0x200000;
goto do_check_protect_pse36;
}
@@ -1056,7 +1056,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (!(pde & PG_PRESENT_MASK))
return -1;
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
- pte = pde | ((pde & 0x1fe000) << (32 - 13));
+ pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
page_size = 4096 * 1024;
} else {
/* page directory entry */
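
The PSE36 fix deserves a note: pde is a 32-bit quantity in this legacy paging path, so without the LL suffix the masked shift happens in 32 bits and the very bits PSE-36 contributes (39..32) are discarded. A standalone check of the arithmetic:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pde = 0x1fe000;  /* bits 20..13 all set */
        uint64_t wrong = pde | ((pde & 0x1fe000) << (32 - 13));   /* 32-bit shift wraps to 0 */
        uint64_t right = pde | ((pde & 0x1fe000LL) << (32 - 13)); /* keeps bits 39..32 */
        printf("%" PRIx64 " vs %" PRIx64 "\n", wrong, right);     /* 1fe000 vs ff001fe000 */
        return 0;
    }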
diff --git a/target-i386/helper.h b/target-i386/helper.h
index ecfcfd1a97..3a25c3b392 100644
--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -44,10 +44,6 @@ DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl)
DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int)
DEF_HELPER_2(invlpg, void, env, tl)
-DEF_HELPER_4(enter_level, void, env, int, int, tl)
-#ifdef TARGET_X86_64
-DEF_HELPER_4(enter64_level, void, env, int, int, tl)
-#endif
DEF_HELPER_1(sysenter, void, env)
DEF_HELPER_2(sysexit, void, env, int)
#ifdef TARGET_X86_64
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 4f269416a5..b5f3d72fe3 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -1379,80 +1379,6 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return ret;
}
-void helper_enter_level(CPUX86State *env, int level, int data32,
- target_ulong t1)
-{
- target_ulong ssp;
- uint32_t esp_mask, esp, ebp;
-
- esp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
- ebp = env->regs[R_EBP];
- esp = env->regs[R_ESP];
- if (data32) {
- /* 32 bit */
- esp -= 4;
- while (--level) {
- esp -= 4;
- ebp -= 4;
- cpu_stl_data_ra(env, ssp + (esp & esp_mask),
- cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
- GETPC()),
- GETPC());
- }
- esp -= 4;
- cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
- } else {
- /* 16 bit */
- esp -= 2;
- while (--level) {
- esp -= 2;
- ebp -= 2;
- cpu_stw_data_ra(env, ssp + (esp & esp_mask),
- cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
- GETPC()),
- GETPC());
- }
- esp -= 2;
- cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
- }
-}
-
-#ifdef TARGET_X86_64
-void helper_enter64_level(CPUX86State *env, int level, int data64,
- target_ulong t1)
-{
- target_ulong esp, ebp;
-
- ebp = env->regs[R_EBP];
- esp = env->regs[R_ESP];
-
- if (data64) {
- /* 64 bit */
- esp -= 8;
- while (--level) {
- esp -= 8;
- ebp -= 8;
- cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
- GETPC());
- }
- esp -= 8;
- cpu_stq_data_ra(env, esp, t1, GETPC());
- } else {
- /* 16 bit */
- esp -= 2;
- while (--level) {
- esp -= 2;
- ebp -= 2;
- cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
- GETPC());
- }
- esp -= 2;
- cpu_stw_data_ra(env, esp, t1, GETPC());
- }
-}
-#endif
-
void helper_lldt(CPUX86State *env, int selector)
{
SegmentCache *dt;
diff --git a/target-i386/translate.c b/target-i386/translate.c
index f7ceadd2d1..c8e2799269 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -64,8 +64,9 @@ static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
+static TCGv cpu_seg_base[6];
/* local temps */
-static TCGv cpu_T[2];
+static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
@@ -304,6 +305,12 @@ static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
}
}
+/* Select the size of the stack pointer. */
+static inline TCGMemOp mo_stacksize(DisasContext *s)
+{
+ return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+}
+
/* Select only size 64 else 32. Used for SSE operand sizes. */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
@@ -366,34 +373,12 @@ static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
}
}
-static inline void gen_op_movl_A0_reg(int reg)
-{
- tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
-}
-
-static inline void gen_op_addl_A0_im(int32_t val)
-{
- tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
-#ifdef TARGET_X86_64
- tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
-#endif
-}
-
-#ifdef TARGET_X86_64
-static inline void gen_op_addq_A0_im(int64_t val)
-{
- tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
-}
-#endif
-
static void gen_add_A0_im(DisasContext *s, int val)
{
-#ifdef TARGET_X86_64
- if (CODE64(s))
- gen_op_addq_A0_im(val);
- else
-#endif
- gen_op_addl_A0_im(val);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ }
}
static inline void gen_op_jmp_v(TCGv dest)
@@ -409,68 +394,10 @@ static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
- tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
+ tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
gen_op_mov_reg_v(size, reg, cpu_tmp0);
}
-static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
-{
- tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
- if (shift != 0)
- tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- /* For x86_64, this sets the higher half of register to zero.
- For i386, this is equivalent to a nop. */
- tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
-}
-
-static inline void gen_op_movl_A0_seg(int reg)
-{
- tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
-}
-
-static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
-{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- } else {
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
- }
-#else
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
-#endif
-}
-
-#ifdef TARGET_X86_64
-static inline void gen_op_movq_A0_seg(int reg)
-{
- tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
-}
-
-static inline void gen_op_addq_A0_seg(int reg)
-{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
-}
-
-static inline void gen_op_movq_A0_reg(int reg)
-{
- tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
-}
-
-static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
-{
- tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
- if (shift != 0)
- tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
-}
-#endif
-
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
@@ -484,9 +411,9 @@ static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
if (d == OR_TMP0) {
- gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, idx, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(idx, d, cpu_T[0]);
+ gen_op_mov_reg_v(idx, d, cpu_T0);
}
}
@@ -496,74 +423,77 @@ static inline void gen_jmp_im(target_ulong pc)
gen_op_jmp_v(cpu_tmp0);
}
-static inline void gen_string_movl_A0_ESI(DisasContext *s)
+/* Compute SEG:REG into A0. SEG is selected from the override segment
+ (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
+ indicate no override. */
+static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
+ int def_seg, int ovr_seg)
{
- int override;
-
- override = s->override;
- switch (s->aflag) {
+ switch (aflag) {
#ifdef TARGET_X86_64
case MO_64:
- if (override >= 0) {
- gen_op_movq_A0_seg(override);
- gen_op_addq_A0_reg_sN(0, R_ESI);
- } else {
- gen_op_movq_A0_reg(R_ESI);
+ if (ovr_seg < 0) {
+ tcg_gen_mov_tl(cpu_A0, a0);
+ return;
}
break;
#endif
case MO_32:
/* 32 bit address */
- if (s->addseg && override < 0)
- override = R_DS;
- if (override >= 0) {
- gen_op_movl_A0_seg(override);
- gen_op_addl_A0_reg_sN(0, R_ESI);
- } else {
- gen_op_movl_A0_reg(R_ESI);
+ if (ovr_seg < 0) {
+ if (s->addseg) {
+ ovr_seg = def_seg;
+ } else {
+ tcg_gen_ext32u_tl(cpu_A0, a0);
+ return;
+ }
}
break;
case MO_16:
- /* 16 address, always override */
- if (override < 0)
- override = R_DS;
- tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
- gen_op_addl_A0_seg(s, override);
+ /* 16 bit address */
+ if (ovr_seg < 0) {
+ ovr_seg = def_seg;
+ }
+ tcg_gen_ext16u_tl(cpu_A0, a0);
+ /* ADDSEG will only be false in 16-bit mode for LEA. */
+ if (!s->addseg) {
+ return;
+ }
+ a0 = cpu_A0;
break;
default:
tcg_abort();
}
-}
-static inline void gen_string_movl_A0_EDI(DisasContext *s)
-{
- switch (s->aflag) {
-#ifdef TARGET_X86_64
- case MO_64:
- gen_op_movq_A0_reg(R_EDI);
- break;
-#endif
- case MO_32:
- if (s->addseg) {
- gen_op_movl_A0_seg(R_ES);
- gen_op_addl_A0_reg_sN(0, R_EDI);
+ if (ovr_seg >= 0) {
+ TCGv seg = cpu_seg_base[ovr_seg];
+
+ if (aflag == MO_64) {
+ tcg_gen_add_tl(cpu_A0, a0, seg);
+ } else if (CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, a0);
+ tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
} else {
- gen_op_movl_A0_reg(R_EDI);
+ tcg_gen_add_tl(cpu_A0, a0, seg);
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
- break;
- case MO_16:
- tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
- gen_op_addl_A0_seg(s, R_ES);
- break;
- default:
- tcg_abort();
}
}
+static inline void gen_string_movl_A0_ESI(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
+}
+
+static inline void gen_string_movl_A0_EDI(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
+}
+
static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
- tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
+ tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
+ tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
};
static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
@@ -661,7 +591,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
target_ulong next_eip;
if (s->pe && (s->cpl > s->iopl || s->vm86)) {
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
switch (ot) {
case MO_8:
gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
@@ -681,7 +611,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
gen_jmp_im(cur_eip);
svm_flags |= (1 << (4 + ot));
next_eip = s->pc - s->cs_base;
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
tcg_const_i32(svm_flags),
tcg_const_i32(next_eip - cur_eip));
@@ -691,9 +621,9 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
gen_string_movl_A0_ESI(s);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
gen_string_movl_A0_EDI(s);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
gen_op_add_reg_T0(s->aflag, R_EDI);
@@ -701,31 +631,31 @@ static inline void gen_movs(DisasContext *s, TCGMemOp ot)
static void gen_op_update1_cc(void)
{
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
static void gen_op_update2_cc(void)
{
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
static void gen_op_update3_cc(TCGv reg)
{
tcg_gen_mov_tl(cpu_cc_src2, reg);
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
static inline void gen_op_testl_T0_T1_cc(void)
{
- tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
+ tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
}
static void gen_op_update_neg_cc(void)
{
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
tcg_gen_movi_tl(cpu_cc_srcT, 0);
}
@@ -1067,11 +997,11 @@ static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
- CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
+ CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
if (cc.mask != -1) {
- tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
- cc.reg = cpu_T[0];
+ tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
+ cc.reg = cpu_T0;
}
if (cc.use_reg2) {
tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
@@ -1085,12 +1015,12 @@ static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
A translation block must end soon. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
- CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
+ CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
gen_update_cc_op(s);
if (cc.mask != -1) {
- tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
- cc.reg = cpu_T[0];
+ tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
+ cc.reg = cpu_T0;
}
set_cc_op(s, CC_OP_DYNAMIC);
if (cc.use_reg2) {
@@ -1115,9 +1045,9 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
gen_string_movl_A0_EDI(s);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
}
@@ -1125,8 +1055,8 @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot)
static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
gen_string_movl_A0_ESI(s);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
}
@@ -1134,7 +1064,7 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot)
static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
gen_string_movl_A0_EDI(s);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_op(s, OP_CMPL, ot, R_EAX);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
@@ -1143,7 +1073,7 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot)
static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
gen_string_movl_A0_EDI(s);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_string_movl_A0_ESI(s);
gen_op(s, OP_CMPL, ot, OR_TMP0);
gen_op_movl_T0_Dshift(ot);
@@ -1172,12 +1102,12 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot)
gen_string_movl_A0_EDI(s);
/* Note: we must do this dummy write first to be restartable in
case of page fault. */
- tcg_gen_movi_tl(cpu_T[0], 0);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ tcg_gen_movi_tl(cpu_T0, 0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
- gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
gen_bpt_io(s, cpu_tmp2_i32, ot);
@@ -1192,11 +1122,11 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot)
gen_io_start();
}
gen_string_movl_A0_ESI(s);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
@@ -1310,63 +1240,63 @@ static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
if (d != OR_TMP0) {
- gen_op_mov_v_reg(ot, cpu_T[0], d);
+ gen_op_mov_v_reg(ot, cpu_T0, d);
} else {
- gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
}
switch(op) {
case OP_ADCL:
gen_compute_eflags_c(s1, cpu_tmp4);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update3_cc(cpu_tmp4);
set_cc_op(s1, CC_OP_ADCB + ot);
break;
case OP_SBBL:
gen_compute_eflags_c(s1, cpu_tmp4);
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update3_cc(cpu_tmp4);
set_cc_op(s1, CC_OP_SBBB + ot);
break;
case OP_ADDL:
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update2_cc();
set_cc_op(s1, CC_OP_ADDB + ot);
break;
case OP_SUBL:
- tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update2_cc();
set_cc_op(s1, CC_OP_SUBB + ot);
break;
default:
case OP_ANDL:
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update1_cc();
set_cc_op(s1, CC_OP_LOGICB + ot);
break;
case OP_ORL:
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update1_cc();
set_cc_op(s1, CC_OP_LOGICB + ot);
break;
case OP_XORL:
- tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_st_rm_T0_A0(s1, ot, d);
gen_op_update1_cc();
set_cc_op(s1, CC_OP_LOGICB + ot);
break;
case OP_CMPL:
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
- tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
- tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
set_cc_op(s1, CC_OP_SUBB + ot);
break;
}
@@ -1376,20 +1306,20 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
if (d != OR_TMP0) {
- gen_op_mov_v_reg(ot, cpu_T[0], d);
+ gen_op_mov_v_reg(ot, cpu_T0, d);
} else {
- gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
}
gen_compute_eflags_c(s1, cpu_cc_src);
if (c > 0) {
- tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
+ tcg_gen_addi_tl(cpu_T0, cpu_T0, 1);
set_cc_op(s1, CC_OP_INCB + ot);
} else {
- tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
+ tcg_gen_addi_tl(cpu_T0, cpu_T0, -1);
set_cc_op(s1, CC_OP_DECB + ot);
}
gen_op_st_rm_T0_A0(s1, ot, d);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}
static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
@@ -1444,33 +1374,33 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
/* load */
if (op1 == OR_TMP0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
}
- tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
- tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
+ tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);
if (is_right) {
if (is_arith) {
- gen_exts(ot, cpu_T[0]);
- tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_exts(ot, cpu_T0);
+ tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
- gen_extu(ot, cpu_T[0]);
- tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
}
} else {
- tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
}
/* store */
gen_op_st_rm_T0_A0(s, ot, op1);
- gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
+ gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
}
static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
@@ -1480,25 +1410,25 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
/* load */
if (op1 == OR_TMP0)
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
else
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
op2 &= mask;
if (op2 != 0) {
if (is_right) {
if (is_arith) {
- gen_exts(ot, cpu_T[0]);
- tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
+ gen_exts(ot, cpu_T0);
+ tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
} else {
- gen_extu(ot, cpu_T[0]);
- tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
}
} else {
- tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
+ tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
}
}
@@ -1508,7 +1438,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
/* update eflags if non zero shift */
if (op2 != 0) {
tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
}
}
@@ -1520,41 +1450,41 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
/* load */
if (op1 == OR_TMP0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
}
- tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
switch (ot) {
case MO_8:
/* Replicate the 8-bit input so that a 32-bit rotate works. */
- tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+ tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
goto do_long;
case MO_16:
/* Replicate the 16-bit input so that a 32-bit rotate works. */
- tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
+ tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
goto do_long;
do_long:
#ifdef TARGET_X86_64
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
if (is_right) {
tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
} else {
tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
}
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
break;
#endif
default:
if (is_right) {
- tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
- tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
}
break;
}
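
Editor's note: the MO_8/MO_16 paths above replicate the input so a single 32-bit rotate produces the narrow rotate in the low bits: 0xAB * 0x01010101 gives 0xABABABAB, and rotating that by any count leaves the correct 8-bit result in the low byte. A standalone check (rotr32 is a local helper written for this sketch, not a TCG call):

#include <stdint.h>
#include <stdio.h>

static uint32_t rotr32(uint32_t x, unsigned n)
{
    n &= 31;
    return n ? (x >> n) | (x << (32 - n)) : x;
}

int main(void)
{
    uint8_t v = 0xAB;
    uint32_t rep = v * 0x01010101u;            /* replicate the byte */

    for (unsigned n = 0; n < 8; n++) {
        uint8_t direct = (uint8_t)((v >> n) | (v << (8 - n)));
        uint8_t viarep = (uint8_t)rotr32(rep, n);
        printf("n=%u direct=%02x via32=%02x\n", n, direct, viarep);
    }
    return 0;
}
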
@@ -1570,12 +1500,12 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
since we've computed the flags into CC_SRC, these variables are
currently dead. */
if (is_right) {
- tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
- tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
+ tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
} else {
- tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
- tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
}
tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
@@ -1586,7 +1516,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
exactly as we computed above. */
t0 = tcg_const_i32(0);
t1 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(t1, cpu_T1);
tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
@@ -1606,9 +1536,9 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
/* load */
if (op1 == OR_TMP0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
}
op2 &= mask;
@@ -1616,20 +1546,20 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
switch (ot) {
#ifdef TARGET_X86_64
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
if (is_right) {
tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
} else {
tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
}
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
break;
#endif
default:
if (is_right) {
- tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
+ tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
} else {
- tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
+ tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
}
break;
case MO_8:
@@ -1642,10 +1572,10 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
if (is_right) {
shift = mask + 1 - shift;
}
- gen_extu(ot, cpu_T[0]);
- tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
}
}
@@ -1662,12 +1592,12 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
since we've computed the flags into CC_SRC, these variables are
currently dead. */
if (is_right) {
- tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
- tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
+ tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
} else {
- tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
- tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
}
tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
@@ -1684,24 +1614,24 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
/* load */
if (op1 == OR_TMP0)
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
else
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
if (is_right) {
switch (ot) {
case MO_8:
- gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
case MO_16:
- gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
case MO_32:
- gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
#endif
default:
@@ -1710,17 +1640,17 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
} else {
switch (ot) {
case MO_8:
- gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
case MO_16:
- gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
case MO_32:
- gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
+ gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
break;
#endif
default:
@@ -1740,9 +1670,9 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
/* load */
if (op1 == OR_TMP0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], op1);
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
}
count = tcg_temp_new();
@@ -1754,11 +1684,11 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
portion by constructing it as a 32-bit value. */
if (is_right) {
- tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
- tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
- tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
+ tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
+ tcg_gen_mov_tl(cpu_T1, cpu_T0);
+ tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
} else {
- tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
+ tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
}
/* FALLTHRU */
#ifdef TARGET_X86_64
@@ -1766,57 +1696,57 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
/* Concatenate the two 32-bit values and use a 64-bit shift. */
tcg_gen_subi_tl(cpu_tmp0, count, 1);
if (is_right) {
- tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
+ tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
} else {
- tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
- tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
- tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
+ tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
+ tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
- tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
+ tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
}
break;
#endif
default:
tcg_gen_subi_tl(cpu_tmp0, count, 1);
if (is_right) {
- tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
+ tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
- tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
+ tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
} else {
- tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
if (ot == MO_16) {
/* Only needed if count > 16, for Intel behaviour. */
tcg_gen_subfi_tl(cpu_tmp4, 33, count);
- tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
+ tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
}
tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
- tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
- tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
+ tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
}
tcg_gen_movi_tl(cpu_tmp4, 0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
- cpu_tmp4, cpu_T[1]);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
+ cpu_tmp4, cpu_T1);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
break;
}
/* store */
gen_op_st_rm_T0_A0(s, ot, op1);
- gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
+ gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
tcg_temp_free(count);
}
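
Editor's note: the MO_16 SHRD path in the function above is easiest to see with concrete values. The deposit builds B:A in 32 bits, the fall-through concatenation makes the 48-bit pattern A:B:A, and one wide shift then yields a result for any count up to 31. A numeric sketch with arbitrary values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t a = 0x1234, b = 0xABCD;   /* shrdw cl, b, a */
    unsigned count = 20;               /* counts > 16 exercise the trick */

    /* A:B:A, with A in bits 32..47, B in 16..31, A again in 0..15. */
    uint64_t aba = ((uint64_t)a << 32) | ((uint64_t)b << 16) | a;
    printf("shrdw by %u -> %04x\n", count, (uint16_t)(aba >> count));
    return 0;
}
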
static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
{
if (s != OR_TMP1)
- gen_op_mov_v_reg(ot, cpu_T[1], s);
+ gen_op_mov_v_reg(ot, cpu_T1, s);
switch(op) {
case OP_ROL:
gen_rot_rm_T1(s1, ot, d, 0);
@@ -1864,7 +1794,7 @@ static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
break;
default:
/* currently not optimized */
- tcg_gen_movi_tl(cpu_T[1], c);
+ tcg_gen_movi_tl(cpu_T1, c);
gen_shift(s1, op, ot, d, OR_TMP1);
break;
}
@@ -1873,17 +1803,12 @@ static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
target_long disp;
- int havesib;
- int base;
- int index;
- int scale;
- int mod, rm, code, override, must_add_seg;
+ int havesib, base, index, scale;
+ int mod, rm, code, def_seg, ovr_seg;
TCGv sum;
- override = s->override;
- must_add_seg = s->addseg;
- if (override >= 0)
- must_add_seg = 1;
+ def_seg = R_DS;
+ ovr_seg = s->override;
mod = (modrm >> 6) & 3;
rm = modrm & 7;
@@ -1953,61 +1878,34 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
}
if (TCGV_IS_UNUSED(sum)) {
tcg_gen_movi_tl(cpu_A0, disp);
- } else {
+ sum = cpu_A0;
+ } else if (disp != 0) {
tcg_gen_addi_tl(cpu_A0, sum, disp);
+ sum = cpu_A0;
}
- if (must_add_seg) {
- if (override < 0) {
- if (base == R_EBP || base == R_ESP) {
- override = R_SS;
- } else {
- override = R_DS;
- }
- }
-
- tcg_gen_ld_tl(cpu_tmp0, cpu_env,
- offsetof(CPUX86State, segs[override].base));
- if (CODE64(s)) {
- if (s->aflag == MO_32) {
- tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
- }
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- return;
- }
-
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- }
-
- if (s->aflag == MO_32) {
- tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ if (base == R_EBP || base == R_ESP) {
+ def_seg = R_SS;
}
break;
case MO_16:
- switch (mod) {
- case 0:
+ sum = cpu_A0;
+ if (mod == 0) {
if (rm == 6) {
disp = cpu_lduw_code(env, s->pc);
s->pc += 2;
tcg_gen_movi_tl(cpu_A0, disp);
- rm = 0; /* avoid SS override */
- goto no_rm;
- } else {
- disp = 0;
+ break;
}
- break;
- case 1:
+ disp = 0;
+ } else if (mod == 1) {
disp = (int8_t)cpu_ldub_code(env, s->pc++);
- break;
- default:
- case 2:
+ } else {
disp = (int16_t)cpu_lduw_code(env, s->pc);
s->pc += 2;
- break;
}
- sum = cpu_A0;
switch (rm) {
case 0:
tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
@@ -2017,9 +1915,11 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
break;
case 2:
tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
+ def_seg = R_SS;
break;
case 3:
tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
+ def_seg = R_SS;
break;
case 4:
sum = cpu_regs[R_ESI];
@@ -2029,30 +1929,24 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
break;
case 6:
sum = cpu_regs[R_EBP];
+ def_seg = R_SS;
break;
default:
case 7:
sum = cpu_regs[R_EBX];
break;
}
- tcg_gen_addi_tl(cpu_A0, sum, disp);
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- no_rm:
- if (must_add_seg) {
- if (override < 0) {
- if (rm == 2 || rm == 3 || rm == 6) {
- override = R_SS;
- } else {
- override = R_DS;
- }
- }
- gen_op_addl_A0_seg(s, override);
+ if (disp != 0) {
+ tcg_gen_addi_tl(cpu_A0, sum, disp);
+ sum = cpu_A0;
}
break;
default:
tcg_abort();
}
+
+ gen_lea_v_seg(s, s->aflag, sum, def_seg, ovr_seg);
}
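
Editor's note: the rm switch above encodes the classic 16-bit effective-address table; the rewrite only tracks which default segment each form implies and leaves the final base addition to gen_lea_v_seg. A quick lookup-table sketch of that mapping (mod=0 with rm=6 is the pure disp16 form and uses DS):

#include <stdio.h>

int main(void)
{
    static const char *ea[8] = {
        "BX+SI", "BX+DI", "BP+SI", "BP+DI", "SI", "DI", "BP", "BX"
    };
    for (int rm = 0; rm < 8; rm++) {
        /* BP-based forms (rm 2, 3, 6) default to SS, the rest to DS. */
        const char *seg = (rm == 2 || rm == 3 || rm == 6) ? "SS" : "DS";
        printf("rm=%d  %-6s default %s\n", rm, ea[rm], seg);
    }
    return 0;
}
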
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
@@ -2115,23 +2009,7 @@ static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
- int override, must_add_seg;
- must_add_seg = s->addseg;
- override = R_DS;
- if (s->override >= 0) {
- override = s->override;
- must_add_seg = 1;
- }
- if (must_add_seg) {
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- gen_op_addq_A0_seg(override);
- } else
-#endif
- {
- gen_op_addl_A0_seg(s, override);
- }
- }
+ gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
@@ -2146,23 +2024,23 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
if (mod == 3) {
if (is_store) {
if (reg != OR_TMP0)
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
if (reg != OR_TMP0)
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
}
} else {
gen_lea_modrm(env, s, modrm);
if (is_store) {
if (reg != OR_TMP0)
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
if (reg != OR_TMP0)
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
}
}
}
@@ -2259,7 +2137,7 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- cc = gen_prepare_cc(s, b, cpu_T[1]);
+ cc = gen_prepare_cc(s, b, cpu_T1);
if (cc.mask != -1) {
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cc.reg, cc.mask);
@@ -2269,9 +2147,9 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
cc.reg2 = tcg_const_tl(cc.imm);
}
- tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
- cpu_T[0], cpu_regs[reg]);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
+ cpu_T0, cpu_regs[reg]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
if (cc.mask != -1) {
tcg_temp_free(cc.reg);
@@ -2283,18 +2161,16 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
static inline void gen_op_movl_T0_seg(int seg_reg)
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
}
static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
- tcg_gen_st32_tl(cpu_T[0], cpu_env,
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
- tcg_gen_st_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,segs[seg_reg].base));
+ tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
}
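
Editor's note: in real and VM86 mode a segment base is simply the selector shifted left by four, which is why the rewritten gen_op_movl_seg_T0_vm can store straight into cpu_seg_base. Numerically:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t selector = 0x1234;
    uint32_t base = (uint32_t)selector << 4;   /* 0x12340 */
    uint32_t offset = 0x0010;
    printf("base=%#x linear=%#x\n", base, base + offset);
    return 0;
}
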
/* move T0 to seg_reg and compute if the CPU state may change. Never
@@ -2302,7 +2178,7 @@ static inline void gen_op_movl_seg_T0_vm(int seg_reg)
static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
{
if (s->pe && !s->vm86) {
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
@@ -2343,44 +2219,25 @@ gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
static inline void gen_stack_update(DisasContext *s, int addend)
{
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- gen_op_add_reg_im(MO_64, R_ESP, addend);
- } else
-#endif
- if (s->ss32) {
- gen_op_add_reg_im(MO_32, R_ESP, addend);
- } else {
- gen_op_add_reg_im(MO_16, R_ESP, addend);
- }
+ gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
}
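
Editor's note: mo_stacksize, introduced earlier in this series, collapses the old CODE64/ss32 ladder into one width choice: MO_64 in long mode, otherwise MO_32 or MO_16 depending on the SS descriptor. A hypothetical mirror of that selection (the real helper takes a DisasContext, not two ints):

#include <stdio.h>

enum { MO_16, MO_32, MO_64 };   /* stand-ins for the TCGMemOp values */

static int mo_stacksize(int code64, int ss32)
{
    return code64 ? MO_64 : ss32 ? MO_32 : MO_16;
}

int main(void)
{
    printf("long=%d ss32=%d ss16=%d\n",
           mo_stacksize(1, 0), mo_stacksize(0, 1), mo_stacksize(0, 0));
    return 0;
}
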
/* Generate a push. It depends on ss32, addseg and dflag. */
static void gen_push_v(DisasContext *s, TCGv val)
{
- TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = mo_stacksize(s);
int size = 1 << d_ot;
TCGv new_esp = cpu_A0;
tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
- if (CODE64(s)) {
- a_ot = MO_64;
- } else if (s->ss32) {
- a_ot = MO_32;
+ if (!CODE64(s)) {
if (s->addseg) {
new_esp = cpu_tmp4;
tcg_gen_mov_tl(new_esp, cpu_A0);
- gen_op_addl_A0_seg(s, R_SS);
- } else {
- tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
- } else {
- a_ot = MO_16;
- new_esp = cpu_tmp4;
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(new_esp, cpu_A0);
- gen_op_addl_A0_seg(s, R_SS);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
}
gen_op_st_v(s, d_ot, val, cpu_A0);
@@ -2391,127 +2248,112 @@ static void gen_push_v(DisasContext *s, TCGv val)
static TCGMemOp gen_pop_T0(DisasContext *s)
{
TCGMemOp d_ot = mo_pushpop(s, s->dflag);
- TCGv addr = cpu_A0;
- if (CODE64(s)) {
- addr = cpu_regs[R_ESP];
- } else if (!s->ss32) {
- tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
- gen_op_addl_A0_seg(s, R_SS);
- } else if (s->addseg) {
- tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
- gen_op_addl_A0_seg(s, R_SS);
- } else {
- tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
- }
+ gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
- gen_op_ld_v(s, d_ot, cpu_T[0], addr);
return d_ot;
}
-static void gen_pop_update(DisasContext *s, TCGMemOp ot)
+static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
{
gen_stack_update(s, 1 << ot);
}
-static void gen_stack_A0(DisasContext *s)
+static inline void gen_stack_A0(DisasContext *s)
{
- gen_op_movl_A0_reg(R_ESP);
- if (!s->ss32)
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- if (s->addseg)
- gen_op_addl_A0_seg(s, R_SS);
+ gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
-/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
+ TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
+ TCGMemOp d_ot = s->dflag;
+ int size = 1 << d_ot;
int i;
- gen_op_movl_A0_reg(R_ESP);
- gen_op_addl_A0_im(-(8 << s->dflag));
- if (!s->ss32)
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- if (s->addseg)
- gen_op_addl_A0_seg(s, R_SS);
- for(i = 0;i < 8; i++) {
- gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
- gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
- gen_op_addl_A0_im(1 << s->dflag);
+
+ for (i = 0; i < 8; i++) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
+ gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
}
- gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
+
+ gen_stack_update(s, -8 * size);
}
-/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
+ TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
+ TCGMemOp d_ot = s->dflag;
+ int size = 1 << d_ot;
int i;
- gen_op_movl_A0_reg(R_ESP);
- if (!s->ss32)
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
- if (s->addseg)
- gen_op_addl_A0_seg(s, R_SS);
- for(i = 0;i < 8; i++) {
+
+ for (i = 0; i < 8; i++) {
/* ESP is not reloaded */
- if (i != 3) {
- gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
+ if (7 - i == R_ESP) {
+ continue;
}
- gen_op_addl_A0_im(1 << s->dflag);
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
+ gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
}
- gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
+
+ gen_stack_update(s, 8 * size);
}
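
Editor's note: the rewritten gen_pusha/gen_popa compute each slot address directly from ESP instead of stepping A0 along, then adjust ESP once at the end. The resulting PUSHA layout, printed by a small model (the starting ESP and the 32-bit slot size are arbitrary):

#include <stdio.h>

int main(void)
{
    static const char *names[8] = {
        "EAX", "ECX", "EDX", "EBX", "ESP", "EBP", "ESI", "EDI"
    };
    unsigned esp = 0x1000, size = 4;    /* d_ot = MO_32 */

    /* Slot i of the new block holds cpu_regs[7 - i], as in the loop. */
    for (int i = 0; i < 8; i++) {
        printf("[%#x] = %s\n", esp - (8 - i) * size, names[7 - i]);
    }
    return 0;
}
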
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
- TCGMemOp ot = mo_pushpop(s, s->dflag);
- int opsize = 1 << ot;
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+ int size = 1 << d_ot;
- level &= 0x1f;
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- gen_op_movl_A0_reg(R_ESP);
- gen_op_addq_A0_im(-opsize);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
-
- /* push bp */
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
- if (level) {
- /* XXX: must save state */
- gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
- tcg_const_i32((ot == MO_64)),
- cpu_T[1]);
- }
- gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
- tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
- gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
- } else
-#endif
- {
- gen_op_movl_A0_reg(R_ESP);
- gen_op_addl_A0_im(-opsize);
- if (!s->ss32)
- tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
- tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- if (s->addseg)
- gen_op_addl_A0_seg(s, R_SS);
- /* push bp */
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
- if (level) {
- /* XXX: must save state */
- gen_helper_enter_level(cpu_env, tcg_const_i32(level),
- tcg_const_i32(s->dflag - 1),
- cpu_T[1]);
- }
- gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
- tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
- gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
+ /* Push BP; compute FrameTemp into T1. */
+ tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
+ gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
+
+ level &= 31;
+ if (level != 0) {
+ int i;
+
+ /* Copy level-1 pointers from the previous frame. */
+ for (i = 1; i < level; ++i) {
+ tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
+
+ tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
+ }
+
+ /* Push the current FrameTemp as the last level. */
+ tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
}
+
+ /* Copy the FrameTemp value to EBP. */
+ gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
+
+ /* Compute the final value of ESP. */
+ tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
+ gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
+}
+
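
Editor's note: the inlined gen_enter now follows the architectural ENTER pseudocode directly: push EBP, remember FrameTemp, copy level-1 saved frame pointers, push FrameTemp itself, then set EBP and drop ESP by the allocation. A flat, 32-bit, user-space model of the same steps; stack, push32 and enter32 are made up for this illustration.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint8_t stack[256];
static uint32_t esp = sizeof(stack), ebp = sizeof(stack);

static void push32(uint32_t v)
{
    esp -= 4;
    memcpy(&stack[esp], &v, 4);
}

static void enter32(uint16_t alloc, int level)
{
    push32(ebp);                        /* push BP */
    uint32_t frame_temp = esp;          /* FrameTemp */

    level &= 31;
    for (int i = 1; i < level; i++) {   /* copy level-1 old pointers */
        uint32_t p;
        memcpy(&p, &stack[ebp - 4u * i], 4);
        push32(p);
    }
    if (level) {
        push32(frame_temp);             /* last level: FrameTemp itself */
    }

    ebp = frame_temp;                   /* EBP <- FrameTemp */
    esp = frame_temp - alloc - 4u * (uint32_t)level;
}

int main(void)
{
    enter32(16, 0);                     /* ENTER 16, 0 */
    printf("ebp=%u esp=%u\n", ebp, esp);
    return 0;
}
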
+static void gen_leave(DisasContext *s)
+{
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = mo_stacksize(s);
+
+ gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+
+ tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
+
+ gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
+ gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
}
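
Editor's note: gen_leave is the mirror image: load the saved frame pointer from SS:EBP, move EBP plus one slot into ESP, then write both registers back, in that order. A minimal standalone illustration with a fake four-word stack:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t mem[4] = { 0, 0, 0xdeadbeef, 0 };   /* fake stack words */
    uint32_t esp = 0, ebp = 2 * 4;               /* EBP points at mem[2] */

    uint32_t saved = mem[ebp / 4];               /* load [EBP]: old EBP */
    esp = ebp + 4;                               /* ESP <- EBP + size */
    ebp = saved;
    printf("ebp=%#x esp=%u\n", (unsigned)ebp, esp);
    return 0;
}
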
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
@@ -3073,23 +2915,23 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(0)));
- gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
}
break;
case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
} else
#endif
{
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
}
break;
@@ -3099,14 +2941,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
- gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
+ gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
} else
#endif
{
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
}
break;
@@ -3140,12 +2982,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x210: /* movss xmm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
- tcg_gen_movi_tl(cpu_T[0], 0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
@@ -3157,9 +2999,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
- tcg_gen_movi_tl(cpu_T[0], 0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
@@ -3261,13 +3103,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
}
@@ -3275,13 +3117,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
}
@@ -3326,8 +3168,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x211: /* movss ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
- gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
@@ -3376,16 +3218,16 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
val = cpu_ldub_code(env, s->pc++);
if (is_xmm) {
- tcg_gen_movi_tl(cpu_T[0], val);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
- tcg_gen_movi_tl(cpu_T[0], 0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
+ tcg_gen_movi_tl(cpu_T0, val);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
op1_offset = offsetof(CPUX86State,xmm_t0);
} else {
- tcg_gen_movi_tl(cpu_T[0], val);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
- tcg_gen_movi_tl(cpu_T[0], 0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
+ tcg_gen_movi_tl(cpu_T0, val);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
op1_offset = offsetof(CPUX86State,mmx_t0);
}
sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
@@ -3450,12 +3292,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
if (ot == MO_32) {
SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
- sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
+ sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
#else
goto illegal_op;
#endif
@@ -3502,8 +3344,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if ((b >> 8) & 1) {
gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
} else {
- gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
}
op2_offset = offsetof(CPUX86State,xmm_t0);
} else {
@@ -3515,17 +3357,17 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
SSEFunc_i_ep sse_fn_i_ep =
sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_l_ep sse_fn_l_ep =
sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
- sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
+ sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
#else
goto illegal_op;
#endif
}
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0xc4: /* pinsrw */
case 0x1c4:
@@ -3534,11 +3376,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
- tcg_gen_st16_tl(cpu_T[0], cpu_env,
+ tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
} else {
val &= 3;
- tcg_gen_st16_tl(cpu_T[0], cpu_env,
+ tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
}
break;
@@ -3551,16 +3393,16 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (b1) {
val &= 7;
rm = (modrm & 7) | REX_B(s);
- tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
} else {
val &= 3;
rm = (modrm & 7);
- tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
}
reg = ((modrm >> 3) & 7) | rex_r;
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x1d6: /* movq ea, xmm */
if (mod != 3) {
@@ -3707,11 +3549,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
- cpu_T[0], tcg_const_i32(8 << ot));
+ gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
+ cpu_T0, tcg_const_i32(8 << ot));
ot = mo_64_32(s->dflag);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x1f0: /* crc32 or movbe */
@@ -3736,9 +3578,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_lea_modrm(env, s, modrm);
if ((b & 1) == 0) {
- tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
s->mem_index, ot | MO_BE);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
s->mem_index, ot | MO_BE);
@@ -3753,8 +3595,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
@@ -3773,12 +3615,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Extract START, and shift the operand.
Shifts larger than operand size get zeros. */
tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
zero = tcg_const_tl(0);
- tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
- cpu_T[0], zero);
+ tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
+ cpu_T0, zero);
tcg_temp_free(zero);
/* Extract the LEN into a mask. Lengths larger than
@@ -3788,12 +3630,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
cpu_A0, bound);
tcg_temp_free(bound);
- tcg_gen_movi_tl(cpu_T[1], 1);
- tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
- tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_movi_tl(cpu_T1, 1);
+ tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
+ tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
}
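
Editor's note: the BEXTR sequence above shifts out START, clamps out-of-range counts, and masks to LEN bits. A scalar model of the extract; bextr64 is illustrative, and the translator's exact clamping of the length differs slightly at the boundary.

#include <stdint.h>
#include <stdio.h>

static uint64_t bextr64(uint64_t src, unsigned start, unsigned len)
{
    uint64_t mask = len >= 64 ? ~0ull : (1ull << len) - 1;
    return start >= 64 ? 0 : (src >> start) & mask;   /* big shifts -> 0 */
}

int main(void)
{
    /* 12 bits starting at bit 8 of 0xABCD1234 -> 0xd12 */
    printf("%#llx\n", (unsigned long long)bextr64(0xABCD1234, 8, 12));
    return 0;
}
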
@@ -3807,21 +3649,21 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
{
TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
/* Note that since we're using BMILG (in order to get O
cleared) we need to store the inverse into C. */
tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
- cpu_T[1], bound);
- tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
- bound, bound, cpu_T[1]);
+ cpu_T1, bound);
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
+ bound, bound, cpu_T1);
tcg_temp_free(bound);
}
tcg_gen_movi_tl(cpu_A0, -1);
- tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
- tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
+ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
break;
@@ -3836,7 +3678,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
switch (ot) {
default:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
@@ -3845,10 +3687,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
break;
#ifdef TARGET_X86_64
case MO_64:
- tcg_gen_mulu2_i64(cpu_T[0], cpu_T[1],
- cpu_T[0], cpu_regs[R_EDX]);
- tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T[0]);
- tcg_gen_mov_i64(cpu_regs[reg], cpu_T[1]);
+ tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
+ cpu_T0, cpu_regs[R_EDX]);
+ tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
+ tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
break;
#endif
}
@@ -3865,11 +3707,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Note that by zero-extending the mask operand, we
automatically handle zero-extending the result. */
if (ot == MO_64) {
- tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
} else {
- tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
}
- gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
+ gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
break;
case 0x2f5: /* pext Gy, By, Ey */
@@ -3883,11 +3725,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Note that by zero-extending the mask operand, we
automatically handle zero-extending the result. */
if (ot == MO_64) {
- tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
} else {
- tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
+ tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
}
- gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
+ gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
break;
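
Editor's note: gen_helper_pdep and gen_helper_pext scatter and gather bits through a mask, and zero-extending the mask operand, as the code comments note, zero-extends the result for free. Scalar reference models of the two operations follow; these show the architectural behaviour and are not QEMU's helper implementations.

#include <stdint.h>
#include <stdio.h>

/* Deposit the low bits of src into the set positions of mask. */
static uint64_t pdep(uint64_t src, uint64_t mask)
{
    uint64_t out = 0;
    for (uint64_t bit = 1; mask; bit <<= 1) {
        uint64_t low = mask & -mask;      /* lowest set mask bit */
        if (src & bit) {
            out |= low;
        }
        mask &= mask - 1;                 /* clear that mask bit */
    }
    return out;
}

/* Gather the bits of src at the set positions of mask into the low bits. */
static uint64_t pext(uint64_t src, uint64_t mask)
{
    uint64_t out = 0;
    for (uint64_t bit = 1; mask; bit <<= 1) {
        uint64_t low = mask & -mask;
        if (src & low) {
            out |= bit;
        }
        mask &= mask - 1;
    }
    return out;
}

int main(void)
{
    /* The two operations invert each other through the same mask. */
    printf("pdep=%#llx pext=%#llx\n",
           (unsigned long long)pdep(0x5, 0xF0),
           (unsigned long long)pext(0x50, 0xF0));
    return 0;
}
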
case 0x1f6: /* adcx Gy, Ey */
@@ -3946,22 +3788,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* If we know TL is 64-bit, and we want a 32-bit
result, just do everything in 64-bit arithmetic. */
tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
- tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
- tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
- tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
- tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
- tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
+ tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
+ tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
+ tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
+ tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
+ tcg_gen_shri_i64(carry_out, cpu_T0, 32);
break;
#endif
default:
/* Otherwise compute the carry-out in two steps. */
zero = tcg_const_tl(0);
- tcg_gen_add2_tl(cpu_T[0], carry_out,
- cpu_T[0], zero,
+ tcg_gen_add2_tl(cpu_T0, carry_out,
+ cpu_T0, zero,
carry_in, zero);
tcg_gen_add2_tl(cpu_regs[reg], carry_out,
cpu_regs[reg], carry_out,
- cpu_T[0], zero);
+ cpu_T0, zero);
tcg_temp_free(zero);
break;
}
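
Editor's note: the default path above computes the carry-out of a full-width add in two tcg_gen_add2 steps because no wider type is available at target-long width. The same trick in plain C; addc64 is illustrative and simplifies how the TCG code threads the carry.

#include <stdint.h>
#include <stdio.h>

/* Carry-out of a + b + cin without a wider type, split in two steps. */
static unsigned addc64(uint64_t a, uint64_t b, unsigned cin, uint64_t *sum)
{
    uint64_t t = b + cin;               /* step 1: fold in the carry-in */
    unsigned c1 = t < b;                /* overflows only when cin = 1 */
    uint64_t s = a + t;                 /* step 2: the main add */
    unsigned c2 = s < a;
    *sum = s;
    return c1 | c2;                     /* at most one step can carry */
}

int main(void)
{
    uint64_t s;
    unsigned c = addc64(~0ull, 0, 1, &s);
    printf("sum=%#llx carry=%u\n", (unsigned long long)s, c);
    return 0;
}
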
@@ -3980,24 +3822,24 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (ot == MO_64) {
- tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
+ tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
} else {
- tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
+ tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
}
if (b == 0x1f7) {
- tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
} else if (b == 0x2f7) {
if (ot != MO_64) {
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
- tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
if (ot != MO_64) {
- tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
}
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
}
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x0f3:
@@ -4014,26 +3856,26 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
switch (reg & 7) {
case 1: /* blsr By,Ey */
- tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
+ tcg_gen_neg_tl(cpu_T1, cpu_T0);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
gen_op_update2_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 2: /* blsmsk By,Ey */
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
- tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
- tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 3: /* blsi By, Ey */
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
- tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
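
Editor's note: the BMI group-17 instructions handled above are the classic lowest-set-bit identities. For reference, their architectural definitions applied to a sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t x = 0x2C;                     /* binary 101100 */
    printf("blsr=%#x blsmsk=%#x blsi=%#x\n",
           x & (x - 1),                    /* clear lowest set bit */
           x ^ (x - 1),                    /* mask up to lowest set bit */
           x & -x);                        /* isolate lowest set bit */
    return 0;
}
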
@@ -4074,22 +3916,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
val = cpu_ldub_code(env, s->pc++);
switch (b) {
case 0x14: /* pextrb */
- tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
+ tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_B(val & 15)));
if (mod == 3) {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
- tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_UB);
}
break;
case 0x15: /* pextrw */
- tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_W(val & 7)));
if (mod == 3) {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
- tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_LEUW);
}
break;
@@ -4121,23 +3963,23 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
break;
case 0x17: /* extractps */
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(val & 3)));
if (mod == 3) {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
- tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_LEUL);
}
break;
case 0x20: /* pinsrb */
if (mod == 3) {
- gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
+ gen_op_mov_v_reg(MO_32, cpu_T0, rm);
} else {
- tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
+ tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
s->mem_index, MO_UB);
}
- tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
+ tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_B(val & 15)));
break;
case 0x21: /* insertps */
@@ -4252,13 +4094,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
b = cpu_ldub_code(env, s->pc++);
if (ot == MO_64) {
- tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
+ tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
} else {
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
}
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
default:
@@ -4313,8 +4155,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
switch (sz) {
case 2:
/* 32 bit access */
- gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
- tcg_gen_st32_tl(cpu_T[0], cpu_env,
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
break;
case 3:
@@ -4605,13 +4447,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
xor_zero:
/* xor reg, reg optimisation */
set_cc_op(s, CC_OP_CLR);
- tcg_gen_movi_tl(cpu_T[0], 0);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, 0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
} else {
opreg = rm;
}
- gen_op_mov_v_reg(ot, cpu_T[1], reg);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
@@ -4621,17 +4463,17 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
} else if (op == OP_XORL && rm == reg) {
goto xor_zero;
} else {
- gen_op_mov_v_reg(ot, cpu_T[1], rm);
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
}
gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
val = insn_get(env, s, ot);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, OR_EAX);
break;
}
@@ -4676,7 +4518,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
val = (int8_t)insn_get(env, s, MO_8);
break;
}
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
gen_op(s, op, ot, opreg);
}
break;
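
[The (int8_t) cast above is what implements the sign-extended imm8 instruction forms: the encoded byte is widened to the full operand size before it is loaded into cpu_T1. A minimal standalone illustration (values invented):

    int8_t imm8 = (int8_t)0xf0;   /* encoded immediate byte: -16 */
    int32_t val = imm8;           /* sign-extends to 0xfffffff0 */
    /* so "add r/m32, imm8" really adds 0xfffffff0, i.e. subtracts 16 */
]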
@@ -4703,32 +4545,32 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (op == 0)
s->rip_offset = insn_const_size(ot);
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
case 0: /* test */
val = insn_get(env, s, ot);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 2: /* not */
- tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_not_tl(cpu_T0, cpu_T0);
if (mod != 3) {
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
break;
case 3: /* neg */
- tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_neg_tl(cpu_T0, cpu_T0);
if (mod != 3) {
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
gen_op_update_neg_cc();
set_cc_op(s, CC_OP_SUBB + ot);
@@ -4736,32 +4578,32 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 4: /* mul */
switch(ot) {
case MO_8:
- gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
- tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
+ gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
- gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
+ gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
- gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
@@ -4774,7 +4616,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
- cpu_T[0], cpu_regs[R_EAX]);
+ cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
@@ -4785,34 +4627,34 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 5: /* imul */
switch(ot) {
case MO_8:
- gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
- tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
+ gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
- tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
set_cc_op(s, CC_OP_MULB);
break;
case MO_16:
- gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
+ gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
- tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
- gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
set_cc_op(s, CC_OP_MULW);
break;
default:
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
@@ -4827,7 +4669,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
- cpu_T[0], cpu_regs[R_EAX]);
+ cpu_T0, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
@@ -4839,18 +4681,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 6: /* div */
switch(ot) {
case MO_8:
- gen_helper_divb_AL(cpu_env, cpu_T[0]);
+ gen_helper_divb_AL(cpu_env, cpu_T0);
break;
case MO_16:
- gen_helper_divw_AX(cpu_env, cpu_T[0]);
+ gen_helper_divw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
- gen_helper_divl_EAX(cpu_env, cpu_T[0]);
+ gen_helper_divl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_divq_EAX(cpu_env, cpu_T[0]);
+ gen_helper_divq_EAX(cpu_env, cpu_T0);
break;
#endif
}
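
[The four DIV helpers dispatched above all implement the x86 widening divide, where the dividend occupies a register pair and a divide error (#DE) is raised on division by zero or quotient overflow. A conceptual C model of the 16-bit case, with placeholder names rather than the helper's actual code:

    /* dividend is DX:AX, divisor is the 16-bit r/m operand */
    uint32_t dividend = ((uint32_t)dx << 16) | ax;
    uint16_t quot = dividend / divisor;   /* -> AX; #DE if result > 0xffff */
    uint16_t rem  = dividend % divisor;   /* -> DX */
]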
@@ -4858,18 +4700,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 7: /* idiv */
switch(ot) {
case MO_8:
- gen_helper_idivb_AL(cpu_env, cpu_T[0]);
+ gen_helper_idivb_AL(cpu_env, cpu_T0);
break;
case MO_16:
- gen_helper_idivw_AX(cpu_env, cpu_T[0]);
+ gen_helper_idivw_AX(cpu_env, cpu_T0);
break;
default:
case MO_32:
- gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
+ gen_helper_idivl_EAX(cpu_env, cpu_T0);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
+ gen_helper_idivq_EAX(cpu_env, cpu_T0);
break;
#endif
}
@@ -4904,9 +4746,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
if (op >= 2 && op != 3 && op != 5)
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
}
switch(op) {
@@ -4927,27 +4769,27 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 2: /* call Ev */
/* XXX: optimize if memory (no 'and' is necessary) */
if (dflag == MO_16) {
- tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
next_eip = s->pc - s->cs_base;
- tcg_gen_movi_tl(cpu_T[1], next_eip);
- gen_push_v(s, cpu_T[1]);
- gen_op_jmp_v(cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T1, next_eip);
+ gen_push_v(s, cpu_T1);
+ gen_op_jmp_v(cpu_T0);
gen_eob(s);
break;
case 3: /* lcall Ev */
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
- gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_lcall:
if (s->pe && !s->vm86) {
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_tl(s->pc - s->cs_base));
} else {
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
}
@@ -4955,28 +4797,28 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
break;
case 4: /* jmp Ev */
if (dflag == MO_16) {
- tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
- gen_op_jmp_v(cpu_T[0]);
+ gen_op_jmp_v(cpu_T0);
gen_eob(s);
break;
case 5: /* ljmp Ev */
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
- gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
do_ljmp:
if (s->pe && !s->vm86) {
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
tcg_const_tl(s->pc - s->cs_base));
} else {
gen_op_movl_seg_T0_vm(R_CS);
- gen_op_jmp_v(cpu_T[1]);
+ gen_op_jmp_v(cpu_T1);
}
gen_eob(s);
break;
case 6: /* push Ev */
- gen_push_v(s, cpu_T[0]);
+ gen_push_v(s, cpu_T0);
break;
default:
goto illegal_op;
@@ -4991,7 +4833,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_op_mov_v_reg(ot, cpu_T[1], reg);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
@@ -5001,8 +4843,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
ot = mo_b_d(b, dflag);
val = insn_get(env, s, ot);
- gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
- tcg_gen_movi_tl(cpu_T[1], val);
+ gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
+ tcg_gen_movi_tl(cpu_T1, val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
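
[Both TEST forms above funnel into gen_op_testl_T0_T1_cc(), which in effect computes the AND purely as a flags source; unlike AND, no destination is written back:

    /* conceptual effect of TEST a, b */
    cc_dst = a & b;    /* drives Z/S/P via CC_OP_LOGICB + ot */
    /* a and b themselves are left unchanged */
]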
@@ -5011,20 +4853,20 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
break;
#endif
case MO_32:
- gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
break;
case MO_16:
- gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
- tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
tcg_abort();
@@ -5034,22 +4876,22 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
switch (dflag) {
#ifdef TARGET_X86_64
case MO_64:
- gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
- gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
+ gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
break;
#endif
case MO_32:
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
- gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
+ gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
break;
case MO_16:
- gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
- gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
+ gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
break;
default:
tcg_abort();
@@ -5068,25 +4910,25 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (b == 0x69) {
val = insn_get(env, s, ot);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
} else if (b == 0x6b) {
val = (int8_t)insn_get(env, s, MO_8);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
} else {
- gen_op_mov_v_reg(ot, cpu_T[1], reg);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
}
switch (ot) {
#ifdef TARGET_X86_64
case MO_64:
- tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
+ tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
- tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
break;
#endif
case MO_32:
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
@@ -5096,14 +4938,14 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
break;
default:
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
/* XXX: use 32 bit mul which could be faster */
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
- tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
- tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
}
set_cc_op(s, CC_OP_MULB + ot);
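
[The cc_src value computed above, the product minus its sign-extended truncation, is the signed-multiply overflow probe: it is zero exactly when the full product fits the destination width, and the CC_OP_MUL* modes report CF = OF = (cc_src != 0). Worked 16-bit example:

    int32_t prod = 300 * 200;              /* 60000 = 0x0000ea60 */
    int32_t ovf  = prod - (int16_t)prod;   /* 60000 - (-5536) = 65536 */
    /* nonzero, so CF/OF are set: 60000 does not fit in int16_t */
]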
@@ -5116,18 +4958,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
mod = (modrm >> 6) & 3;
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_mov_v_reg(ot, cpu_T[1], rm);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
}
gen_op_update2_cc();
set_cc_op(s, CC_OP_ADDB + ot);
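
[XADD above is exchange-and-add: the destination receives the sum, the source register receives the old destination value, and the flags come from the addition. Conceptual model:

    tmp = dst;
    dst = dst + src;   /* flags as for ADD */
    src = tmp;

In the memory form the sequence is load, add, store, then register writeback, which is why gen_op_st_v precedes the final gen_op_mov_reg_v.]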
@@ -5213,14 +5055,14 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/**************************/
/* push/pop */
case 0x50 ... 0x57: /* push */
- gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
- gen_push_v(s, cpu_T[0]);
+ gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
+ gen_push_v(s, cpu_T0);
break;
case 0x58 ... 0x5f: /* pop */
ot = gen_pop_T0(s);
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
- gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
+ gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
break;
case 0x60: /* pusha */
if (CODE64(s))
@@ -5239,8 +5081,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
val = insn_get(env, s, ot);
else
val = (int8_t)insn_get(env, s, MO_8);
- tcg_gen_movi_tl(cpu_T[0], val);
- gen_push_v(s, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_push_v(s, cpu_T0);
break;
case 0x8f: /* pop Ev */
modrm = cpu_ldub_code(env, s->pc++);
@@ -5250,7 +5092,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* NOTE: order is important for pop %sp */
gen_pop_update(s, ot);
rm = (modrm & 7) | REX_B(s);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 1 << ot;
@@ -5269,20 +5111,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
break;
case 0xc9: /* leave */
- /* XXX: exception not precise (ESP is updated before potential exception) */
- if (CODE64(s)) {
- gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
- gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
- } else if (s->ss32) {
- gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
- gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
- } else {
- gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
- gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
- }
- ot = gen_pop_T0(s);
- gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
- gen_pop_update(s, ot);
+ gen_leave(s);
break;
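
[The open-coded LEAVE sequence deleted above (copy EBP to ESP at the stack width, then pop EBP) moves into the gen_leave() helper introduced earlier in this series. A plausible shape for that helper, reconstructed from the removed code rather than quoted from the commit, using the mo_pushpop/mo_stacksize/gen_lea_v_seg helpers this series adds:

    static void gen_leave(DisasContext *s)
    {
        TCGMemOp d_ot = mo_pushpop(s, s->dflag);   /* width of the pop */
        TCGMemOp a_ot = mo_stacksize(s);           /* stack address width */

        /* load the saved frame pointer from SS:EBP */
        gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
        gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);

        /* the new ESP is the old EBP plus the size just popped */
        tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);

        gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
        gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
    }

Ordering the faultable load before either register update also addresses the imprecise-exception XXX in the deleted code.]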
case 0x06: /* push es */
case 0x0e: /* push cs */
@@ -5291,12 +5120,12 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (CODE64(s))
goto illegal_op;
gen_op_movl_T0_seg(b >> 3);
- gen_push_v(s, cpu_T[0]);
+ gen_push_v(s, cpu_T0);
break;
case 0x1a0: /* push fs */
case 0x1a8: /* push gs */
gen_op_movl_T0_seg((b >> 3) & 7);
- gen_push_v(s, cpu_T[0]);
+ gen_push_v(s, cpu_T0);
break;
case 0x07: /* pop es */
case 0x17: /* pop ss */
@@ -5352,11 +5181,11 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_lea_modrm(env, s, modrm);
}
val = insn_get(env, s, ot);
- tcg_gen_movi_tl(cpu_T[0], val);
+ tcg_gen_movi_tl(cpu_T0, val);
if (mod != 3) {
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
+ gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
}
break;
case 0x8a:
@@ -5366,7 +5195,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x8e: /* mov seg, Gv */
modrm = cpu_ldub_code(env, s->pc++);
@@ -5420,27 +5249,27 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
switch (s_ot) {
case MO_UB:
- tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
break;
case MO_SB:
- tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
break;
case MO_UW:
- tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
break;
default:
case MO_SW:
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
break;
}
- gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
+ gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
}
break;
@@ -5483,27 +5312,27 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_gen_movi_tl(cpu_A0, offset_addr);
gen_add_A0_ds_seg(s);
if ((b & 2) == 0) {
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
}
}
break;
case 0xd7: /* xlat */
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
- tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
+ tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
- gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
+ gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
val = insn_get(env, s, MO_8);
- tcg_gen_movi_tl(cpu_T[0], val);
- gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
break;
case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
@@ -5513,16 +5342,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tmp = cpu_ldq_code(env, s->pc);
s->pc += 8;
reg = (b & 7) | REX_B(s);
- tcg_gen_movi_tl(cpu_T[0], tmp);
- gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, tmp);
+ gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
ot = dflag;
val = insn_get(env, s, ot);
reg = (b & 7) | REX_B(s);
- tcg_gen_movi_tl(cpu_T[0], val);
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
}
break;
@@ -5541,21 +5370,21 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
do_xchg_reg:
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
- gen_op_mov_v_reg(ot, cpu_T[1], rm);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
} else {
gen_lea_modrm(env, s, modrm);
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
/* for xchg, lock is implicit */
if (!(prefixes & PREFIX_LOCK))
gen_helper_lock();
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
if (!(prefixes & PREFIX_LOCK))
gen_helper_unlock();
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
}
break;
case 0xc4: /* les Gv */
@@ -5582,13 +5411,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
gen_add_A0_im(s, 1 << ot);
/* load the segment first to handle exceptions properly */
- gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
gen_movl_seg_T0(s, op);
/* then put the data */
- gen_op_mov_reg_v(ot, reg, cpu_T[1]);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
@@ -5667,7 +5496,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
opreg = rm;
}
- gen_op_mov_v_reg(ot, cpu_T[1], reg);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
if (shift) {
TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
@@ -6127,8 +5956,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
switch(rm) {
case 0:
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
- tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
- gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
break;
default:
goto illegal_op;
@@ -6238,7 +6067,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0x6c: /* insS */
case 0x6d:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
@@ -6253,7 +6082,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0x6e: /* outsS */
case 0x6f:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
@@ -6273,15 +6102,15 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0xe5:
ot = mo_b_d32(b, dflag);
val = cpu_ldub_code(env, s->pc++);
- tcg_gen_movi_tl(cpu_T[0], val);
+ tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
- gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
- gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
+ gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
@@ -6292,16 +6121,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0xe7:
ot = mo_b_d32(b, dflag);
val = cpu_ldub_code(env, s->pc++);
- tcg_gen_movi_tl(cpu_T[0], val);
+ tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
- gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
+ gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
@@ -6312,15 +6141,15 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0xec:
case 0xed:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
- gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
@@ -6330,16 +6159,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0xee:
case 0xef:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
- gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
+ gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
if (s->tb->cflags & CF_USE_ICOUNT) {
@@ -6356,14 +6185,14 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
ot = gen_pop_T0(s);
gen_stack_update(s, val + (1 << ot));
/* Note that gen_pop_T0 uses a zero-extending load. */
- gen_op_jmp_v(cpu_T[0]);
+ gen_op_jmp_v(cpu_T0);
gen_eob(s);
break;
case 0xc3: /* ret */
ot = gen_pop_T0(s);
gen_pop_update(s, ot);
/* Note that gen_pop_T0 uses a zero-extending load. */
- gen_op_jmp_v(cpu_T[0]);
+ gen_op_jmp_v(cpu_T0);
gen_eob(s);
break;
case 0xca: /* lret im */
@@ -6378,13 +6207,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_stack_A0(s);
/* pop offset */
- gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
/* NOTE: keeping EIP updated is not a problem in case of
exception */
- gen_op_jmp_v(cpu_T[0]);
+ gen_op_jmp_v(cpu_T0);
/* pop selector */
- gen_op_addl_A0_im(1 << dflag);
- gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
+ gen_add_A0_im(s, 1 << dflag);
+ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
gen_op_movl_seg_T0_vm(R_CS);
/* add stack offset */
gen_stack_update(s, val + (2 << dflag));
@@ -6428,8 +6257,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
- tcg_gen_movi_tl(cpu_T[0], next_eip);
- gen_push_v(s, cpu_T[0]);
+ tcg_gen_movi_tl(cpu_T0, next_eip);
+ gen_push_v(s, cpu_T0);
gen_jmp(s, tval);
}
break;
@@ -6443,8 +6272,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
- tcg_gen_movi_tl(cpu_T[0], selector);
- tcg_gen_movi_tl(cpu_T[1], offset);
+ tcg_gen_movi_tl(cpu_T0, selector);
+ tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_lcall;
case 0xe9: /* jmp im */
@@ -6471,8 +6300,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
offset = insn_get(env, s, ot);
selector = insn_get(env, s, MO_16);
- tcg_gen_movi_tl(cpu_T[0], selector);
- tcg_gen_movi_tl(cpu_T[1], offset);
+ tcg_gen_movi_tl(cpu_T0, selector);
+ tcg_gen_movi_tl(cpu_T1, offset);
}
goto do_ljmp;
case 0xeb: /* jmp Jb */
@@ -6503,7 +6332,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0x190 ... 0x19f: /* setcc Gv */
modrm = cpu_ldub_code(env, s->pc++);
- gen_setcc1(s, b, cpu_T[0]);
+ gen_setcc1(s, b, cpu_T0);
gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
@@ -6524,8 +6353,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
- gen_helper_read_eflags(cpu_T[0], cpu_env);
- gen_push_v(s, cpu_T[0]);
+ gen_helper_read_eflags(cpu_T0, cpu_env);
+ gen_push_v(s, cpu_T0);
}
break;
case 0x9d: /* popf */
@@ -6536,13 +6365,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
ot = gen_pop_T0(s);
if (s->cpl == 0) {
if (dflag != MO_16) {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK |
IOPL_MASK)));
} else {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK |
IF_MASK | IOPL_MASK)
@@ -6551,14 +6380,14 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
if (s->cpl <= s->iopl) {
if (dflag != MO_16) {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
NT_MASK |
IF_MASK)));
} else {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK |
AC_MASK |
ID_MASK |
@@ -6568,11 +6397,11 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
} else {
if (dflag != MO_16) {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)));
} else {
- gen_helper_write_eflags(cpu_env, cpu_T[0],
+ gen_helper_write_eflags(cpu_env, cpu_T0,
tcg_const_i32((TF_MASK | AC_MASK |
ID_MASK | NT_MASK)
& 0xffff));
@@ -6589,19 +6418,19 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0x9e: /* sahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
- gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
+ gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
- tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
break;
case 0x9f: /* lahf */
if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_compute_eflags(s);
/* Note: gen_compute_eflags() only gives the condition codes */
- tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
- gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
+ tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
+ gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
break;
case 0xf5: /* cmc */
gen_compute_eflags(s);
@@ -6635,13 +6464,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (mod != 3) {
s->rip_offset = 1;
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
}
/* load shift */
val = cpu_ldub_code(env, s->pc++);
- tcg_gen_movi_tl(cpu_T[1], val);
+ tcg_gen_movi_tl(cpu_T1, val);
if (op < 4)
goto illegal_op;
op -= 4;
@@ -6663,46 +6492,46 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
- gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
+ gen_op_mov_v_reg(MO_32, cpu_T1, reg);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
/* specific case: we need to add a displacement */
- gen_exts(ot, cpu_T[1]);
- tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
+ gen_exts(ot, cpu_T1);
+ tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
- gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
}
bt_op:
- tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
- tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
+ tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
switch(op) {
case 0:
break;
case 1:
tcg_gen_movi_tl(cpu_tmp0, 1);
- tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
case 2:
tcg_gen_movi_tl(cpu_tmp0, 1);
- tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
- tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
+ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
default:
case 3:
tcg_gen_movi_tl(cpu_tmp0, 1);
- tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
- tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
break;
}
if (op != 0) {
if (mod != 3) {
- gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
} else {
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
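
[In the bt_op sequence above, cpu_tmp4 captures the addressed bit (it later feeds CF), and op selects the update: OR sets (BTS), ANDC clears (BTR), XOR flips (BTC), and op 0 (BT) only tests. Conceptual model, with the bit index i already reduced modulo the operand width:

    cf = (v >> i) & 1;                            /* the tested bit */
    if (op == 1) v |=   (target_ulong)1 << i;     /* BTS */
    if (op == 2) v &= ~((target_ulong)1 << i);    /* BTR */
    if (op == 3) v ^=   (target_ulong)1 << i;     /* BTC */
]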
@@ -6742,7 +6571,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_extu(ot, cpu_T[0]);
+ gen_extu(ot, cpu_T0);
/* Note that lzcnt and tzcnt are in different extensions. */
if ((prefixes & PREFIX_REPZ)
@@ -6750,18 +6579,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
? s->cpuid_ext3_features & CPUID_EXT3_ABM
: s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
int size = 8 << ot;
- tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
if (b & 1) {
/* For lzcnt, reduce the target_ulong result by the
number of zeros that we expect to find at the top. */
- gen_helper_clz(cpu_T[0], cpu_T[0]);
- tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
+ gen_helper_clz(cpu_T0, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
} else {
/* For tzcnt, a zero input must return the operand size:
force all bits outside the operand size to 1. */
target_ulong mask = (target_ulong)-2 << (size - 1);
- tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
- gen_helper_ctz(cpu_T[0], cpu_T[0]);
+ tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
+ gen_helper_ctz(cpu_T0, cpu_T0);
}
/* For lzcnt/tzcnt, C and Z bits are defined and are
related to the result. */
@@ -6770,24 +6599,24 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
/* For bsr/bsf, only the Z bit is defined and it is related
to the input and not the result. */
- tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_LOGICB + ot);
if (b & 1) {
/* For bsr, return the bit index of the first 1 bit,
not the count of leading zeros. */
- gen_helper_clz(cpu_T[0], cpu_T[0]);
- tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
+ gen_helper_clz(cpu_T0, cpu_T0);
+ tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
} else {
- gen_helper_ctz(cpu_T[0], cpu_T[0]);
+ gen_helper_ctz(cpu_T0, cpu_T0);
}
/* ??? The manual says that the output is undefined when the
input is zero, but real hardware leaves it unchanged, and
real programs appear to depend on that. */
tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
- cpu_regs[reg], cpu_T[0]);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
+ cpu_regs[reg], cpu_T0);
}
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
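
[Two fixups in the lzcnt/tzcnt path above deserve spelling out. For lzcnt, gen_helper_clz counts leading zeros across all TARGET_LONG_BITS, so TARGET_LONG_BITS - size is subtracted to rebase the count onto the operand width. For tzcnt, OR-ing in (target_ulong)-2 << (size - 1) sets every bit at or above the operand width, so a zero input yields exactly the operand size, as the ISA requires. Worked 16-bit case:

    target_ulong mask = (target_ulong)-2 << 15;   /* ...ffff0000 */
    /* input 0:  0 | mask  has its lowest set bit at position 16,
       so ctz() returns 16 == the operand size */
]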
/************************/
/* bcd */
@@ -6941,9 +6770,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- gen_op_mov_v_reg(ot, cpu_T[0], reg);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
gen_lea_modrm(env, s, modrm);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
if (ot == MO_16) {
gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
} else {
@@ -6954,24 +6783,24 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
if (dflag == MO_64) {
- gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
- tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
+ gen_op_mov_v_reg(MO_64, cpu_T0, reg);
+ tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_64, reg, cpu_T0);
} else
#endif
{
- gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
- tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
+ gen_op_mov_v_reg(MO_32, cpu_T0, reg);
+ tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
+ tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_32, reg, cpu_T0);
}
break;
case 0xd6: /* salc */
if (CODE64(s))
goto illegal_op;
- gen_compute_eflags_c(s, cpu_T[0]);
- tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
- gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
+ gen_compute_eflags_c(s, cpu_T0);
+ tcg_gen_neg_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
break;
case 0xe0: /* loopnz */
case 0xe1: /* loopz */
@@ -7116,7 +6945,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, ldt.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
@@ -7128,7 +6958,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
}
break;
@@ -7136,7 +6966,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe || s->vm86)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, tr.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
@@ -7148,7 +6979,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
}
break;
@@ -7159,9 +6990,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_update_cc_op(s);
if (op == 4) {
- gen_helper_verr(cpu_env, cpu_T[0]);
+ gen_helper_verr(cpu_env, cpu_T0);
} else {
- gen_helper_verw(cpu_env, cpu_T[0]);
+ gen_helper_verw(cpu_env, cpu_T0);
}
set_cc_op(s, CC_OP_EFLAGS);
break;
@@ -7180,14 +7011,15 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
- gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0,
+ cpu_env, offsetof(CPUX86State, gdt.limit));
+ gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
+ tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
if (dflag == MO_16) {
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
- gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
case 1:
if (mod == 3) {
@@ -7236,14 +7068,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else { /* sidt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
- gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0,
+ cpu_env, offsetof(CPUX86State, idt.limit));
+ gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
gen_add_A0_im(s, 2);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
+ tcg_gen_ld_tl(cpu_T0,
+ cpu_env, offsetof(CPUX86State, idt.base));
if (dflag == MO_16) {
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
- gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
+ gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
}
break;
case 2: /* lgdt */
@@ -7339,27 +7173,31 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_svm_check_intercept(s, pc_start,
op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
+ gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
gen_add_A0_im(s, 2);
- gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
+ gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
if (dflag == MO_16) {
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
}
if (op == 2) {
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
- tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
+ tcg_gen_st_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, gdt.base));
+ tcg_gen_st32_tl(cpu_T1, cpu_env,
+ offsetof(CPUX86State, gdt.limit));
} else {
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
- tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
+ tcg_gen_st_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, idt.base));
+ tcg_gen_st32_tl(cpu_T1, cpu_env,
+ offsetof(CPUX86State, idt.limit));
}
}
break;
case 4: /* smsw */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]) + 4);
#else
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
#endif
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
break;
@@ -7369,7 +7207,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_helper_lmsw(cpu_env, cpu_T[0]);
+ gen_helper_lmsw(cpu_env, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
@@ -7394,21 +7232,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- tcg_gen_ld_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,segs[R_GS].base));
- tcg_gen_ld_tl(cpu_T[1], cpu_env,
- offsetof(CPUX86State,kernelgsbase));
- tcg_gen_st_tl(cpu_T[1], cpu_env,
- offsetof(CPUX86State,segs[R_GS].base));
- tcg_gen_st_tl(cpu_T[0], cpu_env,
- offsetof(CPUX86State,kernelgsbase));
+ tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
+ tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
+ tcg_gen_st_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
}
- } else
-#endif
- {
- goto illegal_op;
+ break;
}
- break;
+#endif
+ goto illegal_op;
case 1: /* rdtscp */
if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
goto illegal_op;
@@ -7454,16 +7287,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
- gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
+ gen_op_mov_v_reg(MO_32, cpu_T0, rm);
/* sign extend */
if (d_ot == MO_64) {
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
- gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
- gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
+ gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
}
} else
#endif
@@ -7528,9 +7361,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
t0 = tcg_temp_local_new();
gen_update_cc_op(s);
if (b == 0x102) {
- gen_helper_lar(t0, cpu_env, cpu_T[0]);
+ gen_helper_lar(t0, cpu_env, cpu_T0);
} else {
- gen_helper_lsl(t0, cpu_env, cpu_T[0]);
+ gen_helper_lsl(t0, cpu_env, cpu_T0);
}
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
label1 = gen_new_label();
@@ -7594,14 +7427,14 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
- cpu_T[0]);
+ cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
- gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
break;
default:
@@ -7631,16 +7464,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
if (b & 2) {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
- gen_op_mov_v_reg(ot, cpu_T[0], rm);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
tcg_gen_movi_i32(cpu_tmp2_i32, reg);
- gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T[0]);
+ gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
tcg_gen_movi_i32(cpu_tmp2_i32, reg);
- gen_helper_get_dr(cpu_T[0], cpu_env, cpu_tmp2_i32);
- gen_op_mov_reg_v(ot, rm, cpu_T[0]);
+ gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
}
}
break;
@@ -7710,8 +7543,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
s->mem_index, MO_LEUL);
gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
} else {
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
- gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0,
+ cpu_env, offsetof(CPUX86State, mxcsr));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
}
break;
case 5: /* lfence */
@@ -7793,8 +7627,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
- gen_op_mov_reg_v(ot, reg, cpu_T[0]);
+ gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
set_cc_op(s, CC_OP_EFLAGS);
break;
@@ -7857,6 +7691,14 @@ void tcg_x86_init(void)
[R_ESP] = "esp",
#endif
};
+ static const char seg_base_names[6][8] = {
+ [R_CS] = "cs_base",
+ [R_DS] = "ds_base",
+ [R_ES] = "es_base",
+ [R_FS] = "fs_base",
+ [R_GS] = "gs_base",
+ [R_SS] = "ss_base",
+ };
int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
@@ -7875,6 +7717,13 @@ void tcg_x86_init(void)
reg_names[i]);
}
+ for (i = 0; i < 6; ++i) {
+ cpu_seg_base[i]
+ = tcg_global_mem_new(cpu_env,
+ offsetof(CPUX86State, segs[i].base),
+ seg_base_names[i]);
+ }
+
helper_lock_init();
}
@@ -7949,8 +7798,8 @@ void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
printf("ERROR addseg\n");
#endif
- cpu_T[0] = tcg_temp_new();
- cpu_T[1] = tcg_temp_new();
+ cpu_T0 = tcg_temp_new();
+ cpu_T1 = tcg_temp_new();
cpu_A0 = tcg_temp_new();
cpu_tmp0 = tcg_temp_new();
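
[The two tcg_temp_new() calls above are the allocation side of the rename that dominates this file's diff: the cpu_T[0]/cpu_T[1] array elements become the scalars cpu_T0/cpu_T1. The matching declaration change sits outside this excerpt, but is presumably just:

    /* before */ static TCGv cpu_T[2];
    /* after  */ static TCGv cpu_T0, cpu_T1;
]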
diff --git a/trace-events b/trace-events
index c9ac144cee..f986c81dad 100644
--- a/trace-events
+++ b/trace-events
@@ -726,6 +726,28 @@ lm32_uart_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
lm32_uart_memory_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
lm32_uart_irq_state(int level) "irq state %d"
+# hw/scsi/mptsas.c
+mptsas_command_complete(void *dev, uint32_t ctx, uint32_t status, uint32_t resid) "dev %p context 0x%08x status %x resid %d"
+mptsas_diag_read(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%08x"
+mptsas_diag_write(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%08x"
+mptsas_irq_intx(void *dev, int level) "dev %p level %d"
+mptsas_irq_msi(void *dev) "dev %p "
+mptsas_mmio_read(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%x"
+mptsas_mmio_unhandled_read(void *dev, uint32_t addr) "dev %p addr 0x%08x"
+mptsas_mmio_unhandled_write(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%x"
+mptsas_mmio_write(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%x"
+mptsas_process_message(void *dev, int msg, uint32_t ctx) "dev %p cmd %d context 0x%08x"
+mptsas_process_scsi_io_request(void *dev, int bus, int target, int lun, uint64_t len) "dev %p dev %d:%d:%d length %"PRIu64""
+mptsas_reset(void *dev) "dev %p "
+mptsas_scsi_overflow(void *dev, uint32_t ctx, uint64_t req, uint64_t found) "dev %p context 0x%08x: %"PRIu64"/%"PRIu64""
+mptsas_sgl_overflow(void *dev, uint32_t ctx, uint64_t req, uint64_t found) "dev %p context 0x%08x: %"PRIu64"/%"PRIu64""
+mptsas_unhandled_cmd(void *dev, uint32_t ctx, uint8_t msg_cmd) "dev %p context 0x%08x: Unhandled cmd %x"
+mptsas_unhandled_doorbell_cmd(void *dev, int cmd) "dev %p value 0x%08x"
+
+# hw/scsi/mptconfig.c
+mptsas_config_sas_device(void *dev, int address, int port, int phy_handle, int dev_handle, int page) "dev %p address %d (port %d, handles: phy %d dev %d) page %d"
+mptsas_config_sas_phy(void *dev, int address, int port, int phy_handle, int dev_handle, int page) "dev %p address %d (port %d, handles: phy %d dev %d) page %d"
+
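
[Each mptsas_* line above declares a trace point; the build generates a matching trace_<name>() function with that signature, which the device model calls at the instrumented spot and which compiles to a no-op unless the event is enabled. Sketch of a call site (argument names illustrative):

    trace_mptsas_mmio_read(s, addr, val);
]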
# hw/scsi/megasas.c
megasas_init_firmware(uint64_t pa) "pa %" PRIx64 " "
megasas_init_queue(uint64_t queue_pa, int queue_len, uint64_t head, uint64_t tail, uint32_t flags) "queue at %" PRIx64 " len %d head %" PRIx64 " tail %" PRIx64 " flags %x"