-rwxr-xr-x  configure | 27
-rw-r--r--  cpu-exec.c | 4
-rw-r--r--  disas/ppc.c | 3
-rw-r--r--  hw/char/spapr_vty.c | 2
-rw-r--r--  hw/intc/xics.c | 21
-rw-r--r--  hw/ppc/e500.c | 52
-rw-r--r--  hw/ppc/ppc405_boards.c | 39
-rw-r--r--  hw/ppc/ppc405_uc.c | 16
-rw-r--r--  hw/ppc/spapr.c | 31
-rw-r--r--  hw/ppc/spapr_hcall.c | 50
-rw-r--r--  hw/ppc/spapr_iommu.c | 71
-rw-r--r--  hw/ppc/spapr_pci.c | 73
-rw-r--r--  hw/ppc/spapr_rtas.c | 23
-rw-r--r--  hw/ppc/virtex_ml507.c | 29
-rw-r--r--  include/exec/exec-all.h | 89
-rw-r--r--  include/exec/gen-icount.h | 4
-rw-r--r--  include/exec/softmmu_defs.h | 49
-rw-r--r--  include/exec/softmmu_exec.h | 3
-rw-r--r--  include/exec/softmmu_template.h | 88
-rw-r--r--  include/hw/pci-host/spapr.h | 8
-rw-r--r--  include/hw/ppc/spapr.h | 21
-rw-r--r--  qtest.c | 8
-rw-r--r--  target-alpha/translate.c | 8
-rw-r--r--  target-arm/translate.c | 2
-rw-r--r--  target-cris/translate.c | 2
-rw-r--r--  target-i386/translate.c | 2
-rw-r--r--  target-lm32/op_helper.c | 2
-rw-r--r--  target-lm32/translate.c | 2
-rw-r--r--  target-m68k/translate.c | 2
-rw-r--r--  target-microblaze/translate.c | 2
-rw-r--r--  target-mips/translate.c | 2
-rw-r--r--  target-moxie/helper.c | 1
-rw-r--r--  target-moxie/translate.c | 2
-rw-r--r--  target-openrisc/translate.c | 2
-rw-r--r--  target-ppc/cpu.h | 2
-rw-r--r--  target-ppc/excp_helper.c | 10
-rw-r--r--  target-ppc/kvm_ppc.c | 1
-rw-r--r--  target-ppc/mmu_helper.c | 4
-rw-r--r--  target-ppc/translate.c | 6
-rw-r--r--  target-ppc/translate_init.c | 2
-rw-r--r--  target-s390x/translate.c | 8
-rw-r--r--  target-sh4/translate.c | 2
-rw-r--r--  target-sparc/translate.c | 2
-rw-r--r--  target-unicore32/op_helper.c | 2
-rw-r--r--  target-unicore32/translate.c | 2
-rw-r--r--  target-xtensa/op_helper.c | 1
-rw-r--r--  target-xtensa/translate.c | 2
-rw-r--r--  tcg/aarch64/tcg-target.c | 8
-rw-r--r--  tcg/aarch64/tcg-target.h | 7
-rw-r--r--  tcg/arm/tcg-target.c | 14
-rw-r--r--  tcg/arm/tcg-target.h | 11
-rw-r--r--  tcg/hppa/tcg-target.c | 16
-rw-r--r--  tcg/hppa/tcg-target.h | 9
-rw-r--r--  tcg/i386/tcg-target.c | 141
-rw-r--r--  tcg/i386/tcg-target.h | 17
-rw-r--r--  tcg/ia64/tcg-target.c | 21
-rw-r--r--  tcg/ia64/tcg-target.h | 7
-rw-r--r--  tcg/mips/tcg-target.c | 257
-rw-r--r--  tcg/mips/tcg-target.h | 59
-rw-r--r--  tcg/optimize.c | 43
-rw-r--r--  tcg/ppc/tcg-target.c | 12
-rw-r--r--  tcg/ppc/tcg-target.h | 4
-rw-r--r--  tcg/ppc64/tcg-target.c | 45
-rw-r--r--  tcg/ppc64/tcg-target.h | 8
-rw-r--r--  tcg/s390/tcg-target.c | 13
-rw-r--r--  tcg/s390/tcg-target.h | 7
-rw-r--r--  tcg/sparc/tcg-target.c | 12
-rw-r--r--  tcg/sparc/tcg-target.h | 24
-rw-r--r--  tcg/tcg-op.h | 42
-rw-r--r--  tcg/tcg-opc.h | 4
-rw-r--r--  tcg/tcg.c | 96
-rw-r--r--  tcg/tcg.h | 114
-rw-r--r--  tcg/tci/tcg-target.c | 6
-rw-r--r--  tcg/tci/tcg-target.h | 18
-rw-r--r--  tci.c | 4
-rw-r--r--  trace-events | 5
76 files changed, 1102 insertions, 706 deletions
diff --git a/configure b/configure
index 0a55c20252..af6b048c6e 100755
--- a/configure
+++ b/configure
@@ -362,7 +362,11 @@ if test ! -z "$cpu" ; then
elif check_define __i386__ ; then
cpu="i386"
elif check_define __x86_64__ ; then
- cpu="x86_64"
+ if check_define __ILP32__ ; then
+ cpu="x32"
+ else
+ cpu="x86_64"
+ fi
elif check_define __sparc__ ; then
if check_define __arch64__ ; then
cpu="sparc64"
@@ -399,7 +403,7 @@ ARCH=
# Normalise host CPU name and set ARCH.
# Note that this case should only have supported host CPUs, not guests.
case "$cpu" in
- ia64|ppc|ppc64|s390|s390x|sparc64)
+ ia64|ppc|ppc64|s390|s390x|sparc64|x32)
cpu="$cpu"
;;
i386|i486|i586|i686|i86pc|BePC)
@@ -550,7 +554,7 @@ Haiku)
kvm="yes"
vhost_net="yes"
vhost_scsi="yes"
- if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
+ if [ "$cpu" = "i386" -o "$cpu" = "x86_64" -o "$cpu" = "x32" ] ; then
audio_possible_drivers="$audio_possible_drivers fmod"
fi
QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers -I$(pwd)/linux-headers $QEMU_INCLUDES"
@@ -977,6 +981,11 @@ case "$cpu" in
LDFLAGS="-m64 $LDFLAGS"
cc_i386='$(CC) -m32'
;;
+ x32)
+ CPU_CFLAGS="-mx32"
+ LDFLAGS="-mx32 $LDFLAGS"
+ cc_i386='$(CC) -m32'
+ ;;
# No special flags required for other host CPUs
esac
@@ -1251,7 +1260,7 @@ fi
if test "$pie" = ""; then
case "$cpu-$targetos" in
- i386-Linux|x86_64-Linux|i386-OpenBSD|x86_64-OpenBSD)
+ i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD)
;;
*)
pie="no"
@@ -3506,7 +3515,7 @@ fi
if test "$pie" = "no" ; then
textseg_addr=
case "$cpu" in
- arm | hppa | i386 | m68k | ppc | ppc64 | s390* | sparc | sparc64 | x86_64)
+ arm | hppa | i386 | m68k | ppc | ppc64 | s390* | sparc | sparc64 | x86_64 | x32)
textseg_addr=0x60000000
;;
mips)
@@ -3681,7 +3690,7 @@ echo "libs_softmmu=$libs_softmmu" >> $config_host_mak
echo "ARCH=$ARCH" >> $config_host_mak
case "$cpu" in
- arm|i386|x86_64|ppc|aarch64)
+ arm|i386|x86_64|x32|ppc|aarch64)
# The TCG interpreter currently does not support ld/st optimization.
if test "$tcg_interpreter" = "no" ; then
echo "CONFIG_QEMU_LDST_OPTIMIZATION=y" >> $config_host_mak
@@ -4116,7 +4125,7 @@ elif test "$ARCH" = "sparc64" ; then
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/sparc $QEMU_INCLUDES"
elif test "$ARCH" = "s390x" ; then
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/s390 $QEMU_INCLUDES"
-elif test "$ARCH" = "x86_64" ; then
+elif test "$ARCH" = "x86_64" -o "$ARCH" = "x32" ; then
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/i386 $QEMU_INCLUDES"
else
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
@@ -4178,7 +4187,7 @@ fi
if test "$linux" = "yes" ; then
mkdir -p linux-headers
case "$cpu" in
- i386|x86_64)
+ i386|x86_64|x32)
linux_arch=x86
;;
ppcemb|ppc|ppc64)
@@ -4444,7 +4453,7 @@ for i in $ARCH $TARGET_BASE_ARCH ; do
echo "CONFIG_HPPA_DIS=y" >> $config_target_mak
echo "CONFIG_HPPA_DIS=y" >> config-all-disas.mak
;;
- i386|x86_64)
+ i386|x86_64|x32)
echo "CONFIG_I386_DIS=y" >> $config_target_mak
echo "CONFIG_I386_DIS=y" >> config-all-disas.mak
;;
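
The configure hunks above key the new "x32" host type off compiler-predefined macros: the x32 sub-ABI defines __ILP32__ in addition to __x86_64__. A minimal stand-alone probe (illustrative only, not part of this patch) that classifies the host the same way:

    /* Classify an x86 host the way the check_define probes above do. */
    #include <stdio.h>

    int main(void)
    {
    #if defined(__x86_64__) && defined(__ILP32__)
        puts("cpu=x32");            /* 64-bit ISA, 32-bit long and pointers */
    #elif defined(__x86_64__)
        puts("cpu=x86_64");
    #elif defined(__i386__)
        puts("cpu=i386");
    #else
        puts("cpu=other");
    #endif
        printf("sizeof(long)=%zu sizeof(void *)=%zu\n",
               sizeof(long), sizeof(void *));
        return 0;
    }
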
diff --git a/cpu-exec.c b/cpu-exec.c
index 301be28bf7..5a4399509e 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -53,7 +53,7 @@ void cpu_resume_from_signal(CPUArchState *env, void *puc)
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
CPUArchState *env = cpu->env_ptr;
- tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
+ uintptr_t next_tb = tcg_qemu_tb_exec(env, tb_ptr);
if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
/* We didn't start executing this TB (eg because the instruction
* counter hit zero); we must restore the guest PC to the address
@@ -209,7 +209,7 @@ int cpu_exec(CPUArchState *env)
int ret, interrupt_request;
TranslationBlock *tb;
uint8_t *tc_ptr;
- tcg_target_ulong next_tb;
+ uintptr_t next_tb;
if (cpu->halted) {
if (!cpu_has_work(cpu)) {
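
Switching next_tb from tcg_target_ulong to uintptr_t matters on hosts like x32, where the TCG target word is 64-bit but host pointers are 32-bit: what tcg_qemu_tb_exec() actually returns is the address of the last TranslationBlock with the exit reason packed into its low bits (hence the TB_EXIT_MASK test above), so a host-pointer-sized integer is the right type. A rough sketch of that packing, with made-up names and constants rather than the QEMU definitions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXIT_MASK 3u                    /* stand-in for TB_EXIT_MASK */

    struct tb { int dummy; };

    static uintptr_t pack_exit(struct tb *tb, unsigned reason)
    {
        /* TB structures are aligned, so the low bits are free for flags. */
        assert(((uintptr_t)tb & EXIT_MASK) == 0);
        return (uintptr_t)tb | (reason & EXIT_MASK);
    }

    int main(void)
    {
        static struct tb t __attribute__((aligned(4)));
        uintptr_t next_tb = pack_exit(&t, 2);
        printf("tb=%p reason=%u\n",
               (void *)(next_tb & ~(uintptr_t)EXIT_MASK),
               (unsigned)(next_tb & EXIT_MASK));
        return 0;
    }
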
diff --git a/disas/ppc.c b/disas/ppc.c
index c149506fd8..99c4cbc3ab 100644
--- a/disas/ppc.c
+++ b/disas/ppc.c
@@ -5157,7 +5157,8 @@ int
print_insn_ppc (bfd_vma memaddr, struct disassemble_info *info)
{
int dialect = (char *) info->private_data - (char *) 0;
- return print_insn_powerpc (memaddr, info, 1, dialect);
+ return print_insn_powerpc (memaddr, info, info->endian == BFD_ENDIAN_BIG,
+ dialect);
}
/* Print a big endian PowerPC instruction. */
diff --git a/hw/char/spapr_vty.c b/hw/char/spapr_vty.c
index a7997213b6..9c2aef82e6 100644
--- a/hw/char/spapr_vty.c
+++ b/hw/char/spapr_vty.c
@@ -47,6 +47,8 @@ static int vty_getchars(VIOsPAPRDevice *sdev, uint8_t *buf, int max)
buf[n++] = dev->buf[dev->out++ % VTERM_BUFSIZE];
}
+ qemu_chr_accept_input(dev->chardev);
+
return n;
}
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index b96b0413ca..bb018d1829 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -642,6 +642,17 @@ static void xics_realize(DeviceState *dev, Error **errp)
ICSState *ics = icp->ics;
int i;
+ /* Registration of global state belongs in realize */

+ spapr_rtas_register("ibm,set-xive", rtas_set_xive);
+ spapr_rtas_register("ibm,get-xive", rtas_get_xive);
+ spapr_rtas_register("ibm,int-off", rtas_int_off);
+ spapr_rtas_register("ibm,int-on", rtas_int_on);
+
+ spapr_register_hypercall(H_CPPR, h_cppr);
+ spapr_register_hypercall(H_IPI, h_ipi);
+ spapr_register_hypercall(H_XIRR, h_xirr);
+ spapr_register_hypercall(H_EOI, h_eoi);
+
ics->nr_irqs = icp->nr_irqs;
ics->offset = XICS_IRQ_BASE;
ics->icp = icp;
@@ -678,16 +689,6 @@ static void xics_class_init(ObjectClass *oc, void *data)
dc->realize = xics_realize;
dc->props = xics_properties;
dc->reset = xics_reset;
-
- spapr_rtas_register("ibm,set-xive", rtas_set_xive);
- spapr_rtas_register("ibm,get-xive", rtas_get_xive);
- spapr_rtas_register("ibm,int-off", rtas_int_off);
- spapr_rtas_register("ibm,int-on", rtas_int_on);
-
- spapr_register_hypercall(H_CPPR, h_cppr);
- spapr_register_hypercall(H_IPI, h_ipi);
- spapr_register_hypercall(H_XIRR, h_xirr);
- spapr_register_hypercall(H_EOI, h_eoi);
}
static const TypeInfo xics_info = {
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index e79612b0e9..9059ff9bc7 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -123,13 +123,14 @@ static void dt_serial_create(void *fdt, unsigned long long offset,
}
}
-static int ppce500_load_device_tree(CPUPPCState *env,
- QEMUMachineInitArgs *args,
+static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
PPCE500Params *params,
hwaddr addr,
hwaddr initrd_base,
- hwaddr initrd_size)
+ hwaddr initrd_size,
+ bool dry_run)
{
+ CPUPPCState *env = first_cpu->env_ptr;
int ret = -1;
uint64_t mem_reg_property[] = { 0, cpu_to_be64(args->ram_size) };
int fdt_size;
@@ -369,12 +370,10 @@ static int ppce500_load_device_tree(CPUPPCState *env,
}
done:
- qemu_devtree_dumpdtb(fdt, fdt_size);
- ret = rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr);
- if (ret < 0) {
- goto out;
+ if (!dry_run) {
+ qemu_devtree_dumpdtb(fdt, fdt_size);
+ cpu_physical_memory_write(addr, fdt, fdt_size);
}
- g_free(fdt);
ret = fdt_size;
out:
@@ -383,6 +382,41 @@ out:
return ret;
}
+typedef struct DeviceTreeParams {
+ QEMUMachineInitArgs args;
+ PPCE500Params params;
+ hwaddr addr;
+ hwaddr initrd_base;
+ hwaddr initrd_size;
+} DeviceTreeParams;
+
+static void ppce500_reset_device_tree(void *opaque)
+{
+ DeviceTreeParams *p = opaque;
+ ppce500_load_device_tree(&p->args, &p->params, p->addr, p->initrd_base,
+ p->initrd_size, false);
+}
+
+static int ppce500_prep_device_tree(QEMUMachineInitArgs *args,
+ PPCE500Params *params,
+ hwaddr addr,
+ hwaddr initrd_base,
+ hwaddr initrd_size)
+{
+ DeviceTreeParams *p = g_new(DeviceTreeParams, 1);
+ p->args = *args;
+ p->params = *params;
+ p->addr = addr;
+ p->initrd_base = initrd_base;
+ p->initrd_size = initrd_size;
+
+ qemu_register_reset(ppce500_reset_device_tree, p);
+
+ /* Issue the device tree loader once, so that we get the size of the blob */
+ return ppce500_load_device_tree(args, params, addr, initrd_base,
+ initrd_size, true);
+}
+
/* Create -kernel TLB entries for BookE. */
static inline hwaddr booke206_page_size_to_tlb(uint64_t size)
{
@@ -746,7 +780,7 @@ void ppce500_init(QEMUMachineInitArgs *args, PPCE500Params *params)
struct boot_info *boot_info;
int dt_size;
- dt_size = ppce500_load_device_tree(env, args, params, dt_base,
+ dt_size = ppce500_prep_device_tree(args, params, dt_base,
initrd_base, initrd_size);
if (dt_size < 0) {
fprintf(stderr, "couldn't load device tree\n");
diff --git a/hw/ppc/ppc405_boards.c b/hw/ppc/ppc405_boards.c
index f74e5e52c2..75b2177c9c 100644
--- a/hw/ppc/ppc405_boards.c
+++ b/hw/ppc/ppc405_boards.c
@@ -27,9 +27,11 @@
#include "hw/timer/m48t59.h"
#include "hw/block/flash.h"
#include "sysemu/sysemu.h"
+#include "sysemu/qtest.h"
#include "block/block.h"
#include "hw/boards.h"
#include "qemu/log.h"
+#include "qemu/error-report.h"
#include "hw/loader.h"
#include "sysemu/blockdev.h"
#include "exec/address-spaces.h"
@@ -42,7 +44,7 @@
#define USE_FLASH_BIOS
-#define DEBUG_BOARD_INIT
+//#define DEBUG_BOARD_INIT
/*****************************************************************************/
/* PPC405EP reference board (IBM) */
@@ -252,17 +254,20 @@ static void ref405ep_init(QEMUMachineInitArgs *args)
if (filename) {
bios_size = load_image(filename, memory_region_get_ram_ptr(bios));
g_free(filename);
+ if (bios_size < 0 || bios_size > BIOS_SIZE) {
+ error_report("Could not load PowerPC BIOS '%s'", bios_name);
+ exit(1);
+ }
+ bios_size = (bios_size + 0xfff) & ~0xfff;
+ memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios);
+ } else if (!qtest_enabled() || kernel_filename != NULL) {
+ error_report("Could not load PowerPC BIOS '%s'", bios_name);
+ exit(1);
} else {
+ /* Avoid an uninitialized variable warning */
bios_size = -1;
}
- if (bios_size < 0 || bios_size > BIOS_SIZE) {
- fprintf(stderr, "qemu: could not load PowerPC bios '%s'\n",
- bios_name);
- exit(1);
- }
- bios_size = (bios_size + 0xfff) & ~0xfff;
memory_region_set_readonly(bios, true);
- memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios);
}
/* Register FPGA */
#ifdef DEBUG_BOARD_INIT
@@ -353,9 +358,9 @@ static void ref405ep_init(QEMUMachineInitArgs *args)
bdloc = 0;
}
#ifdef DEBUG_BOARD_INIT
+ printf("bdloc " RAM_ADDR_FMT "\n", bdloc);
printf("%s: Done\n", __func__);
#endif
- printf("bdloc " RAM_ADDR_FMT "\n", bdloc);
}
static QEMUMachine ref405ep_machine = {
@@ -569,17 +574,17 @@ static void taihu_405ep_init(QEMUMachineInitArgs *args)
if (filename) {
bios_size = load_image(filename, memory_region_get_ram_ptr(bios));
g_free(filename);
- } else {
- bios_size = -1;
- }
- if (bios_size < 0 || bios_size > BIOS_SIZE) {
- fprintf(stderr, "qemu: could not load PowerPC bios '%s'\n",
- bios_name);
+ if (bios_size < 0 || bios_size > BIOS_SIZE) {
+ error_report("Could not load PowerPC BIOS '%s'", bios_name);
+ exit(1);
+ }
+ bios_size = (bios_size + 0xfff) & ~0xfff;
+ memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios);
+ } else if (!qtest_enabled()) {
+ error_report("Could not load PowerPC BIOS '%s'", bios_name);
exit(1);
}
- bios_size = (bios_size + 0xfff) & ~0xfff;
memory_region_set_readonly(bios, true);
- memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios);
}
/* Register Linux flash */
dinfo = drive_get(IF_PFLASH, 0, fl_idx);
diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c
index 0ef5254cd7..6d6a7f1203 100644
--- a/hw/ppc/ppc405_uc.c
+++ b/hw/ppc/ppc405_uc.c
@@ -30,15 +30,15 @@
#include "qemu/log.h"
#include "exec/address-spaces.h"
-#define DEBUG_OPBA
-#define DEBUG_SDRAM
-#define DEBUG_GPIO
-#define DEBUG_SERIAL
-#define DEBUG_OCM
+//#define DEBUG_OPBA
+//#define DEBUG_SDRAM
+//#define DEBUG_GPIO
+//#define DEBUG_SERIAL
+//#define DEBUG_OCM
//#define DEBUG_I2C
-#define DEBUG_GPT
-#define DEBUG_MAL
-#define DEBUG_CLOCKS
+//#define DEBUG_GPT
+//#define DEBUG_MAL
+//#define DEBUG_CLOCKS
//#define DEBUG_CLOCKS_LL
ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 4b566aa410..04f0ee3da1 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -88,6 +88,9 @@ int spapr_allocate_irq(int hint, bool lsi)
if (hint) {
irq = hint;
+ if (hint >= spapr->next_irq) {
+ spapr->next_irq = hint + 1;
+ }
/* FIXME: we should probably check for collisions somehow */
} else {
irq = spapr->next_irq++;
@@ -103,22 +106,39 @@ int spapr_allocate_irq(int hint, bool lsi)
return irq;
}
-/* Allocate block of consequtive IRQs, returns a number of the first */
-int spapr_allocate_irq_block(int num, bool lsi)
+/*
+ * Allocate a block of consecutive IRQs, returns the number of the first.
+ * If msi==true, aligns the first IRQ number to num.
+ */
+int spapr_allocate_irq_block(int num, bool lsi, bool msi)
{
int first = -1;
- int i;
+ int i, hint = 0;
+
+ /*
+ * MSIMessage::data is used for storing the VIRQ, so
+ * it has to be aligned to num to support multiple
+ * MSI vectors. MSI-X is not affected by this.
+ * The hint is used for the first IRQ; the rest should
+ * be allocated contiguously.
+ */
+ if (msi) {
+ assert((num == 1) || (num == 2) || (num == 4) ||
+ (num == 8) || (num == 16) || (num == 32));
+ hint = (spapr->next_irq + num - 1) & ~(num - 1);
+ }
for (i = 0; i < num; ++i) {
int irq;
- irq = spapr_allocate_irq(0, lsi);
+ irq = spapr_allocate_irq(hint, lsi);
if (!irq) {
return -1;
}
if (0 == i) {
first = irq;
+ hint = 0;
}
/* If the above doesn't create a consecutive block then that's
@@ -262,7 +282,7 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
uint32_t start_prop = cpu_to_be32(initrd_base);
uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt"
- "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk";
+ "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk\0hcall-set-mode";
char qemu_hypertas_prop[] = "hcall-memop1";
uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)};
uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)};
@@ -1214,6 +1234,7 @@ static void ppc_spapr_init(QEMUMachineInitArgs *args)
spapr_create_nvram(spapr);
/* Set up PCI */
+ spapr_pci_msi_init(spapr, SPAPR_PCI_MSI_WINDOW);
spapr_pci_rtas_init();
phb = spapr_create_phb(spapr, 0);
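
The new msi argument to spapr_allocate_irq_block() exists because MSIMessage::data now carries the first VIRQ of a block and multi-vector MSI encodes the vector number in the low bits of that data, so the first IRQ has to be rounded up to a multiple of the (power-of-two) vector count. The rounding is the usual align-up idiom; a tiny stand-alone check (not part of the patch):

    #include <assert.h>
    #include <stdio.h>

    static int align_up(int next_irq, int num)
    {
        assert(num > 0 && (num & (num - 1)) == 0);   /* 1, 2, 4, ... 32 */
        return (next_irq + num - 1) & ~(num - 1);
    }

    int main(void)
    {
        /* With next_irq = 21 and 8 requested vectors the block starts at 24,
           so IRQs 24..31 can be reached as first_irq plus the vector number. */
        printf("%d\n", align_up(21, 8));
        return 0;
    }
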
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 67d6cd91d1..89e6a00dd9 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -657,6 +657,54 @@ static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
return H_SUCCESS;
}
+static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs;
+ target_ulong mflags = args[0];
+ target_ulong resource = args[1];
+ target_ulong value1 = args[2];
+ target_ulong value2 = args[3];
+ target_ulong ret = H_P2;
+
+ if (resource == H_SET_MODE_ENDIAN) {
+ if (value1) {
+ ret = H_P3;
+ goto out;
+ }
+ if (value2) {
+ ret = H_P4;
+ goto out;
+ }
+
+ switch (mflags) {
+ case H_SET_MODE_ENDIAN_BIG:
+ for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+ PowerPCCPU *cp = POWERPC_CPU(cs);
+ CPUPPCState *env = &cp->env;
+ env->spr[SPR_LPCR] &= ~LPCR_ILE;
+ }
+ ret = H_SUCCESS;
+ break;
+
+ case H_SET_MODE_ENDIAN_LITTLE:
+ for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+ PowerPCCPU *cp = POWERPC_CPU(cs);
+ CPUPPCState *env = &cp->env;
+ env->spr[SPR_LPCR] |= LPCR_ILE;
+ }
+ ret = H_SUCCESS;
+ break;
+
+ default:
+ ret = H_UNSUPPORTED_FLAG;
+ }
+ }
+
+out:
+ return ret;
+}
+
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
@@ -734,6 +782,8 @@ static void hypercall_register_types(void)
/* qemu/KVM-PPC specific hcalls */
spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
+
+ spapr_register_hypercall(H_SET_MODE, h_set_mode);
}
type_init(hypercall_register_types)
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index 3d4a1fcfe1..ef45f4f0cc 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -22,13 +22,12 @@
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
+#include "trace.h"
#include "hw/ppc/spapr.h"
#include <libfdt.h>
-/* #define DEBUG_TCE */
-
enum sPAPRTCEAccess {
SPAPR_TCE_FAULT = 0,
SPAPR_TCE_RO = 1,
@@ -61,44 +60,28 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr)
{
sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
uint64_t tce;
-
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_tce_translate liobn=0x%" PRIx32 " addr=0x"
- DMA_ADDR_FMT "\n", tcet->liobn, addr);
-#endif
+ IOMMUTLBEntry ret = {
+ .target_as = &address_space_memory,
+ .iova = 0,
+ .translated_addr = 0,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE,
+ };
if (tcet->bypass) {
- return (IOMMUTLBEntry) {
- .target_as = &address_space_memory,
- .iova = 0,
- .translated_addr = 0,
- .addr_mask = ~(hwaddr)0,
- .perm = IOMMU_RW,
- };
- }
-
- /* Check if we are in bound */
- if (addr >= tcet->window_size) {
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_tce_translate out of bounds\n");
-#endif
- return (IOMMUTLBEntry) { .perm = IOMMU_NONE };
+ ret.perm = IOMMU_RW;
+ } else if (addr < tcet->window_size) {
+ /* Check if we are within bounds */
+ tce = tcet->table[addr >> SPAPR_TCE_PAGE_SHIFT];
+ ret.iova = addr & ~SPAPR_TCE_PAGE_MASK;
+ ret.translated_addr = tce & ~SPAPR_TCE_PAGE_MASK;
+ ret.addr_mask = SPAPR_TCE_PAGE_MASK;
+ ret.perm = tce;
}
+ trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
+ ret.addr_mask);
- tce = tcet->table[addr >> SPAPR_TCE_PAGE_SHIFT];
-
-#ifdef DEBUG_TCE
- fprintf(stderr, " -> *paddr=0x%llx, *len=0x%llx\n",
- (tce & ~SPAPR_TCE_PAGE_MASK), SPAPR_TCE_PAGE_MASK + 1);
-#endif
-
- return (IOMMUTLBEntry) {
- .target_as = &address_space_memory,
- .iova = addr & ~SPAPR_TCE_PAGE_MASK,
- .translated_addr = tce & ~SPAPR_TCE_PAGE_MASK,
- .addr_mask = SPAPR_TCE_PAGE_MASK,
- .perm = tce,
- };
+ return ret;
}
static int spapr_tce_table_pre_load(void *opaque)
@@ -150,10 +133,7 @@ static int spapr_tce_table_realize(DeviceState *dev)
}
tcet->nb_table = tcet->window_size >> SPAPR_TCE_PAGE_SHIFT;
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_iommu: New TCE table @ %p, liobn=0x%x, "
- "table @ %p, fd=%d\n", tcet, liobn, tcet->table, tcet->fd);
-#endif
+ trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);
memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
"iommu-spapr", UINT64_MAX);
@@ -250,20 +230,17 @@ static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong liobn = args[0];
target_ulong ioba = args[1];
target_ulong tce = args[2];
+ target_ulong ret = H_PARAMETER;
sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
ioba &= ~(SPAPR_TCE_PAGE_SIZE - 1);
if (tcet) {
- return put_tce_emu(tcet, ioba, tce);
+ ret = put_tce_emu(tcet, ioba, tce);
}
-#ifdef DEBUG_TCE
- fprintf(stderr, "%s on liobn=" TARGET_FMT_lx /*%s*/
- " ioba 0x" TARGET_FMT_lx " TCE 0x" TARGET_FMT_lx "\n",
- __func__, liobn, /*dev->qdev.id, */ioba, tce);
-#endif
+ trace_spapr_iommu_put(liobn, ioba, tce, ret);
- return H_PARAMETER;
+ return ret;
}
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 1ca35a0a72..9b6ee32acf 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -65,22 +65,14 @@ static PCIDevice *find_dev(sPAPREnvironment *spapr, uint64_t buid,
{
sPAPRPHBState *sphb = find_phb(spapr, buid);
PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
- BusState *bus = BUS(phb->bus);
- BusChild *kid;
+ int bus_num = (config_addr >> 16) & 0xFF;
int devfn = (config_addr >> 8) & 0xFF;
if (!phb) {
return NULL;
}
- QTAILQ_FOREACH(kid, &bus->children, sibling) {
- PCIDevice *dev = (PCIDevice *)kid->child;
- if (dev->devfn == devfn) {
- return dev;
- }
- }
-
- return NULL;
+ return pci_find_device(phb->bus, bus_num, devfn);
}
static uint32_t rtas_pci_cfgaddr(uint32_t arg)
@@ -258,11 +250,11 @@ static int spapr_msicfg_find(sPAPRPHBState *phb, uint32_t config_addr,
* This is required for msi_notify()/msix_notify() which
* will write at the addresses via spapr_msi_write().
*/
-static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr,
- bool msix, unsigned req_num)
+static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
+ unsigned first_irq, unsigned req_num)
{
unsigned i;
- MSIMessage msg = { .address = addr, .data = 0 };
+ MSIMessage msg = { .address = addr, .data = first_irq };
if (!msix) {
msi_set_message(pdev, msg);
@@ -270,8 +262,7 @@ static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr,
return;
}
- for (i = 0; i < req_num; ++i) {
- msg.address = addr | (i << 2);
+ for (i = 0; i < req_num; ++i, ++msg.data) {
msix_set_message(pdev, i, msg);
trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
}
@@ -351,7 +342,8 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
/* There is no cached config, allocate MSIs */
if (!phb->msi_table[ndev].nvec) {
- irq = spapr_allocate_irq_block(req_num, false);
+ irq = spapr_allocate_irq_block(req_num, false,
+ ret_intr_type == RTAS_TYPE_MSI);
if (irq < 0) {
fprintf(stderr, "Cannot allocate MSIs for device#%d", ndev);
rtas_st(rets, 0, -1); /* Hardware error */
@@ -363,8 +355,8 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
/* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
- spapr_msi_setmsg(pdev, phb->msi_win_addr | (ndev << 16),
- ret_intr_type == RTAS_TYPE_MSIX, req_num);
+ spapr_msi_setmsg(pdev, spapr->msi_win_addr, ret_intr_type == RTAS_TYPE_MSIX,
+ phb->msi_table[ndev].irq, req_num);
rtas_st(rets, 0, 0);
rtas_st(rets, 1, req_num);
@@ -450,10 +442,7 @@ static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
static void spapr_msi_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
- sPAPRPHBState *phb = opaque;
- int ndev = addr >> 16;
- int vec = ((addr & 0xFFFF) >> 2) | data;
- uint32_t irq = phb->msi_table[ndev].irq + vec;
+ uint32_t irq = data;
trace_spapr_pci_msi_write(addr, data, irq);
@@ -467,6 +456,23 @@ static const MemoryRegionOps spapr_msi_ops = {
.endianness = DEVICE_LITTLE_ENDIAN
};
+void spapr_pci_msi_init(sPAPREnvironment *spapr, hwaddr addr)
+{
+ /*
+ * As MSI/MSIX interrupts are triggered by writing to the MSI/MSIX
+ * vectors, we need to allocate some memory to catch those writes
+ * coming from msi_notify()/msix_notify().
+ * As MSIMessage::addr is going to be the same and MSIMessage::data
+ * is going to be a VIRQ number, only 4 bytes of the MSI MR will
+ * be used.
+ */
+ spapr->msi_win_addr = addr;
+ memory_region_init_io(&spapr->msiwindow, NULL, &spapr_msi_ops, spapr,
+ "msi", getpagesize());
+ memory_region_add_subregion(get_system_memory(), spapr->msi_win_addr,
+ &spapr->msiwindow);
+}
+
/*
* PHB PCI device
*/
@@ -492,8 +498,7 @@ static int spapr_phb_init(SysBusDevice *s)
if ((sphb->buid != -1) || (sphb->dma_liobn != -1)
|| (sphb->mem_win_addr != -1)
- || (sphb->io_win_addr != -1)
- || (sphb->msi_win_addr != -1)) {
+ || (sphb->io_win_addr != -1)) {
fprintf(stderr, "Either \"index\" or other parameters must"
" be specified for PAPR PHB, not both\n");
return -1;
@@ -506,7 +511,6 @@ static int spapr_phb_init(SysBusDevice *s)
+ sphb->index * SPAPR_PCI_WINDOW_SPACING;
sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
- sphb->msi_win_addr = windows_base + SPAPR_PCI_MSI_WIN_OFF;
}
if (sphb->buid == -1) {
@@ -529,11 +533,6 @@ static int spapr_phb_init(SysBusDevice *s)
return -1;
}
- if (sphb->msi_win_addr == -1) {
- fprintf(stderr, "MSI window address not specified for PHB\n");
- return -1;
- }
-
if (find_phb(spapr, sphb->buid)) {
fprintf(stderr, "PCI host bridges must have unique BUIDs\n");
return -1;
@@ -573,18 +572,6 @@ static int spapr_phb_init(SysBusDevice *s)
get_system_io(), 0, SPAPR_PCI_IO_WIN_SIZE);
memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
&sphb->iowindow);
-
- /* As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors,
- * we need to allocate some memory to catch those writes coming
- * from msi_notify()/msix_notify() */
- if (msi_supported) {
- sprintf(namebuf, "%s.msi", sphb->dtbusname);
- memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, sphb,
- namebuf, SPAPR_MSIX_MAX_DEVS * 0x10000);
- memory_region_add_subregion(get_system_memory(), sphb->msi_win_addr,
- &sphb->msiwindow);
- }
-
/*
* Selecting a busname is more complex than you'd think, due to
* interacting constraints. If the user has specified an id
@@ -659,7 +646,6 @@ static Property spapr_phb_properties[] = {
DEFINE_PROP_HEX64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
DEFINE_PROP_HEX64("io_win_size", sPAPRPHBState, io_win_size,
SPAPR_PCI_IO_WIN_SIZE),
- DEFINE_PROP_HEX64("msi_win_addr", sPAPRPHBState, msi_win_addr, -1),
DEFINE_PROP_END_OF_LIST(),
};
@@ -701,7 +687,6 @@ static const VMStateDescription vmstate_spapr_pci = {
VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
- VMSTATE_UINT64_EQUAL(msi_win_addr, sPAPRPHBState),
VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
VMSTATE_STRUCT_ARRAY(msi_table, sPAPRPHBState, SPAPR_MSIX_MAX_DEVS, 0,
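
Because spapr_msi_setmsg() above programs MSIMessage::data to first_irq + vector, the rewritten spapr_msi_write() can treat the written data directly as the VIRQ instead of reconstructing it from the window offset. A toy version of that path, with invented names standing in for the real notifier and IRQ code:

    #include <stdint.h>
    #include <stdio.h>

    static void msi_write(uint64_t data)
    {
        uint32_t irq = (uint32_t)data;      /* the data already is the VIRQ */
        printf("pulse IRQ %u\n", irq);
    }

    int main(void)
    {
        unsigned first_irq = 24;            /* start of the aligned block */
        unsigned vector = 3;
        msi_write(first_irq + vector);      /* what msi_notify() would send */
        return 0;
    }
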
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index 394ce05ba2..eb542f218a 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -202,6 +202,28 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
rtas_st(rets, 0, -3);
}
+static void rtas_stop_self(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ cs->halted = 1;
+ cpu_exit(cs);
+ /*
+ * While stopping a CPU, the guest calls H_CPPR, which
+ * effectively disables interrupts at the XICS level.
+ * However, decrementer interrupts in TCG can still
+ * wake the CPU up, so we disable interrupts in the MSR
+ * as well.
+ * As rtas_start_cpu() resets the whole MSR anyway, there is
+ * no need to bother with specific bits; we just clear it.
+ */
+ env->msr = 0;
+}
+
static struct rtas_call {
const char *name;
spapr_rtas_fn fn;
@@ -322,6 +344,7 @@ static void core_rtas_register_types(void)
spapr_rtas_register("query-cpu-stopped-state",
rtas_query_cpu_stopped_state);
spapr_rtas_register("start-cpu", rtas_start_cpu);
+ spapr_rtas_register("stop-self", rtas_stop_self);
}
type_init(core_rtas_register_types)
diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c
index 08e77fbef5..e9468b1b25 100644
--- a/hw/ppc/virtex_ml507.c
+++ b/hw/ppc/virtex_ml507.c
@@ -141,22 +141,31 @@ static int xilinx_load_device_tree(hwaddr addr,
{
char *path;
int fdt_size;
- void *fdt;
+ void *fdt = NULL;
int r;
+ const char *dtb_filename;
- /* Try the local "ppc.dtb" override. */
- fdt = load_device_tree("ppc.dtb", &fdt_size);
- if (!fdt) {
- path = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
- if (path) {
- fdt = load_device_tree(path, &fdt_size);
- g_free(path);
+ dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
+ if (dtb_filename) {
+ fdt = load_device_tree(dtb_filename, &fdt_size);
+ if (!fdt) {
+ error_report("Error while loading device tree file '%s'",
+ dtb_filename);
}
+ } else {
+ /* Try the local "ppc.dtb" override. */
+ fdt = load_device_tree("ppc.dtb", &fdt_size);
if (!fdt) {
- return 0;
+ path = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
+ if (path) {
+ fdt = load_device_tree(path, &fdt_size);
+ g_free(path);
+ }
}
}
-
+ if (!fdt) {
+ return 0;
+ }
r = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline);
if (r < 0)
fprintf(stderr, "couldn't set /chosen/bootargs\n");
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index ffb69a4c70..beb41491b4 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -295,47 +295,42 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
}
}
-/* The return address may point to the start of the next instruction.
- Subtracting one gets us the call instruction itself. */
+/* GETRA is the true target of the return instruction that we'll execute,
+ defined here for simplicity of defining the follow-up macros. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
-# define GETPC() tci_tb_ptr
-#elif defined(__s390__) && !defined(__s390x__)
-# define GETPC() \
- (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
-#elif defined(__arm__)
-/* Thumb return addresses have the low bit set, so we need to subtract two.
- This is still safe in ARM mode because instructions are 4 bytes. */
-# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
+# define GETRA() tci_tb_ptr
+#else
+# define GETRA() \
+ ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
+#endif
+
+/* The true return address will often point to a host insn that is part of
+ the next translated guest insn. Adjust the address backward to point to
+ the middle of the call insn. Subtracting one would do the job except for
+ several compressed mode architectures (arm, mips) which set the low bit
+ to indicate the compressed mode; subtracting two works around that. It
+ is also the case that there are no host isas that contain a call insn
+ smaller than 4 bytes, so we don't worry about special-casing this. */
+#if defined(CONFIG_TCG_INTERPRETER)
+# define GETPC_ADJ 0
#else
-# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
+# define GETPC_ADJ 2
#endif
+#define GETPC() (GETRA() - GETPC_ADJ)
+
+/* The LDST optimization splits code generation into fast and slow paths.
+ In some implementations, we pass the "logical" return address manually;
+ in others, we must infer the logical return from the true return. */
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-/* qemu_ld/st optimization split code generation to fast and slow path, thus,
- it needs special handling for an MMU helper which is called from the slow
- path, to get the fast path's pc without any additional argument.
- It uses a tricky solution which embeds the fast path pc into the slow path.
-
- Code flow in slow path:
- (1) pre-process
- (2) call MMU helper
- (3) jump to (5)
- (4) fast path information (implementation specific)
- (5) post-process (e.g. stack adjust)
- (6) jump to corresponding code of the next of fast path
- */
-# if defined(__i386__) || defined(__x86_64__)
-# define GETPC_EXT() GETPC()
-# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
+# if defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
+# define GETRA_LDST(RA) (*(int32_t *)((RA) - 4))
# elif defined(__arm__)
/* We define two insns between the return address and the branch back to
straight-line. Find and decode that branch insn. */
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() tcg_getpc_ldst(GETRA())
-static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
+# define GETRA_LDST(RA) tcg_getra_ldst(RA)
+static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
int32_t b;
ra += 8; /* skip the two insns */
@@ -343,33 +338,32 @@ static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
b = (b << 8) >> (8 - 2); /* extract the displacement */
ra += 8; /* branches are relative to pc+8 */
ra += b; /* apply the displacement */
- ra -= 4; /* return a pointer into the current opcode,
- not the start of the next opcode */
return ra;
}
# elif defined(__aarch64__)
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() tcg_getpc_ldst(GETRA())
-static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
+# define GETRA_LDST(RA) tcg_getra_ldst(RA)
+static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
int32_t b;
ra += 4; /* skip one instruction */
b = *(int32_t *)ra; /* load the branch insn */
b = (b << 6) >> (6 - 2); /* extract the displacement */
ra += b; /* apply the displacement */
- ra -= 4; /* return a pointer into the current opcode,
- not the start of the next opcode */
return ra;
}
-# else
-# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
# endif
+#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */
+
+/* ??? Delete these once they are no longer used. */
bool is_tcg_gen_code(uintptr_t pc_ptr);
-# ifndef GETPC_EXT
-# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
-# endif
+#ifdef GETRA_LDST
+# define GETRA_EXT() tcg_getra_ext(GETRA())
+static inline uintptr_t tcg_getra_ext(uintptr_t ra)
+{
+ return is_tcg_gen_code(ra) ? GETRA_LDST(ra) : ra;
+}
#else
-# define GETPC_EXT() GETPC()
+# define GETRA_EXT() GETRA()
#endif
#if !defined(CONFIG_USER_ONLY)
@@ -383,7 +377,10 @@ bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
-#include "exec/softmmu_defs.h"
+uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
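
The reworked macros above separate the true return address (GETRA) from the address inside the call insn (GETPC = GETRA - GETPC_ADJ). The underlying mechanism is the compiler builtin shown below; this is a simplified illustration, not the QEMU macros themselves:

    #include <stdint.h>
    #include <stdio.h>

    #define RET_ADJ 2   /* stand-in for GETPC_ADJ */

    /* noinline so __builtin_return_address(0) really is our caller's PC. */
    static uintptr_t __attribute__((noinline)) call_site(void)
    {
        uintptr_t ra = (uintptr_t)__builtin_extract_return_addr(
                           __builtin_return_address(0));
        return ra - RET_ADJ;               /* step back into the call insn */
    }

    int main(void)
    {
        printf("approximate call site: 0x%lx\n", (unsigned long)call_site());
        return 0;
    }
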
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index 4fc7b2981d..39a6b61e4f 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -39,12 +39,12 @@ static inline void gen_tb_start(void)
static void gen_tb_end(TranslationBlock *tb, int num_insns)
{
gen_set_label(exitreq_label);
- tcg_gen_exit_tb((tcg_target_long)tb + TB_EXIT_REQUESTED);
+ tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
if (use_icount) {
*icount_arg = num_insns;
gen_set_label(icount_label);
- tcg_gen_exit_tb((tcg_target_long)tb + TB_EXIT_ICOUNT_EXPIRED);
+ tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
}
}
diff --git a/include/exec/softmmu_defs.h b/include/exec/softmmu_defs.h
deleted file mode 100644
index e55e7178c6..0000000000
--- a/include/exec/softmmu_defs.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Software MMU support
- *
- * Declare helpers used by TCG for qemu_ld/st ops.
- *
- * Used by softmmu_exec.h, TCG targets and exec-all.h.
- *
- */
-#ifndef SOFTMMU_DEFS_H
-#define SOFTMMU_DEFS_H
-
-uint8_t helper_ret_ldb_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-uint16_t helper_ret_ldw_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- int mmu_idx, uintptr_t retaddr);
-void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- int mmu_idx, uintptr_t retaddr);
-void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
-
-uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
-void helper_stb_mmu(CPUArchState *env, target_ulong addr,
- uint8_t val, int mmu_idx);
-void helper_stw_mmu(CPUArchState *env, target_ulong addr,
- uint16_t val, int mmu_idx);
-void helper_stl_mmu(CPUArchState *env, target_ulong addr,
- uint32_t val, int mmu_idx);
-void helper_stq_mmu(CPUArchState *env, target_ulong addr,
- uint64_t val, int mmu_idx);
-
-uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
-#endif /* SOFTMMU_DEFS_H */
diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h
index 3e4e886a30..6fde154527 100644
--- a/include/exec/softmmu_exec.h
+++ b/include/exec/softmmu_exec.h
@@ -19,7 +19,8 @@
#define ldul_executive ldl_executive
#define ldul_supervisor ldl_supervisor
-#include "exec/softmmu_defs.h"
+/* The memory helpers for tcg-generated code need tcg_target_long etc. */
+#include "tcg.h"
#define ACCESS_TYPE 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index eaca9e1035..5bbc56afd5 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -28,24 +28,40 @@
#if DATA_SIZE == 8
#define SUFFIX q
-#define USUFFIX q
-#define DATA_TYPE uint64_t
+#define LSUFFIX q
+#define SDATA_TYPE int64_t
#elif DATA_SIZE == 4
#define SUFFIX l
-#define USUFFIX l
-#define DATA_TYPE uint32_t
+#define LSUFFIX l
+#define SDATA_TYPE int32_t
#elif DATA_SIZE == 2
#define SUFFIX w
-#define USUFFIX uw
-#define DATA_TYPE uint16_t
+#define LSUFFIX uw
+#define SDATA_TYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
-#define USUFFIX ub
-#define DATA_TYPE uint8_t
+#define LSUFFIX ub
+#define SDATA_TYPE int8_t
#else
#error unsupported data size
#endif
+#define DATA_TYPE glue(u, SDATA_TYPE)
+
+/* For the benefit of TCG generated code, we want to avoid the complication
+ of ABI-specific return type promotion and always return a value extended
+ to the register size of the host. This is tcg_target_long, except in the
+ case of a 32-bit host and 64-bit data, and for that we always have
+ uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
+#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
+# define WORD_TYPE DATA_TYPE
+# define USUFFIX SUFFIX
+#else
+# define WORD_TYPE tcg_target_ulong
+# define USUFFIX glue(u, SUFFIX)
+# define SSUFFIX glue(s, SUFFIX)
+#endif
+
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
@@ -77,15 +93,18 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
-DATA_TYPE
-glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
+WORD_TYPE
+glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, int mmu_idx,
+ uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr;
+ /* Adjust the given return address. */
+ retaddr -= GETPC_ADJ;
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
@@ -121,10 +140,12 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr1,
- mmu_idx, retaddr);
- res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr2,
- mmu_idx, retaddr);
+ /* Note the adjustment at the beginning of the function.
+ Undo that for the recursion. */
+ res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+ (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
+ res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+ (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
@@ -142,19 +163,33 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
- return glue(glue(ld, USUFFIX), _raw)((uint8_t *)haddr);
+ /* Note that ldl_raw is defined with type "int". */
+ return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
}
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
int mmu_idx)
{
- return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
- GETPC_EXT());
+ return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+ GETRA_EXT());
}
#ifndef SOFTMMU_CODE_ACCESS
+/* Provide signed versions of the load routines as well. We can of course
+ avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
+#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
+WORD_TYPE
+glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, int mmu_idx,
+ uintptr_t retaddr)
+{
+ return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+ (env, addr, mmu_idx, retaddr);
+}
+#endif
+
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
DATA_TYPE val,
@@ -182,6 +217,9 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
uintptr_t haddr;
+ /* Adjust the given return address. */
+ retaddr -= GETPC_ADJ;
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
@@ -223,8 +261,10 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#else
uint8_t val8 = val >> (i * 8);
#endif
+ /* Note the adjustment at the beginning of the function.
+ Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- mmu_idx, retaddr);
+ mmu_idx, retaddr + GETPC_ADJ);
}
return;
}
@@ -245,7 +285,7 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
DATA_TYPE val, int mmu_idx)
{
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
- GETPC_EXT());
+ GETRA_EXT());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
@@ -254,6 +294,10 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
-#undef USUFFIX
+#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
+#undef WORD_TYPE
+#undef SDATA_TYPE
+#undef USUFFIX
+#undef SSUFFIX
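
softmmu_template.h relies on token pasting to stamp out one helper per access width; with SHIFT=2 (so SUFFIX=l, USUFFIX=ul) and MMUSUFFIX=_mmu the load above becomes helper_ret_ldul_mmu. A toy reproduction of that name construction, where the helper body is a stand-in rather than real softmmu code:

    #include <stdint.h>
    #include <stdio.h>

    #define xglue(a, b) a##b
    #define glue(a, b)  xglue(a, b)

    #define SUFFIX    l
    #define USUFFIX   glue(u, SUFFIX)      /* -> ul */
    #define MMUSUFFIX _mmu

    /* Expands to: static uint32_t helper_ret_ldul_mmu(uint32_t x) */
    static uint32_t glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(uint32_t x)
    {
        return x;
    }

    int main(void)
    {
        printf("%u\n", helper_ret_ldul_mmu(42));
        return 0;
    }
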
diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index 93f9511325..970b4a9e4a 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -43,8 +43,7 @@ typedef struct sPAPRPHBState {
MemoryRegion memspace, iospace;
hwaddr mem_win_addr, mem_win_size, io_win_addr, io_win_size;
- hwaddr msi_win_addr;
- MemoryRegion memwindow, iowindow, msiwindow;
+ MemoryRegion memwindow, iowindow;
uint32_t dma_liobn;
uint64_t dma_window_start;
@@ -73,7 +72,8 @@ typedef struct sPAPRPHBState {
#define SPAPR_PCI_MMIO_WIN_SIZE 0x20000000
#define SPAPR_PCI_IO_WIN_OFF 0x80000000
#define SPAPR_PCI_IO_WIN_SIZE 0x10000
-#define SPAPR_PCI_MSI_WIN_OFF 0x90000000
+
+#define SPAPR_PCI_MSI_WINDOW 0x40000000000ULL
#define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL
@@ -88,6 +88,8 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
uint32_t xics_phandle,
void *fdt);
+void spapr_pci_msi_init(sPAPREnvironment *spapr, hwaddr addr);
+
void spapr_pci_rtas_init(void);
#endif /* __HW_SPAPR_PCI_H__ */
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 9fc197286c..e37b41983c 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -13,6 +13,8 @@ struct sPAPRNVRAM;
typedef struct sPAPREnvironment {
struct VIOsPAPRBus *vio_bus;
QLIST_HEAD(, sPAPRPHBState) phbs;
+ hwaddr msi_win_addr;
+ MemoryRegion msiwindow;
struct sPAPRNVRAM *nvram;
XICSState *icp;
@@ -109,6 +111,15 @@ typedef struct sPAPREnvironment {
#define H_NOT_ENOUGH_RESOURCES -44
#define H_R_STATE -45
#define H_RESCINDEND -46
+#define H_P2 -55
+#define H_P3 -56
+#define H_P4 -57
+#define H_P5 -58
+#define H_P6 -59
+#define H_P7 -60
+#define H_P8 -61
+#define H_P9 -62
+#define H_UNSUPPORTED_FLAG -256
#define H_MULTI_THREADS_ACTIVE -9005
@@ -143,6 +154,11 @@ typedef struct sPAPREnvironment {
#define H_PP1 (1ULL<<(63-62))
#define H_PP2 (1ULL<<(63-63))
+/* H_SET_MODE flags */
+#define H_SET_MODE_ENDIAN 4
+#define H_SET_MODE_ENDIAN_BIG 0
+#define H_SET_MODE_ENDIAN_LITTLE 1
+
/* VASI States */
#define H_VASI_INVALID 0
#define H_VASI_ENABLED 1
@@ -267,7 +283,8 @@ typedef struct sPAPREnvironment {
#define H_GET_EM_PARMS 0x2B8
#define H_SET_MPP 0x2D0
#define H_GET_MPP 0x2D4
-#define MAX_HCALL_OPCODE H_GET_MPP
+#define H_SET_MODE 0x31C
+#define MAX_HCALL_OPCODE H_SET_MODE
/* The hcalls above are standardized in PAPR and implemented by pHyp
* as well.
@@ -303,7 +320,7 @@ target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
target_ulong *args);
int spapr_allocate_irq(int hint, bool lsi);
-int spapr_allocate_irq_block(int num, bool lsi);
+int spapr_allocate_irq_block(int num, bool lsi, bool msi);
static inline int spapr_allocate_msi(int hint)
{
diff --git a/qtest.c b/qtest.c
index ef671fb05d..584c70762a 100644
--- a/qtest.c
+++ b/qtest.c
@@ -177,7 +177,7 @@ static void qtest_send_prefix(CharDriverState *chr)
qtest_get_time(&tv);
fprintf(qtest_log_fp, "[S +" FMT_timeval "] ",
- tv.tv_sec, (long) tv.tv_usec);
+ (long) tv.tv_sec, (long) tv.tv_usec);
}
static void GCC_FMT_ATTR(2, 3) qtest_send(CharDriverState *chr,
@@ -225,7 +225,7 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
qtest_get_time(&tv);
fprintf(qtest_log_fp, "[R +" FMT_timeval "]",
- tv.tv_sec, (long) tv.tv_usec);
+ (long) tv.tv_sec, (long) tv.tv_usec);
for (i = 0; words[i]; i++) {
fprintf(qtest_log_fp, " %s", words[i]);
}
@@ -485,7 +485,7 @@ static void qtest_event(void *opaque, int event)
qtest_opened = true;
if (qtest_log_fp) {
fprintf(qtest_log_fp, "[I " FMT_timeval "] OPENED\n",
- start_time.tv_sec, (long) start_time.tv_usec);
+ (long) start_time.tv_sec, (long) start_time.tv_usec);
}
break;
case CHR_EVENT_CLOSED:
@@ -494,7 +494,7 @@ static void qtest_event(void *opaque, int event)
qemu_timeval tv;
qtest_get_time(&tv);
fprintf(qtest_log_fp, "[I +" FMT_timeval "] CLOSED\n",
- tv.tv_sec, (long) tv.tv_usec);
+ (long) tv.tv_sec, (long) tv.tv_usec);
}
break;
default:
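
The (long) casts above matter on hosts where the tv_sec field is wider than long (x32 uses a 64-bit time_t with a 32-bit long): passing it unconverted to the "%ld" in FMT_timeval is undefined behaviour, while the cast keeps format and argument in agreement, and truncating the seconds is harmless for a log timestamp. The same pattern in plain C:

    #include <stdio.h>
    #include <sys/time.h>

    int main(void)
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        /* time_t may be wider than long; cast so "%ld" stays well defined. */
        printf("[+%ld.%06ld]\n", (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
    }
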
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 309dea6ff0..28ce4363f1 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -415,7 +415,7 @@ static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
} else if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(cpu_pc, dest);
- tcg_gen_exit_tb((tcg_target_long)ctx->tb);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb);
return EXIT_GOTO_TB;
} else {
tcg_gen_movi_i64(cpu_pc, dest);
@@ -434,12 +434,12 @@ static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(cpu_pc, ctx->pc);
- tcg_gen_exit_tb((tcg_target_long)ctx->tb);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb);
gen_set_label(lab_true);
tcg_gen_goto_tb(1);
tcg_gen_movi_i64(cpu_pc, dest);
- tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
return EXIT_GOTO_TB;
} else {
@@ -1629,7 +1629,7 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
we change the PAL base register. */
if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
tcg_gen_goto_tb(0);
- tcg_gen_exit_tb((tcg_target_long)ctx->tb);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb);
return EXIT_GOTO_TB;
}
diff --git a/target-arm/translate.c b/target-arm/translate.c
index d1e8538142..9160ced4fe 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -3356,7 +3356,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
gen_set_pc_im(dest);
tcg_gen_exit_tb(0);
diff --git a/target-cris/translate.c b/target-cris/translate.c
index 2a4beeb869..617e1b4242 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -558,7 +558,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(env_pc, dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb(0);
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 065a9d320e..6d879003b3 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -2413,7 +2413,7 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
gen_jmp_im(eip);
- tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
+ tcg_gen_exit_tb((uintptr_t)tb + tb_num);
} else {
/* jump to another page: currently not optimized */
gen_jmp_im(eip);
diff --git a/target-lm32/op_helper.c b/target-lm32/op_helper.c
index 2dab9f27b4..8f5ef554d5 100644
--- a/target-lm32/op_helper.c
+++ b/target-lm32/op_helper.c
@@ -6,6 +6,8 @@
#include "hw/lm32/lm32_pic.h"
#include "hw/char/lm32_juart.h"
+#include "exec/softmmu_exec.h"
+
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
#define SHIFT 0
diff --git a/target-lm32/translate.c b/target-lm32/translate.c
index 1247287050..6ea0ecd63b 100644
--- a/target-lm32/translate.c
+++ b/target-lm32/translate.c
@@ -129,7 +129,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
likely(!dc->singlestep_enabled)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_pc, dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->singlestep_enabled) {
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index d562eebef3..0be0a96732 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -869,7 +869,7 @@ static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
(s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(QREG_PC, dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
gen_jmp_im(s, dest);
tcg_gen_exit_tb(0);
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index cd4357703f..0673176957 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -138,7 +138,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb(0);
diff --git a/target-mips/translate.c b/target-mips/translate.c
index e2eb908cf3..ad43d59103 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -3581,7 +3581,7 @@ static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
likely(!ctx->singlestep_enabled)) {
tcg_gen_goto_tb(n);
gen_save_pc(dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
gen_save_pc(dest);
if (ctx->singlestep_enabled) {
diff --git a/target-moxie/helper.c b/target-moxie/helper.c
index b12e4ffcaf..7859102ab7 100644
--- a/target-moxie/helper.c
+++ b/target-moxie/helper.c
@@ -25,6 +25,7 @@
#include "cpu.h"
#include "mmu.h"
#include "exec/exec-all.h"
+#include "exec/softmmu_exec.h"
#include "qemu/host-utils.h"
#include "helper.h"
diff --git a/target-moxie/translate.c b/target-moxie/translate.c
index 8cc0bb7bfb..a93196f47b 100644
--- a/target-moxie/translate.c
+++ b/target-moxie/translate.c
@@ -135,7 +135,7 @@ static inline void gen_goto_tb(CPUMoxieState *env, DisasContext *ctx,
!ctx->singlestep_enabled) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (ctx->singlestep_enabled) {
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index a6050ba6d8..723b77d3b4 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -198,7 +198,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
likely(!dc->singlestep_enabled)) {
tcg_gen_movi_tl(cpu_pc, dest);
tcg_gen_goto_tb(n);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->singlestep_enabled) {
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 711db083e0..422a6bbd2e 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -453,6 +453,8 @@ struct ppc_slb_t {
#define MSR_RI 1 /* Recoverable interrupt 1 */
#define MSR_LE 0 /* Little-endian mode 1 hflags */
+#define LPCR_ILE (1 << (63-38))
+
#define msr_sf ((env->msr >> MSR_SF) & 1)
#define msr_isf ((env->msr >> MSR_ISF) & 1)
#define msr_shv ((env->msr >> MSR_SHV) & 1)
diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index e9fcad8ef6..e957761109 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -611,9 +611,19 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
tlb_flush(env, 1);
}
+#ifdef TARGET_PPC64
+ if (excp_model == POWERPC_EXCP_POWER7) {
+ if (env->spr[SPR_LPCR] & LPCR_ILE) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (msr_ile) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+#else
if (msr_ile) {
new_msr |= (target_ulong)1 << MSR_LE;
}
+#endif
/* Jump to handler */
vector = env->excp_vectors[excp];
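
On POWER7 the endianness taken on an interrupt comes from LPCR[ILE] rather than MSR[ILE], which is what the new TARGET_PPC64 branch above implements and what H_SET_MODE toggles in spapr_hcall.c. The bit arithmetic in isolation, reusing the LPCR_ILE and MSR_LE values from cpu.h:

    #include <stdint.h>
    #include <stdio.h>

    #define LPCR_ILE (1ull << (63 - 38))   /* same bit as cpu.h's LPCR_ILE */
    #define MSR_LE   0                     /* little-endian mode bit */

    int main(void)
    {
        uint64_t lpcr = LPCR_ILE;          /* guest asked for LE interrupts */
        uint64_t new_msr = 0;

        if (lpcr & LPCR_ILE) {
            new_msr |= 1ull << MSR_LE;
        }
        printf("new_msr=%#llx\n", (unsigned long long)new_msr);
        return 0;
    }
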
diff --git a/target-ppc/kvm_ppc.c b/target-ppc/kvm_ppc.c
index 9b8365503b..f769acd44c 100644
--- a/target-ppc/kvm_ppc.c
+++ b/target-ppc/kvm_ppc.c
@@ -15,6 +15,7 @@
#include "qemu/timer.h"
#include "kvm_ppc.h"
#include "sysemu/device_tree.h"
+#include "qemu/main-loop.h"
#define PROC_DEVTREE_PATH "/proc/device-tree"
diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c
index 5dd4e05f78..04a840b016 100644
--- a/target-ppc/mmu_helper.c
+++ b/target-ppc/mmu_helper.c
@@ -2061,7 +2061,7 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
/* ESID = srnum */
rb |= ((uint32_t)srnum & 0xf) << 28;
/* Set the valid bit */
- rb |= 1 << 27;
+ rb |= SLB_ESID_V;
/* Index = ESID */
rb |= (uint32_t)srnum;
@@ -2871,6 +2871,8 @@ void helper_booke206_tlbflush(CPUPPCState *env, uint32_t type)
/*****************************************************************************/
+#include "exec/softmmu_exec.h"
+
#define MMUSUFFIX _mmu
#define SHIFT 0
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index f07d70d866..2da7bc740f 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -428,9 +428,9 @@ EXTRACT_HELPER(CRM, 12, 8);
EXTRACT_HELPER(SR, 16, 4);
/* mtfsf/mtfsfi */
-EXTRACT_HELPER(FPBF, 19, 3);
+EXTRACT_HELPER(FPBF, 23, 3);
EXTRACT_HELPER(FPIMM, 12, 4);
-EXTRACT_HELPER(FPL, 21, 1);
+EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
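
The mtfsf/mtfsfi fix above moves the FPBF and FPL fields to their correct opcode bit positions. A standalone sketch of the extraction, assuming EXTRACT_HELPER(name, shift, nb) expands to a function returning (opcode >> shift) & ((1 << nb) - 1); the opcode value below is made up purely for illustration.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract_field(uint32_t opcode, int shift, int nb)
    {
        return (opcode >> shift) & ((1u << nb) - 1);
    }

    int main(void)
    {
        uint32_t opcode = 5u << 23;   /* illustrative bit pattern only */

        printf("FPBF (shift 23, 3 bits) = %u\n", extract_field(opcode, 23, 3));
        printf("FPL  (shift 25, 1 bit)  = %u\n", extract_field(opcode, 25, 1));
        return 0;
    }
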
@@ -3551,7 +3551,7 @@ static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
likely(!ctx->singlestep_enabled)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_nip, dest & ~3);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(cpu_nip, dest & ~3);
if (unlikely(ctx->singlestep_enabled)) {
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index 609f797083..d2645bad28 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -7227,7 +7227,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
PPC_SEGMENT_64B | PPC_SLBI |
PPC_POPCNTB | PPC_POPCNTWD;
pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX | PPC2_ISA205;
- pcc->msr_mask = 0x800000000204FF36ULL;
+ pcc->msr_mask = 0x800000000204FF37ULL;
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index 1fb76c5264..afe90eb8be 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -1169,7 +1169,7 @@ static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
update_cc_op(s);
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, dest);
- tcg_gen_exit_tb((tcg_target_long)s->tb);
+ tcg_gen_exit_tb((uintptr_t)s->tb);
return EXIT_GOTO_TB;
} else {
tcg_gen_movi_i64(psw_addr, dest);
@@ -1227,13 +1227,13 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
/* Branch not taken. */
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, s->next_pc);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 0);
/* Branch taken. */
gen_set_label(lab);
tcg_gen_goto_tb(1);
tcg_gen_movi_i64(psw_addr, dest);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 1);
ret = EXIT_GOTO_TB;
} else {
@@ -1256,7 +1256,7 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
update_cc_op(s);
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, s->next_pc);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 0);
gen_set_label(lab);
if (is_imm) {
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 59f3d47023..c06b29f1dc 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -186,7 +186,7 @@ static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
/* Use a direct jump if in same page and singlestep not enabled */
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (ctx->singlestep_enabled)
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 093e0e2c78..36615f1979 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -322,7 +322,7 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num,
tcg_gen_goto_tb(tb_num);
tcg_gen_movi_tl(cpu_pc, pc);
tcg_gen_movi_tl(cpu_npc, npc);
- tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
+ tcg_gen_exit_tb((uintptr_t)tb + tb_num);
} else {
/* jump to another page: currently not optimized */
tcg_gen_movi_tl(cpu_pc, pc);
diff --git a/target-unicore32/op_helper.c b/target-unicore32/op_helper.c
index 6443ffec1c..4f9f41eb36 100644
--- a/target-unicore32/op_helper.c
+++ b/target-unicore32/op_helper.c
@@ -239,6 +239,8 @@ uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
}
#ifndef CONFIG_USER_ONLY
+#include "exec/softmmu_exec.h"
+
#define MMUSUFFIX _mmu
#define SHIFT 0
diff --git a/target-unicore32/translate.c b/target-unicore32/translate.c
index 68be1c64e0..1246895f86 100644
--- a/target-unicore32/translate.c
+++ b/target-unicore32/translate.c
@@ -1100,7 +1100,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
gen_set_pc_im(dest);
tcg_gen_exit_tb(0);
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index 01123af707..cf970257db 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -28,6 +28,7 @@
#include "cpu.h"
#include "helper.h"
#include "qemu/host-utils.h"
+#include "exec/softmmu_exec.h"
static void do_unaligned_access(CPUXtensaState *env,
target_ulong addr, int is_write, int is_user, uintptr_t retaddr);
diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
index 504cc539e3..24343bdf60 100644
--- a/target-xtensa/translate.c
+++ b/target-xtensa/translate.c
@@ -400,7 +400,7 @@ static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
} else {
if (slot >= 0) {
tcg_gen_goto_tb(slot);
- tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + slot);
} else {
tcg_gen_exit_tb(0);
}
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 41a17f8a62..6379df1f68 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -88,7 +88,7 @@ static inline void reloc_pc19(void *code_ptr, tcg_target_long target)
}
static inline void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
value += addend;
@@ -423,14 +423,14 @@ static inline void tcg_out_mov(TCGContext *s,
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ldst(s, (type == TCG_TYPE_I64) ? LDST_64 : LDST_32, LDST_LD,
arg, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ldst(s, (type == TCG_TYPE_I64) ? LDST_64 : LDST_32, LDST_ST,
arg, arg1, arg2);
@@ -778,8 +778,6 @@ static inline void tcg_out_nop(TCGContext *s)
}
#ifdef CONFIG_SOFTMMU
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 51e50920b2..d3a1bc2437 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -61,6 +61,8 @@ typedef enum {
#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_mulu2_i32 0
#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
@@ -87,13 +89,14 @@ typedef enum {
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
enum {
TCG_AREG0 = TCG_REG_X19,
};
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
__builtin___clear_cache((char *)start, (char *)stop);
}
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 6c4854dbb0..eb0e84ce44 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -108,21 +108,21 @@ static const int tcg_target_call_oarg_regs[2] = {
#define TCG_REG_TMP TCG_REG_R12
-static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
+static inline void reloc_abs32(void *code_ptr, intptr_t target)
{
*(uint32_t *) code_ptr = target;
}
-static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
+static inline void reloc_pc24(void *code_ptr, intptr_t target)
{
- uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);
+ uint32_t offset = ((target - ((intptr_t)code_ptr + 8)) >> 2);
*(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
| (offset & 0xffffff);
}
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
switch (type) {
case R_ARM_ABS32:
@@ -1058,8 +1058,6 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
#ifdef CONFIG_SOFTMMU
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
@@ -2065,13 +2063,13 @@ static void tcg_target_init(TCGContext *s)
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index 5cd9d6a679..9482bfa993 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -80,6 +80,8 @@ extern bool use_idiv_instructions;
#define TCG_TARGET_HAS_deposit_i32 1
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_muls2_i32 1
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
#define TCG_TARGET_HAS_rem_i32 0
@@ -90,15 +92,14 @@ enum {
TCG_AREG0 = TCG_REG_R6,
};
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
#if QEMU_GNUC_PREREQ(4, 1)
__builtin___clear_cache((char *) start, (char *) stop);
#else
- register unsigned long _beg __asm ("a1") = start;
- register unsigned long _end __asm ("a2") = stop;
- register unsigned long _flg __asm ("a3") = 0;
+ register uintptr_t _beg __asm("a1") = start;
+ register uintptr_t _end __asm("a2") = stop;
+ register uintptr_t _flg __asm("a3") = 0;
__asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
index 68f77ba4dd..236b39c31f 100644
--- a/tcg/hppa/tcg-target.c
+++ b/tcg/hppa/tcg-target.c
@@ -22,6 +22,10 @@
* THE SOFTWARE.
*/
+#if TCG_TARGET_REG_BITS != 32
+#error unsupported
+#endif
+
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
@@ -145,14 +149,14 @@ static int reassemble_21(int as21)
#define R_PARISC_PCREL12F R_PARISC_NONE
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
uint32_t *insn_ptr = (uint32_t *)code_ptr;
uint32_t insn = *insn_ptr;
- tcg_target_long pcrel;
+ intptr_t pcrel;
value += addend;
- pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;
+ pcrel = (value - ((intptr_t)code_ptr + 8)) >> 2;
switch (type) {
case R_PARISC_PCREL12F:
@@ -388,14 +392,14 @@ static void tcg_out_ldst(TCGContext *s, int ret, int addr,
/* This function is required by tcg.c. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}
/* This function is required by tcg.c. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}
@@ -906,8 +910,6 @@ static void tcg_out_movcond(TCGContext *s, int cond, TCGArg ret,
}
#if defined(CONFIG_SOFTMMU)
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
diff --git a/tcg/hppa/tcg-target.h b/tcg/hppa/tcg-target.h
index 25467bdd43..122edce7a7 100644
--- a/tcg/hppa/tcg-target.h
+++ b/tcg/hppa/tcg-target.h
@@ -25,10 +25,6 @@
#ifndef TCG_TARGET_HPPA
#define TCG_TARGET_HPPA 1
-#if TCG_TARGET_REG_BITS != 32
-#error unsupported
-#endif
-
#define TCG_TARGET_WORDS_BIGENDIAN
#define TCG_TARGET_NB_REGS 32
@@ -100,6 +96,8 @@ typedef enum {
#define TCG_TARGET_HAS_deposit_i32 1
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, 0, rs */
@@ -109,8 +107,7 @@ typedef enum {
#define TCG_AREG0 TCG_REG_R17
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
start &= ~31;
while (start <= stop) {
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 12a7ca3440..c1f07415ab 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -112,7 +112,7 @@ static bool have_cmov;
static uint8_t *tb_ret_addr;
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
value += addend;
switch(type) {
@@ -430,8 +430,7 @@ static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
that will follow the instruction. */
static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
- int index, int shift,
- tcg_target_long offset)
+ int index, int shift, intptr_t offset)
{
int mod, len;
@@ -439,8 +438,8 @@ static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
if (TCG_TARGET_REG_BITS == 64) {
/* Try for a rip-relative addressing mode. This has replaced
the 32-bit-mode absolute addressing encoding. */
- tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm;
- tcg_target_long disp = offset - pc;
+ intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
+ intptr_t disp = offset - pc;
if (disp == (int32_t)disp) {
tcg_out_opc(s, opc, r, 0, 0);
tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
@@ -514,7 +513,7 @@ static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
/* A simplification of the above with no index or shift. */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
- int rm, tcg_target_long offset)
+ int rm, intptr_t offset)
{
tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
@@ -559,7 +558,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
}
/* Try a 7 byte pc-relative lea before the 10 byte movq. */
- diff = arg - ((tcg_target_long)s->code_ptr + 7);
+ diff = arg - ((uintptr_t)s->code_ptr + 7);
if (diff == (int32_t)diff) {
tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
@@ -595,14 +594,14 @@ static inline void tcg_out_pop(TCGContext *s, int reg)
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
@@ -757,7 +756,7 @@ static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
TCGLabel *l = &s->labels[label_index];
if (l->has_value) {
- val = l->u.value - (tcg_target_long)s->code_ptr;
+ val = l->u.value - (intptr_t)s->code_ptr;
val1 = val - 2;
if ((int8_t)val1 == val1) {
if (opc == -1) {
@@ -997,9 +996,9 @@ static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
}
#endif
-static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
+static void tcg_out_branch(TCGContext *s, int call, uintptr_t dest)
{
- tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5;
+ intptr_t disp = dest - (intptr_t)s->code_ptr - 5;
if (disp == (int32_t)disp) {
tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
@@ -1011,27 +1010,24 @@ static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
}
}
-static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest)
+static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
{
tcg_out_branch(s, 1, dest);
}
-static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
+static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
{
tcg_out_branch(s, 0, dest);
}
#if defined(CONFIG_SOFTMMU)
-
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)
*/
static const void * const qemu_ld_helpers[4] = {
- helper_ret_ldb_mmu,
- helper_ret_ldw_mmu,
- helper_ret_ldl_mmu,
+ helper_ret_ldub_mmu,
+ helper_ret_lduw_mmu,
+ helper_ret_ldul_mmu,
helper_ret_ldq_mmu,
};
@@ -1086,33 +1082,46 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
const int addrlo = args[addrlo_idx];
const int r0 = TCG_REG_L0;
const int r1 = TCG_REG_L1;
- TCGType type = TCG_TYPE_I32;
- int rexw = 0;
+ TCGType ttype = TCG_TYPE_I32;
+ TCGType htype = TCG_TYPE_I32;
+ int trexw = 0, hrexw = 0;
- if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
- type = TCG_TYPE_I64;
- rexw = P_REXW;
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (TARGET_LONG_BITS == 64) {
+ ttype = TCG_TYPE_I64;
+ trexw = P_REXW;
+ }
+ if (TCG_TYPE_PTR == TCG_TYPE_I64) {
+ htype = TCG_TYPE_I64;
+ hrexw = P_REXW;
+ }
}
- tcg_out_mov(s, type, r0, addrlo);
- tcg_out_mov(s, type, r1, addrlo);
+ tcg_out_mov(s, htype, r0, addrlo);
+ tcg_out_mov(s, ttype, r1, addrlo);
- tcg_out_shifti(s, SHIFT_SHR + rexw, r0,
+ tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
- tgen_arithi(s, ARITH_AND + rexw, r1,
+ tgen_arithi(s, ARITH_AND + trexw, r1,
TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
- tgen_arithi(s, ARITH_AND + rexw, r0,
+ tgen_arithi(s, ARITH_AND + hrexw, r0,
(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
- tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0,
+ tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
offsetof(CPUArchState, tlb_table[mem_index][0])
+ which);
/* cmp 0(r0), r1 */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r1, r0, 0);
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
- tcg_out_mov(s, type, r1, addrlo);
+ /* Prepare for both the fast path add of the tlb addend, and the slow
+ path function argument setup. There are two cases worth note:
+ For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
+ before the fastpath ADDQ below. For 64-bit guest and x32 host, MOVQ
+ copies the entire guest address for the slow path, while truncation
+ for the 32-bit host happens with the fastpath ADDL below. */
+ tcg_out_mov(s, ttype, r1, addrlo);
/* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -1132,7 +1141,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
/* TLB Hit. */
/* add addend(r0), r1 */
- tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r1, r0,
+ tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
offsetof(CPUTLBEntry, addend) - which);
}
#elif defined(__x86_64__) && defined(__linux__)
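
For reference, a C-level sketch of the softmmu fast-path lookup that the shift/and/cmp/add sequence emitted by tcg_out_tlb_load implements. The constants and _ex names are illustrative placeholders (the real values come from the CPU headers), and the index is computed as an array subscript instead of the scaled byte offset used by the generated code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS_EX 12
    #define TARGET_PAGE_MASK_EX (~(((uint64_t)1 << TARGET_PAGE_BITS_EX) - 1))
    #define CPU_TLB_BITS_EX     8
    #define CPU_TLB_SIZE_EX     (1 << CPU_TLB_BITS_EX)

    struct tlb_entry_ex {
        uint64_t addr_read;   /* page tag for reads */
        uintptr_t addend;     /* host minus guest address for this page */
    };

    static bool tlb_lookup_ex(const struct tlb_entry_ex *tlb, uint64_t addr,
                              int s_bits, uintptr_t *host_addr)
    {
        unsigned idx = (addr >> TARGET_PAGE_BITS_EX) & (CPU_TLB_SIZE_EX - 1);
        /* Folding the low s_bits into the tag sends accesses that are not
           aligned to the access size (and so might cross a page) to the
           slow path as well. */
        uint64_t tag = addr & (TARGET_PAGE_MASK_EX | ((1u << s_bits) - 1));

        if (tlb[idx].addr_read == tag) {                    /* cmp 0(r0), r1 */
            *host_addr = (uintptr_t)addr + tlb[idx].addend; /* fast-path add */
            return true;
        }
        return false;                                       /* jne slow_path */
    }

    int main(void)
    {
        struct tlb_entry_ex tlb[CPU_TLB_SIZE_EX] = { { 0 } };
        uint64_t addr = 0x1234;
        uintptr_t host = 0;

        tlb[(addr >> TARGET_PAGE_BITS_EX) & (CPU_TLB_SIZE_EX - 1)] =
            (struct tlb_entry_ex){ addr & TARGET_PAGE_MASK_EX, 0x1000 };
        printf("hit=%d host=%lx\n", (int)tlb_lookup_ex(tlb, addr, 0, &host),
               (unsigned long)host);
        return 0;
    }
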
@@ -1154,8 +1163,7 @@ static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
- int base, tcg_target_long ofs, int seg,
- int sizeop)
+ int base, intptr_t ofs, int seg, int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
const int bswap = 1;
@@ -1305,7 +1313,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
}
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
- int base, tcg_target_long ofs, int seg,
+ int base, intptr_t ofs, int seg,
int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
@@ -1470,12 +1478,6 @@ static void add_qemu_ldst_label(TCGContext *s,
}
}
-/* See the GETPC definition in include/exec/exec-all.h. */
-static inline uintptr_t do_getpc(uint8_t *raddr)
-{
- return (uintptr_t)raddr - 1;
-}
-
/*
* Generate code for the slow path for a load at the end of block
*/
@@ -1509,17 +1511,17 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
ofs += 4;
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, do_getpc(l->raddr));
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
} else {
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
/* The second argument is already loaded with addrlo. */
tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
l->mem_index);
tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
- do_getpc(l->raddr));
+ (uintptr_t)l->raddr);
}
- tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
+ tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[s_bits]);
data_reg = l->datalo_reg;
switch(opc) {
@@ -1529,20 +1531,17 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
case 1 | 4:
tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
break;
- case 0:
- tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
- break;
- case 1:
- tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
- break;
- case 2:
- tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
- break;
#if TCG_TARGET_REG_BITS == 64
case 2 | 4:
tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
break;
#endif
+ case 0:
+ case 1:
+ /* Note that the helpers have zero-extended to tcg_target_long. */
+ case 2:
+ tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+ break;
case 3:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
@@ -1560,7 +1559,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
}
/* Jump to the code corresponding to next IR of qemu_st */
- tcg_out_jmp(s, (tcg_target_long)l->raddr);
+ tcg_out_jmp(s, (uintptr_t)l->raddr);
}
/*
@@ -1571,6 +1570,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
int opc = l->opc;
int s_bits = opc & 3;
uint8_t **label_ptr = &l->label_ptr[0];
+ TCGReg retaddr;
/* resolve label address */
*(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
@@ -1603,10 +1603,10 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
ofs += 4;
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, do_getpc(l->raddr));
+ retaddr = TCG_REG_EAX;
+ tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
} else {
- uintptr_t pc;
-
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
/* The second argument is already loaded with addrlo. */
tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
@@ -1614,20 +1614,19 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
l->mem_index);
- pc = do_getpc(l->raddr);
if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
- tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[4], pc);
- } else if (pc == (int32_t)pc) {
- tcg_out_sti(s, TCG_TYPE_PTR, TCG_REG_ESP, 0, pc);
+ retaddr = tcg_target_call_iarg_regs[4];
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, pc);
- tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RAX, TCG_REG_ESP, 0);
+ retaddr = TCG_REG_RAX;
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
}
}
- tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
-
- tcg_out_jmp(s, (tcg_target_long)l->raddr);
+ /* "Tail call" to the helper, with the return address back inline. */
+ tcg_out_push(s, retaddr);
+ tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]);
}
/*
@@ -1668,7 +1667,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
switch(opc) {
case INDEX_op_exit_tb:
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
- tcg_out_jmp(s, (tcg_target_long) tb_ret_addr);
+ tcg_out_jmp(s, (uintptr_t)tb_ret_addr);
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
@@ -1679,7 +1678,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
} else {
/* indirect jump method */
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
- (tcg_target_long)(s->tb_next + args[0]));
+ (intptr_t)(s->tb_next + args[0]));
}
s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
break;
@@ -2372,7 +2371,7 @@ static DebugFrame debug_frame = {
#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
- debug_frame.fde.func_start = (tcg_target_long) buf;
+ debug_frame.fde.func_start = (uintptr_t)buf;
debug_frame.fde.func_len = buf_size;
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index e3f6bb965f..d32d7ef6f0 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -24,12 +24,14 @@
#ifndef TCG_TARGET_I386
#define TCG_TARGET_I386 1
-//#define TCG_TARGET_WORDS_BIGENDIAN
+#undef TCG_TARGET_WORDS_BIGENDIAN
-#if TCG_TARGET_REG_BITS == 64
-# define TCG_TARGET_NB_REGS 16
+#ifdef __x86_64__
+# define TCG_TARGET_REG_BITS 64
+# define TCG_TARGET_NB_REGS 16
#else
-# define TCG_TARGET_NB_REGS 8
+# define TCG_TARGET_REG_BITS 32
+# define TCG_TARGET_NB_REGS 8
#endif
typedef enum {
@@ -96,6 +98,8 @@ typedef enum {
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
#define TCG_TARGET_HAS_muls2_i32 1
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_div2_i64 1
@@ -122,6 +126,8 @@ typedef enum {
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 1
#define TCG_TARGET_HAS_muls2_i64 1
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
#endif
#define TCG_TARGET_deposit_i32_valid(ofs, len) \
@@ -135,8 +141,7 @@ typedef enum {
# define TCG_AREG0 TCG_REG_EBP
#endif
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
}
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index 2373d9ef79..cd4f1ae1db 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -668,16 +668,16 @@ static inline uint64_t tcg_opc_x3(int qp, uint64_t opc, uint64_t imm)
* Relocations
*/
-static inline void reloc_pcrel21b (void *pc, tcg_target_long target)
+static inline void reloc_pcrel21b(void *pc, intptr_t target)
{
uint64_t imm;
int64_t disp;
int slot;
- slot = (tcg_target_long) pc & 3;
- pc = (void *)((tcg_target_long) pc & ~3);
+ slot = (intptr_t)pc & 3;
+ pc = (void *)((intptr_t)pc & ~3);
- disp = target - (tcg_target_long) pc;
+ disp = target - (intptr_t)pc;
imm = (uint64_t) disp >> 4;
switch(slot) {
@@ -728,12 +728,12 @@ static inline uint64_t get_reloc_pcrel21b (void *pc)
}
}
-static inline void reloc_pcrel60b (void *pc, tcg_target_long target)
+static inline void reloc_pcrel60b(void *pc, intptr_t target)
{
int64_t disp;
uint64_t imm;
- disp = target - (tcg_target_long) pc;
+ disp = target - (intptr_t)pc;
imm = (uint64_t) disp >> 4;
*(uint64_t *)(pc + 8) = (*(uint64_t *)(pc + 8) & 0xf700000fff800000ull)
@@ -759,7 +759,7 @@ static inline uint64_t get_reloc_pcrel60b (void *pc)
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
value += addend;
switch (type) {
@@ -993,7 +993,7 @@ static inline void tcg_out_st_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
if (type == TCG_TYPE_I32) {
tcg_out_ld_rel(s, OPC_LD4_M1, arg, arg1, arg2);
@@ -1003,7 +1003,7 @@ static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
if (type == TCG_TYPE_I32) {
tcg_out_st_rel(s, OPC_ST4_M4, arg, arg1, arg2);
@@ -1490,9 +1490,6 @@ static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
}
#if defined(CONFIG_SOFTMMU)
-
-#include "exec/softmmu_defs.h"
-
/* Load and compare a TLB entry, and return the result in (p6, p7).
R2 is loaded with the address of the addend TLB entry.
R57 is loaded with the address, zero extented on 32-bit targets. */
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index f32d5199cb..4330c9cdd3 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -146,6 +146,10 @@ typedef enum {
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_mulsh_i64 0
#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
@@ -158,8 +162,7 @@ typedef enum {
#define TCG_AREG0 TCG_REG_R7
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
start = start & ~(32UL - 1UL);
stop = (stop + (32UL - 1UL)) & ~(32UL - 1UL);
diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c
index 793532ec95..5f0a65b4ea 100644
--- a/tcg/mips/tcg-target.c
+++ b/tcg/mips/tcg-target.c
@@ -108,33 +108,33 @@ static const TCGReg tcg_target_call_oarg_regs[2] = {
static uint8_t *tb_ret_addr;
-static inline uint32_t reloc_lo16_val (void *pc, tcg_target_long target)
+static inline uint32_t reloc_lo16_val(void *pc, intptr_t target)
{
return target & 0xffff;
}
-static inline void reloc_lo16 (void *pc, tcg_target_long target)
+static inline void reloc_lo16(void *pc, intptr_t target)
{
*(uint32_t *) pc = (*(uint32_t *) pc & ~0xffff)
| reloc_lo16_val(pc, target);
}
-static inline uint32_t reloc_hi16_val (void *pc, tcg_target_long target)
+static inline uint32_t reloc_hi16_val(void *pc, intptr_t target)
{
return (target >> 16) & 0xffff;
}
-static inline void reloc_hi16 (void *pc, tcg_target_long target)
+static inline void reloc_hi16(void *pc, intptr_t target)
{
*(uint32_t *) pc = (*(uint32_t *) pc & ~0xffff)
| reloc_hi16_val(pc, target);
}
-static inline uint32_t reloc_pc16_val (void *pc, tcg_target_long target)
+static inline uint32_t reloc_pc16_val(void *pc, intptr_t target)
{
int32_t disp;
- disp = target - (tcg_target_long) pc - 4;
+ disp = target - (intptr_t)pc - 4;
if (disp != (disp << 14) >> 14) {
tcg_abort ();
}
@@ -157,14 +157,14 @@ static inline uint32_t reloc_26_val (void *pc, tcg_target_long target)
return (target >> 2) & 0x3ffffff;
}
-static inline void reloc_pc26 (void *pc, tcg_target_long target)
+static inline void reloc_pc26(void *pc, intptr_t target)
{
*(uint32_t *) pc = (*(uint32_t *) pc & ~0x3ffffff)
| reloc_26_val(pc, target);
}
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
value += addend;
switch(type) {
@@ -422,83 +422,83 @@ static inline void tcg_out_movi(TCGContext *s, TCGType type,
static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
{
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
-#else
- /* ret and arg can't be register at */
- if (ret == TCG_REG_AT || arg == TCG_REG_AT) {
- tcg_abort();
- }
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
+ } else {
+ /* ret and arg can't be register at */
+ if (ret == TCG_REG_AT || arg == TCG_REG_AT) {
+ tcg_abort();
+ }
- tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
- tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
-#endif
+ tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
+ tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
+ tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
+ }
}
static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
- tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
-#else
- /* ret and arg can't be register at */
- if (ret == TCG_REG_AT || arg == TCG_REG_AT) {
- tcg_abort();
- }
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
+ tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
+ } else {
+ /* ret and arg can't be register at */
+ if (ret == TCG_REG_AT || arg == TCG_REG_AT) {
+ tcg_abort();
+ }
- tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
- tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
-#endif
+ tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
+ tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
+ tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
+ }
}
static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
{
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
- tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
-#else
- /* ret and arg must be different and can't be register at */
- if (ret == arg || ret == TCG_REG_AT || arg == TCG_REG_AT) {
- tcg_abort();
- }
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
+ tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
+ } else {
+ /* ret and arg must be different and can't be register at */
+ if (ret == arg || ret == TCG_REG_AT || arg == TCG_REG_AT) {
+ tcg_abort();
+ }
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
- tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 24);
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
+ tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 24);
+ tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_AT, arg, 0xff00);
- tcg_out_opc_sa(s, OPC_SLL, TCG_REG_AT, TCG_REG_AT, 8);
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_AT, arg, 0xff00);
+ tcg_out_opc_sa(s, OPC_SLL, TCG_REG_AT, TCG_REG_AT, 8);
+ tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
- tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_AT, TCG_REG_AT, 0xff00);
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
-#endif
+ tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_AT, TCG_REG_AT, 0xff00);
+ tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
+ }
}
static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
-#else
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
- tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
-#endif
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
+ } else {
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
+ tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
+ }
}
static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
-#else
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
- tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
-#endif
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
+ } else {
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
+ tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
+ }
}
static inline void tcg_out_ldst(TCGContext *s, int opc, TCGArg arg,
@@ -514,13 +514,13 @@ static inline void tcg_out_ldst(TCGContext *s, int opc, TCGArg arg,
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
}
@@ -919,9 +919,6 @@ static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
}
#if defined(CONFIG_SOFTMMU)
-
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
@@ -1406,12 +1403,12 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_mov(s, TCG_TYPE_I32, args[0], TCG_REG_AT);
break;
case INDEX_op_mul_i32:
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1)
- tcg_out_opc_reg(s, OPC_MUL, args[0], args[1], args[2]);
-#else
- tcg_out_opc_reg(s, OPC_MULT, 0, args[1], args[2]);
- tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
-#endif
+ if (use_mips32_instructions) {
+ tcg_out_opc_reg(s, OPC_MUL, args[0], args[1], args[2]);
+ } else {
+ tcg_out_opc_reg(s, OPC_MULT, 0, args[1], args[2]);
+ tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
+ }
break;
case INDEX_op_muls2_i32:
tcg_out_opc_reg(s, OPC_MULT, 0, args[2], args[3]);
@@ -1423,6 +1420,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
tcg_out_opc_reg(s, OPC_MFHI, args[1], 0, 0);
break;
+ case INDEX_op_mulsh_i32:
+ tcg_out_opc_reg(s, OPC_MULT, 0, args[1], args[2]);
+ tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
+ break;
+ case INDEX_op_muluh_i32:
+ tcg_out_opc_reg(s, OPC_MULTU, 0, args[1], args[2]);
+ tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
+ break;
case INDEX_op_div_i32:
tcg_out_opc_reg(s, OPC_DIV, 0, args[1], args[2]);
tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
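
The new INDEX_op_mulsh_i32 and INDEX_op_muluh_i32 cases above read the upper half of a 32x32 multiply straight out of HI (MULT or MULTU followed by MFHI). A plain-C sketch of the value those ops produce, for reference; this is not TCG code.

    #include <stdint.h>
    #include <stdio.h>

    /* High 32 bits of an unsigned 32x32 -> 64 multiply (muluh). */
    static uint32_t muluh32(uint32_t a, uint32_t b)
    {
        return (uint32_t)(((uint64_t)a * b) >> 32);
    }

    /* High 32 bits of a signed 32x32 -> 64 multiply (mulsh). */
    static int32_t mulsh32(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 32);
    }

    int main(void)
    {
        printf("muluh32 = 0x%08x\n", muluh32(0xdeadbeefu, 0x12345678u));
        printf("mulsh32 = %d\n", mulsh32(-3, 0x40000000));
        return 0;
    }
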
@@ -1506,20 +1511,19 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
- /* The bswap routines do not work on non-R2 CPU. In that case
- we let TCG generating the corresponding code. */
case INDEX_op_bswap16_i32:
- tcg_out_bswap16(s, args[0], args[1]);
+ tcg_out_opc_reg(s, OPC_WSBH, args[0], 0, args[1]);
break;
case INDEX_op_bswap32_i32:
- tcg_out_bswap32(s, args[0], args[1]);
+ tcg_out_opc_reg(s, OPC_WSBH, args[0], 0, args[1]);
+ tcg_out_opc_sa(s, OPC_ROTR, args[0], args[0], 16);
break;
case INDEX_op_ext8s_i32:
- tcg_out_ext8s(s, args[0], args[1]);
+ tcg_out_opc_reg(s, OPC_SEB, args[0], 0, args[1]);
break;
case INDEX_op_ext16s_i32:
- tcg_out_ext16s(s, args[0], args[1]);
+ tcg_out_opc_reg(s, OPC_SEH, args[0], 0, args[1]);
break;
case INDEX_op_deposit_i32:
@@ -1602,6 +1606,8 @@ static const TCGTargetOpDef mips_op_defs[] = {
{ INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
{ INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
{ INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
+ { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
+ { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
{ INDEX_op_div_i32, { "r", "rZ", "rZ" } },
{ INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
{ INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
@@ -1617,29 +1623,19 @@ static const TCGTargetOpDef mips_op_defs[] = {
{ INDEX_op_shl_i32, { "r", "rZ", "ri" } },
{ INDEX_op_shr_i32, { "r", "rZ", "ri" } },
{ INDEX_op_sar_i32, { "r", "rZ", "ri" } },
-#if TCG_TARGET_HAS_rot_i32
{ INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
{ INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
-#endif
-#if TCG_TARGET_HAS_bswap16_i32
{ INDEX_op_bswap16_i32, { "r", "r" } },
-#endif
-#if TCG_TARGET_HAS_bswap32_i32
{ INDEX_op_bswap32_i32, { "r", "r" } },
-#endif
{ INDEX_op_ext8s_i32, { "r", "rZ" } },
{ INDEX_op_ext16s_i32, { "r", "rZ" } },
-#if TCG_TARGET_HAS_deposit_i32
{ INDEX_op_deposit_i32, { "r", "0", "rZ" } },
-#endif
{ INDEX_op_brcond_i32, { "rZ", "rZ" } },
-#if TCG_TARGET_HAS_movcond_i32
{ INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
-#endif
{ INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
{ INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
@@ -1688,6 +1684,86 @@ static int tcg_target_callee_save_regs[] = {
TCG_REG_RA, /* should be last for ABI compliance */
};
+/* The Linux kernel doesn't provide any information about the available
+ instruction set. Probe it using a signal handler. */
+
+#include <signal.h>
+
+#ifndef use_movnz_instructions
+bool use_movnz_instructions = false;
+#endif
+
+#ifndef use_mips32_instructions
+bool use_mips32_instructions = false;
+#endif
+
+#ifndef use_mips32r2_instructions
+bool use_mips32r2_instructions = false;
+#endif
+
+static volatile sig_atomic_t got_sigill;
+
+static void sigill_handler(int signo, siginfo_t *si, void *data)
+{
+ /* Skip the faulty instruction */
+ ucontext_t *uc = (ucontext_t *)data;
+ uc->uc_mcontext.pc += 4;
+
+ got_sigill = 1;
+}
+
+static void tcg_target_detect_isa(void)
+{
+ struct sigaction sa_old, sa_new;
+
+ memset(&sa_new, 0, sizeof(sa_new));
+ sa_new.sa_flags = SA_SIGINFO;
+ sa_new.sa_sigaction = sigill_handler;
+ sigaction(SIGILL, &sa_new, &sa_old);
+
+ /* Probe for movn/movz, necessary to implement movcond. */
+#ifndef use_movnz_instructions
+ got_sigill = 0;
+ asm volatile(".set push\n"
+ ".set mips32\n"
+ "movn $zero, $zero, $zero\n"
+ "movz $zero, $zero, $zero\n"
+ ".set pop\n"
+ : : : );
+ use_movnz_instructions = !got_sigill;
+#endif
+
+ /* Probe for MIPS32 instructions. As no subsetting is allowed
+ by the specification, it is only necessary to probe for one
+ of the instructions. */
+#ifndef use_mips32_instructions
+ got_sigill = 0;
+ asm volatile(".set push\n"
+ ".set mips32\n"
+ "mul $zero, $zero\n"
+ ".set pop\n"
+ : : : );
+ use_mips32_instructions = !got_sigill;
+#endif
+
+ /* Probe for MIPS32r2 instructions if MIPS32 instructions are
+ available. As no subsetting is allowed by the specification,
+ it is only necessary to probe for one of the instructions. */
+#ifndef use_mips32r2_instructions
+ if (use_mips32_instructions) {
+ got_sigill = 0;
+ asm volatile(".set push\n"
+ ".set mips32r2\n"
+ "seb $zero, $zero\n"
+ ".set pop\n"
+ : : : );
+ use_mips32r2_instructions = !got_sigill;
+ }
+#endif
+
+ sigaction(SIGILL, &sa_old, NULL);
+}
+
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
@@ -1727,6 +1803,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
static void tcg_target_init(TCGContext *s)
{
+ tcg_target_detect_isa();
tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
tcg_regset_set(tcg_target_call_clobber_regs,
(1 << TCG_REG_V0) |
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index a438950bc1..c37252269f 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -77,40 +77,50 @@ typedef enum {
#define TCG_TARGET_CALL_STACK_OFFSET 16
#define TCG_TARGET_CALL_ALIGN_ARGS 1
+/* MOVN/MOVZ instructions detection */
+#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
+ defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \
+ defined(_MIPS_ARCH_MIPS4)
+#define use_movnz_instructions 1
+#else
+extern bool use_movnz_instructions;
+#endif
+
+/* MIPS32 instruction set detection */
+#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1)
+#define use_mips32_instructions 1
+#else
+extern bool use_mips32_instructions;
+#endif
+
+/* MIPS32R2 instruction set detection */
+#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
+#define use_mips32r2_instructions 1
+#else
+extern bool use_mips32r2_instructions;
+#endif
+
/* optional instructions */
#define TCG_TARGET_HAS_div_i32 1
#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
#define TCG_TARGET_HAS_andc_i32 0
#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_muls2_i32 1
+#define TCG_TARGET_HAS_muluh_i32 1
+#define TCG_TARGET_HAS_mulsh_i32 1
-/* optional instructions only implemented on MIPS4, MIPS32 and Loongson 2 */
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
- defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \
- defined(_MIPS_ARCH_MIPS4)
-#define TCG_TARGET_HAS_movcond_i32 1
-#else
-#define TCG_TARGET_HAS_movcond_i32 0
-#endif
-
-/* optional instructions only implemented on MIPS32R2 */
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_deposit_i32 1
-#else
-#define TCG_TARGET_HAS_bswap16_i32 0
-#define TCG_TARGET_HAS_bswap32_i32 0
-#define TCG_TARGET_HAS_rot_i32 0
-#define TCG_TARGET_HAS_deposit_i32 0
-#endif
+/* optional instructions detected at runtime */
+#define TCG_TARGET_HAS_movcond_i32 use_movnz_instructions
+#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_bswap32_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */
@@ -125,8 +135,7 @@ typedef enum {
#include <sys/cachectl.h>
#endif
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
cacheflush ((void *)start, stop-start, ICACHE);
}
diff --git a/tcg/optimize.c b/tcg/optimize.c
index b35868afbc..b29bf25b67 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -198,6 +198,8 @@ static TCGOpcode op_to_mov(TCGOpcode op)
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
+ uint64_t l64, h64;
+
switch (op) {
CASE_OP_32_64(add):
return x + y;
@@ -290,6 +292,37 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
case INDEX_op_ext32u_i64:
return (uint32_t)x;
+ case INDEX_op_muluh_i32:
+ return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
+ case INDEX_op_mulsh_i32:
+ return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
+
+ case INDEX_op_muluh_i64:
+ mulu64(&l64, &h64, x, y);
+ return h64;
+ case INDEX_op_mulsh_i64:
+ muls64(&l64, &h64, x, y);
+ return h64;
+
+ case INDEX_op_div_i32:
+ /* Avoid crashing on divide by zero, otherwise undefined. */
+ return (int32_t)x / ((int32_t)y ? : 1);
+ case INDEX_op_divu_i32:
+ return (uint32_t)x / ((uint32_t)y ? : 1);
+ case INDEX_op_div_i64:
+ return (int64_t)x / ((int64_t)y ? : 1);
+ case INDEX_op_divu_i64:
+ return (uint64_t)x / ((uint64_t)y ? : 1);
+
+ case INDEX_op_rem_i32:
+ return (int32_t)x % ((int32_t)y ? : 1);
+ case INDEX_op_remu_i32:
+ return (uint32_t)x % ((uint32_t)y ? : 1);
+ case INDEX_op_rem_i64:
+ return (int64_t)x % ((int64_t)y ? : 1);
+ case INDEX_op_remu_i64:
+ return (uint64_t)x % ((uint64_t)y ? : 1);
+
default:
fprintf(stderr,
"Unrecognized operation %d in do_constant_folding.\n", op);
@@ -531,6 +564,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
CASE_OP_32_64(eqv):
CASE_OP_32_64(nand):
CASE_OP_32_64(nor):
+ CASE_OP_32_64(muluh):
+ CASE_OP_32_64(mulsh):
swap_commutative(args[0], &args[1], &args[2]);
break;
CASE_OP_32_64(brcond):
@@ -771,6 +806,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
switch (op) {
CASE_OP_32_64(and):
CASE_OP_32_64(mul):
+ CASE_OP_32_64(muluh):
+ CASE_OP_32_64(mulsh):
if ((temps[args[2]].state == TCG_TEMP_CONST
&& temps[args[2]].val == 0)) {
s->gen_opc_buf[op_index] = op_to_movi(op);
@@ -882,6 +919,12 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
CASE_OP_32_64(eqv):
CASE_OP_32_64(nand):
CASE_OP_32_64(nor):
+ CASE_OP_32_64(muluh):
+ CASE_OP_32_64(mulsh):
+ CASE_OP_32_64(div):
+ CASE_OP_32_64(divu):
+ CASE_OP_32_64(rem):
+ CASE_OP_32_64(remu):
if (temps[args[1]].state == TCG_TEMP_CONST
&& temps[args[2]].state == TCG_TEMP_CONST) {
s->gen_opc_buf[op_index] = op_to_movi(op);
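
The optimize.c changes above let the constant folder handle muluh/mulsh and the div/rem family; the y ? : 1 guards (GNU C shorthand for y ? y : 1) only keep the folder itself from trapping when a constant divisor is zero at translation time. A small standalone sketch of that guard, written out without the shorthand and with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    static int32_t fold_div_i32(int32_t x, int32_t y)
    {
        return x / (y ? y : 1);
    }

    static uint32_t fold_remu_i32(uint32_t x, uint32_t y)
    {
        return x % (y ? y : 1);
    }

    int main(void)
    {
        printf("folding 7 / 0  -> %d\n", fold_div_i32(7, 0));
        printf("folding 7 %% 3 -> %u\n", fold_remu_i32(7, 3));
        return 0;
    }
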
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index 453ab6b580..25955563b8 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -204,7 +204,7 @@ static void reloc_pc14 (void *pc, tcg_target_long target)
}
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
value += addend;
switch (type) {
@@ -549,8 +549,6 @@ static void add_qemu_ldst_label (TCGContext *s,
label->label_ptr[0] = label_ptr;
}
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
@@ -1062,14 +1060,14 @@ static void tcg_target_qemu_prologue (TCGContext *s)
#endif
}
-static void tcg_out_ld (TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
- tcg_target_long arg2)
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
+ intptr_t arg2)
{
tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
}
-static void tcg_out_st (TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
- tcg_target_long arg2)
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
+ intptr_t arg2)
{
tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
}
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index b42d97cc24..c9f8ff5206 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -96,11 +96,13 @@ typedef enum {
#define TCG_TARGET_HAS_deposit_i32 1
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_AREG0 TCG_REG_R27
#define tcg_qemu_tb_exec(env, tb_ptr) \
- ((long __attribute__ ((longcall)) \
+ ((uintptr_t __attribute__ ((longcall)) \
(*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 0678de2045..0bd1e0ce8c 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -208,7 +208,7 @@ static void reloc_pc14 (void *pc, tcg_target_long target)
}
static void patch_reloc (uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
value += addend;
switch (type) {
@@ -750,9 +750,6 @@ static void tcg_out_ldsta(TCGContext *s, TCGReg ret, TCGReg addr,
}
#if defined (CONFIG_SOFTMMU)
-
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
@@ -1072,8 +1069,8 @@ static void tcg_target_qemu_prologue (TCGContext *s)
tcg_out32(s, BCLR | BO_ALWAYS);
}
-static void tcg_out_ld (TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
- tcg_target_long arg2)
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
+ intptr_t arg2)
{
if (type == TCG_TYPE_I32)
tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
@@ -1081,8 +1078,8 @@ static void tcg_out_ld (TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
tcg_out_ldsta (s, ret, arg1, arg2, LD, LDX);
}
-static void tcg_out_st (TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
- tcg_target_long arg2)
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
+ intptr_t arg2)
{
if (type == TCG_TYPE_I32)
tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
@@ -1975,29 +1972,11 @@ static void tcg_out_op (TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
break;
- case INDEX_op_mulu2_i64:
- case INDEX_op_muls2_i64:
- {
- int oph = (opc == INDEX_op_mulu2_i64 ? MULHDU : MULHD);
- TCGReg outl = args[0], outh = args[1];
- a0 = args[2], a1 = args[3];
-
- if (outl == a0 || outl == a1) {
- if (outh == a0 || outh == a1) {
- outl = TCG_REG_R0;
- } else {
- tcg_out32(s, oph | TAB(outh, a0, a1));
- oph = 0;
- }
- }
- tcg_out32(s, MULLD | TAB(outl, a0, a1));
- if (oph != 0) {
- tcg_out32(s, oph | TAB(outh, a0, a1));
- }
- if (outl != args[0]) {
- tcg_out_mov(s, TCG_TYPE_I64, args[0], outl);
- }
- }
+ case INDEX_op_muluh_i64:
+ tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
+ break;
+ case INDEX_op_mulsh_i64:
+ tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
break;
default:
@@ -2124,8 +2103,8 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } },
{ INDEX_op_sub2_i64, { "r", "r", "rI", "r", "rZM", "r" } },
- { INDEX_op_muls2_i64, { "r", "r", "r", "r" } },
- { INDEX_op_mulu2_i64, { "r", "r", "r", "r" } },
+ { INDEX_op_mulsh_i64, { "r", "r", "r" } },
+ { INDEX_op_muluh_i64, { "r", "r", "r" } },
{ -1 },
};
diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h
index 48fc6e2e54..fa4b9da093 100644
--- a/tcg/ppc64/tcg-target.h
+++ b/tcg/ppc64/tcg-target.h
@@ -95,6 +95,8 @@ typedef enum {
#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_mulu2_i32 0
#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 0
@@ -116,8 +118,10 @@ typedef enum {
#define TCG_TARGET_HAS_movcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 1
-#define TCG_TARGET_HAS_muls2_i64 1
+#define TCG_TARGET_HAS_mulu2_i64 0
+#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 1
+#define TCG_TARGET_HAS_mulsh_i64 1
#define TCG_AREG0 TCG_REG_R27
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index f229f1c346..1b44aeee96 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -315,9 +315,6 @@ static const uint8_t tcg_cond_to_ltr_cond[] = {
};
#ifdef CONFIG_SOFTMMU
-
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
@@ -351,10 +348,10 @@ static uint8_t *tb_ret_addr;
static uint64_t facilities;
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
- tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
- tcg_target_long pcrel2;
+ intptr_t code_ptr_tl = (intptr_t)code_ptr;
+ intptr_t pcrel2;
/* ??? Not the usual definition of "addend". */
pcrel2 = (value - (code_ptr_tl + addend)) >> 1;
@@ -771,7 +768,7 @@ static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
- TCGReg base, tcg_target_long ofs)
+ TCGReg base, intptr_t ofs)
{
if (type == TCG_TYPE_I32) {
tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
@@ -781,7 +778,7 @@ static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
- TCGReg base, tcg_target_long ofs)
+ TCGReg base, intptr_t ofs)
{
if (type == TCG_TYPE_I32) {
tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index 42ca36c0e9..6142fb26a2 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -69,6 +69,8 @@ typedef enum TCGReg {
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 0
#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_div2_i64 1
#define TCG_TARGET_HAS_rot_i64 1
@@ -94,6 +96,8 @@ typedef enum TCGReg {
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 1
#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
extern bool tcg_target_deposit_valid(int ofs, int len);
#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
@@ -110,8 +114,7 @@ enum {
TCG_AREG0 = TCG_REG_R10,
};
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
}
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index 5bfd29c3b4..9574954ac4 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -252,7 +252,7 @@ static inline int check_fit_i32(uint32_t val, unsigned int bits)
}
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
uint32_t insn;
value += addend;
@@ -264,7 +264,7 @@ static void patch_reloc(uint8_t *code_ptr, int type,
*(uint32_t *)code_ptr = value;
break;
case R_SPARC_WDISP16:
- value -= (long)code_ptr;
+ value -= (intptr_t)code_ptr;
if (!check_fit_tl(value >> 2, 16)) {
tcg_abort();
}
@@ -274,7 +274,7 @@ static void patch_reloc(uint8_t *code_ptr, int type,
*(uint32_t *)code_ptr = insn;
break;
case R_SPARC_WDISP19:
- value -= (long)code_ptr;
+ value -= (intptr_t)code_ptr;
if (!check_fit_tl(value >> 2, 19)) {
tcg_abort();
}
@@ -436,13 +436,13 @@ static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}
@@ -831,8 +831,6 @@ static void tcg_target_qemu_prologue(TCGContext *s)
#if defined(CONFIG_SOFTMMU)
-#include "exec/softmmu_defs.h"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index dab52d7176..2edf858733 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -24,6 +24,14 @@
#ifndef TCG_TARGET_SPARC
#define TCG_TARGET_SPARC 1
+#if UINTPTR_MAX == UINT32_MAX
+# define TCG_TARGET_REG_BITS 32
+#elif UINTPTR_MAX == UINT64_MAX
+# define TCG_TARGET_REG_BITS 64
+#else
+# error Unknown pointer size for tcg target
+#endif
+
#define TCG_TARGET_WORDS_BIGENDIAN
#define TCG_TARGET_NB_REGS 32
@@ -107,6 +115,8 @@ typedef enum {
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_div_i64 1
@@ -134,20 +144,18 @@ typedef enum {
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
#endif
#define TCG_AREG0 TCG_REG_I0
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
- unsigned long p;
-
- p = start & ~(8UL - 1UL);
- stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL);
-
- for (; p < stop; p += 8)
+ uintptr_t p;
+    for (p = start & -8; p < ((stop + 7) & -8); p += 8) {
__asm__ __volatile__("flush\t%0" : : "r" (p));
+ }
}
#endif
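
Aside (not part of the patch): the rewritten loop bounds rely on two masking tricks -- "start & -8" rounds down to the previous 8-byte boundary and "(stop + 7) & -8" rounds up to the next one -- so every doubleword overlapping [start, stop) is flushed exactly once. A minimal standalone sketch with arbitrary sample addresses:

    /* Standalone sketch, not QEMU code: demonstrates the 8-byte rounding
       used by the sparc flush_icache_range() loop above. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t start = 0x1003, stop = 0x1012;   /* arbitrary example range */
        uintptr_t p;

        for (p = start & -8; p < ((stop + 7) & -8); p += 8) {
            /* stands in for: __asm__ __volatile__("flush\t%0" : : "r" (p)); */
            printf("flush %#lx\n", (unsigned long)p);
        }
        return 0;                                  /* prints 0x1000, 0x1008, 0x1010 */
    }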
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 364964d8d4..bb30a7cf39 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -1039,10 +1039,18 @@ static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i32();
- tcg_gen_op4_i32(INDEX_op_mulu2_i32, TCGV_LOW(t0), TCGV_HIGH(t0),
- TCGV_LOW(arg1), TCGV_LOW(arg2));
- /* Allow the optimizer room to replace mulu2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
+ if (TCG_TARGET_HAS_mulu2_i32) {
+ tcg_gen_op4_i32(INDEX_op_mulu2_i32, TCGV_LOW(t0), TCGV_HIGH(t0),
+ TCGV_LOW(arg1), TCGV_LOW(arg2));
+ /* Allow the optimizer room to replace mulu2 with two moves. */
+ tcg_gen_op0(INDEX_op_nop);
+ } else {
+ tcg_debug_assert(TCG_TARGET_HAS_muluh_i32);
+ tcg_gen_op3_i32(INDEX_op_mul_i32, TCGV_LOW(t0),
+ TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_op3_i32(INDEX_op_muluh_i32, TCGV_HIGH(t0),
+ TCGV_LOW(arg1), TCGV_LOW(arg2));
+ }
tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
@@ -2401,6 +2409,12 @@ static inline void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh,
tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
/* Allow the optimizer room to replace mulu2 with two moves. */
tcg_gen_op0(INDEX_op_nop);
+ } else if (TCG_TARGET_HAS_muluh_i32) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
+ tcg_gen_mov_i32(rl, t);
+ tcg_temp_free_i32(t);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
@@ -2420,6 +2434,12 @@ static inline void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh,
tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
/* Allow the optimizer room to replace muls2 with two moves. */
tcg_gen_op0(INDEX_op_nop);
+ } else if (TCG_TARGET_HAS_mulsh_i32) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
+ tcg_gen_mov_i32(rl, t);
+ tcg_temp_free_i32(t);
} else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_mulu2_i32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
@@ -2499,6 +2519,12 @@ static inline void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh,
tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
/* Allow the optimizer room to replace mulu2 with two moves. */
tcg_gen_op0(INDEX_op_nop);
+ } else if (TCG_TARGET_HAS_muluh_i64) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
+ tcg_gen_mov_i64(rl, t);
+ tcg_temp_free_i64(t);
} else if (TCG_TARGET_HAS_mulu2_i64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
@@ -2540,6 +2566,12 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
/* Allow the optimizer room to replace muls2 with two moves. */
tcg_gen_op0(INDEX_op_nop);
+ } else if (TCG_TARGET_HAS_mulsh_i64) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
+ tcg_gen_mov_i64(rl, t);
+ tcg_temp_free_i64(t);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
int sizemask = 0;
@@ -2599,7 +2631,7 @@ static inline void tcg_gen_debug_insn_start(uint64_t pc)
#endif
}
-static inline void tcg_gen_exit_tb(tcg_target_long val)
+static inline void tcg_gen_exit_tb(uintptr_t val)
{
tcg_gen_op1i(INDEX_op_exit_tb, val);
}
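
Aside (not part of the patch): the new fallback paths split a double-word multiply into two single-word ops -- mul for the low half, muluh/mulsh for the high half. What those high-half opcodes compute can be sketched with a double-width intermediate (illustrative helper names, not QEMU API):

    #include <stdint.h>
    #include <stdio.h>

    /* High half of an unsigned 32x32->64 multiply, i.e. what muluh_i32 yields. */
    uint32_t muluh32(uint32_t a, uint32_t b)
    {
        return (uint32_t)(((uint64_t)a * b) >> 32);
    }

    /* High half of a signed 32x32->64 multiply, i.e. what mulsh_i32 yields. */
    int32_t mulsh32(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 32);
    }

    int main(void)
    {
        uint32_t a = 0xdeadbeef, b = 0x12345678;
        /* mulu2-style result pair: low = plain mul, high = muluh -- exactly the
           two-op expansion tcg_gen_mulu2_i32() now emits when the backend has
           muluh but not mulu2. */
        printf("low=%#x high=%#x signed-high=%#x\n",
               a * b, muluh32(a, b), (uint32_t)mulsh32((int32_t)a, (int32_t)b));
        return 0;
    }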
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index a8af5b96a4..a75c29d518 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -91,6 +91,8 @@ DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32))
DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32))
DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32))
DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32))
+DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32))
+DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32))
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | IMPL(TCG_TARGET_REG_BITS == 32))
DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32))
@@ -167,6 +169,8 @@ DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64))
DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64))
DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64))
DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
+DEF(muluh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i64))
+DEF(mulsh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i64))
/* QEMU specific */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
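
Aside (not part of the patch): each DEF() line declares an opcode as (name, output args, input args, constant args, flags), so muluh/mulsh take two inputs, produce one output, and are only advertised when the matching TCG_TARGET_HAS_* macro is non-zero. Roughly how such an X-macro header becomes an opcode enum (sketch, not the exact tcg.h wording):

    /* Sketch of the X-macro pattern: including the opcode list with a
       suitable DEF() definition generates one enumerator per opcode. */
    #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
    typedef enum {
        DEF(mul_i32,   1, 2, 0, 0)     /* in the real tree this is #include "tcg-opc.h" */
        DEF(muluh_i32, 1, 2, 0, 0)
        DEF(mulsh_i32, 1, 2, 0, 0)
        NB_SKETCH_OPS
    } SketchOpcode;
    #undef DEF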
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 19bd5a39bf..fd7fb6b85e 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -49,10 +49,10 @@
#include "tcg-op.h"
-#if TCG_TARGET_REG_BITS == 64
-# define ELF_CLASS ELFCLASS64
-#else
+#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS ELFCLASS32
+#else
+# define ELF_CLASS ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA ELFDATA2MSB
@@ -66,7 +66,7 @@
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend);
+ intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
@@ -82,8 +82,8 @@ typedef struct {
typedef struct QEMU_PACKED {
uint32_t len __attribute__((aligned((sizeof(void *)))));
uint32_t cie_offset;
- tcg_target_long func_start;
- tcg_target_long func_len;
+ uintptr_t func_start;
+ uintptr_t func_len;
} DebugFrameFDEHeader;
static void tcg_register_jit_int(void *buf, size_t size,
@@ -93,14 +93,14 @@ static void tcg_register_jit_int(void *buf, size_t size,
/* Forward declarations for functions declared and used in tcg-target.c. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
- tcg_target_long arg2);
+ intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
const int *const_args);
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
- tcg_target_long arg2);
+ intptr_t arg2);
static int tcg_target_const_match(tcg_target_long val,
const TCGArgConstraint *arg_ct);
@@ -143,7 +143,7 @@ static inline void tcg_out64(TCGContext *s, uint64_t v)
/* label relocation processing */
static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
- int label_index, long addend)
+ int label_index, intptr_t addend)
{
TCGLabel *l;
TCGRelocation *r;
@@ -169,11 +169,12 @@ static void tcg_out_label(TCGContext *s, int label_index, void *ptr)
{
TCGLabel *l;
TCGRelocation *r;
- tcg_target_long value = (tcg_target_long)ptr;
+ intptr_t value = (intptr_t)ptr;
l = &s->labels[label_index];
- if (l->has_value)
+ if (l->has_value) {
tcg_abort();
+ }
r = l->u.first_reloc;
while (r != NULL) {
patch_reloc(r->ptr, r->type, value, r->addend);
@@ -293,8 +294,7 @@ void tcg_prologue_init(TCGContext *s)
s->code_buf = s->code_gen_prologue;
s->code_ptr = s->code_buf;
tcg_target_qemu_prologue(s);
- flush_icache_range((tcg_target_ulong)s->code_buf,
- (tcg_target_ulong)s->code_ptr);
+ flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
@@ -307,8 +307,7 @@ void tcg_prologue_init(TCGContext *s)
#endif
}
-void tcg_set_frame(TCGContext *s, int reg,
- tcg_target_long start, tcg_target_long size)
+void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
{
s->frame_start = start;
s->frame_end = start + size;
@@ -391,7 +390,7 @@ TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
}
static inline int tcg_global_mem_new_internal(TCGType type, int reg,
- tcg_target_long offset,
+ intptr_t offset,
const char *name)
{
TCGContext *s = &tcg_ctx;
@@ -451,21 +450,15 @@ static inline int tcg_global_mem_new_internal(TCGType type, int reg,
return idx;
}
-TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset,
- const char *name)
+TCGv_i32 tcg_global_mem_new_i32(int reg, intptr_t offset, const char *name)
{
- int idx;
-
- idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
+ int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
return MAKE_TCGV_I32(idx);
}
-TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset,
- const char *name)
+TCGv_i64 tcg_global_mem_new_i64(int reg, intptr_t offset, const char *name)
{
- int idx;
-
- idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
+ int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
return MAKE_TCGV_I64(idx);
}
@@ -641,7 +634,7 @@ void tcg_register_helper(void *func, const char *name)
s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
s->allocated_helpers = n;
}
- s->helpers[s->nb_helpers].func = (tcg_target_ulong)func;
+ s->helpers[s->nb_helpers].func = (uintptr_t)func;
s->helpers[s->nb_helpers].name = name;
s->nb_helpers++;
}
@@ -871,11 +864,11 @@ static int helper_cmp(const void *p1, const void *p2)
}
/* find helper definition (Note: A hash table would be better) */
-static TCGHelperInfo *tcg_find_helper(TCGContext *s, tcg_target_ulong val)
+static TCGHelperInfo *tcg_find_helper(TCGContext *s, uintptr_t val)
{
int m, m_min, m_max;
TCGHelperInfo *th;
- tcg_target_ulong v;
+ uintptr_t v;
if (unlikely(!s->helpers_sorted)) {
qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo),
@@ -1252,12 +1245,13 @@ static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
static void tcg_liveness_analysis(TCGContext *s)
{
int i, op_index, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
- TCGOpcode op, op_new;
+ TCGOpcode op, op_new, op_new2;
TCGArg *args;
const TCGOpDef *def;
uint8_t *dead_temps, *mem_temps;
uint16_t dead_args;
uint8_t sync_args;
+ bool have_op_new2;
s->gen_opc_ptr++; /* skip end */
@@ -1394,29 +1388,52 @@ static void tcg_liveness_analysis(TCGContext *s)
goto do_not_remove;
case INDEX_op_mulu2_i32:
+ op_new = INDEX_op_mul_i32;
+ op_new2 = INDEX_op_muluh_i32;
+ have_op_new2 = TCG_TARGET_HAS_muluh_i32;
+ goto do_mul2;
case INDEX_op_muls2_i32:
op_new = INDEX_op_mul_i32;
+ op_new2 = INDEX_op_mulsh_i32;
+ have_op_new2 = TCG_TARGET_HAS_mulsh_i32;
goto do_mul2;
case INDEX_op_mulu2_i64:
+ op_new = INDEX_op_mul_i64;
+ op_new2 = INDEX_op_muluh_i64;
+ have_op_new2 = TCG_TARGET_HAS_muluh_i64;
+ goto do_mul2;
case INDEX_op_muls2_i64:
op_new = INDEX_op_mul_i64;
+ op_new2 = INDEX_op_mulsh_i64;
+ have_op_new2 = TCG_TARGET_HAS_mulsh_i64;
+ goto do_mul2;
do_mul2:
args -= 4;
nb_iargs = 2;
nb_oargs = 2;
- /* Likewise, test for the high part of the operation dead. */
if (dead_temps[args[1]] && !mem_temps[args[1]]) {
if (dead_temps[args[0]] && !mem_temps[args[0]]) {
+ /* Both parts of the operation are dead. */
goto do_remove;
}
+ /* The high part of the operation is dead; generate the low. */
s->gen_opc_buf[op_index] = op = op_new;
args[1] = args[2];
args[2] = args[3];
- assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
- tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 1);
- /* Fall through and mark the single-word operation live. */
- nb_oargs = 1;
+ } else if (have_op_new2 && dead_temps[args[0]]
+ && !mem_temps[args[0]]) {
+ /* The low part of the operation is dead; generate the high. */
+ s->gen_opc_buf[op_index] = op = op_new2;
+ args[0] = args[1];
+ args[1] = args[2];
+ args[2] = args[3];
+ } else {
+ goto do_not_remove;
}
+ assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
+ tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 1);
+ /* Mark the single-word operation live. */
+ nb_oargs = 1;
goto do_not_remove;
default:
@@ -1590,7 +1607,7 @@ static void temp_allocate_frame(TCGContext *s, int temp)
ts->mem_offset = s->current_frame_offset;
ts->mem_reg = s->frame_reg;
ts->mem_allocated = 1;
- s->current_frame_offset += (tcg_target_long)sizeof(tcg_target_long);
+ s->current_frame_offset += sizeof(tcg_target_long);
}
/* sync register 'reg' by saving it to the corresponding temporary */
@@ -2053,7 +2070,9 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
TCGArg arg, func_arg;
TCGTemp *ts;
- tcg_target_long stack_offset, call_stack_size, func_addr;
+ intptr_t stack_offset;
+ size_t call_stack_size;
+ uintptr_t func_addr;
int const_func_arg, allocate_args;
TCGRegSet allocated_regs;
const TCGArgConstraint *arg_ct;
@@ -2391,8 +2410,7 @@ int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
tcg_gen_code_common(s, gen_code_buf, -1);
/* flush instruction cache */
- flush_icache_range((tcg_target_ulong)gen_code_buf,
- (tcg_target_ulong)s->code_ptr);
+ flush_icache_range((uintptr_t)gen_code_buf, (uintptr_t)s->code_ptr);
return s->code_ptr - gen_code_buf;
}
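
Aside (not part of the patch): the reworked do_mul2 case now has three outcomes instead of two -- drop the op when both halves are dead, degrade to mul when only the high half is dead, and (newly) degrade to muluh/mulsh when only the low half is dead and the backend provides the high-part opcode. The decision, with the mem_temps/sync details elided and placeholder parameters instead of the real TCG structures:

    #include <stdbool.h>

    typedef enum { MUL2_REMOVE, MUL2_LOW_ONLY, MUL2_HIGH_ONLY, MUL2_KEEP } Mul2Rewrite;

    /* Sketch of the liveness decision above; not a QEMU function. */
    Mul2Rewrite mul2_rewrite(bool low_dead, bool high_dead, bool have_high_op)
    {
        if (high_dead && low_dead) {
            return MUL2_REMOVE;      /* whole mulu2/muls2 is dead */
        } else if (high_dead) {
            return MUL2_LOW_ONLY;    /* rewrite to plain mul */
        } else if (have_high_op && low_dead) {
            return MUL2_HIGH_ONLY;   /* rewrite to muluh/mulsh */
        }
        return MUL2_KEEP;            /* both halves live */
    }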
diff --git a/tcg/tcg.h b/tcg/tcg.h
index f3f9889694..902c751d26 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -21,15 +21,23 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
+#ifndef TCG_H
+#define TCG_H
+
#include "qemu-common.h"
-/* Target word size (must be identical to pointer size). */
-#if UINTPTR_MAX == UINT32_MAX
-# define TCG_TARGET_REG_BITS 32
-#elif UINTPTR_MAX == UINT64_MAX
-# define TCG_TARGET_REG_BITS 64
-#else
-# error Unknown pointer size for tcg target
+#include "tcg-target.h"
+
+/* Default target word size to pointer size. */
+#ifndef TCG_TARGET_REG_BITS
+# if UINTPTR_MAX == UINT32_MAX
+# define TCG_TARGET_REG_BITS 32
+# elif UINTPTR_MAX == UINT64_MAX
+# define TCG_TARGET_REG_BITS 64
+# else
+# error Unknown pointer size for tcg target
+# endif
#endif
#if TCG_TARGET_REG_BITS == 32
@@ -46,7 +54,6 @@ typedef uint64_t tcg_target_ulong;
#error unsupported
#endif
-#include "tcg-target.h"
#include "tcg-runtime.h"
#if TCG_TARGET_NB_REGS <= 32
@@ -85,6 +92,8 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
/* Turn some undef macros into true macros. */
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
@@ -134,13 +143,13 @@ typedef struct TCGRelocation {
struct TCGRelocation *next;
int type;
uint8_t *ptr;
- tcg_target_long addend;
+ intptr_t addend;
} TCGRelocation;
typedef struct TCGLabel {
int has_value;
union {
- tcg_target_ulong value;
+ uintptr_t value;
TCGRelocation *first_reloc;
} u;
} TCGLabel;
@@ -173,9 +182,12 @@ typedef enum TCGType {
TCG_TYPE_REG = TCG_TYPE_I64,
#endif
- /* An alias for the size of the native pointer. We don't currently
- support any hosts with 64-bit registers and 32-bit pointers. */
- TCG_TYPE_PTR = TCG_TYPE_REG,
+ /* An alias for the size of the native pointer. */
+#if UINTPTR_MAX == UINT32_MAX
+ TCG_TYPE_PTR = TCG_TYPE_I32,
+#else
+ TCG_TYPE_PTR = TCG_TYPE_I64,
+#endif
/* An alias for the size of the target "long", aka register. */
#if TARGET_LONG_BITS == 64
@@ -380,7 +392,7 @@ typedef struct TCGTemp {
int reg;
tcg_target_long val;
int mem_reg;
- tcg_target_long mem_offset;
+ intptr_t mem_offset;
unsigned int fixed_reg:1;
unsigned int mem_coherent:1;
unsigned int mem_allocated:1;
@@ -394,7 +406,7 @@ typedef struct TCGTemp {
} TCGTemp;
typedef struct TCGHelperInfo {
- tcg_target_ulong func;
+ uintptr_t func;
const char *name;
} TCGHelperInfo;
@@ -427,9 +439,9 @@ struct TCGContext {
into account fixed registers */
int reg_to_temp[TCG_TARGET_NB_REGS];
TCGRegSet reserved_regs;
- tcg_target_long current_frame_offset;
- tcg_target_long frame_start;
- tcg_target_long frame_end;
+ intptr_t current_frame_offset;
+ intptr_t frame_start;
+ intptr_t frame_end;
int frame_reg;
uint8_t *code_ptr;
@@ -522,12 +534,10 @@ void tcg_func_start(TCGContext *s);
int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf);
int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset);
-void tcg_set_frame(TCGContext *s, int reg,
- tcg_target_long start, tcg_target_long size);
+void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size);
TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name);
-TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset,
- const char *name);
+TCGv_i32 tcg_global_mem_new_i32(int reg, intptr_t offset, const char *name);
TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
static inline TCGv_i32 tcg_temp_new_i32(void)
{
@@ -541,8 +551,7 @@ void tcg_temp_free_i32(TCGv_i32 arg);
char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg);
TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name);
-TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset,
- const char *name);
+TCGv_i64 tcg_global_mem_new_i64(int reg, intptr_t offset, const char *name);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
static inline TCGv_i64 tcg_temp_new_i64(void)
{
@@ -637,11 +646,11 @@ do {\
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
-#if TCG_TARGET_REG_BITS == 32
+#if UINTPTR_MAX == UINT32_MAX
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))
-#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((tcg_target_long)(V)))
+#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
@@ -652,7 +661,7 @@ void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))
-#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((tcg_target_long)(V)))
+#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
@@ -731,8 +740,7 @@ TCGv_i64 tcg_const_local_i64(int64_t val);
#if !defined(tcg_qemu_tb_exec)
# define tcg_qemu_tb_exec(env, tb_ptr) \
- ((tcg_target_ulong (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, \
- tb_ptr)
+ ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif
void tcg_register_jit(void *buf, size_t buf_size);
@@ -741,3 +749,51 @@ void tcg_register_jit(void *buf, size_t buf_size);
/* Generate TB finalization at the end of block */
void tcg_out_tb_finalize(TCGContext *s);
#endif
+
+/*
+ * Memory helpers that will be used by TCG generated code.
+ */
+#ifdef CONFIG_SOFTMMU
+/* Value zero-extended to tcg register size. */
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_ret_lduw_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_ret_ldul_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+
+/* Value sign-extended to tcg register size. */
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_ret_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_ret_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx, uintptr_t retaddr);
+
+uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+
+void helper_stb_mmu(CPUArchState *env, target_ulong addr,
+ uint8_t val, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr,
+ uint16_t val, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t val, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t val, int mmu_idx);
+#endif /* CONFIG_SOFTMMU */
+
+#endif /* TCG_H */
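
Aside (not part of the patch): the "zero-extended" vs "sign-extended" comments on the new load helpers matter because the backend receives the value in a full tcg register. A byte of 0xff widens differently depending on which family is called (sketch with arbitrary values):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t byte = 0xff;
        /* ldub-style return: zero-extended to register width. */
        uint64_t zext = (uint64_t)byte;
        /* ldsb-style return: sign-extended to register width. */
        uint64_t sext = (uint64_t)(int64_t)(int8_t)byte;

        printf("zext=%#llx sext=%#llx\n",
               (unsigned long long)zext, (unsigned long long)sext);
        return 0;   /* zext=0xff, sext=0xffffffffffffffff */
    }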
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index e118bc7179..281d7d50f3 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -370,7 +370,7 @@ static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#endif
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
/* tcg_out_reloc always uses the same type, addend. */
assert(type == sizeof(tcg_target_long));
@@ -488,7 +488,7 @@ static void tci_out_label(TCGContext *s, TCGArg arg)
}
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
- tcg_target_long arg2)
+ intptr_t arg2)
{
uint8_t *old_code_ptr = s->code_ptr;
if (type == TCG_TYPE_I32) {
@@ -842,7 +842,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
- tcg_target_long arg2)
+ intptr_t arg2)
{
uint8_t *old_code_ptr = s->code_ptr;
if (type == TCG_TYPE_I32) {
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index d7fc14eb17..c2ecfbe047 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -44,6 +44,14 @@
#define TCG_TARGET_INTERPRETER 1
+#if UINTPTR_MAX == UINT32_MAX
+# define TCG_TARGET_REG_BITS 32
+#elif UINTPTR_MAX == UINT64_MAX
+# define TCG_TARGET_REG_BITS 64
+#else
+# error Unknown pointer size for tci target
+#endif
+
#ifdef CONFIG_DEBUG_TCG
/* Enable debug output. */
#define CONFIG_DEBUG_TCG_INTERPRETER
@@ -76,6 +84,8 @@
#define TCG_TARGET_HAS_rot_i32 1
#define TCG_TARGET_HAS_movcond_i32 0
#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_bswap16_i64 1
@@ -100,13 +110,14 @@
#define TCG_TARGET_HAS_rot_i64 1
#define TCG_TARGET_HAS_movcond_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
-
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_mulu2_i32 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
#endif /* TCG_TARGET_REG_BITS == 64 */
/* Number of registers available.
@@ -166,11 +177,10 @@ typedef enum {
void tci_disas(uint8_t opc);
-tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
+uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#define tcg_qemu_tb_exec tcg_qemu_tb_exec
-static inline void flush_icache_range(tcg_target_ulong start,
- tcg_target_ulong stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
}
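
Aside (not part of the patch): several tcg-target.h headers now derive TCG_TARGET_REG_BITS from UINTPTR_MAX themselves, while tcg.h only supplies that probe as a default -- a backend whose register width differs from the host pointer width can define the macro before the default kicks in. The pattern, with an illustrative macro name:

    #include <stdint.h>

    /* MY_REG_BITS is a stand-in name, not a QEMU macro. */
    #ifndef MY_REG_BITS
    # if UINTPTR_MAX == UINT32_MAX
    #  define MY_REG_BITS 32
    # elif UINTPTR_MAX == UINT64_MAX
    #  define MY_REG_BITS 64
    # else
    #  error "unknown pointer size"
    # endif
    #endif

    /* For the default path, fails to compile if the probe disagrees with the
       actual pointer width. */
    typedef char my_reg_bits_check[(MY_REG_BITS == 8 * sizeof(void *)) ? 1 : -1];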
diff --git a/tci.c b/tci.c
index c742c8df5c..18c888e54d 100644
--- a/tci.c
+++ b/tci.c
@@ -434,11 +434,11 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
}
/* Interpret pseudo code in tb. */
-tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
+uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
{
long tcg_temps[CPU_TEMP_BUF_NLONGS];
uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
- tcg_target_ulong next_tb = 0;
+ uintptr_t next_tb = 0;
tci_reg[TCG_AREG0] = (tcg_target_ulong)env;
tci_reg[TCG_REG_CALL_STACK] = sp_value;
diff --git a/trace-events b/trace-events
index 3856b5c206..aaad3560f4 100644
--- a/trace-events
+++ b/trace-events
@@ -1133,6 +1133,11 @@ xics_ics_write_xive(int nr, int srcno, int server, uint8_t priority) "ics_write_
xics_ics_reject(int nr, int srcno) "reject irq %#x [src %d]"
xics_ics_eoi(int nr) "ics_eoi: irq %#x"
+# hw/ppc/spapr_iommu.c
+spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64
+spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, unsigned pgsize) "liobn=%"PRIx64" 0x%"PRIx64" -> 0x%"PRIx64" perm=%u mask=%x"
+spapr_iommu_new_table(uint64_t liobn, void *tcet, void *table, int fd) "liobn=%"PRIx64" tcet=%p table=%p fd=%d"
+
# util/hbitmap.c
hbitmap_iter_skip_words(const void *hb, void *hbi, uint64_t pos, unsigned long cur) "hb %p hbi %p pos %"PRId64" cur 0x%lx"
hbitmap_reset(void *hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit) "hb %p items %"PRIu64",%"PRIu64" bits %"PRIu64"..%"PRIu64