-rwxr-xr-x  configure | 6
-rw-r--r--  cpu-exec.c | 6
-rw-r--r--  cpus.c | 9
-rw-r--r--  hw/arm/virt-acpi-build.c | 4
-rw-r--r--  hw/arm/virt.c | 16
-rw-r--r--  hw/core/machine.c | 37
-rw-r--r--  hw/i386/acpi-build.c | 3
-rw-r--r--  hw/i386/pc.c | 21
-rw-r--r--  hw/intc/openpic.c | 22
-rw-r--r--  hw/net/vmxnet3.c | 8
-rw-r--r--  hw/nvram/spapr_nvram.c | 10
-rw-r--r--  hw/ppc/pnv.c | 5
-rw-r--r--  hw/ppc/spapr.c | 99
-rw-r--r--  hw/ppc/spapr_cpu_core.c | 4
-rw-r--r--  hw/ppc/spapr_drc.c | 573
-rw-r--r--  hw/ppc/spapr_events.c | 12
-rw-r--r--  hw/ppc/spapr_hcall.c | 5
-rw-r--r--  hw/ppc/spapr_pci.c | 13
-rw-r--r--  hw/ppc/spapr_rtas.c | 304
-rw-r--r--  hw/s390x/s390-skeys.c | 9
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 8
-rw-r--r--  hw/xtensa/sim.c | 4
-rw-r--r--  include/exec/exec-all.h | 2
-rw-r--r--  include/exec/tb-hash.h | 12
-rw-r--r--  include/hw/i386/pc.h | 42
-rw-r--r--  include/hw/ppc/spapr.h | 14
-rw-r--r--  include/hw/ppc/spapr_drc.h | 69
-rw-r--r--  include/migration/vmstate.h | 8
-rw-r--r--  include/qemu/atomic.h | 34
-rw-r--r--  include/qom/cpu.h | 2
-rw-r--r--  include/sysemu/cpus.h | 1
-rw-r--r--  include/sysemu/hax.h | 1
-rw-r--r--  include/sysemu/hw_accel.h | 10
-rw-r--r--  include/sysemu/kvm.h | 1
-rw-r--r--  include/sysemu/numa.h | 1
-rw-r--r--  kvm-all.c | 10
-rw-r--r--  migration/savevm.c | 18
-rw-r--r--  monitor.c | 11
-rw-r--r--  numa.c | 43
-rw-r--r--  qapi/block-core.json | 3
-rw-r--r--  qom/object_interfaces.c | 9
-rwxr-xr-x  scripts/device-crash-test | 624
-rw-r--r--  scripts/qemu.py | 17
-rw-r--r--  slirp/slirp.c | 8
-rw-r--r--  target/alpha/translate.c | 30
-rw-r--r--  target/arm/cpu.c | 2
-rw-r--r--  target/arm/cpu.h | 2
-rw-r--r--  target/arm/translate-a64.c | 5
-rw-r--r--  target/arm/translate.c | 21
-rw-r--r--  target/arm/translate.h | 4
-rw-r--r--  target/hppa/translate.c | 8
-rw-r--r--  target/i386/cpu.c | 2
-rw-r--r--  target/i386/cpu.h | 1
-rw-r--r--  target/i386/hax-all.c | 10
-rw-r--r--  target/i386/translate.c | 43
-rw-r--r--  target/mips/translate.c | 4
-rw-r--r--  target/nios2/translate.c | 2
-rw-r--r--  target/ppc/cpu.h | 1
-rw-r--r--  target/s390x/cpu.h | 7
-rw-r--r--  target/s390x/cpu_models.c | 36
-rw-r--r--  target/s390x/fpu_helper.c | 27
-rw-r--r--  target/s390x/helper.c | 7
-rw-r--r--  target/s390x/helper.h | 28
-rw-r--r--  target/s390x/insn-data.def | 66
-rw-r--r--  target/s390x/machine.c | 19
-rw-r--r--  target/s390x/mem_helper.c | 1325
-rw-r--r--  target/s390x/misc_helper.c | 4
-rw-r--r--  target/s390x/mmu_helper.c | 4
-rw-r--r--  target/s390x/translate.c | 543
-rw-r--r--  target/xtensa/cpu.h | 1
-rw-r--r--  target/xtensa/gdbstub.c | 13
-rw-r--r--  target/xtensa/xtensa-semi.c | 91
-rw-r--r--  tcg-runtime.c | 32
-rw-r--r--  tcg/README | 8
-rw-r--r--  tcg/aarch64/tcg-target.h | 1
-rw-r--r--  tcg/aarch64/tcg-target.inc.c | 22
-rw-r--r--  tcg/arm/tcg-target.h | 1
-rw-r--r--  tcg/arm/tcg-target.inc.c | 54
-rw-r--r--  tcg/i386/tcg-target.h | 1
-rw-r--r--  tcg/i386/tcg-target.inc.c | 24
-rw-r--r--  tcg/ia64/tcg-target.h | 1
-rw-r--r--  tcg/mips/tcg-target.h | 1
-rw-r--r--  tcg/mips/tcg-target.inc.c | 13
-rw-r--r--  tcg/ppc/tcg-target.h | 1
-rw-r--r--  tcg/ppc/tcg-target.inc.c | 7
-rw-r--r--  tcg/s390/tcg-target.h | 1
-rw-r--r--  tcg/s390/tcg-target.inc.c | 24
-rw-r--r--  tcg/sparc/tcg-target.h | 1
-rw-r--r--  tcg/sparc/tcg-target.inc.c | 11
-rw-r--r--  tcg/tcg-op.c | 12
-rw-r--r--  tcg/tcg-op.h | 11
-rw-r--r--  tcg/tcg-opc.h | 1
-rw-r--r--  tcg/tcg-runtime.h | 2
-rw-r--r--  tcg/tcg.c | 5
-rw-r--r--  tcg/tcg.h | 1
-rw-r--r--  tcg/tci/tcg-target.h | 1
-rw-r--r--  tests/check-qom-proplist.c | 56
-rw-r--r--  tests/test-x86-cpuid-compat.c | 38
-rw-r--r--  tests/virtio-scsi-test.c | 2
99 files changed, 3430 insertions, 1326 deletions
diff --git a/configure b/configure
index fbb6a93c99..13e040d28c 100755
--- a/configure
+++ b/configure
@@ -1213,12 +1213,12 @@ case "$cpu" in
LDFLAGS="-m64 $LDFLAGS"
;;
sparc)
- LDFLAGS="-m32 $LDFLAGS"
- CPU_CFLAGS="-m32 -mcpu=ultrasparc"
+ CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc"
+ LDFLAGS="-m32 -mv8plus $LDFLAGS"
;;
sparc64)
- LDFLAGS="-m64 $LDFLAGS"
CPU_CFLAGS="-m64 -mcpu=ultrasparc"
+ LDFLAGS="-m64 $LDFLAGS"
;;
s390)
CPU_CFLAGS="-m31"
diff --git a/cpu-exec.c b/cpu-exec.c
index 63a56d0407..5b181c18ed 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -309,10 +309,8 @@ static bool tb_cmp(const void *p, const void *d)
return false;
}
-static TranslationBlock *tb_htable_lookup(CPUState *cpu,
- target_ulong pc,
- target_ulong cs_base,
- uint32_t flags)
+TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+ target_ulong cs_base, uint32_t flags)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
diff --git a/cpus.c b/cpus.c
index 516e5cbac1..6398439946 100644
--- a/cpus.c
+++ b/cpus.c
@@ -921,6 +921,15 @@ void cpu_synchronize_all_post_init(void)
}
}
+void cpu_synchronize_all_pre_loadvm(void)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ cpu_synchronize_pre_loadvm(cpu);
+ }
+}
+
static int do_vm_stop(RunState state)
{
int ret = 0;
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 2079828c22..3d78ff68e6 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -496,12 +496,10 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
srat->reserved1 = cpu_to_le32(1);
for (i = 0; i < cpu_list->len; ++i) {
- int node_id = cpu_list->cpus[i].props.has_node_id ?
- cpu_list->cpus[i].props.node_id : 0;
core = acpi_data_push(table_data, sizeof(*core));
core->type = ACPI_SRAT_PROCESSOR_GICC;
core->length = sizeof(*core);
- core->proximity = cpu_to_le32(node_id);
+ core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
core->acpi_processor_uid = cpu_to_le32(i);
core->flags = cpu_to_le32(1);
}
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 4db2d4207c..010f7244bf 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1372,7 +1372,6 @@ static void machvirt_init(MachineState *machine)
for (n = 0; n < possible_cpus->len; n++) {
Object *cpuobj;
CPUState *cs;
- int node_id;
if (n >= smp_cpus) {
break;
@@ -1385,19 +1384,8 @@ static void machvirt_init(MachineState *machine)
cs = CPU(cpuobj);
cs->cpu_index = n;
- node_id = possible_cpus->cpus[cs->cpu_index].props.node_id;
- if (!possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
- /* by default CPUState::numa_node was 0 if it's not set via CLI
- * keep it this way for now but in future we probably should
- * refuse to start up with incomplete numa mapping */
- node_id = 0;
- }
- if (cs->numa_node == CPU_UNSET_NUMA_NODE_ID) {
- cs->numa_node = node_id;
- } else {
- /* CPU isn't device_add compatible yet, this shouldn't happen */
- error_setg(&error_abort, "user set node-id not implemented");
- }
+ numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj),
+ &error_fatal);
if (!vms->secure) {
object_property_set_bool(cpuobj, false, "has_el3", NULL);
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 3adebf14c4..2e7e9778cd 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -701,26 +701,43 @@ static char *cpu_slot_to_string(const CPUArchId *cpu)
return g_string_free(s, false);
}
-static void machine_numa_validate(MachineState *machine)
+static void machine_numa_finish_init(MachineState *machine)
{
int i;
+ bool default_mapping;
GString *s = g_string_new(NULL);
MachineClass *mc = MACHINE_GET_CLASS(machine);
const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(machine);
assert(nb_numa_nodes);
for (i = 0; i < possible_cpus->len; i++) {
+ if (possible_cpus->cpus[i].props.has_node_id) {
+ break;
+ }
+ }
+ default_mapping = (i == possible_cpus->len);
+
+ for (i = 0; i < possible_cpus->len; i++) {
const CPUArchId *cpu_slot = &possible_cpus->cpus[i];
- /* at this point numa mappings are initilized by CLI options
- * or with default mappings so it's sufficient to list
- * all not yet mapped CPUs here */
- /* TODO: make it hard error in future */
if (!cpu_slot->props.has_node_id) {
- char *cpu_str = cpu_slot_to_string(cpu_slot);
- g_string_append_printf(s, "%sCPU %d [%s]", s->len ? ", " : "", i,
- cpu_str);
- g_free(cpu_str);
+ /* fetch default mapping from board and enable it */
+ CpuInstanceProperties props = cpu_slot->props;
+
+ if (!default_mapping) {
+ /* record slots with not set mapping,
+ * TODO: make it hard error in future */
+ char *cpu_str = cpu_slot_to_string(cpu_slot);
+ g_string_append_printf(s, "%sCPU %d [%s]",
+ s->len ? ", " : "", i, cpu_str);
+ g_free(cpu_str);
+
+ /* non mapped cpus used to fallback to node 0 */
+ props.node_id = 0;
+ }
+
+ props.has_node_id = true;
+ machine_set_cpu_numa_node(machine, &props, &error_fatal);
}
}
if (s->len && !qtest_enabled()) {
@@ -738,7 +755,7 @@ void machine_run_board_init(MachineState *machine)
MachineClass *machine_class = MACHINE_GET_CLASS(machine);
if (nb_numa_nodes) {
- machine_numa_validate(machine);
+ machine_numa_finish_init(machine);
}
machine_class->init(machine);
}
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 82bd44f38e..ce74c84460 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -2335,8 +2335,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
srat->reserved1 = cpu_to_le32(1);
for (i = 0; i < apic_ids->len; i++) {
- int node_id = apic_ids->cpus[i].props.has_node_id ?
- apic_ids->cpus[i].props.node_id : 0;
+ int node_id = apic_ids->cpus[i].props.node_id;
uint32_t apic_id = apic_ids->cpus[i].arch_id;
if (apic_id < 255) {
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 107a34125b..5b8c6fbbea 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -788,9 +788,7 @@ static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms)
for (i = 0; i < cpus->len; i++) {
unsigned int apic_id = cpus->cpus[i].arch_id;
assert(apic_id < pcms->apic_id_limit);
- if (cpus->cpus[i].props.has_node_id) {
- numa_fw_cfg[apic_id + 1] = cpu_to_le64(cpus->cpus[i].props.node_id);
- }
+ numa_fw_cfg[apic_id + 1] = cpu_to_le64(cpus->cpus[i].props.node_id);
}
for (i = 0; i < nb_numa_nodes; i++) {
numa_fw_cfg[pcms->apic_id_limit + 1 + i] =
@@ -1893,7 +1891,6 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
int idx;
- int node_id;
CPUState *cs;
CPUArchId *cpu_slot;
X86CPUTopoInfo topo;
@@ -1984,21 +1981,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
cs = CPU(cpu);
cs->cpu_index = idx;
- node_id = cpu_slot->props.node_id;
- if (!cpu_slot->props.has_node_id) {
- /* by default CPUState::numa_node was 0 if it's not set via CLI
- * keep it this way for now but in future we probably should
- * refuse to start up with incomplete numa mapping */
- node_id = 0;
- }
- if (cs->numa_node == CPU_UNSET_NUMA_NODE_ID) {
- cs->numa_node = node_id;
- } else if (cs->numa_node != node_id) {
- error_setg(errp, "node-id %d must match numa node specified"
- "with -numa option for cpu-index %d",
- cs->numa_node, cs->cpu_index);
- return;
- }
+ numa_cpu_pre_plug(cpu_slot, dev, errp);
}
static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c
index 4349e45e04..f966d0604a 100644
--- a/hw/intc/openpic.c
+++ b/hw/intc/openpic.c
@@ -796,27 +796,24 @@ static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
}
static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
- unsigned len)
+ unsigned len)
{
OpenPICState *opp = opaque;
int idx;
- addr += 0x10f0;
-
DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n",
- __func__, addr, val);
+ __func__, (addr + 0x10f0), val);
if (addr & 0xF) {
return;
}
- if (addr == 0x10f0) {
+ if (addr == 0) {
/* TFRR */
opp->tfrr = val;
return;
}
-
+ addr -= 0x10; /* correct for TFRR */
idx = (addr >> 6) & 0x3;
- addr = addr & 0x30;
switch (addr & 0x30) {
case 0x00: /* TCCR */
@@ -844,16 +841,17 @@ static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
uint32_t retval = -1;
int idx;
- DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr);
+ DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr + 0x10f0);
if (addr & 0xF) {
goto out;
}
- idx = (addr >> 6) & 0x3;
- if (addr == 0x0) {
+ if (addr == 0) {
/* TFRR */
retval = opp->tfrr;
goto out;
}
+ addr -= 0x10; /* correct for TFRR */
+ idx = (addr >> 6) & 0x3;
switch (addr & 0x30) {
case 0x00: /* TCCR */
retval = opp->timers[idx].tccr;
@@ -861,10 +859,10 @@ static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
case 0x10: /* TBCR */
retval = opp->timers[idx].tbcr;
break;
- case 0x20: /* TIPV */
+ case 0x20: /* TVPR */
retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
break;
- case 0x30: /* TIDE (TIDR) */
+ case 0x30: /* TDR */
retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
break;
}
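
[Editor's note: the openpic hunk above reworks the timer register decode so the handlers keep the offset relative to the start of the timer block instead of re-adding the 0x10f0 base: offset 0 is TFRR, and only after that is the 0x10-byte TFRR slot subtracted before picking a timer and register. A standalone, illustrative-only sketch of that decode, not part of the patch:]

#include <stdio.h>
#include <stdint.h>

/* Decode a timer-block-relative offset the way the reworked handlers do:
 * offset 0 is TFRR; past the 0x10-byte TFRR slot, each timer occupies
 * 0x40 bytes holding TCCR/TBCR/TVPR/TDR at 0x00/0x10/0x20/0x30.
 */
static void decode_tmr(uint64_t addr)
{
    static const char *reg[] = { "TCCR", "TBCR", "TVPR", "TDR" };

    if (addr == 0) {
        printf("offset 0x0 -> TFRR\n");
        return;
    }
    addr -= 0x10;   /* skip the TFRR slot, as the patched handlers do */
    printf("timer %d %s\n", (int)((addr >> 6) & 0x3), reg[(addr >> 4) & 0x3]);
}

int main(void)
{
    decode_tmr(0x00);   /* -> TFRR         */
    decode_tmr(0x10);   /* -> timer 0 TCCR */
    decode_tmr(0x30);   /* -> timer 0 TVPR */
    decode_tmr(0x50);   /* -> timer 1 TCCR */
    return 0;
}
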
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 8b1fab24fd..4df31101ec 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -2262,6 +2262,11 @@ static const MemoryRegionOps b1_ops = {
},
};
+static SaveVMHandlers savevm_vmxnet3_msix = {
+ .save_state = vmxnet3_msix_save,
+ .load_state = vmxnet3_msix_load,
+};
+
static uint64_t vmxnet3_device_serial_num(VMXNET3State *s)
{
uint64_t dsn_payload;
@@ -2331,8 +2336,7 @@ static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp)
vmxnet3_device_serial_num(s));
}
- register_savevm(dev, "vmxnet3-msix", -1, 1,
- vmxnet3_msix_save, vmxnet3_msix_load, s);
+ register_savevm_live(dev, "vmxnet3-msix", -1, 1, &savevm_vmxnet3_msix, s);
}
static void vmxnet3_instance_init(Object *obj)
diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c
index aa5d2c1f5f..bc355a4348 100644
--- a/hw/nvram/spapr_nvram.c
+++ b/hw/nvram/spapr_nvram.c
@@ -144,7 +144,15 @@ static void spapr_nvram_realize(VIOsPAPRDevice *dev, Error **errp)
int ret;
if (nvram->blk) {
- nvram->size = blk_getlength(nvram->blk);
+ int64_t len = blk_getlength(nvram->blk);
+
+ if (len < 0) {
+ error_setg_errno(errp, -len,
+ "could not get length of backing image");
+ return;
+ }
+
+ nvram->size = len;
ret = blk_set_perm(nvram->blk,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index 231ed9735b..89b6801f67 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -378,8 +378,9 @@ static void powernv_populate_ipmi_bt(ISADevice *d, void *fdt, int lpc_off)
_FDT(node);
g_free(name);
- fdt_setprop(fdt, node, "reg", io_regs, sizeof(io_regs));
- fdt_setprop(fdt, node, "compatible", compatible, sizeof(compatible));
+ _FDT((fdt_setprop(fdt, node, "reg", io_regs, sizeof(io_regs))));
+ _FDT((fdt_setprop(fdt, node, "compatible", compatible,
+ sizeof(compatible))));
/* Mark it as reserved to avoid Linux trying to claim it */
_FDT((fdt_setprop_string(fdt, node, "status", "reserved")));
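
[Editor's note: the pnv.c hunk above wraps the remaining bare fdt_setprop() calls in _FDT() so libfdt failures abort instead of being silently ignored. Roughly what that macro does, as a simplified standalone sketch; the real definition lives in include/hw/ppc/fdt.h and reports through error_report():]

#include <stdio.h>
#include <stdlib.h>

/* Abort with a diagnostic whenever a device-tree call returns an error. */
#define _FDT(exp)                                                   \
    do {                                                            \
        int _ret = (exp);                                           \
        if (_ret < 0) {                                             \
            fprintf(stderr, "error creating device tree: %s: %d\n", \
                    #exp, _ret);                                    \
            exit(1);                                                \
        }                                                           \
    } while (0)

static int fake_fdt_setprop(void) { return -1; }  /* stand-in for libfdt */

int main(void)
{
    _FDT(fake_fdt_setprop());   /* prints a diagnostic and exits(1) */
    return 0;
}
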
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index ab3aab1279..91b4057933 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -57,6 +57,7 @@
#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"
+#include "hw/virtio/vhost-scsi-common.h"
#include "exec/address-spaces.h"
#include "hw/usb.h"
@@ -182,25 +183,19 @@ static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
return ret;
}
-static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, CPUState *cs)
+static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
{
- int ret = 0;
- PowerPCCPU *cpu = POWERPC_CPU(cs);
int index = ppc_get_vcpu_dt_id(cpu);
uint32_t associativity[] = {cpu_to_be32(0x5),
cpu_to_be32(0x0),
cpu_to_be32(0x0),
cpu_to_be32(0x0),
- cpu_to_be32(cs->numa_node),
+ cpu_to_be32(cpu->node_id),
cpu_to_be32(index)};
/* Advertise NUMA via ibm,associativity */
- if (nb_numa_nodes > 1) {
- ret = fdt_setprop(fdt, offset, "ibm,associativity", associativity,
+ return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
sizeof(associativity));
- }
-
- return ret;
}
/* Populate the "ibm,pa-features" property */
@@ -325,9 +320,11 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
return ret;
}
- ret = spapr_fixup_cpu_numa_dt(fdt, offset, cs);
- if (ret < 0) {
- return ret;
+ if (nb_numa_nodes > 1) {
+ ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu);
+ if (ret < 0) {
+ return ret;
+ }
}
ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt);
@@ -456,15 +453,13 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu));
sPAPRDRConnector *drc;
- sPAPRDRConnectorClass *drck;
int drc_index;
uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
int i;
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
if (drc) {
- drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- drc_index = drck->get_index(drc);
+ drc_index = spapr_drc_index(drc);
_FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
}
@@ -542,7 +537,9 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
_FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
pft_size_prop, sizeof(pft_size_prop))));
- _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cs));
+ if (nb_numa_nodes > 1) {
+ _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu));
+ }
_FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));
@@ -654,15 +651,13 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
if (i >= hotplug_lmb_start) {
sPAPRDRConnector *drc;
- sPAPRDRConnectorClass *drck;
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB, i);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
g_assert(drc);
- drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
dynamic_memory[0] = cpu_to_be32(addr >> 32);
dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
- dynamic_memory[2] = cpu_to_be32(drck->get_index(drc));
+ dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
dynamic_memory[3] = cpu_to_be32(0); /* reserved */
dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
if (memory_region_present(get_system_memory(), addr)) {
@@ -1915,7 +1910,7 @@ static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
uint64_t addr;
addr = i * lmb_size + spapr->hotplug_memory.base;
- drc = spapr_dr_connector_new(OBJECT(spapr), SPAPR_DR_CONNECTOR_TYPE_LMB,
+ drc = spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
addr/lmb_size);
qemu_register_reset(spapr_drc_reset, drc);
}
@@ -2012,8 +2007,7 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
if (mc->has_hotpluggable_cpus) {
sPAPRDRConnector *drc =
- spapr_dr_connector_new(OBJECT(spapr),
- SPAPR_DR_CONNECTOR_TYPE_CPU,
+ spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
(core_id / smp_threads) * smt);
qemu_register_reset(spapr_drc_reset, drc);
@@ -2344,10 +2338,6 @@ static void ppc_spapr_init(MachineState *machine)
register_savevm_live(NULL, "spapr/htab", -1, 1,
&savevm_htab_handlers, spapr);
- /* used by RTAS */
- QTAILQ_INIT(&spapr->ccs_list);
- qemu_register_reset(spapr_ccs_reset_hook, spapr);
-
qemu_register_boot_set(spapr_boot_set, spapr);
if (kvm_enabled()) {
@@ -2388,6 +2378,7 @@ static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
((type *)object_dynamic_cast(OBJECT(obj), (name)))
SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
+ VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
if (d) {
void *spapr = CAST(void, bus->parent, "spapr-vscsi");
@@ -2444,6 +2435,12 @@ static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
return g_strdup_printf("pci@%"PRIX64, phb->buid);
}
+ if (vsc) {
+ /* Same logic as virtio above */
+ unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
+ return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
+ }
+
return NULL;
}
@@ -2533,8 +2530,8 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
uint64_t addr = addr_start;
for (i = 0; i < nr_lmbs; i++) {
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
- addr/SPAPR_MEMORY_BLOCK_SIZE);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
+ addr / SPAPR_MEMORY_BLOCK_SIZE);
g_assert(drc);
fdt = create_device_tree(&fdt_size);
@@ -2555,12 +2552,12 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
*/
if (dev->hotplugged) {
if (dedicated_hp_event_source) {
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
- addr_start / SPAPR_MEMORY_BLOCK_SIZE);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
+ addr_start / SPAPR_MEMORY_BLOCK_SIZE);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
nr_lmbs,
- drck->get_index(drc));
+ spapr_drc_index(drc));
} else {
spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
nr_lmbs);
@@ -2673,8 +2670,8 @@ static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
addr = addr_start;
for (i = 0; i < nr_lmbs; i++) {
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
- addr / SPAPR_MEMORY_BLOCK_SIZE);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
+ addr / SPAPR_MEMORY_BLOCK_SIZE);
g_assert(drc);
if (drc->indicator_state != SPAPR_DR_INDICATOR_STATE_INACTIVE) {
avail_lmbs++;
@@ -2757,8 +2754,8 @@ static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
addr = addr_start;
for (i = 0; i < nr_lmbs; i++) {
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
- addr / SPAPR_MEMORY_BLOCK_SIZE);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
+ addr / SPAPR_MEMORY_BLOCK_SIZE);
g_assert(drc);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
@@ -2766,12 +2763,11 @@ static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
addr += SPAPR_MEMORY_BLOCK_SIZE;
}
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
- addr_start / SPAPR_MEMORY_BLOCK_SIZE);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
+ addr_start / SPAPR_MEMORY_BLOCK_SIZE);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
- nr_lmbs,
- drck->get_index(drc));
+ nr_lmbs, spapr_drc_index(drc));
out:
error_propagate(errp, local_err);
}
@@ -2839,7 +2835,7 @@ void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
return;
}
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt);
g_assert(drc);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
@@ -2874,7 +2870,7 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
cc->core_id);
return;
}
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt);
g_assert(drc || !mc->has_hotpluggable_cpus);
@@ -2922,11 +2918,9 @@ static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
Error *local_err = NULL;
CPUCore *cc = CPU_CORE(dev);
- sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev);
char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
const char *type = object_get_typename(OBJECT(dev));
CPUArchId *core_slot;
- int node_id;
int index;
if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
@@ -2967,20 +2961,7 @@ static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
goto out;
}
- node_id = core_slot->props.node_id;
- if (!core_slot->props.has_node_id) {
- /* by default CPUState::numa_node was 0 if it's not set via CLI
- * keep it this way for now but in future we probably should
- * refuse to start up with incomplete numa mapping */
- node_id = 0;
- }
- if (sc->node_id == CPU_UNSET_NUMA_NODE_ID) {
- sc->node_id = node_id;
- } else if (sc->node_id != node_id) {
- error_setg(&local_err, "node-id %d must match numa node specified"
- "with -numa option for cpu-index %d", sc->node_id, cc->core_id);
- goto out;
- }
+ numa_cpu_pre_plug(core_slot, dev, &local_err);
out:
g_free(base_core_type);
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index ff7058ecc0..029a14120e 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -184,15 +184,17 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
for (i = 0; i < cc->nr_threads; i++) {
char id[32];
CPUState *cs;
+ PowerPCCPU *cpu;
obj = sc->threads + i * size;
object_initialize(obj, size, typename);
cs = CPU(obj);
+ cpu = POWERPC_CPU(cs);
cs->cpu_index = cc->core_id + i;
/* Set NUMA node for the threads belonged to core */
- cs->numa_node = sc->node_id;
+ cpu->node_id = sc->node_id;
snprintf(id, sizeof(id), "thread[%d]", i);
object_property_add_child(OBJECT(sc), id, obj, &local_err);
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index cc2400bcd5..39e7f3080a 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -27,29 +27,23 @@
#define DRC_INDEX_TYPE_SHIFT 28
#define DRC_INDEX_ID_MASK ((1ULL << DRC_INDEX_TYPE_SHIFT) - 1)
-static sPAPRDRConnectorTypeShift get_type_shift(sPAPRDRConnectorType type)
+sPAPRDRConnectorType spapr_drc_type(sPAPRDRConnector *drc)
{
- uint32_t shift = 0;
-
- /* make sure this isn't SPAPR_DR_CONNECTOR_TYPE_ANY, or some
- * other wonky value.
- */
- g_assert(is_power_of_2(type));
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- while (type != (1 << shift)) {
- shift++;
- }
- return shift;
+ return 1 << drck->typeshift;
}
-static uint32_t get_index(sPAPRDRConnector *drc)
+uint32_t spapr_drc_index(sPAPRDRConnector *drc)
{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
/* no set format for a drc index: it only needs to be globally
* unique. this is how we encode the DRC type on bare-metal
* however, so might as well do that here
*/
- return (get_type_shift(drc->type) << DRC_INDEX_TYPE_SHIFT) |
- (drc->id & DRC_INDEX_ID_MASK);
+ return (drck->typeshift << DRC_INDEX_TYPE_SHIFT)
+ | (drc->id & DRC_INDEX_ID_MASK);
}
static uint32_t set_isolation_state(sPAPRDRConnector *drc,
@@ -57,7 +51,17 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- trace_spapr_drc_set_isolation_state(get_index(drc), state);
+ trace_spapr_drc_set_isolation_state(spapr_drc_index(drc), state);
+
+ /* if the guest is configuring a device attached to this DRC, we
+ * should reset the configuration state at this point since it may
+ * no longer be reliable (guest released device and needs to start
+ * over, or unplug occurred so the FDT is no longer valid)
+ */
+ if (state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ g_free(drc->ccs);
+ drc->ccs = NULL;
+ }
if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
/* cannot unisolate a non-existent resource, and, or resources
@@ -79,7 +83,7 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
* If the LMB being removed doesn't belong to a DIMM device that is
* actually being unplugged, fail the isolation request here.
*/
- if (drc->type == SPAPR_DR_CONNECTOR_TYPE_LMB) {
+ if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB) {
if ((state == SPAPR_DR_ISOLATION_STATE_ISOLATED) &&
!drc->awaiting_release) {
return RTAS_OUT_HW_ERROR;
@@ -98,11 +102,12 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
* PAPR+ 2.7, 13.4
*/
if (drc->awaiting_release) {
+ uint32_t drc_index = spapr_drc_index(drc);
if (drc->configured) {
- trace_spapr_drc_set_isolation_state_finalizing(get_index(drc));
+ trace_spapr_drc_set_isolation_state_finalizing(drc_index);
drck->detach(drc, DEVICE(drc->dev), NULL);
} else {
- trace_spapr_drc_set_isolation_state_deferring(get_index(drc));
+ trace_spapr_drc_set_isolation_state_deferring(drc_index);
}
}
drc->configured = false;
@@ -114,7 +119,7 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
static uint32_t set_indicator_state(sPAPRDRConnector *drc,
sPAPRDRIndicatorState state)
{
- trace_spapr_drc_set_indicator_state(get_index(drc), state);
+ trace_spapr_drc_set_indicator_state(spapr_drc_index(drc), state);
drc->indicator_state = state;
return RTAS_OUT_SUCCESS;
}
@@ -124,7 +129,7 @@ static uint32_t set_allocation_state(sPAPRDRConnector *drc,
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- trace_spapr_drc_set_allocation_state(get_index(drc), state);
+ trace_spapr_drc_set_allocation_state(spapr_drc_index(drc), state);
if (state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
/* if there's no resource/device associated with the DRC, there's
@@ -148,11 +153,12 @@ static uint32_t set_allocation_state(sPAPRDRConnector *drc,
}
}
- if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = state;
if (drc->awaiting_release &&
drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
- trace_spapr_drc_set_allocation_state_finalizing(get_index(drc));
+ uint32_t drc_index = spapr_drc_index(drc);
+ trace_spapr_drc_set_allocation_state_finalizing(drc_index);
drck->detach(drc, DEVICE(drc->dev), NULL);
} else if (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
drc->awaiting_allocation = false;
@@ -161,36 +167,11 @@ static uint32_t set_allocation_state(sPAPRDRConnector *drc,
return RTAS_OUT_SUCCESS;
}
-static uint32_t get_type(sPAPRDRConnector *drc)
-{
- return drc->type;
-}
-
static const char *get_name(sPAPRDRConnector *drc)
{
return drc->name;
}
-static const void *get_fdt(sPAPRDRConnector *drc, int *fdt_start_offset)
-{
- if (fdt_start_offset) {
- *fdt_start_offset = drc->fdt_start_offset;
- }
- return drc->fdt;
-}
-
-static void set_configured(sPAPRDRConnector *drc)
-{
- trace_spapr_drc_set_configured(get_index(drc));
-
- if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
- /* guest should be not configuring an isolated device */
- trace_spapr_drc_set_configured_skipping(get_index(drc));
- return;
- }
- drc->configured = true;
-}
-
/* has the guest been notified of device attachment? */
static void set_signalled(sPAPRDRConnector *drc)
{
@@ -207,7 +188,7 @@ static void set_signalled(sPAPRDRConnector *drc)
static uint32_t entity_sense(sPAPRDRConnector *drc, sPAPRDREntitySense *state)
{
if (drc->dev) {
- if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI &&
drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
/* for logical DR, we return a state of UNUSABLE
* iff the allocation state UNUSABLE.
@@ -225,7 +206,7 @@ static uint32_t entity_sense(sPAPRDRConnector *drc, sPAPRDREntitySense *state)
*state = SPAPR_DR_ENTITY_SENSE_PRESENT;
}
} else {
- if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) {
/* PCI devices, and only PCI devices, use EMPTY
* in cases where we'd otherwise use UNUSABLE
*/
@@ -235,7 +216,7 @@ static uint32_t entity_sense(sPAPRDRConnector *drc, sPAPRDREntitySense *state)
}
}
- trace_spapr_drc_entity_sense(get_index(drc), *state);
+ trace_spapr_drc_entity_sense(spapr_drc_index(drc), *state);
return RTAS_OUT_SUCCESS;
}
@@ -243,17 +224,7 @@ static void prop_get_index(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- uint32_t value = (uint32_t)drck->get_index(drc);
- visit_type_uint32(v, name, &value, errp);
-}
-
-static void prop_get_type(Object *obj, Visitor *v, const char *name,
- void *opaque, Error **errp)
-{
- sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- uint32_t value = (uint32_t)drck->get_type(drc);
+ uint32_t value = spapr_drc_index(drc);
visit_type_uint32(v, name, &value, errp);
}
@@ -264,17 +235,6 @@ static char *prop_get_name(Object *obj, Error **errp)
return g_strdup(drck->get_name(drc));
}
-static void prop_get_entity_sense(Object *obj, Visitor *v, const char *name,
- void *opaque, Error **errp)
-{
- sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- uint32_t value;
-
- drck->entity_sense(drc, &value);
- visit_type_uint32(v, name, &value, errp);
-}
-
static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@@ -354,13 +314,13 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
int fdt_start_offset, bool coldplug, Error **errp)
{
- trace_spapr_drc_attach(get_index(drc));
+ trace_spapr_drc_attach(spapr_drc_index(drc));
if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
error_setg(errp, "an attached device is still awaiting release");
return;
}
- if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) {
g_assert(drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE);
}
g_assert(fdt || coldplug);
@@ -372,7 +332,7 @@ static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
* may be accessing the device, we can easily crash the guest, so we
* we defer completion of removal in such cases to the reset() hook.
*/
- if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;
}
drc->indicator_state = SPAPR_DR_INDICATOR_STATE_ACTIVE;
@@ -390,10 +350,10 @@ static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
* 'physical' DR resources such as PCI where each device/resource is
* signalled individually.
*/
- drc->signalled = (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI)
+ drc->signalled = (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI)
? true : coldplug;
- if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->awaiting_allocation = true;
}
@@ -405,7 +365,7 @@ static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
static void detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp)
{
- trace_spapr_drc_detach(get_index(drc));
+ trace_spapr_drc_detach(spapr_drc_index(drc));
/* if we've signalled device presence to the guest, or if the guest
* has gone ahead and configured the device (via manually-executed
@@ -428,14 +388,14 @@ static void detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp)
}
if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
- trace_spapr_drc_awaiting_isolated(get_index(drc));
+ trace_spapr_drc_awaiting_isolated(spapr_drc_index(drc));
drc->awaiting_release = true;
return;
}
- if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI &&
drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
- trace_spapr_drc_awaiting_unusable(get_index(drc));
+ trace_spapr_drc_awaiting_unusable(spapr_drc_index(drc));
drc->awaiting_release = true;
return;
}
@@ -443,15 +403,15 @@ static void detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp)
if (drc->awaiting_allocation) {
if (!drc->awaiting_allocation_skippable) {
drc->awaiting_release = true;
- trace_spapr_drc_awaiting_allocation(get_index(drc));
+ trace_spapr_drc_awaiting_allocation(spapr_drc_index(drc));
return;
}
}
drc->indicator_state = SPAPR_DR_INDICATOR_STATE_INACTIVE;
- /* Calling release callbacks based on drc->type. */
- switch (drc->type) {
+ /* Calling release callbacks based on spapr_drc_type(drc). */
+ switch (spapr_drc_type(drc)) {
case SPAPR_DR_CONNECTOR_TYPE_CPU:
spapr_core_release(drc->dev);
break;
@@ -487,7 +447,11 @@ static void reset(DeviceState *d)
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
sPAPRDREntitySense state;
- trace_spapr_drc_reset(drck->get_index(drc));
+ trace_spapr_drc_reset(spapr_drc_index(drc));
+
+ g_free(drc->ccs);
+ drc->ccs = NULL;
+
/* immediately upon reset we can safely assume DRCs whose devices
* are pending removal can be safely removed, and that they will
* subsequently be left in an ISOLATED state. move the DRC to this
@@ -507,7 +471,7 @@ static void reset(DeviceState *d)
}
/* non-PCI devices may be awaiting a transition to UNUSABLE */
- if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI &&
drc->awaiting_release) {
drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_UNUSABLE);
}
@@ -536,22 +500,18 @@ static bool spapr_drc_needed(void *opaque)
* If there is dev plugged in, we need to migrate the DRC state when
* it is different from cold-plugged state
*/
- switch (drc->type) {
+ switch (spapr_drc_type(drc)) {
case SPAPR_DR_CONNECTOR_TYPE_PCI:
- rc = !((drc->isolation_state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) &&
- (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) &&
- drc->configured && drc->signalled && !drc->awaiting_release);
- break;
case SPAPR_DR_CONNECTOR_TYPE_CPU:
case SPAPR_DR_CONNECTOR_TYPE_LMB:
- rc = !((drc->isolation_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) &&
- (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) &&
+ rc = !((drc->isolation_state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) &&
+ (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) &&
drc->configured && drc->signalled && !drc->awaiting_release);
break;
case SPAPR_DR_CONNECTOR_TYPE_PHB:
case SPAPR_DR_CONNECTOR_TYPE_VIO:
default:
- g_assert(false);
+ g_assert_not_reached();
}
return rc;
}
@@ -576,13 +536,12 @@ static const VMStateDescription vmstate_spapr_drc = {
static void realize(DeviceState *d, Error **errp)
{
sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
Object *root_container;
char link_name[256];
gchar *child_name;
Error *err = NULL;
- trace_spapr_drc_realize(drck->get_index(drc));
+ trace_spapr_drc_realize(spapr_drc_index(drc));
/* NOTE: we do this as part of realize/unrealize due to the fact
* that the guest will communicate with the DRC via RTAS calls
* referencing the global DRC index. By unlinking the DRC
@@ -591,9 +550,9 @@ static void realize(DeviceState *d, Error **errp)
* existing in the composition tree
*/
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
- snprintf(link_name, sizeof(link_name), "%x", drck->get_index(drc));
+ snprintf(link_name, sizeof(link_name), "%x", spapr_drc_index(drc));
child_name = object_get_canonical_path_component(OBJECT(drc));
- trace_spapr_drc_realize_child(drck->get_index(drc), child_name);
+ trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name);
object_property_add_alias(root_container, link_name,
drc->owner, child_name, &err);
if (err) {
@@ -601,22 +560,21 @@ static void realize(DeviceState *d, Error **errp)
object_unref(OBJECT(drc));
}
g_free(child_name);
- vmstate_register(DEVICE(drc), drck->get_index(drc), &vmstate_spapr_drc,
+ vmstate_register(DEVICE(drc), spapr_drc_index(drc), &vmstate_spapr_drc,
drc);
- trace_spapr_drc_realize_complete(drck->get_index(drc));
+ trace_spapr_drc_realize_complete(spapr_drc_index(drc));
}
static void unrealize(DeviceState *d, Error **errp)
{
sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
Object *root_container;
char name[256];
Error *err = NULL;
- trace_spapr_drc_unrealize(drck->get_index(drc));
+ trace_spapr_drc_unrealize(spapr_drc_index(drc));
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
- snprintf(name, sizeof(name), "%x", drck->get_index(drc));
+ snprintf(name, sizeof(name), "%x", spapr_drc_index(drc));
object_property_del(root_container, name, &err);
if (err) {
error_report_err(err);
@@ -624,20 +582,16 @@ static void unrealize(DeviceState *d, Error **errp)
}
}
-sPAPRDRConnector *spapr_dr_connector_new(Object *owner,
- sPAPRDRConnectorType type,
+sPAPRDRConnector *spapr_dr_connector_new(Object *owner, const char *type,
uint32_t id)
{
- sPAPRDRConnector *drc =
- SPAPR_DR_CONNECTOR(object_new(TYPE_SPAPR_DR_CONNECTOR));
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(object_new(type));
char *prop_name;
- g_assert(type);
-
- drc->type = type;
drc->id = id;
drc->owner = owner;
- prop_name = g_strdup_printf("dr-connector[%"PRIu32"]", get_index(drc));
+ prop_name = g_strdup_printf("dr-connector[%"PRIu32"]",
+ spapr_drc_index(drc));
object_property_add_child(owner, prop_name, OBJECT(drc), NULL);
object_property_set_bool(OBJECT(drc), true, "realized", NULL);
g_free(prop_name);
@@ -663,7 +617,7 @@ sPAPRDRConnector *spapr_dr_connector_new(Object *owner,
* DRC names as documented by PAPR+ v2.7, 13.5.2.4
* location codes as documented by PAPR+ v2.7, 12.3.1.5
*/
- switch (drc->type) {
+ switch (spapr_drc_type(drc)) {
case SPAPR_DR_CONNECTOR_TYPE_CPU:
drc->name = g_strdup_printf("CPU %d", id);
break;
@@ -682,7 +636,7 @@ sPAPRDRConnector *spapr_dr_connector_new(Object *owner,
}
/* PCI slot always start in a USABLE state, and stay there */
- if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
}
@@ -693,20 +647,10 @@ static void spapr_dr_connector_instance_init(Object *obj)
{
sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
- object_property_add_uint32_ptr(obj, "isolation-state",
- &drc->isolation_state, NULL);
- object_property_add_uint32_ptr(obj, "indicator-state",
- &drc->indicator_state, NULL);
- object_property_add_uint32_ptr(obj, "allocation-state",
- &drc->allocation_state, NULL);
object_property_add_uint32_ptr(obj, "id", &drc->id, NULL);
object_property_add(obj, "index", "uint32", prop_get_index,
NULL, NULL, NULL, NULL);
- object_property_add(obj, "connector_type", "uint32", prop_get_type,
- NULL, NULL, NULL, NULL);
object_property_add_str(obj, "name", prop_get_name, NULL, NULL);
- object_property_add(obj, "entity-sense", "uint32", prop_get_entity_sense,
- NULL, NULL, NULL, NULL);
object_property_add(obj, "fdt", "struct", prop_get_fdt,
NULL, NULL, NULL, NULL);
}
@@ -722,11 +666,7 @@ static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
drck->set_isolation_state = set_isolation_state;
drck->set_indicator_state = set_indicator_state;
drck->set_allocation_state = set_allocation_state;
- drck->get_index = get_index;
- drck->get_type = get_type;
drck->get_name = get_name;
- drck->get_fdt = get_fdt;
- drck->set_configured = set_configured;
drck->entity_sense = entity_sense;
drck->attach = attach;
drck->detach = detach;
@@ -738,6 +678,30 @@ static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
dk->user_creatable = false;
}
+static void spapr_drc_cpu_class_init(ObjectClass *k, void *data)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
+
+ drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_CPU;
+ drck->typename = "CPU";
+}
+
+static void spapr_drc_pci_class_init(ObjectClass *k, void *data)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
+
+ drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI;
+ drck->typename = "28";
+}
+
+static void spapr_drc_lmb_class_init(ObjectClass *k, void *data)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
+
+ drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_LMB;
+ drck->typename = "MEM";
+}
+
static const TypeInfo spapr_dr_connector_info = {
.name = TYPE_SPAPR_DR_CONNECTOR,
.parent = TYPE_DEVICE,
@@ -745,18 +709,47 @@ static const TypeInfo spapr_dr_connector_info = {
.instance_init = spapr_dr_connector_instance_init,
.class_size = sizeof(sPAPRDRConnectorClass),
.class_init = spapr_dr_connector_class_init,
+ .abstract = true,
};
-static void spapr_drc_register_types(void)
-{
- type_register_static(&spapr_dr_connector_info);
-}
+static const TypeInfo spapr_drc_physical_info = {
+ .name = TYPE_SPAPR_DRC_PHYSICAL,
+ .parent = TYPE_SPAPR_DR_CONNECTOR,
+ .instance_size = sizeof(sPAPRDRConnector),
+ .abstract = true,
+};
-type_init(spapr_drc_register_types)
+static const TypeInfo spapr_drc_logical_info = {
+ .name = TYPE_SPAPR_DRC_LOGICAL,
+ .parent = TYPE_SPAPR_DR_CONNECTOR,
+ .instance_size = sizeof(sPAPRDRConnector),
+ .abstract = true,
+};
+
+static const TypeInfo spapr_drc_cpu_info = {
+ .name = TYPE_SPAPR_DRC_CPU,
+ .parent = TYPE_SPAPR_DRC_LOGICAL,
+ .instance_size = sizeof(sPAPRDRConnector),
+ .class_init = spapr_drc_cpu_class_init,
+};
+
+static const TypeInfo spapr_drc_pci_info = {
+ .name = TYPE_SPAPR_DRC_PCI,
+ .parent = TYPE_SPAPR_DRC_PHYSICAL,
+ .instance_size = sizeof(sPAPRDRConnector),
+ .class_init = spapr_drc_pci_class_init,
+};
+
+static const TypeInfo spapr_drc_lmb_info = {
+ .name = TYPE_SPAPR_DRC_LMB,
+ .parent = TYPE_SPAPR_DRC_LOGICAL,
+ .instance_size = sizeof(sPAPRDRConnector),
+ .class_init = spapr_drc_lmb_class_init,
+};
/* helper functions for external users */
-sPAPRDRConnector *spapr_dr_connector_by_index(uint32_t index)
+sPAPRDRConnector *spapr_drc_by_index(uint32_t index)
{
Object *obj;
char name[256];
@@ -767,37 +760,13 @@ sPAPRDRConnector *spapr_dr_connector_by_index(uint32_t index)
return !obj ? NULL : SPAPR_DR_CONNECTOR(obj);
}
-sPAPRDRConnector *spapr_dr_connector_by_id(sPAPRDRConnectorType type,
- uint32_t id)
+sPAPRDRConnector *spapr_drc_by_id(const char *type, uint32_t id)
{
- return spapr_dr_connector_by_index(
- (get_type_shift(type) << DRC_INDEX_TYPE_SHIFT) |
- (id & DRC_INDEX_ID_MASK));
-}
-
-/* generate a string the describes the DRC to encode into the
- * device tree.
- *
- * as documented by PAPR+ v2.7, 13.5.2.6 and C.6.1
- */
-static const char *spapr_drc_get_type_str(sPAPRDRConnectorType type)
-{
- switch (type) {
- case SPAPR_DR_CONNECTOR_TYPE_CPU:
- return "CPU";
- case SPAPR_DR_CONNECTOR_TYPE_PHB:
- return "PHB";
- case SPAPR_DR_CONNECTOR_TYPE_VIO:
- return "SLOT";
- case SPAPR_DR_CONNECTOR_TYPE_PCI:
- return "28";
- case SPAPR_DR_CONNECTOR_TYPE_LMB:
- return "MEM";
- default:
- g_assert(false);
- }
+ sPAPRDRConnectorClass *drck
+ = SPAPR_DR_CONNECTOR_CLASS(object_class_by_name(type));
- return NULL;
+ return spapr_drc_by_index(drck->typeshift << DRC_INDEX_TYPE_SHIFT
+ | (id & DRC_INDEX_ID_MASK));
}
/**
@@ -862,14 +831,14 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
continue;
}
- if ((drc->type & drc_type_mask) == 0) {
+ if ((spapr_drc_type(drc) & drc_type_mask) == 0) {
continue;
}
drc_count++;
/* ibm,drc-indexes */
- drc_index = cpu_to_be32(drck->get_index(drc));
+ drc_index = cpu_to_be32(spapr_drc_index(drc));
g_array_append_val(drc_indexes, drc_index);
/* ibm,drc-power-domains */
@@ -881,8 +850,7 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
drc_names = g_string_insert_len(drc_names, -1, "\0", 1);
/* ibm,drc-types */
- drc_types = g_string_append(drc_types,
- spapr_drc_get_type_str(drc->type));
+ drc_types = g_string_append(drc_types, drck->typename);
drc_types = g_string_insert_len(drc_types, -1, "\0", 1);
}
@@ -932,3 +900,276 @@ out:
return ret;
}
+
+/*
+ * RTAS calls
+ */
+
+static bool sensor_type_is_dr(uint32_t sensor_type)
+{
+ switch (sensor_type) {
+ case RTAS_SENSOR_TYPE_ISOLATION_STATE:
+ case RTAS_SENSOR_TYPE_DR:
+ case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
+ return true;
+ }
+
+ return false;
+}
+
+static void rtas_set_indicator(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t sensor_type;
+ uint32_t sensor_index;
+ uint32_t sensor_state;
+ uint32_t ret = RTAS_OUT_SUCCESS;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+
+ if (nargs != 3 || nret != 1) {
+ ret = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+
+ sensor_type = rtas_ld(args, 0);
+ sensor_index = rtas_ld(args, 1);
+ sensor_state = rtas_ld(args, 2);
+
+ if (!sensor_type_is_dr(sensor_type)) {
+ goto out_unimplemented;
+ }
+
+ /* if this is a DR sensor we can assume sensor_index == drc_index */
+ drc = spapr_drc_by_index(sensor_index);
+ if (!drc) {
+ trace_spapr_rtas_set_indicator_invalid(sensor_index);
+ ret = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ switch (sensor_type) {
+ case RTAS_SENSOR_TYPE_ISOLATION_STATE:
+ ret = drck->set_isolation_state(drc, sensor_state);
+ break;
+ case RTAS_SENSOR_TYPE_DR:
+ ret = drck->set_indicator_state(drc, sensor_state);
+ break;
+ case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
+ ret = drck->set_allocation_state(drc, sensor_state);
+ break;
+ default:
+ goto out_unimplemented;
+ }
+
+out:
+ rtas_st(rets, 0, ret);
+ return;
+
+out_unimplemented:
+ /* currently only DR-related sensors are implemented */
+ trace_spapr_rtas_set_indicator_not_supported(sensor_index, sensor_type);
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+}
+
+static void rtas_get_sensor_state(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t sensor_type;
+ uint32_t sensor_index;
+ uint32_t sensor_state = 0;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t ret = RTAS_OUT_SUCCESS;
+
+ if (nargs != 2 || nret != 2) {
+ ret = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+
+ sensor_type = rtas_ld(args, 0);
+ sensor_index = rtas_ld(args, 1);
+
+ if (sensor_type != RTAS_SENSOR_TYPE_ENTITY_SENSE) {
+ /* currently only DR-related sensors are implemented */
+ trace_spapr_rtas_get_sensor_state_not_supported(sensor_index,
+ sensor_type);
+ ret = RTAS_OUT_NOT_SUPPORTED;
+ goto out;
+ }
+
+ drc = spapr_drc_by_index(sensor_index);
+ if (!drc) {
+ trace_spapr_rtas_get_sensor_state_invalid(sensor_index);
+ ret = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ ret = drck->entity_sense(drc, &sensor_state);
+
+out:
+ rtas_st(rets, 0, ret);
+ rtas_st(rets, 1, sensor_state);
+}
+
+/* configure-connector work area offsets, int32_t units for field
+ * indexes, bytes for field offset/len values.
+ *
+ * as documented by PAPR+ v2.7, 13.5.3.5
+ */
+#define CC_IDX_NODE_NAME_OFFSET 2
+#define CC_IDX_PROP_NAME_OFFSET 2
+#define CC_IDX_PROP_LEN 3
+#define CC_IDX_PROP_DATA_OFFSET 4
+#define CC_VAL_DATA_OFFSET ((CC_IDX_PROP_DATA_OFFSET + 1) * 4)
+#define CC_WA_LEN 4096
+
+static void configure_connector_st(target_ulong addr, target_ulong offset,
+ const void *buf, size_t len)
+{
+ cpu_physical_memory_write(ppc64_phys_to_real(addr + offset),
+ buf, MIN(len, CC_WA_LEN - offset));
+}
+
+static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint64_t wa_addr;
+ uint64_t wa_offset;
+ uint32_t drc_index;
+ sPAPRDRConnector *drc;
+ sPAPRConfigureConnectorState *ccs;
+ sPAPRDRCCResponse resp = SPAPR_DR_CC_RESPONSE_CONTINUE;
+ int rc;
+
+ if (nargs != 2 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ wa_addr = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 0);
+
+ drc_index = rtas_ld(wa_addr, 0);
+ drc = spapr_drc_by_index(drc_index);
+ if (!drc) {
+ trace_spapr_rtas_ibm_configure_connector_invalid(drc_index);
+ rc = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+
+ if (!drc->fdt) {
+ trace_spapr_rtas_ibm_configure_connector_missing_fdt(drc_index);
+ rc = SPAPR_DR_CC_RESPONSE_NOT_CONFIGURABLE;
+ goto out;
+ }
+
+ ccs = drc->ccs;
+ if (!ccs) {
+ ccs = g_new0(sPAPRConfigureConnectorState, 1);
+ ccs->fdt_offset = drc->fdt_start_offset;
+ drc->ccs = ccs;
+ }
+
+ do {
+ uint32_t tag;
+ const char *name;
+ const struct fdt_property *prop;
+ int fdt_offset_next, prop_len;
+
+ tag = fdt_next_tag(drc->fdt, ccs->fdt_offset, &fdt_offset_next);
+
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ ccs->fdt_depth++;
+ name = fdt_get_name(drc->fdt, ccs->fdt_offset, NULL);
+
+ /* provide the name of the next OF node */
+ wa_offset = CC_VAL_DATA_OFFSET;
+ rtas_st(wa_addr, CC_IDX_NODE_NAME_OFFSET, wa_offset);
+ configure_connector_st(wa_addr, wa_offset, name, strlen(name) + 1);
+ resp = SPAPR_DR_CC_RESPONSE_NEXT_CHILD;
+ break;
+ case FDT_END_NODE:
+ ccs->fdt_depth--;
+ if (ccs->fdt_depth == 0) {
+ sPAPRDRIsolationState state = drc->isolation_state;
+ uint32_t drc_index = spapr_drc_index(drc);
+ /* done sending the device tree, don't need to track
+ * the state anymore
+ */
+ trace_spapr_drc_set_configured(drc_index);
+ if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
+ drc->configured = true;
+ } else {
+ /* guest should be not configuring an isolated device */
+ trace_spapr_drc_set_configured_skipping(drc_index);
+ }
+ g_free(ccs);
+ drc->ccs = NULL;
+ ccs = NULL;
+ resp = SPAPR_DR_CC_RESPONSE_SUCCESS;
+ } else {
+ resp = SPAPR_DR_CC_RESPONSE_PREV_PARENT;
+ }
+ break;
+ case FDT_PROP:
+ prop = fdt_get_property_by_offset(drc->fdt, ccs->fdt_offset,
+ &prop_len);
+ name = fdt_string(drc->fdt, fdt32_to_cpu(prop->nameoff));
+
+ /* provide the name of the next OF property */
+ wa_offset = CC_VAL_DATA_OFFSET;
+ rtas_st(wa_addr, CC_IDX_PROP_NAME_OFFSET, wa_offset);
+ configure_connector_st(wa_addr, wa_offset, name, strlen(name) + 1);
+
+ /* provide the length and value of the OF property. data gets
+ * placed immediately after NULL terminator of the OF property's
+ * name string
+ */
+ wa_offset += strlen(name) + 1,
+ rtas_st(wa_addr, CC_IDX_PROP_LEN, prop_len);
+ rtas_st(wa_addr, CC_IDX_PROP_DATA_OFFSET, wa_offset);
+ configure_connector_st(wa_addr, wa_offset, prop->data, prop_len);
+ resp = SPAPR_DR_CC_RESPONSE_NEXT_PROPERTY;
+ break;
+ case FDT_END:
+ resp = SPAPR_DR_CC_RESPONSE_ERROR;
+ default:
+ /* keep seeking for an actionable tag */
+ break;
+ }
+ if (ccs) {
+ ccs->fdt_offset = fdt_offset_next;
+ }
+ } while (resp == SPAPR_DR_CC_RESPONSE_CONTINUE);
+
+ rc = resp;
+out:
+ rtas_st(rets, 0, rc);
+}
+
+static void spapr_drc_register_types(void)
+{
+ type_register_static(&spapr_dr_connector_info);
+ type_register_static(&spapr_drc_physical_info);
+ type_register_static(&spapr_drc_logical_info);
+ type_register_static(&spapr_drc_cpu_info);
+ type_register_static(&spapr_drc_pci_info);
+ type_register_static(&spapr_drc_lmb_info);
+
+ spapr_rtas_register(RTAS_SET_INDICATOR, "set-indicator",
+ rtas_set_indicator);
+ spapr_rtas_register(RTAS_GET_SENSOR_STATE, "get-sensor-state",
+ rtas_get_sensor_state);
+ spapr_rtas_register(RTAS_IBM_CONFIGURE_CONNECTOR, "ibm,configure-connector",
+ rtas_ibm_configure_connector);
+}
+type_init(spapr_drc_register_types)
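
[Editor's note: as a side note on the DRC refactoring above, spapr_drc_index() packs the per-class type shift into the top four bits and the connector id into the low 28 bits, and spapr_drc_by_id() reverses that to look a connector up. A minimal standalone sketch of the encoding; the concrete typeshift value used below is an assumption taken from include/hw/ppc/spapr_drc.h, which is not shown in this diff:]

#include <stdio.h>
#include <stdint.h>

#define DRC_INDEX_TYPE_SHIFT 28
#define DRC_INDEX_ID_MASK    ((1U << DRC_INDEX_TYPE_SHIFT) - 1)

/* Mirror of the spapr_drc_index() packing: type shift in the top nibble,
 * connector id in the low 28 bits.
 */
static uint32_t drc_index(uint32_t typeshift, uint32_t id)
{
    return (typeshift << DRC_INDEX_TYPE_SHIFT) | (id & DRC_INDEX_ID_MASK);
}

int main(void)
{
    /* e.g. a CPU connector (typeshift assumed to be 1) with id 8 */
    printf("drc index = 0x%08x\n", drc_index(1, 8));   /* 0x10000008 */
    return 0;
}
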
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index 57acd85a87..171aedc7e0 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -477,7 +477,7 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
static void spapr_hotplug_set_signalled(uint32_t drc_index)
{
- sPAPRDRConnector *drc = spapr_dr_connector_by_index(drc_index);
+ sPAPRDRConnector *drc = spapr_drc_by_index(drc_index);
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
drck->set_signalled(drc);
}
@@ -570,22 +570,20 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc)
{
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- sPAPRDRConnectorType drc_type = drck->get_type(drc);
+ sPAPRDRConnectorType drc_type = spapr_drc_type(drc);
union drc_identifier drc_id;
- drc_id.index = drck->get_index(drc);
+ drc_id.index = spapr_drc_index(drc);
spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}
void spapr_hotplug_req_remove_by_index(sPAPRDRConnector *drc)
{
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- sPAPRDRConnectorType drc_type = drck->get_type(drc);
+ sPAPRDRConnectorType drc_type = spapr_drc_type(drc);
union drc_identifier drc_id;
- drc_id.index = drck->get_index(drc);
+ drc_id.index = spapr_drc_index(drc);
spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index aae5a62a61..aa1ffea9e5 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -992,9 +992,10 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu,
/* Update the UPRT and GTSE bits in the LPCR for all cpus */
CPU_FOREACH(cs) {
- set_spr(cs, SPR_LPCR, LPCR_UPRT | LPCR_GTSE,
+ set_spr(cs, SPR_LPCR,
((flags & (FLAG_RADIX | FLAG_HASH_PROC_TBL)) ? LPCR_UPRT : 0) |
- ((flags & FLAG_GTSE) ? LPCR_GTSE : 0));
+ ((flags & FLAG_GTSE) ? LPCR_GTSE : 0),
+ LPCR_UPRT | LPCR_GTSE);
}
if (kvm_enabled()) {
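
[Editor's note: the spapr_hcall.c hunk above fixes an argument-order bug: as now called, set_spr() gets the new bit values third and the mask of LPCR bits to touch fourth, whereas the old call passed the mask where the value belonged. Assuming set_spr() applies the usual masked update, this standalone sketch shows the semantics the fixed call relies on; the bit positions are illustrative, not the real LPCR layout:]

#include <stdio.h>
#include <inttypes.h>

/* Update only the bits selected by 'mask', taking them from 'value'. */
static uint64_t update_masked(uint64_t old, uint64_t value, uint64_t mask)
{
    return (old & ~mask) | (value & mask);
}

int main(void)
{
    const uint64_t UPRT = 1ULL << 1, GTSE = 1ULL << 0;  /* illustrative bits */
    uint64_t lpcr = GTSE;                               /* GTSE currently set */

    /* request: UPRT on, GTSE off, leave every other bit alone */
    lpcr = update_masked(lpcr, UPRT, UPRT | GTSE);
    printf("lpcr = 0x%" PRIx64 "\n", lpcr);             /* -> 0x2 */
    return 0;
}
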
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index e4daf8d5f1..0c181bbca5 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -1400,10 +1400,8 @@ static sPAPRDRConnector *spapr_phb_get_pci_func_drc(sPAPRPHBState *phb,
uint32_t busnr,
int32_t devfn)
{
- return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
- (phb->index << 16) |
- (busnr << 8) |
- devfn);
+ return spapr_drc_by_id(TYPE_SPAPR_DRC_PCI,
+ (phb->index << 16) | (busnr << 8) | devfn);
}
static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
@@ -1417,14 +1415,12 @@ static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
PCIDevice *pdev)
{
sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
- sPAPRDRConnectorClass *drck;
if (!drc) {
return 0;
}
- drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- return drck->get_index(drc);
+ return spapr_drc_index(drc);
}
static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
@@ -1763,8 +1759,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
/* allocate connectors for child PCI devices */
if (sphb->dr_enabled) {
for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
- spapr_dr_connector_new(OBJECT(phb),
- SPAPR_DR_CONNECTOR_TYPE_PCI,
+ spapr_dr_connector_new(OBJECT(phb), TYPE_SPAPR_DRC_PCI,
(sphb->index << 16) | i);
}
}
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index b666a4c15c..707c4d4936 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -47,44 +47,6 @@
#include "trace.h"
#include "hw/ppc/fdt.h"
-static sPAPRConfigureConnectorState *spapr_ccs_find(sPAPRMachineState *spapr,
- uint32_t drc_index)
-{
- sPAPRConfigureConnectorState *ccs = NULL;
-
- QTAILQ_FOREACH(ccs, &spapr->ccs_list, next) {
- if (ccs->drc_index == drc_index) {
- break;
- }
- }
-
- return ccs;
-}
-
-static void spapr_ccs_add(sPAPRMachineState *spapr,
- sPAPRConfigureConnectorState *ccs)
-{
- g_assert(!spapr_ccs_find(spapr, ccs->drc_index));
- QTAILQ_INSERT_HEAD(&spapr->ccs_list, ccs, next);
-}
-
-static void spapr_ccs_remove(sPAPRMachineState *spapr,
- sPAPRConfigureConnectorState *ccs)
-{
- QTAILQ_REMOVE(&spapr->ccs_list, ccs, next);
- g_free(ccs);
-}
-
-void spapr_ccs_reset_hook(void *opaque)
-{
- sPAPRMachineState *spapr = opaque;
- sPAPRConfigureConnectorState *ccs, *ccs_tmp;
-
- QTAILQ_FOREACH_SAFE(ccs, &spapr->ccs_list, next, ccs_tmp) {
- spapr_ccs_remove(spapr, ccs);
- }
-}
-
static void rtas_display_character(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint32_t token, uint32_t nargs,
target_ulong args,
@@ -389,266 +351,6 @@ static void rtas_get_power_level(PowerPCCPU *cpu, sPAPRMachineState *spapr,
rtas_st(rets, 1, 100);
}
-static bool sensor_type_is_dr(uint32_t sensor_type)
-{
- switch (sensor_type) {
- case RTAS_SENSOR_TYPE_ISOLATION_STATE:
- case RTAS_SENSOR_TYPE_DR:
- case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
- return true;
- }
-
- return false;
-}
-
-static void rtas_set_indicator(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- uint32_t token, uint32_t nargs,
- target_ulong args, uint32_t nret,
- target_ulong rets)
-{
- uint32_t sensor_type;
- uint32_t sensor_index;
- uint32_t sensor_state;
- uint32_t ret = RTAS_OUT_SUCCESS;
- sPAPRDRConnector *drc;
- sPAPRDRConnectorClass *drck;
-
- if (nargs != 3 || nret != 1) {
- ret = RTAS_OUT_PARAM_ERROR;
- goto out;
- }
-
- sensor_type = rtas_ld(args, 0);
- sensor_index = rtas_ld(args, 1);
- sensor_state = rtas_ld(args, 2);
-
- if (!sensor_type_is_dr(sensor_type)) {
- goto out_unimplemented;
- }
-
- /* if this is a DR sensor we can assume sensor_index == drc_index */
- drc = spapr_dr_connector_by_index(sensor_index);
- if (!drc) {
- trace_spapr_rtas_set_indicator_invalid(sensor_index);
- ret = RTAS_OUT_PARAM_ERROR;
- goto out;
- }
- drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
-
- switch (sensor_type) {
- case RTAS_SENSOR_TYPE_ISOLATION_STATE:
- /* if the guest is configuring a device attached to this
- * DRC, we should reset the configuration state at this
- * point since it may no longer be reliable (guest released
- * device and needs to start over, or unplug occurred so
- * the FDT is no longer valid)
- */
- if (sensor_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
- sPAPRConfigureConnectorState *ccs = spapr_ccs_find(spapr,
- sensor_index);
- if (ccs) {
- spapr_ccs_remove(spapr, ccs);
- }
- }
- ret = drck->set_isolation_state(drc, sensor_state);
- break;
- case RTAS_SENSOR_TYPE_DR:
- ret = drck->set_indicator_state(drc, sensor_state);
- break;
- case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
- ret = drck->set_allocation_state(drc, sensor_state);
- break;
- default:
- goto out_unimplemented;
- }
-
-out:
- rtas_st(rets, 0, ret);
- return;
-
-out_unimplemented:
- /* currently only DR-related sensors are implemented */
- trace_spapr_rtas_set_indicator_not_supported(sensor_index, sensor_type);
- rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
-}
-
-static void rtas_get_sensor_state(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- uint32_t token, uint32_t nargs,
- target_ulong args, uint32_t nret,
- target_ulong rets)
-{
- uint32_t sensor_type;
- uint32_t sensor_index;
- uint32_t sensor_state = 0;
- sPAPRDRConnector *drc;
- sPAPRDRConnectorClass *drck;
- uint32_t ret = RTAS_OUT_SUCCESS;
-
- if (nargs != 2 || nret != 2) {
- ret = RTAS_OUT_PARAM_ERROR;
- goto out;
- }
-
- sensor_type = rtas_ld(args, 0);
- sensor_index = rtas_ld(args, 1);
-
- if (sensor_type != RTAS_SENSOR_TYPE_ENTITY_SENSE) {
- /* currently only DR-related sensors are implemented */
- trace_spapr_rtas_get_sensor_state_not_supported(sensor_index,
- sensor_type);
- ret = RTAS_OUT_NOT_SUPPORTED;
- goto out;
- }
-
- drc = spapr_dr_connector_by_index(sensor_index);
- if (!drc) {
- trace_spapr_rtas_get_sensor_state_invalid(sensor_index);
- ret = RTAS_OUT_PARAM_ERROR;
- goto out;
- }
- drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- ret = drck->entity_sense(drc, &sensor_state);
-
-out:
- rtas_st(rets, 0, ret);
- rtas_st(rets, 1, sensor_state);
-}
-
-/* configure-connector work area offsets, int32_t units for field
- * indexes, bytes for field offset/len values.
- *
- * as documented by PAPR+ v2.7, 13.5.3.5
- */
-#define CC_IDX_NODE_NAME_OFFSET 2
-#define CC_IDX_PROP_NAME_OFFSET 2
-#define CC_IDX_PROP_LEN 3
-#define CC_IDX_PROP_DATA_OFFSET 4
-#define CC_VAL_DATA_OFFSET ((CC_IDX_PROP_DATA_OFFSET + 1) * 4)
-#define CC_WA_LEN 4096
-
-static void configure_connector_st(target_ulong addr, target_ulong offset,
- const void *buf, size_t len)
-{
- cpu_physical_memory_write(ppc64_phys_to_real(addr + offset),
- buf, MIN(len, CC_WA_LEN - offset));
-}
-
-static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
- sPAPRMachineState *spapr,
- uint32_t token, uint32_t nargs,
- target_ulong args, uint32_t nret,
- target_ulong rets)
-{
- uint64_t wa_addr;
- uint64_t wa_offset;
- uint32_t drc_index;
- sPAPRDRConnector *drc;
- sPAPRDRConnectorClass *drck;
- sPAPRConfigureConnectorState *ccs;
- sPAPRDRCCResponse resp = SPAPR_DR_CC_RESPONSE_CONTINUE;
- int rc;
- const void *fdt;
-
- if (nargs != 2 || nret != 1) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- wa_addr = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 0);
-
- drc_index = rtas_ld(wa_addr, 0);
- drc = spapr_dr_connector_by_index(drc_index);
- if (!drc) {
- trace_spapr_rtas_ibm_configure_connector_invalid(drc_index);
- rc = RTAS_OUT_PARAM_ERROR;
- goto out;
- }
-
- drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- fdt = drck->get_fdt(drc, NULL);
- if (!fdt) {
- trace_spapr_rtas_ibm_configure_connector_missing_fdt(drc_index);
- rc = SPAPR_DR_CC_RESPONSE_NOT_CONFIGURABLE;
- goto out;
- }
-
- ccs = spapr_ccs_find(spapr, drc_index);
- if (!ccs) {
- ccs = g_new0(sPAPRConfigureConnectorState, 1);
- (void)drck->get_fdt(drc, &ccs->fdt_offset);
- ccs->drc_index = drc_index;
- spapr_ccs_add(spapr, ccs);
- }
-
- do {
- uint32_t tag;
- const char *name;
- const struct fdt_property *prop;
- int fdt_offset_next, prop_len;
-
- tag = fdt_next_tag(fdt, ccs->fdt_offset, &fdt_offset_next);
-
- switch (tag) {
- case FDT_BEGIN_NODE:
- ccs->fdt_depth++;
- name = fdt_get_name(fdt, ccs->fdt_offset, NULL);
-
- /* provide the name of the next OF node */
- wa_offset = CC_VAL_DATA_OFFSET;
- rtas_st(wa_addr, CC_IDX_NODE_NAME_OFFSET, wa_offset);
- configure_connector_st(wa_addr, wa_offset, name, strlen(name) + 1);
- resp = SPAPR_DR_CC_RESPONSE_NEXT_CHILD;
- break;
- case FDT_END_NODE:
- ccs->fdt_depth--;
- if (ccs->fdt_depth == 0) {
- /* done sending the device tree, don't need to track
- * the state anymore
- */
- drck->set_configured(drc);
- spapr_ccs_remove(spapr, ccs);
- ccs = NULL;
- resp = SPAPR_DR_CC_RESPONSE_SUCCESS;
- } else {
- resp = SPAPR_DR_CC_RESPONSE_PREV_PARENT;
- }
- break;
- case FDT_PROP:
- prop = fdt_get_property_by_offset(fdt, ccs->fdt_offset,
- &prop_len);
- name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
-
- /* provide the name of the next OF property */
- wa_offset = CC_VAL_DATA_OFFSET;
- rtas_st(wa_addr, CC_IDX_PROP_NAME_OFFSET, wa_offset);
- configure_connector_st(wa_addr, wa_offset, name, strlen(name) + 1);
-
- /* provide the length and value of the OF property. data gets
- * placed immediately after NULL terminator of the OF property's
- * name string
- */
- wa_offset += strlen(name) + 1,
- rtas_st(wa_addr, CC_IDX_PROP_LEN, prop_len);
- rtas_st(wa_addr, CC_IDX_PROP_DATA_OFFSET, wa_offset);
- configure_connector_st(wa_addr, wa_offset, prop->data, prop_len);
- resp = SPAPR_DR_CC_RESPONSE_NEXT_PROPERTY;
- break;
- case FDT_END:
- resp = SPAPR_DR_CC_RESPONSE_ERROR;
- default:
- /* keep seeking for an actionable tag */
- break;
- }
- if (ccs) {
- ccs->fdt_offset = fdt_offset_next;
- }
- } while (resp == SPAPR_DR_CC_RESPONSE_CONTINUE);
-
- rc = resp;
-out:
- rtas_st(rets, 0, rc);
-}
-
static struct rtas_call {
const char *name;
spapr_rtas_fn fn;
@@ -790,12 +492,6 @@ static void core_rtas_register_types(void)
rtas_set_power_level);
spapr_rtas_register(RTAS_GET_POWER_LEVEL, "get-power-level",
rtas_get_power_level);
- spapr_rtas_register(RTAS_SET_INDICATOR, "set-indicator",
- rtas_set_indicator);
- spapr_rtas_register(RTAS_GET_SENSOR_STATE, "get-sensor-state",
- rtas_get_sensor_state);
- spapr_rtas_register(RTAS_IBM_CONFIGURE_CONNECTOR, "ibm,configure-connector",
- rtas_ibm_configure_connector);
}
type_init(core_rtas_register_types)
diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c
index 619152cc37..35e7f6316f 100644
--- a/hw/s390x/s390-skeys.c
+++ b/hw/s390x/s390-skeys.c
@@ -362,6 +362,11 @@ static inline bool s390_skeys_get_migration_enabled(Object *obj, Error **errp)
return ss->migration_enabled;
}
+static SaveVMHandlers savevm_s390_storage_keys = {
+ .save_state = s390_storage_keys_save,
+ .load_state = s390_storage_keys_load,
+};
+
static inline void s390_skeys_set_migration_enabled(Object *obj, bool value,
Error **errp)
{
@@ -375,8 +380,8 @@ static inline void s390_skeys_set_migration_enabled(Object *obj, bool value,
ss->migration_enabled = value;
if (ss->migration_enabled) {
- register_savevm(NULL, TYPE_S390_SKEYS, 0, 1, s390_storage_keys_save,
- s390_storage_keys_load, ss);
+ register_savevm_live(NULL, TYPE_S390_SKEYS, 0, 1,
+ &savevm_s390_storage_keys, ss);
} else {
unregister_savevm(DEVICE(ss), TYPE_S390_SKEYS, ss);
}
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index c9021f2fa9..a806345276 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -104,6 +104,11 @@ void s390_memory_init(ram_addr_t mem_size)
s390_skeys_init();
}
+static SaveVMHandlers savevm_gtod = {
+ .save_state = gtod_save,
+ .load_state = gtod_load,
+};
+
static void ccw_init(MachineState *machine)
{
int ret;
@@ -151,8 +156,7 @@ static void ccw_init(MachineState *machine)
s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw");
/* Register savevm handler for guest TOD clock */
- register_savevm(NULL, "todclock", 0, 1,
- gtod_save, gtod_load, kvm_state);
+ register_savevm_live(NULL, "todclock", 0, 1, &savevm_gtod, kvm_state);
}
static void s390_cpu_plug(HotplugHandler *hotplug_dev,
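
Illustrative sketch (not part of this patch): register_savevm() is deleted by the migration/savevm.c hunk below, so the two s390 callers above switch to register_savevm_live() with a statically allocated SaveVMHandlers, and unregister_savevm() consequently stops freeing se->ops. A minimal conversion example with hypothetical handler names (foo_save/foo_load):

    #include "qemu/osdep.h"
    #include "migration/vmstate.h"

    /* hypothetical handlers; any SaveStateHandler/LoadStateHandler pair */
    static void foo_save(QEMUFile *f, void *opaque)
    {
        /* would stream device state out with the qemu_put_*() helpers */
    }

    static int foo_load(QEMUFile *f, void *opaque, int version_id)
    {
        /* would stream device state back in with the qemu_get_*() helpers */
        return 0;
    }

    /* handlers now live in a static table instead of being heap-allocated
     * by the removed register_savevm() wrapper
     */
    static SaveVMHandlers savevm_foo = {
        .save_state = foo_save,
        .load_state = foo_load,
    };

    static void foo_register(void *opaque)
    {
        register_savevm_live(NULL, "foo", 0, 1, &savevm_foo, opaque);
    }
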
diff --git a/hw/xtensa/sim.c b/hw/xtensa/sim.c
index b27e28d802..5521e9184a 100644
--- a/hw/xtensa/sim.c
+++ b/hw/xtensa/sim.c
@@ -114,6 +114,9 @@ static void xtensa_sim_init(MachineState *machine)
xtensa_create_memory_regions(&sysram, "xtensa.sysram");
}
+ if (serial_hds[0]) {
+ xtensa_sim_open_console(serial_hds[0]);
+ }
if (kernel_filename) {
uint64_t elf_entry;
uint64_t elf_lowaddr;
@@ -136,6 +139,7 @@ static void xtensa_sim_machine_init(MachineClass *mc)
mc->is_default = true;
mc->init = xtensa_sim_init;
mc->max_cpus = 4;
+ mc->no_serial = 1;
}
DEFINE_MACHINE("sim", xtensa_sim_machine_init)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bcde1e6a14..87ae10bcc9 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -368,6 +368,8 @@ struct TranslationBlock {
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+ target_ulong cs_base, uint32_t flags);
#if defined(USE_DIRECT_JUMP)
diff --git a/include/exec/tb-hash.h b/include/exec/tb-hash.h
index 2c27490cb8..b1fe2d0161 100644
--- a/include/exec/tb-hash.h
+++ b/include/exec/tb-hash.h
@@ -22,6 +22,8 @@
#include "exec/tb-hash-xx.h"
+#ifdef CONFIG_SOFTMMU
+
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
@@ -45,6 +47,16 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
| (tmp & TB_JMP_ADDR_MASK));
}
+#else
+
+/* In user-mode we can get better hashing because we do not have a TLB */
+static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+{
+ return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
+}
+
+#endif /* CONFIG_SOFTMMU */
+
static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags)
{
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index e447f5d8f4..d071c9c0e9 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -566,75 +566,75 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
.value = "off",\
},{\
.driver = "qemu64" "-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(4),\
},{\
.driver = "kvm64" "-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(5),\
},{\
.driver = "pentium3" "-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(2),\
},{\
.driver = "n270" "-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(5),\
},{\
.driver = "Conroe" "-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(4),\
},{\
.driver = "Penryn" "-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(4),\
},{\
.driver = "Nehalem" "-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(4),\
},{\
.driver = "n270" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "Penryn" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "Conroe" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "Nehalem" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "Westmere" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "SandyBridge" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "IvyBridge" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "Haswell" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "Haswell-noTSX" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "Broadwell" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = "Broadwell-noTSX" "-" TYPE_X86_CPU,\
- .property = "xlevel",\
+ .property = "min-xlevel",\
.value = stringify(0x8000000a),\
},{\
.driver = TYPE_X86_CPU,\
@@ -860,7 +860,7 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
.value = stringify(2),\
},{\
.driver = "Conroe-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(2),\
},{\
.driver = "Penryn-" TYPE_X86_CPU,\
@@ -868,7 +868,7 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
.value = stringify(2),\
},{\
.driver = "Penryn-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(2),\
},{\
.driver = "Nehalem-" TYPE_X86_CPU,\
@@ -876,7 +876,7 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
.value = stringify(2),\
},{\
.driver = "Nehalem-" TYPE_X86_CPU,\
- .property = "level",\
+ .property = "min-level",\
.value = stringify(2),\
},{\
.driver = "virtio-net-pci",\
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 98fb78b012..f973b02845 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -11,7 +11,6 @@
struct VIOsPAPRBus;
struct sPAPRPHBState;
struct sPAPRNVRAM;
-typedef struct sPAPRConfigureConnectorState sPAPRConfigureConnectorState;
typedef struct sPAPREventLogEntry sPAPREventLogEntry;
typedef struct sPAPREventSource sPAPREventSource;
@@ -102,9 +101,6 @@ struct sPAPRMachineState {
bool htab_first_pass;
int htab_fd;
- /* RTAS state */
- QTAILQ_HEAD(, sPAPRConfigureConnectorState) ccs_list;
-
/* Pending DIMM unplug cache. It is populated when a LMB
* unplug starts. It can be regenerated if a migration
* occurs during the unplug process. */
@@ -646,16 +642,6 @@ void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
void spapr_core_release(DeviceState *dev);
void spapr_lmb_release(DeviceState *dev);
-/* rtas-configure-connector state */
-struct sPAPRConfigureConnectorState {
- uint32_t drc_index;
- int fdt_offset;
- int fdt_depth;
- QTAILQ_ENTRY(sPAPRConfigureConnectorState) next;
-};
-
-void spapr_ccs_reset_hook(void *opaque);
-
void spapr_rtc_read(sPAPRRTCState *rtc, struct tm *tm, uint32_t *ns);
int spapr_rtc_import_offset(sPAPRRTCState *rtc, int64_t legacy_offset);
diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h
index 813b9ffd60..c88e1beed4 100644
--- a/include/hw/ppc/spapr_drc.h
+++ b/include/hw/ppc/spapr_drc.h
@@ -26,6 +26,48 @@
#define SPAPR_DR_CONNECTOR(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
TYPE_SPAPR_DR_CONNECTOR)
+#define TYPE_SPAPR_DRC_PHYSICAL "spapr-drc-physical"
+#define SPAPR_DRC_PHYSICAL_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_PHYSICAL)
+#define SPAPR_DRC_PHYSICAL_CLASS(klass) \
+ OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, \
+ TYPE_SPAPR_DRC_PHYSICAL)
+#define SPAPR_DRC_PHYSICAL(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
+ TYPE_SPAPR_DRC_PHYSICAL)
+
+#define TYPE_SPAPR_DRC_LOGICAL "spapr-drc-logical"
+#define SPAPR_DRC_LOGICAL_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_LOGICAL)
+#define SPAPR_DRC_LOGICAL_CLASS(klass) \
+ OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, \
+ TYPE_SPAPR_DRC_LOGICAL)
+#define SPAPR_DRC_LOGICAL(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
+ TYPE_SPAPR_DRC_LOGICAL)
+
+#define TYPE_SPAPR_DRC_CPU "spapr-drc-cpu"
+#define SPAPR_DRC_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_CPU)
+#define SPAPR_DRC_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, TYPE_SPAPR_DRC_CPU)
+#define SPAPR_DRC_CPU(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
+ TYPE_SPAPR_DRC_CPU)
+
+#define TYPE_SPAPR_DRC_PCI "spapr-drc-pci"
+#define SPAPR_DRC_PCI_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_PCI)
+#define SPAPR_DRC_PCI_CLASS(klass) \
+ OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, TYPE_SPAPR_DRC_PCI)
+#define SPAPR_DRC_PCI(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
+ TYPE_SPAPR_DRC_PCI)
+
+#define TYPE_SPAPR_DRC_LMB "spapr-drc-lmb"
+#define SPAPR_DRC_LMB_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_LMB)
+#define SPAPR_DRC_LMB_CLASS(klass) \
+ OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, TYPE_SPAPR_DRC_LMB)
+#define SPAPR_DRC_LMB(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
+ TYPE_SPAPR_DRC_LMB)
+
/*
* Various hotplug types managed by sPAPRDRConnector
*
@@ -130,11 +172,16 @@ typedef enum {
SPAPR_DR_CC_RESPONSE_NOT_CONFIGURABLE = -9003,
} sPAPRDRCCResponse;
+/* rtas-configure-connector state */
+typedef struct sPAPRConfigureConnectorState {
+ int fdt_offset;
+ int fdt_depth;
+} sPAPRConfigureConnectorState;
+
typedef struct sPAPRDRConnector {
/*< private >*/
DeviceState parent;
- sPAPRDRConnectorType type;
uint32_t id;
Object *owner;
const char *name;
@@ -148,6 +195,7 @@ typedef struct sPAPRDRConnector {
void *fdt;
int fdt_start_offset;
bool configured;
+ sPAPRConfigureConnectorState *ccs;
bool awaiting_release;
bool signalled;
@@ -163,6 +211,8 @@ typedef struct sPAPRDRConnectorClass {
DeviceClass parent;
/*< public >*/
+ sPAPRDRConnectorTypeShift typeshift;
+ const char *typename; /* used in device tree, PAPR 13.5.2.6 & C.6.1 */
/* accessors for guest-visible (generally via RTAS) DR state */
uint32_t (*set_isolation_state)(sPAPRDRConnector *drc,
@@ -171,16 +221,10 @@ typedef struct sPAPRDRConnectorClass {
sPAPRDRIndicatorState state);
uint32_t (*set_allocation_state)(sPAPRDRConnector *drc,
sPAPRDRAllocationState state);
- uint32_t (*get_index)(sPAPRDRConnector *drc);
- uint32_t (*get_type)(sPAPRDRConnector *drc);
const char *(*get_name)(sPAPRDRConnector *drc);
uint32_t (*entity_sense)(sPAPRDRConnector *drc, sPAPRDREntitySense *state);
- /* QEMU interfaces for managing FDT/configure-connector */
- const void *(*get_fdt)(sPAPRDRConnector *drc, int *fdt_start_offset);
- void (*set_configured)(sPAPRDRConnector *drc);
-
/* QEMU interfaces for managing hotplug operations */
void (*attach)(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
int fdt_start_offset, bool coldplug, Error **errp);
@@ -189,12 +233,13 @@ typedef struct sPAPRDRConnectorClass {
void (*set_signalled)(sPAPRDRConnector *drc);
} sPAPRDRConnectorClass;
-sPAPRDRConnector *spapr_dr_connector_new(Object *owner,
- sPAPRDRConnectorType type,
+uint32_t spapr_drc_index(sPAPRDRConnector *drc);
+sPAPRDRConnectorType spapr_drc_type(sPAPRDRConnector *drc);
+
+sPAPRDRConnector *spapr_dr_connector_new(Object *owner, const char *type,
uint32_t id);
-sPAPRDRConnector *spapr_dr_connector_by_index(uint32_t index);
-sPAPRDRConnector *spapr_dr_connector_by_id(sPAPRDRConnectorType type,
- uint32_t id);
+sPAPRDRConnector *spapr_drc_by_index(uint32_t index);
+sPAPRDRConnector *spapr_drc_by_id(const char *type, uint32_t id);
int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
uint32_t drc_type_mask);
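
Illustrative sketch (not part of this patch): with the QOM subtypes added above, connectors are created and looked up by type name rather than by the sPAPRDRConnectorType enum (the spapr_pci.c hunk earlier shows the PCI case). A minimal example of the new API surface; the owner object and id are hypothetical:

    #include "qemu/osdep.h"
    #include "hw/ppc/spapr_drc.h"

    static sPAPRDRConnector *make_and_find_cpu_drc(Object *owner, uint32_t id)
    {
        sPAPRDRConnector *drc = spapr_dr_connector_new(owner,
                                                       TYPE_SPAPR_DRC_CPU, id);

        /* look up either by (QOM type, id) or by the global DRC index */
        g_assert(spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, id) == drc);
        return spapr_drc_by_index(spapr_drc_index(drc));
    }
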
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 66895623da..8a3e9e6088 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -59,14 +59,6 @@ typedef struct SaveVMHandlers {
LoadStateHandler *load_state;
} SaveVMHandlers;
-int register_savevm(DeviceState *dev,
- const char *idstr,
- int instance_id,
- int version_id,
- SaveStateHandler *save_state,
- LoadStateHandler *load_state,
- void *opaque);
-
int register_savevm_live(DeviceState *dev,
const char *idstr,
int instance_id,
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 878fa0700d..e07c7972ab 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -88,6 +88,24 @@
#define smp_read_barrier_depends() barrier()
#endif
+/* Sanity check that the size of an atomic operation isn't "overly large".
+ * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
+ * want to use them because we ought not to need them, and this lets us do
+ * a bit of sanity checking for what other 32-bit hosts might build.
+ *
+ * That said, we have a problem on 64-bit ILP32 hosts: in order to sync
+ * with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
+ * We'd prefer not to pull in everything else TCG related, so handle
+ * those few cases by hand.
+ *
+ * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
+ * Sparc we always force the use of sparcv9 in configure.
+ */
+#if defined(__x86_64__) || defined(__sparc__)
+# define ATOMIC_REG_SIZE 8
+#else
+# define ATOMIC_REG_SIZE sizeof(void *)
+#endif
/* Weak atomic operations prevent the compiler moving other
* loads/stores past the atomic operation load/store. However there is
@@ -104,7 +122,7 @@
#define atomic_read(ptr) \
({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_read__nocheck(ptr); \
})
@@ -112,7 +130,7 @@
__atomic_store_n(ptr, i, __ATOMIC_RELAXED)
#define atomic_set(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_set__nocheck(ptr, i); \
} while(0)
@@ -130,27 +148,27 @@
#define atomic_rcu_read(ptr) \
({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \
atomic_rcu_read__nocheck(ptr, &_val); \
_val; \
})
#define atomic_rcu_set(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
#define atomic_load_acquire(ptr) \
({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \
_val; \
})
#define atomic_store_release(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
@@ -162,7 +180,7 @@
})
#define atomic_xchg(ptr, i) ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_xchg__nocheck(ptr, i); \
})
@@ -175,7 +193,7 @@
})
#define atomic_cmpxchg(ptr, old, new) ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_cmpxchg__nocheck(ptr, old, new); \
})
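
Illustrative sketch (not part of this patch): the point of ATOMIC_REG_SIZE is that the size check now tracks the host's native atomic width rather than sizeof(void *), so x32 and sparcv9 (ILP32 ABIs on 64-bit registers) keep their 8-byte atomics while genuine 32-bit hosts still get a build-time error. A small example of what the updated macros accept and reject:

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"

    static uint32_t counter32;
    #if ATOMIC_REG_SIZE == 8
    static uint64_t counter64;   /* only allowed where registers are 64-bit */
    #endif

    static void bump(void)
    {
        /* atomic_read()/atomic_set() expand to
         * QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE), so a uint64_t
         * access on a plain 32-bit host fails to compile instead of
         * silently tearing.
         */
        atomic_set(&counter32, atomic_read(&counter32) + 1);
    #if ATOMIC_REG_SIZE == 8
        atomic_set(&counter64, atomic_read(&counter64) + 1);
    #endif
    }
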
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 55214ce131..89ddb686fb 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -265,7 +265,6 @@ struct qemu_work_item;
* @cpu_index: CPU index (informative).
* @nr_cores: Number of cores within this CPU package.
* @nr_threads: Number of threads within this CPU.
- * @numa_node: NUMA node this CPU is belonging to.
* @host_tid: Host thread ID.
* @running: #true if CPU is currently running (lockless).
* @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
@@ -314,7 +313,6 @@ struct CPUState {
int nr_cores;
int nr_threads;
- int numa_node;
struct QemuThread *thread;
#ifdef _WIN32
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
index a8053f1715..731756d948 100644
--- a/include/sysemu/cpus.h
+++ b/include/sysemu/cpus.h
@@ -27,6 +27,7 @@ void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
void cpu_synchronize_all_states(void);
void cpu_synchronize_all_post_reset(void);
void cpu_synchronize_all_post_init(void);
+void cpu_synchronize_all_pre_loadvm(void);
void qtest_clock_warp(int64_t dest);
diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h
index d9f023918e..232a68ab1b 100644
--- a/include/sysemu/hax.h
+++ b/include/sysemu/hax.h
@@ -33,6 +33,7 @@ int hax_populate_ram(uint64_t va, uint32_t size);
void hax_cpu_synchronize_state(CPUState *cpu);
void hax_cpu_synchronize_post_reset(CPUState *cpu);
void hax_cpu_synchronize_post_init(CPUState *cpu);
+void hax_cpu_synchronize_pre_loadvm(CPUState *cpu);
#ifdef CONFIG_HAX
diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h
index c9b3105bc7..469ffda460 100644
--- a/include/sysemu/hw_accel.h
+++ b/include/sysemu/hw_accel.h
@@ -45,4 +45,14 @@ static inline void cpu_synchronize_post_init(CPUState *cpu)
}
}
+static inline void cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_pre_loadvm(cpu);
+ }
+ if (hax_enabled()) {
+ hax_cpu_synchronize_pre_loadvm(cpu);
+ }
+}
+
#endif /* QEMU_HW_ACCEL_H */
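
Illustrative sketch (not part of this patch): cpu_synchronize_pre_loadvm() above dispatches to the accelerator hook, and qemu_loadvm_state() calls an all-CPUs wrapper before parsing the stream (see the cpus.h declaration above and the savevm.c hunk below). A sketch of what such a wrapper looks like, mirroring the existing cpu_synchronize_all_*() helpers; the body is an assumption here, not copied from the patch:

    #include "qemu/osdep.h"
    #include "qom/cpu.h"
    #include "sysemu/cpus.h"
    #include "sysemu/hw_accel.h"

    void cpu_synchronize_all_pre_loadvm(void)
    {
        CPUState *cpu;

        /* marking each vCPU dirty makes QEMU's register copy authoritative,
         * so the state loaded from the stream is pushed to the accelerator
         * instead of being clobbered by stale in-kernel state
         */
        CPU_FOREACH(cpu) {
            cpu_synchronize_pre_loadvm(cpu);
        }
    }
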
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 5cc83f2003..a45c145560 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -459,6 +459,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
void kvm_cpu_synchronize_state(CPUState *cpu);
void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);
+void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
void kvm_init_cpu_signals(CPUState *cpu);
diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
index 7ffde5b119..610eece211 100644
--- a/include/sysemu/numa.h
+++ b/include/sysemu/numa.h
@@ -35,4 +35,5 @@ void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
int nb_nodes, ram_addr_t size);
void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
int nb_nodes, ram_addr_t size);
+void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp);
#endif
diff --git a/kvm-all.c b/kvm-all.c
index 7df27c8522..494b9256aa 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1896,6 +1896,16 @@ void kvm_cpu_synchronize_post_init(CPUState *cpu)
run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
+static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
+{
+ cpu->kvm_vcpu_dirty = true;
+}
+
+void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
diff --git a/migration/savevm.c b/migration/savevm.c
index 9c320f59d0..1993ca23fe 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -645,21 +645,6 @@ int register_savevm_live(DeviceState *dev,
return 0;
}
-int register_savevm(DeviceState *dev,
- const char *idstr,
- int instance_id,
- int version_id,
- SaveStateHandler *save_state,
- LoadStateHandler *load_state,
- void *opaque)
-{
- SaveVMHandlers *ops = g_new0(SaveVMHandlers, 1);
- ops->save_state = save_state;
- ops->load_state = load_state;
- return register_savevm_live(dev, idstr, instance_id, version_id,
- ops, opaque);
-}
-
void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
{
SaveStateEntry *se, *new_se;
@@ -679,7 +664,6 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
QTAILQ_REMOVE(&savevm_state.handlers, se, entry);
g_free(se->compat);
- g_free(se->ops);
g_free(se);
}
}
@@ -2015,6 +1999,8 @@ int qemu_loadvm_state(QEMUFile *f)
}
}
+ cpu_synchronize_all_pre_loadvm();
+
ret = qemu_loadvm_state_main(f, mis);
qemu_event_set(&mis->main_thread_load_event);
diff --git a/monitor.c b/monitor.c
index 75e7cd26d0..1e63ace2d4 100644
--- a/monitor.c
+++ b/monitor.c
@@ -1696,23 +1696,26 @@ static void hmp_info_mtree(Monitor *mon, const QDict *qdict)
static void hmp_info_numa(Monitor *mon, const QDict *qdict)
{
int i;
- CPUState *cpu;
uint64_t *node_mem;
+ CpuInfoList *cpu_list, *cpu;
+ cpu_list = qmp_query_cpus(&error_abort);
node_mem = g_new0(uint64_t, nb_numa_nodes);
query_numa_node_mem(node_mem);
monitor_printf(mon, "%d nodes\n", nb_numa_nodes);
for (i = 0; i < nb_numa_nodes; i++) {
monitor_printf(mon, "node %d cpus:", i);
- CPU_FOREACH(cpu) {
- if (cpu->numa_node == i) {
- monitor_printf(mon, " %d", cpu->cpu_index);
+ for (cpu = cpu_list; cpu; cpu = cpu->next) {
+ if (cpu->value->has_props && cpu->value->props->has_node_id &&
+ cpu->value->props->node_id == i) {
+ monitor_printf(mon, " %" PRIi64, cpu->value->CPU);
}
}
monitor_printf(mon, "\n");
monitor_printf(mon, "node %d size: %" PRId64 " MB\n", i,
node_mem[i] >> 20);
}
+ qapi_free_CpuInfoList(cpu_list);
g_free(node_mem);
}
diff --git a/numa.c b/numa.c
index be50c62aa9..65701cb6c8 100644
--- a/numa.c
+++ b/numa.c
@@ -426,7 +426,6 @@ void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
void parse_numa_opts(MachineState *ms)
{
int i;
- const CPUArchIdList *possible_cpus;
MachineClass *mc = MACHINE_GET_CLASS(ms);
if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, NULL)) {
@@ -484,31 +483,6 @@ void parse_numa_opts(MachineState *ms)
numa_set_mem_ranges();
- /* assign CPUs to nodes using board provided default mapping */
- if (!mc->cpu_index_to_instance_props || !mc->possible_cpu_arch_ids) {
- error_report("default CPUs to NUMA node mapping isn't supported");
- exit(1);
- }
-
- possible_cpus = mc->possible_cpu_arch_ids(ms);
- for (i = 0; i < possible_cpus->len; i++) {
- if (possible_cpus->cpus[i].props.has_node_id) {
- break;
- }
- }
-
- /* no CPUs are assigned to NUMA nodes */
- if (i == possible_cpus->len) {
- for (i = 0; i < max_cpus; i++) {
- CpuInstanceProperties props;
- /* fetch default mapping from board and enable it */
- props = mc->cpu_index_to_instance_props(ms, i);
- props.has_node_id = true;
-
- machine_set_cpu_numa_node(ms, &props, &error_fatal);
- }
- }
-
/* QEMU needs at least all unique node pair distances to build
* the whole NUMA distance table. QEMU treats the distance table
* as symmetric by default, i.e. distance A->B == distance B->A.
@@ -533,6 +507,23 @@ void parse_numa_opts(MachineState *ms)
}
}
+void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp)
+{
+ int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort);
+
+ if (node_id == CPU_UNSET_NUMA_NODE_ID) {
+        /* due to a bug in libvirt, it doesn't pass node-id from props on
+ * device_add as expected, so we have to fix it up here */
+ if (slot->props.has_node_id) {
+ object_property_set_int(OBJECT(dev), slot->props.node_id,
+ "node-id", errp);
+ }
+ } else if (node_id != slot->props.node_id) {
+ error_setg(errp, "node-id=%d must match numa node specified "
+ "with -numa option", node_id);
+ }
+}
+
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
const char *name,
uint64_t ram_size)
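
Illustrative sketch (not part of this patch): numa_cpu_pre_plug() above is intended to be called from a board's CPU pre-plug handler (hw/i386/pc.c and hw/ppc/spapr.c are converted elsewhere in this series). A hypothetical call site; find_cpu_slot() stands in for the board's own slot lookup and is not a real helper:

    #include "qemu/osdep.h"
    #include "hw/boards.h"
    #include "hw/hotplug.h"
    #include "sysemu/numa.h"
    #include "qapi/error.h"

    /* placeholder for the board-specific slot lookup */
    static const CPUArchId *find_cpu_slot(MachineState *ms, DeviceState *dev);

    static void my_machine_cpu_pre_plug(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
    {
        MachineState *ms = MACHINE(hotplug_dev);
        const CPUArchId *slot = find_cpu_slot(ms, dev);
        Error *local_err = NULL;

        /* validate a user-supplied node-id, or fill in the board default */
        numa_cpu_pre_plug(slot, dev, &local_err);
        error_propagate(errp, local_err);
    }
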
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 88a7471a23..f85c2235c7 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -449,7 +449,8 @@
#
# @io-status: @BlockDeviceIoStatus. Only present if the device
# supports it and the VM is configured to stop on errors
-# (supported device models: virtio-blk, ide, scsi-disk)
+# (supported device models: virtio-blk, IDE, SCSI except
+# scsi-generic)
#
# @inserted: @BlockDeviceInfo describing the device if media is
# present
diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c
index d4253a88de..ff27e0669e 100644
--- a/qom/object_interfaces.c
+++ b/qom/object_interfaces.c
@@ -4,6 +4,7 @@
#include "qemu/module.h"
#include "qapi-visit.h"
#include "qapi/opts-visitor.h"
+#include "qemu/config-file.h"
void user_creatable_complete(Object *obj, Error **errp)
{
@@ -181,6 +182,14 @@ void user_creatable_del(const char *id, Error **errp)
error_setg(errp, "object '%s' is in use, can not be deleted", id);
return;
}
+
+ /*
+     * if the object was defined on the command line, remove its corresponding
+ * option group entry
+ */
+ qemu_opts_del(qemu_opts_find(qemu_find_opts_err("object", &error_abort),
+ id));
+
object_unparent(obj);
}
diff --git a/scripts/device-crash-test b/scripts/device-crash-test
new file mode 100755
index 0000000000..5f90e9bb54
--- /dev/null
+++ b/scripts/device-crash-test
@@ -0,0 +1,624 @@
+#!/usr/bin/env python2.7
+#
+# Copyright (c) 2017 Red Hat Inc
+#
+# Author:
+# Eduardo Habkost <ehabkost@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Run QEMU with all combinations of -machine and -device types,
+check for crashes and unexpected errors.
+"""
+
+import sys
+import os
+import glob
+import logging
+import traceback
+import re
+import random
+import argparse
+from itertools import chain
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scripts'))
+from qemu import QEMUMachine
+
+logger = logging.getLogger('device-crash-test')
+dbg = logger.debug
+
+
+# Purposes of the following whitelist:
+# * Avoiding verbose log messages when we find known non-fatal
+# (exitcode=1) errors
+# * Avoiding fatal errors when we find known crashes
+# * Skipping machines/devices that are known not to work out of
+# the box, when running in --quick mode
+#
+# Keeping the whitelist updated is desirable, but not required,
+# because unexpected cases where QEMU exits with exitcode=1 will
+# just trigger an INFO message.
+
+# Valid whitelist entry keys:
+# * accel: regexp, full match only
+# * machine: regexp, full match only
+# * device: regexp, full match only
+# * log: regexp, partial match allowed
+# * exitcode: if not present, defaults to 1. If None, matches any exitcode
+# * warn: if True, matching failures will be logged as warnings
+# * expected: if True, QEMU is expected to fail every time the
+#   corresponding test case is run
+# * loglevel: log level of log output when there's a match.
+ERROR_WHITELIST = [
+ # Machines that won't work out of the box:
+ # MACHINE | ERROR MESSAGE
+ {'machine':'niagara', 'expected':True}, # Unable to load a firmware for -M niagara
+ {'machine':'boston', 'expected':True}, # Please provide either a -kernel or -bios argument
+ {'machine':'leon3_generic', 'expected':True}, # Can't read bios image (null)
+
+ # devices that don't work out of the box because they require extra options to "-device DEV":
+ # DEVICE | ERROR MESSAGE
+ {'device':'.*-(i386|x86_64)-cpu', 'expected':True}, # CPU socket-id is not set
+ {'device':'ARM,bitband-memory', 'expected':True}, # source-memory property not set
+ {'device':'arm.cortex-a9-global-timer', 'expected':True}, # a9_gtimer_realize: num-cpu must be between 1 and 4
+ {'device':'arm_mptimer', 'expected':True}, # num-cpu must be between 1 and 4
+ {'device':'armv7m', 'expected':True}, # memory property was not set
+ {'device':'aspeed.scu', 'expected':True}, # Unknown silicon revision: 0x0
+ {'device':'aspeed.sdmc', 'expected':True}, # Unknown silicon revision: 0x0
+ {'device':'bcm2835-dma', 'expected':True}, # bcm2835_dma_realize: required dma-mr link not found: Property '.dma-mr' not found
+ {'device':'bcm2835-fb', 'expected':True}, # bcm2835_fb_realize: required vcram-base property not set
+ {'device':'bcm2835-mbox', 'expected':True}, # bcm2835_mbox_realize: required mbox-mr link not found: Property '.mbox-mr' not found
+ {'device':'bcm2835-peripherals', 'expected':True}, # bcm2835_peripherals_realize: required ram link not found: Property '.ram' not found
+ {'device':'bcm2835-property', 'expected':True}, # bcm2835_property_realize: required fb link not found: Property '.fb' not found
+ {'device':'bcm2835_gpio', 'expected':True}, # bcm2835_gpio_realize: required sdhci link not found: Property '.sdbus-sdhci' not found
+ {'device':'bcm2836', 'expected':True}, # bcm2836_realize: required ram link not found: Property '.ram' not found
+ {'device':'cfi.pflash01', 'expected':True}, # attribute "sector-length" not specified or zero.
+ {'device':'cfi.pflash02', 'expected':True}, # attribute "sector-length" not specified or zero.
+ {'device':'icp', 'expected':True}, # icp_realize: required link 'xics' not found: Property '.xics' not found
+ {'device':'ics', 'expected':True}, # ics_base_realize: required link 'xics' not found: Property '.xics' not found
+ # "-device ide-cd" does work on more recent QEMU versions, so it doesn't have expected=True
+ {'device':'ide-cd'}, # No drive specified
+ {'device':'ide-drive', 'expected':True}, # No drive specified
+ {'device':'ide-hd', 'expected':True}, # No drive specified
+ {'device':'ipmi-bmc-extern', 'expected':True}, # IPMI external bmc requires chardev attribute
+ {'device':'isa-debugcon', 'expected':True}, # Can't create serial device, empty char device
+ {'device':'isa-ipmi-bt', 'expected':True}, # IPMI device requires a bmc attribute to be set
+ {'device':'isa-ipmi-kcs', 'expected':True}, # IPMI device requires a bmc attribute to be set
+ {'device':'isa-parallel', 'expected':True}, # Can't create serial device, empty char device
+ {'device':'isa-serial', 'expected':True}, # Can't create serial device, empty char device
+ {'device':'ivshmem', 'expected':True}, # You must specify either 'shm' or 'chardev'
+ {'device':'ivshmem-doorbell', 'expected':True}, # You must specify a 'chardev'
+ {'device':'ivshmem-plain', 'expected':True}, # You must specify a 'memdev'
+ {'device':'kvm-pci-assign', 'expected':True}, # no host device specified
+ {'device':'loader', 'expected':True}, # please include valid arguments
+ {'device':'nand', 'expected':True}, # Unsupported NAND block size 0x1
+ {'device':'nvdimm', 'expected':True}, # 'memdev' property is not set
+ {'device':'nvme', 'expected':True}, # Device initialization failed
+ {'device':'pc-dimm', 'expected':True}, # 'memdev' property is not set
+ {'device':'pci-bridge', 'expected':True}, # Bridge chassis not specified. Each bridge is required to be assigned a unique chassis id > 0.
+ {'device':'pci-bridge-seat', 'expected':True}, # Bridge chassis not specified. Each bridge is required to be assigned a unique chassis id > 0.
+ {'device':'pci-serial', 'expected':True}, # Can't create serial device, empty char device
+ {'device':'pci-serial-2x', 'expected':True}, # Can't create serial device, empty char device
+ {'device':'pci-serial-4x', 'expected':True}, # Can't create serial device, empty char device
+ {'device':'pxa2xx-dma', 'expected':True}, # channels value invalid
+ {'device':'pxb', 'expected':True}, # Bridge chassis not specified. Each bridge is required to be assigned a unique chassis id > 0.
+ {'device':'scsi-block', 'expected':True}, # drive property not set
+ {'device':'scsi-disk', 'expected':True}, # drive property not set
+ {'device':'scsi-generic', 'expected':True}, # drive property not set
+ {'device':'scsi-hd', 'expected':True}, # drive property not set
+ {'device':'spapr-pci-host-bridge', 'expected':True}, # BUID not specified for PHB
+ {'device':'spapr-pci-vfio-host-bridge', 'expected':True}, # BUID not specified for PHB
+ {'device':'spapr-rng', 'expected':True}, # spapr-rng needs an RNG backend!
+ {'device':'spapr-vty', 'expected':True}, # chardev property not set
+ {'device':'tpm-tis', 'expected':True}, # tpm_tis: backend driver with id (null) could not be found
+ {'device':'unimplemented-device', 'expected':True}, # property 'size' not specified or zero
+ {'device':'usb-braille', 'expected':True}, # Property chardev is required
+ {'device':'usb-mtp', 'expected':True}, # x-root property must be configured
+ {'device':'usb-redir', 'expected':True}, # Parameter 'chardev' is missing
+ {'device':'usb-serial', 'expected':True}, # Property chardev is required
+ {'device':'usb-storage', 'expected':True}, # drive property not set
+ {'device':'vfio-amd-xgbe', 'expected':True}, # -device vfio-amd-xgbe: vfio error: wrong host device name
+ {'device':'vfio-calxeda-xgmac', 'expected':True}, # -device vfio-calxeda-xgmac: vfio error: wrong host device name
+ {'device':'vfio-pci', 'expected':True}, # No provided host device
+ {'device':'vfio-pci-igd-lpc-bridge', 'expected':True}, # VFIO dummy ISA/LPC bridge must have address 1f.0
+ {'device':'vhost-scsi.*', 'expected':True}, # vhost-scsi: missing wwpn
+ {'device':'vhost-vsock-device', 'expected':True}, # guest-cid property must be greater than 2
+ {'device':'vhost-vsock-pci', 'expected':True}, # guest-cid property must be greater than 2
+ {'device':'virtio-9p-ccw', 'expected':True}, # 9pfs device couldn't find fsdev with the id = NULL
+ {'device':'virtio-9p-device', 'expected':True}, # 9pfs device couldn't find fsdev with the id = NULL
+ {'device':'virtio-9p-pci', 'expected':True}, # 9pfs device couldn't find fsdev with the id = NULL
+ {'device':'virtio-blk-ccw', 'expected':True}, # drive property not set
+ {'device':'virtio-blk-device', 'expected':True}, # drive property not set
+ {'device':'virtio-blk-pci', 'expected':True}, # drive property not set
+ {'device':'virtio-crypto-ccw', 'expected':True}, # 'cryptodev' parameter expects a valid object
+ {'device':'virtio-crypto-device', 'expected':True}, # 'cryptodev' parameter expects a valid object
+ {'device':'virtio-crypto-pci', 'expected':True}, # 'cryptodev' parameter expects a valid object
+ {'device':'virtio-input-host-device', 'expected':True}, # evdev property is required
+ {'device':'virtio-input-host-pci', 'expected':True}, # evdev property is required
+ {'device':'xen-pvdevice', 'expected':True}, # Device ID invalid, it must always be supplied
+ {'device':'vhost-vsock-ccw', 'expected':True}, # guest-cid property must be greater than 2
+ {'device':'ALTR.timer', 'expected':True}, # "clock-frequency" property must be provided
+ {'device':'zpci', 'expected':True}, # target must be defined
+ {'device':'pnv-(occ|icp|lpc)', 'expected':True}, # required link 'xics' not found: Property '.xics' not found
+ {'device':'powernv-cpu-.*', 'expected':True}, # pnv_core_realize: required link 'xics' not found: Property '.xics' not found
+
+ # ioapic devices are already created by pc and will fail:
+ {'machine':'q35|pc.*', 'device':'kvm-ioapic', 'expected':True}, # Only 1 ioapics allowed
+ {'machine':'q35|pc.*', 'device':'ioapic', 'expected':True}, # Only 1 ioapics allowed
+
+ # KVM-specific devices shouldn't be tried without accel=kvm:
+ {'accel':'(?!kvm).*', 'device':'kvmclock', 'expected':True},
+ {'accel':'(?!kvm).*', 'device':'kvm-pci-assign', 'expected':True},
+
+ # xen-specific machines and devices:
+ {'accel':'(?!xen).*', 'machine':'xen.*', 'expected':True},
+ {'accel':'(?!xen).*', 'device':'xen-.*', 'expected':True},
+
+ # this fails on some machine-types, but not all, so they don't have expected=True:
+ {'device':'vmgenid'}, # vmgenid requires DMA write support in fw_cfg, which this machine type does not provide
+
+ # Silence INFO messages for errors that are common on multiple
+ # devices/machines:
+ {'log':r"No '[\w-]+' bus found for device '[\w-]+'"},
+ {'log':r"images* must be given with the 'pflash' parameter"},
+ {'log':r"(Guest|ROM|Flash|Kernel) image must be specified"},
+ {'log':r"[cC]ould not load [\w ]+ (BIOS|bios) '[\w-]+\.bin'"},
+ {'log':r"Couldn't find rom image '[\w-]+\.bin'"},
+ {'log':r"speed mismatch trying to attach usb device"},
+ {'log':r"Can't create a second ISA bus"},
+ {'log':r"duplicate fw_cfg file name"},
+ # sysbus-related error messages: most machines reject most dynamic sysbus devices:
+ {'log':r"Option '-device [\w.,-]+' cannot be handled by this machine"},
+ {'log':r"Device [\w.,-]+ is not supported by this machine yet"},
+ {'log':r"Device [\w.,-]+ can not be dynamically instantiated"},
+ {'log':r"Platform Bus: Can not fit MMIO region of size "},
+ # other more specific errors we will ignore:
+ {'device':'allwinner-a10', 'log':"Unsupported NIC model:"},
+ {'device':'.*-spapr-cpu-core', 'log':r"CPU core type should be"},
+ {'log':r"MSI(-X)? is not supported by interrupt controller"},
+ {'log':r"pxb-pcie? devices cannot reside on a PCIe? bus"},
+ {'log':r"Ignoring smp_cpus value"},
+ {'log':r"sd_init failed: Drive 'sd0' is already in use because it has been automatically connected to another device"},
+ {'log':r"This CPU requires a smaller page size than the system is using"},
+ {'log':r"MSI-X support is mandatory in the S390 architecture"},
+ {'log':r"rom check and register reset failed"},
+ {'log':r"Unable to initialize GIC, CPUState for CPU#0 not valid"},
+ {'log':r"Multiple VT220 operator consoles are not supported"},
+ {'log':r"core 0 already populated"},
+ {'log':r"could not find stage1 bootloader"},
+
+ # other exitcode=1 failures not listed above will just generate INFO messages:
+ {'exitcode':1, 'loglevel':logging.INFO},
+
+ # KNOWN CRASHES:
+ # Known crashes will generate error messages, but won't be fatal.
+ # Those entries must be removed once we fix the crashes.
+ {'exitcode':-6, 'log':r"Device 'serial0' is in use", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"spapr_rtas_register: Assertion .*rtas_table\[token\]\.name.* failed", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"qemu_net_client_setup: Assertion `!peer->peer' failed", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r'RAMBlock "[\w.-]+" already registered', 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"find_ram_offset: Assertion `size != 0' failed.", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"puv3_load_kernel: Assertion `kernel_filename != NULL' failed", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"add_cpreg_to_hashtable: code should not be reached", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"qemu_alloc_display: Assertion `surface->image != NULL' failed", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"Unexpected error in error_set_from_qdev_prop_error", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"Object .* is not an instance of type spapr-machine", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"Object .* is not an instance of type generic-pc-machine", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"Object .* is not an instance of type e500-ccsr", 'loglevel':logging.ERROR},
+ {'exitcode':-6, 'log':r"vmstate_register_with_alias_id: Assertion `!se->compat || se->instance_id == 0' failed", 'loglevel':logging.ERROR},
+ {'exitcode':-11, 'device':'stm32f205-soc', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'xlnx,zynqmp', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'mips-cps', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'gus', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'a9mpcore_priv', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'a15mpcore_priv', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'isa-serial', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'sb16', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'cs4231a', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'device':'arm-gicv3', 'loglevel':logging.ERROR, 'expected':True},
+ {'exitcode':-11, 'machine':'isapc', 'device':'.*-iommu', 'loglevel':logging.ERROR, 'expected':True},
+
+ # everything else (including SIGABRT and SIGSEGV) will be a fatal error:
+ {'exitcode':None, 'fatal':True, 'loglevel':logging.FATAL},
+]
+
+
+def whitelistTestCaseMatch(wl, t):
+ """Check if a test case specification can match a whitelist entry
+
+    This only checks whether a whitelist entry is a candidate match
+    for a given test case; it won't check whether the test case
+    results/output match the entry. See whitelistResultMatch().
+ """
+ return (('machine' not in wl or
+ 'machine' not in t or
+ re.match(wl['machine'] + '$', t['machine'])) and
+ ('accel' not in wl or
+ 'accel' not in t or
+ re.match(wl['accel'] + '$', t['accel'])) and
+ ('device' not in wl or
+ 'device' not in t or
+ re.match(wl['device'] + '$', t['device'])))
+
+
+def whitelistCandidates(t):
+ """Generate the list of candidates that can match a test case"""
+ for i, wl in enumerate(ERROR_WHITELIST):
+ if whitelistTestCaseMatch(wl, t):
+ yield (i, wl)
+
+
+def findExpectedResult(t):
+ """Check if there's an expected=True whitelist entry for a test case
+
+ Returns (i, wl) tuple, where i is the index in
+ ERROR_WHITELIST and wl is the whitelist entry itself.
+ """
+ for i, wl in whitelistCandidates(t):
+ if wl.get('expected'):
+ return (i, wl)
+
+
+def whitelistResultMatch(wl, r):
+ """Check if test case results/output match a whitelist entry
+
+ It is valid to call this function only if
+ whitelistTestCaseMatch() is True for the entry (e.g. on
+ entries returned by whitelistCandidates())
+ """
+ assert whitelistTestCaseMatch(wl, r['testcase'])
+ return ((wl.get('exitcode', 1) is None or
+ r['exitcode'] == wl.get('exitcode', 1)) and
+ ('log' not in wl or
+ re.search(wl['log'], r['log'], re.MULTILINE)))
+
+
+def checkResultWhitelist(r):
+ """Look up whitelist entry for a given test case result
+
+ Returns (i, wl) tuple, where i is the index in
+ ERROR_WHITELIST and wl is the whitelist entry itself.
+ """
+ for i, wl in whitelistCandidates(r['testcase']):
+ if whitelistResultMatch(wl, r):
+ return i, wl
+
+ raise Exception("this should never happen")
+
+
+def qemuOptsEscape(s):
+ """Escape option value QemuOpts"""
+ return s.replace(",", ",,")
+
+
+def formatTestCase(t):
+ """Format test case info as "key=value key=value" for prettier logging output"""
+ return ' '.join('%s=%s' % (k, v) for k, v in t.items())
+
+
+def qomListTypeNames(vm, **kwargs):
+ """Run qom-list-types QMP command, return type names"""
+ types = vm.command('qom-list-types', **kwargs)
+ return [t['name'] for t in types]
+
+
+def infoQDM(vm):
+ """Parse 'info qdm' output"""
+ args = {'command-line': 'info qdm'}
+ devhelp = vm.command('human-monitor-command', **args)
+ for l in devhelp.split('\n'):
+ l = l.strip()
+ if l == '' or l.endswith(':'):
+ continue
+ d = {'name': re.search(r'name "([^"]+)"', l).group(1),
+ 'no-user': (re.search(', no-user', l) is not None)}
+ yield d
+
+
+class QemuBinaryInfo(object):
+ def __init__(self, binary, devtype):
+ if devtype is None:
+ devtype = 'device'
+
+ self.binary = binary
+ self._machine_info = {}
+
+ dbg("devtype: %r", devtype)
+ args = ['-S', '-machine', 'none,accel=kvm:tcg']
+ dbg("querying info for QEMU binary: %s", binary)
+ vm = QEMUMachine(binary=binary, args=args)
+ vm.launch()
+ try:
+ self.alldevs = set(qomListTypeNames(vm, implements=devtype, abstract=False))
+ # there's no way to query DeviceClass::user_creatable using QMP,
+ # so use 'info qdm':
+            self.no_user_devs = set([d['name'] for d in infoQDM(vm) if d['no-user']])
+ self.machines = list(m['name'] for m in vm.command('query-machines'))
+ self.user_devs = self.alldevs.difference(self.no_user_devs)
+ self.kvm_available = vm.command('query-kvm')['enabled']
+ finally:
+ vm.shutdown()
+
+ def machineInfo(self, machine):
+ """Query for information on a specific machine-type
+
+ Results are cached internally, in case the same machine-
+ type is queried multiple times.
+ """
+ if machine in self._machine_info:
+ return self._machine_info[machine]
+
+ mi = {}
+ args = ['-S', '-machine', '%s' % (machine)]
+ dbg("querying machine info for binary=%s machine=%s", self.binary, machine)
+ vm = QEMUMachine(binary=self.binary, args=args)
+ try:
+ vm.launch()
+ mi['runnable'] = True
+ except KeyboardInterrupt:
+ raise
+ except:
+ dbg("exception trying to run binary=%s machine=%s", self.binary, machine, exc_info=sys.exc_info())
+ dbg("log: %r", vm.get_log())
+ mi['runnable'] = False
+
+ vm.shutdown()
+ self._machine_info[machine] = mi
+ return mi
+
+
+BINARY_INFO = {}
+
+
+def getBinaryInfo(args, binary):
+ if binary not in BINARY_INFO:
+ BINARY_INFO[binary] = QemuBinaryInfo(binary, args.devtype)
+ return BINARY_INFO[binary]
+
+
+def checkOneCase(args, testcase):
+ """Check one specific case
+
+ Returns a dictionary containing failure information on error,
+ or None on success
+ """
+ binary = testcase['binary']
+ accel = testcase['accel']
+ machine = testcase['machine']
+ device = testcase['device']
+
+ dbg("will test: %r", testcase)
+
+ args = ['-S', '-machine', '%s,accel=%s' % (machine, accel),
+ '-device', qemuOptsEscape(device)]
+ cmdline = ' '.join([binary] + args)
+ dbg("will launch QEMU: %s", cmdline)
+ vm = QEMUMachine(binary=binary, args=args)
+
+ exc_traceback = None
+ try:
+ vm.launch()
+ except KeyboardInterrupt:
+ raise
+ except:
+ exc_traceback = traceback.format_exc()
+ dbg("Exception while running test case")
+ finally:
+ vm.shutdown()
+ ec = vm.exitcode()
+ log = vm.get_log()
+
+ if exc_traceback is not None or ec != 0:
+ return {'exc_traceback':exc_traceback,
+ 'exitcode':ec,
+ 'log':log,
+ 'testcase':testcase,
+ 'cmdline':cmdline}
+
+
+def binariesToTest(args, testcase):
+ if args.qemu:
+ r = args.qemu
+ else:
+ r = glob.glob('./*-softmmu/qemu-system-*')
+ return r
+
+
+def accelsToTest(args, testcase):
+ if getBinaryInfo(args, testcase['binary']).kvm_available:
+ yield 'kvm'
+ yield 'tcg'
+
+
+def machinesToTest(args, testcase):
+ return getBinaryInfo(args, testcase['binary']).machines
+
+
+def devicesToTest(args, testcase):
+ return getBinaryInfo(args, testcase['binary']).user_devs
+
+
+TESTCASE_VARIABLES = [
+ ('binary', binariesToTest),
+ ('accel', accelsToTest),
+ ('machine', machinesToTest),
+ ('device', devicesToTest),
+]
+
+
+def genCases1(args, testcases, var, fn):
+ """Generate new testcases for one variable
+
+ If an existing item already has a variable set, don't
+ generate new items and just return it directly. This
+ allows the "-t" command-line option to be used to choose
+ a specific test case.
+ """
+ for testcase in testcases:
+ if var in testcase:
+ yield testcase.copy()
+ else:
+ for i in fn(args, testcase):
+ t = testcase.copy()
+ t[var] = i
+ yield t
+
+
+def genCases(args, testcase):
+ """Generate test cases for all variables
+ """
+ cases = [testcase.copy()]
+ for var, fn in TESTCASE_VARIABLES:
+ dbg("var: %r, fn: %r", var, fn)
+ cases = genCases1(args, cases, var, fn)
+ return cases
+
+
+def casesToTest(args, testcase):
+ cases = genCases(args, testcase)
+ if args.random:
+ cases = list(cases)
+ cases = random.sample(cases, min(args.random, len(cases)))
+ if args.debug:
+ cases = list(cases)
+ dbg("%d test cases to test", len(cases))
+ if args.shuffle:
+ cases = list(cases)
+ random.shuffle(cases)
+ return cases
+
+
+def logFailure(f, level):
+ t = f['testcase']
+ logger.log(level, "failed: %s", formatTestCase(t))
+ logger.log(level, "cmdline: %s", f['cmdline'])
+ for l in f['log'].strip().split('\n'):
+ logger.log(level, "log: %s", l)
+ logger.log(level, "exit code: %r", f['exitcode'])
+ if f['exc_traceback']:
+ logger.log(level, "exception:")
+ for l in f['exc_traceback'].split('\n'):
+ logger.log(level, " %s", l.rstrip('\n'))
+
+
+def main():
+ parser = argparse.ArgumentParser(description="QEMU -device crash test")
+ parser.add_argument('-t', metavar='KEY=VALUE', nargs='*',
+ help="Limit test cases to KEY=VALUE",
+ action='append', dest='testcases', default=[])
+ parser.add_argument('-d', '--debug', action='store_true',
+ help='debug output')
+ parser.add_argument('-v', '--verbose', action='store_true', default=True,
+ help='verbose output')
+ parser.add_argument('-q', '--quiet', dest='verbose', action='store_false',
+ help='non-verbose output')
+ parser.add_argument('-r', '--random', type=int, metavar='COUNT',
+ help='run a random sample of COUNT test cases',
+ default=0)
+ parser.add_argument('--shuffle', action='store_true',
+ help='Run test cases in random order')
+ parser.add_argument('--dry-run', action='store_true',
+ help="Don't run any tests, just generate list")
+ parser.add_argument('-D', '--devtype', metavar='TYPE',
+ help="Test only device types that implement TYPE")
+ parser.add_argument('-Q', '--quick', action='store_true', default=True,
+ help="Quick mode: skip test cases that are expected to fail")
+ parser.add_argument('-F', '--full', action='store_false', dest='quick',
+                        help="Full mode: also run test cases that are expected to fail")
+ parser.add_argument('--strict', action='store_true', dest='strict',
+ help="Treat all warnings as fatal")
+ parser.add_argument('qemu', nargs='*', metavar='QEMU',
+ help='QEMU binary to run')
+ args = parser.parse_args()
+
+ if args.debug:
+ lvl = logging.DEBUG
+ elif args.verbose:
+ lvl = logging.INFO
+ else:
+ lvl = logging.WARN
+ logging.basicConfig(stream=sys.stdout, level=lvl, format='%(levelname)s: %(message)s')
+
+ fatal_failures = []
+ wl_stats = {}
+ skipped = 0
+ total = 0
+
+ tc = {}
+ dbg("testcases: %r", args.testcases)
+ if args.testcases:
+ for t in chain(*args.testcases):
+ for kv in t.split():
+ k, v = kv.split('=', 1)
+ tc[k] = v
+
+ if len(binariesToTest(args, tc)) == 0:
+ print >>sys.stderr, "No QEMU binary found"
+ parser.print_usage(sys.stderr)
+ return 1
+
+ for t in casesToTest(args, tc):
+ logger.info("running test case: %s", formatTestCase(t))
+ total += 1
+
+ expected_match = findExpectedResult(t)
+ if (args.quick and
+ (expected_match or
+ not getBinaryInfo(args, t['binary']).machineInfo(t['machine'])['runnable'])):
+ dbg("skipped: %s", formatTestCase(t))
+ skipped += 1
+ continue
+
+ if args.dry_run:
+ continue
+
+ try:
+ f = checkOneCase(args, t)
+ except KeyboardInterrupt:
+ break
+
+ if f:
+ i, wl = checkResultWhitelist(f)
+ dbg("testcase: %r, whitelist match: %r", t, wl)
+ wl_stats.setdefault(i, []).append(f)
+ level = wl.get('loglevel', logging.DEBUG)
+ logFailure(f, level)
+ if wl.get('fatal') or (args.strict and level >= logging.WARN):
+ fatal_failures.append(f)
+ else:
+ dbg("success: %s", formatTestCase(t))
+ if expected_match:
+ logger.warn("Didn't fail as expected: %s", formatTestCase(t))
+
+ logger.info("Total: %d test cases", total)
+ if skipped:
+ logger.info("Skipped %d test cases", skipped)
+
+ if args.debug:
+ stats = sorted([(len(wl_stats.get(i, [])), wl) for i, wl in enumerate(ERROR_WHITELIST)])
+ for count, wl in stats:
+ dbg("whitelist entry stats: %d: %r", count, wl)
+
+ if fatal_failures:
+ for f in fatal_failures:
+ t = f['testcase']
+ logger.error("Fatal failure: %s", formatTestCase(t))
+ logger.error("Fatal failures on some machine/device combinations")
+ return 1
+
+if __name__ == '__main__':
+ sys.exit(main())
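
The genCases()/genCases1() machinery above expands the test matrix one variable at a time (binary, accel, machine, device), keeping any value that was already pinned via "-t KEY=VALUE". A minimal stand-alone sketch of that expansion pattern, independent of the patch (the example generators and values below are made up for illustration):

def expand(cases, key, values_for):
    # Keep a case unchanged if the key is already pinned, otherwise
    # fan it out over every candidate value.
    for case in cases:
        if key in case:
            yield dict(case)
        else:
            for value in values_for(case):
                new = dict(case)
                new[key] = value
                yield new

def gen_cases(pinned, variables):
    cases = [dict(pinned)]
    for key, values_for in variables:
        cases = expand(cases, key, values_for)
    return cases

variables = [
    ('accel',   lambda case: ['tcg', 'kvm']),
    ('machine', lambda case: ['pc', 'q35']),
    ('device',  lambda case: ['e1000', 'virtio-net-pci']),
]
# 'machine' is pinned, so only accel x device is expanded: 4 cases.
for case in gen_cases({'machine': 'pc'}, variables):
    print(case)

Because expand() is a generator, the full cross product is never materialized unless a caller forces it into a list, which mirrors how casesToTest() only calls list() for --random, --shuffle or debug output.
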
diff --git a/scripts/qemu.py b/scripts/qemu.py
index 6d1b6230b7..880e3e8219 100644
--- a/scripts/qemu.py
+++ b/scripts/qemu.py
@@ -85,8 +85,16 @@ class QEMUMachine(object):
return
raise
+ def is_running(self):
+ return self._popen and (self._popen.returncode is None)
+
+ def exitcode(self):
+ if self._popen is None:
+ return None
+ return self._popen.returncode
+
def get_pid(self):
- if not self._popen:
+ if not self.is_running():
return None
return self._popen.pid
@@ -128,16 +136,16 @@ class QEMUMachine(object):
stderr=subprocess.STDOUT, shell=False)
self._post_launch()
except:
- if self._popen:
+ if self.is_running():
self._popen.kill()
+ self._popen.wait()
self._load_io_log()
self._post_shutdown()
- self._popen = None
raise
def shutdown(self):
'''Terminate the VM and clean up'''
- if not self._popen is None:
+ if self.is_running():
try:
self._qmp.cmd('quit')
self._qmp.close()
@@ -149,7 +157,6 @@ class QEMUMachine(object):
sys.stderr.write('qemu received signal %i: %s\n' % (-exitcode, ' '.join(self._args)))
self._load_io_log()
self._post_shutdown()
- self._popen = None
underscore_to_dash = string.maketrans('_', '-')
def qmp(self, cmd, conv_keys=True, **args):
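
The error path above now follows self._popen.kill() with self._popen.wait(), and is_running()/exitcode() are both derived from Popen.returncode. That attribute stays None until the child has actually been reaped by wait() (or poll()), so the added wait() is what makes a later exit-code check meaningful and also avoids leaving a zombie behind. A quick stand-alone illustration with plain subprocess (the sleep child is just a placeholder):

import subprocess

p = subprocess.Popen(['sleep', '60'])
print(p.returncode)   # None: child still running, nothing reaped yet
p.kill()
print(p.returncode)   # still None: kill() signals but does not reap
p.wait()              # reap the child; the exit status becomes available
print(p.returncode)   # -9 on POSIX: terminated by SIGKILL
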
diff --git a/slirp/slirp.c b/slirp/slirp.c
index e79345bdfc..23864938f7 100644
--- a/slirp/slirp.c
+++ b/slirp/slirp.c
@@ -272,6 +272,11 @@ static void slirp_init_once(void)
static void slirp_state_save(QEMUFile *f, void *opaque);
static int slirp_state_load(QEMUFile *f, void *opaque, int version_id);
+static SaveVMHandlers savevm_slirp_state = {
+ .save_state = slirp_state_save,
+ .load_state = slirp_state_load,
+};
+
Slirp *slirp_init(int restricted, bool in_enabled, struct in_addr vnetwork,
struct in_addr vnetmask, struct in_addr vhost,
bool in6_enabled,
@@ -321,8 +326,7 @@ Slirp *slirp_init(int restricted, bool in_enabled, struct in_addr vnetwork,
slirp->opaque = opaque;
- register_savevm(NULL, "slirp", 0, 4,
- slirp_state_save, slirp_state_load, slirp);
+ register_savevm_live(NULL, "slirp", 0, 4, &savevm_slirp_state, slirp);
QTAILQ_INSERT_TAIL(&slirp_instances, slirp, entry);
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index df5d695344..7c45ae360c 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -89,6 +89,9 @@ typedef enum {
updated the PC for the next instruction to be executed. */
EXIT_PC_STALE,
+ /* We are exiting the TB due to page crossing or space constraints. */
+ EXIT_FALLTHRU,
+
/* We are ending the TB with a noreturn function call, e.g. longjmp.
No following code will be executed. */
EXIT_NORETURN,
@@ -1157,6 +1160,7 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
#ifndef CONFIG_USER_ONLY
/* Privileged PAL code */
if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
+ TCGv tmp;
switch (palcode) {
case 0x01:
/* CFLUSH */
@@ -1182,10 +1186,8 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
offsetof(CPUAlphaState, sysval));
break;
- case 0x35: {
+ case 0x35:
/* SWPIPL */
- TCGv tmp;
-
/* Note that we already know we're in kernel mode, so we know
that PS only contains the 3 IPL bits. */
tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
@@ -1197,7 +1199,6 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
tcg_temp_free(tmp);
break;
- }
case 0x36:
/* RDPS */
@@ -1220,6 +1221,14 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
break;
+ case 0x3E:
+ /* WTINT */
+ tmp = tcg_const_i64(1);
+ tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
+ offsetof(CPUState, halted));
+ tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
+ return gen_excp(ctx, EXCP_HALTED, 0);
+
default:
palcode &= 0x3f;
goto do_call_pal;
@@ -1369,7 +1378,7 @@ static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
tmp = tcg_const_i64(1);
tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
offsetof(CPUState, halted));
- return gen_excp(ctx, EXCP_HLT, 0);
+ return gen_excp(ctx, EXCP_HALTED, 0);
case 252:
/* HALT */
@@ -2978,7 +2987,7 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
|| num_insns >= max_insns
|| singlestep
|| ctx.singlestep_enabled)) {
- ret = EXIT_PC_STALE;
+ ret = EXIT_FALLTHRU;
}
} while (ret == NO_EXIT);
@@ -2990,6 +2999,13 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
case EXIT_GOTO_TB:
case EXIT_NORETURN:
break;
+ case EXIT_FALLTHRU:
+ if (use_goto_tb(&ctx, ctx.pc)) {
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, ctx.pc);
+ tcg_gen_exit_tb((uintptr_t)ctx.tb);
+ }
+ /* FALLTHRU */
case EXIT_PC_STALE:
tcg_gen_movi_i64(cpu_pc, ctx.pc);
/* FALLTHRU */
@@ -3001,7 +3017,7 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
}
break;
default:
- abort();
+ g_assert_not_reached();
}
gen_tb_end(tb, num_insns);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 04a3fea03f..28a9141298 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1589,7 +1589,7 @@ static Property arm_cpu_properties[] = {
DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
mp_affinity, ARM64_AFFINITY_INVALID),
- DEFINE_PROP_INT32("node-id", CPUState, numa_node, CPU_UNSET_NUMA_NODE_ID),
+ DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
DEFINE_PROP_END_OF_LIST()
};
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 13da5036bc..16a1e59615 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -704,6 +704,8 @@ struct ARMCPU {
ARMELChangeHook *el_change_hook;
void *el_change_hook_opaque;
+
+ int32_t node_id; /* NUMA node this CPU belongs to */
};
static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index a82ab49c94..860e279658 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -379,7 +379,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
} else if (s->singlestep_enabled) {
gen_exception_internal(EXCP_DEBUG);
} else {
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_pc);
s->is_jmp = DISAS_TB_JUMP;
}
}
@@ -11367,8 +11367,7 @@ void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
gen_a64_set_pc_im(dc->pc);
/* fall through */
case DISAS_JUMP:
- /* indicate that the hash table must be used to find the next TB */
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_pc);
break;
case DISAS_TB_JUMP:
case DISAS_EXC:
diff --git a/target/arm/translate.c b/target/arm/translate.c
index ae6646c05b..0862f9e4aa 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -1182,7 +1182,7 @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
gen_exception_internal(excp);
- s->is_jmp = DISAS_JUMP;
+ s->is_jmp = DISAS_EXC;
}
static void gen_exception_insn(DisasContext *s, int offset, int excp,
@@ -1191,14 +1191,14 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
gen_exception(excp, syn, target_el);
- s->is_jmp = DISAS_JUMP;
+ s->is_jmp = DISAS_EXC;
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
- s->is_jmp = DISAS_JUMP;
+ s->is_jmp = DISAS_EXIT;
}
static inline void gen_hlt(DisasContext *s, int imm)
@@ -4150,7 +4150,15 @@ static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
#endif
}
-static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+static void gen_goto_ptr(void)
+{
+ TCGv addr = tcg_temp_new();
+ tcg_gen_extu_i32_tl(addr, cpu_R[15]);
+ tcg_gen_lookup_and_goto_ptr(addr);
+ tcg_temp_free(addr);
+}
+
+static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
@@ -4158,7 +4166,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_set_pc_im(s, dest);
- tcg_gen_exit_tb(0);
+ gen_goto_ptr();
}
}
@@ -12091,11 +12099,14 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
gen_set_pc_im(dc, dc->pc);
/* fall through */
case DISAS_JUMP:
+ gen_goto_ptr();
+ break;
default:
/* indicate that the hash table must be used to find the next TB */
tcg_gen_exit_tb(0);
break;
case DISAS_TB_JUMP:
+ case DISAS_EXC:
/* nothing more to generate */
break;
case DISAS_WFI:
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 6b2cc34c33..15d383d9af 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -139,6 +139,10 @@ static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
* custom end-of-TB code)
*/
#define DISAS_BX_EXCRET 11
+/* For instructions which want an immediate exit to the main loop,
+ * as opposed to attempting to use lookup_and_goto_ptr.
+ */
+#define DISAS_EXIT 12
#ifdef TARGET_AARCH64
void a64_translate_init(void);
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 9e8c233501..e10abc5e04 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -517,7 +517,7 @@ static void gen_goto_tb(DisasContext *ctx, int which,
if (ctx->singlestep_enabled) {
gen_excp_1(EXCP_DEBUG);
} else {
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_iaoq_f);
}
}
}
@@ -1510,7 +1510,7 @@ static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
} else if (is_n && use_nullify_skip(ctx)) {
/* The (conditional) branch, B, nullifies the next insn, N,
and we're allowed to skip execution N (no single-step or
- tracepoint in effect). Since the exit_tb that we must use
+ tracepoint in effect). Since the goto_ptr that we must use
for the indirect branch consumes no special resources, we
can (conditionally) skip B and continue execution. */
/* The use_nullify_skip test implies we have a known control path. */
@@ -1527,7 +1527,7 @@ static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
if (link != 0) {
tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
}
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_iaoq_f);
return nullify_end(ctx, NO_EXIT);
} else {
cond_prep(&ctx->null_cond);
@@ -3885,7 +3885,7 @@ void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
if (ctx.singlestep_enabled) {
gen_excp_1(EXCP_DEBUG);
} else {
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_iaoq_f);
}
break;
default:
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index a41d595c23..ffb5267162 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3986,7 +3986,7 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
- DEFINE_PROP_INT32("node-id", CPUState, numa_node, CPU_UNSET_NUMA_NODE_ID),
+ DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
{ .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c4602ca80d..cfe825f0a4 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1275,6 +1275,7 @@ struct X86CPU {
struct kvm_msrs *kvm_msr_buf;
+ int32_t node_id; /* NUMA node this CPU belongs to */
int32_t socket_id;
int32_t core_id;
int32_t thread_id;
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 73469311d6..097db5cae1 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -635,6 +635,16 @@ void hax_cpu_synchronize_post_init(CPUState *cpu)
run_on_cpu(cpu, do_hax_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
+static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
+{
+ cpu->hax_vcpu_dirty = true;
+}
+
+void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hax_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
int hax_smp_cpu_exec(CPUState *cpu)
{
CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 1d1372fb43..674ec96d5a 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -141,6 +141,7 @@ typedef struct DisasContext {
} DisasContext;
static void gen_eob(DisasContext *s);
+static void gen_jr(DisasContext *s, TCGv dest);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
@@ -2153,9 +2154,9 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
gen_jmp_im(eip);
tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
} else {
- /* jump to another page: currently not optimized */
+ /* jump to another page */
gen_jmp_im(eip);
- gen_eob(s);
+ gen_jr(s, cpu_tmp0);
}
}
@@ -2509,7 +2510,8 @@ static void gen_bnd_jmp(DisasContext *s)
If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
S->TF. This is used by the syscall/sysret insns. */
-static void gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
+static void
+do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, TCGv jr)
{
gen_update_cc_op(s);
@@ -2530,12 +2532,27 @@ static void gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
tcg_gen_exit_tb(0);
} else if (s->tf) {
gen_helper_single_step(cpu_env);
+ } else if (!TCGV_IS_UNUSED(jr)) {
+ TCGv vaddr = tcg_temp_new();
+
+ tcg_gen_add_tl(vaddr, jr, cpu_seg_base[R_CS]);
+ tcg_gen_lookup_and_goto_ptr(vaddr);
+ tcg_temp_free(vaddr);
} else {
tcg_gen_exit_tb(0);
}
s->is_jmp = DISAS_TB_JUMP;
}
+static inline void
+gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
+{
+ TCGv unused;
+
+ TCGV_UNUSED(unused);
+ do_gen_eob_worker(s, inhibit, recheck_tf, unused);
+}
+
/* End of block.
If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
@@ -2549,6 +2566,12 @@ static void gen_eob(DisasContext *s)
gen_eob_worker(s, false, false);
}
+/* Jump to register */
+static void gen_jr(DisasContext *s, TCGv dest)
+{
+ do_gen_eob_worker(s, false, false, dest);
+}
+
/* generate a jump to eip. No segment change must happen before as a
direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
@@ -4973,7 +4996,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_push_v(s, cpu_T1);
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
- gen_eob(s);
+ gen_jr(s, cpu_T0);
break;
case 3: /* lcall Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
@@ -4991,7 +5014,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
}
- gen_eob(s);
+ tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
+ gen_jr(s, cpu_tmp4);
break;
case 4: /* jmp Ev */
if (dflag == MO_16) {
@@ -4999,7 +5023,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
- gen_eob(s);
+ gen_jr(s, cpu_T0);
break;
case 5: /* ljmp Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
@@ -5014,7 +5038,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_op_movl_seg_T0_vm(R_CS);
gen_op_jmp_v(cpu_T1);
}
- gen_eob(s);
+ tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
+ gen_jr(s, cpu_tmp4);
break;
case 6: /* push Ev */
gen_push_v(s, cpu_T0);
@@ -6394,7 +6419,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
- gen_eob(s);
+ gen_jr(s, cpu_T0);
break;
case 0xc3: /* ret */
ot = gen_pop_T0(s);
@@ -6402,7 +6427,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
- gen_eob(s);
+ gen_jr(s, cpu_T0);
break;
case 0xca: /* lret im */
val = cpu_ldsw_code(env, s->pc);
diff --git a/target/mips/translate.c b/target/mips/translate.c
index 3022f349cb..559f8fed89 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -4233,7 +4233,7 @@ static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
save_cpu_state(ctx, 0);
gen_helper_raise_exception_debug(cpu_env);
}
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_PC);
}
}
@@ -10725,7 +10725,7 @@ static void gen_branch(DisasContext *ctx, int insn_bytes)
save_cpu_state(ctx, 0);
gen_helper_raise_exception_debug(cpu_env);
}
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_PC);
break;
default:
fprintf(stderr, "unknown branch 0x%x\n", proc_hflags);
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index cfec47959d..2f3c2e5dfb 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -164,7 +164,7 @@ static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
tcg_gen_exit_tb(0);
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 401e10e7da..d10808d9f4 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1205,6 +1205,7 @@ struct PowerPCCPU {
uint32_t compat_pvr;
PPCVirtualHypervisor *vhyp;
Object *intc;
+ int32_t node_id; /* NUMA node this CPU belongs to */
/* Fields related to migration compatibility hacks */
bool pre_2_8_migration;
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index c74b4193ee..a4d31df2b5 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -107,6 +107,8 @@ typedef struct CPUS390XState {
uint64_t cc_dst;
uint64_t cc_vr;
+ uint64_t ex_value;
+
uint64_t __excp_addr;
uint64_t psa;
@@ -393,7 +395,7 @@ static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->psw.addr;
- *cs_base = 0;
+ *cs_base = env->ex_value;
*flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) |
((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0);
}
@@ -1033,6 +1035,8 @@ struct sysib_322 {
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+#define VADDR_PX 0xff000 /* page index bits */
+
#define _PAGE_RO 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_RES0 0x800 /* bit must be zero */
@@ -1084,6 +1088,7 @@ struct sysib_322 {
#define SIGP_ORDER_MASK 0x000000ff
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
+target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr);
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, bool exc);
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 8d27363b07..fc3cb25cc3 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -658,6 +658,32 @@ static void check_compatibility(const S390CPUModel *max_model,
"available in the configuration: ");
}
+/**
+ * The base TCG CPU model "qemu" is based on the z900. However, we can
+ * already emulate some additional features of later CPU generations, so
+ * we add these additional feature bits here.
+ */
+static void add_qemu_cpu_model_features(S390FeatBitmap fbm)
+{
+ static const int feats[] = {
+ S390_FEAT_STFLE,
+ S390_FEAT_EXTENDED_IMMEDIATE,
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_LONG_DISPLACEMENT_FAST,
+ S390_FEAT_ETF2_ENH,
+ S390_FEAT_STORE_CLOCK_FAST,
+ S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
+ S390_FEAT_EXECUTE_EXT,
+ S390_FEAT_STFLE_45,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(feats); i++) {
+ set_bit(feats[i], fbm);
+ }
+}
+
static S390CPUModel *get_max_cpu_model(Error **errp)
{
static S390CPUModel max_model;
@@ -670,10 +696,11 @@ static S390CPUModel *get_max_cpu_model(Error **errp)
if (kvm_enabled()) {
kvm_s390_get_host_cpu_model(&max_model, errp);
} else {
- /* TCG emulates a z900 */
+ /* TCG emulates a z900 (with some optional additional features) */
max_model.def = &s390_cpu_defs[0];
bitmap_copy(max_model.features, max_model.def->default_feat,
S390_FEAT_MAX);
+ add_qemu_cpu_model_features(max_model.features);
}
if (!*errp) {
cached = true;
@@ -925,11 +952,14 @@ static void s390_host_cpu_model_initfn(Object *obj)
static void s390_qemu_cpu_model_initfn(Object *obj)
{
+ static S390CPUDef s390_qemu_cpu_defs;
S390CPU *cpu = S390_CPU(obj);
cpu->model = g_malloc0(sizeof(*cpu->model));
- /* TCG emulates a z900 */
- cpu->model->def = &s390_cpu_defs[0];
+ /* TCG emulates a z900 (with some optional additional features) */
+ memcpy(&s390_qemu_cpu_defs, &s390_cpu_defs[0], sizeof(s390_qemu_cpu_defs));
+ add_qemu_cpu_model_features(s390_qemu_cpu_defs.full_feat);
+ cpu->model->def = &s390_qemu_cpu_defs;
bitmap_copy(cpu->model->features, cpu->model->def->default_feat,
S390_FEAT_MAX);
}
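
add_qemu_cpu_model_features() above simply ORs a fixed list of extra feature bits into the z900-based "qemu" model, and the same list is applied both to the maximum model used for compatibility checks and to the model a TCG guest actually sees. A rough host-side sketch with Python sets standing in for S390FeatBitmap (the baseline names are illustrative, not the real z900 default feature set):

# Illustrative baseline only; the real default set comes from s390_cpu_defs[0].
Z900_DEFAULT_FEATS = {'esan3', 'zarch'}

# Mirrors the S390_FEAT_* list in add_qemu_cpu_model_features().
EXTRA_TCG_FEATS = {
    'stfle', 'extended-immediate', 'extended-translation-2',
    'long-displacement', 'long-displacement-fast', 'etf2-enh',
    'store-clock-fast', 'general-instructions-ext', 'execute-ext',
    'stfle-45',
}

def qemu_model_features():
    # Baseline plus everything TCG can additionally emulate.
    return Z900_DEFAULT_FEATS | EXTRA_TCG_FEATS
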
diff --git a/target/s390x/fpu_helper.c b/target/s390x/fpu_helper.c
index e604e9f7be..26f124fe96 100644
--- a/target/s390x/fpu_helper.c
+++ b/target/s390x/fpu_helper.c
@@ -585,6 +585,33 @@ uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint32_t m3)
return RET128(ret);
}
+/* 32-bit FP compare and signal */
+uint32_t HELPER(keb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ int cmp = float32_compare(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 64-bit FP compare and signal */
+uint32_t HELPER(kdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ int cmp = float64_compare(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 128-bit FP compare and signal */
+uint32_t HELPER(kxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ int cmp = float128_compare(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
/* 32-bit FP multiply and add */
uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1,
uint64_t f2, uint64_t f3)
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index 4f8aadf305..a8d20c51fa 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -204,7 +204,7 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
if (raddr > ram_size) {
DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
(uint64_t)raddr, (uint64_t)ram_size);
- trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
+ trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER_INC);
return 1;
}
@@ -642,6 +642,11 @@ bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
+ if (env->ex_value) {
+ /* Execution of the target insn is indivisible from
+ the parent EXECUTE insn. */
+ return false;
+ }
if (env->psw.mask & PSW_MASK_EXT) {
s390_cpu_do_interrupt(cs);
return true;
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 0b70770e4e..69249a5249 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -3,8 +3,10 @@ DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(mvc, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvcin, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(clc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_3(mvcl, i32, env, i32, i32)
+DEF_HELPER_3(clcl, i32, env, i32, i32)
DEF_HELPER_FLAGS_4(clm, TCG_CALL_NO_WG, i32, env, i32, i32, i64)
DEF_HELPER_FLAGS_3(divs32, TCG_CALL_NO_WG, s64, env, s64, s64)
DEF_HELPER_FLAGS_3(divu32, TCG_CALL_NO_WG, i64, env, i64, i64)
@@ -12,13 +14,18 @@ DEF_HELPER_FLAGS_3(divs64, TCG_CALL_NO_WG, s64, env, s64, s64)
DEF_HELPER_FLAGS_4(divu64, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_4(srst, i64, env, i64, i64, i64)
DEF_HELPER_4(clst, i64, env, i64, i64, i64)
-DEF_HELPER_4(mvpg, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mvn, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvo, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvpg, TCG_CALL_NO_WG, i32, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mvz, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_4(mvst, i64, env, i64, i64, i64)
-DEF_HELPER_5(ex, i32, env, i32, i64, i64, i64)
+DEF_HELPER_4(ex, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(stam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(lam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_4(mvcle, i32, env, i32, i64, i32)
+DEF_HELPER_4(mvclu, i32, env, i32, i64, i32)
DEF_HELPER_4(clcle, i32, env, i32, i64, i32)
+DEF_HELPER_4(clclu, i32, env, i32, i64, i32)
DEF_HELPER_3(cegb, i64, env, s64, i32)
DEF_HELPER_3(cdgb, i64, env, s64, i32)
DEF_HELPER_3(cxgb, i64, env, s64, i32)
@@ -49,6 +56,9 @@ DEF_HELPER_FLAGS_3(lexb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(ceb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
DEF_HELPER_FLAGS_3(cdb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
DEF_HELPER_FLAGS_5(cxb, TCG_CALL_NO_WG_SE, i32, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(keb, TCG_CALL_NO_WG, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(kdb, TCG_CALL_NO_WG, i32, env, i64, i64)
+DEF_HELPER_FLAGS_5(kxb, TCG_CALL_NO_WG, i32, env, i64, i64, i64, i64)
DEF_HELPER_FLAGS_3(cgeb, TCG_CALL_NO_WG, i64, env, i64, i32)
DEF_HELPER_FLAGS_3(cgdb, TCG_CALL_NO_WG, i64, env, i64, i32)
DEF_HELPER_FLAGS_4(cgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
@@ -75,10 +85,17 @@ DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_3(sqxb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32)
+DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(pka, TCG_CALL_NO_WG, void, env, i64, i64, i32)
+DEF_HELPER_FLAGS_4(pku, TCG_CALL_NO_WG, void, env, i64, i64, i32)
DEF_HELPER_FLAGS_4(unpk, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(unpka, TCG_CALL_NO_WG, i32, env, i64, i32, i64)
+DEF_HELPER_FLAGS_4(unpku, TCG_CALL_NO_WG, i32, env, i64, i32, i64)
+DEF_HELPER_FLAGS_3(tp, TCG_CALL_NO_WG, i32, env, i64, i32)
DEF_HELPER_FLAGS_4(tr, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_4(tre, i64, env, i64, i64, i64)
DEF_HELPER_4(trt, i32, env, i32, i64, i64)
+DEF_HELPER_5(trXX, i32, env, i32, i32, i32, i32)
DEF_HELPER_4(cksm, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64)
DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_RWG, void, env, i64)
@@ -86,6 +103,8 @@ DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_2(stfle, i32, env, i64)
+DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
@@ -102,17 +121,18 @@ DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_2(testblock, TCG_CALL_NO_WG, i32, env, i64)
DEF_HELPER_FLAGS_2(tprot, TCG_CALL_NO_RWG, i32, i64, i64)
DEF_HELPER_FLAGS_2(iske, TCG_CALL_NO_RWG_SE, i64, env, i64)
DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64)
DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64)
-DEF_HELPER_3(csp, i32, env, i32, i64)
DEF_HELPER_4(mvcs, i32, env, i64, i64, i64)
DEF_HELPER_4(mvcp, i32, env, i64, i64, i64)
DEF_HELPER_4(sigp, i32, env, i64, i32, i64)
DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64)
-DEF_HELPER_FLAGS_3(ipte, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_4(ipte, TCG_CALL_NO_RWG, void, env, i64, i64, i32)
DEF_HELPER_FLAGS_1(ptlb, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(purge, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_2(lra, i64, env, i64)
DEF_HELPER_FLAGS_2(lura, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(lurag, TCG_CALL_NO_WG, i64, env, i64)
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index 55a7c529b4..73dd05daf0 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -154,6 +154,12 @@
C(0xb349, CXBR, RRE, Z, x1_o, x2_o, 0, 0, cxb, 0)
C(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0)
C(0xed19, CDB, RXE, Z, f1_o, m2_64, 0, 0, cdb, 0)
+/* COMPARE AND SIGNAL */
+ C(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0)
+ C(0xb318, KDBR, RRE, Z, f1_o, f2_o, 0, 0, kdb, 0)
+ C(0xb348, KXBR, RRE, Z, x1_o, x2_o, 0, 0, kxb, 0)
+ C(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0)
+ C(0xed18, KDB, RXE, Z, f1_o, m2_64, 0, 0, kdb, 0)
/* COMPARE IMMEDIATE */
C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32)
C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64)
@@ -210,8 +216,12 @@
C(0xc60e, CLGFRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64)
C(0xc607, CLHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32)
C(0xc606, CLGHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64)
+/* COMPARE LOGICAL LONG */
+ C(0x0f00, CLCL, RR_a, Z, 0, 0, 0, 0, clcl, 0)
/* COMPARE LOGICAL LONG EXTENDED */
C(0xa900, CLCLE, RS_a, Z, 0, a2, 0, 0, clcle, 0)
+/* COMPARE LOGICAL LONG UNICODE */
+ C(0xeb8f, CLCLU, RSY_a, E2, 0, a2, 0, 0, clclu, 0)
/* COMPARE LOGICAL CHARACTERS UNDER MASK */
C(0xbd00, CLM, RS_b, Z, r1_o, a2, 0, 0, clm, 0)
C(0xeb21, CLMY, RSY_b, LD, r1_o, a2, 0, 0, clm, 0)
@@ -327,9 +337,9 @@
C(0xeb57, XIY, SIY, LD, m1_8u, i2_8u, new, m1_8, xor, nz64)
/* EXECUTE */
- C(0x4400, EX, RX_a, Z, r1_o, a2, 0, 0, ex, 0)
+ C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0)
/* EXECUTE RELATIVE LONG */
- C(0xc600, EXRL, RIL_b, EE, r1_o, ri2, 0, 0, ex, 0)
+ C(0xc600, EXRL, RIL_b, EE, 0, ri2, 0, 0, ex, 0)
/* EXTRACT ACCESS */
C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0)
@@ -507,6 +517,8 @@
/* LOAD PAIR DISJOINT */
D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL)
D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ)
+/* LOAD PAIR FROM QUADWORD */
+ C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0)
/* LOAD POSITIVE */
C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32)
C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64)
@@ -564,14 +576,26 @@
C(0xe548, MVGHI, SIL, GIE, la1, i2, 0, m1_64, mov2, 0)
C(0x9200, MVI, SI, Z, la1, i2, 0, m1_8, mov2, 0)
C(0xeb52, MVIY, SIY, LD, la1, i2, 0, m1_8, mov2, 0)
+/* MOVE INVERSE */
+ C(0xe800, MVCIN, SS_a, Z, la1, a2, 0, 0, mvcin, 0)
/* MOVE LONG */
C(0x0e00, MVCL, RR_a, Z, 0, 0, 0, 0, mvcl, 0)
/* MOVE LONG EXTENDED */
C(0xa800, MVCLE, RS_a, Z, 0, a2, 0, 0, mvcle, 0)
+/* MOVE LONG UNICODE */
+ C(0xeb8e, MVCLU, RSY_a, E2, 0, a2, 0, 0, mvclu, 0)
+/* MOVE NUMERICS */
+ C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0)
/* MOVE PAGE */
C(0xb254, MVPG, RRE, Z, r1_o, r2_o, 0, 0, mvpg, 0)
/* MOVE STRING */
C(0xb255, MVST, RRE, Z, r1_o, r2_o, 0, 0, mvst, 0)
+/* MOVE WITH OFFSET */
+ /* Really format SS_b, but we pack both lengths into one argument
+ for the helper call, so we might as well leave one 8-bit field. */
+ C(0xf100, MVO, SS_a, Z, la1, a2, 0, 0, mvo, 0)
+/* MOVE ZONES */
+ C(0xd300, MVZ, SS_a, Z, la1, a2, 0, 0, mvz, 0)
/* MULTIPLY */
C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0)
@@ -639,6 +663,15 @@
C(0x9600, OI, SI, Z, m1_8u, i2_8u, new, m1_8, or, nz64)
C(0xeb56, OIY, SIY, LD, m1_8u, i2_8u, new, m1_8, or, nz64)
+/* PACK */
+ /* Really format SS_b, but we pack both lengths into one argument
+ for the helper call, so we might as well leave one 8-bit field. */
+ C(0xf200, PACK, SS_a, Z, la1, a2, 0, 0, pack, 0)
+/* PACK ASCII */
+ C(0xe900, PKA, SS_f, E2, la1, a2, 0, 0, pka, 0)
+/* PACK UNICODE */
+ C(0xe100, PKU, SS_f, E2, la1, a2, 0, 0, pku, 0)
+
/* PREFETCH */
/* Implemented as nops of course. */
C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0)
@@ -763,6 +796,8 @@
/* STORE ACCESS MULTIPLE */
C(0x9b00, STAM, RS_a, Z, 0, a2, 0, 0, stam, 0)
C(0xeb9b, STAMY, RSY_a, LD, 0, a2, 0, 0, stam, 0)
+/* STORE PAIR TO QUADWORD */
+ C(0xe38e, STPQ, RXY_a, Z, 0, a2, r1_P, 0, stpq, 0)
/* SUBTRACT */
C(0x1b00, SR, RR_a, Z, r1, r2, new, r1_32, sub, subs32)
@@ -810,11 +845,20 @@
/* SUPERVISOR CALL */
C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0)
+/* TEST ADDRESSING MODE */
+ C(0x010b, TAM, E, Z, 0, 0, 0, 0, tam, 0)
+
+/* TEST AND SET */
+ C(0x9300, TS, S, Z, 0, a2, 0, 0, ts, 0)
+
/* TEST DATA CLASS */
C(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0)
C(0xed11, TCDB, RXE, Z, f1_o, a2, 0, 0, tcdb, 0)
C(0xed12, TCXB, RXE, Z, x1_o, a2, 0, 0, tcxb, 0)
+/* TEST DECIMAL */
+ C(0xebc0, TP, RSL, E2, la1, 0, 0, 0, tp, 0)
+
/* TEST UNDER MASK */
C(0x9100, TM, SI, Z, m1_8u, i2_8u, 0, 0, 0, tm32)
C(0xeb51, TMY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, tm32)
@@ -830,14 +874,28 @@
/* TRANSLATE EXTENDED */
C(0xb2a5, TRE, RRE, Z, 0, r2, r1_P, 0, tre, 0)
+/* TRANSLATE ONE TO ONE */
+ C(0xb993, TROO, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE ONE TO TWO */
+ C(0xb992, TROT, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE TWO TO ONE */
+ C(0xb991, TRTO, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE TWO TO TWO */
+ C(0xb990, TRTT, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+
/* UNPACK */
/* Really format SS_b, but we pack both lengths into one argument
for the helper call, so we might as well leave one 8-bit field. */
C(0xf300, UNPK, SS_a, Z, la1, a2, 0, 0, unpk, 0)
+/* UNPACK ASCII */
+ C(0xea00, UNPKA, SS_a, E2, la1, a2, 0, 0, unpka, 0)
+/* UNPACK UNICODE */
+ C(0xe200, UNPKU, SS_a, E2, la1, a2, 0, 0, unpku, 0)
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
- C(0xb250, CSP, RRE, Z, 0, ra2, 0, 0, csp, 0)
+ D(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL)
+ D(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ)
/* DIAGNOSE (KVM hypercall) */
C(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0)
/* INSERT STORAGE KEY EXTENDED */
@@ -918,6 +976,8 @@
/* STORE USING REAL ADDRESS */
C(0xb246, STURA, RRE, Z, r1_o, r2_o, 0, 0, stura, 0)
C(0xb925, STURG, RRE, Z, r1_o, r2_o, 0, 0, sturg, 0)
+/* TEST BLOCK */
+ C(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0)
/* TEST PROTECTION */
C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0)
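
The "we pack both lengths into one argument" comments above (MVO, PACK, UNPK) refer to the two 4-bit L1/L2 length fields of SS_b-style instructions being handed to the helper in a single 8-bit value, which the helper then splits again; HELPER(mvo) in mem_helper.c does exactly l >> 4 and l & 0xf. A tiny stand-alone illustration of that packing (not QEMU code):

def pack_lengths(l1, l2):
    # Decoder side: two 4-bit length fields carried in one byte.
    assert 0 <= l1 <= 15 and 0 <= l2 <= 15
    return (l1 << 4) | l2

def unpack_lengths(l):
    # Helper side: first-operand length in the high nibble,
    # second-operand length in the low nibble.
    return l >> 4, l & 0xf

assert unpack_lengths(pack_lengths(3, 9)) == (3, 9)
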
diff --git a/target/s390x/machine.c b/target/s390x/machine.c
index 8503fa1c8d..8f908bbe82 100644
--- a/target/s390x/machine.c
+++ b/target/s390x/machine.c
@@ -34,6 +34,7 @@ static int cpu_post_load(void *opaque, int version_id)
return 0;
}
+
static void cpu_pre_save(void *opaque)
{
S390CPU *cpu = opaque;
@@ -156,6 +157,23 @@ const VMStateDescription vmstate_riccb = {
}
};
+static bool exval_needed(void *opaque)
+{
+ S390CPU *cpu = opaque;
+ return cpu->env.ex_value != 0;
+}
+
+const VMStateDescription vmstate_exval = {
+ .name = "cpu/exval",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = exval_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.ex_value, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_s390_cpu = {
.name = "cpu",
.post_load = cpu_post_load,
@@ -188,6 +206,7 @@ const VMStateDescription vmstate_s390_cpu = {
&vmstate_fpu,
&vmstate_vregs,
&vmstate_riccb,
+ &vmstate_exval,
NULL
},
};
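
vmstate_exval above is a subsection guarded by exval_needed(), so the new ex_value field is only put on the migration stream while an EXECUTE is actually in flight; when it is zero the stream looks exactly as it did before, which is the usual way such state additions remain compatible with older QEMU versions. As a plain-Python analogy of the conditional-field idea (this is not the VMState API):

def serialize_cpu(cpu_state):
    data = {'psw': cpu_state['psw'], 'regs': cpu_state['regs']}
    if cpu_state.get('ex_value'):        # mirrors exval_needed()
        data['cpu/exval'] = {'ex_value': cpu_state['ex_value']}
    return data

print(serialize_cpu({'psw': 0, 'regs': [0] * 16, 'ex_value': 0}))
# -> no 'cpu/exval' key when ex_value is zero
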
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index f6e5bcec5d..80caab9c9d 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/address-spaces.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
@@ -40,15 +41,9 @@
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
- int ret;
-
- ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ int ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret != 0)) {
- if (likely(retaddr)) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- cpu_loop_exit(cs);
+ cpu_loop_exit_restore(cs, retaddr);
}
}
@@ -62,18 +57,61 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
#endif
/* Reduce the length so that addr + len doesn't cross a page boundary. */
-static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
+static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
- return -addr & ~TARGET_PAGE_MASK;
+ return -(addr | TARGET_PAGE_MASK);
}
#endif
return len;
}
+/* Trigger a SPECIFICATION exception if an address or a length is not
+ naturally aligned. */
+static inline void check_alignment(CPUS390XState *env, uint64_t v,
+ int wordsize, uintptr_t ra)
+{
+ if (v % wordsize) {
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ cpu_restore_state(cs, ra);
+ program_interrupt(env, PGM_SPECIFICATION, 6);
+ }
+}
+
+/* Load a value from memory according to its size. */
+static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
+ int wordsize, uintptr_t ra)
+{
+ switch (wordsize) {
+ case 1:
+ return cpu_ldub_data_ra(env, addr, ra);
+ case 2:
+ return cpu_lduw_data_ra(env, addr, ra);
+ default:
+ abort();
+ }
+}
+
+/* Store a value to memory according to its size.  */
+static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
+ uint64_t value, int wordsize,
+ uintptr_t ra)
+{
+ switch (wordsize) {
+ case 1:
+ cpu_stb_data_ra(env, addr, value, ra);
+ break;
+ case 2:
+ cpu_stw_data_ra(env, addr, value, ra);
+ break;
+ default:
+ abort();
+ }
+}
+
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
- uint32_t l)
+ uint32_t l, uintptr_t ra)
{
int mmu_idx = cpu_mmu_index(env, false);
@@ -81,14 +119,14 @@ static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
if (p) {
/* Access to the whole page in write mode granted. */
- int l_adj = adj_len_to_page(l, dest);
+ uint32_t l_adj = adj_len_to_page(l, dest);
memset(p, byte, l_adj);
dest += l_adj;
l -= l_adj;
} else {
/* We failed to get access to the whole page. The next write
access will likely fill the QEMU TLB for the next iteration. */
- cpu_stb_data(env, dest, byte);
+ cpu_stb_data_ra(env, dest, byte, ra);
dest++;
l--;
}
@@ -96,7 +134,7 @@ static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
}
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
- uint32_t l)
+ uint32_t l, uintptr_t ra)
{
int mmu_idx = cpu_mmu_index(env, false);
@@ -105,7 +143,7 @@ static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
if (src_p && dest_p) {
/* Access to both whole pages granted. */
- int l_adj = adj_len_to_page(l, src);
+ uint32_t l_adj = adj_len_to_page(l, src);
l_adj = adj_len_to_page(l_adj, dest);
memmove(dest_p, src_p, l_adj);
src += l_adj;
@@ -115,7 +153,7 @@ static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
/* We failed to get access to one or both whole pages. The next
read or write access will likely fill the QEMU TLB for the
next iteration. */
- cpu_stb_data(env, dest, cpu_ldub_data(env, src));
+ cpu_stb_data_ra(env, dest, cpu_ldub_data_ra(env, src, ra), ra);
src++;
dest++;
l--;
@@ -124,140 +162,233 @@ static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
}
/* and on array */
-uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
- uint64_t src)
+static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
{
- int i;
- unsigned char x;
- uint32_t cc = 0;
+ uint32_t i;
+ uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
+
for (i = 0; i <= l; i++) {
- x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
- if (x) {
- cc = 1;
- }
- cpu_stb_data(env, dest + i, x);
+ uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
+ x &= cpu_ldub_data_ra(env, dest + i, ra);
+ c |= x;
+ cpu_stb_data_ra(env, dest + i, x, ra);
}
- return cc;
+ return c != 0;
}
-/* xor on array */
-uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t src)
{
- int i;
- unsigned char x;
- uint32_t cc = 0;
+ return do_helper_nc(env, l, dest, src, GETPC());
+}
+
+/* xor on array */
+static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
+{
+ uint32_t i;
+ uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
/* xor with itself is the same as memset(0) */
if (src == dest) {
- fast_memset(env, dest, 0, l + 1);
+ fast_memset(env, dest, 0, l + 1, ra);
return 0;
}
for (i = 0; i <= l; i++) {
- x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
- if (x) {
- cc = 1;
- }
- cpu_stb_data(env, dest + i, x);
+ uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
+ x ^= cpu_ldub_data_ra(env, dest + i, ra);
+ c |= x;
+ cpu_stb_data_ra(env, dest + i, x, ra);
}
- return cc;
+ return c != 0;
}
-/* or on array */
-uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t src)
{
- int i;
- unsigned char x;
- uint32_t cc = 0;
+ return do_helper_xc(env, l, dest, src, GETPC());
+}
+
+/* or on array */
+static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
+{
+ uint32_t i;
+ uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
+
for (i = 0; i <= l; i++) {
- x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
- if (x) {
- cc = 1;
- }
- cpu_stb_data(env, dest + i, x);
+ uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
+ x |= cpu_ldub_data_ra(env, dest + i, ra);
+ c |= x;
+ cpu_stb_data_ra(env, dest + i, x, ra);
}
- return cc;
+ return c != 0;
+}
+
+uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ return do_helper_oc(env, l, dest, src, GETPC());
}
/* memmove */
-void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
{
- int i = 0;
+ uint32_t i;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
+ /* mvc and memmove do not behave the same when areas overlap! */
/* mvc with source pointing to the byte after the destination is the
same as memset with the first source byte */
- if (dest == (src + 1)) {
- fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
- return;
+ if (dest == src + 1) {
+ fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l + 1, ra);
+ } else if (dest < src || src + l < dest) {
+ fast_memmove(env, dest, src, l + 1, ra);
+ } else {
+ /* slow version with byte accesses which always work */
+ for (i = 0; i <= l; i++) {
+ uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
+ cpu_stb_data_ra(env, dest + i, x, ra);
+ }
}
- /* mvc and memmove do not behave the same when areas overlap! */
- if ((dest < src) || (src + l < dest)) {
- fast_memmove(env, dest, src, l + 1);
- return;
+ return env->cc_op;
+}
+
+void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ do_helper_mvc(env, l, dest, src, GETPC());
+}
+
+/* move inverse */
+void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int i;
+
+ for (i = 0; i <= l; i++) {
+ uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
+ cpu_stb_data_ra(env, dest + i, v, ra);
}
+}
+
+/* move numerics */
+void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int i;
- /* slow version with byte accesses which always work */
for (i = 0; i <= l; i++) {
- cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
+ uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
+ v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
+ cpu_stb_data_ra(env, dest + i, v, ra);
}
}
-/* compare unsigned byte arrays */
-uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
+/* move with offset */
+void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
+ uintptr_t ra = GETPC();
+ int len_dest = l >> 4;
+ int len_src = l & 0xf;
+ uint8_t byte_dest, byte_src;
int i;
- unsigned char x, y;
- uint32_t cc;
+
+ src += len_src;
+ dest += len_dest;
+
+ /* Handle rightmost byte */
+ byte_src = cpu_ldub_data_ra(env, src, ra);
+ byte_dest = cpu_ldub_data_ra(env, dest, ra);
+ byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
+ cpu_stb_data_ra(env, dest, byte_dest, ra);
+
+ /* Process remaining bytes from right to left */
+ for (i = 1; i <= len_dest; i++) {
+ byte_dest = byte_src >> 4;
+ if (len_src - i >= 0) {
+ byte_src = cpu_ldub_data_ra(env, src - i, ra);
+ } else {
+ byte_src = 0;
+ }
+ byte_dest |= byte_src << 4;
+ cpu_stb_data_ra(env, dest - i, byte_dest, ra);
+ }
+}
+
+/* move zones */
+void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int i;
+
+ for (i = 0; i <= l; i++) {
+ uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
+ b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
+ cpu_stb_data_ra(env, dest + i, b, ra);
+ }
+}
+
+/* compare unsigned byte arrays */
+static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
+ uint64_t s2, uintptr_t ra)
+{
+ uint32_t i;
+ uint32_t cc = 0;
HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
__func__, l, s1, s2);
+
for (i = 0; i <= l; i++) {
- x = cpu_ldub_data(env, s1 + i);
- y = cpu_ldub_data(env, s2 + i);
+ uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
+ uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
if (x < y) {
cc = 1;
- goto done;
+ break;
} else if (x > y) {
cc = 2;
- goto done;
+ break;
}
}
- cc = 0;
- done:
+
HELPER_LOG("\n");
return cc;
}
+uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
+{
+ return do_helper_clc(env, l, s1, s2, GETPC());
+}
+
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
uint64_t addr)
{
- uint8_t r, d;
- uint32_t cc;
+ uintptr_t ra = GETPC();
+ uint32_t cc = 0;
HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
mask, addr);
- cc = 0;
+
while (mask) {
if (mask & 8) {
- d = cpu_ldub_data(env, addr);
- r = (r1 & 0xff000000UL) >> 24;
+ uint8_t d = cpu_ldub_data_ra(env, addr, ra);
+ uint8_t r = extract32(r1, 24, 8);
HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
addr);
if (r < d) {
@@ -272,45 +403,88 @@ uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
mask = (mask << 1) & 0xf;
r1 <<= 8;
}
+
HELPER_LOG("\n");
return cc;
}
-static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
+static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
{
- /* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
- a &= 0x7fffffff;
+ if (!(env->psw.mask & PSW_MASK_32)) {
+ /* 24-Bit mode */
+ a &= 0x00ffffff;
+ } else {
+ /* 31-Bit mode */
+ a &= 0x7fffffff;
+ }
}
return a;
}
-static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
+static inline uint64_t get_address(CPUS390XState *env, int reg)
+{
+ return wrap_address(env, env->regs[reg]);
+}
+
+static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
- uint64_t r = d2;
- if (x2) {
- r += env->regs[x2];
+ if (env->psw.mask & PSW_MASK_64) {
+ /* 64-Bit mode */
+ env->regs[reg] = address;
+ } else {
+ if (!(env->psw.mask & PSW_MASK_32)) {
+ /* 24-Bit mode. According to the PoO it is implementation
+ dependent if bits 32-39 remain unchanged or are set to
+ zeros. Choose the former so that the function can also be
+ used for TRT. */
+ env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
+ } else {
+ /* 31-Bit mode. According to the PoO it is implementation
+ dependent if bit 32 remains unchanged or is set to zero.
+ Choose the latter so that the function can also be used for
+ TRT. */
+ address &= 0x7fffffff;
+ env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
+ }
}
- if (b2) {
- r += env->regs[b2];
+}
+
+static inline uint64_t wrap_length(CPUS390XState *env, uint64_t length)
+{
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ /* 24-Bit and 31-Bit mode */
+ length &= 0x7fffffff;
}
- return fix_address(env, r);
+ return length;
}
-static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
+static inline uint64_t get_length(CPUS390XState *env, int reg)
{
- return fix_address(env, env->regs[reg]);
+ return wrap_length(env, env->regs[reg]);
+}
+
+static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
+{
+ if (env->psw.mask & PSW_MASK_64) {
+ /* 64-Bit mode */
+ env->regs[reg] = length;
+ } else {
+ /* 24-Bit and 31-Bit mode */
+ env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
+ }
}
/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
uint64_t str)
{
+ uintptr_t ra = GETPC();
uint32_t len;
uint8_t v, c = r0;
- str = fix_address(env, str);
- end = fix_address(env, end);
+ str = wrap_address(env, str);
+ end = wrap_address(env, end);
/* Assume for now that R2 is unmodified. */
env->retxl = str;
@@ -323,7 +497,7 @@ uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
env->cc_op = 2;
return end;
}
- v = cpu_ldub_data(env, str + len);
+ v = cpu_ldub_data_ra(env, str + len, ra);
if (v == c) {
/* Character found. Set R1 to the location; R2 is unmodified. */
env->cc_op = 1;
@@ -340,17 +514,18 @@ uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
+ uintptr_t ra = GETPC();
uint32_t len;
c = c & 0xff;
- s1 = fix_address(env, s1);
- s2 = fix_address(env, s2);
+ s1 = wrap_address(env, s1);
+ s2 = wrap_address(env, s2);
/* Lest we fail to service interrupts in a timely manner, limit the
amount of work we're willing to do. For now, let's cap at 8k. */
for (len = 0; len < 0x2000; ++len) {
- uint8_t v1 = cpu_ldub_data(env, s1 + len);
- uint8_t v2 = cpu_ldub_data(env, s2 + len);
+ uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
+ uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
if (v1 == v2) {
if (v1 == c) {
/* Equal. CC=0, and don't advance the registers. */
@@ -375,27 +550,29 @@ uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
}
/* move page */
-void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
+uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
- /* XXX missing r0 handling */
- env->cc_op = 0;
- fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
+ /* ??? missing r0 handling, which includes access keys, but more
+ importantly optional suppression of the exception! */
+ fast_memmove(env, r1, r2, TARGET_PAGE_SIZE, GETPC());
+ return 0; /* data moved */
}
/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
+ uintptr_t ra = GETPC();
uint32_t len;
c = c & 0xff;
- d = fix_address(env, d);
- s = fix_address(env, s);
+ d = wrap_address(env, d);
+ s = wrap_address(env, s);
/* Lest we fail to service interrupts in a timely manner, limit the
amount of work we're willing to do. For now, let's cap at 8k. */
for (len = 0; len < 0x2000; ++len) {
- uint8_t v = cpu_ldub_data(env, s + len);
- cpu_stb_data(env, d + len, v);
+ uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
+ cpu_stb_data_ra(env, d + len, v, ra);
if (v == c) {
/* Complete. Set CC=1 and advance R1. */
env->cc_op = 1;
@@ -410,124 +587,14 @@ uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
return d + len;
}
-static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
- uint32_t mask)
-{
- int pos = 24; /* top of the lower half of r1 */
- uint64_t rmask = 0xff000000ULL;
- uint8_t val = 0;
- int ccd = 0;
- uint32_t cc = 0;
-
- while (mask) {
- if (mask & 8) {
- env->regs[r1] &= ~rmask;
- val = cpu_ldub_data(env, address);
- if ((val & 0x80) && !ccd) {
- cc = 1;
- }
- ccd = 1;
- if (val && cc == 0) {
- cc = 2;
- }
- env->regs[r1] |= (uint64_t)val << pos;
- address++;
- }
- mask = (mask << 1) & 0xf;
- pos -= 8;
- rmask >>= 8;
- }
-
- return cc;
-}
-
-/* execute instruction
- this instruction executes an insn modified with the contents of r1
- it does not change the executed instruction in memory
- it does not change the program counter
- in other words: tricky...
- currently implemented by interpreting the cases it is most commonly used in
-*/
-uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
- uint64_t addr, uint64_t ret)
-{
- S390CPU *cpu = s390_env_get_cpu(env);
- uint16_t insn = cpu_lduw_code(env, addr);
-
- HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
- insn);
- if ((insn & 0xf0ff) == 0xd000) {
- uint32_t l, insn2, b1, b2, d1, d2;
-
- l = v1 & 0xff;
- insn2 = cpu_ldl_code(env, addr + 2);
- b1 = (insn2 >> 28) & 0xf;
- b2 = (insn2 >> 12) & 0xf;
- d1 = (insn2 >> 16) & 0xfff;
- d2 = insn2 & 0xfff;
- switch (insn & 0xf00) {
- case 0x200:
- helper_mvc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0x400:
- cc = helper_nc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0x500:
- cc = helper_clc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0x600:
- cc = helper_oc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0x700:
- cc = helper_xc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0xc00:
- helper_tr(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0xd00:
- cc = helper_trt(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- default:
- goto abort;
- }
- } else if ((insn & 0xff00) == 0x0a00) {
- /* supervisor call */
- HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
- env->psw.addr = ret - 4;
- env->int_svc_code = (insn | v1) & 0xff;
- env->int_svc_ilen = 4;
- helper_exception(env, EXCP_SVC);
- } else if ((insn & 0xff00) == 0xbf00) {
- uint32_t insn2, r1, r3, b2, d2;
-
- insn2 = cpu_ldl_code(env, addr + 2);
- r1 = (insn2 >> 20) & 0xf;
- r3 = (insn2 >> 16) & 0xf;
- b2 = (insn2 >> 12) & 0xf;
- d2 = insn2 & 0xfff;
- cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
- } else {
- abort:
- cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
- insn);
- }
- return cc;
-}
-
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ uintptr_t ra = GETPC();
int i;
for (i = r1;; i = (i + 1) % 16) {
- env->aregs[i] = cpu_ldl_data(env, a2);
+ env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
a2 += 4;
if (i == r3) {
@@ -539,10 +606,11 @@ void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ uintptr_t ra = GETPC();
int i;
for (i = r1;; i = (i + 1) % 16) {
- cpu_stl_data(env, a2, env->aregs[i]);
+ cpu_stl_data_ra(env, a2, env->aregs[i], ra);
a2 += 4;
if (i == r3) {
@@ -551,131 +619,230 @@ void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
}
}
-/* move long */
-uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+/* move long helper */
+static inline uint32_t do_mvcl(CPUS390XState *env,
+ uint64_t *dest, uint64_t *destlen,
+ uint64_t *src, uint64_t *srclen,
+ uint16_t pad, int wordsize, uintptr_t ra)
{
- uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
- uint64_t dest = get_address_31fix(env, r1);
- uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
- uint64_t src = get_address_31fix(env, r2);
- uint8_t pad = env->regs[r2 + 1] >> 24;
- uint8_t v;
+ uint64_t len = MIN(*srclen, *destlen);
uint32_t cc;
- if (destlen == srclen) {
+ if (*destlen == *srclen) {
cc = 0;
- } else if (destlen < srclen) {
+ } else if (*destlen < *srclen) {
cc = 1;
} else {
cc = 2;
}
- if (srclen > destlen) {
- srclen = destlen;
- }
+ /* Copy the src array */
+ fast_memmove(env, *dest, *src, len, ra);
+ *src += len;
+ *srclen -= len;
+ *dest += len;
+ *destlen -= len;
- for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
- v = cpu_ldub_data(env, src);
- cpu_stb_data(env, dest, v);
+ /* Pad the remaining area */
+ if (wordsize == 1) {
+ fast_memset(env, *dest, pad, *destlen, ra);
+ *dest += *destlen;
+ *destlen = 0;
+ } else {
+ /* If remaining length is odd, pad with odd byte first. */
+ if (*destlen & 1) {
+ cpu_stb_data_ra(env, *dest, pad & 0xff, ra);
+ *dest += 1;
+ *destlen -= 1;
+ }
+ /* The remaining length is even, pad using words. */
+ for (; *destlen; *dest += 2, *destlen -= 2) {
+ cpu_stw_data_ra(env, *dest, pad, ra);
+ }
}
- for (; destlen; dest++, destlen--) {
- cpu_stb_data(env, dest, pad);
- }
+ return cc;
+}
+
+/* move long */
+uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ uintptr_t ra = GETPC();
+ uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
+ uint64_t src = get_address(env, r2);
+ uint8_t pad = env->regs[r2 + 1] >> 24;
+ uint32_t cc;
- env->regs[r1 + 1] = destlen;
- /* can't use srclen here, we trunc'ed it */
- env->regs[r2 + 1] -= src - env->regs[r2];
- env->regs[r1] = dest;
- env->regs[r2] = src;
+ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);
+
+ env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
+ env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
+ set_address(env, r1, dest);
+ set_address(env, r2, src);
return cc;
}
-/* move long extended another memcopy insn with more bells and whistles */
+/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
uint32_t r3)
{
- uint64_t destlen = env->regs[r1 + 1];
- uint64_t dest = env->regs[r1];
- uint64_t srclen = env->regs[r3 + 1];
- uint64_t src = env->regs[r3];
- uint8_t pad = a2 & 0xff;
- uint8_t v;
+ uintptr_t ra = GETPC();
+ uint64_t destlen = get_length(env, r1 + 1);
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = get_length(env, r3 + 1);
+ uint64_t src = get_address(env, r3);
+ uint8_t pad = a2;
uint32_t cc;
- if (!(env->psw.mask & PSW_MASK_64)) {
- destlen = (uint32_t)destlen;
- srclen = (uint32_t)srclen;
- dest &= 0x7fffffff;
- src &= 0x7fffffff;
- }
+ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);
- if (destlen == srclen) {
- cc = 0;
- } else if (destlen < srclen) {
- cc = 1;
- } else {
- cc = 2;
- }
+ set_length(env, r1 + 1, destlen);
+ set_length(env, r3 + 1, srclen);
+ set_address(env, r1, dest);
+ set_address(env, r3, src);
- if (srclen > destlen) {
- srclen = destlen;
- }
+ return cc;
+}
- for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
- v = cpu_ldub_data(env, src);
- cpu_stb_data(env, dest, v);
- }
+/* move long unicode */
+uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t destlen = get_length(env, r1 + 1);
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = get_length(env, r3 + 1);
+ uint64_t src = get_address(env, r3);
+ uint16_t pad = a2;
+ uint32_t cc;
- for (; destlen; dest++, destlen--) {
- cpu_stb_data(env, dest, pad);
- }
+ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);
- env->regs[r1 + 1] = destlen;
- /* can't use srclen here, we trunc'ed it */
- /* FIXME: 31-bit mode! */
- env->regs[r3 + 1] -= src - env->regs[r3];
- env->regs[r1] = dest;
- env->regs[r3] = src;
+ set_length(env, r1 + 1, destlen);
+ set_length(env, r3 + 1, srclen);
+ set_address(env, r1, dest);
+ set_address(env, r3, src);
return cc;
}
-/* compare logical long extended memcompare insn with padding */
-uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
- uint32_t r3)
+/* compare logical long helper */
+static inline uint32_t do_clcl(CPUS390XState *env,
+ uint64_t *src1, uint64_t *src1len,
+ uint64_t *src3, uint64_t *src3len,
+ uint16_t pad, uint64_t limit,
+ int wordsize, uintptr_t ra)
{
- uint64_t destlen = env->regs[r1 + 1];
- uint64_t dest = get_address_31fix(env, r1);
- uint64_t srclen = env->regs[r3 + 1];
- uint64_t src = get_address_31fix(env, r3);
- uint8_t pad = a2 & 0xff;
- uint8_t v1 = 0, v2 = 0;
+ uint64_t len = MAX(*src1len, *src3len);
uint32_t cc = 0;
- if (!(destlen || srclen)) {
+ check_alignment(env, *src1len | *src3len, wordsize, ra);
+
+ if (!len) {
return cc;
}
- if (srclen > destlen) {
- srclen = destlen;
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. */
+ if (len > limit) {
+ len = limit;
+ cc = 3;
}
- for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
- v1 = srclen ? cpu_ldub_data(env, src) : pad;
- v2 = destlen ? cpu_ldub_data(env, dest) : pad;
- if (v1 != v2) {
- cc = (v1 < v2) ? 1 : 2;
+ for (; len; len -= wordsize) {
+ uint16_t v1 = pad;
+ uint16_t v3 = pad;
+
+ if (*src1len) {
+ v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
+ }
+ if (*src3len) {
+ v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
+ }
+
+ if (v1 != v3) {
+ cc = (v1 < v3) ? 1 : 2;
break;
}
+
+ if (*src1len) {
+ *src1 += wordsize;
+ *src1len -= wordsize;
+ }
+ if (*src3len) {
+ *src3 += wordsize;
+ *src3len -= wordsize;
+ }
}
- env->regs[r1 + 1] = destlen;
- /* can't use srclen here, we trunc'ed it */
- env->regs[r3 + 1] -= src - env->regs[r3];
- env->regs[r1] = dest;
- env->regs[r3] = src;
+ return cc;
+}
+
+
+/* compare logical long */
+uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
+ uint64_t src3 = get_address(env, r2);
+ uint8_t pad = env->regs[r2 + 1] >> 24;
+ uint32_t cc;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);
+
+ env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
+ env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r2, src3);
+
+ return cc;
+}
+
+/* compare logical long extended memcompare insn with padding */
+uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = get_length(env, r1 + 1);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = get_length(env, r3 + 1);
+ uint64_t src3 = get_address(env, r3);
+ uint8_t pad = a2;
+ uint32_t cc;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);
+
+ set_length(env, r1 + 1, src1len);
+ set_length(env, r3 + 1, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r3, src3);
+
+ return cc;
+}
+
+/* compare logical long unicode memcompare insn with padding */
+uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = get_length(env, r1 + 1);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = get_length(env, r3 + 1);
+ uint64_t src3 = get_address(env, r3);
+ uint16_t pad = a2;
+ uint32_t cc = 0;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);
+
+ set_length(env, r1 + 1, src1len);
+ set_length(env, r3 + 1, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r3, src3);
return cc;
}
@@ -684,6 +851,7 @@ uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
uint64_t src, uint64_t src_len)
{
+ uintptr_t ra = GETPC();
uint64_t max_len, len;
uint64_t cksm = (uint32_t)r1;
@@ -693,21 +861,21 @@ uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
/* Process full words as available. */
for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
- cksm += (uint32_t)cpu_ldl_data(env, src);
+ cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
}
switch (max_len - len) {
case 1:
- cksm += cpu_ldub_data(env, src) << 24;
+ cksm += cpu_ldub_data_ra(env, src, ra) << 24;
len += 1;
break;
case 2:
- cksm += cpu_lduw_data(env, src) << 16;
+ cksm += cpu_lduw_data_ra(env, src, ra) << 16;
len += 2;
break;
case 3:
- cksm += cpu_lduw_data(env, src) << 16;
- cksm += cpu_ldub_data(env, src + 2) << 8;
+ cksm += cpu_lduw_data_ra(env, src, ra) << 16;
+ cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
len += 3;
break;
}
@@ -726,9 +894,94 @@ uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
return len;
}
+void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int len_dest = len >> 4;
+ int len_src = len & 0xf;
+ uint8_t b;
+
+ dest += len_dest;
+ src += len_src;
+
+ /* last byte is special, it only flips the nibbles */
+ b = cpu_ldub_data_ra(env, src, ra);
+ cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
+ src--;
+ len_src--;
+
+ /* now pack every value */
+ while (len_dest >= 0) {
+ b = 0;
+
+ if (len_src > 0) {
+ b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
+ src--;
+ len_src--;
+ }
+ if (len_src > 0) {
+ b |= cpu_ldub_data_ra(env, src, ra) << 4;
+ src--;
+ len_src--;
+ }
+
+ len_dest--;
+ dest--;
+ cpu_stb_data_ra(env, dest, b, ra);
+ }
+}
+
+static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen, int ssize, uintptr_t ra)
+{
+ int i;
+ /* The destination operand is always 16 bytes long. */
+ const int destlen = 16;
+
+ /* The operands are processed from right to left. */
+ src += srclen - 1;
+ dest += destlen - 1;
+
+ for (i = 0; i < destlen; i++) {
+ uint8_t b = 0;
+
+ /* Start with a positive sign */
+ if (i == 0) {
+ b = 0xc;
+ } else if (srclen > ssize) {
+ b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
+ src -= ssize;
+ srclen -= ssize;
+ }
+
+ if (srclen > ssize) {
+ b |= cpu_ldub_data_ra(env, src, ra) << 4;
+ src -= ssize;
+ srclen -= ssize;
+ }
+
+ cpu_stb_data_ra(env, dest, b, ra);
+ dest--;
+ }
+}
+
+
+void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen)
+{
+ do_pkau(env, dest, src, srclen, 1, GETPC());
+}
+
+void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen)
+{
+ do_pkau(env, dest, src, srclen, 2, GETPC());
+}
+
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
uint64_t src)
{
+ uintptr_t ra = GETPC();
int len_dest = len >> 4;
int len_src = len & 0xf;
uint8_t b;
@@ -738,8 +991,8 @@ void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
src += len_src;
/* last byte is special, it only flips the nibbles */
- b = cpu_ldub_data(env, src);
- cpu_stb_data(env, dest, (b << 4) | (b >> 4));
+ b = cpu_ldub_data_ra(env, src, ra);
+ cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
src--;
len_src--;
@@ -749,7 +1002,7 @@ void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
uint8_t cur_byte = 0;
if (len_src > 0) {
- cur_byte = cpu_ldub_data(env, src);
+ cur_byte = cpu_ldub_data_ra(env, src, ra);
}
len_dest--;
@@ -768,29 +1021,124 @@ void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
/* zone bits */
cur_byte |= 0xf0;
- cpu_stb_data(env, dest, cur_byte);
+ cpu_stb_data_ra(env, dest, cur_byte, ra);
}
}
-void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
- uint64_t trans)
+static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
+ uint32_t destlen, int dsize, uint64_t src,
+ uintptr_t ra)
{
int i;
+ uint32_t cc;
+ uint8_t b;
+ /* The source operand is always 16 bytes long. */
+ const int srclen = 16;
- for (i = 0; i <= len; i++) {
- uint8_t byte = cpu_ldub_data(env, array + i);
- uint8_t new_byte = cpu_ldub_data(env, trans + byte);
+ /* The operands are processed from right to left. */
+ src += srclen - 1;
+ dest += destlen - dsize;
+
+ /* Check for the sign. */
+ b = cpu_ldub_data_ra(env, src, ra);
+ src--;
+ switch (b & 0xf) {
+ case 0xa:
+ case 0xc:
+ case 0xe ... 0xf:
+ cc = 0; /* plus */
+ break;
+ case 0xb:
+ case 0xd:
+ cc = 1; /* minus */
+ break;
+ default:
+ case 0x0 ... 0x9:
+ cc = 3; /* invalid */
+ break;
+ }
- cpu_stb_data(env, array + i, new_byte);
+ /* Now pad every nibble with 0x30, advancing one nibble at a time. */
+ for (i = 0; i < destlen; i += dsize) {
+ if (i == (31 * dsize)) {
+ /* If length is 32/64 bytes, the leftmost byte is 0. */
+ b = 0;
+ } else if (i % (2 * dsize)) {
+ b = cpu_ldub_data_ra(env, src, ra);
+ src--;
+ } else {
+ b >>= 4;
+ }
+ cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
+ dest -= dsize;
}
+
+ return cc;
+}
+
+uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
+ uint64_t src)
+{
+ return do_unpkau(env, dest, destlen, 1, src, GETPC());
+}
+
+uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
+ uint64_t src)
+{
+ return do_unpkau(env, dest, destlen, 2, src, GETPC());
+}
+
+uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
+{
+ uintptr_t ra = GETPC();
+ uint32_t cc = 0;
+ int i;
+
+ for (i = 0; i < destlen; i++) {
+ uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
+ /* digit */
+ cc |= (b & 0xf0) > 0x90 ? 2 : 0;
+
+ if (i == (destlen - 1)) {
+ /* sign */
+ cc |= (b & 0xf) < 0xa ? 1 : 0;
+ } else {
+ /* digit */
+ cc |= (b & 0xf) > 0x9 ? 2 : 0;
+ }
+ }
+
+ return cc;
+}
+
+static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans, uintptr_t ra)
+{
+ uint32_t i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
+ uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
+ cpu_stb_data_ra(env, array + i, new_byte, ra);
+ }
+
+ return env->cc_op;
+}
+
+void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans)
+{
+ do_helper_tr(env, len, array, trans, GETPC());
}
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
uint64_t len, uint64_t trans)
{
+ uintptr_t ra = GETPC();
uint8_t end = env->regs[0] & 0xff;
uint64_t l = len;
uint64_t i;
+ uint32_t cc = 0;
if (!(env->psw.mask & PSW_MASK_64)) {
array &= 0x7fffffff;
@@ -801,47 +1149,95 @@ uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
amount of work we're willing to do. For now, let's cap at 8k. */
if (l > 0x2000) {
l = 0x2000;
- env->cc_op = 3;
- } else {
- env->cc_op = 0;
+ cc = 3;
}
for (i = 0; i < l; i++) {
uint8_t byte, new_byte;
- byte = cpu_ldub_data(env, array + i);
+ byte = cpu_ldub_data_ra(env, array + i, ra);
if (byte == end) {
- env->cc_op = 1;
+ cc = 1;
break;
}
- new_byte = cpu_ldub_data(env, trans + byte);
- cpu_stb_data(env, array + i, new_byte);
+ new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
+ cpu_stb_data_ra(env, array + i, new_byte, ra);
}
+ env->cc_op = cc;
env->retxl = len - i;
return array + i;
}
+static uint32_t do_helper_trt(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans, uintptr_t ra)
+{
+ uint32_t i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
+ uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);
+
+ if (sbyte != 0) {
+ set_address(env, 1, array + i);
+ env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
+ return (i == len) ? 2 : 1;
+ }
+ }
+
+ return 0;
+}
+
uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
uint64_t trans)
{
- uint32_t cc = 0;
+ return do_helper_trt(env, len, array, trans, GETPC());
+}
+
+/* Translate one/two to one/two */
+uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
+ uint32_t tst, uint32_t sizes)
+{
+ uintptr_t ra = GETPC();
+ int dsize = (sizes & 1) ? 1 : 2;
+ int ssize = (sizes & 2) ? 1 : 2;
+ uint64_t tbl = get_address(env, 1) & ~7;
+ uint64_t dst = get_address(env, r1);
+ uint64_t len = get_length(env, r1 + 1);
+ uint64_t src = get_address(env, r2);
+ uint32_t cc = 3;
int i;
- for (i = 0; i <= len; i++) {
- uint8_t byte = cpu_ldub_data(env, array + i);
- uint8_t sbyte = cpu_ldub_data(env, trans + byte);
+ check_alignment(env, len, ssize, ra);
- if (sbyte != 0) {
- env->regs[1] = array + i;
- env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
- cc = (i == len) ? 2 : 1;
+ /* Lest we fail to service interrupts in a timely manner, */
+ /* limit the amount of work we're willing to do. */
+ for (i = 0; i < 0x2000; i++) {
+ uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
+ uint64_t tble = tbl + (sval * dsize);
+ uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
+ if (dval == tst) {
+ cc = 1;
+ break;
+ }
+ cpu_stsize_data_ra(env, dst, dval, dsize, ra);
+
+ len -= ssize;
+ src += ssize;
+ dst += dsize;
+
+ if (len == 0) {
+ cc = 0;
break;
}
}
+ set_address(env, r1, dst);
+ set_length(env, r1 + 1, len);
+ set_address(env, r2, src);
+
return cc;
}
@@ -866,6 +1262,8 @@ void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
} else {
uint64_t oldh, oldl;
+ check_alignment(env, addr, 16, ra);
+
oldh = cpu_ldq_data_ra(env, addr + 0, ra);
oldl = cpu_ldq_data_ra(env, addr + 8, ra);
@@ -887,20 +1285,20 @@ void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ uintptr_t ra = GETPC();
S390CPU *cpu = s390_env_get_cpu(env);
bool PERchanged = false;
- int i;
uint64_t src = a2;
- uint64_t val;
+ uint32_t i;
for (i = r1;; i = (i + 1) % 16) {
- val = cpu_ldq_data(env, src);
+ uint64_t val = cpu_ldq_data_ra(env, src, ra);
if (env->cregs[i] != val && i >= 9 && i <= 11) {
PERchanged = true;
}
env->cregs[i] = val;
HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
- i, src, env->cregs[i]);
+ i, src, val);
src += sizeof(uint64_t);
if (i == r3) {
@@ -917,18 +1315,19 @@ void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ uintptr_t ra = GETPC();
S390CPU *cpu = s390_env_get_cpu(env);
bool PERchanged = false;
- int i;
uint64_t src = a2;
- uint32_t val;
+ uint32_t i;
for (i = r1;; i = (i + 1) % 16) {
- val = cpu_ldl_data(env, src);
+ uint32_t val = cpu_ldl_data_ra(env, src, ra);
if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
PERchanged = true;
}
- env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
+ env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
+ HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
src += sizeof(uint32_t);
if (i == r3) {
@@ -945,11 +1344,12 @@ void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
- int i;
+ uintptr_t ra = GETPC();
uint64_t dest = a2;
+ uint32_t i;
for (i = r1;; i = (i + 1) % 16) {
- cpu_stq_data(env, dest, env->cregs[i]);
+ cpu_stq_data_ra(env, dest, env->cregs[i], ra);
dest += sizeof(uint64_t);
if (i == r3) {
@@ -960,11 +1360,12 @@ void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
- int i;
+ uintptr_t ra = GETPC();
uint64_t dest = a2;
+ uint32_t i;
for (i = r1;; i = (i + 1) % 16) {
- cpu_stl_data(env, dest, env->cregs[i]);
+ cpu_stl_data_ra(env, dest, env->cregs[i], ra);
dest += sizeof(uint32_t);
if (i == r3) {
@@ -973,10 +1374,39 @@ void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
}
}
+uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
+{
+ uintptr_t ra = GETPC();
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ uint64_t abs_addr;
+ int i;
+
+ real_addr = wrap_address(env, real_addr);
+ abs_addr = mmu_real2abs(env, real_addr) & TARGET_PAGE_MASK;
+ if (!address_space_access_valid(&address_space_memory, abs_addr,
+ TARGET_PAGE_SIZE, true)) {
+ cpu_restore_state(cs, ra);
+ program_interrupt(env, PGM_ADDRESSING, 4);
+ return 1;
+ }
+
+ /* Check low-address protection */
+ if ((env->cregs[0] & CR0_LOWPROT) && real_addr < 0x2000) {
+ cpu_restore_state(cs, ra);
+ program_interrupt(env, PGM_PROTECTION, 4);
+ return 1;
+ }
+
+ for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
+ stq_phys(cs->as, abs_addr + i, 0);
+ }
+
+ return 0;
+}
+
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
/* XXX implement */
-
return 0;
}
@@ -985,7 +1415,7 @@ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
static S390SKeysState *ss;
static S390SKeysClass *skeyclass;
- uint64_t addr = get_address(env, 0, 0, r2);
+ uint64_t addr = wrap_address(env, r2);
uint8_t key;
if (addr > ram_size) {
@@ -1008,7 +1438,7 @@ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
static S390SKeysState *ss;
static S390SKeysClass *skeyclass;
- uint64_t addr = get_address(env, 0, 0, r2);
+ uint64_t addr = wrap_address(env, r2);
uint8_t key;
if (addr > ram_size) {
@@ -1063,32 +1493,9 @@ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
return re >> 1;
}
-/* compare and swap and purge */
-uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
-{
- S390CPU *cpu = s390_env_get_cpu(env);
- uint32_t cc;
- uint32_t o1 = env->regs[r1];
- uint64_t a2 = r2 & ~3ULL;
- uint32_t o2 = cpu_ldl_data(env, a2);
-
- if (o1 == o2) {
- cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
- if (r2 & 0x3) {
- /* flush TLB / ALB */
- tlb_flush(CPU(cpu));
- }
- cc = 0;
- } else {
- env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
- cc = 1;
- }
-
- return cc;
-}
-
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
+ uintptr_t ra = GETPC();
int cc = 0, i;
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
@@ -1102,7 +1509,8 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
/* XXX replace w/ memcpy */
for (i = 0; i < l; i++) {
- cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
+ uint8_t x = cpu_ldub_primary_ra(env, a2 + i, ra);
+ cpu_stb_secondary_ra(env, a1 + i, x, ra);
}
return cc;
@@ -1110,6 +1518,7 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
+ uintptr_t ra = GETPC();
int cc = 0, i;
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
@@ -1123,36 +1532,45 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
/* XXX replace w/ memcpy */
for (i = 0; i < l; i++) {
- cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
+ uint8_t x = cpu_ldub_secondary_ra(env, a2 + i, ra);
+ cpu_stb_primary_ra(env, a1 + i, x, ra);
}
return cc;
}
/* invalidate pte */
-void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
+void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
+ uint32_t m4)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t page = vaddr & TARGET_PAGE_MASK;
- uint64_t pte = 0;
+ uint64_t pte_addr, pte;
- /* XXX broadcast to other CPUs */
+ /* Compute the page table entry address */
+ pte_addr = (pto & _SEGMENT_ENTRY_ORIGIN);
+ pte_addr += (vaddr & VADDR_PX) >> 9;
- /* XXX Linux is nice enough to give us the exact pte address.
- According to spec we'd have to find it out ourselves */
- /* XXX Linux is fine with overwriting the pte, the spec requires
- us to only set the invalid bit */
- stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);
+ /* Mark the page table entry as invalid */
+ pte = ldq_phys(cs->as, pte_addr);
+ pte |= _PAGE_INVALID;
+ stq_phys(cs->as, pte_addr, pte);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
- tlb_flush_page(cs, page);
+ /* XXX: the LC bit should be considered as 0 if the local-TLB-clearing
+ facility is not installed. */
+ if (m4 & 1) {
+ tlb_flush_page(cs, page);
+ } else {
+ tlb_flush_page_all_cpus_synced(cs, page);
+ }
/* XXX 31-bit hack */
- if (page & 0x80000000) {
- tlb_flush_page(cs, page & ~0x80000000);
+ if (m4 & 1) {
+ tlb_flush_page(cs, page ^ 0x80000000);
} else {
- tlb_flush_page(cs, page | 0x80000000);
+ tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
}
}
@@ -1164,19 +1582,27 @@ void HELPER(ptlb)(CPUS390XState *env)
tlb_flush(CPU(cpu));
}
+/* flush global tlb */
+void HELPER(purge)(CPUS390XState *env)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ tlb_flush_all_cpus_synced(CPU(cpu));
+}
+
/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
- return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
+ return (uint32_t)ldl_phys(cs->as, wrap_address(env, addr));
}
uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
- return ldq_phys(cs->as, get_address(env, 0, 0, addr));
+ return ldq_phys(cs->as, wrap_address(env, addr));
}
/* store using real address */
@@ -1184,7 +1610,7 @@ void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
- stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
+ stl_phys(cs->as, wrap_address(env, addr), (uint32_t)v1);
if ((env->psw.mask & PSW_MASK_PER) &&
(env->cregs[9] & PER_CR9_EVENT_STORE) &&
@@ -1199,7 +1625,7 @@ void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
- stq_phys(cs->as, get_address(env, 0, 0, addr), v1);
+ stq_phys(cs->as, wrap_address(env, addr), v1);
if ((env->psw.mask & PSW_MASK_PER) &&
(env->cregs[9] & PER_CR9_EVENT_STORE) &&
@@ -1215,17 +1641,17 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
uint32_t cc = 0;
- int old_exc = cs->exception_index;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
uint64_t ret;
- int flags;
+ int old_exc, flags;
/* XXX incomplete - has more corner cases */
if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
+ cpu_restore_state(cs, GETPC());
program_interrupt(env, PGM_SPECIAL_OP, 2);
}
- cs->exception_index = old_exc;
+ old_exc = cs->exception_index;
if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
cc = 3;
}
@@ -1240,3 +1666,126 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
return ret;
}
#endif
+
+/* load pair from quadword */
+uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
+{
+ uintptr_t ra = GETPC();
+ uint64_t hi, lo;
+
+ if (parallel_cpus) {
+#ifndef CONFIG_ATOMIC128
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
+ hi = int128_gethi(v);
+ lo = int128_getlo(v);
+#endif
+ } else {
+ check_alignment(env, addr, 16, ra);
+
+ hi = cpu_ldq_data_ra(env, addr + 0, ra);
+ lo = cpu_ldq_data_ra(env, addr + 8, ra);
+ }
+
+ env->retxl = lo;
+ return hi;
+}
+
+/* store pair to quadword */
+void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
+ uint64_t low, uint64_t high)
+{
+ uintptr_t ra = GETPC();
+
+ if (parallel_cpus) {
+#ifndef CONFIG_ATOMIC128
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+
+ Int128 v = int128_make128(low, high);
+ helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
+#endif
+ } else {
+ check_alignment(env, addr, 16, ra);
+
+ cpu_stq_data_ra(env, addr + 0, high, ra);
+ cpu_stq_data_ra(env, addr + 8, low, ra);
+ }
+}
+
+/* Execute instruction. This instruction executes an insn modified with
+ the contents of r1. It does not change the executed instruction in memory;
+ it does not change the program counter.
+
+ Perform this by recording the modified instruction in env->ex_value.
+ This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
+*/
+void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
+{
+ uint64_t insn = cpu_lduw_code(env, addr);
+ uint8_t opc = insn >> 8;
+
+ /* Or in the contents of R1[56:63]. */
+ insn |= r1 & 0xff;
+
+ /* Load the rest of the instruction. */
+ insn <<= 48;
+ switch (get_ilen(opc)) {
+ case 2:
+ break;
+ case 4:
+ insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
+ break;
+ case 6:
+ insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* The very most common cases can be sped up by avoiding a new TB. */
+ if ((opc & 0xf0) == 0xd0) {
+ typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
+ uint64_t, uintptr_t);
+ static const dx_helper dx[16] = {
+ [0x2] = do_helper_mvc,
+ [0x4] = do_helper_nc,
+ [0x5] = do_helper_clc,
+ [0x6] = do_helper_oc,
+ [0x7] = do_helper_xc,
+ [0xc] = do_helper_tr,
+ [0xd] = do_helper_trt,
+ };
+ dx_helper helper = dx[opc & 0xf];
+
+ if (helper) {
+ uint32_t l = extract64(insn, 48, 8);
+ uint32_t b1 = extract64(insn, 44, 4);
+ uint32_t d1 = extract64(insn, 32, 12);
+ uint32_t b2 = extract64(insn, 28, 4);
+ uint32_t d2 = extract64(insn, 16, 12);
+ uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
+ uint64_t a2 = wrap_address(env, env->regs[b2] + d2);
+
+ env->cc_op = helper(env, l, a1, a2, 0);
+ env->psw.addr += ilen;
+ return;
+ }
+ } else if (opc == 0x0a) {
+ env->int_svc_code = extract64(insn, 48, 8);
+ env->int_svc_ilen = ilen;
+ helper_exception(env, EXCP_SVC);
+ g_assert_not_reached();
+ }
+
+ /* Record the insn we want to execute as well as the ilen to use
+ during the execution of the target insn. This will also ensure
+ that ex_value is non-zero, which flags that we are in a state
+ that requires such execution. */
+ env->ex_value = insn | ilen;
+}
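Editor's note on the mem_helper.c changes above: the new HELPER(ex)/extract_insn pair communicates through env->ex_value — the (possibly modified) target instruction is left-justified in the upper six bytes and its ilen kept in the low bits, so a non-zero ex_value also flags "translating under EXECUTE". Below is a minimal standalone sketch of that packing; the function names are illustrative, not QEMU's, but the masks mirror the hunk above.

    #include <stdint.h>
    #include <stdio.h>

    /* Pack an instruction (already left-justified, first byte in bits 63..56)
       together with its length, the way HELPER(ex) stores it. */
    static uint64_t encode_ex_value(uint64_t insn, uint32_t ilen)
    {
        return insn | ilen;                        /* ilen is 2, 4 or 6 */
    }

    /* Recover the pieces the way extract_insn() does. */
    static void decode_ex_value(uint64_t ex_value, uint64_t *insn,
                                uint32_t *ilen, uint32_t *opc)
    {
        *insn = ex_value & 0xffffffffffff0000ull;  /* drop the ilen bits */
        *ilen = ex_value & 0xf;
        *opc = *insn >> 56;                        /* major opcode byte */
    }

    int main(void)
    {
        /* arbitrary 4-byte pattern, only for illustration */
        uint64_t insn = (uint64_t)0xb2220010u << 32;
        uint64_t packed = encode_ex_value(insn, 4);
        uint64_t out;
        uint32_t ilen, opc;

        decode_ex_value(packed, &out, &ilen, &opc);
        printf("opc=%02x ilen=%u\n", opc, ilen);   /* opc=b2 ilen=4 */
        return 0;
    }

Because extract_insn clears ex_value in the CPU state before decoding it, an exception taken while translating the executed instruction does not re-enter the EXECUTE path, as the "Drop the EX data now" comment in the translate.c hunk below notes.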
diff --git a/target/s390x/misc_helper.c b/target/s390x/misc_helper.c
index 1b9f448875..edcdf17db6 100644
--- a/target/s390x/misc_helper.c
+++ b/target/s390x/misc_helper.c
@@ -80,8 +80,6 @@ void HELPER(exception)(CPUS390XState *env, uint32_t excp)
cpu_loop_exit(cs);
}
-#ifndef CONFIG_USER_ONLY
-
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
{
S390CPU *cpu = s390_env_get_cpu(env);
@@ -108,6 +106,8 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
}
}
+#ifndef CONFIG_USER_ONLY
+
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
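Editor's note on the misc_helper.c change above: moving program_interrupt() out of the CONFIG_USER_ONLY guard makes it available to the common memory helpers, which now raise program exceptions (e.g. via check_alignment()) in linux-user builds as well. The real check_alignment() is defined outside this excerpt, so the body below is only an assumption sketching how such a caller uses the now-unconditional program_interrupt(); it follows the cpu_restore_state()/program_interrupt() pattern visible in HELPER(testblock) above.

    /* Hypothetical caller: the function name and body are assumptions;
       program_interrupt(), cpu_restore_state(), s390_env_get_cpu() and
       PGM_SPECIFICATION are taken from this patch. */
    static void check_alignment_sketch(CPUS390XState *env, uint64_t v,
                                       int wordsize, int ilen, uintptr_t ra)
    {
        if (v % wordsize) {
            CPUState *cs = CPU(s390_env_get_cpu(env));

            /* point the guest PSW at the faulting insn, then raise the
               specification exception; works for softmmu and user-only
               builds after this change */
            cpu_restore_state(cs, ra);
            program_interrupt(env, PGM_SPECIFICATION, ilen);
        }
    }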
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index b11a02706c..501e39010d 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -108,7 +108,7 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
* Translate real address to absolute (= physical)
* address by taking care of the prefix mapping.
*/
-static target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
+target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
{
if (raddr < 0x2000) {
return raddr + env->psa; /* Map the lowcore. */
@@ -143,8 +143,6 @@ static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
return 0;
}
-#define VADDR_PX 0xff000 /* Page index bits */
-
/* Decode segment table entry */
static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t st_entry,
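Editor's note on the mmu_helper.c change above: VADDR_PX leaves this file because the reworked IPTE helper now computes the page-table-entry address itself, and mmu_real2abs() loses its static linkage so the new TEST BLOCK helper can translate real to absolute addresses. A standalone sketch of that PTE address computation follows; only VADDR_PX and the >> 9 scaling come from the diff, while the SEGMENT_ENTRY_ORIGIN mask value and the helper name are assumptions for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define VADDR_PX             0xff000ULL   /* page index bits (from the diff) */
    #define SEGMENT_ENTRY_ORIGIN (~0x7ffULL)  /* assumed flag mask for this sketch */

    /* The page index is vaddr bits 12..19 and each page-table entry is
       8 bytes, so the byte offset is ((vaddr & VADDR_PX) >> 12) * 8,
       i.e. (vaddr & VADDR_PX) >> 9, as in HELPER(ipte) above. */
    static uint64_t pte_addr_for(uint64_t pto, uint64_t vaddr)
    {
        return (pto & SEGMENT_ENTRY_ORIGIN) + ((vaddr & VADDR_PX) >> 9);
    }

    int main(void)
    {
        uint64_t pto = 0x01234800, vaddr = 0x7f0000045678ULL;

        printf("pte entry at 0x%llx\n",
               (unsigned long long)pte_addr_for(pto, vaddr));
        return 0;
    }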
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index 4c48c593cd..95f91d4f08 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -57,7 +57,9 @@ struct DisasContext {
struct TranslationBlock *tb;
const DisasInsn *insn;
DisasFields *fields;
+ uint64_t ex_value;
uint64_t pc, next_pc;
+ uint32_t ilen;
enum cc_op cc_op;
bool singlestep_enabled;
};
@@ -349,7 +351,7 @@ static void gen_program_exception(DisasContext *s, int code)
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
tcg_temp_free_i32(tmp);
- tmp = tcg_const_i32(s->next_pc - s->pc);
+ tmp = tcg_const_i32(s->ilen);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
tcg_temp_free_i32(tmp);
@@ -608,11 +610,16 @@ static void gen_op_calc_cc(DisasContext *s)
set_cc_static(s);
}
-static int use_goto_tb(DisasContext *s, uint64_t dest)
+static bool use_exit_tb(DisasContext *s)
{
- if (unlikely(s->singlestep_enabled) ||
- (s->tb->cflags & CF_LAST_IO) ||
- (s->tb->flags & FLAG_MASK_PER)) {
+ return (s->singlestep_enabled ||
+ (s->tb->cflags & CF_LAST_IO) ||
+ (s->tb->flags & FLAG_MASK_PER));
+}
+
+static bool use_goto_tb(DisasContext *s, uint64_t dest)
+{
+ if (unlikely(use_exit_tb(s))) {
return false;
}
#ifndef CONFIG_USER_ONLY
@@ -1162,6 +1169,8 @@ typedef enum {
the PC (for whatever reason), so there's no need to do it again on
exiting the TB. */
EXIT_PC_UPDATED,
+ /* We have updated the PC and CC values. */
+ EXIT_PC_CC_UPDATED,
/* We are exiting the TB, but have neither emitted a goto_tb, nor
updated the PC for the next instruction to be executed. */
EXIT_PC_STALE,
@@ -1195,6 +1204,8 @@ typedef enum DisasFacility {
FAC_SFLE, /* store facility list extended */
FAC_ILA, /* interlocked access facility 1 */
FAC_LPP, /* load-program-parameter */
+ FAC_DAT_ENH, /* DAT-enhancement */
+ FAC_E2, /* extended-translation facility 2 */
} DisasFacility;
struct DisasInsn {
@@ -1866,7 +1877,6 @@ static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
int r2 = get_field(s->fields, r2);
TCGv_i64 len = tcg_temp_new_i64();
- potential_page_fault(s);
gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
set_cc_static(s);
return_low128(o->out);
@@ -1901,7 +1911,6 @@ static ExitStatus op_clc(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
break;
default:
- potential_page_fault(s);
vl = tcg_const_i32(l);
gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
tcg_temp_free_i32(vl);
@@ -1912,14 +1921,65 @@ static ExitStatus op_clc(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r2 = get_field(s->fields, r2);
+ TCGv_i32 t1, t2;
+
+ /* r1 and r2 must be even. */
+ if (r1 & 1 || r2 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t2 = tcg_const_i32(r2);
+ gen_helper_clcl(cc_op, cpu_env, t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
- gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
set_cc_static(s);
return NO_EXIT;
}
@@ -1929,7 +1989,6 @@ static ExitStatus op_clm(DisasContext *s, DisasOps *o)
TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
TCGv_i32 t1 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t1, o->in1);
- potential_page_fault(s);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
tcg_temp_free_i32(t1);
@@ -1939,7 +1998,6 @@ static ExitStatus op_clm(DisasContext *s, DisasOps *o)
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
set_cc_static(s);
return_low128(o->in2);
@@ -2006,11 +2064,45 @@ static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGMemOp mop = s->insn->data;
+ TCGv_i64 addr, old, cc;
+ TCGLabel *lab = gen_new_label();
+
+ /* Note that in1 = R1 (zero-extended expected value),
+ out = R1 (original reg), out2 = R1+1 (new value). */
+
check_privileged(s);
- gen_helper_csp(cc_op, cpu_env, r1, o->in2);
- tcg_temp_free_i32(r1);
- set_cc_static(s);
+ addr = tcg_temp_new_i64();
+ old = tcg_temp_new_i64();
+ tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
+ tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
+ get_mem_index(s), mop | MO_ALIGN);
+ tcg_temp_free_i64(addr);
+
+ /* Are the memory and expected values (un)equal? */
+ cc = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
+ tcg_gen_extrl_i64_i32(cc_op, cc);
+
+ /* Write back the output now, so that it happens before the
+ following branch, so that we don't need local temps. */
+ if ((mop & MO_SIZE) == MO_32) {
+ tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
+ } else {
+ tcg_gen_mov_i64(o->out, old);
+ }
+ tcg_temp_free_i64(old);
+
+ /* If the comparison was equal, and the LSB of R2 was set,
+ then we need to flush the TLB (for all cpus). */
+ tcg_gen_xori_i64(cc, cc, 1);
+ tcg_gen_and_i64(cc, cc, o->in2);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
+ tcg_temp_free_i64(cc);
+
+ gen_helper_purge(cpu_env);
+ gen_set_label(lab);
+
return NO_EXIT;
}
#endif
@@ -2153,27 +2245,34 @@ static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
- /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
- tb->flags, (ab)use the tb->cs_base field as the address of
- the template in memory, and grab 8 bits of tb->flags/cflags for
- the contents of the register. We would then recognize all this
- in gen_intermediate_code_internal, generating code for exactly
- one instruction. This new TB then gets executed normally.
-
- On the other hand, this seems to be mostly used for modifying
- MVC inside of memcpy, which needs a helper call anyway. So
- perhaps this doesn't bear thinking about any further. */
+ int r1 = get_field(s->fields, r1);
+ TCGv_i32 ilen;
+ TCGv_i64 v1;
- TCGv_i64 tmp;
+ /* Nested EXECUTE is not allowed. */
+ if (unlikely(s->ex_value)) {
+ gen_program_exception(s, PGM_EXECUTE);
+ return EXIT_NORETURN;
+ }
update_psw_addr(s);
- gen_op_calc_cc(s);
+ update_cc_op(s);
- tmp = tcg_const_i64(s->next_pc);
- gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
- tcg_temp_free_i64(tmp);
+ if (r1 == 0) {
+ v1 = tcg_const_i64(0);
+ } else {
+ v1 = regs[r1];
+ }
- return NO_EXIT;
+ ilen = tcg_const_i32(s->ilen);
+ gen_helper_ex(cpu_env, ilen, v1, o->in2);
+ tcg_temp_free_i32(ilen);
+
+ if (r1 == 0) {
+ tcg_temp_free_i64(v1);
+ }
+
+ return EXIT_PC_CC_UPDATED;
}
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
@@ -2311,8 +2410,12 @@ static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
+ TCGv_i32 m4;
+
check_privileged(s);
- gen_helper_ipte(cpu_env, o->in1, o->in2);
+ m4 = tcg_const_i32(get_field(s->fields, m4));
+ gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
+ tcg_temp_free_i32(m4);
return NO_EXIT;
}
@@ -2324,6 +2427,27 @@ static ExitStatus op_iske(DisasContext *s, DisasOps *o)
}
#endif
+static ExitStatus op_keb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
/* The real output is indeed the original value in memory;
@@ -2545,7 +2669,6 @@ static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
gen_helper_lctl(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -2557,7 +2680,6 @@ static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
gen_helper_lctlg(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -2567,7 +2689,6 @@ static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_lra(o->out, cpu_env, o->in2);
set_cc_static(s);
return NO_EXIT;
@@ -2624,7 +2745,6 @@ static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
gen_helper_lam(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -2789,6 +2909,13 @@ static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
+{
+ gen_helper_lpq(o->out, cpu_env, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
#ifndef CONFIG_USER_ONLY
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
@@ -2866,32 +2993,78 @@ static ExitStatus op_movx(DisasContext *s, DisasOps *o)
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
return NO_EXIT;
}
+static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
- potential_page_fault(s);
- gen_helper_mvcl(cc_op, cpu_env, r1, r2);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
+ int r1 = get_field(s->fields, r1);
+ int r2 = get_field(s->fields, r2);
+ TCGv_i32 t1, t2;
+
+ /* r1 and r2 must be even. */
+ if (r1 & 1 || r2 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t2 = tcg_const_i32(r2);
+ gen_helper_mvcl(cc_op, cpu_env, t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
- gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
set_cc_static(s);
return NO_EXIT;
}
@@ -2901,7 +3074,6 @@ static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s->fields, l1);
check_privileged(s);
- potential_page_fault(s);
gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
set_cc_static(s);
return NO_EXIT;
@@ -2911,30 +3083,51 @@ static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s->fields, l1);
check_privileged(s);
- potential_page_fault(s);
gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
set_cc_static(s);
return NO_EXIT;
}
#endif
+static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
- gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
+ gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
set_cc_static(s);
return_low128(o->in2);
return NO_EXIT;
}
+static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
tcg_gen_mul_i64(o->out, o->in1, o->in2);
@@ -3043,7 +3236,6 @@ static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
set_cc_static(s);
@@ -3078,7 +3270,6 @@ static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
set_cc_static(s);
@@ -3107,6 +3298,46 @@ static ExitStatus op_ori(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_pack(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_pack(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
+static ExitStatus op_pka(DisasContext *s, DisasOps *o)
+{
+ int l2 = get_field(s->fields, l2) + 1;
+ TCGv_i32 l;
+
+ /* The length must not exceed 32 bytes. */
+ if (l2 > 32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ l = tcg_const_i32(l2);
+ gen_helper_pka(cpu_env, o->addr1, o->in2, l);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
+static ExitStatus op_pku(DisasContext *s, DisasOps *o)
+{
+ int l2 = get_field(s->fields, l2) + 1;
+ TCGv_i32 l;
+
+ /* The length must be even and should not exceed 64 bytes. */
+ if ((l2 & 1) || (l2 > 64)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ l = tcg_const_i32(l2);
+ gen_helper_pku(cpu_env, o->addr1, o->in2, l);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
gen_helper_popcnt(o->out, o->in2);
@@ -3627,7 +3858,6 @@ static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
gen_helper_stctg(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -3639,7 +3869,6 @@ static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
gen_helper_stctl(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -3871,7 +4100,6 @@ static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
gen_helper_stam(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -3975,9 +4203,14 @@ static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
+ return NO_EXIT;
+}
+
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
set_cc_static(s);
return_low128(o->in2);
@@ -4027,7 +4260,7 @@ static ExitStatus op_svc(DisasContext *s, DisasOps *o)
tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
tcg_temp_free_i32(t);
- t = tcg_const_i32(s->next_pc - s->pc);
+ t = tcg_const_i32(s->ilen);
tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
tcg_temp_free_i32(t);
@@ -4035,6 +4268,16 @@ static ExitStatus op_svc(DisasContext *s, DisasOps *o)
return EXIT_NORETURN;
}
+static ExitStatus op_tam(DisasContext *s, DisasOps *o)
+{
+ int cc = 0;
+
+ cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
+ cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
+ gen_op_movi_cc(s, cc);
+ return NO_EXIT;
+}
+
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
@@ -4057,19 +4300,36 @@ static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
}
#ifndef CONFIG_USER_ONLY
+
+static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_testblock(cc_op, cpu_env, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_tprot(cc_op, o->addr1, o->in2);
set_cc_static(s);
return NO_EXIT;
}
+
#endif
+static ExitStatus op_tp(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
+ gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
+ tcg_temp_free_i32(l1);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_tr(cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
set_cc_static(s);
@@ -4078,7 +4338,6 @@ static ExitStatus op_tr(DisasContext *s, DisasOps *o)
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
return_low128(o->out2);
set_cc_static(s);
@@ -4088,22 +4347,95 @@ static ExitStatus op_tre(DisasContext *s, DisasOps *o)
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
set_cc_static(s);
return NO_EXIT;
}
+static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
+ TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
+ TCGv_i32 tst = tcg_temp_new_i32();
+ int m3 = get_field(s->fields, m3);
+
+ /* XXX: the C bit in M3 should be considered as 0 when the
+ ETF2-enhancement facility is not installed. */
+ if (m3 & 1) {
+ tcg_gen_movi_i32(tst, -1);
+ } else {
+ tcg_gen_extrl_i64_i32(tst, regs[0]);
+ if (s->insn->opc & 3) {
+ tcg_gen_ext8u_i32(tst, tst);
+ } else {
+ tcg_gen_ext16u_i32(tst, tst);
+ }
+ }
+ gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
+
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ tcg_temp_free_i32(sizes);
+ tcg_temp_free_i32(tst);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ts(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 t1 = tcg_const_i32(0xff);
+ tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
+ tcg_gen_extract_i32(cc_op, t1, 7, 1);
+ tcg_temp_free_i32(t1);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
return NO_EXIT;
}
+static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
+{
+ int l1 = get_field(s->fields, l1) + 1;
+ TCGv_i32 l;
+
+ /* The length must not exceed 32 bytes. */
+ if (l1 > 32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ l = tcg_const_i32(l1);
+ gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
+{
+ int l1 = get_field(s->fields, l1) + 1;
+ TCGv_i32 l;
+
+ /* The length must be even and should not exceed 64 bytes. */
+ if ((l1 & 1) || (l1 > 64)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ l = tcg_const_i32(l1);
+ gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
int d1 = get_field(s->fields, d1);
@@ -4151,7 +4483,6 @@ static ExitStatus op_xc(DisasContext *s, DisasOps *o)
/* But in general we'll defer to a helper. */
o->in2 = get_address(s, 0, b2, d2);
t32 = tcg_const_i32(l);
- potential_page_fault(s);
gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
tcg_temp_free_i32(t32);
set_cc_static(s);
@@ -5158,24 +5489,36 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
int op, op2, ilen;
const DisasInsn *info;
- insn = ld_code2(env, pc);
- op = (insn >> 8) & 0xff;
- ilen = get_ilen(op);
- s->next_pc = s->pc + ilen;
+ if (unlikely(s->ex_value)) {
+ /* Drop the EX data now, so that it's clear on exception paths. */
+ TCGv_i64 zero = tcg_const_i64(0);
+ tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
+ tcg_temp_free_i64(zero);
- switch (ilen) {
- case 2:
- insn = insn << 48;
- break;
- case 4:
- insn = ld_code4(env, pc) << 32;
- break;
- case 6:
- insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
- break;
- default:
- abort();
+ /* Extract the values saved by EXECUTE. */
+ insn = s->ex_value & 0xffffffffffff0000ull;
+ ilen = s->ex_value & 0xf;
+ op = insn >> 56;
+ } else {
+ insn = ld_code2(env, pc);
+ op = (insn >> 8) & 0xff;
+ ilen = get_ilen(op);
+ switch (ilen) {
+ case 2:
+ insn = insn << 48;
+ break;
+ case 4:
+ insn = ld_code4(env, pc) << 32;
+ break;
+ case 6:
+ insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
+ s->next_pc = s->pc + ilen;
+ s->ilen = ilen;
/* We can't actually determine the insn format until we've looked up
the full insn opcode. Which we can't do without locating the
@@ -5392,6 +5735,7 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
dc.tb = tb;
dc.pc = pc_start;
dc.cc_op = CC_OP_DYNAMIC;
+ dc.ex_value = tb->cs_base;
do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
@@ -5426,10 +5770,7 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
gen_io_start();
}
- status = NO_EXIT;
- if (status == NO_EXIT) {
- status = translate_one(env, &dc);
- }
+ status = translate_one(env, &dc);
/* If we reach a page boundary, are single stepping,
or exhaust instruction count, stop generation. */
@@ -5438,7 +5779,8 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
|| tcg_op_buf_full()
|| num_insns >= max_insns
|| singlestep
- || cs->singlestep_enabled)) {
+ || cs->singlestep_enabled
+ || dc.ex_value)) {
status = EXIT_PC_STALE;
}
} while (status == NO_EXIT);
@@ -5458,11 +5800,15 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
/* Next TB starts off with CC_OP_DYNAMIC, so make sure the
cc op type is in env */
update_cc_op(&dc);
+ /* FALLTHRU */
+ case EXIT_PC_CC_UPDATED:
/* Exit the TB, either by raising a debug exception or by return. */
if (do_debug) {
gen_exception(EXCP_DEBUG);
- } else {
+ } else if (use_exit_tb(&dc)) {
tcg_gen_exit_tb(0);
+ } else {
+ tcg_gen_lookup_and_goto_ptr(psw_addr);
}
break;
default:
@@ -5478,9 +5824,14 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
qemu_log_lock();
- qemu_log("IN: %s\n", lookup_symbol(pc_start));
- log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
- qemu_log("\n");
+ if (unlikely(dc.ex_value)) {
+ /* ??? Unfortunately log_target_disas can't use host memory. */
+ qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
+ } else {
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
+ qemu_log("\n");
+ }
qemu_log_unlock();
}
#endif
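The extract_insn() change above recovers an EXECUTE target from s->ex_value (seeded from tb->cs_base): the instruction bytes sit left-aligned in bits 63..16 and the instruction length in the low nibble. The producing side lives in target/s390x/mem_helper.c, which is listed in the diffstat but not shown in this section; the following is only a minimal sketch of the packing that would match the unpacking above (the helper name is illustrative, only the ex_value field is taken from the patch).

/* Hypothetical sketch only: pack a (possibly modified) EXECUTE target
 * instruction so that extract_insn() can decode it without re-reading
 * guest memory.  'insn' is assumed to already be left-aligned, i.e. its
 * first byte occupies bits 63..56 of the value. */
static void pack_ex_value(CPUS390XState *env, uint64_t insn, int ilen)
{
    env->ex_value = (insn & 0xffffffffffff0000ull) | (ilen & 0xf);
}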
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index ecca17d45d..ee29fb1a14 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -483,6 +483,7 @@ void xtensa_translate_init(void);
void xtensa_breakpoint_handler(CPUState *cs);
void xtensa_finalize_config(XtensaConfig *config);
void xtensa_register_core(XtensaConfigList *node);
+void xtensa_sim_open_console(Chardev *chr);
void check_interrupts(CPUXtensaState *s);
void xtensa_irq_init(CPUXtensaState *env);
void *xtensa_get_extint(CPUXtensaState *env, unsigned extint);
diff --git a/target/xtensa/gdbstub.c b/target/xtensa/gdbstub.c
index fa5469a4ef..da131ae8cc 100644
--- a/target/xtensa/gdbstub.c
+++ b/target/xtensa/gdbstub.c
@@ -58,7 +58,10 @@ int xtensa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
case 8:
return gdb_get_reg64(mem_buf, float64_val(env->fregs[i].f64));
default:
- return 0;
+ qemu_log_mask(LOG_UNIMP, "%s from reg %d of unsupported size %d\n",
+ __func__, n, reg->size);
+ memset(mem_buf, 0, reg->size);
+ return reg->size;
}
case 8: /*a*/
@@ -67,6 +70,8 @@ int xtensa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
default:
qemu_log_mask(LOG_UNIMP, "%s from reg %d of unsupported type %d\n",
__func__, n, reg->type);
+ memset(mem_buf, 0, reg->size);
+ return reg->size;
return 0;
}
}
@@ -111,7 +116,9 @@ int xtensa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
env->fregs[reg->targno & 0x0f].f64 = make_float64(tmp);
return 8;
default:
- return 0;
+ qemu_log_mask(LOG_UNIMP, "%s to reg %d of unsupported size %d\n",
+ __func__, n, reg->size);
+ return reg->size;
}
case 8: /*a*/
@@ -121,7 +128,7 @@ int xtensa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
default:
qemu_log_mask(LOG_UNIMP, "%s to reg %d of unsupported type %d\n",
__func__, n, reg->type);
- return 0;
+ return reg->size;
}
return 4;
diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c
index a888a9dc7b..32e2bd7f1d 100644
--- a/target/xtensa/xtensa-semi.c
+++ b/target/xtensa/xtensa-semi.c
@@ -27,9 +27,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "chardev/char-fe.h"
#include "exec/helper-proto.h"
#include "exec/semihost.h"
+#include "qapi/error.h"
#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+
+static CharBackend *xtensa_sim_console;
enum {
TARGET_SYS_exit = 1,
@@ -148,6 +153,15 @@ static uint32_t errno_h2g(int host_errno)
}
}
+void xtensa_sim_open_console(Chardev *chr)
+{
+ static CharBackend console;
+
+ qemu_chr_fe_init(&console, chr, &error_abort);
+ qemu_chr_fe_set_handlers(&console, NULL, NULL, NULL, NULL, NULL, true);
+ xtensa_sim_console = &console;
+}
+
void HELPER(simcall)(CPUXtensaState *env)
{
CPUState *cs = CPU(xtensa_env_get_cpu(env));
@@ -166,6 +180,7 @@ void HELPER(simcall)(CPUXtensaState *env)
uint32_t fd = regs[3];
uint32_t vaddr = regs[4];
uint32_t len = regs[5];
+ uint32_t len_done = 0;
while (len > 0) {
hwaddr paddr = cpu_get_phys_page_debug(cs, vaddr);
@@ -173,25 +188,54 @@ void HELPER(simcall)(CPUXtensaState *env)
TARGET_PAGE_SIZE - (vaddr & (TARGET_PAGE_SIZE - 1));
uint32_t io_sz = page_left < len ? page_left : len;
hwaddr sz = io_sz;
- void *buf = cpu_physical_memory_map(paddr, &sz, is_write);
+ void *buf = cpu_physical_memory_map(paddr, &sz, !is_write);
+ uint32_t io_done;
+ bool error = false;
if (buf) {
vaddr += io_sz;
len -= io_sz;
- regs[2] = is_write ?
- write(fd, buf, io_sz) :
- read(fd, buf, io_sz);
- regs[3] = errno_h2g(errno);
- cpu_physical_memory_unmap(buf, sz, is_write, sz);
- if (regs[2] == -1) {
- break;
+ if (fd < 3 && xtensa_sim_console) {
+ if (is_write && (fd == 1 || fd == 2)) {
+ io_done = qemu_chr_fe_write_all(xtensa_sim_console,
+ buf, io_sz);
+ regs[3] = errno_h2g(errno);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s fd %d is not supported with chardev console\n",
+ is_write ?
+ "writing to" : "reading from", fd);
+ io_done = -1;
+ regs[3] = TARGET_EBADF;
+ }
+ } else {
+ io_done = is_write ?
+ write(fd, buf, io_sz) :
+ read(fd, buf, io_sz);
+ regs[3] = errno_h2g(errno);
}
+ if (io_done == -1) {
+ error = true;
+ io_done = 0;
+ }
+ cpu_physical_memory_unmap(buf, sz, !is_write, io_done);
} else {
- regs[2] = -1;
+ error = true;
regs[3] = TARGET_EINVAL;
break;
}
+ if (error) {
+ if (!len_done) {
+ len_done = -1;
+ }
+ break;
+ }
+ len_done += io_done;
+ if (io_done < io_sz) {
+ break;
+ }
}
+ regs[2] = len_done;
}
break;
@@ -241,10 +285,6 @@ void HELPER(simcall)(CPUXtensaState *env)
uint32_t target_tvv[2];
struct timeval tv = {0};
- fd_set fdset;
-
- FD_ZERO(&fdset);
- FD_SET(fd, &fdset);
if (target_tv) {
cpu_memory_rw_debug(cs, target_tv,
@@ -252,12 +292,25 @@ void HELPER(simcall)(CPUXtensaState *env)
tv.tv_sec = (int32_t)tswap32(target_tvv[0]);
tv.tv_usec = (int32_t)tswap32(target_tvv[1]);
}
- regs[2] = select(fd + 1,
- rq == SELECT_ONE_READ ? &fdset : NULL,
- rq == SELECT_ONE_WRITE ? &fdset : NULL,
- rq == SELECT_ONE_EXCEPT ? &fdset : NULL,
- target_tv ? &tv : NULL);
- regs[3] = errno_h2g(errno);
+ if (fd < 3 && xtensa_sim_console) {
+ if ((fd == 1 || fd == 2) && rq == SELECT_ONE_WRITE) {
+ regs[2] = 1;
+ } else {
+ regs[2] = 0;
+ }
+ regs[3] = 0;
+ } else {
+ fd_set fdset;
+
+ FD_ZERO(&fdset);
+ FD_SET(fd, &fdset);
+ regs[2] = select(fd + 1,
+ rq == SELECT_ONE_READ ? &fdset : NULL,
+ rq == SELECT_ONE_WRITE ? &fdset : NULL,
+ rq == SELECT_ONE_EXCEPT ? &fdset : NULL,
+ target_tv ? &tv : NULL);
+ regs[3] = errno_h2g(errno);
+ }
}
break;
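xtensa_sim_open_console() hands the semihosting layer a chardev backend; once it is set, guest writes to fds 1 and 2 go through qemu_chr_fe_write_all() and the SELECT_ONE simcall reports those fds as always writable. The board-side hook-up is in hw/xtensa/sim.c (in the diffstat, not shown in this section); a hedged sketch of what that wiring presumably looks like, using the serial_hds[] global of this era:

/* Hypothetical board-init snippet: route the first -serial chardev,
 * if one was given, to the semihosting console. */
if (serial_hds[0]) {
    xtensa_sim_open_console(serial_hds[0]);
}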
diff --git a/tcg-runtime.c b/tcg-runtime.c
index 4c60c96658..7fa90ce508 100644
--- a/tcg-runtime.c
+++ b/tcg-runtime.c
@@ -27,6 +27,9 @@
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
+#include "exec/tb-hash.h"
+#include "disas/disas.h"
+#include "exec/log.h"
/* 32-bit helpers */
@@ -141,6 +144,35 @@ uint64_t HELPER(ctpop_i64)(uint64_t arg)
return ctpop64(arg);
}
+void *HELPER(lookup_tb_ptr)(CPUArchState *env, target_ulong addr)
+{
+ CPUState *cpu = ENV_GET_CPU(env);
+ TranslationBlock *tb;
+ target_ulong cs_base, pc;
+ uint32_t flags;
+
+ tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(addr)]);
+ if (likely(tb)) {
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+ if (likely(tb->pc == addr && tb->cs_base == cs_base &&
+ tb->flags == flags)) {
+ goto found;
+ }
+ tb = tb_htable_lookup(cpu, addr, cs_base, flags);
+ if (likely(tb)) {
+ atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(addr)], tb);
+ goto found;
+ }
+ }
+ return tcg_ctx.code_gen_epilogue;
+ found:
+ qemu_log_mask_and_addr(CPU_LOG_EXEC, addr,
+ "Chain %p [%d: " TARGET_FMT_lx "] %s\n",
+ tb->tc_ptr, cpu->cpu_index, addr,
+ lookup_symbol(addr));
+ return tb->tc_ptr;
+}
+
void HELPER(exit_atomic)(CPUArchState *env)
{
cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
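HELPER(lookup_tb_ptr) returns either the code pointer of a cached, still-valid TB for the given guest address or tcg_ctx.code_gen_epilogue, so the generated goto_ptr either chains straight into the next TB or falls back into the exec loop with a return value of 0. Front ends reach it through tcg_gen_lookup_and_goto_ptr(), added in tcg/tcg-op.c further down. A minimal, hypothetical sketch of a translator ending a TB on a computed branch, assuming a 'cpu_pc' TCG global for the guest program counter:

/* Sketch only, not taken from this patch: finish a TB whose successor
 * address is known only at run time. */
static void gen_indirect_jump(TCGv dest)
{
    tcg_gen_mov_tl(cpu_pc, dest);          /* commit the guest PC */
    tcg_gen_lookup_and_goto_ptr(cpu_pc);   /* chain if cached, else exit */
}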
diff --git a/tcg/README b/tcg/README
index a9858c2f74..bf49e8242b 100644
--- a/tcg/README
+++ b/tcg/README
@@ -477,6 +477,14 @@ current TB was linked to this TB. Otherwise execute the next
instructions. Only indices 0 and 1 are valid and tcg_gen_goto_tb may be issued
at most once with each slot index per TB.
+* lookup_and_goto_ptr tb_addr
+
+Look up a TB address ('tb_addr') and jump to it if valid. If not valid,
+jump to the TCG epilogue to go back to the exec loop.
+
+This operation is optional. If the TCG backend does not implement the
+goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0).
+
* qemu_ld_i32/i64 t0, t1, flags, memidx
* qemu_st_i32/i64 t0, t1, flags, memidx
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 1a5ea23844..55a46ac825 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -77,6 +77,7 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_goto_ptr 1
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 1
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 290de6dae6..5f185458f1 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -1357,8 +1357,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
switch (opc) {
case INDEX_op_exit_tb:
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
- tcg_out_goto(s, tb_ret_addr);
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tcg_out_goto(s, s->code_gen_epilogue);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
+ tcg_out_goto(s, tb_ret_addr);
+ }
break;
case INDEX_op_goto_tb:
@@ -1374,6 +1379,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
+ case INDEX_op_goto_ptr:
+ tcg_out_insn(s, 3207, BR, a0);
+ break;
+
case INDEX_op_br:
tcg_out_goto_label(s, arg_label(a0));
break;
@@ -1735,6 +1744,7 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
{ INDEX_op_br, { } },
+ { INDEX_op_goto_ptr, { "r" } },
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
@@ -1942,6 +1952,14 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]);
+ /*
+ * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
+ * and fall through to the rest of the epilogue.
+ */
+ s->code_gen_epilogue = s->code_ptr;
+ tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);
+
+ /* TB epilogue */
tb_ret_addr = s->code_ptr;
/* Remove TCG locals stack space. */
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index 75ea247bc4..5ef1086710 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -123,6 +123,7 @@ extern bool use_idiv_instructions;
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
#define TCG_TARGET_HAS_rem_i32 0
+#define TCG_TARGET_HAS_goto_ptr 1
enum {
TCG_AREG0 = TCG_REG_R6,
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index e75a6d4943..9f5cb66718 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -329,11 +329,6 @@ static const uint8_t tcg_cond_to_arm_cond[] = {
[TCG_COND_GTU] = COND_HI,
};
-static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
-{
- tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
-}
-
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
tcg_out32(s, (cond << 28) | 0x0a000000 |
@@ -402,6 +397,18 @@ static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
}
}
+static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
+{
+ /* Unless the C portion of QEMU is compiled as thumb, we don't
+ actually need true BX semantics; merely a branch to an address
+ held in a register. */
+ if (use_armv5t_instructions) {
+ tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
+ } else {
+ tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
+ }
+}
+
static inline void tcg_out_dat_imm(TCGContext *s,
int cond, int opc, int rd, int rn, int im)
{
@@ -977,7 +984,7 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
* with the code buffer limited to 16MB we wouldn't need the long case.
* But we also use it for the tail-call to the qemu_ld/st helpers, which does.
*/
-static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
+static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
{
intptr_t addri = (intptr_t)addr;
ptrdiff_t disp = tcg_pcrel_diff(s, addr);
@@ -987,15 +994,9 @@ static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
return;
}
+ assert(use_armv5t_instructions || (addri & 1) == 0);
tcg_out_movi32(s, cond, TCG_REG_TMP, addri);
- if (use_armv5t_instructions) {
- tcg_out_bx(s, cond, TCG_REG_TMP);
- } else {
- if (addri & 1) {
- tcg_abort();
- }
- tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
- }
+ tcg_out_bx(s, cond, TCG_REG_TMP);
}
/* The call case is mostly used for helpers - so it's not unreasonable
@@ -1654,8 +1655,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
switch (opc) {
case INDEX_op_exit_tb:
- tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
- tcg_out_goto(s, COND_AL, tb_ret_addr);
+ /* Reuse the zeroing that exists for goto_ptr. */
+ a0 = args[0];
+ if (a0 == 0) {
+ tcg_out_goto(s, COND_AL, s->code_gen_epilogue);
+ } else {
+ tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
+ tcg_out_goto(s, COND_AL, tb_ret_addr);
+ }
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_insn_offset) {
@@ -1670,6 +1677,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
+ case INDEX_op_goto_ptr:
+ tcg_out_bx(s, COND_AL, args[0]);
+ break;
case INDEX_op_br:
tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
break;
@@ -1960,6 +1970,7 @@ static const TCGTargetOpDef arm_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
{ INDEX_op_br, { } },
+ { INDEX_op_goto_ptr, { "r" } },
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
@@ -2135,9 +2146,16 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
- tb_ret_addr = s->code_ptr;
- /* Epilogue. We branch here via tb_ret_addr. */
+ /*
+ * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
+ * and fall through to the rest of the epilogue.
+ */
+ s->code_gen_epilogue = s->code_ptr;
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
+
+ /* TB epilogue */
+ tb_ret_addr = s->code_ptr;
tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
TCG_REG_CALL_STACK, stack_addend, 1);
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 4275787db9..73a15f7e80 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -107,6 +107,7 @@ extern bool have_popcnt;
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_goto_ptr 1
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_extrl_i64_i32 0
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 5918008296..01e3b4e95c 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1882,8 +1882,13 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
switch (opc) {
case INDEX_op_exit_tb:
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
- tcg_out_jmp(s, tb_ret_addr);
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tcg_out_jmp(s, s->code_gen_epilogue);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
+ tcg_out_jmp(s, tb_ret_addr);
+ }
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_insn_offset) {
@@ -1906,6 +1911,10 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
+ case INDEX_op_goto_ptr:
+ /* jmp to the given host address (could be epilogue) */
+ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
+ break;
case INDEX_op_br:
tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
break;
@@ -2277,6 +2286,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
+ static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
@@ -2299,6 +2309,9 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
= { .args_ct_str = { "L", "L", "L", "L" } };
switch (op) {
+ case INDEX_op_goto_ptr:
+ return &r;
+
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i32:
@@ -2567,6 +2580,13 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
#endif
+ /*
+ * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
+ * and fall through to the rest of the epilogue.
+ */
+ s->code_gen_epilogue = s->code_ptr;
+ tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);
+
/* TB epilogue */
tb_ret_addr = s->code_ptr;
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index 42aea03a8b..901bb7575d 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -173,6 +173,7 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i64 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index f46d64a3a7..d75cb63ed3 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -130,6 +130,7 @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
+#define TCG_TARGET_HAS_goto_ptr 1
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index 2a7e1c7f5b..8cff9a6bf9 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -1747,6 +1747,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_nop(s);
s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
+ case INDEX_op_goto_ptr:
+ /* jmp to the given host address (could be epilogue) */
+ tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
+ tcg_out_nop(s);
+ break;
case INDEX_op_br:
tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
arg_label(a0));
@@ -2160,6 +2165,7 @@ static const TCGTargetOpDef mips_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
{ INDEX_op_br, { } },
+ { INDEX_op_goto_ptr, { "r" } },
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
@@ -2451,6 +2457,13 @@ static void tcg_target_qemu_prologue(TCGContext *s)
/* delay slot */
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+ /*
+ * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
+ * and fall through to the rest of the epilogue.
+ */
+ s->code_gen_epilogue = s->code_ptr;
+ tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO);
+
/* TB epilogue */
tb_ret_addr = s->code_ptr;
for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index abd8b3d6cd..5f4a40a5b4 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -82,6 +82,7 @@ extern bool have_isa_3_00;
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
+#define TCG_TARGET_HAS_goto_ptr 1
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 64f67d2c77..8d50f18328 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1932,6 +1932,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
/* Epilogue */
tcg_debug_assert(tb_ret_addr == s->code_ptr);
+ s->code_gen_epilogue = tb_ret_addr;
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
@@ -1986,6 +1987,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
#endif
s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
+ case INDEX_op_goto_ptr:
+ tcg_out32(s, MTSPR | RS(args[0]) | CTR);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, 0);
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+ break;
case INDEX_op_br:
{
TCGLabel *l = arg_label(args[0]);
@@ -2555,6 +2561,7 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
{ INDEX_op_br, { } },
+ { INDEX_op_goto_ptr, { "r" } },
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index cbdd2a6275..957f0c0afe 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -92,6 +92,7 @@ extern uint64_t s390_facilities;
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_goto_ptr 1
#define TCG_TARGET_HAS_div2_i64 1
#define TCG_TARGET_HAS_rot_i64 1
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index a679280b92..5d7083e90c 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -1741,9 +1741,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
switch (opc) {
case INDEX_op_exit_tb:
- /* return value */
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
- tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
+ /* Reuse the zeroing that exists for goto_ptr. */
+ a0 = args[0];
+ if (a0 == 0) {
+ tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
+ tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
+ }
break;
case INDEX_op_goto_tb:
@@ -1767,6 +1772,10 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
+ case INDEX_op_goto_ptr:
+ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, args[0]);
+ break;
+
OP_32_64(ld8u):
/* ??? LLC (RXY format) is only present with the extended-immediate
facility, whereas LLGC is always present. */
@@ -2241,6 +2250,7 @@ static const TCGTargetOpDef s390_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
{ INDEX_op_br, { } },
+ { INDEX_op_goto_ptr, { "r" } },
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
@@ -2439,6 +2449,14 @@ static void tcg_target_qemu_prologue(TCGContext *s)
/* br %r3 (go to TB) */
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
+ /*
+ * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
+ * and fall through to the rest of the epilogue.
+ */
+ s->code_gen_epilogue = s->code_ptr;
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
+
+ /* TB epilogue */
tb_ret_addr = s->code_ptr;
/* lmg %r6,%r15,fs+48(%r15) (restore registers) */
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index b8b74f96ff..854a0afd70 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -123,6 +123,7 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_goto_ptr 1
#define TCG_TARGET_HAS_extrl_i64_i32 1
#define TCG_TARGET_HAS_extrh_i64_i32 1
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index 3785d77f62..18afce2f87 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -1003,7 +1003,11 @@ static void tcg_target_qemu_prologue(TCGContext *s)
/* delay slot */
tcg_out_nop(s);
- /* No epilogue required. We issue ret + restore directly in the TB. */
+ /* Epilogue for goto_ptr. */
+ s->code_gen_epilogue = s->code_ptr;
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
+ /* delay slot */
+ tcg_out_movi_imm13(s, TCG_REG_O0, 0);
#ifdef CONFIG_SOFTMMU
build_trampolines(s);
@@ -1288,6 +1292,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_nop(s);
s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
+ case INDEX_op_goto_ptr:
+ tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
+ tcg_out_nop(s);
+ break;
case INDEX_op_br:
tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
tcg_out_nop(s);
@@ -1513,6 +1521,7 @@ static const TCGTargetOpDef sparc_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
{ INDEX_op_br, { } },
+ { INDEX_op_goto_ptr, { "r" } },
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 6b1f41500c..87f673ef49 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -2587,6 +2587,18 @@ void tcg_gen_goto_tb(unsigned idx)
tcg_gen_op1i(INDEX_op_goto_tb, idx);
}
+void tcg_gen_lookup_and_goto_ptr(TCGv addr)
+{
+ if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+ TCGv_ptr ptr = tcg_temp_new_ptr();
+ gen_helper_lookup_tb_ptr(ptr, tcg_ctx.tcg_env, addr);
+ tcg_gen_op1i(INDEX_op_goto_ptr, GET_TCGV_PTR(ptr));
+ tcg_temp_free_ptr(ptr);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+}
+
static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
/* Trigger the asserts within as early as possible. */
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index c68e300a68..5d3278f243 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -796,6 +796,17 @@ static inline void tcg_gen_exit_tb(uintptr_t val)
*/
void tcg_gen_goto_tb(unsigned idx);
+/**
+ * tcg_gen_lookup_and_goto_ptr() - look up a TB and jump to it if valid
+ * @addr: Guest address of the target TB
+ *
+ * If the TB is not valid, jump to the epilogue.
+ *
+ * This operation is optional. If the TCG backend does not implement goto_ptr,
+ * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument.
+ */
+void tcg_gen_lookup_and_goto_ptr(TCGv addr);
+
#if TARGET_LONG_BITS == 32
#define tcg_temp_new() tcg_temp_new_i32()
#define tcg_global_reg_new tcg_global_reg_new_i32
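As the doc comment above notes, the call degrades gracefully when the backend lacks goto_ptr. The usual division of labour in a translator is that branches whose destination is known at translate time keep the patchable goto_tb/exit_tb chaining, while computed destinations use the new lookup. A hedged sketch of that pattern follows; use_goto_tb() stands for whatever per-target predicate allows direct chaining, and cpu_pc and dc->tb are illustrative names, not part of this patch.

/* Sketch only: choose between direct TB chaining and the helper-based
 * lookup, depending on whether the destination is static. */
static void gen_goto(DisasContext *dc, TCGv dest, target_ulong static_pc,
                     bool is_static)
{
    if (is_static && use_goto_tb(dc, static_pc)) {
        tcg_gen_goto_tb(0);                      /* patchable direct jump */
        tcg_gen_movi_tl(cpu_pc, static_pc);
        tcg_gen_exit_tb((uintptr_t)dc->tb);      /* tb pointer + slot 0 */
    } else {
        tcg_gen_mov_tl(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr(cpu_pc);     /* indirect, via lookup */
    }
}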
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index f06f89405e..956fb1e9f3 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -193,6 +193,7 @@ DEF(insn_start, 0, 0, TLADDR_ARGS * TARGET_INSN_START_WORDS,
TCG_OPF_NOT_PRESENT)
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END)
+DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr))
DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
diff --git a/tcg/tcg-runtime.h b/tcg/tcg-runtime.h
index 114ea6fecf..c41d38a557 100644
--- a/tcg/tcg-runtime.h
+++ b/tcg/tcg-runtime.h
@@ -24,6 +24,8 @@ DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(lookup_tb_ptr, TCG_CALL_NO_WG_SE, ptr, env, tl)
+
DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
#ifdef CONFIG_SOFTMMU
diff --git a/tcg/tcg.c b/tcg/tcg.c
index cb898f1636..564292f54d 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -424,6 +424,11 @@ void tcg_prologue_init(TCGContext *s)
qemu_log_unlock();
}
#endif
+
+ /* Assert that goto_ptr is implemented completely. */
+ if (TCG_TARGET_HAS_goto_ptr) {
+ tcg_debug_assert(s->code_gen_epilogue != NULL);
+ }
}
void tcg_func_start(TCGContext *s)
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 6c216bb73f..5ec48d1787 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -699,6 +699,7 @@ struct TCGContext {
extension that allows arithmetic on void*. */
int code_gen_max_blocks;
void *code_gen_prologue;
+ void *code_gen_epilogue;
void *code_gen_buffer;
size_t code_gen_buffer_size;
void *code_gen_ptr;
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 838bf3a858..06963288dc 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -85,6 +85,7 @@
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_extrl_i64_i32 0
diff --git a/tests/check-qom-proplist.c b/tests/check-qom-proplist.c
index a16cefca73..8e432e9ab6 100644
--- a/tests/check-qom-proplist.c
+++ b/tests/check-qom-proplist.c
@@ -23,6 +23,9 @@
#include "qapi/error.h"
#include "qom/object.h"
#include "qemu/module.h"
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "qom/object_interfaces.h"
#define TYPE_DUMMY "qemu-dummy"
@@ -162,6 +165,10 @@ static const TypeInfo dummy_info = {
.instance_finalize = dummy_finalize,
.class_size = sizeof(DummyObjectClass),
.class_init = dummy_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_USER_CREATABLE },
+ { }
+ }
};
@@ -320,6 +327,14 @@ static const TypeInfo dummy_backend_info = {
.class_size = sizeof(DummyBackendClass),
};
+static QemuOptsList qemu_object_opts = {
+ .name = "object",
+ .implied_opt_name = "qom-type",
+ .head = QTAILQ_HEAD_INITIALIZER(qemu_object_opts.head),
+ .desc = {
+ { }
+ },
+};
static void test_dummy_createv(void)
@@ -388,6 +403,46 @@ static void test_dummy_createlist(void)
object_unparent(OBJECT(dobj));
}
+static void test_dummy_createcmdl(void)
+{
+ QemuOpts *opts;
+ DummyObject *dobj;
+ Error *err = NULL;
+ const char *params = TYPE_DUMMY \
+ ",id=dev0," \
+ "bv=yes,sv=Hiss hiss hiss,av=platypus";
+
+ qemu_add_opts(&qemu_object_opts);
+ opts = qemu_opts_parse(&qemu_object_opts, params, true, &err);
+ g_assert(err == NULL);
+ g_assert(opts);
+
+ dobj = DUMMY_OBJECT(user_creatable_add_opts(opts, &err));
+ g_assert(err == NULL);
+ g_assert(dobj);
+ g_assert_cmpstr(dobj->sv, ==, "Hiss hiss hiss");
+ g_assert(dobj->bv == true);
+ g_assert(dobj->av == DUMMY_PLATYPUS);
+
+ user_creatable_del("dev0", &err);
+ g_assert(err == NULL);
+ error_free(err);
+
+ /*
+ * cmdline-parsing via qemu_opts_parse() results in a QemuOpts entry
+ * corresponding to the Object's ID to be added to the QemuOptsList
+ * for objects. To avoid having this entry conflict with future
+ * Objects using the same ID (which can happen in cases where
+ * qemu_opts_parse() is used to parse the object params, such as
+ * with hmp_object_add() at the time of this comment), we need to
+ * check for this in user_creatable_del() and remove the QemuOpts if
+ * it is present.
+ *
+ * The below check ensures this works as expected.
+ */
+ g_assert_null(qemu_opts_find(&qemu_object_opts, "dev0"));
+}
+
static void test_dummy_badenum(void)
{
Error *err = NULL;
@@ -525,6 +580,7 @@ int main(int argc, char **argv)
g_test_add_func("/qom/proplist/createlist", test_dummy_createlist);
g_test_add_func("/qom/proplist/createv", test_dummy_createv);
+ g_test_add_func("/qom/proplist/createcmdline", test_dummy_createcmdl);
g_test_add_func("/qom/proplist/badenum", test_dummy_badenum);
g_test_add_func("/qom/proplist/getenum", test_dummy_getenum);
g_test_add_func("/qom/proplist/iterator", test_dummy_iterator);
diff --git a/tests/test-x86-cpuid-compat.c b/tests/test-x86-cpuid-compat.c
index 6c71e46391..4166ce54b7 100644
--- a/tests/test-x86-cpuid-compat.c
+++ b/tests/test-x86-cpuid-compat.c
@@ -313,6 +313,44 @@ int main(int argc, char **argv)
add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7",
"-machine pc-i440fx-2.7 -cpu 486,+xstore",
"xlevel2", 0);
+ /*
+ * QEMU 1.4.0 had auto-level enabled for CPUID[7], already,
+ * and the compat code that sets default level shouldn't
+ * disable the auto-level=7 code:
+ */
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off",
+ "-machine pc-i440fx-1.4 -cpu Nehalem",
+ "level", 2);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on",
+ "-machine pc-i440fx-1.4 -cpu Nehalem,+smap",
+ "level", 7);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off",
+ "-machine pc-i440fx-2.3 -cpu Penryn",
+ "level", 4);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on",
+ "-machine pc-i440fx-2.3 -cpu Penryn,+erms",
+ "level", 7);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off",
+ "-machine pc-i440fx-2.9 -cpu Conroe",
+ "level", 10);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/on",
+ "-machine pc-i440fx-2.9 -cpu Conroe,+erms",
+ "level", 10);
+
+ /*
+ * xlevel doesn't have any feature that triggers auto-level
+ * code on old machine-types. Just check that the compat code
+ * is working correctly:
+ */
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3",
+ "-machine pc-i440fx-2.3 -cpu SandyBridge",
+ "xlevel", 0x8000000a);
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off",
+ "-machine pc-i440fx-2.4 -cpu SandyBridge,",
+ "xlevel", 0x80000008);
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on",
+ "-machine pc-i440fx-2.4 -cpu SandyBridge,+npt",
+ "xlevel", 0x80000008);
/* Test feature parsing */
add_feature_test("x86/cpuid/features/plus",
diff --git a/tests/virtio-scsi-test.c b/tests/virtio-scsi-test.c
index 8b0f77a63e..eff71df81f 100644
--- a/tests/virtio-scsi-test.c
+++ b/tests/virtio-scsi-test.c
@@ -149,7 +149,7 @@ static QVirtIOSCSI *qvirtio_scsi_pci_init(int slot)
vs->qs = qvirtio_scsi_start("-drive file=blkdebug::null-co://,"
"if=none,id=dr1,format=raw,file.align=4k "
- "-device scsi-disk,drive=dr1,lun=0,scsi-id=1");
+ "-device scsi-hd,drive=dr1,lun=0,scsi-id=1");
dev = qvirtio_pci_device_find(vs->qs->pcibus, VIRTIO_ID_SCSI);
vs->dev = (QVirtioDevice *)dev;
g_assert(dev != NULL);