-rw-r--r--  fpu/softfloat.c                      | 108
-rw-r--r--  hw/acpi/cpu.c                        |   2
-rw-r--r--  hw/core/machine.c                    |  31
-rw-r--r--  hw/i386/pc.c                         | 126
-rw-r--r--  hw/net/spapr_llan.c                  |  18
-rw-r--r--  hw/pci-host/prep.c                   |  11
-rw-r--r--  hw/ppc/mac_newworld.c                |  15
-rw-r--r--  hw/ppc/pnv.c                         |   6
-rw-r--r--  hw/ppc/ppc405_uc.c                   |   6
-rw-r--r--  hw/ppc/ppc4xx_pci.c                  |  13
-rw-r--r--  hw/ppc/spapr.c                       | 255
-rw-r--r--  hw/ppc/spapr_cpu_core.c              | 137
-rw-r--r--  hw/ppc/spapr_ovec.c                  |  19
-rw-r--r--  hw/ppc/trace-events                  |  12
-rw-r--r--  include/fpu/softfloat.h              |   5
-rw-r--r--  include/hw/boards.h                  |  16
-rw-r--r--  include/hw/i386/pc.h                 |   1
-rw-r--r--  include/hw/ppc/spapr.h               |   1
-rw-r--r--  include/hw/ppc/spapr_cpu_core.h      |   6
-rw-r--r--  linux-user/main.c                    |   3
-rw-r--r--  monitor.c                            |   4
-rw-r--r--  target/ppc/cpu-qom.h                 |   5
-rw-r--r--  target/ppc/cpu.h                     |  20
-rw-r--r--  target/ppc/fpu_helper.c              | 312
-rw-r--r--  target/ppc/helper.h                  |  11
-rw-r--r--  target/ppc/internal.h                |   3
-rw-r--r--  target/ppc/kvm.c                     |  32
-rw-r--r--  target/ppc/kvm_ppc.h                 |   7
-rw-r--r--  target/ppc/mmu-hash64.c              |  44
-rw-r--r--  target/ppc/mmu_helper.c              |   4
-rw-r--r--  target/ppc/translate.c               | 153
-rw-r--r--  target/ppc/translate/vsx-impl.inc.c  |  11
-rw-r--r--  target/ppc/translate/vsx-ops.inc.c   |  21
-rw-r--r--  target/ppc/translate_init.c          |  27
-rw-r--r--  vl.c                                 |   2
35 files changed, 1093 insertions(+), 354 deletions(-)
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index c295f3183f..485a006aa7 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -623,6 +623,9 @@ static float64 roundAndPackFloat64(flag zSign, int zExp, uint64_t zSig,
case float_round_down:
roundIncrement = zSign ? 0x3ff : 0;
break;
+ case float_round_to_odd:
+ roundIncrement = (zSig & 0x400) ? 0 : 0x3ff;
+ break;
default:
abort();
}
@@ -632,8 +635,10 @@ static float64 roundAndPackFloat64(flag zSign, int zExp, uint64_t zSig,
|| ( ( zExp == 0x7FD )
&& ( (int64_t) ( zSig + roundIncrement ) < 0 ) )
) {
+ bool overflow_to_inf = roundingMode != float_round_to_odd &&
+ roundIncrement != 0;
float_raise(float_flag_overflow | float_flag_inexact, status);
- return packFloat64( zSign, 0x7FF, - ( roundIncrement == 0 ));
+ return packFloat64(zSign, 0x7FF, -(!overflow_to_inf));
}
if ( zExp < 0 ) {
if (status->flush_to_zero) {
@@ -651,6 +656,13 @@ static float64 roundAndPackFloat64(flag zSign, int zExp, uint64_t zSig,
if (isTiny && roundBits) {
float_raise(float_flag_underflow, status);
}
+ if (roundingMode == float_round_to_odd) {
+ /*
+ * For round-to-odd case, the roundIncrement depends on
+ * zSig which just changed.
+ */
+ roundIncrement = (zSig & 0x400) ? 0 : 0x3ff;
+ }
}
}
if (roundBits) {
@@ -1149,6 +1161,9 @@ static float128 roundAndPackFloat128(flag zSign, int32_t zExp,
case float_round_down:
increment = zSign && zSig2;
break;
+ case float_round_to_odd:
+ increment = !(zSig1 & 0x1) && zSig2;
+ break;
default:
abort();
}
@@ -1168,6 +1183,7 @@ static float128 roundAndPackFloat128(flag zSign, int32_t zExp,
if ( ( roundingMode == float_round_to_zero )
|| ( zSign && ( roundingMode == float_round_up ) )
|| ( ! zSign && ( roundingMode == float_round_down ) )
+ || (roundingMode == float_round_to_odd)
) {
return
packFloat128(
@@ -1215,6 +1231,9 @@ static float128 roundAndPackFloat128(flag zSign, int32_t zExp,
case float_round_down:
increment = zSign && zSig2;
break;
+ case float_round_to_odd:
+ increment = !(zSig1 & 0x1) && zSig2;
+ break;
default:
abort();
}
@@ -6109,6 +6128,93 @@ int64_t float128_to_int64_round_to_zero(float128 a, float_status *status)
}
/*----------------------------------------------------------------------------
+| Returns the result of converting the quadruple-precision floating-point value
+| `a' to the 64-bit unsigned integer format. The conversion is
+| performed according to the IEC/IEEE Standard for Binary Floating-Point
+| Arithmetic---which means in particular that the conversion is rounded
+| according to the current rounding mode. If `a' is a NaN, the largest
+| positive integer is returned. If the conversion overflows, the
+| largest unsigned integer is returned. If 'a' is negative, the value is
+| rounded and zero is returned; negative values that do not round to zero
+| will raise the inexact exception.
+*----------------------------------------------------------------------------*/
+
+uint64_t float128_to_uint64(float128 a, float_status *status)
+{
+ flag aSign;
+ int aExp;
+ int shiftCount;
+ uint64_t aSig0, aSig1;
+
+ aSig0 = extractFloat128Frac0(a);
+ aSig1 = extractFloat128Frac1(a);
+ aExp = extractFloat128Exp(a);
+ aSign = extractFloat128Sign(a);
+ if (aSign && (aExp > 0x3FFE)) {
+ float_raise(float_flag_invalid, status);
+ if (float128_is_any_nan(a)) {
+ return LIT64(0xFFFFFFFFFFFFFFFF);
+ } else {
+ return 0;
+ }
+ }
+ if (aExp) {
+ aSig0 |= LIT64(0x0001000000000000);
+ }
+ shiftCount = 0x402F - aExp;
+ if (shiftCount <= 0) {
+ if (0x403E < aExp) {
+ float_raise(float_flag_invalid, status);
+ return LIT64(0xFFFFFFFFFFFFFFFF);
+ }
+ shortShift128Left(aSig0, aSig1, -shiftCount, &aSig0, &aSig1);
+ } else {
+ shift64ExtraRightJamming(aSig0, aSig1, shiftCount, &aSig0, &aSig1);
+ }
+ return roundAndPackUint64(aSign, aSig0, aSig1, status);
+}
+
+uint64_t float128_to_uint64_round_to_zero(float128 a, float_status *status)
+{
+ uint64_t v;
+ signed char current_rounding_mode = status->float_rounding_mode;
+
+ set_float_rounding_mode(float_round_to_zero, status);
+ v = float128_to_uint64(a, status);
+ set_float_rounding_mode(current_rounding_mode, status);
+
+ return v;
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the quadruple-precision floating-point
+| value `a' to the 32-bit unsigned integer format. The conversion
+| is performed according to the IEC/IEEE Standard for Binary Floating-Point
+| Arithmetic except that the conversion is always rounded toward zero.
+| If `a' is a NaN, the largest positive integer is returned. Otherwise,
+| if the conversion overflows, the largest unsigned integer is returned.
+| If 'a' is negative, the value is rounded and zero is returned; negative
+| values that do not round to zero will raise the inexact exception.
+*----------------------------------------------------------------------------*/
+
+uint32_t float128_to_uint32_round_to_zero(float128 a, float_status *status)
+{
+ uint64_t v;
+ uint32_t res;
+ int old_exc_flags = get_float_exception_flags(status);
+
+ v = float128_to_uint64_round_to_zero(a, status);
+ if (v > 0xffffffff) {
+ res = 0xffffffff;
+ } else {
+ return v;
+ }
+ set_float_exception_flags(old_exc_flags, status);
+ float_raise(float_flag_invalid, status);
+ return res;
+}
+
+/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the single-precision floating-point format. The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
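
For reference, round-to-odd (sometimes called jamming) never discards the sticky information: whenever any dropped bits are non-zero, the least significant kept bit is forced to 1 rather than rounding up or down, which is what the roundIncrement/increment values in the hunks above achieve for float64 and float128. A minimal standalone sketch of the rule on a raw significand (the helper name is illustrative, not softfloat API; it assumes shift < 64):

    #include <stdbool.h>
    #include <stdint.h>

    /* Shift 'sig' right by 'shift' bits with round-to-odd: if any
     * discarded bit is set, OR 1 into the lowest kept bit. */
    static uint64_t shift_right_round_to_odd(uint64_t sig, unsigned shift,
                                             bool *inexact)
    {
        uint64_t discarded = shift ? sig & ((1ULL << shift) - 1) : 0;
        uint64_t kept = shift ? sig >> shift : sig;

        *inexact = discarded != 0;
        if (discarded) {
            kept |= 1;   /* make the result odd instead of rounding */
        }
        return kept;
    }

This is the mode the xsaddqpo/xsmulqpo/xsdivqpo/xscvqpdpo helpers later in this diff select via tstat.float_rounding_mode.
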
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index 6017ca04bf..8c719d3f9d 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -198,7 +198,7 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
state->dev_count = id_list->len;
state->devs = g_new0(typeof(*state->devs), state->dev_count);
for (i = 0; i < id_list->len; i++) {
- state->devs[i].cpu = id_list->cpus[i].cpu;
+ state->devs[i].cpu = CPU(id_list->cpus[i].cpu);
state->devs[i].arch_id = id_list->cpus[i].arch_id;
}
memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
diff --git a/hw/core/machine.c b/hw/core/machine.c
index b0fd91f6cd..0699750336 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -357,6 +357,37 @@ static void machine_init_notify(Notifier *notifier, void *data)
foreach_dynamic_sysbus_device(error_on_sysbus_device, NULL);
}
+HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine)
+{
+ int i;
+ Object *cpu;
+ HotpluggableCPUList *head = NULL;
+ const char *cpu_type;
+
+ cpu = machine->possible_cpus->cpus[0].cpu;
+ assert(cpu); /* Boot cpu is always present */
+ cpu_type = object_get_typename(cpu);
+ for (i = 0; i < machine->possible_cpus->len; i++) {
+ HotpluggableCPUList *list_item = g_new0(typeof(*list_item), 1);
+ HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1);
+
+ cpu_item->type = g_strdup(cpu_type);
+ cpu_item->vcpus_count = machine->possible_cpus->cpus[i].vcpus_count;
+ cpu_item->props = g_memdup(&machine->possible_cpus->cpus[i].props,
+ sizeof(*cpu_item->props));
+
+ cpu = machine->possible_cpus->cpus[i].cpu;
+ if (cpu) {
+ cpu_item->has_qom_path = true;
+ cpu_item->qom_path = object_get_canonical_path(cpu);
+ }
+ list_item->value = cpu_item;
+ list_item->next = head;
+ head = list_item;
+ }
+ return head;
+}
+
static void machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
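
machine_query_hotpluggable_cpus() above walks machine->possible_cpus and builds the QAPI HotpluggableCPUList by prepending each entry, so the returned list comes out in reverse slot order; qmp_query_hotpluggable_cpus() (see the monitor.c hunk below) hands it straight to the caller. The same prepend idiom in self-contained form, with plain C types standing in for the QAPI-generated ones:

    #include <stdlib.h>

    struct int_list { int value; struct int_list *next; };

    /* Prepend-build: element i of the source array ends up at
     * position len - 1 - i of the resulting list. */
    static struct int_list *list_from_array(const int *src, size_t len)
    {
        struct int_list *head = NULL;

        for (size_t i = 0; i < len; i++) {
            struct int_list *node = calloc(1, sizeof(*node));

            node->value = src[i];
            node->next = head;
            head = node;
        }
        return head;
    }
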
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 60b0946be3..d24388e05f 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -707,7 +707,8 @@ static void pc_build_smbios(PCMachineState *pcms)
size_t smbios_tables_len, smbios_anchor_len;
struct smbios_phys_mem_area *mem_array;
unsigned i, array_count;
- X86CPU *cpu = X86_CPU(pcms->possible_cpus->cpus[0].cpu);
+ MachineState *ms = MACHINE(pcms);
+ X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu);
/* tell smbios about cpuid version and features */
smbios_set_cpuid(cpu->env.cpuid_version, cpu->env.features[FEAT_1_EDX]);
@@ -1111,7 +1112,7 @@ static void pc_new_cpu(const char *typename, int64_t apic_id, Error **errp)
void pc_hot_add_cpu(const int64_t id, Error **errp)
{
ObjectClass *oc;
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+ MachineState *ms = MACHINE(qdev_get_machine());
int64_t apic_id = x86_cpu_apic_id_from_index(id);
Error *local_err = NULL;
@@ -1127,8 +1128,8 @@ void pc_hot_add_cpu(const int64_t id, Error **errp)
return;
}
- assert(pcms->possible_cpus->cpus[0].cpu); /* BSP is always present */
- oc = OBJECT_CLASS(CPU_GET_CLASS(pcms->possible_cpus->cpus[0].cpu));
+ assert(ms->possible_cpus->cpus[0].cpu); /* BSP is always present */
+ oc = OBJECT_CLASS(CPU_GET_CLASS(ms->possible_cpus->cpus[0].cpu));
pc_new_cpu(object_class_get_name(oc), apic_id, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -1143,7 +1144,9 @@ void pc_cpus_init(PCMachineState *pcms)
ObjectClass *oc;
const char *typename;
gchar **model_pieces;
+ const CPUArchIdList *possible_cpus;
MachineState *machine = MACHINE(pcms);
+ MachineClass *mc = MACHINE_GET_CLASS(pcms);
/* init CPUs */
if (machine->cpu_model == NULL) {
@@ -1178,20 +1181,16 @@ void pc_cpus_init(PCMachineState *pcms)
* This is used for FW_CFG_MAX_CPUS. See comments on bochs_bios_init().
*/
pcms->apic_id_limit = x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
- pcms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
- sizeof(CPUArchId) * max_cpus);
- for (i = 0; i < max_cpus; i++) {
- pcms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i);
- pcms->possible_cpus->len++;
- if (i < smp_cpus) {
- pc_new_cpu(typename, x86_cpu_apic_id_from_index(i), &error_fatal);
- }
+ possible_cpus = mc->possible_cpu_arch_ids(machine);
+ for (i = 0; i < smp_cpus; i++) {
+ pc_new_cpu(typename, possible_cpus->cpus[i].arch_id, &error_fatal);
}
}
static void pc_build_feature_control_file(PCMachineState *pcms)
{
- X86CPU *cpu = X86_CPU(pcms->possible_cpus->cpus[0].cpu);
+ MachineState *ms = MACHINE(pcms);
+ X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu);
CPUX86State *env = &cpu->env;
uint32_t unused, ecx, edx;
uint64_t feature_control_bits = 0;
@@ -1787,21 +1786,19 @@ static int pc_apic_cmp(const void *a, const void *b)
}
/* returns pointer to CPUArchId descriptor that matches CPU's apic_id
- * in pcms->possible_cpus->cpus, if pcms->possible_cpus->cpus has no
+ * in ms->possible_cpus->cpus, if ms->possible_cpus->cpus has no
* entry corresponding to CPU's apic_id returns NULL.
*/
-static CPUArchId *pc_find_cpu_slot(PCMachineState *pcms, CPUState *cpu,
- int *idx)
+static CPUArchId *pc_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchId apic_id, *found_cpu;
- apic_id.arch_id = cc->get_arch_id(CPU(cpu));
- found_cpu = bsearch(&apic_id, pcms->possible_cpus->cpus,
- pcms->possible_cpus->len, sizeof(*pcms->possible_cpus->cpus),
+ apic_id.arch_id = id;
+ found_cpu = bsearch(&apic_id, ms->possible_cpus->cpus,
+ ms->possible_cpus->len, sizeof(*ms->possible_cpus->cpus),
pc_apic_cmp);
if (found_cpu && idx) {
- *idx = found_cpu - pcms->possible_cpus->cpus;
+ *idx = found_cpu - ms->possible_cpus->cpus;
}
return found_cpu;
}
@@ -1812,6 +1809,7 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
CPUArchId *found_cpu;
HotplugHandlerClass *hhc;
Error *local_err = NULL;
+ X86CPU *cpu = X86_CPU(dev);
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
if (pcms->acpi_dev) {
@@ -1831,8 +1829,8 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
}
- found_cpu = pc_find_cpu_slot(pcms, CPU(dev), NULL);
- found_cpu->cpu = CPU(dev);
+ found_cpu = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, NULL);
+ found_cpu->cpu = OBJECT(dev);
out:
error_propagate(errp, local_err);
}
@@ -1842,9 +1840,10 @@ static void pc_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
int idx = -1;
HotplugHandlerClass *hhc;
Error *local_err = NULL;
+ X86CPU *cpu = X86_CPU(dev);
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
- pc_find_cpu_slot(pcms, CPU(dev), &idx);
+ pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, &idx);
assert(idx != -1);
if (idx == 0) {
error_setg(&local_err, "Boot CPU is unpluggable");
@@ -1869,6 +1868,7 @@ static void pc_cpu_unplug_cb(HotplugHandler *hotplug_dev,
CPUArchId *found_cpu;
HotplugHandlerClass *hhc;
Error *local_err = NULL;
+ X86CPU *cpu = X86_CPU(dev);
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
@@ -1878,7 +1878,7 @@ static void pc_cpu_unplug_cb(HotplugHandler *hotplug_dev,
goto out;
}
- found_cpu = pc_find_cpu_slot(pcms, CPU(dev), NULL);
+ found_cpu = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, NULL);
found_cpu->cpu = NULL;
object_unparent(OBJECT(dev));
@@ -1936,13 +1936,15 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
cpu->apic_id = apicid_from_topo_ids(smp_cores, smp_threads, &topo);
}
- cpu_slot = pc_find_cpu_slot(pcms, CPU(dev), &idx);
+ cpu_slot = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, &idx);
if (!cpu_slot) {
+ MachineState *ms = MACHINE(pcms);
+
x86_topo_ids_from_apicid(cpu->apic_id, smp_cores, smp_threads, &topo);
error_setg(errp, "Invalid CPU [socket: %u, core: %u, thread: %u] with"
" APIC ID %" PRIu32 ", valid index range 0:%d",
topo.pkg_id, topo.core_id, topo.smt_id, cpu->apic_id,
- pcms->possible_cpus->len - 1);
+ ms->possible_cpus->len - 1);
return;
}
@@ -1953,7 +1955,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
}
/* if 'address' properties socket-id/core-id/thread-id are not set, set them
- * so that query_hotpluggable_cpus would show correct values
+ * so that machine_query_hotpluggable_cpus would show correct values
*/
/* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
* once -smp refactoring is complete and there will be CPU private
@@ -2251,55 +2253,37 @@ static unsigned pc_cpu_index_to_socket_id(unsigned cpu_index)
return topo.pkg_id;
}
-static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *machine)
-{
- PCMachineState *pcms = PC_MACHINE(machine);
- assert(pcms->possible_cpus);
- return pcms->possible_cpus;
-}
-
-static HotpluggableCPUList *pc_query_hotpluggable_cpus(MachineState *machine)
+static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *ms)
{
int i;
- CPUState *cpu;
- HotpluggableCPUList *head = NULL;
- PCMachineState *pcms = PC_MACHINE(machine);
- const char *cpu_type;
- cpu = pcms->possible_cpus->cpus[0].cpu;
- assert(cpu); /* BSP is always present */
- cpu_type = object_class_get_name(OBJECT_CLASS(CPU_GET_CLASS(cpu)));
+ if (ms->possible_cpus) {
+ /*
+ * make sure that max_cpus hasn't changed since the first use, i.e.
+ * -smp hasn't been parsed after it
+ */
+ assert(ms->possible_cpus->len == max_cpus);
+ return ms->possible_cpus;
+ }
- for (i = 0; i < pcms->possible_cpus->len; i++) {
+ ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * max_cpus);
+ ms->possible_cpus->len = max_cpus;
+ for (i = 0; i < ms->possible_cpus->len; i++) {
X86CPUTopoInfo topo;
- HotpluggableCPUList *list_item = g_new0(typeof(*list_item), 1);
- HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1);
- CpuInstanceProperties *cpu_props = g_new0(typeof(*cpu_props), 1);
- const uint32_t apic_id = pcms->possible_cpus->cpus[i].arch_id;
-
- x86_topo_ids_from_apicid(apic_id, smp_cores, smp_threads, &topo);
-
- cpu_item->type = g_strdup(cpu_type);
- cpu_item->vcpus_count = 1;
- cpu_props->has_socket_id = true;
- cpu_props->socket_id = topo.pkg_id;
- cpu_props->has_core_id = true;
- cpu_props->core_id = topo.core_id;
- cpu_props->has_thread_id = true;
- cpu_props->thread_id = topo.smt_id;
- cpu_item->props = cpu_props;
-
- cpu = pcms->possible_cpus->cpus[i].cpu;
- if (cpu) {
- cpu_item->has_qom_path = true;
- cpu_item->qom_path = object_get_canonical_path(OBJECT(cpu));
- }
- list_item->value = cpu_item;
- list_item->next = head;
- head = list_item;
+ ms->possible_cpus->cpus[i].vcpus_count = 1;
+ ms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i);
+ x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id,
+ smp_cores, smp_threads, &topo);
+ ms->possible_cpus->cpus[i].props.has_socket_id = true;
+ ms->possible_cpus->cpus[i].props.socket_id = topo.pkg_id;
+ ms->possible_cpus->cpus[i].props.has_core_id = true;
+ ms->possible_cpus->cpus[i].props.core_id = topo.core_id;
+ ms->possible_cpus->cpus[i].props.has_thread_id = true;
+ ms->possible_cpus->cpus[i].props.thread_id = topo.smt_id;
}
- return head;
+ return ms->possible_cpus;
}
static void x86_nmi(NMIState *n, int cpu_index, Error **errp)
@@ -2342,7 +2326,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
mc->get_hotplug_handler = pc_get_hotpug_handler;
mc->cpu_index_to_socket_id = pc_cpu_index_to_socket_id;
mc->possible_cpu_arch_ids = pc_possible_cpu_arch_ids;
- mc->query_hotpluggable_cpus = pc_query_hotpluggable_cpus;
+ mc->has_hotpluggable_cpus = true;
mc->default_boot_order = "cad";
mc->hot_add_cpu = pc_hot_add_cpu;
mc->block_default_type = IF_IDE;
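
pc_possible_cpu_arch_ids() now allocates machine->possible_cpus lazily and caches it: CPUArchIdList is a length-prefixed structure with a flexible array member, so the header and all max_cpus slots come from a single zeroed allocation. A standalone sketch of that allocation pattern (the types here are illustrative stand-ins, not the QEMU definitions):

    #include <glib.h>
    #include <stdint.h>

    typedef struct {
        uint64_t arch_id;
        void *cpu;
    } SlotEntry;

    typedef struct {
        int len;
        SlotEntry slots[];   /* flexible array member, sized at allocation */
    } SlotList;

    static SlotList *slot_list_new(int n)
    {
        /* one zeroed allocation covers the header plus n entries */
        SlotList *list = g_malloc0(sizeof(SlotList) + sizeof(SlotEntry) * n);

        list->len = n;
        return list;
    }
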
diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c
index 058908d8d7..d239e4bd7d 100644
--- a/hw/net/spapr_llan.c
+++ b/hw/net/spapr_llan.c
@@ -385,18 +385,24 @@ static int spapr_vlan_devnode(VIOsPAPRDevice *dev, void *fdt, int node_off)
int ret;
/* Some old phyp versions give the mac address in an 8-byte
- * property. The kernel driver has an insane workaround for this;
+ * property. The kernel driver (before 3.10) has an insane workaround;
* rather than doing the obvious thing and checking the property
* length, it checks whether the first byte has 0b10 in the low
* bits. If a correct 6-byte property has a different first byte
* the kernel will get the wrong mac address, overrunning its
* buffer in the process (read only, thank goodness).
*
- * Here we workaround the kernel workaround by always supplying an
- * 8-byte property, with the mac address in the last six bytes */
- memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN);
- ret = fdt_setprop(fdt, node_off, "local-mac-address",
- padded_mac, sizeof(padded_mac));
+ * Here we return a 6-byte address unless that would break a pre-3.10
+ * driver. In that case we return a padded 8-byte address to allow the old
+ * workaround to succeed. */
+ if ((vdev->nicconf.macaddr.a[0] & 0x3) == 0x2) {
+ ret = fdt_setprop(fdt, node_off, "local-mac-address",
+ &vdev->nicconf.macaddr, ETH_ALEN);
+ } else {
+ memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN);
+ ret = fdt_setprop(fdt, node_off, "local-mac-address",
+ padded_mac, sizeof(padded_mac));
+ }
if (ret < 0) {
return ret;
}
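
The new test keys on the low two bits of the first MAC byte: the pre-3.10 guest driver only parses a 6-byte "local-mac-address" correctly when that byte ends in 0b10 (a locally administered unicast address), so the padded 8-byte form is kept only for addresses that would trip the old workaround. A small illustration of the check (the helper name is made up for the example):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the first MAC byte has 0b10 in its low two bits, the
     * only pattern the old guest driver decodes from a 6-byte property. */
    static bool old_driver_safe(const uint8_t mac[6])
    {
        return (mac[0] & 0x3) == 0x2;
    }

QEMU's usual 52:54:00:xx:xx:xx addresses satisfy the check (0x52 & 0x3 == 0x2), so they are emitted as plain 6-byte properties.
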
diff --git a/hw/pci-host/prep.c b/hw/pci-host/prep.c
index 5580293f93..260a119a9e 100644
--- a/hw/pci-host/prep.c
+++ b/hw/pci-host/prep.c
@@ -309,7 +309,6 @@ static void raven_realize(PCIDevice *d, Error **errp)
memory_region_set_readonly(&s->bios, true);
memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE),
&s->bios);
- vmstate_register_ram_global(&s->bios);
if (s->bios_name) {
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, s->bios_name);
if (filename) {
@@ -328,12 +327,15 @@ static void raven_realize(PCIDevice *d, Error **errp)
}
}
}
+ g_free(filename);
if (bios_size < 0 || bios_size > BIOS_SIZE) {
- /* FIXME should error_setg() */
- hw_error("qemu: could not load bios image '%s'\n", s->bios_name);
+ memory_region_del_subregion(get_system_memory(), &s->bios);
+ error_setg(errp, "Could not load bios image '%s'", s->bios_name);
+ return;
}
- g_free(filename);
}
+
+ vmstate_register_ram_global(&s->bios);
}
static const VMStateDescription vmstate_raven = {
@@ -361,7 +363,6 @@ static void raven_class_init(ObjectClass *klass, void *data)
/*
* Reason: PCI-facing part of the host bridge, not usable without
* the host-facing part, which can't be device_add'ed, yet.
- * Reason: realize() method uses hw_error().
*/
dc->cannot_instantiate_with_device_add_yet = true;
}
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
index 716aea6852..68aaedc06d 100644
--- a/hw/ppc/mac_newworld.c
+++ b/hw/ppc/mac_newworld.c
@@ -72,6 +72,7 @@
#include "exec/address-spaces.h"
#include "hw/sysbus.h"
#include "qemu/cutils.h"
+#include "trace.h"
#define MAX_IDE_BUS 2
#define CFG_ADDR 0xf0000510
@@ -79,21 +80,11 @@
#define CLOCKFREQ (266UL * 1000UL * 1000UL)
#define BUSFREQ (100UL * 1000UL * 1000UL)
-/* debug UniNorth */
-//#define DEBUG_UNIN
-
-#ifdef DEBUG_UNIN
-#define UNIN_DPRINTF(fmt, ...) \
- do { printf("UNIN: " fmt , ## __VA_ARGS__); } while (0)
-#else
-#define UNIN_DPRINTF(fmt, ...)
-#endif
-
/* UniN device */
static void unin_write(void *opaque, hwaddr addr, uint64_t value,
unsigned size)
{
- UNIN_DPRINTF("write addr " TARGET_FMT_plx " val %"PRIx64"\n", addr, value);
+ trace_mac99_uninorth_write(addr, value);
if (addr == 0x0) {
*(int*)opaque = value;
}
@@ -109,7 +100,7 @@ static uint64_t unin_read(void *opaque, hwaddr addr, unsigned size)
value = *(int*)opaque;
}
- UNIN_DPRINTF("readl addr " TARGET_FMT_plx " val %x\n", addr, value);
+ trace_mac99_uninorth_read(addr, value);
return value;
}
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index 4fab5c0ae7..09f0d22def 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -381,7 +381,7 @@ static void ppc_powernv_init(MachineState *machine)
fw_size = load_image_targphys(fw_filename, FW_LOAD_ADDR, FW_MAX_SIZE);
if (fw_size < 0) {
- error_report("qemu: could not load OPAL '%s'", fw_filename);
+ error_report("Could not load OPAL '%s'", fw_filename);
exit(1);
}
g_free(fw_filename);
@@ -393,7 +393,7 @@ static void ppc_powernv_init(MachineState *machine)
kernel_size = load_image_targphys(machine->kernel_filename,
KERNEL_LOAD_ADDR, 0x2000000);
if (kernel_size < 0) {
- error_report("qemu: could not load kernel'%s'",
+ error_report("Could not load kernel '%s'",
machine->kernel_filename);
exit(1);
}
@@ -405,7 +405,7 @@ static void ppc_powernv_init(MachineState *machine)
pnv->initrd_size = load_image_targphys(machine->initrd_filename,
pnv->initrd_base, 0x10000000); /* 128MB max */
if (pnv->initrd_size < 0) {
- error_report("qemu: could not load initial ram disk '%s'",
+ error_report("Could not load initial ram disk '%s'",
machine->initrd_filename);
exit(1);
}
diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c
index d6d3fc2c4a..d5df94aa6e 100644
--- a/hw/ppc/ppc405_uc.c
+++ b/hw/ppc/ppc405_uc.c
@@ -1881,7 +1881,7 @@ static void ppc405cr_clk_setup (ppc405cr_cpc_t *cpc)
D1 = (((cpc->pllmr >> 20) - 1) & 0xF) + 1; /* FBDV */
D2 = 8 - ((cpc->pllmr >> 16) & 0x7); /* FWDVA */
M = D0 * D1 * D2;
- VCO_out = cpc->sysclk * M;
+ VCO_out = (uint64_t)cpc->sysclk * M;
if (VCO_out < 400000000 || VCO_out > 800000000) {
/* PLL cannot lock */
cpc->pllmr &= ~0x80000000;
@@ -1892,7 +1892,7 @@ static void ppc405cr_clk_setup (ppc405cr_cpc_t *cpc)
/* Bypass PLL */
bypass_pll:
M = D0;
- PLL_out = cpc->sysclk * M;
+ PLL_out = (uint64_t)cpc->sysclk * M;
}
CPU_clk = PLL_out;
if (cpc->cr1 & 0x00800000)
@@ -2242,7 +2242,7 @@ static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc)
#ifdef DEBUG_CLOCKS_LL
printf("FWDA %01" PRIx32 " %d\n", (cpc->pllmr[1] >> 16) & 0x7, D);
#endif
- VCO_out = cpc->sysclk * M * D;
+ VCO_out = (uint64_t)cpc->sysclk * M * D;
if (VCO_out < 500000000UL || VCO_out > 1000000000UL) {
/* Error - unlock the PLL */
printf("VCO out of range %" PRIu64 "\n", VCO_out);
diff --git a/hw/ppc/ppc4xx_pci.c b/hw/ppc/ppc4xx_pci.c
index 683218e5c5..dc19682970 100644
--- a/hw/ppc/ppc4xx_pci.c
+++ b/hw/ppc/ppc4xx_pci.c
@@ -26,13 +26,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "exec/address-spaces.h"
-
-#undef DEBUG
-#ifdef DEBUG
-#define DPRINTF(fmt, ...) do { printf(fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...)
-#endif /* DEBUG */
+#include "trace.h"
struct PCIMasterMap {
uint32_t la;
@@ -249,8 +243,7 @@ static int ppc4xx_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
int slot = pci_dev->devfn >> 3;
- DPRINTF("%s: devfn %x irq %d -> %d\n", __func__,
- pci_dev->devfn, irq_num, slot);
+ trace_ppc4xx_pci_map_irq(pci_dev->devfn, irq_num, slot);
return slot - 1;
}
@@ -259,7 +252,7 @@ static void ppc4xx_pci_set_irq(void *opaque, int irq_num, int level)
{
qemu_irq *pci_irqs = opaque;
- DPRINTF("%s: PCI irq %d\n", __func__, irq_num);
+ trace_ppc4xx_pci_set_irq(irq_num);
if (irq_num < 0) {
fprintf(stderr, "%s: PCI irq %d\n", __func__, irq_num);
return;
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index e465d7ac98..5904e6498f 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -958,7 +958,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
_FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
}
- if (mc->query_hotpluggable_cpus) {
+ if (mc->has_hotpluggable_cpus) {
int offset = fdt_path_offset(fdt, "/cpus");
ret = spapr_drc_populate_dt(fdt, offset, NULL,
SPAPR_DR_CONNECTOR_TYPE_CPU);
@@ -1751,13 +1751,28 @@ static void spapr_validate_node_memory(MachineState *machine, Error **errp)
}
}
+/* find cpu slot in machine->possible_cpus by core_id */
+static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
+{
+ int index = id / smp_threads;
+
+ if (index >= ms->possible_cpus->len) {
+ return NULL;
+ }
+ if (idx) {
+ *idx = index;
+ }
+ return &ms->possible_cpus->cpus[index];
+}
+
static void spapr_init_cpus(sPAPRMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
MachineClass *mc = MACHINE_GET_CLASS(machine);
char *type = spapr_get_cpu_core_type(machine->cpu_model);
int smt = kvmppc_smt_threads();
- int spapr_max_cores, spapr_cores;
+ const CPUArchIdList *possible_cpus;
+ int boot_cores_nr = smp_cpus / smp_threads;
int i;
if (!type) {
@@ -1765,7 +1780,8 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
exit(1);
}
- if (mc->query_hotpluggable_cpus) {
+ possible_cpus = mc->possible_cpu_arch_ids(machine);
+ if (mc->has_hotpluggable_cpus) {
if (smp_cpus % smp_threads) {
error_report("smp_cpus (%u) must be multiple of threads (%u)",
smp_cpus, smp_threads);
@@ -1776,24 +1792,18 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
max_cpus, smp_threads);
exit(1);
}
-
- spapr_max_cores = max_cpus / smp_threads;
- spapr_cores = smp_cpus / smp_threads;
} else {
if (max_cpus != smp_cpus) {
error_report("This machine version does not support CPU hotplug");
exit(1);
}
-
- spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
- spapr_cores = spapr_max_cores;
+ boot_cores_nr = possible_cpus->len;
}
- spapr->cores = g_new0(Object *, spapr_max_cores);
- for (i = 0; i < spapr_max_cores; i++) {
+ for (i = 0; i < possible_cpus->len; i++) {
int core_id = i * smp_threads;
- if (mc->query_hotpluggable_cpus) {
+ if (mc->has_hotpluggable_cpus) {
sPAPRDRConnector *drc =
spapr_dr_connector_new(OBJECT(spapr),
SPAPR_DR_CONNECTOR_TYPE_CPU,
@@ -1802,7 +1812,7 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
qemu_register_reset(spapr_drc_reset, drc);
}
- if (i < spapr_cores) {
+ if (i < boot_cores_nr) {
Object *core = object_new(type);
int nr_threads = smp_threads;
@@ -2357,6 +2367,7 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
uint64_t align = memory_region_get_alignment(mr);
uint64_t size = memory_region_size(mr);
uint64_t addr;
+ char *mem_dev;
if (size % SPAPR_MEMORY_BLOCK_SIZE) {
error_setg(&local_err, "Hotplugged memory size must be a multiple of "
@@ -2364,6 +2375,13 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
goto out;
}
+ mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
+ if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
+ error_setg(&local_err, "Memory backend has bad page size. "
+ "Use 'memory-backend-file' with correct mem-path.");
+ goto out;
+ }
+
pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
if (local_err) {
goto out;
@@ -2488,6 +2506,165 @@ void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
return fdt;
}
+static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ CPUCore *cc = CPU_CORE(dev);
+ CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
+
+ core_slot->cpu = NULL;
+ object_unparent(OBJECT(dev));
+}
+
+static void spapr_core_release(DeviceState *dev, void *opaque)
+{
+ HotplugHandler *hotplug_ctrl;
+
+ hotplug_ctrl = qdev_get_hotplug_handler(dev);
+ hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
+}
+
+static
+void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ int index;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ Error *local_err = NULL;
+ CPUCore *cc = CPU_CORE(dev);
+ int smt = kvmppc_smt_threads();
+
+ if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
+ error_setg(errp, "Unable to find CPU core with core-id: %d",
+ cc->core_id);
+ return;
+ }
+ if (index == 0) {
+ error_setg(errp, "Boot CPU core may not be unplugged");
+ return;
+ }
+
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
+ g_assert(drc);
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ drck->detach(drc, dev, spapr_core_release, NULL, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ spapr_hotplug_req_remove_by_index(drc);
+}
+
+static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
+ MachineClass *mc = MACHINE_GET_CLASS(spapr);
+ sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
+ CPUCore *cc = CPU_CORE(dev);
+ CPUState *cs = CPU(core->threads);
+ sPAPRDRConnector *drc;
+ Error *local_err = NULL;
+ void *fdt = NULL;
+ int fdt_offset = 0;
+ int smt = kvmppc_smt_threads();
+ CPUArchId *core_slot;
+ int index;
+
+ core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
+ if (!core_slot) {
+ error_setg(errp, "Unable to find CPU core with core-id: %d",
+ cc->core_id);
+ return;
+ }
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
+
+ g_assert(drc || !mc->has_hotpluggable_cpus);
+
+ /*
+ * Setup CPU DT entries only for hotplugged CPUs. For boot time or
+ * coldplugged CPUs DT entries are setup in spapr_build_fdt().
+ */
+ if (dev->hotplugged) {
+ fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
+ }
+
+ if (drc) {
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, &local_err);
+ if (local_err) {
+ g_free(fdt);
+ error_propagate(errp, local_err);
+ return;
+ }
+ }
+
+ if (dev->hotplugged) {
+ /*
+ * Send hotplug notification interrupt to the guest only in case
+ * of hotplugged CPUs.
+ */
+ spapr_hotplug_req_add_by_index(drc);
+ } else {
+ /*
+ * Set the right DRC states for cold plugged CPU.
+ */
+ if (drc) {
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
+ drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
+ }
+ }
+ core_slot->cpu = OBJECT(dev);
+}
+
+static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+ MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
+ Error *local_err = NULL;
+ CPUCore *cc = CPU_CORE(dev);
+ char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
+ const char *type = object_get_typename(OBJECT(dev));
+ CPUArchId *core_slot;
+ int index;
+
+ if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
+ error_setg(&local_err, "CPU hotplug not supported for this machine");
+ goto out;
+ }
+
+ if (strcmp(base_core_type, type)) {
+ error_setg(&local_err, "CPU core type should be %s", base_core_type);
+ goto out;
+ }
+
+ if (cc->core_id % smp_threads) {
+ error_setg(&local_err, "invalid core id %d", cc->core_id);
+ goto out;
+ }
+
+ core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
+ if (!core_slot) {
+ error_setg(&local_err, "core id %d out of range", cc->core_id);
+ goto out;
+ }
+
+ if (core_slot->cpu) {
+ error_setg(&local_err, "core %d already populated", cc->core_id);
+ goto out;
+ }
+
+out:
+ g_free(base_core_type);
+ error_propagate(errp, local_err);
+}
+
static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -2550,7 +2727,7 @@ static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
error_setg(errp, "Memory hot unplug not supported for this guest");
}
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
- if (!mc->query_hotpluggable_cpus) {
+ if (!mc->has_hotpluggable_cpus) {
error_setg(errp, "CPU hot unplug not supported on this machine");
return;
}
@@ -2577,11 +2754,11 @@ static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
error_setg(errp, "Memory hot unplug not supported for this guest");
}
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
- if (!mc->query_hotpluggable_cpus) {
+ if (!mc->has_hotpluggable_cpus) {
error_setg(errp, "CPU hot unplug not supported on this machine");
return;
}
- spapr_core_unplug(hotplug_dev, dev, errp);
+ spapr_core_unplug_request(hotplug_dev, dev, errp);
}
}
@@ -2610,35 +2787,34 @@ static unsigned spapr_cpu_index_to_socket_id(unsigned cpu_index)
return cpu_index / smp_threads / smp_cores;
}
-static HotpluggableCPUList *spapr_query_hotpluggable_cpus(MachineState *machine)
+static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
int i;
- HotpluggableCPUList *head = NULL;
- sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
int spapr_max_cores = max_cpus / smp_threads;
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
- for (i = 0; i < spapr_max_cores; i++) {
- HotpluggableCPUList *list_item = g_new0(typeof(*list_item), 1);
- HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1);
- CpuInstanceProperties *cpu_props = g_new0(typeof(*cpu_props), 1);
+ if (!mc->has_hotpluggable_cpus) {
+ spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
+ }
+ if (machine->possible_cpus) {
+ assert(machine->possible_cpus->len == spapr_max_cores);
+ return machine->possible_cpus;
+ }
+
+ machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * spapr_max_cores);
+ machine->possible_cpus->len = spapr_max_cores;
+ for (i = 0; i < machine->possible_cpus->len; i++) {
+ int core_id = i * smp_threads;
- cpu_item->type = spapr_get_cpu_core_type(machine->cpu_model);
- cpu_item->vcpus_count = smp_threads;
- cpu_props->has_core_id = true;
- cpu_props->core_id = i * smp_threads;
+ machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
+ machine->possible_cpus->cpus[i].arch_id = core_id;
+ machine->possible_cpus->cpus[i].props.has_core_id = true;
+ machine->possible_cpus->cpus[i].props.core_id = core_id;
/* TODO: add 'has_node/node' here to describe
to which node core belongs */
-
- cpu_item->props = cpu_props;
- if (spapr->cores[i]) {
- cpu_item->has_qom_path = true;
- cpu_item->qom_path = object_get_canonical_path(spapr->cores[i]);
- }
- list_item->value = cpu_item;
- list_item->next = head;
- head = list_item;
}
- return head;
+ return machine->possible_cpus;
}
static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
@@ -2724,11 +2900,12 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
hc->plug = spapr_machine_device_plug;
hc->unplug = spapr_machine_device_unplug;
mc->cpu_index_to_socket_id = spapr_cpu_index_to_socket_id;
+ mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
hc->unplug_request = spapr_machine_device_unplug_request;
smc->dr_lmb_enabled = true;
smc->tcg_default_cpu = "POWER8";
- mc->query_hotpluggable_cpus = spapr_query_hotpluggable_cpus;
+ mc->has_hotpluggable_cpus = true;
fwc->get_dev_path = spapr_get_fw_dev_path;
nc->nmi_monitor_handler = spapr_nmi;
smc->phb_placement = spapr_phb_placement;
@@ -2928,7 +3105,7 @@ static void spapr_machine_2_6_instance_options(MachineState *machine)
static void spapr_machine_2_6_class_options(MachineClass *mc)
{
spapr_machine_2_7_class_options(mc);
- mc->query_hotpluggable_cpus = NULL;
+ mc->has_hotpluggable_cpus = false;
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
}
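
spapr_find_cpu_slot() identifies a core's slot purely arithmetically: core-id must be a multiple of smp_threads (enforced in spapr_core_pre_plug()), the slot index is core_id / smp_threads, and the matching DRC id is that index times kvmppc_smt_threads(). A compact sketch of the id-to-slot mapping (names and the combined range check are illustrative):

    #include <stdbool.h>

    /* Map a core-id onto a slot index; reject ids that are not
     * thread-aligned or that fall outside the possible-CPU range. */
    static bool core_id_to_slot(int core_id, int threads_per_core,
                                int nr_slots, int *slot)
    {
        if (core_id % threads_per_core ||
            core_id / threads_per_core >= nr_slots) {
            return false;
        }
        *slot = core_id / threads_per_core;
        return true;
    }

For example, with smp_threads=8 and max_cpus=32 there are four slots, reachable with core-ids 0, 8, 16 and 24.
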
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 9dddaeb3fa..55cd0456eb 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -109,13 +109,12 @@ char *spapr_get_cpu_core_type(const char *model)
return core_type;
}
-static void spapr_core_release(DeviceState *dev, void *opaque)
+static void spapr_cpu_core_unrealizefn(DeviceState *dev, Error **errp)
{
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
const char *typename = object_class_get_name(scc->cpu_class);
size_t size = object_type_get_instance_size(typename);
- sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
CPUCore *cc = CPU_CORE(dev);
int i;
@@ -129,140 +128,7 @@ static void spapr_core_release(DeviceState *dev, void *opaque)
cpu_remove_sync(cs);
object_unparent(obj);
}
-
- spapr->cores[cc->core_id / smp_threads] = NULL;
-
g_free(sc->threads);
- object_unparent(OBJECT(dev));
-}
-
-void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
-{
- CPUCore *cc = CPU_CORE(dev);
- int smt = kvmppc_smt_threads();
- int index = cc->core_id / smp_threads;
- sPAPRDRConnector *drc =
- spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
- sPAPRDRConnectorClass *drck;
- Error *local_err = NULL;
-
- if (index == 0) {
- error_setg(errp, "Boot CPU core may not be unplugged");
- return;
- }
-
- g_assert(drc);
-
- drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- drck->detach(drc, dev, spapr_core_release, NULL, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
-
- spapr_hotplug_req_remove_by_index(drc);
-}
-
-void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
-{
- sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
- MachineClass *mc = MACHINE_GET_CLASS(spapr);
- sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
- CPUCore *cc = CPU_CORE(dev);
- CPUState *cs = CPU(core->threads);
- sPAPRDRConnector *drc;
- Error *local_err = NULL;
- void *fdt = NULL;
- int fdt_offset = 0;
- int index = cc->core_id / smp_threads;
- int smt = kvmppc_smt_threads();
-
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
- spapr->cores[index] = OBJECT(dev);
-
- g_assert(drc || !mc->query_hotpluggable_cpus);
-
- /*
- * Setup CPU DT entries only for hotplugged CPUs. For boot time or
- * coldplugged CPUs DT entries are setup in spapr_build_fdt().
- */
- if (dev->hotplugged) {
- fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
- }
-
- if (drc) {
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, &local_err);
- if (local_err) {
- g_free(fdt);
- spapr->cores[index] = NULL;
- error_propagate(errp, local_err);
- return;
- }
- }
-
- if (dev->hotplugged) {
- /*
- * Send hotplug notification interrupt to the guest only in case
- * of hotplugged CPUs.
- */
- spapr_hotplug_req_add_by_index(drc);
- } else {
- /*
- * Set the right DRC states for cold plugged CPU.
- */
- if (drc) {
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
- drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
- }
- }
-}
-
-void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
-{
- MachineState *machine = MACHINE(OBJECT(hotplug_dev));
- MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
- sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
- int spapr_max_cores = max_cpus / smp_threads;
- int index;
- Error *local_err = NULL;
- CPUCore *cc = CPU_CORE(dev);
- char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
- const char *type = object_get_typename(OBJECT(dev));
-
- if (dev->hotplugged && !mc->query_hotpluggable_cpus) {
- error_setg(&local_err, "CPU hotplug not supported for this machine");
- goto out;
- }
-
- if (strcmp(base_core_type, type)) {
- error_setg(&local_err, "CPU core type should be %s", base_core_type);
- goto out;
- }
-
- if (cc->core_id % smp_threads) {
- error_setg(&local_err, "invalid core id %d", cc->core_id);
- goto out;
- }
-
- index = cc->core_id / smp_threads;
- if (index < 0 || index >= spapr_max_cores) {
- error_setg(&local_err, "core id %d out of range", cc->core_id);
- goto out;
- }
-
- if (spapr->cores[index]) {
- error_setg(&local_err, "core %d already populated", cc->core_id);
- goto out;
- }
-
-out:
- g_free(base_core_type);
- error_propagate(errp, local_err);
}
static void spapr_cpu_core_realize_child(Object *child, Error **errp)
@@ -368,6 +234,7 @@ void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_CLASS(oc);
dc->realize = spapr_cpu_core_realize;
+ dc->unrealize = spapr_cpu_core_unrealizefn;
scc->cpu_class = cpu_class_by_name(TYPE_POWERPC_CPU, data);
g_assert(scc->cpu_class);
}
diff --git a/hw/ppc/spapr_ovec.c b/hw/ppc/spapr_ovec.c
index 3eb1d5976f..41df4c35ba 100644
--- a/hw/ppc/spapr_ovec.c
+++ b/hw/ppc/spapr_ovec.c
@@ -16,18 +16,9 @@
#include "qemu/bitmap.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
+#include "trace.h"
#include <libfdt.h>
-/* #define DEBUG_SPAPR_OVEC */
-
-#ifdef DEBUG_SPAPR_OVEC
-#define DPRINTFN(fmt, ...) \
- do { fprintf(stderr, fmt "\n", ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTFN(fmt, ...) \
- do { } while (0)
-#endif
-
#define OV_MAXBYTES 256 /* not including length byte */
#define OV_MAXBITS (OV_MAXBYTES * BITS_PER_BYTE)
@@ -210,8 +201,7 @@ sPAPROptionVector *spapr_ovec_parse_vector(target_ulong table_addr, int vector)
for (i = 0; i < vector_len; i++) {
uint8_t entry = ldub_phys(&address_space_memory, addr + i);
if (entry) {
- DPRINTFN("read guest vector %2d, byte %3d / %3d: 0x%.2x",
- vector, i + 1, vector_len, entry);
+ trace_spapr_ovec_parse_vector(vector, i + 1, vector_len, entry);
guest_byte_to_bitmap(entry, ov->bitmap, i * BITS_PER_BYTE);
}
}
@@ -245,10 +235,9 @@ int spapr_ovec_populate_dt(void *fdt, int fdt_offset,
for (i = 1; i < vec_len + 1; i++) {
vec[i] = guest_byte_from_bitmap(ov->bitmap, (i - 1) * BITS_PER_BYTE);
if (vec[i]) {
- DPRINTFN("encoding guest vector byte %3d / %3d: 0x%.2x",
- i, vec_len, vec[i]);
+ trace_spapr_ovec_populate_dt(i, vec_len, vec[i]);
}
}
- return fdt_setprop(fdt, fdt_offset, name, vec, vec_len);
+ return fdt_setprop(fdt, fdt_offset, name, vec, vec_len + 1);
}
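
The spapr_ovec_populate_dt() change fixes an off-by-one: vec[] holds a leading header byte plus vec_len payload bytes (the loop writes indices 1..vec_len), so the device-tree property must span vec_len + 1 bytes; the old call silently dropped the final payload byte. A tiny sketch of the layout (the header-byte encoding is an assumption for illustration):

    #include <stdint.h>
    #include <string.h>

    /* Pack a header byte followed by 'payload_len' payload bytes and
     * return the total size to hand to fdt_setprop(). */
    static int pack_length_prefixed(uint8_t *buf, uint8_t header,
                                    const uint8_t *payload, uint8_t payload_len)
    {
        buf[0] = header;
        memcpy(buf + 1, payload, payload_len);
        return payload_len + 1;
    }
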
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index f46995cdb2..43d265f351 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -56,6 +56,10 @@ spapr_drc_realize_child(uint32_t index, char *childname) "drc: 0x%"PRIx32", chil
spapr_drc_realize_complete(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_unrealize(uint32_t index) "drc: 0x%"PRIx32
+# hw/ppc/spapr_ovec.c
+spapr_ovec_parse_vector(int vector, int byte, uint16_t vec_len, uint8_t entry) "read guest vector %2d, byte %3d / %3d: 0x%.2x"
+spapr_ovec_populate_dt(int byte, uint16_t vec_len, uint8_t entry) "encoding guest vector byte %3d / %3d: 0x%.2x"
+
# hw/ppc/spapr_rtas.c
spapr_rtas_set_indicator_invalid(uint32_t index) "sensor index: 0x%"PRIx32
spapr_rtas_set_indicator_not_supported(uint32_t index, uint32_t type) "sensor index: 0x%"PRIx32", type: %"PRIu32
@@ -85,3 +89,11 @@ rs6000mc_presence_read(uint32_t addr, uint32_t val) "read addr=%x val=%x"
rs6000mc_size_read(uint32_t addr, uint32_t val) "read addr=%x val=%x"
rs6000mc_size_write(uint32_t addr, uint32_t val) "write addr=%x val=%x"
rs6000mc_parity_read(uint32_t addr, uint32_t val) "read addr=%x val=%x"
+
+# hw/ppc/mac_newworld.c
+mac99_uninorth_write(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
+mac99_uninorth_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
+
+# hw/ppc/ppc4xx_pci.c
+ppc4xx_pci_map_irq(int32_t devfn, int irq_num, int slot) "devfn %x irq %d -> %d"
+ppc4xx_pci_set_irq(int irq_num) "PCI irq %d"
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index 842ec6b22a..f1288efa87 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -180,6 +180,8 @@ enum {
float_round_up = 2,
float_round_to_zero = 3,
float_round_ties_away = 4,
+ /* Not an IEEE rounding mode: round to the closest odd mantissa value */
+ float_round_to_odd = 5,
};
/*----------------------------------------------------------------------------
@@ -712,6 +714,9 @@ int32_t float128_to_int32(float128, float_status *status);
int32_t float128_to_int32_round_to_zero(float128, float_status *status);
int64_t float128_to_int64(float128, float_status *status);
int64_t float128_to_int64_round_to_zero(float128, float_status *status);
+uint64_t float128_to_uint64(float128, float_status *status);
+uint64_t float128_to_uint64_round_to_zero(float128, float_status *status);
+uint32_t float128_to_uint32_round_to_zero(float128, float_status *status);
float32 float128_to_float32(float128, float_status *status);
float64 float128_to_float64(float128, float_status *status);
floatx80 float128_to_floatx80(float128, float_status *status);
diff --git a/include/hw/boards.h b/include/hw/boards.h
index ac891a828b..269d0ba399 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -41,15 +41,20 @@ int machine_phandle_start(MachineState *machine);
bool machine_dump_guest_core(MachineState *machine);
bool machine_mem_merge(MachineState *machine);
void machine_register_compat_props(MachineState *machine);
+HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine);
/**
* CPUArchId:
* @arch_id - architecture-dependent CPU ID of present or possible CPU
* @cpu - pointer to corresponding CPU object if it's present on NULL otherwise
+ * @props - CPU object properties, initialized by board
+ * #vcpus_count - number of threads provided by @cpu object
*/
typedef struct {
uint64_t arch_id;
- struct CPUState *cpu;
+ int64_t vcpus_count;
+ CpuInstanceProperties props;
+ Object *cpu;
} CPUArchId;
/**
@@ -82,10 +87,8 @@ typedef struct {
* Returns an array of @CPUArchId architecture-dependent CPU IDs
* which includes CPU IDs for present and possible to hotplug CPUs.
* Caller is responsible for freeing returned list.
- * @query_hotpluggable_cpus:
- * Returns a @HotpluggableCPUList, which describes CPUs objects which
- * could be added with -device/device_add.
- * Caller is responsible for freeing returned list.
+ * @has_hotpluggable_cpus:
+ * If true, board supports CPUs creation with -device/device_add.
* @minimum_page_bits:
* If non-zero, the board promises never to create a CPU with a page size
* smaller than this, so QEMU can use a more efficient larger page
@@ -131,12 +134,12 @@ struct MachineClass {
bool option_rom_has_mr;
bool rom_file_has_mr;
int minimum_page_bits;
+ bool has_hotpluggable_cpus;
HotplugHandler *(*get_hotplug_handler)(MachineState *machine,
DeviceState *dev);
unsigned (*cpu_index_to_socket_id)(unsigned cpu_index);
const CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine);
- HotpluggableCPUList *(*query_hotpluggable_cpus)(MachineState *machine);
};
/**
@@ -178,6 +181,7 @@ struct MachineState {
char *initrd_filename;
const char *cpu_model;
AccelState *accelerator;
+ CPUArchIdList *possible_cpus;
};
#define DEFINE_MACHINE(namestr, machine_initfn) \
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 079e8d9393..d1f45540a1 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -73,7 +73,6 @@ struct PCMachineState {
/* CPU and apic information: */
bool apic_xrupt_override;
unsigned apic_id_limit;
- CPUArchIdList *possible_cpus;
uint16_t boot_cpus;
/* NUMA information: */
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index a2d8964f7e..f9b17d860a 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -94,7 +94,6 @@ struct sPAPRMachineState {
/*< public >*/
char *kvm_type;
MemoryHotplugState hotplug_memory;
- Object **cores;
};
#define H_SUCCESS 0
diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
index 50292f48b1..3c35665221 100644
--- a/include/hw/ppc/spapr_cpu_core.h
+++ b/include/hw/ppc/spapr_cpu_core.h
@@ -34,12 +34,6 @@ typedef struct sPAPRCPUCoreClass {
ObjectClass *cpu_class;
} sPAPRCPUCoreClass;
-void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp);
char *spapr_get_cpu_core_type(const char *model);
-void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp);
-void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp);
void spapr_cpu_core_class_init(ObjectClass *oc, void *data);
#endif
diff --git a/linux-user/main.c b/linux-user/main.c
index 4fd49ce6b6..9645122aa6 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -1712,10 +1712,12 @@ void cpu_loop(CPUPPCState *env)
* in syscalls.
*/
env->crf[0] &= ~0x1;
+ env->nip += 4;
ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
env->gpr[5], env->gpr[6], env->gpr[7],
env->gpr[8], 0, 0);
if (ret == -TARGET_ERESTARTSYS) {
+ env->nip -= 4;
break;
}
if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
@@ -1723,7 +1725,6 @@ void cpu_loop(CPUPPCState *env)
Avoid corrupting register state. */
break;
}
- env->nip += 4;
if (ret > (target_ulong)(-515)) {
env->crf[0] |= 0x1;
ret = -ret;
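
The linux-user change moves the nip adjustment ahead of the syscall: the PC is advanced past the 4-byte sc instruction before do_syscall() runs, so anything that inspects it mid-syscall (signal delivery, for instance) sees the return address, and it is wound back only on -TARGET_ERESTARTSYS so the same sc is re-executed. The pattern in isolation (the sentinel value and function type are illustrative):

    #include <stdint.h>

    #define RESTART_SENTINEL (-512L)   /* stands in for -TARGET_ERESTARTSYS */

    typedef long (*syscall_fn)(void *cpu_env);

    /* Advance the PC past the trap before dispatch; step back so the
     * trap is re-executed when the syscall must be restarted. */
    static long run_syscall(uint64_t *pc, syscall_fn dispatch, void *cpu_env)
    {
        long ret;

        *pc += 4;
        ret = dispatch(cpu_env);
        if (ret == RESTART_SENTINEL) {
            *pc -= 4;
        }
        return ret;
    }
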
diff --git a/monitor.c b/monitor.c
index 5953fc984f..ead4c633fb 100644
--- a/monitor.c
+++ b/monitor.c
@@ -4181,10 +4181,10 @@ HotpluggableCPUList *qmp_query_hotpluggable_cpus(Error **errp)
MachineState *ms = MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(ms);
- if (!mc->query_hotpluggable_cpus) {
+ if (!mc->has_hotpluggable_cpus) {
error_setg(errp, QERR_FEATURE_DISABLED, "query-hotpluggable-cpus");
return NULL;
}
- return mc->query_hotpluggable_cpus(ms);
+ return machine_query_hotpluggable_cpus(ms);
}
diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h
index b7977bad18..4e3132b56b 100644
--- a/target/ppc/cpu-qom.h
+++ b/target/ppc/cpu-qom.h
@@ -86,10 +86,13 @@ enum powerpc_mmu_t {
POWERPC_MMU_2_07 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
| POWERPC_MMU_64K
| POWERPC_MMU_AMR | 0x00000004,
- /* FIXME Add POWERPC_MMU_3_OO defines */
/* Architecture 2.07 "degraded" (no 1T segments) */
POWERPC_MMU_2_07a = POWERPC_MMU_64 | POWERPC_MMU_AMR
| 0x00000004,
+ /* Architecture 3.00 variant */
+ POWERPC_MMU_3_00 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+ | POWERPC_MMU_64K
+ | POWERPC_MMU_AMR | 0x00000005,
};
/*****************************************************************************/
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index bc2a2ce431..425e79d52d 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -381,15 +381,22 @@ struct ppc_slb_t {
#define LPCR_ISL (1ull << (63 - 2))
#define LPCR_KBV (1ull << (63 - 3))
#define LPCR_DPFD_SHIFT (63 - 11)
-#define LPCR_DPFD (0x3ull << LPCR_DPFD_SHIFT)
+#define LPCR_DPFD (0x7ull << LPCR_DPFD_SHIFT)
#define LPCR_VRMASD_SHIFT (63 - 16)
#define LPCR_VRMASD (0x1full << LPCR_VRMASD_SHIFT)
+/* P9: Power-saving mode Exit Cause Enable (Upper Section) Mask */
+#define LPCR_PECE_U_SHIFT (63 - 19)
+#define LPCR_PECE_U_MASK (0x7ull << LPCR_PECE_U_SHIFT)
+#define LPCR_HVEE (1ull << (63 - 17)) /* Hypervisor Virt Exit Enable */
#define LPCR_RMLS_SHIFT (63 - 37)
#define LPCR_RMLS (0xfull << LPCR_RMLS_SHIFT)
#define LPCR_ILE (1ull << (63 - 38))
#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */
#define LPCR_AIL (3ull << LPCR_AIL_SHIFT)
+#define LPCR_UPRT (1ull << (63 - 41)) /* Use Process Table */
+#define LPCR_EVIRT (1ull << (63 - 42)) /* Enhanced Virtualisation */
#define LPCR_ONL (1ull << (63 - 45))
+#define LPCR_LD (1ull << (63 - 46)) /* Large Decrementer */
#define LPCR_P7_PECE0 (1ull << (63 - 49))
#define LPCR_P7_PECE1 (1ull << (63 - 50))
#define LPCR_P7_PECE2 (1ull << (63 - 51))
@@ -398,11 +405,22 @@ struct ppc_slb_t {
#define LPCR_P8_PECE2 (1ull << (63 - 49))
#define LPCR_P8_PECE3 (1ull << (63 - 50))
#define LPCR_P8_PECE4 (1ull << (63 - 51))
+/* P9: Power-saving mode Exit Cause Enable (Lower Section) Mask */
+#define LPCR_PECE_L_SHIFT (63 - 51)
+#define LPCR_PECE_L_MASK (0x1full << LPCR_PECE_L_SHIFT)
+#define LPCR_PDEE (1ull << (63 - 47)) /* Privileged Doorbell Exit EN */
+#define LPCR_HDEE (1ull << (63 - 48)) /* Hyperv Doorbell Exit Enable */
+#define LPCR_EEE (1ull << (63 - 49)) /* External Exit Enable */
+#define LPCR_DEE (1ull << (63 - 50)) /* Decrementer Exit Enable */
+#define LPCR_OEE (1ull << (63 - 51)) /* Other Exit Enable */
#define LPCR_MER (1ull << (63 - 52))
+#define LPCR_GTSE (1ull << (63 - 53)) /* Guest Translation Shootdown */
#define LPCR_TC (1ull << (63 - 54))
+#define LPCR_HEIC (1ull << (63 - 59)) /* HV Extern Interrupt Control */
#define LPCR_LPES0 (1ull << (63 - 60))
#define LPCR_LPES1 (1ull << (63 - 61))
#define LPCR_RMI (1ull << (63 - 62))
+#define LPCR_HVICE (1ull << (63 - 62)) /* HV Virtualisation Int Enable */
#define LPCR_HDICE (1ull << (63 - 63))
#define msr_sf ((env->msr >> MSR_SF) & 1)
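
The LPCR masks follow the Power ISA convention of numbering bits from the most significant end, hence the recurring (1ull << (63 - n)) form: architectural bit n of a 64-bit register is 1ull << (63 - n). A quick self-check (the PPC_BIT name is just for this example):

    #include <assert.h>
    #include <stdint.h>

    /* IBM bit numbering: bit 0 is the most significant bit. */
    #define PPC_BIT(n)  (1ull << (63 - (n)))

    int main(void)
    {
        assert(PPC_BIT(53) == 0x400ull);   /* LPCR_GTSE, bit 53 */
        assert(PPC_BIT(63) == 0x001ull);   /* LPCR_HDICE, bit 63 */
        return 0;
    }
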
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 9f5cafd5ba..58aee640c3 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -1850,12 +1850,11 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
getVSR(rD(opcode) + 32, &xt, env);
helper_reset_fpstatus(env);
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xsadddpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -1930,19 +1929,18 @@ VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
ppc_vsr_t xt, xa, xb;
+ float_status tstat;
getVSR(rA(opcode) + 32, &xa, env);
getVSR(rB(opcode) + 32, &xb, env);
getVSR(rD(opcode) + 32, &xt, env);
+ helper_reset_fpstatus(env);
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xsmulpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- helper_reset_fpstatus(env);
-
- float_status tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -2019,18 +2017,18 @@ VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
ppc_vsr_t xt, xa, xb;
+ float_status tstat;
getVSR(rA(opcode) + 32, &xa, env);
getVSR(rB(opcode) + 32, &xb, env);
getVSR(rD(opcode) + 32, &xt, env);
+ helper_reset_fpstatus(env);
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xsdivqpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- helper_reset_fpstatus(env);
- float_status tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -2679,6 +2677,99 @@ VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
+#define VSX_MAX_MINC(name, max) \
+void helper_##name(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ bool vxsnan_flag = false, vex_flag = false; \
+ \
+ getVSR(rA(opcode) + 32, &xa, env); \
+ getVSR(rB(opcode) + 32, &xb, env); \
+ getVSR(rD(opcode) + 32, &xt, env); \
+ \
+ if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
+ float64_is_any_nan(xb.VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ } \
+ xt.VsrD(0) = xb.VsrD(0); \
+ } else if ((max && \
+ !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) || \
+ (!max && \
+ float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) { \
+ xt.VsrD(0) = xa.VsrD(0); \
+ } else { \
+ xt.VsrD(0) = xb.VsrD(0); \
+ } \
+ \
+ vex_flag = fpscr_ve & vxsnan_flag; \
+ if (vxsnan_flag) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ } \
+ if (!vex_flag) { \
+ putVSR(rD(opcode) + 32, &xt, env); \
+ } \
+} \
+
+VSX_MAX_MINC(xsmaxcdp, 1);
+VSX_MAX_MINC(xsmincdp, 0);
+
+#define VSX_MAX_MINJ(name, max) \
+void helper_##name(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ bool vxsnan_flag = false, vex_flag = false; \
+ \
+ getVSR(rA(opcode) + 32, &xa, env); \
+ getVSR(rB(opcode) + 32, &xb, env); \
+ getVSR(rD(opcode) + 32, &xt, env); \
+ \
+ if (unlikely(float64_is_any_nan(xa.VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ } \
+ xt.VsrD(0) = xa.VsrD(0); \
+ } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) { \
+ if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ } \
+ xt.VsrD(0) = xb.VsrD(0); \
+ } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) { \
+ if (max) { \
+ if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
+ xt.VsrD(0) = 0ULL; \
+ } else { \
+ xt.VsrD(0) = 0x8000000000000000ULL; \
+ } \
+ } else { \
+ if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) { \
+ xt.VsrD(0) = 0x8000000000000000ULL; \
+ } else { \
+ xt.VsrD(0) = 0ULL; \
+ } \
+ } \
+ } else if ((max && \
+ !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) || \
+ (!max && \
+ float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) { \
+ xt.VsrD(0) = xa.VsrD(0); \
+ } else { \
+ xt.VsrD(0) = xb.VsrD(0); \
+ } \
+ \
+ vex_flag = fpscr_ve & vxsnan_flag; \
+ if (vxsnan_flag) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ } \
+ if (!vex_flag) { \
+ putVSR(rD(opcode) + 32, &xt, env); \
+ } \
+} \
+
+VSX_MAX_MINJ(xsmaxjdp, 1);
+VSX_MAX_MINJ(xsminjdp, 0);
+
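
The "C" and "J" variants above are not IEEE maxNum/minNum: xsmaxcdp/xsmincdp simply return xb whenever either operand is a NaN (raising VXSNAN for a signalling NaN), while xsmaxjdp/xsminjdp propagate the NaN operand and give signed zeros special treatment. A standalone sketch of the "C" selection rule, using host doubles in place of the softfloat float64 values (SNaN detection is omitted here):

#include <math.h>
#include <stdbool.h>

/* Selection rule of the VSX_MAX_MINC macro above, restated for host doubles. */
double vsx_maxc_minc(double a, double b, bool is_max)
{
    if (isnan(a) || isnan(b)) {
        return b;                   /* any NaN operand selects b */
    }
    if (is_max) {
        return (a < b) ? b : a;     /* "max": keep a unless a < b */
    }
    return (a < b) ? a : b;         /* "min": keep a only if a < b */
}
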
/* VSX_CMP - VSX floating point compare
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
@@ -2861,18 +2952,20 @@ VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
ppc_vsr_t xt, xb;
+ float_status tstat;
getVSR(rB(opcode) + 32, &xb, env);
memset(&xt, 0, sizeof(xt));
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xscvqpdpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- xt.VsrD(0) = float128_to_float64(xb.f128, &env->fp_status);
+ xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(float128_is_signaling_nan(xb.f128,
- &env->fp_status))) {
+ &tstat))) {
float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
}
@@ -2993,6 +3086,8 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
0xffffffff80000000ULL)
+VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
+VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
* op - instruction mnemonic
@@ -3277,3 +3372,188 @@ void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
env->fpscr |= cc << FPSCR_FPRF;
env->crf[BF(opcode)] = cc;
}
+
+void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
+{
+ ppc_vsr_t xb;
+ ppc_vsr_t xt;
+ uint8_t r = Rrm(opcode);
+ uint8_t ex = Rc(opcode);
+ uint8_t rmc = RMC(opcode);
+ uint8_t rmode = 0;
+ float_status tstat;
+
+ getVSR(rB(opcode) + 32, &xb, env);
+ memset(&xt, 0, sizeof(xt));
+ helper_reset_fpstatus(env);
+
+ if (r == 0 && rmc == 0) {
+ rmode = float_round_ties_away;
+ } else if (r == 0 && rmc == 0x3) {
+ rmode = fpscr_rn;
+ } else if (r == 1) {
+ switch (rmc) {
+ case 0:
+ rmode = float_round_nearest_even;
+ break;
+ case 1:
+ rmode = float_round_to_zero;
+ break;
+ case 2:
+ rmode = float_round_up;
+ break;
+ case 3:
+ rmode = float_round_down;
+ break;
+ default:
+ abort();
+ }
+ }
+
+ tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+ set_float_rounding_mode(rmode, &tstat);
+ xt.f128 = float128_round_to_int(xb.f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
+ xt.f128 = float128_snan_to_qnan(xt.f128);
+ }
+ }
+
+ if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
+ env->fp_status.float_exception_flags &= ~float_flag_inexact;
+ }
+
+ helper_compute_fprf_float128(env, xt.f128);
+ float_check_status(env);
+ putVSR(rD(opcode) + 32, &xt, env);
+}
+
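
xsrqpi (and xsrqpxp below) take their rounding mode from the Z23-form R and RMC fields rather than from FPSCR[RN] alone: R=0/RMC=0 selects round-to-nearest with ties away, R=0/RMC=3 falls back to the current FPSCR mode, and R=1 picks one of the four fixed IEEE modes. A compact standalone restatement of that selection (the enum encodings here are arbitrary):

#include <stdint.h>

enum rmode {
    RM_NEAREST_EVEN, RM_TO_ZERO, RM_UP, RM_DOWN, RM_TIES_AWAY, RM_FPSCR
};

/* Z23-form rounding-mode selection as done by helper_xsrqpi() above. */
enum rmode xsrqpi_rmode(uint8_t r, uint8_t rmc)
{
    if (r == 0 && rmc == 0) {
        return RM_TIES_AWAY;
    } else if (r == 0 && rmc == 3) {
        return RM_FPSCR;                /* use the current FPSCR[RN] mode */
    } else if (r == 1) {
        switch (rmc & 3) {
        case 0:  return RM_NEAREST_EVEN;
        case 1:  return RM_TO_ZERO;
        case 2:  return RM_UP;
        default: return RM_DOWN;
        }
    }
    /* Reserved R=0 encodings: the helper keeps its default value (0). */
    return RM_NEAREST_EVEN;
}
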
+void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
+{
+ ppc_vsr_t xb;
+ ppc_vsr_t xt;
+ uint8_t r = Rrm(opcode);
+ uint8_t rmc = RMC(opcode);
+ uint8_t rmode = 0;
+ floatx80 round_res;
+ float_status tstat;
+
+ getVSR(rB(opcode) + 32, &xb, env);
+ memset(&xt, 0, sizeof(xt));
+ helper_reset_fpstatus(env);
+
+ if (r == 0 && rmc == 0) {
+ rmode = float_round_ties_away;
+ } else if (r == 0 && rmc == 0x3) {
+ rmode = fpscr_rn;
+ } else if (r == 1) {
+ switch (rmc) {
+ case 0:
+ rmode = float_round_nearest_even;
+ break;
+ case 1:
+ rmode = float_round_to_zero;
+ break;
+ case 2:
+ rmode = float_round_up;
+ break;
+ case 3:
+ rmode = float_round_down;
+ break;
+ default:
+ abort();
+ }
+ }
+
+ tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+ set_float_rounding_mode(rmode, &tstat);
+ round_res = float128_to_floatx80(xb.f128, &tstat);
+ xt.f128 = floatx80_to_float128(round_res, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
+ xt.f128 = float128_snan_to_qnan(xt.f128);
+ }
+ }
+
+ helper_compute_fprf_float128(env, xt.f128);
+ putVSR(rD(opcode) + 32, &xt, env);
+ float_check_status(env);
+}
+
+void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
+{
+ ppc_vsr_t xb;
+ ppc_vsr_t xt;
+ float_status tstat;
+
+ getVSR(rB(opcode) + 32, &xb, env);
+ memset(&xt, 0, sizeof(xt));
+ helper_reset_fpstatus(env);
+
+ tstat = env->fp_status;
+ if (unlikely(Rc(opcode) != 0)) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+ set_float_exception_flags(0, &tstat);
+ xt.f128 = float128_sqrt(xb.f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ xt.f128 = float128_snan_to_qnan(xb.f128);
+ } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
+ xt.f128 = xb.f128;
+ } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
+ set_snan_bit_is_one(0, &env->fp_status);
+ xt.f128 = float128_default_nan(&env->fp_status);
+ }
+ }
+
+ helper_compute_fprf_float128(env, xt.f128);
+ putVSR(rD(opcode) + 32, &xt, env);
+ float_check_status(env);
+}
+
+void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
+{
+ ppc_vsr_t xt, xa, xb;
+ float_status tstat;
+
+ getVSR(rA(opcode) + 32, &xa, env);
+ getVSR(rB(opcode) + 32, &xb, env);
+ getVSR(rD(opcode) + 32, &xt, env);
+ helper_reset_fpstatus(env);
+
+ tstat = env->fp_status;
+ if (unlikely(Rc(opcode) != 0)) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+ set_float_exception_flags(0, &tstat);
+ xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
+ float128_is_signaling_nan(xb.f128, &tstat)) {
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ }
+
+ helper_compute_fprf_float128(env, xt.f128);
+ putVSR(rD(opcode) + 32, &xt, env);
+ float_check_status(env);
+}
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 85af9df36d..6d77661f7c 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -431,6 +431,10 @@ DEF_HELPER_2(xscmpoqp, void, env, i32)
DEF_HELPER_2(xscmpuqp, void, env, i32)
DEF_HELPER_2(xsmaxdp, void, env, i32)
DEF_HELPER_2(xsmindp, void, env, i32)
+DEF_HELPER_2(xsmaxcdp, void, env, i32)
+DEF_HELPER_2(xsmincdp, void, env, i32)
+DEF_HELPER_2(xsmaxjdp, void, env, i32)
+DEF_HELPER_2(xsminjdp, void, env, i32)
DEF_HELPER_2(xscvdphp, void, env, i32)
DEF_HELPER_2(xscvdpqp, void, env, i32)
DEF_HELPER_2(xscvdpsp, void, env, i32)
@@ -438,6 +442,8 @@ DEF_HELPER_2(xscvdpspn, i64, env, i64)
DEF_HELPER_2(xscvqpdp, void, env, i32)
DEF_HELPER_2(xscvqpsdz, void, env, i32)
DEF_HELPER_2(xscvqpswz, void, env, i32)
+DEF_HELPER_2(xscvqpudz, void, env, i32)
+DEF_HELPER_2(xscvqpuwz, void, env, i32)
DEF_HELPER_2(xscvhpdp, void, env, i32)
DEF_HELPER_2(xscvsdqp, void, env, i32)
DEF_HELPER_2(xscvspdp, void, env, i32)
@@ -459,6 +465,10 @@ DEF_HELPER_2(xsrdpic, void, env, i32)
DEF_HELPER_2(xsrdpim, void, env, i32)
DEF_HELPER_2(xsrdpip, void, env, i32)
DEF_HELPER_2(xsrdpiz, void, env, i32)
+DEF_HELPER_2(xsrqpi, void, env, i32)
+DEF_HELPER_2(xsrqpxp, void, env, i32)
+DEF_HELPER_2(xssqrtqp, void, env, i32)
+DEF_HELPER_2(xssubqp, void, env, i32)
DEF_HELPER_2(xsaddsp, void, env, i32)
DEF_HELPER_2(xssubsp, void, env, i32)
@@ -661,6 +671,7 @@ DEF_HELPER_2(load_slb_vsid, tl, env, tl)
DEF_HELPER_2(find_slb_vsid, tl, env, tl)
DEF_HELPER_FLAGS_1(slbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl)
#endif
DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl)
DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl)
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index 5a2fd68427..1f441c6483 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -133,6 +133,8 @@ EXTRACT_HELPER(UIMM4, 16, 4);
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
EXTRACT_HELPER(SH, 11, 5);
+/* lwat/stwat/ldat/stdat */

+EXTRACT_HELPER(FC, 11, 5);
/* Vector shift count */
EXTRACT_HELPER(VSH, 6, 4);
/* Mask start */
@@ -186,6 +188,7 @@ EXTRACT_HELPER(DCM, 10, 6)
/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)
+EXTRACT_HELPER(Rrm, 16, 1)
EXTRACT_HELPER_SPLIT(DQxT, 3, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 663d2e79c9..52bbea514a 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -438,12 +438,13 @@ static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
return (1ul << shift) <= rampgsize;
}
+static long max_cpu_page_size;
+
static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
static struct kvm_ppc_smmu_info smmu_info;
static bool has_smmu_info;
CPUPPCState *env = &cpu->env;
- long rampagesize;
int iq, ik, jq, jk;
bool has_64k_pages = false;
@@ -458,7 +459,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
has_smmu_info = true;
}
- rampagesize = getrampagesize();
+ if (!max_cpu_page_size) {
+ max_cpu_page_size = getrampagesize();
+ }
/* Convert to QEMU form */
memset(&env->sps, 0, sizeof(env->sps));
@@ -478,14 +481,14 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
- if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+ if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
ksps->page_shift)) {
continue;
}
qsps->page_shift = ksps->page_shift;
qsps->slb_enc = ksps->slb_enc;
for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
- if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+ if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
ksps->enc[jk].page_shift)) {
continue;
}
@@ -510,12 +513,33 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
env->mmu_model &= ~POWERPC_MMU_64K;
}
}
+
+bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
+{
+ Object *mem_obj = object_resolve_path(obj_path, NULL);
+ char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
+ long pagesize;
+
+ if (mempath) {
+ pagesize = gethugepagesize(mempath);
+ } else {
+ pagesize = getpagesize();
+ }
+
+ return pagesize >= max_cpu_page_size;
+}
+
#else /* defined (TARGET_PPC64) */
static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
}
+bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
+{
+ return true;
+}
+
#endif /* !defined (TARGET_PPC64) */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
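
kvmppc_is_mem_backend_page_size_ok() compares the page size of a memory backend (its hugepage size when a mem-path is set, the host page size otherwise) against the largest page size the vCPUs can use, which this patch caches in max_cpu_page_size. A simplified standalone sketch of that comparison (getpagesize() is the POSIX call; the hugepage lookup is a stub standing in for gethugepagesize()):

#include <stdbool.h>
#include <unistd.h>

static long max_cpu_page_size;          /* filled in lazily, as in the patch */

/* Stand-in for QEMU's gethugepagesize(mem-path). */
static long hugepage_size_stub(const char *mem_path)
{
    (void)mem_path;
    return 16 * 1024 * 1024;            /* e.g. a 16M hugepage mount */
}

/* A backend is usable only if its pages are at least as large as the
 * biggest page size the guest CPUs were set up with. */
bool mem_backend_page_size_ok(const char *mem_path)
{
    long pagesize = mem_path ? hugepage_size_stub(mem_path) : getpagesize();

    return pagesize >= max_cpu_page_size;
}
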
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 151c00bac7..8da2ee418a 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -60,6 +60,8 @@ int kvmppc_enable_hwrng(void);
int kvmppc_put_books_sregs(PowerPCCPU *cpu);
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
+bool kvmppc_is_mem_backend_page_size_ok(char *obj_path);
+
#else
static inline uint32_t kvmppc_get_tbfreq(void)
@@ -192,6 +194,11 @@ static inline uint64_t kvmppc_rma_size(uint64_t current_size,
return ram_size;
}
+static inline bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
+{
+ return true;
+}
+
#endif /* !CONFIG_USER_ONLY */
static inline bool kvmppc_has_cap_epr(void)
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index bb78fb5497..76669ed82c 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -115,7 +115,8 @@ void helper_slbia(CPUPPCState *env)
}
}
-void helper_slbie(CPUPPCState *env, target_ulong addr)
+static void __helper_slbie(CPUPPCState *env, target_ulong addr,
+ target_ulong global)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc_slb_t *slb;
@@ -132,10 +133,21 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
- env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ env->tlb_need_flush |=
+            (global ? TLB_NEED_GLOBAL_FLUSH : TLB_NEED_LOCAL_FLUSH);
}
}
+void helper_slbie(CPUPPCState *env, target_ulong addr)
+{
+ __helper_slbie(env, addr, false);
+}
+
+void helper_slbieg(CPUPPCState *env, target_ulong addr)
+{
+ __helper_slbie(env, addr, true);
+}
+
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
target_ulong esid, target_ulong vsid)
{
@@ -640,7 +652,15 @@ static void ppc_hash64_set_isi(CPUState *cs, CPUPPCState *env,
if (msr_ir) {
vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
} else {
- vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+ switch (env->mmu_model) {
+ case POWERPC_MMU_3_00:
+ /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
+ vpm = true;
+ break;
+ default:
+ vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+ break;
+ }
}
if (vpm && !msr_hv) {
cs->exception_index = POWERPC_EXCP_HISI;
@@ -658,7 +678,15 @@ static void ppc_hash64_set_dsi(CPUState *cs, CPUPPCState *env, uint64_t dar,
if (msr_dr) {
vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
} else {
- vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+ switch (env->mmu_model) {
+ case POWERPC_MMU_3_00:
+ /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
+ vpm = true;
+ break;
+ default:
+ vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+ break;
+ }
}
if (vpm && !msr_hv) {
cs->exception_index = POWERPC_EXCP_HDSI;
@@ -1050,6 +1078,14 @@ void helper_store_lpcr(CPUPPCState *env, target_ulong val)
LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
break;
+ case POWERPC_MMU_3_00: /* P9 */
+ lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
+ (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
+ LPCR_UPRT | LPCR_EVIRT | LPCR_ONL |
+ (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
+ LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
+ LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
+ break;
default:
;
}
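
helper_store_lpcr() filters guest writes to the LPCR through a per-MMU-model mask; the POWER9 (POWERPC_MMU_3_00) case above adds the new exit-enable bits plus GTSE, HEIC and HVICE. A standalone sketch of the filtering idea, with only a few representative bits rather than the full architected masks:

#include <stdint.h>

#define BIT63MINUS(n)  (1ull << (63 - (n)))    /* MSB-0 numbering, as above */

enum mmu_model { MMU_2_07, MMU_3_00 };

/* Trimmed-down per-model masks of the LPCR bits a guest may set;
 * only a handful of representative bits are included here. */
static uint64_t lpcr_write_mask(enum mmu_model m)
{
    uint64_t mask = BIT63MINUS(54) | BIT63MINUS(63);   /* LPCR_TC, LPCR_HDICE */

    if (m == MMU_3_00) {
        mask |= BIT63MINUS(53) | BIT63MINUS(62);       /* LPCR_GTSE, LPCR_HVICE */
    }
    return mask;
}

/* Only the bits architected for the current model survive a guest write. */
uint64_t store_lpcr(enum mmu_model m, uint64_t val)
{
    return val & lpcr_write_mask(m);
}
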
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index f746f53615..eb2d482ef7 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -825,7 +825,7 @@ static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
tlb = &env->tlb.tlbe[i];
ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
access_type, i);
- if (!ret) {
+ if (ret != -1) {
break;
}
}
@@ -1935,6 +1935,7 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
case POWERPC_MMU_2_06a:
case POWERPC_MMU_2_07:
case POWERPC_MMU_2_07a:
+ case POWERPC_MMU_3_00:
#endif /* defined(TARGET_PPC64) */
env->tlb_need_flush = 0;
tlb_flush(CPU(cpu));
@@ -1974,6 +1975,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
case POWERPC_MMU_2_06a:
case POWERPC_MMU_2_07:
case POWERPC_MMU_2_07a:
+ case POWERPC_MMU_3_00:
/* tlbie invalidate TLBs for all segments */
/* XXX: given the fact that there are too many segments to invalidate,
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index b48abaedfb..3ba2616b8a 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -2976,6 +2976,113 @@ LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
+#define LD_ATOMIC(name, memop, tp, op, eop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ int len = MEMOP_GET_SIZE(memop); \
+ uint32_t gpr_FC = FC(ctx->opcode); \
+ TCGv EA = tcg_temp_local_new(); \
+ TCGv_##tp t0, t1; \
+ \
+ gen_addr_register(ctx, EA); \
+ if (len > 1) { \
+ gen_check_align(ctx, EA, len - 1); \
+ } \
+ t0 = tcg_temp_new_##tp(); \
+ t1 = tcg_temp_new_##tp(); \
+ tcg_gen_##op(t0, cpu_gpr[rD(ctx->opcode) + 1]); \
+ \
+ switch (gpr_FC) { \
+ case 0: /* Fetch and add */ \
+ tcg_gen_atomic_fetch_add_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 1: /* Fetch and xor */ \
+ tcg_gen_atomic_fetch_xor_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 2: /* Fetch and or */ \
+ tcg_gen_atomic_fetch_or_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 3: /* Fetch and 'and' */ \
+ tcg_gen_atomic_fetch_and_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 8: /* Swap */ \
+ tcg_gen_atomic_xchg_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 4: /* Fetch and max unsigned */ \
+ case 5: /* Fetch and max signed */ \
+ case 6: /* Fetch and min unsigned */ \
+ case 7: /* Fetch and min signed */ \
+ case 16: /* compare and swap not equal */ \
+ case 24: /* Fetch and increment bounded */ \
+ case 25: /* Fetch and increment equal */ \
+ case 28: /* Fetch and decrement bounded */ \
+ gen_invalid(ctx); \
+ break; \
+ default: \
+ /* invoke data storage error handler */ \
+ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); \
+ } \
+ tcg_gen_##eop(cpu_gpr[rD(ctx->opcode)], t1); \
+ tcg_temp_free_##tp(t0); \
+ tcg_temp_free_##tp(t1); \
+ tcg_temp_free(EA); \
+}
+
+LD_ATOMIC(lwat, DEF_MEMOP(MO_UL), i32, trunc_tl_i32, extu_i32_tl)
+#if defined(TARGET_PPC64)
+LD_ATOMIC(ldat, DEF_MEMOP(MO_Q), i64, mov_i64, mov_i64)
+#endif
+
+#define ST_ATOMIC(name, memop, tp, op) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ int len = MEMOP_GET_SIZE(memop); \
+ uint32_t gpr_FC = FC(ctx->opcode); \
+ TCGv EA = tcg_temp_local_new(); \
+ TCGv_##tp t0, t1; \
+ \
+ gen_addr_register(ctx, EA); \
+ if (len > 1) { \
+ gen_check_align(ctx, EA, len - 1); \
+ } \
+ t0 = tcg_temp_new_##tp(); \
+ t1 = tcg_temp_new_##tp(); \
+ tcg_gen_##op(t0, cpu_gpr[rD(ctx->opcode) + 1]); \
+ \
+ switch (gpr_FC) { \
+ case 0: /* add and Store */ \
+ tcg_gen_atomic_add_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 1: /* xor and Store */ \
+ tcg_gen_atomic_xor_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 2: /* Or and Store */ \
+ tcg_gen_atomic_or_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 3: /* 'and' and Store */ \
+ tcg_gen_atomic_and_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+ break; \
+ case 4: /* Store max unsigned */ \
+ case 5: /* Store max signed */ \
+ case 6: /* Store min unsigned */ \
+ case 7: /* Store min signed */ \
+ case 24: /* Store twin */ \
+ gen_invalid(ctx); \
+ break; \
+ default: \
+ /* invoke data storage error handler */ \
+ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); \
+ } \
+ tcg_temp_free_##tp(t0); \
+ tcg_temp_free_##tp(t1); \
+ tcg_temp_free(EA); \
+}
+
+ST_ATOMIC(stwat, DEF_MEMOP(MO_UL), i32, trunc_tl_i32)
+#if defined(TARGET_PPC64)
+ST_ATOMIC(stdat, DEF_MEMOP(MO_Q), i64, mov_i64)
+#endif
+
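
lwat/ldat and stwat/stdat carry a 5-bit function code (the FC field added to internal.h above) that selects the atomic primitive; the LD_ATOMIC and ST_ATOMIC macros map the implemented codes onto TCG's atomic ops and raise an invalid-op or DSI exception for the rest. A standalone sketch of the load-side dispatch over C11 atomics (only the codes handled above; everything else is simplified):

#include <stdatomic.h>
#include <stdint.h>

/* Function codes from the lwat/ldat switch above. */
enum { FC_ADD = 0, FC_XOR = 1, FC_OR = 2, FC_AND = 3, FC_SWAP = 8 };

/* Returns the previous memory value, like the "fetch" forms used above;
 * unimplemented codes are reported through *ok instead of an exception. */
uint32_t lwat_like(_Atomic uint32_t *ea, uint32_t src, int fc, int *ok)
{
    *ok = 1;
    switch (fc) {
    case FC_ADD:  return atomic_fetch_add(ea, src);
    case FC_XOR:  return atomic_fetch_xor(ea, src);
    case FC_OR:   return atomic_fetch_or(ea, src);
    case FC_AND:  return atomic_fetch_and(ea, src);
    case FC_SWAP: return atomic_exchange(ea, src);
    default:
        *ok = 0;          /* the macro raises an invalid op / DSI here */
        return 0;
    }
}
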
#if defined(CONFIG_USER_ONLY)
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
int reg, int memop)
@@ -4377,6 +4484,30 @@ static void gen_slbie(DisasContext *ctx)
gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
+
+/* slbieg */
+static void gen_slbieg(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_slbieg(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* slbsync */
+static void gen_slbsync(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_check_tlb_flush(ctx, true);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
#endif /* defined(TARGET_PPC64) */
/*** External control ***/
@@ -6025,6 +6156,19 @@ static inline void gen_cp_abort(DisasContext *ctx)
// Do Nothing
}
+#define GEN_CP_PASTE_NOOP(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ /* Generate invalid exception until \
+ * we have an implementation of the copy \
+ * paste facility \
+ */ \
+ gen_invalid(ctx); \
+}
+
+GEN_CP_PASTE_NOOP(copy)
+GEN_CP_PASTE_NOOP(paste)
+
static void gen_tcheck(DisasContext *ctx)
{
if (unlikely(!ctx->tm_enabled)) {
@@ -6174,7 +6318,9 @@ GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
@@ -6230,10 +6376,14 @@ GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
+GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
#if defined(TARGET_PPC64)
+GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
@@ -6241,6 +6391,7 @@ GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
#endif
GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT),
+GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039FF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
@@ -6313,6 +6464,8 @@ GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
#if defined(TARGET_PPC64)
GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI),
GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI),
+GEN_HANDLER_E(slbieg, 0x1F, 0x12, 0x0E, 0x001F0001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(slbsync, 0x1F, 0x12, 0x0A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index a44c0034a8..7f12908029 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -808,6 +808,10 @@ GEN_VSX_HELPER_2(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xsminjdp, 0x00, 0x13, 0, PPC2_ISA300)
GEN_VSX_HELPER_2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
@@ -815,6 +819,8 @@ GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvqpdp, 0x04, 0x1A, 0x14, PPC2_ISA300)
GEN_VSX_HELPER_2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
GEN_VSX_HELPER_2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
GEN_VSX_HELPER_2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
GEN_VSX_HELPER_2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
@@ -833,6 +839,11 @@ GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
+GEN_VSX_HELPER_2(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
+
GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
diff --git a/target/ppc/translate/vsx-ops.inc.c b/target/ppc/translate/vsx-ops.inc.c
index 7dc9f6f477..5030c4aceb 100644
--- a/target/ppc/translate/vsx-ops.inc.c
+++ b/target/ppc/translate/vsx-ops.inc.c
@@ -103,6 +103,21 @@ GEN_HANDLER_E(name, 0x3F, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
#define GEN_VSX_XFORM_300_EO(name, opc2, opc3, opc4, inval) \
GEN_HANDLER_E_2(name, 0x3F, opc2, opc3, opc4, inval, PPC_NONE, PPC2_ISA300)
+#define GEN_VSX_Z23FORM_300(name, opc2, opc3, opc4, inval) \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x1, inval)
+
+GEN_VSX_Z23FORM_300(xsrqpi, 0x05, 0x0, 0x0, 0x0),
+GEN_VSX_Z23FORM_300(xsrqpxp, 0x05, 0x1, 0x0, 0x0),
+GEN_VSX_XFORM_300_EO(xssqrtqp, 0x04, 0x19, 0x1B, 0x0),
+GEN_VSX_XFORM_300(xssubqp, 0x04, 0x10, 0x0),
+
GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX),
GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
@@ -116,6 +131,8 @@ GEN_VSX_XFORM_300_EO(xscvdpqp, 0x04, 0x1A, 0x16, 0x00000001),
GEN_VSX_XFORM_300_EO(xscvqpdp, 0x04, 0x1A, 0x14, 0x0),
GEN_VSX_XFORM_300_EO(xscvqpsdz, 0x04, 0x1A, 0x19, 0x00000001),
GEN_VSX_XFORM_300_EO(xscvqpswz, 0x04, 0x1A, 0x09, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvqpudz, 0x04, 0x1A, 0x11, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvqpuwz, 0x04, 0x1A, 0x01, 0x00000001),
#ifdef TARGET_PPC64
GEN_XX2FORM_EO(xsxexpdp, 0x16, 0x15, 0x00, PPC2_ISA300),
@@ -185,6 +202,10 @@ GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001),
GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001),
GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
+GEN_XX3FORM(xsmaxcdp, 0x00, 0x10, PPC2_ISA300),
+GEN_XX3FORM(xsmincdp, 0x00, 0x11, PPC2_ISA300),
+GEN_XX3FORM(xsmaxjdp, 0x00, 0x12, PPC2_ISA300),
+GEN_XX3FORM(xsminjdp, 0x00, 0x13, PPC2_ISA300),
GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300),
GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
index 76f79fa77b..be35cbd3a2 100644
--- a/target/ppc/translate_init.c
+++ b/target/ppc/translate_init.c
@@ -8816,8 +8816,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
- /* Using 2.07 defines until new radix model is added. */
- pcc->mmu_model = POWERPC_MMU_2_07;
+ pcc->mmu_model = POWERPC_MMU_3_00;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
/* segment page size remain the same */
@@ -8871,12 +8870,24 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu)
lpcr->default_value &= ~LPCR_RMLS;
lpcr->default_value |= 1ull << LPCR_RMLS_SHIFT;
- /* P7 and P8 has slightly different PECE bits, mostly because P8 adds
- * bit 47 and 48 which are reserved on P7. Here we set them all, which
- * will work as expected for both implementations
- */
- lpcr->default_value |= LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
- LPCR_P8_PECE3 | LPCR_P8_PECE4;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_3_00:
+        /* By default we choose legacy mode and switch to the new hash or
+         * radix MMU mode when a process-table registration hcall is made,
+         * so process tables and guest translation shootdown are disabled
+         * by default.
+         */
+ lpcr->default_value &= ~(LPCR_UPRT | LPCR_GTSE);
+ lpcr->default_value |= LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE |
+ LPCR_OEE;
+ break;
+ default:
+        /* P7 and P8 have slightly different PECE bits, mostly because P8 adds
+         * bits 47 and 48, which are reserved on P7. Here we set them all, which
+ * will work as expected for both implementations
+ */
+ lpcr->default_value |= LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
+ LPCR_P8_PECE3 | LPCR_P8_PECE4;
+ }
/* We should be followed by a CPU reset but update the active value
* just in case...
diff --git a/vl.c b/vl.c
index b5d0a19811..904e34b72c 100644
--- a/vl.c
+++ b/vl.c
@@ -1492,7 +1492,7 @@ MachineInfoList *qmp_query_machines(Error **errp)
info->name = g_strdup(mc->name);
info->cpu_max = !mc->max_cpus ? 1 : mc->max_cpus;
- info->hotpluggable_cpus = !!mc->query_hotpluggable_cpus;
+ info->hotpluggable_cpus = mc->has_hotpluggable_cpus;
entry = g_malloc0(sizeof(*entry));
entry->value = info;