-rw-r--r--  hw/arm/virt.c                                 6
-rw-r--r--  hw/core/machine.c                             3
-rw-r--r--  hw/display/vmware_vga.c                       4
-rw-r--r--  hw/hppa/machine.c                             5
-rw-r--r--  hw/mips/Kconfig                               1
-rw-r--r--  hw/net/igb.c                                 15
-rw-r--r--  hw/net/igb_common.h                           1
-rw-r--r--  hw/net/igb_core.c                             6
-rw-r--r--  hw/net/igb_core.h                             3
-rw-r--r--  hw/net/igbvf.c                               19
-rw-r--r--  hw/net/trace-events                           1
-rw-r--r--  hw/pci-host/astro.c                          73
-rw-r--r--  hw/pci-host/meson.build                       2
-rw-r--r--  pc-bios/hppa-firmware.img                   bin 755480 -> 681332 bytes
m---------  roms/seabios-hppa                             0
-rw-r--r--  target/arm/tcg/cpu32.c                        2
-rw-r--r--  target/arm/tcg/mte_helper.c                  12
-rw-r--r--  target/arm/tcg/translate-a64.c                4
-rw-r--r--  target/hppa/cpu-param.h                       3
-rw-r--r--  target/hppa/cpu.h                            27
-rw-r--r--  target/hppa/int_helper.c                      2
-rw-r--r--  target/hppa/mem_helper.c                     97
-rw-r--r--  target/hppa/op_helper.c                       5
-rw-r--r--  target/hppa/translate.c                      41
-rw-r--r--  tests/avocado/machine_s390_ccw_virtio.py     18
-rw-r--r--  tests/avocado/mem-addr-space-check.py       356
26 files changed, 569 insertions, 137 deletions
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 85e3c5ba9d..be2856c018 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -576,7 +576,8 @@ static void fdt_add_gic_node(VirtMachineState *vms)
if (vms->virt) {
qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
- GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
+ GIC_FDT_IRQ_TYPE_PPI,
+ INTID_TO_PPI(ARCH_GIC_MAINT_IRQ),
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
}
} else {
@@ -600,7 +601,8 @@ static void fdt_add_gic_node(VirtMachineState *vms)
2, vms->memmap[VIRT_GIC_VCPU].base,
2, vms->memmap[VIRT_GIC_VCPU].size);
qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
- GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
+ GIC_FDT_IRQ_TYPE_PPI,
+ INTID_TO_PPI(ARCH_GIC_MAINT_IRQ),
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
}
}
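[Editor's note] The flat device tree encodes PPIs by their index within the PPI range rather than by raw GIC INTID (INTIDs 0-15 are SGIs, 16-31 are PPIs), which is what the new INTID_TO_PPI() wrapping accounts for. A minimal sketch of the conversion, assuming the conventional PPI base of 16; the actual macro definition lives in QEMU's Arm virt/GIC headers:

    #define GIC_NR_SGIS        16                      /* INTIDs 0-15 are SGIs      */
    #define INTID_TO_PPI(irq)  ((irq) - GIC_NR_SGIS)   /* e.g. INTID 25 -> PPI 9    */
    /* The maintenance interrupt is then emitted into the "interrupts" property
     * as a PPI number, which is what the kernel's GIC binding expects. */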
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 50edaab737..0c17398141 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -35,7 +35,8 @@
GlobalProperty hw_compat_8_1[] = {
{ TYPE_PCI_BRIDGE, "x-pci-express-writeable-slt-bug", "true" },
{ "ramfb", "x-migrate", "off" },
- { "vfio-pci-nohotplug", "x-ramfb-migrate", "off" }
+ { "vfio-pci-nohotplug", "x-ramfb-migrate", "off" },
+ { "igb", "x-pcie-flr-init", "off" },
};
const size_t hw_compat_8_1_len = G_N_ELEMENTS(hw_compat_8_1);
diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
index 7490d43881..3f26bea190 100644
--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -336,8 +336,8 @@ static inline bool vmsvga_verify_rect(DisplaySurface *surface,
return false;
}
if (h > SVGA_MAX_HEIGHT) {
- trace_vmware_verify_rect_greater_than_bound(name, "y", SVGA_MAX_HEIGHT,
- y);
+ trace_vmware_verify_rect_greater_than_bound(name, "h", SVGA_MAX_HEIGHT,
+ h);
return false;
}
if (y + h > surface_height(surface)) {
diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c
index a3222d3a96..9d08f39490 100644
--- a/hw/hppa/machine.c
+++ b/hw/hppa/machine.c
@@ -34,9 +34,10 @@
#include "net/net.h"
#include "qemu/log.h"
-#define MIN_SEABIOS_HPPA_VERSION 10 /* require at least this fw version */
+#define MIN_SEABIOS_HPPA_VERSION 12 /* require at least this fw version */
-#define HPA_POWER_BUTTON (FIRMWARE_END - 0x10)
+/* Power button address at &PAGE0->pad[4] */
+#define HPA_POWER_BUTTON (0x40 + 4 * sizeof(uint32_t))
#define enable_lasi_lan() 0
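[Editor's note] The new definition is plain arithmetic over the PAGE0 pad layout; a hypothetical compile-time check (not part of the patch) makes the resulting address explicit:

    #include <stdint.h>

    #define HPA_POWER_BUTTON (0x40 + 4 * sizeof(uint32_t))

    /* &PAGE0->pad[4] with 32-bit pad entries: 0x40 + 4 * 4 = 0x50 */
    _Static_assert(HPA_POWER_BUTTON == 0x50,
                   "power button word sits at PAGE0 offset 0x50");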
diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig
index 66ec536e06..505381a0bb 100644
--- a/hw/mips/Kconfig
+++ b/hw/mips/Kconfig
@@ -46,6 +46,7 @@ config LOONGSON3V
select PCI_EXPRESS_GENERIC_BRIDGE
select MSI_NONBROKEN
select FW_CFG_MIPS
+ select UNIMP
config MIPS_CPS
bool
diff --git a/hw/net/igb.c b/hw/net/igb.c
index 8ff832acfc..dfb722b695 100644
--- a/hw/net/igb.c
+++ b/hw/net/igb.c
@@ -78,6 +78,7 @@ struct IGBState {
uint32_t ioaddr;
IGBCore core;
+ bool has_flr;
};
#define IGB_CAP_SRIOV_OFFSET (0x160)
@@ -101,6 +102,9 @@ static void igb_write_config(PCIDevice *dev, uint32_t addr,
trace_igb_write_config(addr, val, len);
pci_default_write_config(dev, addr, val, len);
+ if (s->has_flr) {
+ pcie_cap_flr_write_config(dev, addr, val, len);
+ }
if (range_covers_byte(addr, len, PCI_COMMAND) &&
(dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
@@ -122,6 +126,12 @@ igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
igb_core_write(&s->core, addr, val, size);
}
+void igb_vf_reset(void *opaque, uint16_t vfn)
+{
+ IGBState *s = opaque;
+ igb_core_vf_reset(&s->core, vfn);
+}
+
static bool
igb_io_get_reg_index(IGBState *s, uint32_t *idx)
{
@@ -427,6 +437,10 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp)
}
/* PCIe extended capabilities (in order) */
+ if (s->has_flr) {
+ pcie_cap_flr_init(pci_dev);
+ }
+
if (pcie_aer_init(pci_dev, 1, 0x100, 0x40, errp) < 0) {
hw_error("Failed to initialize AER capability");
}
@@ -582,6 +596,7 @@ static const VMStateDescription igb_vmstate = {
static Property igb_properties[] = {
DEFINE_NIC_PROPERTIES(IGBState, conf),
+ DEFINE_PROP_BOOL("x-pcie-flr-init", IGBState, has_flr, true),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/net/igb_common.h b/hw/net/igb_common.h
index 5c261ba9d3..b316a5bcfa 100644
--- a/hw/net/igb_common.h
+++ b/hw/net/igb_common.h
@@ -152,5 +152,6 @@ enum {
uint64_t igb_mmio_read(void *opaque, hwaddr addr, unsigned size);
void igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size);
+void igb_vf_reset(void *opaque, uint16_t vfn);
#endif
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index f6a5e2327b..2a7a11aa9e 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -2477,11 +2477,13 @@ static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
}
}
-static void igb_vf_reset(IGBCore *core, uint16_t vfn)
+void igb_core_vf_reset(IGBCore *core, uint16_t vfn)
{
uint16_t qn0 = vfn;
uint16_t qn1 = vfn + IGB_NUM_VM_POOLS;
+ trace_igb_core_vf_reset(vfn);
+
/* disable Rx and Tx for the VF*/
core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
@@ -2560,7 +2562,7 @@ static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
if (val & E1000_CTRL_RST) {
vfn = (index - PVTCTRL0) / 0x40;
- igb_vf_reset(core, vfn);
+ igb_core_vf_reset(core, vfn);
}
}
diff --git a/hw/net/igb_core.h b/hw/net/igb_core.h
index 9cbbfd516b..bf8c46f26b 100644
--- a/hw/net/igb_core.h
+++ b/hw/net/igb_core.h
@@ -130,6 +130,9 @@ igb_core_set_link_status(IGBCore *core);
void
igb_core_pci_uninit(IGBCore *core);
+void
+igb_core_vf_reset(IGBCore *core, uint16_t vfn);
+
bool
igb_can_receive(IGBCore *core);
diff --git a/hw/net/igbvf.c b/hw/net/igbvf.c
index d55e1e8a6a..94a4e885f2 100644
--- a/hw/net/igbvf.c
+++ b/hw/net/igbvf.c
@@ -204,6 +204,10 @@ static void igbvf_write_config(PCIDevice *dev, uint32_t addr, uint32_t val,
{
trace_igbvf_write_config(addr, val, len);
pci_default_write_config(dev, addr, val, len);
+ if (object_property_get_bool(OBJECT(pcie_sriov_get_pf(dev)),
+ "x-pcie-flr-init", &error_abort)) {
+ pcie_cap_flr_write_config(dev, addr, val, len);
+ }
}
static uint64_t igbvf_mmio_read(void *opaque, hwaddr addr, unsigned size)
@@ -266,6 +270,11 @@ static void igbvf_pci_realize(PCIDevice *dev, Error **errp)
hw_error("Failed to initialize PCIe capability");
}
+ if (object_property_get_bool(OBJECT(pcie_sriov_get_pf(dev)),
+ "x-pcie-flr-init", &error_abort)) {
+ pcie_cap_flr_init(dev);
+ }
+
if (pcie_aer_init(dev, 1, 0x100, 0x40, errp) < 0) {
hw_error("Failed to initialize AER capability");
}
@@ -273,6 +282,13 @@ static void igbvf_pci_realize(PCIDevice *dev, Error **errp)
pcie_ari_init(dev, 0x150);
}
+static void igbvf_qdev_reset_hold(Object *obj)
+{
+ PCIDevice *vf = PCI_DEVICE(obj);
+
+ igb_vf_reset(pcie_sriov_get_pf(vf), pcie_sriov_vf_number(vf));
+}
+
static void igbvf_pci_uninit(PCIDevice *dev)
{
IgbVfState *s = IGBVF(dev);
@@ -287,6 +303,7 @@ static void igbvf_class_init(ObjectClass *class, void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
+ ResettableClass *rc = RESETTABLE_CLASS(class);
c->realize = igbvf_pci_realize;
c->exit = igbvf_pci_uninit;
@@ -295,6 +312,8 @@ static void igbvf_class_init(ObjectClass *class, void *data)
c->revision = 1;
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
+ rc->phases.hold = igbvf_qdev_reset_hold;
+
dc->desc = "Intel 82576 Virtual Function";
dc->user_creatable = false;

diff --git a/hw/net/trace-events b/hw/net/trace-events
index 3097742cc0..387e32e153 100644
--- a/hw/net/trace-events
+++ b/hw/net/trace-events
@@ -274,6 +274,7 @@ igb_core_mdic_read(uint32_t addr, uint32_t data) "MDIC READ: PHY[%u] = 0x%x"
igb_core_mdic_read_unhandled(uint32_t addr) "MDIC READ: PHY[%u] UNHANDLED"
igb_core_mdic_write(uint32_t addr, uint32_t data) "MDIC WRITE: PHY[%u] = 0x%x"
igb_core_mdic_write_unhandled(uint32_t addr) "MDIC WRITE: PHY[%u] UNHANDLED"
+igb_core_vf_reset(uint16_t vfn) "VF%d"
igb_link_set_ext_params(bool asd_check, bool speed_select_bypass, bool pfrstd) "Set extended link params: ASD check: %d, Speed select bypass: %d, PF reset done: %d"
diff --git a/hw/pci-host/astro.c b/hw/pci-host/astro.c
index bd226581af..7d68ccee7e 100644
--- a/hw/pci-host/astro.c
+++ b/hw/pci-host/astro.c
@@ -32,6 +32,7 @@
#include "hw/pci-host/astro.h"
#include "hw/hppa/hppa_hardware.h"
#include "migration/vmstate.h"
+#include "target/hppa/cpu.h"
#include "trace.h"
#include "qom/object.h"
@@ -268,22 +269,6 @@ static const MemoryRegionOps elroy_config_addr_ops = {
};
-/*
- * A subroutine of astro_translate_iommu that builds an IOMMUTLBEntry using the
- * given translated address and mask.
- */
-static bool make_iommu_tlbe(hwaddr addr, hwaddr taddr, hwaddr mask,
- IOMMUTLBEntry *ret)
-{
- hwaddr tce_mask = ~((1ull << 12) - 1);
- ret->target_as = &address_space_memory;
- ret->iova = addr & tce_mask;
- ret->translated_addr = taddr & tce_mask;
- ret->addr_mask = ~tce_mask;
- ret->perm = IOMMU_RW;
- return true;
-}
-
/* Handle PCI-to-system address translation. */
static IOMMUTLBEntry astro_translate_iommu(IOMMUMemoryRegion *iommu,
hwaddr addr,
@@ -291,53 +276,59 @@ static IOMMUTLBEntry astro_translate_iommu(IOMMUMemoryRegion *iommu,
int iommu_idx)
{
AstroState *s = container_of(iommu, AstroState, iommu);
- IOMMUTLBEntry ret = {
- .target_as = &address_space_memory,
- .iova = addr,
- .translated_addr = 0,
- .addr_mask = ~(hwaddr)0,
- .perm = IOMMU_NONE,
- };
- hwaddr pdir_ptr, index, a, ibase;
+ hwaddr pdir_ptr, index, ibase;
hwaddr addr_mask = 0xfff; /* 4k translation */
uint64_t entry;
#define IOVP_SHIFT 12 /* equals PAGE_SHIFT */
#define PDIR_INDEX(iovp) ((iovp) >> IOVP_SHIFT)
-#define IOVP_MASK PAGE_MASK
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
+ addr &= ~addr_mask;
+
+ /*
+ * Default translation: "32-bit PCI Addressing on 40-bit Runway".
+ * For addresses in the 32-bit memory address range ... and then
+ * language which not-coincidentally matches the PSW.W=0 mapping.
+ */
+ if (addr <= UINT32_MAX) {
+ entry = hppa_abs_to_phys_pa2_w0(addr);
+ } else {
+ entry = addr;
+ }
+
/* "range enable" flag cleared? */
if ((s->tlb_ibase & 1) == 0) {
- make_iommu_tlbe(addr, addr, addr_mask, &ret);
- return ret;
+ goto skip;
}
- a = addr;
ibase = s->tlb_ibase & ~1ULL;
- if ((a & s->tlb_imask) != ibase) {
+ if ((addr & s->tlb_imask) != ibase) {
/* do not translate this one! */
- make_iommu_tlbe(addr, addr, addr_mask, &ret);
- return ret;
+ goto skip;
}
- index = PDIR_INDEX(a);
+
+ index = PDIR_INDEX(addr);
pdir_ptr = s->tlb_pdir_base + index * sizeof(entry);
entry = ldq_le_phys(&address_space_memory, pdir_ptr);
+
if (!(entry & SBA_PDIR_VALID_BIT)) { /* I/O PDIR entry valid ? */
- g_assert_not_reached();
- goto failure;
+ /* failure */
+ return (IOMMUTLBEntry) { .perm = IOMMU_NONE };
}
+
entry &= ~SBA_PDIR_VALID_BIT;
entry >>= IOVP_SHIFT;
entry <<= 12;
- entry |= addr & 0xfff;
- make_iommu_tlbe(addr, entry, addr_mask, &ret);
- goto success;
- failure:
- ret = (IOMMUTLBEntry) { .perm = IOMMU_NONE };
- success:
- return ret;
+ skip:
+ return (IOMMUTLBEntry) {
+ .target_as = &address_space_memory,
+ .iova = addr,
+ .translated_addr = entry,
+ .addr_mask = addr_mask,
+ .perm = IOMMU_RW,
+ };
}
static AddressSpace *elroy_pcihost_set_iommu(PCIBus *bus, void *opaque,
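[Editor's note] For concreteness, the identity-mapped "skip" path above produces an entry like the following for a 32-bit memory-space DMA address (illustrative values, not part of the patch):

    /*
     * Example: addr = 0x12345678 with the range-enable bit clear.
     * hppa_abs_to_phys_pa2_w0() leaves a memory-space address unchanged,
     * so the device sees a page-granular identity mapping.
     */
    IOMMUTLBEntry example = {
        .target_as       = &address_space_memory,
        .iova            = 0x12345000,   /* addr & ~0xfff */
        .translated_addr = 0x12345000,
        .addr_mask       = 0xfff,        /* 4 KiB translation granule */
        .perm            = IOMMU_RW,
    };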
diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build
index de7bfb5a62..36d5ab756f 100644
--- a/hw/pci-host/meson.build
+++ b/hw/pci-host/meson.build
@@ -29,7 +29,7 @@ pci_ss.add(when: 'CONFIG_MV64361', if_true: files('mv64361.c'))
pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c'))
# HPPA devices
-pci_ss.add(when: 'CONFIG_ASTRO', if_true: files('astro.c'))
+specific_ss.add(when: 'CONFIG_ASTRO', if_true: files('astro.c'))
pci_ss.add(when: 'CONFIG_DINO', if_true: files('dino.c'))
system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss)
diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img
index e976c0cc93..9a2d54f26b 100644
--- a/pc-bios/hppa-firmware.img
+++ b/pc-bios/hppa-firmware.img
Binary files differ
diff --git a/roms/seabios-hppa b/roms/seabios-hppa
-Subproject fd5b6cf82369a1e53d68302fb6ede2b9e2afccd
+Subproject 2a23dd388fcc1068f9c4a3077e0662803743e1c
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 0d5d8e307d..d9e0e2a4dd 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -351,6 +351,7 @@ static void cortex_a8_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->midr = 0x410fc080;
cpu->reset_fpsid = 0x410330c0;
cpu->isar.mvfr0 = 0x11110222;
@@ -418,6 +419,7 @@ static void cortex_a9_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_NEON);
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
/*
* Note that A9 supports the MP extensions even for
* A9UP and single-core A9MP (which are both different
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 70ac876105..ffb8ea1c34 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -1101,10 +1101,18 @@ uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
uint32_t n;
mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
- /* True probe; this will never fault */
+ /*
+ * True probe; this will never fault. Note that our caller passes
+ * us a pointer to the end of the region, but allocation_tag_mem_probe()
+ * wants a pointer to the start. Because we know we don't span a page
+ * boundary and that allocation_tag_mem_probe() doesn't otherwise care
+ * about the size, pass in a size of 1 byte. This is simpler than
+ * adjusting the ptr to point to the start of the region and then having
+ * to adjust the returned 'mem' to get the end of the tag memory.
+ */
mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
w ? MMU_DATA_STORE : MMU_DATA_LOAD,
- size, MMU_DATA_LOAD, true, 0);
+ 1, MMU_DATA_LOAD, true, 0);
if (!mem) {
return size;
}
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 41484d8ae5..a2e49c39f9 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -2351,6 +2351,8 @@ static bool trans_SVC(DisasContext *s, arg_i *a)
static bool trans_HVC(DisasContext *s, arg_i *a)
{
+ int target_el = s->current_el == 3 ? 3 : 2;
+
if (s->current_el == 0) {
unallocated_encoding(s);
return true;
@@ -2363,7 +2365,7 @@ static bool trans_HVC(DisasContext *s, arg_i *a)
gen_helper_pre_hvc(tcg_env);
/* Architecture requires ss advance before we do the actual work */
gen_ss_advance(s);
- gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), 2);
+ gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), target_el);
return true;
}
diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h
index 6746869a3b..bb3d7ef6f7 100644
--- a/target/hppa/cpu-param.h
+++ b/target/hppa/cpu-param.h
@@ -14,7 +14,8 @@
# define TARGET_PHYS_ADDR_SPACE_BITS 32
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#else
-# define TARGET_PHYS_ADDR_SPACE_BITS 64
+/* ??? PA-8000 through 8600 have 40 bits; PA-8700 and 8900 have 44 bits. */
+# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 64
#endif
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index cecec59700..bcfed04f7c 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -31,23 +31,25 @@
basis. It's probably easier to fall back to a strong memory model. */
#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
-#define MMU_KERNEL_IDX 7
-#define MMU_KERNEL_P_IDX 8
-#define MMU_PL1_IDX 9
-#define MMU_PL1_P_IDX 10
-#define MMU_PL2_IDX 11
-#define MMU_PL2_P_IDX 12
-#define MMU_USER_IDX 13
-#define MMU_USER_P_IDX 14
-#define MMU_PHYS_IDX 15
-
+#define MMU_ABS_W_IDX 6
+#define MMU_ABS_IDX 7
+#define MMU_KERNEL_IDX 8
+#define MMU_KERNEL_P_IDX 9
+#define MMU_PL1_IDX 10
+#define MMU_PL1_P_IDX 11
+#define MMU_PL2_IDX 12
+#define MMU_PL2_P_IDX 13
+#define MMU_USER_IDX 14
+#define MMU_USER_P_IDX 15
+
+#define MMU_IDX_MMU_DISABLED(MIDX) ((MIDX) < MMU_KERNEL_IDX)
#define MMU_IDX_TO_PRIV(MIDX) (((MIDX) - MMU_KERNEL_IDX) / 2)
#define MMU_IDX_TO_P(MIDX) (((MIDX) - MMU_KERNEL_IDX) & 1)
#define PRIV_P_TO_MMU_IDX(PRIV, P) ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)
#define TARGET_INSN_START_EXTRA_WORDS 2
-/* No need to flush MMU_PHYS_IDX */
+/* No need to flush MMU_ABS*_IDX */
#define HPPA_MMU_FLUSH_MASK \
(1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX | \
1 << MMU_PL1_IDX | 1 << MMU_PL1_P_IDX | \
@@ -287,7 +289,8 @@ static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
if (env->psw & (ifetch ? PSW_C : PSW_D)) {
return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
}
- return MMU_PHYS_IDX; /* mmu disabled */
+ /* mmu disabled */
+ return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
#endif
}
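[Editor's note] As an illustration of the new index layout, here is a hypothetical, standalone compile-time check (repeating the definitions above so it compiles on its own): privilege/P pairs still occupy indexes 8-15, while both absolute-access indexes sit below MMU_KERNEL_IDX and are caught by the new predicate.

    #define MMU_ABS_W_IDX  6
    #define MMU_ABS_IDX    7
    #define MMU_KERNEL_IDX 8
    #define MMU_USER_P_IDX 15

    #define MMU_IDX_MMU_DISABLED(MIDX) ((MIDX) < MMU_KERNEL_IDX)
    #define PRIV_P_TO_MMU_IDX(PRIV, P) ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)

    _Static_assert(PRIV_P_TO_MMU_IDX(3, 1) == MMU_USER_P_IDX,
                   "user privilege with PSW.P set maps to index 15");
    _Static_assert(MMU_IDX_MMU_DISABLED(MMU_ABS_W_IDX) &&
                   MMU_IDX_MMU_DISABLED(MMU_ABS_IDX),
                   "absolute-access indexes bypass translation");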
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index 467ee7daf5..98e9d688f6 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -126,7 +126,7 @@ void hppa_cpu_do_interrupt(CPUState *cs)
env->cr[CR_IIASQ] =
hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
env->cr_back[0] =
- hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
+ hppa_form_gva_psw(old_psw, env->iasq_b, env->iaoq_b) >> 32;
} else {
env->cr[CR_IIASQ] = 0;
env->cr_back[0] = 0;
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 858ce6ec7f..08abd1a9f9 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -27,41 +27,39 @@
hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
- if (likely(extract64(addr, 58, 4) != 0xf)) {
- /* Memory address space */
- return addr & MAKE_64BIT_MASK(0, 62);
- }
- if (extract64(addr, 54, 4) != 0) {
- /* I/O address space */
- return addr | MAKE_64BIT_MASK(62, 2);
- }
- /* PDC address space */
- return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
+ /*
+ * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
+ * an algorithm in which a 62-bit absolute address is transformed to
+ * a 64-bit physical address. This must then be combined with that
+ * pictured in Figure H-11 "Physical Address Space Mapping", in which
+ * the full physical address is truncated to the N-bit physical address
+ * supported by the implementation.
+ *
+ * Since the supported physical address space is below 54 bits, the
+ * H-8 algorithm is moot and all that is left is to truncate.
+ */
+ QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
+ return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
+ /*
+ * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
+ * combined with Figure H-11, as above.
+ */
if (likely(extract32(addr, 28, 4) != 0xf)) {
/* Memory address space */
- return addr & MAKE_64BIT_MASK(0, 32);
- }
- if (extract32(addr, 24, 4) != 0) {
+ addr = (uint32_t)addr;
+ } else if (extract32(addr, 24, 4) != 0) {
/* I/O address space */
- return addr | MAKE_64BIT_MASK(32, 32);
- }
- /* PDC address space */
- return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
-}
-
-static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
-{
- if (!hppa_is_pa20(env)) {
- return addr;
- } else if (env->psw & PSW_W) {
- return hppa_abs_to_phys_pa2_w1(addr);
+ addr = (int32_t)addr;
} else {
- return hppa_abs_to_phys_pa2_w0(addr);
+ /* PDC address space */
+ addr &= MAKE_64BIT_MASK(0, 24);
+ addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
}
+ return addr;
}
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
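[Editor's note] Worked examples for the two helpers above, assuming the 40-bit TARGET_PHYS_ADDR_SPACE_BITS introduced in this series (illustrative values, not part of the patch):

    /*
     * PSW.W=1: only sign-extending truncation to the 40-bit physical space:
     *   hppa_abs_to_phys_pa2_w1(0x000000f012345678) == 0xfffffff012345678
     * (bit 39 is set, so the I/O region lands at the top of the physical map).
     *
     * PSW.W=0: the three 32-bit spaces from Figure H-10:
     *   memory: 0x23456789 -> 0x0000000023456789   (zero-extended)
     *   I/O:    0xf4000000 -> 0xfffffffff4000000   (sign-extended)
     *   PDC:    0xf0001234 -> 0xfffffff000001234   (low 24 bits kept, high
     *                                               nibble of the space set)
     */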
@@ -161,9 +159,22 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
*tlb_entry = NULL;
}
- /* Virtual translation disabled. Direct map virtual to physical. */
- if (mmu_idx == MMU_PHYS_IDX) {
- phys = addr;
+ /* Virtual translation disabled. Map absolute to physical. */
+ if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
+ switch (mmu_idx) {
+ case MMU_ABS_W_IDX:
+ phys = hppa_abs_to_phys_pa2_w1(addr);
+ break;
+ case MMU_ABS_IDX:
+ if (hppa_is_pa20(env)) {
+ phys = hppa_abs_to_phys_pa2_w0(addr);
+ } else {
+ phys = (uint32_t)addr;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
goto egress;
}
@@ -261,7 +272,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
}
egress:
- *pphys = phys = hppa_abs_to_phys(env, phys);
+ *pphys = phys;
*pprot = prot;
trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
return ret;
@@ -271,16 +282,15 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
HPPACPU *cpu = HPPA_CPU(cs);
hwaddr phys;
- int prot, excp;
+ int prot, excp, mmu_idx;
/* If the (data) mmu is disabled, bypass translation. */
/* ??? We really ought to know if the code mmu is disabled too,
in order to get the correct debugging dumps. */
- if (!(cpu->env.psw & PSW_D)) {
- return hppa_abs_to_phys(&cpu->env, addr);
- }
+ mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
+ cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
- excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
+ excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
&phys, &prot, NULL);
/* Since we're translating for debugging, the only error that is a
@@ -367,8 +377,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
/* Failure. Raise the indicated exception. */
- raise_exception_with_ior(env, excp, retaddr,
- addr, mmu_idx == MMU_PHYS_IDX);
+ raise_exception_with_ior(env, excp, retaddr, addr,
+ MMU_IDX_MMU_DISABLED(mmu_idx));
}
trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
@@ -450,7 +460,7 @@ static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
int mask_shift;
mask_shift = 2 * (r1 & 0xf);
- va_size = TARGET_PAGE_SIZE << mask_shift;
+ va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
va_b &= -va_size;
va_e = va_b + va_size - 1;
@@ -459,7 +469,14 @@ static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
ent->itree.start = va_b;
ent->itree.last = va_e;
- ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
+
+ /* Extract all 52 bits present in the page table entry. */
+ ent->pa = r1 << (TARGET_PAGE_BITS - 5);
+ /* Align per the page size. */
+ ent->pa &= TARGET_PAGE_MASK << mask_shift;
+ /* Ignore the bits beyond physical address space. */
+ ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);
+
ent->t = extract64(r2, 61, 1);
ent->d = extract64(r2, 60, 1);
ent->b = extract64(r2, 59, 1);
@@ -505,7 +522,7 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
*/
end = start & 0xf;
start &= TARGET_PAGE_MASK;
- end = TARGET_PAGE_SIZE << (2 * end);
+ end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
end = start + end - 1;
hppa_flush_tlb_range(env, start, end);
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index a0e31c0c25..7f607c3afd 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -338,7 +338,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
#ifdef CONFIG_USER_ONLY
return page_check_range(addr, 1, want);
#else
- int prot, excp;
+ int prot, excp, mmu_idx;
hwaddr phys;
trace_hppa_tlb_probe(addr, level, want);
@@ -347,7 +347,8 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
return 0;
}
- excp = hppa_get_physical_address(env, addr, level, 0, &phys,
+ mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
+ excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys,
&prot, NULL);
if (excp >= 0) {
if (env->psw & PSW_Q) {
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index bcce65d587..4a4830c3e3 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -69,19 +69,24 @@ typedef struct DisasContext {
} DisasContext;
#ifdef CONFIG_USER_ONLY
-#define UNALIGN(C) (C)->unalign
+#define UNALIGN(C) (C)->unalign
+#define MMU_DISABLED(C) false
#else
-#define UNALIGN(C) MO_ALIGN
+#define UNALIGN(C) MO_ALIGN
+#define MMU_DISABLED(C) MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
- if (val & PSW_SM_E) {
- val = (val & ~PSW_SM_E) | PSW_E;
- }
- if (val & PSW_SM_W) {
- val = (val & ~PSW_SM_W) | PSW_W;
+ /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
+ if (ctx->is_pa20) {
+ if (val & PSW_SM_W) {
+ val |= PSW_W;
+ }
+ val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
+ } else {
+ val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
}
return val;
}
@@ -1372,7 +1377,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
assert(ctx->null_cond.c == TCG_COND_NEVER);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
- ctx->mmu_idx == MMU_PHYS_IDX);
+ MMU_DISABLED(ctx));
tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
@@ -1390,7 +1395,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
assert(ctx->null_cond.c == TCG_COND_NEVER);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
- ctx->mmu_idx == MMU_PHYS_IDX);
+ MMU_DISABLED(ctx));
tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
@@ -1408,7 +1413,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
assert(ctx->null_cond.c == TCG_COND_NEVER);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
- ctx->mmu_idx == MMU_PHYS_IDX);
+ MMU_DISABLED(ctx));
tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
@@ -1426,7 +1431,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
assert(ctx->null_cond.c == TCG_COND_NEVER);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
- ctx->mmu_idx == MMU_PHYS_IDX);
+ MMU_DISABLED(ctx));
tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
@@ -2294,7 +2299,7 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a)
form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
if (a->imm) {
- level = tcg_constant_i32(a->ri);
+ level = tcg_constant_i32(a->ri & 3);
} else {
level = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
@@ -3075,7 +3080,7 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
}
form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
- a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
+ a->disp, a->sp, a->m, MMU_DISABLED(ctx));
/*
* For hppa1.1, LDCW is undefined unless aligned mod 16.
@@ -3105,7 +3110,7 @@ static bool trans_stby(DisasContext *ctx, arg_stby *a)
nullify_over(ctx);
form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
- ctx->mmu_idx == MMU_PHYS_IDX);
+ MMU_DISABLED(ctx));
val = load_gpr(ctx, a->r);
if (a->a) {
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
@@ -3139,7 +3144,7 @@ static bool trans_stdby(DisasContext *ctx, arg_stby *a)
nullify_over(ctx);
form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
- ctx->mmu_idx == MMU_PHYS_IDX);
+ MMU_DISABLED(ctx));
val = load_gpr(ctx, a->r);
if (a->a) {
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
@@ -3167,7 +3172,7 @@ static bool trans_lda(DisasContext *ctx, arg_ldst *a)
int hold_mmu_idx = ctx->mmu_idx;
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
- ctx->mmu_idx = MMU_PHYS_IDX;
+ ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
trans_ld(ctx, a);
ctx->mmu_idx = hold_mmu_idx;
return true;
@@ -3178,7 +3183,7 @@ static bool trans_sta(DisasContext *ctx, arg_ldst *a)
int hold_mmu_idx = ctx->mmu_idx;
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
- ctx->mmu_idx = MMU_PHYS_IDX;
+ ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
trans_st(ctx, a);
ctx->mmu_idx = hold_mmu_idx;
return true;
@@ -4430,7 +4435,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
ctx->mmu_idx = (ctx->tb_flags & PSW_D
? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
- : MMU_PHYS_IDX);
+ : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
/* Recover the IAOQ values from the GVA + PRIV. */
uint64_t cs_base = ctx->base.tb->cs_base;
diff --git a/tests/avocado/machine_s390_ccw_virtio.py b/tests/avocado/machine_s390_ccw_virtio.py
index e1f493bc44..ffd914ded9 100644
--- a/tests/avocado/machine_s390_ccw_virtio.py
+++ b/tests/avocado/machine_s390_ccw_virtio.py
@@ -36,8 +36,8 @@ class S390CCWVirtioMachine(QemuSystemTest):
dmesg_clear_count = 1
def clear_guest_dmesg(self):
exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; '
- 'echo dm_clear\ ' + str(self.dmesg_clear_count),
- 'dm_clear ' + str(self.dmesg_clear_count))
+ r'echo dm_clear\ ' + str(self.dmesg_clear_count),
+ r'dm_clear ' + str(self.dmesg_clear_count))
self.dmesg_clear_count += 1
def test_s390x_devices(self):
@@ -121,15 +121,15 @@ class S390CCWVirtioMachine(QemuSystemTest):
'cat /sys/bus/ccw/devices/0.1.1111/cutype',
'3832/01')
exec_command_and_wait_for_pattern(self,
- 'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
- '0x1af4')
+ r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
+ r'0x1af4')
exec_command_and_wait_for_pattern(self,
- 'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
- '0x0001')
+ r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
+ r'0x0001')
# check fid propagation
exec_command_and_wait_for_pattern(self,
- 'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
- '0x0000000c')
+ r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
+ r'0x0000000c')
# add another device
self.clear_guest_dmesg()
self.vm.cmd('device_add', driver='virtio-net-ccw',
@@ -235,7 +235,7 @@ class S390CCWVirtioMachine(QemuSystemTest):
'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done',
'virtio_gpudrmfb frame buffer device')
exec_command_and_wait_for_pattern(self,
- 'echo -e "\e[?25l" > /dev/tty0', ':/#')
+ r'echo -e "\e[?25l" > /dev/tty0', ':/#')
exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do '
'echo " The qu ick fo x j ump s o ver a laz y d og" >> fox.txt;'
'done',
diff --git a/tests/avocado/mem-addr-space-check.py b/tests/avocado/mem-addr-space-check.py
new file mode 100644
index 0000000000..be949222a4
--- /dev/null
+++ b/tests/avocado/mem-addr-space-check.py
@@ -0,0 +1,356 @@
+# Check for crash when using memory beyond the available guest processor
+# address space.
+#
+# Copyright (c) 2023 Red Hat, Inc.
+#
+# Author:
+# Ani Sinha <anisinha@redhat.com>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from avocado_qemu import QemuSystemTest
+import signal
+import time
+
+class MemAddrCheck(QemuSystemTest):
+ # after launch, in order to generate the logs from QEMU we need to
+ # wait for some time. Launching and then immediately shutting down
+ # the VM generates empty logs. A delay of 1 second is added for
+ # this reason.
+ DELAY_Q35_BOOT_SEQUENCE = 1
+
+ # First, let's test some 32-bit processors.
+ # for all 32-bit cases, pci64_hole_size is 0.
+ def test_phybits_low_pse36(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ With pse36 feature ON, a processor has 36 bits of addressing. So it can
+ access up to a maximum of 64 GiB of memory. The memory hotplug region begins
+ at the 4 GiB boundary when "above_4g_mem_size" is 0 (this would be true when
+ we have 0.5 GiB of VM memory, see pc_q35_init()). This means total
+ hotpluggable memory size is 60 GiB. Per slot, we reserve 1 GiB of memory
+ for dimm alignment for all newer machines (see enforce_aligned_dimm
+ property for pc machines and pc_get_device_memory_range()). That leaves
+ total hotpluggable actual memory size of 59 GiB. If the VM is started
+ with 0.5 GiB of memory, maxmem should be set to a maximum value of
+ 59.5 GiB to ensure that the processor can address all memory directly.
+ Note that 64-bit pci hole size is 0 in this case. If maxmem is set to
+ 59.6G, QEMU should fail to start with the message "phys-bits too low".
+ If maxmem is set to 59.5G with all other QEMU parameters identical, QEMU
+ should start fine.
+ """
+ self.vm.add_args('-S', '-machine', 'q35', '-m',
+ '512,slots=1,maxmem=59.6G',
+ '-cpu', 'pentium,pse36=on', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
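[Editor's note] The 59.6/59.5 GiB threshold used by this and the following 36-bit tests restates the docstring arithmetic:

    \[
    \mathrm{maxmem_{max}} = 64\,\mathrm{GiB} - 4\,\mathrm{GiB} - 1\,\mathrm{GiB} + 0.5\,\mathrm{GiB} = 59.5\,\mathrm{GiB}
    \]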
+ def test_phybits_low_pae(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ With pae feature ON, a processor has 36 bits of addressing. So it can
+ access up to a maximum of 64 GiB of memory. The rest is the same as the case
+ with pse36 above.
+ """
+ self.vm.add_args('-S', '-machine', 'q35', '-m',
+ '512,slots=1,maxmem=59.6G',
+ '-cpu', 'pentium,pae=on', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_pentium_pse36(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ Setting maxmem to 59.5G and making sure that QEMU can start with the
+ same options as the failing case above with pse36 cpu feature.
+ """
+ self.vm.add_args('-machine', 'q35', '-m',
+ '512,slots=1,maxmem=59.5G',
+ '-cpu', 'pentium,pse36=on', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_pentium_pae(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ Test is same as above but now with pae cpu feature turned on.
+ Setting maxmem to 59.5G and making sure that QEMU can start fine
+ with the same options as the case above.
+ """
+ self.vm.add_args('-machine', 'q35', '-m',
+ '512,slots=1,maxmem=59.5G',
+ '-cpu', 'pentium,pae=on', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_pentium2(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ Pentium2 has 36 bits of addressing, so it's the same as pentium
+ with pse36 ON.
+ """
+ self.vm.add_args('-machine', 'q35', '-m',
+ '512,slots=1,maxmem=59.5G',
+ '-cpu', 'pentium2', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_low_nonpse36(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ Pentium processor has 32 bits of addressing without pse36 or pae
+ so it can access physical address up to 4 GiB. Setting maxmem to
+ 4 GiB should make QEMU fail to start with "phys-bits too low"
+ message because the region for memory hotplug is always placed
+ above 4 GiB due to the PCI hole and simplicity.
+ """
+ self.vm.add_args('-S', '-machine', 'q35', '-m',
+ '512,slots=1,maxmem=4G',
+ '-cpu', 'pentium', '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ # Now let's test some 64-bit CPU cases.
+ def test_phybits_low_tcg_q35_70_amd(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ For q35 7.1 machines and above, there is a HT window that starts at
+ 1024 GiB and ends at 1 TiB - 1. If the max GPA falls in this range,
+ "above_4G" memory is adjusted to start at 1 TiB boundary for AMD cpus
+ in the default case. Let's test without that case for 7.0 machines.
+ For q35-7.0 machines, "above 4G" memory starts at 4G.
+ pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is defined to
+ be 40, TCG emulated CPUs have a maximum of 1 TiB (1024 GiB) of
+ directly addressable memory.
+ Hence, maxmem value at most can be
+ 1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB
+ which is equal to 987.5 GiB. Setting the value to 988 GiB should
+ make QEMU fail with the error message.
+ """
+ self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
+ '512,slots=1,maxmem=988G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
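[Editor's note] The 988/987.5 GiB threshold for the 40-bit TCG case likewise follows from the docstring above:

    \[
    \mathrm{maxmem_{max}} = 1024\,\mathrm{GiB} - 4\,\mathrm{GiB} - 1\,\mathrm{GiB} - 32\,\mathrm{GiB} + 0.5\,\mathrm{GiB} = 987.5\,\mathrm{GiB}
    \]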
+ def test_phybits_low_tcg_q35_71_amd(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ AMD_HT_START is defined to be at 1012 GiB. So for q35 machines
+ version > 7.0 and AMD cpus, instead of 1024 GiB limit for 40 bit
+ processor address space, it has to be 1012 GiB , that is 12 GiB
+ less than the case above in order to accomodate HT hole.
+ Make sure QEMU fails when maxmem size is 976 GiB (12 GiB less
+ than 988 GiB).
+ """
+ self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
+ '512,slots=1,maxmem=976G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_70_amd(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ Same as q35-7.0 AMD case except that here we check that QEMU can
+ successfully start when maxmem is < 988G.
+ """
+ self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
+ '512,slots=1,maxmem=987.5G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_71_amd(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ Same as q35-7.1 AMD case except that here we check that QEMU can
+ successfully start when maxmem is < 976G.
+ """
+ self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
+ '512,slots=1,maxmem=975.5G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_71_intel(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ Same parameters as test_phybits_low_tcg_q35_71_amd() but use
+ Intel cpu instead. QEMU should start fine in this case as
+ "above_4G" memory starts at 4G.
+ """
+ self.vm.add_args('-S', '-cpu', 'Skylake-Server',
+ '-machine', 'pc-q35-7.1', '-m',
+ '512,slots=1,maxmem=976G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_low_tcg_q35_71_amd_41bits(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ AMD processor with 41 bits. Max cpu hw address = 2 TiB.
+ By setting maxmem above 1012 GiB - 32 GiB - 4 GiB = 976 GiB, we can
+ force "above_4G" memory to start at 1 TiB for q35-7.1 machines
+ (max GPA will be above AMD_HT_START which is defined as 1012 GiB).
+
+ With pci_64_hole size at 32 GiB, in this case, maxmem should be 991.5
+ GiB with 1 GiB per slot for alignment and 0.5 GiB as non-hotplug
+ memory for the VM (1024 - 32 - 1 + 0.5). With 992 GiB, QEMU should
+ fail to start.
+ """
+ self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
+ '-machine', 'pc-q35-7.1', '-m',
+ '512,slots=1,maxmem=992G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_71_amd_41bits(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ AMD processor with 41 bits. Max cpu hw address = 2 TiB.
+ Same as above, but by setting maxmem between 976 GiB and 992 GiB,
+ QEMU should start fine.
+ """
+ self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
+ '-machine', 'pc-q35-7.1', '-m',
+ '512,slots=1,maxmem=990G',
+ '-display', 'none',
+ '-object', 'memory-backend-ram,id=mem1,size=1G',
+ '-device', 'pc-dimm,id=vm0,memdev=mem1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_low_tcg_q35_intel_cxl(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ cxl memory window starts after memory device range. Here, we use 1 GiB
+ of cxl window memory. 4G_mem end aligns at 4G. pci64_hole is 32 GiB and
+ starts after the cxl memory window.
+ So maxmem here should be at most 986 GiB, considering all memory boundary
+ alignment constraints, with 40 bits (1 TiB) of processor physical address space.
+ """
+ self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
+ '-machine', 'q35,cxl=on', '-m',
+ '512,slots=1,maxmem=987G',
+ '-display', 'none',
+ '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1',
+ '-M', 'cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=1G')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ self.vm.wait()
+ self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+ self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+ def test_phybits_ok_tcg_q35_intel_cxl(self):
+ """
+ :avocado: tags=machine:q35
+ :avocado: tags=arch:x86_64
+
+ Same as above but here we do not reserve any cxl memory window. Hence,
+ with the exact same parameters as above, QEMU should start fine even
+ with cxl enabled.
+ """
+ self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
+ '-machine', 'q35,cxl=on', '-m',
+ '512,slots=1,maxmem=987G',
+ '-display', 'none',
+ '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1')
+ self.vm.set_qmp_monitor(enabled=False)
+ self.vm.launch()
+ time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+ self.vm.shutdown()
+ self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')