author     Peter Maydell <peter.maydell@linaro.org>  2018-06-22 16:03:31 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2018-06-22 16:03:31 +0100
commit     5fce31220003bbe1b4c7faa0dbf92d131b0a413b (patch)
tree       2a7aff159e9ce1f120b691a17d8a9f8a25ab1441 /hw
parent     c52e53f429aa562539f5da2e7c21c66c6f9a8a16 (diff)
parent     6dad8260e82b69bd278685ee25209f5824360455 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20180622' into staging
target-arm queue:
 * hw/intc/arm_gicv3: fix wrong values when reading IPRIORITYR
 * target/arm: fix read of freed memory in kvm_arm_machine_init_done()
 * virt: support up to 512 CPUs
 * virt: support 256MB ECAM PCI region (for more PCI devices)
 * xlnx-zynqmp: Use Cortex-R5F, not Cortex-R5
 * mps2-tz: Implement and use the TrustZone Memory Protection Controller
 * target/arm: enforce alignment checking for v6M cores
 * xen: Don't use memory_region_init_ram_nomigrate() in pci_assign_dev_load_option_rom()
 * vl.c: Don't zero-initialize statics for serial_hds

# gpg: Signature made Fri 22 Jun 2018 13:56:00 BST
# gpg: using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg: aka "Peter Maydell <pmaydell@gmail.com>"
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20180622: (28 commits)
  xen: Don't use memory_region_init_ram_nomigrate() in pci_assign_dev_load_option_rom()
  vl.c: Don't zero-initialize statics for serial_hds
  target/arm: Strict alignment for ARMv6-M and ARMv8-M Baseline
  target/arm: Introduce ARM_FEATURE_M_MAIN
  hw/arm/mps2-tz.c: Instantiate MPCs
  hw/arm/iotkit: Wire up MPC interrupt lines
  hw/arm/iotkit: Instantiate MPC
  hw/misc/iotkit-secctl.c: Implement SECMPCINTSTATUS
  hw/misc/tz_mpc.c: Honour the BLK_LUT settings in translate
  hw/misc/tz-mpc.c: Implement correct blocked-access behaviour
  hw/misc/tz-mpc.c: Implement registers
  hw/misc/tz-mpc.c: Implement the Arm TrustZone Memory Protection Controller
  xlnx-zynqmp: Swap Cortex-R5 for Cortex-R5F
  target-arm: Add the Cortex-R5F
  hw/arm/virt: Increase max_cpus to 512
  hw/arm/virt: Use 256MB ECAM region by default
  hw/arm/virt: Add virt-3.0 machine type
  hw/arm/virt: Add a new 256MB ECAM region
  hw/arm/virt: Register two redistributor regions when necessary
  hw/arm/virt-acpi-build: Advertise one or two GICR structures
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'hw')
-rw-r--r--  hw/arm/iotkit.c | 112
-rw-r--r--  hw/arm/mps2-tz.c | 71
-rw-r--r--  hw/arm/virt-acpi-build.c | 30
-rw-r--r--  hw/arm/virt.c | 100
-rw-r--r--  hw/arm/xlnx-zcu102.c | 2
-rw-r--r--  hw/arm/xlnx-zynqmp.c | 2
-rw-r--r--  hw/intc/arm_gic_kvm.c | 4
-rw-r--r--  hw/intc/arm_gicv3.c | 12
-rw-r--r--  hw/intc/arm_gicv3_common.c | 38
-rw-r--r--  hw/intc/arm_gicv3_dist.c | 3
-rw-r--r--  hw/intc/arm_gicv3_its_kvm.c | 2
-rw-r--r--  hw/intc/arm_gicv3_kvm.c | 44
-rw-r--r--  hw/intc/arm_gicv3_redist.c | 3
-rw-r--r--  hw/misc/Makefile.objs | 1
-rw-r--r--  hw/misc/iotkit-secctl.c | 38
-rw-r--r--  hw/misc/trace-events | 8
-rw-r--r--  hw/misc/tz-mpc.c | 628
-rw-r--r--  hw/xen/xen_pt.h | 2
-rw-r--r--  hw/xen/xen_pt_graphics.c | 2
-rw-r--r--  hw/xen/xen_pt_load_rom.c | 6
20 files changed, 1023 insertions, 85 deletions
diff --git a/hw/arm/iotkit.c b/hw/arm/iotkit.c
index 234185e8f7..133d5bb34f 100644
--- a/hw/arm/iotkit.c
+++ b/hw/arm/iotkit.c
@@ -130,6 +130,19 @@ static void iotkit_init(Object *obj)
TYPE_TZ_PPC);
init_sysbus_child(obj, "apb-ppc1", &s->apb_ppc1, sizeof(s->apb_ppc1),
TYPE_TZ_PPC);
+ init_sysbus_child(obj, "mpc", &s->mpc, sizeof(s->mpc), TYPE_TZ_MPC);
+ object_initialize(&s->mpc_irq_orgate, sizeof(s->mpc_irq_orgate),
+ TYPE_OR_IRQ);
+ object_property_add_child(obj, "mpc-irq-orgate",
+ OBJECT(&s->mpc_irq_orgate), &error_abort);
+ for (i = 0; i < ARRAY_SIZE(s->mpc_irq_splitter); i++) {
+ char *name = g_strdup_printf("mpc-irq-splitter-%d", i);
+ SplitIRQ *splitter = &s->mpc_irq_splitter[i];
+
+ object_initialize(splitter, sizeof(*splitter), TYPE_SPLIT_IRQ);
+ object_property_add_child(obj, name, OBJECT(splitter), &error_abort);
+ g_free(name);
+ }
init_sysbus_child(obj, "timer0", &s->timer0, sizeof(s->timer0),
TYPE_CMSDK_APB_TIMER);
init_sysbus_child(obj, "timer1", &s->timer1, sizeof(s->timer1),
@@ -162,6 +175,12 @@ static void iotkit_exp_irq(void *opaque, int n, int level)
qemu_set_irq(s->exp_irqs[n], level);
}
+static void iotkit_mpcexp_status(void *opaque, int n, int level)
+{
+ IoTKit *s = IOTKIT(opaque);
+ qemu_set_irq(s->mpcexp_status_in[n], level);
+}
+
static void iotkit_realize(DeviceState *dev, Error **errp)
{
IoTKit *s = IOTKIT(dev);
@@ -266,15 +285,6 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
*/
make_alias(s, &s->alias3, "alias 3", 0x50000000, 0x10000000, 0x40000000);
- /* This RAM should be behind a Memory Protection Controller, but we
- * don't implement that yet.
- */
- memory_region_init_ram(&s->sram0, NULL, "iotkit.sram0", 0x00008000, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- memory_region_add_subregion(&s->container, 0x20000000, &s->sram0);
/* Security controller */
object_property_set_bool(OBJECT(&s->secctl), true, "realized", &err);
@@ -310,6 +320,48 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
qdev_connect_gpio_out_named(dev_secctl, "sec_resp_cfg", 0,
qdev_get_gpio_in(dev_splitter, 0));
+ /* This RAM lives behind the Memory Protection Controller */
+ memory_region_init_ram(&s->sram0, NULL, "iotkit.sram0", 0x00008000, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ object_property_set_link(OBJECT(&s->mpc), OBJECT(&s->sram0),
+ "downstream", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ object_property_set_bool(OBJECT(&s->mpc), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ /* Map the upstream end of the MPC into the right place... */
+ memory_region_add_subregion(&s->container, 0x20000000,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mpc),
+ 1));
+ /* ...and its register interface */
+ memory_region_add_subregion(&s->container, 0x50083000,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mpc),
+ 0));
+
+ /* We must OR together lines from the MPC splitters to go to the NVIC */
+ object_property_set_int(OBJECT(&s->mpc_irq_orgate),
+ IOTS_NUM_EXP_MPC + IOTS_NUM_MPC, "num-lines", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ object_property_set_bool(OBJECT(&s->mpc_irq_orgate), true,
+ "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ qdev_connect_gpio_out(DEVICE(&s->mpc_irq_orgate), 0,
+ qdev_get_gpio_in(DEVICE(&s->armv7m), 9));
+
/* Devices behind APB PPC0:
* 0x40000000: timer0
* 0x40001000: timer1
@@ -473,8 +525,6 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
create_unimplemented_device("NS watchdog", 0x40081000, 0x1000);
create_unimplemented_device("S watchdog", 0x50081000, 0x1000);
- create_unimplemented_device("SRAM0 MPC", 0x50083000, 0x1000);
-
for (i = 0; i < ARRAY_SIZE(s->ppc_irq_splitter); i++) {
Object *splitter = OBJECT(&s->ppc_irq_splitter[i]);
@@ -520,6 +570,46 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
g_free(gpioname);
}
+ /* Wire up the splitters for the MPC IRQs */
+ for (i = 0; i < IOTS_NUM_EXP_MPC + IOTS_NUM_MPC; i++) {
+ SplitIRQ *splitter = &s->mpc_irq_splitter[i];
+ DeviceState *dev_splitter = DEVICE(splitter);
+
+ object_property_set_int(OBJECT(splitter), 2, "num-lines", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ object_property_set_bool(OBJECT(splitter), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ if (i < IOTS_NUM_EXP_MPC) {
+ /* Splitter input is from GPIO input line */
+ s->mpcexp_status_in[i] = qdev_get_gpio_in(dev_splitter, 0);
+ qdev_connect_gpio_out(dev_splitter, 0,
+ qdev_get_gpio_in_named(dev_secctl,
+ "mpcexp_status", i));
+ } else {
+ /* Splitter input is from our own MPC */
+ qdev_connect_gpio_out_named(DEVICE(&s->mpc), "irq", 0,
+ qdev_get_gpio_in(dev_splitter, 0));
+ qdev_connect_gpio_out(dev_splitter, 0,
+ qdev_get_gpio_in_named(dev_secctl,
+ "mpc_status", 0));
+ }
+
+ qdev_connect_gpio_out(dev_splitter, 1,
+ qdev_get_gpio_in(DEVICE(&s->mpc_irq_orgate), i));
+ }
+ /* Create GPIO inputs which will pass the line state for our
+ * mpcexp_irq inputs to the correct splitter devices.
+ */
+ qdev_init_gpio_in_named(dev, iotkit_mpcexp_status, "mpcexp_status",
+ IOTS_NUM_EXP_MPC);
+
iotkit_forward_sec_resp_cfg(s);
system_clock_scale = NANOSECONDS_PER_SECOND / s->mainclk_frq;
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index c5ef95e4cc..22180c56fb 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -44,6 +44,7 @@
#include "hw/timer/cmsdk-apb-timer.h"
#include "hw/misc/mps2-scc.h"
#include "hw/misc/mps2-fpgaio.h"
+#include "hw/misc/tz-mpc.h"
#include "hw/arm/iotkit.h"
#include "hw/devices.h"
#include "net/net.h"
@@ -64,13 +65,12 @@ typedef struct {
IoTKit iotkit;
MemoryRegion psram;
- MemoryRegion ssram1;
+ MemoryRegion ssram[3];
MemoryRegion ssram1_m;
- MemoryRegion ssram23;
MPS2SCC scc;
MPS2FPGAIO fpgaio;
TZPPC ppc[5];
- UnimplementedDeviceState ssram_mpc[3];
+ TZMPC ssram_mpc[3];
UnimplementedDeviceState spi[5];
UnimplementedDeviceState i2c[4];
UnimplementedDeviceState i2s_audio;
@@ -96,16 +96,6 @@ typedef struct {
/* Main SYSCLK frequency in Hz */
#define SYSCLK_FRQ 20000000
-/* Initialize the auxiliary RAM region @mr and map it into
- * the memory map at @base.
- */
-static void make_ram(MemoryRegion *mr, const char *name,
- hwaddr base, hwaddr size)
-{
- memory_region_init_ram(mr, NULL, name, size, &error_fatal);
- memory_region_add_subregion(get_system_memory(), base, mr);
-}
-
/* Create an alias of an entire original MemoryRegion @orig
* located at @base in the memory map.
*/
@@ -245,6 +235,44 @@ static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque,
return sysbus_mmio_get_region(s, 0);
}
+static MemoryRegion *make_mpc(MPS2TZMachineState *mms, void *opaque,
+ const char *name, hwaddr size)
+{
+ TZMPC *mpc = opaque;
+ int i = mpc - &mms->ssram_mpc[0];
+ MemoryRegion *ssram = &mms->ssram[i];
+ MemoryRegion *upstream;
+ char *mpcname = g_strdup_printf("%s-mpc", name);
+ static uint32_t ramsize[] = { 0x00400000, 0x00200000, 0x00200000 };
+ static uint32_t rambase[] = { 0x00000000, 0x28000000, 0x28200000 };
+
+ memory_region_init_ram(ssram, NULL, name, ramsize[i], &error_fatal);
+
+ init_sysbus_child(OBJECT(mms), mpcname, mpc,
+ sizeof(mms->ssram_mpc[0]), TYPE_TZ_MPC);
+ object_property_set_link(OBJECT(mpc), OBJECT(ssram),
+ "downstream", &error_fatal);
+ object_property_set_bool(OBJECT(mpc), true, "realized", &error_fatal);
+ /* Map the upstream end of the MPC into system memory */
+ upstream = sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 1);
+ memory_region_add_subregion(get_system_memory(), rambase[i], upstream);
+ /* and connect its interrupt to the IoTKit */
+ qdev_connect_gpio_out_named(DEVICE(mpc), "irq", 0,
+ qdev_get_gpio_in_named(DEVICE(&mms->iotkit),
+ "mpcexp_status", i));
+
+ /* The first SSRAM is a special case as it has an alias; accesses to
+ * the alias region at 0x00400000 must also go to the MPC upstream.
+ */
+ if (i == 0) {
+ make_ram_alias(&mms->ssram1_m, "mps.ssram1_m", upstream, 0x00400000);
+ }
+
+ g_free(mpcname);
+ /* Return the register interface MR for our caller to map behind the PPC */
+ return sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 0);
+}
+
static void mps2tz_common_init(MachineState *machine)
{
MPS2TZMachineState *mms = MPS2TZ_MACHINE(machine);
@@ -306,14 +334,6 @@ static void mps2tz_common_init(MachineState *machine)
NULL, "mps.ram", 0x01000000);
memory_region_add_subregion(system_memory, 0x80000000, &mms->psram);
- /* The SSRAM memories should all be behind Memory Protection Controllers,
- * but we don't implement that yet.
- */
- make_ram(&mms->ssram1, "mps.ssram1", 0x00000000, 0x00400000);
- make_ram_alias(&mms->ssram1_m, "mps.ssram1_m", &mms->ssram1, 0x00400000);
-
- make_ram(&mms->ssram23, "mps.ssram23", 0x28000000, 0x00400000);
-
/* The overflow IRQs for all UARTs are ORed together.
* Tx, Rx and "combined" IRQs are sent to the NVIC separately.
* Create the OR gate for this.
@@ -343,12 +363,9 @@ static void mps2tz_common_init(MachineState *machine)
const PPCInfo ppcs[] = { {
.name = "apb_ppcexp0",
.ports = {
- { "ssram-mpc0", make_unimp_dev, &mms->ssram_mpc[0],
- 0x58007000, 0x1000 },
- { "ssram-mpc1", make_unimp_dev, &mms->ssram_mpc[1],
- 0x58008000, 0x1000 },
- { "ssram-mpc2", make_unimp_dev, &mms->ssram_mpc[2],
- 0x58009000, 0x1000 },
+ { "ssram-0", make_mpc, &mms->ssram_mpc[0], 0x58007000, 0x1000 },
+ { "ssram-1", make_mpc, &mms->ssram_mpc[1], 0x58008000, 0x1000 },
+ { "ssram-2", make_mpc, &mms->ssram_mpc[2], 0x58009000, 0x1000 },
},
}, {
.name = "apb_ppcexp1",
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 74f5744e87..6ea47e2588 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -150,16 +150,17 @@ static void acpi_dsdt_add_virtio(Aml *scope,
}
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
- uint32_t irq, bool use_highmem)
+ uint32_t irq, bool use_highmem, bool highmem_ecam)
{
+ int ecam_id = VIRT_ECAM_ID(highmem_ecam);
Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
int i, bus_no;
hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
- hwaddr base_ecam = memmap[VIRT_PCIE_ECAM].base;
- hwaddr size_ecam = memmap[VIRT_PCIE_ECAM].size;
+ hwaddr base_ecam = memmap[ecam_id].base;
+ hwaddr size_ecam = memmap[ecam_id].size;
int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
Aml *dev = aml_device("%s", "PCI0");
@@ -173,7 +174,7 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
/* Declare the PCI Routing Table. */
- Aml *rt_pkg = aml_package(nr_pcie_buses * PCI_NUM_PINS);
+ Aml *rt_pkg = aml_varpackage(nr_pcie_buses * PCI_NUM_PINS);
for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) {
for (i = 0; i < PCI_NUM_PINS; i++) {
int gsi = (i + bus_no) % PCI_NUM_PINS;
@@ -316,7 +317,10 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
Aml *dev_res0 = aml_device("%s", "RES0");
aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
crs = aml_resource_template();
- aml_append(crs, aml_memory32_fixed(base_ecam, size_ecam, AML_READ_WRITE));
+ aml_append(crs,
+ aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_ecam,
+ base_ecam + size_ecam - 1, 0x0000, size_ecam));
aml_append(dev_res0, aml_name_decl("_CRS", crs));
aml_append(dev, dev_res0);
aml_append(scope, dev);
@@ -573,16 +577,17 @@ build_mcfg(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
AcpiTableMcfg *mcfg;
const MemMapEntry *memmap = vms->memmap;
+ int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
int len = sizeof(*mcfg) + sizeof(mcfg->allocation[0]);
int mcfg_start = table_data->len;
mcfg = acpi_data_push(table_data, len);
- mcfg->allocation[0].address = cpu_to_le64(memmap[VIRT_PCIE_ECAM].base);
+ mcfg->allocation[0].address = cpu_to_le64(memmap[ecam_id].base);
/* Only a single allocation so no need to play with segments */
mcfg->allocation[0].pci_segment = cpu_to_le16(0);
mcfg->allocation[0].start_bus_number = 0;
- mcfg->allocation[0].end_bus_number = (memmap[VIRT_PCIE_ECAM].size
+ mcfg->allocation[0].end_bus_number = (memmap[ecam_id].size
/ PCIE_MMCFG_SIZE_MIN) - 1;
build_header(linker, table_data, (void *)(table_data->data + mcfg_start),
@@ -670,6 +675,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
if (vms->gic_version == 3) {
AcpiMadtGenericTranslator *gic_its;
+ int nb_redist_regions = virt_gicv3_redist_region_count(vms);
AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
sizeof *gicr);
@@ -678,6 +684,14 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);
+ if (nb_redist_regions == 2) {
+ gicr = acpi_data_push(table_data, sizeof(*gicr));
+ gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
+ gicr->length = sizeof(*gicr);
+ gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST2].base);
+ gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST2].size);
+ }
+
if (its_class_name() && !vmc->no_its) {
gic_its = acpi_data_push(table_data, sizeof *gic_its);
gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
@@ -757,7 +771,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
(irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
- vms->highmem);
+ vms->highmem, vms->highmem_ecam);
acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
(irqmap[VIRT_GPIO] + ARM_SPI_BASE));
acpi_dsdt_add_power_button(scope);
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 98b99cf236..742f68afca 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -149,6 +149,9 @@ static const MemMapEntry a15memmap[] = {
[VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 },
[VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
[VIRT_MEM] = { 0x40000000, RAMLIMIT_BYTES },
+ /* Additional 64 MB redist region (can contain up to 512 redistributors) */
+ [VIRT_GIC_REDIST2] = { 0x4000000000ULL, 0x4000000 },
+ [VIRT_PCIE_ECAM_HIGH] = { 0x4010000000ULL, 0x10000000 },
/* Second PCIe window, 512GB wide at the 512GB boundary */
[VIRT_PCIE_MMIO_HIGH] = { 0x8000000000ULL, 0x8000000000ULL },
};
@@ -402,13 +405,30 @@ static void fdt_add_gic_node(VirtMachineState *vms)
qemu_fdt_setprop_cell(vms->fdt, "/intc", "#size-cells", 0x2);
qemu_fdt_setprop(vms->fdt, "/intc", "ranges", NULL, 0);
if (vms->gic_version == 3) {
+ int nb_redist_regions = virt_gicv3_redist_region_count(vms);
+
qemu_fdt_setprop_string(vms->fdt, "/intc", "compatible",
"arm,gic-v3");
- qemu_fdt_setprop_sized_cells(vms->fdt, "/intc", "reg",
- 2, vms->memmap[VIRT_GIC_DIST].base,
- 2, vms->memmap[VIRT_GIC_DIST].size,
- 2, vms->memmap[VIRT_GIC_REDIST].base,
- 2, vms->memmap[VIRT_GIC_REDIST].size);
+
+ qemu_fdt_setprop_cell(vms->fdt, "/intc",
+ "#redistributor-regions", nb_redist_regions);
+
+ if (nb_redist_regions == 1) {
+ qemu_fdt_setprop_sized_cells(vms->fdt, "/intc", "reg",
+ 2, vms->memmap[VIRT_GIC_DIST].base,
+ 2, vms->memmap[VIRT_GIC_DIST].size,
+ 2, vms->memmap[VIRT_GIC_REDIST].base,
+ 2, vms->memmap[VIRT_GIC_REDIST].size);
+ } else {
+ qemu_fdt_setprop_sized_cells(vms->fdt, "/intc", "reg",
+ 2, vms->memmap[VIRT_GIC_DIST].base,
+ 2, vms->memmap[VIRT_GIC_DIST].size,
+ 2, vms->memmap[VIRT_GIC_REDIST].base,
+ 2, vms->memmap[VIRT_GIC_REDIST].size,
+ 2, vms->memmap[VIRT_GIC_REDIST2].base,
+ 2, vms->memmap[VIRT_GIC_REDIST2].size);
+ }
+
if (vms->virt) {
qemu_fdt_setprop_cells(vms->fdt, "/intc", "interrupts",
GIC_FDT_IRQ_TYPE_PPI, ARCH_GICV3_MAINT_IRQ,
@@ -510,6 +530,7 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
SysBusDevice *gicbusdev;
const char *gictype;
int type = vms->gic_version, i;
+ uint32_t nb_redist_regions = 0;
gictype = (type == 3) ? gicv3_class_name() : gic_class_name();
@@ -523,11 +544,34 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
if (!kvm_irqchip_in_kernel()) {
qdev_prop_set_bit(gicdev, "has-security-extensions", vms->secure);
}
+
+ if (type == 3) {
+ uint32_t redist0_capacity =
+ vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
+ uint32_t redist0_count = MIN(smp_cpus, redist0_capacity);
+
+ nb_redist_regions = virt_gicv3_redist_region_count(vms);
+
+ qdev_prop_set_uint32(gicdev, "len-redist-region-count",
+ nb_redist_regions);
+ qdev_prop_set_uint32(gicdev, "redist-region-count[0]", redist0_count);
+
+ if (nb_redist_regions == 2) {
+ uint32_t redist1_capacity =
+ vms->memmap[VIRT_GIC_REDIST2].size / GICV3_REDIST_SIZE;
+
+ qdev_prop_set_uint32(gicdev, "redist-region-count[1]",
+ MIN(smp_cpus - redist0_count, redist1_capacity));
+ }
+ }
qdev_init_nofail(gicdev);
gicbusdev = SYS_BUS_DEVICE(gicdev);
sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base);
if (type == 3) {
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base);
+ if (nb_redist_regions == 2) {
+ sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_REDIST2].base);
+ }
} else {
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
}
@@ -1001,10 +1045,9 @@ static void create_pcie(VirtMachineState *vms, qemu_irq *pic)
hwaddr size_mmio_high = vms->memmap[VIRT_PCIE_MMIO_HIGH].size;
hwaddr base_pio = vms->memmap[VIRT_PCIE_PIO].base;
hwaddr size_pio = vms->memmap[VIRT_PCIE_PIO].size;
- hwaddr base_ecam = vms->memmap[VIRT_PCIE_ECAM].base;
- hwaddr size_ecam = vms->memmap[VIRT_PCIE_ECAM].size;
+ hwaddr base_ecam, size_ecam;
hwaddr base = base_mmio;
- int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
+ int nr_pcie_buses;
int irq = vms->irqmap[VIRT_PCIE];
MemoryRegion *mmio_alias;
MemoryRegion *mmio_reg;
@@ -1012,12 +1055,16 @@ static void create_pcie(VirtMachineState *vms, qemu_irq *pic)
MemoryRegion *ecam_reg;
DeviceState *dev;
char *nodename;
- int i;
+ int i, ecam_id;
PCIHostState *pci;
dev = qdev_create(NULL, TYPE_GPEX_HOST);
qdev_init_nofail(dev);
+ ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
+ base_ecam = vms->memmap[ecam_id].base;
+ size_ecam = vms->memmap[ecam_id].size;
+ nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
/* Map only the first size_ecam bytes of ECAM space */
ecam_alias = g_new0(MemoryRegion, 1);
ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
@@ -1271,6 +1318,7 @@ static void machvirt_init(MachineState *machine)
int n, virt_max_cpus;
MemoryRegion *ram = g_new(MemoryRegion, 1);
bool firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0);
+ bool aarch64 = true;
/* We can probe only here because during property set
* KVM is not available yet
@@ -1322,7 +1370,8 @@ static void machvirt_init(MachineState *machine)
* many redistributors we can fit into the memory map.
*/
if (vms->gic_version == 3) {
- virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
+ virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
+ virt_max_cpus += vms->memmap[VIRT_GIC_REDIST2].size / GICV3_REDIST_SIZE;
} else {
virt_max_cpus = GIC_NCPU;
}
@@ -1385,6 +1434,8 @@ static void machvirt_init(MachineState *machine)
numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj),
&error_fatal);
+ aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL);
+
if (!vms->secure) {
object_property_set_bool(cpuobj, false, "has_el3", NULL);
}
@@ -1443,6 +1494,8 @@ static void machvirt_init(MachineState *machine)
create_uart(vms, pic, VIRT_SECURE_UART, secure_sysmem, serial_hd(1));
}
+ vms->highmem_ecam &= vms->highmem && (!firmware_loaded || aarch64);
+
create_rtc(vms, pic);
create_pcie(vms, pic);
@@ -1653,11 +1706,11 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
mc->init = machvirt_init;
- /* Start max_cpus at the maximum QEMU supports. We'll further restrict
- * it later in machvirt_init, where we have more information about the
+ /* Start with max_cpus set to 512, which is the maximum supported by KVM.
+ * The value may be reduced later when we have more information about the
* configuration of the particular instance.
*/
- mc->max_cpus = 255;
+ mc->max_cpus = 512;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_CALXEDA_XGMAC);
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_AMD_XGBE);
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
@@ -1697,7 +1750,7 @@ type_init(machvirt_machine_init);
#define VIRT_COMPAT_2_12 \
HW_COMPAT_2_12
-static void virt_2_12_instance_init(Object *obj)
+static void virt_3_0_instance_init(Object *obj)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
@@ -1740,6 +1793,8 @@ static void virt_2_12_instance_init(Object *obj)
"Set GIC version. "
"Valid values are 2, 3 and host", NULL);
+ vms->highmem_ecam = !vmc->no_highmem_ecam;
+
if (vmc->no_its) {
vms->its = false;
} else {
@@ -1765,11 +1820,26 @@ static void virt_2_12_instance_init(Object *obj)
vms->irqmap = a15irqmap;
}
+static void virt_machine_3_0_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(3, 0)
+
+static void virt_2_12_instance_init(Object *obj)
+{
+ virt_3_0_instance_init(obj);
+}
+
static void virt_machine_2_12_options(MachineClass *mc)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+ virt_machine_3_0_options(mc);
SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_12);
+ vmc->no_highmem_ecam = true;
+ mc->max_cpus = 255;
}
-DEFINE_VIRT_MACHINE_AS_LATEST(2, 12)
+DEFINE_VIRT_MACHINE(2, 12)
#define VIRT_COMPAT_2_11 \
HW_COMPAT_2_11
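For context, "redist-region-count" is a qdev array property (see the DEFINE_PROP_ARRAY hunk in arm_gicv3_common.c below): a board sets the "len-" property first, which creates the per-element properties, and then fills each element, as the create_gic() hunk above does. A minimal illustrative sketch, not part of the patch; the variable names (gicdev, redist0_count, redist1_count, redist_base, redist2_base) are placeholders:

    /* Illustrative sketch only: configure a GICv3 with two redistributor
     * regions via the new array property, then map both MMIO regions.
     */
    qdev_prop_set_uint32(gicdev, "len-redist-region-count", 2);
    qdev_prop_set_uint32(gicdev, "redist-region-count[0]", redist0_count);
    qdev_prop_set_uint32(gicdev, "redist-region-count[1]", redist1_count);
    qdev_init_nofail(gicdev);
    sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 1, redist_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 2, redist2_base);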
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
index f26fd8eb91..b6bc6a93b8 100644
--- a/hw/arm/xlnx-zcu102.c
+++ b/hw/arm/xlnx-zcu102.c
@@ -208,7 +208,7 @@ static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
- mc->desc = "Xilinx ZynqMP ZCU102 board with 4xA53s and 2xR5s based on " \
+ mc->desc = "Xilinx ZynqMP ZCU102 board with 4xA53s and 2xR5Fs based on " \
"the value of smp";
mc->init = xlnx_zcu102_init;
mc->block_default_type = IF_IDE;
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 2045b9d71e..29df35fb75 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -134,7 +134,7 @@ static void xlnx_zynqmp_create_rpu(XlnxZynqMPState *s, const char *boot_cpu,
char *name;
object_initialize(&s->rpu_cpu[i], sizeof(s->rpu_cpu[i]),
- "cortex-r5-" TYPE_ARM_CPU);
+ "cortex-r5f-" TYPE_ARM_CPU);
object_property_add_child(OBJECT(s), "rpu-cpu[*]",
OBJECT(&s->rpu_cpu[i]), &error_abort);
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 204369d0e2..86665080bd 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -558,7 +558,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
| KVM_VGIC_V2_ADDR_TYPE_DIST,
KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V2_ADDR_TYPE_DIST,
- s->dev_fd);
+ s->dev_fd, 0);
/* CPU interface for current core. Unlike arm_gic, we don't
* provide the "interface for core #N" memory regions, because
* cores with a VGIC don't have those.
@@ -568,7 +568,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
| KVM_VGIC_V2_ADDR_TYPE_CPU,
KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V2_ADDR_TYPE_CPU,
- s->dev_fd);
+ s->dev_fd, 0);
if (kvm_has_gsi_routing()) {
/* set up irq routing */
diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c
index 479c66733c..7044133e2d 100644
--- a/hw/intc/arm_gicv3.c
+++ b/hw/intc/arm_gicv3.c
@@ -373,7 +373,17 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
return;
}
- gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);
+ if (s->nb_redist_regions != 1) {
+ error_setg(errp, "VGICv3 redist region number(%d) not equal to 1",
+ s->nb_redist_regions);
+ return;
+ }
+
+ gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
gicv3_init_cpuif(s);
}
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 864b7c6515..ff326b374a 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -247,11 +247,22 @@ static const VMStateDescription vmstate_gicv3 = {
};
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
- const MemoryRegionOps *ops)
+ const MemoryRegionOps *ops, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(s);
+ int rdist_capacity = 0;
int i;
+ for (i = 0; i < s->nb_redist_regions; i++) {
+ rdist_capacity += s->redist_region_count[i];
+ }
+ if (rdist_capacity < s->num_cpu) {
+ error_setg(errp, "Capacity of the redist regions(%d) "
+ "is less than number of vcpus(%d)",
+ rdist_capacity, s->num_cpu);
+ return;
+ }
+
/* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
* GPIO array layout is thus:
* [0..N-1] spi
@@ -277,11 +288,18 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
"gicv3_dist", 0x10000);
- memory_region_init_io(&s->iomem_redist, OBJECT(s), ops ? &ops[1] : NULL, s,
- "gicv3_redist", 0x20000 * s->num_cpu);
-
sysbus_init_mmio(sbd, &s->iomem_dist);
- sysbus_init_mmio(sbd, &s->iomem_redist);
+
+ s->iomem_redist = g_new0(MemoryRegion, s->nb_redist_regions);
+ for (i = 0; i < s->nb_redist_regions; i++) {
+ char *name = g_strdup_printf("gicv3_redist_region[%d]", i);
+
+ memory_region_init_io(&s->iomem_redist[i], OBJECT(s),
+ ops ? &ops[1] : NULL, s, name,
+ s->redist_region_count[i] * GICV3_REDIST_SIZE);
+ sysbus_init_mmio(sbd, &s->iomem_redist[i]);
+ g_free(name);
+ }
}
static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
@@ -363,6 +381,13 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
}
}
+static void arm_gicv3_finalize(Object *obj)
+{
+ GICv3State *s = ARM_GICV3_COMMON(obj);
+
+ g_free(s->redist_region_count);
+}
+
static void arm_gicv3_common_reset(DeviceState *dev)
{
GICv3State *s = ARM_GICV3_COMMON(dev);
@@ -467,6 +492,8 @@ static Property arm_gicv3_common_properties[] = {
DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
+ DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
+ redist_region_count, qdev_prop_uint32, uint32_t),
DEFINE_PROP_END_OF_LIST(),
};
@@ -488,6 +515,7 @@ static const TypeInfo arm_gicv3_common_type = {
.instance_size = sizeof(GICv3State),
.class_size = sizeof(ARMGICv3CommonClass),
.class_init = arm_gicv3_common_class_init,
+ .instance_finalize = arm_gicv3_finalize,
.abstract = true,
.interfaces = (InterfaceInfo []) {
{ TYPE_ARM_LINUX_BOOT_IF },
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
index 93fe936862..53c55c5729 100644
--- a/hw/intc/arm_gicv3_dist.c
+++ b/hw/intc/arm_gicv3_dist.c
@@ -441,7 +441,8 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
int i, irq = offset - GICD_IPRIORITYR;
uint32_t value = 0;
- for (i = irq + 3; i >= irq; i--, value <<= 8) {
+ for (i = irq + 3; i >= irq; i--) {
+ value <<= 8;
value |= gicd_read_ipriorityr(s, attrs, i);
}
*data = value;
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
index eea6a73df2..271ebe461c 100644
--- a/hw/intc/arm_gicv3_its_kvm.c
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -103,7 +103,7 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
/* register the base address */
kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd);
+ KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0);
gicv3_its_init_mmio(s, NULL);
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index d8d3b25403..1e11200fe2 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -767,6 +767,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
{
GICv3State *s = KVM_ARM_GICV3(dev);
KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
+ bool multiple_redist_region_allowed;
Error *local_err = NULL;
int i;
@@ -784,7 +785,11 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
return;
}
- gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL);
+ gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
for (i = 0; i < s->num_cpu; i++) {
ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
@@ -799,6 +804,18 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
return;
}
+ multiple_redist_region_allowed =
+ kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION);
+
+ if (!multiple_redist_region_allowed && s->nb_redist_regions > 1) {
+ error_setg(errp, "Multiple VGICv3 redistributor regions are not "
+ "supported by this host kernel");
+ error_append_hint(errp, "A maximum of %d VCPUs can be used",
+ s->redist_region_count[0]);
+ return;
+ }
+
kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
0, &s->num_irq, true, &error_abort);
@@ -807,9 +824,28 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort);
kvm_arm_register_device(&s->iomem_dist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd);
- kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd);
+ KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd, 0);
+
+ if (!multiple_redist_region_allowed) {
+ kvm_arm_register_device(&s->iomem_redist[0], -1,
+ KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd, 0);
+ } else {
+ /* we register regions in reverse order as "devices" are inserted at
+ * the head of a QSLIST and the list is then popped from the head
+ * onwards by kvm_arm_machine_init_done()
+ */
+ for (i = s->nb_redist_regions - 1; i >= 0; i--) {
+ /* Address mask made of the rdist region index and count */
+ uint64_t addr_ormask =
+ i | ((uint64_t)s->redist_region_count[i] << 52);
+
+ kvm_arm_register_device(&s->iomem_redist[i], -1,
+ KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION,
+ s->dev_fd, addr_ormask);
+ }
+ }
if (kvm_has_gsi_routing()) {
/* set up irq routing */
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
index 8a8684d76e..3b0ba6de1a 100644
--- a/hw/intc/arm_gicv3_redist.c
+++ b/hw/intc/arm_gicv3_redist.c
@@ -192,7 +192,8 @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
int i, irq = offset - GICR_IPRIORITYR;
uint32_t value = 0;
- for (i = irq + 3; i >= irq; i--, value <<= 8) {
+ for (i = irq + 3; i >= irq; i--) {
+ value <<= 8;
value |= gicr_read_ipriorityr(cs, attrs, i);
}
*data = value;
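The two IPRIORITYR hunks above (distributor and redistributor) fix the same bug: in the old loop the "value <<= 8" in the for-update clause also runs after the final OR, shifting the most significant priority byte out of the 32-bit result. A small worked sketch, illustrative only and not part of the patch:

    /* With priorities p[0..3] = 0x11, 0x22, 0x33, 0x44, the old loop
     * assembles 0x44332211 and then shifts once more, returning
     * 0x33221100; the fixed form below returns the intended 0x44332211.
     */
    static uint32_t read_ipriorityr_fixed(const uint8_t *p)
    {
        uint32_t value = 0;
        int i;

        for (i = 3; i >= 0; i--) {
            value <<= 8;
            value |= p[i];
        }
        return value;
    }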
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index ecd8d61098..9350900845 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -62,6 +62,7 @@ obj-$(CONFIG_MIPS_ITU) += mips_itu.o
obj-$(CONFIG_MPS2_FPGAIO) += mps2-fpgaio.o
obj-$(CONFIG_MPS2_SCC) += mps2-scc.o
+obj-$(CONFIG_TZ_MPC) += tz-mpc.o
obj-$(CONFIG_TZ_PPC) += tz-ppc.o
obj-$(CONFIG_IOTKIT_SECCTL) += iotkit-secctl.o
diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c
index ddd1584d34..de4fd8e36d 100644
--- a/hw/misc/iotkit-secctl.c
+++ b/hw/misc/iotkit-secctl.c
@@ -139,6 +139,9 @@ static MemTxResult iotkit_secctl_s_read(void *opaque, hwaddr addr,
case A_NSCCFG:
r = s->nsccfg;
break;
+ case A_SECMPCINTSTATUS:
+ r = s->mpcintstatus;
+ break;
case A_SECPPCINTSTAT:
r = s->secppcintstat;
break;
@@ -186,7 +189,6 @@ static MemTxResult iotkit_secctl_s_read(void *opaque, hwaddr addr,
case A_APBSPPPCEXP3:
r = s->apbexp[offset_to_ppc_idx(offset)].sp;
break;
- case A_SECMPCINTSTATUS:
case A_SECMSCINTSTAT:
case A_SECMSCINTEN:
case A_NSMSCEXP:
@@ -572,6 +574,20 @@ static void iotkit_secctl_reset(DeviceState *dev)
foreach_ppc(s, iotkit_secctl_reset_ppc);
}
+static void iotkit_secctl_mpc_status(void *opaque, int n, int level)
+{
+ IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
+
+ s->mpcintstatus = deposit32(s->mpcintstatus, 0, 1, !!level);
+}
+
+static void iotkit_secctl_mpcexp_status(void *opaque, int n, int level)
+{
+ IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
+
+ s->mpcintstatus = deposit32(s->mpcintstatus, n + 16, 1, !!level);
+}
+
static void iotkit_secctl_ppc_irqstatus(void *opaque, int n, int level)
{
IoTKitSecCtlPPC *ppc = opaque;
@@ -640,6 +656,10 @@ static void iotkit_secctl_init(Object *obj)
qdev_init_gpio_out_named(dev, &s->sec_resp_cfg, "sec_resp_cfg", 1);
qdev_init_gpio_out_named(dev, &s->nsc_cfg_irq, "nsc_cfg", 1);
+ qdev_init_gpio_in_named(dev, iotkit_secctl_mpc_status, "mpc_status", 1);
+ qdev_init_gpio_in_named(dev, iotkit_secctl_mpcexp_status,
+ "mpcexp_status", IOTS_NUM_EXP_MPC);
+
memory_region_init_io(&s->s_regs, obj, &iotkit_secctl_s_ops,
s, "iotkit-secctl-s-regs", 0x1000);
memory_region_init_io(&s->ns_regs, obj, &iotkit_secctl_ns_ops,
@@ -660,6 +680,16 @@ static const VMStateDescription iotkit_secctl_ppc_vmstate = {
}
};
+static const VMStateDescription iotkit_secctl_mpcintstatus_vmstate = {
+ .name = "iotkit-secctl-mpcintstatus",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(mpcintstatus, IoTKitSecCtl),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription iotkit_secctl_vmstate = {
.name = "iotkit-secctl",
.version_id = 1,
@@ -677,7 +707,11 @@ static const VMStateDescription iotkit_secctl_vmstate = {
VMSTATE_STRUCT_ARRAY(ahbexp, IoTKitSecCtl, IOTS_NUM_AHB_EXP_PPC, 1,
iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC),
VMSTATE_END_OF_LIST()
- }
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &iotkit_secctl_mpcintstatus_vmstate,
+ NULL
+ },
};
static void iotkit_secctl_class_init(ObjectClass *klass, void *data)
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
index ec5a9f0da1..c956e1419b 100644
--- a/hw/misc/trace-events
+++ b/hw/misc/trace-events
@@ -84,6 +84,14 @@ mos6522_set_sr_int(void) "set sr_int"
mos6522_write(uint64_t addr, uint64_t val) "reg=0x%"PRIx64 " val=0x%"PRIx64
mos6522_read(uint64_t addr, unsigned val) "reg=0x%"PRIx64 " val=0x%x"
+# hw/misc/tz-mpc.c
+tz_mpc_reg_read(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs read: offset 0x%x data 0x%" PRIx64 " size %u"
+tz_mpc_reg_write(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs write: offset 0x%x data 0x%" PRIx64 " size %u"
+tz_mpc_mem_blocked_read(uint64_t addr, unsigned size, bool secure) "TZ MPC blocked read: offset 0x%" PRIx64 " size %u secure %d"
+tz_mpc_mem_blocked_write(uint64_t addr, uint64_t data, unsigned size, bool secure) "TZ MPC blocked write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"
+tz_mpc_translate(uint64_t addr, int flags, const char *idx, const char *res) "TZ MPC translate: addr 0x%" PRIx64 " flags 0x%x iommu_idx %s: %s"
+tz_mpc_iommu_notify(uint64_t addr) "TZ MPC iommu: notifying UNMAP/MAP for 0x%" PRIx64
+
# hw/misc/tz-ppc.c
tz_ppc_reset(void) "TZ PPC: reset"
tz_ppc_cfg_nonsec(int n, int level) "TZ PPC: cfg_nonsec[%d] = %d"
diff --git a/hw/misc/tz-mpc.c b/hw/misc/tz-mpc.c
new file mode 100644
index 0000000000..8316079b4b
--- /dev/null
+++ b/hw/misc/tz-mpc.c
@@ -0,0 +1,628 @@
+/*
+ * ARM AHB5 TrustZone Memory Protection Controller emulation
+ *
+ * Copyright (c) 2018 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "trace.h"
+#include "hw/sysbus.h"
+#include "hw/registerfields.h"
+#include "hw/misc/tz-mpc.h"
+
+/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
+ * non-secure transactions.
+ */
+enum {
+ IOMMU_IDX_S,
+ IOMMU_IDX_NS,
+ IOMMU_NUM_INDEXES,
+};
+
+/* Config registers */
+REG32(CTRL, 0x00)
+ FIELD(CTRL, SEC_RESP, 4, 1)
+ FIELD(CTRL, AUTOINC, 8, 1)
+ FIELD(CTRL, LOCKDOWN, 31, 1)
+REG32(BLK_MAX, 0x10)
+REG32(BLK_CFG, 0x14)
+REG32(BLK_IDX, 0x18)
+REG32(BLK_LUT, 0x1c)
+REG32(INT_STAT, 0x20)
+ FIELD(INT_STAT, IRQ, 0, 1)
+REG32(INT_CLEAR, 0x24)
+ FIELD(INT_CLEAR, IRQ, 0, 1)
+REG32(INT_EN, 0x28)
+ FIELD(INT_EN, IRQ, 0, 1)
+REG32(INT_INFO1, 0x2c)
+REG32(INT_INFO2, 0x30)
+ FIELD(INT_INFO2, HMASTER, 0, 16)
+ FIELD(INT_INFO2, HNONSEC, 16, 1)
+ FIELD(INT_INFO2, CFG_NS, 17, 1)
+REG32(INT_SET, 0x34)
+ FIELD(INT_SET, IRQ, 0, 1)
+REG32(PIDR4, 0xfd0)
+REG32(PIDR5, 0xfd4)
+REG32(PIDR6, 0xfd8)
+REG32(PIDR7, 0xfdc)
+REG32(PIDR0, 0xfe0)
+REG32(PIDR1, 0xfe4)
+REG32(PIDR2, 0xfe8)
+REG32(PIDR3, 0xfec)
+REG32(CIDR0, 0xff0)
+REG32(CIDR1, 0xff4)
+REG32(CIDR2, 0xff8)
+REG32(CIDR3, 0xffc)
+
+static const uint8_t tz_mpc_idregs[] = {
+ 0x04, 0x00, 0x00, 0x00,
+ 0x60, 0xb8, 0x1b, 0x00,
+ 0x0d, 0xf0, 0x05, 0xb1,
+};
+
+static void tz_mpc_irq_update(TZMPC *s)
+{
+ qemu_set_irq(s->irq, s->int_stat && s->int_en);
+}
+
+static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
+ uint32_t oldlut, uint32_t newlut)
+{
+ /* Called when the LUT word at lutidx has changed from oldlut to newlut;
+ * must call the IOMMU notifiers for the changed blocks.
+ */
+ IOMMUTLBEntry entry = {
+ .addr_mask = s->blocksize - 1,
+ };
+ hwaddr addr = lutidx * s->blocksize * 32;
+ int i;
+
+ for (i = 0; i < 32; i++, addr += s->blocksize) {
+ bool block_is_ns;
+
+ if (!((oldlut ^ newlut) & (1 << i))) {
+ continue;
+ }
+ /* This changes the mappings for both the S and the NS space,
+ * so we need to do four notifies: an UNMAP then a MAP for each.
+ */
+ block_is_ns = newlut & (1 << i);
+
+ trace_tz_mpc_iommu_notify(addr);
+ entry.iova = addr;
+ entry.translated_addr = addr;
+
+ entry.perm = IOMMU_NONE;
+ memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
+ memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
+
+ entry.perm = IOMMU_RW;
+ if (block_is_ns) {
+ entry.target_as = &s->blocked_io_as;
+ } else {
+ entry.target_as = &s->downstream_as;
+ }
+ memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
+ if (block_is_ns) {
+ entry.target_as = &s->downstream_as;
+ } else {
+ entry.target_as = &s->blocked_io_as;
+ }
+ memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
+ }
+}
+
+static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
+{
+ /* Auto-increment BLK_IDX if necessary */
+ if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
+ s->blk_idx++;
+ s->blk_idx %= s->blk_max;
+ }
+}
+
+static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
+ uint64_t *pdata,
+ unsigned size, MemTxAttrs attrs)
+{
+ TZMPC *s = TZ_MPC(opaque);
+ uint64_t r;
+ uint32_t offset = addr & ~0x3;
+
+ if (!attrs.secure && offset < A_PIDR4) {
+ /* NS accesses can only see the ID registers */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "TZ MPC register read: NS access to offset 0x%x\n",
+ offset);
+ r = 0;
+ goto read_out;
+ }
+
+ switch (offset) {
+ case A_CTRL:
+ r = s->ctrl;
+ break;
+ case A_BLK_MAX:
+ r = s->blk_max;
+ break;
+ case A_BLK_CFG:
+ /* We are never in "init in progress state", so this just indicates
+ * the block size. s->blocksize == (1 << BLK_CFG + 5), so
+ * BLK_CFG == ctz32(s->blocksize) - 5
+ */
+ r = ctz32(s->blocksize) - 5;
+ break;
+ case A_BLK_IDX:
+ r = s->blk_idx;
+ break;
+ case A_BLK_LUT:
+ r = s->blk_lut[s->blk_idx];
+ tz_mpc_autoinc_idx(s, size);
+ break;
+ case A_INT_STAT:
+ r = s->int_stat;
+ break;
+ case A_INT_EN:
+ r = s->int_en;
+ break;
+ case A_INT_INFO1:
+ r = s->int_info1;
+ break;
+ case A_INT_INFO2:
+ r = s->int_info2;
+ break;
+ case A_PIDR4:
+ case A_PIDR5:
+ case A_PIDR6:
+ case A_PIDR7:
+ case A_PIDR0:
+ case A_PIDR1:
+ case A_PIDR2:
+ case A_PIDR3:
+ case A_CIDR0:
+ case A_CIDR1:
+ case A_CIDR2:
+ case A_CIDR3:
+ r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
+ break;
+ case A_INT_CLEAR:
+ case A_INT_SET:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "TZ MPC register read: write-only offset 0x%x\n",
+ offset);
+ r = 0;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "TZ MPC register read: bad offset 0x%x\n", offset);
+ r = 0;
+ break;
+ }
+
+ if (size != 4) {
+ /* None of our registers are read-sensitive (except BLK_LUT,
+ * which can special case the "size not 4" case), so just
+ * pull the right bytes out of the word read result.
+ */
+ r = extract32(r, (addr & 3) * 8, size * 8);
+ }
+
+read_out:
+ trace_tz_mpc_reg_read(addr, r, size);
+ *pdata = r;
+ return MEMTX_OK;
+}
+
+static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
+ uint64_t value,
+ unsigned size, MemTxAttrs attrs)
+{
+ TZMPC *s = TZ_MPC(opaque);
+ uint32_t offset = addr & ~0x3;
+
+ trace_tz_mpc_reg_write(addr, value, size);
+
+ if (!attrs.secure && offset < A_PIDR4) {
+ /* NS accesses can only see the ID registers */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "TZ MPC register write: NS access to offset 0x%x\n",
+ offset);
+ return MEMTX_OK;
+ }
+
+ if (size != 4) {
+ /* Expand the byte or halfword write to a full word size.
+ * In most cases we can do this with zeroes; the exceptions
+ * are CTRL, BLK_IDX and BLK_LUT.
+ */
+ uint32_t oldval;
+
+ switch (offset) {
+ case A_CTRL:
+ oldval = s->ctrl;
+ break;
+ case A_BLK_IDX:
+ oldval = s->blk_idx;
+ break;
+ case A_BLK_LUT:
+ oldval = s->blk_lut[s->blk_idx];
+ break;
+ default:
+ oldval = 0;
+ break;
+ }
+ value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
+ }
+
+ if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
+ (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
+ /* Lockdown mode makes these three registers read-only, and
+ * the only way out of it is to reset the device.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
+ "while MPC is in lockdown mode\n", offset);
+ return MEMTX_OK;
+ }
+
+ switch (offset) {
+ case A_CTRL:
+ /* We don't implement the 'data gating' feature so all other bits
+ * are reserved and we make them RAZ/WI.
+ */
+ s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
+ R_CTRL_AUTOINC_MASK |
+ R_CTRL_LOCKDOWN_MASK);
+ break;
+ case A_BLK_IDX:
+ s->blk_idx = value % s->blk_max;
+ break;
+ case A_BLK_LUT:
+ tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
+ s->blk_lut[s->blk_idx] = value;
+ tz_mpc_autoinc_idx(s, size);
+ break;
+ case A_INT_CLEAR:
+ if (value & R_INT_CLEAR_IRQ_MASK) {
+ s->int_stat = 0;
+ tz_mpc_irq_update(s);
+ }
+ break;
+ case A_INT_EN:
+ s->int_en = value & R_INT_EN_IRQ_MASK;
+ tz_mpc_irq_update(s);
+ break;
+ case A_INT_SET:
+ if (value & R_INT_SET_IRQ_MASK) {
+ s->int_stat = R_INT_STAT_IRQ_MASK;
+ tz_mpc_irq_update(s);
+ }
+ break;
+ case A_PIDR4:
+ case A_PIDR5:
+ case A_PIDR6:
+ case A_PIDR7:
+ case A_PIDR0:
+ case A_PIDR1:
+ case A_PIDR2:
+ case A_PIDR3:
+ case A_CIDR0:
+ case A_CIDR1:
+ case A_CIDR2:
+ case A_CIDR3:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "TZ MPC register write: read-only offset 0x%x\n", offset);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "TZ MPC register write: bad offset 0x%x\n", offset);
+ break;
+ }
+
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps tz_mpc_reg_ops = {
+ .read_with_attrs = tz_mpc_reg_read,
+ .write_with_attrs = tz_mpc_reg_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 4,
+};
+
+static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
+{
+ /* Return the cfg_ns bit from the LUT for the specified address */
+ hwaddr blknum = addr / s->blocksize;
+ hwaddr blkword = blknum / 32;
+ uint32_t blkbit = 1U << (blknum % 32);
+
+ /* This would imply the address was larger than the size we
+ * defined this memory region to be, so it can't happen.
+ */
+ assert(blkword < s->blk_max);
+ return s->blk_lut[blkword] & blkbit;
+}
+
+static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
+{
+ /* Handle a blocked transaction: raise IRQ, capture info, etc */
+ if (!s->int_stat) {
+ /* First blocked transfer: capture information into INT_INFO1 and
+ * INT_INFO2. Subsequent transfers are still blocked but don't
+ * capture information until the guest clears the interrupt.
+ */
+
+ s->int_info1 = addr;
+ s->int_info2 = 0;
+ s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
+ attrs.requester_id & 0xffff);
+ s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
+ ~attrs.secure);
+ s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
+ tz_mpc_cfg_ns(s, addr));
+ s->int_stat |= R_INT_STAT_IRQ_MASK;
+ tz_mpc_irq_update(s);
+ }
+
+ /* Generate bus error if desired; otherwise RAZ/WI */
+ return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
+}
+
+/* Accesses only reach these read and write functions if the MPC is
+ * blocking them; non-blocked accesses go directly to the downstream
+ * memory region without passing through this code.
+ */
+static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
+ uint64_t *pdata,
+ unsigned size, MemTxAttrs attrs)
+{
+ TZMPC *s = TZ_MPC(opaque);
+
+ trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);
+
+ *pdata = 0;
+ return tz_mpc_handle_block(s, addr, attrs);
+}
+
+static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
+ uint64_t value,
+ unsigned size, MemTxAttrs attrs)
+{
+ TZMPC *s = TZ_MPC(opaque);
+
+ trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);
+
+ return tz_mpc_handle_block(s, addr, attrs);
+}
+
+static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
+ .read_with_attrs = tz_mpc_mem_blocked_read,
+ .write_with_attrs = tz_mpc_mem_blocked_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 8,
+};
+
+static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
+ hwaddr addr, IOMMUAccessFlags flags,
+ int iommu_idx)
+{
+ TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
+ bool ok;
+
+ IOMMUTLBEntry ret = {
+ .iova = addr & ~(s->blocksize - 1),
+ .translated_addr = addr & ~(s->blocksize - 1),
+ .addr_mask = s->blocksize - 1,
+ .perm = IOMMU_RW,
+ };
+
+ /* Look at the per-block configuration for this address, and
+ * return a TLB entry directing the transaction at either
+ * downstream_as or blocked_io_as, as appropriate.
+ * If the LUT cfg_ns bit is 1, only non-secure transactions
+ * may pass. If the bit is 0, only secure transactions may pass.
+ */
+ ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);
+
+ trace_tz_mpc_translate(addr, flags,
+ iommu_idx == IOMMU_IDX_S ? "S" : "NS",
+ ok ? "pass" : "block");
+
+ ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
+ return ret;
+}
+
+static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
+{
+ /* We treat unspecified attributes like secure. Transactions with
+ * unspecified attributes come from places like
+ * cpu_physical_memory_write_rom() for initial image load, and we want
+ * those to pass through the from-reset "everything is secure" config.
+ * All the real during-emulation transactions from the CPU will
+ * specify attributes.
+ */
+ return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
+}
+
+static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
+{
+ return IOMMU_NUM_INDEXES;
+}
+
+static void tz_mpc_reset(DeviceState *dev)
+{
+ TZMPC *s = TZ_MPC(dev);
+
+ s->ctrl = 0x00000100;
+ s->blk_idx = 0;
+ s->int_stat = 0;
+ s->int_en = 1;
+ s->int_info1 = 0;
+ s->int_info2 = 0;
+
+ memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
+}
+
+static void tz_mpc_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ TZMPC *s = TZ_MPC(obj);
+
+ qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
+}
+
+static void tz_mpc_realize(DeviceState *dev, Error **errp)
+{
+ Object *obj = OBJECT(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ TZMPC *s = TZ_MPC(dev);
+ uint64_t size;
+
+ /* We can't create the upstream end of the port until realize,
+ * as we don't know the size of the MR used as the downstream until then.
+ * We insist on having a downstream, to avoid complicating the code
+ * with handling the "don't know how big this is" case. It's easy
+ * enough for the user to create an unimplemented_device as downstream
+ * if they have nothing else to plug into this.
+ */
+ if (!s->downstream) {
+ error_setg(errp, "MPC 'downstream' link not set");
+ return;
+ }
+
+ size = memory_region_size(s->downstream);
+
+ memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
+ TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
+ obj, "tz-mpc-upstream", size);
+
+ /* In real hardware the block size is configurable. In QEMU we could
+ * make it configurable but will need it to be at least as big as the
+ * target page size so we can execute out of the resulting MRs. Guest
+ * software is supposed to check the block size using the BLK_CFG
+ * register, so make it fixed at the page size.
+ */
+ s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
+ if (size % s->blocksize != 0) {
+ error_setg(errp,
+ "MPC 'downstream' size %" PRId64
+ " is not a multiple of %" HWADDR_PRIx " bytes",
+ size, s->blocksize);
+ object_unref(OBJECT(&s->upstream));
+ return;
+ }
+
+ /* BLK_MAX is the max value of BLK_IDX, which indexes an array of 32-bit
+ * words, each bit of which indicates one block.
+ */
+ s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);
+
+ memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
+ s, "tz-mpc-regs", 0x1000);
+ sysbus_init_mmio(sbd, &s->regmr);
+
+ sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));
+
+ /* This memory region is not exposed to users of this device as a
+ * sysbus MMIO region, but is instead used internally as something
+ * that our IOMMU translate function might direct accesses to.
+ */
+ memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
+ s, "tz-mpc-blocked-io", size);
+
+ address_space_init(&s->downstream_as, s->downstream,
+ "tz-mpc-downstream");
+ address_space_init(&s->blocked_io_as, &s->blocked_io,
+ "tz-mpc-blocked-io");
+
+ s->blk_lut = g_new(uint32_t, s->blk_max);
+}
+
+static int tz_mpc_post_load(void *opaque, int version_id)
+{
+ TZMPC *s = TZ_MPC(opaque);
+
+ /* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
+ if (s->blk_idx >= s->blk_max) {
+ return -1;
+ }
+ return 0;
+}
+
+static const VMStateDescription tz_mpc_vmstate = {
+ .name = "tz-mpc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = tz_mpc_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(ctrl, TZMPC),
+ VMSTATE_UINT32(blk_idx, TZMPC),
+ VMSTATE_UINT32(int_stat, TZMPC),
+ VMSTATE_UINT32(int_en, TZMPC),
+ VMSTATE_UINT32(int_info1, TZMPC),
+ VMSTATE_UINT32(int_info2, TZMPC),
+ VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
+ 0, vmstate_info_uint32, uint32_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property tz_mpc_properties[] = {
+ DEFINE_PROP_LINK("downstream", TZMPC, downstream,
+ TYPE_MEMORY_REGION, MemoryRegion *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void tz_mpc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = tz_mpc_realize;
+ dc->vmsd = &tz_mpc_vmstate;
+ dc->reset = tz_mpc_reset;
+ dc->props = tz_mpc_properties;
+}
+
+static const TypeInfo tz_mpc_info = {
+ .name = TYPE_TZ_MPC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(TZMPC),
+ .instance_init = tz_mpc_init,
+ .class_init = tz_mpc_class_init,
+};
+
+static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
+ void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = tz_mpc_translate;
+ imrc->attrs_to_index = tz_mpc_attrs_to_index;
+ imrc->num_indexes = tz_mpc_num_indexes;
+}
+
+static const TypeInfo tz_mpc_iommu_memory_region_info = {
+ .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
+ .parent = TYPE_IOMMU_MEMORY_REGION,
+ .class_init = tz_mpc_iommu_memory_region_class_init,
+};
+
+static void tz_mpc_register_types(void)
+{
+ type_register_static(&tz_mpc_info);
+ type_register_static(&tz_mpc_iommu_memory_region_info);
+}
+
+type_init(tz_mpc_register_types);
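The new device above is wired up by the iotkit.c and mps2-tz.c hunks earlier in this patch. For reference, the minimal pattern for putting a RAM behind an MPC looks roughly like the following sketch; it is illustrative only: the field names, addresses and the IRQ destination (blocked_irq_in) are placeholders, and init_sysbus_child() is the board-local helper used in the hunks above:

    /* Illustrative sketch, following make_mpc() in the mps2-tz.c hunk:
     * create the RAM, link it as the MPC's "downstream", realize the MPC,
     * then map MMIO region 1 (the upstream end) where the RAM should
     * appear and MMIO region 0 (the register interface) into the
     * system address space (or behind a PPC).
     */
    memory_region_init_ram(&s->ram, NULL, "board.ram", 0x00400000,
                           &error_fatal);
    init_sysbus_child(OBJECT(s), "ram-mpc", &s->mpc, sizeof(s->mpc),
                      TYPE_TZ_MPC);
    object_property_set_link(OBJECT(&s->mpc), OBJECT(&s->ram),
                             "downstream", &error_fatal);
    object_property_set_bool(OBJECT(&s->mpc), true, "realized", &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0x20000000,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mpc),
                                                       1));
    memory_region_add_subregion(get_system_memory(), 0x58007000,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mpc),
                                                       0));
    /* The "irq" output signals blocked transactions. */
    qdev_connect_gpio_out_named(DEVICE(&s->mpc), "irq", 0, blocked_irq_in);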
diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h
index aa39a9aa5f..dbee3308fd 100644
--- a/hw/xen/xen_pt.h
+++ b/hw/xen/xen_pt.h
@@ -319,7 +319,7 @@ static inline bool xen_pt_has_msix_mapping(XenPCIPassthroughState *s, int bar)
}
extern void *pci_assign_dev_load_option_rom(PCIDevice *dev,
- struct Object *owner, int *size,
+ int *size,
unsigned int domain,
unsigned int bus, unsigned int slot,
unsigned int function);
diff --git a/hw/xen/xen_pt_graphics.c b/hw/xen/xen_pt_graphics.c
index 0f4c8d77e2..135c8df1e7 100644
--- a/hw/xen/xen_pt_graphics.c
+++ b/hw/xen/xen_pt_graphics.c
@@ -132,7 +132,7 @@ int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev)
static void *get_vgabios(XenPCIPassthroughState *s, int *size,
XenHostPCIDevice *dev)
{
- return pci_assign_dev_load_option_rom(&s->dev, OBJECT(&s->dev), size,
+ return pci_assign_dev_load_option_rom(&s->dev, size,
dev->domain, dev->bus,
dev->dev, dev->func);
}
diff --git a/hw/xen/xen_pt_load_rom.c b/hw/xen/xen_pt_load_rom.c
index 71063c4d79..e6a86ca818 100644
--- a/hw/xen/xen_pt_load_rom.c
+++ b/hw/xen/xen_pt_load_rom.c
@@ -19,7 +19,7 @@
* load the corresponding ROM data to RAM. If an error occurs while loading an
* option ROM, we just ignore that option ROM and continue with the next one.
*/
-void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
+void *pci_assign_dev_load_option_rom(PCIDevice *dev,
int *size, unsigned int domain,
unsigned int bus, unsigned int slot,
unsigned int function)
@@ -29,6 +29,7 @@ void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
uint8_t val;
struct stat st;
void *ptr = NULL;
+ Object *owner = OBJECT(dev);
/* If loading ROM from file, pci handles it */
if (dev->romfile || !dev->rom_bar) {
@@ -59,8 +60,7 @@ void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
fseek(fp, 0, SEEK_SET);
snprintf(name, sizeof(name), "%s.rom", object_get_typename(owner));
- memory_region_init_ram_nomigrate(&dev->rom, owner, name, st.st_size, &error_abort);
- vmstate_register_ram(&dev->rom, &dev->qdev);
+ memory_region_init_ram(&dev->rom, owner, name, st.st_size, &error_abort);
ptr = memory_region_get_ram_ptr(&dev->rom);
memset(ptr, 0xff, st.st_size);