Diffstat (limited to 'hw')
-rw-r--r--  hw/acpi/ich9.c | 14
-rw-r--r--  hw/acpi/piix4.c | 10
-rw-r--r--  hw/alpha/dp264.c | 7
-rw-r--r--  hw/alpha/typhoon.c | 8
-rw-r--r--  hw/arm/Makefile.objs | 1
-rw-r--r--  hw/arm/boot.c | 34
-rw-r--r--  hw/arm/nseries.c | 5
-rw-r--r--  hw/arm/omap_sx1.c | 2
-rw-r--r--  hw/arm/pxa2xx.c | 248
-rw-r--r--  hw/arm/pxa2xx_pic.c | 2
-rw-r--r--  hw/arm/sysbus-fdt.c | 174
-rw-r--r--  hw/arm/vexpress.c | 2
-rw-r--r--  hw/arm/virt-acpi-build.c | 43
-rw-r--r--  hw/arm/virt.c | 166
-rw-r--r--  hw/block/fdc.c | 332
-rw-r--r--  hw/block/pflash_cfi01.c | 204
-rw-r--r--  hw/char/parallel.c | 25
-rw-r--r--  hw/char/serial.c | 41
-rw-r--r--  hw/core/machine.c | 9
-rw-r--r--  hw/core/nmi.c | 20
-rw-r--r--  hw/core/qdev-properties-system.c | 5
-rw-r--r--  hw/display/Makefile.objs | 4
-rw-r--r--  hw/display/cg3.c | 2
-rw-r--r--  hw/display/exynos4210_fimd.c | 22
-rw-r--r--  hw/display/framebuffer.c | 4
-rw-r--r--  hw/display/g364fb.c | 3
-rw-r--r--  hw/display/qxl.c | 11
-rw-r--r--  hw/display/sm501.c | 2
-rw-r--r--  hw/display/tc6393xb.c | 2
-rw-r--r--  hw/display/tcx.c | 3
-rw-r--r--  hw/display/vga-pci.c | 97
-rw-r--r--  hw/display/vga.c | 11
-rw-r--r--  hw/display/vga_int.h | 6
-rw-r--r--  hw/display/virtio-gpu-pci.c | 68
-rw-r--r--  hw/display/virtio-gpu.c | 918
-rw-r--r--  hw/display/virtio-vga.c | 175
-rw-r--r--  hw/display/vmware_vga.c | 2
-rw-r--r--  hw/dma/rc4030.c | 462
-rw-r--r--  hw/gpio/pl061.c | 2
-rw-r--r--  hw/i386/acpi-build.c | 1
-rw-r--r--  hw/i386/pc.c | 36
-rw-r--r--  hw/i386/pc_piix.c | 13
-rw-r--r--  hw/i386/pc_q35.c | 7
-rw-r--r--  hw/ide/core.c | 32
-rw-r--r--  hw/ide/macio.c | 271
-rw-r--r--  hw/ide/pci.c | 21
-rw-r--r--  hw/input/pckbd.c | 22
-rw-r--r--  hw/input/ps2.c | 11
-rw-r--r--  hw/intc/Makefile.objs | 1
-rw-r--r--  hw/intc/apic.c | 9
-rw-r--r--  hw/intc/apic_common.c | 10
-rw-r--r--  hw/intc/arm_gic.c | 2
-rw-r--r--  hw/intc/arm_gicv2m.c | 192
-rw-r--r--  hw/intc/exynos4210_gic.c | 7
-rw-r--r--  hw/isa/i82378.c | 7
-rw-r--r--  hw/isa/isa-bus.c | 29
-rw-r--r--  hw/isa/lpc_ich9.c | 34
-rw-r--r--  hw/lm32/lm32_boards.c | 10
-rw-r--r--  hw/lm32/milkymist.c | 5
-rw-r--r--  hw/mips/Makefile.objs | 3
-rw-r--r--  hw/mips/mips_jazz.c | 53
-rw-r--r--  hw/mips/mips_malta.c | 15
-rw-r--r--  hw/misc/macio/macio.c | 71
-rw-r--r--  hw/net/Makefile.objs | 1
-rw-r--r--  hw/net/cadence_gem.c | 2
-rw-r--r--  hw/net/dp8393x.c | 369
-rw-r--r--  hw/net/e1000.c | 11
-rw-r--r--  hw/net/pcnet.c | 8
-rw-r--r--  hw/net/rocker/qmp-norocker.c | 50
-rw-r--r--  hw/net/rocker/rocker.c | 68
-rw-r--r--  hw/net/rocker/rocker_fp.c | 29
-rw-r--r--  hw/net/rocker/rocker_fp.h | 2
-rw-r--r--  hw/net/rocker/rocker_hw.h | 1
-rw-r--r--  hw/net/rocker/rocker_of_dpa.c | 312
-rw-r--r--  hw/net/rtl8139.c | 11
-rw-r--r--  hw/net/vmxnet3.c | 12
-rw-r--r--  hw/nvram/fw_cfg.c | 55
-rw-r--r--  hw/pci-host/pam.c | 20
-rw-r--r--  hw/pci-host/piix.c | 54
-rw-r--r--  hw/pci-host/q35.c | 142
-rw-r--r--  hw/pci/msi.c | 4
-rw-r--r--  hw/pci/pci-stub.c | 14
-rw-r--r--  hw/pci/pci.c | 4
-rw-r--r--  hw/pci/pcie_aer.c | 39
-rw-r--r--  hw/ppc/Makefile.objs | 2
-rw-r--r--  hw/ppc/e500.c | 1
-rw-r--r--  hw/ppc/mac_newworld.c | 2
-rw-r--r--  hw/ppc/mac_oldworld.c | 2
-rw-r--r--  hw/ppc/prep.c | 5
-rw-r--r--  hw/ppc/spapr.c | 49
-rw-r--r--  hw/ppc/spapr_drc.c | 744
-rw-r--r--  hw/ppc/spapr_events.c | 338
-rw-r--r--  hw/ppc/spapr_iommu.c | 46
-rw-r--r--  hw/ppc/spapr_pci.c | 513
-rw-r--r--  hw/ppc/spapr_rtas.c | 361
-rw-r--r--  hw/ppc/spapr_vio.c | 2
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 1
-rw-r--r--  hw/s390x/s390-virtio.c | 1
-rw-r--r--  hw/s390x/virtio-ccw.c | 64
-rw-r--r--  hw/s390x/virtio-ccw.h | 18
-rw-r--r--  hw/scsi/scsi-bus.c | 11
-rw-r--r--  hw/sd/pxa2xx_mmci.c | 68
-rw-r--r--  hw/sh4/r2d.c | 12
-rw-r--r--  hw/sparc/sun4m.c | 8
-rw-r--r--  hw/sparc64/sun4u.c | 2
-rw-r--r--  hw/timer/arm_timer.c | 6
-rw-r--r--  hw/timer/hpet.c | 11
-rw-r--r--  hw/timer/mc146818rtc.c | 23
-rw-r--r--  hw/unicore32/puv3.c | 8
-rw-r--r--  hw/usb/hcd-ohci.c | 11
-rw-r--r--  hw/usb/redirect.c | 34
-rw-r--r--  hw/vfio/Makefile.objs | 2
-rw-r--r--  hw/vfio/calxeda-xgmac.c | 55
-rw-r--r--  hw/vfio/platform.c | 615
-rw-r--r--  hw/virtio/dataplane/vring.c | 2
-rw-r--r--  hw/virtio/vhost.c | 9
-rw-r--r--  hw/virtio/virtio-pci.h | 14
-rw-r--r--  hw/virtio/virtio.c | 16
-rw-r--r--  hw/watchdog/Makefile.objs | 1
-rw-r--r--  hw/watchdog/watchdog.c | 10
-rw-r--r--  hw/watchdog/wdt_diag288.c | 122
-rw-r--r--  hw/xen/xen_backend.c | 4
-rw-r--r--  hw/xen/xen_pt.c | 51
-rw-r--r--  hw/xen/xen_pt.h | 7
-rw-r--r--  hw/xen/xen_pt_config_init.c | 235
-rw-r--r--  hw/xen/xen_pt_msi.c | 12
126 files changed, 6856 insertions, 1858 deletions
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index 799351ea44..8a64ffb38f 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -94,7 +94,8 @@ static void ich9_smi_writel(void *opaque, hwaddr addr, uint64_t val,
ICH9LPCPMRegs *pm = opaque;
switch (addr) {
case 0:
- pm->smi_en = val;
+ pm->smi_en &= ~pm->smi_en_wmask;
+ pm->smi_en |= (val & pm->smi_en_wmask);
break;
}
}
@@ -151,6 +152,7 @@ static const VMStateDescription vmstate_memhp_state = {
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
+ .needed = vmstate_test_use_memhp,
.fields = (VMStateField[]) {
VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, ICH9LPCPMRegs),
VMSTATE_END_OF_LIST()
@@ -174,12 +176,9 @@ const VMStateDescription vmstate_ich9_pm = {
VMSTATE_UINT32(smi_sts, ICH9LPCPMRegs),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_memhp_state,
- .needed = vmstate_test_use_memhp,
- },
- VMSTATE_END_OF_LIST()
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_memhp_state,
+ NULL
}
};
@@ -198,6 +197,7 @@ static void pm_reset(void *opaque)
* support SMM mode. */
pm->smi_en |= ICH9_PMIO_SMI_EN_APMC_EN;
}
+ pm->smi_en_wmask = ~0;
acpi_update_sci(&pm->acpi_regs, pm->irq);
}
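
Note: this hunk and the piix4.c one below follow the same migration-code conversion: the .needed predicate moves out of the old VMStateSubsection wrapper and into the subsection's own VMStateDescription, and the parent now lists its subsections as a NULL-terminated array of pointers. A minimal sketch of the converted shape (the device, state struct and field names are illustrative, not taken from this patch):

    /* Subsection: only put on the wire when the predicate returns true. */
    static bool foo_ext_needed(void *opaque)
    {
        FooState *s = opaque;        /* hypothetical device state */

        return s->ext_feature_enabled;
    }

    static const VMStateDescription vmstate_foo_ext = {
        .name = "foo/ext",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = foo_ext_needed,    /* previously a VMStateSubsection field */
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(ext_reg, FooState),
            VMSTATE_END_OF_LIST()
        }
    };

    static const VMStateDescription vmstate_foo = {
        .name = "foo",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(reg, FooState),
            VMSTATE_END_OF_LIST()
        },
        .subsections = (const VMStateDescription*[]) {
            &vmstate_foo_ext,        /* NULL-terminated pointer list */
            NULL
        }
    };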
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index b730ca6ced..3bd1d5a865 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -260,6 +260,7 @@ static const VMStateDescription vmstate_memhp_state = {
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
+ .needed = vmstate_test_use_memhp,
.fields = (VMStateField[]) {
VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, PIIX4PMState),
VMSTATE_END_OF_LIST()
@@ -298,12 +299,9 @@ static const VMStateDescription vmstate_acpi = {
vmstate_test_use_acpi_pci_hotplug),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_memhp_state,
- .needed = vmstate_test_use_memhp,
- },
- VMSTATE_END_OF_LIST()
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_memhp_state,
+ NULL
}
};
diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c
index 9fe7e8b5cb..f86e7bb830 100644
--- a/hw/alpha/dp264.c
+++ b/hw/alpha/dp264.c
@@ -55,7 +55,7 @@ static void clipper_init(MachineState *machine)
ISABus *isa_bus;
qemu_irq rtc_irq;
long size, i;
- const char *palcode_filename;
+ char *palcode_filename;
uint64_t palcode_entry, palcode_low, palcode_high;
uint64_t kernel_entry, kernel_low, kernel_high;
@@ -101,8 +101,8 @@ static void clipper_init(MachineState *machine)
/* Load PALcode. Given that this is not "real" cpu palcode,
but one explicitly written for the emulation, we might as
well load it directly from an ELF image. */
- palcode_filename = (bios_name ? bios_name : "palcode-clipper");
- palcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, palcode_filename);
+ palcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
+ bios_name ? bios_name : "palcode-clipper");
if (palcode_filename == NULL) {
hw_error("no palcode provided\n");
exit(1);
@@ -114,6 +114,7 @@ static void clipper_init(MachineState *machine)
hw_error("could not load palcode '%s'\n", palcode_filename);
exit(1);
}
+ g_free(palcode_filename);
/* Start all cpus at the PALcode RESET entry point. */
for (i = 0; i < smp_cpus; ++i) {
diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c
index 7df842dff7..421162e1d4 100644
--- a/hw/alpha/typhoon.c
+++ b/hw/alpha/typhoon.c
@@ -841,7 +841,7 @@ PCIBus *typhoon_init(ram_addr_t ram_size, ISABus **isa_bus,
}
}
- *p_rtc_irq = *qemu_allocate_irqs(typhoon_set_timer_irq, s, 1);
+ *p_rtc_irq = qemu_allocate_irq(typhoon_set_timer_irq, s, 0);
/* Main memory region, 0x00.0000.0000. Real hardware supports 32GB,
but the address space hole reserved at this point is 8TB. */
@@ -918,11 +918,11 @@ PCIBus *typhoon_init(ram_addr_t ram_size, ISABus **isa_bus,
/* Init the ISA bus. */
/* ??? Technically there should be a cy82c693ub pci-isa bridge. */
{
- qemu_irq isa_pci_irq, *isa_irqs;
+ qemu_irq *isa_irqs;
*isa_bus = isa_bus_new(NULL, get_system_memory(), &s->pchip.reg_io);
- isa_pci_irq = *qemu_allocate_irqs(typhoon_set_isa_irq, s, 1);
- isa_irqs = i8259_init(*isa_bus, isa_pci_irq);
+ isa_irqs = i8259_init(*isa_bus,
+ qemu_allocate_irq(typhoon_set_isa_irq, s, 0));
isa_bus_irqs(*isa_bus, isa_irqs);
}
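
Note: this typhoon.c change (like the nseries.c hunk further down) replaces the allocate-an-array-of-one idiom with the single-IRQ helper, which also avoids leaking the one-element array returned by qemu_allocate_irqs(). The shape of the change in isolation (handler and opaque are placeholders):

    /* before: qemu_allocate_irqs() returns a malloc'ed qemu_irq[1] that was never freed */
    qemu_irq irq = *qemu_allocate_irqs(handler, opaque, 1);

    /* after: allocate exactly one IRQ object, with IRQ number 0 */
    qemu_irq irq = qemu_allocate_irq(handler, opaque, 0);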
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
index 4b09caf594..cf346c1d0a 100644
--- a/hw/arm/Makefile.objs
+++ b/hw/arm/Makefile.objs
@@ -5,6 +5,7 @@ obj-y += omap_sx1.o palm.o realview.o spitz.o stellaris.o
obj-y += tosa.o versatilepb.o vexpress.o virt.o xilinx_zynq.o z2.o
obj-$(CONFIG_ACPI) += virt-acpi-build.o
obj-y += netduino2.o
+obj-y += sysbus-fdt.o
obj-y += armv7m.o exynos4210.o pxa2xx.o pxa2xx_gpio.o pxa2xx_pic.o
obj-$(CONFIG_DIGIC) += digic.o
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index fa6950352c..1e7fd28daa 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -557,7 +557,7 @@ static void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key,
fw_cfg_add_bytes(fw_cfg, data_key, data, size);
}
-void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
+static void arm_load_kernel_notify(Notifier *notifier, void *data)
{
CPUState *cs;
int kernel_size;
@@ -568,15 +568,11 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
hwaddr entry, kernel_load_offset;
int big_endian;
static const ARMInsnFixup *primary_loader;
-
- /* CPU objects (unlike devices) are not automatically reset on system
- * reset, so we must always register a handler to do so. If we're
- * actually loading a kernel, the handler is also responsible for
- * arranging that we start it correctly.
- */
- for (cs = CPU(cpu); cs; cs = CPU_NEXT(cs)) {
- qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
- }
+ ArmLoadKernelNotifier *n = DO_UPCAST(ArmLoadKernelNotifier,
+ notifier, notifier);
+ ARMCPU *cpu = n->cpu;
+ struct arm_boot_info *info =
+ container_of(n, struct arm_boot_info, load_kernel_notifier);
/* Load the kernel. */
if (!info->kernel_filename || info->firmware_loaded) {
@@ -775,3 +771,21 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
ARM_CPU(cs)->env.boot_info = info;
}
}
+
+void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
+{
+ CPUState *cs;
+
+ info->load_kernel_notifier.cpu = cpu;
+ info->load_kernel_notifier.notifier.notify = arm_load_kernel_notify;
+ qemu_add_machine_init_done_notifier(&info->load_kernel_notifier.notifier);
+
+ /* CPU objects (unlike devices) are not automatically reset on system
+ * reset, so we must always register a handler to do so. If we're
+ * actually loading a kernel, the handler is also responsible for
+ * arranging that we start it correctly.
+ */
+ for (cs = CPU(cpu); cs; cs = CPU_NEXT(cs)) {
+ qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
+ }
+}
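
Note: the boot.c refactoring defers the actual image loading into a machine-init-done notifier, so the kernel and DTB are only loaded after other init-done notifiers have had a chance to run; the virt.c hunk below relies on notifiers being executed in reverse registration order. A minimal sketch of the notifier pattern used here, with the surrounding setup invented for illustration:

    #include "qemu/notify.h"
    #include "sysemu/sysemu.h"

    static void my_init_done(Notifier *notifier, void *data)
    {
        /* arm_load_kernel_notify() uses DO_UPCAST()/container_of() at this
         * point to recover the structure that embeds the Notifier. */
    }

    static Notifier my_init_done_notifier = { .notify = my_init_done };

    static void my_board_init(void)
    {
        /* Fires once machine init completes; notifiers registered after
         * this one run before it. */
        qemu_add_machine_init_done_notifier(&my_init_done_notifier);
    }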
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index d243159664..a659e8525d 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -133,9 +133,8 @@ static void n800_mmc_cs_cb(void *opaque, int line, int level)
static void n8x0_gpio_setup(struct n800_s *s)
{
- qemu_irq *mmc_cs = qemu_allocate_irqs(n800_mmc_cs_cb, s->mpu->mmc, 1);
- qdev_connect_gpio_out(s->mpu->gpio, N8X0_MMC_CS_GPIO, mmc_cs[0]);
-
+ qdev_connect_gpio_out(s->mpu->gpio, N8X0_MMC_CS_GPIO,
+ qemu_allocate_irq(n800_mmc_cs_cb, s->mpu->mmc, 0));
qemu_irq_lower(qdev_get_gpio_in(s->mpu->gpio, N800_BAT_COVER_GPIO));
}
diff --git a/hw/arm/omap_sx1.c b/hw/arm/omap_sx1.c
index 671e02c4ed..4b0f7f9c42 100644
--- a/hw/arm/omap_sx1.c
+++ b/hw/arm/omap_sx1.c
@@ -103,7 +103,6 @@ static void sx1_init(MachineState *machine, const int version)
struct omap_mpu_state_s *mpu;
MemoryRegion *address_space = get_system_memory();
MemoryRegion *flash = g_new(MemoryRegion, 1);
- MemoryRegion *flash_1 = g_new(MemoryRegion, 1);
MemoryRegion *cs = g_new(MemoryRegion, 4);
static uint32_t cs0val = 0x00213090;
static uint32_t cs1val = 0x00215070;
@@ -165,6 +164,7 @@ static void sx1_init(MachineState *machine, const int version)
if ((version == 1) &&
(dinfo = drive_get(IF_PFLASH, 0, fl_idx)) != NULL) {
+ MemoryRegion *flash_1 = g_new(MemoryRegion, 1);
memory_region_init_ram(flash_1, NULL, "omap_sx1.flash1-0", flash1_size,
&error_abort);
vmstate_register_ram_global(flash_1);
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index f921a5680c..ec353f79c4 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -334,10 +334,10 @@ static uint64_t pxa2xx_cpccnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
static const ARMCPRegInfo pxa_cp_reginfo[] = {
/* cp14 crm==1: perf registers */
{ .name = "CPPMNC", .cp = 14, .crn = 0, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW,
+ .access = PL1_RW, .type = ARM_CP_IO,
.readfn = pxa2xx_cppmnc_read, .writefn = pxa2xx_cppmnc_write },
{ .name = "CPCCNT", .cp = 14, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW,
+ .access = PL1_RW, .type = ARM_CP_IO,
.readfn = pxa2xx_cpccnt_read, .writefn = arm_cp_write_ignore },
{ .name = "CPINTEN", .cp = 14, .crn = 4, .crm = 1, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
@@ -356,11 +356,11 @@ static const ARMCPRegInfo pxa_cp_reginfo[] = {
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
/* cp14 crn==6: CLKCFG */
{ .name = "CLKCFG", .cp = 14, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW,
+ .access = PL1_RW, .type = ARM_CP_IO,
.readfn = pxa2xx_clkcfg_read, .writefn = pxa2xx_clkcfg_write },
/* cp14 crn==7: PWRMODE */
{ .name = "PWRMODE", .cp = 14, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW,
+ .access = PL1_RW, .type = ARM_CP_IO,
.readfn = arm_cp_read_zero, .writefn = pxa2xx_pwrmode_write },
REGINFO_SENTINEL
};
@@ -457,7 +457,7 @@ typedef struct {
MemoryRegion iomem;
qemu_irq irq;
- int enable;
+ uint32_t enable;
SSIBus *bus;
uint32_t sscr[2];
@@ -470,10 +470,39 @@ typedef struct {
uint8_t ssacd;
uint32_t rx_fifo[16];
- int rx_level;
- int rx_start;
+ uint32_t rx_level;
+ uint32_t rx_start;
} PXA2xxSSPState;
+static bool pxa2xx_ssp_vmstate_validate(void *opaque, int version_id)
+{
+ PXA2xxSSPState *s = opaque;
+
+ return s->rx_start < sizeof(s->rx_fifo);
+}
+
+static const VMStateDescription vmstate_pxa2xx_ssp = {
+ .name = "pxa2xx-ssp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(enable, PXA2xxSSPState),
+ VMSTATE_UINT32_ARRAY(sscr, PXA2xxSSPState, 2),
+ VMSTATE_UINT32(sspsp, PXA2xxSSPState),
+ VMSTATE_UINT32(ssto, PXA2xxSSPState),
+ VMSTATE_UINT32(ssitr, PXA2xxSSPState),
+ VMSTATE_UINT32(sssr, PXA2xxSSPState),
+ VMSTATE_UINT8(sstsa, PXA2xxSSPState),
+ VMSTATE_UINT8(ssrsa, PXA2xxSSPState),
+ VMSTATE_UINT8(ssacd, PXA2xxSSPState),
+ VMSTATE_UINT32(rx_level, PXA2xxSSPState),
+ VMSTATE_UINT32(rx_start, PXA2xxSSPState),
+ VMSTATE_VALIDATE("fifo is 16 bytes", pxa2xx_ssp_vmstate_validate),
+ VMSTATE_UINT32_ARRAY(rx_fifo, PXA2xxSSPState, 16),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
#define SSCR0 0x00 /* SSP Control register 0 */
#define SSCR1 0x04 /* SSP Control register 1 */
#define SSSR 0x08 /* SSP Status register */
@@ -705,55 +734,20 @@ static const MemoryRegionOps pxa2xx_ssp_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static void pxa2xx_ssp_save(QEMUFile *f, void *opaque)
+static void pxa2xx_ssp_reset(DeviceState *d)
{
- PXA2xxSSPState *s = (PXA2xxSSPState *) opaque;
- int i;
-
- qemu_put_be32(f, s->enable);
+ PXA2xxSSPState *s = PXA2XX_SSP(d);
- qemu_put_be32s(f, &s->sscr[0]);
- qemu_put_be32s(f, &s->sscr[1]);
- qemu_put_be32s(f, &s->sspsp);
- qemu_put_be32s(f, &s->ssto);
- qemu_put_be32s(f, &s->ssitr);
- qemu_put_be32s(f, &s->sssr);
- qemu_put_8s(f, &s->sstsa);
- qemu_put_8s(f, &s->ssrsa);
- qemu_put_8s(f, &s->ssacd);
-
- qemu_put_byte(f, s->rx_level);
- for (i = 0; i < s->rx_level; i ++)
- qemu_put_byte(f, s->rx_fifo[(s->rx_start + i) & 0xf]);
-}
-
-static int pxa2xx_ssp_load(QEMUFile *f, void *opaque, int version_id)
-{
- PXA2xxSSPState *s = (PXA2xxSSPState *) opaque;
- int i, v;
-
- s->enable = qemu_get_be32(f);
-
- qemu_get_be32s(f, &s->sscr[0]);
- qemu_get_be32s(f, &s->sscr[1]);
- qemu_get_be32s(f, &s->sspsp);
- qemu_get_be32s(f, &s->ssto);
- qemu_get_be32s(f, &s->ssitr);
- qemu_get_be32s(f, &s->sssr);
- qemu_get_8s(f, &s->sstsa);
- qemu_get_8s(f, &s->ssrsa);
- qemu_get_8s(f, &s->ssacd);
-
- v = qemu_get_byte(f);
- if (v < 0 || v > ARRAY_SIZE(s->rx_fifo)) {
- return -EINVAL;
- }
- s->rx_level = v;
- s->rx_start = 0;
- for (i = 0; i < s->rx_level; i ++)
- s->rx_fifo[i] = qemu_get_byte(f);
-
- return 0;
+ s->enable = 0;
+ s->sscr[0] = s->sscr[1] = 0;
+ s->sspsp = 0;
+ s->ssto = 0;
+ s->ssitr = 0;
+ s->sssr = 0;
+ s->sstsa = 0;
+ s->ssrsa = 0;
+ s->ssacd = 0;
+ s->rx_start = s->rx_level = 0;
}
static int pxa2xx_ssp_init(SysBusDevice *sbd)
@@ -766,8 +760,6 @@ static int pxa2xx_ssp_init(SysBusDevice *sbd)
memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_ssp_ops, s,
"pxa2xx-ssp", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
- register_savevm(dev, "pxa2xx_ssp", -1, 0,
- pxa2xx_ssp_save, pxa2xx_ssp_load, s);
s->bus = ssi_create_bus(dev, "ssi");
return 0;
@@ -1759,24 +1751,33 @@ static PXA2xxI2SState *pxa2xx_i2s_init(MemoryRegion *sysmem,
}
/* PXA Fast Infra-red Communications Port */
+#define TYPE_PXA2XX_FIR "pxa2xx-fir"
+#define PXA2XX_FIR(obj) OBJECT_CHECK(PXA2xxFIrState, (obj), TYPE_PXA2XX_FIR)
+
struct PXA2xxFIrState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
MemoryRegion iomem;
qemu_irq irq;
qemu_irq rx_dma;
qemu_irq tx_dma;
- int enable;
+ uint32_t enable;
CharDriverState *chr;
uint8_t control[3];
uint8_t status[2];
- int rx_len;
- int rx_start;
+ uint32_t rx_len;
+ uint32_t rx_start;
uint8_t rx_fifo[64];
};
-static void pxa2xx_fir_reset(PXA2xxFIrState *s)
+static void pxa2xx_fir_reset(DeviceState *d)
{
+ PXA2xxFIrState *s = PXA2XX_FIR(d);
+
s->control[0] = 0x00;
s->control[1] = 0x00;
s->control[2] = 0x00;
@@ -1953,73 +1954,94 @@ static void pxa2xx_fir_event(void *opaque, int event)
{
}
-static void pxa2xx_fir_save(QEMUFile *f, void *opaque)
+static void pxa2xx_fir_instance_init(Object *obj)
{
- PXA2xxFIrState *s = (PXA2xxFIrState *) opaque;
- int i;
+ PXA2xxFIrState *s = PXA2XX_FIR(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- qemu_put_be32(f, s->enable);
-
- qemu_put_8s(f, &s->control[0]);
- qemu_put_8s(f, &s->control[1]);
- qemu_put_8s(f, &s->control[2]);
- qemu_put_8s(f, &s->status[0]);
- qemu_put_8s(f, &s->status[1]);
-
- qemu_put_byte(f, s->rx_len);
- for (i = 0; i < s->rx_len; i ++)
- qemu_put_byte(f, s->rx_fifo[(s->rx_start + i) & 63]);
+ memory_region_init_io(&s->iomem, NULL, &pxa2xx_fir_ops, s,
+ "pxa2xx-fir", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq);
+ sysbus_init_irq(sbd, &s->rx_dma);
+ sysbus_init_irq(sbd, &s->tx_dma);
}
-static int pxa2xx_fir_load(QEMUFile *f, void *opaque, int version_id)
+static void pxa2xx_fir_realize(DeviceState *dev, Error **errp)
{
- PXA2xxFIrState *s = (PXA2xxFIrState *) opaque;
- int i;
-
- s->enable = qemu_get_be32(f);
+ PXA2xxFIrState *s = PXA2XX_FIR(dev);
- qemu_get_8s(f, &s->control[0]);
- qemu_get_8s(f, &s->control[1]);
- qemu_get_8s(f, &s->control[2]);
- qemu_get_8s(f, &s->status[0]);
- qemu_get_8s(f, &s->status[1]);
+ if (s->chr) {
+ qemu_chr_fe_claim_no_fail(s->chr);
+ qemu_chr_add_handlers(s->chr, pxa2xx_fir_is_empty,
+ pxa2xx_fir_rx, pxa2xx_fir_event, s);
+ }
+}
- s->rx_len = qemu_get_byte(f);
- s->rx_start = 0;
- for (i = 0; i < s->rx_len; i ++)
- s->rx_fifo[i] = qemu_get_byte(f);
+static bool pxa2xx_fir_vmstate_validate(void *opaque, int version_id)
+{
+ PXA2xxFIrState *s = opaque;
- return 0;
+ return s->rx_start < ARRAY_SIZE(s->rx_fifo);
}
-static PXA2xxFIrState *pxa2xx_fir_init(MemoryRegion *sysmem,
- hwaddr base,
- qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma,
- CharDriverState *chr)
-{
- PXA2xxFIrState *s = (PXA2xxFIrState *)
- g_malloc0(sizeof(PXA2xxFIrState));
+static const VMStateDescription pxa2xx_fir_vmsd = {
+ .name = "pxa2xx-fir",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(enable, PXA2xxFIrState),
+ VMSTATE_UINT8_ARRAY(control, PXA2xxFIrState, 3),
+ VMSTATE_UINT8_ARRAY(status, PXA2xxFIrState, 2),
+ VMSTATE_UINT32(rx_len, PXA2xxFIrState),
+ VMSTATE_UINT32(rx_start, PXA2xxFIrState),
+ VMSTATE_VALIDATE("fifo is 64 bytes", pxa2xx_fir_vmstate_validate),
+ VMSTATE_UINT8_ARRAY(rx_fifo, PXA2xxFIrState, 64),
+ VMSTATE_END_OF_LIST()
+ }
+};
- s->irq = irq;
- s->rx_dma = rx_dma;
- s->tx_dma = tx_dma;
- s->chr = chr;
+static Property pxa2xx_fir_properties[] = {
+ DEFINE_PROP_CHR("chardev", PXA2xxFIrState, chr),
+ DEFINE_PROP_END_OF_LIST(),
+};
- pxa2xx_fir_reset(s);
+static void pxa2xx_fir_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
- memory_region_init_io(&s->iomem, NULL, &pxa2xx_fir_ops, s, "pxa2xx-fir", 0x1000);
- memory_region_add_subregion(sysmem, base, &s->iomem);
+ dc->realize = pxa2xx_fir_realize;
+ dc->vmsd = &pxa2xx_fir_vmsd;
+ dc->props = pxa2xx_fir_properties;
+ dc->reset = pxa2xx_fir_reset;
+}
- if (chr) {
- qemu_chr_fe_claim_no_fail(chr);
- qemu_chr_add_handlers(chr, pxa2xx_fir_is_empty,
- pxa2xx_fir_rx, pxa2xx_fir_event, s);
- }
+static const TypeInfo pxa2xx_fir_info = {
+ .name = TYPE_PXA2XX_FIR,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PXA2xxFIrState),
+ .class_init = pxa2xx_fir_class_init,
+ .instance_init = pxa2xx_fir_instance_init,
+};
- register_savevm(NULL, "pxa2xx_fir", 0, 0, pxa2xx_fir_save,
- pxa2xx_fir_load, s);
+static PXA2xxFIrState *pxa2xx_fir_init(MemoryRegion *sysmem,
+ hwaddr base,
+ qemu_irq irq, qemu_irq rx_dma,
+ qemu_irq tx_dma,
+ CharDriverState *chr)
+{
+ DeviceState *dev;
+ SysBusDevice *sbd;
- return s;
+ dev = qdev_create(NULL, TYPE_PXA2XX_FIR);
+ qdev_prop_set_chr(dev, "chardev", chr);
+ qdev_init_nofail(dev);
+ sbd = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(sbd, 0, base);
+ sysbus_connect_irq(sbd, 0, irq);
+ sysbus_connect_irq(sbd, 1, rx_dma);
+ sysbus_connect_irq(sbd, 2, tx_dma);
+ return PXA2XX_FIR(dev);
}
static void pxa2xx_reset(void *opaque, int line, int level)
@@ -2306,8 +2328,11 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size)
static void pxa2xx_ssp_class_init(ObjectClass *klass, void *data)
{
SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
sdc->init = pxa2xx_ssp_init;
+ dc->reset = pxa2xx_ssp_reset;
+ dc->vmsd = &vmstate_pxa2xx_ssp;
}
static const TypeInfo pxa2xx_ssp_info = {
@@ -2323,6 +2348,7 @@ static void pxa2xx_register_types(void)
type_register_static(&pxa2xx_ssp_info);
type_register_static(&pxa2xx_i2c_info);
type_register_static(&pxa2xx_rtc_sysbus_info);
+ type_register_static(&pxa2xx_fir_info);
}
type_init(pxa2xx_register_types)
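
Note: the SSP and FIR conversions above replace hand-written register_savevm() save/load handlers with VMStateDescriptions, widening the saved fields to fixed-size types and using a VMSTATE_VALIDATE entry to reject incoming state whose FIFO index is out of range, the check the old load functions did by hand. A condensed sketch of that ordering (the state struct and field names are illustrative, not from this patch):

    static bool mydev_fifo_index_valid(void *opaque, int version_id)
    {
        MyDevState *s = opaque;      /* hypothetical state struct */

        return s->rx_start < ARRAY_SIZE(s->rx_fifo);
    }

    static const VMStateDescription vmstate_mydev = {
        .name = "mydev",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(rx_level, MyDevState),
            VMSTATE_UINT32(rx_start, MyDevState),
            /* evaluated on load after the fields above have been read;
             * returning false fails the incoming migration */
            VMSTATE_VALIDATE("rx_start in range", mydev_fifo_index_valid),
            VMSTATE_UINT8_ARRAY(rx_fifo, MyDevState, 64),
            VMSTATE_END_OF_LIST()
        }
    };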
diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c
index 9cfc714874..d41ac93416 100644
--- a/hw/arm/pxa2xx_pic.c
+++ b/hw/arm/pxa2xx_pic.c
@@ -232,7 +232,7 @@ static void pxa2xx_pic_cp_write(CPUARMState *env, const ARMCPRegInfo *ri,
#define REGINFO_FOR_PIC_CP(NAME, CRN) \
{ .name = NAME, .cp = 6, .crn = CRN, .crm = 0, .opc1 = 0, .opc2 = 0, \
- .access = PL1_RW, \
+ .access = PL1_RW, .type = ARM_CP_IO, \
.readfn = pxa2xx_pic_cp_read, .writefn = pxa2xx_pic_cp_write }
static const ARMCPRegInfo pxa_pic_cp_reginfo[] = {
diff --git a/hw/arm/sysbus-fdt.c b/hw/arm/sysbus-fdt.c
new file mode 100644
index 0000000000..3038b94b4a
--- /dev/null
+++ b/hw/arm/sysbus-fdt.c
@@ -0,0 +1,174 @@
+/*
+ * ARM Platform Bus device tree generation helpers
+ *
+ * Copyright (c) 2014 Linaro Limited
+ *
+ * Authors:
+ * Alex Graf <agraf@suse.de>
+ * Eric Auger <eric.auger@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "hw/arm/sysbus-fdt.h"
+#include "qemu/error-report.h"
+#include "sysemu/device_tree.h"
+#include "hw/platform-bus.h"
+#include "sysemu/sysemu.h"
+
+/*
+ * internal struct that contains the information to create dynamic
+ * sysbus device node
+ */
+typedef struct PlatformBusFDTData {
+ void *fdt; /* device tree handle */
+ int irq_start; /* index of the first IRQ usable by platform bus devices */
+ const char *pbus_node_name; /* name of the platform bus node */
+ PlatformBusDevice *pbus;
+} PlatformBusFDTData;
+
+/*
+ * struct used when calling the machine init done notifier
+ * that constructs the fdt nodes of platform bus devices
+ */
+typedef struct PlatformBusFDTNotifierParams {
+ Notifier notifier;
+ ARMPlatformBusFDTParams *fdt_params;
+} PlatformBusFDTNotifierParams;
+
+/* struct that associates a device type name and a node creation function */
+typedef struct NodeCreationPair {
+ const char *typename;
+ int (*add_fdt_node_fn)(SysBusDevice *sbdev, void *opaque);
+} NodeCreationPair;
+
+/* list of supported dynamic sysbus devices */
+static const NodeCreationPair add_fdt_node_functions[] = {
+ {"", NULL}, /* last element */
+};
+
+/**
+ * add_fdt_node - add the device tree node of a dynamic sysbus device
+ *
+ * @sbdev: handle to the sysbus device
+ * @opaque: handle to the PlatformBusFDTData
+ *
+ * Checks the sysbus type belongs to the list of device types that
+ * are dynamically instantiable and if so call the node creation
+ * function.
+ */
+static int add_fdt_node(SysBusDevice *sbdev, void *opaque)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(add_fdt_node_functions); i++) {
+ if (!strcmp(object_get_typename(OBJECT(sbdev)),
+ add_fdt_node_functions[i].typename)) {
+ ret = add_fdt_node_functions[i].add_fdt_node_fn(sbdev, opaque);
+ assert(!ret);
+ return 0;
+ }
+ }
+ error_report("Device %s can not be dynamically instantiated",
+ qdev_fw_name(DEVICE(sbdev)));
+ exit(1);
+}
+
+/**
+ * add_all_platform_bus_fdt_nodes - create all the platform bus nodes
+ *
+ * builds the parent platform bus node and all the nodes of dynamic
+ * sysbus devices attached to it.
+ */
+static void add_all_platform_bus_fdt_nodes(ARMPlatformBusFDTParams *fdt_params)
+{
+ const char platcomp[] = "qemu,platform\0simple-bus";
+ PlatformBusDevice *pbus;
+ DeviceState *dev;
+ gchar *node;
+ uint64_t addr, size;
+ int irq_start, dtb_size;
+ struct arm_boot_info *info = fdt_params->binfo;
+ const ARMPlatformBusSystemParams *params = fdt_params->system_params;
+ const char *intc = fdt_params->intc;
+ void *fdt = info->get_dtb(info, &dtb_size);
+
+ /*
+ * If the user provided a dtb, we assume the dynamic sysbus nodes
+ * already are integrated there. This corresponds to a use case where
+ * the dynamic sysbus nodes are complex and their generation is not yet
+ * supported. In that case the user can take charge of the guest dt
+ * while qemu takes charge of the qom stuff.
+ */
+ if (info->dtb_filename) {
+ return;
+ }
+
+ assert(fdt);
+
+ node = g_strdup_printf("/platform@%"PRIx64, params->platform_bus_base);
+ addr = params->platform_bus_base;
+ size = params->platform_bus_size;
+ irq_start = params->platform_bus_first_irq;
+
+ /* Create a /platform node that we can put all devices into */
+ qemu_fdt_add_subnode(fdt, node);
+ qemu_fdt_setprop(fdt, node, "compatible", platcomp, sizeof(platcomp));
+
+ /* Our platform bus region is less than 32bits, so 1 cell is enough for
+ * address and size
+ */
+ qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1);
+ qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1);
+ qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, size);
+
+ qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", intc);
+
+ dev = qdev_find_recursive(sysbus_get_default(), TYPE_PLATFORM_BUS_DEVICE);
+ pbus = PLATFORM_BUS_DEVICE(dev);
+
+ /* We can only create dt nodes for dynamic devices when they're ready */
+ assert(pbus->done_gathering);
+
+ PlatformBusFDTData data = {
+ .fdt = fdt,
+ .irq_start = irq_start,
+ .pbus_node_name = node,
+ .pbus = pbus,
+ };
+
+ /* Loop through all dynamic sysbus devices and create their node */
+ foreach_dynamic_sysbus_device(add_fdt_node, &data);
+
+ g_free(node);
+}
+
+static void platform_bus_fdt_notify(Notifier *notifier, void *data)
+{
+ PlatformBusFDTNotifierParams *p = DO_UPCAST(PlatformBusFDTNotifierParams,
+ notifier, notifier);
+
+ add_all_platform_bus_fdt_nodes(p->fdt_params);
+ g_free(p->fdt_params);
+ g_free(p);
+}
+
+void arm_register_platform_bus_fdt_creator(ARMPlatformBusFDTParams *fdt_params)
+{
+ PlatformBusFDTNotifierParams *p = g_new(PlatformBusFDTNotifierParams, 1);
+
+ p->fdt_params = fdt_params;
+ p->notifier.notify = platform_bus_fdt_notify;
+ qemu_add_machine_init_done_notifier(&p->notifier);
+}
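
Note: in this first version the dispatch table above contains only its terminating entry, so any dynamic sysbus device reaching add_fdt_node() is rejected with an error. Wiring up a device type means adding a NodeCreationPair that maps its QOM type name to a node-builder callback; a hypothetical entry (the type name and builder below are illustrative, not part of this patch) would look like:

    static int add_mydev_fdt_node(SysBusDevice *sbdev, void *opaque)
    {
        PlatformBusFDTData *data = opaque;

        /* build the "<pbus_node_name>/mydev@<offset>" node in data->fdt,
         * using data->pbus and data->irq_start to translate the device's
         * MMIO region and IRQ into platform-bus-relative values */
        return 0;
    }

    static const NodeCreationPair add_fdt_node_functions[] = {
        {"my-dynamic-sysbus-device", add_mydev_fdt_node},
        {"", NULL}, /* last element */
    };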
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
index 8f1a5ea992..da217884e6 100644
--- a/hw/arm/vexpress.c
+++ b/hw/arm/vexpress.c
@@ -525,7 +525,7 @@ static pflash_t *ve_pflash_cfi01_register(hwaddr base, const char *name,
qdev_prop_set_uint64(dev, "sector-length", VEXPRESS_FLASH_SECT_SIZE);
qdev_prop_set_uint8(dev, "width", 4);
qdev_prop_set_uint8(dev, "device-width", 2);
- qdev_prop_set_uint8(dev, "big-endian", 0);
+ qdev_prop_set_bit(dev, "big-endian", false);
qdev_prop_set_uint16(dev, "id0", 0x89);
qdev_prop_set_uint16(dev, "id1", 0x18);
qdev_prop_set_uint16(dev, "id2", 0x00);
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index a9373ccaca..d5a8b9c017 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -84,6 +84,12 @@ static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
AML_EXCLUSIVE, uart_irq));
aml_append(dev, aml_name_decl("_CRS", crs));
+
+ /* The _ADR entry is used to link this device to the UART described
+ * in the SPCR table, i.e. SPCR.base_address.address == _ADR.
+ */
+ aml_append(dev, aml_name_decl("_ADR", aml_int(uart_memmap->base)));
+
aml_append(scope, dev);
}
@@ -334,6 +340,38 @@ build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
}
static void
+build_spcr(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
+{
+ AcpiSerialPortConsoleRedirection *spcr;
+ const MemMapEntry *uart_memmap = &guest_info->memmap[VIRT_UART];
+ int irq = guest_info->irqmap[VIRT_UART] + ARM_SPI_BASE;
+
+ spcr = acpi_data_push(table_data, sizeof(*spcr));
+
+ spcr->interface_type = 0x3; /* ARM PL011 UART */
+
+ spcr->base_address.space_id = AML_SYSTEM_MEMORY;
+ spcr->base_address.bit_width = 8;
+ spcr->base_address.bit_offset = 0;
+ spcr->base_address.access_width = 1;
+ spcr->base_address.address = cpu_to_le64(uart_memmap->base);
+
+ spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
+ spcr->gsi = cpu_to_le32(irq); /* Global System Interrupt */
+
+ spcr->baud = 3; /* Baud Rate: 3 = 9600 */
+ spcr->parity = 0; /* No Parity */
+ spcr->stopbits = 1; /* 1 Stop bit */
+ spcr->flowctrl = (1 << 1); /* Bit[1] = RTS/CTS hardware flow control */
+ spcr->term_type = 0; /* Terminal Type: 0 = VT100 */
+
+ spcr->pci_device_id = 0xffff; /* PCI Device ID: not a PCI device */
+ spcr->pci_vendor_id = 0xffff; /* PCI Vendor ID: not a PCI device */
+
+ build_header(linker, table_data, (void *)spcr, "SPCR", sizeof(*spcr), 2);
+}
+
+static void
build_mcfg(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
{
AcpiTableMcfg *mcfg;
@@ -514,7 +552,7 @@ void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
dsdt = tables_blob->len;
build_dsdt(tables_blob, tables->linker, guest_info);
- /* FADT MADT GTDT pointed to by RSDT */
+ /* FADT MADT GTDT SPCR pointed to by RSDT */
acpi_add_table(table_offsets, tables_blob);
build_fadt(tables_blob, tables->linker, dsdt);
@@ -527,6 +565,9 @@ void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_mcfg(tables_blob, tables->linker, guest_info);
+ acpi_add_table(table_offsets, tables_blob);
+ build_spcr(tables_blob, tables->linker, guest_info);
+
/* RSDT is pointed to by RSDP */
rsdt = tables_blob->len;
build_rsdt(tables_blob, tables->linker, table_offsets);
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 05db8cb2f7..f1e85c8e92 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -45,9 +45,11 @@
#include "qemu/error-report.h"
#include "hw/pci-host/gpex.h"
#include "hw/arm/virt-acpi-build.h"
+#include "hw/arm/sysbus-fdt.h"
+#include "hw/platform-bus.h"
/* Number of external interrupt lines to configure the GIC with */
-#define NUM_IRQS 128
+#define NUM_IRQS 256
#define GIC_FDT_IRQ_TYPE_SPI 0
#define GIC_FDT_IRQ_TYPE_PPI 1
@@ -60,6 +62,10 @@
#define GIC_FDT_IRQ_PPI_CPU_START 8
#define GIC_FDT_IRQ_PPI_CPU_WIDTH 8
+#define PLATFORM_BUS_NUM_IRQS 64
+
+static ARMPlatformBusSystemParams platform_bus_params;
+
typedef struct VirtBoardInfo {
struct arm_boot_info bootinfo;
const char *cpu_model;
@@ -69,6 +75,8 @@ typedef struct VirtBoardInfo {
void *fdt;
int fdt_size;
uint32_t clock_phandle;
+ uint32_t gic_phandle;
+ uint32_t v2m_phandle;
} VirtBoardInfo;
typedef struct {
@@ -103,20 +111,22 @@ typedef struct {
*/
static const MemMapEntry a15memmap[] = {
/* Space up to 0x8000000 is reserved for a boot ROM */
- [VIRT_FLASH] = { 0, 0x08000000 },
- [VIRT_CPUPERIPHS] = { 0x08000000, 0x00020000 },
+ [VIRT_FLASH] = { 0, 0x08000000 },
+ [VIRT_CPUPERIPHS] = { 0x08000000, 0x00020000 },
/* GIC distributor and CPU interfaces sit inside the CPU peripheral space */
- [VIRT_GIC_DIST] = { 0x08000000, 0x00010000 },
- [VIRT_GIC_CPU] = { 0x08010000, 0x00010000 },
- [VIRT_UART] = { 0x09000000, 0x00001000 },
- [VIRT_RTC] = { 0x09010000, 0x00001000 },
- [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
- [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
+ [VIRT_GIC_DIST] = { 0x08000000, 0x00010000 },
+ [VIRT_GIC_CPU] = { 0x08010000, 0x00010000 },
+ [VIRT_GIC_V2M] = { 0x08020000, 0x00001000 },
+ [VIRT_UART] = { 0x09000000, 0x00001000 },
+ [VIRT_RTC] = { 0x09010000, 0x00001000 },
+ [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
+ [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
/* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
- [VIRT_PCIE_MMIO] = { 0x10000000, 0x2eff0000 },
- [VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 },
- [VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
- [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
+ [VIRT_PLATFORM_BUS] = { 0x0c000000, 0x02000000 },
+ [VIRT_PCIE_MMIO] = { 0x10000000, 0x2eff0000 },
+ [VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 },
+ [VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
+ [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
};
static const int a15irqmap[] = {
@@ -124,6 +134,8 @@ static const int a15irqmap[] = {
[VIRT_RTC] = 2,
[VIRT_PCIE] = 3, /* ... to 6 */
[VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
+ [VIRT_GIC_V2M] = 48, /* ...to 48 + NUM_GICV2M_SPIS - 1 */
+ [VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */
};
static VirtBoardInfo machines[] = {
@@ -133,6 +145,11 @@ static VirtBoardInfo machines[] = {
.irqmap = a15irqmap,
},
{
+ .cpu_model = "cortex-a53",
+ .memmap = a15memmap,
+ .irqmap = a15irqmap,
+ },
+ {
.cpu_model = "cortex-a57",
.memmap = a15memmap,
.irqmap = a15irqmap,
@@ -294,17 +311,28 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
"enable-method", "psci");
}
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "reg", cpu);
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "reg", armcpu->mp_affinity);
g_free(nodename);
}
}
-static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
+static void fdt_add_v2m_gic_node(VirtBoardInfo *vbi)
{
- uint32_t gic_phandle;
+ vbi->v2m_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
+ qemu_fdt_add_subnode(vbi->fdt, "/intc/v2m");
+ qemu_fdt_setprop_string(vbi->fdt, "/intc/v2m", "compatible",
+ "arm,gic-v2m-frame");
+ qemu_fdt_setprop(vbi->fdt, "/intc/v2m", "msi-controller", NULL, 0);
+ qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc/v2m", "reg",
+ 2, vbi->memmap[VIRT_GIC_V2M].base,
+ 2, vbi->memmap[VIRT_GIC_V2M].size);
+ qemu_fdt_setprop_cell(vbi->fdt, "/intc/v2m", "phandle", vbi->v2m_phandle);
+}
- gic_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
- qemu_fdt_setprop_cell(vbi->fdt, "/", "interrupt-parent", gic_phandle);
+static void fdt_add_gic_node(VirtBoardInfo *vbi)
+{
+ vbi->gic_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
+ qemu_fdt_setprop_cell(vbi->fdt, "/", "interrupt-parent", vbi->gic_phandle);
qemu_fdt_add_subnode(vbi->fdt, "/intc");
/* 'cortex-a15-gic' means 'GIC v2' */
@@ -317,12 +345,32 @@ static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
2, vbi->memmap[VIRT_GIC_DIST].size,
2, vbi->memmap[VIRT_GIC_CPU].base,
2, vbi->memmap[VIRT_GIC_CPU].size);
- qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
+ qemu_fdt_setprop_cell(vbi->fdt, "/intc", "#address-cells", 0x2);
+ qemu_fdt_setprop_cell(vbi->fdt, "/intc", "#size-cells", 0x2);
+ qemu_fdt_setprop(vbi->fdt, "/intc", "ranges", NULL, 0);
+ qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", vbi->gic_phandle);
+}
+
+static void create_v2m(VirtBoardInfo *vbi, qemu_irq *pic)
+{
+ int i;
+ int irq = vbi->irqmap[VIRT_GIC_V2M];
+ DeviceState *dev;
+
+ dev = qdev_create(NULL, "arm-gicv2m");
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vbi->memmap[VIRT_GIC_V2M].base);
+ qdev_prop_set_uint32(dev, "base-spi", irq);
+ qdev_prop_set_uint32(dev, "num-spi", NUM_GICV2M_SPIS);
+ qdev_init_nofail(dev);
+
+ for (i = 0; i < NUM_GICV2M_SPIS; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
+ }
- return gic_phandle;
+ fdt_add_v2m_gic_node(vbi);
}
-static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
+static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic)
{
/* We create a standalone GIC v2 */
DeviceState *gicdev;
@@ -371,7 +419,9 @@ static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
pic[i] = qdev_get_gpio_in(gicdev, i);
}
- return fdt_add_gic_node(vbi);
+ fdt_add_gic_node(vbi);
+
+ create_v2m(vbi, pic);
}
static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
@@ -510,7 +560,7 @@ static void create_one_flash(const char *name, hwaddr flashbase,
qdev_prop_set_uint64(dev, "sector-length", sectorlength);
qdev_prop_set_uint8(dev, "width", 4);
qdev_prop_set_uint8(dev, "device-width", 2);
- qdev_prop_set_uint8(dev, "big-endian", 0);
+ qdev_prop_set_bit(dev, "big-endian", false);
qdev_prop_set_uint16(dev, "id0", 0x89);
qdev_prop_set_uint16(dev, "id1", 0x18);
qdev_prop_set_uint16(dev, "id2", 0x00);
@@ -587,7 +637,7 @@ static void create_pcie_irq_map(const VirtBoardInfo *vbi, uint32_t gic_phandle,
int first_irq, const char *nodename)
{
int devfn, pin;
- uint32_t full_irq_map[4 * 4 * 8] = { 0 };
+ uint32_t full_irq_map[4 * 4 * 10] = { 0 };
uint32_t *irq_map = full_irq_map;
for (devfn = 0; devfn <= 0x18; devfn += 0x8) {
@@ -600,13 +650,13 @@ static void create_pcie_irq_map(const VirtBoardInfo *vbi, uint32_t gic_phandle,
uint32_t map[] = {
devfn << 8, 0, 0, /* devfn */
pin + 1, /* PCI pin */
- gic_phandle, irq_type, irq_nr, irq_level }; /* GIC irq */
+ gic_phandle, 0, 0, irq_type, irq_nr, irq_level }; /* GIC irq */
/* Convert map to big endian */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 10; i++) {
irq_map[i] = cpu_to_be32(map[i]);
}
- irq_map += 8;
+ irq_map += 10;
}
}
@@ -618,8 +668,7 @@ static void create_pcie_irq_map(const VirtBoardInfo *vbi, uint32_t gic_phandle,
0x7 /* PCI irq */);
}
-static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
- uint32_t gic_phandle)
+static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic)
{
hwaddr base_mmio = vbi->memmap[VIRT_PCIE_MMIO].base;
hwaddr size_mmio = vbi->memmap[VIRT_PCIE_MMIO].size;
@@ -676,6 +725,8 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0,
nr_pcie_buses - 1);
+ qemu_fdt_setprop_cells(vbi->fdt, nodename, "msi-parent", vbi->v2m_phandle);
+
qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
2, base_ecam, 2, size_ecam);
qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
@@ -685,11 +736,52 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
2, base_mmio, 2, size_mmio);
qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
- create_pcie_irq_map(vbi, gic_phandle, irq, nodename);
+ create_pcie_irq_map(vbi, vbi->gic_phandle, irq, nodename);
g_free(nodename);
}
+static void create_platform_bus(VirtBoardInfo *vbi, qemu_irq *pic)
+{
+ DeviceState *dev;
+ SysBusDevice *s;
+ int i;
+ ARMPlatformBusFDTParams *fdt_params = g_new(ARMPlatformBusFDTParams, 1);
+ MemoryRegion *sysmem = get_system_memory();
+
+ platform_bus_params.platform_bus_base = vbi->memmap[VIRT_PLATFORM_BUS].base;
+ platform_bus_params.platform_bus_size = vbi->memmap[VIRT_PLATFORM_BUS].size;
+ platform_bus_params.platform_bus_first_irq = vbi->irqmap[VIRT_PLATFORM_BUS];
+ platform_bus_params.platform_bus_num_irqs = PLATFORM_BUS_NUM_IRQS;
+
+ fdt_params->system_params = &platform_bus_params;
+ fdt_params->binfo = &vbi->bootinfo;
+ fdt_params->intc = "/intc";
+ /*
+ * register a machine init done notifier that creates the device tree
+ * nodes of the platform bus and its children dynamic sysbus devices
+ */
+ arm_register_platform_bus_fdt_creator(fdt_params);
+
+ dev = qdev_create(NULL, TYPE_PLATFORM_BUS_DEVICE);
+ dev->id = TYPE_PLATFORM_BUS_DEVICE;
+ qdev_prop_set_uint32(dev, "num_irqs",
+ platform_bus_params.platform_bus_num_irqs);
+ qdev_prop_set_uint32(dev, "mmio_size",
+ platform_bus_params.platform_bus_size);
+ qdev_init_nofail(dev);
+ s = SYS_BUS_DEVICE(dev);
+
+ for (i = 0; i < platform_bus_params.platform_bus_num_irqs; i++) {
+ int irqn = platform_bus_params.platform_bus_first_irq + i;
+ sysbus_connect_irq(s, i, pic[irqn]);
+ }
+
+ memory_region_add_subregion(sysmem,
+ platform_bus_params.platform_bus_base,
+ sysbus_mmio_get_region(s, 0));
+}
+
static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
{
const VirtBoardInfo *board = (const VirtBoardInfo *)binfo;
@@ -717,7 +809,6 @@ static void machvirt_init(MachineState *machine)
VirtBoardInfo *vbi;
VirtGuestInfoState *guest_info_state = g_malloc0(sizeof *guest_info_state);
VirtGuestInfo *guest_info = &guest_info_state->info;
- uint32_t gic_phandle;
char **cpustr;
if (!cpu_model) {
@@ -794,13 +885,13 @@ static void machvirt_init(MachineState *machine)
create_flash(vbi);
- gic_phandle = create_gic(vbi, pic);
+ create_gic(vbi, pic);
create_uart(vbi, pic);
create_rtc(vbi, pic);
- create_pcie(vbi, pic, gic_phandle);
+ create_pcie(vbi, pic);
/* Create mmio transports, so the user can create virtio backends
* (which will be automatically plugged in to the transports). If
@@ -828,6 +919,14 @@ static void machvirt_init(MachineState *machine)
vbi->bootinfo.get_dtb = machvirt_dtb;
vbi->bootinfo.firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0);
arm_load_kernel(ARM_CPU(first_cpu), &vbi->bootinfo);
+
+ /*
+ * arm_load_kernel machine init done notifier registration must
+ * happen before the platform_bus_create call. In this latter,
+ * another notifier is registered which adds platform bus nodes.
+ * Notifiers are executed in registration reverse order.
+ */
+ create_platform_bus(vbi, pic);
}
static bool virt_get_secure(Object *obj, Error **errp)
@@ -866,6 +965,7 @@ static void virt_class_init(ObjectClass *oc, void *data)
mc->desc = "ARM Virtual Machine",
mc->init = machvirt_init;
mc->max_cpus = 8;
+ mc->has_dynamic_sysbus = true;
}
static const TypeInfo machvirt_info = {
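
Note on the interrupt-map resize in create_pcie_irq_map() above: each map entry consists of the child unit address (3 cells for PCI) + the child interrupt specifier (1 cell) + the parent phandle (1 cell) + the parent unit address + the parent interrupt specifier. Since /intc now advertises #address-cells = 2, the parent unit address contributes the two zero cells, and the GIC specifier is 3 cells (type, number, flags), giving 3 + 1 + 1 + 2 + 3 = 10 cells per entry instead of 8, hence full_irq_map growing from 4 * 4 * 8 to 4 * 4 * 10.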
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
index d8a8edd936..5e1b67ee43 100644
--- a/hw/block/fdc.c
+++ b/hw/block/fdc.c
@@ -324,7 +324,7 @@ static void fd_revalidate(FDrive *drv)
/* Intel 82078 floppy disk controller emulation */
static void fdctrl_reset(FDCtrl *fdctrl, int do_irq);
-static void fdctrl_reset_fifo(FDCtrl *fdctrl);
+static void fdctrl_to_command_phase(FDCtrl *fdctrl);
static int fdctrl_transfer_handler (void *opaque, int nchan,
int dma_pos, int dma_len);
static void fdctrl_raise_irq(FDCtrl *fdctrl);
@@ -495,6 +495,33 @@ enum {
FD_DIR_DSKCHG = 0x80,
};
+/*
+ * See chapter 5.0 "Controller phases" of the spec:
+ *
+ * Command phase:
+ * The host writes a command and its parameters into the FIFO. The command
+ * phase is completed when all parameters for the command have been supplied,
+ * and execution phase is entered.
+ *
+ * Execution phase:
+ * Data transfers, either DMA or non-DMA. For non-DMA transfers, the FIFO
+ * contains the payload now, otherwise it's unused. When all bytes of the
+ * required data have been transferred, the state is switched to either result
+ * phase (if the command produces status bytes) or directly back into the
+ * command phase for the next command.
+ *
+ * Result phase:
+ * The host reads out the FIFO, which contains one or more result bytes now.
+ */
+enum {
+ /* Only for migration: reconstruct phase from registers like qemu 2.3 */
+ FD_PHASE_RECONSTRUCT = 0,
+
+ FD_PHASE_COMMAND = 1,
+ FD_PHASE_EXECUTION = 2,
+ FD_PHASE_RESULT = 3,
+};
+
#define FD_MULTI_TRACK(state) ((state) & FD_STATE_MULTI)
#define FD_FORMAT_CMD(state) ((state) & FD_STATE_FORMAT)
@@ -504,6 +531,7 @@ struct FDCtrl {
/* Controller state */
QEMUTimer *result_timer;
int dma_chann;
+ uint8_t phase;
/* Controller's identification */
uint8_t version;
/* HW */
@@ -671,6 +699,7 @@ static const VMStateDescription vmstate_fdrive_media_changed = {
.name = "fdrive/media_changed",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = fdrive_media_changed_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(media_changed, FDrive),
VMSTATE_END_OF_LIST()
@@ -688,6 +717,7 @@ static const VMStateDescription vmstate_fdrive_media_rate = {
.name = "fdrive/media_rate",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = fdrive_media_rate_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(media_rate, FDrive),
VMSTATE_END_OF_LIST()
@@ -705,6 +735,7 @@ static const VMStateDescription vmstate_fdrive_perpendicular = {
.name = "fdrive/perpendicular",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = fdrive_perpendicular_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(perpendicular, FDrive),
VMSTATE_END_OF_LIST()
@@ -728,22 +759,36 @@ static const VMStateDescription vmstate_fdrive = {
VMSTATE_UINT8(sect, FDrive),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_fdrive_media_changed,
- .needed = &fdrive_media_changed_needed,
- } , {
- .vmsd = &vmstate_fdrive_media_rate,
- .needed = &fdrive_media_rate_needed,
- } , {
- .vmsd = &vmstate_fdrive_perpendicular,
- .needed = &fdrive_perpendicular_needed,
- } , {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_fdrive_media_changed,
+ &vmstate_fdrive_media_rate,
+ &vmstate_fdrive_perpendicular,
+ NULL
}
};
+/*
+ * Reconstructs the phase from register values according to the logic that was
+ * implemented in qemu 2.3. This is the default value that is used if the phase
+ * subsection is not present on migration.
+ *
+ * Don't change this function to reflect newer qemu versions, it is part of
+ * the migration ABI.
+ */
+static int reconstruct_phase(FDCtrl *fdctrl)
+{
+ if (fdctrl->msr & FD_MSR_NONDMA) {
+ return FD_PHASE_EXECUTION;
+ } else if ((fdctrl->msr & FD_MSR_RQM) == 0) {
+ /* qemu 2.3 disabled RQM only during DMA transfers */
+ return FD_PHASE_EXECUTION;
+ } else if (fdctrl->msr & FD_MSR_DIO) {
+ return FD_PHASE_RESULT;
+ } else {
+ return FD_PHASE_COMMAND;
+ }
+}
+
static void fdc_pre_save(void *opaque)
{
FDCtrl *s = opaque;
@@ -751,12 +796,24 @@ static void fdc_pre_save(void *opaque)
s->dor_vmstate = s->dor | GET_CUR_DRV(s);
}
+static int fdc_pre_load(void *opaque)
+{
+ FDCtrl *s = opaque;
+ s->phase = FD_PHASE_RECONSTRUCT;
+ return 0;
+}
+
static int fdc_post_load(void *opaque, int version_id)
{
FDCtrl *s = opaque;
SET_CUR_DRV(s, s->dor_vmstate & FD_DOR_SELMASK);
s->dor = s->dor_vmstate & ~FD_DOR_SELMASK;
+
+ if (s->phase == FD_PHASE_RECONSTRUCT) {
+ s->phase = reconstruct_phase(s);
+ }
+
return 0;
}
@@ -771,6 +828,7 @@ static const VMStateDescription vmstate_fdc_reset_sensei = {
.name = "fdc/reset_sensei",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = fdc_reset_sensei_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(reset_sensei, FDCtrl),
VMSTATE_END_OF_LIST()
@@ -788,17 +846,37 @@ static const VMStateDescription vmstate_fdc_result_timer = {
.name = "fdc/result_timer",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = fdc_result_timer_needed,
.fields = (VMStateField[]) {
VMSTATE_TIMER_PTR(result_timer, FDCtrl),
VMSTATE_END_OF_LIST()
}
};
+static bool fdc_phase_needed(void *opaque)
+{
+ FDCtrl *fdctrl = opaque;
+
+ return reconstruct_phase(fdctrl) != fdctrl->phase;
+}
+
+static const VMStateDescription vmstate_fdc_phase = {
+ .name = "fdc/phase",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fdc_phase_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(phase, FDCtrl),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_fdc = {
.name = "fdc",
.version_id = 2,
.minimum_version_id = 2,
.pre_save = fdc_pre_save,
+ .pre_load = fdc_pre_load,
.post_load = fdc_post_load,
.fields = (VMStateField[]) {
/* Controller State */
@@ -831,16 +909,11 @@ static const VMStateDescription vmstate_fdc = {
vmstate_fdrive, FDrive),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_fdc_reset_sensei,
- .needed = fdc_reset_sensei_needed,
- } , {
- .vmsd = &vmstate_fdc_result_timer,
- .needed = fdc_result_timer_needed,
- } , {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_fdc_reset_sensei,
+ &vmstate_fdc_result_timer,
+ &vmstate_fdc_phase,
+ NULL
}
};
@@ -918,7 +991,7 @@ static void fdctrl_reset(FDCtrl *fdctrl, int do_irq)
fdctrl->data_dir = FD_DIR_WRITE;
for (i = 0; i < MAX_FD; i++)
fd_recalibrate(&fdctrl->drives[i]);
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
if (do_irq) {
fdctrl->status0 |= FD_SR0_RDYCHG;
fdctrl_raise_irq(fdctrl);
@@ -1134,17 +1207,22 @@ static uint32_t fdctrl_read_dir(FDCtrl *fdctrl)
return retval;
}
-/* FIFO state control */
-static void fdctrl_reset_fifo(FDCtrl *fdctrl)
+/* Clear the FIFO and update the state for receiving the next command */
+static void fdctrl_to_command_phase(FDCtrl *fdctrl)
{
+ fdctrl->phase = FD_PHASE_COMMAND;
fdctrl->data_dir = FD_DIR_WRITE;
fdctrl->data_pos = 0;
+ fdctrl->data_len = 1; /* Accept command byte, adjust for params later */
fdctrl->msr &= ~(FD_MSR_CMDBUSY | FD_MSR_DIO);
+ fdctrl->msr |= FD_MSR_RQM;
}
-/* Set FIFO status for the host to read */
-static void fdctrl_set_fifo(FDCtrl *fdctrl, int fifo_len)
+/* Update the state to allow the guest to read out the command status.
+ * @fifo_len is the number of result bytes to be read out. */
+static void fdctrl_to_result_phase(FDCtrl *fdctrl, int fifo_len)
{
+ fdctrl->phase = FD_PHASE_RESULT;
fdctrl->data_dir = FD_DIR_READ;
fdctrl->data_len = fifo_len;
fdctrl->data_pos = 0;
@@ -1157,7 +1235,7 @@ static void fdctrl_unimplemented(FDCtrl *fdctrl, int direction)
qemu_log_mask(LOG_UNIMP, "fdc: unimplemented command 0x%02x\n",
fdctrl->fifo[0]);
fdctrl->fifo[0] = FD_SR0_INVCMD;
- fdctrl_set_fifo(fdctrl, 1);
+ fdctrl_to_result_phase(fdctrl, 1);
}
/* Seek to next sector
@@ -1238,7 +1316,7 @@ static void fdctrl_stop_transfer(FDCtrl *fdctrl, uint8_t status0,
fdctrl->msr |= FD_MSR_RQM | FD_MSR_DIO;
fdctrl->msr &= ~FD_MSR_NONDMA;
- fdctrl_set_fifo(fdctrl, 7);
+ fdctrl_to_result_phase(fdctrl, 7);
fdctrl_raise_irq(fdctrl);
}
@@ -1352,7 +1430,7 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction)
}
}
FLOPPY_DPRINTF("start non-DMA transfer\n");
- fdctrl->msr |= FD_MSR_NONDMA;
+ fdctrl->msr |= FD_MSR_NONDMA | FD_MSR_RQM;
if (direction != FD_DIR_WRITE)
fdctrl->msr |= FD_MSR_DIO;
/* IO based transfer: calculate len */
@@ -1505,9 +1583,16 @@ static uint32_t fdctrl_read_data(FDCtrl *fdctrl)
FLOPPY_DPRINTF("error: controller not ready for reading\n");
return 0;
}
+
+ /* If data_len spans multiple sectors, the current position in the FIFO
+ * wraps around while fdctrl->data_pos is the real position in the whole
+ * request. */
pos = fdctrl->data_pos;
pos %= FD_SECTOR_LEN;
- if (fdctrl->msr & FD_MSR_NONDMA) {
+
+ switch (fdctrl->phase) {
+ case FD_PHASE_EXECUTION:
+ assert(fdctrl->msr & FD_MSR_NONDMA);
if (pos == 0) {
if (fdctrl->data_pos != 0)
if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv)) {
@@ -1523,20 +1608,28 @@ static uint32_t fdctrl_read_data(FDCtrl *fdctrl)
memset(fdctrl->fifo, 0, FD_SECTOR_LEN);
}
}
- }
- retval = fdctrl->fifo[pos];
- if (++fdctrl->data_pos == fdctrl->data_len) {
- fdctrl->data_pos = 0;
- /* Switch from transfer mode to status mode
- * then from status mode to command mode
- */
- if (fdctrl->msr & FD_MSR_NONDMA) {
+
+ if (++fdctrl->data_pos == fdctrl->data_len) {
+ fdctrl->msr &= ~FD_MSR_RQM;
fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00);
- } else {
- fdctrl_reset_fifo(fdctrl);
+ }
+ break;
+
+ case FD_PHASE_RESULT:
+ assert(!(fdctrl->msr & FD_MSR_NONDMA));
+ if (++fdctrl->data_pos == fdctrl->data_len) {
+ fdctrl->msr &= ~FD_MSR_RQM;
+ fdctrl_to_command_phase(fdctrl);
fdctrl_reset_irq(fdctrl);
}
+ break;
+
+ case FD_PHASE_COMMAND:
+ default:
+ abort();
}
+
+ retval = fdctrl->fifo[pos];
FLOPPY_DPRINTF("data register: 0x%02x\n", retval);
return retval;
@@ -1606,7 +1699,7 @@ static void fdctrl_handle_lock(FDCtrl *fdctrl, int direction)
{
fdctrl->lock = (fdctrl->fifo[0] & 0x80) ? 1 : 0;
fdctrl->fifo[0] = fdctrl->lock << 4;
- fdctrl_set_fifo(fdctrl, 1);
+ fdctrl_to_result_phase(fdctrl, 1);
}
static void fdctrl_handle_dumpreg(FDCtrl *fdctrl, int direction)
@@ -1631,20 +1724,20 @@ static void fdctrl_handle_dumpreg(FDCtrl *fdctrl, int direction)
(cur_drv->perpendicular << 2);
fdctrl->fifo[8] = fdctrl->config;
fdctrl->fifo[9] = fdctrl->precomp_trk;
- fdctrl_set_fifo(fdctrl, 10);
+ fdctrl_to_result_phase(fdctrl, 10);
}
static void fdctrl_handle_version(FDCtrl *fdctrl, int direction)
{
/* Controller's version */
fdctrl->fifo[0] = fdctrl->version;
- fdctrl_set_fifo(fdctrl, 1);
+ fdctrl_to_result_phase(fdctrl, 1);
}
static void fdctrl_handle_partid(FDCtrl *fdctrl, int direction)
{
fdctrl->fifo[0] = 0x41; /* Stepping 1 */
- fdctrl_set_fifo(fdctrl, 1);
+ fdctrl_to_result_phase(fdctrl, 1);
}
static void fdctrl_handle_restore(FDCtrl *fdctrl, int direction)
@@ -1667,7 +1760,7 @@ static void fdctrl_handle_restore(FDCtrl *fdctrl, int direction)
fdctrl->config = fdctrl->fifo[11];
fdctrl->precomp_trk = fdctrl->fifo[12];
fdctrl->pwrd = fdctrl->fifo[13];
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
}
static void fdctrl_handle_save(FDCtrl *fdctrl, int direction)
@@ -1697,7 +1790,7 @@ static void fdctrl_handle_save(FDCtrl *fdctrl, int direction)
fdctrl->fifo[12] = fdctrl->pwrd;
fdctrl->fifo[13] = 0;
fdctrl->fifo[14] = 0;
- fdctrl_set_fifo(fdctrl, 15);
+ fdctrl_to_result_phase(fdctrl, 15);
}
static void fdctrl_handle_readid(FDCtrl *fdctrl, int direction)
@@ -1746,7 +1839,7 @@ static void fdctrl_handle_specify(FDCtrl *fdctrl, int direction)
else
fdctrl->dor |= FD_DOR_DMAEN;
/* No result back */
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
}
static void fdctrl_handle_sense_drive_status(FDCtrl *fdctrl, int direction)
@@ -1762,7 +1855,7 @@ static void fdctrl_handle_sense_drive_status(FDCtrl *fdctrl, int direction)
(cur_drv->head << 2) |
GET_CUR_DRV(fdctrl) |
0x28;
- fdctrl_set_fifo(fdctrl, 1);
+ fdctrl_to_result_phase(fdctrl, 1);
}
static void fdctrl_handle_recalibrate(FDCtrl *fdctrl, int direction)
@@ -1772,7 +1865,7 @@ static void fdctrl_handle_recalibrate(FDCtrl *fdctrl, int direction)
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
fd_recalibrate(cur_drv);
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
/* Raise Interrupt */
fdctrl->status0 |= FD_SR0_SEEK;
fdctrl_raise_irq(fdctrl);
@@ -1788,7 +1881,7 @@ static void fdctrl_handle_sense_interrupt_status(FDCtrl *fdctrl, int direction)
fdctrl->reset_sensei--;
} else if (!(fdctrl->sra & FD_SRA_INTPEND)) {
fdctrl->fifo[0] = FD_SR0_INVCMD;
- fdctrl_set_fifo(fdctrl, 1);
+ fdctrl_to_result_phase(fdctrl, 1);
return;
} else {
fdctrl->fifo[0] =
@@ -1797,7 +1890,7 @@ static void fdctrl_handle_sense_interrupt_status(FDCtrl *fdctrl, int direction)
}
fdctrl->fifo[1] = cur_drv->track;
- fdctrl_set_fifo(fdctrl, 2);
+ fdctrl_to_result_phase(fdctrl, 2);
fdctrl_reset_irq(fdctrl);
fdctrl->status0 = FD_SR0_RDYCHG;
}
@@ -1808,7 +1901,7 @@ static void fdctrl_handle_seek(FDCtrl *fdctrl, int direction)
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
/* The seek command just sends step pulses to the drive and doesn't care if
* there is a medium inserted or if it's banging the head against the drive.
*/
@@ -1825,7 +1918,7 @@ static void fdctrl_handle_perpendicular_mode(FDCtrl *fdctrl, int direction)
if (fdctrl->fifo[1] & 0x80)
cur_drv->perpendicular = fdctrl->fifo[1] & 0x7;
/* No result back */
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
}
static void fdctrl_handle_configure(FDCtrl *fdctrl, int direction)
@@ -1833,20 +1926,20 @@ static void fdctrl_handle_configure(FDCtrl *fdctrl, int direction)
fdctrl->config = fdctrl->fifo[2];
fdctrl->precomp_trk = fdctrl->fifo[3];
/* No result back */
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
}
static void fdctrl_handle_powerdown_mode(FDCtrl *fdctrl, int direction)
{
fdctrl->pwrd = fdctrl->fifo[1];
fdctrl->fifo[0] = fdctrl->fifo[1];
- fdctrl_set_fifo(fdctrl, 1);
+ fdctrl_to_result_phase(fdctrl, 1);
}
static void fdctrl_handle_option(FDCtrl *fdctrl, int direction)
{
/* No result back */
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
}
static void fdctrl_handle_drive_specification_command(FDCtrl *fdctrl, int direction)
@@ -1862,15 +1955,15 @@ static void fdctrl_handle_drive_specification_command(FDCtrl *fdctrl, int direct
fdctrl->fifo[0] = fdctrl->fifo[1];
fdctrl->fifo[2] = 0;
fdctrl->fifo[3] = 0;
- fdctrl_set_fifo(fdctrl, 4);
+ fdctrl_to_result_phase(fdctrl, 4);
} else {
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
}
} else if (fdctrl->data_len > 7) {
/* ERROR */
fdctrl->fifo[0] = 0x80 |
(cur_drv->head << 2) | GET_CUR_DRV(fdctrl);
- fdctrl_set_fifo(fdctrl, 1);
+ fdctrl_to_result_phase(fdctrl, 1);
}
}
@@ -1887,7 +1980,7 @@ static void fdctrl_handle_relative_seek_in(FDCtrl *fdctrl, int direction)
fd_seek(cur_drv, cur_drv->head,
cur_drv->track + fdctrl->fifo[2], cur_drv->sect, 1);
}
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
/* Raise Interrupt */
fdctrl->status0 |= FD_SR0_SEEK;
fdctrl_raise_irq(fdctrl);
@@ -1905,20 +1998,25 @@ static void fdctrl_handle_relative_seek_out(FDCtrl *fdctrl, int direction)
fd_seek(cur_drv, cur_drv->head,
cur_drv->track - fdctrl->fifo[2], cur_drv->sect, 1);
}
- fdctrl_reset_fifo(fdctrl);
+ fdctrl_to_command_phase(fdctrl);
/* Raise Interrupt */
fdctrl->status0 |= FD_SR0_SEEK;
fdctrl_raise_irq(fdctrl);
}
-static const struct {
+/*
+ * Handlers for the execution phase of each command
+ */
+typedef struct FDCtrlCommand {
uint8_t value;
uint8_t mask;
const char* name;
int parameters;
void (*handler)(FDCtrl *fdctrl, int direction);
int direction;
-} handlers[] = {
+} FDCtrlCommand;
+
+static const FDCtrlCommand handlers[] = {
{ FD_CMD_READ, 0x1f, "READ", 8, fdctrl_start_transfer, FD_DIR_READ },
{ FD_CMD_WRITE, 0x3f, "WRITE", 8, fdctrl_start_transfer, FD_DIR_WRITE },
{ FD_CMD_SEEK, 0xff, "SEEK", 2, fdctrl_handle_seek },
@@ -1955,9 +2053,19 @@ static const struct {
/* Associate command to an index in the 'handlers' array */
static uint8_t command_to_handler[256];
+static const FDCtrlCommand *get_command(uint8_t cmd)
+{
+ int idx;
+
+ idx = command_to_handler[cmd];
+ FLOPPY_DPRINTF("%s command\n", handlers[idx].name);
+ return &handlers[idx];
+}
+
static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value)
{
FDrive *cur_drv;
+ const FDCtrlCommand *cmd;
uint32_t pos;
/* Reset mode */
@@ -1970,12 +2078,27 @@ static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value)
return;
}
fdctrl->dsr &= ~FD_DSR_PWRDOWN;
- /* Is it write command time ? */
- if (fdctrl->msr & FD_MSR_NONDMA) {
+
+ FLOPPY_DPRINTF("%s: %02x\n", __func__, value);
+
+ /* If data_len spans multiple sectors, the current position in the FIFO
+ * wraps around while fdctrl->data_pos is the real position in the whole
+ * request. */
+ pos = fdctrl->data_pos++;
+ pos %= FD_SECTOR_LEN;
+ fdctrl->fifo[pos] = value;
+
+ if (fdctrl->data_pos == fdctrl->data_len) {
+ fdctrl->msr &= ~FD_MSR_RQM;
+ }
+
+ switch (fdctrl->phase) {
+ case FD_PHASE_EXECUTION:
+ /* For DMA requests, RQM should be cleared during execution phase, so
+ * we would have errored out above. */
+ assert(fdctrl->msr & FD_MSR_NONDMA);
+
/* FIFO data write */
- pos = fdctrl->data_pos++;
- pos %= FD_SECTOR_LEN;
- fdctrl->fifo[pos] = value;
if (pos == FD_SECTOR_LEN - 1 ||
fdctrl->data_pos == fdctrl->data_len) {
cur_drv = get_cur_drv(fdctrl);
@@ -1983,45 +2106,54 @@ static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value)
< 0) {
FLOPPY_DPRINTF("error writing sector %d\n",
fd_sector(cur_drv));
- return;
+ break;
}
if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv)) {
FLOPPY_DPRINTF("error seeking to next sector %d\n",
fd_sector(cur_drv));
- return;
+ break;
}
}
- /* Switch from transfer mode to status mode
- * then from status mode to command mode
- */
- if (fdctrl->data_pos == fdctrl->data_len)
+
+ /* Switch to result phase when done with the transfer */
+ if (fdctrl->data_pos == fdctrl->data_len) {
fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00);
- return;
- }
- if (fdctrl->data_pos == 0) {
- /* Command */
- pos = command_to_handler[value & 0xff];
- FLOPPY_DPRINTF("%s command\n", handlers[pos].name);
- fdctrl->data_len = handlers[pos].parameters + 1;
- fdctrl->msr |= FD_MSR_CMDBUSY;
- }
+ }
+ break;
- FLOPPY_DPRINTF("%s: %02x\n", __func__, value);
- pos = fdctrl->data_pos++;
- pos %= FD_SECTOR_LEN;
- fdctrl->fifo[pos] = value;
- if (fdctrl->data_pos == fdctrl->data_len) {
- /* We now have all parameters
- * and will be able to treat the command
- */
- if (fdctrl->data_state & FD_STATE_FORMAT) {
- fdctrl_format_sector(fdctrl);
- return;
+ case FD_PHASE_COMMAND:
+ assert(!(fdctrl->msr & FD_MSR_NONDMA));
+ assert(fdctrl->data_pos < FD_SECTOR_LEN);
+
+ if (pos == 0) {
+ /* The first byte specifies the command. Now we start reading
+ * as many parameters as this command requires. */
+ cmd = get_command(value);
+ fdctrl->data_len = cmd->parameters + 1;
+ if (cmd->parameters) {
+ fdctrl->msr |= FD_MSR_RQM;
+ }
+ fdctrl->msr |= FD_MSR_CMDBUSY;
+ }
+
+ if (fdctrl->data_pos == fdctrl->data_len) {
+ /* We have all parameters now, execute the command */
+ fdctrl->phase = FD_PHASE_EXECUTION;
+
+ if (fdctrl->data_state & FD_STATE_FORMAT) {
+ fdctrl_format_sector(fdctrl);
+ break;
+ }
+
+ cmd = get_command(fdctrl->fifo[0]);
+ FLOPPY_DPRINTF("Calling handler for '%s'\n", cmd->name);
+ cmd->handler(fdctrl, cmd->direction);
}
+ break;
- pos = command_to_handler[fdctrl->fifo[0] & 0xff];
- FLOPPY_DPRINTF("treat %s command\n", handlers[pos].name);
- (*handlers[pos].handler)(fdctrl, handlers[pos].direction);
+ case FD_PHASE_RESULT:
+ default:
+ abort();
}
}
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index d282695086..2ba6c77293 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -64,6 +64,9 @@ do { \
#define TYPE_CFI_PFLASH01 "cfi.pflash01"
#define CFI_PFLASH01(obj) OBJECT_CHECK(pflash_t, (obj), TYPE_CFI_PFLASH01)
+#define PFLASH_BE 0
+#define PFLASH_SECURE 1
+
struct pflash_t {
/*< private >*/
SysBusDevice parent_obj;
@@ -75,7 +78,7 @@ struct pflash_t {
uint8_t bank_width;
uint8_t device_width; /* If 0, device width not specified. */
uint8_t max_device_width; /* max device width in bytes */
- uint8_t be;
+ uint32_t features;
uint8_t wcycle; /* if 0, the flash is read normally */
int ro;
uint8_t cmd;
@@ -235,12 +238,57 @@ static uint32_t pflash_devid_query(pflash_t *pfl, hwaddr offset)
return resp;
}
+static uint32_t pflash_data_read(pflash_t *pfl, hwaddr offset,
+ int width, int be)
+{
+ uint8_t *p;
+ uint32_t ret;
+
+ p = pfl->storage;
+ switch (width) {
+ case 1:
+ ret = p[offset];
+ DPRINTF("%s: data offset " TARGET_FMT_plx " %02x\n",
+ __func__, offset, ret);
+ break;
+ case 2:
+ if (be) {
+ ret = p[offset] << 8;
+ ret |= p[offset + 1];
+ } else {
+ ret = p[offset];
+ ret |= p[offset + 1] << 8;
+ }
+ DPRINTF("%s: data offset " TARGET_FMT_plx " %04x\n",
+ __func__, offset, ret);
+ break;
+ case 4:
+ if (be) {
+ ret = p[offset] << 24;
+ ret |= p[offset + 1] << 16;
+ ret |= p[offset + 2] << 8;
+ ret |= p[offset + 3];
+ } else {
+ ret = p[offset];
+ ret |= p[offset + 1] << 8;
+ ret |= p[offset + 2] << 16;
+ ret |= p[offset + 3] << 24;
+ }
+ DPRINTF("%s: data offset " TARGET_FMT_plx " %08x\n",
+ __func__, offset, ret);
+ break;
+ default:
+ DPRINTF("BUG in %s\n", __func__);
+ abort();
+ }
+ return ret;
+}
+
static uint32_t pflash_read (pflash_t *pfl, hwaddr offset,
int width, int be)
{
hwaddr boff;
uint32_t ret;
- uint8_t *p;
ret = -1;
@@ -257,43 +305,7 @@ static uint32_t pflash_read (pflash_t *pfl, hwaddr offset,
/* fall through to read code */
case 0x00:
/* Flash area read */
- p = pfl->storage;
- switch (width) {
- case 1:
- ret = p[offset];
- DPRINTF("%s: data offset " TARGET_FMT_plx " %02x\n",
- __func__, offset, ret);
- break;
- case 2:
- if (be) {
- ret = p[offset] << 8;
- ret |= p[offset + 1];
- } else {
- ret = p[offset];
- ret |= p[offset + 1] << 8;
- }
- DPRINTF("%s: data offset " TARGET_FMT_plx " %04x\n",
- __func__, offset, ret);
- break;
- case 4:
- if (be) {
- ret = p[offset] << 24;
- ret |= p[offset + 1] << 16;
- ret |= p[offset + 2] << 8;
- ret |= p[offset + 3];
- } else {
- ret = p[offset];
- ret |= p[offset + 1] << 8;
- ret |= p[offset + 2] << 16;
- ret |= p[offset + 3] << 24;
- }
- DPRINTF("%s: data offset " TARGET_FMT_plx " %08x\n",
- __func__, offset, ret);
- break;
- default:
- DPRINTF("BUG in %s\n", __func__);
- }
-
+ ret = pflash_data_read(pfl, offset, width, be);
break;
case 0x10: /* Single byte program */
case 0x20: /* Block erase */
@@ -648,101 +660,37 @@ static void pflash_write(pflash_t *pfl, hwaddr offset,
}
-static uint32_t pflash_readb_be(void *opaque, hwaddr addr)
-{
- return pflash_read(opaque, addr, 1, 1);
-}
-
-static uint32_t pflash_readb_le(void *opaque, hwaddr addr)
-{
- return pflash_read(opaque, addr, 1, 0);
-}
-
-static uint32_t pflash_readw_be(void *opaque, hwaddr addr)
-{
- pflash_t *pfl = opaque;
-
- return pflash_read(pfl, addr, 2, 1);
-}
-
-static uint32_t pflash_readw_le(void *opaque, hwaddr addr)
-{
- pflash_t *pfl = opaque;
-
- return pflash_read(pfl, addr, 2, 0);
-}
-
-static uint32_t pflash_readl_be(void *opaque, hwaddr addr)
-{
- pflash_t *pfl = opaque;
-
- return pflash_read(pfl, addr, 4, 1);
-}
-
-static uint32_t pflash_readl_le(void *opaque, hwaddr addr)
+static MemTxResult pflash_mem_read_with_attrs(void *opaque, hwaddr addr, uint64_t *value,
+ unsigned len, MemTxAttrs attrs)
{
pflash_t *pfl = opaque;
+ bool be = !!(pfl->features & (1 << PFLASH_BE));
- return pflash_read(pfl, addr, 4, 0);
-}
-
-static void pflash_writeb_be(void *opaque, hwaddr addr,
- uint32_t value)
-{
- pflash_write(opaque, addr, value, 1, 1);
-}
-
-static void pflash_writeb_le(void *opaque, hwaddr addr,
- uint32_t value)
-{
- pflash_write(opaque, addr, value, 1, 0);
-}
-
-static void pflash_writew_be(void *opaque, hwaddr addr,
- uint32_t value)
-{
- pflash_t *pfl = opaque;
-
- pflash_write(pfl, addr, value, 2, 1);
-}
-
-static void pflash_writew_le(void *opaque, hwaddr addr,
- uint32_t value)
-{
- pflash_t *pfl = opaque;
-
- pflash_write(pfl, addr, value, 2, 0);
-}
-
-static void pflash_writel_be(void *opaque, hwaddr addr,
- uint32_t value)
-{
- pflash_t *pfl = opaque;
-
- pflash_write(pfl, addr, value, 4, 1);
+ if ((pfl->features & (1 << PFLASH_SECURE)) && !attrs.secure) {
+ *value = pflash_data_read(opaque, addr, len, be);
+ } else {
+ *value = pflash_read(opaque, addr, len, be);
+ }
+ return MEMTX_OK;
}
-static void pflash_writel_le(void *opaque, hwaddr addr,
- uint32_t value)
+static MemTxResult pflash_mem_write_with_attrs(void *opaque, hwaddr addr, uint64_t value,
+ unsigned len, MemTxAttrs attrs)
{
pflash_t *pfl = opaque;
+ bool be = !!(pfl->features & (1 << PFLASH_BE));
- pflash_write(pfl, addr, value, 4, 0);
+ if ((pfl->features & (1 << PFLASH_SECURE)) && !attrs.secure) {
+ return MEMTX_ERROR;
+ } else {
+ pflash_write(opaque, addr, value, len, be);
+ return MEMTX_OK;
+ }
}
-static const MemoryRegionOps pflash_cfi01_ops_be = {
- .old_mmio = {
- .read = { pflash_readb_be, pflash_readw_be, pflash_readl_be, },
- .write = { pflash_writeb_be, pflash_writew_be, pflash_writel_be, },
- },
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static const MemoryRegionOps pflash_cfi01_ops_le = {
- .old_mmio = {
- .read = { pflash_readb_le, pflash_readw_le, pflash_readl_le, },
- .write = { pflash_writeb_le, pflash_writew_le, pflash_writel_le, },
- },
+static const MemoryRegionOps pflash_cfi01_ops = {
+ .read_with_attrs = pflash_mem_read_with_attrs,
+ .write_with_attrs = pflash_mem_write_with_attrs,
.endianness = DEVICE_NATIVE_ENDIAN,
};
@@ -773,7 +721,8 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
memory_region_init_rom_device(
&pfl->mem, OBJECT(dev),
- pfl->be ? &pflash_cfi01_ops_be : &pflash_cfi01_ops_le, pfl,
+ &pflash_cfi01_ops,
+ pfl,
pfl->name, total_len, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -925,7 +874,8 @@ static Property pflash_cfi01_properties[] = {
DEFINE_PROP_UINT8("width", struct pflash_t, bank_width, 0),
DEFINE_PROP_UINT8("device-width", struct pflash_t, device_width, 0),
DEFINE_PROP_UINT8("max-device-width", struct pflash_t, max_device_width, 0),
- DEFINE_PROP_UINT8("big-endian", struct pflash_t, be, 0),
+ DEFINE_PROP_BIT("big-endian", struct pflash_t, features, PFLASH_BE, 0),
+ DEFINE_PROP_BIT("secure", struct pflash_t, features, PFLASH_SECURE, 0),
DEFINE_PROP_UINT16("id0", struct pflash_t, ident0, 0),
DEFINE_PROP_UINT16("id1", struct pflash_t, ident1, 0),
DEFINE_PROP_UINT16("id2", struct pflash_t, ident2, 0),
@@ -975,7 +925,7 @@ pflash_t *pflash_cfi01_register(hwaddr base,
qdev_prop_set_uint32(dev, "num-blocks", nb_blocs);
qdev_prop_set_uint64(dev, "sector-length", sector_len);
qdev_prop_set_uint8(dev, "width", bank_width);
- qdev_prop_set_uint8(dev, "big-endian", !!be);
+ qdev_prop_set_bit(dev, "big-endian", !!be);
qdev_prop_set_uint16(dev, "id0", id0);
qdev_prop_set_uint16(dev, "id1", id1);
qdev_prop_set_uint16(dev, "id2", id2);
diff --git a/hw/char/parallel.c b/hw/char/parallel.c
index 4079554bb9..c2b553f0d1 100644
--- a/hw/char/parallel.c
+++ b/hw/char/parallel.c
@@ -641,28 +641,3 @@ static void parallel_register_types(void)
}
type_init(parallel_register_types)
-
-static void parallel_init(ISABus *bus, int index, CharDriverState *chr)
-{
- DeviceState *dev;
- ISADevice *isadev;
-
- isadev = isa_create(bus, "isa-parallel");
- dev = DEVICE(isadev);
- qdev_prop_set_uint32(dev, "index", index);
- qdev_prop_set_chr(dev, "chardev", chr);
- qdev_init_nofail(dev);
-}
-
-void parallel_hds_isa_init(ISABus *bus, int n)
-{
- int i;
-
- assert(n <= MAX_PARALLEL_PORTS);
-
- for (i = 0; i < n; i++) {
- if (parallel_hds[i]) {
- parallel_init(bus, i, parallel_hds[i]);
- }
- }
-}
diff --git a/hw/char/serial.c b/hw/char/serial.c
index 55011cfd26..513d73c27f 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -662,6 +662,7 @@ static const VMStateDescription vmstate_serial_thr_ipending = {
.name = "serial/thr_ipending",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = serial_thr_ipending_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(thr_ipending, SerialState),
VMSTATE_END_OF_LIST()
@@ -678,6 +679,7 @@ static const VMStateDescription vmstate_serial_tsr = {
.name = "serial/tsr",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = serial_tsr_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(tsr_retry, SerialState),
VMSTATE_UINT8(thr, SerialState),
@@ -697,6 +699,7 @@ static const VMStateDescription vmstate_serial_recv_fifo = {
.name = "serial/recv_fifo",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = serial_recv_fifo_needed,
.fields = (VMStateField[]) {
VMSTATE_STRUCT(recv_fifo, SerialState, 1, vmstate_fifo8, Fifo8),
VMSTATE_END_OF_LIST()
@@ -713,6 +716,7 @@ static const VMStateDescription vmstate_serial_xmit_fifo = {
.name = "serial/xmit_fifo",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = serial_xmit_fifo_needed,
.fields = (VMStateField[]) {
VMSTATE_STRUCT(xmit_fifo, SerialState, 1, vmstate_fifo8, Fifo8),
VMSTATE_END_OF_LIST()
@@ -729,6 +733,7 @@ static const VMStateDescription vmstate_serial_fifo_timeout_timer = {
.name = "serial/fifo_timeout_timer",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = serial_fifo_timeout_timer_needed,
.fields = (VMStateField[]) {
VMSTATE_TIMER_PTR(fifo_timeout_timer, SerialState),
VMSTATE_END_OF_LIST()
@@ -745,6 +750,7 @@ static const VMStateDescription vmstate_serial_timeout_ipending = {
.name = "serial/timeout_ipending",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = serial_timeout_ipending_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(timeout_ipending, SerialState),
VMSTATE_END_OF_LIST()
@@ -760,6 +766,7 @@ static bool serial_poll_needed(void *opaque)
static const VMStateDescription vmstate_serial_poll = {
.name = "serial/poll",
.version_id = 1,
+ .needed = serial_poll_needed,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_INT32(poll_msl, SerialState),
@@ -788,31 +795,15 @@ const VMStateDescription vmstate_serial = {
VMSTATE_UINT8_V(fcr_vmstate, SerialState, 3),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_serial_thr_ipending,
- .needed = &serial_thr_ipending_needed,
- } , {
- .vmsd = &vmstate_serial_tsr,
- .needed = &serial_tsr_needed,
- } , {
- .vmsd = &vmstate_serial_recv_fifo,
- .needed = &serial_recv_fifo_needed,
- } , {
- .vmsd = &vmstate_serial_xmit_fifo,
- .needed = &serial_xmit_fifo_needed,
- } , {
- .vmsd = &vmstate_serial_fifo_timeout_timer,
- .needed = &serial_fifo_timeout_timer_needed,
- } , {
- .vmsd = &vmstate_serial_timeout_ipending,
- .needed = &serial_timeout_ipending_needed,
- } , {
- .vmsd = &vmstate_serial_poll,
- .needed = &serial_poll_needed,
- } , {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_serial_thr_ipending,
+ &vmstate_serial_tsr,
+ &vmstate_serial_recv_fifo,
+ &vmstate_serial_xmit_fifo,
+ &vmstate_serial_fifo_timeout_timer,
+ &vmstate_serial_timeout_ipending,
+ &vmstate_serial_poll,
+ NULL
}
};
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 25c45e6f9d..ac4654e9dd 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -294,6 +294,14 @@ static void machine_init_notify(Notifier *notifier, void *data)
foreach_dynamic_sysbus_device(error_on_sysbus_device, NULL);
}
+static void machine_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ /* Default 128 MB as guest ram size */
+ mc->default_ram_size = 128 * M_BYTE;
+}
+
static void machine_initfn(Object *obj)
{
MachineState *ms = MACHINE(obj);
@@ -463,6 +471,7 @@ static const TypeInfo machine_info = {
.parent = TYPE_OBJECT,
.abstract = true,
.class_size = sizeof(MachineClass),
+ .class_init = machine_class_init,
.instance_size = sizeof(MachineState),
.instance_init = machine_initfn,
.instance_finalize = machine_finalize,
diff --git a/hw/core/nmi.c b/hw/core/nmi.c
index 3dff020659..5260d6c1ec 100644
--- a/hw/core/nmi.c
+++ b/hw/core/nmi.c
@@ -21,6 +21,7 @@
#include "hw/nmi.h"
#include "qapi/qmp/qerror.h"
+#include "monitor/monitor.h"
struct do_nmi_s {
int cpu_index;
@@ -70,6 +71,25 @@ void nmi_monitor_handle(int cpu_index, Error **errp)
}
}
+void inject_nmi(void)
+{
+#if defined(TARGET_I386)
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ X86CPU *cpu = X86_CPU(cs);
+
+ if (!cpu->apic_state) {
+ cpu_interrupt(cs, CPU_INTERRUPT_NMI);
+ } else {
+ apic_deliver_nmi(cpu->apic_state);
+ }
+ }
+#else
+ nmi_monitor_handle(0, NULL);
+#endif
+}
+
static const TypeInfo nmi_info = {
.name = TYPE_NMI,
.parent = TYPE_INTERFACE,
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index c413226a97..0309fe5767 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -389,7 +389,7 @@ void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd)
nd->instantiated = 1;
}
-static int qdev_add_one_global(QemuOpts *opts, void *opaque)
+static int qdev_add_one_global(void *opaque, QemuOpts *opts, Error **errp)
{
GlobalProperty *g;
@@ -404,5 +404,6 @@ static int qdev_add_one_global(QemuOpts *opts, void *opaque)
void qemu_add_globals(void)
{
- qemu_opts_foreach(qemu_find_opts("global"), qdev_add_one_global, NULL, 0);
+ qemu_opts_foreach(qemu_find_opts("global"),
+ qdev_add_one_global, NULL, NULL);
}
diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs
index 3ea106d9f3..dd8ea76d17 100644
--- a/hw/display/Makefile.objs
+++ b/hw/display/Makefile.objs
@@ -34,3 +34,7 @@ obj-$(CONFIG_CG3) += cg3.o
obj-$(CONFIG_VGA) += vga.o
common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o
+
+obj-$(CONFIG_VIRTIO) += virtio-gpu.o
+obj-$(CONFIG_VIRTIO_PCI) += virtio-gpu-pci.o
+obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
diff --git a/hw/display/cg3.c b/hw/display/cg3.c
index 1e6ff2b546..b94e5e0d78 100644
--- a/hw/display/cg3.c
+++ b/hw/display/cg3.c
@@ -106,6 +106,7 @@ static void cg3_update_display(void *opaque)
pix = memory_region_get_ram_ptr(&s->vram_mem);
data = (uint32_t *)surface_data(surface);
+ memory_region_sync_dirty_bitmap(&s->vram_mem);
for (y = 0; y < height; y++) {
int update = s->full_update;
@@ -309,6 +310,7 @@ static void cg3_realizefn(DeviceState *dev, Error **errp)
memory_region_init_ram(&s->vram_mem, NULL, "cg3.vram", s->vram_size,
&error_abort);
+ memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA);
vmstate_register_ram_global(&s->vram_mem);
sysbus_init_mmio(sbd, &s->vram_mem);
diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c
index 45c62afac1..603ef50568 100644
--- a/hw/display/exynos4210_fimd.c
+++ b/hw/display/exynos4210_fimd.c
@@ -337,7 +337,7 @@ static inline void fimd_swap_data(unsigned int swap_ctl, uint64_t *data)
if (swap_ctl & FIMD_WINCON_SWAP_BITS) {
res = 0;
for (i = 0; i < 64; i++) {
- if (x & (1ULL << (64 - i))) {
+ if (x & (1ULL << (63 - i))) {
res |= (1ULL << i);
}
}
@@ -1109,6 +1109,12 @@ static inline int fimd_get_buffer_id(Exynos4210fimdWindow *w)
}
}
+static void exynos4210_fimd_invalidate(void *opaque)
+{
+ Exynos4210fimdState *s = (Exynos4210fimdState *)opaque;
+ s->invalidate = true;
+}
+
/* Updates specified window's MemorySection based on values of WINCON,
* VIDOSDA, VIDOSDB, VIDWADDx and SHADOWCON registers */
static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win)
@@ -1136,7 +1142,11 @@ static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win)
/* TODO: add .exit and unref the region there. Not needed yet since sysbus
* does not support hot-unplug.
*/
- memory_region_unref(w->mem_section.mr);
+ if (w->mem_section.mr) {
+ memory_region_set_log(w->mem_section.mr, false, DIRTY_MEMORY_VGA);
+ memory_region_unref(w->mem_section.mr);
+ }
+
w->mem_section = memory_region_find(sysbus_address_space(sbd),
fb_start_addr, w->fb_len);
assert(w->mem_section.mr);
@@ -1162,6 +1172,8 @@ static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win)
cpu_physical_memory_unmap(w->host_fb_addr, fb_mapped_len, 0, 0);
goto error_return;
}
+ memory_region_set_log(w->mem_section.mr, true, DIRTY_MEMORY_VGA);
+ exynos4210_fimd_invalidate(s);
return;
error_return:
@@ -1224,12 +1236,6 @@ static void exynos4210_fimd_update_irq(Exynos4210fimdState *s)
}
}
-static void exynos4210_fimd_invalidate(void *opaque)
-{
- Exynos4210fimdState *s = (Exynos4210fimdState *)opaque;
- s->invalidate = true;
-}
-
static void exynos4210_update_resolution(Exynos4210fimdState *s)
{
DisplaySurface *surface = qemu_console_surface(s->console);
diff --git a/hw/display/framebuffer.c b/hw/display/framebuffer.c
index 4546e42654..2cabced208 100644
--- a/hw/display/framebuffer.c
+++ b/hw/display/framebuffer.c
@@ -63,6 +63,10 @@ void framebuffer_update_display(
assert(mem_section.offset_within_address_space == base);
memory_region_sync_dirty_bitmap(mem);
+ if (!memory_region_is_logging(mem, DIRTY_MEMORY_VGA)) {
+ invalidate = true;
+ }
+
src_base = cpu_physical_memory_map(base, &src_len, 0);
/* If we can't map the framebuffer then bail. We could try harder,
but it's not really worth it as dirty flag tracking will probably
diff --git a/hw/display/g364fb.c b/hw/display/g364fb.c
index 46f7b41211..52a9733bfd 100644
--- a/hw/display/g364fb.c
+++ b/hw/display/g364fb.c
@@ -260,6 +260,7 @@ static void g364fb_update_display(void *opaque)
qemu_console_resize(s->con, s->width, s->height);
}
+ memory_region_sync_dirty_bitmap(&s->mem_vram);
if (s->ctla & CTLA_FORCE_BLANK) {
g364fb_draw_blank(s);
} else if (s->depth == 8) {
@@ -489,7 +490,7 @@ static void g364fb_init(DeviceState *dev, G364State *s)
memory_region_init_ram_ptr(&s->mem_vram, NULL, "vram",
s->vram_size, s->vram);
vmstate_register_ram(&s->mem_vram, dev);
- memory_region_set_coalescing(&s->mem_vram);
+ memory_region_set_log(&s->mem_vram, true, DIRTY_MEMORY_VGA);
}
#define TYPE_G364 "sysbus-g364"
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index b220e2d5d2..722146ec3a 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -2220,6 +2220,7 @@ static VMStateDescription qxl_vmstate_monitors_config = {
.name = "qxl/monitors-config",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = qxl_monitors_config_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT64(guest_monitors_config, PCIQXLDevice),
VMSTATE_END_OF_LIST()
@@ -2253,13 +2254,9 @@ static VMStateDescription qxl_vmstate = {
VMSTATE_UINT64(guest_cursor, PCIQXLDevice),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &qxl_vmstate_monitors_config,
- .needed = qxl_monitors_config_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &qxl_vmstate_monitors_config,
+ NULL
}
};
diff --git a/hw/display/sm501.c b/hw/display/sm501.c
index c72154b6f1..15a5ba8000 100644
--- a/hw/display/sm501.c
+++ b/hw/display/sm501.c
@@ -1322,6 +1322,7 @@ static void sm501_draw_crt(SM501State * s)
}
/* draw each line according to conditions */
+ memory_region_sync_dirty_bitmap(&s->local_mem_region);
for (y = 0; y < height; y++) {
int update_hwc = draw_hwc_line ? within_hwc_y_range(s, y, 1) : 0;
int update = full_update || update_hwc;
@@ -1412,6 +1413,7 @@ void sm501_init(MemoryRegion *address_space_mem, uint32_t base,
memory_region_init_ram(&s->local_mem_region, NULL, "sm501.local",
local_mem_bytes, &error_abort);
vmstate_register_ram_global(&s->local_mem_region);
+ memory_region_set_log(&s->local_mem_region, true, DIRTY_MEMORY_VGA);
s->local_mem = memory_region_get_ram_ptr(&s->local_mem_region);
memory_region_add_subregion(address_space_mem, base, &s->local_mem_region);
diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c
index 66b7ade8da..f5f3f3e69d 100644
--- a/hw/display/tc6393xb.c
+++ b/hw/display/tc6393xb.c
@@ -571,7 +571,7 @@ TC6393xbState *tc6393xb_init(MemoryRegion *sysmem, uint32_t base, qemu_irq irq)
s->irq = irq;
s->gpio_in = qemu_allocate_irqs(tc6393xb_gpio_set, s, TC6393XB_GPIOS);
- s->l3v = *qemu_allocate_irqs(tc6393xb_l3v, s, 1);
+ s->l3v = qemu_allocate_irq(tc6393xb_l3v, s, 0);
s->blanked = 1;
s->sub_irqs = qemu_allocate_irqs(tc6393xb_sub_irq, s, TC6393XB_NR_IRQS);
diff --git a/hw/display/tcx.c b/hw/display/tcx.c
index a9f9f66d15..f3faf78bf8 100644
--- a/hw/display/tcx.c
+++ b/hw/display/tcx.c
@@ -353,6 +353,7 @@ static void tcx_update_display(void *opaque)
return;
}
+ memory_region_sync_dirty_bitmap(&ts->vram_mem);
for (y = 0; y < ts->height; page += TARGET_PAGE_SIZE) {
if (memory_region_get_dirty(&ts->vram_mem, page, TARGET_PAGE_SIZE,
DIRTY_MEMORY_VGA)) {
@@ -446,6 +447,7 @@ static void tcx24_update_display(void *opaque)
dd = surface_stride(surface);
ds = 1024;
+ memory_region_sync_dirty_bitmap(&ts->vram_mem);
for (y = 0; y < ts->height; page += TARGET_PAGE_SIZE,
page24 += TARGET_PAGE_SIZE, cpage += TARGET_PAGE_SIZE) {
if (tcx24_check_dirty(ts, page, page24, cpage)) {
@@ -1006,6 +1008,7 @@ static void tcx_realizefn(DeviceState *dev, Error **errp)
memory_region_init_ram(&s->vram_mem, OBJECT(s), "tcx.vram",
s->vram_size * (1 + 4 + 4), &error_abort);
vmstate_register_ram_global(&s->vram_mem);
+ memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA);
vram_base = memory_region_get_ram_ptr(&s->vram_mem);
/* 10/ROM : FCode ROM */
diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c
index ff5dfb2c23..1dfa331e60 100644
--- a/hw/display/vga-pci.c
+++ b/hw/display/vga-pci.c
@@ -54,9 +54,7 @@ typedef struct PCIVGAState {
VGACommonState vga;
uint32_t flags;
MemoryRegion mmio;
- MemoryRegion ioport;
- MemoryRegion bochs;
- MemoryRegion qext;
+ MemoryRegion mrs[3];
} PCIVGAState;
#define TYPE_PCI_VGA "pci-vga"
@@ -76,16 +74,16 @@ static const VMStateDescription vmstate_vga_pci = {
static uint64_t pci_vga_ioport_read(void *ptr, hwaddr addr,
unsigned size)
{
- PCIVGAState *d = ptr;
+ VGACommonState *s = ptr;
uint64_t ret = 0;
switch (size) {
case 1:
- ret = vga_ioport_read(&d->vga, addr);
+ ret = vga_ioport_read(s, addr + 0x3c0);
break;
case 2:
- ret = vga_ioport_read(&d->vga, addr);
- ret |= vga_ioport_read(&d->vga, addr+1) << 8;
+ ret = vga_ioport_read(s, addr + 0x3c0);
+ ret |= vga_ioport_read(s, addr + 0x3c1) << 8;
break;
}
return ret;
@@ -94,11 +92,11 @@ static uint64_t pci_vga_ioport_read(void *ptr, hwaddr addr,
static void pci_vga_ioport_write(void *ptr, hwaddr addr,
uint64_t val, unsigned size)
{
- PCIVGAState *d = ptr;
+ VGACommonState *s = ptr;
switch (size) {
case 1:
- vga_ioport_write(&d->vga, addr + 0x3c0, val);
+ vga_ioport_write(s, addr + 0x3c0, val);
break;
case 2:
/*
@@ -106,8 +104,8 @@ static void pci_vga_ioport_write(void *ptr, hwaddr addr,
* indexed registers with a single word write because the
* index byte is updated first.
*/
- vga_ioport_write(&d->vga, addr + 0x3c0, val & 0xff);
- vga_ioport_write(&d->vga, addr + 0x3c1, (val >> 8) & 0xff);
+ vga_ioport_write(s, addr + 0x3c0, val & 0xff);
+ vga_ioport_write(s, addr + 0x3c1, (val >> 8) & 0xff);
break;
}
}
@@ -125,21 +123,21 @@ static const MemoryRegionOps pci_vga_ioport_ops = {
static uint64_t pci_vga_bochs_read(void *ptr, hwaddr addr,
unsigned size)
{
- PCIVGAState *d = ptr;
+ VGACommonState *s = ptr;
int index = addr >> 1;
- vbe_ioport_write_index(&d->vga, 0, index);
- return vbe_ioport_read_data(&d->vga, 0);
+ vbe_ioport_write_index(s, 0, index);
+ return vbe_ioport_read_data(s, 0);
}
static void pci_vga_bochs_write(void *ptr, hwaddr addr,
uint64_t val, unsigned size)
{
- PCIVGAState *d = ptr;
+ VGACommonState *s = ptr;
int index = addr >> 1;
- vbe_ioport_write_index(&d->vga, 0, index);
- vbe_ioport_write_data(&d->vga, 0, val);
+ vbe_ioport_write_index(s, 0, index);
+ vbe_ioport_write_data(s, 0, val);
}
static const MemoryRegionOps pci_vga_bochs_ops = {
@@ -154,13 +152,13 @@ static const MemoryRegionOps pci_vga_bochs_ops = {
static uint64_t pci_vga_qext_read(void *ptr, hwaddr addr, unsigned size)
{
- PCIVGAState *d = ptr;
+ VGACommonState *s = ptr;
switch (addr) {
case PCI_VGA_QEXT_REG_SIZE:
return PCI_VGA_QEXT_SIZE;
case PCI_VGA_QEXT_REG_BYTEORDER:
- return d->vga.big_endian_fb ?
+ return s->big_endian_fb ?
PCI_VGA_QEXT_BIG_ENDIAN : PCI_VGA_QEXT_LITTLE_ENDIAN;
default:
return 0;
@@ -170,15 +168,15 @@ static uint64_t pci_vga_qext_read(void *ptr, hwaddr addr, unsigned size)
static void pci_vga_qext_write(void *ptr, hwaddr addr,
uint64_t val, unsigned size)
{
- PCIVGAState *d = ptr;
+ VGACommonState *s = ptr;
switch (addr) {
case PCI_VGA_QEXT_REG_BYTEORDER:
if (val == PCI_VGA_QEXT_BIG_ENDIAN) {
- d->vga.big_endian_fb = true;
+ s->big_endian_fb = true;
}
if (val == PCI_VGA_QEXT_LITTLE_ENDIAN) {
- d->vga.big_endian_fb = false;
+ s->big_endian_fb = false;
}
break;
}
@@ -206,10 +204,34 @@ static const MemoryRegionOps pci_vga_qext_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
+void pci_std_vga_mmio_region_init(VGACommonState *s,
+ MemoryRegion *parent,
+ MemoryRegion *subs,
+ bool qext)
+{
+ memory_region_init_io(&subs[0], NULL, &pci_vga_ioport_ops, s,
+ "vga ioports remapped", PCI_VGA_IOPORT_SIZE);
+ memory_region_add_subregion(parent, PCI_VGA_IOPORT_OFFSET,
+ &subs[0]);
+
+ memory_region_init_io(&subs[1], NULL, &pci_vga_bochs_ops, s,
+ "bochs dispi interface", PCI_VGA_BOCHS_SIZE);
+ memory_region_add_subregion(parent, PCI_VGA_BOCHS_OFFSET,
+ &subs[1]);
+
+ if (qext) {
+ memory_region_init_io(&subs[2], NULL, &pci_vga_qext_ops, s,
+ "qemu extended regs", PCI_VGA_QEXT_SIZE);
+ memory_region_add_subregion(parent, PCI_VGA_QEXT_OFFSET,
+ &subs[2]);
+ }
+}
+
static void pci_std_vga_realize(PCIDevice *dev, Error **errp)
{
PCIVGAState *d = PCI_VGA(dev);
VGACommonState *s = &d->vga;
+ bool qext = false;
/* vga + console init */
vga_common_init(s, OBJECT(dev), true);
@@ -224,23 +246,12 @@ static void pci_std_vga_realize(PCIDevice *dev, Error **errp)
/* mmio bar for vga register access */
if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_MMIO)) {
memory_region_init(&d->mmio, NULL, "vga.mmio", 4096);
- memory_region_init_io(&d->ioport, NULL, &pci_vga_ioport_ops, d,
- "vga ioports remapped", PCI_VGA_IOPORT_SIZE);
- memory_region_init_io(&d->bochs, NULL, &pci_vga_bochs_ops, d,
- "bochs dispi interface", PCI_VGA_BOCHS_SIZE);
-
- memory_region_add_subregion(&d->mmio, PCI_VGA_IOPORT_OFFSET,
- &d->ioport);
- memory_region_add_subregion(&d->mmio, PCI_VGA_BOCHS_OFFSET,
- &d->bochs);
if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_QEXT)) {
- memory_region_init_io(&d->qext, NULL, &pci_vga_qext_ops, d,
- "qemu extended regs", PCI_VGA_QEXT_SIZE);
- memory_region_add_subregion(&d->mmio, PCI_VGA_QEXT_OFFSET,
- &d->qext);
+ qext = true;
pci_set_byte(&d->dev.config[PCI_REVISION_ID], 2);
}
+ pci_std_vga_mmio_region_init(s, &d->mmio, d->mrs, qext);
pci_register_bar(&d->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
}
@@ -262,6 +273,7 @@ static void pci_secondary_vga_realize(PCIDevice *dev, Error **errp)
{
PCIVGAState *d = PCI_VGA(dev);
VGACommonState *s = &d->vga;
+ bool qext = false;
/* vga + console init */
vga_common_init(s, OBJECT(dev), false);
@@ -269,23 +281,12 @@ static void pci_secondary_vga_realize(PCIDevice *dev, Error **errp)
/* mmio bar */
memory_region_init(&d->mmio, OBJECT(dev), "vga.mmio", 4096);
- memory_region_init_io(&d->ioport, OBJECT(dev), &pci_vga_ioport_ops, d,
- "vga ioports remapped", PCI_VGA_IOPORT_SIZE);
- memory_region_init_io(&d->bochs, OBJECT(dev), &pci_vga_bochs_ops, d,
- "bochs dispi interface", PCI_VGA_BOCHS_SIZE);
-
- memory_region_add_subregion(&d->mmio, PCI_VGA_IOPORT_OFFSET,
- &d->ioport);
- memory_region_add_subregion(&d->mmio, PCI_VGA_BOCHS_OFFSET,
- &d->bochs);
if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_QEXT)) {
- memory_region_init_io(&d->qext, NULL, &pci_vga_qext_ops, d,
- "qemu extended regs", PCI_VGA_QEXT_SIZE);
- memory_region_add_subregion(&d->mmio, PCI_VGA_QEXT_OFFSET,
- &d->qext);
+ qext = true;
pci_set_byte(&d->dev.config[PCI_REVISION_ID], 2);
}
+ pci_std_vga_mmio_region_init(s, &d->mmio, d->mrs, qext);
pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->vram);
pci_register_bar(&d->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
diff --git a/hw/display/vga.c b/hw/display/vga.c
index d1d296c74e..b35d523e65 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -2035,6 +2035,7 @@ static const VMStateDescription vmstate_vga_endian = {
.name = "vga.endian",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = vga_endian_state_needed,
.fields = (VMStateField[]) {
VMSTATE_BOOL(big_endian_fb, VGACommonState),
VMSTATE_END_OF_LIST()
@@ -2078,13 +2079,9 @@ const VMStateDescription vmstate_vga_common = {
VMSTATE_UINT32(vbe_bank_mask, VGACommonState),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_vga_endian,
- .needed = vga_endian_state_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_vga_endian,
+ NULL
}
};
diff --git a/hw/display/vga_int.h b/hw/display/vga_int.h
index fcfcc5f431..40ba6a4207 100644
--- a/hw/display/vga_int.h
+++ b/hw/display/vga_int.h
@@ -219,4 +219,10 @@ extern const uint8_t gr_mask[16];
extern const MemoryRegionOps vga_mem_ops;
+/* vga-pci.c */
+void pci_std_vga_mmio_region_init(VGACommonState *s,
+ MemoryRegion *parent,
+ MemoryRegion *subs,
+ bool qext);
+
#endif
diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c
new file mode 100644
index 0000000000..f0f25c7bc9
--- /dev/null
+++ b/hw/display/virtio-gpu-pci.c
@@ -0,0 +1,68 @@
+/*
+ * Virtio video device
+ *
+ * Copyright Red Hat
+ *
+ * Authors:
+ * Dave Airlie
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+#include "hw/pci/pci.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-gpu.h"
+
+static Property virtio_gpu_pci_properties[] = {
+ DEFINE_VIRTIO_GPU_PROPERTIES(VirtIOGPUPCI, vdev.conf),
+ DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOGPUPCI *vgpu = VIRTIO_GPU_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&vgpu->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ /* force virtio-1.0 */
+ vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
+ vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void virtio_gpu_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
+ dc->props = virtio_gpu_pci_properties;
+ k->realize = virtio_gpu_pci_realize;
+ pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER;
+}
+
+static void virtio_gpu_initfn(Object *obj)
+{
+ VirtIOGPUPCI *dev = VIRTIO_GPU_PCI(obj);
+ object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_GPU);
+ object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
+}
+
+static const TypeInfo virtio_gpu_pci_info = {
+ .name = TYPE_VIRTIO_GPU_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOGPUPCI),
+ .instance_init = virtio_gpu_initfn,
+ .class_init = virtio_gpu_pci_class_init,
+};
+
+static void virtio_gpu_pci_register_types(void)
+{
+ type_register_static(&virtio_gpu_pci_info);
+}
+type_init(virtio_gpu_pci_register_types)
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
new file mode 100644
index 0000000000..8c109b79f4
--- /dev/null
+++ b/hw/display/virtio-gpu.c
@@ -0,0 +1,918 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu-common.h"
+#include "qemu/iov.h"
+#include "ui/console.h"
+#include "trace.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-bus.h"
+
+static struct virtio_gpu_simple_resource*
+virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
+
+static void update_cursor_data_simple(VirtIOGPU *g,
+ struct virtio_gpu_scanout *s,
+ uint32_t resource_id)
+{
+ struct virtio_gpu_simple_resource *res;
+ uint32_t pixels;
+
+ res = virtio_gpu_find_resource(g, resource_id);
+ if (!res) {
+ return;
+ }
+
+ if (pixman_image_get_width(res->image) != s->current_cursor->width ||
+ pixman_image_get_height(res->image) != s->current_cursor->height) {
+ return;
+ }
+
+ pixels = s->current_cursor->width * s->current_cursor->height;
+ memcpy(s->current_cursor->data,
+ pixman_image_get_data(res->image),
+ pixels * sizeof(uint32_t));
+}
+
+static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
+{
+ struct virtio_gpu_scanout *s;
+
+ if (cursor->pos.scanout_id >= g->conf.max_outputs) {
+ return;
+ }
+ s = &g->scanout[cursor->pos.scanout_id];
+
+ if (cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR) {
+ if (!s->current_cursor) {
+ s->current_cursor = cursor_alloc(64, 64);
+ }
+
+ s->current_cursor->hot_x = cursor->hot_x;
+ s->current_cursor->hot_y = cursor->hot_y;
+
+ if (cursor->resource_id > 0) {
+ update_cursor_data_simple(g, s, cursor->resource_id);
+ }
+ dpy_cursor_define(s->con, s->current_cursor);
+ }
+ dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
+ cursor->resource_id ? 1 : 0);
+}
+
+static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VirtIOGPU *g = VIRTIO_GPU(vdev);
+ memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
+}
+
+static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
+{
+ VirtIOGPU *g = VIRTIO_GPU(vdev);
+ struct virtio_gpu_config vgconfig;
+
+ memcpy(&vgconfig, config, sizeof(g->virtio_config));
+
+ if (vgconfig.events_clear) {
+ g->virtio_config.events_read &= ~vgconfig.events_clear;
+ }
+}
+
+static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features)
+{
+ return features;
+}
+
+static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
+{
+ g->virtio_config.events_read |= event_type;
+ virtio_notify_config(&g->parent_obj);
+}
+
+static struct virtio_gpu_simple_resource *
+virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
+{
+ struct virtio_gpu_simple_resource *res;
+
+ QTAILQ_FOREACH(res, &g->reslist, next) {
+ if (res->resource_id == resource_id) {
+ return res;
+ }
+ }
+ return NULL;
+}
+
+void virtio_gpu_ctrl_response(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd,
+ struct virtio_gpu_ctrl_hdr *resp,
+ size_t resp_len)
+{
+ size_t s;
+
+ if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
+ resp->flags |= VIRTIO_GPU_FLAG_FENCE;
+ resp->fence_id = cmd->cmd_hdr.fence_id;
+ resp->ctx_id = cmd->cmd_hdr.ctx_id;
+ }
+ s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
+ if (s != resp_len) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: response size incorrect %zu vs %zu\n",
+ __func__, s, resp_len);
+ }
+ virtqueue_push(cmd->vq, &cmd->elem, s);
+ virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
+ cmd->finished = true;
+}
+
+void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd,
+ enum virtio_gpu_ctrl_type type)
+{
+ struct virtio_gpu_ctrl_hdr resp;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.type = type;
+ virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
+}
+
+static void
+virtio_gpu_fill_display_info(VirtIOGPU *g,
+ struct virtio_gpu_resp_display_info *dpy_info)
+{
+ int i;
+
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ if (g->enabled_output_bitmask & (1 << i)) {
+ dpy_info->pmodes[i].enabled = 1;
+ dpy_info->pmodes[i].r.width = g->req_state[i].width;
+ dpy_info->pmodes[i].r.height = g->req_state[i].height;
+ }
+ }
+}
+
+void virtio_gpu_get_display_info(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resp_display_info display_info;
+
+ trace_virtio_gpu_cmd_get_display_info();
+ memset(&display_info, 0, sizeof(display_info));
+ display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
+ virtio_gpu_fill_display_info(g, &display_info);
+ virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
+ sizeof(display_info));
+}
+
+static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
+{
+ switch (virtio_gpu_format) {
+#ifdef HOST_WORDS_BIGENDIAN
+ case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
+ return PIXMAN_b8g8r8x8;
+ case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
+ return PIXMAN_b8g8r8a8;
+ case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
+ return PIXMAN_x8r8g8b8;
+ case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
+ return PIXMAN_a8r8g8b8;
+ case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
+ return PIXMAN_r8g8b8x8;
+ case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
+ return PIXMAN_r8g8b8a8;
+ case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
+ return PIXMAN_x8b8g8r8;
+ case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
+ return PIXMAN_a8b8g8r8;
+#else
+ case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
+ return PIXMAN_x8r8g8b8;
+ case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
+ return PIXMAN_a8r8g8b8;
+ case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
+ return PIXMAN_b8g8r8x8;
+ case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
+ return PIXMAN_b8g8r8a8;
+ case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
+ return PIXMAN_x8b8g8r8;
+ case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
+ return PIXMAN_a8b8g8r8;
+ case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
+ return PIXMAN_r8g8b8x8;
+ case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
+ return PIXMAN_r8g8b8a8;
+#endif
+ default:
+ return 0;
+ }
+}
+
+static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ pixman_format_code_t pformat;
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_create_2d c2d;
+
+ VIRTIO_GPU_FILL_CMD(c2d);
+ trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
+ c2d.width, c2d.height);
+
+ if (c2d.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_find_resource(g, c2d.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, c2d.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = g_new0(struct virtio_gpu_simple_resource, 1);
+
+ res->width = c2d.width;
+ res->height = c2d.height;
+ res->format = c2d.format;
+ res->resource_id = c2d.resource_id;
+
+ pformat = get_pixman_format(c2d.format);
+ if (!pformat) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: host couldn't handle guest format %d\n",
+ __func__, c2d.format);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+ res->image = pixman_image_create_bits(pformat,
+ c2d.width,
+ c2d.height,
+ NULL, 0);
+
+ if (!res->image) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: resource creation failed %d %d %d\n",
+ __func__, c2d.resource_id, c2d.width, c2d.height);
+ g_free(res);
+ cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
+ return;
+ }
+
+ QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+}
+
+static void virtio_gpu_resource_destroy(VirtIOGPU *g,
+ struct virtio_gpu_simple_resource *res)
+{
+ pixman_image_unref(res->image);
+ QTAILQ_REMOVE(&g->reslist, res, next);
+ g_free(res);
+}
+
+static void virtio_gpu_resource_unref(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_unref unref;
+
+ VIRTIO_GPU_FILL_CMD(unref);
+ trace_virtio_gpu_cmd_res_unref(unref.resource_id);
+
+ res = virtio_gpu_find_resource(g, unref.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
+ __func__, unref.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+ virtio_gpu_resource_destroy(g, res);
+}
+
+static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ int h;
+ uint32_t src_offset, dst_offset, stride;
+ int bpp;
+ pixman_format_code_t format;
+ struct virtio_gpu_transfer_to_host_2d t2d;
+
+ VIRTIO_GPU_FILL_CMD(t2d);
+ trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
+
+ res = virtio_gpu_find_resource(g, t2d.resource_id);
+ if (!res || !res->iov) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
+ __func__, t2d.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ if (t2d.r.x > res->width ||
+ t2d.r.y > res->height ||
+ t2d.r.width > res->width ||
+ t2d.r.height > res->height ||
+ t2d.r.x + t2d.r.width > res->width ||
+ t2d.r.y + t2d.r.height > res->height) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
+ " bounds for resource %d: %d %d %d %d vs %d %d\n",
+ __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
+ t2d.r.width, t2d.r.height, res->width, res->height);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ format = pixman_image_get_format(res->image);
+ bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
+ stride = pixman_image_get_stride(res->image);
+
+ if (t2d.offset || t2d.r.x || t2d.r.y ||
+ t2d.r.width != pixman_image_get_width(res->image)) {
+ void *img_data = pixman_image_get_data(res->image);
+ for (h = 0; h < t2d.r.height; h++) {
+ src_offset = t2d.offset + stride * h;
+ dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);
+
+ iov_to_buf(res->iov, res->iov_cnt, src_offset,
+ (uint8_t *)img_data
+ + dst_offset, t2d.r.width * bpp);
+ }
+ } else {
+ iov_to_buf(res->iov, res->iov_cnt, 0,
+ pixman_image_get_data(res->image),
+ pixman_image_get_stride(res->image)
+ * pixman_image_get_height(res->image));
+ }
+}
+
+static void virtio_gpu_resource_flush(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_flush rf;
+ pixman_region16_t flush_region;
+ int i;
+
+ VIRTIO_GPU_FILL_CMD(rf);
+ trace_virtio_gpu_cmd_res_flush(rf.resource_id,
+ rf.r.width, rf.r.height, rf.r.x, rf.r.y);
+
+ res = virtio_gpu_find_resource(g, rf.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
+ __func__, rf.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ if (rf.r.x > res->width ||
+ rf.r.y > res->height ||
+ rf.r.width > res->width ||
+ rf.r.height > res->height ||
+ rf.r.x + rf.r.width > res->width ||
+ rf.r.y + rf.r.height > res->height) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
+ " bounds for resource %d: %d %d %d %d vs %d %d\n",
+ __func__, rf.resource_id, rf.r.x, rf.r.y,
+ rf.r.width, rf.r.height, res->width, res->height);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ pixman_region_init_rect(&flush_region,
+ rf.r.x, rf.r.y, rf.r.width, rf.r.height);
+ for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
+ struct virtio_gpu_scanout *scanout;
+ pixman_region16_t region, finalregion;
+ pixman_box16_t *extents;
+
+ if (!(res->scanout_bitmask & (1 << i))) {
+ continue;
+ }
+ scanout = &g->scanout[i];
+
+ pixman_region_init(&finalregion);
+ pixman_region_init_rect(&region, scanout->x, scanout->y,
+ scanout->width, scanout->height);
+
+ pixman_region_intersect(&finalregion, &flush_region, &region);
+ pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
+ extents = pixman_region_extents(&finalregion);
+ /* work out the area we need to update for each console */
+ dpy_gfx_update(g->scanout[i].con,
+ extents->x1, extents->y1,
+ extents->x2 - extents->x1,
+ extents->y2 - extents->y1);
+
+ pixman_region_fini(&region);
+ pixman_region_fini(&finalregion);
+ }
+ pixman_region_fini(&flush_region);
+}
+
+static void virtio_gpu_set_scanout(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_scanout *scanout;
+ pixman_format_code_t format;
+ uint32_t offset;
+ int bpp;
+ struct virtio_gpu_set_scanout ss;
+
+ VIRTIO_GPU_FILL_CMD(ss);
+ trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
+ ss.r.width, ss.r.height, ss.r.x, ss.r.y);
+
+ g->enable = 1;
+ if (ss.resource_id == 0) {
+ scanout = &g->scanout[ss.scanout_id];
+ if (scanout->resource_id) {
+ res = virtio_gpu_find_resource(g, scanout->resource_id);
+ if (res) {
+ res->scanout_bitmask &= ~(1 << ss.scanout_id);
+ }
+ }
+ if (ss.scanout_id == 0 ||
+ ss.scanout_id >= g->conf.max_outputs) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: illegal scanout id specified %d",
+ __func__, ss.scanout_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
+ return;
+ }
+ dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
+ scanout->ds = NULL;
+ scanout->width = 0;
+ scanout->height = 0;
+ return;
+ }
+
+ /* create a surface for this scanout */
+ if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT ||
+ ss.scanout_id >= g->conf.max_outputs) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
+ __func__, ss.scanout_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
+ return;
+ }
+
+ res = virtio_gpu_find_resource(g, ss.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
+ __func__, ss.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ if (ss.r.x > res->width ||
+ ss.r.y > res->height ||
+ ss.r.width > res->width ||
+ ss.r.height > res->height ||
+ ss.r.x + ss.r.width > res->width ||
+ ss.r.y + ss.r.height > res->height) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
+ " resource %d, (%d,%d)+%d,%d vs %d %d\n",
+ __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
+ ss.r.width, ss.r.height, res->width, res->height);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ scanout = &g->scanout[ss.scanout_id];
+
+ format = pixman_image_get_format(res->image);
+ bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
+ offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
+ if (!scanout->ds || surface_data(scanout->ds)
+ != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
+ scanout->width != ss.r.width ||
+ scanout->height != ss.r.height) {
+ /* realloc the surface ptr */
+ scanout->ds = qemu_create_displaysurface_from
+ (ss.r.width, ss.r.height, format,
+ pixman_image_get_stride(res->image),
+ (uint8_t *)pixman_image_get_data(res->image) + offset);
+ if (!scanout->ds) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+ dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
+ }
+
+ res->scanout_bitmask |= (1 << ss.scanout_id);
+ scanout->resource_id = ss.resource_id;
+ scanout->x = ss.r.x;
+ scanout->y = ss.r.y;
+ scanout->width = ss.r.width;
+ scanout->height = ss.r.height;
+}
+
+int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
+ struct virtio_gpu_ctrl_command *cmd,
+ struct iovec **iov)
+{
+ struct virtio_gpu_mem_entry *ents;
+ size_t esize, s;
+ int i;
+
+ if (ab->nr_entries > 16384) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: nr_entries is too big (%d > 16384)\n",
+ __func__, ab->nr_entries);
+ return -1;
+ }
+
+ esize = sizeof(*ents) * ab->nr_entries;
+ ents = g_malloc(esize);
+ s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
+ sizeof(*ab), ents, esize);
+ if (s != esize) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: command data size incorrect %zu vs %zu\n",
+ __func__, s, esize);
+ g_free(ents);
+ return -1;
+ }
+
+ *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
+ for (i = 0; i < ab->nr_entries; i++) {
+ hwaddr len = ents[i].length;
+ (*iov)[i].iov_len = ents[i].length;
+ (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
+ if (!(*iov)[i].iov_base || len != ents[i].length) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
+ " resource %d element %d\n",
+ __func__, ab->resource_id, i);
+ virtio_gpu_cleanup_mapping_iov(*iov, i);
+ g_free(ents);
+ g_free(*iov);
+ *iov = NULL;
+ return -1;
+ }
+ }
+ g_free(ents);
+ return 0;
+}
+
+void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
+ iov[i].iov_len);
+ }
+}
+
+static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
+{
+ virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
+ g_free(res->iov);
+ res->iov = NULL;
+ res->iov_cnt = 0;
+}
+
+static void
+virtio_gpu_resource_attach_backing(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_attach_backing ab;
+ int ret;
+
+ VIRTIO_GPU_FILL_CMD(ab);
+ trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);
+
+ res = virtio_gpu_find_resource(g, ab.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
+ __func__, ab.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
+ if (ret != 0) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+ res->iov_cnt = ab.nr_entries;
+}
+
+static void
+virtio_gpu_resource_detach_backing(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_detach_backing detach;
+
+ VIRTIO_GPU_FILL_CMD(detach);
+ trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);
+
+ res = virtio_gpu_find_resource(g, detach.resource_id);
+ if (!res || !res->iov) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
+ __func__, detach.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+ virtio_gpu_cleanup_mapping(res);
+}
+
+static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
+
+ switch (cmd->cmd_hdr.type) {
+ case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
+ virtio_gpu_get_display_info(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
+ virtio_gpu_resource_create_2d(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_UNREF:
+ virtio_gpu_resource_unref(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
+ virtio_gpu_resource_flush(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
+ virtio_gpu_transfer_to_host_2d(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_SET_SCANOUT:
+ virtio_gpu_set_scanout(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
+ virtio_gpu_resource_attach_backing(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
+ virtio_gpu_resource_detach_backing(g, cmd);
+ break;
+ default:
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ break;
+ }
+ if (!cmd->finished) {
+ virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
+ VIRTIO_GPU_RESP_OK_NODATA);
+ }
+}
+
+static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOGPU *g = VIRTIO_GPU(vdev);
+ qemu_bh_schedule(g->ctrl_bh);
+}
+
+static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOGPU *g = VIRTIO_GPU(vdev);
+ qemu_bh_schedule(g->cursor_bh);
+}
+
+static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOGPU *g = VIRTIO_GPU(vdev);
+ struct virtio_gpu_ctrl_command *cmd;
+
+ if (!virtio_queue_ready(vq)) {
+ return;
+ }
+
+ cmd = g_new(struct virtio_gpu_ctrl_command, 1);
+ while (virtqueue_pop(vq, &cmd->elem)) {
+ cmd->vq = vq;
+ cmd->error = 0;
+ cmd->finished = false;
+ g->stats.requests++;
+
+ virtio_gpu_simple_process_cmd(g, cmd);
+ if (!cmd->finished) {
+ QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
+ g->stats.inflight++;
+ if (g->stats.max_inflight < g->stats.inflight) {
+ g->stats.max_inflight = g->stats.inflight;
+ }
+ fprintf(stderr, "inflight: %3d (+)\r", g->stats.inflight);
+ cmd = g_new(struct virtio_gpu_ctrl_command, 1);
+ }
+ }
+ g_free(cmd);
+}
+
+static void virtio_gpu_ctrl_bh(void *opaque)
+{
+ VirtIOGPU *g = opaque;
+ virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
+}
+
+static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOGPU *g = VIRTIO_GPU(vdev);
+ VirtQueueElement elem;
+ size_t s;
+ struct virtio_gpu_update_cursor cursor_info;
+
+ if (!virtio_queue_ready(vq)) {
+ return;
+ }
+ while (virtqueue_pop(vq, &elem)) {
+ s = iov_to_buf(elem.out_sg, elem.out_num, 0,
+ &cursor_info, sizeof(cursor_info));
+ if (s != sizeof(cursor_info)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: cursor size incorrect %zu vs %zu\n",
+ __func__, s, sizeof(cursor_info));
+ } else {
+ update_cursor(g, &cursor_info);
+ }
+ virtqueue_push(vq, &elem, 0);
+ virtio_notify(vdev, vq);
+ }
+}
+
+static void virtio_gpu_cursor_bh(void *opaque)
+{
+ VirtIOGPU *g = opaque;
+ virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
+}
+
+static void virtio_gpu_invalidate_display(void *opaque)
+{
+}
+
+static void virtio_gpu_update_display(void *opaque)
+{
+}
+
+static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
+{
+}
+
+static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
+{
+ VirtIOGPU *g = opaque;
+
+ if (idx >= g->conf.max_outputs) {
+ return -1;
+ }
+
+ g->req_state[idx].x = info->xoff;
+ g->req_state[idx].y = info->yoff;
+ g->req_state[idx].width = info->width;
+ g->req_state[idx].height = info->height;
+
+ if (info->width && info->height) {
+ g->enabled_output_bitmask |= (1 << idx);
+ } else {
+ g->enabled_output_bitmask &= ~(1 << idx);
+ }
+
+ /* send event to guest */
+ virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
+ return 0;
+}
+
+const GraphicHwOps virtio_gpu_ops = {
+ .invalidate = virtio_gpu_invalidate_display,
+ .gfx_update = virtio_gpu_update_display,
+ .text_update = virtio_gpu_text_update,
+ .ui_info = virtio_gpu_ui_info,
+};
+
+static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
+ VirtIOGPU *g = VIRTIO_GPU(qdev);
+ int i;
+
+ g->config_size = sizeof(struct virtio_gpu_config);
+ g->virtio_config.num_scanouts = g->conf.max_outputs;
+ virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
+ g->config_size);
+
+ g->req_state[0].width = 1024;
+ g->req_state[0].height = 768;
+
+ g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
+ g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
+
+ g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
+ g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
+ QTAILQ_INIT(&g->reslist);
+ QTAILQ_INIT(&g->fenceq);
+
+ g->enabled_output_bitmask = 1;
+ g->qdev = qdev;
+
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ g->scanout[i].con =
+ graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
+ if (i > 0) {
+ dpy_gfx_replace_surface(g->scanout[i].con, NULL);
+ }
+ }
+}
+
+static void virtio_gpu_instance_init(Object *obj)
+{
+}
+
+static void virtio_gpu_reset(VirtIODevice *vdev)
+{
+ VirtIOGPU *g = VIRTIO_GPU(vdev);
+ struct virtio_gpu_simple_resource *res, *tmp;
+ int i;
+
+ g->enable = 0;
+
+ QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
+ virtio_gpu_resource_destroy(g, res);
+ }
+ for (i = 0; i < g->conf.max_outputs; i++) {
+#if 0
+ g->req_state[i].x = 0;
+ g->req_state[i].y = 0;
+ if (i == 0) {
+ g->req_state[0].width = 1024;
+ g->req_state[0].height = 768;
+ } else {
+ g->req_state[i].width = 0;
+ g->req_state[i].height = 0;
+ }
+#endif
+ g->scanout[i].resource_id = 0;
+ g->scanout[i].width = 0;
+ g->scanout[i].height = 0;
+ g->scanout[i].x = 0;
+ g->scanout[i].y = 0;
+ g->scanout[i].ds = NULL;
+ }
+ g->enabled_output_bitmask = 1;
+}
+
+static Property virtio_gpu_properties[] = {
+ DEFINE_VIRTIO_GPU_PROPERTIES(VirtIOGPU, conf),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_gpu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ vdc->realize = virtio_gpu_device_realize;
+ vdc->get_config = virtio_gpu_get_config;
+ vdc->set_config = virtio_gpu_set_config;
+ vdc->get_features = virtio_gpu_get_features;
+
+ vdc->reset = virtio_gpu_reset;
+
+ dc->props = virtio_gpu_properties;
+}
+
+static const TypeInfo virtio_gpu_info = {
+ .name = TYPE_VIRTIO_GPU,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIOGPU),
+ .instance_init = virtio_gpu_instance_init,
+ .class_init = virtio_gpu_class_init,
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_gpu_info);
+}
+
+type_init(virtio_register_types)
+
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);
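
The QEMU_BUILD_BUG_ON() lines above pin the sizes of the virtio-gpu wire-format structures at compile time, so a stray padding or type change cannot silently break the guest ABI. A minimal sketch of the idiom, using an invented macro name and a header struct assumed to mirror the five-field virtio_gpu_ctrl_hdr layout:

#include <stdint.h>

/* Illustrative only: a negative array size makes the compiler reject the
 * build as soon as the structure's size drifts from the expected value. */
#define BUILD_BUG_ON_SKETCH(cond) \
    typedef char build_bug_on_sketch_[(cond) ? -1 : 1]

struct sketch_ctrl_hdr {        /* assumed to mirror virtio_gpu_ctrl_hdr */
    uint32_t type;
    uint32_t flags;
    uint64_t fence_id;
    uint32_t ctx_id;
    uint32_t padding;
};

BUILD_BUG_ON_SKETCH(sizeof(struct sketch_ctrl_hdr) != 24);

The real QEMU_BUILD_BUG_ON macro differs in detail, but the effect is the same: the checks cost nothing at run time and fail loudly at build time.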
diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c
new file mode 100644
index 0000000000..94f9d0eb5a
--- /dev/null
+++ b/hw/display/virtio-vga.c
@@ -0,0 +1,175 @@
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "ui/console.h"
+#include "vga_int.h"
+#include "hw/virtio/virtio-pci.h"
+
+/*
+ * virtio-vga: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_VGA "virtio-vga"
+#define VIRTIO_VGA(obj) \
+ OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA)
+
+typedef struct VirtIOVGA {
+ VirtIOPCIProxy parent_obj;
+ VirtIOGPU vdev;
+ VGACommonState vga;
+ MemoryRegion vga_mrs[3];
+} VirtIOVGA;
+
+static void virtio_vga_invalidate_display(void *opaque)
+{
+ VirtIOVGA *vvga = opaque;
+
+ if (vvga->vdev.enable) {
+ virtio_gpu_ops.invalidate(&vvga->vdev);
+ } else {
+ vvga->vga.hw_ops->invalidate(&vvga->vga);
+ }
+}
+
+static void virtio_vga_update_display(void *opaque)
+{
+ VirtIOVGA *vvga = opaque;
+
+ if (vvga->vdev.enable) {
+ virtio_gpu_ops.gfx_update(&vvga->vdev);
+ } else {
+ vvga->vga.hw_ops->gfx_update(&vvga->vga);
+ }
+}
+
+static void virtio_vga_text_update(void *opaque, console_ch_t *chardata)
+{
+ VirtIOVGA *vvga = opaque;
+
+ if (vvga->vdev.enable) {
+ if (virtio_gpu_ops.text_update) {
+ virtio_gpu_ops.text_update(&vvga->vdev, chardata);
+ }
+ } else {
+ if (vvga->vga.hw_ops->text_update) {
+ vvga->vga.hw_ops->text_update(&vvga->vga, chardata);
+ }
+ }
+}
+
+static int virtio_vga_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
+{
+ VirtIOVGA *vvga = opaque;
+
+ if (virtio_gpu_ops.ui_info) {
+ return virtio_gpu_ops.ui_info(&vvga->vdev, idx, info);
+ }
+ return -1;
+}
+
+static const GraphicHwOps virtio_vga_ops = {
+ .invalidate = virtio_vga_invalidate_display,
+ .gfx_update = virtio_vga_update_display,
+ .text_update = virtio_vga_text_update,
+ .ui_info = virtio_vga_ui_info,
+};
+
+/* VGA device wrapper around PCI device around virtio GPU */
+static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOVGA *vvga = VIRTIO_VGA(vpci_dev);
+ VirtIOGPU *g = &vvga->vdev;
+ VGACommonState *vga = &vvga->vga;
+ uint32_t offset;
+
+ /* init vga compat bits */
+ vga->vram_size_mb = 8;
+ vga_common_init(vga, OBJECT(vpci_dev), false);
+ vga_init(vga, OBJECT(vpci_dev), pci_address_space(&vpci_dev->pci_dev),
+ pci_address_space_io(&vpci_dev->pci_dev), true);
+ pci_register_bar(&vpci_dev->pci_dev, 0,
+ PCI_BASE_ADDRESS_MEM_PREFETCH, &vga->vram);
+
+ /*
+ * Configure virtio bar and regions
+ *
+ * We use bar #2 for the mmio regions, to be compatible with stdvga.
+ * virtio regions are moved to the end of bar #2, to make room for
+ * the stdvga mmio registers at the start of bar #2.
+ */
+ vpci_dev->modern_mem_bar = 2;
+ vpci_dev->msix_bar = 4;
+ offset = memory_region_size(&vpci_dev->modern_bar);
+ offset -= vpci_dev->notify.size;
+ vpci_dev->notify.offset = offset;
+ offset -= vpci_dev->device.size;
+ vpci_dev->device.offset = offset;
+ offset -= vpci_dev->isr.size;
+ vpci_dev->isr.offset = offset;
+ offset -= vpci_dev->common.size;
+ vpci_dev->common.offset = offset;
+
+ /* init virtio bits */
+ qdev_set_parent_bus(DEVICE(g), BUS(&vpci_dev->bus));
+ /* force virtio-1.0 */
+ vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
+ vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
+ object_property_set_bool(OBJECT(g), true, "realized", errp);
+
+ /* add stdvga mmio regions */
+ pci_std_vga_mmio_region_init(vga, &vpci_dev->modern_bar,
+ vvga->vga_mrs, true);
+
+ vga->con = g->scanout[0].con;
+ graphic_console_set_hwops(vga->con, &virtio_vga_ops, vvga);
+}
+
+static void virtio_vga_reset(DeviceState *dev)
+{
+ VirtIOVGA *vvga = VIRTIO_VGA(dev);
+ vvga->vdev.enable = 0;
+
+ vga_dirty_log_start(&vvga->vga);
+}
+
+static Property virtio_vga_properties[] = {
+ DEFINE_VIRTIO_GPU_PROPERTIES(VirtIOVGA, vdev.conf),
+ DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_vga_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
+ dc->props = virtio_vga_properties;
+ dc->reset = virtio_vga_reset;
+ dc->hotpluggable = false;
+
+ k->realize = virtio_vga_realize;
+ pcidev_k->romfile = "vgabios-virtio.bin";
+ pcidev_k->class_id = PCI_CLASS_DISPLAY_VGA;
+}
+
+static void virtio_vga_inst_initfn(Object *obj)
+{
+ VirtIOVGA *dev = VIRTIO_VGA(obj);
+ object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_GPU);
+ object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
+}
+
+static TypeInfo virtio_vga_info = {
+ .name = TYPE_VIRTIO_VGA,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(struct VirtIOVGA),
+ .instance_init = virtio_vga_inst_initfn,
+ .class_init = virtio_vga_class_init,
+};
+
+static void virtio_vga_register_types(void)
+{
+ type_register_static(&virtio_vga_info);
+}
+
+type_init(virtio_vga_register_types)
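
virtio_vga_realize() above packs the virtio capability regions at the top of the modern BAR so the stdvga MMIO registers can sit at offset 0. A small worked sketch of that arithmetic, with region sizes invented for illustration; only the top-down packing mirrors the realize code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t bar_size    = 4u * 1024 * 1024;   /* made-up modern BAR size */
    uint32_t notify_size = 0x3000, device_size = 0x1000;
    uint32_t isr_size    = 0x1000, common_size = 0x1000;
    uint32_t offset = bar_size;

    offset -= notify_size;
    uint32_t notify_off = offset;
    offset -= device_size;
    uint32_t device_off = offset;
    offset -= isr_size;
    uint32_t isr_off = offset;
    offset -= common_size;
    uint32_t common_off = offset;

    printf("common 0x%x  isr 0x%x  device 0x%x  notify 0x%x\n",
           common_off, isr_off, device_off, notify_off);
    /* everything below common_off remains free for the stdvga registers */
    return 0;
}

At the command line the device is then selected by its type name, e.g. "-device virtio-vga".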
diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
index c17ddd1fcd..7f397d3c2e 100644
--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -1124,7 +1124,7 @@ static void vmsvga_update_display(void *opaque)
* Is it more efficient to look at vram VGA-dirty bits or wait
* for the driver to issue SVGA_CMD_UPDATE?
*/
- if (memory_region_is_logging(&s->vga.vram)) {
+ if (memory_region_is_logging(&s->vga.vram, DIRTY_MEMORY_VGA)) {
vga_sync_dirty_bitmap(&s->vga);
dirty = memory_region_get_dirty(&s->vga.vram, 0,
surface_stride(surface) * surface_height(surface),
diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c
index af2663256e..3efa6de352 100644
--- a/hw/dma/rc4030.c
+++ b/hw/dma/rc4030.c
@@ -1,7 +1,7 @@
/*
* QEMU JAZZ RC4030 chipset
*
- * Copyright (c) 2007-2009 Herve Poussineau
+ * Copyright (c) 2007-2013 Hervé Poussineau
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -24,29 +24,16 @@
#include "hw/hw.h"
#include "hw/mips/mips.h"
+#include "hw/sysbus.h"
#include "qemu/timer.h"
-
-/********************************************************/
-/* debug rc4030 */
-
-//#define DEBUG_RC4030
-//#define DEBUG_RC4030_DMA
-
-#ifdef DEBUG_RC4030
-#define DPRINTF(fmt, ...) \
-do { printf("rc4030: " fmt , ## __VA_ARGS__); } while (0)
-static const char* irq_names[] = { "parallel", "floppy", "sound", "video",
- "network", "scsi", "keyboard", "mouse", "serial0", "serial1" };
-#else
-#define DPRINTF(fmt, ...)
-#endif
-
-#define RC4030_ERROR(fmt, ...) \
-do { fprintf(stderr, "rc4030 ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)
+#include "exec/address-spaces.h"
+#include "trace.h"
/********************************************************/
/* rc4030 emulation */
+#define MAX_TL_ENTRIES 512
+
typedef struct dma_pagetable_entry {
int32_t frame;
int32_t owner;
@@ -63,8 +50,14 @@ typedef struct dma_pagetable_entry {
#define DMA_FLAG_MEM_INTR 0x0200
#define DMA_FLAG_ADDR_INTR 0x0400
+#define TYPE_RC4030 "rc4030"
+#define RC4030(obj) \
+ OBJECT_CHECK(rc4030State, (obj), TYPE_RC4030)
+
typedef struct rc4030State
{
+ SysBusDevice parent;
+
uint32_t config; /* 0x0000: RC4030 config register */
uint32_t revision; /* 0x0008: RC4030 Revision register */
uint32_t invalid_address_register; /* 0x0010: Invalid Address register */
@@ -83,7 +76,7 @@ typedef struct rc4030State
uint32_t cache_bmask; /* 0x0058: I/O Cache Byte Mask */
uint32_t nmi_interrupt; /* 0x0200: interrupt source */
- uint32_t offset210;
+ uint32_t memory_refresh_rate; /* 0x0210: memory refresh rate */
uint32_t nvram_protect; /* 0x0220: NV ram protect register */
uint32_t rem_speed[16];
uint32_t imr_jazz; /* Local bus int enable mask */
@@ -96,6 +89,16 @@ typedef struct rc4030State
qemu_irq timer_irq;
qemu_irq jazz_bus_irq;
+ /* translation table, allocated at its maximum possible size */
+ MemoryRegion dma_tt;
+ /* translation table memory region alias, added to system RAM */
+ MemoryRegion dma_tt_alias;
+ /* whole DMA memory region, root of DMA address space */
+ MemoryRegion dma_mr;
+ /* translation table entry aliases, added to DMA memory region */
+ MemoryRegion dma_mrs[MAX_TL_ENTRIES];
+ AddressSpace dma_as;
+
MemoryRegion iomem_chipset;
MemoryRegion iomem_jazzio;
} rc4030State;
@@ -112,7 +115,7 @@ static void set_next_tick(rc4030State *s)
}
/* called for accesses to rc4030 */
-static uint32_t rc4030_readl(void *opaque, hwaddr addr)
+static uint64_t rc4030_read(void *opaque, hwaddr addr, unsigned int size)
{
rc4030State *s = opaque;
uint32_t val;
@@ -220,9 +223,9 @@ static uint32_t rc4030_readl(void *opaque, hwaddr addr)
case 0x0208:
val = 0;
break;
- /* Offset 0x0210 */
+ /* Memory refresh rate */
case 0x0210:
- val = s->offset210;
+ val = s->memory_refresh_rate;
break;
/* NV ram protect register */
case 0x0220:
@@ -238,39 +241,117 @@ static uint32_t rc4030_readl(void *opaque, hwaddr addr)
val = 7; /* FIXME: should be read from EISA controller */
break;
default:
- RC4030_ERROR("invalid read [" TARGET_FMT_plx "]\n", addr);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rc4030: invalid read at 0x%x\n", (int)addr);
val = 0;
break;
}
if ((addr & ~3) != 0x230) {
- DPRINTF("read 0x%02x at " TARGET_FMT_plx "\n", val, addr);
+ trace_rc4030_read(addr, val);
}
return val;
}
-static uint32_t rc4030_readw(void *opaque, hwaddr addr)
+static void rc4030_dma_as_update_one(rc4030State *s, int index, uint32_t frame)
{
- uint32_t v = rc4030_readl(opaque, addr & ~0x3);
- if (addr & 0x2)
- return v >> 16;
- else
- return v & 0xffff;
+ if (index < MAX_TL_ENTRIES) {
+ memory_region_set_enabled(&s->dma_mrs[index], false);
+ }
+
+ if (!frame) {
+ return;
+ }
+
+ if (index >= MAX_TL_ENTRIES) {
+ qemu_log_mask(LOG_UNIMP,
+ "rc4030: translation table entry %d out of range "
+ "(max allowed=%d)\n",
+ index, MAX_TL_ENTRIES);
+ return;
+ }
+ memory_region_set_alias_offset(&s->dma_mrs[index], frame);
+ memory_region_set_enabled(&s->dma_mrs[index], true);
}
-static uint32_t rc4030_readb(void *opaque, hwaddr addr)
+static void rc4030_dma_tt_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
{
- uint32_t v = rc4030_readl(opaque, addr & ~0x3);
- return (v >> (8 * (addr & 0x3))) & 0xff;
+ rc4030State *s = opaque;
+
+ /* write memory */
+ memcpy(memory_region_get_ram_ptr(&s->dma_tt) + addr, &data, size);
+
+ /* update dma address space (only if frame field has been written) */
+ if (addr % sizeof(dma_pagetable_entry) == 0) {
+ int index = addr / sizeof(dma_pagetable_entry);
+ memory_region_transaction_begin();
+ rc4030_dma_as_update_one(s, index, (uint32_t)data);
+ memory_region_transaction_commit();
+ }
}
-static void rc4030_writel(void *opaque, hwaddr addr, uint32_t val)
+static const MemoryRegionOps rc4030_dma_tt_ops = {
+ .write = rc4030_dma_tt_write,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+};
+
+static void rc4030_dma_tt_update(rc4030State *s, uint32_t new_tl_base,
+ uint32_t new_tl_limit)
+{
+ int entries, i;
+ dma_pagetable_entry *dma_tl_contents;
+
+ if (s->dma_tl_limit) {
+ /* write old dma tl table to physical memory */
+ memory_region_del_subregion(get_system_memory(), &s->dma_tt_alias);
+ cpu_physical_memory_write(s->dma_tl_limit & 0x7fffffff,
+ memory_region_get_ram_ptr(&s->dma_tt),
+ memory_region_size(&s->dma_tt_alias));
+ }
+ object_unparent(OBJECT(&s->dma_tt_alias));
+
+ s->dma_tl_base = new_tl_base;
+ s->dma_tl_limit = new_tl_limit;
+ new_tl_base &= 0x7fffffff;
+
+ if (s->dma_tl_limit) {
+ uint64_t dma_tt_size;
+ if (s->dma_tl_limit <= memory_region_size(&s->dma_tt)) {
+ dma_tt_size = s->dma_tl_limit;
+ } else {
+ dma_tt_size = memory_region_size(&s->dma_tt);
+ }
+ memory_region_init_alias(&s->dma_tt_alias, OBJECT(s),
+ "dma-table-alias",
+ &s->dma_tt, 0, dma_tt_size);
+ dma_tl_contents = memory_region_get_ram_ptr(&s->dma_tt);
+ cpu_physical_memory_read(new_tl_base, dma_tl_contents, dma_tt_size);
+
+ memory_region_transaction_begin();
+ entries = dma_tt_size / sizeof(dma_pagetable_entry);
+ for (i = 0; i < entries; i++) {
+ rc4030_dma_as_update_one(s, i, dma_tl_contents[i].frame);
+ }
+ memory_region_add_subregion(get_system_memory(), new_tl_base,
+ &s->dma_tt_alias);
+ memory_region_transaction_commit();
+ } else {
+ memory_region_init(&s->dma_tt_alias, OBJECT(s),
+ "dma-table-alias", 0);
+ }
+}
+
+static void rc4030_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
{
rc4030State *s = opaque;
+ uint32_t val = data;
addr &= 0x3fff;
- DPRINTF("write 0x%02x at " TARGET_FMT_plx "\n", val, addr);
+ trace_rc4030_write(addr, val);
switch (addr & ~0x3) {
/* Global config register */
@@ -279,11 +360,11 @@ static void rc4030_writel(void *opaque, hwaddr addr, uint32_t val)
break;
/* DMA transl. table base */
case 0x0018:
- s->dma_tl_base = val;
+ rc4030_dma_tt_update(s, val, s->dma_tl_limit);
break;
/* DMA transl. table limit */
case 0x0020:
- s->dma_tl_limit = val;
+ rc4030_dma_tt_update(s, s->dma_tl_base, val);
break;
/* DMA transl. table invalidated */
case 0x0028:
@@ -371,9 +452,9 @@ static void rc4030_writel(void *opaque, hwaddr addr, uint32_t val)
s->dma_regs[entry][idx] = val;
}
break;
- /* Offset 0x0210 */
+ /* Memory refresh rate */
case 0x0210:
- s->offset210 = val;
+ s->memory_refresh_rate = val;
break;
/* Interval timer reload */
case 0x0228:
@@ -385,48 +466,18 @@ static void rc4030_writel(void *opaque, hwaddr addr, uint32_t val)
case 0x0238:
break;
default:
- RC4030_ERROR("invalid write of 0x%02x at [" TARGET_FMT_plx "]\n", val, addr);
- break;
- }
-}
-
-static void rc4030_writew(void *opaque, hwaddr addr, uint32_t val)
-{
- uint32_t old_val = rc4030_readl(opaque, addr & ~0x3);
-
- if (addr & 0x2)
- val = (val << 16) | (old_val & 0x0000ffff);
- else
- val = val | (old_val & 0xffff0000);
- rc4030_writel(opaque, addr & ~0x3, val);
-}
-
-static void rc4030_writeb(void *opaque, hwaddr addr, uint32_t val)
-{
- uint32_t old_val = rc4030_readl(opaque, addr & ~0x3);
-
- switch (addr & 3) {
- case 0:
- val = val | (old_val & 0xffffff00);
- break;
- case 1:
- val = (val << 8) | (old_val & 0xffff00ff);
- break;
- case 2:
- val = (val << 16) | (old_val & 0xff00ffff);
- break;
- case 3:
- val = (val << 24) | (old_val & 0x00ffffff);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rc4030: invalid write of 0x%02x at 0x%x\n",
+ val, (int)addr);
break;
}
- rc4030_writel(opaque, addr & ~0x3, val);
}
static const MemoryRegionOps rc4030_ops = {
- .old_mmio = {
- .read = { rc4030_readb, rc4030_readw, rc4030_readl, },
- .write = { rc4030_writeb, rc4030_writew, rc4030_writel, },
- },
+ .read = rc4030_read,
+ .write = rc4030_write,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
.endianness = DEVICE_NATIVE_ENDIAN,
};
@@ -436,22 +487,6 @@ static void update_jazz_irq(rc4030State *s)
pending = s->isr_jazz & s->imr_jazz;
-#ifdef DEBUG_RC4030
- if (s->isr_jazz != 0) {
- uint32_t irq = 0;
- DPRINTF("pending irqs:");
- for (irq = 0; irq < ARRAY_SIZE(irq_names); irq++) {
- if (s->isr_jazz & (1 << irq)) {
- printf(" %s", irq_names[irq]);
- if (!(s->imr_jazz & (1 << irq))) {
- printf("(ignored)");
- }
- }
- }
- printf("\n");
- }
-#endif
-
if (pending != 0)
qemu_irq_raise(s->jazz_bus_irq);
else
@@ -479,7 +514,7 @@ static void rc4030_periodic_timer(void *opaque)
qemu_irq_raise(s->timer_irq);
}
-static uint32_t jazzio_readw(void *opaque, hwaddr addr)
+static uint64_t jazzio_read(void *opaque, hwaddr addr, unsigned int size)
{
rc4030State *s = opaque;
uint32_t val;
@@ -494,7 +529,6 @@ static uint32_t jazzio_readw(void *opaque, hwaddr addr)
irq = 0;
while (pending) {
if (pending & 1) {
- DPRINTF("returning irq %s\n", irq_names[irq]);
val = (irq + 1) << 2;
break;
}
@@ -508,36 +542,25 @@ static uint32_t jazzio_readw(void *opaque, hwaddr addr)
val = s->imr_jazz;
break;
default:
- RC4030_ERROR("(jazz io controller) invalid read [" TARGET_FMT_plx "]\n", addr);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rc4030/jazzio: invalid read at 0x%x\n", (int)addr);
val = 0;
+ break;
}
- DPRINTF("(jazz io controller) read 0x%04x at " TARGET_FMT_plx "\n", val, addr);
+ trace_jazzio_read(addr, val);
return val;
}
-static uint32_t jazzio_readb(void *opaque, hwaddr addr)
-{
- uint32_t v;
- v = jazzio_readw(opaque, addr & ~0x1);
- return (v >> (8 * (addr & 0x1))) & 0xff;
-}
-
-static uint32_t jazzio_readl(void *opaque, hwaddr addr)
-{
- uint32_t v;
- v = jazzio_readw(opaque, addr);
- v |= jazzio_readw(opaque, addr + 2) << 16;
- return v;
-}
-
-static void jazzio_writew(void *opaque, hwaddr addr, uint32_t val)
+static void jazzio_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
{
rc4030State *s = opaque;
+ uint32_t val = data;
addr &= 0xfff;
- DPRINTF("(jazz io controller) write 0x%04x at " TARGET_FMT_plx "\n", val, addr);
+ trace_jazzio_write(addr, val);
switch (addr) {
/* Local bus int enable mask */
@@ -546,43 +569,24 @@ static void jazzio_writew(void *opaque, hwaddr addr, uint32_t val)
update_jazz_irq(s);
break;
default:
- RC4030_ERROR("(jazz io controller) invalid write of 0x%04x at [" TARGET_FMT_plx "]\n", val, addr);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rc4030/jazzio: invalid write of 0x%02x at 0x%x\n",
+ val, (int)addr);
break;
}
}
-static void jazzio_writeb(void *opaque, hwaddr addr, uint32_t val)
-{
- uint32_t old_val = jazzio_readw(opaque, addr & ~0x1);
-
- switch (addr & 1) {
- case 0:
- val = val | (old_val & 0xff00);
- break;
- case 1:
- val = (val << 8) | (old_val & 0x00ff);
- break;
- }
- jazzio_writew(opaque, addr & ~0x1, val);
-}
-
-static void jazzio_writel(void *opaque, hwaddr addr, uint32_t val)
-{
- jazzio_writew(opaque, addr, val & 0xffff);
- jazzio_writew(opaque, addr + 2, (val >> 16) & 0xffff);
-}
-
static const MemoryRegionOps jazzio_ops = {
- .old_mmio = {
- .read = { jazzio_readb, jazzio_readw, jazzio_readl, },
- .write = { jazzio_writeb, jazzio_writew, jazzio_writel, },
- },
+ .read = jazzio_read,
+ .write = jazzio_write,
+ .impl.min_access_size = 2,
+ .impl.max_access_size = 2,
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static void rc4030_reset(void *opaque)
+static void rc4030_reset(DeviceState *dev)
{
- rc4030State *s = opaque;
+ rc4030State *s = RC4030(dev);
int i;
s->config = 0x410; /* some boards seem to accept 0x104 too */
@@ -590,14 +594,14 @@ static void rc4030_reset(void *opaque)
s->invalid_address_register = 0;
memset(s->dma_regs, 0, sizeof(s->dma_regs));
- s->dma_tl_base = s->dma_tl_limit = 0;
+ rc4030_dma_tt_update(s, 0, 0);
s->remote_failed_address = s->memory_failed_address = 0;
s->cache_maint = 0;
s->cache_ptag = s->cache_ltag = 0;
s->cache_bmask = 0;
- s->offset210 = 0x18186;
+ s->memory_refresh_rate = 0x18186;
s->nvram_protect = 7;
for (i = 0; i < 15; i++)
s->rem_speed[i] = 7;
@@ -631,7 +635,7 @@ static int rc4030_load(QEMUFile *f, void *opaque, int version_id)
s->cache_ptag = qemu_get_be32(f);
s->cache_ltag = qemu_get_be32(f);
s->cache_bmask = qemu_get_be32(f);
- s->offset210 = qemu_get_be32(f);
+ s->memory_refresh_rate = qemu_get_be32(f);
s->nvram_protect = qemu_get_be32(f);
for (i = 0; i < 15; i++)
s->rem_speed[i] = qemu_get_be32(f);
@@ -663,7 +667,7 @@ static void rc4030_save(QEMUFile *f, void *opaque)
qemu_put_be32(f, s->cache_ptag);
qemu_put_be32(f, s->cache_ltag);
qemu_put_be32(f, s->cache_bmask);
- qemu_put_be32(f, s->offset210);
+ qemu_put_be32(f, s->memory_refresh_rate);
qemu_put_be32(f, s->nvram_protect);
for (i = 0; i < 15; i++)
qemu_put_be32(f, s->rem_speed[i]);
@@ -672,44 +676,6 @@ static void rc4030_save(QEMUFile *f, void *opaque)
qemu_put_be32(f, s->itr);
}
-void rc4030_dma_memory_rw(void *opaque, hwaddr addr, uint8_t *buf, int len, int is_write)
-{
- rc4030State *s = opaque;
- hwaddr entry_addr;
- hwaddr phys_addr;
- dma_pagetable_entry entry;
- int index;
- int ncpy, i;
-
- i = 0;
- for (;;) {
- if (i == len) {
- break;
- }
-
- ncpy = DMA_PAGESIZE - (addr & (DMA_PAGESIZE - 1));
- if (ncpy > len - i)
- ncpy = len - i;
-
- /* Get DMA translation table entry */
- index = addr / DMA_PAGESIZE;
- if (index >= s->dma_tl_limit / sizeof(dma_pagetable_entry)) {
- break;
- }
- entry_addr = s->dma_tl_base + index * sizeof(dma_pagetable_entry);
- /* XXX: not sure. should we really use only lowest bits? */
- entry_addr &= 0x7fffffff;
- cpu_physical_memory_read(entry_addr, &entry, sizeof(entry));
-
- /* Read/write data at right place */
- phys_addr = entry.frame + (addr & (DMA_PAGESIZE - 1));
- cpu_physical_memory_rw(phys_addr, &buf[i], ncpy, is_write);
-
- i += ncpy;
- addr += ncpy;
- }
-}
-
static void rc4030_do_dma(void *opaque, int n, uint8_t *buf, int len, int is_write)
{
rc4030State *s = opaque;
@@ -733,32 +699,11 @@ static void rc4030_do_dma(void *opaque, int n, uint8_t *buf, int len, int is_wri
dma_addr = s->dma_regs[n][DMA_REG_ADDRESS];
/* Read/write data at right place */
- rc4030_dma_memory_rw(opaque, dma_addr, buf, len, is_write);
+ address_space_rw(&s->dma_as, dma_addr, MEMTXATTRS_UNSPECIFIED,
+ buf, len, is_write);
s->dma_regs[n][DMA_REG_ENABLE] |= DMA_FLAG_TC_INTR;
s->dma_regs[n][DMA_REG_COUNT] -= len;
-
-#ifdef DEBUG_RC4030_DMA
- {
- int i, j;
- printf("rc4030 dma: Copying %d bytes %s host %p\n",
- len, is_write ? "from" : "to", buf);
- for (i = 0; i < len; i += 16) {
- int n = 16;
- if (n > len - i) {
- n = len - i;
- }
- for (j = 0; j < n; j++)
- printf("%02x ", buf[i + j]);
- while (j++ < 16)
- printf(" ");
- printf("| ");
- for (j = 0; j < n; j++)
- printf("%c", isprint(buf[i + j]) ? buf[i + j] : '.');
- printf("\n");
- }
- }
-#endif
}
struct rc4030DMAState {
@@ -795,31 +740,102 @@ static rc4030_dma *rc4030_allocate_dmas(void *opaque, int n)
return s;
}
-void *rc4030_init(qemu_irq timer, qemu_irq jazz_bus,
- qemu_irq **irqs, rc4030_dma **dmas,
- MemoryRegion *sysmem)
+static void rc4030_initfn(Object *obj)
{
- rc4030State *s;
+ DeviceState *dev = DEVICE(obj);
+ rc4030State *s = RC4030(obj);
+ SysBusDevice *sysbus = SYS_BUS_DEVICE(obj);
- s = g_malloc0(sizeof(rc4030State));
+ qdev_init_gpio_in(dev, rc4030_irq_jazz_request, 16);
- *irqs = qemu_allocate_irqs(rc4030_irq_jazz_request, s, 16);
- *dmas = rc4030_allocate_dmas(s, 4);
+ sysbus_init_irq(sysbus, &s->timer_irq);
+ sysbus_init_irq(sysbus, &s->jazz_bus_irq);
- s->periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, rc4030_periodic_timer, s);
- s->timer_irq = timer;
- s->jazz_bus_irq = jazz_bus;
-
- qemu_register_reset(rc4030_reset, s);
register_savevm(NULL, "rc4030", 0, 2, rc4030_save, rc4030_load, s);
- rc4030_reset(s);
+
+ sysbus_init_mmio(sysbus, &s->iomem_chipset);
+ sysbus_init_mmio(sysbus, &s->iomem_jazzio);
+}
+
+static void rc4030_realize(DeviceState *dev, Error **errp)
+{
+ rc4030State *s = RC4030(dev);
+ Object *o = OBJECT(dev);
+ int i;
+
+ s->periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ rc4030_periodic_timer, s);
memory_region_init_io(&s->iomem_chipset, NULL, &rc4030_ops, s,
"rc4030.chipset", 0x300);
- memory_region_add_subregion(sysmem, 0x80000000, &s->iomem_chipset);
memory_region_init_io(&s->iomem_jazzio, NULL, &jazzio_ops, s,
"rc4030.jazzio", 0x00001000);
- memory_region_add_subregion(sysmem, 0xf0000000, &s->iomem_jazzio);
- return s;
+ memory_region_init_rom_device(&s->dma_tt, o,
+ &rc4030_dma_tt_ops, s, "dma-table",
+ MAX_TL_ENTRIES * sizeof(dma_pagetable_entry),
+ NULL);
+ memory_region_init(&s->dma_tt_alias, o, "dma-table-alias", 0);
+ memory_region_init(&s->dma_mr, o, "dma", INT32_MAX);
+ for (i = 0; i < MAX_TL_ENTRIES; ++i) {
+ memory_region_init_alias(&s->dma_mrs[i], o, "dma-alias",
+ get_system_memory(), 0, DMA_PAGESIZE);
+ memory_region_set_enabled(&s->dma_mrs[i], false);
+ memory_region_add_subregion(&s->dma_mr, i * DMA_PAGESIZE,
+ &s->dma_mrs[i]);
+ }
+ address_space_init(&s->dma_as, &s->dma_mr, "rc4030-dma");
+}
+
+static void rc4030_unrealize(DeviceState *dev, Error **errp)
+{
+ rc4030State *s = RC4030(dev);
+ int i;
+
+ timer_free(s->periodic_timer);
+
+ address_space_destroy(&s->dma_as);
+ object_unparent(OBJECT(&s->dma_tt));
+ object_unparent(OBJECT(&s->dma_tt_alias));
+ object_unparent(OBJECT(&s->dma_mr));
+ for (i = 0; i < MAX_TL_ENTRIES; ++i) {
+ memory_region_del_subregion(&s->dma_mr, &s->dma_mrs[i]);
+ object_unparent(OBJECT(&s->dma_mrs[i]));
+ }
+}
+
+static void rc4030_class_init(ObjectClass *klass, void *class_data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = rc4030_realize;
+ dc->unrealize = rc4030_unrealize;
+ dc->reset = rc4030_reset;
+}
+
+static const TypeInfo rc4030_info = {
+ .name = TYPE_RC4030,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(rc4030State),
+ .instance_init = rc4030_initfn,
+ .class_init = rc4030_class_init,
+};
+
+static void rc4030_register_types(void)
+{
+ type_register_static(&rc4030_info);
+}
+
+type_init(rc4030_register_types)
+
+DeviceState *rc4030_init(rc4030_dma **dmas, MemoryRegion **dma_mr)
+{
+ DeviceState *dev;
+
+ dev = qdev_create(NULL, TYPE_RC4030);
+ qdev_init_nofail(dev);
+
+ *dmas = rc4030_allocate_dmas(dev, 4);
+ *dma_mr = &RC4030(dev)->dma_mr;
+ return dev;
}
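
The rc4030 conversion above replaces the deprecated .old_mmio byte/word/long handler triplets with a single read/write pair plus .impl constraints, letting the memory core split or combine guest accesses the device does not implement natively. A minimal sketch of the resulting shape, with an invented register bank:

#include "exec/memory.h"   /* QEMU-internal header, assumed available */

#define DEMO_NUM_REGS 8

typedef struct DemoRegs {
    uint32_t reg[DEMO_NUM_REGS];
} DemoRegs;

static uint64_t demo_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    DemoRegs *s = opaque;
    return s->reg[(addr >> 2) % DEMO_NUM_REGS];   /* one 32-bit register per word */
}

static void demo_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    DemoRegs *s = opaque;
    s->reg[(addr >> 2) % DEMO_NUM_REGS] = data;
}

static const MemoryRegionOps demo_mmio_ops = {
    .read = demo_mmio_read,
    .write = demo_mmio_write,
    /* the device only implements aligned 32-bit accesses; the core emulates
     * the byte and halfword cases that rc4030 used to hand-code */
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};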
diff --git a/hw/gpio/pl061.c b/hw/gpio/pl061.c
index bd03e99975..4ba730b476 100644
--- a/hw/gpio/pl061.c
+++ b/hw/gpio/pl061.c
@@ -173,7 +173,7 @@ static uint64_t pl061_read(void *opaque, hwaddr offset,
case 0x414: /* Raw interrupt status */
return s->istate;
case 0x418: /* Masked interrupt status */
- return s->istate | s->im;
+ return s->istate & s->im;
case 0x420: /* Alternate function select */
return s->afsel;
case 0x500: /* 2mA drive */
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 8e88aded9f..b71e942567 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -615,6 +615,7 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus,
}
}
aml_append(parent_scope, method);
+ qobject_decref(bsel);
}
/*
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index a93972fb41..3f0d435da9 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -164,27 +164,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_ticks();
}
-/* SMM support */
-
-static cpu_set_smm_t smm_set;
-static void *smm_arg;
-
-void cpu_smm_register(cpu_set_smm_t callback, void *arg)
-{
- assert(smm_set == NULL);
- assert(smm_arg == NULL);
- smm_set = callback;
- smm_arg = arg;
-}
-
-void cpu_smm_update(CPUX86State *env)
-{
- if (smm_set && smm_arg && CPU(x86_env_get_cpu(env)) == first_cpu) {
- smm_set(!!(env->hflags & HF_SMM_MASK), smm_arg);
- }
-}
-
-
/* IRQ handling */
int cpu_get_pic_interrupt(CPUX86State *env)
{
@@ -1007,7 +986,6 @@ static X86CPU *pc_new_cpu(const char *cpu_model, int64_t apic_id,
}
qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
- object_unref(OBJECT(cpu));
object_property_set_int(OBJECT(cpu), apic_id, "apic-id", &local_err);
object_property_set_bool(OBJECT(cpu), true, "realized", &local_err);
@@ -1026,7 +1004,9 @@ static const char *current_cpu_model;
void pc_hot_add_cpu(const int64_t id, Error **errp)
{
DeviceState *icc_bridge;
+ X86CPU *cpu;
int64_t apic_id = x86_cpu_apic_id_from_index(id);
+ Error *local_err = NULL;
if (id < 0) {
error_setg(errp, "Invalid CPU id: %" PRIi64, id);
@@ -1054,7 +1034,12 @@ void pc_hot_add_cpu(const int64_t id, Error **errp)
icc_bridge = DEVICE(object_resolve_path_type("icc-bridge",
TYPE_ICC_BRIDGE, NULL));
- pc_new_cpu(current_cpu_model, apic_id, icc_bridge, errp);
+ cpu = pc_new_cpu(current_cpu_model, apic_id, icc_bridge, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ object_unref(OBJECT(cpu));
}
void pc_cpus_init(const char *cpu_model, DeviceState *icc_bridge)
@@ -1088,6 +1073,7 @@ void pc_cpus_init(const char *cpu_model, DeviceState *icc_bridge)
error_report_err(error);
exit(1);
}
+ object_unref(OBJECT(cpu));
}
/* map APIC MMIO area if CPU has APIC */
@@ -1365,9 +1351,9 @@ FWCfgState *pc_memory_init(MachineState *machine,
return fw_cfg;
}
-qemu_irq *pc_allocate_cpu_irq(void)
+qemu_irq pc_allocate_cpu_irq(void)
{
- return qemu_allocate_irqs(pic_irq_request, NULL, 1);
+ return qemu_allocate_irq(pic_irq_request, NULL, 0);
}
DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus)
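
pc_allocate_cpu_irq() now returns a single qemu_irq rather than a one-element array, switching from qemu_allocate_irqs(..., 1) to qemu_allocate_irq(). A short sketch of the pattern with an illustrative handler:

#include "hw/irq.h"        /* QEMU-internal header, assumed available */

/* handler signature used by qemu_allocate_irq(): opaque, irq index, level */
static void demo_irq_handler(void *opaque, int n, int level)
{
    /* n is the index passed at allocation time (0 below) */
}

static qemu_irq demo_single_irq(void *opaque)
{
    /* no array to index or free afterwards, unlike qemu_allocate_irqs() */
    return qemu_allocate_irq(demo_irq_handler, opaque, 0);
}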
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 0688f0c52f..e142f75649 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -52,6 +52,7 @@
#ifdef CONFIG_XEN
# include <xen/hvm/hvm_info_table.h>
#endif
+#include "migration/migration.h"
#define MAX_IDE_BUS 2
@@ -86,10 +87,9 @@ static void pc_init1(MachineState *machine)
ISABus *isa_bus;
PCII440FXState *i440fx_state;
int piix3_devfn = -1;
- qemu_irq *cpu_irq;
qemu_irq *gsi;
qemu_irq *i8259;
- qemu_irq *smi_irq;
+ qemu_irq smi_irq;
GSIState *gsi_state;
DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
BusState *idebus[MAX_IDE_BUS];
@@ -219,13 +219,13 @@ static void pc_init1(MachineState *machine)
} else if (xen_enabled()) {
i8259 = xen_interrupt_controller_init();
} else {
- cpu_irq = pc_allocate_cpu_irq();
- i8259 = i8259_init(isa_bus, cpu_irq[0]);
+ i8259 = i8259_init(isa_bus, pc_allocate_cpu_irq());
}
for (i = 0; i < ISA_NUM_IRQS; i++) {
gsi_state->i8259_irq[i] = i8259[i];
}
+ g_free(i8259);
if (pci_enabled) {
ioapic_init_gsi(gsi_state, "i440fx");
}
@@ -283,10 +283,10 @@ static void pc_init1(MachineState *machine)
DeviceState *piix4_pm;
I2CBus *smbus;
- smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, first_cpu, 1);
+ smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0);
/* TODO: Populate SPD eeprom data. */
smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100,
- gsi[9], *smi_irq,
+ gsi[9], smi_irq,
kvm_enabled(), &piix4_pm);
smbus_eeprom_init(smbus, 8, NULL, 0);
@@ -306,6 +306,7 @@ static void pc_init1(MachineState *machine)
static void pc_compat_2_3(MachineState *machine)
{
+ savevm_skip_section_footers();
}
static void pc_compat_2_2(MachineState *machine)
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 66220b352b..082cd93bb2 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -45,6 +45,7 @@
#include "hw/usb.h"
#include "hw/cpu/icc_bus.h"
#include "qemu/error-report.h"
+#include "migration/migration.h"
/* ICH9 AHCI has 6 ports */
#define MAX_SATA_PORTS 6
@@ -79,7 +80,6 @@ static void pc_q35_init(MachineState *machine)
GSIState *gsi_state;
ISABus *isa_bus;
int pci_enabled = 1;
- qemu_irq *cpu_irq;
qemu_irq *gsi;
qemu_irq *i8259;
int i;
@@ -230,8 +230,7 @@ static void pc_q35_init(MachineState *machine)
} else if (xen_enabled()) {
i8259 = xen_interrupt_controller_init();
} else {
- cpu_irq = pc_allocate_cpu_irq();
- i8259 = i8259_init(isa_bus, cpu_irq[0]);
+ i8259 = i8259_init(isa_bus, pc_allocate_cpu_irq());
}
for (i = 0; i < ISA_NUM_IRQS; i++) {
@@ -291,6 +290,7 @@ static void pc_q35_init(MachineState *machine)
static void pc_compat_2_3(MachineState *machine)
{
+ savevm_skip_section_footers();
}
static void pc_compat_2_2(MachineState *machine)
@@ -403,6 +403,7 @@ DEFINE_Q35_MACHINE(v2_4, "pc-q35-2.4", NULL,
static void pc_q35_2_3_machine_options(MachineClass *m)
{
pc_q35_2_4_machine_options(m);
+ m->no_floppy = 0;
m->alias = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_3);
}
diff --git a/hw/ide/core.c b/hw/ide/core.c
index fcb908061c..1efd98af63 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -2561,6 +2561,7 @@ static const VMStateDescription vmstate_ide_atapi_gesn_state = {
.name ="ide_drive/atapi/gesn_state",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = ide_atapi_gesn_needed,
.fields = (VMStateField[]) {
VMSTATE_BOOL(events.new_media, IDEState),
VMSTATE_BOOL(events.eject_request, IDEState),
@@ -2572,6 +2573,7 @@ static const VMStateDescription vmstate_ide_tray_state = {
.name = "ide_drive/tray_state",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = ide_tray_state_needed,
.fields = (VMStateField[]) {
VMSTATE_BOOL(tray_open, IDEState),
VMSTATE_BOOL(tray_locked, IDEState),
@@ -2585,6 +2587,7 @@ static const VMStateDescription vmstate_ide_drive_pio_state = {
.minimum_version_id = 1,
.pre_save = ide_drive_pio_pre_save,
.post_load = ide_drive_pio_post_load,
+ .needed = ide_drive_pio_state_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(req_nb_sectors, IDEState),
VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
@@ -2626,19 +2629,11 @@ const VMStateDescription vmstate_ide_drive = {
VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_ide_drive_pio_state,
- .needed = ide_drive_pio_state_needed,
- }, {
- .vmsd = &vmstate_ide_tray_state,
- .needed = ide_tray_state_needed,
- }, {
- .vmsd = &vmstate_ide_atapi_gesn_state,
- .needed = ide_atapi_gesn_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_ide_drive_pio_state,
+ &vmstate_ide_tray_state,
+ &vmstate_ide_atapi_gesn_state,
+ NULL
}
};
@@ -2646,6 +2641,7 @@ static const VMStateDescription vmstate_ide_error_status = {
.name ="ide_bus/error",
.version_id = 2,
.minimum_version_id = 1,
+ .needed = ide_error_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(error_status, IDEBus),
VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
@@ -2664,13 +2660,9 @@ const VMStateDescription vmstate_ide_bus = {
VMSTATE_UINT8(unit, IDEBus),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_ide_error_status,
- .needed = ide_error_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_ide_error_status,
+ NULL
}
};
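
The ide_drive and ide_bus descriptions above illustrate the new subsection style used throughout this series: the .needed predicate moves into the subsection's own VMStateDescription, and the parent lists plain pointers terminated by NULL. A minimal sketch with invented device and field names:

#include "migration/vmstate.h"   /* QEMU-internal header, assumed available */

typedef struct DemoDevice {
    uint32_t base_state;
    uint32_t extra_state;
} DemoDevice;

static bool demo_extra_needed(void *opaque)
{
    DemoDevice *s = opaque;
    return s->extra_state != 0;          /* only migrated when non-default */
}

static const VMStateDescription vmstate_demo_extra = {
    .name = "demo-device/extra",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = demo_extra_needed,         /* predicate lives in the subsection */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(extra_state, DemoDevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_demo = {
    .name = "demo-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(base_state, DemoDevice),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_demo_extra,             /* NULL-terminated pointer list */
        NULL
    }
};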
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 585a27bd6c..dd52d50732 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -51,8 +51,15 @@ static const int debug_macio = 0;
#define MACIO_PAGE_SIZE 4096
+/*
+ * Unaligned DMA read/write access functions required for OS X/Darwin which
+ * doesn't perform DMA transactions on sector boundaries. These functions are
+ * modelled on bdrv_co_do_preadv()/bdrv_co_do_pwritev() and so should be
+ * easy to remove if the unaligned block APIs are ever exposed.
+ */
+
static void pmac_dma_read(BlockBackend *blk,
- int64_t sector_num, int nb_sectors,
+ int64_t offset, unsigned int bytes,
void (*cb)(void *opaque, int ret), void *opaque)
{
DBDMA_io *io = opaque;
@@ -60,76 +67,48 @@ static void pmac_dma_read(BlockBackend *blk,
IDEState *s = idebus_active_if(&m->bus);
dma_addr_t dma_addr, dma_len;
void *mem;
- int nsector, remainder;
+ int64_t sector_num;
+ int nsector;
+ uint64_t align = BDRV_SECTOR_SIZE;
+ size_t head_bytes, tail_bytes;
qemu_iovec_destroy(&io->iov);
qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
- if (io->remainder_len > 0) {
- /* Return remainder of request */
- int transfer = MIN(io->remainder_len, io->len);
+ sector_num = (offset >> 9);
+ nsector = (io->len >> 9);
- MACIO_DPRINTF("--- DMA read pop - bounce addr: %p addr: %"
- HWADDR_PRIx " remainder_len: %x\n",
- &io->remainder + (0x200 - transfer), io->addr,
- io->remainder_len);
+ MACIO_DPRINTF("--- DMA read transfer (0x%" HWADDR_PRIx ",0x%x): "
+ "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
+ sector_num, nsector);
- cpu_physical_memory_write(io->addr,
- &io->remainder + (0x200 - transfer),
- transfer);
+ dma_addr = io->addr;
+ dma_len = io->len;
+ mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
+ DMA_DIRECTION_FROM_DEVICE);
- io->remainder_len -= transfer;
- io->len -= transfer;
- io->addr += transfer;
+ if (offset & (align - 1)) {
+ head_bytes = offset & (align - 1);
- s->io_buffer_index += transfer;
- s->io_buffer_size -= transfer;
+ MACIO_DPRINTF("--- DMA unaligned head: sector %" PRId64 ", "
+ "discarding %zu bytes\n", sector_num, head_bytes);
- if (io->remainder_len != 0) {
- /* Still waiting for remainder */
- return;
- }
+ qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
- if (io->len == 0) {
- MACIO_DPRINTF("--- finished all read processing; go and finish\n");
- cb(opaque, 0);
- return;
- }
+ bytes += offset & (align - 1);
+ offset = offset & ~(align - 1);
}
- if (s->drive_kind == IDE_CD) {
- sector_num = (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9);
- } else {
- sector_num = ide_get_sector(s) + (s->io_buffer_index >> 9);
- }
+ qemu_iovec_add(&io->iov, mem, io->len);
- nsector = ((io->len + 0x1ff) >> 9);
- remainder = (nsector << 9) - io->len;
+ if ((offset + bytes) & (align - 1)) {
+ tail_bytes = (offset + bytes) & (align - 1);
- MACIO_DPRINTF("--- DMA read transfer - addr: %" HWADDR_PRIx " len: %x\n",
- io->addr, io->len);
+ MACIO_DPRINTF("--- DMA unaligned tail: sector %" PRId64 ", "
+ "discarding %zu bytes\n", sector_num, tail_bytes);
- dma_addr = io->addr;
- dma_len = io->len;
- mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
- DMA_DIRECTION_FROM_DEVICE);
-
- if (!remainder) {
- MACIO_DPRINTF("--- DMA read aligned - addr: %" HWADDR_PRIx
- " len: %x\n", io->addr, io->len);
- qemu_iovec_add(&io->iov, mem, io->len);
- } else {
- MACIO_DPRINTF("--- DMA read unaligned - addr: %" HWADDR_PRIx
- " len: %x\n", io->addr, io->len);
- qemu_iovec_add(&io->iov, mem, io->len);
-
- MACIO_DPRINTF("--- DMA read push - bounce addr: %p "
- "remainder_len: %x\n",
- &io->remainder + 0x200 - remainder, remainder);
- qemu_iovec_add(&io->iov, &io->remainder + 0x200 - remainder,
- remainder);
-
- io->remainder_len = remainder;
+ qemu_iovec_add(&io->iov, &io->tail_remainder, align - tail_bytes);
+ bytes = ROUND_UP(bytes, align);
}
s->io_buffer_size -= io->len;
@@ -137,15 +116,15 @@ static void pmac_dma_read(BlockBackend *blk,
io->len = 0;
- MACIO_DPRINTF("--- Block read transfer - sector_num: %"PRIx64" "
- "nsector: %x\n",
- sector_num, nsector);
+ MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
+ "nsector: %x\n", (offset >> 9), (bytes >> 9));
- m->aiocb = blk_aio_readv(blk, sector_num, &io->iov, nsector, cb, io);
+ m->aiocb = blk_aio_readv(blk, (offset >> 9), &io->iov, (bytes >> 9),
+ cb, io);
}
static void pmac_dma_write(BlockBackend *blk,
- int64_t sector_num, int nb_sectors,
+ int64_t offset, int bytes,
void (*cb)(void *opaque, int ret), void *opaque)
{
DBDMA_io *io = opaque;
@@ -153,90 +132,80 @@ static void pmac_dma_write(BlockBackend *blk,
IDEState *s = idebus_active_if(&m->bus);
dma_addr_t dma_addr, dma_len;
void *mem;
- int nsector, remainder;
- int extra = 0;
+ int64_t sector_num;
+ int nsector;
+ uint64_t align = BDRV_SECTOR_SIZE;
+ size_t head_bytes, tail_bytes;
+ bool unaligned_head = false, unaligned_tail = false;
qemu_iovec_destroy(&io->iov);
qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
- if (io->remainder_len > 0) {
- /* Return remainder of request */
- int transfer = MIN(io->remainder_len, io->len);
+ sector_num = (offset >> 9);
+ nsector = (io->len >> 9);
- MACIO_DPRINTF("--- processing write remainder %x\n", transfer);
- cpu_physical_memory_read(io->addr,
- &io->remainder + (0x200 - transfer),
- transfer);
+ MACIO_DPRINTF("--- DMA write transfer (0x%" HWADDR_PRIx ",0x%x): "
+ "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
+ sector_num, nsector);
- io->remainder_len -= transfer;
- io->len -= transfer;
- io->addr += transfer;
+ dma_addr = io->addr;
+ dma_len = io->len;
+ mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
+ DMA_DIRECTION_TO_DEVICE);
- s->io_buffer_index += transfer;
- s->io_buffer_size -= transfer;
+ if (offset & (align - 1)) {
+ head_bytes = offset & (align - 1);
+ sector_num = ((offset & ~(align - 1)) >> 9);
- if (io->remainder_len != 0) {
- /* Still waiting for remainder */
- return;
- }
+ MACIO_DPRINTF("--- DMA unaligned head: pre-reading head sector %"
+ PRId64 "\n", sector_num);
- MACIO_DPRINTF("--> prepending bounce buffer with size 0x200\n");
+ blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);
- /* Sector transfer complete - prepend to request */
- qemu_iovec_add(&io->iov, &io->remainder, 0x200);
- extra = 1;
- }
+ qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
+ qemu_iovec_add(&io->iov, mem, io->len);
- if (s->drive_kind == IDE_CD) {
- sector_num = (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9);
- } else {
- sector_num = ide_get_sector(s) + (s->io_buffer_index >> 9);
+ bytes += offset & (align - 1);
+ offset = offset & ~(align - 1);
+
+ unaligned_head = true;
}
- nsector = (io->len >> 9);
- remainder = io->len - (nsector << 9);
+ if ((offset + bytes) & (align - 1)) {
+ tail_bytes = (offset + bytes) & (align - 1);
+ sector_num = (((offset + bytes) & ~(align - 1)) >> 9);
- MACIO_DPRINTF("--- DMA write transfer - addr: %" HWADDR_PRIx " len: %x\n",
- io->addr, io->len);
- MACIO_DPRINTF("xxx remainder: %x\n", remainder);
- MACIO_DPRINTF("xxx sector_num: %"PRIx64" nsector: %x\n",
- sector_num, nsector);
+ MACIO_DPRINTF("--- DMA unaligned tail: pre-reading tail sector %"
+ PRId64 "\n", sector_num);
- dma_addr = io->addr;
- dma_len = io->len;
- mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
- DMA_DIRECTION_TO_DEVICE);
+ blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);
- if (!remainder) {
- MACIO_DPRINTF("--- DMA write aligned - addr: %" HWADDR_PRIx
- " len: %x\n", io->addr, io->len);
- qemu_iovec_add(&io->iov, mem, io->len);
- } else {
- /* Write up to last complete sector */
- MACIO_DPRINTF("--- DMA write unaligned - addr: %" HWADDR_PRIx
- " len: %x\n", io->addr, (nsector << 9));
- qemu_iovec_add(&io->iov, mem, (nsector << 9));
+ if (!unaligned_head) {
+ qemu_iovec_add(&io->iov, mem, io->len);
+ }
+
+ qemu_iovec_add(&io->iov, &io->tail_remainder + tail_bytes,
+ align - tail_bytes);
- MACIO_DPRINTF("--- DMA write read - bounce addr: %p "
- "remainder_len: %x\n", &io->remainder, remainder);
- cpu_physical_memory_read(io->addr + (nsector << 9), &io->remainder,
- remainder);
+ bytes = ROUND_UP(bytes, align);
- io->remainder_len = 0x200 - remainder;
+ unaligned_tail = true;
+ }
- MACIO_DPRINTF("xxx remainder_len: %x\n", io->remainder_len);
+ if (!unaligned_head && !unaligned_tail) {
+ qemu_iovec_add(&io->iov, mem, io->len);
}
- s->io_buffer_size -= ((nsector + extra) << 9);
- s->io_buffer_index += ((nsector + extra) << 9);
+ s->io_buffer_size -= io->len;
+ s->io_buffer_index += io->len;
io->len = 0;
- MACIO_DPRINTF("--- Block write transfer - sector_num: %"PRIx64" "
- "nsector: %x\n", sector_num, nsector + extra);
+ MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
+ "nsector: %x\n", (offset >> 9), (bytes >> 9));
- m->aiocb = blk_aio_writev(blk, sector_num, &io->iov, nsector + extra, cb,
- io);
+ m->aiocb = blk_aio_writev(blk, (offset >> 9), &io->iov, (bytes >> 9),
+ cb, io);
}
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
@@ -244,19 +213,12 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
- int64_t sector_num;
- int nsector, remainder;
+ int64_t offset;
- MACIO_DPRINTF("\ns is %p\n", s);
- MACIO_DPRINTF("io_buffer_index: %x\n", s->io_buffer_index);
- MACIO_DPRINTF("io_buffer_size: %x packet_transfer_size: %x\n",
- s->io_buffer_size, s->packet_transfer_size);
- MACIO_DPRINTF("lba: %x\n", s->lba);
- MACIO_DPRINTF("io_addr: %" HWADDR_PRIx " io_len: %x\n", io->addr,
- io->len);
+ MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n");
if (ret < 0) {
- MACIO_DPRINTF("THERE WAS AN ERROR! %d\n", ret);
+ MACIO_DPRINTF("DMA error: %d\n", ret);
ide_atapi_io_error(s, ret);
goto done;
}
@@ -270,6 +232,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
}
if (s->io_buffer_size <= 0) {
+ MACIO_DPRINTF("End of IDE transfer\n");
ide_atapi_cmd_ok(s);
m->dma_active = false;
goto done;
@@ -289,19 +252,13 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
goto done;
}
- /* Calculate number of sectors */
- sector_num = (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9);
- nsector = (io->len + 0x1ff) >> 9;
- remainder = io->len & 0x1ff;
-
- MACIO_DPRINTF("nsector: %d remainder: %x\n", nsector, remainder);
- MACIO_DPRINTF("sector: %"PRIx64" %zx\n", sector_num, io->iov.size / 512);
+ /* Calculate current offset */
+ offset = (int64_t)(s->lba << 11) + s->io_buffer_index;
- pmac_dma_read(s->blk, sector_num, nsector, pmac_ide_atapi_transfer_cb, io);
+ pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io);
return;
done:
- MACIO_DPRINTF("done DMA\n\n");
block_acct_done(blk_get_stats(s->blk), &s->acct);
io->dma_end(opaque);
@@ -313,16 +270,14 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
- int64_t sector_num;
- int nsector, remainder;
+ int64_t offset;
MACIO_DPRINTF("pmac_ide_transfer_cb\n");
if (ret < 0) {
- MACIO_DPRINTF("DMA error\n");
+ MACIO_DPRINTF("DMA error: %d\n", ret);
m->aiocb = NULL;
ide_dma_error(s);
- io->remainder_len = 0;
goto done;
}
@@ -335,7 +290,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
}
if (s->io_buffer_size <= 0) {
- MACIO_DPRINTF("end of transfer\n");
+ MACIO_DPRINTF("End of IDE transfer\n");
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
m->dma_active = false;
@@ -348,24 +303,16 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
}
/* Calculate number of sectors */
- sector_num = ide_get_sector(s) + (s->io_buffer_index >> 9);
- nsector = (io->len + 0x1ff) >> 9;
- remainder = io->len & 0x1ff;
-
- s->nsector -= nsector;
-
- MACIO_DPRINTF("nsector: %d remainder: %x\n", nsector, remainder);
- MACIO_DPRINTF("sector: %"PRIx64" %x\n", sector_num, nsector);
+ offset = (ide_get_sector(s) << 9) + s->io_buffer_index;
switch (s->dma_cmd) {
case IDE_DMA_READ:
- pmac_dma_read(s->blk, sector_num, nsector, pmac_ide_transfer_cb, io);
+ pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
break;
case IDE_DMA_WRITE:
- pmac_dma_write(s->blk, sector_num, nsector, pmac_ide_transfer_cb, io);
+ pmac_dma_write(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
break;
case IDE_DMA_TRIM:
- MACIO_DPRINTF("TRIM command issued!");
break;
}
@@ -561,15 +508,12 @@ static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
BlockCompletionFunc *cb)
{
MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);
- DBDMAState *dbdma = m->dbdma;
- DBDMA_io *io;
- int i;
s->io_buffer_index = 0;
if (s->drive_kind == IDE_CD) {
s->io_buffer_size = s->packet_transfer_size;
} else {
- s->io_buffer_size = s->nsector * 0x200;
+ s->io_buffer_size = s->nsector * BDRV_SECTOR_SIZE;
}
MACIO_DPRINTF("\n\n------------ IDE transfer\n");
@@ -578,15 +522,6 @@ static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
MACIO_DPRINTF("lba: %x size: %x\n", s->lba, s->io_buffer_size);
MACIO_DPRINTF("-------------------------\n");
- for (i = 0; i < DBDMA_CHANNELS; i++) {
- io = &dbdma->channels[i].io;
-
- if (io->opaque == m) {
- io->remainder_len = 0;
- }
- }
-
- MACIO_DPRINTF("\n");
m->dma_active = true;
DBDMA_kick(m->dbdma);
}
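
The rewritten pmac_dma_read()/pmac_dma_write() widen each request to sector boundaries and route the partial head and tail bytes through bounce buffers. The alignment arithmetic, extracted into a standalone sketch with made-up request values:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512u

int main(void)
{
    uint64_t offset = 0x10234;                    /* unaligned request start */
    uint64_t bytes  = 0x1800;                     /* request length */
    uint64_t align  = SECTOR_SIZE;

    uint64_t head = offset & (align - 1);         /* bytes to discard before the data */
    uint64_t aligned_offset = offset & ~(align - 1);
    uint64_t widened = bytes + head;              /* grow request to cover the head */
    uint64_t tail = (aligned_offset + widened) & (align - 1);
    if (tail) {
        widened += align - tail;                  /* grow request to the next sector end */
    }

    printf("block I/O at sector 0x%llx, 0x%llx bytes (head %llu, tail %llu)\n",
           (unsigned long long)(aligned_offset >> 9),
           (unsigned long long)widened,
           (unsigned long long)head,
           (unsigned long long)(tail ? align - tail : 0));
    return 0;
}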
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 1b3d1c12ad..4afd0cfe8c 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -350,6 +350,7 @@ static const VMStateDescription vmstate_bmdma_current = {
.name = "ide bmdma_current",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = ide_bmdma_current_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT32(cur_addr, BMDMAState),
VMSTATE_UINT32(cur_prd_last, BMDMAState),
@@ -363,6 +364,7 @@ static const VMStateDescription vmstate_bmdma_status = {
.name ="ide bmdma/status",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = ide_bmdma_status_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(status, BMDMAState),
VMSTATE_END_OF_LIST()
@@ -383,16 +385,10 @@ static const VMStateDescription vmstate_bmdma = {
VMSTATE_UINT8(migration_retry_unit, BMDMAState),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_bmdma_current,
- .needed = ide_bmdma_current_needed,
- }, {
- .vmsd = &vmstate_bmdma_status,
- .needed = ide_bmdma_status_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_bmdma_current,
+ &vmstate_bmdma_status,
+ NULL
}
};
@@ -452,8 +448,6 @@ static const struct IDEDMAOps bmdma_ops = {
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
- qemu_irq *irq;
-
if (bus->dma == &bm->dma) {
return;
}
@@ -461,8 +455,7 @@ void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
bm->dma.ops = &bmdma_ops;
bus->dma = &bm->dma;
bm->irq = bus->irq;
- irq = qemu_allocate_irqs(bmdma_irq, bm, 1);
- bus->irq = *irq;
+ bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
bm->pci_dev = d;
}
diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c
index 9b9a7d7a8a..ddac69df6f 100644
--- a/hw/input/pckbd.c
+++ b/hw/input/pckbd.c
@@ -391,23 +391,24 @@ static int kbd_outport_post_load(void *opaque, int version_id)
return 0;
}
+static bool kbd_outport_needed(void *opaque)
+{
+ KBDState *s = opaque;
+ return s->outport != kbd_outport_default(s);
+}
+
static const VMStateDescription vmstate_kbd_outport = {
.name = "pckbd_outport",
.version_id = 1,
.minimum_version_id = 1,
.post_load = kbd_outport_post_load,
+ .needed = kbd_outport_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(outport, KBDState),
VMSTATE_END_OF_LIST()
}
};
-static bool kbd_outport_needed(void *opaque)
-{
- KBDState *s = opaque;
- return s->outport != kbd_outport_default(s);
-}
-
static int kbd_post_load(void *opaque, int version_id)
{
KBDState *s = opaque;
@@ -430,12 +431,9 @@ static const VMStateDescription vmstate_kbd = {
VMSTATE_UINT8(pending, KBDState),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_kbd_outport,
- .needed = kbd_outport_needed,
- },
- VMSTATE_END_OF_LIST()
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_kbd_outport,
+ NULL
}
};
diff --git a/hw/input/ps2.c b/hw/input/ps2.c
index 4baeea2b56..fdbe565e62 100644
--- a/hw/input/ps2.c
+++ b/hw/input/ps2.c
@@ -677,6 +677,7 @@ static const VMStateDescription vmstate_ps2_keyboard_ledstate = {
.version_id = 3,
.minimum_version_id = 2,
.post_load = ps2_kbd_ledstate_post_load,
+ .needed = ps2_keyboard_ledstate_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(ledstate, PS2KbdState),
VMSTATE_END_OF_LIST()
@@ -717,13 +718,9 @@ static const VMStateDescription vmstate_ps2_keyboard = {
VMSTATE_INT32_V(scancode_set, PS2KbdState,3),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_ps2_keyboard_ledstate,
- .needed = ps2_keyboard_ledstate_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_ps2_keyboard_ledstate,
+ NULL
}
};
diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs
index 843864a3ef..092d8a80ac 100644
--- a/hw/intc/Makefile.objs
+++ b/hw/intc/Makefile.objs
@@ -11,6 +11,7 @@ common-obj-$(CONFIG_SLAVIO) += slavio_intctl.o
common-obj-$(CONFIG_IOAPIC) += ioapic_common.o
common-obj-$(CONFIG_ARM_GIC) += arm_gic_common.o
common-obj-$(CONFIG_ARM_GIC) += arm_gic.o
+common-obj-$(CONFIG_ARM_GIC) += arm_gicv2m.o
common-obj-$(CONFIG_OPENPIC) += openpic.o
obj-$(CONFIG_APIC) += apic.o apic_common.o
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 0f97b47925..77b639cce8 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -370,13 +370,14 @@ static int apic_irq_pending(APICCommonState *s)
static void apic_update_irq(APICCommonState *s)
{
CPUState *cpu;
+ DeviceState *dev = (DeviceState *)s;
cpu = CPU(s->cpu);
if (!qemu_cpu_is_self(cpu)) {
cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
} else if (apic_irq_pending(s) > 0) {
cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
- } else if (!apic_accept_pic_intr(&s->busdev.qdev) || !pic_get_output(isa_pic)) {
+ } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
}
}
@@ -549,10 +550,12 @@ static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
static bool apic_check_pic(APICCommonState *s)
{
- if (!apic_accept_pic_intr(&s->busdev.qdev) || !pic_get_output(isa_pic)) {
+ DeviceState *dev = (DeviceState *)s;
+
+ if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
return false;
}
- apic_deliver_pic_intr(&s->busdev.qdev, 1);
+ apic_deliver_pic_intr(dev, 1);
return true;
}
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index d595d63a51..0032b97c5f 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -369,6 +369,7 @@ static const VMStateDescription vmstate_apic_common_sipi = {
.name = "apic_sipi",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = apic_common_sipi_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(sipi_vector, APICCommonState),
VMSTATE_INT32(wait_for_sipi, APICCommonState),
@@ -408,12 +409,9 @@ static const VMStateDescription vmstate_apic_common = {
APICCommonState), /* open-coded timer state */
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_apic_common_sipi,
- .needed = apic_common_sipi_needed,
- },
- VMSTATE_END_OF_LIST()
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_apic_common_sipi,
+ NULL
}
};
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index c1d2e704ec..454bfd7df5 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -71,7 +71,7 @@ void gic_update(GICState *s)
|| !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) {
qemu_irq_lower(s->parent_irq[cpu]);
qemu_irq_lower(s->parent_fiq[cpu]);
- return;
+ continue;
}
best_prio = 0x100;
best_irq = 1023;
diff --git a/hw/intc/arm_gicv2m.c b/hw/intc/arm_gicv2m.c
new file mode 100644
index 0000000000..43d1976c49
--- /dev/null
+++ b/hw/intc/arm_gicv2m.c
@@ -0,0 +1,192 @@
+/*
+ * GICv2m extension for MSI/MSI-x support with a GICv2-based system
+ *
+ * Copyright (C) 2015 Linaro, All rights reserved.
+ *
+ * Author: Christoffer Dall <christoffer.dall@linaro.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* This file implements an emulated GICv2m widget as described in the ARM
+ * Server Base System Architecture (SBSA) specification Version 2.2
+ * (ARM-DEN-0029 v2.2) pages 35-39 without any optional implementation defined
+ * identification registers and with a single non-secure MSI register frame.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/pci/msi.h"
+
+#define TYPE_ARM_GICV2M "arm-gicv2m"
+#define ARM_GICV2M(obj) OBJECT_CHECK(ARMGICv2mState, (obj), TYPE_ARM_GICV2M)
+
+#define GICV2M_NUM_SPI_MAX 128
+
+#define V2M_MSI_TYPER 0x008
+#define V2M_MSI_SETSPI_NS 0x040
+#define V2M_MSI_IIDR 0xFCC
+#define V2M_IIDR0 0xFD0
+#define V2M_IIDR11 0xFFC
+
+#define PRODUCT_ID_QEMU 0x51 /* ASCII code Q */
+
+typedef struct ARMGICv2mState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq spi[GICV2M_NUM_SPI_MAX];
+
+ uint32_t base_spi;
+ uint32_t num_spi;
+} ARMGICv2mState;
+
+static void gicv2m_set_irq(void *opaque, int irq)
+{
+ ARMGICv2mState *s = (ARMGICv2mState *)opaque;
+
+ qemu_irq_pulse(s->spi[irq]);
+}
+
+static uint64_t gicv2m_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ ARMGICv2mState *s = (ARMGICv2mState *)opaque;
+ uint32_t val;
+
+ if (size != 4) {
+ qemu_log_mask(LOG_GUEST_ERROR, "gicv2m_read: bad size %u\n", size);
+ return 0;
+ }
+
+ switch (offset) {
+ case V2M_MSI_TYPER:
+ val = (s->base_spi + 32) << 16;
+ val |= s->num_spi;
+ return val;
+ case V2M_MSI_IIDR:
+ /* We don't have any valid implementor so we leave that field as zero
+ * and we return 0 in the arch revision as per the spec.
+ */
+ return (PRODUCT_ID_QEMU << 20);
+ case V2M_IIDR0 ... V2M_IIDR11:
+ /* We do not implement any optional identification registers and the
+ * mandatory MSI_PIDR2 register reads as 0x0, so we capture all
+ * implementation defined registers here.
+ */
+ return 0;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gicv2m_read: Bad offset %x\n", (int)offset);
+ return 0;
+ }
+}
+
+static void gicv2m_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ ARMGICv2mState *s = (ARMGICv2mState *)opaque;
+
+ if (size != 2 && size != 4) {
+ qemu_log_mask(LOG_GUEST_ERROR, "gicv2m_write: bad size %u\n", size);
+ return;
+ }
+
+ switch (offset) {
+ case V2M_MSI_SETSPI_NS: {
+ int spi;
+
+ spi = (value & 0x3ff) - (s->base_spi + 32);
+ if (spi >= 0 && spi < s->num_spi) {
+ gicv2m_set_irq(s, spi);
+ }
+ return;
+ }
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gicv2m_write: Bad offset %x\n", (int)offset);
+ }
+}
+
+static const MemoryRegionOps gicv2m_ops = {
+ .read = gicv2m_read,
+ .write = gicv2m_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void gicv2m_realize(DeviceState *dev, Error **errp)
+{
+ ARMGICv2mState *s = ARM_GICV2M(dev);
+ int i;
+
+ if (s->num_spi > GICV2M_NUM_SPI_MAX) {
+ error_setg(errp,
+ "requested %u SPIs exceeds GICv2m frame maximum %d",
+ s->num_spi, GICV2M_NUM_SPI_MAX);
+ return;
+ }
+
+ if (s->base_spi + 32 > 1020 - s->num_spi) {
+ error_setg(errp,
+ "requested base SPI %u+%u exceeds max. number 1020",
+ s->base_spi + 32, s->num_spi);
+ return;
+ }
+
+ for (i = 0; i < s->num_spi; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->spi[i]);
+ }
+
+ msi_supported = true;
+ kvm_gsi_direct_mapping = true;
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+}
+
+static void gicv2m_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ ARMGICv2mState *s = ARM_GICV2M(obj);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &gicv2m_ops, s,
+ "gicv2m", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static Property gicv2m_properties[] = {
+ DEFINE_PROP_UINT32("base-spi", ARMGICv2mState, base_spi, 0),
+ DEFINE_PROP_UINT32("num-spi", ARMGICv2mState, num_spi, 64),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void gicv2m_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->props = gicv2m_properties;
+ dc->realize = gicv2m_realize;
+}
+
+static const TypeInfo gicv2m_info = {
+ .name = TYPE_ARM_GICV2M,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(ARMGICv2mState),
+ .instance_init = gicv2m_init,
+ .class_init = gicv2m_class_init,
+};
+
+static void gicv2m_register_types(void)
+{
+ type_register_static(&gicv2m_info);
+}
+
+type_init(gicv2m_register_types)
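
A standalone sketch (not part of the patch) of the register arithmetic used by gicv2m_read()/gicv2m_write() above; the base-spi/num-spi values mirror the device's property defaults:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint32_t base_spi = 0;      /* "base-spi" property (default 0)  */
    uint32_t num_spi  = 64;     /* "num-spi" property (default 64)  */

    /* V2M_MSI_TYPER: bits [25:16] = first SPI as an INTID, bits [9:0] = count */
    uint32_t typer = ((base_spi + 32) << 16) | num_spi;
    printf("MSI_TYPER = 0x%08" PRIx32 "\n", typer);

    /* A write of an INTID to V2M_MSI_SETSPI_NS is converted back into a
     * frame-relative SPI index before the output line is pulsed. */
    uint32_t written = 35;      /* example MSI data value */
    int spi = (int)(written & 0x3ff) - (int)(base_spi + 32);
    if (spi >= 0 && spi < (int)num_spi) {
        printf("pulse spi[%d]\n", spi);
    }
    return 0;
}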
diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c
index 0590d5dfb8..b2a4950bc3 100644
--- a/hw/intc/exynos4210_gic.c
+++ b/hw/intc/exynos4210_gic.c
@@ -213,9 +213,6 @@ void exynos4210_init_board_irqs(Exynos4210Irq *s)
uint32_t grp, bit, irq_id, n;
for (n = 0; n < EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ; n++) {
- s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n],
- s->ext_combiner_irq[n]);
-
irq_id = 0;
if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 4) ||
n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4)) {
@@ -230,8 +227,10 @@ void exynos4210_init_board_irqs(Exynos4210Irq *s)
if (irq_id) {
s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n],
s->ext_gic_irq[irq_id-32]);
+ } else {
+ s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n],
+ s->ext_combiner_irq[n]);
}
-
}
for (; n < EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; n++) {
/* these IDs are passed to Internal Combiner and External GIC */
diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c
index 9da9dfc4da..fcf97d86ac 100644
--- a/hw/isa/i82378.c
+++ b/hw/isa/i82378.c
@@ -65,7 +65,6 @@ static void i82378_realize(PCIDevice *pci, Error **errp)
uint8_t *pci_conf;
ISABus *isabus;
ISADevice *isa;
- qemu_irq *out0_irq;
pci_conf = pci->config;
pci_set_word(pci_conf + PCI_COMMAND,
@@ -88,11 +87,9 @@ static void i82378_realize(PCIDevice *pci, Error **errp)
All devices accept byte access only, except timer
*/
- /* Workaround the fact that i8259 is not qdev'ified... */
- out0_irq = qemu_allocate_irqs(i82378_request_out0_irq, s, 1);
-
/* 2 82C59 (irq) */
- s->i8259 = i8259_init(isabus, *out0_irq);
+ s->i8259 = i8259_init(isabus,
+ qemu_allocate_irq(i82378_request_out0_irq, s, 0));
isa_bus_irqs(isabus, s->i8259);
/* 1 82C54 (pit) */
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
index 825aa627df..43e0cd8ddd 100644
--- a/hw/isa/isa-bus.c
+++ b/hw/isa/isa-bus.c
@@ -21,6 +21,7 @@
#include "hw/sysbus.h"
#include "sysemu/sysemu.h"
#include "hw/isa/isa.h"
+#include "hw/i386/pc.h"
static ISABus *isabus;
@@ -178,6 +179,9 @@ ISADevice *isa_vga_init(ISABus *bus)
case VGA_VMWARE:
fprintf(stderr, "%s: vmware_vga: no PCI bus\n", __func__);
return NULL;
+ case VGA_VIRTIO:
+ fprintf(stderr, "%s: virtio-vga: no PCI bus\n", __func__);
+ return NULL;
case VGA_NONE:
default:
return NULL;
@@ -267,3 +271,28 @@ MemoryRegion *isa_address_space_io(ISADevice *dev)
}
type_init(isabus_register_types)
+
+static void parallel_init(ISABus *bus, int index, CharDriverState *chr)
+{
+ DeviceState *dev;
+ ISADevice *isadev;
+
+ isadev = isa_create(bus, "isa-parallel");
+ dev = DEVICE(isadev);
+ qdev_prop_set_uint32(dev, "index", index);
+ qdev_prop_set_chr(dev, "chardev", chr);
+ qdev_init_nofail(dev);
+}
+
+void parallel_hds_isa_init(ISABus *bus, int n)
+{
+ int i;
+
+ assert(n <= MAX_PARALLEL_PORTS);
+
+ for (i = 0; i < n; i++) {
+ if (parallel_hds[i]) {
+ parallel_init(bus, i, parallel_hds[i]);
+ }
+ }
+}
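
A hypothetical board-side caller (not in this patch), assuming the ISA bus already exists and that MAX_PARALLEL_PORTS and parallel_hds[] come from the hw/i386/pc.h include added above:

/* Sketch only: instantiate an "isa-parallel" device for every populated
 * entry in parallel_hds[]. */
static void example_board_init_parallel(ISABus *isa_bus)
{
    parallel_hds_isa_init(isa_bus, MAX_PARALLEL_PORTS);
}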
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index bc9afc60ab..b3e0b1fd52 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -360,11 +360,8 @@ static void ich9_set_sci(void *opaque, int irq_num, int level)
void ich9_lpc_pm_init(PCIDevice *lpc_pci)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(lpc_pci);
- qemu_irq *sci_irq;
-
- sci_irq = qemu_allocate_irqs(ich9_set_sci, lpc, 1);
- ich9_pm_init(lpc_pci, &lpc->pm, sci_irq[0]);
+ ich9_pm_init(lpc_pci, &lpc->pm, qemu_allocate_irq(ich9_set_sci, lpc, 0));
ich9_lpc_reset(&lpc->d.qdev);
}
@@ -410,12 +407,28 @@ static void ich9_lpc_rcba_update(ICH9LPCState *lpc, uint32_t rbca_old)
}
}
+/* config:GEN_PMCON* */
+static void
+ich9_lpc_pmcon_update(ICH9LPCState *lpc)
+{
+ uint16_t gen_pmcon_1 = pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_1);
+ uint16_t wmask;
+
+ if (gen_pmcon_1 & ICH9_LPC_GEN_PMCON_1_SMI_LOCK) {
+ wmask = pci_get_word(lpc->d.wmask + ICH9_LPC_GEN_PMCON_1);
+ wmask &= ~ICH9_LPC_GEN_PMCON_1_SMI_LOCK;
+ pci_set_word(lpc->d.wmask + ICH9_LPC_GEN_PMCON_1, wmask);
+ lpc->pm.smi_en_wmask &= ~1;
+ }
+}
+
static int ich9_lpc_post_load(void *opaque, int version_id)
{
ICH9LPCState *lpc = opaque;
ich9_lpc_pmbase_update(lpc);
ich9_lpc_rcba_update(lpc, 0 /* disabled ICH9_LPC_RBCA_EN */);
+ ich9_lpc_pmcon_update(lpc);
return 0;
}
@@ -438,6 +451,9 @@ static void ich9_lpc_config_write(PCIDevice *d,
if (ranges_overlap(addr, len, ICH9_LPC_PIRQE_ROUT, 4)) {
pci_bus_fire_intx_routing_notifier(lpc->d.bus);
}
+ if (ranges_overlap(addr, len, ICH9_LPC_GEN_PMCON_1, 8)) {
+ ich9_lpc_pmcon_update(lpc);
+ }
}
static void ich9_lpc_reset(DeviceState *qdev)
@@ -634,6 +650,7 @@ static const VMStateDescription vmstate_ich9_rst_cnt = {
.name = "ICH9LPC/rst_cnt",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = ich9_rst_cnt_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(rst_cnt, ICH9LPCState),
VMSTATE_END_OF_LIST()
@@ -653,12 +670,9 @@ static const VMStateDescription vmstate_ich9_lpc = {
VMSTATE_UINT32(sci_level, ICH9LPCState),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_ich9_rst_cnt,
- .needed = ich9_rst_cnt_needed
- },
- { 0 }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_ich9_rst_cnt,
+ NULL
}
};
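
A standalone sketch of the write-mask mechanic behind ich9_lpc_pmcon_update(): once SMI_LOCK is observed set, clearing its wmask bit makes it read-only for the guest. The bit position and values below are illustrative, not the real ICH9 layout.

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_SMI_LOCK 0x0010     /* illustrative bit position only */

int main(void)
{
    uint16_t config = EXAMPLE_SMI_LOCK;   /* lock bit already set by the guest */
    uint16_t wmask  = 0xffff;             /* every bit guest-writable at reset */

    if (config & EXAMPLE_SMI_LOCK) {
        wmask &= ~EXAMPLE_SMI_LOCK;       /* make the lock bit read-only */
    }

    /* a later guest write only changes bits still present in wmask */
    uint16_t guest_write = 0x0000;
    config = (config & ~wmask) | (guest_write & wmask);
    printf("config after locked write = 0x%04x\n", (unsigned)config);  /* lock bit kept */
    return 0;
}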
diff --git a/hw/lm32/lm32_boards.c b/hw/lm32/lm32_boards.c
index 14d0efcdd9..70f48d3b1d 100644
--- a/hw/lm32/lm32_boards.c
+++ b/hw/lm32/lm32_boards.c
@@ -78,7 +78,7 @@ static void lm32_evr_init(MachineState *machine)
DriveInfo *dinfo;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
- qemu_irq *cpu_irq, irq[32];
+ qemu_irq irq[32];
ResetInfo *reset_info;
int i;
@@ -123,8 +123,7 @@ static void lm32_evr_init(MachineState *machine)
1, 2, 0x01, 0x7e, 0x43, 0x00, 0x555, 0x2aa, 1);
/* create irq lines */
- cpu_irq = qemu_allocate_irqs(cpu_irq_handler, cpu, 1);
- env->pic_state = lm32_pic_init(*cpu_irq);
+ env->pic_state = lm32_pic_init(qemu_allocate_irq(cpu_irq_handler, cpu, 0));
for (i = 0; i < 32; i++) {
irq[i] = qdev_get_gpio_in(env->pic_state, i);
}
@@ -173,7 +172,7 @@ static void lm32_uclinux_init(MachineState *machine)
DriveInfo *dinfo;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
- qemu_irq *cpu_irq, irq[32];
+ qemu_irq irq[32];
HWSetup *hw;
ResetInfo *reset_info;
int i;
@@ -225,8 +224,7 @@ static void lm32_uclinux_init(MachineState *machine)
1, 2, 0x01, 0x7e, 0x43, 0x00, 0x555, 0x2aa, 1);
/* create irq lines */
- cpu_irq = qemu_allocate_irqs(cpu_irq_handler, env, 1);
- env->pic_state = lm32_pic_init(*cpu_irq);
+ env->pic_state = lm32_pic_init(qemu_allocate_irq(cpu_irq_handler, env, 0));
for (i = 0; i < 32; i++) {
irq[i] = qdev_get_gpio_in(env->pic_state, i);
}
diff --git a/hw/lm32/milkymist.c b/hw/lm32/milkymist.c
index e0cec7dc41..e755f5b24f 100644
--- a/hw/lm32/milkymist.c
+++ b/hw/lm32/milkymist.c
@@ -86,7 +86,7 @@ milkymist_init(MachineState *machine)
DriveInfo *dinfo;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *phys_sdram = g_new(MemoryRegion, 1);
- qemu_irq irq[32], *cpu_irq;
+ qemu_irq irq[32];
int i;
char *bios_filename;
ResetInfo *reset_info;
@@ -130,8 +130,7 @@ milkymist_init(MachineState *machine)
2, 0x00, 0x89, 0x00, 0x1d, 1);
/* create irq lines */
- cpu_irq = qemu_allocate_irqs(cpu_irq_handler, cpu, 1);
- env->pic_state = lm32_pic_init(*cpu_irq);
+ env->pic_state = lm32_pic_init(qemu_allocate_irq(cpu_irq_handler, cpu, 0));
for (i = 0; i < 32; i++) {
irq[i] = qdev_get_gpio_in(env->pic_state, i);
}
diff --git a/hw/mips/Makefile.objs b/hw/mips/Makefile.objs
index 0a652f8521..9633f3a57d 100644
--- a/hw/mips/Makefile.objs
+++ b/hw/mips/Makefile.objs
@@ -1,4 +1,5 @@
-obj-y += mips_r4k.o mips_jazz.o mips_malta.o mips_mipssim.o
+obj-y += mips_r4k.o mips_malta.o mips_mipssim.o
obj-y += addr.o cputimer.o mips_int.o
+obj-$(CONFIG_JAZZ) += mips_jazz.o
obj-$(CONFIG_FULONG) += mips_fulong2e.o
obj-y += gt64xxx_pci.o
diff --git a/hw/mips/mips_jazz.c b/hw/mips/mips_jazz.c
index 2c153e092f..9d60633efb 100644
--- a/hw/mips/mips_jazz.c
+++ b/hw/mips/mips_jazz.c
@@ -135,16 +135,16 @@ static void mips_jazz_init(MachineState *machine,
MIPSCPU *cpu;
CPUClass *cc;
CPUMIPSState *env;
- qemu_irq *rc4030, *i8259;
+ qemu_irq *i8259;
rc4030_dma *dmas;
- void* rc4030_opaque;
+ MemoryRegion *rc4030_dma_mr;
MemoryRegion *isa_mem = g_new(MemoryRegion, 1);
MemoryRegion *isa_io = g_new(MemoryRegion, 1);
MemoryRegion *rtc = g_new(MemoryRegion, 1);
MemoryRegion *i8042 = g_new(MemoryRegion, 1);
MemoryRegion *dma_dummy = g_new(MemoryRegion, 1);
NICInfo *nd;
- DeviceState *dev;
+ DeviceState *dev, *rc4030;
SysBusDevice *sysbus;
ISABus *isa_bus;
ISADevice *pit;
@@ -157,12 +157,7 @@ static void mips_jazz_init(MachineState *machine,
/* init CPUs */
if (cpu_model == NULL) {
-#ifdef TARGET_MIPS64
cpu_model = "R4000";
-#else
- /* FIXME: All wrong, this maybe should be R3000 for the older JAZZs. */
- cpu_model = "24Kf";
-#endif
}
cpu = cpu_mips_init(cpu_model);
if (cpu == NULL) {
@@ -218,8 +213,14 @@ static void mips_jazz_init(MachineState *machine,
cpu_mips_clock_init(env);
/* Chipset */
- rc4030_opaque = rc4030_init(env->irq[6], env->irq[3], &rc4030, &dmas,
- address_space);
+ rc4030 = rc4030_init(&dmas, &rc4030_dma_mr);
+ sysbus = SYS_BUS_DEVICE(rc4030);
+ sysbus_connect_irq(sysbus, 0, env->irq[6]);
+ sysbus_connect_irq(sysbus, 1, env->irq[3]);
+ memory_region_add_subregion(address_space, 0x80000000,
+ sysbus_mmio_get_region(sysbus, 0));
+ memory_region_add_subregion(address_space, 0xf0000000,
+ sysbus_mmio_get_region(sysbus, 1));
memory_region_init_io(dma_dummy, NULL, &dma_dummy_ops, NULL, "dummy_dma", 0x1000);
memory_region_add_subregion(address_space, 0x8000d000, dma_dummy);
@@ -246,7 +247,7 @@ static void mips_jazz_init(MachineState *machine,
sysbus = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(sysbus, 0, 0x60080000);
sysbus_mmio_map(sysbus, 1, 0x40000000);
- sysbus_connect_irq(sysbus, 0, rc4030[3]);
+ sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 3));
{
/* Simple ROM, so user doesn't have to provide one */
MemoryRegion *rom_mr = g_new(MemoryRegion, 1);
@@ -272,8 +273,17 @@ static void mips_jazz_init(MachineState *machine,
if (!nd->model)
nd->model = g_strdup("dp83932");
if (strcmp(nd->model, "dp83932") == 0) {
- dp83932_init(nd, 0x80001000, 2, get_system_memory(), rc4030[4],
- rc4030_opaque, rc4030_dma_memory_rw);
+ qemu_check_nic_model(nd, "dp83932");
+
+ dev = qdev_create(NULL, "dp8393x");
+ qdev_set_nic_properties(dev, nd);
+ qdev_prop_set_uint8(dev, "it_shift", 2);
+ qdev_prop_set_ptr(dev, "dma_mr", rc4030_dma_mr);
+ qdev_init_nofail(dev);
+ sysbus = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(sysbus, 0, 0x80001000);
+ sysbus_mmio_map(sysbus, 1, 0x8000b000);
+ sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 4));
break;
} else if (is_help_option(nd->model)) {
fprintf(stderr, "qemu: Supported NICs: dp83932\n");
@@ -287,7 +297,7 @@ static void mips_jazz_init(MachineState *machine,
/* SCSI adapter */
esp_init(0x80002000, 0,
rc4030_dma_read, rc4030_dma_write, dmas[0],
- rc4030[5], &esp_reset, &dma_enable);
+ qdev_get_gpio_in(rc4030, 5), &esp_reset, &dma_enable);
/* Floppy */
if (drive_get_max_bus(IF_FLOPPY) >= MAX_FD) {
@@ -297,7 +307,7 @@ static void mips_jazz_init(MachineState *machine,
for (n = 0; n < MAX_FD; n++) {
fds[n] = drive_get(IF_FLOPPY, 0, n);
}
- fdctrl_init_sysbus(rc4030[1], 0, 0x80003000, fds);
+ fdctrl_init_sysbus(qdev_get_gpio_in(rc4030, 1), 0, 0x80003000, fds);
/* Real time clock */
rtc_init(isa_bus, 1980, NULL);
@@ -305,23 +315,26 @@ static void mips_jazz_init(MachineState *machine,
memory_region_add_subregion(address_space, 0x80004000, rtc);
/* Keyboard (i8042) */
- i8042_mm_init(rc4030[6], rc4030[7], i8042, 0x1000, 0x1);
+ i8042_mm_init(qdev_get_gpio_in(rc4030, 6), qdev_get_gpio_in(rc4030, 7),
+ i8042, 0x1000, 0x1);
memory_region_add_subregion(address_space, 0x80005000, i8042);
/* Serial ports */
if (serial_hds[0]) {
- serial_mm_init(address_space, 0x80006000, 0, rc4030[8], 8000000/16,
+ serial_mm_init(address_space, 0x80006000, 0,
+ qdev_get_gpio_in(rc4030, 8), 8000000/16,
serial_hds[0], DEVICE_NATIVE_ENDIAN);
}
if (serial_hds[1]) {
- serial_mm_init(address_space, 0x80007000, 0, rc4030[9], 8000000/16,
+ serial_mm_init(address_space, 0x80007000, 0,
+ qdev_get_gpio_in(rc4030, 9), 8000000/16,
serial_hds[1], DEVICE_NATIVE_ENDIAN);
}
/* Parallel port */
if (parallel_hds[0])
- parallel_mm_init(address_space, 0x80008000, 0, rc4030[0],
- parallel_hds[0]);
+ parallel_mm_init(address_space, 0x80008000, 0,
+ qdev_get_gpio_in(rc4030, 0), parallel_hds[0]);
/* FIXME: missing Jazz sound at 0x8000c000, rc4030[2] */
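
The Jazz conversion above follows the usual qdev/sysbus idiom; a minimal sketch of that idiom in isolation, assuming QEMU's qdev and sysbus APIs (the device name and address are placeholders):

/* Sketch only: create a sysbus device, map its MMIO region and wire its
 * IRQ output; consumers then fetch the device's GPIO inputs by index. */
#include "hw/sysbus.h"

static DeviceState *example_wire_sysbus_dev(qemu_irq cpu_irq)
{
    DeviceState *dev = qdev_create(NULL, "some-sysbus-device");
    SysBusDevice *sbd;

    qdev_init_nofail(dev);
    sbd = SYS_BUS_DEVICE(dev);
    sysbus_mmio_map(sbd, 0, 0x80001000);        /* register window      */
    sysbus_connect_irq(sbd, 0, cpu_irq);        /* device output -> CPU */

    /* other devices use qdev_get_gpio_in(dev, n) as their IRQ sink */
    return dev;
}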
diff --git a/hw/mips/mips_malta.c b/hw/mips/mips_malta.c
index 5140882c00..786a8f0638 100644
--- a/hw/mips/mips_malta.c
+++ b/hw/mips/mips_malta.c
@@ -97,7 +97,7 @@ typedef struct {
static ISADevice *pit;
static struct _loaderparams {
- int ram_size;
+ int ram_size, ram_low_size;
const char *kernel_filename;
const char *kernel_cmdline;
const char *initrd_filename;
@@ -641,8 +641,8 @@ static void write_bootloader (CPUMIPSState *env, uint8_t *base,
stl_p(p++, 0x34a50000 | (ENVP_ADDR & 0xffff)); /* ori a1, a1, low(ENVP_ADDR) */
stl_p(p++, 0x3c060000 | (((ENVP_ADDR + 8) >> 16) & 0xffff)); /* lui a2, high(ENVP_ADDR + 8) */
stl_p(p++, 0x34c60000 | ((ENVP_ADDR + 8) & 0xffff)); /* ori a2, a2, low(ENVP_ADDR + 8) */
- stl_p(p++, 0x3c070000 | (loaderparams.ram_size >> 16)); /* lui a3, high(ram_size) */
- stl_p(p++, 0x34e70000 | (loaderparams.ram_size & 0xffff)); /* ori a3, a3, low(ram_size) */
+ stl_p(p++, 0x3c070000 | (loaderparams.ram_low_size >> 16)); /* lui a3, high(ram_low_size) */
+ stl_p(p++, 0x34e70000 | (loaderparams.ram_low_size & 0xffff)); /* ori a3, a3, low(ram_low_size) */
/* Load BAR registers as done by YAMON */
stl_p(p++, 0x3c09b400); /* lui t1, 0xb400 */
@@ -851,8 +851,10 @@ static int64_t load_kernel (void)
}
prom_set(prom_buf, prom_index++, "memsize");
- prom_set(prom_buf, prom_index++, "%i",
- MIN(loaderparams.ram_size, 256 << 20));
+ prom_set(prom_buf, prom_index++, "%u", loaderparams.ram_low_size);
+
+ prom_set(prom_buf, prom_index++, "ememsize");
+ prom_set(prom_buf, prom_index++, "%u", loaderparams.ram_size);
prom_set(prom_buf, prom_index++, "modetty0");
prom_set(prom_buf, prom_index++, "38400n8r");
@@ -1054,7 +1056,8 @@ void mips_malta_init(MachineState *machine)
}
/* Write a small bootloader to the flash location. */
- loaderparams.ram_size = ram_low_size;
+ loaderparams.ram_size = ram_size;
+ loaderparams.ram_low_size = ram_low_size;
loaderparams.kernel_filename = kernel_filename;
loaderparams.kernel_cmdline = kernel_cmdline;
loaderparams.initrd_filename = initrd_filename;
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
index 063ad80412..e9037b0c39 100644
--- a/hw/misc/macio/macio.c
+++ b/hw/misc/macio/macio.c
@@ -126,17 +126,18 @@ static void macio_bar_setup(MacIOState *macio_state)
}
}
-static int macio_common_initfn(PCIDevice *d)
+static void macio_common_realize(PCIDevice *d, Error **errp)
{
MacIOState *s = MACIO(d);
SysBusDevice *sysbus_dev;
- int ret;
+ Error *err = NULL;
d->config[0x3d] = 0x01; // interrupt on pin 1
- ret = qdev_init(DEVICE(&s->cuda));
- if (ret < 0) {
- return ret;
+ object_property_set_bool(OBJECT(&s->cuda), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
}
sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
memory_region_add_subregion(&s->bar, 0x16000,
@@ -144,12 +145,11 @@ static int macio_common_initfn(PCIDevice *d)
macio_bar_setup(s);
pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
-
- return 0;
}
-static int macio_initfn_ide(MacIOState *s, MACIOIDEState *ide, qemu_irq irq0,
- qemu_irq irq1, int dmaid)
+static void macio_realize_ide(MacIOState *s, MACIOIDEState *ide,
+ qemu_irq irq0, qemu_irq irq1, int dmaid,
+ Error **errp)
{
SysBusDevice *sysbus_dev;
@@ -157,27 +157,31 @@ static int macio_initfn_ide(MacIOState *s, MACIOIDEState *ide, qemu_irq irq0,
sysbus_connect_irq(sysbus_dev, 0, irq0);
sysbus_connect_irq(sysbus_dev, 1, irq1);
macio_ide_register_dma(ide, s->dbdma, dmaid);
- return qdev_init(DEVICE(ide));
+ object_property_set_bool(OBJECT(ide), true, "realized", errp);
}
-static int macio_oldworld_initfn(PCIDevice *d)
+static void macio_oldworld_realize(PCIDevice *d, Error **errp)
{
MacIOState *s = MACIO(d);
OldWorldMacIOState *os = OLDWORLD_MACIO(d);
+ Error *err = NULL;
SysBusDevice *sysbus_dev;
int i;
int cur_irq = 0;
- int ret = macio_common_initfn(d);
- if (ret < 0) {
- return ret;
+
+ macio_common_realize(d, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
}
sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
sysbus_connect_irq(sysbus_dev, 0, os->irqs[cur_irq++]);
- ret = qdev_init(DEVICE(&os->nvram));
- if (ret < 0) {
- return ret;
+ object_property_set_bool(OBJECT(&os->nvram), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
}
sysbus_dev = SYS_BUS_DEVICE(&os->nvram);
memory_region_add_subregion(&s->bar, 0x60000,
@@ -194,13 +198,12 @@ static int macio_oldworld_initfn(PCIDevice *d)
qemu_irq irq0 = os->irqs[cur_irq++];
qemu_irq irq1 = os->irqs[cur_irq++];
- ret = macio_initfn_ide(s, &os->ide[i], irq0, irq1, 0x16 + (i * 4));
- if (ret < 0) {
- return ret;
+ macio_realize_ide(s, &os->ide[i], irq0, irq1, 0x16 + (i * 4), &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
}
}
-
- return 0;
}
static void macio_init_ide(MacIOState *s, MACIOIDEState *ide, size_t ide_size,
@@ -268,17 +271,20 @@ static const MemoryRegionOps timer_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static int macio_newworld_initfn(PCIDevice *d)
+static void macio_newworld_realize(PCIDevice *d, Error **errp)
{
MacIOState *s = MACIO(d);
NewWorldMacIOState *ns = NEWWORLD_MACIO(d);
+ Error *err = NULL;
SysBusDevice *sysbus_dev;
MemoryRegion *timer_memory = NULL;
int i;
int cur_irq = 0;
- int ret = macio_common_initfn(d);
- if (ret < 0) {
- return ret;
+
+ macio_common_realize(d, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
}
sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
@@ -294,9 +300,10 @@ static int macio_newworld_initfn(PCIDevice *d)
qemu_irq irq0 = ns->irqs[cur_irq++];
qemu_irq irq1 = ns->irqs[cur_irq++];
- ret = macio_initfn_ide(s, &ns->ide[i], irq0, irq1, 0x16 + (i * 4));
- if (ret < 0) {
- return ret;
+ macio_realize_ide(s, &ns->ide[i], irq0, irq1, 0x16 + (i * 4), &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
}
}
@@ -305,8 +312,6 @@ static int macio_newworld_initfn(PCIDevice *d)
memory_region_init_io(timer_memory, OBJECT(s), &timer_ops, NULL, "timer",
0x1000);
memory_region_add_subregion(&s->bar, 0x15000, timer_memory);
-
- return 0;
}
static void macio_newworld_init(Object *obj)
@@ -352,7 +357,7 @@ static void macio_oldworld_class_init(ObjectClass *oc, void *data)
PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
- pdc->init = macio_oldworld_initfn;
+ pdc->realize = macio_oldworld_realize;
pdc->device_id = PCI_DEVICE_ID_APPLE_343S1201;
dc->vmsd = &vmstate_macio_oldworld;
}
@@ -372,7 +377,7 @@ static void macio_newworld_class_init(ObjectClass *oc, void *data)
PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
- pdc->init = macio_newworld_initfn;
+ pdc->realize = macio_newworld_realize;
pdc->device_id = PCI_DEVICE_ID_APPLE_UNI_N_KEYL;
dc->vmsd = &vmstate_macio_newworld;
}
diff --git a/hw/net/Makefile.objs b/hw/net/Makefile.objs
index 7b91c4e51d..98801739ef 100644
--- a/hw/net/Makefile.objs
+++ b/hw/net/Makefile.objs
@@ -39,3 +39,4 @@ obj-$(CONFIG_ETSEC) += fsl_etsec/etsec.o fsl_etsec/registers.o \
common-obj-$(CONFIG_ROCKER) += rocker/rocker.o rocker/rocker_fp.o \
rocker/rocker_desc.o rocker/rocker_world.o \
rocker/rocker_of_dpa.o
+obj-$(call lnot,$(CONFIG_ROCKER)) += rocker/qmp-norocker.o
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index dafe91421b..494a346cf6 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -155,7 +155,7 @@
#define GEM_NWCFG_BCAST_REJ 0x00000020 /* Reject broadcast packets */
#define GEM_NWCFG_PROMISC 0x00000010 /* Accept all packets */
-#define GEM_DMACFG_RBUFSZ_M 0x007F0000 /* DMA RX Buffer Size mask */
+#define GEM_DMACFG_RBUFSZ_M 0x00FF0000 /* DMA RX Buffer Size mask */
#define GEM_DMACFG_RBUFSZ_S 16 /* DMA RX Buffer Size shift */
#define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */
#define GEM_DMACFG_TXCSUM_OFFL 0x00000800 /* Transmit checksum offload */
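
A standalone sketch of how the GEM RX buffer size is derived from the DMA config register, and why widening the mask to 0x00FF0000 matters for buffers above 8 KiB (the register value below is an example):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define GEM_DMACFG_RBUFSZ_M   0x00FF0000   /* widened mask from the hunk above */
#define GEM_DMACFG_RBUFSZ_S   16
#define GEM_DMACFG_RBUFSZ_MUL 64

int main(void)
{
    uint32_t dmacfg = (uint32_t)0xA0 << GEM_DMACFG_RBUFSZ_S;  /* 0xA0 * 64 = 10240 bytes */
    uint32_t rxbufsz = ((dmacfg & GEM_DMACFG_RBUFSZ_M) >> GEM_DMACFG_RBUFSZ_S)
                       * GEM_DMACFG_RBUFSZ_MUL;

    /* The old 7-bit mask (0x007F0000) would have read 0x20 here -> 2048 bytes. */
    printf("rx buffer size = %" PRIu32 " bytes\n", rxbufsz);
    return 0;
}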
diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c
index 7ce13d2b46..cd889bce86 100644
--- a/hw/net/dp8393x.c
+++ b/hw/net/dp8393x.c
@@ -17,20 +17,15 @@
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "hw/hw.h"
-#include "qemu/timer.h"
+#include "hw/sysbus.h"
+#include "hw/devices.h"
#include "net/net.h"
-#include "hw/mips/mips.h"
+#include "qemu/timer.h"
+#include <zlib.h>
//#define DEBUG_SONIC
-/* Calculate CRCs properly on Rx packets */
-#define SONIC_CALCULATE_RXCRC
-
-#if defined(SONIC_CALCULATE_RXCRC)
-/* For crc32 */
-#include <zlib.h>
-#endif
+#define SONIC_PROM_SIZE 0x1000
#ifdef DEBUG_SONIC
#define DPRINTF(fmt, ...) \
@@ -145,9 +140,14 @@ do { printf("sonic ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)
#define SONIC_ISR_PINT 0x0800
#define SONIC_ISR_LCD 0x1000
+#define TYPE_DP8393X "dp8393x"
+#define DP8393X(obj) OBJECT_CHECK(dp8393xState, (obj), TYPE_DP8393X)
+
typedef struct dp8393xState {
+ SysBusDevice parent_obj;
+
/* Hardware */
- int it_shift;
+ uint8_t it_shift;
qemu_irq irq;
#ifdef DEBUG_SONIC
int irq_level;
@@ -156,8 +156,8 @@ typedef struct dp8393xState {
int64_t wt_last_update;
NICConf conf;
NICState *nic;
- MemoryRegion *address_space;
MemoryRegion mmio;
+ MemoryRegion prom;
/* Registers */
uint8_t cam[16][6];
@@ -168,8 +168,8 @@ typedef struct dp8393xState {
int loopback_packet;
/* Memory access */
- void (*memory_rw)(void *opaque, hwaddr addr, uint8_t *buf, int len, int is_write);
- void* mem_opaque;
+ void *dma_mr;
+ AddressSpace as;
} dp8393xState;
static void dp8393x_update_irq(dp8393xState *s)
@@ -190,7 +190,7 @@ static void dp8393x_update_irq(dp8393xState *s)
qemu_set_irq(s->irq, level);
}
-static void do_load_cam(dp8393xState *s)
+static void dp8393x_do_load_cam(dp8393xState *s)
{
uint16_t data[8];
int width, size;
@@ -201,9 +201,9 @@ static void do_load_cam(dp8393xState *s)
while (s->regs[SONIC_CDC] & 0x1f) {
/* Fill current entry */
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
(s->regs[SONIC_URRA] << 16) | s->regs[SONIC_CDP],
- (uint8_t *)data, size, 0);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->cam[index][0] = data[1 * width] & 0xff;
s->cam[index][1] = data[1 * width] >> 8;
s->cam[index][2] = data[2 * width] & 0xff;
@@ -220,9 +220,9 @@ static void do_load_cam(dp8393xState *s)
}
/* Read CAM enable */
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
(s->regs[SONIC_URRA] << 16) | s->regs[SONIC_CDP],
- (uint8_t *)data, size, 0);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->regs[SONIC_CE] = data[0 * width];
DPRINTF("load cam done. cam enable mask 0x%04x\n", s->regs[SONIC_CE]);
@@ -232,7 +232,7 @@ static void do_load_cam(dp8393xState *s)
dp8393x_update_irq(s);
}
-static void do_read_rra(dp8393xState *s)
+static void dp8393x_do_read_rra(dp8393xState *s)
{
uint16_t data[8];
int width, size;
@@ -240,9 +240,9 @@ static void do_read_rra(dp8393xState *s)
/* Read memory */
width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1;
size = sizeof(uint16_t) * 4 * width;
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
(s->regs[SONIC_URRA] << 16) | s->regs[SONIC_RRP],
- (uint8_t *)data, size, 0);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
/* Update SONIC registers */
s->regs[SONIC_CRBA0] = data[0 * width];
@@ -272,7 +272,7 @@ static void do_read_rra(dp8393xState *s)
s->regs[SONIC_CR] &= ~SONIC_CR_RRRA;
}
-static void do_software_reset(dp8393xState *s)
+static void dp8393x_do_software_reset(dp8393xState *s)
{
timer_del(s->watchdog);
@@ -280,7 +280,7 @@ static void do_software_reset(dp8393xState *s)
s->regs[SONIC_CR] |= SONIC_CR_RST | SONIC_CR_RXDIS;
}
-static void set_next_tick(dp8393xState *s)
+static void dp8393x_set_next_tick(dp8393xState *s)
{
uint32_t ticks;
int64_t delay;
@@ -296,7 +296,7 @@ static void set_next_tick(dp8393xState *s)
timer_mod(s->watchdog, s->wt_last_update + delay);
}
-static void update_wt_regs(dp8393xState *s)
+static void dp8393x_update_wt_regs(dp8393xState *s)
{
int64_t elapsed;
uint32_t val;
@@ -311,33 +311,33 @@ static void update_wt_regs(dp8393xState *s)
val -= elapsed / 5000000;
s->regs[SONIC_WT1] = (val >> 16) & 0xffff;
s->regs[SONIC_WT0] = (val >> 0) & 0xffff;
- set_next_tick(s);
+ dp8393x_set_next_tick(s);
}
-static void do_start_timer(dp8393xState *s)
+static void dp8393x_do_start_timer(dp8393xState *s)
{
s->regs[SONIC_CR] &= ~SONIC_CR_STP;
- set_next_tick(s);
+ dp8393x_set_next_tick(s);
}
-static void do_stop_timer(dp8393xState *s)
+static void dp8393x_do_stop_timer(dp8393xState *s)
{
s->regs[SONIC_CR] &= ~SONIC_CR_ST;
- update_wt_regs(s);
+ dp8393x_update_wt_regs(s);
}
-static void do_receiver_enable(dp8393xState *s)
+static void dp8393x_do_receiver_enable(dp8393xState *s)
{
s->regs[SONIC_CR] &= ~SONIC_CR_RXDIS;
}
-static void do_receiver_disable(dp8393xState *s)
+static void dp8393x_do_receiver_disable(dp8393xState *s)
{
s->regs[SONIC_CR] &= ~SONIC_CR_RXEN;
}
-static void do_transmit_packets(dp8393xState *s)
+static void dp8393x_do_transmit_packets(dp8393xState *s)
{
NetClientState *nc = qemu_get_queue(s->nic);
uint16_t data[12];
@@ -353,9 +353,9 @@ static void do_transmit_packets(dp8393xState *s)
(s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_CTDA]);
size = sizeof(uint16_t) * 6 * width;
s->regs[SONIC_TTDA] = s->regs[SONIC_CTDA];
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * width,
- (uint8_t *)data, size, 0);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
tx_len = 0;
/* Update registers */
@@ -379,18 +379,18 @@ static void do_transmit_packets(dp8393xState *s)
if (tx_len + len > sizeof(s->tx_buffer)) {
len = sizeof(s->tx_buffer) - tx_len;
}
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
(s->regs[SONIC_TSA1] << 16) | s->regs[SONIC_TSA0],
- &s->tx_buffer[tx_len], len, 0);
+ MEMTXATTRS_UNSPECIFIED, &s->tx_buffer[tx_len], len, 0);
tx_len += len;
i++;
if (i != s->regs[SONIC_TFC]) {
/* Read next fragment details */
size = sizeof(uint16_t) * 3 * width;
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * (4 + 3 * i) * width,
- (uint8_t *)data, size, 0);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->regs[SONIC_TSA0] = data[0 * width];
s->regs[SONIC_TSA1] = data[1 * width];
s->regs[SONIC_TFS] = data[2 * width];
@@ -422,16 +422,16 @@ static void do_transmit_packets(dp8393xState *s)
/* Write status */
data[0 * width] = s->regs[SONIC_TCR] & 0x0fff; /* status */
size = sizeof(uint16_t) * width;
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
(s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA],
- (uint8_t *)data, size, 1);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 1);
if (!(s->regs[SONIC_CR] & SONIC_CR_HTX)) {
/* Read footer of packet */
size = sizeof(uint16_t) * width;
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * (4 + 3 * s->regs[SONIC_TFC]) * width,
- (uint8_t *)data, size, 0);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->regs[SONIC_CTDA] = data[0 * width] & ~0x1;
if (data[0 * width] & 0x1) {
/* EOL detected */
@@ -446,12 +446,12 @@ static void do_transmit_packets(dp8393xState *s)
dp8393x_update_irq(s);
}
-static void do_halt_transmission(dp8393xState *s)
+static void dp8393x_do_halt_transmission(dp8393xState *s)
{
/* Nothing to do */
}
-static void do_command(dp8393xState *s, uint16_t command)
+static void dp8393x_do_command(dp8393xState *s, uint16_t command)
{
if ((s->regs[SONIC_CR] & SONIC_CR_RST) && !(command & SONIC_CR_RST)) {
s->regs[SONIC_CR] &= ~SONIC_CR_RST;
@@ -461,34 +461,36 @@ static void do_command(dp8393xState *s, uint16_t command)
s->regs[SONIC_CR] |= (command & SONIC_CR_MASK);
if (command & SONIC_CR_HTX)
- do_halt_transmission(s);
+ dp8393x_do_halt_transmission(s);
if (command & SONIC_CR_TXP)
- do_transmit_packets(s);
+ dp8393x_do_transmit_packets(s);
if (command & SONIC_CR_RXDIS)
- do_receiver_disable(s);
+ dp8393x_do_receiver_disable(s);
if (command & SONIC_CR_RXEN)
- do_receiver_enable(s);
+ dp8393x_do_receiver_enable(s);
if (command & SONIC_CR_STP)
- do_stop_timer(s);
+ dp8393x_do_stop_timer(s);
if (command & SONIC_CR_ST)
- do_start_timer(s);
+ dp8393x_do_start_timer(s);
if (command & SONIC_CR_RST)
- do_software_reset(s);
+ dp8393x_do_software_reset(s);
if (command & SONIC_CR_RRRA)
- do_read_rra(s);
+ dp8393x_do_read_rra(s);
if (command & SONIC_CR_LCAM)
- do_load_cam(s);
+ dp8393x_do_load_cam(s);
}
-static uint16_t read_register(dp8393xState *s, int reg)
+static uint64_t dp8393x_read(void *opaque, hwaddr addr, unsigned int size)
{
+ dp8393xState *s = opaque;
+ int reg = addr >> s->it_shift;
uint16_t val = 0;
switch (reg) {
/* Update data before reading it */
case SONIC_WT0:
case SONIC_WT1:
- update_wt_regs(s);
+ dp8393x_update_wt_regs(s);
val = s->regs[reg];
break;
/* Accept read to some registers only when in reset mode */
@@ -510,14 +512,18 @@ static uint16_t read_register(dp8393xState *s, int reg)
return val;
}
-static void write_register(dp8393xState *s, int reg, uint16_t val)
+static void dp8393x_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
{
- DPRINTF("write 0x%04x to reg %s\n", val, reg_names[reg]);
+ dp8393xState *s = opaque;
+ int reg = addr >> s->it_shift;
+
+ DPRINTF("write 0x%04x to reg %s\n", (uint16_t)data, reg_names[reg]);
switch (reg) {
/* Command register */
case SONIC_CR:
- do_command(s, val);
+ dp8393x_do_command(s, data);
break;
/* Prevent write to read-only registers */
case SONIC_CAP2:
@@ -530,37 +536,37 @@ static void write_register(dp8393xState *s, int reg, uint16_t val)
/* Accept write to some registers only when in reset mode */
case SONIC_DCR:
if (s->regs[SONIC_CR] & SONIC_CR_RST) {
- s->regs[reg] = val & 0xbfff;
+ s->regs[reg] = data & 0xbfff;
} else {
DPRINTF("writing to DCR invalid\n");
}
break;
case SONIC_DCR2:
if (s->regs[SONIC_CR] & SONIC_CR_RST) {
- s->regs[reg] = val & 0xf017;
+ s->regs[reg] = data & 0xf017;
} else {
DPRINTF("writing to DCR2 invalid\n");
}
break;
/* 12 lower bytes are Read Only */
case SONIC_TCR:
- s->regs[reg] = val & 0xf000;
+ s->regs[reg] = data & 0xf000;
break;
/* 9 lower bytes are Read Only */
case SONIC_RCR:
- s->regs[reg] = val & 0xffe0;
+ s->regs[reg] = data & 0xffe0;
break;
/* Ignore most significant bit */
case SONIC_IMR:
- s->regs[reg] = val & 0x7fff;
+ s->regs[reg] = data & 0x7fff;
dp8393x_update_irq(s);
break;
/* Clear bits by writing 1 to them */
case SONIC_ISR:
- val &= s->regs[reg];
- s->regs[reg] &= ~val;
- if (val & SONIC_ISR_RBE) {
- do_read_rra(s);
+ data &= s->regs[reg];
+ s->regs[reg] &= ~data;
+ if (data & SONIC_ISR_RBE) {
+ dp8393x_do_read_rra(s);
}
dp8393x_update_irq(s);
break;
@@ -569,24 +575,32 @@ static void write_register(dp8393xState *s, int reg, uint16_t val)
case SONIC_REA:
case SONIC_RRP:
case SONIC_RWP:
- s->regs[reg] = val & 0xfffe;
+ s->regs[reg] = data & 0xfffe;
break;
/* Invert written value for some registers */
case SONIC_CRCT:
case SONIC_FAET:
case SONIC_MPT:
- s->regs[reg] = val ^ 0xffff;
+ s->regs[reg] = data ^ 0xffff;
break;
/* All other registers have no special constraint */
default:
- s->regs[reg] = val;
+ s->regs[reg] = data;
}
if (reg == SONIC_WT0 || reg == SONIC_WT1) {
- set_next_tick(s);
+ dp8393x_set_next_tick(s);
}
}
+static const MemoryRegionOps dp8393x_ops = {
+ .read = dp8393x_read,
+ .write = dp8393x_write,
+ .impl.min_access_size = 2,
+ .impl.max_access_size = 2,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
static void dp8393x_watchdog(void *opaque)
{
dp8393xState *s = opaque;
@@ -597,84 +611,14 @@ static void dp8393x_watchdog(void *opaque)
s->regs[SONIC_WT1] = 0xffff;
s->regs[SONIC_WT0] = 0xffff;
- set_next_tick(s);
+ dp8393x_set_next_tick(s);
/* Signal underflow */
s->regs[SONIC_ISR] |= SONIC_ISR_TC;
dp8393x_update_irq(s);
}
-static uint32_t dp8393x_readw(void *opaque, hwaddr addr)
-{
- dp8393xState *s = opaque;
- int reg;
-
- if ((addr & ((1 << s->it_shift) - 1)) != 0) {
- return 0;
- }
-
- reg = addr >> s->it_shift;
- return read_register(s, reg);
-}
-
-static uint32_t dp8393x_readb(void *opaque, hwaddr addr)
-{
- uint16_t v = dp8393x_readw(opaque, addr & ~0x1);
- return (v >> (8 * (addr & 0x1))) & 0xff;
-}
-
-static uint32_t dp8393x_readl(void *opaque, hwaddr addr)
-{
- uint32_t v;
- v = dp8393x_readw(opaque, addr);
- v |= dp8393x_readw(opaque, addr + 2) << 16;
- return v;
-}
-
-static void dp8393x_writew(void *opaque, hwaddr addr, uint32_t val)
-{
- dp8393xState *s = opaque;
- int reg;
-
- if ((addr & ((1 << s->it_shift) - 1)) != 0) {
- return;
- }
-
- reg = addr >> s->it_shift;
-
- write_register(s, reg, (uint16_t)val);
-}
-
-static void dp8393x_writeb(void *opaque, hwaddr addr, uint32_t val)
-{
- uint16_t old_val = dp8393x_readw(opaque, addr & ~0x1);
-
- switch (addr & 3) {
- case 0:
- val = val | (old_val & 0xff00);
- break;
- case 1:
- val = (val << 8) | (old_val & 0x00ff);
- break;
- }
- dp8393x_writew(opaque, addr & ~0x1, val);
-}
-
-static void dp8393x_writel(void *opaque, hwaddr addr, uint32_t val)
-{
- dp8393x_writew(opaque, addr, val & 0xffff);
- dp8393x_writew(opaque, addr + 2, (val >> 16) & 0xffff);
-}
-
-static const MemoryRegionOps dp8393x_ops = {
- .old_mmio = {
- .read = { dp8393x_readb, dp8393x_readw, dp8393x_readl, },
- .write = { dp8393x_writeb, dp8393x_writew, dp8393x_writel, },
- },
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static int nic_can_receive(NetClientState *nc)
+static int dp8393x_can_receive(NetClientState *nc)
{
dp8393xState *s = qemu_get_nic_opaque(nc);
@@ -685,7 +629,8 @@ static int nic_can_receive(NetClientState *nc)
return 1;
}
-static int receive_filter(dp8393xState *s, const uint8_t * buf, int size)
+static int dp8393x_receive_filter(dp8393xState *s, const uint8_t * buf,
+ int size)
{
static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
int i;
@@ -723,7 +668,8 @@ static int receive_filter(dp8393xState *s, const uint8_t * buf, int size)
return -1;
}
-static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
+static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
+ size_t size)
{
dp8393xState *s = qemu_get_nic_opaque(nc);
uint16_t data[10];
@@ -737,7 +683,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
s->regs[SONIC_RCR] &= ~(SONIC_RCR_PRX | SONIC_RCR_LBK | SONIC_RCR_FAER |
SONIC_RCR_CRCR | SONIC_RCR_LPKT | SONIC_RCR_BC | SONIC_RCR_MC);
- packet_type = receive_filter(s, buf, size);
+ packet_type = dp8393x_receive_filter(s, buf, size);
if (packet_type < 0) {
DPRINTF("packet not for netcard\n");
return -1;
@@ -750,7 +696,8 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
/* Are we still in resource exhaustion? */
size = sizeof(uint16_t) * 1 * width;
address = ((s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA]) + sizeof(uint16_t) * 5 * width;
- s->memory_rw(s->mem_opaque, address, (uint8_t*)data, size, 0);
+ address_space_rw(&s->as, address, MEMTXATTRS_UNSPECIFIED,
+ (uint8_t *)data, size, 0);
if (data[0 * width] & 0x1) {
/* Still EOL ; stop reception */
return -1;
@@ -764,18 +711,16 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
s->regs[SONIC_TRBA0] = s->regs[SONIC_CRBA0];
/* Calculate the ethernet checksum */
-#ifdef SONIC_CALCULATE_RXCRC
checksum = cpu_to_le32(crc32(0, buf, rx_len));
-#else
- checksum = 0;
-#endif
/* Put packet into RBA */
DPRINTF("Receive packet at %08x\n", (s->regs[SONIC_CRBA1] << 16) | s->regs[SONIC_CRBA0]);
address = (s->regs[SONIC_CRBA1] << 16) | s->regs[SONIC_CRBA0];
- s->memory_rw(s->mem_opaque, address, (uint8_t*)buf, rx_len, 1);
+ address_space_rw(&s->as, address,
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)buf, rx_len, 1);
address += rx_len;
- s->memory_rw(s->mem_opaque, address, (uint8_t*)&checksum, 4, 1);
+ address_space_rw(&s->as, address,
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)&checksum, 4, 1);
rx_len += 4;
s->regs[SONIC_CRBA1] = address >> 16;
s->regs[SONIC_CRBA0] = address & 0xffff;
@@ -803,29 +748,30 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
data[3 * width] = s->regs[SONIC_TRBA1]; /* pkt_ptr1 */
data[4 * width] = s->regs[SONIC_RSC]; /* seq_no */
size = sizeof(uint16_t) * 5 * width;
- s->memory_rw(s->mem_opaque, (s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA], (uint8_t *)data, size, 1);
+ address_space_rw(&s->as, (s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA],
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 1);
/* Move to next descriptor */
size = sizeof(uint16_t) * width;
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
((s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA]) + sizeof(uint16_t) * 5 * width,
- (uint8_t *)data, size, 0);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->regs[SONIC_LLFA] = data[0 * width];
if (s->regs[SONIC_LLFA] & 0x1) {
/* EOL detected */
s->regs[SONIC_ISR] |= SONIC_ISR_RDE;
} else {
data[0 * width] = 0; /* in_use */
- s->memory_rw(s->mem_opaque,
+ address_space_rw(&s->as,
((s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA]) + sizeof(uint16_t) * 6 * width,
- (uint8_t *)data, size, 1);
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, sizeof(uint16_t), 1);
s->regs[SONIC_CRDA] = s->regs[SONIC_LLFA];
s->regs[SONIC_ISR] |= SONIC_ISR_PKTRX;
s->regs[SONIC_RSC] = (s->regs[SONIC_RSC] & 0xff00) | (((s->regs[SONIC_RSC] & 0x00ff) + 1) & 0x00ff);
if (s->regs[SONIC_RCR] & SONIC_RCR_LPKT) {
/* Read next RRA */
- do_read_rra(s);
+ dp8393x_do_read_rra(s);
}
}
@@ -835,11 +781,12 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
return size;
}
-static void nic_reset(void *opaque)
+static void dp8393x_reset(DeviceState *dev)
{
- dp8393xState *s = opaque;
+ dp8393xState *s = DP8393X(dev);
timer_del(s->watchdog);
+ memset(s->regs, 0, sizeof(s->regs));
s->regs[SONIC_CR] = SONIC_CR_RST | SONIC_CR_STP | SONIC_CR_RXDIS;
s->regs[SONIC_DCR] &= ~(SONIC_DCR_EXBUS | SONIC_DCR_LBR);
s->regs[SONIC_RCR] &= ~(SONIC_RCR_LB0 | SONIC_RCR_LB1 | SONIC_RCR_BRD | SONIC_RCR_RNT);
@@ -862,39 +809,91 @@ static void nic_reset(void *opaque)
static NetClientInfo net_dp83932_info = {
.type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
- .can_receive = nic_can_receive,
- .receive = nic_receive,
+ .can_receive = dp8393x_can_receive,
+ .receive = dp8393x_receive,
};
-void dp83932_init(NICInfo *nd, hwaddr base, int it_shift,
- MemoryRegion *address_space,
- qemu_irq irq, void* mem_opaque,
- void (*memory_rw)(void *opaque, hwaddr addr, uint8_t *buf, int len, int is_write))
+static void dp8393x_instance_init(Object *obj)
{
- dp8393xState *s;
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ dp8393xState *s = DP8393X(obj);
+
+ sysbus_init_mmio(sbd, &s->mmio);
+ sysbus_init_mmio(sbd, &s->prom);
+ sysbus_init_irq(sbd, &s->irq);
+}
- qemu_check_nic_model(nd, "dp83932");
+static void dp8393x_realize(DeviceState *dev, Error **errp)
+{
+ dp8393xState *s = DP8393X(dev);
+ int i, checksum;
+ uint8_t *prom;
- s = g_malloc0(sizeof(dp8393xState));
+ address_space_init(&s->as, s->dma_mr, "dp8393x");
+ memory_region_init_io(&s->mmio, OBJECT(dev), &dp8393x_ops, s,
+ "dp8393x-regs", 0x40 << s->it_shift);
+
+ s->nic = qemu_new_nic(&net_dp83932_info, &s->conf,
+ object_get_typename(OBJECT(dev)), dev->id, s);
+ qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
- s->address_space = address_space;
- s->mem_opaque = mem_opaque;
- s->memory_rw = memory_rw;
- s->it_shift = it_shift;
- s->irq = irq;
s->watchdog = timer_new_ns(QEMU_CLOCK_VIRTUAL, dp8393x_watchdog, s);
s->regs[SONIC_SR] = 0x0004; /* only revision recognized by Linux */
- s->conf.macaddr = nd->macaddr;
- s->conf.peers.ncs[0] = nd->netdev;
+ memory_region_init_rom_device(&s->prom, OBJECT(dev), NULL, NULL,
+ "dp8393x-prom", SONIC_PROM_SIZE, NULL);
+ prom = memory_region_get_ram_ptr(&s->prom);
+ checksum = 0;
+ for (i = 0; i < 6; i++) {
+ prom[i] = s->conf.macaddr.a[i];
+ checksum += prom[i];
+ if (checksum > 0xff) {
+ checksum = (checksum + 1) & 0xff;
+ }
+ }
+ prom[7] = 0xff - checksum;
+}
- s->nic = qemu_new_nic(&net_dp83932_info, &s->conf, nd->model, nd->name, s);
+static const VMStateDescription vmstate_dp8393x = {
+ .name = "dp8393x",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField []) {
+ VMSTATE_BUFFER_UNSAFE(cam, dp8393xState, 0, 16 * 6),
+ VMSTATE_UINT16_ARRAY(regs, dp8393xState, 0x40),
+ VMSTATE_END_OF_LIST()
+ }
+};
- qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
- qemu_register_reset(nic_reset, s);
- nic_reset(s);
+static Property dp8393x_properties[] = {
+ DEFINE_NIC_PROPERTIES(dp8393xState, conf),
+ DEFINE_PROP_PTR("dma_mr", dp8393xState, dma_mr),
+ DEFINE_PROP_UINT8("it_shift", dp8393xState, it_shift, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void dp8393x_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
- memory_region_init_io(&s->mmio, NULL, &dp8393x_ops, s,
- "dp8393x", 0x40 << it_shift);
- memory_region_add_subregion(address_space, base, &s->mmio);
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+ dc->realize = dp8393x_realize;
+ dc->reset = dp8393x_reset;
+ dc->vmsd = &vmstate_dp8393x;
+ dc->props = dp8393x_properties;
}
+
+static const TypeInfo dp8393x_info = {
+ .name = TYPE_DP8393X,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(dp8393xState),
+ .instance_init = dp8393x_instance_init,
+ .class_init = dp8393x_class_init,
+};
+
+static void dp8393x_register_types(void)
+{
+ type_register_static(&dp8393x_info);
+}
+
+type_init(dp8393x_register_types)
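
A standalone sketch of the PROM layout written by dp8393x_realize() above: the MAC address occupies bytes 0..5 and byte 7 holds a checksum computed with the same end-around carry. The MAC value here is only an example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t prom[8] = { 0 };
    const uint8_t mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };  /* example MAC */
    int checksum = 0;
    int i;

    for (i = 0; i < 6; i++) {
        prom[i] = mac[i];
        checksum += prom[i];
        if (checksum > 0xff) {
            checksum = (checksum + 1) & 0xff;   /* same end-around carry as above */
        }
    }
    prom[7] = 0xff - checksum;

    printf("prom[7] (checksum byte) = 0x%02x\n", prom[7]);
    return 0;
}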
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 091d61acc3..bab8e2abfb 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -1370,6 +1370,7 @@ static const VMStateDescription vmstate_e1000_mit_state = {
.name = "e1000/mit_state",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = e1000_mit_state_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT32(mac_reg[RDTR], E1000State),
VMSTATE_UINT32(mac_reg[RADV], E1000State),
@@ -1457,13 +1458,9 @@ static const VMStateDescription vmstate_e1000 = {
VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_e1000_mit_state,
- .needed = e1000_mit_state_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_e1000_mit_state,
+ NULL
}
};
diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
index bdfd38f4ca..68b9981983 100644
--- a/hw/net/pcnet.c
+++ b/hw/net/pcnet.c
@@ -1241,6 +1241,14 @@ static void pcnet_transmit(PCNetState *s)
}
bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
+
+ /* If a multi-tmd packet does not fit in s->buffer, skip it silently.
Note: this is not what real hw does */
+ if (s->xmit_pos + bcnt > sizeof(s->buffer)) {
+ s->xmit_pos = -1;
+ goto txdone;
+ }
+
s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
s->xmit_pos += bcnt;
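
A standalone sketch of the bounds check added above, assuming the 4 KiB transmit staging buffer used by the model; the sizes are illustrative:

#include <stdio.h>
#include <stddef.h>

#define XMIT_BUF_SIZE 4096      /* assumed size of the model's transmit buffer */

static int append_fragment(size_t *xmit_pos, size_t bcnt)
{
    if (*xmit_pos + bcnt > XMIT_BUF_SIZE) {
        return -1;              /* oversized multi-TMD packet: drop silently */
    }
    *xmit_pos += bcnt;
    return 0;
}

int main(void)
{
    size_t pos = 3000;
    printf("append 2000 bytes -> %d\n", append_fragment(&pos, 2000));   /* -1 */
    return 0;
}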
diff --git a/hw/net/rocker/qmp-norocker.c b/hw/net/rocker/qmp-norocker.c
new file mode 100644
index 0000000000..f253747361
--- /dev/null
+++ b/hw/net/rocker/qmp-norocker.c
@@ -0,0 +1,50 @@
+/*
+ * QMP Target options - Commands handled based on a target config
+ * versus a host config
+ *
+ * Copyright (c) 2015 David Ahern <dsahern@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "qemu-common.h"
+#include "qmp-commands.h"
+#include "qapi/qmp/qerror.h"
+
+RockerSwitch *qmp_query_rocker(const char *name, Error **errp)
+{
+ error_set(errp, QERR_FEATURE_DISABLED, "rocker");
+ return NULL;
+};
+
+RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp)
+{
+ error_set(errp, QERR_FEATURE_DISABLED, "rocker");
+ return NULL;
+};
+
+RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
+ bool has_tbl_id,
+ uint32_t tbl_id,
+ Error **errp)
+{
+ error_set(errp, QERR_FEATURE_DISABLED, "rocker");
+ return NULL;
+};
+
+RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
+ bool has_type,
+ uint8_t type,
+ Error **errp)
+{
+ error_set(errp, QERR_FEATURE_DISABLED, "rocker");
+ return NULL;
+};
diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c
index 55b6c46157..4d25842509 100644
--- a/hw/net/rocker/rocker.c
+++ b/hw/net/rocker/rocker.c
@@ -94,6 +94,51 @@ World *rocker_get_world(Rocker *r, enum rocker_world_type type)
return NULL;
}
+RockerSwitch *qmp_query_rocker(const char *name, Error **errp)
+{
+ RockerSwitch *rocker = g_malloc0(sizeof(*rocker));
+ Rocker *r;
+
+ r = rocker_find(name);
+ if (!r) {
+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+ "rocker %s not found", name);
+ return NULL;
+ }
+
+ rocker->name = g_strdup(r->name);
+ rocker->id = r->switch_id;
+ rocker->ports = r->fp_ports;
+
+ return rocker;
+}
+
+RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp)
+{
+ RockerPortList *list = NULL;
+ Rocker *r;
+ int i;
+
+ r = rocker_find(name);
+ if (!r) {
+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+ "rocker %s not found", name);
+ return NULL;
+ }
+
+ for (i = r->fp_ports - 1; i >= 0; i--) {
+ RockerPortList *info = g_malloc0(sizeof(*info));
+ info->value = g_malloc0(sizeof(*info->value));
+ struct fp_port *port = r->fp_port[i];
+
+ fp_port_get_info(port, info);
+ info->next = list;
+ list = info;
+ }
+
+ return list;
+}
+
uint32_t rocker_fp_ports(Rocker *r)
{
return r->fp_ports;
@@ -238,6 +283,7 @@ static int cmd_get_port_settings(Rocker *r,
uint8_t duplex;
uint8_t autoneg;
uint8_t learning;
+ char *phys_name;
MACAddr macaddr;
enum rocker_world_type mode;
size_t tlv_size;
@@ -265,6 +311,7 @@ static int cmd_get_port_settings(Rocker *r,
fp_port_get_macaddr(fp_port, &macaddr);
mode = world_type(fp_port_get_world(fp_port));
learning = fp_port_get_learning(fp_port);
+ phys_name = fp_port_get_name(fp_port);
tlv_size = rocker_tlv_total_size(0) + /* nest */
rocker_tlv_total_size(sizeof(uint32_t)) + /* pport */
@@ -273,7 +320,8 @@ static int cmd_get_port_settings(Rocker *r,
rocker_tlv_total_size(sizeof(uint8_t)) + /* autoneg */
rocker_tlv_total_size(sizeof(macaddr.a)) + /* macaddr */
rocker_tlv_total_size(sizeof(uint8_t)) + /* mode */
- rocker_tlv_total_size(sizeof(uint8_t)); /* learning */
+ rocker_tlv_total_size(sizeof(uint8_t)) + /* learning */
+ rocker_tlv_total_size(strlen(phys_name));
if (tlv_size > desc_buf_size(info)) {
return -ROCKER_EMSGSIZE;
@@ -290,6 +338,8 @@ static int cmd_get_port_settings(Rocker *r,
rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MODE, mode);
rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
learning);
+ rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,
+ strlen(phys_name), phys_name);
rocker_tlv_nest_end(buf, &pos, nest);
return desc_set_buf(info, tlv_size);
@@ -1277,6 +1327,22 @@ static int pci_rocker_init(PCIDevice *dev)
goto err_duplicate;
}
+ /* Rocker name is passed in port name requests to OS with the intention
+ * that the name is used in interface names. Limit the length of the
+ * rocker name to avoid naming problems in the OS. Also, adding the
+ * port number as p# and unganged breakout b#, where # is at most 2
+ * digits, so leave room for it too (-1 for string terminator, -3 for
+ * p# and -3 for b#)
+ */
+#define ROCKER_IFNAMSIZ 16
+#define MAX_ROCKER_NAME_LEN (ROCKER_IFNAMSIZ - 1 - 3 - 3)
+ if (strlen(r->name) > MAX_ROCKER_NAME_LEN) {
+ fprintf(stderr,
+ "rocker: name too long; please shorten to at most %d chars\n",
+ MAX_ROCKER_NAME_LEN);
+ return -EINVAL;
+ }
+
if (memcmp(&r->fp_start_macaddr, &zero, sizeof(zero)) == 0) {
memcpy(&r->fp_start_macaddr, &dflt, sizeof(dflt));
r->fp_start_macaddr.a[4] += (sw_index++);
diff --git a/hw/net/rocker/rocker_fp.c b/hw/net/rocker/rocker_fp.c
index 2f1e3b348a..d8d934c396 100644
--- a/hw/net/rocker/rocker_fp.c
+++ b/hw/net/rocker/rocker_fp.c
@@ -41,11 +41,26 @@ struct fp_port {
NICConf conf;
};
+char *fp_port_get_name(FpPort *port)
+{
+ return port->name;
+}
+
bool fp_port_get_link_up(FpPort *port)
{
return !qemu_get_queue(port->nic)->link_down;
}
+void fp_port_get_info(FpPort *port, RockerPortList *info)
+{
+ info->value->name = g_strdup(port->name);
+ info->value->enabled = port->enabled;
+ info->value->link_up = fp_port_get_link_up(port);
+ info->value->speed = port->speed;
+ info->value->duplex = port->duplex;
+ info->value->autoneg = port->autoneg;
+}
+
void fp_port_get_macaddr(FpPort *port, MACAddr *macaddr)
{
memcpy(macaddr->a, port->conf.macaddr.a, sizeof(macaddr->a));
@@ -173,8 +188,19 @@ bool fp_port_enabled(FpPort *port)
return port->enabled;
}
+static void fp_port_set_link(FpPort *port, bool up)
+{
+ NetClientState *nc = qemu_get_queue(port->nic);
+
+ if (up == nc->link_down) {
+ nc->link_down = !up;
+ nc->info->link_status_changed(nc);
+ }
+}
+
void fp_port_enable(FpPort *port)
{
+ fp_port_set_link(port, true);
port->enabled = true;
DPRINTF("port %d enabled\n", port->index);
}
@@ -182,6 +208,7 @@ void fp_port_enable(FpPort *port)
void fp_port_disable(FpPort *port)
{
port->enabled = false;
+ fp_port_set_link(port, false);
DPRINTF("port %d disabled\n", port->index);
}
@@ -201,7 +228,7 @@ FpPort *fp_port_alloc(Rocker *r, char *sw_name,
/* front-panel switch port names are 1-based */
- port->name = g_strdup_printf("%s.%d", sw_name, port->pport);
+ port->name = g_strdup_printf("%sp%d", sw_name, port->pport);
memcpy(port->conf.macaddr.a, start_mac, sizeof(port->conf.macaddr.a));
port->conf.macaddr.a[5] += index;
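For concreteness, the naming change above yields names like the following (the switch name is an assumed example, not taken from the patch):

/* with an assumed switch name "sw1" and pport 3:
 *   g_strdup_printf("%sp%d", "sw1", 3)  =>  "sw1p3"   (was "sw1.3")
 * an unganged breakout would extend this to e.g. "sw1p3b2", which is exactly
 * the p#/b# budget that MAX_ROCKER_NAME_LEN reserves in rocker.c. */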
diff --git a/hw/net/rocker/rocker_fp.h b/hw/net/rocker/rocker_fp.h
index a5f28f120d..ab80fd833c 100644
--- a/hw/net/rocker/rocker_fp.h
+++ b/hw/net/rocker/rocker_fp.h
@@ -26,7 +26,9 @@ typedef struct fp_port FpPort;
int fp_port_eg(FpPort *port, const struct iovec *iov, int iovcnt);
+char *fp_port_get_name(FpPort *port);
bool fp_port_get_link_up(FpPort *port);
+void fp_port_get_info(FpPort *port, RockerPortList *info);
void fp_port_get_macaddr(FpPort *port, MACAddr *macaddr);
void fp_port_set_macaddr(FpPort *port, MACAddr *macaddr);
uint8_t fp_port_get_learning(FpPort *port);
diff --git a/hw/net/rocker/rocker_hw.h b/hw/net/rocker/rocker_hw.h
index c9c85a75bd..fe639badd4 100644
--- a/hw/net/rocker/rocker_hw.h
+++ b/hw/net/rocker/rocker_hw.h
@@ -179,6 +179,7 @@ enum {
ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */
ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */
__ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
ROCKER_TLV_CMD_PORT_SETTINGS_MAX = __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c
index 1bcb7af5ef..b25a17d6d7 100644
--- a/hw/net/rocker/rocker_of_dpa.c
+++ b/hw/net/rocker/rocker_of_dpa.c
@@ -2302,6 +2302,318 @@ static void of_dpa_uninit(World *world)
g_hash_table_destroy(of_dpa->flow_tbl);
}
+struct of_dpa_flow_fill_context {
+ RockerOfDpaFlowList *list;
+ uint32_t tbl_id;
+};
+
+static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
+{
+ struct of_dpa_flow *flow = value;
+ struct of_dpa_flow_key *key = &flow->key;
+ struct of_dpa_flow_key *mask = &flow->mask;
+ struct of_dpa_flow_fill_context *flow_context = user_data;
+ RockerOfDpaFlowList *new;
+ RockerOfDpaFlow *nflow;
+ RockerOfDpaFlowKey *nkey;
+ RockerOfDpaFlowMask *nmask;
+ RockerOfDpaFlowAction *naction;
+
+ if (flow_context->tbl_id != -1 &&
+ flow_context->tbl_id != key->tbl_id) {
+ return;
+ }
+
+ new = g_malloc0(sizeof(*new));
+ nflow = new->value = g_malloc0(sizeof(*nflow));
+ nkey = nflow->key = g_malloc0(sizeof(*nkey));
+ nmask = nflow->mask = g_malloc0(sizeof(*nmask));
+ naction = nflow->action = g_malloc0(sizeof(*naction));
+
+ nflow->cookie = flow->cookie;
+ nflow->hits = flow->stats.hits;
+ nkey->priority = flow->priority;
+ nkey->tbl_id = key->tbl_id;
+
+ if (key->in_pport || mask->in_pport) {
+ nkey->has_in_pport = true;
+ nkey->in_pport = key->in_pport;
+ }
+
+ if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
+ nmask->has_in_pport = true;
+ nmask->in_pport = mask->in_pport;
+ }
+
+ if (key->eth.vlan_id || mask->eth.vlan_id) {
+ nkey->has_vlan_id = true;
+ nkey->vlan_id = ntohs(key->eth.vlan_id);
+ }
+
+ if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
+ nmask->has_vlan_id = true;
+ nmask->vlan_id = ntohs(mask->eth.vlan_id);
+ }
+
+ if (key->tunnel_id || mask->tunnel_id) {
+ nkey->has_tunnel_id = true;
+ nkey->tunnel_id = key->tunnel_id;
+ }
+
+ if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
+ nmask->has_tunnel_id = true;
+ nmask->tunnel_id = mask->tunnel_id;
+ }
+
+ if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
+ memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
+ nkey->has_eth_src = true;
+ nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
+ }
+
+ if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
+ nmask->has_eth_src = true;
+ nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
+ }
+
+ if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
+ memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
+ nkey->has_eth_dst = true;
+ nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
+ }
+
+ if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
+ nmask->has_eth_dst = true;
+ nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
+ }
+
+ if (key->eth.type) {
+
+ nkey->has_eth_type = true;
+ nkey->eth_type = ntohs(key->eth.type);
+
+ switch (ntohs(key->eth.type)) {
+ case 0x0800:
+ case 0x86dd:
+ if (key->ip.proto || mask->ip.proto) {
+ nkey->has_ip_proto = true;
+ nkey->ip_proto = key->ip.proto;
+ }
+ if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
+ nmask->has_ip_proto = true;
+ nmask->ip_proto = mask->ip.proto;
+ }
+ if (key->ip.tos || mask->ip.tos) {
+ nkey->has_ip_tos = true;
+ nkey->ip_tos = key->ip.tos;
+ }
+ if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
+ nmask->has_ip_tos = true;
+ nmask->ip_tos = mask->ip.tos;
+ }
+ break;
+ }
+
+ switch (ntohs(key->eth.type)) {
+ case 0x0800:
+ if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
+ char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
+ int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);
+ nkey->has_ip_dst = true;
+ nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
+ }
+ break;
+ }
+ }
+
+ if (flow->action.goto_tbl) {
+ naction->has_goto_tbl = true;
+ naction->goto_tbl = flow->action.goto_tbl;
+ }
+
+ if (flow->action.write.group_id) {
+ naction->has_group_id = true;
+ naction->group_id = flow->action.write.group_id;
+ }
+
+ if (flow->action.apply.new_vlan_id) {
+ naction->has_new_vlan_id = true;
+ naction->new_vlan_id = flow->action.apply.new_vlan_id;
+ }
+
+ new->next = flow_context->list;
+ flow_context->list = new;
+}
+
+RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
+ bool has_tbl_id,
+ uint32_t tbl_id,
+ Error **errp)
+{
+ struct rocker *r;
+ struct world *w;
+ struct of_dpa *of_dpa;
+ struct of_dpa_flow_fill_context fill_context = {
+ .list = NULL,
+ .tbl_id = tbl_id,
+ };
+
+ r = rocker_find(name);
+ if (!r) {
+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+ "rocker %s not found", name);
+ return NULL;
+ }
+
+ w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
+ if (!w) {
+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+ "rocker %s doesn't have OF-DPA world", name);
+ return NULL;
+ }
+
+ of_dpa = world_private(w);
+
+ g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);
+
+ return fill_context.list;
+}
+
+struct of_dpa_group_fill_context {
+ RockerOfDpaGroupList *list;
+ uint8_t type;
+};
+
+static void of_dpa_group_fill(void *key, void *value, void *user_data)
+{
+ struct of_dpa_group *group = value;
+ struct of_dpa_group_fill_context *flow_context = user_data;
+ RockerOfDpaGroupList *new;
+ RockerOfDpaGroup *ngroup;
+ struct uint32List *id;
+ int i;
+
+ if (flow_context->type != 9 &&
+ flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
+ return;
+ }
+
+ new = g_malloc0(sizeof(*new));
+ ngroup = new->value = g_malloc0(sizeof(*ngroup));
+
+ ngroup->id = group->id;
+
+ ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);
+
+ switch (ngroup->type) {
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
+ ngroup->has_vlan_id = true;
+ ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
+ ngroup->has_pport = true;
+ ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
+ ngroup->has_out_pport = true;
+ ngroup->out_pport = group->l2_interface.out_pport;
+ ngroup->has_pop_vlan = true;
+ ngroup->pop_vlan = group->l2_interface.pop_vlan;
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
+ ngroup->has_index = true;
+ ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
+ ngroup->has_group_id = true;
+ ngroup->group_id = group->l2_rewrite.group_id;
+ if (group->l2_rewrite.vlan_id) {
+ ngroup->has_set_vlan_id = true;
+ ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
+ }
+ if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
+ ngroup->has_set_eth_src = true;
+ ngroup->set_eth_src =
+ qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
+ }
+ if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
+ ngroup->has_set_eth_dst = true;
+ ngroup->set_eth_dst =
+ qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
+ }
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+ ngroup->has_vlan_id = true;
+ ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
+ ngroup->has_index = true;
+ ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
+ for (i = 0; i < group->l2_flood.group_count; i++) {
+ ngroup->has_group_ids = true;
+ id = g_malloc0(sizeof(*id));
+ id->value = group->l2_flood.group_ids[i];
+ id->next = ngroup->group_ids;
+ ngroup->group_ids = id;
+ }
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
+ ngroup->has_index = true;
+ ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
+ ngroup->has_group_id = true;
+ ngroup->group_id = group->l3_unicast.group_id;
+ if (group->l3_unicast.vlan_id) {
+ ngroup->has_set_vlan_id = true;
+ ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
+ }
+ if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
+ ngroup->has_set_eth_src = true;
+ ngroup->set_eth_src =
+ qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
+ }
+ if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
+ ngroup->has_set_eth_dst = true;
+ ngroup->set_eth_dst =
+ qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
+ }
+ if (group->l3_unicast.ttl_check) {
+ ngroup->has_ttl_check = true;
+ ngroup->ttl_check = group->l3_unicast.ttl_check;
+ }
+ break;
+ }
+
+ new->next = flow_context->list;
+ flow_context->list = new;
+}
+
+RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
+ bool has_type,
+ uint8_t type,
+ Error **errp)
+{
+ struct rocker *r;
+ struct world *w;
+ struct of_dpa *of_dpa;
+ struct of_dpa_group_fill_context fill_context = {
+ .list = NULL,
+ .type = type,
+ };
+
+ r = rocker_find(name);
+ if (!r) {
+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+ "rocker %s not found", name);
+ return NULL;
+ }
+
+ w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
+ if (!w) {
+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+ "rocker %s doesn't have OF-DPA world", name);
+ return NULL;
+ }
+
+ of_dpa = world_private(w);
+
+ g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);
+
+ return fill_context.list;
+}
+
static WorldOps of_dpa_ops = {
.init = of_dpa_init,
.uninit = of_dpa_uninit,
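The two qmp_query_* handlers added above share one idiom: g_hash_table_foreach() only hands the callback a key, a value and a single user_data pointer, so the accumulated result list and the filter travel together in a small on-stack context struct. A minimal standalone sketch of that pattern (names are illustrative, not from this file):

#include <glib.h>

struct fill_ctx {
    GSList *results;   /* list built up by the callback */
    int wanted;        /* filter value; -1 means "match everything" */
};

/* GHFunc signature: (key, value, user_data) */
static void fill_one(gpointer key, gpointer value, gpointer user_data)
{
    struct fill_ctx *ctx = user_data;

    if (ctx->wanted != -1 && GPOINTER_TO_INT(key) != ctx->wanted) {
        return;
    }
    ctx->results = g_slist_prepend(ctx->results, value);
}

static GSList *collect(GHashTable *tbl, int wanted)
{
    struct fill_ctx ctx = { .results = NULL, .wanted = wanted };

    g_hash_table_foreach(tbl, fill_one, &ctx);
    return ctx.results;   /* caller frees with g_slist_free() */
}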
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index f868108dfe..e0db4727ae 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -3240,6 +3240,7 @@ static const VMStateDescription vmstate_rtl8139_hotplug_ready ={
.name = "rtl8139/hotplug_ready",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = rtl8139_hotplug_ready_needed,
.fields = (VMStateField[]) {
VMSTATE_END_OF_LIST()
}
@@ -3335,13 +3336,9 @@ static const VMStateDescription vmstate_rtl8139 = {
VMSTATE_UINT32_V(cplus_enabled, RTL8139State, 4),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_rtl8139_hotplug_ready,
- .needed = rtl8139_hotplug_ready_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_rtl8139_hotplug_ready,
+ NULL
}
};
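This hunk, and the vmxnet3 and piix3 hunks that follow, all perform the same mechanical conversion: the .needed predicate moves into the subsection's own VMStateDescription, and the parent's .subsections becomes a NULL-terminated array of pointers. A minimal sketch of the resulting shape, using a hypothetical FooState device rather than any real one:

#include "hw/hw.h"   /* pulls in the VMState macros of this era */

typedef struct FooState {
    uint32_t feature_reg;
    bool feature_enabled;
} FooState;

static bool foo_feature_needed(void *opaque)
{
    FooState *s = opaque;

    return s->feature_enabled;
}

static const VMStateDescription vmstate_foo_feature = {
    .name = "foo/feature",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = foo_feature_needed,       /* now lives in the subsection itself */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(feature_reg, FooState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_foo_feature,           /* NULL-terminated pointer array */
        NULL
    }
};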
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 34ffafd5a6..104a0f599b 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -2226,6 +2226,7 @@ static const VMStateDescription vmxstate_vmxnet3_mcast_list = {
.version_id = 1,
.minimum_version_id = 1,
.pre_load = vmxnet3_mcast_list_pre_load,
+ .needed = vmxnet3_mc_list_needed,
.fields = (VMStateField[]) {
VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, 0,
mcast_list_buff_size),
@@ -2470,14 +2471,9 @@ static const VMStateDescription vmstate_vmxnet3 = {
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmxstate_vmxnet3_mcast_list,
- .needed = vmxnet3_mc_list_needed
- },
- {
- /* empty element. */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmxstate_vmxnet3_mcast_list,
+ NULL
}
};
diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
index 68eff77983..88481b78c4 100644
--- a/hw/nvram/fw_cfg.c
+++ b/hw/nvram/fw_cfg.c
@@ -46,7 +46,6 @@ typedef struct FWCfgEntry {
uint32_t len;
uint8_t *data;
void *callback_opaque;
- FWCfgCallback callback;
FWCfgReadCallback read_callback;
} FWCfgEntry;
@@ -232,19 +231,7 @@ static void fw_cfg_reboot(FWCfgState *s)
static void fw_cfg_write(FWCfgState *s, uint8_t value)
{
- int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
- FWCfgEntry *e = &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
-
- trace_fw_cfg_write(s, value);
-
- if (s->cur_entry & FW_CFG_WRITE_CHANNEL && e->callback &&
- s->cur_offset < e->len) {
- e->data[s->cur_offset++] = value;
- if (s->cur_offset == e->len) {
- e->callback(e->callback_opaque, e->data);
- s->cur_offset = 0;
- }
- }
+ /* nothing, write support removed in QEMU v2.4+ */
}
static int fw_cfg_select(FWCfgState *s, uint16_t key)
@@ -436,6 +423,7 @@ static void fw_cfg_add_bytes_read_callback(FWCfgState *s, uint16_t key,
key &= FW_CFG_ENTRY_MASK;
assert(key < FW_CFG_MAX_ENTRY && len < UINT32_MAX);
+ assert(s->entries[arch][key].data == NULL); /* avoid key conflict */
s->entries[arch][key].data = data;
s->entries[arch][key].len = (uint32_t)len;
@@ -458,7 +446,6 @@ static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key,
s->entries[arch][key].data = data;
s->entries[arch][key].len = len;
s->entries[arch][key].callback_opaque = NULL;
- s->entries[arch][key].callback = NULL;
return ptr;
}
@@ -484,6 +471,16 @@ void fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value)
fw_cfg_add_bytes(s, key, copy, sizeof(value));
}
+void fw_cfg_modify_i16(FWCfgState *s, uint16_t key, uint16_t value)
+{
+ uint16_t *copy, *old;
+
+ copy = g_malloc(sizeof(value));
+ *copy = cpu_to_le16(value);
+ old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
+ g_free(old);
+}
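The new fw_cfg_modify_i16() exists because re-adding an existing key now trips the duplicate-key assertion added to fw_cfg_add_bytes_read_callback() above, so callers that change a value after init must update it in place. As a usage example (reproduced from the mac_newworld.c / mac_oldworld.c hunks later in this same patch):

/* FW_CFG_BOOT_DEVICE is registered once at init time with fw_cfg_add_i16();
 * later boot-order changes go through fw_cfg_modify_i16() instead */
static void fw_cfg_boot_set(void *opaque, const char *boot_device,
                            Error **errp)
{
    fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
}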
+
void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value)
{
uint32_t *copy;
@@ -502,23 +499,6 @@ void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value)
fw_cfg_add_bytes(s, key, copy, sizeof(value));
}
-void fw_cfg_add_callback(FWCfgState *s, uint16_t key, FWCfgCallback callback,
- void *callback_opaque, void *data, size_t len)
-{
- int arch = !!(key & FW_CFG_ARCH_LOCAL);
-
- assert(key & FW_CFG_WRITE_CHANNEL);
-
- key &= FW_CFG_ENTRY_MASK;
-
- assert(key < FW_CFG_MAX_ENTRY && len <= UINT32_MAX);
-
- s->entries[arch][key].data = data;
- s->entries[arch][key].len = (uint32_t)len;
- s->entries[arch][key].callback_opaque = callback_opaque;
- s->entries[arch][key].callback = callback;
-}
-
void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
FWCfgReadCallback callback, void *callback_opaque,
void *data, size_t len)
@@ -535,18 +515,19 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
index = be32_to_cpu(s->files->count);
assert(index < FW_CFG_FILE_SLOTS);
- fw_cfg_add_bytes_read_callback(s, FW_CFG_FILE_FIRST + index,
- callback, callback_opaque, data, len);
-
pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name),
filename);
for (i = 0; i < index; i++) {
if (strcmp(s->files->f[index].name, s->files->f[i].name) == 0) {
- trace_fw_cfg_add_file_dupe(s, s->files->f[index].name);
- return;
+ error_report("duplicate fw_cfg file name: %s",
+ s->files->f[index].name);
+ exit(1);
}
}
+ fw_cfg_add_bytes_read_callback(s, FW_CFG_FILE_FIRST + index,
+ callback, callback_opaque, data, len);
+
s->files->f[index].size = cpu_to_be32(len);
s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index);
trace_fw_cfg_add_file(s, index, s->files->f[index].name, len);
diff --git a/hw/pci-host/pam.c b/hw/pci-host/pam.c
index 8272de3f28..17d826cba5 100644
--- a/hw/pci-host/pam.c
+++ b/hw/pci-host/pam.c
@@ -31,26 +31,6 @@
#include "sysemu/sysemu.h"
#include "hw/pci-host/pam.h"
-void smram_update(MemoryRegion *smram_region, uint8_t smram,
- uint8_t smm_enabled)
-{
- bool smram_enabled;
-
- smram_enabled = ((smm_enabled && (smram & SMRAM_G_SMRAME)) ||
- (smram & SMRAM_D_OPEN));
- memory_region_set_enabled(smram_region, !smram_enabled);
-}
-
-void smram_set_smm(uint8_t *host_smm_enabled, int smm, uint8_t smram,
- MemoryRegion *smram_region)
-{
- uint8_t smm_enabled = (smm != 0);
- if (*host_smm_enabled != smm_enabled) {
- *host_smm_enabled = smm_enabled;
- smram_update(smram_region, smram, *host_smm_enabled);
- }
-}
-
void init_pam(DeviceState *dev, MemoryRegion *ram_memory,
MemoryRegion *system_memory, MemoryRegion *pci_address_space,
PAMMemoryRegion *mem, uint32_t start, uint32_t size)
diff --git a/hw/pci-host/piix.c b/hw/pci-host/piix.c
index 723836fb0e..ed2424c4cd 100644
--- a/hw/pci-host/piix.c
+++ b/hw/pci-host/piix.c
@@ -105,7 +105,7 @@ struct PCII440FXState {
MemoryRegion *ram_memory;
PAMMemoryRegion pam_regions[13];
MemoryRegion smram_region;
- uint8_t smm_enabled;
+ MemoryRegion smram, low_smram;
};
@@ -138,18 +138,10 @@ static void i440fx_update_memory_mappings(PCII440FXState *d)
pam_update(&d->pam_regions[i], i,
pd->config[I440FX_PAM + ((i + 1) / 2)]);
}
- smram_update(&d->smram_region, pd->config[I440FX_SMRAM], d->smm_enabled);
- memory_region_transaction_commit();
-}
-
-static void i440fx_set_smm(int val, void *arg)
-{
- PCII440FXState *d = arg;
- PCIDevice *pd = PCI_DEVICE(d);
-
- memory_region_transaction_begin();
- smram_set_smm(&d->smm_enabled, val, pd->config[I440FX_SMRAM],
- &d->smram_region);
+ memory_region_set_enabled(&d->smram_region,
+ !(pd->config[I440FX_SMRAM] & SMRAM_D_OPEN));
+ memory_region_set_enabled(&d->smram,
+ pd->config[I440FX_SMRAM] & SMRAM_G_SMRAME);
memory_region_transaction_commit();
}
@@ -172,12 +164,13 @@ static int i440fx_load_old(QEMUFile* f, void *opaque, int version_id)
PCII440FXState *d = opaque;
PCIDevice *pd = PCI_DEVICE(d);
int ret, i;
+ uint8_t smm_enabled;
ret = pci_device_load(pd, f);
if (ret < 0)
return ret;
i440fx_update_memory_mappings(d);
- qemu_get_8s(f, &d->smm_enabled);
+ qemu_get_8s(f, &smm_enabled);
if (version_id == 2) {
for (i = 0; i < PIIX_NUM_PIRQS; i++) {
@@ -205,7 +198,10 @@ static const VMStateDescription vmstate_i440fx = {
.post_load = i440fx_post_load,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PCII440FXState),
- VMSTATE_UINT8(smm_enabled, PCII440FXState),
+ /* Used to be smm_enabled, which was basically always zero because
+ * SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code.
+ */
+ VMSTATE_UNUSED(1),
VMSTATE_END_OF_LIST()
}
};
@@ -297,11 +293,7 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp)
static void i440fx_realize(PCIDevice *dev, Error **errp)
{
- PCII440FXState *d = I440FX_PCI_DEVICE(dev);
-
dev->config[I440FX_SMRAM] = 0x02;
-
- cpu_smm_register(&i440fx_set_smm, d);
}
PCIBus *i440fx_init(PCII440FXState **pi440fx_state,
@@ -346,11 +338,23 @@ PCIBus *i440fx_init(PCII440FXState **pi440fx_state,
pc_pci_as_mapping_init(OBJECT(f), f->system_memory,
f->pci_address_space);
+ /* if *disabled* show SMRAM to all CPUs */
memory_region_init_alias(&f->smram_region, OBJECT(d), "smram-region",
f->pci_address_space, 0xa0000, 0x20000);
memory_region_add_subregion_overlap(f->system_memory, 0xa0000,
&f->smram_region, 1);
- memory_region_set_enabled(&f->smram_region, false);
+ memory_region_set_enabled(&f->smram_region, true);
+
+ /* smram, as seen by SMM CPUs */
+ memory_region_init(&f->smram, OBJECT(d), "smram", 1ull << 32);
+ memory_region_set_enabled(&f->smram, true);
+ memory_region_init_alias(&f->low_smram, OBJECT(d), "smram-low",
+ f->ram_memory, 0xa0000, 0x20000);
+ memory_region_set_enabled(&f->low_smram, true);
+ memory_region_add_subregion(&f->smram, 0xa0000, &f->low_smram);
+ object_property_add_const_link(qdev_get_machine(), "smram",
+ OBJECT(&f->smram), &error_abort);
+
init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space,
&f->pam_regions[0], PAM_BIOS_BASE, PAM_BIOS_SIZE);
for (i = 0; i < 12; ++i) {
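The object_property_add_const_link() call above is the producer half of the new SMRAM wiring; the consumer (SMM-aware CPU code, outside this hw/ diff) would look the region up by QOM path. A minimal sketch of that lookup, under the assumption that the machine object sits at /machine so the link is reachable as /machine/smram:

#include "qom/object.h"
#include "exec/memory.h"

/* sketch only: resolve the "smram" const link published by the host bridge */
static MemoryRegion *find_machine_smram(void)
{
    return (MemoryRegion *)object_resolve_path("/machine/smram", NULL);
}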
@@ -578,6 +582,7 @@ static const VMStateDescription vmstate_piix3_rcr = {
.name = "PIIX3/rcr",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = piix3_rcr_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(rcr, PIIX3State),
VMSTATE_END_OF_LIST()
@@ -596,12 +601,9 @@ static const VMStateDescription vmstate_piix3 = {
PIIX_NUM_PIRQS, 3),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_piix3_rcr,
- .needed = piix3_rcr_needed,
- },
- { 0 }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_piix3_rcr,
+ NULL
}
};
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index c8827cc000..bd7409456f 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -198,6 +198,28 @@ static const TypeInfo q35_host_info = {
* MCH D0:F0
*/
+static uint64_t tseg_blackhole_read(void *ptr, hwaddr reg, unsigned size)
+{
+ return 0xffffffff;
+}
+
+static void tseg_blackhole_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ /* nothing */
+}
+
+static const MemoryRegionOps tseg_blackhole_ops = {
+ .read = tseg_blackhole_read,
+ .write = tseg_blackhole_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
/* PCIe MMCFG */
static void mch_update_pciexbar(MCHPCIState *mch)
{
@@ -266,21 +288,70 @@ static void mch_update_pam(MCHPCIState *mch)
static void mch_update_smram(MCHPCIState *mch)
{
PCIDevice *pd = PCI_DEVICE(mch);
+ bool h_smrame = (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME);
+ uint32_t tseg_size;
+
+ /* implement SMRAM.D_LCK */
+ if (pd->config[MCH_HOST_BRIDGE_SMRAM] & MCH_HOST_BRIDGE_SMRAM_D_LCK) {
+ pd->config[MCH_HOST_BRIDGE_SMRAM] &= ~MCH_HOST_BRIDGE_SMRAM_D_OPEN;
+ pd->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK_LCK;
+ pd->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK_LCK;
+ }
memory_region_transaction_begin();
- smram_update(&mch->smram_region, pd->config[MCH_HOST_BRIDGE_SMRAM],
- mch->smm_enabled);
- memory_region_transaction_commit();
-}
-static void mch_set_smm(int smm, void *arg)
-{
- MCHPCIState *mch = arg;
- PCIDevice *pd = PCI_DEVICE(mch);
+ if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_D_OPEN) {
+ /* Hide (!) low SMRAM if H_SMRAME = 1 */
+ memory_region_set_enabled(&mch->smram_region, h_smrame);
+ /* Show high SMRAM if H_SMRAME = 1 */
+ memory_region_set_enabled(&mch->open_high_smram, h_smrame);
+ } else {
+ /* Hide high SMRAM and low SMRAM */
+ memory_region_set_enabled(&mch->smram_region, true);
+ memory_region_set_enabled(&mch->open_high_smram, false);
+ }
+
+ if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_G_SMRAME) {
+ memory_region_set_enabled(&mch->low_smram, !h_smrame);
+ memory_region_set_enabled(&mch->high_smram, h_smrame);
+ } else {
+ memory_region_set_enabled(&mch->low_smram, false);
+ memory_region_set_enabled(&mch->high_smram, false);
+ }
+
+ if (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_T_EN) {
+ switch (pd->config[MCH_HOST_BRIDGE_ESMRAMC] &
+ MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK) {
+ case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB:
+ tseg_size = 1024 * 1024;
+ break;
+ case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_2MB:
+ tseg_size = 1024 * 1024 * 2;
+ break;
+ case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_8MB:
+ tseg_size = 1024 * 1024 * 8;
+ break;
+ default:
+ tseg_size = 0;
+ break;
+ }
+ } else {
+ tseg_size = 0;
+ }
+ memory_region_del_subregion(mch->system_memory, &mch->tseg_blackhole);
+ memory_region_set_enabled(&mch->tseg_blackhole, tseg_size);
+ memory_region_set_size(&mch->tseg_blackhole, tseg_size);
+ memory_region_add_subregion_overlap(mch->system_memory,
+ mch->below_4g_mem_size - tseg_size,
+ &mch->tseg_blackhole, 1);
+
+ memory_region_set_enabled(&mch->tseg_window, tseg_size);
+ memory_region_set_size(&mch->tseg_window, tseg_size);
+ memory_region_set_address(&mch->tseg_window,
+ mch->below_4g_mem_size - tseg_size);
+ memory_region_set_alias_offset(&mch->tseg_window,
+ mch->below_4g_mem_size - tseg_size);
- memory_region_transaction_begin();
- smram_set_smm(&mch->smm_enabled, smm, pd->config[MCH_HOST_BRIDGE_SMRAM],
- &mch->smram_region);
memory_region_transaction_commit();
}
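A brief worked example of the TSEG sizing above, with an assumed memory layout:

/* assumed configuration, not from the patch: below_4g_mem_size = 0x80000000
 * (2 GiB), ESMRAMC has T_EN set and selects the 8 MB TSEG:
 *   tseg_size = 8 * 1024 * 1024            = 0x00800000
 *   base      = 0x80000000 - 0x00800000    = 0x7f800000
 * non-SMM accesses to [0x7f800000, 0x80000000) now hit tseg_blackhole, while
 * tseg_window aliases that same RAM into the SMM-only "smram" region. */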
@@ -289,7 +360,6 @@ static void mch_write_config(PCIDevice *d,
{
MCHPCIState *mch = MCH_PCI_DEVICE(d);
- /* XXX: implement SMRAM.D_LOCK */
pci_default_write_config(d, address, val, len);
if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PAM0,
@@ -329,7 +399,10 @@ static const VMStateDescription vmstate_mch = {
.post_load = mch_post_load,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, MCHPCIState),
- VMSTATE_UINT8(smm_enabled, MCHPCIState),
+ /* Used to be smm_enabled, which was basically always zero because
+ * SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code.
+ */
+ VMSTATE_UNUSED(1),
VMSTATE_END_OF_LIST()
}
};
@@ -343,6 +416,9 @@ static void mch_reset(DeviceState *qdev)
MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT);
d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT;
+ d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT;
+ d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK;
+ d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK;
mch_update(mch);
}
@@ -399,13 +475,47 @@ static void mch_realize(PCIDevice *d, Error **errp)
pc_pci_as_mapping_init(OBJECT(mch), mch->system_memory,
mch->pci_address_space);
- /* smram */
- cpu_smm_register(&mch_set_smm, mch);
+ /* if *disabled* show SMRAM to all CPUs */
memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region",
mch->pci_address_space, 0xa0000, 0x20000);
memory_region_add_subregion_overlap(mch->system_memory, 0xa0000,
&mch->smram_region, 1);
- memory_region_set_enabled(&mch->smram_region, false);
+ memory_region_set_enabled(&mch->smram_region, true);
+
+ memory_region_init_alias(&mch->open_high_smram, OBJECT(mch), "smram-open-high",
+ mch->ram_memory, 0xa0000, 0x20000);
+ memory_region_add_subregion_overlap(mch->system_memory, 0xfeda0000,
+ &mch->open_high_smram, 1);
+ memory_region_set_enabled(&mch->open_high_smram, false);
+
+ /* smram, as seen by SMM CPUs */
+ memory_region_init(&mch->smram, OBJECT(mch), "smram", 1ull << 32);
+ memory_region_set_enabled(&mch->smram, true);
+ memory_region_init_alias(&mch->low_smram, OBJECT(mch), "smram-low",
+ mch->ram_memory, 0xa0000, 0x20000);
+ memory_region_set_enabled(&mch->low_smram, true);
+ memory_region_add_subregion(&mch->smram, 0xa0000, &mch->low_smram);
+ memory_region_init_alias(&mch->high_smram, OBJECT(mch), "smram-high",
+ mch->ram_memory, 0xa0000, 0x20000);
+ memory_region_set_enabled(&mch->high_smram, true);
+ memory_region_add_subregion(&mch->smram, 0xfeda0000, &mch->high_smram);
+
+ memory_region_init_io(&mch->tseg_blackhole, OBJECT(mch),
+ &tseg_blackhole_ops, NULL,
+ "tseg-blackhole", 0);
+ memory_region_set_enabled(&mch->tseg_blackhole, false);
+ memory_region_add_subregion_overlap(mch->system_memory,
+ mch->below_4g_mem_size,
+ &mch->tseg_blackhole, 1);
+
+ memory_region_init_alias(&mch->tseg_window, OBJECT(mch), "tseg-window",
+ mch->ram_memory, mch->below_4g_mem_size, 0);
+ memory_region_set_enabled(&mch->tseg_window, false);
+ memory_region_add_subregion(&mch->smram, mch->below_4g_mem_size,
+ &mch->tseg_window);
+ object_property_add_const_link(qdev_get_machine(), "smram",
+ OBJECT(&mch->smram), &error_abort);
+
init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory,
mch->pci_address_space, &mch->pam_regions[0],
PAM_BIOS_BASE, PAM_BIOS_SIZE);
diff --git a/hw/pci/msi.c b/hw/pci/msi.c
index c111dbaff6..f9c0484420 100644
--- a/hw/pci/msi.c
+++ b/hw/pci/msi.c
@@ -21,10 +21,6 @@
#include "hw/pci/msi.h"
#include "qemu/range.h"
-/* Eventually those constants should go to Linux pci_regs.h */
-#define PCI_MSI_PENDING_32 0x10
-#define PCI_MSI_PENDING_64 0x14
-
/* PCI_MSI_ADDRESS_LO */
#define PCI_MSI_ADDRESS_LO_MASK (~0x3)
diff --git a/hw/pci/pci-stub.c b/hw/pci/pci-stub.c
index 5e564c3a87..f8f237e823 100644
--- a/hw/pci/pci-stub.c
+++ b/hw/pci/pci-stub.c
@@ -29,19 +29,7 @@ PciInfoList *qmp_query_pci(Error **errp)
return NULL;
}
-static void pci_error_message(Monitor *mon)
+void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict)
{
monitor_printf(mon, "PCI devices not supported\n");
}
-
-int hmp_pcie_aer_inject_error(Monitor *mon,
- const QDict *qdict, QObject **ret_data)
-{
- pci_error_message(mon);
- return -ENOSYS;
-}
-
-void pcie_aer_inject_error_print(Monitor *mon, const QObject *data)
-{
- pci_error_message(mon);
-}
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 4989408ece..21580433d4 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -147,7 +147,7 @@ static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;
static QLIST_HEAD(, PCIHostState) pci_host_bridges;
-static int pci_bar(PCIDevice *d, int reg)
+int pci_bar(PCIDevice *d, int reg)
{
uint8_t type;
@@ -1698,6 +1698,8 @@ PCIDevice *pci_vga_init(PCIBus *bus)
return pci_create_simple(bus, -1, "VGA");
case VGA_VMWARE:
return pci_create_simple(bus, -1, "vmware-svga");
+ case VGA_VIRTIO:
+ return pci_create_simple(bus, -1, "virtio-vga");
case VGA_NONE:
default: /* Other non-PCI types. Checking for unsupported types is already
done in vl.c. */
diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c
index b48c09cd11..c8dea8ed9c 100644
--- a/hw/pci/pcie_aer.c
+++ b/hw/pci/pcie_aer.c
@@ -815,21 +815,6 @@ const VMStateDescription vmstate_pcie_aer_log = {
}
};
-void pcie_aer_inject_error_print(Monitor *mon, const QObject *data)
-{
- QDict *qdict;
- int devfn;
- assert(qobject_type(data) == QTYPE_QDICT);
- qdict = qobject_to_qdict(data);
-
- devfn = (int)qdict_get_int(qdict, "devfn");
- monitor_printf(mon, "OK id: %s root bus: %s, bus: %x devfn: %x.%x\n",
- qdict_get_str(qdict, "id"),
- qdict_get_str(qdict, "root_bus"),
- (int) qdict_get_int(qdict, "bus"),
- PCI_SLOT(devfn), PCI_FUNC(devfn));
-}
-
typedef struct PCIEAERErrorName {
const char *name;
uint32_t val;
@@ -962,8 +947,8 @@ static int pcie_aer_parse_error_string(const char *error_name,
return -EINVAL;
}
-int hmp_pcie_aer_inject_error(Monitor *mon,
- const QDict *qdict, QObject **ret_data)
+static int do_pcie_aer_inject_error(Monitor *mon,
+ const QDict *qdict, QObject **ret_data)
{
const char *id = qdict_get_str(qdict, "id");
const char *error_name;
@@ -1035,3 +1020,23 @@ int hmp_pcie_aer_inject_error(Monitor *mon,
return 0;
}
+
+void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict)
+{
+ QObject *data;
+ int devfn;
+
+ if (do_pcie_aer_inject_error(mon, qdict, &data) < 0) {
+ return;
+ }
+
+ assert(qobject_type(data) == QTYPE_QDICT);
+ qdict = qobject_to_qdict(data);
+
+ devfn = (int)qdict_get_int(qdict, "devfn");
+ monitor_printf(mon, "OK id: %s root bus: %s, bus: %x devfn: %x.%x\n",
+ qdict_get_str(qdict, "id"),
+ qdict_get_str(qdict, "root_bus"),
+ (int) qdict_get_int(qdict, "bus"),
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+}
diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs
index 437955d1d5..c8ab06e7f3 100644
--- a/hw/ppc/Makefile.objs
+++ b/hw/ppc/Makefile.objs
@@ -3,7 +3,7 @@ obj-y += ppc.o ppc_booke.o
# IBM pSeries (sPAPR)
obj-$(CONFIG_PSERIES) += spapr.o spapr_vio.o spapr_events.o
obj-$(CONFIG_PSERIES) += spapr_hcall.o spapr_iommu.o spapr_rtas.o
-obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o
+obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o
ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
obj-y += spapr_pci_vfio.o
endif
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index c10e1b57b6..d300846c3d 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -1030,6 +1030,7 @@ void ppce500_init(MachineState *machine, PPCE500Params *params)
exit(1);
}
}
+ g_free(filename);
/* Reserve space for dtb */
dt_base = (loadaddr + bios_size + DTC_LOAD_PAD) & ~DTC_PAD_MASK;
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
index a365bf9223..0f3e34122a 100644
--- a/hw/ppc/mac_newworld.c
+++ b/hw/ppc/mac_newworld.c
@@ -119,7 +119,7 @@ static const MemoryRegionOps unin_ops = {
static void fw_cfg_boot_set(void *opaque, const char *boot_device,
Error **errp)
{
- fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
+ fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
}
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c
index f26133dedd..99879dd2d5 100644
--- a/hw/ppc/mac_oldworld.c
+++ b/hw/ppc/mac_oldworld.c
@@ -52,7 +52,7 @@
static void fw_cfg_boot_set(void *opaque, const char *boot_device,
Error **errp)
{
- fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
+ fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
}
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c
index 7f52662d76..998ee2d16b 100644
--- a/hw/ppc/prep.c
+++ b/hw/ppc/prep.c
@@ -528,7 +528,6 @@ static void ppc_prep_init(MachineState *machine)
PCIDevice *pci;
ISABus *isa_bus;
ISADevice *isa;
- qemu_irq *cpu_exit_irq;
int ppc_boot_device;
DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
@@ -625,11 +624,11 @@ static void ppc_prep_init(MachineState *machine)
/* PCI -> ISA bridge */
pci = pci_create_simple(pci_bus, PCI_DEVFN(1, 0), "i82378");
- cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1);
cpu = POWERPC_CPU(first_cpu);
qdev_connect_gpio_out(&pci->qdev, 0,
cpu->env.irq_inputs[PPC6xx_INPUT_INT]);
- qdev_connect_gpio_out(&pci->qdev, 1, *cpu_exit_irq);
+ qdev_connect_gpio_out(&pci->qdev, 1,
+ qemu_allocate_irq(cpu_request_exit, NULL, 0));
sysbus_connect_irq(&pcihost->busdev, 0, qdev_get_gpio_in(&pci->qdev, 9));
sysbus_connect_irq(&pcihost->busdev, 1, qdev_get_gpio_in(&pci->qdev, 11));
sysbus_connect_irq(&pcihost->busdev, 2, qdev_get_gpio_in(&pci->qdev, 9));
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index a15fa3c965..f174e5a0f3 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -533,6 +533,8 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
refpoints, sizeof(refpoints))));
_FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));
+ _FDT((fdt_property_cell(fdt, "rtas-event-scan-rate",
+ RTAS_EVENT_SCAN_RATE)));
/*
* According to PAPR, rtas ibm,os-term does not guarantee a return
@@ -794,8 +796,8 @@ static void spapr_finalize_fdt(sPAPREnvironment *spapr,
_FDT((fdt_pack(fdt)));
if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
- hw_error("FDT too big ! 0x%x bytes (max is 0x%x)\n",
- fdt_totalsize(fdt), FDT_MAX_SIZE);
+ error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
+ fdt_totalsize(fdt), FDT_MAX_SIZE);
exit(1);
}
@@ -899,7 +901,7 @@ static int spapr_check_htab_fd(sPAPREnvironment *spapr)
spapr->htab_fd = kvmppc_get_htab_fd(false);
if (spapr->htab_fd < 0) {
error_report("Unable to open fd for reading hash table from KVM: "
- "%s", strerror(errno));
+ "%s", strerror(errno));
rc = -1;
}
spapr->htab_fd_stale = false;
@@ -1419,7 +1421,7 @@ static void ppc_spapr_init(MachineState *machine)
rma_alloc_size = kvmppc_alloc_rma(&rma);
if (rma_alloc_size == -1) {
- hw_error("qemu: Unable to create RMA\n");
+ error_report("Unable to create RMA");
exit(1);
}
@@ -1504,6 +1506,11 @@ static void ppc_spapr_init(MachineState *machine)
qemu_register_reset(spapr_cpu_reset, cpu);
}
+ if (kvm_enabled()) {
+ /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
+ kvmppc_enable_logical_ci_hcalls();
+ }
+
/* allocate RAM */
spapr->ram_limit = ram_size;
memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
@@ -1520,18 +1527,18 @@ static void ppc_spapr_init(MachineState *machine)
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
if (!filename) {
- hw_error("Could not find LPAR rtas '%s'\n", "spapr-rtas.bin");
+ error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
exit(1);
}
spapr->rtas_size = get_image_size(filename);
spapr->rtas_blob = g_malloc(spapr->rtas_size);
if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
- hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
+ error_report("Could not load LPAR rtas '%s'", filename);
exit(1);
}
if (spapr->rtas_size > RTAS_MAX_SIZE) {
- hw_error("RTAS too big ! 0x%zx bytes (max is 0x%x)\n",
- (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
+ error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
+ (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
exit(1);
}
g_free(filename);
@@ -1641,12 +1648,12 @@ static void ppc_spapr_init(MachineState *machine)
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (!filename) {
- hw_error("Could not find LPAR rtas '%s'\n", bios_name);
+ error_report("Could not find LPAR firmware '%s'", bios_name);
exit(1);
}
fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
- if (fw_size < 0) {
- hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
+ if (fw_size <= 0) {
+ error_report("Could not load LPAR firmware '%s'", filename);
exit(1);
}
g_free(filename);
@@ -1660,9 +1667,14 @@ static void ppc_spapr_init(MachineState *machine)
/* Prepare the device tree */
spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size,
kernel_size, kernel_le,
- kernel_cmdline, spapr->epow_irq);
+ kernel_cmdline,
+ spapr->check_exception_irq);
assert(spapr->fdt_skel != NULL);
+ /* used by RTAS */
+ QTAILQ_INIT(&spapr->ccs_list);
+ qemu_register_reset(spapr_ccs_reset_hook, spapr);
+
qemu_register_boot_set(spapr_boot_set, spapr);
}
@@ -1794,6 +1806,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
mc->max_cpus = MAX_CPUS;
mc->no_parallel = 1;
mc->default_boot_order = "";
+ mc->default_ram_size = 512 * M_BYTE;
mc->kvm_type = spapr_kvm_type;
mc->has_dynamic_sysbus = true;
@@ -1816,7 +1829,12 @@ static const TypeInfo spapr_machine_info = {
};
#define SPAPR_COMPAT_2_3 \
- HW_COMPAT_2_3
+ HW_COMPAT_2_3 \
+ {\
+ .driver = "spapr-pci-host-bridge",\
+ .property = "dynamic-reconfiguration",\
+ .value = "off",\
+ },
#define SPAPR_COMPAT_2_2 \
SPAPR_COMPAT_2_3 \
@@ -1905,10 +1923,15 @@ static const TypeInfo spapr_machine_2_2_info = {
static void spapr_machine_2_3_class_init(ObjectClass *oc, void *data)
{
+ static GlobalProperty compat_props[] = {
+ SPAPR_COMPAT_2_3
+ { /* end of list */ }
+ };
MachineClass *mc = MACHINE_CLASS(oc);
mc->name = "pseries-2.3";
mc->desc = "pSeries Logical Partition (PAPR compliant) v2.3";
+ mc->compat_props = compat_props;
}
static const TypeInfo spapr_machine_2_3_info = {
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
new file mode 100644
index 0000000000..ef985381cb
--- /dev/null
+++ b/hw/ppc/spapr_drc.c
@@ -0,0 +1,744 @@
+/*
+ * QEMU SPAPR Dynamic Reconfiguration Connector Implementation
+ *
+ * Copyright IBM Corp. 2014
+ *
+ * Authors:
+ * Michael Roth <mdroth@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "hw/ppc/spapr_drc.h"
+#include "qom/object.h"
+#include "hw/qdev.h"
+#include "qapi/visitor.h"
+#include "qemu/error-report.h"
+
+/* #define DEBUG_SPAPR_DRC */
+
+#ifdef DEBUG_SPAPR_DRC
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#define DPRINTFN(fmt, ...) \
+ do { DPRINTF(fmt, ## __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#define DPRINTFN(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define DRC_CONTAINER_PATH "/dr-connector"
+#define DRC_INDEX_TYPE_SHIFT 28
+#define DRC_INDEX_ID_MASK (~(~0 << DRC_INDEX_TYPE_SHIFT))
+
+static sPAPRDRConnectorTypeShift get_type_shift(sPAPRDRConnectorType type)
+{
+ uint32_t shift = 0;
+
+ /* make sure this isn't SPAPR_DR_CONNECTOR_TYPE_ANY, or some
+ * other wonky value.
+ */
+ g_assert(is_power_of_2(type));
+
+ while (type != (1 << shift)) {
+ shift++;
+ }
+ return shift;
+}
+
+static uint32_t get_index(sPAPRDRConnector *drc)
+{
+ /* no set format for a drc index: it only needs to be globally
+ * unique. this is how we encode the DRC type on bare-metal
+ * however, so might as well do that here
+ */
+ return (get_type_shift(drc->type) << DRC_INDEX_TYPE_SHIFT) |
+ (drc->id & DRC_INDEX_ID_MASK);
+}
+
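A worked example of the index encoding above, using illustrative shift and id values rather than any real connector type:

/* illustrative values only: a connector type whose single set bit makes
 * get_type_shift() return 3, with id 5:
 *   index = (3 << DRC_INDEX_TYPE_SHIFT) | (5 & DRC_INDEX_ID_MASK)
 *         = 0x30000000 | 0x00000005
 *         = 0x30000005
 * spapr_dr_connector_by_index(0x30000005) then resolves back to the same DRC. */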
+static int set_isolation_state(sPAPRDRConnector *drc,
+ sPAPRDRIsolationState state)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc: %x, set_isolation_state: %x", get_index(drc), state);
+
+ drc->isolation_state = state;
+
+ if (drc->isolation_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ /* if we're awaiting release, but still in an unconfigured state,
+ * it's likely the guest is still in the process of configuring
+ * the device and is transitioning it to an ISOLATED
+ * state as part of that process, so we only complete the
+ * removal when this transition happens for a device in a
+ * configured state, as suggested by the state diagram from
+ * PAPR+ 2.7, 13.4
+ */
+ if (drc->awaiting_release) {
+ if (drc->configured) {
+ DPRINTFN("finalizing device removal");
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ } else {
+ DPRINTFN("deferring device removal on unconfigured device\n");
+ }
+ }
+ drc->configured = false;
+ }
+
+ return 0;
+}
+
+static int set_indicator_state(sPAPRDRConnector *drc,
+ sPAPRDRIndicatorState state)
+{
+ DPRINTFN("drc: %x, set_indicator_state: %x", get_index(drc), state);
+ drc->indicator_state = state;
+ return 0;
+}
+
+static int set_allocation_state(sPAPRDRConnector *drc,
+ sPAPRDRAllocationState state)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc: %x, set_allocation_state: %x", get_index(drc), state);
+
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->allocation_state = state;
+ if (drc->awaiting_release &&
+ drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ DPRINTFN("finalizing device removal");
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ }
+ }
+ return 0;
+}
+
+static uint32_t get_type(sPAPRDRConnector *drc)
+{
+ return drc->type;
+}
+
+static const char *get_name(sPAPRDRConnector *drc)
+{
+ return drc->name;
+}
+
+static const void *get_fdt(sPAPRDRConnector *drc, int *fdt_start_offset)
+{
+ if (fdt_start_offset) {
+ *fdt_start_offset = drc->fdt_start_offset;
+ }
+ return drc->fdt;
+}
+
+static void set_configured(sPAPRDRConnector *drc)
+{
+ DPRINTFN("drc: %x, set_configured", get_index(drc));
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
+ /* the guest should not be configuring an isolated device */
+ DPRINTFN("drc: %x, set_configured: skipping isolated device",
+ get_index(drc));
+ return;
+ }
+ drc->configured = true;
+}
+
+/*
+ * dr-entity-sense sensor value
+ * returned via get-sensor-state RTAS calls
+ * as expected by state diagram in PAPR+ 2.7, 13.4
+ * based on the current allocation/indicator/power states
+ * for the DR connector.
+ */
+static sPAPRDREntitySense entity_sense(sPAPRDRConnector *drc)
+{
+ sPAPRDREntitySense state;
+
+ if (drc->dev) {
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ /* for logical DR, we return a state of UNUSABLE
+ * iff the allocation state is UNUSABLE.
+ * Otherwise, report the state as USABLE/PRESENT,
+ * as we would for PCI.
+ */
+ state = SPAPR_DR_ENTITY_SENSE_UNUSABLE;
+ } else {
+ /* this assumes all PCI devices are assigned to
+ * a 'live insertion' power domain, where QEMU
+ * manages power state automatically as opposed
+ * to the guest. present, non-PCI resources are
+ * unaffected by power state.
+ */
+ state = SPAPR_DR_ENTITY_SENSE_PRESENT;
+ }
+ } else {
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ /* PCI devices, and only PCI devices, use EMPTY
+ * in cases where we'd otherwise use UNUSABLE
+ */
+ state = SPAPR_DR_ENTITY_SENSE_EMPTY;
+ } else {
+ state = SPAPR_DR_ENTITY_SENSE_UNUSABLE;
+ }
+ }
+
+ DPRINTFN("drc: %x, entity_sense: %x", get_index(drc), state);
+ return state;
+}
+
+static void prop_get_index(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value = (uint32_t)drck->get_index(drc);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static void prop_get_type(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value = (uint32_t)drck->get_type(drc);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static char *prop_get_name(Object *obj, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ return g_strdup(drck->get_name(drc));
+}
+
+static void prop_get_entity_sense(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value = (uint32_t)drck->entity_sense(drc);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static void prop_get_fdt(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ int fdt_offset_next, fdt_offset, fdt_depth;
+ void *fdt;
+
+ if (!drc->fdt) {
+ return;
+ }
+
+ fdt = drc->fdt;
+ fdt_offset = drc->fdt_start_offset;
+ fdt_depth = 0;
+
+ do {
+ const char *name = NULL;
+ const struct fdt_property *prop = NULL;
+ int prop_len = 0, name_len = 0;
+ uint32_t tag;
+
+ tag = fdt_next_tag(fdt, fdt_offset, &fdt_offset_next);
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ fdt_depth++;
+ name = fdt_get_name(fdt, fdt_offset, &name_len);
+ visit_start_struct(v, NULL, NULL, name, 0, NULL);
+ break;
+ case FDT_END_NODE:
+ /* shouldn't ever see an FDT_END_NODE before FDT_BEGIN_NODE */
+ g_assert(fdt_depth > 0);
+ visit_end_struct(v, NULL);
+ fdt_depth--;
+ break;
+ case FDT_PROP: {
+ int i;
+ prop = fdt_get_property_by_offset(fdt, fdt_offset, &prop_len);
+ name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+ visit_start_list(v, name, NULL);
+ for (i = 0; i < prop_len; i++) {
+ visit_type_uint8(v, (uint8_t *)&prop->data[i], NULL, NULL);
+ }
+ visit_end_list(v, NULL);
+ break;
+ }
+ default:
+ error_setg(&error_abort, "device FDT in unexpected state: %d", tag);
+ }
+ fdt_offset = fdt_offset_next;
+ } while (fdt_depth != 0);
+}
+
+static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
+ int fdt_start_offset, bool coldplug, Error **errp)
+{
+ DPRINTFN("drc: %x, attach", get_index(drc));
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ error_setg(errp, "an attached device is still awaiting release");
+ return;
+ }
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ g_assert(drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE);
+ }
+ g_assert(fdt || coldplug);
+
+ /* NOTE: setting initial isolation state to UNISOLATED means we can't
+ * detach unless guest has a userspace/kernel that moves this state
+ * back to ISOLATED in response to an unplug event, or this is done
+ * manually by the admin beforehand. If we force things while the guest
+ * may be accessing the device, we can easily crash the guest, so we
+ * defer completion of removal in such cases to the reset() hook.
+ */
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;
+ }
+ drc->indicator_state = SPAPR_DR_INDICATOR_STATE_ACTIVE;
+
+ drc->dev = d;
+ drc->fdt = fdt;
+ drc->fdt_start_offset = fdt_start_offset;
+ drc->configured = false;
+
+ object_property_add_link(OBJECT(drc), "device",
+ object_get_typename(OBJECT(drc->dev)),
+ (Object **)(&drc->dev),
+ NULL, 0, NULL);
+}
+
+static void detach(sPAPRDRConnector *drc, DeviceState *d,
+ spapr_drc_detach_cb *detach_cb,
+ void *detach_cb_opaque, Error **errp)
+{
+ DPRINTFN("drc: %x, detach", get_index(drc));
+
+ drc->detach_cb = detach_cb;
+ drc->detach_cb_opaque = detach_cb_opaque;
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ DPRINTFN("awaiting transition to isolated state before removal");
+ drc->awaiting_release = true;
+ return;
+ }
+
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ DPRINTFN("awaiting transition to unusable state before removal");
+ drc->awaiting_release = true;
+ return;
+ }
+
+ drc->indicator_state = SPAPR_DR_INDICATOR_STATE_INACTIVE;
+
+ if (drc->detach_cb) {
+ drc->detach_cb(drc->dev, drc->detach_cb_opaque);
+ }
+
+ drc->awaiting_release = false;
+ g_free(drc->fdt);
+ drc->fdt = NULL;
+ drc->fdt_start_offset = 0;
+ object_property_del(OBJECT(drc), "device", NULL);
+ drc->dev = NULL;
+ drc->detach_cb = NULL;
+ drc->detach_cb_opaque = NULL;
+}
+
+static bool release_pending(sPAPRDRConnector *drc)
+{
+ return drc->awaiting_release;
+}
+
+static void reset(DeviceState *d)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc reset: %x", drck->get_index(drc));
+ /* immediately upon reset we can assume that DRCs whose devices
+ * are pending removal can be safely removed, and that they will
+ * subsequently be left in an ISOLATED state. move the DRC to this
+ * state in these cases (which will in turn complete any pending
+ * device removals)
+ */
+ if (drc->awaiting_release) {
+ drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_ISOLATED);
+ /* generally this should also finalize the removal, but if the device
+ * hasn't yet been configured we normally defer removal under the
+ * assumption that this transition is taking place as part of device
+ * configuration. so check if we're still waiting after this, and
+ * force removal if we are
+ */
+ if (drc->awaiting_release) {
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ }
+
+ /* non-PCI devices may be awaiting a transition to UNUSABLE */
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->awaiting_release) {
+ drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_UNUSABLE);
+ }
+ }
+}
+
+static void realize(DeviceState *d, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ Object *root_container;
+ char link_name[256];
+ gchar *child_name;
+ Error *err = NULL;
+
+ DPRINTFN("drc realize: %x", drck->get_index(drc));
+ /* NOTE: we do this as part of realize/unrealize due to the fact
+ * that the guest will communicate with the DRC via RTAS calls
+ * referencing the global DRC index. By unlinking the DRC
+ * from DRC_CONTAINER_PATH/<drc_index> we effectively make it
+ * inaccessible by the guest, since lookups rely on this path
+ * existing in the composition tree
+ */
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ snprintf(link_name, sizeof(link_name), "%x", drck->get_index(drc));
+ child_name = object_get_canonical_path_component(OBJECT(drc));
+ DPRINTFN("drc child name: %s", child_name);
+ object_property_add_alias(root_container, link_name,
+ drc->owner, child_name, &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ error_free(err);
+ object_unref(OBJECT(drc));
+ }
+ DPRINTFN("drc realize complete");
+}
+
+static void unrealize(DeviceState *d, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ Object *root_container;
+ char name[256];
+ Error *err = NULL;
+
+ DPRINTFN("drc unrealize: %x", drck->get_index(drc));
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ snprintf(name, sizeof(name), "%x", drck->get_index(drc));
+ object_property_del(root_container, name, &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ error_free(err);
+ object_unref(OBJECT(drc));
+ }
+}
+
+sPAPRDRConnector *spapr_dr_connector_new(Object *owner,
+ sPAPRDRConnectorType type,
+ uint32_t id)
+{
+ sPAPRDRConnector *drc =
+ SPAPR_DR_CONNECTOR(object_new(TYPE_SPAPR_DR_CONNECTOR));
+
+ g_assert(type);
+
+ drc->type = type;
+ drc->id = id;
+ drc->owner = owner;
+ object_property_add_child(owner, "dr-connector[*]", OBJECT(drc), NULL);
+ object_property_set_bool(OBJECT(drc), true, "realized", NULL);
+
+ /* human-readable name for a DRC to encode into the DT
+ * description. this is mainly only used within a guest in place
+ * of the unique DRC index.
+ *
+ * in the case of VIO/PCI devices, it corresponds to a
+ * "location code" that maps a logical device/function (DRC index)
+ * to a physical (or virtual in the case of VIO) location in the
+ * system by chaining together the "location label" for each
+ * encapsulating component.
+ *
+ * since this is more to do with diagnosing physical hardware
+ * issues than guest compatibility, we choose location codes/DRC
+ * names that adhere to the documented format, but avoid encoding
+ * the entire topology information into the label/code, instead
+ * just using the location codes based on the labels for the
+ * endpoints (VIO/PCI adaptor connectors), which is basically
+ * just "C" followed by an integer ID.
+ *
+ * DRC names as documented by PAPR+ v2.7, 13.5.2.4
+ * location codes as documented by PAPR+ v2.7, 12.3.1.5
+ */
+ switch (drc->type) {
+ case SPAPR_DR_CONNECTOR_TYPE_CPU:
+ drc->name = g_strdup_printf("CPU %d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_PHB:
+ drc->name = g_strdup_printf("PHB %d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_VIO:
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ drc->name = g_strdup_printf("C%d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_LMB:
+ drc->name = g_strdup_printf("LMB %d", id);
+ break;
+ default:
+ g_assert(false);
+ }
+
+ /* PCI slots always start in a USABLE state, and stay there */
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
+ }
+
+ return drc;
+}
+
+static void spapr_dr_connector_instance_init(Object *obj)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+
+ object_property_add_uint32_ptr(obj, "isolation-state",
+ &drc->isolation_state, NULL);
+ object_property_add_uint32_ptr(obj, "indicator-state",
+ &drc->indicator_state, NULL);
+ object_property_add_uint32_ptr(obj, "allocation-state",
+ &drc->allocation_state, NULL);
+ object_property_add_uint32_ptr(obj, "id", &drc->id, NULL);
+ object_property_add(obj, "index", "uint32", prop_get_index,
+ NULL, NULL, NULL, NULL);
+ object_property_add(obj, "connector_type", "uint32", prop_get_type,
+ NULL, NULL, NULL, NULL);
+ object_property_add_str(obj, "name", prop_get_name, NULL, NULL);
+ object_property_add(obj, "entity-sense", "uint32", prop_get_entity_sense,
+ NULL, NULL, NULL, NULL);
+ object_property_add(obj, "fdt", "struct", prop_get_fdt,
+ NULL, NULL, NULL, NULL);
+}
+
+static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
+{
+ DeviceClass *dk = DEVICE_CLASS(k);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
+
+ dk->reset = reset;
+ dk->realize = realize;
+ dk->unrealize = unrealize;
+ drck->set_isolation_state = set_isolation_state;
+ drck->set_indicator_state = set_indicator_state;
+ drck->set_allocation_state = set_allocation_state;
+ drck->get_index = get_index;
+ drck->get_type = get_type;
+ drck->get_name = get_name;
+ drck->get_fdt = get_fdt;
+ drck->set_configured = set_configured;
+ drck->entity_sense = entity_sense;
+ drck->attach = attach;
+ drck->detach = detach;
+ drck->release_pending = release_pending;
+}
+
+static const TypeInfo spapr_dr_connector_info = {
+ .name = TYPE_SPAPR_DR_CONNECTOR,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(sPAPRDRConnector),
+ .instance_init = spapr_dr_connector_instance_init,
+ .class_size = sizeof(sPAPRDRConnectorClass),
+ .class_init = spapr_dr_connector_class_init,
+};
+
+static void spapr_drc_register_types(void)
+{
+ type_register_static(&spapr_dr_connector_info);
+}
+
+type_init(spapr_drc_register_types)
+
+/* helper functions for external users */
+
+sPAPRDRConnector *spapr_dr_connector_by_index(uint32_t index)
+{
+ Object *obj;
+ char name[256];
+
+ snprintf(name, sizeof(name), "%s/%x", DRC_CONTAINER_PATH, index);
+ obj = object_resolve_path(name, NULL);
+
+ return !obj ? NULL : SPAPR_DR_CONNECTOR(obj);
+}
+
+sPAPRDRConnector *spapr_dr_connector_by_id(sPAPRDRConnectorType type,
+ uint32_t id)
+{
+ return spapr_dr_connector_by_index(
+ (get_type_shift(type) << DRC_INDEX_TYPE_SHIFT) |
+ (id & DRC_INDEX_ID_MASK));
+}
+
+/* generate a string that describes the DRC to encode into the
+ * device tree.
+ *
+ * as documented by PAPR+ v2.7, 13.5.2.6 and C.6.1
+ */
+static const char *spapr_drc_get_type_str(sPAPRDRConnectorType type)
+{
+ switch (type) {
+ case SPAPR_DR_CONNECTOR_TYPE_CPU:
+ return "CPU";
+ case SPAPR_DR_CONNECTOR_TYPE_PHB:
+ return "PHB";
+ case SPAPR_DR_CONNECTOR_TYPE_VIO:
+ return "SLOT";
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ return "28";
+ case SPAPR_DR_CONNECTOR_TYPE_LMB:
+ return "MEM";
+ default:
+ g_assert(false);
+ }
+
+ return NULL;
+}
+
+/**
+ * spapr_drc_populate_dt
+ *
+ * @fdt: libfdt device tree
+ * @path: path in the DT to generate properties
+ * @owner: parent Object/DeviceState for which to generate DRC
+ * descriptions for
+ * @drc_type_mask: mask of sPAPRDRConnectorType values corresponding
+ * to the types of DRCs to generate entries for
+ *
+ * generate OF properties to describe DRC topology/indices to guests
+ *
+ * as documented in PAPR+ v2.1, 13.5.2
+ */
+int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
+ uint32_t drc_type_mask)
+{
+ Object *root_container;
+ ObjectProperty *prop;
+ uint32_t drc_count = 0;
+ GArray *drc_indexes, *drc_power_domains;
+ GString *drc_names, *drc_types;
+ int ret;
+
+ /* the first entry of each property is a 32-bit integer encoding
+ * the number of elements in the array. we won't know this until
+ * we complete the iteration through all the matching DRCs, but
+ * reserve the space now and set the offsets accordingly so we
+ * can fill them in later.
+ */
+ drc_indexes = g_array_new(false, true, sizeof(uint32_t));
+ drc_indexes = g_array_set_size(drc_indexes, 1);
+ drc_power_domains = g_array_new(false, true, sizeof(uint32_t));
+ drc_power_domains = g_array_set_size(drc_power_domains, 1);
+ drc_names = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
+ drc_types = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
+
+ /* aliases for all DRConnector objects will be rooted in the QOM
+ * composition tree at DRC_CONTAINER_PATH
+ */
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+
+ QTAILQ_FOREACH(prop, &root_container->properties, node) {
+ Object *obj;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t drc_index, drc_power_domain;
+
+ if (!strstart(prop->type, "link<", NULL)) {
+ continue;
+ }
+
+ obj = object_property_get_link(root_container, prop->name, NULL);
+ drc = SPAPR_DR_CONNECTOR(obj);
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ if (owner && (drc->owner != owner)) {
+ continue;
+ }
+
+ if ((drc->type & drc_type_mask) == 0) {
+ continue;
+ }
+
+ drc_count++;
+
+ /* ibm,drc-indexes */
+ drc_index = cpu_to_be32(drck->get_index(drc));
+ g_array_append_val(drc_indexes, drc_index);
+
+ /* ibm,drc-power-domains */
+ drc_power_domain = cpu_to_be32(-1);
+ g_array_append_val(drc_power_domains, drc_power_domain);
+
+ /* ibm,drc-names */
+ drc_names = g_string_append(drc_names, drck->get_name(drc));
+ drc_names = g_string_insert_len(drc_names, -1, "\0", 1);
+
+ /* ibm,drc-types */
+ drc_types = g_string_append(drc_types,
+ spapr_drc_get_type_str(drc->type));
+ drc_types = g_string_insert_len(drc_types, -1, "\0", 1);
+ }
+
+ /* now write the drc count into the space we reserved at the
+ * beginning of the arrays
+ */
+ *(uint32_t *)drc_indexes->data = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_power_domains->data = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_names->str = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_types->str = cpu_to_be32(drc_count);
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-indexes",
+ drc_indexes->data,
+ drc_indexes->len * sizeof(uint32_t));
+ if (ret) {
+ fprintf(stderr, "Couldn't create ibm,drc-indexes property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-power-domains",
+ drc_power_domains->data,
+ drc_power_domains->len * sizeof(uint32_t));
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-power-domains property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-names",
+ drc_names->str, drc_names->len);
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-names property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-types",
+ drc_types->str, drc_types->len);
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-types property\n");
+ goto out;
+ }
+
+out:
+ g_array_free(drc_indexes, true);
+ g_array_free(drc_power_domains, true);
+ g_string_free(drc_names, true);
+ g_string_free(drc_types, true);
+
+ return ret;
+}
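
For reference, a minimal standalone sketch of how a DRC index is composed, mirroring spapr_dr_connector_by_id() above. The DRC_INDEX_* values and the PCI type shift used here are illustrative assumptions; the real definitions live in spapr_drc.h and are not part of this hunk.

#include <stdint.h>
#include <stdio.h>

#define DRC_INDEX_TYPE_SHIFT  28          /* assumed value, see spapr_drc.h */
#define DRC_INDEX_ID_MASK     0x0fffffff  /* assumed value, see spapr_drc.h */

/* mirrors the composition done in spapr_dr_connector_by_id() */
static uint32_t drc_index(uint32_t type_shift, uint32_t id)
{
    return (type_shift << DRC_INDEX_TYPE_SHIFT) | (id & DRC_INDEX_ID_MASK);
}

int main(void)
{
    /* PCI connector id layout used by spapr_phb_get_pci_drc():
     * (phb->index << 16) | (busnr << 8) | devfn
     */
    uint32_t phb_index = 0, busnr = 0, devfn = 0x28;   /* slot 5, function 0 */
    uint32_t id = (phb_index << 16) | (busnr << 8) | devfn;

    /* 4 is only a placeholder for the PCI entry of get_type_shift() */
    printf("DRC index = 0x%08x\n", drc_index(4, id));
    return 0;
}
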
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index 283e96bca1..fda9e3590a 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -32,6 +32,9 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
+#include "hw/pci/pci.h"
+#include "hw/pci-host/spapr.h"
+#include "hw/ppc/spapr_drc.h"
#include <libfdt.h>
@@ -77,6 +80,7 @@ struct rtas_error_log {
#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
#define RTAS_LOG_TYPE_EPOW 0x00000040
+#define RTAS_LOG_TYPE_HOTPLUG 0x000000e5
uint32_t extended_length;
} QEMU_PACKED;
@@ -166,6 +170,38 @@ struct epow_log_full {
struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;
+struct rtas_event_log_v6_hp {
+#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */
+ struct rtas_event_log_v6_section_header hdr;
+ uint8_t hotplug_type;
+#define RTAS_LOG_V6_HP_TYPE_CPU 1
+#define RTAS_LOG_V6_HP_TYPE_MEMORY 2
+#define RTAS_LOG_V6_HP_TYPE_SLOT 3
+#define RTAS_LOG_V6_HP_TYPE_PHB 4
+#define RTAS_LOG_V6_HP_TYPE_PCI 5
+ uint8_t hotplug_action;
+#define RTAS_LOG_V6_HP_ACTION_ADD 1
+#define RTAS_LOG_V6_HP_ACTION_REMOVE 2
+ uint8_t hotplug_identifier;
+#define RTAS_LOG_V6_HP_ID_DRC_NAME 1
+#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2
+#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3
+ uint8_t reserved;
+ union {
+ uint32_t index;
+ uint32_t count;
+ char name[1];
+ } drc;
+} QEMU_PACKED;
+
+struct hp_log_full {
+ struct rtas_error_log hdr;
+ struct rtas_event_log_v6 v6hdr;
+ struct rtas_event_log_v6_maina maina;
+ struct rtas_event_log_v6_mainb mainb;
+ struct rtas_event_log_v6_hp hp;
+} QEMU_PACKED;
+
#define EVENT_MASK_INTERNAL_ERRORS 0x80000000
#define EVENT_MASK_EPOW 0x40000000
#define EVENT_MASK_HOTPLUG 0x10000000
@@ -181,67 +217,105 @@ struct epow_log_full {
} \
} while (0)
-void spapr_events_fdt_skel(void *fdt, uint32_t epow_irq)
+void spapr_events_fdt_skel(void *fdt, uint32_t check_exception_irq)
{
- uint32_t epow_irq_ranges[] = {cpu_to_be32(epow_irq), cpu_to_be32(1)};
- uint32_t epow_interrupts[] = {cpu_to_be32(epow_irq), 0};
+ uint32_t irq_ranges[] = {cpu_to_be32(check_exception_irq), cpu_to_be32(1)};
+ uint32_t interrupts[] = {cpu_to_be32(check_exception_irq), 0};
_FDT((fdt_begin_node(fdt, "event-sources")));
_FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
_FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
_FDT((fdt_property(fdt, "interrupt-ranges",
- epow_irq_ranges, sizeof(epow_irq_ranges))));
+ irq_ranges, sizeof(irq_ranges))));
_FDT((fdt_begin_node(fdt, "epow-events")));
- _FDT((fdt_property(fdt, "interrupts",
- epow_interrupts, sizeof(epow_interrupts))));
+ _FDT((fdt_property(fdt, "interrupts", interrupts, sizeof(interrupts))));
_FDT((fdt_end_node(fdt)));
_FDT((fdt_end_node(fdt)));
}
-static struct epow_log_full *pending_epow;
-static uint32_t next_plid;
+static void rtas_event_log_queue(int log_type, void *data, bool exception)
+{
+ sPAPREventLogEntry *entry = g_new(sPAPREventLogEntry, 1);
-static void spapr_powerdown_req(Notifier *n, void *opaque)
+ g_assert(data);
+ entry->log_type = log_type;
+ entry->exception = exception;
+ entry->data = data;
+ QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
+}
+
+static sPAPREventLogEntry *rtas_event_log_dequeue(uint32_t event_mask,
+ bool exception)
{
- sPAPREnvironment *spapr = container_of(n, sPAPREnvironment, epow_notifier);
- struct rtas_error_log *hdr;
- struct rtas_event_log_v6 *v6hdr;
- struct rtas_event_log_v6_maina *maina;
- struct rtas_event_log_v6_mainb *mainb;
- struct rtas_event_log_v6_epow *epow;
- struct tm tm;
- int year;
+ sPAPREventLogEntry *entry = NULL;
- if (pending_epow) {
- /* For now, we just throw away earlier events if two come
- * along before any are consumed. This is sufficient for our
- * powerdown messages, but we'll need more if we do more
- * general error/event logging */
- g_free(pending_epow);
+ /* we only queue EPOW events atm. */
+ if ((event_mask & EVENT_MASK_EPOW) == 0) {
+ return NULL;
}
- pending_epow = g_malloc0(sizeof(*pending_epow));
- hdr = &pending_epow->hdr;
- v6hdr = &pending_epow->v6hdr;
- maina = &pending_epow->maina;
- mainb = &pending_epow->mainb;
- epow = &pending_epow->epow;
- hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
- | RTAS_LOG_SEVERITY_EVENT
- | RTAS_LOG_DISPOSITION_NOT_RECOVERED
- | RTAS_LOG_OPTIONAL_PART_PRESENT
- | RTAS_LOG_TYPE_EPOW);
- hdr->extended_length = cpu_to_be32(sizeof(*pending_epow)
- - sizeof(pending_epow->hdr));
+ QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
+ if (entry->exception != exception) {
+ continue;
+ }
+
+ /* EPOW and hotplug events are surfaced in the same manner */
+ if (entry->log_type == RTAS_LOG_TYPE_EPOW ||
+ entry->log_type == RTAS_LOG_TYPE_HOTPLUG) {
+ break;
+ }
+ }
+
+ if (entry) {
+ QTAILQ_REMOVE(&spapr->pending_events, entry, next);
+ }
+ return entry;
+}
+
+static bool rtas_event_log_contains(uint32_t event_mask, bool exception)
+{
+ sPAPREventLogEntry *entry = NULL;
+
+ /* we only queue EPOW events atm. */
+ if ((event_mask & EVENT_MASK_EPOW) == 0) {
+ return false;
+ }
+
+ QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
+ if (entry->exception != exception) {
+ continue;
+ }
+
+ /* EPOW and hotplug events are surfaced in the same manner */
+ if (entry->log_type == RTAS_LOG_TYPE_EPOW ||
+ entry->log_type == RTAS_LOG_TYPE_HOTPLUG) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static uint32_t next_plid;
+
+static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
+{
v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
| RTAS_LOG_V6_B0_BIGENDIAN;
v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
| RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
+}
+
+static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
+ int section_count)
+{
+ struct tm tm;
+ int year;
maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
@@ -256,8 +330,37 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
| (to_bcd(tm.tm_min) << 16)
| (to_bcd(tm.tm_sec) << 8));
maina->creator_id = 'H'; /* Hypervisor */
- maina->section_count = 3; /* Main-A, Main-B and EPOW */
+ maina->section_count = section_count;
maina->plid = next_plid++;
+}
+
+static void spapr_powerdown_req(Notifier *n, void *opaque)
+{
+ sPAPREnvironment *spapr = container_of(n, sPAPREnvironment, epow_notifier);
+ struct rtas_error_log *hdr;
+ struct rtas_event_log_v6 *v6hdr;
+ struct rtas_event_log_v6_maina *maina;
+ struct rtas_event_log_v6_mainb *mainb;
+ struct rtas_event_log_v6_epow *epow;
+ struct epow_log_full *new_epow;
+
+ new_epow = g_malloc0(sizeof(*new_epow));
+ hdr = &new_epow->hdr;
+ v6hdr = &new_epow->v6hdr;
+ maina = &new_epow->maina;
+ mainb = &new_epow->mainb;
+ epow = &new_epow->epow;
+
+ hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
+ | RTAS_LOG_SEVERITY_EVENT
+ | RTAS_LOG_DISPOSITION_NOT_RECOVERED
+ | RTAS_LOG_OPTIONAL_PART_PRESENT
+ | RTAS_LOG_TYPE_EPOW);
+ hdr->extended_length = cpu_to_be32(sizeof(*new_epow)
+ - sizeof(new_epow->hdr));
+
+ spapr_init_v6hdr(v6hdr);
+ spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);
mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
@@ -274,7 +377,80 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;
- qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->epow_irq));
+ rtas_event_log_queue(RTAS_LOG_TYPE_EPOW, new_epow, true);
+
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+}
+
+static void spapr_hotplug_req_event(sPAPRDRConnector *drc, uint8_t hp_action)
+{
+ struct hp_log_full *new_hp;
+ struct rtas_error_log *hdr;
+ struct rtas_event_log_v6 *v6hdr;
+ struct rtas_event_log_v6_maina *maina;
+ struct rtas_event_log_v6_mainb *mainb;
+ struct rtas_event_log_v6_hp *hp;
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ sPAPRDRConnectorType drc_type = drck->get_type(drc);
+
+ new_hp = g_malloc0(sizeof(struct hp_log_full));
+ hdr = &new_hp->hdr;
+ v6hdr = &new_hp->v6hdr;
+ maina = &new_hp->maina;
+ mainb = &new_hp->mainb;
+ hp = &new_hp->hp;
+
+ hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
+ | RTAS_LOG_SEVERITY_EVENT
+ | RTAS_LOG_DISPOSITION_NOT_RECOVERED
+ | RTAS_LOG_OPTIONAL_PART_PRESENT
+ | RTAS_LOG_INITIATOR_HOTPLUG
+ | RTAS_LOG_TYPE_HOTPLUG);
+ hdr->extended_length = cpu_to_be32(sizeof(*new_hp)
+ - sizeof(new_hp->hdr));
+
+ spapr_init_v6hdr(v6hdr);
+ spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);
+
+ mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
+ mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
+ mainb->subsystem_id = 0x80; /* External environment */
+ mainb->event_severity = 0x00; /* Informational / non-error */
+ mainb->event_subtype = 0x00; /* Normal shutdown */
+
+ hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
+ hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
+ hp->hdr.section_version = 1; /* includes extended modifier */
+ hp->hotplug_action = hp_action;
+
+ switch (drc_type) {
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ hp->drc.index = cpu_to_be32(drck->get_index(drc));
+ hp->hotplug_identifier = RTAS_LOG_V6_HP_ID_DRC_INDEX;
+ hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
+ break;
+ default:
+ /* we shouldn't be signaling hotplug events for resources
+ * that don't support them
+ */
+ g_assert(false);
+ return;
+ }
+
+ rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp, true);
+
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+}
+
+void spapr_hotplug_req_add_event(sPAPRDRConnector *drc)
+{
+ spapr_hotplug_req_event(drc, RTAS_LOG_V6_HP_ACTION_ADD);
+}
+
+void spapr_hotplug_req_remove_event(sPAPRDRConnector *drc)
+{
+ spapr_hotplug_req_event(drc, RTAS_LOG_V6_HP_ACTION_REMOVE);
}
static void check_exception(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -282,8 +458,10 @@ static void check_exception(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong args,
uint32_t nret, target_ulong rets)
{
- uint32_t mask, buf, len;
+ uint32_t mask, buf, len, event_len;
uint64_t xinfo;
+ sPAPREventLogEntry *event;
+ struct rtas_error_log *hdr;
if ((nargs < 6) || (nargs > 7) || nret != 1) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -298,25 +476,85 @@ static void check_exception(PowerPCCPU *cpu, sPAPREnvironment *spapr,
xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
}
- if ((mask & EVENT_MASK_EPOW) && pending_epow) {
- if (sizeof(*pending_epow) < len) {
- len = sizeof(*pending_epow);
- }
+ event = rtas_event_log_dequeue(mask, true);
+ if (!event) {
+ goto out_no_events;
+ }
+
+ hdr = event->data;
+ event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr);
+
+ if (event_len < len) {
+ len = event_len;
+ }
+
+ cpu_physical_memory_write(buf, event->data, len);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ g_free(event->data);
+ g_free(event);
+
+ /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
+ * there are still pending events to be fetched via check-exception. We
+ * do the latter here, since our code relies on edge-triggered
+ * interrupts.
+ */
+ if (rtas_event_log_contains(mask, true)) {
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+ }
- cpu_physical_memory_write(buf, pending_epow, len);
- g_free(pending_epow);
- pending_epow = NULL;
- rtas_st(rets, 0, RTAS_OUT_SUCCESS);
- } else {
- rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
+ return;
+
+out_no_events:
+ rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
+}
+
+static void event_scan(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint32_t mask, buf, len, event_len;
+ sPAPREventLogEntry *event;
+ struct rtas_error_log *hdr;
+
+ if (nargs != 4 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
}
+
+ mask = rtas_ld(args, 0);
+ buf = rtas_ld(args, 2);
+ len = rtas_ld(args, 3);
+
+ event = rtas_event_log_dequeue(mask, false);
+ if (!event) {
+ goto out_no_events;
+ }
+
+ hdr = event->data;
+ event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr);
+
+ if (event_len < len) {
+ len = event_len;
+ }
+
+ cpu_physical_memory_write(buf, event->data, len);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ g_free(event->data);
+ g_free(event);
+ return;
+
+out_no_events:
+ rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}
void spapr_events_init(sPAPREnvironment *spapr)
{
- spapr->epow_irq = xics_alloc(spapr->icp, 0, 0, false);
+ QTAILQ_INIT(&spapr->pending_events);
+ spapr->check_exception_irq = xics_alloc(spapr->icp, 0, 0, false);
spapr->epow_notifier.notify = spapr_powerdown_req;
qemu_register_powerdown_notifier(&spapr->epow_notifier);
spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
check_exception);
+ spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
}
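
A minimal sketch of the queue filtering introduced above: check-exception drains entries queued with exception == true, event-scan drains the rest, and both EPOW and hotplug logs are matched under the EPOW event mask. The types below are illustrative stand-ins, not the sPAPREventLogEntry/QTAILQ structures themselves.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define LOG_TYPE_EPOW     0x40
#define LOG_TYPE_HOTPLUG  0xe5
#define MASK_EPOW         0x40000000u

struct entry { int log_type; bool exception; struct entry *next; };

static struct entry *dequeue(struct entry **head, unsigned mask, bool exception)
{
    struct entry **pp;

    if (!(mask & MASK_EPOW)) {            /* both event types ride the EPOW mask */
        return NULL;
    }
    for (pp = head; *pp; pp = &(*pp)->next) {
        struct entry *e = *pp;
        if (e->exception == exception &&
            (e->log_type == LOG_TYPE_EPOW || e->log_type == LOG_TYPE_HOTPLUG)) {
            *pp = e->next;                /* unlink and hand back to the caller */
            return e;
        }
    }
    return NULL;
}

int main(void)
{
    struct entry hp = { LOG_TYPE_HOTPLUG, true, NULL };
    struct entry *head = &hp;
    struct entry *e = dequeue(&head, MASK_EPOW, true);

    printf("dequeued type 0x%x, remaining: %s\n",
           e ? e->log_type : 0, head ? "yes" : "no");
    return 0;
}
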
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index f3990fdc32..8cd9dba9ac 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -41,7 +41,7 @@ enum sPAPRTCEAccess {
static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;
-static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
+sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
sPAPRTCETable *tcet;
@@ -52,7 +52,7 @@ static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
}
QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
- if (tcet->liobn == liobn) {
+ if (tcet->liobn == (uint32_t)liobn) {
return tcet;
}
}
@@ -126,11 +126,11 @@ static MemoryRegionIOMMUOps spapr_iommu_ops = {
static int spapr_tce_table_realize(DeviceState *dev)
{
sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
+ uint64_t window_size = (uint64_t)tcet->nb_table << tcet->page_shift;
- if (kvm_enabled()) {
+ if (kvm_enabled() && !(window_size >> 32)) {
tcet->table = kvmppc_create_spapr_tce(tcet->liobn,
- tcet->nb_table <<
- tcet->page_shift,
+ window_size,
&tcet->fd,
tcet->vfio_accel);
}
@@ -161,6 +161,7 @@ sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
bool vfio_accel)
{
sPAPRTCETable *tcet;
+ char tmp[64];
if (spapr_tce_find_by_liobn(liobn)) {
fprintf(stderr, "Attempted to create TCE table with duplicate"
@@ -179,7 +180,8 @@ sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
tcet->nb_table = nb_table;
tcet->vfio_accel = vfio_accel;
- object_property_add_child(OBJECT(owner), "tce-table", OBJECT(tcet), NULL);
+ snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn);
+ object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);
object_property_set_bool(OBJECT(tcet), true, "realized", NULL);
@@ -247,7 +249,7 @@ static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
target_ulong ioba1 = ioba;
target_ulong tce_list = args[2];
target_ulong npages = args[3];
- target_ulong ret = H_PARAMETER;
+ target_ulong ret = H_PARAMETER, tce = 0;
sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
CPUState *cs = CPU(cpu);
hwaddr page_mask, page_size;
@@ -267,7 +269,7 @@ static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
for (i = 0; i < npages; ++i, ioba += page_size) {
target_ulong off = (tce_list & ~SPAPR_TCE_RW) +
i * sizeof(target_ulong);
- target_ulong tce = ldq_phys(cs->as, off);
+ tce = ldq_be_phys(cs->as, off);
ret = put_tce_emu(tcet, ioba, tce);
if (ret) {
@@ -277,11 +279,11 @@ static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
/* Trace last successful or the first problematic entry */
i = i ? (i - 1) : 0;
- trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i,
- ldq_phys(cs->as,
- tce_list + i * sizeof(target_ulong)),
- ret);
-
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
+ } else {
+ trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
+ }
return ret;
}
@@ -315,7 +317,11 @@ static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
break;
}
}
- trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
+ } else {
+ trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
+ }
return ret;
}
@@ -336,7 +342,11 @@ static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
ret = put_tce_emu(tcet, ioba, tce);
}
- trace_spapr_iommu_put(liobn, ioba, tce, ret);
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
+ } else {
+ trace_spapr_iommu_put(liobn, ioba, tce, ret);
+ }
return ret;
}
@@ -376,7 +386,11 @@ static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
args[0] = tce;
}
}
- trace_spapr_iommu_get(liobn, ioba, ret, tce);
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
+ } else {
+ trace_spapr_iommu_get(liobn, ioba, ret, tce);
+ }
return ret;
}
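
A small sketch of the new 32-bit window check in spapr_tce_table_realize(): KVM TCE acceleration is only requested when (uint64_t)nb_table << page_shift fits in 32 bits.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool window_fits_kvm(uint32_t nb_table, uint32_t page_shift)
{
    uint64_t window_size = (uint64_t)nb_table << page_shift;

    return (window_size >> 32) == 0;
}

int main(void)
{
    /* 0x40000000 >> 12 entries of 4K pages -> 1 GiB window: fits */
    printf("%d\n", window_fits_kvm(0x40000000 >> 12, 12));
    /* 2^22 entries of 64K pages -> 256 GiB window: does not fit */
    printf("%d\n", window_fits_kvm(1u << 22, 16));
    return 0;
}
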
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 05f4faca6e..4df3a33db4 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -33,8 +33,11 @@
#include <libfdt.h>
#include "trace.h"
#include "qemu/error-report.h"
+#include "qapi/qmp/qerror.h"
#include "hw/pci/pci_bus.h"
+#include "hw/ppc/spapr_drc.h"
+#include "sysemu/device_tree.h"
/* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
#define RTAS_QUERY_FN 0
@@ -47,7 +50,15 @@
#define RTAS_TYPE_MSI 1
#define RTAS_TYPE_MSIX 2
-static sPAPRPHBState *find_phb(sPAPREnvironment *spapr, uint64_t buid)
+#define _FDT(exp) \
+ do { \
+ int ret = (exp); \
+ if (ret < 0) { \
+ return ret; \
+ } \
+ } while (0)
+
+sPAPRPHBState *spapr_pci_find_phb(sPAPREnvironment *spapr, uint64_t buid)
{
sPAPRPHBState *sphb;
@@ -61,10 +72,10 @@ static sPAPRPHBState *find_phb(sPAPREnvironment *spapr, uint64_t buid)
return NULL;
}
-static PCIDevice *find_dev(sPAPREnvironment *spapr, uint64_t buid,
- uint32_t config_addr)
+PCIDevice *spapr_pci_find_dev(sPAPREnvironment *spapr, uint64_t buid,
+ uint32_t config_addr)
{
- sPAPRPHBState *sphb = find_phb(spapr, buid);
+ sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
int bus_num = (config_addr >> 16) & 0xFF;
int devfn = (config_addr >> 8) & 0xFF;
@@ -95,7 +106,7 @@ static void finish_read_pci_config(sPAPREnvironment *spapr, uint64_t buid,
return;
}
- pci_dev = find_dev(spapr, buid, addr);
+ pci_dev = spapr_pci_find_dev(spapr, buid, addr);
addr = rtas_pci_cfgaddr(addr);
if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
@@ -162,7 +173,7 @@ static void finish_write_pci_config(sPAPREnvironment *spapr, uint64_t buid,
return;
}
- pci_dev = find_dev(spapr, buid, addr);
+ pci_dev = spapr_pci_find_dev(spapr, buid, addr);
addr = rtas_pci_cfgaddr(addr);
if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
@@ -280,9 +291,9 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
/* Find sPAPRPHBState */
- phb = find_phb(spapr, buid);
+ phb = spapr_pci_find_phb(spapr, buid);
if (phb) {
- pdev = find_dev(spapr, buid, config_addr);
+ pdev = spapr_pci_find_dev(spapr, buid, config_addr);
}
if (!phb || !pdev) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -381,9 +392,9 @@ static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
spapr_pci_msi *msi;
/* Find sPAPRPHBState */
- phb = find_phb(spapr, buid);
+ phb = spapr_pci_find_phb(spapr, buid);
if (phb) {
- pdev = find_dev(spapr, buid, config_addr);
+ pdev = spapr_pci_find_dev(spapr, buid, config_addr);
}
if (!phb || !pdev) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -426,7 +437,7 @@ static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
addr = rtas_ld(args, 0);
option = rtas_ld(args, 3);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -461,7 +472,7 @@ static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -479,7 +490,7 @@ static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
switch (option) {
case RTAS_GET_PE_ADDR:
addr = rtas_ld(args, 0);
- pdev = find_dev(spapr, buid, addr);
+ pdev = spapr_pci_find_dev(spapr, buid, addr);
if (!pdev) {
goto param_error_exit;
}
@@ -516,7 +527,7 @@ static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -562,7 +573,7 @@ static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
option = rtas_ld(args, 3);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -596,7 +607,7 @@ static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -631,7 +642,7 @@ static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -731,6 +742,372 @@ static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
return &phb->iommu_as;
}
+/* Macros to operate with address in OF binding to PCI */
+#define b_x(x, p, l) (((x) & ((1<<(l))-1)) << (p))
+#define b_n(x) b_x((x), 31, 1) /* 0 if relocatable */
+#define b_p(x) b_x((x), 30, 1) /* 1 if prefetchable */
+#define b_t(x) b_x((x), 29, 1) /* 1 if the address is aliased */
+#define b_ss(x) b_x((x), 24, 2) /* the space code */
+#define b_bbbbbbbb(x) b_x((x), 16, 8) /* bus number */
+#define b_ddddd(x) b_x((x), 11, 5) /* device number */
+#define b_fff(x) b_x((x), 8, 3) /* function number */
+#define b_rrrrrrrr(x) b_x((x), 0, 8) /* register number */
+
+/* for 'reg'/'assigned-addresses' OF properties */
+#define RESOURCE_CELLS_SIZE 2
+#define RESOURCE_CELLS_ADDRESS 3
+
+typedef struct ResourceFields {
+ uint32_t phys_hi;
+ uint32_t phys_mid;
+ uint32_t phys_lo;
+ uint32_t size_hi;
+ uint32_t size_lo;
+} QEMU_PACKED ResourceFields;
+
+typedef struct ResourceProps {
+ ResourceFields reg[8];
+ ResourceFields assigned[7];
+ uint32_t reg_len;
+ uint32_t assigned_len;
+} ResourceProps;
+
+/* fill in the 'reg'/'assigned-addresses' OF properties for
+ * a PCI device. 'reg' describes resource requirements for a
+ * device's IO/MEM regions, 'assigned-addresses' describes the
+ * actual resource assignments.
+ *
+ * the properties are arrays of ('phys-addr', 'size') pairs describing
+ * the addressable regions of the PCI device, where 'phys-addr' is a
+ * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
+ * (phys.hi, phys.mid, phys.lo), and 'size' is a
+ * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
+ *
+ * phys.hi = 0xYYXXXXZZ, where:
+ * 0xYY = npt000ss
+ * ||| |
+ * ||| +-- space code: 1 if IO region, 2 if MEM region
+ * ||+------ for non-relocatable IO: 1 if aliased
+ * || for relocatable IO: 1 if below 64KB
+ * || for MEM: 1 if below 1MB
+ * |+------- 1 if region is prefetchable
+ * +-------- 1 if region is non-relocatable
+ * 0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
+ * bits respectively
+ * 0xZZ = rrrrrrrr, the register number of the BAR corresponding
+ * to the region
+ *
+ * phys.mid and phys.lo correspond respectively to the hi/lo portions
+ * of the actual address of the region.
+ *
+ * how the phys-addr/size values are used differs slightly between
+ * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
+ * an additional description for the config space region of the
+ * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
+ * to describe the region as relocatable, with an address-mapping
+ * that corresponds directly to the PHB's address space for the
+ * resource. 'assigned-addresses' always has n=1 set with an absolute
+ * address assigned for the resource. in general, 'assigned-addresses'
+ * won't be populated, since addresses for PCI devices are generally
+ * unmapped initially and left to the guest to assign.
+ *
+ * note also that addresses defined in these properties are, at least
+ * for PAPR guests, relative to the PHB's IO/MEM windows, and
+ * correspond directly to the addresses in the BARs.
+ *
+ * in accordance with PCI Bus Binding to Open Firmware,
+ * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
+ * Appendix C.
+ */
+static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
+{
+ int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
+ uint32_t dev_id = (b_bbbbbbbb(bus_num) |
+ b_ddddd(PCI_SLOT(d->devfn)) |
+ b_fff(PCI_FUNC(d->devfn)));
+ ResourceFields *reg, *assigned;
+ int i, reg_idx = 0, assigned_idx = 0;
+
+ /* config space region */
+ reg = &rp->reg[reg_idx++];
+ reg->phys_hi = cpu_to_be32(dev_id);
+ reg->phys_mid = 0;
+ reg->phys_lo = 0;
+ reg->size_hi = 0;
+ reg->size_lo = 0;
+
+ for (i = 0; i < PCI_NUM_REGIONS; i++) {
+ if (!d->io_regions[i].size) {
+ continue;
+ }
+
+ reg = &rp->reg[reg_idx++];
+
+ reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
+ if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
+ reg->phys_hi |= cpu_to_be32(b_ss(1));
+ } else {
+ reg->phys_hi |= cpu_to_be32(b_ss(2));
+ }
+ reg->phys_mid = 0;
+ reg->phys_lo = 0;
+ reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
+ reg->size_lo = cpu_to_be32(d->io_regions[i].size);
+
+ if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
+ continue;
+ }
+
+ assigned = &rp->assigned[assigned_idx++];
+ assigned->phys_hi = cpu_to_be32(reg->phys_hi | b_n(1));
+ assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
+ assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
+ assigned->size_hi = reg->size_hi;
+ assigned->size_lo = reg->size_lo;
+ }
+
+ rp->reg_len = reg_idx * sizeof(ResourceFields);
+ rp->assigned_len = assigned_idx * sizeof(ResourceFields);
+}
+
+static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
+ int phb_index, int drc_index,
+ const char *drc_name)
+{
+ ResourceProps rp;
+ bool is_bridge = false;
+ int pci_status;
+
+ if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
+ PCI_HEADER_TYPE_BRIDGE) {
+ is_bridge = true;
+ }
+
+ /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
+ _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
+ pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
+ _FDT(fdt_setprop_cell(fdt, offset, "device-id",
+ pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
+ _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
+ pci_default_read_config(dev, PCI_REVISION_ID, 1)));
+ _FDT(fdt_setprop_cell(fdt, offset, "class-code",
+ pci_default_read_config(dev, PCI_CLASS_DEVICE, 2)
+ << 8));
+ if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
+ pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
+ }
+
+ if (!is_bridge) {
+ _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
+ pci_default_read_config(dev, PCI_MIN_GNT, 1)));
+ _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
+ pci_default_read_config(dev, PCI_MAX_LAT, 1)));
+ }
+
+ if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
+ pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
+ }
+
+ if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
+ pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
+ }
+
+ _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
+ pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
+
+ /* the following fdt cells are masked off the pci status register */
+ pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
+ _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
+ PCI_STATUS_DEVSEL_MASK & pci_status));
+
+ if (pci_status & PCI_STATUS_FAST_BACK) {
+ _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
+ }
+ if (pci_status & PCI_STATUS_66MHZ) {
+ _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
+ }
+ if (pci_status & PCI_STATUS_UDF) {
+ _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
+ }
+
+ /* NOTE: this is normally generated by firmware via path/unit name,
+ * but in our case we must set it manually since it does not get
+ * processed by OF beforehand
+ */
+ _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
+ _FDT(fdt_setprop(fdt, offset, "ibm,loc-code", drc_name, strlen(drc_name)));
+ _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
+
+ _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
+ RESOURCE_CELLS_ADDRESS));
+ _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
+ RESOURCE_CELLS_SIZE));
+ _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x",
+ RESOURCE_CELLS_SIZE));
+
+ populate_resource_props(dev, &rp);
+ _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
+ _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
+ (uint8_t *)rp.assigned, rp.assigned_len));
+
+ return 0;
+}
+
+/* create OF node for pci device and required OF DT properties */
+static void *spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
+ int drc_index, const char *drc_name,
+ int *dt_offset)
+{
+ void *fdt;
+ int offset, ret, fdt_size;
+ int slot = PCI_SLOT(dev->devfn);
+ int func = PCI_FUNC(dev->devfn);
+ char nodename[512];
+
+ fdt = create_device_tree(&fdt_size);
+ if (func != 0) {
+ sprintf(nodename, "pci@%d,%d", slot, func);
+ } else {
+ sprintf(nodename, "pci@%d", slot);
+ }
+ offset = fdt_add_subnode(fdt, 0, nodename);
+ ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb->index, drc_index,
+ drc_name);
+ g_assert(!ret);
+
+ *dt_offset = offset;
+ return fdt;
+}
+
+static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
+ sPAPRPHBState *phb,
+ PCIDevice *pdev,
+ Error **errp)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ DeviceState *dev = DEVICE(pdev);
+ int drc_index = drck->get_index(drc);
+ const char *drc_name = drck->get_name(drc);
+ void *fdt = NULL;
+ int fdt_start_offset = 0;
+
+ /* boot-time devices get their device tree node created by SLOF, but for
+ * hotplugged devices we need QEMU to generate it so the guest can fetch
+ * it via RTAS
+ */
+ if (dev->hotplugged) {
+ fdt = spapr_create_pci_child_dt(phb, pdev, drc_index, drc_name,
+ &fdt_start_offset);
+ }
+
+ drck->attach(drc, DEVICE(pdev),
+ fdt, fdt_start_offset, !dev->hotplugged, errp);
+ if (*errp) {
+ g_free(fdt);
+ }
+}
+
+static void spapr_phb_remove_pci_device_cb(DeviceState *dev, void *opaque)
+{
+ /* some guest versions do not wait for completion of a device
+ * cleanup (generally done asynchronously by the kernel) before
+ * signaling to QEMU that the device is safe, but instead sleep
+ * for some 'safe' period of time. unfortunately on a busy host
+ * this sleep isn't guaranteed to be long enough, resulting in
+ * bad things like IRQ lines being left asserted during final
+ * device removal. to deal with this we call reset just prior
+ * to finalizing the device, which will put the device back into
+ * an 'idle' state, as the device cleanup code expects.
+ */
+ pci_device_reset(PCI_DEVICE(dev));
+ object_unparent(OBJECT(dev));
+}
+
+static void spapr_phb_remove_pci_device(sPAPRDRConnector *drc,
+ sPAPRPHBState *phb,
+ PCIDevice *pdev,
+ Error **errp)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ drck->detach(drc, DEVICE(pdev), spapr_phb_remove_pci_device_cb, phb, errp);
+}
+
+static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
+ PCIDevice *pdev)
+{
+ uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
+ return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
+ (phb->index << 16) |
+ (busnr << 8) |
+ pdev->devfn);
+}
+
+static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
+{
+ sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
+ PCIDevice *pdev = PCI_DEVICE(plugged_dev);
+ sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
+ Error *local_err = NULL;
+
+ /* if DR is disabled we don't need to do anything in the case of
+ * hotplug or coldplug callbacks
+ */
+ if (!phb->dr_enabled) {
+ /* if this is a hotplug operation initiated by the user
+ * we need to let them know it's not enabled
+ */
+ if (plugged_dev->hotplugged) {
+ error_set(errp, QERR_BUS_NO_HOTPLUG,
+ object_get_typename(OBJECT(phb)));
+ }
+ return;
+ }
+
+ g_assert(drc);
+
+ spapr_phb_add_pci_device(drc, phb, pdev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (plugged_dev->hotplugged) {
+ spapr_hotplug_req_add_event(drc);
+ }
+}
+
+static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
+{
+ sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
+ PCIDevice *pdev = PCI_DEVICE(plugged_dev);
+ sPAPRDRConnectorClass *drck;
+ sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
+ Error *local_err = NULL;
+
+ if (!phb->dr_enabled) {
+ error_set(errp, QERR_BUS_NO_HOTPLUG,
+ object_get_typename(OBJECT(phb)));
+ return;
+ }
+
+ g_assert(drc);
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ if (!drck->release_pending(drc)) {
+ spapr_phb_remove_pci_device(drc, phb, pdev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ spapr_hotplug_req_remove_event(drc);
+ }
+}
+
static void spapr_phb_realize(DeviceState *dev, Error **errp)
{
SysBusDevice *s = SYS_BUS_DEVICE(dev);
@@ -742,12 +1119,12 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
PCIBus *bus;
uint64_t msi_window_size = 4096;
- if (sphb->index != -1) {
+ if (sphb->index != (uint32_t)-1) {
hwaddr windows_base;
- if ((sphb->buid != -1) || (sphb->dma_liobn != -1)
- || (sphb->mem_win_addr != -1)
- || (sphb->io_win_addr != -1)) {
+ if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
+ || (sphb->mem_win_addr != (hwaddr)-1)
+ || (sphb->io_win_addr != (hwaddr)-1)) {
error_setg(errp, "Either \"index\" or other parameters must"
" be specified for PAPR PHB, not both");
return;
@@ -760,7 +1137,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
}
sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
- sphb->dma_liobn = SPAPR_PCI_BASE_LIOBN + sphb->index;
+ sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
windows_base = SPAPR_PCI_WINDOW_BASE
+ sphb->index * SPAPR_PCI_WINDOW_SPACING;
@@ -768,27 +1145,27 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
}
- if (sphb->buid == -1) {
+ if (sphb->buid == (uint64_t)-1) {
error_setg(errp, "BUID not specified for PHB");
return;
}
- if (sphb->dma_liobn == -1) {
+ if (sphb->dma_liobn == (uint32_t)-1) {
error_setg(errp, "LIOBN not specified for PHB");
return;
}
- if (sphb->mem_win_addr == -1) {
+ if (sphb->mem_win_addr == (hwaddr)-1) {
error_setg(errp, "Memory window address not specified for PHB");
return;
}
- if (sphb->io_win_addr == -1) {
+ if (sphb->io_win_addr == (hwaddr)-1) {
error_setg(errp, "IO window address not specified for PHB");
return;
}
- if (find_phb(spapr, sphb->buid)) {
+ if (spapr_pci_find_phb(spapr, sphb->buid)) {
error_setg(errp, "PCI host bridges must have unique BUIDs");
return;
}
@@ -824,6 +1201,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
&sphb->memspace, &sphb->iospace,
PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
phb->bus = bus;
+ qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL);
/*
* Initialize PHB address space.
@@ -880,6 +1258,15 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
sphb->lsi_table[i].irq = irq;
}
+ /* allocate connectors for child PCI devices */
+ if (sphb->dr_enabled) {
+ for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
+ spapr_dr_connector_new(OBJECT(phb),
+ SPAPR_DR_CONNECTOR_TYPE_PCI,
+ (sphb->index << 16) | i);
+ }
+ }
+
if (!info->finish_realize) {
error_setg(errp, "finish_realize not defined");
return;
@@ -893,11 +1280,11 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
static void spapr_phb_finish_realize(sPAPRPHBState *sphb, Error **errp)
{
sPAPRTCETable *tcet;
+ uint32_t nb_table;
+ nb_table = SPAPR_PCI_DMA32_SIZE >> SPAPR_TCE_PAGE_SHIFT;
tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
- 0,
- SPAPR_TCE_PAGE_SHIFT,
- 0x40000000 >> SPAPR_TCE_PAGE_SHIFT, false);
+ 0, SPAPR_TCE_PAGE_SHIFT, nb_table, false);
if (!tcet) {
error_setg(errp, "Unable to create TCE table for %s",
sphb->dtbusname);
@@ -936,6 +1323,8 @@ static Property spapr_phb_properties[] = {
DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
SPAPR_PCI_IO_WIN_SIZE),
+ DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
+ true),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1049,6 +1438,7 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data)
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);
+ HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
hc->root_bus_path = spapr_phb_root_bus_path;
dc->realize = spapr_phb_realize;
@@ -1058,6 +1448,8 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->cannot_instantiate_with_device_add_yet = false;
spc->finish_realize = spapr_phb_finish_realize;
+ hp->plug = spapr_phb_hot_plug_child;
+ hp->unplug = spapr_phb_hot_unplug_child;
}
static const TypeInfo spapr_phb_info = {
@@ -1066,6 +1458,10 @@ static const TypeInfo spapr_phb_info = {
.instance_size = sizeof(sPAPRPHBState),
.class_init = spapr_phb_class_init,
.class_size = sizeof(sPAPRPHBClass),
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ }
};
PCIHostState *spapr_create_phb(sPAPREnvironment *spapr, int index)
@@ -1079,45 +1475,11 @@ PCIHostState *spapr_create_phb(sPAPREnvironment *spapr, int index)
return PCI_HOST_BRIDGE(dev);
}
-/* Macros to operate with address in OF binding to PCI */
-#define b_x(x, p, l) (((x) & ((1<<(l))-1)) << (p))
-#define b_n(x) b_x((x), 31, 1) /* 0 if relocatable */
-#define b_p(x) b_x((x), 30, 1) /* 1 if prefetchable */
-#define b_t(x) b_x((x), 29, 1) /* 1 if the address is aliased */
-#define b_ss(x) b_x((x), 24, 2) /* the space code */
-#define b_bbbbbbbb(x) b_x((x), 16, 8) /* bus number */
-#define b_ddddd(x) b_x((x), 11, 5) /* device number */
-#define b_fff(x) b_x((x), 8, 3) /* function number */
-#define b_rrrrrrrr(x) b_x((x), 0, 8) /* register number */
-
-typedef struct sPAPRTCEDT {
- void *fdt;
- int node_off;
-} sPAPRTCEDT;
-
-static int spapr_phb_children_dt(Object *child, void *opaque)
-{
- sPAPRTCEDT *p = opaque;
- sPAPRTCETable *tcet;
-
- tcet = (sPAPRTCETable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE);
- if (!tcet) {
- return 0;
- }
-
- spapr_dma_dt(p->fdt, p->node_off, "ibm,dma-window",
- tcet->liobn, tcet->bus_offset,
- tcet->nb_table << tcet->page_shift);
- /* Stop after the first window */
-
- return 1;
-}
-
int spapr_populate_pci_dt(sPAPRPHBState *phb,
uint32_t xics_phandle,
void *fdt)
{
- int bus_off, i, j;
+ int bus_off, i, j, ret;
char nodename[256];
uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
const uint64_t mmiosize = memory_region_size(&phb->memwindow);
@@ -1151,6 +1513,7 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
uint32_t interrupt_map_mask[] = {
cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
+ sPAPRTCETable *tcet;
/* Start populating the FDT */
sprintf(nodename, "pci@%" PRIx64, phb->buid);
@@ -1159,14 +1522,6 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
return bus_off;
}
-#define _FDT(exp) \
- do { \
- int ret = (exp); \
- if (ret < 0) { \
- return ret; \
- } \
- } while (0)
-
/* Write PHB properties */
_FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
_FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
@@ -1203,8 +1558,16 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
_FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
sizeof(interrupt_map)));
- object_child_foreach(OBJECT(phb), spapr_phb_children_dt,
- &((sPAPRTCEDT){ .fdt = fdt, .node_off = bus_off }));
+ tcet = spapr_tce_find_by_liobn(SPAPR_PCI_LIOBN(phb->index, 0));
+ spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
+ tcet->liobn, tcet->bus_offset,
+ tcet->nb_table << tcet->page_shift);
+
+ ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
+ SPAPR_DR_CONNECTOR_TYPE_PCI);
+ if (ret) {
+ return ret;
+ }
return 0;
}
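
A worked example of the phys.hi encoding that populate_resource_props() builds with the b_*() macros from this patch, for a 32-bit MEM BAR (config register 0x10) of a device at bus 0, slot 5, function 0.

#include <stdint.h>
#include <stdio.h>

#define b_x(x, p, l)  (((x) & ((1 << (l)) - 1)) << (p))
#define b_n(x)        b_x((x), 31, 1)   /* 0 if relocatable */
#define b_ss(x)       b_x((x), 24, 2)   /* space code: 1 = IO, 2 = MEM */
#define b_bbbbbbbb(x) b_x((x), 16, 8)   /* bus number */
#define b_ddddd(x)    b_x((x), 11, 5)   /* device number */
#define b_fff(x)      b_x((x), 8, 3)    /* function number */
#define b_rrrrrrrr(x) b_x((x), 0, 8)    /* register number */

int main(void)
{
    uint32_t phys_hi = b_ss(2)           /* MEM region */
                     | b_bbbbbbbb(0)
                     | b_ddddd(5)
                     | b_fff(0)
                     | b_rrrrrrrr(0x10); /* first BAR register */

    /* expected output: phys.hi = 0x02002810 */
    printf("phys.hi = 0x%08x\n", phys_hi);
    return 0;
}
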
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index 0f1ae55828..fa28d43f81 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -35,6 +35,55 @@
#include "qapi-event.h"
#include <libfdt.h>
+#include "hw/ppc/spapr_drc.h"
+
+/* #define DEBUG_SPAPR */
+
+#ifdef DEBUG_SPAPR
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+static sPAPRConfigureConnectorState *spapr_ccs_find(sPAPREnvironment *spapr,
+ uint32_t drc_index)
+{
+ sPAPRConfigureConnectorState *ccs = NULL;
+
+ QTAILQ_FOREACH(ccs, &spapr->ccs_list, next) {
+ if (ccs->drc_index == drc_index) {
+ break;
+ }
+ }
+
+ return ccs;
+}
+
+static void spapr_ccs_add(sPAPREnvironment *spapr,
+ sPAPRConfigureConnectorState *ccs)
+{
+ g_assert(!spapr_ccs_find(spapr, ccs->drc_index));
+ QTAILQ_INSERT_HEAD(&spapr->ccs_list, ccs, next);
+}
+
+static void spapr_ccs_remove(sPAPREnvironment *spapr,
+ sPAPRConfigureConnectorState *ccs)
+{
+ QTAILQ_REMOVE(&spapr->ccs_list, ccs, next);
+ g_free(ccs);
+}
+
+void spapr_ccs_reset_hook(void *opaque)
+{
+ sPAPREnvironment *spapr = opaque;
+ sPAPRConfigureConnectorState *ccs, *ccs_tmp;
+
+ QTAILQ_FOREACH_SAFE(ccs, &spapr->ccs_list, next, ccs_tmp) {
+ spapr_ccs_remove(spapr, ccs);
+ }
+}
static void rtas_display_character(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t token, uint32_t nargs,
@@ -245,6 +294,308 @@ static void rtas_ibm_os_term(PowerPCCPU *cpu,
rtas_st(rets, 0, ret);
}
+static void rtas_set_power_level(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ int32_t power_domain;
+
+ if (nargs != 2 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ /* we currently only use a single, "live insert" powerdomain for
+ * hotplugged/dlpar'd resources, so the power is always live/full (100)
+ */
+ power_domain = rtas_ld(args, 0);
+ if (power_domain != -1) {
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+ return;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, 100);
+}
+
+static void rtas_get_power_level(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ int32_t power_domain;
+
+ if (nargs != 1 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ /* we currently only use a single, "live insert" powerdomain for
+ * hotplugged/dlpar'd resources, so the power is always live/full (100)
+ */
+ power_domain = rtas_ld(args, 0);
+ if (power_domain != -1) {
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+ return;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, 100);
+}
+
+static bool sensor_type_is_dr(uint32_t sensor_type)
+{
+ switch (sensor_type) {
+ case RTAS_SENSOR_TYPE_ISOLATION_STATE:
+ case RTAS_SENSOR_TYPE_DR:
+ case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
+ return true;
+ }
+
+ return false;
+}
+
+static void rtas_set_indicator(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t sensor_type;
+ uint32_t sensor_index;
+ uint32_t sensor_state;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+
+ if (nargs != 3 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ sensor_type = rtas_ld(args, 0);
+ sensor_index = rtas_ld(args, 1);
+ sensor_state = rtas_ld(args, 2);
+
+ if (!sensor_type_is_dr(sensor_type)) {
+ goto out_unimplemented;
+ }
+
+ /* if this is a DR sensor we can assume sensor_index == drc_index */
+ drc = spapr_dr_connector_by_index(sensor_index);
+ if (!drc) {
+ DPRINTF("rtas_set_indicator: invalid sensor/DRC index: %xh\n",
+ sensor_index);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ switch (sensor_type) {
+ case RTAS_SENSOR_TYPE_ISOLATION_STATE:
+ /* if the guest is configuring a device attached to this
+ * DRC, we should reset the configuration state at this
+ * point since it may no longer be reliable (guest released
+ * device and needs to start over, or unplug occurred so
+ * the FDT is no longer valid)
+ */
+ if (sensor_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ sPAPRConfigureConnectorState *ccs = spapr_ccs_find(spapr,
+ sensor_index);
+ if (ccs) {
+ spapr_ccs_remove(spapr, ccs);
+ }
+ }
+ drck->set_isolation_state(drc, sensor_state);
+ break;
+ case RTAS_SENSOR_TYPE_DR:
+ drck->set_indicator_state(drc, sensor_state);
+ break;
+ case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
+ drck->set_allocation_state(drc, sensor_state);
+ break;
+ default:
+ goto out_unimplemented;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ return;
+
+out_unimplemented:
+ /* currently only DR-related sensors are implemented */
+ DPRINTF("rtas_set_indicator: sensor/indicator not implemented: %d\n",
+ sensor_type);
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+}
+
+static void rtas_get_sensor_state(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t sensor_type;
+ uint32_t sensor_index;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t entity_sense;
+
+ if (nargs != 2 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ sensor_type = rtas_ld(args, 0);
+ sensor_index = rtas_ld(args, 1);
+
+ if (sensor_type != RTAS_SENSOR_TYPE_ENTITY_SENSE) {
+ /* currently only DR-related sensors are implemented */
+ DPRINTF("rtas_get_sensor_state: sensor/indicator not implemented: %d\n",
+ sensor_type);
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+ return;
+ }
+
+ drc = spapr_dr_connector_by_index(sensor_index);
+ if (!drc) {
+ DPRINTF("rtas_get_sensor_state: invalid sensor/DRC index: %xh\n",
+ sensor_index);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ entity_sense = drck->entity_sense(drc);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, entity_sense);
+}
+
+/* configure-connector work area offsets, int32_t units for field
+ * indexes, bytes for field offset/len values.
+ *
+ * as documented by PAPR+ v2.7, 13.5.3.5
+ */
+#define CC_IDX_NODE_NAME_OFFSET 2
+#define CC_IDX_PROP_NAME_OFFSET 2
+#define CC_IDX_PROP_LEN 3
+#define CC_IDX_PROP_DATA_OFFSET 4
+#define CC_VAL_DATA_OFFSET ((CC_IDX_PROP_DATA_OFFSET + 1) * 4)
+#define CC_WA_LEN 4096
+
+static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
+ sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint64_t wa_addr;
+ uint64_t wa_offset;
+ uint32_t drc_index;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ sPAPRConfigureConnectorState *ccs;
+ sPAPRDRCCResponse resp = SPAPR_DR_CC_RESPONSE_CONTINUE;
+ int rc;
+ const void *fdt;
+
+ if (nargs != 2 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ wa_addr = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 0);
+
+ drc_index = rtas_ld(wa_addr, 0);
+ drc = spapr_dr_connector_by_index(drc_index);
+ if (!drc) {
+ DPRINTF("rtas_ibm_configure_connector: invalid DRC index: %xh\n",
+ drc_index);
+ rc = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ fdt = drck->get_fdt(drc, NULL);
+
+ ccs = spapr_ccs_find(spapr, drc_index);
+ if (!ccs) {
+ ccs = g_new0(sPAPRConfigureConnectorState, 1);
+ (void)drck->get_fdt(drc, &ccs->fdt_offset);
+ ccs->drc_index = drc_index;
+ spapr_ccs_add(spapr, ccs);
+ }
+
+ do {
+ uint32_t tag;
+ const char *name;
+ const struct fdt_property *prop;
+ int fdt_offset_next, prop_len;
+
+ tag = fdt_next_tag(fdt, ccs->fdt_offset, &fdt_offset_next);
+
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ ccs->fdt_depth++;
+ name = fdt_get_name(fdt, ccs->fdt_offset, NULL);
+
+ /* provide the name of the next OF node */
+ wa_offset = CC_VAL_DATA_OFFSET;
+ rtas_st(wa_addr, CC_IDX_NODE_NAME_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)name, strlen(name) + 1);
+ resp = SPAPR_DR_CC_RESPONSE_NEXT_CHILD;
+ break;
+ case FDT_END_NODE:
+ ccs->fdt_depth--;
+ if (ccs->fdt_depth == 0) {
+ /* done sending the device tree, don't need to track
+ * the state anymore
+ */
+ drck->set_configured(drc);
+ spapr_ccs_remove(spapr, ccs);
+ ccs = NULL;
+ resp = SPAPR_DR_CC_RESPONSE_SUCCESS;
+ } else {
+ resp = SPAPR_DR_CC_RESPONSE_PREV_PARENT;
+ }
+ break;
+ case FDT_PROP:
+ prop = fdt_get_property_by_offset(fdt, ccs->fdt_offset,
+ &prop_len);
+ name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+
+ /* provide the name of the next OF property */
+ wa_offset = CC_VAL_DATA_OFFSET;
+ rtas_st(wa_addr, CC_IDX_PROP_NAME_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)name, strlen(name) + 1);
+
+ /* provide the length and value of the OF property. data gets
+ * placed immediately after the NULL terminator of the OF property's
+ * name string
+ */
+ wa_offset += strlen(name) + 1;
+ rtas_st(wa_addr, CC_IDX_PROP_LEN, prop_len);
+ rtas_st(wa_addr, CC_IDX_PROP_DATA_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)((struct fdt_property *)prop)->data,
+ prop_len);
+ resp = SPAPR_DR_CC_RESPONSE_NEXT_PROPERTY;
+ break;
+ case FDT_END:
+ resp = SPAPR_DR_CC_RESPONSE_ERROR;
+ default:
+ /* keep seeking for an actionable tag */
+ break;
+ }
+ if (ccs) {
+ ccs->fdt_offset = fdt_offset_next;
+ }
+ } while (resp == SPAPR_DR_CC_RESPONSE_CONTINUE);
+
+ rc = resp;
+out:
+ rtas_st(rets, 0, rc);
+}
+
static struct rtas_call {
const char *name;
spapr_rtas_fn fn;
@@ -370,6 +721,16 @@ static void core_rtas_register_types(void)
rtas_ibm_set_system_parameter);
spapr_rtas_register(RTAS_IBM_OS_TERM, "ibm,os-term",
rtas_ibm_os_term);
+ spapr_rtas_register(RTAS_SET_POWER_LEVEL, "set-power-level",
+ rtas_set_power_level);
+ spapr_rtas_register(RTAS_GET_POWER_LEVEL, "get-power-level",
+ rtas_get_power_level);
+ spapr_rtas_register(RTAS_SET_INDICATOR, "set-indicator",
+ rtas_set_indicator);
+ spapr_rtas_register(RTAS_GET_SENSOR_STATE, "get-sensor-state",
+ rtas_get_sensor_state);
+ spapr_rtas_register(RTAS_IBM_CONFIGURE_CONNECTOR, "ibm,configure-connector",
+ rtas_ibm_configure_connector);
}
type_init(core_rtas_register_types)
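
A sketch of the configure-connector work-area layout that rtas_ibm_configure_connector() writes for an FDT_PROP tag, using the CC_* offsets defined above. The property name and value are made-up examples, and the guest-big-endian stores done by rtas_st()/rtas_st_buffer_direct() are omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CC_IDX_PROP_NAME_OFFSET 2
#define CC_IDX_PROP_LEN         3
#define CC_IDX_PROP_DATA_OFFSET 4
#define CC_VAL_DATA_OFFSET      ((CC_IDX_PROP_DATA_OFFSET + 1) * 4)
#define CC_WA_LEN               4096

int main(void)
{
    uint32_t cells[CC_WA_LEN / 4] = { 0 };
    uint8_t *bytes = (uint8_t *)cells;
    const char *name = "ibm,my-drc-index";      /* example property name */
    uint32_t data = 0x40000028;                 /* example property value */
    unsigned name_off = CC_VAL_DATA_OFFSET;
    unsigned data_off = name_off + strlen(name) + 1;

    /* the real code byte-swaps these fields for the guest; skipped here */
    cells[CC_IDX_PROP_NAME_OFFSET] = name_off;
    cells[CC_IDX_PROP_LEN]         = sizeof(data);
    cells[CC_IDX_PROP_DATA_OFFSET] = data_off;
    memcpy(bytes + name_off, name, strlen(name) + 1);
    memcpy(bytes + data_off, &data, sizeof(data));

    printf("name at byte %u, %u bytes of data at byte %u\n",
           name_off, (unsigned)cells[CC_IDX_PROP_LEN], data_off);
    return 0;
}
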
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index 1360b97ab0..174033dd41 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -469,7 +469,7 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
}
if (pc->rtce_window_size) {
- uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg;
+ uint32_t liobn = SPAPR_VIO_LIOBN(dev->reg);
memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root",
ram_size);
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 8a565f657a..c574988c36 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -216,6 +216,7 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data)
mc->no_sdcard = 1;
mc->use_sclp = 1;
mc->max_cpus = 255;
+ mc->is_default = 1;
nc->nmi_monitor_handler = s390_nmi;
}
diff --git a/hw/s390x/s390-virtio.c b/hw/s390x/s390-virtio.c
index 59750dbfcd..00ea793651 100644
--- a/hw/s390x/s390-virtio.c
+++ b/hw/s390x/s390-virtio.c
@@ -345,7 +345,6 @@ static void s390_machine_class_init(ObjectClass *oc, void *data)
mc->no_floppy = 1;
mc->no_cdrom = 1;
mc->no_sdcard = 1;
- mc->is_default = 1;
nc->nmi_monitor_handler = s390_nmi;
}
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 3ef0055126..e32ada9bf1 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -1,8 +1,9 @@
/*
* virtio ccw target implementation
*
- * Copyright 2012,2014 IBM Corp.
+ * Copyright 2012,2015 IBM Corp.
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Pierre Morel <pmorel@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
@@ -1314,6 +1315,7 @@ static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
SubchDev *s = dev->sch;
+ VirtIODevice *vdev = virtio_ccw_get_vdev(s);
subch_device_save(s, f);
if (dev->indicators != NULL) {
@@ -1337,6 +1339,7 @@ static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
qemu_put_be32(f, 0);
qemu_put_be64(f, 0UL);
}
+ qemu_put_be16(f, vdev->config_vector);
qemu_put_be64(f, dev->routes.adapter.ind_offset);
qemu_put_byte(f, dev->thinint_isc);
}
@@ -1345,6 +1348,7 @@ static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
SubchDev *s = dev->sch;
+ VirtIODevice *vdev = virtio_ccw_get_vdev(s);
int len;
s->driver_data = dev;
@@ -1370,6 +1374,7 @@ static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
qemu_get_be64(f);
dev->summary_indicator = NULL;
}
+ qemu_get_be16s(f, &vdev->config_vector);
dev->routes.adapter.ind_offset = qemu_get_be64(f);
dev->thinint_isc = qemu_get_byte(f);
if (s->thinint_active) {
@@ -1396,6 +1401,10 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
return;
}
+ if (!kvm_eventfds_enabled()) {
+ dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
+ }
+
sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
@@ -1734,6 +1743,56 @@ static const TypeInfo virtio_ccw_bus_info = {
.class_init = virtio_ccw_bus_class_init,
};
+#ifdef CONFIG_VIRTFS
+static Property virtio_ccw_9p_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_9p_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ V9fsCCWState *dev = VIRTIO_9P_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void virtio_ccw_9p_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->exit = virtio_ccw_exit;
+ k->realize = virtio_ccw_9p_realize;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_9p_properties;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static void virtio_ccw_9p_instance_init(Object *obj)
+{
+ V9fsCCWState *dev = VIRTIO_9P_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_9P);
+}
+
+static const TypeInfo virtio_ccw_9p_info = {
+ .name = TYPE_VIRTIO_9P_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(V9fsCCWState),
+ .instance_init = virtio_ccw_9p_instance_init,
+ .class_init = virtio_ccw_9p_class_init,
+};
+#endif
+
static void virtio_ccw_register(void)
{
type_register_static(&virtio_ccw_bus_info);
@@ -1749,6 +1808,9 @@ static void virtio_ccw_register(void)
#endif
type_register_static(&virtio_ccw_rng);
type_register_static(&virtual_css_bridge_info);
+#ifdef CONFIG_VIRTFS
+ type_register_static(&virtio_ccw_9p_info);
+#endif
}
type_init(virtio_ccw_register)
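
With TYPE_VIRTIO_9P_CCW registered, a 9p share can be attached to an s390x guest over the CCW transport. A hedged usage sketch: the fsdev id, path, mount tag and devno below are illustrative, and the fsdev/mount_tag properties come from the common virtio-9p device rather than this file.

    qemu-system-s390x ... \
        -fsdev local,id=fs0,path=/srv/share,security_model=none \
        -device virtio-9p-ccw,fsdev=fs0,mount_tag=hostshare,devno=fe.0.0003
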
diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h
index ad3af7626a..d729263960 100644
--- a/hw/s390x/virtio-ccw.h
+++ b/hw/s390x/virtio-ccw.h
@@ -1,8 +1,9 @@
/*
* virtio ccw target definitions
*
- * Copyright 2012 IBM Corp.
+ * Copyright 2012,2015 IBM Corp.
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Pierre Morel <pmorel@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
@@ -189,4 +190,19 @@ typedef struct VirtIORNGCcw {
VirtualCssBus *virtual_css_bus_init(void);
void virtio_ccw_device_update_status(SubchDev *sch);
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch);
+
+#ifdef CONFIG_VIRTFS
+#include "hw/9pfs/virtio-9p.h"
+
+#define TYPE_VIRTIO_9P_CCW "virtio-9p-ccw"
+#define VIRTIO_9P_CCW(obj) \
+ OBJECT_CHECK(V9fsCCWState, (obj), TYPE_VIRTIO_9P_CCW)
+
+typedef struct V9fsCCWState {
+ VirtioCcwDevice parent_obj;
+ V9fsState vdev;
+} V9fsCCWState;
+
+#endif /* CONFIG_VIRTFS */
+
#endif
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index bd2c0e4caa..f50b2f08af 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -1968,6 +1968,7 @@ static const VMStateDescription vmstate_scsi_sense_state = {
.name = "SCSIDevice/sense",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = scsi_sense_state_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
SCSI_SENSE_BUF_SIZE_OLD,
@@ -1998,13 +1999,9 @@ const VMStateDescription vmstate_scsi_device = {
},
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_scsi_sense_state,
- .needed = scsi_sense_state_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_scsi_sense_state,
+ NULL
}
};
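
The hunk above is one instance of a conversion repeated throughout this series: the .needed predicate moves into the subsection's own VMStateDescription, and the parent lists its subsections as a NULL-terminated pointer array instead of a VMStateSubsection table. A minimal sketch of the new layout, assuming QEMU's vmstate headers; FooState, its bar field and foo_bar_needed are placeholders.

    typedef struct FooState {
        uint8_t bar;
    } FooState;

    static bool foo_bar_needed(void *opaque)
    {
        FooState *s = opaque;
        return s->bar != 0;
    }

    static const VMStateDescription vmstate_foo_bar = {
        .name = "foo/bar",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = foo_bar_needed,            /* moved out of VMStateSubsection */
        .fields = (VMStateField[]) {
            VMSTATE_UINT8(bar, FooState),
            VMSTATE_END_OF_LIST()
        }
    };

    static const VMStateDescription vmstate_foo = {
        .name = "foo",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_END_OF_LIST()
        },
        .subsections = (const VMStateDescription*[]) {
            &vmstate_foo_bar,
            NULL                             /* replaces the old empty terminator */
        }
    };
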
diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c
index ac3ab39bea..d1fe6d58e8 100644
--- a/hw/sd/pxa2xx_mmci.c
+++ b/hw/sd/pxa2xx_mmci.c
@@ -48,7 +48,6 @@ struct PXA2xxMMCIState {
int resp_len;
int cmdreq;
- int ac_width;
};
#define MMC_STRPCL 0x00 /* MMC Clock Start/Stop register */
@@ -215,7 +214,7 @@ static void pxa2xx_mmci_wakequeues(PXA2xxMMCIState *s)
pxa2xx_mmci_fifo_update(s);
}
-static uint32_t pxa2xx_mmci_read(void *opaque, hwaddr offset)
+static uint64_t pxa2xx_mmci_read(void *opaque, hwaddr offset, unsigned size)
{
PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
uint32_t ret;
@@ -257,8 +256,8 @@ static uint32_t pxa2xx_mmci_read(void *opaque, hwaddr offset)
return 0;
case MMC_RXFIFO:
ret = 0;
- while (s->ac_width -- && s->rx_len) {
- ret |= s->rx_fifo[s->rx_start ++] << (s->ac_width << 3);
+ while (size-- && s->rx_len) {
+ ret |= s->rx_fifo[s->rx_start++] << (size << 3);
s->rx_start &= 0x1f;
s->rx_len --;
}
@@ -277,7 +276,7 @@ static uint32_t pxa2xx_mmci_read(void *opaque, hwaddr offset)
}
static void pxa2xx_mmci_write(void *opaque,
- hwaddr offset, uint32_t value)
+ hwaddr offset, uint64_t value, unsigned size)
{
PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
@@ -370,9 +369,9 @@ static void pxa2xx_mmci_write(void *opaque,
break;
case MMC_TXFIFO:
- while (s->ac_width -- && s->tx_len < 0x20)
+ while (size-- && s->tx_len < 0x20)
s->tx_fifo[(s->tx_start + (s->tx_len ++)) & 0x1f] =
- (value >> (s->ac_width << 3)) & 0xff;
+ (value >> (size << 3)) & 0xff;
s->intreq &= ~INT_TXFIFO_REQ;
pxa2xx_mmci_fifo_update(s);
break;
@@ -386,60 +385,9 @@ static void pxa2xx_mmci_write(void *opaque,
}
}
-static uint32_t pxa2xx_mmci_readb(void *opaque, hwaddr offset)
-{
- PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
- s->ac_width = 1;
- return pxa2xx_mmci_read(opaque, offset);
-}
-
-static uint32_t pxa2xx_mmci_readh(void *opaque, hwaddr offset)
-{
- PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
- s->ac_width = 2;
- return pxa2xx_mmci_read(opaque, offset);
-}
-
-static uint32_t pxa2xx_mmci_readw(void *opaque, hwaddr offset)
-{
- PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
- s->ac_width = 4;
- return pxa2xx_mmci_read(opaque, offset);
-}
-
-static void pxa2xx_mmci_writeb(void *opaque,
- hwaddr offset, uint32_t value)
-{
- PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
- s->ac_width = 1;
- pxa2xx_mmci_write(opaque, offset, value);
-}
-
-static void pxa2xx_mmci_writeh(void *opaque,
- hwaddr offset, uint32_t value)
-{
- PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
- s->ac_width = 2;
- pxa2xx_mmci_write(opaque, offset, value);
-}
-
-static void pxa2xx_mmci_writew(void *opaque,
- hwaddr offset, uint32_t value)
-{
- PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque;
- s->ac_width = 4;
- pxa2xx_mmci_write(opaque, offset, value);
-}
-
static const MemoryRegionOps pxa2xx_mmci_ops = {
- .old_mmio = {
- .read = { pxa2xx_mmci_readb,
- pxa2xx_mmci_readh,
- pxa2xx_mmci_readw, },
- .write = { pxa2xx_mmci_writeb,
- pxa2xx_mmci_writeh,
- pxa2xx_mmci_writew, },
- },
+ .read = pxa2xx_mmci_read,
+ .write = pxa2xx_mmci_write,
.endianness = DEVICE_NATIVE_ENDIAN,
};
diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c
index 4221060308..5e22ed79b2 100644
--- a/hw/sh4/r2d.c
+++ b/hw/sh4/r2d.c
@@ -127,7 +127,7 @@ static void r2d_fpga_irq_set(void *opaque, int n, int level)
update_irl(fpga);
}
-static uint32_t r2d_fpga_read(void *opaque, hwaddr addr)
+static uint64_t r2d_fpga_read(void *opaque, hwaddr addr, unsigned int size)
{
r2d_fpga_t *s = opaque;
@@ -146,7 +146,7 @@ static uint32_t r2d_fpga_read(void *opaque, hwaddr addr)
}
static void
-r2d_fpga_write(void *opaque, hwaddr addr, uint32_t value)
+r2d_fpga_write(void *opaque, hwaddr addr, uint64_t value, unsigned int size)
{
r2d_fpga_t *s = opaque;
@@ -170,10 +170,10 @@ r2d_fpga_write(void *opaque, hwaddr addr, uint32_t value)
}
static const MemoryRegionOps r2d_fpga_ops = {
- .old_mmio = {
- .read = { r2d_fpga_read, r2d_fpga_read, NULL, },
- .write = { r2d_fpga_write, r2d_fpga_write, NULL, },
- },
+ .read = r2d_fpga_read,
+ .write = r2d_fpga_write,
+ .impl.min_access_size = 2,
+ .impl.max_access_size = 2,
.endianness = DEVICE_NATIVE_ENDIAN,
};
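
Both the pxa2xx_mmci and r2d hunks above replace the legacy .old_mmio per-width handler tables with a single size-aware read/write pair, letting .impl constrain the access widths. A minimal sketch of the resulting shape, assuming QEMU's memory API and bitops headers; FooState and its single reg field are placeholders.

    typedef struct FooState {
        uint32_t reg;
    } FooState;

    static uint64_t foo_read(void *opaque, hwaddr offset, unsigned size)
    {
        FooState *s = opaque;

        /* 'size' (1, 2 or 4 bytes) replaces the old readb/readh/readw split */
        return extract32(s->reg, 0, size * 8);
    }

    static void foo_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
    {
        FooState *s = opaque;

        s->reg = deposit32(s->reg, 0, size * 8, value);
    }

    static const MemoryRegionOps foo_ops = {
        .read = foo_read,
        .write = foo_write,
        .impl.min_access_size = 1,   /* constrain widths here, as r2d does above */
        .impl.max_access_size = 4,
        .endianness = DEVICE_NATIVE_ENDIAN,
    };
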
diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c
index a69bf2da45..68ac4d8bba 100644
--- a/hw/sparc/sun4m.c
+++ b/hw/sparc/sun4m.c
@@ -124,7 +124,7 @@ void DMA_register_channel (int nchan,
static void fw_cfg_boot_set(void *opaque, const char *boot_device,
Error **errp)
{
- fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
+ fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
}
static void nvram_init(Nvram *nvram, uint8_t *macaddr,
@@ -897,7 +897,6 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,
espdma_irq, ledma_irq;
qemu_irq esp_reset, dma_enable;
qemu_irq fdc_tc;
- qemu_irq *cpu_halt;
unsigned long kernel_size;
DriveInfo *fd[MAX_FD];
FWCfgState *fw_cfg;
@@ -1024,9 +1023,8 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,
escc_init(hwdef->serial_base, slavio_irq[15], slavio_irq[15],
serial_hds[0], serial_hds[1], ESCC_CLOCK, 1);
- cpu_halt = qemu_allocate_irqs(cpu_halt_signal, NULL, 1);
if (hwdef->apc_base) {
- apc_init(hwdef->apc_base, cpu_halt[0]);
+ apc_init(hwdef->apc_base, qemu_allocate_irq(cpu_halt_signal, NULL, 0));
}
if (hwdef->fd_base) {
@@ -1036,7 +1034,7 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,
sun4m_fdctrl_init(slavio_irq[22], hwdef->fd_base, fd,
&fdc_tc);
} else {
- fdc_tc = *qemu_allocate_irqs(dummy_fdc_tc, NULL, 1);
+ fdc_tc = qemu_allocate_irq(dummy_fdc_tc, NULL, 0);
}
slavio_misc_init(hwdef->slavio_base, hwdef->aux1_base, hwdef->aux2_base,
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index 6f34e87935..30cfa0e0a0 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -127,7 +127,7 @@ void DMA_register_channel (int nchan,
static void fw_cfg_boot_set(void *opaque, const char *boot_device,
Error **errp)
{
- fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
+ fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
}
static int sun4u_NVRAM_set_params(Nvram *nvram, uint16_t NVRAM_size,
diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c
index 145291016b..d53f39ad62 100644
--- a/hw/timer/arm_timer.c
+++ b/hw/timer/arm_timer.c
@@ -280,14 +280,12 @@ static int sp804_init(SysBusDevice *sbd)
{
DeviceState *dev = DEVICE(sbd);
SP804State *s = SP804(dev);
- qemu_irq *qi;
- qi = qemu_allocate_irqs(sp804_set_irq, s, 2);
sysbus_init_irq(sbd, &s->irq);
s->timer[0] = arm_timer_init(s->freq0);
s->timer[1] = arm_timer_init(s->freq1);
- s->timer[0]->irq = qi[0];
- s->timer[1]->irq = qi[1];
+ s->timer[0]->irq = qemu_allocate_irq(sp804_set_irq, s, 0);
+ s->timer[1]->irq = qemu_allocate_irq(sp804_set_irq, s, 1);
memory_region_init_io(&s->iomem, OBJECT(s), &sp804_ops, s,
"sp804", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_mmio(sbd, &s->iomem);
diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c
index b6b8a2063d..b50071ef93 100644
--- a/hw/timer/hpet.c
+++ b/hw/timer/hpet.c
@@ -283,6 +283,7 @@ static const VMStateDescription vmstate_hpet_rtc_irq_level = {
.name = "hpet/rtc_irq_level",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = hpet_rtc_irq_level_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(rtc_irq_level, HPETState),
VMSTATE_END_OF_LIST()
@@ -322,13 +323,9 @@ static const VMStateDescription vmstate_hpet = {
vmstate_hpet_timer, HPETTimer),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_hpet_rtc_irq_level,
- .needed = hpet_rtc_irq_level_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_hpet_rtc_irq_level,
+ NULL
}
};
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index f2b77fa118..32048258c9 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -733,22 +733,23 @@ static int rtc_post_load(void *opaque, int version_id)
return 0;
}
+static bool rtc_irq_reinject_on_ack_count_needed(void *opaque)
+{
+ RTCState *s = (RTCState *)opaque;
+ return s->irq_reinject_on_ack_count != 0;
+}
+
static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = {
.name = "mc146818rtc/irq_reinject_on_ack_count",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = rtc_irq_reinject_on_ack_count_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT16(irq_reinject_on_ack_count, RTCState),
VMSTATE_END_OF_LIST()
}
};
-static bool rtc_irq_reinject_on_ack_count_needed(void *opaque)
-{
- RTCState *s = (RTCState *)opaque;
- return s->irq_reinject_on_ack_count != 0;
-}
-
static const VMStateDescription vmstate_rtc = {
.name = "mc146818rtc",
.version_id = 3,
@@ -770,13 +771,9 @@ static const VMStateDescription vmstate_rtc = {
VMSTATE_UINT64_V(next_alarm_time, RTCState, 3),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_rtc_irq_reinject_on_ack_count,
- .needed = rtc_irq_reinject_on_ack_count_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_rtc_irq_reinject_on_ack_count,
+ NULL
}
};
diff --git a/hw/unicore32/puv3.c b/hw/unicore32/puv3.c
index cc9a21a712..703e29d6d3 100644
--- a/hw/unicore32/puv3.c
+++ b/hw/unicore32/puv3.c
@@ -40,15 +40,15 @@ static void puv3_intc_cpu_handler(void *opaque, int irq, int level)
static void puv3_soc_init(CPUUniCore32State *env)
{
- qemu_irq *cpu_intc, irqs[PUV3_IRQS_NR];
+ qemu_irq cpu_intc, irqs[PUV3_IRQS_NR];
DeviceState *dev;
MemoryRegion *i8042 = g_new(MemoryRegion, 1);
int i;
/* Initialize interrupt controller */
- cpu_intc = qemu_allocate_irqs(puv3_intc_cpu_handler,
- uc32_env_get_cpu(env), 1);
- dev = sysbus_create_simple("puv3_intc", PUV3_INTC_BASE, *cpu_intc);
+ cpu_intc = qemu_allocate_irq(puv3_intc_cpu_handler,
+ uc32_env_get_cpu(env), 0);
+ dev = sysbus_create_simple("puv3_intc", PUV3_INTC_BASE, cpu_intc);
for (i = 0; i < PUV3_IRQS_NR; i++) {
irqs[i] = qdev_get_gpio_in(dev, i);
}
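
The sun4m, sp804 and puv3 hunks share one cleanup: when only a single input line is needed, qemu_allocate_irq() replaces a one-element qemu_allocate_irqs() array whose storage was never freed. A short comparison sketch of the two forms; handler and opaque are placeholders.

    /* old: allocate a one-element array and keep only element 0 */
    qemu_irq *irqs = qemu_allocate_irqs(handler, opaque, 1);
    qemu_irq irq_old = irqs[0];           /* the array itself is leaked */

    /* new: allocate exactly one IRQ, numbered 0 */
    qemu_irq irq_new = qemu_allocate_irq(handler, opaque, 0);
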
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index 1a22c9c0cb..7d65818064 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -2034,6 +2034,7 @@ static const VMStateDescription vmstate_ohci_eof_timer = {
.version_id = 1,
.minimum_version_id = 1,
.pre_load = ohci_eof_timer_pre_load,
+ .needed = ohci_eof_timer_needed,
.fields = (VMStateField[]) {
VMSTATE_TIMER_PTR(eof_timer, OHCIState),
VMSTATE_END_OF_LIST()
@@ -2081,13 +2082,9 @@ static const VMStateDescription vmstate_ohci_state = {
VMSTATE_BOOL(async_complete, OHCIState),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_ohci_eof_timer,
- .needed = ohci_eof_timer_needed,
- } , {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_ohci_eof_timer,
+ NULL
}
};
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index 242a654583..6b4218c037 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -2257,40 +2257,42 @@ static const VMStateInfo usbredir_ep_bufpq_vmstate_info = {
/* For endp_data migration */
+static bool usbredir_bulk_receiving_needed(void *priv)
+{
+ struct endp_data *endp = priv;
+
+ return endp->bulk_receiving_started;
+}
+
static const VMStateDescription usbredir_bulk_receiving_vmstate = {
.name = "usb-redir-ep/bulk-receiving",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = usbredir_bulk_receiving_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(bulk_receiving_started, struct endp_data),
VMSTATE_END_OF_LIST()
}
};
-static bool usbredir_bulk_receiving_needed(void *priv)
+static bool usbredir_stream_needed(void *priv)
{
struct endp_data *endp = priv;
- return endp->bulk_receiving_started;
+ return endp->max_streams;
}
static const VMStateDescription usbredir_stream_vmstate = {
.name = "usb-redir-ep/stream-state",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = usbredir_stream_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT32(max_streams, struct endp_data),
VMSTATE_END_OF_LIST()
}
};
-static bool usbredir_stream_needed(void *priv)
-{
- struct endp_data *endp = priv;
-
- return endp->max_streams;
-}
-
static const VMStateDescription usbredir_ep_vmstate = {
.name = "usb-redir-ep",
.version_id = 1,
@@ -2318,16 +2320,10 @@ static const VMStateDescription usbredir_ep_vmstate = {
VMSTATE_INT32(bufpq_target_size, struct endp_data),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &usbredir_bulk_receiving_vmstate,
- .needed = usbredir_bulk_receiving_needed,
- }, {
- .vmsd = &usbredir_stream_vmstate,
- .needed = usbredir_stream_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &usbredir_bulk_receiving_vmstate,
+ &usbredir_stream_vmstate,
+ NULL
}
};
diff --git a/hw/vfio/Makefile.objs b/hw/vfio/Makefile.objs
index e31f30ec09..d540c9d140 100644
--- a/hw/vfio/Makefile.objs
+++ b/hw/vfio/Makefile.objs
@@ -1,4 +1,6 @@
ifeq ($(CONFIG_LINUX), y)
obj-$(CONFIG_SOFTMMU) += common.o
obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_SOFTMMU) += platform.o
+obj-$(CONFIG_SOFTMMU) += calxeda-xgmac.o
endif
diff --git a/hw/vfio/calxeda-xgmac.c b/hw/vfio/calxeda-xgmac.c
new file mode 100644
index 0000000000..eb914f0d0b
--- /dev/null
+++ b/hw/vfio/calxeda-xgmac.c
@@ -0,0 +1,55 @@
+/*
+ * calxeda xgmac VFIO device
+ *
+ * Copyright Linaro Limited, 2014
+ *
+ * Authors:
+ * Eric Auger <eric.auger@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "hw/vfio/vfio-calxeda-xgmac.h"
+
+static void calxeda_xgmac_realize(DeviceState *dev, Error **errp)
+{
+ VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
+ VFIOCalxedaXgmacDeviceClass *k = VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(dev);
+
+ vdev->compat = g_strdup("calxeda,hb-xgmac");
+
+ k->parent_realize(dev, errp);
+}
+
+static const VMStateDescription vfio_platform_calxeda_xgmac_vmstate = {
+ .name = TYPE_VFIO_CALXEDA_XGMAC,
+ .unmigratable = 1,
+};
+
+static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VFIOCalxedaXgmacDeviceClass *vcxc =
+ VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass);
+ vcxc->parent_realize = dc->realize;
+ dc->realize = calxeda_xgmac_realize;
+ dc->desc = "VFIO Calxeda XGMAC";
+ dc->vmsd = &vfio_platform_calxeda_xgmac_vmstate;
+}
+
+static const TypeInfo vfio_calxeda_xgmac_dev_info = {
+ .name = TYPE_VFIO_CALXEDA_XGMAC,
+ .parent = TYPE_VFIO_PLATFORM,
+ .instance_size = sizeof(VFIOCalxedaXgmacDevice),
+ .class_init = vfio_calxeda_xgmac_class_init,
+ .class_size = sizeof(VFIOCalxedaXgmacDeviceClass),
+};
+
+static void register_calxeda_xgmac_dev_type(void)
+{
+ type_register_static(&vfio_calxeda_xgmac_dev_info);
+}
+
+type_init(register_calxeda_xgmac_dev_type)
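
With the concrete vfio-calxeda-xgmac type in place (the parent vfio-platform type added below is abstract), the device can be assigned from the command line through its "host" property, which names the device under /sys/bus/platform/devices/. A hedged usage sketch; the platform device name shown is illustrative.

    qemu-system-aarch64 ... \
        -device vfio-calxeda-xgmac,host=fff51000.ethernet
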
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
new file mode 100644
index 0000000000..5c678b914e
--- /dev/null
+++ b/hw/vfio/platform.c
@@ -0,0 +1,615 @@
+/*
+ * vfio based device assignment support - platform devices
+ *
+ * Copyright Linaro Limited, 2014
+ *
+ * Authors:
+ * Kim Phillips <kim.phillips@linaro.org>
+ * Eric Auger <eric.auger@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on vfio based PCI device assignment support:
+ * Copyright Red Hat, Inc. 2012
+ */
+
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
+#include "hw/vfio/vfio-platform.h"
+#include "qemu/error-report.h"
+#include "qemu/range.h"
+#include "sysemu/sysemu.h"
+#include "exec/memory.h"
+#include "qemu/queue.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+#include "hw/platform-bus.h"
+
+/*
+ * Functions used whatever the injection method
+ */
+
+/**
+ * vfio_init_intp - allocate and initialize an IRQ struct,
+ * and add it to the device's list of IRQs
+ * @vbasedev: the VFIO device handle
+ * @info: irq info struct retrieved from VFIO driver
+ */
+static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
+ struct vfio_irq_info info)
+{
+ int ret;
+ VFIOPlatformDevice *vdev =
+ container_of(vbasedev, VFIOPlatformDevice, vbasedev);
+ SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
+ VFIOINTp *intp;
+
+ intp = g_malloc0(sizeof(*intp));
+ intp->vdev = vdev;
+ intp->pin = info.index;
+ intp->flags = info.flags;
+ intp->state = VFIO_IRQ_INACTIVE;
+
+ sysbus_init_irq(sbdev, &intp->qemuirq);
+
+ /* Get an eventfd for trigger */
+ ret = event_notifier_init(&intp->interrupt, 0);
+ if (ret) {
+ g_free(intp);
+ error_report("vfio: Error: trigger event_notifier_init failed ");
+ return NULL;
+ }
+
+ QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
+ return intp;
+}
+
+/**
+ * vfio_set_trigger_eventfd - set VFIO eventfd handling
+ *
+ * @intp: IRQ struct handle
+ * @handler: handler to be called on eventfd signaling
+ *
+ * Set up VFIO signaling and attach an optional user-side handler
+ * to the eventfd
+ */
+static int vfio_set_trigger_eventfd(VFIOINTp *intp,
+ eventfd_user_side_handler_t handler)
+{
+ VFIODevice *vbasedev = &intp->vdev->vbasedev;
+ struct vfio_irq_set *irq_set;
+ int argsz, ret;
+ int32_t *pfd;
+
+ argsz = sizeof(*irq_set) + sizeof(*pfd);
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = intp->pin;
+ irq_set->start = 0;
+ irq_set->count = 1;
+ pfd = (int32_t *)&irq_set->data;
+ *pfd = event_notifier_get_fd(&intp->interrupt);
+ qemu_set_fd_handler(*pfd, (IOHandler *)handler, NULL, intp);
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ g_free(irq_set);
+ if (ret < 0) {
+ error_report("vfio: Failed to set trigger eventfd: %m");
+ qemu_set_fd_handler(*pfd, NULL, NULL, NULL);
+ }
+ return ret;
+}
+
+/*
+ * Functions only used when eventfds are handled on user-side
+ * ie. without irqfd
+ */
+
+/**
+ * vfio_mmap_set_enabled - enable/disable the fast path mode
+ * @vdev: the VFIO platform device
+ * @enabled: the target mmap state
+ *
+ * enabled = true ~ fast path = MMIO region is mmapped (no KVM trap);
+ * enabled = false ~ slow path = MMIO region is trapped and region callbacks
+ * are called; the slow path makes it possible to trap the device IRQ status
+ * register reset
+ */
+
+static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
+{
+ int i;
+
+ trace_vfio_platform_mmap_set_enabled(enabled);
+
+ for (i = 0; i < vdev->vbasedev.num_regions; i++) {
+ VFIORegion *region = vdev->regions[i];
+
+ memory_region_set_enabled(&region->mmap_mem, enabled);
+ }
+}
+
+/**
+ * vfio_intp_mmap_enable - timer function, restores the fast path
+ * if there is no more active IRQ
+ * @opaque: actually points to the VFIO platform device
+ *
+ * Called on mmap timer timeout, this function checks whether the
+ * IRQ is still active and, if not, restores the fast path.
+ * By construction a single eventfd is handled at a time.
+ * If the IRQ is still active, the timer is re-programmed.
+ */
+static void vfio_intp_mmap_enable(void *opaque)
+{
+ VFIOINTp *tmp;
+ VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;
+
+ qemu_mutex_lock(&vdev->intp_mutex);
+ QLIST_FOREACH(tmp, &vdev->intp_list, next) {
+ if (tmp->state == VFIO_IRQ_ACTIVE) {
+ trace_vfio_platform_intp_mmap_enable(tmp->pin);
+ /* re-program the timer to check active status later */
+ timer_mod(vdev->mmap_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
+ vdev->mmap_timeout);
+ qemu_mutex_unlock(&vdev->intp_mutex);
+ return;
+ }
+ }
+ vfio_mmap_set_enabled(vdev, true);
+ qemu_mutex_unlock(&vdev->intp_mutex);
+}
+
+/**
+ * vfio_intp_inject_pending_lockheld - Injects a pending IRQ
+ * @intp: the pending interrupt's VFIOINTp handle
+ *
+ * The function is called on completion of a previous IRQ, from
+ * vfio_platform_eoi, while the intp_mutex is locked.
+ * In that situation the slow path is already set and
+ * the mmap timer has already been programmed.
+ */
+static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
+{
+ trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
+ event_notifier_get_fd(&intp->interrupt));
+
+ intp->state = VFIO_IRQ_ACTIVE;
+
+ /* trigger the virtual IRQ */
+ qemu_set_irq(intp->qemuirq, 1);
+}
+
+/**
+ * vfio_intp_interrupt - The user-side eventfd handler
+ * @intp: the VFIOINTp handle of the interrupt that fired
+ *
+ * The function is entered in event handler context:
+ * the vIRQ is injected into the guest if there is no other active
+ * or pending IRQ.
+ */
+static void vfio_intp_interrupt(VFIOINTp *intp)
+{
+ int ret;
+ VFIOINTp *tmp;
+ VFIOPlatformDevice *vdev = intp->vdev;
+ bool delay_handling = false;
+
+ qemu_mutex_lock(&vdev->intp_mutex);
+ if (intp->state == VFIO_IRQ_INACTIVE) {
+ QLIST_FOREACH(tmp, &vdev->intp_list, next) {
+ if (tmp->state == VFIO_IRQ_ACTIVE ||
+ tmp->state == VFIO_IRQ_PENDING) {
+ delay_handling = true;
+ break;
+ }
+ }
+ }
+ if (delay_handling) {
+ /*
+ * the new IRQ gets a pending status and is pushed in
+ * the pending queue
+ */
+ intp->state = VFIO_IRQ_PENDING;
+ trace_vfio_intp_interrupt_set_pending(intp->pin);
+ QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
+ intp, pqnext);
+ ret = event_notifier_test_and_clear(&intp->interrupt);
+ qemu_mutex_unlock(&vdev->intp_mutex);
+ return;
+ }
+
+ trace_vfio_platform_intp_interrupt(intp->pin,
+ event_notifier_get_fd(&intp->interrupt));
+
+ ret = event_notifier_test_and_clear(&intp->interrupt);
+ if (!ret) {
+ error_report("Error when clearing fd=%d (ret = %d)\n",
+ event_notifier_get_fd(&intp->interrupt), ret);
+ }
+
+ intp->state = VFIO_IRQ_ACTIVE;
+
+ /* sets slow path */
+ vfio_mmap_set_enabled(vdev, false);
+
+ /* trigger the virtual IRQ */
+ qemu_set_irq(intp->qemuirq, 1);
+
+ /*
+ * Schedule the mmap timer which will restore fastpath when no IRQ
+ * is active anymore
+ */
+ if (vdev->mmap_timeout) {
+ timer_mod(vdev->mmap_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
+ vdev->mmap_timeout);
+ }
+ qemu_mutex_unlock(&vdev->intp_mutex);
+}
+
+/**
+ * vfio_platform_eoi - IRQ completion routine
+ * @vbasedev: the VFIO device handle
+ *
+ * De-asserts the active virtual IRQ and unmasks the physical IRQ
+ * (effective for level sensitive IRQ auto-masked by the VFIO driver).
+ * Then it handles next pending IRQ if any.
+ * The eoi function is called on the first access to any MMIO region
+ * after an IRQ was triggered, trapped since slow path was set.
+ * It is assumed this access corresponds to the IRQ status
+ * register reset. With such a mechanism, a single IRQ can be
+ * handled at a time since there is no way to know which IRQ
+ * was completed by the guest (we would need additional details
+ * about the IRQ status register mask).
+ */
+static void vfio_platform_eoi(VFIODevice *vbasedev)
+{
+ VFIOINTp *intp;
+ VFIOPlatformDevice *vdev =
+ container_of(vbasedev, VFIOPlatformDevice, vbasedev);
+
+ qemu_mutex_lock(&vdev->intp_mutex);
+ QLIST_FOREACH(intp, &vdev->intp_list, next) {
+ if (intp->state == VFIO_IRQ_ACTIVE) {
+ trace_vfio_platform_eoi(intp->pin,
+ event_notifier_get_fd(&intp->interrupt));
+ intp->state = VFIO_IRQ_INACTIVE;
+
+ /* deassert the virtual IRQ */
+ qemu_set_irq(intp->qemuirq, 0);
+
+ if (intp->flags & VFIO_IRQ_INFO_AUTOMASKED) {
+ /* unmasks the physical level-sensitive IRQ */
+ vfio_unmask_single_irqindex(vbasedev, intp->pin);
+ }
+
+ /* a single IRQ can be active at a time */
+ break;
+ }
+ }
+ /* in case there are pending IRQs, handle the first one */
+ if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
+ intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
+ vfio_intp_inject_pending_lockheld(intp);
+ QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
+ }
+ qemu_mutex_unlock(&vdev->intp_mutex);
+}
+
+/**
+ * vfio_start_eventfd_injection - starts the virtual IRQ injection using
+ * user-side handled eventfds
+ * @intp: the IRQ struct pointer
+ */
+
+static int vfio_start_eventfd_injection(VFIOINTp *intp)
+{
+ int ret;
+
+ ret = vfio_set_trigger_eventfd(intp, vfio_intp_interrupt);
+ if (ret) {
+ error_report("vfio: Error: Failed to pass IRQ fd to the driver: %m");
+ }
+ return ret;
+}
+
+/* VFIO skeleton */
+
+static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
+{
+ vbasedev->needs_reset = true;
+}
+
+/* not implemented yet */
+static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
+{
+ return -1;
+}
+
+/**
+ * vfio_populate_device - Allocate and populate MMIO region
+ * and IRQ structs according to driver returned information
+ * @vbasedev: the VFIO device handle
+ *
+ */
+static int vfio_populate_device(VFIODevice *vbasedev)
+{
+ VFIOINTp *intp, *tmp;
+ int i, ret = -1;
+ VFIOPlatformDevice *vdev =
+ container_of(vbasedev, VFIOPlatformDevice, vbasedev);
+
+ if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
+ error_report("vfio: Um, this isn't a platform device");
+ return ret;
+ }
+
+ vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);
+
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
+ VFIORegion *ptr;
+
+ vdev->regions[i] = g_malloc0(sizeof(VFIORegion));
+ ptr = vdev->regions[i];
+ reg_info.index = i;
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
+ if (ret) {
+ error_report("vfio: Error getting region %d info: %m", i);
+ goto reg_error;
+ }
+ ptr->flags = reg_info.flags;
+ ptr->size = reg_info.size;
+ ptr->fd_offset = reg_info.offset;
+ ptr->nr = i;
+ ptr->vbasedev = vbasedev;
+
+ trace_vfio_platform_populate_regions(ptr->nr,
+ (unsigned long)ptr->flags,
+ (unsigned long)ptr->size,
+ ptr->vbasedev->fd,
+ (unsigned long)ptr->fd_offset);
+ }
+
+ vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+ vfio_intp_mmap_enable, vdev);
+
+ QSIMPLEQ_INIT(&vdev->pending_intp_queue);
+
+ for (i = 0; i < vbasedev->num_irqs; i++) {
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+
+ irq.index = i;
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (ret) {
+ error_printf("vfio: error getting device %s irq info",
+ vbasedev->name);
+ goto irq_err;
+ } else {
+ trace_vfio_platform_populate_interrupts(irq.index,
+ irq.count,
+ irq.flags);
+ intp = vfio_init_intp(vbasedev, irq);
+ if (!intp) {
+ error_report("vfio: Error installing IRQ %d up", i);
+ goto irq_err;
+ }
+ }
+ }
+ return 0;
+irq_err:
+ timer_del(vdev->mmap_timer);
+ QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
+ QLIST_REMOVE(intp, next);
+ g_free(intp);
+ }
+reg_error:
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ g_free(vdev->regions[i]);
+ }
+ g_free(vdev->regions);
+ return ret;
+}
+
+/* specialized functions for VFIO Platform devices */
+static VFIODeviceOps vfio_platform_ops = {
+ .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
+ .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
+ .vfio_eoi = vfio_platform_eoi,
+};
+
+/**
+ * vfio_base_device_init - perform preliminary VFIO setup
+ * @vbasedev: the VFIO device handle
+ *
+ * Implement the VFIO command sequence used to discover the
+ * assigned device resources: group extraction, device
+ * fd retrieval, resource query.
+ * Precondition: the device name must be initialized
+ */
+static int vfio_base_device_init(VFIODevice *vbasedev)
+{
+ VFIOGroup *group;
+ VFIODevice *vbasedev_iter;
+ char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
+ ssize_t len;
+ struct stat st;
+ int groupid;
+ int ret;
+
+ /* name must be set prior to the call */
+ if (!vbasedev->name || strchr(vbasedev->name, '/')) {
+ return -EINVAL;
+ }
+
+ /* Check that the host device exists */
+ g_snprintf(path, sizeof(path), "/sys/bus/platform/devices/%s/",
+ vbasedev->name);
+
+ if (stat(path, &st) < 0) {
+ error_report("vfio: error: no such host device: %s", path);
+ return -errno;
+ }
+
+ g_strlcat(path, "iommu_group", sizeof(path));
+ len = readlink(path, iommu_group_path, sizeof(iommu_group_path));
+ if (len < 0 || len >= sizeof(iommu_group_path)) {
+ error_report("vfio: error no iommu_group for device");
+ return len < 0 ? -errno : -ENAMETOOLONG;
+ }
+
+ iommu_group_path[len] = 0;
+ group_name = basename(iommu_group_path);
+
+ if (sscanf(group_name, "%d", &groupid) != 1) {
+ error_report("vfio: error reading %s: %m", path);
+ return -errno;
+ }
+
+ trace_vfio_platform_base_device_init(vbasedev->name, groupid);
+
+ group = vfio_get_group(groupid, &address_space_memory);
+ if (!group) {
+ error_report("vfio: failed to get group %d", groupid);
+ return -ENOENT;
+ }
+
+ g_snprintf(path, sizeof(path), "%s", vbasedev->name);
+
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
+ error_report("vfio: error: device %s is already attached", path);
+ vfio_put_group(group);
+ return -EBUSY;
+ }
+ }
+ ret = vfio_get_device(group, path, vbasedev);
+ if (ret) {
+ error_report("vfio: failed to get device %s", path);
+ vfio_put_group(group);
+ return ret;
+ }
+
+ ret = vfio_populate_device(vbasedev);
+ if (ret) {
+ error_report("vfio: failed to populate device %s", path);
+ vfio_put_group(group);
+ }
+
+ return ret;
+}
+
+/**
+ * vfio_map_region - initialize the 2 memory regions for a given
+ * MMIO region index
+ * @vdev: the VFIO platform device handle
+ * @nr: the index of the region
+ *
+ * Init the top memory region and the mmapped memory region beneath it.
+ * VFIOPlatformDevice is used since VFIODevice is not a QOM object
+ * and cannot be passed to the memory region functions.
+ */
+static void vfio_map_region(VFIOPlatformDevice *vdev, int nr)
+{
+ VFIORegion *region = vdev->regions[nr];
+ uint64_t size = region->size;
+ char name[64];
+
+ if (!size) {
+ return;
+ }
+
+ g_snprintf(name, sizeof(name), "VFIO %s region %d",
+ vdev->vbasedev.name, nr);
+
+ /* A "slow" read/write mapping underlies all regions */
+ memory_region_init_io(&region->mem, OBJECT(vdev), &vfio_region_ops,
+ region, name, size);
+
+ g_strlcat(name, " mmap", sizeof(name));
+
+ if (vfio_mmap_region(OBJECT(vdev), region, &region->mem,
+ &region->mmap_mem, &region->mmap, size, 0, name)) {
+ error_report("%s unsupported. Performance may be slow", name);
+ }
+}
+
+/**
+ * vfio_platform_realize - the device realize function
+ * @dev: device state pointer
+ * @errp: error
+ *
+ * Initialize the device, its memory regions and IRQ structures;
+ * IRQs are started separately.
+ */
+static void vfio_platform_realize(DeviceState *dev, Error **errp)
+{
+ VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
+ SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ VFIOINTp *intp;
+ int i, ret;
+
+ vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
+ vbasedev->ops = &vfio_platform_ops;
+
+ trace_vfio_platform_realize(vbasedev->name, vdev->compat);
+
+ ret = vfio_base_device_init(vbasedev);
+ if (ret) {
+ error_setg(errp, "vfio: vfio_base_device_init failed for %s",
+ vbasedev->name);
+ return;
+ }
+
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ vfio_map_region(vdev, i);
+ sysbus_init_mmio(sbdev, &vdev->regions[i]->mem);
+ }
+
+ QLIST_FOREACH(intp, &vdev->intp_list, next) {
+ vfio_start_eventfd_injection(intp);
+ }
+}
+
+static const VMStateDescription vfio_platform_vmstate = {
+ .name = TYPE_VFIO_PLATFORM,
+ .unmigratable = 1,
+};
+
+static Property vfio_platform_dev_properties[] = {
+ DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
+ DEFINE_PROP_BOOL("x-mmap", VFIOPlatformDevice, vbasedev.allow_mmap, true),
+ DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
+ mmap_timeout, 1100),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vfio_platform_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = vfio_platform_realize;
+ dc->props = vfio_platform_dev_properties;
+ dc->vmsd = &vfio_platform_vmstate;
+ dc->desc = "VFIO-based platform device assignment";
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo vfio_platform_dev_info = {
+ .name = TYPE_VFIO_PLATFORM,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(VFIOPlatformDevice),
+ .class_init = vfio_platform_class_init,
+ .class_size = sizeof(VFIOPlatformDeviceClass),
+ .abstract = true,
+};
+
+static void register_vfio_platform_dev_type(void)
+{
+ type_register_static(&vfio_platform_dev_info);
+}
+
+type_init(register_vfio_platform_dev_type)
diff --git a/hw/virtio/dataplane/vring.c b/hw/virtio/dataplane/vring.c
index fabb8103e9..35891856ee 100644
--- a/hw/virtio/dataplane/vring.c
+++ b/hw/virtio/dataplane/vring.c
@@ -42,7 +42,7 @@ static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
}
/* Ignore regions with dirty logging, we cannot mark them dirty */
- if (memory_region_is_logging(section.mr)) {
+ if (memory_region_get_dirty_log_mask(section.mr)) {
goto out;
}
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index b84d21cf7e..a6dcc79399 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -418,7 +418,8 @@ static void vhost_set_memory(MemoryListener *listener,
memory_listener);
hwaddr start_addr = section->offset_within_address_space;
ram_addr_t size = int128_get64(section->size);
- bool log_dirty = memory_region_is_logging(section->mr);
+ bool log_dirty =
+ memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
int s = offsetof(struct vhost_memory, regions) +
(dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
void *ram;
@@ -677,13 +678,15 @@ static void vhost_log_global_stop(MemoryListener *listener)
}
static void vhost_log_start(MemoryListener *listener,
- MemoryRegionSection *section)
+ MemoryRegionSection *section,
+ int old, int new)
{
/* FIXME: implement */
}
static void vhost_log_stop(MemoryListener *listener,
- MemoryRegionSection *section)
+ MemoryRegionSection *section,
+ int old, int new)
{
/* FIXME: implement */
}
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index d962125351..96025ca205 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -25,6 +25,7 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-9p.h"
#include "hw/virtio/virtio-input.h"
+#include "hw/virtio/virtio-gpu.h"
#ifdef CONFIG_VIRTFS
#include "hw/9pfs/virtio-9p.h"
#endif
@@ -42,6 +43,7 @@ typedef struct VHostSCSIPCI VHostSCSIPCI;
typedef struct VirtIORngPCI VirtIORngPCI;
typedef struct VirtIOInputPCI VirtIOInputPCI;
typedef struct VirtIOInputHIDPCI VirtIOInputHIDPCI;
+typedef struct VirtIOGPUPCI VirtIOGPUPCI;
/* virtio-pci-bus */
@@ -261,6 +263,18 @@ struct VirtIOInputHIDPCI {
VirtIOInputHID vdev;
};
+/*
+ * virtio-gpu-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
+#define VIRTIO_GPU_PCI(obj) \
+ OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
+
+struct VirtIOGPUPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOGPU vdev;
+};
+
/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION 0
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index fb49ffcb2d..ee4e07c5e7 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -1053,6 +1053,7 @@ static const VMStateDescription vmstate_virtio_device_endian = {
.name = "virtio/device_endian",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = &virtio_device_endian_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(device_endian, VirtIODevice),
VMSTATE_END_OF_LIST()
@@ -1063,6 +1064,7 @@ static const VMStateDescription vmstate_virtio_64bit_features = {
.name = "virtio/64bit_features",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = &virtio_64bit_features_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT64(guest_features, VirtIODevice),
VMSTATE_END_OF_LIST()
@@ -1077,16 +1079,10 @@ static const VMStateDescription vmstate_virtio = {
.fields = (VMStateField[]) {
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_virtio_device_endian,
- .needed = &virtio_device_endian_needed
- },
- {
- .vmsd = &vmstate_virtio_64bit_features,
- .needed = &virtio_64bit_features_needed
- },
- { 0 }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_virtio_device_endian,
+ &vmstate_virtio_64bit_features,
+ NULL
}
};
diff --git a/hw/watchdog/Makefile.objs b/hw/watchdog/Makefile.objs
index 4b0374a555..72e3ffd93c 100644
--- a/hw/watchdog/Makefile.objs
+++ b/hw/watchdog/Makefile.objs
@@ -1,3 +1,4 @@
common-obj-y += watchdog.o
common-obj-$(CONFIG_WDT_IB6300ESB) += wdt_i6300esb.o
common-obj-$(CONFIG_WDT_IB700) += wdt_ib700.o
+common-obj-$(CONFIG_WDT_DIAG288) += wdt_diag288.o
diff --git a/hw/watchdog/watchdog.c b/hw/watchdog/watchdog.c
index 54440c91c5..8d4b0eeeb0 100644
--- a/hw/watchdog/watchdog.c
+++ b/hw/watchdog/watchdog.c
@@ -27,6 +27,7 @@
#include "sysemu/sysemu.h"
#include "sysemu/watchdog.h"
#include "qapi-event.h"
+#include "hw/nmi.h"
/* Possible values for action parameter. */
#define WDT_RESET 1 /* Hard reset. */
@@ -35,6 +36,7 @@
#define WDT_PAUSE 4 /* Pause. */
#define WDT_DEBUG 5 /* Prints a message and continues running. */
#define WDT_NONE 6 /* Do nothing. */
+#define WDT_NMI 7 /* Inject an NMI into the guest */
static int watchdog_action = WDT_RESET;
static QLIST_HEAD(watchdog_list, WatchdogTimerModel) watchdog_list;
@@ -95,6 +97,8 @@ int select_watchdog_action(const char *p)
watchdog_action = WDT_DEBUG;
else if (strcasecmp(p, "none") == 0)
watchdog_action = WDT_NONE;
+ else if (strcasecmp(p, "inject-nmi") == 0)
+ watchdog_action = WDT_NMI;
else
return -1;
@@ -138,5 +142,11 @@ void watchdog_perform_action(void)
case WDT_NONE:
qapi_event_send_watchdog(WATCHDOG_EXPIRATION_ACTION_NONE, &error_abort);
break;
+
+ case WDT_NMI:
+ qapi_event_send_watchdog(WATCHDOG_EXPIRATION_ACTION_INJECT_NMI,
+ &error_abort);
+ inject_nmi();
+ break;
}
}
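
The new WDT_NMI action is selected like the existing ones; on expiry it emits the WATCHDOG event with the INJECT_NMI reason and calls inject_nmi(), which relies on the machine implementing the NMI interface (hw/core/nmi.c). A usage sketch; the watchdog model and machine binary are illustrative.

    qemu-system-x86_64 ... -watchdog i6300esb -watchdog-action inject-nmi
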
diff --git a/hw/watchdog/wdt_diag288.c b/hw/watchdog/wdt_diag288.c
new file mode 100644
index 0000000000..1185e0681c
--- /dev/null
+++ b/hw/watchdog/wdt_diag288.c
@@ -0,0 +1,122 @@
+/*
+ * watchdog device diag288 support
+ *
+ * Copyright IBM, Corp. 2015
+ *
+ * Authors:
+ * Xu Wang <gesaint@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "sysemu/watchdog.h"
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "hw/watchdog/wdt_diag288.h"
+
+static WatchdogTimerModel model = {
+ .wdt_name = TYPE_WDT_DIAG288,
+ .wdt_description = "diag288 device for s390x platform",
+};
+
+static const VMStateDescription vmstate_diag288 = {
+ .name = "vmstate_diag288",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_TIMER_PTR(timer, DIAG288State),
+ VMSTATE_BOOL(enabled, DIAG288State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void wdt_diag288_reset(DeviceState *dev)
+{
+ DIAG288State *diag288 = DIAG288(dev);
+
+ diag288->enabled = false;
+ timer_del(diag288->timer);
+}
+
+static void diag288_timer_expired(void *dev)
+{
+ qemu_log_mask(CPU_LOG_RESET, "Watchdog timer expired.\n");
+ watchdog_perform_action();
+ wdt_diag288_reset(dev);
+}
+
+static int wdt_diag288_handle_timer(DIAG288State *diag288,
+ uint64_t func, uint64_t timeout)
+{
+ switch (func) {
+ case WDT_DIAG288_INIT:
+ diag288->enabled = true;
+ /* fall through */
+ case WDT_DIAG288_CHANGE:
+ if (!diag288->enabled) {
+ return -1;
+ }
+ timer_mod(diag288->timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ timeout * get_ticks_per_sec());
+ break;
+ case WDT_DIAG288_CANCEL:
+ if (!diag288->enabled) {
+ return -1;
+ }
+ diag288->enabled = false;
+ timer_del(diag288->timer);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static void wdt_diag288_realize(DeviceState *dev, Error **errp)
+{
+ DIAG288State *diag288 = DIAG288(dev);
+
+ diag288->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, diag288_timer_expired,
+ dev);
+}
+
+static void wdt_diag288_unrealize(DeviceState *dev, Error **errp)
+{
+ DIAG288State *diag288 = DIAG288(dev);
+
+ timer_del(diag288->timer);
+ timer_free(diag288->timer);
+}
+
+static void wdt_diag288_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ DIAG288Class *diag288 = DIAG288_CLASS(klass);
+
+ dc->realize = wdt_diag288_realize;
+ dc->unrealize = wdt_diag288_unrealize;
+ dc->reset = wdt_diag288_reset;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->vmsd = &vmstate_diag288;
+ diag288->handle_timer = wdt_diag288_handle_timer;
+}
+
+static const TypeInfo wdt_diag288_info = {
+ .class_init = wdt_diag288_class_init,
+ .parent = TYPE_DEVICE,
+ .name = TYPE_WDT_DIAG288,
+ .instance_size = sizeof(DIAG288State),
+ .class_size = sizeof(DIAG288Class),
+};
+
+static void wdt_diag288_register_types(void)
+{
+ watchdog_add_model(&model);
+ type_register_static(&wdt_diag288_info);
+}
+
+type_init(wdt_diag288_register_types)
diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c
index b2cb22b99d..2510e2e4ff 100644
--- a/hw/xen/xen_backend.c
+++ b/hw/xen/xen_backend.c
@@ -714,9 +714,7 @@ int xen_be_init(void)
return -1;
}
- if (qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL) < 0) {
- goto err;
- }
+ qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL);
if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
/* Check if xen_init() have been called */
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
index d095c081cc..9afcda8e21 100644
--- a/hw/xen/xen_pt.c
+++ b/hw/xen/xen_pt.c
@@ -234,11 +234,12 @@ static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
int index = 0;
XenPTRegGroup *reg_grp_entry = NULL;
int rc = 0;
- uint32_t read_val = 0;
+ uint32_t read_val = 0, wb_mask;
int emul_len = 0;
XenPTReg *reg_entry = NULL;
uint32_t find_addr = addr;
XenPTRegInfo *reg = NULL;
+ bool wp_flag = false;
if (xen_pt_pci_config_access_check(d, addr, len)) {
return;
@@ -271,10 +272,17 @@ static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
if (rc < 0) {
XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
memset(&read_val, 0xff, len);
+ wb_mask = 0;
+ } else {
+ wb_mask = 0xFFFFFFFF >> ((4 - len) << 3);
}
/* pass directly to the real device for passthrough type register group */
if (reg_grp_entry == NULL) {
+ if (!s->permissive) {
+ wb_mask = 0;
+ wp_flag = true;
+ }
goto out;
}
@@ -295,9 +303,17 @@ static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
uint8_t *ptr_val = NULL;
+ uint32_t wp_mask = reg->emu_mask | reg->ro_mask;
valid_mask <<= (find_addr - real_offset) << 3;
ptr_val = (uint8_t *)&val + (real_offset & 3);
+ if (!s->permissive) {
+ wp_mask |= reg->res_mask;
+ }
+ if (wp_mask == (0xFFFFFFFF >> ((4 - reg->size) << 3))) {
+ wb_mask &= ~((wp_mask >> ((find_addr - real_offset) << 3))
+ << ((len - emul_len) << 3));
+ }
/* do emulation based on register size */
switch (reg->size) {
@@ -339,6 +355,16 @@ static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
} else {
/* nothing to do with passthrough type register,
* continue to find next byte */
+ if (!s->permissive) {
+ wb_mask &= ~(0xff << ((len - emul_len) << 3));
+ /* Unused BARs will make it here, but we don't want to issue
+ * warnings for writes to them (bogus writes get dealt with
+ * above).
+ */
+ if (index < 0) {
+ wp_flag = true;
+ }
+ }
emul_len--;
find_addr++;
}
@@ -350,10 +376,26 @@ static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
memory_region_transaction_commit();
out:
- if (!(reg && reg->no_wb)) {
+ if (wp_flag && !s->permissive_warned) {
+ s->permissive_warned = true;
+ xen_pt_log(d, "Write-back to unknown field 0x%02x (partially) inhibited (0x%0*x)\n",
+ addr, len * 2, wb_mask);
+ xen_pt_log(d, "If the device doesn't work, try enabling permissive mode\n");
+ xen_pt_log(d, "(unsafe) and if it helps report the problem to xen-devel\n");
+ }
+ for (index = 0; wb_mask; index += len) {
/* unknown regs are passed through */
- rc = xen_host_pci_set_block(&s->real_device, addr,
- (uint8_t *)&val, len);
+ while (!(wb_mask & 0xff)) {
+ index++;
+ wb_mask >>= 8;
+ }
+ len = 0;
+ do {
+ len++;
+ wb_mask >>= 8;
+ } while (wb_mask & 0xff);
+ rc = xen_host_pci_set_block(&s->real_device, addr + index,
+ (uint8_t *)&val + index, len);
if (rc < 0) {
XEN_PT_ERR(d, "pci_write_block failed. return value: %d.\n", rc);
@@ -807,6 +849,7 @@ static void xen_pt_unregister_device(PCIDevice *d)
static Property xen_pci_passthrough_properties[] = {
DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
+ DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false),
DEFINE_PROP_END_OF_LIST(),
};
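
The loop added at the end of xen_pt_pci_write_config splits the write-back into one xen_host_pci_set_block() call per contiguous run of writable bytes left in wb_mask. A standalone sketch that reproduces the splitting on an example mask; the mask value is illustrative.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t wb_mask = 0x00FF00FF;   /* bytes 0 and 2 of the write are writable */
        int index, len;

        for (index = 0; wb_mask; index += len) {
            /* skip non-writable bytes */
            while (!(wb_mask & 0xff)) {
                index++;
                wb_mask >>= 8;
            }
            /* count the contiguous writable run */
            len = 0;
            do {
                len++;
                wb_mask >>= 8;
            } while (wb_mask & 0xff);
            printf("write %d byte(s) at offset addr+%d\n", len, index);
        }
        return 0;   /* prints: 1 byte at addr+0, then 1 byte at addr+2 */
    }
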
diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h
index 942dc60cc7..4bba559763 100644
--- a/hw/xen/xen_pt.h
+++ b/hw/xen/xen_pt.h
@@ -101,12 +101,12 @@ struct XenPTRegInfo {
uint32_t offset;
uint32_t size;
uint32_t init_val;
+ /* reg reserved field mask (ON:reserved, OFF:defined) */
+ uint32_t res_mask;
/* reg read only field mask (ON:RO/ROS, OFF:other) */
uint32_t ro_mask;
/* reg emulate field mask (ON:emu, OFF:passthrough) */
uint32_t emu_mask;
- /* no write back allowed */
- uint32_t no_wb;
xen_pt_conf_reg_init init;
/* read/write function pointer
* for double_word/word/byte size */
@@ -177,6 +177,7 @@ typedef struct XenPTMSIXEntry {
uint32_t data;
uint32_t vector_ctrl;
bool updated; /* indicate whether MSI ADDR or DATA is updated */
+ bool warned; /* avoid issuing (bogus) warning more than once */
} XenPTMSIXEntry;
typedef struct XenPTMSIX {
uint32_t ctrl_offset;
@@ -196,6 +197,8 @@ struct XenPCIPassthroughState {
PCIHostDeviceAddress hostaddr;
bool is_virtfn;
+ bool permissive;
+ bool permissive_warned;
XenHostPCIDevice real_device;
XenPTRegion bases[PCI_NUM_REGIONS]; /* Access regions */
QLIST_HEAD(, XenPTRegGroup) reg_grps;
diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c
index 95a51dbbb1..f3cf069b60 100644
--- a/hw/xen/xen_pt_config_init.c
+++ b/hw/xen/xen_pt_config_init.c
@@ -95,6 +95,18 @@ XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
return NULL;
}
+static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
+ const XenPTRegInfo *reg,
+ uint32_t valid_mask)
+{
+ uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);
+
+ if (!s->permissive) {
+ throughable_mask &= ~reg->res_mask;
+ }
+
+ return throughable_mask & valid_mask;
+}
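
get_throughable_mask() centralizes the pass-through filtering: emulated and read-only bits never reach the hardware, and in non-permissive mode reserved bits are filtered out as well. A small worked sketch for the PCI_COMMAND entry further down in this file (emu_mask 0x0743, res_mask 0xF880, no ro_mask), assuming a full 16-bit valid_mask.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t emu_mask = 0x0743, ro_mask = 0x0000, res_mask = 0xF880;
        uint32_t valid_mask = 0xFFFF;        /* full 16-bit write */
        uint32_t thru = ~(emu_mask | ro_mask);

        thru &= ~res_mask;                   /* non-permissive: strip reserved bits */
        thru &= valid_mask;
        printf("throughable mask: 0x%04x\n", thru);   /* prints 0x003c */
        return 0;
    }
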
/****************
* general register functions
@@ -157,14 +169,13 @@ static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
{
XenPTRegInfo *reg = cfg_entry->reg;
uint8_t writable_mask = 0;
- uint8_t throughable_mask = 0;
+ uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
/* modify emulate register */
writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
/* create value for writing to I/O device register */
- throughable_mask = ~reg->emu_mask & valid_mask;
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
return 0;
@@ -175,14 +186,13 @@ static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
{
XenPTRegInfo *reg = cfg_entry->reg;
uint16_t writable_mask = 0;
- uint16_t throughable_mask = 0;
+ uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
/* modify emulate register */
writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
/* create value for writing to I/O device register */
- throughable_mask = ~reg->emu_mask & valid_mask;
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
return 0;
@@ -193,14 +203,13 @@ static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
{
XenPTRegInfo *reg = cfg_entry->reg;
uint32_t writable_mask = 0;
- uint32_t throughable_mask = 0;
+ uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
/* modify emulate register */
writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
/* create value for writing to I/O device register */
- throughable_mask = ~reg->emu_mask & valid_mask;
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
return 0;
@@ -292,15 +301,13 @@ static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
{
XenPTRegInfo *reg = cfg_entry->reg;
uint16_t writable_mask = 0;
- uint16_t throughable_mask = 0;
+ uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
/* modify emulate register */
writable_mask = ~reg->ro_mask & valid_mask;
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
/* create value for writing to I/O device register */
- throughable_mask = ~reg->emu_mask & valid_mask;
-
if (*val & PCI_COMMAND_INTX_DISABLE) {
throughable_mask |= PCI_COMMAND_INTX_DISABLE;
} else {
@@ -454,7 +461,6 @@ static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
PCIDevice *d = &s->dev;
const PCIIORegion *r;
uint32_t writable_mask = 0;
- uint32_t throughable_mask = 0;
uint32_t bar_emu_mask = 0;
uint32_t bar_ro_mask = 0;
uint32_t r_size = 0;
@@ -511,8 +517,7 @@ static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
}
/* create value for writing to I/O device register */
- throughable_mask = ~bar_emu_mask & valid_mask;
- *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
return 0;
}
@@ -526,9 +531,8 @@ static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
XenPTRegion *base = NULL;
PCIDevice *d = (PCIDevice *)&s->dev;
uint32_t writable_mask = 0;
- uint32_t throughable_mask = 0;
+ uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
pcibus_t r_size = 0;
- uint32_t bar_emu_mask = 0;
uint32_t bar_ro_mask = 0;
r_size = d->io_regions[PCI_ROM_SLOT].size;
@@ -537,7 +541,6 @@ static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
r_size = xen_pt_get_emul_size(base->bar_flag, r_size);
/* set emulate mask and read-only mask */
- bar_emu_mask = reg->emu_mask;
bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;
/* modify emulate register */
@@ -545,7 +548,6 @@ static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
/* create value for writing to I/O device register */
- throughable_mask = ~bar_emu_mask & valid_mask;
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
return 0;
@@ -580,7 +582,7 @@ static XenPTRegInfo xen_pt_emu_reg_header0[] = {
.offset = PCI_COMMAND,
.size = 2,
.init_val = 0x0000,
- .ro_mask = 0xF880,
+ .res_mask = 0xF880,
.emu_mask = 0x0743,
.init = xen_pt_common_reg_init,
.u.w.read = xen_pt_word_reg_read,
@@ -605,7 +607,8 @@ static XenPTRegInfo xen_pt_emu_reg_header0[] = {
.offset = PCI_STATUS,
.size = 2,
.init_val = 0x0000,
- .ro_mask = 0x06FF,
+ .res_mask = 0x0007,
+ .ro_mask = 0x06F8,
.emu_mask = 0x0010,
.init = xen_pt_status_reg_init,
.u.w.read = xen_pt_word_reg_read,
@@ -755,6 +758,15 @@ static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
.u.b.write = xen_pt_byte_reg_write,
},
{
+ .offset = PCI_VPD_ADDR,
+ .size = 2,
+ .ro_mask = 0x0003,
+ .emu_mask = 0x0003,
+ .init = xen_pt_common_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ {
.size = 0,
},
};
@@ -873,7 +885,7 @@ static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
.offset = PCI_EXP_DEVCAP,
.size = 4,
.init_val = 0x00000000,
- .ro_mask = 0x1FFCFFFF,
+ .ro_mask = 0xFFFFFFFF,
.emu_mask = 0x10000000,
.init = xen_pt_common_reg_init,
.u.dw.read = xen_pt_long_reg_read,
@@ -890,6 +902,16 @@ static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
.u.w.read = xen_pt_word_reg_read,
.u.w.write = xen_pt_word_reg_write,
},
+ /* Device Status reg */
+ {
+ .offset = PCI_EXP_DEVSTA,
+ .size = 2,
+ .res_mask = 0xFFC0,
+ .ro_mask = 0x0030,
+ .init = xen_pt_common_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
/* Link Control reg */
{
.offset = PCI_EXP_LNKCTL,
@@ -901,6 +923,15 @@ static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
.u.w.read = xen_pt_word_reg_read,
.u.w.write = xen_pt_word_reg_write,
},
+ /* Link Status reg */
+ {
+ .offset = PCI_EXP_LNKSTA,
+ .size = 2,
+ .ro_mask = 0x3FFF,
+ .init = xen_pt_common_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
/* Device Control 2 reg */
{
.offset = 0x28,
@@ -933,39 +964,22 @@ static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
* Power Management Capability
*/
-/* read Power Management Control/Status register */
-static int xen_pt_pmcsr_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
- uint16_t *value, uint16_t valid_mask)
-{
- XenPTRegInfo *reg = cfg_entry->reg;
- uint16_t valid_emu_mask = reg->emu_mask;
-
- valid_emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;
-
- valid_emu_mask = valid_emu_mask & valid_mask;
- *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
-
- return 0;
-}
/* write Power Management Control/Status register */
static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s,
XenPTReg *cfg_entry, uint16_t *val,
uint16_t dev_value, uint16_t valid_mask)
{
XenPTRegInfo *reg = cfg_entry->reg;
- uint16_t emu_mask = reg->emu_mask;
uint16_t writable_mask = 0;
- uint16_t throughable_mask = 0;
-
- emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;
+ uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
/* modify emulate register */
- writable_mask = emu_mask & ~reg->ro_mask & valid_mask;
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
/* create value for writing to I/O device register */
- throughable_mask = ~emu_mask & valid_mask;
- *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~PCI_PM_CTRL_PME_STATUS,
+ throughable_mask);
return 0;
}
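PCI_PM_CTRL_PME_STATUS is a write-one-to-clear bit, so merging the raw value read back from the device into the forwarded write would carry that 1 along and clear a pending PME event behind the guest's back; masking it out of dev_value before the merge avoids the side effect. A minimal illustration of the hazard follows; forward_pm_ctrl() is a hypothetical helper, not part of the patch, and assumes the merge macro and pci_regs.h constants already included by this file.

    /* RW1C hazard: a set PME_STATUS bit read from the device must not be
     * echoed back in the pass-through write, or the event gets cleared. */
    static uint16_t forward_pm_ctrl(uint16_t guest_val, uint16_t dev_value,
                                    uint16_t throughable_mask)
    {
        dev_value &= ~PCI_PM_CTRL_PME_STATUS;   /* strip the RW1C status bit */
        return XEN_PT_MERGE_VALUE(guest_val, dev_value, throughable_mask);
    }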
@@ -999,10 +1013,11 @@ static XenPTRegInfo xen_pt_emu_reg_pm[] = {
.offset = PCI_PM_CTRL,
.size = 2,
.init_val = 0x0008,
- .ro_mask = 0xE1FC,
- .emu_mask = 0x8100,
+ .res_mask = 0x00F0,
+ .ro_mask = 0xE10C,
+ .emu_mask = 0x810B,
.init = xen_pt_common_reg_init,
- .u.w.read = xen_pt_pmcsr_reg_read,
+ .u.w.read = xen_pt_word_reg_read,
.u.w.write = xen_pt_pmcsr_reg_write,
},
{
@@ -1016,13 +1031,9 @@ static XenPTRegInfo xen_pt_emu_reg_pm[] = {
*/
/* Helper */
-static bool xen_pt_msgdata_check_type(uint32_t offset, uint16_t flags)
-{
- /* check the offset whether matches the type or not */
- bool is_32 = (offset == PCI_MSI_DATA_32) && !(flags & PCI_MSI_FLAGS_64BIT);
- bool is_64 = (offset == PCI_MSI_DATA_64) && (flags & PCI_MSI_FLAGS_64BIT);
- return is_32 || is_64;
-}
+#define xen_pt_msi_check_type(offset, flags, what) \
+ ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
+ PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
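The macro generalizes the removed DATA-only helper so one check covers the DATA, MASK and PENDING offsets; the "what" token is pasted into the PCI_MSI_##what##_32/_64 register constants from pci_regs.h. For the DATA case it expands to exactly the test the old function performed:

    /* xen_pt_msi_check_type(offset, flags, DATA) expands to: */
    (offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? PCI_MSI_DATA_64
                                               : PCI_MSI_DATA_32)

The MASK and PENDING callers added below rely on the corresponding PCI_MSI_MASK_* and PCI_MSI_PENDING_* constants in the same way.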
/* Message Control register */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
@@ -1056,8 +1067,7 @@ static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
XenPTRegInfo *reg = cfg_entry->reg;
XenPTMSI *msi = s->msi;
uint16_t writable_mask = 0;
- uint16_t throughable_mask = 0;
- uint16_t raw_val;
+ uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
/* Currently no support for multi-vector */
if (*val & PCI_MSI_FLAGS_QSIZE) {
@@ -1070,12 +1080,10 @@ static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;
/* create value for writing to I/O device register */
- raw_val = *val;
- throughable_mask = ~reg->emu_mask & valid_mask;
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
/* update MSI */
- if (raw_val & PCI_MSI_FLAGS_ENABLE) {
+ if (*val & PCI_MSI_FLAGS_ENABLE) {
/* setup MSI pirq for the first time */
if (!msi->initialized) {
/* Init physical one */
@@ -1103,10 +1111,6 @@ static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
xen_pt_msi_disable(s);
}
- /* pass through MSI_ENABLE bit */
- *val &= ~PCI_MSI_FLAGS_ENABLE;
- *val |= raw_val & PCI_MSI_FLAGS_ENABLE;
-
return 0;
}
@@ -1134,7 +1138,45 @@ static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
uint32_t offset = reg->offset;
/* check the offset whether matches the type or not */
- if (xen_pt_msgdata_check_type(offset, flags)) {
+ if (xen_pt_msi_check_type(offset, flags, DATA)) {
+ *data = reg->init_val;
+ } else {
+ *data = XEN_PT_INVALID_REG;
+ }
+ return 0;
+}
+
+/* this function will be called twice (for the 32-bit and 64-bit layouts) */
+/* initialize Mask register */
+static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi->flags;
+
+    /* register exists only with per-vector masking and a matching offset */
+ if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
+ *data = XEN_PT_INVALID_REG;
+ } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
+ *data = reg->init_val;
+ } else {
+ *data = XEN_PT_INVALID_REG;
+ }
+ return 0;
+}
+
+/* this function will be called twice (for the 32-bit and 64-bit layouts) */
+/* initialize Pending register */
+static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi->flags;
+
+    /* register exists only with per-vector masking and a matching offset */
+ if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
+ *data = XEN_PT_INVALID_REG;
+ } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
*data = reg->init_val;
} else {
*data = XEN_PT_INVALID_REG;
@@ -1149,7 +1191,6 @@ static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
{
XenPTRegInfo *reg = cfg_entry->reg;
uint32_t writable_mask = 0;
- uint32_t throughable_mask = 0;
uint32_t old_addr = cfg_entry->data;
/* modify emulate register */
@@ -1158,8 +1199,7 @@ static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
s->msi->addr_lo = cfg_entry->data;
/* create value for writing to I/O device register */
- throughable_mask = ~reg->emu_mask & valid_mask;
- *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
/* update MSI */
if (cfg_entry->data != old_addr) {
@@ -1177,7 +1217,6 @@ static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
{
XenPTRegInfo *reg = cfg_entry->reg;
uint32_t writable_mask = 0;
- uint32_t throughable_mask = 0;
uint32_t old_addr = cfg_entry->data;
/* check whether the type is 64 bit or not */
@@ -1194,8 +1233,7 @@ static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
s->msi->addr_hi = cfg_entry->data;
/* create value for writing to I/O device register */
- throughable_mask = ~reg->emu_mask & valid_mask;
- *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
/* update MSI */
if (cfg_entry->data != old_addr) {
@@ -1217,12 +1255,11 @@ static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
XenPTRegInfo *reg = cfg_entry->reg;
XenPTMSI *msi = s->msi;
uint16_t writable_mask = 0;
- uint16_t throughable_mask = 0;
uint16_t old_data = cfg_entry->data;
uint32_t offset = reg->offset;
/* check the offset whether matches the type or not */
- if (!xen_pt_msgdata_check_type(offset, msi->flags)) {
+ if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
/* exit I/O emulator */
XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
return -1;
@@ -1235,8 +1272,7 @@ static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
msi->data = cfg_entry->data;
/* create value for writing to I/O device register */
- throughable_mask = ~reg->emu_mask & valid_mask;
- *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
/* update MSI */
if (cfg_entry->data != old_data) {
@@ -1266,8 +1302,9 @@ static XenPTRegInfo xen_pt_emu_reg_msi[] = {
.offset = PCI_MSI_FLAGS,
.size = 2,
.init_val = 0x0000,
- .ro_mask = 0xFF8E,
- .emu_mask = 0x007F,
+ .res_mask = 0xFE00,
+ .ro_mask = 0x018E,
+ .emu_mask = 0x017E,
.init = xen_pt_msgctrl_reg_init,
.u.w.read = xen_pt_word_reg_read,
.u.w.write = xen_pt_msgctrl_reg_write,
@@ -1279,7 +1316,6 @@ static XenPTRegInfo xen_pt_emu_reg_msi[] = {
.init_val = 0x00000000,
.ro_mask = 0x00000003,
.emu_mask = 0xFFFFFFFF,
- .no_wb = 1,
.init = xen_pt_common_reg_init,
.u.dw.read = xen_pt_long_reg_read,
.u.dw.write = xen_pt_msgaddr32_reg_write,
@@ -1291,7 +1327,6 @@ static XenPTRegInfo xen_pt_emu_reg_msi[] = {
.init_val = 0x00000000,
.ro_mask = 0x00000000,
.emu_mask = 0xFFFFFFFF,
- .no_wb = 1,
.init = xen_pt_msgaddr64_reg_init,
.u.dw.read = xen_pt_long_reg_read,
.u.dw.write = xen_pt_msgaddr64_reg_write,
@@ -1303,7 +1338,6 @@ static XenPTRegInfo xen_pt_emu_reg_msi[] = {
.init_val = 0x0000,
.ro_mask = 0x0000,
.emu_mask = 0xFFFF,
- .no_wb = 1,
.init = xen_pt_msgdata_reg_init,
.u.w.read = xen_pt_word_reg_read,
.u.w.write = xen_pt_msgdata_reg_write,
@@ -1315,11 +1349,54 @@ static XenPTRegInfo xen_pt_emu_reg_msi[] = {
.init_val = 0x0000,
.ro_mask = 0x0000,
.emu_mask = 0xFFFF,
- .no_wb = 1,
.init = xen_pt_msgdata_reg_init,
.u.w.read = xen_pt_word_reg_read,
.u.w.write = xen_pt_msgdata_reg_write,
},
+ /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
+ {
+ .offset = PCI_MSI_MASK_32,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0xFFFFFFFF,
+ .init = xen_pt_mask_reg_init,
+ .u.dw.read = xen_pt_long_reg_read,
+ .u.dw.write = xen_pt_long_reg_write,
+ },
+ /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
+ {
+ .offset = PCI_MSI_MASK_64,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0xFFFFFFFF,
+ .init = xen_pt_mask_reg_init,
+ .u.dw.read = xen_pt_long_reg_read,
+ .u.dw.write = xen_pt_long_reg_write,
+ },
+ /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
+ {
+ .offset = PCI_MSI_MASK_32 + 4,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0x00000000,
+ .init = xen_pt_pending_reg_init,
+ .u.dw.read = xen_pt_long_reg_read,
+ .u.dw.write = xen_pt_long_reg_write,
+ },
+ /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
+ {
+ .offset = PCI_MSI_MASK_64 + 4,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0x00000000,
+ .init = xen_pt_pending_reg_init,
+ .u.dw.read = xen_pt_long_reg_read,
+ .u.dw.write = xen_pt_long_reg_write,
+ },
{
.size = 0,
},
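The four new MSI entries describe the optional per-vector Mask and Pending dwords, which exist only when PCI_MSI_FLAGS_MASKBIT is advertised; in both the 32-bit and 64-bit layouts the pending dword sits four bytes after the mask dword, hence the + 4 offsets. With emu_mask set to all ones the mask register is served entirely from the emulated copy, while the pending register (emu_mask 0, ro_mask all ones) is always read from the device and never written. A rough model of how the generic dword read treats the two, assuming xen_pt_long_reg_read() follows the same masking pattern visible in the removed xen_pt_pmcsr_reg_read() above:

    /* Rough model of the generic dword read path (not the actual helper):
     * bits covered by emu_mask come from the emulated copy, the remaining
     * bits from the value just read off the device. */
    static uint32_t model_long_reg_read(uint32_t hw_value, uint32_t emulated,
                                        uint32_t emu_mask, uint32_t valid_mask)
    {
        uint32_t valid_emu_mask = emu_mask & valid_mask;

        /* mask reg:    emu_mask == 0xFFFFFFFF -> returns the emulated copy  */
        /* pending reg: emu_mask == 0x00000000 -> returns the device's value */
        return XEN_PT_MERGE_VALUE(hw_value, emulated, ~valid_emu_mask);
    }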
@@ -1358,7 +1435,7 @@ static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
{
XenPTRegInfo *reg = cfg_entry->reg;
uint16_t writable_mask = 0;
- uint16_t throughable_mask = 0;
+ uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
int debug_msix_enabled_old;
/* modify emulate register */
@@ -1366,7 +1443,6 @@ static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
/* create value for writing to I/O device register */
- throughable_mask = ~reg->emu_mask & valid_mask;
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
/* update MSI-X */
@@ -1405,7 +1481,8 @@ static XenPTRegInfo xen_pt_emu_reg_msix[] = {
.offset = PCI_MSI_FLAGS,
.size = 2,
.init_val = 0x0000,
- .ro_mask = 0x3FFF,
+ .res_mask = 0x3800,
+ .ro_mask = 0x07FF,
.emu_mask = 0x0000,
.init = xen_pt_msixctrl_reg_init,
.u.w.read = xen_pt_word_reg_read,
diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c
index 9ed9321c9f..68db6233dc 100644
--- a/hw/xen/xen_pt_msi.c
+++ b/hw/xen/xen_pt_msi.c
@@ -434,11 +434,10 @@ static void pci_msix_write(void *opaque, hwaddr addr,
XenPCIPassthroughState *s = opaque;
XenPTMSIX *msix = s->msix;
XenPTMSIXEntry *entry;
- int entry_nr, offset;
+ unsigned int entry_nr, offset;
entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
- if (entry_nr < 0 || entry_nr >= msix->total_entries) {
- XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
+ if (entry_nr >= msix->total_entries) {
return;
}
entry = &msix->msix_entry[entry_nr];
@@ -460,8 +459,11 @@ static void pci_msix_write(void *opaque, hwaddr addr,
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
- XEN_PT_ERR(&s->dev, "Can't update msix entry %d since MSI-X is"
- " already enabled.\n", entry_nr);
+ if (!entry->warned) {
+ entry->warned = true;
+ XEN_PT_ERR(&s->dev, "Can't update msix entry %d since MSI-X is"
+ " already enabled.\n", entry_nr);
+ }
return;
}
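The warned flag is presumably a new bool added to XenPTMSIXEntry elsewhere in this series; it turns the former per-write error into a warn-once per table entry, so a guest that keeps poking a live MSI-X table cannot flood the log. The same pattern in isolation, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Warn-once sketch (hypothetical struct/function names; in the patch the
     * flag lives in XenPTMSIXEntry and XEN_PT_ERR does the logging). */
    struct entry_state {
        bool warned;
    };

    static void warn_once(struct entry_state *e, const char *msg)
    {
        if (!e->warned) {
            e->warned = true;
            fprintf(stderr, "%s\n", msg);
        }
    }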