Diffstat (limited to 'hw')
-rw-r--r--  hw/9pfs/9p.c | 2
-rw-r--r--  hw/acpi/cpu.c | 2
-rw-r--r--  hw/alpha/dp264.c | 1
-rw-r--r--  hw/arm/aspeed.c | 22
-rw-r--r--  hw/arm/aspeed_soc.c | 13
-rw-r--r--  hw/arm/cubieboard.c | 4
-rw-r--r--  hw/arm/highbank.c | 8
-rw-r--r--  hw/arm/integratorcp.c | 78
-rw-r--r--  hw/arm/realview.c | 3
-rw-r--r--  hw/arm/spitz.c | 3
-rw-r--r--  hw/arm/stellaris.c | 48
-rw-r--r--  hw/arm/tosa.c | 1
-rw-r--r--  hw/arm/versatilepb.c | 2
-rw-r--r--  hw/arm/vexpress.c | 2
-rw-r--r--  hw/arm/virt-acpi-build.c | 2
-rw-r--r--  hw/arm/virt.c | 4
-rw-r--r--  hw/arm/xilinx_zynq.c | 1
-rw-r--r--  hw/arm/xlnx-ep108.c | 6
-rw-r--r--  hw/block/dataplane/virtio-blk.c | 4
-rw-r--r--  hw/block/virtio-blk.c | 31
-rw-r--r--  hw/char/Makefile.objs | 3
-rw-r--r--  hw/char/exynos4210_uart.c | 2
-rw-r--r--  hw/char/mcf_uart.c | 102
-rw-r--r--  hw/core/Makefile.objs | 1
-rw-r--r--  hw/core/irq.c | 1
-rw-r--r--  hw/core/loader-fit.c | 325
-rw-r--r--  hw/core/loader.c | 7
-rw-r--r--  hw/core/machine.c | 31
-rw-r--r--  hw/core/qdev.c | 9
-rw-r--r--  hw/display/cirrus_vga.c | 79
-rw-r--r--  hw/display/g364fb.c | 2
-rw-r--r--  hw/display/trace-events | 12
-rw-r--r--  hw/display/vga.c | 27
-rw-r--r--  hw/display/virtio-gpu-3d.c | 8
-rw-r--r--  hw/display/virtio-gpu.c | 1
-rw-r--r--  hw/dma/pl330.c | 8
-rw-r--r--  hw/i386/intel_iommu.c | 238
-rw-r--r--  hw/i386/intel_iommu_internal.h | 1
-rw-r--r--  hw/i386/kvm/ioapic.c | 13
-rw-r--r--  hw/i386/kvmvapic.c | 4
-rw-r--r--  hw/i386/pc.c | 133
-rw-r--r--  hw/i386/trace-events | 28
-rw-r--r--  hw/ide/ahci.c | 2
-rw-r--r--  hw/ide/core.c | 17
-rw-r--r--  hw/intc/apic_common.c | 4
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c | 3
-rw-r--r--  hw/intc/exynos4210_gic.c | 2
-rw-r--r--  hw/intc/ioapic.c | 6
-rw-r--r--  hw/intc/mips_gic.c | 56
-rw-r--r--  hw/intc/s390_flic.c | 28
-rw-r--r--  hw/intc/s390_flic_kvm.c | 6
-rw-r--r--  hw/ipmi/isa_ipmi_bt.c | 6
-rw-r--r--  hw/isa/Makefile.objs | 2
-rw-r--r--  hw/m68k/Makefile.objs | 2
-rw-r--r--  hw/m68k/dummy_m68k.c | 84
-rw-r--r--  hw/m68k/mcf5208.c | 6
-rw-r--r--  hw/m68k/mcf_intc.c | 48
-rw-r--r--  hw/mips/Makefile.objs | 1
-rw-r--r--  hw/mips/boston.c | 577
-rw-r--r--  hw/mips/mips_fulong2e.c | 1
-rw-r--r--  hw/mips/mips_jazz.c | 4
-rw-r--r--  hw/mips/mips_malta.c | 1
-rw-r--r--  hw/mips/mips_r4k.c | 1
-rw-r--r--  hw/misc/Makefile.objs | 2
-rw-r--r--  hw/misc/imx6_src.c | 58
-rw-r--r--  hw/misc/ivshmem.c | 9
-rw-r--r--  hw/misc/mips_cmgcr.c | 17
-rw-r--r--  hw/misc/pvpanic.c | 2
-rw-r--r--  hw/misc/unimp.c | 107
-rw-r--r--  hw/net/e1000e_core.c | 9
-rw-r--r--  hw/net/imx_fec.c | 10
-rw-r--r--  hw/net/spapr_llan.c | 18
-rw-r--r--  hw/net/virtio-net.c | 330
-rw-r--r--  hw/net/vmxnet3.c | 2
-rw-r--r--  hw/nvram/mac_nvram.c | 2
-rw-r--r--  hw/nvram/spapr_nvram.c | 2
-rw-r--r--  hw/openrisc/openrisc_sim.c | 4
-rw-r--r--  hw/pci-host/Makefile.objs | 1
-rw-r--r--  hw/pci-host/prep.c | 11
-rw-r--r--  hw/pci-host/xilinx-pcie.c | 328
-rw-r--r--  hw/pci/pcie.c | 23
-rw-r--r--  hw/pci/pcie_aer.c | 2
-rw-r--r--  hw/ppc/mac_newworld.c | 16
-rw-r--r--  hw/ppc/mac_oldworld.c | 1
-rw-r--r--  hw/ppc/pnv.c | 6
-rw-r--r--  hw/ppc/ppc.c | 16
-rw-r--r--  hw/ppc/ppc405_uc.c | 6
-rw-r--r--  hw/ppc/ppc4xx_pci.c | 13
-rw-r--r--  hw/ppc/prep.c | 1
-rw-r--r--  hw/ppc/spapr.c | 258
-rw-r--r--  hw/ppc/spapr_cpu_core.c | 137
-rw-r--r--  hw/ppc/spapr_ovec.c | 19
-rw-r--r--  hw/ppc/spapr_rtas.c | 3
-rw-r--r--  hw/ppc/trace-events | 12
-rw-r--r--  hw/s390x/css.c | 15
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 9
-rw-r--r--  hw/s390x/s390-virtio.c | 10
-rw-r--r--  hw/s390x/virtio-ccw.c | 109
-rw-r--r--  hw/s390x/virtio-ccw.h | 13
-rw-r--r--  hw/scsi/esp-pci.c | 3
-rw-r--r--  hw/scsi/esp.c | 6
-rw-r--r--  hw/scsi/lsi53c895a.c | 10
-rw-r--r--  hw/scsi/megasas.c | 4
-rw-r--r--  hw/scsi/mptsas.c | 4
-rw-r--r--  hw/scsi/scsi-bus.c | 58
-rw-r--r--  hw/scsi/scsi-disk.c | 15
-rw-r--r--  hw/scsi/scsi-generic.c | 20
-rw-r--r--  hw/scsi/spapr_vscsi.c | 5
-rw-r--r--  hw/scsi/virtio-scsi-dataplane.c | 14
-rw-r--r--  hw/scsi/virtio-scsi.c | 29
-rw-r--r--  hw/sd/sdhci.c | 4
-rw-r--r--  hw/sh4/r2d.c | 1
-rw-r--r--  hw/sparc/sun4m.c | 5
-rw-r--r--  hw/sparc64/sun4u.c | 2
-rw-r--r--  hw/ssi/aspeed_smc.c | 13
-rw-r--r--  hw/timer/Makefile.objs | 3
-rw-r--r--  hw/timer/m48t59-internal.h | 82
-rw-r--r--  hw/timer/m48t59-isa.c | 181
-rw-r--r--  hw/timer/m48t59.c | 230
-rw-r--r--  hw/timer/mips_gictimer.c | 5
-rw-r--r--  hw/usb/bus.c | 9
-rw-r--r--  hw/usb/desc.c | 7
-rw-r--r--  hw/usb/dev-audio.c | 4
-rw-r--r--  hw/usb/dev-bluetooth.c | 4
-rw-r--r--  hw/usb/dev-hid.c | 10
-rw-r--r--  hw/usb/dev-hub.c | 4
-rw-r--r--  hw/usb/dev-network.c | 4
-rw-r--r--  hw/usb/dev-smartcard-reader.c | 142
-rw-r--r--  hw/usb/dev-uas.c | 7
-rw-r--r--  hw/usb/dev-wacom.c | 4
-rw-r--r--  hw/usb/hcd-ehci-pci.c | 9
-rw-r--r--  hw/usb/hcd-ehci.c | 5
-rw-r--r--  hw/usb/hcd-ehci.h | 1
-rw-r--r--  hw/usb/hcd-ohci.c | 13
-rw-r--r--  hw/usb/hcd-xhci.c | 320
-rw-r--r--  hw/usb/host-libusb.c | 4
-rw-r--r--  hw/usb/redirect.c | 4
-rw-r--r--  hw/usb/trace-events | 1
-rw-r--r--  hw/vfio/Makefile.objs | 4
-rw-r--r--  hw/vfio/common.c | 65
-rw-r--r--  hw/vfio/pci-quirks.c | 71
-rw-r--r--  hw/vfio/pci.c | 37
-rw-r--r--  hw/vfio/trace-events | 2
-rw-r--r--  hw/virtio/virtio.c | 364
-rw-r--r--  hw/watchdog/Makefile.objs | 1
-rw-r--r--  hw/watchdog/wdt_aspeed.c | 225
146 files changed, 4151 insertions, 1574 deletions
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 99e94723b9..3af1c93dc8 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -2374,7 +2374,7 @@ static void coroutine_fn v9fs_flush(void *opaque)
/*
* Wait for pdu to complete.
*/
- qemu_co_queue_wait(&cancel_pdu->complete);
+ qemu_co_queue_wait(&cancel_pdu->complete, NULL);
cancel_pdu->cancelled = 0;
pdu_free(cancel_pdu);
}
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index 6017ca04bf..8c719d3f9d 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -198,7 +198,7 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
state->dev_count = id_list->len;
state->devs = g_new0(typeof(*state->devs), state->dev_count);
for (i = 0; i < id_list->len; i++) {
- state->devs[i].cpu = id_list->cpus[i].cpu;
+ state->devs[i].cpu = CPU(id_list->cpus[i].cpu);
state->devs[i].arch_id = id_list->cpus[i].arch_id;
}
memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c
index d6431fd586..85405da3df 100644
--- a/hw/alpha/dp264.c
+++ b/hw/alpha/dp264.c
@@ -177,6 +177,7 @@ static void clipper_machine_init(MachineClass *mc)
{
mc->desc = "Alpha DP264/CLIPPER";
mc->init = clipper_init;
+ mc->block_default_type = IF_IDE;
mc->max_cpus = 4;
mc->is_default = 1;
}
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
index a92c2f1c36..283c038814 100644
--- a/hw/arm/aspeed.c
+++ b/hw/arm/aspeed.c
@@ -113,9 +113,19 @@ static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size,
{
BlockBackend *blk = blk_by_legacy_dinfo(dinfo);
uint8_t *storage;
+ int64_t size;
- if (rom_size > blk_getlength(blk)) {
- rom_size = blk_getlength(blk);
+ /* The block backend size should have already been 'validated' by
+ * the creation of the m25p80 object.
+ */
+ size = blk_getlength(blk);
+ if (size <= 0) {
+ error_setg(errp, "failed to get flash size");
+ return;
+ }
+
+ if (rom_size > size) {
+ rom_size = size;
}
storage = g_new0(uint8_t, rom_size);
@@ -138,10 +148,6 @@ static void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
DriveInfo *dinfo = drive_get_next(IF_MTD);
qemu_irq cs_line;
- /*
- * FIXME: check that we are not using a flash module exceeding
- * the controller segment size
- */
fl->flash = ssi_create_slave_no_init(s->spi, flashtype);
if (dinfo) {
qdev_prop_set_drive(fl->flash, "drive", blk_by_legacy_dinfo(dinfo),
@@ -200,7 +206,9 @@ static void aspeed_board_init(MachineState *machine,
/*
* create a ROM region using the default mapping window size of
- * the flash module.
+ * the flash module. The window size is 64MB for the AST2400
+ * SoC and 128MB for the AST2500 SoC, which is twice as big as
+ * needed by the flash modules of the Aspeed machines.
*/
memory_region_init_rom(boot_rom, OBJECT(bmc), "aspeed.boot_rom",
fl->size, &error_abort);
diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c
index b3e7f07b61..571e4f097b 100644
--- a/hw/arm/aspeed_soc.c
+++ b/hw/arm/aspeed_soc.c
@@ -31,6 +31,7 @@
#define ASPEED_SOC_SCU_BASE 0x1E6E2000
#define ASPEED_SOC_SRAM_BASE 0x1E720000
#define ASPEED_SOC_TIMER_BASE 0x1E782000
+#define ASPEED_SOC_WDT_BASE 0x1E785000
#define ASPEED_SOC_I2C_BASE 0x1E78A000
static const int uart_irqs[] = { 9, 32, 33, 34, 10 };
@@ -170,6 +171,10 @@ static void aspeed_soc_init(Object *obj)
sc->info->silicon_rev);
object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc),
"ram-size", &error_abort);
+
+ object_initialize(&s->wdt, sizeof(s->wdt), TYPE_ASPEED_WDT);
+ object_property_add_child(obj, "wdt", OBJECT(&s->wdt), NULL);
+ qdev_set_parent_bus(DEVICE(&s->wdt), sysbus_get_default());
}
static void aspeed_soc_realize(DeviceState *dev, Error **errp)
@@ -286,6 +291,14 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
return;
}
sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, ASPEED_SOC_SDMC_BASE);
+
+ /* Watch dog */
+ object_property_set_bool(OBJECT(&s->wdt), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt), 0, ASPEED_SOC_WDT_BASE);
}
static void aspeed_soc_class_init(ObjectClass *oc, void *data)
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
index dd19ba3c99..b98e1c4a8c 100644
--- a/hw/arm/cubieboard.c
+++ b/hw/arm/cubieboard.c
@@ -71,6 +71,8 @@ static void cubieboard_init(MachineState *machine)
memory_region_add_subregion(get_system_memory(), AW_A10_SDRAM_BASE,
&s->sdram);
+ /* TODO create and connect IDE devices for ide_drive_get() */
+
cubieboard_binfo.ram_size = machine->ram_size;
cubieboard_binfo.kernel_filename = machine->kernel_filename;
cubieboard_binfo.kernel_cmdline = machine->kernel_cmdline;
@@ -82,6 +84,8 @@ static void cubieboard_machine_init(MachineClass *mc)
{
mc->desc = "cubietech cubieboard";
mc->init = cubieboard_init;
+ mc->block_default_type = IF_IDE;
+ mc->units_per_default_bus = 1;
}
DEFINE_MACHINE("cubieboard", cubieboard_machine_init)
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
index 80e5fd458b..0a4508cef3 100644
--- a/hw/arm/highbank.c
+++ b/hw/arm/highbank.c
@@ -363,6 +363,8 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, pic[82]);
}
+ /* TODO create and connect IDE devices for ide_drive_get() */
+
highbank_binfo.ram_size = ram_size;
highbank_binfo.kernel_filename = kernel_filename;
highbank_binfo.kernel_cmdline = kernel_cmdline;
@@ -405,7 +407,8 @@ static void highbank_class_init(ObjectClass *oc, void *data)
mc->desc = "Calxeda Highbank (ECX-1000)";
mc->init = highbank_init;
- mc->block_default_type = IF_SCSI;
+ mc->block_default_type = IF_IDE;
+ mc->units_per_default_bus = 1;
mc->max_cpus = 4;
}
@@ -421,7 +424,8 @@ static void midway_class_init(ObjectClass *oc, void *data)
mc->desc = "Calxeda Midway (ECX-2000)";
mc->init = midway_init;
- mc->block_default_type = IF_SCSI;
+ mc->block_default_type = IF_IDE;
+ mc->units_per_default_bus = 1;
mc->max_cpus = 4;
}
diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c
index 039812a3fd..5610ffc9ce 100644
--- a/hw/arm/integratorcp.c
+++ b/hw/arm/integratorcp.c
@@ -53,6 +53,26 @@ static uint8_t integrator_spd[128] = {
0xe, 4, 0x1c, 1, 2, 0x20, 0xc0, 0, 0, 0, 0, 0x30, 0x28, 0x30, 0x28, 0x40
};
+static const VMStateDescription vmstate_integratorcm = {
+ .name = "integratorcm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(cm_osc, IntegratorCMState),
+ VMSTATE_UINT32(cm_ctrl, IntegratorCMState),
+ VMSTATE_UINT32(cm_lock, IntegratorCMState),
+ VMSTATE_UINT32(cm_auxosc, IntegratorCMState),
+ VMSTATE_UINT32(cm_sdram, IntegratorCMState),
+ VMSTATE_UINT32(cm_init, IntegratorCMState),
+ VMSTATE_UINT32(cm_flags, IntegratorCMState),
+ VMSTATE_UINT32(cm_nvflags, IntegratorCMState),
+ VMSTATE_UINT32(int_level, IntegratorCMState),
+ VMSTATE_UINT32(irq_enabled, IntegratorCMState),
+ VMSTATE_UINT32(fiq_enabled, IntegratorCMState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static uint64_t integratorcm_read(void *opaque, hwaddr offset,
unsigned size)
{
@@ -309,6 +329,18 @@ typedef struct icp_pic_state {
qemu_irq parent_fiq;
} icp_pic_state;
+static const VMStateDescription vmstate_icp_pic = {
+ .name = "icp_pic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(level, icp_pic_state),
+ VMSTATE_UINT32(irq_enabled, icp_pic_state),
+ VMSTATE_UINT32(fiq_enabled, icp_pic_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static void icp_pic_update(icp_pic_state *s)
{
uint32_t flags;
@@ -438,6 +470,16 @@ typedef struct ICPCtrlRegsState {
#define ICP_INTREG_WPROT (1 << 0)
#define ICP_INTREG_CARDIN (1 << 3)
+static const VMStateDescription vmstate_icp_control = {
+ .name = "icp_control",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(intreg_state, ICPCtrlRegsState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static uint64_t icp_control_read(void *opaque, hwaddr offset,
unsigned size)
{
@@ -535,27 +577,42 @@ static void integratorcp_init(MachineState *machine)
const char *kernel_filename = machine->kernel_filename;
const char *kernel_cmdline = machine->kernel_cmdline;
const char *initrd_filename = machine->initrd_filename;
+ char **cpustr;
ObjectClass *cpu_oc;
+ CPUClass *cc;
Object *cpuobj;
ARMCPU *cpu;
+ const char *typename;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
MemoryRegion *ram_alias = g_new(MemoryRegion, 1);
qemu_irq pic[32];
DeviceState *dev, *sic, *icp;
int i;
+ Error *err = NULL;
if (!cpu_model) {
cpu_model = "arm926";
}
- cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
+ cpustr = g_strsplit(cpu_model, ",", 2);
+
+ cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, cpustr[0]);
if (!cpu_oc) {
fprintf(stderr, "Unable to find CPU definition\n");
exit(1);
}
+ typename = object_class_get_name(cpu_oc);
- cpuobj = object_new(object_class_get_name(cpu_oc));
+ cc = CPU_CLASS(cpu_oc);
+ cc->parse_features(typename, cpustr[1], &err);
+ g_strfreev(cpustr);
+ if (err) {
+ error_report_err(err);
+ exit(1);
+ }
+
+ cpuobj = object_new(typename);
/* By default ARM1176 CPUs have EL3 enabled. This board does not
* currently support EL3 so the CPU EL3 property is disabled before
@@ -640,6 +697,21 @@ static void core_class_init(ObjectClass *klass, void *data)
dc->props = core_properties;
dc->realize = integratorcm_realize;
+ dc->vmsd = &vmstate_integratorcm;
+}
+
+static void icp_pic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_icp_pic;
+}
+
+static void icp_control_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_icp_control;
}
static const TypeInfo core_info = {
@@ -655,6 +727,7 @@ static const TypeInfo icp_pic_info = {
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(icp_pic_state),
.instance_init = icp_pic_init,
+ .class_init = icp_pic_class_init,
};
static const TypeInfo icp_ctrl_regs_info = {
@@ -662,6 +735,7 @@ static const TypeInfo icp_ctrl_regs_info = {
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(ICPCtrlRegsState),
.instance_init = icp_control_init,
+ .class_init = icp_control_class_init,
};
static void integratorcp_register_types(void)
diff --git a/hw/arm/realview.c b/hw/arm/realview.c
index 8eafccaf1d..b7d4753400 100644
--- a/hw/arm/realview.c
+++ b/hw/arm/realview.c
@@ -259,7 +259,7 @@ static void realview_init(MachineState *machine,
}
n = drive_get_max_bus(IF_SCSI);
while (n >= 0) {
- pci_create_simple(pci_bus, -1, "lsi53c895a");
+ lsi53c895a_create(pci_bus);
n--;
}
}
@@ -443,7 +443,6 @@ static void realview_pbx_a9_class_init(ObjectClass *oc, void *data)
mc->desc = "ARM RealView Platform Baseboard Explore for Cortex-A9";
mc->init = realview_pbx_a9_init;
- mc->block_default_type = IF_SCSI;
mc->max_cpus = 4;
}
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index 949a15ae64..fe2d5a764c 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -998,6 +998,7 @@ static void spitzpda_class_init(ObjectClass *oc, void *data)
mc->desc = "Sharp SL-C3000 (Spitz) PDA (PXA270)";
mc->init = spitz_init;
+ mc->block_default_type = IF_IDE;
}
static const TypeInfo spitzpda_type = {
@@ -1012,6 +1013,7 @@ static void borzoipda_class_init(ObjectClass *oc, void *data)
mc->desc = "Sharp SL-C3100 (Borzoi) PDA (PXA270)";
mc->init = borzoi_init;
+ mc->block_default_type = IF_IDE;
}
static const TypeInfo borzoipda_type = {
@@ -1026,6 +1028,7 @@ static void terrierpda_class_init(ObjectClass *oc, void *data)
mc->desc = "Sharp SL-C3200 (Terrier) PDA (PXA270)";
mc->init = terrier_init;
+ mc->block_default_type = IF_IDE;
}
static const TypeInfo terrierpda_type = {
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
index 794a3ada71..9edcd49740 100644
--- a/hw/arm/stellaris.c
+++ b/hw/arm/stellaris.c
@@ -21,6 +21,7 @@
#include "exec/address-spaces.h"
#include "sysemu/sysemu.h"
#include "hw/char/pl011.h"
+#include "hw/misc/unimp.h"
#define GPIO_A 0
#define GPIO_B 1
@@ -1220,6 +1221,40 @@ static void stellaris_init(const char *kernel_filename, const char *cpu_model,
0x40024000, 0x40025000, 0x40026000};
static const int gpio_irq[7] = {0, 1, 2, 3, 4, 30, 31};
+ /* Memory map of SoC devices, from
+ * Stellaris LM3S6965 Microcontroller Data Sheet (rev I)
+ * http://www.ti.com/lit/ds/symlink/lm3s6965.pdf
+ *
+ * 40000000 wdtimer (unimplemented)
+ * 40002000 i2c (unimplemented)
+ * 40004000 GPIO
+ * 40005000 GPIO
+ * 40006000 GPIO
+ * 40007000 GPIO
+ * 40008000 SSI
+ * 4000c000 UART
+ * 4000d000 UART
+ * 4000e000 UART
+ * 40020000 i2c
+ * 40021000 i2c (unimplemented)
+ * 40024000 GPIO
+ * 40025000 GPIO
+ * 40026000 GPIO
+ * 40028000 PWM (unimplemented)
+ * 4002c000 QEI (unimplemented)
+ * 4002d000 QEI (unimplemented)
+ * 40030000 gptimer
+ * 40031000 gptimer
+ * 40032000 gptimer
+ * 40033000 gptimer
+ * 40038000 ADC
+ * 4003c000 analogue comparator (unimplemented)
+ * 40048000 ethernet
+ * 400fc000 hibernation module (unimplemented)
+ * 400fd000 flash memory control (unimplemented)
+ * 400fe000 system control
+ */
+
DeviceState *gpio_dev[7], *nvic;
qemu_irq gpio_in[7][8];
qemu_irq gpio_out[7][8];
@@ -1370,6 +1405,19 @@ static void stellaris_init(const char *kernel_filename, const char *cpu_model,
}
}
}
+
+ /* Add dummy regions for the devices we don't implement yet,
+ * so guest accesses don't cause unlogged crashes.
+ */
+ create_unimplemented_device("wdtimer", 0x40000000, 0x1000);
+ create_unimplemented_device("i2c-0", 0x40002000, 0x1000);
+ create_unimplemented_device("i2c-2", 0x40021000, 0x1000);
+ create_unimplemented_device("PWM", 0x40028000, 0x1000);
+ create_unimplemented_device("QEI-0", 0x4002c000, 0x1000);
+ create_unimplemented_device("QEI-1", 0x4002d000, 0x1000);
+ create_unimplemented_device("analogue-comparator", 0x4003c000, 0x1000);
+ create_unimplemented_device("hibernation", 0x400fc000, 0x1000);
+ create_unimplemented_device("flash-control", 0x400fd000, 0x1000);
}
/* FIXME: Figure out how to generate these from stellaris_boards. */
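[Note: the create_unimplemented_device() helper used above comes from the new hw/misc/unimp.c, which is listed in the diffstat but not shown in this hunk. A minimal sketch of the idea only -- a low-priority background MMIO region whose accesses are logged and otherwise ignored -- might look like the following; the function names, includes and the -1000 priority here are illustrative assumptions, not the actual unimp.c contents.]

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

static uint64_t unimp_read(void *opaque, hwaddr offset, unsigned size)
{
    /* reads of an unimplemented device return 0 but are logged */
    qemu_log_mask(LOG_UNIMP, "%s: unimplemented read @0x%" HWADDR_PRIx "\n",
                  (const char *)opaque, offset);
    return 0;
}

static void unimp_write(void *opaque, hwaddr offset, uint64_t value,
                        unsigned size)
{
    /* writes are discarded, again with a LOG_UNIMP trace */
    qemu_log_mask(LOG_UNIMP, "%s: unimplemented write @0x%" HWADDR_PRIx
                  " val 0x%" PRIx64 "\n", (const char *)opaque, offset, value);
}

static const MemoryRegionOps unimp_ops = {
    .read = unimp_read,
    .write = unimp_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Map the stub behind everything else so real devices still take priority. */
static void create_unimp_region(const char *name, hwaddr base, hwaddr size)
{
    MemoryRegion *mr = g_new0(MemoryRegion, 1);

    memory_region_init_io(mr, NULL, &unimp_ops, (void *)name, name, size);
    memory_region_add_subregion_overlap(get_system_memory(), base, mr, -1000);
}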
diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c
index c3db996930..9f58a23fb5 100644
--- a/hw/arm/tosa.c
+++ b/hw/arm/tosa.c
@@ -263,6 +263,7 @@ static void tosapda_machine_init(MachineClass *mc)
{
mc->desc = "Sharp SL-6000 (Tosa) PDA (PXA255)";
mc->init = tosa_init;
+ mc->block_default_type = IF_IDE;
}
DEFINE_MACHINE("tosa", tosapda_machine_init)
diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c
index 7b5cb36d5a..b0e9f5be65 100644
--- a/hw/arm/versatilepb.c
+++ b/hw/arm/versatilepb.c
@@ -290,7 +290,7 @@ static void versatile_init(MachineState *machine, int board_id)
}
n = drive_get_max_bus(IF_SCSI);
while (n >= 0) {
- pci_create_simple(pci_bus, -1, "lsi53c895a");
+ lsi53c895a_create(pci_bus);
n--;
}
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
index 58760f40ca..c6b1e674b4 100644
--- a/hw/arm/vexpress.c
+++ b/hw/arm/vexpress.c
@@ -452,6 +452,7 @@ static int add_virtio_mmio_node(void *fdt, uint32_t acells, uint32_t scells,
acells, addr, scells, size);
qemu_fdt_setprop_cells(fdt, nodename, "interrupt-parent", intc);
qemu_fdt_setprop_cells(fdt, nodename, "interrupts", 0, irq, 1);
+ qemu_fdt_setprop(fdt, nodename, "dma-coherent", NULL, 0);
g_free(nodename);
if (rc) {
return -1;
@@ -751,7 +752,6 @@ static void vexpress_class_init(ObjectClass *oc, void *data)
mc->desc = "ARM Versatile Express";
mc->init = vexpress_common_init;
- mc->block_default_type = IF_SCSI;
mc->max_cpus = 4;
}
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 07a10aca40..0835e59bb2 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -90,6 +90,7 @@ static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
/* device present, functioning, decoding, not shown in UI */
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
+ aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
@@ -135,6 +136,7 @@ static void acpi_dsdt_add_virtio(Aml *scope,
Aml *dev = aml_device("VR%02u", i);
aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
aml_append(dev, aml_name_decl("_UID", aml_int(i)));
+ aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 1f216cf3b1..f3440f2ccb 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -471,7 +471,7 @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms)
CPU_FOREACH(cpu) {
armcpu = ARM_CPU(cpu);
if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU) ||
- !kvm_arm_pmu_create(cpu, PPI(VIRTUAL_PMU_IRQ))) {
+ (kvm_enabled() && !kvm_arm_pmu_create(cpu, PPI(VIRTUAL_PMU_IRQ)))) {
return;
}
}
@@ -797,6 +797,7 @@ static void create_virtio_devices(const VirtMachineState *vms, qemu_irq *pic)
qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
GIC_FDT_IRQ_TYPE_SPI, irq,
GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
+ qemu_fdt_setprop(vms->fdt, nodename, "dma-coherent", NULL, 0);
g_free(nodename);
}
}
@@ -928,6 +929,7 @@ static FWCfgState *create_fw_cfg(const VirtMachineState *vms, AddressSpace *as)
"compatible", "qemu,fw-cfg-mmio");
qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, base, 2, size);
+ qemu_fdt_setprop(vms->fdt, nodename, "dma-coherent", NULL, 0);
g_free(nodename);
return fw_cfg;
}
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
index 7dac20d67d..3985356fc2 100644
--- a/hw/arm/xilinx_zynq.c
+++ b/hw/arm/xilinx_zynq.c
@@ -323,7 +323,6 @@ static void zynq_machine_init(MachineClass *mc)
{
mc->desc = "Xilinx Zynq Platform Baseboard for Cortex-A9";
mc->init = zynq_init;
- mc->block_default_type = IF_SCSI;
mc->max_cpus = 1;
mc->no_sdcard = 1;
}
diff --git a/hw/arm/xlnx-ep108.c b/hw/arm/xlnx-ep108.c
index 4ec590a25d..860780ab8b 100644
--- a/hw/arm/xlnx-ep108.c
+++ b/hw/arm/xlnx-ep108.c
@@ -106,6 +106,8 @@ static void xlnx_ep108_init(MachineState *machine)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi[i]), 1, cs_line);
}
+ /* TODO create and connect IDE devices for ide_drive_get() */
+
xlnx_ep108_binfo.ram_size = ram_size;
xlnx_ep108_binfo.kernel_filename = machine->kernel_filename;
xlnx_ep108_binfo.kernel_cmdline = machine->kernel_cmdline;
@@ -118,6 +120,8 @@ static void xlnx_ep108_machine_init(MachineClass *mc)
{
mc->desc = "Xilinx ZynqMP EP108 board";
mc->init = xlnx_ep108_init;
+ mc->block_default_type = IF_IDE;
+ mc->units_per_default_bus = 1;
}
DEFINE_MACHINE("xlnx-ep108", xlnx_ep108_machine_init)
@@ -126,6 +130,8 @@ static void xlnx_zcu102_machine_init(MachineClass *mc)
{
mc->desc = "Xilinx ZynqMP ZCU102 board";
mc->init = xlnx_ep108_init;
+ mc->block_default_type = IF_IDE;
+ mc->units_per_default_bus = 1;
}
DEFINE_MACHINE("xlnx-zcu102", xlnx_zcu102_machine_init)
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index d1f9f63eaf..5556f0e64e 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -147,7 +147,7 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
g_free(s);
}
-static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
+static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
VirtQueue *vq)
{
VirtIOBlock *s = (VirtIOBlock *)vdev;
@@ -155,7 +155,7 @@ static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
assert(s->dataplane);
assert(s->dataplane_started);
- virtio_blk_handle_vq(s, vq);
+ return virtio_blk_handle_vq(s, vq);
}
/* Context: QEMU global mutex held */
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 702eda863e..843bd2fa73 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -89,7 +89,9 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
static void virtio_blk_rw_complete(void *opaque, int ret)
{
VirtIOBlockReq *next = opaque;
+ VirtIOBlock *s = next->dev;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
while (next) {
VirtIOBlockReq *req = next;
next = req->mr_next;
@@ -122,21 +124,27 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
virtio_blk_free_request(req);
}
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_flush_complete(void *opaque, int ret)
{
VirtIOBlockReq *req = opaque;
+ VirtIOBlock *s = req->dev;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
if (ret) {
if (virtio_blk_handle_rw_error(req, -ret, 0)) {
- return;
+ goto out;
}
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
virtio_blk_free_request(req);
+
+out:
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
#ifdef __linux__
@@ -150,7 +158,8 @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
{
VirtIOBlockIoctlReq *ioctl_req = opaque;
VirtIOBlockReq *req = ioctl_req->req;
- VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
struct virtio_scsi_inhdr *scsi;
struct sg_io_hdr *hdr;
@@ -182,8 +191,10 @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
out:
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
virtio_blk_req_complete(req, status);
virtio_blk_free_request(req);
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
g_free(ioctl_req);
}
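[Note: the completion-path changes in this file all follow one locking pattern: take the BlockBackend's AioContext before completing, re-queuing or freeing a request, and release it afterwards. A condensed sketch of that pattern, assuming a VirtIOBlock *s as in the code above (not itself part of the patch):]

AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);

aio_context_acquire(ctx);
/* complete the request, update accounting, free it */
virtio_blk_req_complete(req, status);
virtio_blk_free_request(req);
aio_context_release(ctx);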
@@ -581,17 +592,20 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
return 0;
}
-void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
+bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
VirtIOBlockReq *req;
MultiReqBuffer mrb = {};
+ bool progress = false;
+ aio_context_acquire(blk_get_aio_context(s->blk));
blk_io_plug(s->blk);
do {
virtio_queue_set_notification(vq, 0);
while ((req = virtio_blk_get_request(s, vq))) {
+ progress = true;
if (virtio_blk_handle_request(req, &mrb)) {
virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_blk_free_request(req);
@@ -607,6 +621,13 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
}
blk_io_unplug(s->blk);
+ aio_context_release(blk_get_aio_context(s->blk));
+ return progress;
+}
+
+static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq)
+{
+ virtio_blk_handle_vq(s, vq);
}
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
@@ -622,7 +643,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
return;
}
}
- virtio_blk_handle_vq(s, vq);
+ virtio_blk_handle_output_do(s, vq);
}
static void virtio_blk_dma_restart_bh(void *opaque)
@@ -636,6 +657,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
s->rq = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
while (req) {
VirtIOBlockReq *next = req->next;
if (virtio_blk_handle_request(req, &mrb)) {
@@ -656,6 +678,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
if (mrb.num_reqs) {
virtio_blk_submit_multireq(s->blk, &mrb);
}
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_dma_restart_cb(void *opaque, int running,
diff --git a/hw/char/Makefile.objs b/hw/char/Makefile.objs
index 69a553cd8d..6ea76feb12 100644
--- a/hw/char/Makefile.objs
+++ b/hw/char/Makefile.objs
@@ -2,7 +2,8 @@ common-obj-$(CONFIG_IPACK) += ipoctal232.o
common-obj-$(CONFIG_ESCC) += escc.o
common-obj-$(CONFIG_PARALLEL) += parallel.o
common-obj-$(CONFIG_PL011) += pl011.o
-common-obj-$(CONFIG_SERIAL) += serial.o serial-isa.o
+common-obj-$(CONFIG_SERIAL) += serial.o
+common-obj-$(CONFIG_SERIAL_ISA) += serial-isa.o
common-obj-$(CONFIG_SERIAL_PCI) += serial-pci.o
common-obj-$(CONFIG_VIRTIO) += virtio-console.o
common-obj-$(CONFIG_XILINX) += xilinx_uartlite.o
diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c
index 7c16e894e2..b75f28d473 100644
--- a/hw/char/exynos4210_uart.c
+++ b/hw/char/exynos4210_uart.c
@@ -561,7 +561,7 @@ static const VMStateDescription vmstate_exynos4210_uart_fifo = {
.fields = (VMStateField[]) {
VMSTATE_UINT32(sp, Exynos4210UartFIFO),
VMSTATE_UINT32(rp, Exynos4210UartFIFO),
- VMSTATE_VBUFFER_UINT32(data, Exynos4210UartFIFO, 1, NULL, 0, size),
+ VMSTATE_VBUFFER_UINT32(data, Exynos4210UartFIFO, 1, NULL, size),
VMSTATE_END_OF_LIST()
}
};
diff --git a/hw/char/mcf_uart.c b/hw/char/mcf_uart.c
index 80c380e077..e69672f4e9 100644
--- a/hw/char/mcf_uart.c
+++ b/hw/char/mcf_uart.c
@@ -7,12 +7,15 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
+#include "hw/sysbus.h"
#include "hw/m68k/mcf.h"
#include "sysemu/char.h"
#include "exec/address-spaces.h"
#include "qapi/error.h"
typedef struct {
+ SysBusDevice parent_obj;
+
MemoryRegion iomem;
uint8_t mr[2];
uint8_t sr;
@@ -30,6 +33,9 @@ typedef struct {
CharBackend chr;
} mcf_uart_state;
+#define TYPE_MCF_UART "mcf-uart"
+#define MCF_UART(obj) OBJECT_CHECK(mcf_uart_state, (obj), TYPE_MCF_UART)
+
/* UART Status Register bits. */
#define MCF_UART_RxRDY 0x01
#define MCF_UART_FFULL 0x02
@@ -220,8 +226,10 @@ void mcf_uart_write(void *opaque, hwaddr addr,
mcf_uart_update(s);
}
-static void mcf_uart_reset(mcf_uart_state *s)
+static void mcf_uart_reset(DeviceState *dev)
{
+ mcf_uart_state *s = MCF_UART(dev);
+
s->fifo_len = 0;
s->mr[0] = 0;
s->mr[1] = 0;
@@ -275,36 +283,80 @@ static void mcf_uart_receive(void *opaque, const uint8_t *buf, int size)
mcf_uart_push_byte(s, buf[0]);
}
-void *mcf_uart_init(qemu_irq irq, Chardev *chr)
-{
- mcf_uart_state *s;
-
- s = g_malloc0(sizeof(mcf_uart_state));
- s->irq = irq;
- if (chr) {
- qemu_chr_fe_init(&s->chr, chr, &error_abort);
- qemu_chr_fe_set_handlers(&s->chr, mcf_uart_can_receive,
- mcf_uart_receive, mcf_uart_event,
- s, NULL, true);
- }
- mcf_uart_reset(s);
- return s;
-}
-
static const MemoryRegionOps mcf_uart_ops = {
.read = mcf_uart_read,
.write = mcf_uart_write,
.endianness = DEVICE_NATIVE_ENDIAN,
};
-void mcf_uart_mm_init(MemoryRegion *sysmem,
- hwaddr base,
- qemu_irq irq,
- Chardev *chr)
+static void mcf_uart_instance_init(Object *obj)
+{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
+ mcf_uart_state *s = MCF_UART(dev);
+
+ memory_region_init_io(&s->iomem, obj, &mcf_uart_ops, s, "uart", 0x40);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ sysbus_init_irq(dev, &s->irq);
+}
+
+static void mcf_uart_realize(DeviceState *dev, Error **errp)
+{
+ mcf_uart_state *s = MCF_UART(dev);
+
+ qemu_chr_fe_set_handlers(&s->chr, mcf_uart_can_receive, mcf_uart_receive,
+ mcf_uart_event, s, NULL, true);
+}
+
+static Property mcf_uart_properties[] = {
+ DEFINE_PROP_CHR("chardev", mcf_uart_state, chr),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void mcf_uart_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = mcf_uart_realize;
+ dc->reset = mcf_uart_reset;
+ dc->props = mcf_uart_properties;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+}
+
+static const TypeInfo mcf_uart_info = {
+ .name = TYPE_MCF_UART,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(mcf_uart_state),
+ .instance_init = mcf_uart_instance_init,
+ .class_init = mcf_uart_class_init,
+};
+
+static void mcf_uart_register(void)
+{
+ type_register_static(&mcf_uart_info);
+}
+
+type_init(mcf_uart_register)
+
+void *mcf_uart_init(qemu_irq irq, Chardev *chrdrv)
+{
+ DeviceState *dev;
+
+ dev = qdev_create(NULL, TYPE_MCF_UART);
+ if (chrdrv) {
+ qdev_prop_set_chr(dev, "chardev", chrdrv);
+ }
+ qdev_init_nofail(dev);
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
+
+ return dev;
+}
+
+void mcf_uart_mm_init(hwaddr base, qemu_irq irq, Chardev *chrdrv)
{
- mcf_uart_state *s;
+ DeviceState *dev;
- s = mcf_uart_init(irq, chr);
- memory_region_init_io(&s->iomem, NULL, &mcf_uart_ops, s, "uart", 0x40);
- memory_region_add_subregion(sysmem, base, &s->iomem);
+ dev = mcf_uart_init(irq, chrdrv);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
}
diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs
index 7f8c9dc659..91450b2eab 100644
--- a/hw/core/Makefile.objs
+++ b/hw/core/Makefile.objs
@@ -13,6 +13,7 @@ common-obj-$(CONFIG_PTIMER) += ptimer.o
common-obj-$(CONFIG_SOFTMMU) += sysbus.o
common-obj-$(CONFIG_SOFTMMU) += machine.o
common-obj-$(CONFIG_SOFTMMU) += loader.o
+common-obj-$(CONFIG_FITLOADER) += loader-fit.o
common-obj-$(CONFIG_SOFTMMU) += qdev-properties-system.o
common-obj-$(CONFIG_SOFTMMU) += register.o
common-obj-$(CONFIG_SOFTMMU) += or-irq.o
diff --git a/hw/core/irq.c b/hw/core/irq.c
index 49ff2e64fe..b98d1d69f5 100644
--- a/hw/core/irq.c
+++ b/hw/core/irq.c
@@ -22,6 +22,7 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
#include "qemu-common.h"
#include "hw/irq.h"
#include "qom/object.h"
diff --git a/hw/core/loader-fit.c b/hw/core/loader-fit.c
new file mode 100644
index 0000000000..0c4a7207f4
--- /dev/null
+++ b/hw/core/loader-fit.c
@@ -0,0 +1,325 @@
+/*
+ * Flattened Image Tree loader.
+ *
+ * Copyright (c) 2016 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "exec/address-spaces.h"
+#include "exec/memory.h"
+#include "hw/loader.h"
+#include "hw/loader-fit.h"
+#include "qemu/cutils.h"
+#include "qemu/error-report.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/sysemu.h"
+
+#include <libfdt.h>
+#include <zlib.h>
+
+#define FIT_LOADER_MAX_PATH (128)
+
+static const void *fit_load_image_alloc(const void *itb, const char *name,
+ int *poff, size_t *psz)
+{
+ const void *data;
+ const char *comp;
+ void *uncomp_data;
+ char path[FIT_LOADER_MAX_PATH];
+ int off, sz;
+ ssize_t uncomp_len;
+
+ snprintf(path, sizeof(path), "/images/%s", name);
+
+ off = fdt_path_offset(itb, path);
+ if (off < 0) {
+ return NULL;
+ }
+ if (poff) {
+ *poff = off;
+ }
+
+ data = fdt_getprop(itb, off, "data", &sz);
+ if (!data) {
+ return NULL;
+ }
+
+ comp = fdt_getprop(itb, off, "compression", NULL);
+ if (!comp || !strcmp(comp, "none")) {
+ if (psz) {
+ *psz = sz;
+ }
+ uncomp_data = g_malloc(sz);
+ memmove(uncomp_data, data, sz);
+ return uncomp_data;
+ }
+
+ if (!strcmp(comp, "gzip")) {
+ uncomp_len = UBOOT_MAX_GUNZIP_BYTES;
+ uncomp_data = g_malloc(uncomp_len);
+
+ uncomp_len = gunzip(uncomp_data, uncomp_len, (void *) data, sz);
+ if (uncomp_len < 0) {
+ error_printf("unable to decompress %s image\n", name);
+ g_free(uncomp_data);
+ return NULL;
+ }
+
+ data = g_realloc(uncomp_data, uncomp_len);
+ if (psz) {
+ *psz = uncomp_len;
+ }
+ return data;
+ }
+
+ error_printf("unknown compression '%s'\n", comp);
+ return NULL;
+}
+
+static int fit_image_addr(const void *itb, int img, const char *name,
+ hwaddr *addr)
+{
+ const void *prop;
+ int len;
+
+ prop = fdt_getprop(itb, img, name, &len);
+ if (!prop) {
+ return -ENOENT;
+ }
+
+ switch (len) {
+ case 4:
+ *addr = fdt32_to_cpu(*(fdt32_t *)prop);
+ return 0;
+ case 8:
+ *addr = fdt64_to_cpu(*(fdt64_t *)prop);
+ return 0;
+ default:
+ error_printf("invalid %s address length %d\n", name, len);
+ return -EINVAL;
+ }
+}
+
+static int fit_load_kernel(const struct fit_loader *ldr, const void *itb,
+ int cfg, void *opaque, hwaddr *pend)
+{
+ const char *name;
+ const void *data;
+ const void *load_data;
+ hwaddr load_addr, entry_addr;
+ int img_off, err;
+ size_t sz;
+ int ret;
+
+ name = fdt_getprop(itb, cfg, "kernel", NULL);
+ if (!name) {
+ error_printf("no kernel specified by FIT configuration\n");
+ return -EINVAL;
+ }
+
+ load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz);
+ if (!data) {
+ error_printf("unable to load kernel image from FIT\n");
+ return -EINVAL;
+ }
+
+ err = fit_image_addr(itb, img_off, "load", &load_addr);
+ if (err) {
+ error_printf("unable to read kernel load address from FIT\n");
+ ret = err;
+ goto out;
+ }
+
+ err = fit_image_addr(itb, img_off, "entry", &entry_addr);
+ if (err) {
+ error_printf("unable to read kernel entry address from FIT\n");
+ ret = err;
+ goto out;
+ }
+
+ if (ldr->kernel_filter) {
+ load_data = ldr->kernel_filter(opaque, data, &load_addr, &entry_addr);
+ }
+
+ if (pend) {
+ *pend = load_addr + sz;
+ }
+
+ load_addr = ldr->addr_to_phys(opaque, load_addr);
+ rom_add_blob_fixed(name, load_data, sz, load_addr);
+
+ ret = 0;
+out:
+ g_free((void *) data);
+ if (data != load_data) {
+ g_free((void *) load_data);
+ }
+ return ret;
+}
+
+static int fit_load_fdt(const struct fit_loader *ldr, const void *itb,
+ int cfg, void *opaque, const void *match_data,
+ hwaddr kernel_end)
+{
+ const char *name;
+ const void *data;
+ const void *load_data;
+ hwaddr load_addr;
+ int img_off, err;
+ size_t sz;
+ int ret;
+
+ name = fdt_getprop(itb, cfg, "fdt", NULL);
+ if (!name) {
+ return 0;
+ }
+
+ load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz);
+ if (!data) {
+ error_printf("unable to load FDT image from FIT\n");
+ return -EINVAL;
+ }
+
+ err = fit_image_addr(itb, img_off, "load", &load_addr);
+ if (err == -ENOENT) {
+ load_addr = ROUND_UP(kernel_end, 64 * K_BYTE) + (10 * M_BYTE);
+ } else if (err) {
+ ret = err;
+ goto out;
+ }
+
+ if (ldr->fdt_filter) {
+ load_data = ldr->fdt_filter(opaque, data, match_data, &load_addr);
+ }
+
+ load_addr = ldr->addr_to_phys(opaque, load_addr);
+ sz = fdt_totalsize(load_data);
+ rom_add_blob_fixed(name, load_data, sz, load_addr);
+
+ ret = 0;
+out:
+ g_free((void *) data);
+ if (data != load_data) {
+ g_free((void *) load_data);
+ }
+ return ret;
+}
+
+static bool fit_cfg_compatible(const void *itb, int cfg, const char *compat)
+{
+ const void *fdt;
+ const char *fdt_name;
+ bool ret;
+
+ fdt_name = fdt_getprop(itb, cfg, "fdt", NULL);
+ if (!fdt_name) {
+ return false;
+ }
+
+ fdt = fit_load_image_alloc(itb, fdt_name, NULL, NULL);
+ if (!fdt) {
+ return false;
+ }
+
+ if (fdt_check_header(fdt)) {
+ ret = false;
+ goto out;
+ }
+
+ if (fdt_node_check_compatible(fdt, 0, compat)) {
+ ret = false;
+ goto out;
+ }
+
+ ret = true;
+out:
+ g_free((void *) fdt);
+ return ret;
+}
+
+int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque)
+{
+ const struct fit_loader_match *match;
+ const void *itb, *match_data = NULL;
+ const char *def_cfg_name;
+ char path[FIT_LOADER_MAX_PATH];
+ int itb_size, configs, cfg_off, off, err;
+ hwaddr kernel_end;
+ int ret;
+
+ itb = load_device_tree(filename, &itb_size);
+ if (!itb) {
+ return -EINVAL;
+ }
+
+ configs = fdt_path_offset(itb, "/configurations");
+ if (configs < 0) {
+ ret = configs;
+ goto out;
+ }
+
+ cfg_off = -FDT_ERR_NOTFOUND;
+
+ if (ldr->matches) {
+ for (match = ldr->matches; match->compatible; match++) {
+ off = fdt_first_subnode(itb, configs);
+ while (off >= 0) {
+ if (fit_cfg_compatible(itb, off, match->compatible)) {
+ cfg_off = off;
+ match_data = match->data;
+ break;
+ }
+
+ off = fdt_next_subnode(itb, off);
+ }
+
+ if (cfg_off >= 0) {
+ break;
+ }
+ }
+ }
+
+ if (cfg_off < 0) {
+ def_cfg_name = fdt_getprop(itb, configs, "default", NULL);
+ if (def_cfg_name) {
+ snprintf(path, sizeof(path), "/configurations/%s", def_cfg_name);
+ cfg_off = fdt_path_offset(itb, path);
+ }
+ }
+
+ if (cfg_off < 0) {
+ /* couldn't find a configuration to use */
+ ret = cfg_off;
+ goto out;
+ }
+
+ err = fit_load_kernel(ldr, itb, cfg_off, opaque, &kernel_end);
+ if (err) {
+ ret = err;
+ goto out;
+ }
+
+ err = fit_load_fdt(ldr, itb, cfg_off, opaque, match_data, kernel_end);
+ if (err) {
+ ret = err;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ g_free((void *) itb);
+ return ret;
+}
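[Note: a hypothetical board-side caller of load_fit(), for illustration only. The hook signatures are inferred from the call sites above; the real struct fit_loader / fit_loader_match declarations live in hw/loader-fit.h, which is not part of this hunk, and "vendor,my-board" plus the my_board_* names are made up.]

static hwaddr my_board_addr_to_phys(void *opaque, uint64_t addr)
{
    return addr & 0x7fffffffull;     /* e.g. strip a CPU-view offset */
}

static const void *my_board_kernel_filter(void *opaque, const void *kernel,
                                          hwaddr *load_addr, hwaddr *entry_addr)
{
    return kernel;                   /* no address rewriting in this sketch */
}

static const struct fit_loader_match my_board_matches[] = {
    { .compatible = "vendor,my-board" },
    { /* sentinel */ },
};

static const struct fit_loader my_board_fit_loader = {
    .matches = my_board_matches,
    .addr_to_phys = my_board_addr_to_phys,
    .kernel_filter = my_board_kernel_filter,
};

/* ... in the board's init function ... */
if (load_fit(&my_board_fit_loader, machine->kernel_filename, state) < 0) {
    error_report("unable to load FIT image");
    exit(1);
}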
diff --git a/hw/core/loader.c b/hw/core/loader.c
index ee5abd6eb7..8b980e91fb 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -527,12 +527,7 @@ static void zfree(void *x, void *addr)
#define DEFLATED 8
-/* This is the usual maximum in uboot, so if a uImage overflows this, it would
- * overflow on real hardware too. */
-#define UBOOT_MAX_GUNZIP_BYTES (64 << 20)
-
-static ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src,
- size_t srclen)
+ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen)
{
z_stream s;
ssize_t dstbytes;
diff --git a/hw/core/machine.c b/hw/core/machine.c
index b0fd91f6cd..0699750336 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -357,6 +357,37 @@ static void machine_init_notify(Notifier *notifier, void *data)
foreach_dynamic_sysbus_device(error_on_sysbus_device, NULL);
}
+HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine)
+{
+ int i;
+ Object *cpu;
+ HotpluggableCPUList *head = NULL;
+ const char *cpu_type;
+
+ cpu = machine->possible_cpus->cpus[0].cpu;
+ assert(cpu); /* Boot cpu is always present */
+ cpu_type = object_get_typename(cpu);
+ for (i = 0; i < machine->possible_cpus->len; i++) {
+ HotpluggableCPUList *list_item = g_new0(typeof(*list_item), 1);
+ HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1);
+
+ cpu_item->type = g_strdup(cpu_type);
+ cpu_item->vcpus_count = machine->possible_cpus->cpus[i].vcpus_count;
+ cpu_item->props = g_memdup(&machine->possible_cpus->cpus[i].props,
+ sizeof(*cpu_item->props));
+
+ cpu = machine->possible_cpus->cpus[i].cpu;
+ if (cpu) {
+ cpu_item->has_qom_path = true;
+ cpu_item->qom_path = object_get_canonical_path(cpu);
+ }
+ list_item->value = cpu_item;
+ list_item->next = head;
+ head = list_item;
+ }
+ return head;
+}
+
static void machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index 57834423b9..06ba02e2a3 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -933,9 +933,12 @@ static void device_set_realized(Object *obj, bool value, Error **errp)
}
if (qdev_get_vmsd(dev)) {
- vmstate_register_with_alias_id(dev, -1, qdev_get_vmsd(dev), dev,
- dev->instance_id_alias,
- dev->alias_required_for_version);
+ if (vmstate_register_with_alias_id(dev, -1, qdev_get_vmsd(dev), dev,
+ dev->instance_id_alias,
+ dev->alias_required_for_version,
+ &local_err) < 0) {
+ goto post_realize_fail;
+ }
}
QLIST_FOREACH(bus, &dev->child_bus, sibling) {
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 16f27e8ac5..b9e7cb1df1 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -28,6 +28,7 @@
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "trace.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "ui/console.h"
@@ -272,6 +273,9 @@ static void cirrus_update_memory_access(CirrusVGAState *s);
static bool blit_region_is_unsafe(struct CirrusVGAState *s,
int32_t pitch, int32_t addr)
{
+ if (!pitch) {
+ return true;
+ }
if (pitch < 0) {
int64_t min = addr
+ ((int64_t)s->cirrus_blt_height - 1) * pitch
@@ -290,11 +294,8 @@ static bool blit_region_is_unsafe(struct CirrusVGAState *s,
return false;
}
-static bool blit_is_unsafe(struct CirrusVGAState *s, bool dst_only,
- bool zero_src_pitch_ok)
+static bool blit_is_unsafe(struct CirrusVGAState *s, bool dst_only)
{
- int32_t check_pitch;
-
/* should be the case, see cirrus_bitblt_start */
assert(s->cirrus_blt_width > 0);
assert(s->cirrus_blt_height > 0);
@@ -303,10 +304,6 @@ static bool blit_is_unsafe(struct CirrusVGAState *s, bool dst_only,
return true;
}
- if (!s->cirrus_blt_dstpitch) {
- return true;
- }
-
if (blit_region_is_unsafe(s, s->cirrus_blt_dstpitch,
s->cirrus_blt_dstaddr)) {
return true;
@@ -314,13 +311,7 @@ static bool blit_is_unsafe(struct CirrusVGAState *s, bool dst_only,
if (dst_only) {
return false;
}
-
- check_pitch = s->cirrus_blt_srcpitch;
- if (!zero_src_pitch_ok && !check_pitch) {
- check_pitch = s->cirrus_blt_width;
- }
-
- if (blit_region_is_unsafe(s, check_pitch,
+ if (blit_region_is_unsafe(s, s->cirrus_blt_srcpitch,
s->cirrus_blt_srcaddr)) {
return true;
}
@@ -683,14 +674,39 @@ static void cirrus_invalidate_region(CirrusVGAState * s, int off_begin,
}
}
-static int cirrus_bitblt_common_patterncopy(CirrusVGAState * s,
- const uint8_t * src)
+static int cirrus_bitblt_common_patterncopy(CirrusVGAState *s, bool videosrc)
{
+ uint32_t patternsize;
uint8_t *dst;
+ uint8_t *src;
dst = s->vga.vram_ptr + s->cirrus_blt_dstaddr;
- if (blit_is_unsafe(s, false, true)) {
+ if (videosrc) {
+ switch (s->vga.get_bpp(&s->vga)) {
+ case 8:
+ patternsize = 64;
+ break;
+ case 15:
+ case 16:
+ patternsize = 128;
+ break;
+ case 24:
+ case 32:
+ default:
+ patternsize = 256;
+ break;
+ }
+ s->cirrus_blt_srcaddr &= ~(patternsize - 1);
+ if (s->cirrus_blt_srcaddr + patternsize > s->vga.vram_size) {
+ return 0;
+ }
+ src = s->vga.vram_ptr + s->cirrus_blt_srcaddr;
+ } else {
+ src = s->cirrus_bltbuf;
+ }
+
+ if (blit_is_unsafe(s, true)) {
return 0;
}
@@ -709,7 +725,7 @@ static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop)
{
cirrus_fill_t rop_func;
- if (blit_is_unsafe(s, true, true)) {
+ if (blit_is_unsafe(s, true)) {
return 0;
}
rop_func = cirrus_fill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1];
@@ -731,8 +747,7 @@ static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop)
static int cirrus_bitblt_videotovideo_patterncopy(CirrusVGAState * s)
{
- return cirrus_bitblt_common_patterncopy(s, s->vga.vram_ptr +
- (s->cirrus_blt_srcaddr & ~7));
+ return cirrus_bitblt_common_patterncopy(s, true);
}
static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
@@ -810,7 +825,7 @@ static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s)
{
- if (blit_is_unsafe(s, false, false))
+ if (blit_is_unsafe(s, false))
return 0;
return cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.start_addr,
@@ -831,7 +846,7 @@ static void cirrus_bitblt_cputovideo_next(CirrusVGAState * s)
if (s->cirrus_srccounter > 0) {
if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) {
- cirrus_bitblt_common_patterncopy(s, s->cirrus_bltbuf);
+ cirrus_bitblt_common_patterncopy(s, false);
the_end:
s->cirrus_srccounter = 0;
cirrus_bitblt_reset(s);
@@ -885,6 +900,10 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s)
{
int w;
+ if (blit_is_unsafe(s, true)) {
+ return 0;
+ }
+
s->cirrus_blt_mode &= ~CIRRUS_BLTMODE_MEMSYSSRC;
s->cirrus_srcptr = &s->cirrus_bltbuf[0];
s->cirrus_srcptr_end = &s->cirrus_bltbuf[0];
@@ -910,6 +929,10 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s)
}
s->cirrus_srccounter = s->cirrus_blt_srcpitch * s->cirrus_blt_height;
}
+
+ /* the blit_is_unsafe call above should catch this */
+ assert(s->cirrus_blt_srcpitch <= CIRRUS_BLTBUFSIZE);
+
s->cirrus_srcptr = s->cirrus_bltbuf;
s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch;
cirrus_update_memory_access(s);
@@ -1852,12 +1875,14 @@ static uint8_t cirrus_mmio_blt_read(CirrusVGAState * s, unsigned address)
break;
}
+ trace_vga_cirrus_write_blt(address, value);
return (uint8_t) value;
}
static void cirrus_mmio_blt_write(CirrusVGAState * s, unsigned address,
uint8_t value)
{
+ trace_vga_cirrus_write_blt(address, value);
switch (address) {
case (CIRRUS_MMIO_BLTBGCOLOR + 0):
cirrus_vga_write_gr(s, 0x00, value);
@@ -2607,9 +2632,7 @@ static uint64_t cirrus_vga_ioport_read(void *opaque, hwaddr addr,
break;
}
}
-#if defined(DEBUG_VGA)
- printf("VGA: read addr=0x%04x data=0x%02x\n", addr, val);
-#endif
+ trace_vga_cirrus_read_io(addr, val);
return val;
}
@@ -2626,9 +2649,7 @@ static void cirrus_vga_ioport_write(void *opaque, hwaddr addr, uint64_t val,
if (vga_ioport_invalid(s, addr)) {
return;
}
-#ifdef DEBUG_VGA
- printf("VGA: write addr=0x%04x data=0x%02x\n", addr, val);
-#endif
+ trace_vga_cirrus_write_io(addr, val);
switch (addr) {
case 0x3c0:
diff --git a/hw/display/g364fb.c b/hw/display/g364fb.c
index 70ef2c7453..8cdc205dd9 100644
--- a/hw/display/g364fb.c
+++ b/hw/display/g364fb.c
@@ -464,7 +464,7 @@ static const VMStateDescription vmstate_g364fb = {
.minimum_version_id = 1,
.post_load = g364fb_post_load,
.fields = (VMStateField[]) {
- VMSTATE_VBUFFER_UINT32(vram, G364State, 1, NULL, 0, vram_size),
+ VMSTATE_VBUFFER_UINT32(vram, G364State, 1, NULL, vram_size),
VMSTATE_BUFFER_UNSAFE(color_palette, G364State, 0, 256 * 3),
VMSTATE_BUFFER_UNSAFE(cursor_palette, G364State, 0, 9),
VMSTATE_UINT16_ARRAY(cursor, G364State, 512),
diff --git a/hw/display/trace-events b/hw/display/trace-events
index aadb612dcb..3e896d2e3f 100644
--- a/hw/display/trace-events
+++ b/hw/display/trace-events
@@ -119,3 +119,15 @@ qxl_set_client_capabilities_unsupported_by_revision(int qid, int revision) "%d r
qxl_render_blit(int32_t stride, int32_t left, int32_t right, int32_t top, int32_t bottom) "stride=%d [%d, %d, %d, %d]"
qxl_render_guest_primary_resized(int32_t width, int32_t height, int32_t stride, int32_t bytes_pp, int32_t bits_pp) "%dx%d, stride %d, bpp %d, depth %d"
qxl_render_update_area_done(void *cookie) "%p"
+
+# hw/display/vga.c
+vga_std_read_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x"
+vga_std_write_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x"
+vga_vbe_read(uint32_t index, uint32_t val) "index 0x%x, val 0x%x"
+vga_vbe_write(uint32_t index, uint32_t val) "index 0x%x, val 0x%x"
+
+# hw/display/cirrus_vga.c
+vga_cirrus_read_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x"
+vga_cirrus_write_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x"
+vga_cirrus_read_blt(uint32_t offset, uint32_t val) "offset 0x%x, val 0x%x"
+vga_cirrus_write_blt(uint32_t offset, uint32_t val) "offset 0x%x, val 0x%x"
diff --git a/hw/display/vga.c b/hw/display/vga.c
index 2a88b3c1b4..69c3e1d674 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -34,12 +34,9 @@
#include "hw/xen/xen.h"
#include "trace.h"
-//#define DEBUG_VGA
//#define DEBUG_VGA_MEM
//#define DEBUG_VGA_REG
-//#define DEBUG_BOCHS_VBE
-
/* 16 state changes per vertical frame @60 Hz */
#define VGA_TEXT_CURSOR_PERIOD_MS (1000 * 2 * 16 / 60)
@@ -428,9 +425,7 @@ uint32_t vga_ioport_read(void *opaque, uint32_t addr)
break;
}
}
-#if defined(DEBUG_VGA)
- printf("VGA: read addr=0x%04x data=0x%02x\n", addr, val);
-#endif
+ trace_vga_std_read_io(addr, val);
return val;
}
@@ -443,9 +438,7 @@ void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val)
if (vga_ioport_invalid(s, addr)) {
return;
}
-#ifdef DEBUG_VGA
- printf("VGA: write addr=0x%04x data=0x%02x\n", addr, val);
-#endif
+ trace_vga_std_write_io(addr, val);
switch(addr) {
case VGA_ATT_W:
@@ -733,9 +726,7 @@ uint32_t vbe_ioport_read_data(void *opaque, uint32_t addr)
} else {
val = 0;
}
-#ifdef DEBUG_BOCHS_VBE
- printf("VBE: read index=0x%x val=0x%x\n", s->vbe_index, val);
-#endif
+ trace_vga_vbe_read(s->vbe_index, val);
return val;
}
@@ -750,9 +741,7 @@ void vbe_ioport_write_data(void *opaque, uint32_t addr, uint32_t val)
VGACommonState *s = opaque;
if (s->vbe_index <= VBE_DISPI_INDEX_NB) {
-#ifdef DEBUG_BOCHS_VBE
- printf("VBE: write index=0x%x val=0x%x\n", s->vbe_index, val);
-#endif
+ trace_vga_vbe_write(s->vbe_index, val);
switch(s->vbe_index) {
case VBE_DISPI_INDEX_ID:
if (val == VBE_DISPI_ID0 ||
@@ -1543,17 +1532,9 @@ static void vga_draw_graphic(VGACommonState *s, int full_update)
height, format, s->line_offset,
s->vram_ptr + (s->start_addr * 4));
dpy_gfx_replace_surface(s->con, surface);
-#ifdef DEBUG_VGA
- printf("VGA: Using shared surface for depth=%d swap=%d\n",
- depth, byteswap);
-#endif
} else {
qemu_console_resize(s->con, disp_width, height);
surface = qemu_console_surface(s->con);
-#ifdef DEBUG_VGA
- printf("VGA: Using shadow surface for depth=%d swap=%d\n",
- depth, byteswap);
-#endif
}
s->last_scr_width = disp_width;
s->last_scr_height = height;
diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c
index f96a0c2e59..ecb09d17a1 100644
--- a/hw/display/virtio-gpu-3d.c
+++ b/hw/display/virtio-gpu-3d.c
@@ -77,10 +77,18 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_resource_unref unref;
+ struct iovec *res_iovs = NULL;
+ int num_iovs = 0;
VIRTIO_GPU_FILL_CMD(unref);
trace_virtio_gpu_cmd_res_unref(unref.resource_id);
+ virgl_renderer_resource_detach_iov(unref.resource_id,
+ &res_iovs,
+ &num_iovs);
+ if (res_iovs != NULL && num_iovs != 0) {
+ virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
+ }
virgl_renderer_resource_unref(unref.resource_id);
}
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 444ca064c1..9b530ab5b0 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -608,6 +608,7 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
+ pixman_image_unref(rect);
dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
}
diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c
index c0bd9fec30..32cf8399b8 100644
--- a/hw/dma/pl330.c
+++ b/hw/dma/pl330.c
@@ -173,8 +173,8 @@ static const VMStateDescription vmstate_pl330_fifo = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, 0, buf_size),
- VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, 0, buf_size),
+ VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, buf_size),
+ VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, buf_size),
VMSTATE_UINT32(head, PL330Fifo),
VMSTATE_UINT32(num, PL330Fifo),
VMSTATE_UINT32(buf_size, PL330Fifo),
@@ -282,8 +282,8 @@ static const VMStateDescription vmstate_pl330 = {
VMSTATE_STRUCT(manager, PL330State, 0, vmstate_pl330_chan, PL330Chan),
VMSTATE_STRUCT_VARRAY_UINT32(chan, PL330State, num_chnls, 0,
vmstate_pl330_chan, PL330Chan),
- VMSTATE_VBUFFER_UINT32(lo_seqn, PL330State, 1, NULL, 0, num_chnls),
- VMSTATE_VBUFFER_UINT32(hi_seqn, PL330State, 1, NULL, 0, num_chnls),
+ VMSTATE_VBUFFER_UINT32(lo_seqn, PL330State, 1, NULL, num_chnls),
+ VMSTATE_VBUFFER_UINT32(hi_seqn, PL330State, 1, NULL, num_chnls),
VMSTATE_STRUCT(fifo, PL330State, 0, vmstate_pl330_fifo, PL330Fifo),
VMSTATE_STRUCT(read_queue, PL330State, 0, vmstate_pl330_queue,
PL330Queue),
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 3270fb9162..22d8226e43 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -35,6 +35,7 @@
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
+#include "trace.h"
/*#define DEBUG_INTEL_IOMMU*/
#ifdef DEBUG_INTEL_IOMMU
@@ -167,6 +168,7 @@ static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
+ assert(level != 0);
return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}
@@ -259,11 +261,9 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
uint64_t *key = g_malloc(sizeof(*key));
uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
- VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
- " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr, slpte,
- domain_id);
+ trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
- VTD_DPRINTF(CACHE, "iotlb exceeds size limit, forced to reset");
+ trace_vtd_iotlb_reset("iotlb exceeds size limit");
vtd_reset_iotlb(s);
}
@@ -474,22 +474,19 @@ static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
- VTD_DPRINTF(INV, "completes an invalidation wait command with "
- "Interrupt Flag");
if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
- VTD_DPRINTF(INV, "there is a previous interrupt condition to be "
- "serviced by software, "
- "new invalidation event is not generated");
+ trace_vtd_inv_desc_wait_irq("One pending, skip current");
return;
}
vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
- VTD_DPRINTF(INV, "IM filed in IECTL_REG is set, new invalidation "
- "event is not generated");
+ trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
+ "new event not generated");
return;
} else {
/* Generate the interrupt event */
+ trace_vtd_inv_desc_wait_irq("Generating complete event");
vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
}
@@ -507,8 +504,7 @@ static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
addr = s->root + index * sizeof(*re);
if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
- VTD_DPRINTF(GENERAL, "error: fail to access root-entry at 0x%"PRIx64
- " + %"PRIu8, s->root, index);
+ trace_vtd_re_invalid(re->rsvd, re->val);
re->val = 0;
return -VTD_FR_ROOT_TABLE_INV;
}
@@ -526,15 +522,10 @@ static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
{
dma_addr_t addr;
- if (!vtd_root_entry_present(root)) {
- VTD_DPRINTF(GENERAL, "error: root-entry is not present");
- return -VTD_FR_ROOT_ENTRY_P;
- }
+ /* we have checked that root entry is present */
addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
- VTD_DPRINTF(GENERAL, "error: fail to access context-entry at 0x%"PRIx64
- " + %"PRIu8,
- (uint64_t)(root->val & VTD_ROOT_ENTRY_CTP), index);
+ trace_vtd_re_invalid(root->rsvd, root->val);
return -VTD_FR_CONTEXT_TABLE_INV;
}
ce->lo = le64_to_cpu(ce->lo);
@@ -575,12 +566,12 @@ static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
return slpte;
}
-/* Given a gpa and the level of paging structure, return the offset of current
- * level.
+/* Given an iova and the level of paging structure, return the offset
+ * of current level.
*/
-static inline uint32_t vtd_gpa_level_offset(uint64_t gpa, uint32_t level)
+static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
- return (gpa >> vtd_slpt_level_shift(level)) &
+ return (iova >> vtd_slpt_level_shift(level)) &
((1ULL << VTD_SL_LEVEL_BITS) - 1);
}
@@ -628,12 +619,12 @@ static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
}
}
-/* Given the @gpa, get relevant @slptep. @slpte_level will be the last level
+/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
* of the translation, can be used for deciding the size of large page.
*/
-static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
- uint64_t *slptep, uint32_t *slpte_level,
- bool *reads, bool *writes)
+static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
+ uint64_t *slptep, uint32_t *slpte_level,
+ bool *reads, bool *writes)
{
dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
uint32_t level = vtd_get_level_from_context_entry(ce);
@@ -642,11 +633,11 @@ static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
uint64_t access_right_check;
- /* Check if @gpa is above 2^X-1, where X is the minimum of MGAW in CAP_REG
- * and AW in context-entry.
+ /* Check if @iova is above 2^X-1, where X is the minimum of MGAW
+ * in CAP_REG and AW in context-entry.
*/
- if (gpa & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
- VTD_DPRINTF(GENERAL, "error: gpa 0x%"PRIx64 " exceeds limits", gpa);
+ if (iova & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
+ VTD_DPRINTF(GENERAL, "error: iova 0x%"PRIx64 " exceeds limits", iova);
return -VTD_FR_ADDR_BEYOND_MGAW;
}
@@ -654,13 +645,13 @@ static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
access_right_check = is_write ? VTD_SL_W : VTD_SL_R;
while (true) {
- offset = vtd_gpa_level_offset(gpa, level);
+ offset = vtd_iova_level_offset(iova, level);
slpte = vtd_get_slpte(addr, offset);
if (slpte == (uint64_t)-1) {
VTD_DPRINTF(GENERAL, "error: fail to access second-level paging "
- "entry at level %"PRIu32 " for gpa 0x%"PRIx64,
- level, gpa);
+ "entry at level %"PRIu32 " for iova 0x%"PRIx64,
+ level, iova);
if (level == vtd_get_level_from_context_entry(ce)) {
/* Invalid programming of context-entry */
return -VTD_FR_CONTEXT_ENTRY_INV;
@@ -672,8 +663,8 @@ static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
*writes = (*writes) && (slpte & VTD_SL_W);
if (!(slpte & access_right_check)) {
VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
- "gpa 0x%"PRIx64 " slpte 0x%"PRIx64,
- (is_write ? "write" : "read"), gpa, slpte);
+ "iova 0x%"PRIx64 " slpte 0x%"PRIx64,
+ (is_write ? "write" : "read"), iova, slpte);
return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
}
if (vtd_slpte_nonzero_rsvd(slpte, level)) {
@@ -706,12 +697,11 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
}
if (!vtd_root_entry_present(&re)) {
- VTD_DPRINTF(GENERAL, "error: root-entry #%"PRIu8 " is not present",
- bus_num);
+ /* Not error - it's okay we don't have root entry. */
+ trace_vtd_re_not_present(bus_num);
return -VTD_FR_ROOT_ENTRY_P;
} else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in root-entry "
- "hi 0x%"PRIx64 " lo 0x%"PRIx64, re.rsvd, re.val);
+ trace_vtd_re_invalid(re.rsvd, re.val);
return -VTD_FR_ROOT_ENTRY_RSVD;
}
@@ -721,22 +711,17 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
}
if (!vtd_context_entry_present(ce)) {
- VTD_DPRINTF(GENERAL,
- "error: context-entry #%"PRIu8 "(bus #%"PRIu8 ") "
- "is not present", devfn, bus_num);
+ /* Not error - it's okay we don't have context entry. */
+ trace_vtd_ce_not_present(bus_num, devfn);
return -VTD_FR_CONTEXT_ENTRY_P;
} else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
(ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
- VTD_DPRINTF(GENERAL,
- "error: non-zero reserved field in context-entry "
- "hi 0x%"PRIx64 " lo 0x%"PRIx64, ce->hi, ce->lo);
+ trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_RSVD;
}
/* Check if the programming of context-entry is valid */
if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
- VTD_DPRINTF(GENERAL, "error: unsupported Address Width value in "
- "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
- ce->hi, ce->lo);
+ trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
} else {
switch (ce->lo & VTD_CONTEXT_ENTRY_TT) {
@@ -745,9 +730,7 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
case VTD_CONTEXT_TT_DEV_IOTLB:
break;
default:
- VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
- "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
- ce->hi, ce->lo);
+ trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
}
}
@@ -818,34 +801,17 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
bool writes = true;
VTDIOTLBEntry *iotlb_entry;
- /* Check if the request is in interrupt address range */
- if (vtd_is_interrupt_addr(addr)) {
- if (is_write) {
- /* FIXME: since we don't know the length of the access here, we
- * treat Non-DWORD length write requests without PASID as
- * interrupt requests, too. Withoud interrupt remapping support,
- * we just use 1:1 mapping.
- */
- VTD_DPRINTF(MMU, "write request to interrupt address "
- "gpa 0x%"PRIx64, addr);
- entry->iova = addr & VTD_PAGE_MASK_4K;
- entry->translated_addr = addr & VTD_PAGE_MASK_4K;
- entry->addr_mask = ~VTD_PAGE_MASK_4K;
- entry->perm = IOMMU_WO;
- return;
- } else {
- VTD_DPRINTF(GENERAL, "error: read request from interrupt address "
- "gpa 0x%"PRIx64, addr);
- vtd_report_dmar_fault(s, source_id, addr, VTD_FR_READ, is_write);
- return;
- }
- }
+ /*
+ * We have standalone memory region for interrupt addresses, we
+ * should never receive translation requests in this region.
+ */
+ assert(!vtd_is_interrupt_addr(addr));
+
/* Try to fetch slpte form IOTLB */
iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
if (iotlb_entry) {
- VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
- " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr,
- iotlb_entry->slpte, iotlb_entry->domain_id);
+ trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
+ iotlb_entry->domain_id);
slpte = iotlb_entry->slpte;
reads = iotlb_entry->read_flags;
writes = iotlb_entry->write_flags;
@@ -854,10 +820,9 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
}
/* Try to fetch context-entry from cache first */
if (cc_entry->context_cache_gen == s->context_cache_gen) {
- VTD_DPRINTF(CACHE, "hit context-cache bus %d devfn %d "
- "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 ")",
- bus_num, devfn, cc_entry->context_entry.hi,
- cc_entry->context_entry.lo, cc_entry->context_cache_gen);
+ trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
+ cc_entry->context_entry.lo,
+ cc_entry->context_cache_gen);
ce = cc_entry->context_entry;
is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
} else {
@@ -866,30 +831,26 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
if (ret_fr) {
ret_fr = -ret_fr;
if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
- VTD_DPRINTF(FLOG, "fault processing is disabled for DMA "
- "requests through this context-entry "
- "(with FPD Set)");
+ trace_vtd_fault_disabled();
} else {
vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
}
return;
}
/* Update context-cache */
- VTD_DPRINTF(CACHE, "update context-cache bus %d devfn %d "
- "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 "->%"PRIu32 ")",
- bus_num, devfn, ce.hi, ce.lo,
- cc_entry->context_cache_gen, s->context_cache_gen);
+ trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
+ cc_entry->context_cache_gen,
+ s->context_cache_gen);
cc_entry->context_entry = ce;
cc_entry->context_cache_gen = s->context_cache_gen;
}
- ret_fr = vtd_gpa_to_slpte(&ce, addr, is_write, &slpte, &level,
- &reads, &writes);
+ ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
+ &reads, &writes);
if (ret_fr) {
ret_fr = -ret_fr;
if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
- VTD_DPRINTF(FLOG, "fault processing is disabled for DMA requests "
- "through this context-entry (with FPD Set)");
+ trace_vtd_fault_disabled();
} else {
vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
}
@@ -939,6 +900,7 @@ static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
+ trace_vtd_inv_desc_cc_global();
s->context_cache_gen++;
if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
vtd_reset_context_cache(s);
@@ -978,9 +940,11 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
uint16_t mask;
VTDBus *vtd_bus;
VTDAddressSpace *vtd_as;
- uint16_t devfn;
+ uint8_t bus_n, devfn;
uint16_t devfn_it;
+ trace_vtd_inv_desc_cc_devices(source_id, func_mask);
+
switch (func_mask & 3) {
case 0:
mask = 0; /* No bits in the SID field masked */
@@ -996,16 +960,16 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
break;
}
mask = ~mask;
- VTD_DPRINTF(INV, "device-selective invalidation source 0x%"PRIx16
- " mask %"PRIu16, source_id, mask);
- vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
+
+ bus_n = VTD_SID_TO_BUS(source_id);
+ vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
if (vtd_bus) {
devfn = VTD_SID_TO_DEVFN(source_id);
for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
vtd_as = vtd_bus->dev_as[devfn_it];
if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
- VTD_DPRINTF(INV, "invalidate context-cahce of devfn 0x%"PRIx16,
- devfn_it);
+ trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
+ VTD_PCI_FUNC(devfn_it));
vtd_as->context_cache_entry.context_cache_gen = 0;
}
}
@@ -1046,6 +1010,7 @@ static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
+ trace_vtd_iotlb_reset("global invalidation recved");
vtd_reset_iotlb(s);
}
@@ -1318,9 +1283,7 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
(inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Invalidation "
- "Wait Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
@@ -1332,21 +1295,18 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
/* FIXME: need to be masked with HAW? */
dma_addr_t status_addr = inv_desc->hi;
- VTD_DPRINTF(INV, "status data 0x%x, status addr 0x%"PRIx64,
- status_data, status_addr);
+ trace_vtd_inv_desc_wait_sw(status_addr, status_data);
status_data = cpu_to_le32(status_data);
if (dma_memory_write(&address_space_memory, status_addr, &status_data,
sizeof(status_data))) {
- VTD_DPRINTF(GENERAL, "error: fail to perform a coherent write");
+ trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
return false;
}
} else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
/* Interrupt flag */
- VTD_DPRINTF(INV, "Invalidation Wait Descriptor interrupt completion");
vtd_generate_completion_event(s);
} else {
- VTD_DPRINTF(GENERAL, "error: invalid Invalidation Wait Descriptor: "
- "hi 0x%"PRIx64 " lo 0x%"PRIx64, inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
return true;
@@ -1355,30 +1315,29 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
VTDInvDesc *inv_desc)
{
+ uint16_t sid, fmask;
+
if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Context-cache "
- "Invalidate Descriptor");
+ trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
case VTD_INV_DESC_CC_DOMAIN:
- VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
- (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
+ trace_vtd_inv_desc_cc_domain(
+ (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
/* Fall through */
case VTD_INV_DESC_CC_GLOBAL:
- VTD_DPRINTF(INV, "global invalidation");
vtd_context_global_invalidate(s);
break;
case VTD_INV_DESC_CC_DEVICE:
- vtd_context_device_invalidate(s, VTD_INV_DESC_CC_SID(inv_desc->lo),
- VTD_INV_DESC_CC_FM(inv_desc->lo));
+ sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
+ fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
+ vtd_context_device_invalidate(s, sid, fmask);
break;
default:
- VTD_DPRINTF(GENERAL, "error: invalid granularity in Context-cache "
- "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
return true;
@@ -1392,22 +1351,19 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
(inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in IOTLB "
- "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
case VTD_INV_DESC_IOTLB_GLOBAL:
- VTD_DPRINTF(INV, "global invalidation");
+ trace_vtd_inv_desc_iotlb_global();
vtd_iotlb_global_invalidate(s);
break;
case VTD_INV_DESC_IOTLB_DOMAIN:
domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
- VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
- domain_id);
+ trace_vtd_inv_desc_iotlb_domain(domain_id);
vtd_iotlb_domain_invalidate(s, domain_id);
break;
@@ -1415,20 +1371,16 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
- VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
- " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
+ trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);
if (am > VTD_MAMV) {
- VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
- "%"PRIu8, (uint8_t)VTD_MAMV);
+ trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
vtd_iotlb_page_invalidate(s, domain_id, addr, am);
break;
default:
- VTD_DPRINTF(GENERAL, "error: invalid granularity in IOTLB Invalidate "
- "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
return true;
@@ -1527,33 +1479,28 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
switch (desc_type) {
case VTD_INV_DESC_CC:
- VTD_DPRINTF(INV, "Context-cache Invalidate Descriptor hi 0x%"PRIx64
- " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
if (!vtd_process_context_cache_desc(s, &inv_desc)) {
return false;
}
break;
case VTD_INV_DESC_IOTLB:
- VTD_DPRINTF(INV, "IOTLB Invalidate Descriptor hi 0x%"PRIx64
- " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
if (!vtd_process_iotlb_desc(s, &inv_desc)) {
return false;
}
break;
case VTD_INV_DESC_WAIT:
- VTD_DPRINTF(INV, "Invalidation Wait Descriptor hi 0x%"PRIx64
- " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
if (!vtd_process_wait_desc(s, &inv_desc)) {
return false;
}
break;
case VTD_INV_DESC_IEC:
- VTD_DPRINTF(INV, "Invalidation Interrupt Entry Cache "
- "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc.hi, inv_desc.lo);
+ trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
return false;
}
@@ -1568,9 +1515,7 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
break;
default:
- VTD_DPRINTF(GENERAL, "error: unkonw Invalidation Descriptor type "
- "hi 0x%"PRIx64 " lo 0x%"PRIx64 " type %"PRIu8,
- inv_desc.hi, inv_desc.lo, desc_type);
+ trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
return false;
}
s->iq_head++;
@@ -2049,7 +1994,7 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
is_write, &ret);
VTD_DPRINTF(MMU,
"bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
- " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
+ " iova 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
vtd_as->devfn, addr, ret.translated_addr);
return ret;
@@ -2115,6 +2060,7 @@ static Property vtd_properties[] = {
DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
+ DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2496,6 +2442,10 @@ static void vtd_init(IntelIOMMUState *s)
s->ecap |= VTD_ECAP_DT;
}
+ if (s->caching_mode) {
+ s->cap |= VTD_CAP_CM;
+ }
+
vtd_reset_context_cache(s);
vtd_reset_iotlb(s);
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 356f188b73..41041219ba 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -202,6 +202,7 @@
#define VTD_CAP_MAMV (VTD_MAMV << 48)
#define VTD_CAP_PSI (1ULL << 39)
#define VTD_CAP_SLLPS ((1ULL << 34) | (1ULL << 35))
+#define VTD_CAP_CM (1ULL << 7)
/* Supported Adjusted Guest Address Widths */
#define VTD_CAP_SAGAW_SHIFT 8
diff --git a/hw/i386/kvm/ioapic.c b/hw/i386/kvm/ioapic.c
index 8eb2c7a70f..98ca480792 100644
--- a/hw/i386/kvm/ioapic.c
+++ b/hw/i386/kvm/ioapic.c
@@ -114,11 +114,11 @@ static void kvm_ioapic_put(IOAPICCommonState *s)
void kvm_ioapic_dump_state(Monitor *mon, const QDict *qdict)
{
- IOAPICCommonState s;
+ IOAPICCommonState *s = IOAPIC_COMMON(object_resolve_path("ioapic", NULL));
- kvm_ioapic_get(&s);
-
- ioapic_print_redtbl(mon, &s);
+ assert(s);
+ kvm_ioapic_get(s);
+ ioapic_print_redtbl(mon, s);
}
static void kvm_ioapic_reset(DeviceState *dev)
@@ -143,6 +143,11 @@ static void kvm_ioapic_realize(DeviceState *dev, Error **errp)
IOAPICCommonState *s = IOAPIC_COMMON(dev);
memory_region_init_reservation(&s->io_memory, NULL, "kvm-ioapic", 0x1000);
+ /*
+ * KVM ioapic only supports 0x11 now. This will only be used when
+ * we want to dump ioapic version.
+ */
+ s->version = 0x11;
qdev_init_gpio_in(dev, kvm_ioapic_set_irq, IOAPIC_NUM_PINS);
}
diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c
index 7135633863..82a49556af 100644
--- a/hw/i386/kvmvapic.c
+++ b/hw/i386/kvmvapic.c
@@ -457,8 +457,8 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
resume_all_vcpus();
if (!kvm_enabled()) {
- /* tb_lock will be reset when cpu_loop_exit_noexc longjmps
- * back into the cpu_exec loop. */
+ /* Both tb_lock and iothread_mutex will be reset when
+ * longjmps back into the cpu_exec loop. */
tb_lock();
tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
cpu_loop_exit_noexc(cs);
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index e3fcd514dd..d24388e05f 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -707,7 +707,8 @@ static void pc_build_smbios(PCMachineState *pcms)
size_t smbios_tables_len, smbios_anchor_len;
struct smbios_phys_mem_area *mem_array;
unsigned i, array_count;
- X86CPU *cpu = X86_CPU(pcms->possible_cpus->cpus[0].cpu);
+ MachineState *ms = MACHINE(pcms);
+ X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu);
/* tell smbios about cpuid version and features */
smbios_set_cpuid(cpu->env.cpuid_version, cpu->env.features[FEAT_1_EDX]);
@@ -1111,7 +1112,7 @@ static void pc_new_cpu(const char *typename, int64_t apic_id, Error **errp)
void pc_hot_add_cpu(const int64_t id, Error **errp)
{
ObjectClass *oc;
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+ MachineState *ms = MACHINE(qdev_get_machine());
int64_t apic_id = x86_cpu_apic_id_from_index(id);
Error *local_err = NULL;
@@ -1127,8 +1128,8 @@ void pc_hot_add_cpu(const int64_t id, Error **errp)
return;
}
- assert(pcms->possible_cpus->cpus[0].cpu); /* BSP is always present */
- oc = OBJECT_CLASS(CPU_GET_CLASS(pcms->possible_cpus->cpus[0].cpu));
+ assert(ms->possible_cpus->cpus[0].cpu); /* BSP is always present */
+ oc = OBJECT_CLASS(CPU_GET_CLASS(ms->possible_cpus->cpus[0].cpu));
pc_new_cpu(object_class_get_name(oc), apic_id, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -1143,7 +1144,9 @@ void pc_cpus_init(PCMachineState *pcms)
ObjectClass *oc;
const char *typename;
gchar **model_pieces;
+ const CPUArchIdList *possible_cpus;
MachineState *machine = MACHINE(pcms);
+ MachineClass *mc = MACHINE_GET_CLASS(pcms);
/* init CPUs */
if (machine->cpu_model == NULL) {
@@ -1178,20 +1181,16 @@ void pc_cpus_init(PCMachineState *pcms)
* This is used for FW_CFG_MAX_CPUS. See comments on bochs_bios_init().
*/
pcms->apic_id_limit = x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
- pcms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
- sizeof(CPUArchId) * max_cpus);
- for (i = 0; i < max_cpus; i++) {
- pcms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i);
- pcms->possible_cpus->len++;
- if (i < smp_cpus) {
- pc_new_cpu(typename, x86_cpu_apic_id_from_index(i), &error_fatal);
- }
+ possible_cpus = mc->possible_cpu_arch_ids(machine);
+ for (i = 0; i < smp_cpus; i++) {
+ pc_new_cpu(typename, possible_cpus->cpus[i].arch_id, &error_fatal);
}
}
static void pc_build_feature_control_file(PCMachineState *pcms)
{
- X86CPU *cpu = X86_CPU(pcms->possible_cpus->cpus[0].cpu);
+ MachineState *ms = MACHINE(pcms);
+ X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu);
CPUX86State *env = &cpu->env;
uint32_t unused, ecx, edx;
uint64_t feature_control_bits = 0;
@@ -1650,9 +1649,15 @@ void pc_pci_device_init(PCIBus *pci_bus)
int max_bus;
int bus;
+ /* Note: if=scsi is deprecated with PC machine types */
max_bus = drive_get_max_bus(IF_SCSI);
for (bus = 0; bus <= max_bus; bus++) {
pci_create_simple(pci_bus, -1, "lsi53c895a");
+ /*
+ * By not creating frontends here, we make
+ * scsi_legacy_handle_cmdline() create them, and warn that
+ * this usage is deprecated.
+ */
}
}
@@ -1781,21 +1786,19 @@ static int pc_apic_cmp(const void *a, const void *b)
}
/* returns pointer to CPUArchId descriptor that matches CPU's apic_id
- * in pcms->possible_cpus->cpus, if pcms->possible_cpus->cpus has no
+ * in ms->possible_cpus->cpus, if ms->possible_cpus->cpus has no
* entry corresponding to CPU's apic_id returns NULL.
*/
-static CPUArchId *pc_find_cpu_slot(PCMachineState *pcms, CPUState *cpu,
- int *idx)
+static CPUArchId *pc_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchId apic_id, *found_cpu;
- apic_id.arch_id = cc->get_arch_id(CPU(cpu));
- found_cpu = bsearch(&apic_id, pcms->possible_cpus->cpus,
- pcms->possible_cpus->len, sizeof(*pcms->possible_cpus->cpus),
+ apic_id.arch_id = id;
+ found_cpu = bsearch(&apic_id, ms->possible_cpus->cpus,
+ ms->possible_cpus->len, sizeof(*ms->possible_cpus->cpus),
pc_apic_cmp);
if (found_cpu && idx) {
- *idx = found_cpu - pcms->possible_cpus->cpus;
+ *idx = found_cpu - ms->possible_cpus->cpus;
}
return found_cpu;
}
@@ -1806,6 +1809,7 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
CPUArchId *found_cpu;
HotplugHandlerClass *hhc;
Error *local_err = NULL;
+ X86CPU *cpu = X86_CPU(dev);
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
if (pcms->acpi_dev) {
@@ -1825,8 +1829,8 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
}
- found_cpu = pc_find_cpu_slot(pcms, CPU(dev), NULL);
- found_cpu->cpu = CPU(dev);
+ found_cpu = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, NULL);
+ found_cpu->cpu = OBJECT(dev);
out:
error_propagate(errp, local_err);
}
@@ -1836,9 +1840,10 @@ static void pc_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
int idx = -1;
HotplugHandlerClass *hhc;
Error *local_err = NULL;
+ X86CPU *cpu = X86_CPU(dev);
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
- pc_find_cpu_slot(pcms, CPU(dev), &idx);
+ pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, &idx);
assert(idx != -1);
if (idx == 0) {
error_setg(&local_err, "Boot CPU is unpluggable");
@@ -1863,6 +1868,7 @@ static void pc_cpu_unplug_cb(HotplugHandler *hotplug_dev,
CPUArchId *found_cpu;
HotplugHandlerClass *hhc;
Error *local_err = NULL;
+ X86CPU *cpu = X86_CPU(dev);
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
@@ -1872,7 +1878,7 @@ static void pc_cpu_unplug_cb(HotplugHandler *hotplug_dev,
goto out;
}
- found_cpu = pc_find_cpu_slot(pcms, CPU(dev), NULL);
+ found_cpu = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, NULL);
found_cpu->cpu = NULL;
object_unparent(OBJECT(dev));
@@ -1930,13 +1936,15 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
cpu->apic_id = apicid_from_topo_ids(smp_cores, smp_threads, &topo);
}
- cpu_slot = pc_find_cpu_slot(pcms, CPU(dev), &idx);
+ cpu_slot = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, &idx);
if (!cpu_slot) {
+ MachineState *ms = MACHINE(pcms);
+
x86_topo_ids_from_apicid(cpu->apic_id, smp_cores, smp_threads, &topo);
error_setg(errp, "Invalid CPU [socket: %u, core: %u, thread: %u] with"
" APIC ID %" PRIu32 ", valid index range 0:%d",
topo.pkg_id, topo.core_id, topo.smt_id, cpu->apic_id,
- pcms->possible_cpus->len - 1);
+ ms->possible_cpus->len - 1);
return;
}
@@ -1947,7 +1955,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
}
/* if 'address' properties socket-id/core-id/thread-id are not set, set them
- * so that query_hotpluggable_cpus would show correct values
+ * so that machine_query_hotpluggable_cpus would show correct values
*/
/* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
* once -smp refactoring is complete and there will be CPU private
@@ -2245,55 +2253,37 @@ static unsigned pc_cpu_index_to_socket_id(unsigned cpu_index)
return topo.pkg_id;
}
-static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *machine)
-{
- PCMachineState *pcms = PC_MACHINE(machine);
- assert(pcms->possible_cpus);
- return pcms->possible_cpus;
-}
-
-static HotpluggableCPUList *pc_query_hotpluggable_cpus(MachineState *machine)
+static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *ms)
{
int i;
- CPUState *cpu;
- HotpluggableCPUList *head = NULL;
- PCMachineState *pcms = PC_MACHINE(machine);
- const char *cpu_type;
- cpu = pcms->possible_cpus->cpus[0].cpu;
- assert(cpu); /* BSP is always present */
- cpu_type = object_class_get_name(OBJECT_CLASS(CPU_GET_CLASS(cpu)));
+ if (ms->possible_cpus) {
+ /*
+ * make sure that max_cpus hasn't changed since the first use, i.e.
+ * -smp hasn't been parsed after it
+ */
+ assert(ms->possible_cpus->len == max_cpus);
+ return ms->possible_cpus;
+ }
- for (i = 0; i < pcms->possible_cpus->len; i++) {
+ ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * max_cpus);
+ ms->possible_cpus->len = max_cpus;
+ for (i = 0; i < ms->possible_cpus->len; i++) {
X86CPUTopoInfo topo;
- HotpluggableCPUList *list_item = g_new0(typeof(*list_item), 1);
- HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1);
- CpuInstanceProperties *cpu_props = g_new0(typeof(*cpu_props), 1);
- const uint32_t apic_id = pcms->possible_cpus->cpus[i].arch_id;
-
- x86_topo_ids_from_apicid(apic_id, smp_cores, smp_threads, &topo);
-
- cpu_item->type = g_strdup(cpu_type);
- cpu_item->vcpus_count = 1;
- cpu_props->has_socket_id = true;
- cpu_props->socket_id = topo.pkg_id;
- cpu_props->has_core_id = true;
- cpu_props->core_id = topo.core_id;
- cpu_props->has_thread_id = true;
- cpu_props->thread_id = topo.smt_id;
- cpu_item->props = cpu_props;
-
- cpu = pcms->possible_cpus->cpus[i].cpu;
- if (cpu) {
- cpu_item->has_qom_path = true;
- cpu_item->qom_path = object_get_canonical_path(OBJECT(cpu));
- }
- list_item->value = cpu_item;
- list_item->next = head;
- head = list_item;
+ ms->possible_cpus->cpus[i].vcpus_count = 1;
+ ms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i);
+ x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id,
+ smp_cores, smp_threads, &topo);
+ ms->possible_cpus->cpus[i].props.has_socket_id = true;
+ ms->possible_cpus->cpus[i].props.socket_id = topo.pkg_id;
+ ms->possible_cpus->cpus[i].props.has_core_id = true;
+ ms->possible_cpus->cpus[i].props.core_id = topo.core_id;
+ ms->possible_cpus->cpus[i].props.has_thread_id = true;
+ ms->possible_cpus->cpus[i].props.thread_id = topo.smt_id;
}
- return head;
+ return ms->possible_cpus;
}
static void x86_nmi(NMIState *n, int cpu_index, Error **errp)
@@ -2336,9 +2326,10 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
mc->get_hotplug_handler = pc_get_hotpug_handler;
mc->cpu_index_to_socket_id = pc_cpu_index_to_socket_id;
mc->possible_cpu_arch_ids = pc_possible_cpu_arch_ids;
- mc->query_hotpluggable_cpus = pc_query_hotpluggable_cpus;
+ mc->has_hotpluggable_cpus = true;
mc->default_boot_order = "cad";
mc->hot_add_cpu = pc_hot_add_cpu;
+ mc->block_default_type = IF_IDE;
mc->max_cpus = 255;
mc->reset = pc_machine_reset;
hc->pre_plug = pc_machine_device_pre_plug_cb;
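The pc.c hunks above drop the board-local possible_cpus array and the query_hotpluggable_cpus callback in favour of a MachineState-level list built lazily by pc_possible_cpu_arch_ids(). A minimal sketch of that lazy-build pattern, using hypothetical names (IdList, possible_ids, cached) rather than the QEMU types:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct IdList {
    int len;
    uint64_t ids[];           /* flexible array member, like CPUArchIdList */
} IdList;

static IdList *cached;

static const IdList *possible_ids(int max)
{
    int i;

    if (cached) {
        /* max must not change after the list was first built */
        assert(cached->len == max);
        return cached;
    }
    /* allocation failure handling omitted in this sketch */
    cached = calloc(1, sizeof(*cached) + sizeof(uint64_t) * max);
    cached->len = max;
    for (i = 0; i < max; i++) {
        cached->ids[i] = i;   /* arch id derived from the index */
    }
    return cached;
}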
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index 1cc4a10a07..88ad5e4c43 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -3,6 +3,34 @@
# hw/i386/x86-iommu.c
x86_iommu_iec_notify(bool global, uint32_t index, uint32_t mask) "Notify IEC invalidation: global=%d index=%" PRIu32 " mask=%" PRIu32
+# hw/i386/intel_iommu.c
+vtd_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"
+vtd_inv_desc(const char *type, uint64_t hi, uint64_t lo) "invalidate desc type %s high 0x%"PRIx64" low 0x%"PRIx64
+vtd_inv_desc_invalid(uint64_t hi, uint64_t lo) "invalid inv desc hi 0x%"PRIx64" lo 0x%"PRIx64
+vtd_inv_desc_cc_domain(uint16_t domain) "context invalidate domain 0x%"PRIx16
+vtd_inv_desc_cc_global(void) "context invalidate globally"
+vtd_inv_desc_cc_device(uint8_t bus, uint8_t dev, uint8_t fn) "context invalidate device %02"PRIx8":%02"PRIx8".%02"PRIx8
+vtd_inv_desc_cc_devices(uint16_t sid, uint16_t fmask) "context invalidate devices sid 0x%"PRIx16" fmask 0x%"PRIx16
+vtd_inv_desc_cc_invalid(uint64_t hi, uint64_t lo) "invalid context-cache desc hi 0x%"PRIx64" lo 0x%"PRIx64
+vtd_inv_desc_iotlb_global(void) "iotlb invalidate global"
+vtd_inv_desc_iotlb_domain(uint16_t domain) "iotlb invalidate whole domain 0x%"PRIx16
+vtd_inv_desc_iotlb_pages(uint16_t domain, uint64_t addr, uint8_t mask) "iotlb invalidate domain 0x%"PRIx16" addr 0x%"PRIx64" mask 0x%"PRIx8
+vtd_inv_desc_iotlb_invalid(uint64_t hi, uint64_t lo) "invalid iotlb desc hi 0x%"PRIx64" lo 0x%"PRIx64
+vtd_inv_desc_wait_sw(uint64_t addr, uint32_t data) "wait invalidate status write addr 0x%"PRIx64" data 0x%"PRIx32
+vtd_inv_desc_wait_irq(const char *msg) "%s"
+vtd_inv_desc_wait_invalid(uint64_t hi, uint64_t lo) "invalid wait desc hi 0x%"PRIx64" lo 0x%"PRIx64
+vtd_inv_desc_wait_write_fail(uint64_t hi, uint64_t lo) "write fail for wait desc hi 0x%"PRIx64" lo 0x%"PRIx64
+vtd_re_not_present(uint8_t bus) "Root entry bus %"PRIu8" not present"
+vtd_re_invalid(uint64_t hi, uint64_t lo) "invalid root entry hi 0x%"PRIx64" lo 0x%"PRIx64
+vtd_ce_not_present(uint8_t bus, uint8_t devfn) "Context entry bus %"PRIu8" devfn %"PRIu8" not present"
+vtd_ce_invalid(uint64_t hi, uint64_t lo) "invalid context entry hi 0x%"PRIx64" lo 0x%"PRIx64
+vtd_iotlb_page_hit(uint16_t sid, uint64_t addr, uint64_t slpte, uint16_t domain) "IOTLB page hit sid 0x%"PRIx16" iova 0x%"PRIx64" slpte 0x%"PRIx64" domain 0x%"PRIx16
+vtd_iotlb_page_update(uint16_t sid, uint64_t addr, uint64_t slpte, uint16_t domain) "IOTLB page update sid 0x%"PRIx16" iova 0x%"PRIx64" slpte 0x%"PRIx64" domain 0x%"PRIx16
+vtd_iotlb_cc_hit(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32_t gen) "IOTLB context hit bus 0x%"PRIx8" devfn 0x%"PRIx8" high 0x%"PRIx64" low 0x%"PRIx64" gen %"PRIu32
+vtd_iotlb_cc_update(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32_t gen1, uint32_t gen2) "IOTLB context update bus 0x%"PRIx8" devfn 0x%"PRIx8" high 0x%"PRIx64" low 0x%"PRIx64" gen %"PRIu32" -> gen %"PRIu32
+vtd_iotlb_reset(const char *reason) "IOTLB reset (reason: %s)"
+vtd_fault_disabled(void) "Fault processing disabled for context entry"
+
# hw/i386/amd_iommu.c
amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" + offset 0x%"PRIx32
amdvi_cache_update(uint16_t domid, uint8_t bus, uint8_t slot, uint8_t func, uint64_t gpa, uint64_t txaddr) " update iotlb domid 0x%"PRIx16" devid: %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64
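The intel_iommu.c hunks earlier in this patch replace VTD_DPRINTF() calls with the trace points declared above; each trace-events line is turned by scripts/tracetool.py into a trace_<name>() helper whose body depends on the configured backend. A rough, simplified sketch of what a caller sees for one of the new events (the helper below is illustrative only, not the generated code):

#include <stdio.h>

/* illustrative stand-in for the helper generated from
 *   vtd_iotlb_reset(const char *reason) "IOTLB reset (reason: %s)"
 * a log-style backend roughly prints the event name plus the format string */
static inline void trace_vtd_iotlb_reset(const char *reason)
{
    fprintf(stderr, "vtd_iotlb_reset IOTLB reset (reason: %s)\n", reason);
}

/* device code then calls it in place of VTD_DPRINTF(), e.g.
 *   trace_vtd_iotlb_reset("global invalidation recved");
 */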
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index 3c19bdadc5..6a17acf639 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -488,7 +488,7 @@ static void ahci_reg_init(AHCIState *s)
s->control_regs.cap = (s->ports - 1) |
(AHCI_NUM_COMMAND_SLOTS << 8) |
(AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
- HOST_CAP_NCQ | HOST_CAP_AHCI;
+ HOST_CAP_NCQ | HOST_CAP_AHCI | HOST_CAP_64;
s->control_regs.impl = (1 << s->ports) - 1;
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 43709e545f..cfa5de6ebf 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -2840,23 +2840,6 @@ const VMStateDescription vmstate_ide_bus = {
void ide_drive_get(DriveInfo **hd, int n)
{
int i;
- int highest_bus = drive_get_max_bus(IF_IDE) + 1;
- int max_devs = drive_get_max_devs(IF_IDE);
- int n_buses = max_devs ? (n / max_devs) : n;
-
- /*
- * Note: The number of actual buses available is not known.
- * We compute this based on the size of the DriveInfo* array, n.
- * If it is less than max_devs * <num_real_buses>,
- * We will stop looking for drives prematurely instead of overfilling
- * the array.
- */
-
- if (highest_bus > n_buses) {
- error_report("Too many IDE buses defined (%d > %d)",
- highest_bus, n_buses);
- exit(1);
- }
for (i = 0; i < n; i++) {
hd[i] = drive_get_by_index(IF_IDE, i);
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index 17df24c9d0..7a6e771ed1 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -251,6 +251,8 @@ static void apic_reset_common(DeviceState *dev)
s->apicbase = APIC_DEFAULT_ADDRESS | bsp | MSR_IA32_APICBASE_ENABLE;
s->id = s->initial_apic_id;
+ apic_reset_irq_delivered();
+
s->vapic_paddr = 0;
info->vapic_base_update(s);
@@ -329,7 +331,7 @@ static void apic_common_realize(DeviceState *dev, Error **errp)
instance_id = -1;
}
vmstate_register_with_alias_id(NULL, instance_id, &vmstate_apic_common,
- s, -1, 0);
+ s, -1, 0, NULL);
}
static void apic_common_unrealize(DeviceState *dev, Error **errp)
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index c25ee03556..f775aba507 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -14,6 +14,7 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
+#include "qemu/main-loop.h"
#include "trace.h"
#include "gicv3_internal.h"
#include "cpu.h"
@@ -733,6 +734,8 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs->cpu);
CPUARMState *env = &cpu->env;
+ g_assert(qemu_mutex_iothread_locked());
+
trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
cs->hppi.grp, cs->hppi.prio);
diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c
index fd7a8f3058..2a55817b76 100644
--- a/hw/intc/exynos4210_gic.c
+++ b/hw/intc/exynos4210_gic.c
@@ -393,7 +393,7 @@ static const VMStateDescription vmstate_exynos4210_irq_gate = {
.version_id = 2,
.minimum_version_id = 2,
.fields = (VMStateField[]) {
- VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, 0, n_in),
+ VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, n_in),
VMSTATE_END_OF_LIST()
}
};
diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c
index 9047b8950a..37c4386ae3 100644
--- a/hw/intc/ioapic.c
+++ b/hw/intc/ioapic.c
@@ -408,13 +408,15 @@ static void ioapic_machine_done_notify(Notifier *notifier, void *data)
#endif
}
+#define IOAPIC_VER_DEF 0x20
+
static void ioapic_realize(DeviceState *dev, Error **errp)
{
IOAPICCommonState *s = IOAPIC_COMMON(dev);
if (s->version != 0x11 && s->version != 0x20) {
error_report("IOAPIC only supports version 0x11 or 0x20 "
- "(default: 0x11).");
+ "(default: 0x%x).", IOAPIC_VER_DEF);
exit(1);
}
@@ -429,7 +431,7 @@ static void ioapic_realize(DeviceState *dev, Error **errp)
}
static Property ioapic_properties[] = {
- DEFINE_PROP_UINT8("version", IOAPICCommonState, version, 0x20),
+ DEFINE_PROP_UINT8("version", IOAPICCommonState, version, IOAPIC_VER_DEF),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c
index 6e257730f8..15e6e40f9f 100644
--- a/hw/intc/mips_gic.c
+++ b/hw/intc/mips_gic.c
@@ -20,31 +20,29 @@
#include "kvm_mips.h"
#include "hw/intc/mips_gic.h"
-static void mips_gic_set_vp_irq(MIPSGICState *gic, int vp, int pin, int level)
+static void mips_gic_set_vp_irq(MIPSGICState *gic, int vp, int pin)
{
- int ored_level = level;
+ int ored_level = 0;
int i;
/* ORing pending registers sharing same pin */
- if (!ored_level) {
- for (i = 0; i < gic->num_irq; i++) {
- if ((gic->irq_state[i].map_pin & GIC_MAP_MSK) == pin &&
- gic->irq_state[i].map_vp == vp &&
- gic->irq_state[i].enabled) {
- ored_level |= gic->irq_state[i].pending;
- }
- if (ored_level) {
- /* no need to iterate all interrupts */
- break;
- }
+ for (i = 0; i < gic->num_irq; i++) {
+ if ((gic->irq_state[i].map_pin & GIC_MAP_MSK) == pin &&
+ gic->irq_state[i].map_vp == vp &&
+ gic->irq_state[i].enabled) {
+ ored_level |= gic->irq_state[i].pending;
}
- if (((gic->vps[vp].compare_map & GIC_MAP_MSK) == pin) &&
- (gic->vps[vp].mask & GIC_VP_MASK_CMP_MSK)) {
- /* ORing with local pending register (count/compare) */
- ored_level |= (gic->vps[vp].pend & GIC_VP_MASK_CMP_MSK) >>
- GIC_VP_MASK_CMP_SHF;
+ if (ored_level) {
+ /* no need to iterate all interrupts */
+ break;
}
}
+ if (((gic->vps[vp].compare_map & GIC_MAP_MSK) == pin) &&
+ (gic->vps[vp].mask & GIC_VP_MASK_CMP_MSK)) {
+ /* ORing with local pending register (count/compare) */
+ ored_level |= (gic->vps[vp].pend & GIC_VP_MASK_CMP_MSK) >>
+ GIC_VP_MASK_CMP_SHF;
+ }
if (kvm_enabled()) {
kvm_mips_set_ipi_interrupt(mips_env_get_cpu(gic->vps[vp].env),
pin + GIC_CPU_PIN_OFFSET,
@@ -55,21 +53,27 @@ static void mips_gic_set_vp_irq(MIPSGICState *gic, int vp, int pin, int level)
}
}
-static void gic_set_irq(void *opaque, int n_IRQ, int level)
+static void gic_update_pin_for_irq(MIPSGICState *gic, int n_IRQ)
{
- MIPSGICState *gic = (MIPSGICState *) opaque;
int vp = gic->irq_state[n_IRQ].map_vp;
int pin = gic->irq_state[n_IRQ].map_pin & GIC_MAP_MSK;
+ if (vp < 0 || vp >= gic->num_vps) {
+ return;
+ }
+ mips_gic_set_vp_irq(gic, vp, pin);
+}
+
+static void gic_set_irq(void *opaque, int n_IRQ, int level)
+{
+ MIPSGICState *gic = (MIPSGICState *) opaque;
+
gic->irq_state[n_IRQ].pending = (uint8_t) level;
if (!gic->irq_state[n_IRQ].enabled) {
/* GIC interrupt source disabled */
return;
}
- if (vp < 0 || vp >= gic->num_vps) {
- return;
- }
- mips_gic_set_vp_irq(gic, vp, pin, level);
+ gic_update_pin_for_irq(gic, n_IRQ);
}
#define OFFSET_CHECK(c) \
@@ -209,7 +213,7 @@ static void gic_timer_store_vp_compare(MIPSGICState *gic, uint32_t vp_index,
gic->vps[vp_index].pend &= ~(1 << GIC_LOCAL_INT_COMPARE);
if (gic->vps[vp_index].compare_map & GIC_MAP_TO_PIN_MSK) {
uint32_t pin = (gic->vps[vp_index].compare_map & GIC_MAP_MSK);
- mips_gic_set_vp_irq(gic, vp_index, pin, 0);
+ mips_gic_set_vp_irq(gic, vp_index, pin);
}
mips_gictimer_store_vp_compare(gic->gic_timer, vp_index, compare);
}
@@ -286,6 +290,7 @@ static void gic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size)
OFFSET_CHECK((base + size * 8) <= gic->num_irq);
for (i = 0; i < size * 8; i++) {
gic->irq_state[base + i].enabled &= !((data >> i) & 1);
+ gic_update_pin_for_irq(gic, base + i);
}
break;
case GIC_SH_WEDGE_OFS:
@@ -305,6 +310,7 @@ static void gic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size)
OFFSET_CHECK((base + size * 8) <= gic->num_irq);
for (i = 0; i < size * 8; i++) {
gic->irq_state[base + i].enabled |= (data >> i) & 1;
+ gic_update_pin_for_irq(gic, base + i);
}
break;
case GIC_SH_MAP0_PIN_OFS ... GIC_SH_MAP255_PIN_OFS:
diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c
index 6ab29efc65..bef4caf980 100644
--- a/hw/intc/s390_flic.c
+++ b/hw/intc/s390_flic.c
@@ -16,6 +16,8 @@
#include "migration/qemu-file.h"
#include "hw/s390x/s390_flic.h"
#include "trace.h"
+#include "hw/qdev.h"
+#include "qapi/error.h"
S390FLICState *s390_get_flic(void)
{
@@ -85,6 +87,30 @@ static void qemu_s390_flic_class_init(ObjectClass *oc, void *data)
fsc->clear_io_irq = qemu_s390_clear_io_flic;
}
+static Property s390_flic_common_properties[] = {
+ DEFINE_PROP_UINT32("adapter_routes_max_batch", S390FLICState,
+ adapter_routes_max_batch, ADAPTER_ROUTES_MAX_GSI),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void s390_flic_common_realize(DeviceState *dev, Error **errp)
+{
+ uint32_t max_batch = S390_FLIC_COMMON(dev)->adapter_routes_max_batch;
+
+ if (max_batch > ADAPTER_ROUTES_MAX_GSI) {
+ error_setg(errp, "flic adapter_routes_max_batch too big"
+ "%d (%d allowed)", max_batch, ADAPTER_ROUTES_MAX_GSI);
+ }
+}
+
+static void s390_flic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->props = s390_flic_common_properties;
+ dc->realize = s390_flic_common_realize;
+}
+
static const TypeInfo qemu_s390_flic_info = {
.name = TYPE_QEMU_S390_FLIC,
.parent = TYPE_S390_FLIC_COMMON,
@@ -92,10 +118,12 @@ static const TypeInfo qemu_s390_flic_info = {
.class_init = qemu_s390_flic_class_init,
};
+
static const TypeInfo s390_flic_common_info = {
.name = TYPE_S390_FLIC_COMMON,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(S390FLICState),
+ .class_init = s390_flic_class_init,
.class_size = sizeof(S390FLICStateClass),
};
diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
index e86a84e49a..cc44bc4e1e 100644
--- a/hw/intc/s390_flic_kvm.c
+++ b/hw/intc/s390_flic_kvm.c
@@ -293,6 +293,7 @@ static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size,
int len = FLIC_SAVE_INITIAL_SIZE;
void *buf;
int count;
+ int r = 0;
flic_disable_wait_pfault((struct KVMS390FLICState *) opaque);
@@ -303,7 +304,7 @@ static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size,
* migration state */
error_report("flic: couldn't allocate memory");
qemu_put_be64(f, FLIC_FAILED);
- return 0;
+ return -ENOMEM;
}
count = __get_all_irqs(flic, &buf, len);
@@ -314,6 +315,7 @@ static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size,
* target system to fail when attempting to load irqs from the
* migration state */
qemu_put_be64(f, FLIC_FAILED);
+ r = count;
} else {
qemu_put_be64(f, count);
qemu_put_buffer(f, (uint8_t *) buf,
@@ -321,7 +323,7 @@ static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size,
}
g_free(buf);
- return 0;
+ return r;
}
/**
diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c
index f03661715c..1c69cb33f8 100644
--- a/hw/ipmi/isa_ipmi_bt.c
+++ b/hw/ipmi/isa_ipmi_bt.c
@@ -471,10 +471,8 @@ static const VMStateDescription vmstate_ISAIPMIBTDevice = {
VMSTATE_BOOL(bt.use_irq, ISAIPMIBTDevice),
VMSTATE_BOOL(bt.irqs_enabled, ISAIPMIBTDevice),
VMSTATE_UINT32(bt.outpos, ISAIPMIBTDevice),
- VMSTATE_VBUFFER_UINT32(bt.outmsg, ISAIPMIBTDevice, 1, NULL, 0,
- bt.outlen),
- VMSTATE_VBUFFER_UINT32(bt.inmsg, ISAIPMIBTDevice, 1, NULL, 0,
- bt.inlen),
+ VMSTATE_VBUFFER_UINT32(bt.outmsg, ISAIPMIBTDevice, 1, NULL, bt.outlen),
+ VMSTATE_VBUFFER_UINT32(bt.inmsg, ISAIPMIBTDevice, 1, NULL, bt.inlen),
VMSTATE_UINT8(bt.control_reg, ISAIPMIBTDevice),
VMSTATE_UINT8(bt.mask_reg, ISAIPMIBTDevice),
VMSTATE_UINT8(bt.waiting_rsp, ISAIPMIBTDevice),
diff --git a/hw/isa/Makefile.objs b/hw/isa/Makefile.objs
index 9164556a4d..fb37c55cf2 100644
--- a/hw/isa/Makefile.objs
+++ b/hw/isa/Makefile.objs
@@ -1,4 +1,4 @@
-common-obj-y += isa-bus.o
+common-obj-$(CONFIG_ISA_BUS) += isa-bus.o
common-obj-$(CONFIG_APM) += apm.o
common-obj-$(CONFIG_I82378) += i82378.o
common-obj-$(CONFIG_PC87312) += pc87312.o
diff --git a/hw/m68k/Makefile.objs b/hw/m68k/Makefile.objs
index c4352e783a..d1f089c08a 100644
--- a/hw/m68k/Makefile.objs
+++ b/hw/m68k/Makefile.objs
@@ -1,4 +1,2 @@
obj-y += an5206.o mcf5208.o
-obj-y += dummy_m68k.o
-
obj-y += mcf5206.o mcf_intc.o
diff --git a/hw/m68k/dummy_m68k.c b/hw/m68k/dummy_m68k.c
deleted file mode 100644
index 0b11d2074a..0000000000
--- a/hw/m68k/dummy_m68k.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Dummy board with just RAM and CPU for use as an ISS.
- *
- * Copyright (c) 2007 CodeSourcery.
- *
- * This code is licensed under the GPL
- */
-
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "cpu.h"
-#include "hw/hw.h"
-#include "hw/boards.h"
-#include "hw/loader.h"
-#include "elf.h"
-#include "exec/address-spaces.h"
-
-#define KERNEL_LOAD_ADDR 0x10000
-
-/* Board init. */
-
-static void dummy_m68k_init(MachineState *machine)
-{
- ram_addr_t ram_size = machine->ram_size;
- const char *cpu_model = machine->cpu_model;
- const char *kernel_filename = machine->kernel_filename;
- M68kCPU *cpu;
- CPUM68KState *env;
- MemoryRegion *address_space_mem = get_system_memory();
- MemoryRegion *ram = g_new(MemoryRegion, 1);
- int kernel_size;
- uint64_t elf_entry;
- hwaddr entry;
-
- if (!cpu_model)
- cpu_model = "cfv4e";
- cpu = cpu_m68k_init(cpu_model);
- if (!cpu) {
- fprintf(stderr, "Unable to find m68k CPU definition\n");
- exit(1);
- }
- env = &cpu->env;
-
- /* Initialize CPU registers. */
- env->vbr = 0;
-
- /* RAM at address zero */
- memory_region_allocate_system_memory(ram, NULL, "dummy_m68k.ram",
- ram_size);
- memory_region_add_subregion(address_space_mem, 0, ram);
-
- /* Load kernel. */
- if (kernel_filename) {
- kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry,
- NULL, NULL, 1, EM_68K, 0, 0);
- entry = elf_entry;
- if (kernel_size < 0) {
- kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL,
- NULL, NULL);
- }
- if (kernel_size < 0) {
- kernel_size = load_image_targphys(kernel_filename,
- KERNEL_LOAD_ADDR,
- ram_size - KERNEL_LOAD_ADDR);
- entry = KERNEL_LOAD_ADDR;
- }
- if (kernel_size < 0) {
- fprintf(stderr, "qemu: could not load kernel '%s'\n",
- kernel_filename);
- exit(1);
- }
- } else {
- entry = 0;
- }
- env->pc = entry;
-}
-
-static void dummy_m68k_machine_init(MachineClass *mc)
-{
- mc->desc = "Dummy board";
- mc->init = dummy_m68k_init;
-}
-
-DEFINE_MACHINE("dummy", dummy_m68k_machine_init)
diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c
index bad1d332ed..656351834e 100644
--- a/hw/m68k/mcf5208.c
+++ b/hw/m68k/mcf5208.c
@@ -255,9 +255,9 @@ static void mcf5208evb_init(MachineState *machine)
/* Internal peripherals. */
pic = mcf_intc_init(address_space_mem, 0xfc048000, cpu);
- mcf_uart_mm_init(address_space_mem, 0xfc060000, pic[26], serial_hds[0]);
- mcf_uart_mm_init(address_space_mem, 0xfc064000, pic[27], serial_hds[1]);
- mcf_uart_mm_init(address_space_mem, 0xfc068000, pic[28], serial_hds[2]);
+ mcf_uart_mm_init(0xfc060000, pic[26], serial_hds[0]);
+ mcf_uart_mm_init(0xfc064000, pic[27], serial_hds[1]);
+ mcf_uart_mm_init(0xfc068000, pic[28], serial_hds[2]);
mcf5208_sys_init(address_space_mem, pic);
diff --git a/hw/m68k/mcf_intc.c b/hw/m68k/mcf_intc.c
index cf581324eb..8198afac1e 100644
--- a/hw/m68k/mcf_intc.c
+++ b/hw/m68k/mcf_intc.c
@@ -9,10 +9,16 @@
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
+#include "hw/sysbus.h"
#include "hw/m68k/mcf.h"
#include "exec/address-spaces.h"
+#define TYPE_MCF_INTC "mcf-intc"
+#define MCF_INTC(obj) OBJECT_CHECK(mcf_intc_state, (obj), TYPE_MCF_INTC)
+
typedef struct {
+ SysBusDevice parent_obj;
+
MemoryRegion iomem;
uint64_t ipr;
uint64_t imr;
@@ -138,8 +144,10 @@ static void mcf_intc_set_irq(void *opaque, int irq, int level)
mcf_intc_update(s);
}
-static void mcf_intc_reset(mcf_intc_state *s)
+static void mcf_intc_reset(DeviceState *dev)
{
+ mcf_intc_state *s = MCF_INTC(dev);
+
s->imr = ~0ull;
s->ipr = 0;
s->ifr = 0;
@@ -154,17 +162,49 @@ static const MemoryRegionOps mcf_intc_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
+static void mcf_intc_instance_init(Object *obj)
+{
+ mcf_intc_state *s = MCF_INTC(obj);
+
+ memory_region_init_io(&s->iomem, obj, &mcf_intc_ops, s, "mcf", 0x100);
+}
+
+static void mcf_intc_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->reset = mcf_intc_reset;
+}
+
+static const TypeInfo mcf_intc_gate_info = {
+ .name = TYPE_MCF_INTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(mcf_intc_state),
+ .instance_init = mcf_intc_instance_init,
+ .class_init = mcf_intc_class_init,
+};
+
+static void mcf_intc_register_types(void)
+{
+ type_register_static(&mcf_intc_gate_info);
+}
+
+type_init(mcf_intc_register_types)
+
qemu_irq *mcf_intc_init(MemoryRegion *sysmem,
hwaddr base,
M68kCPU *cpu)
{
+ DeviceState *dev;
mcf_intc_state *s;
- s = g_malloc0(sizeof(mcf_intc_state));
+ dev = qdev_create(NULL, TYPE_MCF_INTC);
+ qdev_init_nofail(dev);
+
+ s = MCF_INTC(dev);
s->cpu = cpu;
- mcf_intc_reset(s);
- memory_region_init_io(&s->iomem, NULL, &mcf_intc_ops, s, "mcf", 0x100);
memory_region_add_subregion(sysmem, base, &s->iomem);
return qemu_allocate_irqs(mcf_intc_set_irq, s, 64);
diff --git a/hw/mips/Makefile.objs b/hw/mips/Makefile.objs
index 9352a1c062..48cd2ef50e 100644
--- a/hw/mips/Makefile.objs
+++ b/hw/mips/Makefile.objs
@@ -4,3 +4,4 @@ obj-$(CONFIG_JAZZ) += mips_jazz.o
obj-$(CONFIG_FULONG) += mips_fulong2e.o
obj-y += gt64xxx_pci.o
obj-$(CONFIG_MIPS_CPS) += cps.o
+obj-$(CONFIG_MIPS_BOSTON) += boston.o
diff --git a/hw/mips/boston.c b/hw/mips/boston.c
new file mode 100644
index 0000000000..83f7b82386
--- /dev/null
+++ b/hw/mips/boston.c
@@ -0,0 +1,577 @@
+/*
+ * MIPS Boston development board emulation.
+ *
+ * Copyright (c) 2016 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+
+#include "exec/address-spaces.h"
+#include "hw/boards.h"
+#include "hw/char/serial.h"
+#include "hw/hw.h"
+#include "hw/ide/pci.h"
+#include "hw/ide/ahci.h"
+#include "hw/loader.h"
+#include "hw/loader-fit.h"
+#include "hw/mips/cps.h"
+#include "hw/mips/cpudevs.h"
+#include "hw/pci-host/xilinx-pcie.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "sysemu/char.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/qtest.h"
+
+#include <libfdt.h>
+
+#define TYPE_MIPS_BOSTON "mips-boston"
+#define BOSTON(obj) OBJECT_CHECK(BostonState, (obj), TYPE_MIPS_BOSTON)
+
+typedef struct {
+ SysBusDevice parent_obj;
+
+ MachineState *mach;
+ MIPSCPSState *cps;
+ SerialState *uart;
+
+ CharBackend lcd_display;
+ char lcd_content[8];
+ bool lcd_inited;
+
+ hwaddr kernel_entry;
+ hwaddr fdt_base;
+} BostonState;
+
+enum boston_plat_reg {
+ PLAT_FPGA_BUILD = 0x00,
+ PLAT_CORE_CL = 0x04,
+ PLAT_WRAPPER_CL = 0x08,
+ PLAT_SYSCLK_STATUS = 0x0c,
+ PLAT_SOFTRST_CTL = 0x10,
+#define PLAT_SOFTRST_CTL_SYSRESET (1 << 4)
+ PLAT_DDR3_STATUS = 0x14,
+#define PLAT_DDR3_STATUS_LOCKED (1 << 0)
+#define PLAT_DDR3_STATUS_CALIBRATED (1 << 2)
+ PLAT_PCIE_STATUS = 0x18,
+#define PLAT_PCIE_STATUS_PCIE0_LOCKED (1 << 0)
+#define PLAT_PCIE_STATUS_PCIE1_LOCKED (1 << 8)
+#define PLAT_PCIE_STATUS_PCIE2_LOCKED (1 << 16)
+ PLAT_FLASH_CTL = 0x1c,
+ PLAT_SPARE0 = 0x20,
+ PLAT_SPARE1 = 0x24,
+ PLAT_SPARE2 = 0x28,
+ PLAT_SPARE3 = 0x2c,
+ PLAT_MMCM_DIV = 0x30,
+#define PLAT_MMCM_DIV_CLK0DIV_SHIFT 0
+#define PLAT_MMCM_DIV_INPUT_SHIFT 8
+#define PLAT_MMCM_DIV_MUL_SHIFT 16
+#define PLAT_MMCM_DIV_CLK1DIV_SHIFT 24
+ PLAT_BUILD_CFG = 0x34,
+#define PLAT_BUILD_CFG_IOCU_EN (1 << 0)
+#define PLAT_BUILD_CFG_PCIE0_EN (1 << 1)
+#define PLAT_BUILD_CFG_PCIE1_EN (1 << 2)
+#define PLAT_BUILD_CFG_PCIE2_EN (1 << 3)
+ PLAT_DDR_CFG = 0x38,
+#define PLAT_DDR_CFG_SIZE (0xf << 0)
+#define PLAT_DDR_CFG_MHZ (0xfff << 4)
+ PLAT_NOC_PCIE0_ADDR = 0x3c,
+ PLAT_NOC_PCIE1_ADDR = 0x40,
+ PLAT_NOC_PCIE2_ADDR = 0x44,
+ PLAT_SYS_CTL = 0x48,
+};
+
+static void boston_lcd_event(void *opaque, int event)
+{
+ BostonState *s = opaque;
+ if (event == CHR_EVENT_OPENED && !s->lcd_inited) {
+ qemu_chr_fe_printf(&s->lcd_display, " ");
+ s->lcd_inited = true;
+ }
+}
+
+static uint64_t boston_lcd_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ BostonState *s = opaque;
+ uint64_t val = 0;
+
+ switch (size) {
+ case 8:
+ val |= (uint64_t)s->lcd_content[(addr + 7) & 0x7] << 56;
+ val |= (uint64_t)s->lcd_content[(addr + 6) & 0x7] << 48;
+ val |= (uint64_t)s->lcd_content[(addr + 5) & 0x7] << 40;
+ val |= (uint64_t)s->lcd_content[(addr + 4) & 0x7] << 32;
+ /* fall through */
+ case 4:
+ val |= (uint64_t)s->lcd_content[(addr + 3) & 0x7] << 24;
+ val |= (uint64_t)s->lcd_content[(addr + 2) & 0x7] << 16;
+ /* fall through */
+ case 2:
+ val |= (uint64_t)s->lcd_content[(addr + 1) & 0x7] << 8;
+ /* fall through */
+ case 1:
+ val |= (uint64_t)s->lcd_content[(addr + 0) & 0x7];
+ break;
+ }
+
+ return val;
+}
+
+static void boston_lcd_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ BostonState *s = opaque;
+
+ switch (size) {
+ case 8:
+ s->lcd_content[(addr + 7) & 0x7] = val >> 56;
+ s->lcd_content[(addr + 6) & 0x7] = val >> 48;
+ s->lcd_content[(addr + 5) & 0x7] = val >> 40;
+ s->lcd_content[(addr + 4) & 0x7] = val >> 32;
+ /* fall through */
+ case 4:
+ s->lcd_content[(addr + 3) & 0x7] = val >> 24;
+ s->lcd_content[(addr + 2) & 0x7] = val >> 16;
+ /* fall through */
+ case 2:
+ s->lcd_content[(addr + 1) & 0x7] = val >> 8;
+ /* fall through */
+ case 1:
+ s->lcd_content[(addr + 0) & 0x7] = val;
+ break;
+ }
+
+ qemu_chr_fe_printf(&s->lcd_display,
+ "\r%-8.8s", s->lcd_content);
+}
+
+static const MemoryRegionOps boston_lcd_ops = {
+ .read = boston_lcd_read,
+ .write = boston_lcd_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static uint64_t boston_platreg_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ BostonState *s = opaque;
+ uint32_t gic_freq, val;
+
+ if (size != 4) {
+ qemu_log_mask(LOG_UNIMP, "%uB platform register read", size);
+ return 0;
+ }
+
+ switch (addr & 0xffff) {
+ case PLAT_FPGA_BUILD:
+ case PLAT_CORE_CL:
+ case PLAT_WRAPPER_CL:
+ return 0;
+ case PLAT_DDR3_STATUS:
+ return PLAT_DDR3_STATUS_LOCKED | PLAT_DDR3_STATUS_CALIBRATED;
+ case PLAT_MMCM_DIV:
+ gic_freq = mips_gictimer_get_freq(s->cps->gic.gic_timer) / 1000000;
+ val = gic_freq << PLAT_MMCM_DIV_INPUT_SHIFT;
+ val |= 1 << PLAT_MMCM_DIV_MUL_SHIFT;
+ val |= 1 << PLAT_MMCM_DIV_CLK0DIV_SHIFT;
+ val |= 1 << PLAT_MMCM_DIV_CLK1DIV_SHIFT;
+ return val;
+ case PLAT_BUILD_CFG:
+ val = PLAT_BUILD_CFG_PCIE0_EN;
+ val |= PLAT_BUILD_CFG_PCIE1_EN;
+ val |= PLAT_BUILD_CFG_PCIE2_EN;
+ return val;
+ case PLAT_DDR_CFG:
+ val = s->mach->ram_size / G_BYTE;
+ assert(!(val & ~PLAT_DDR_CFG_SIZE));
+ val |= PLAT_DDR_CFG_MHZ;
+ return val;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Read platform register 0x%" HWADDR_PRIx,
+ addr & 0xffff);
+ return 0;
+ }
+}
+
+static void boston_platreg_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ if (size != 4) {
+ qemu_log_mask(LOG_UNIMP, "%uB platform register write", size);
+ return;
+ }
+
+ switch (addr & 0xffff) {
+ case PLAT_FPGA_BUILD:
+ case PLAT_CORE_CL:
+ case PLAT_WRAPPER_CL:
+ case PLAT_DDR3_STATUS:
+ case PLAT_PCIE_STATUS:
+ case PLAT_MMCM_DIV:
+ case PLAT_BUILD_CFG:
+ case PLAT_DDR_CFG:
+ /* read only */
+ break;
+ case PLAT_SOFTRST_CTL:
+ if (val & PLAT_SOFTRST_CTL_SYSRESET) {
+ qemu_system_reset_request();
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Write platform register 0x%" HWADDR_PRIx
+ " = 0x%" PRIx64, addr & 0xffff, val);
+ break;
+ }
+}
+
+static const MemoryRegionOps boston_platreg_ops = {
+ .read = boston_platreg_read,
+ .write = boston_platreg_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void boston_flash_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+}
+
+static const MemoryRegionOps boston_flash_ops = {
+ .write = boston_flash_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const TypeInfo boston_device = {
+ .name = TYPE_MIPS_BOSTON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(BostonState),
+};
+
+static void boston_register_types(void)
+{
+ type_register_static(&boston_device);
+}
+type_init(boston_register_types)
+
+static void gen_firmware(uint32_t *p, hwaddr kernel_entry, hwaddr fdt_addr,
+ bool is_64b)
+{
+ const uint32_t cm_base = 0x16100000;
+ const uint32_t gic_base = 0x16120000;
+ const uint32_t cpc_base = 0x16200000;
+
+ /* Move CM GCRs */
+ if (is_64b) {
+ stl_p(p++, 0x40287803); /* dmfc0 $8, CMGCRBase */
+ stl_p(p++, 0x00084138); /* dsll $8, $8, 4 */
+ } else {
+ stl_p(p++, 0x40087803); /* mfc0 $8, CMGCRBase */
+ stl_p(p++, 0x00084100); /* sll $8, $8, 4 */
+ }
+ stl_p(p++, 0x3c09a000); /* lui $9, 0xa000 */
+ stl_p(p++, 0x01094025); /* or $8, $9 */
+ stl_p(p++, 0x3c0a0000 | (cm_base >> 16)); /* lui $10, cm_base >> 16 */
+ if (is_64b) {
+ stl_p(p++, 0xfd0a0008); /* sd $10, 0x8($8) */
+ } else {
+ stl_p(p++, 0xad0a0008); /* sw $10, 0x8($8) */
+ }
+ stl_p(p++, 0x012a4025); /* or $8, $10 */
+
+ /* Move & enable GIC GCRs */
+ stl_p(p++, 0x3c090000 | (gic_base >> 16)); /* lui $9, gic_base >> 16 */
+ stl_p(p++, 0x35290001); /* ori $9, 0x1 */
+ if (is_64b) {
+ stl_p(p++, 0xfd090080); /* sd $9, 0x80($8) */
+ } else {
+ stl_p(p++, 0xad090080); /* sw $9, 0x80($8) */
+ }
+
+ /* Move & enable CPC GCRs */
+ stl_p(p++, 0x3c090000 | (cpc_base >> 16)); /* lui $9, cpc_base >> 16 */
+ stl_p(p++, 0x35290001); /* ori $9, 0x1 */
+ if (is_64b) {
+ stl_p(p++, 0xfd090088); /* sd $9, 0x88($8) */
+ } else {
+ stl_p(p++, 0xad090088); /* sw $9, 0x88($8) */
+ }
+
+ /*
+ * Setup argument registers to follow the UHI boot protocol:
+ *
+ * a0/$4 = -2
+ * a1/$5 = virtual address of FDT
+ * a2/$6 = 0
+ * a3/$7 = 0
+ */
+ stl_p(p++, 0x2404fffe); /* li $4, -2 */
+ /* lui $5, hi(fdt_addr) */
+ stl_p(p++, 0x3c050000 | ((fdt_addr >> 16) & 0xffff));
+ if (fdt_addr & 0xffff) { /* ori $5, lo(fdt_addr) */
+ stl_p(p++, 0x34a50000 | (fdt_addr & 0xffff));
+ }
+ stl_p(p++, 0x34060000); /* li $6, 0 */
+ stl_p(p++, 0x34070000); /* li $7, 0 */
+
+ /* Load kernel entry address & jump to it */
+ /* lui $25, hi(kernel_entry) */
+ stl_p(p++, 0x3c190000 | ((kernel_entry >> 16) & 0xffff));
+ /* ori $25, lo(kernel_entry) */
+ stl_p(p++, 0x37390000 | (kernel_entry & 0xffff));
+ stl_p(p++, 0x03200009); /* jr $25 */
+}
+
+static const void *boston_fdt_filter(void *opaque, const void *fdt_orig,
+ const void *match_data, hwaddr *load_addr)
+{
+ BostonState *s = BOSTON(opaque);
+ MachineState *machine = s->mach;
+ const char *cmdline;
+ int err;
+ void *fdt;
+ size_t fdt_sz, ram_low_sz, ram_high_sz;
+
+ fdt_sz = fdt_totalsize(fdt_orig) * 2;
+ fdt = g_malloc0(fdt_sz);
+
+ err = fdt_open_into(fdt_orig, fdt, fdt_sz);
+ if (err) {
+ fprintf(stderr, "unable to open FDT\n");
+ return NULL;
+ }
+
+ cmdline = (machine->kernel_cmdline && machine->kernel_cmdline[0])
+ ? machine->kernel_cmdline : " ";
+ err = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
+ if (err < 0) {
+ fprintf(stderr, "couldn't set /chosen/bootargs\n");
+ return NULL;
+ }
+
+ ram_low_sz = MIN(256 * M_BYTE, machine->ram_size);
+ ram_high_sz = machine->ram_size - ram_low_sz;
+ qemu_fdt_setprop_sized_cells(fdt, "/memory@0", "reg",
+ 1, 0x00000000, 1, ram_low_sz,
+ 1, 0x90000000, 1, ram_high_sz);
+
+ fdt = g_realloc(fdt, fdt_totalsize(fdt));
+ qemu_fdt_dumpdtb(fdt, fdt_sz);
+
+ s->fdt_base = *load_addr;
+
+ return fdt;
+}
+
+static const void *boston_kernel_filter(void *opaque, const void *kernel,
+ hwaddr *load_addr, hwaddr *entry_addr)
+{
+ BostonState *s = BOSTON(opaque);
+
+ s->kernel_entry = *entry_addr;
+
+ return kernel;
+}
+
+static const struct fit_loader_match boston_matches[] = {
+ { "img,boston" },
+ { NULL },
+};
+
+static const struct fit_loader boston_fit_loader = {
+ .matches = boston_matches,
+ .addr_to_phys = cpu_mips_kseg0_to_phys,
+ .fdt_filter = boston_fdt_filter,
+ .kernel_filter = boston_kernel_filter,
+};
+
+static inline XilinxPCIEHost *
+xilinx_pcie_init(MemoryRegion *sys_mem, uint32_t bus_nr,
+ hwaddr cfg_base, uint64_t cfg_size,
+ hwaddr mmio_base, uint64_t mmio_size,
+ qemu_irq irq, bool link_up)
+{
+ DeviceState *dev;
+ MemoryRegion *cfg, *mmio;
+
+ dev = qdev_create(NULL, TYPE_XILINX_PCIE_HOST);
+
+ qdev_prop_set_uint32(dev, "bus_nr", bus_nr);
+ qdev_prop_set_uint64(dev, "cfg_base", cfg_base);
+ qdev_prop_set_uint64(dev, "cfg_size", cfg_size);
+ qdev_prop_set_uint64(dev, "mmio_base", mmio_base);
+ qdev_prop_set_uint64(dev, "mmio_size", mmio_size);
+ qdev_prop_set_bit(dev, "link_up", link_up);
+
+ qdev_init_nofail(dev);
+
+ cfg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
+ memory_region_add_subregion_overlap(sys_mem, cfg_base, cfg, 0);
+
+ mmio = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
+ memory_region_add_subregion_overlap(sys_mem, 0, mmio, 0);
+
+ qdev_connect_gpio_out_named(dev, "interrupt_out", 0, irq);
+
+ return XILINX_PCIE_HOST(dev);
+}
+
+static void boston_mach_init(MachineState *machine)
+{
+ DeviceState *dev;
+ BostonState *s;
+ Error *err = NULL;
+ const char *cpu_model;
+ MemoryRegion *flash, *ddr, *ddr_low_alias, *lcd, *platreg;
+ MemoryRegion *sys_mem = get_system_memory();
+ XilinxPCIEHost *pcie2;
+ PCIDevice *ahci;
+ DriveInfo *hd[6];
+ Chardev *chr;
+ int fw_size, fit_err;
+ bool is_64b;
+
+ if ((machine->ram_size % G_BYTE) ||
+ (machine->ram_size > (2 * G_BYTE))) {
+ error_report("Memory size must be 1GB or 2GB");
+ exit(1);
+ }
+
+ cpu_model = machine->cpu_model ?: "I6400";
+
+ dev = qdev_create(NULL, TYPE_MIPS_BOSTON);
+ qdev_init_nofail(dev);
+
+ s = BOSTON(dev);
+ s->mach = machine;
+ s->cps = g_new0(MIPSCPSState, 1);
+
+ if (!cpu_supports_cps_smp(cpu_model)) {
+ error_report("Boston requires CPUs which support CPS");
+ exit(1);
+ }
+
+ is_64b = cpu_supports_isa(cpu_model, ISA_MIPS64);
+
+ object_initialize(s->cps, sizeof(MIPSCPSState), TYPE_MIPS_CPS);
+ qdev_set_parent_bus(DEVICE(s->cps), sysbus_get_default());
+
+ object_property_set_str(OBJECT(s->cps), cpu_model, "cpu-model", &err);
+ object_property_set_int(OBJECT(s->cps), smp_cpus, "num-vp", &err);
+ object_property_set_bool(OBJECT(s->cps), true, "realized", &err);
+
+ if (err != NULL) {
+ error_report("%s", error_get_pretty(err));
+ exit(1);
+ }
+
+ sysbus_mmio_map_overlap(SYS_BUS_DEVICE(s->cps), 0, 0, 1);
+
+ flash = g_new(MemoryRegion, 1);
+ memory_region_init_rom_device(flash, NULL, &boston_flash_ops, s,
+ "boston.flash", 128 * M_BYTE, &err);
+ memory_region_add_subregion_overlap(sys_mem, 0x18000000, flash, 0);
+
+ ddr = g_new(MemoryRegion, 1);
+ memory_region_allocate_system_memory(ddr, NULL, "boston.ddr",
+ machine->ram_size);
+ memory_region_add_subregion_overlap(sys_mem, 0x80000000, ddr, 0);
+
+ ddr_low_alias = g_new(MemoryRegion, 1);
+ memory_region_init_alias(ddr_low_alias, NULL, "boston_low.ddr",
+ ddr, 0, MIN(machine->ram_size, (256 * M_BYTE)));
+ memory_region_add_subregion_overlap(sys_mem, 0, ddr_low_alias, 0);
+
+ xilinx_pcie_init(sys_mem, 0,
+ 0x10000000, 32 * M_BYTE,
+ 0x40000000, 1 * G_BYTE,
+ get_cps_irq(s->cps, 2), false);
+
+ xilinx_pcie_init(sys_mem, 1,
+ 0x12000000, 32 * M_BYTE,
+ 0x20000000, 512 * M_BYTE,
+ get_cps_irq(s->cps, 1), false);
+
+ pcie2 = xilinx_pcie_init(sys_mem, 2,
+ 0x14000000, 32 * M_BYTE,
+ 0x16000000, 1 * M_BYTE,
+ get_cps_irq(s->cps, 0), true);
+
+ platreg = g_new(MemoryRegion, 1);
+ memory_region_init_io(platreg, NULL, &boston_platreg_ops, s,
+ "boston-platregs", 0x1000);
+ memory_region_add_subregion_overlap(sys_mem, 0x17ffd000, platreg, 0);
+
+ if (!serial_hds[0]) {
+ serial_hds[0] = qemu_chr_new("serial0", "null");
+ }
+
+ s->uart = serial_mm_init(sys_mem, 0x17ffe000, 2,
+ get_cps_irq(s->cps, 3), 10000000,
+ serial_hds[0], DEVICE_NATIVE_ENDIAN);
+
+ lcd = g_new(MemoryRegion, 1);
+ memory_region_init_io(lcd, NULL, &boston_lcd_ops, s, "boston-lcd", 0x8);
+ memory_region_add_subregion_overlap(sys_mem, 0x17fff000, lcd, 0);
+
+ chr = qemu_chr_new("lcd", "vc:320x240");
+ qemu_chr_fe_init(&s->lcd_display, chr, NULL);
+ qemu_chr_fe_set_handlers(&s->lcd_display, NULL, NULL,
+ boston_lcd_event, s, NULL, true);
+
+ ahci = pci_create_simple_multifunction(&PCI_BRIDGE(&pcie2->root)->sec_bus,
+ PCI_DEVFN(0, 0),
+ true, TYPE_ICH9_AHCI);
+ g_assert(ARRAY_SIZE(hd) == ICH_AHCI(ahci)->ahci.ports);
+ ide_drive_get(hd, ICH_AHCI(ahci)->ahci.ports);
+ ahci_ide_create_devs(ahci, hd);
+
+ if (machine->firmware) {
+ fw_size = load_image_targphys(machine->firmware,
+ 0x1fc00000, 4 * M_BYTE);
+ if (fw_size == -1) {
+ error_printf("unable to load firmware image '%s'\n",
+ machine->firmware);
+ exit(1);
+ }
+ } else if (machine->kernel_filename) {
+ fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s);
+ if (fit_err) {
+ error_printf("unable to load FIT image\n");
+ exit(1);
+ }
+
+ gen_firmware(memory_region_get_ram_ptr(flash) + 0x7c00000,
+ s->kernel_entry, s->fdt_base, is_64b);
+ } else if (!qtest_enabled()) {
+ error_printf("Please provide either a -kernel or -bios argument\n");
+ exit(1);
+ }
+}
+
+static void boston_mach_class_init(MachineClass *mc)
+{
+ mc->desc = "MIPS Boston";
+ mc->init = boston_mach_init;
+ mc->block_default_type = IF_IDE;
+ mc->default_ram_size = 1 * G_BYTE;
+ mc->max_cpus = 16;
+}
+
+DEFINE_MACHINE("boston", boston_mach_class_init)
diff --git a/hw/mips/mips_fulong2e.c b/hw/mips/mips_fulong2e.c
index 9a4dae42d9..e636c3abaa 100644
--- a/hw/mips/mips_fulong2e.c
+++ b/hw/mips/mips_fulong2e.c
@@ -387,6 +387,7 @@ static void mips_fulong2e_machine_init(MachineClass *mc)
{
mc->desc = "Fulong 2e mini pc";
mc->init = mips_fulong2e_init;
+ mc->block_default_type = IF_IDE;
}
DEFINE_MACHINE("fulong2e", mips_fulong2e_machine_init)
diff --git a/hw/mips/mips_jazz.c b/hw/mips/mips_jazz.c
index 73f6c9facf..1cef581878 100644
--- a/hw/mips/mips_jazz.c
+++ b/hw/mips/mips_jazz.c
@@ -291,10 +291,6 @@ static void mips_jazz_init(MachineState *machine,
qdev_get_gpio_in(rc4030, 5), &esp_reset, &dma_enable);
/* Floppy */
- if (drive_get_max_bus(IF_FLOPPY) >= MAX_FD) {
- fprintf(stderr, "qemu: too many floppy drives\n");
- exit(1);
- }
for (n = 0; n < MAX_FD; n++) {
fds[n] = drive_get(IF_FLOPPY, 0, n);
}
diff --git a/hw/mips/mips_malta.c b/hw/mips/mips_malta.c
index 75877de11c..5dd177e961 100644
--- a/hw/mips/mips_malta.c
+++ b/hw/mips/mips_malta.c
@@ -1264,6 +1264,7 @@ static void mips_malta_machine_init(MachineClass *mc)
{
mc->desc = "MIPS Malta Core LV";
mc->init = mips_malta_init;
+ mc->block_default_type = IF_IDE;
mc->max_cpus = 16;
mc->is_default = 1;
}
diff --git a/hw/mips/mips_r4k.c b/hw/mips/mips_r4k.c
index 27548c43b6..748586ed77 100644
--- a/hw/mips/mips_r4k.c
+++ b/hw/mips/mips_r4k.c
@@ -306,6 +306,7 @@ static void mips_machine_init(MachineClass *mc)
{
mc->desc = "mips r4k platform";
mc->init = mips_r4k_init;
+ mc->block_default_type = IF_IDE;
}
DEFINE_MACHINE("mips", mips_machine_init)
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index 1a89615a62..898e4ccfb1 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -6,6 +6,8 @@ common-obj-$(CONFIG_SGA) += sga.o
common-obj-$(CONFIG_ISA_TESTDEV) += pc-testdev.o
common-obj-$(CONFIG_PCI_TESTDEV) += pci-testdev.o
+common-obj-y += unimp.o
+
obj-$(CONFIG_VMPORT) += vmport.o
# ARM devices
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
index 55b817b8d7..edbb756c36 100644
--- a/hw/misc/imx6_src.c
+++ b/hw/misc/imx6_src.c
@@ -14,6 +14,7 @@
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "arm-powerctl.h"
+#include "qom/cpu.h"
#ifndef DEBUG_IMX6_SRC
#define DEBUG_IMX6_SRC 0
@@ -113,6 +114,45 @@ static uint64_t imx6_src_read(void *opaque, hwaddr offset, unsigned size)
return value;
}
+
+/* The reset is asynchronous so we need to defer clearing the reset
+ * bit until the work is completed.
+ */
+
+struct SRCSCRResetInfo {
+ IMX6SRCState *s;
+ int reset_bit;
+};
+
+static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
+{
+ struct SRCSCRResetInfo *ri = data.host_ptr;
+ IMX6SRCState *s = ri->s;
+
+ assert(qemu_mutex_iothread_locked());
+
+ s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0);
+ DPRINTF("reg[%s] <= 0x%" PRIx32 "\n",
+ imx6_src_reg_name(SRC_SCR), s->regs[SRC_SCR]);
+
+ g_free(ri);
+}
+
+static void imx6_defer_clear_reset_bit(int cpuid,
+ IMX6SRCState *s,
+ unsigned long reset_shift)
+{
+ struct SRCSCRResetInfo *ri;
+
+ ri = g_malloc(sizeof(struct SRCSCRResetInfo));
+ ri->s = s;
+ ri->reset_bit = reset_shift;
+
+ async_run_on_cpu(arm_get_cpu_by_id(cpuid), imx6_clear_reset_bit,
+ RUN_ON_CPU_HOST_PTR(ri));
+}
+
+
static void imx6_src_write(void *opaque, hwaddr offset, uint64_t value,
unsigned size)
{
@@ -153,7 +193,7 @@ static void imx6_src_write(void *opaque, hwaddr offset, uint64_t value,
arm_set_cpu_off(3);
}
/* We clear the reset bits as the processor changed state */
- clear_bit(CORE3_RST_SHIFT, &current_value);
+ imx6_defer_clear_reset_bit(3, s, CORE3_RST_SHIFT);
clear_bit(CORE3_RST_SHIFT, &change_mask);
}
if (EXTRACT(change_mask, CORE2_ENABLE)) {
@@ -162,11 +202,11 @@ static void imx6_src_write(void *opaque, hwaddr offset, uint64_t value,
arm_set_cpu_on(2, s->regs[SRC_GPR5], s->regs[SRC_GPR6],
3, false);
} else {
- /* CORE 3 is shut down */
+ /* CORE 2 is shut down */
arm_set_cpu_off(2);
}
/* We clear the reset bits as the processor changed state */
- clear_bit(CORE2_RST_SHIFT, &current_value);
+ imx6_defer_clear_reset_bit(2, s, CORE2_RST_SHIFT);
clear_bit(CORE2_RST_SHIFT, &change_mask);
}
if (EXTRACT(change_mask, CORE1_ENABLE)) {
@@ -175,28 +215,28 @@ static void imx6_src_write(void *opaque, hwaddr offset, uint64_t value,
arm_set_cpu_on(1, s->regs[SRC_GPR3], s->regs[SRC_GPR4],
3, false);
} else {
- /* CORE 3 is shut down */
+ /* CORE 1 is shut down */
arm_set_cpu_off(1);
}
/* We clear the reset bits as the processor changed state */
- clear_bit(CORE1_RST_SHIFT, &current_value);
+ imx6_defer_clear_reset_bit(1, s, CORE1_RST_SHIFT);
clear_bit(CORE1_RST_SHIFT, &change_mask);
}
if (EXTRACT(change_mask, CORE0_RST)) {
arm_reset_cpu(0);
- clear_bit(CORE0_RST_SHIFT, &current_value);
+ imx6_defer_clear_reset_bit(0, s, CORE0_RST_SHIFT);
}
if (EXTRACT(change_mask, CORE1_RST)) {
arm_reset_cpu(1);
- clear_bit(CORE1_RST_SHIFT, &current_value);
+ imx6_defer_clear_reset_bit(1, s, CORE1_RST_SHIFT);
}
if (EXTRACT(change_mask, CORE2_RST)) {
arm_reset_cpu(2);
- clear_bit(CORE2_RST_SHIFT, &current_value);
+ imx6_defer_clear_reset_bit(2, s, CORE2_RST_SHIFT);
}
if (EXTRACT(change_mask, CORE3_RST)) {
arm_reset_cpu(3);
- clear_bit(CORE3_RST_SHIFT, &current_value);
+ imx6_defer_clear_reset_bit(3, s, CORE3_RST_SHIFT);
}
if (EXTRACT(change_mask, SW_IPU2_RST)) {
/* We pretend the IPU2 is reset */
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index bf57e635d6..82ce8378bf 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -1267,10 +1267,11 @@ static void ivshmem_realize(PCIDevice *dev, Error **errp)
if (s->sizearg == NULL) {
s->legacy_size = 4 << 20; /* 4 MB default */
} else {
- char *end;
- int64_t size = qemu_strtosz(s->sizearg, &end);
- if (size < 0 || (size_t)size != size || *end != '\0'
- || !is_power_of_2(size)) {
+ int ret;
+ uint64_t size;
+
+ ret = qemu_strtosz_MiB(s->sizearg, NULL, &size);
+ if (ret < 0 || (size_t)size != size || !is_power_of_2(size)) {
error_setg(errp, "Invalid size %s", s->sizearg);
return;
}
diff --git a/hw/misc/mips_cmgcr.c b/hw/misc/mips_cmgcr.c
index b3ba16694e..a1edb53f95 100644
--- a/hw/misc/mips_cmgcr.c
+++ b/hw/misc/mips_cmgcr.c
@@ -29,6 +29,20 @@ static inline bool is_gic_connected(MIPSGCRState *s)
return s->gic_mr != NULL;
}
+static inline void update_gcr_base(MIPSGCRState *gcr, uint64_t val)
+{
+ CPUState *cpu;
+ MIPSCPU *mips_cpu;
+
+ gcr->gcr_base = val & GCR_BASE_GCRBASE_MSK;
+ memory_region_set_address(&gcr->iomem, gcr->gcr_base);
+
+ CPU_FOREACH(cpu) {
+ mips_cpu = MIPS_CPU(cpu);
+ mips_cpu->env.CP0_CMGCRBase = gcr->gcr_base >> 4;
+ }
+}
+
static inline void update_cpc_base(MIPSGCRState *gcr, uint64_t val)
{
if (is_cpc_connected(gcr)) {
@@ -117,6 +131,9 @@ static void gcr_write(void *opaque, hwaddr addr, uint64_t data, unsigned size)
MIPSGCRVPState *other_vps = &gcr->vps[current_vps->other];
switch (addr) {
+ case GCR_BASE_OFS:
+ update_gcr_base(gcr, data);
+ break;
case GCR_GIC_BASE_OFS:
update_gic_base(gcr, data);
break;
diff --git a/hw/misc/pvpanic.c b/hw/misc/pvpanic.c
index 0ac1e6ac9b..57da7f2199 100644
--- a/hw/misc/pvpanic.c
+++ b/hw/misc/pvpanic.c
@@ -42,7 +42,7 @@ static void handle_event(int event)
}
if (event & PVPANIC_PANICKED) {
- qemu_system_guest_panicked();
+ qemu_system_guest_panicked(NULL);
return;
}
}
diff --git a/hw/misc/unimp.c b/hw/misc/unimp.c
new file mode 100644
index 0000000000..bcbb585888
--- /dev/null
+++ b/hw/misc/unimp.c
@@ -0,0 +1,107 @@
+/* "Unimplemented" device
+ *
+ * This is a dummy device which accepts and logs all accesses.
+ * It's useful for stubbing out regions of an SoC or board
+ * map which correspond to devices that have not yet been
+ * implemented. This is often sufficient to placate initial
+ * guest device driver probing such that the system will
+ * come up.
+ *
+ * Copyright Linaro Limited, 2017
+ * Written by Peter Maydell
+ */
+
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "hw/misc/unimp.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+
+#define UNIMPLEMENTED_DEVICE(obj) \
+ OBJECT_CHECK(UnimplementedDeviceState, (obj), TYPE_UNIMPLEMENTED_DEVICE)
+
+typedef struct {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ char *name;
+ uint64_t size;
+} UnimplementedDeviceState;
+
+static uint64_t unimp_read(void *opaque, hwaddr offset, unsigned size)
+{
+ UnimplementedDeviceState *s = UNIMPLEMENTED_DEVICE(opaque);
+
+ qemu_log_mask(LOG_UNIMP, "%s: unimplemented device read "
+ "(size %d, offset 0x%" HWADDR_PRIx ")\n",
+ s->name, size, offset);
+ return 0;
+}
+
+static void unimp_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ UnimplementedDeviceState *s = UNIMPLEMENTED_DEVICE(opaque);
+
+ qemu_log_mask(LOG_UNIMP, "%s: unimplemented device write "
+ "(size %d, value 0x%" PRIx64
+ ", offset 0x%" HWADDR_PRIx ")\n",
+ s->name, size, value, offset);
+}
+
+static const MemoryRegionOps unimp_ops = {
+ .read = unimp_read,
+ .write = unimp_write,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 8,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 8,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void unimp_realize(DeviceState *dev, Error **errp)
+{
+ UnimplementedDeviceState *s = UNIMPLEMENTED_DEVICE(dev);
+
+ if (s->size == 0) {
+ error_setg(errp, "property 'size' not specified or zero");
+ return;
+ }
+
+ if (s->name == NULL) {
+ error_setg(errp, "property 'name' not specified");
+ return;
+ }
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &unimp_ops, s,
+ s->name, s->size);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+}
+
+static Property unimp_properties[] = {
+ DEFINE_PROP_UINT64("size", UnimplementedDeviceState, size, 0),
+ DEFINE_PROP_STRING("name", UnimplementedDeviceState, name),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void unimp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = unimp_realize;
+ dc->props = unimp_properties;
+}
+
+static const TypeInfo unimp_info = {
+ .name = TYPE_UNIMPLEMENTED_DEVICE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(UnimplementedDeviceState),
+ .class_init = unimp_class_init,
+};
+
+static void unimp_register_types(void)
+{
+ type_register_static(&unimp_info);
+}
+
+type_init(unimp_register_types)
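A minimal usage sketch of this device from a board or SoC model, assuming the usual qdev/sysbus helpers seen elsewhere in this series and the hw/misc/unimp.h header above; the device name, size and base address are purely illustrative:

    /* in a board init function */
    DeviceState *dev;

    dev = qdev_create(NULL, TYPE_UNIMPLEMENTED_DEVICE);
    qdev_prop_set_string(dev, "name", "soc.fictional-ctrl");
    qdev_prop_set_uint64(dev, "size", 0x1000);
    qdev_init_nofail(dev);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x40001000);

Guest reads then return 0 and guest writes are dropped, with both logged under LOG_UNIMP, which is usually enough to keep driver probe code from wedging.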
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index 2b11499829..28c5be1506 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1507,6 +1508,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
const E1000E_RingInfo *rxi;
size_t ps_hdr_len = 0;
bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len);
+ bool is_first = true;
rxi = rxr->i;
@@ -1514,7 +1516,6 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
hwaddr ba[MAX_PS_BUFFERS];
e1000e_ba_state bastate = { { 0 } };
bool is_last = false;
- bool is_first = true;
desc_size = total_size - desc_offset;
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
index 50c75642c6..90e6ee35ba 100644
--- a/hw/net/imx_fec.c
+++ b/hw/net/imx_fec.c
@@ -55,6 +55,8 @@
} \
} while (0)
+#define IMX_MAX_DESC 1024
+
static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
static char tmp[20];
@@ -402,12 +404,12 @@ static void imx_eth_update(IMXFECState *s)
static void imx_fec_do_tx(IMXFECState *s)
{
- int frame_size = 0;
+ int frame_size = 0, descnt = 0;
uint8_t frame[ENET_MAX_FRAME_SIZE];
uint8_t *ptr = frame;
uint32_t addr = s->tx_descriptor;
- while (1) {
+ while (descnt++ < IMX_MAX_DESC) {
IMXFECBufDesc bd;
int len;
@@ -453,12 +455,12 @@ static void imx_fec_do_tx(IMXFECState *s)
static void imx_enet_do_tx(IMXFECState *s)
{
- int frame_size = 0;
+ int frame_size = 0, descnt = 0;
uint8_t frame[ENET_MAX_FRAME_SIZE];
uint8_t *ptr = frame;
uint32_t addr = s->tx_descriptor;
- while (1) {
+ while (descnt++ < IMX_MAX_DESC) {
IMXENETBufDesc bd;
int len;
diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c
index 058908d8d7..d239e4bd7d 100644
--- a/hw/net/spapr_llan.c
+++ b/hw/net/spapr_llan.c
@@ -385,18 +385,24 @@ static int spapr_vlan_devnode(VIOsPAPRDevice *dev, void *fdt, int node_off)
int ret;
/* Some old phyp versions give the mac address in an 8-byte
- * property. The kernel driver has an insane workaround for this;
+ * property. The kernel driver (before 3.10) has an insane workaround;
* rather than doing the obvious thing and checking the property
* length, it checks whether the first byte has 0b10 in the low
* bits. If a correct 6-byte property has a different first byte
* the kernel will get the wrong mac address, overrunning its
* buffer in the process (read only, thank goodness).
*
- * Here we workaround the kernel workaround by always supplying an
- * 8-byte property, with the mac address in the last six bytes */
- memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN);
- ret = fdt_setprop(fdt, node_off, "local-mac-address",
- padded_mac, sizeof(padded_mac));
+ * Here we return a 6-byte address unless that would break a pre-3.10
+ * driver. In that case we return a padded 8-byte address to allow the old
+ * workaround to succeed. */
+ if ((vdev->nicconf.macaddr.a[0] & 0x3) == 0x2) {
+ ret = fdt_setprop(fdt, node_off, "local-mac-address",
+ &vdev->nicconf.macaddr, ETH_ALEN);
+ } else {
+ memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN);
+ ret = fdt_setprop(fdt, node_off, "local-mac-address",
+ padded_mac, sizeof(padded_mac));
+ }
if (ret < 0) {
return ret;
}
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 7b3ad4a9f0..c32168077a 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1130,7 +1130,8 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
return 0;
}
-static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ size_t size)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
@@ -1233,6 +1234,17 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
return size;
}
+static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
+ size_t size)
+{
+ ssize_t r;
+
+ rcu_read_lock();
+ r = virtio_net_receive_rcu(nc, buf, size);
+ rcu_read_unlock();
+ return r;
+}
+
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
@@ -1557,119 +1569,22 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
virtio_net_set_queues(n);
}
-static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
-{
- VirtIONet *n = VIRTIO_NET(vdev);
- int i;
-
- qemu_put_buffer(f, n->mac, ETH_ALEN);
- qemu_put_be32(f, n->vqs[0].tx_waiting);
- qemu_put_be32(f, n->mergeable_rx_bufs);
- qemu_put_be16(f, n->status);
- qemu_put_byte(f, n->promisc);
- qemu_put_byte(f, n->allmulti);
- qemu_put_be32(f, n->mac_table.in_use);
- qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
- qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
- qemu_put_be32(f, n->has_vnet_hdr);
- qemu_put_byte(f, n->mac_table.multi_overflow);
- qemu_put_byte(f, n->mac_table.uni_overflow);
- qemu_put_byte(f, n->alluni);
- qemu_put_byte(f, n->nomulti);
- qemu_put_byte(f, n->nouni);
- qemu_put_byte(f, n->nobcast);
- qemu_put_byte(f, n->has_ufo);
- if (n->max_queues > 1) {
- qemu_put_be16(f, n->max_queues);
- qemu_put_be16(f, n->curr_queues);
- for (i = 1; i < n->curr_queues; i++) {
- qemu_put_be32(f, n->vqs[i].tx_waiting);
- }
- }
-
- if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
- qemu_put_be64(f, n->curr_guest_offloads);
- }
-}
-
-static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
- int version_id)
+static int virtio_net_post_load_device(void *opaque, int version_id)
{
- VirtIONet *n = VIRTIO_NET(vdev);
+ VirtIONet *n = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
int i, link_down;
- qemu_get_buffer(f, n->mac, ETH_ALEN);
- n->vqs[0].tx_waiting = qemu_get_be32(f);
-
- virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
+ virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
virtio_vdev_has_feature(vdev,
VIRTIO_F_VERSION_1));
- n->status = qemu_get_be16(f);
-
- n->promisc = qemu_get_byte(f);
- n->allmulti = qemu_get_byte(f);
-
- n->mac_table.in_use = qemu_get_be32(f);
/* MAC_TABLE_ENTRIES may be different from the saved image */
- if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
- qemu_get_buffer(f, n->mac_table.macs,
- n->mac_table.in_use * ETH_ALEN);
- } else {
- int64_t i;
-
- /* Overflow detected - can happen if source has a larger MAC table.
- * We simply set overflow flag so there's no need to maintain the
- * table of addresses, discard them all.
- * Note: 64 bit math to avoid integer overflow.
- */
- for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
- qemu_get_byte(f);
- }
- n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
+ if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
n->mac_table.in_use = 0;
}
-
- qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
-
- if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
- error_report("virtio-net: saved image requires vnet_hdr=on");
- return -1;
- }
-
- n->mac_table.multi_overflow = qemu_get_byte(f);
- n->mac_table.uni_overflow = qemu_get_byte(f);
-
- n->alluni = qemu_get_byte(f);
- n->nomulti = qemu_get_byte(f);
- n->nouni = qemu_get_byte(f);
- n->nobcast = qemu_get_byte(f);
-
- if (qemu_get_byte(f) && !peer_has_ufo(n)) {
- error_report("virtio-net: saved image requires TUN_F_UFO support");
- return -1;
- }
-
- if (n->max_queues > 1) {
- if (n->max_queues != qemu_get_be16(f)) {
- error_report("virtio-net: different max_queues ");
- return -1;
- }
-
- n->curr_queues = qemu_get_be16(f);
- if (n->curr_queues > n->max_queues) {
- error_report("virtio-net: curr_queues %x > max_queues %x",
- n->curr_queues, n->max_queues);
- return -1;
- }
- for (i = 1; i < n->curr_queues; i++) {
- n->vqs[i].tx_waiting = qemu_get_be32(f);
- }
- }
- if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
- n->curr_guest_offloads = qemu_get_be64(f);
- } else {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
}
@@ -1703,6 +1618,210 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
return 0;
}
+/* tx_waiting field of a VirtIONetQueue */
+static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
+ .name = "virtio-net-queue-tx_waiting",
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool max_queues_gt_1(void *opaque, int version_id)
+{
+ return VIRTIO_NET(opaque)->max_queues > 1;
+}
+
+static bool has_ctrl_guest_offloads(void *opaque, int version_id)
+{
+ return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
+ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
+}
+
+static bool mac_table_fits(void *opaque, int version_id)
+{
+ return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
+}
+
+static bool mac_table_doesnt_fit(void *opaque, int version_id)
+{
+ return !mac_table_fits(opaque, version_id);
+}
+
+/* This temporary type is shared by all the WITH_TMP methods
+ * although only some fields are used by each.
+ */
+struct VirtIONetMigTmp {
+ VirtIONet *parent;
+ VirtIONetQueue *vqs_1;
+ uint16_t curr_queues_1;
+ uint8_t has_ufo;
+ uint32_t has_vnet_hdr;
+};
+
+/* The 2nd and subsequent tx_waiting flags are loaded later than
+ * the 1st entry in the queues and only if there's more than one
+ * entry. We use the tmp mechanism to calculate a temporary
+ * pointer and count and also validate the count.
+ */
+
+static void virtio_net_tx_waiting_pre_save(void *opaque)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ tmp->vqs_1 = tmp->parent->vqs + 1;
+ tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
+ if (tmp->parent->curr_queues == 0) {
+ tmp->curr_queues_1 = 0;
+ }
+}
+
+static int virtio_net_tx_waiting_pre_load(void *opaque)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ /* Reuse the pointer setup from save */
+ virtio_net_tx_waiting_pre_save(opaque);
+
+ if (tmp->parent->curr_queues > tmp->parent->max_queues) {
+ error_report("virtio-net: curr_queues %x > max_queues %x",
+ tmp->parent->curr_queues, tmp->parent->max_queues);
+
+ return -EINVAL;
+ }
+
+ return 0; /* all good */
+}
+
+static const VMStateDescription vmstate_virtio_net_tx_waiting = {
+ .name = "virtio-net-tx_waiting",
+ .pre_load = virtio_net_tx_waiting_pre_load,
+ .pre_save = virtio_net_tx_waiting_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
+ curr_queues_1,
+ vmstate_virtio_net_queue_tx_waiting,
+ struct VirtIONetQueue),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+/* the 'has_ufo' flag is just tested; if the incoming stream has the
+ * flag set we need to check that we have it
+ */
+static int virtio_net_ufo_post_load(void *opaque, int version_id)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
+ error_report("virtio-net: saved image requires TUN_F_UFO support");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void virtio_net_ufo_pre_save(void *opaque)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ tmp->has_ufo = tmp->parent->has_ufo;
+}
+
+static const VMStateDescription vmstate_virtio_net_has_ufo = {
+ .name = "virtio-net-ufo",
+ .post_load = virtio_net_ufo_post_load,
+ .pre_save = virtio_net_ufo_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
+ * flag set we need to check that we have it
+ */
+static int virtio_net_vnet_post_load(void *opaque, int version_id)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
+ error_report("virtio-net: saved image requires vnet_hdr=on");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void virtio_net_vnet_pre_save(void *opaque)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
+}
+
+static const VMStateDescription vmstate_virtio_net_has_vnet = {
+ .name = "virtio-net-vnet",
+ .post_load = virtio_net_vnet_post_load,
+ .pre_save = virtio_net_vnet_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
+ VMSTATE_END_OF_LIST()
+ },
+};
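The three WITH_TMP helpers above all rely on the same assumed contract: the migration core allocates the temporary struct around save/load, fills its first field, parent, with a pointer to the real device state, and only then runs the hooks of the sub-VMStateDescription. A minimal sketch of the pattern, with FooState and its fields purely illustrative:

    struct FooMigTmp {
        FooState *parent;   /* filled in by the migration core */
        uint32_t derived;   /* computed from parent rather than stored */
    };

    static void foo_tmp_pre_save(void *opaque)
    {
        struct FooMigTmp *tmp = opaque;
        tmp->derived = tmp->parent->a + tmp->parent->b;
    }

    static const VMStateDescription vmstate_foo_tmp = {
        .name = "foo-tmp",
        .pre_save = foo_tmp_pre_save,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(derived, struct FooMigTmp),
            VMSTATE_END_OF_LIST()
        },
    };

    /* referenced from FooState's own vmsd as:
     *     VMSTATE_WITH_TMP(FooState, struct FooMigTmp, vmstate_foo_tmp),
     */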
+
+static const VMStateDescription vmstate_virtio_net_device = {
+ .name = "virtio-net-device",
+ .version_id = VIRTIO_NET_VM_VERSION,
+ .minimum_version_id = VIRTIO_NET_VM_VERSION,
+ .post_load = virtio_net_post_load_device,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
+ VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
+ vmstate_virtio_net_queue_tx_waiting,
+ VirtIONetQueue),
+ VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
+ VMSTATE_UINT16(status, VirtIONet),
+ VMSTATE_UINT8(promisc, VirtIONet),
+ VMSTATE_UINT8(allmulti, VirtIONet),
+ VMSTATE_UINT32(mac_table.in_use, VirtIONet),
+
+ /* Guarded pair: If it fits we load it, else we throw it away
+ * - this can happen if the source has a larger MAC table; post-load
+ * resets the in-use count in this case.
+ */
+ VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
+ 0, mac_table_fits, mac_table.in_use,
+ ETH_ALEN),
+ VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
+ mac_table.in_use, ETH_ALEN),
+
+ /* Note: This is an array of uint32s that has always been saved as a
+ * buffer; hold onto your endiannesses; it's actually used as a bitmap,
+ * but stored as uint32 words.
+ */
+ VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
+ VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+ vmstate_virtio_net_has_vnet),
+ VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
+ VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
+ VMSTATE_UINT8(alluni, VirtIONet),
+ VMSTATE_UINT8(nomulti, VirtIONet),
+ VMSTATE_UINT8(nouni, VirtIONet),
+ VMSTATE_UINT8(nobcast, VirtIONet),
+ VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+ vmstate_virtio_net_has_ufo),
+ VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
+ vmstate_info_uint16_equal, uint16_t),
+ VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
+ VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+ vmstate_virtio_net_tx_waiting),
+ VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
+ has_ctrl_guest_offloads),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static NetClientInfo net_virtio_info = {
.type = NET_CLIENT_DRIVER_NIC,
.size = sizeof(NICState),
@@ -1989,9 +2108,8 @@ static void virtio_net_class_init(ObjectClass *klass, void *data)
vdc->set_status = virtio_net_set_status;
vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
- vdc->load = virtio_net_load_device;
- vdc->save = virtio_net_save_device;
vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
+ vdc->vmsd = &vmstate_virtio_net_device;
}
static const TypeInfo virtio_net_info = {
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 7dd456551c..e13a798b3b 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -2397,7 +2397,7 @@ static const VMStateDescription vmxstate_vmxnet3_mcast_list = {
.pre_load = vmxnet3_mcast_list_pre_load,
.needed = vmxnet3_mc_list_needed,
.fields = (VMStateField[]) {
- VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, 0,
+ VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL,
mcast_list_buff_size),
VMSTATE_END_OF_LIST()
}
diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c
index 63f9ed1d82..aef80e64df 100644
--- a/hw/nvram/mac_nvram.c
+++ b/hw/nvram/mac_nvram.c
@@ -82,7 +82,7 @@ static const VMStateDescription vmstate_macio_nvram = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_VBUFFER_UINT32(data, MacIONVRAMState, 0, NULL, 0, size),
+ VMSTATE_VBUFFER_UINT32(data, MacIONVRAMState, 0, NULL, size),
VMSTATE_END_OF_LIST()
}
};
diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c
index eb42ea323f..65ba188555 100644
--- a/hw/nvram/spapr_nvram.c
+++ b/hw/nvram/spapr_nvram.c
@@ -224,7 +224,7 @@ static const VMStateDescription vmstate_spapr_nvram = {
.post_load = spapr_nvram_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32(size, sPAPRNVRAM),
- VMSTATE_VBUFFER_ALLOC_UINT32(buf, sPAPRNVRAM, 1, NULL, 0, size),
+ VMSTATE_VBUFFER_ALLOC_UINT32(buf, sPAPRNVRAM, 1, NULL, size),
VMSTATE_END_OF_LIST()
},
};
diff --git a/hw/openrisc/openrisc_sim.c b/hw/openrisc/openrisc_sim.c
index 6d06d5be01..fc0d0967b7 100644
--- a/hw/openrisc/openrisc_sim.c
+++ b/hw/openrisc/openrisc_sim.c
@@ -139,10 +139,10 @@ static void openrisc_sim_init(MachineState *machine)
static void openrisc_sim_machine_init(MachineClass *mc)
{
- mc->desc = "or32 simulation";
+ mc->desc = "or1k simulation";
mc->init = openrisc_sim_init;
mc->max_cpus = 1;
mc->is_default = 1;
}
-DEFINE_MACHINE("or32-sim", openrisc_sim_machine_init)
+DEFINE_MACHINE("or1k-sim", openrisc_sim_machine_init)
diff --git a/hw/pci-host/Makefile.objs b/hw/pci-host/Makefile.objs
index 45f1f0ebab..9c7909cf44 100644
--- a/hw/pci-host/Makefile.objs
+++ b/hw/pci-host/Makefile.objs
@@ -16,3 +16,4 @@ common-obj-$(CONFIG_FULONG) += bonito.o
common-obj-$(CONFIG_PCI_PIIX) += piix.o
common-obj-$(CONFIG_PCI_Q35) += q35.o
common-obj-$(CONFIG_PCI_GENERIC) += gpex.o
+common-obj-$(CONFIG_PCI_XILINX) += xilinx-pcie.o
diff --git a/hw/pci-host/prep.c b/hw/pci-host/prep.c
index 5580293f93..260a119a9e 100644
--- a/hw/pci-host/prep.c
+++ b/hw/pci-host/prep.c
@@ -309,7 +309,6 @@ static void raven_realize(PCIDevice *d, Error **errp)
memory_region_set_readonly(&s->bios, true);
memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE),
&s->bios);
- vmstate_register_ram_global(&s->bios);
if (s->bios_name) {
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, s->bios_name);
if (filename) {
@@ -328,12 +327,15 @@ static void raven_realize(PCIDevice *d, Error **errp)
}
}
}
+ g_free(filename);
if (bios_size < 0 || bios_size > BIOS_SIZE) {
- /* FIXME should error_setg() */
- hw_error("qemu: could not load bios image '%s'\n", s->bios_name);
+ memory_region_del_subregion(get_system_memory(), &s->bios);
+ error_setg(errp, "Could not load bios image '%s'", s->bios_name);
+ return;
}
- g_free(filename);
}
+
+ vmstate_register_ram_global(&s->bios);
}
static const VMStateDescription vmstate_raven = {
@@ -361,7 +363,6 @@ static void raven_class_init(ObjectClass *klass, void *data)
/*
* Reason: PCI-facing part of the host bridge, not usable without
* the host-facing part, which can't be device_add'ed, yet.
- * Reason: realize() method uses hw_error().
*/
dc->cannot_instantiate_with_device_add_yet = true;
}
diff --git a/hw/pci-host/xilinx-pcie.c b/hw/pci-host/xilinx-pcie.c
new file mode 100644
index 0000000000..8b71e2d950
--- /dev/null
+++ b/hw/pci-host/xilinx-pcie.c
@@ -0,0 +1,328 @@
+/*
+ * Xilinx PCIe host controller emulation.
+ *
+ * Copyright (c) 2016 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pci_bridge.h"
+#include "hw/pci-host/xilinx-pcie.h"
+
+enum root_cfg_reg {
+ /* Interrupt Decode Register */
+ ROOTCFG_INTDEC = 0x138,
+
+ /* Interrupt Mask Register */
+ ROOTCFG_INTMASK = 0x13c,
+ /* INTx Interrupt Received */
+#define ROOTCFG_INTMASK_INTX (1 << 16)
+ /* MSI Interrupt Received */
+#define ROOTCFG_INTMASK_MSI (1 << 17)
+
+ /* PHY Status/Control Register */
+ ROOTCFG_PSCR = 0x144,
+ /* Link Up */
+#define ROOTCFG_PSCR_LINK_UP (1 << 11)
+
+ /* Root Port Status/Control Register */
+ ROOTCFG_RPSCR = 0x148,
+ /* Bridge Enable */
+#define ROOTCFG_RPSCR_BRIDGEEN (1 << 0)
+ /* Interrupt FIFO Not Empty */
+#define ROOTCFG_RPSCR_INTNEMPTY (1 << 18)
+ /* Interrupt FIFO Overflow */
+#define ROOTCFG_RPSCR_INTOVF (1 << 19)
+
+ /* Root Port Interrupt FIFO Read Register 1 */
+ ROOTCFG_RPIFR1 = 0x158,
+#define ROOTCFG_RPIFR1_INT_LANE_SHIFT 27
+#define ROOTCFG_RPIFR1_INT_ASSERT_SHIFT 29
+#define ROOTCFG_RPIFR1_INT_VALID_SHIFT 31
+ /* Root Port Interrupt FIFO Read Register 2 */
+ ROOTCFG_RPIFR2 = 0x15c,
+};
+
+static void xilinx_pcie_update_intr(XilinxPCIEHost *s,
+ uint32_t set, uint32_t clear)
+{
+ int level;
+
+ s->intr |= set;
+ s->intr &= ~clear;
+
+ if (s->intr_fifo_r != s->intr_fifo_w) {
+ s->intr |= ROOTCFG_INTMASK_INTX;
+ }
+
+ level = !!(s->intr & s->intr_mask);
+ qemu_set_irq(s->irq, level);
+}
+
+static void xilinx_pcie_queue_intr(XilinxPCIEHost *s,
+ uint32_t fifo_reg1, uint32_t fifo_reg2)
+{
+ XilinxPCIEInt *intr;
+ unsigned int new_w;
+
+ new_w = (s->intr_fifo_w + 1) % ARRAY_SIZE(s->intr_fifo);
+ if (new_w == s->intr_fifo_r) {
+ s->rpscr |= ROOTCFG_RPSCR_INTOVF;
+ return;
+ }
+
+ intr = &s->intr_fifo[s->intr_fifo_w];
+ s->intr_fifo_w = new_w;
+
+ intr->fifo_reg1 = fifo_reg1;
+ intr->fifo_reg2 = fifo_reg2;
+
+ xilinx_pcie_update_intr(s, ROOTCFG_INTMASK_INTX, 0);
+}
+
+static void xilinx_pcie_set_irq(void *opaque, int irq_num, int level)
+{
+ XilinxPCIEHost *s = XILINX_PCIE_HOST(opaque);
+
+ xilinx_pcie_queue_intr(s,
+ (irq_num << ROOTCFG_RPIFR1_INT_LANE_SHIFT) |
+ (level << ROOTCFG_RPIFR1_INT_ASSERT_SHIFT) |
+ (1 << ROOTCFG_RPIFR1_INT_VALID_SHIFT),
+ 0);
+}
+
+static void xilinx_pcie_host_realize(DeviceState *dev, Error **errp)
+{
+ PCIHostState *pci = PCI_HOST_BRIDGE(dev);
+ XilinxPCIEHost *s = XILINX_PCIE_HOST(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev);
+
+ snprintf(s->name, sizeof(s->name), "pcie%u", s->bus_nr);
+
+ /* PCI configuration space */
+ pcie_host_mmcfg_init(pex, s->cfg_size);
+
+ /* MMIO region */
+ memory_region_init(&s->mmio, OBJECT(s), "mmio", UINT64_MAX);
+ memory_region_set_enabled(&s->mmio, false);
+
+ /* dummy I/O region */
+ memory_region_init_ram(&s->io, OBJECT(s), "io", 16, NULL);
+ memory_region_set_enabled(&s->io, false);
+
+ /* interrupt out */
+ qdev_init_gpio_out_named(dev, &s->irq, "interrupt_out", 1);
+
+ sysbus_init_mmio(sbd, &pex->mmio);
+ sysbus_init_mmio(sbd, &s->mmio);
+
+ pci->bus = pci_register_bus(dev, s->name, xilinx_pcie_set_irq,
+ pci_swizzle_map_irq_fn, s, &s->mmio,
+ &s->io, 0, 4, TYPE_PCIE_BUS);
+
+ qdev_set_parent_bus(DEVICE(&s->root), BUS(pci->bus));
+ qdev_init_nofail(DEVICE(&s->root));
+}
+
+static const char *xilinx_pcie_host_root_bus_path(PCIHostState *host_bridge,
+ PCIBus *rootbus)
+{
+ return "0000:00";
+}
+
+static void xilinx_pcie_host_init(Object *obj)
+{
+ XilinxPCIEHost *s = XILINX_PCIE_HOST(obj);
+ XilinxPCIERoot *root = &s->root;
+
+ object_initialize(root, sizeof(*root), TYPE_XILINX_PCIE_ROOT);
+ object_property_add_child(obj, "root", OBJECT(root), NULL);
+ qdev_prop_set_uint32(DEVICE(root), "addr", PCI_DEVFN(0, 0));
+ qdev_prop_set_bit(DEVICE(root), "multifunction", false);
+}
+
+static Property xilinx_pcie_host_props[] = {
+ DEFINE_PROP_UINT32("bus_nr", XilinxPCIEHost, bus_nr, 0),
+ DEFINE_PROP_SIZE("cfg_base", XilinxPCIEHost, cfg_base, 0),
+ DEFINE_PROP_SIZE("cfg_size", XilinxPCIEHost, cfg_size, 32 << 20),
+ DEFINE_PROP_SIZE("mmio_base", XilinxPCIEHost, mmio_base, 0),
+ DEFINE_PROP_SIZE("mmio_size", XilinxPCIEHost, mmio_size, 1 << 20),
+ DEFINE_PROP_BOOL("link_up", XilinxPCIEHost, link_up, true),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xilinx_pcie_host_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
+
+ hc->root_bus_path = xilinx_pcie_host_root_bus_path;
+ dc->realize = xilinx_pcie_host_realize;
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ dc->fw_name = "pci";
+ dc->props = xilinx_pcie_host_props;
+}
+
+static const TypeInfo xilinx_pcie_host_info = {
+ .name = TYPE_XILINX_PCIE_HOST,
+ .parent = TYPE_PCIE_HOST_BRIDGE,
+ .instance_size = sizeof(XilinxPCIEHost),
+ .instance_init = xilinx_pcie_host_init,
+ .class_init = xilinx_pcie_host_class_init,
+};
+
+static uint32_t xilinx_pcie_root_config_read(PCIDevice *d,
+ uint32_t address, int len)
+{
+ XilinxPCIEHost *s = XILINX_PCIE_HOST(OBJECT(d)->parent);
+ uint32_t val;
+
+ switch (address) {
+ case ROOTCFG_INTDEC:
+ val = s->intr;
+ break;
+ case ROOTCFG_INTMASK:
+ val = s->intr_mask;
+ break;
+ case ROOTCFG_PSCR:
+ val = s->link_up ? ROOTCFG_PSCR_LINK_UP : 0;
+ break;
+ case ROOTCFG_RPSCR:
+ if (s->intr_fifo_r != s->intr_fifo_w) {
+ s->rpscr &= ~ROOTCFG_RPSCR_INTNEMPTY;
+ } else {
+ s->rpscr |= ROOTCFG_RPSCR_INTNEMPTY;
+ }
+ val = s->rpscr;
+ break;
+ case ROOTCFG_RPIFR1:
+ if (s->intr_fifo_w == s->intr_fifo_r) {
+ /* FIFO empty */
+ val = 0;
+ } else {
+ val = s->intr_fifo[s->intr_fifo_r].fifo_reg1;
+ }
+ break;
+ case ROOTCFG_RPIFR2:
+ if (s->intr_fifo_w == s->intr_fifo_r) {
+ /* FIFO empty */
+ val = 0;
+ } else {
+ val = s->intr_fifo[s->intr_fifo_r].fifo_reg2;
+ }
+ break;
+ default:
+ val = pci_default_read_config(d, address, len);
+ break;
+ }
+ return val;
+}
+
+static void xilinx_pcie_root_config_write(PCIDevice *d, uint32_t address,
+ uint32_t val, int len)
+{
+ XilinxPCIEHost *s = XILINX_PCIE_HOST(OBJECT(d)->parent);
+ switch (address) {
+ case ROOTCFG_INTDEC:
+ xilinx_pcie_update_intr(s, 0, val);
+ break;
+ case ROOTCFG_INTMASK:
+ s->intr_mask = val;
+ xilinx_pcie_update_intr(s, 0, 0);
+ break;
+ case ROOTCFG_RPSCR:
+ s->rpscr &= ~ROOTCFG_RPSCR_BRIDGEEN;
+ s->rpscr |= val & ROOTCFG_RPSCR_BRIDGEEN;
+ memory_region_set_enabled(&s->mmio, val & ROOTCFG_RPSCR_BRIDGEEN);
+
+ if (val & ROOTCFG_INTMASK_INTX) {
+ s->rpscr &= ~ROOTCFG_INTMASK_INTX;
+ }
+ break;
+ case ROOTCFG_RPIFR1:
+ case ROOTCFG_RPIFR2:
+ if (s->intr_fifo_w == s->intr_fifo_r) {
+ /* FIFO empty */
+ return;
+ } else {
+ s->intr_fifo_r = (s->intr_fifo_r + 1) % ARRAY_SIZE(s->intr_fifo);
+ }
+ break;
+ default:
+ pci_default_write_config(d, address, val, len);
+ break;
+ }
+}
+
+static int xilinx_pcie_root_init(PCIDevice *dev)
+{
+ BusState *bus = qdev_get_parent_bus(DEVICE(dev));
+ XilinxPCIEHost *s = XILINX_PCIE_HOST(bus->parent);
+
+ pci_set_word(dev->config + PCI_COMMAND,
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ pci_set_word(dev->config + PCI_MEMORY_BASE, s->mmio_base >> 16);
+ pci_set_word(dev->config + PCI_MEMORY_LIMIT,
+ ((s->mmio_base + s->mmio_size - 1) >> 16) & 0xfff0);
+
+ pci_bridge_initfn(dev, TYPE_PCI_BUS);
+
+ if (pcie_endpoint_cap_v1_init(dev, 0x80) < 0) {
+ hw_error("Failed to initialize PCIe capability");
+ }
+
+ return 0;
+}
+
+static void xilinx_pcie_root_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ dc->desc = "Xilinx AXI-PCIe Host Bridge";
+ k->vendor_id = PCI_VENDOR_ID_XILINX;
+ k->device_id = 0x7021;
+ k->revision = 0;
+ k->class_id = PCI_CLASS_BRIDGE_HOST;
+ k->is_express = true;
+ k->is_bridge = true;
+ k->init = xilinx_pcie_root_init;
+ k->exit = pci_bridge_exitfn;
+ dc->reset = pci_bridge_reset;
+ k->config_read = xilinx_pcie_root_config_read;
+ k->config_write = xilinx_pcie_root_config_write;
+ /*
+ * PCI-facing part of the host bridge, not usable without the
+ * host-facing part, which can't be device_add'ed, yet.
+ */
+ dc->cannot_instantiate_with_device_add_yet = true;
+}
+
+static const TypeInfo xilinx_pcie_root_info = {
+ .name = TYPE_XILINX_PCIE_ROOT,
+ .parent = TYPE_PCI_BRIDGE,
+ .instance_size = sizeof(XilinxPCIERoot),
+ .class_init = xilinx_pcie_root_class_init,
+};
+
+static void xilinx_pcie_register(void)
+{
+ type_register_static(&xilinx_pcie_root_info);
+ type_register_static(&xilinx_pcie_host_info);
+}
+
+type_init(xilinx_pcie_register)
diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c
index cbd4bb4f8c..fc54bfd53d 100644
--- a/hw/pci/pcie.c
+++ b/hw/pci/pcie.c
@@ -610,7 +610,8 @@ bool pcie_cap_is_arifwd_enabled(const PCIDevice *dev)
* uint16_t ext_cap_size
*/
-static uint16_t pcie_find_capability_list(PCIDevice *dev, uint16_t cap_id,
+/* Passing a cap_id value > 0xffff will return 0 and put end of list in prev */
+static uint16_t pcie_find_capability_list(PCIDevice *dev, uint32_t cap_id,
uint16_t *prev_p)
{
uint16_t prev = 0;
@@ -664,30 +665,24 @@ void pcie_add_capability(PCIDevice *dev,
uint16_t cap_id, uint8_t cap_ver,
uint16_t offset, uint16_t size)
{
- uint32_t header;
- uint16_t next;
-
assert(offset >= PCI_CONFIG_SPACE_SIZE);
assert(offset < offset + size);
assert(offset + size <= PCIE_CONFIG_SPACE_SIZE);
assert(size >= 8);
assert(pci_is_express(dev));
- if (offset == PCI_CONFIG_SPACE_SIZE) {
- header = pci_get_long(dev->config + offset);
- next = PCI_EXT_CAP_NEXT(header);
- } else {
+ if (offset != PCI_CONFIG_SPACE_SIZE) {
uint16_t prev;
- /* 0 is reserved cap id. use internally to find the last capability
- in the linked list */
- next = pcie_find_capability_list(dev, 0, &prev);
-
+ /*
+ * 0xffffffff is not a valid cap id (it's a 16-bit field); it is used
+ * internally to find the last capability in the linked list.
+ */
+ pcie_find_capability_list(dev, 0xffffffff, &prev);
assert(prev >= PCI_CONFIG_SPACE_SIZE);
- assert(next == 0);
pcie_ext_cap_set_next(dev, prev, offset);
}
- pci_set_long(dev->config + offset, PCI_EXT_CAP(cap_id, cap_ver, next));
+ pci_set_long(dev->config + offset, PCI_EXT_CAP(cap_id, cap_ver, 0));
/* Make capability read-only by default */
memset(dev->wmask + offset, 0, size);
diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c
index daf1f65427..a8c18203d6 100644
--- a/hw/pci/pcie_aer.c
+++ b/hw/pci/pcie_aer.c
@@ -1025,8 +1025,8 @@ void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict)
return;
}
- assert(qobject_type(data) == QTYPE_QDICT);
qdict = qobject_to_qdict(data);
+ assert(qdict);
devfn = (int)qdict_get_int(qdict, "devfn");
monitor_printf(mon, "OK id: %s root bus: %s, bus: %x devfn: %x.%x\n",
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
index 2bfdb643df..68aaedc06d 100644
--- a/hw/ppc/mac_newworld.c
+++ b/hw/ppc/mac_newworld.c
@@ -72,6 +72,7 @@
#include "exec/address-spaces.h"
#include "hw/sysbus.h"
#include "qemu/cutils.h"
+#include "trace.h"
#define MAX_IDE_BUS 2
#define CFG_ADDR 0xf0000510
@@ -79,21 +80,11 @@
#define CLOCKFREQ (266UL * 1000UL * 1000UL)
#define BUSFREQ (100UL * 1000UL * 1000UL)
-/* debug UniNorth */
-//#define DEBUG_UNIN
-
-#ifdef DEBUG_UNIN
-#define UNIN_DPRINTF(fmt, ...) \
- do { printf("UNIN: " fmt , ## __VA_ARGS__); } while (0)
-#else
-#define UNIN_DPRINTF(fmt, ...)
-#endif
-
/* UniN device */
static void unin_write(void *opaque, hwaddr addr, uint64_t value,
unsigned size)
{
- UNIN_DPRINTF("write addr " TARGET_FMT_plx " val %"PRIx64"\n", addr, value);
+ trace_mac99_uninorth_write(addr, value);
if (addr == 0x0) {
*(int*)opaque = value;
}
@@ -109,7 +100,7 @@ static uint64_t unin_read(void *opaque, hwaddr addr, unsigned size)
value = *(int*)opaque;
}
- UNIN_DPRINTF("readl addr " TARGET_FMT_plx " val %x\n", addr, value);
+ trace_mac99_uninorth_read(addr, value);
return value;
}
@@ -518,6 +509,7 @@ static void core99_machine_class_init(ObjectClass *oc, void *data)
mc->desc = "Mac99 based PowerMAC";
mc->init = ppc_core99_init;
+ mc->block_default_type = IF_IDE;
mc->max_cpus = MAX_CPUS;
mc->default_boot_order = "cd";
mc->kvm_type = core99_kvm_type;
diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c
index 56282c5bc6..5df94e239b 100644
--- a/hw/ppc/mac_oldworld.c
+++ b/hw/ppc/mac_oldworld.c
@@ -368,6 +368,7 @@ static void heathrow_machine_init(MachineClass *mc)
{
mc->desc = "Heathrow based PowerMAC";
mc->init = ppc_heathrow_init;
+ mc->block_default_type = IF_IDE;
mc->max_cpus = MAX_CPUS;
#ifndef TARGET_PPC64
mc->is_default = 1;
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index 4fab5c0ae7..09f0d22def 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -381,7 +381,7 @@ static void ppc_powernv_init(MachineState *machine)
fw_size = load_image_targphys(fw_filename, FW_LOAD_ADDR, FW_MAX_SIZE);
if (fw_size < 0) {
- error_report("qemu: could not load OPAL '%s'", fw_filename);
+ error_report("Could not load OPAL '%s'", fw_filename);
exit(1);
}
g_free(fw_filename);
@@ -393,7 +393,7 @@ static void ppc_powernv_init(MachineState *machine)
kernel_size = load_image_targphys(machine->kernel_filename,
KERNEL_LOAD_ADDR, 0x2000000);
if (kernel_size < 0) {
- error_report("qemu: could not load kernel'%s'",
+ error_report("Could not load kernel '%s'",
machine->kernel_filename);
exit(1);
}
@@ -405,7 +405,7 @@ static void ppc_powernv_init(MachineState *machine)
pnv->initrd_size = load_image_targphys(machine->initrd_filename,
pnv->initrd_base, 0x10000000); /* 128MB max */
if (pnv->initrd_size < 0) {
- error_report("qemu: could not load initial ram disk '%s'",
+ error_report("Could not load initial ram disk '%s'",
machine->initrd_filename);
exit(1);
}
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index d171e60b5c..5f93083d4a 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -62,7 +62,16 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- unsigned int old_pending = env->pending_interrupts;
+ unsigned int old_pending;
+ bool locked = false;
+
+ /* We may already have the BQL if coming from the reset path */
+ if (!qemu_mutex_iothread_locked()) {
+ locked = true;
+ qemu_mutex_lock_iothread();
+ }
+
+ old_pending = env->pending_interrupts;
if (level) {
env->pending_interrupts |= 1 << n_IRQ;
@@ -80,9 +89,14 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
#endif
}
+
LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
"req %08x\n", __func__, env, n_IRQ, level,
env->pending_interrupts, CPU(cpu)->interrupt_request);
+
+ if (locked) {
+ qemu_mutex_unlock_iothread();
+ }
}
/* PowerPC 6xx / 7xx internal IRQ controller */
diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c
index d6d3fc2c4a..d5df94aa6e 100644
--- a/hw/ppc/ppc405_uc.c
+++ b/hw/ppc/ppc405_uc.c
@@ -1881,7 +1881,7 @@ static void ppc405cr_clk_setup (ppc405cr_cpc_t *cpc)
D1 = (((cpc->pllmr >> 20) - 1) & 0xF) + 1; /* FBDV */
D2 = 8 - ((cpc->pllmr >> 16) & 0x7); /* FWDVA */
M = D0 * D1 * D2;
- VCO_out = cpc->sysclk * M;
+ VCO_out = (uint64_t)cpc->sysclk * M;
if (VCO_out < 400000000 || VCO_out > 800000000) {
/* PLL cannot lock */
cpc->pllmr &= ~0x80000000;
@@ -1892,7 +1892,7 @@ static void ppc405cr_clk_setup (ppc405cr_cpc_t *cpc)
/* Bypass PLL */
bypass_pll:
M = D0;
- PLL_out = cpc->sysclk * M;
+ PLL_out = (uint64_t)cpc->sysclk * M;
}
CPU_clk = PLL_out;
if (cpc->cr1 & 0x00800000)
@@ -2242,7 +2242,7 @@ static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc)
#ifdef DEBUG_CLOCKS_LL
printf("FWDA %01" PRIx32 " %d\n", (cpc->pllmr[1] >> 16) & 0x7, D);
#endif
- VCO_out = cpc->sysclk * M * D;
+ VCO_out = (uint64_t)cpc->sysclk * M * D;
if (VCO_out < 500000000UL || VCO_out > 1000000000UL) {
/* Error - unlock the PLL */
printf("VCO out of range %" PRIu64 "\n", VCO_out);
diff --git a/hw/ppc/ppc4xx_pci.c b/hw/ppc/ppc4xx_pci.c
index 683218e5c5..dc19682970 100644
--- a/hw/ppc/ppc4xx_pci.c
+++ b/hw/ppc/ppc4xx_pci.c
@@ -26,13 +26,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "exec/address-spaces.h"
-
-#undef DEBUG
-#ifdef DEBUG
-#define DPRINTF(fmt, ...) do { printf(fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...)
-#endif /* DEBUG */
+#include "trace.h"
struct PCIMasterMap {
uint32_t la;
@@ -249,8 +243,7 @@ static int ppc4xx_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
int slot = pci_dev->devfn >> 3;
- DPRINTF("%s: devfn %x irq %d -> %d\n", __func__,
- pci_dev->devfn, irq_num, slot);
+ trace_ppc4xx_pci_map_irq(pci_dev->devfn, irq_num, slot);
return slot - 1;
}
@@ -259,7 +252,7 @@ static void ppc4xx_pci_set_irq(void *opaque, int irq_num, int level)
{
qemu_irq *pci_irqs = opaque;
- DPRINTF("%s: PCI irq %d\n", __func__, irq_num);
+ trace_ppc4xx_pci_set_irq(irq_num);
if (irq_num < 0) {
fprintf(stderr, "%s: PCI irq %d\n", __func__, irq_num);
return;
diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c
index ca7959c126..961230c569 100644
--- a/hw/ppc/prep.c
+++ b/hw/ppc/prep.c
@@ -684,6 +684,7 @@ static void prep_machine_init(MachineClass *mc)
{
mc->desc = "PowerPC PREP platform";
mc->init = ppc_prep_init;
+ mc->block_default_type = IF_IDE;
mc->max_cpus = MAX_CPUS;
mc->default_boot_order = "cad";
}
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index e465d7ac98..87d8366c44 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -958,7 +958,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
_FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
}
- if (mc->query_hotpluggable_cpus) {
+ if (mc->has_hotpluggable_cpus) {
int offset = fdt_path_offset(fdt, "/cpus");
ret = spapr_drc_populate_dt(fdt, offset, NULL,
SPAPR_DR_CONNECTOR_TYPE_CPU);
@@ -1010,6 +1010,9 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
{
CPUPPCState *env = &cpu->env;
+ /* The TCG path should also be holding the BQL at this point */
+ g_assert(qemu_mutex_iothread_locked());
+
if (msr_pr) {
hcall_dprintf("Hypercall made with MSR[PR]=1\n");
env->gpr[3] = H_PRIVILEGE;
@@ -1751,13 +1754,28 @@ static void spapr_validate_node_memory(MachineState *machine, Error **errp)
}
}
+/* find cpu slot in machine->possible_cpus by core_id */
+static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
+{
+ int index = id / smp_threads;
+
+ if (index >= ms->possible_cpus->len) {
+ return NULL;
+ }
+ if (idx) {
+ *idx = index;
+ }
+ return &ms->possible_cpus->cpus[index];
+}
+
static void spapr_init_cpus(sPAPRMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
MachineClass *mc = MACHINE_GET_CLASS(machine);
char *type = spapr_get_cpu_core_type(machine->cpu_model);
int smt = kvmppc_smt_threads();
- int spapr_max_cores, spapr_cores;
+ const CPUArchIdList *possible_cpus;
+ int boot_cores_nr = smp_cpus / smp_threads;
int i;
if (!type) {
@@ -1765,7 +1783,8 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
exit(1);
}
- if (mc->query_hotpluggable_cpus) {
+ possible_cpus = mc->possible_cpu_arch_ids(machine);
+ if (mc->has_hotpluggable_cpus) {
if (smp_cpus % smp_threads) {
error_report("smp_cpus (%u) must be multiple of threads (%u)",
smp_cpus, smp_threads);
@@ -1776,24 +1795,18 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
max_cpus, smp_threads);
exit(1);
}
-
- spapr_max_cores = max_cpus / smp_threads;
- spapr_cores = smp_cpus / smp_threads;
} else {
if (max_cpus != smp_cpus) {
error_report("This machine version does not support CPU hotplug");
exit(1);
}
-
- spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
- spapr_cores = spapr_max_cores;
+ boot_cores_nr = possible_cpus->len;
}
- spapr->cores = g_new0(Object *, spapr_max_cores);
- for (i = 0; i < spapr_max_cores; i++) {
+ for (i = 0; i < possible_cpus->len; i++) {
int core_id = i * smp_threads;
- if (mc->query_hotpluggable_cpus) {
+ if (mc->has_hotpluggable_cpus) {
sPAPRDRConnector *drc =
spapr_dr_connector_new(OBJECT(spapr),
SPAPR_DR_CONNECTOR_TYPE_CPU,
@@ -1802,7 +1815,7 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
qemu_register_reset(spapr_drc_reset, drc);
}
- if (i < spapr_cores) {
+ if (i < boot_cores_nr) {
Object *core = object_new(type);
int nr_threads = smp_threads;
@@ -2357,6 +2370,7 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
uint64_t align = memory_region_get_alignment(mr);
uint64_t size = memory_region_size(mr);
uint64_t addr;
+ char *mem_dev;
if (size % SPAPR_MEMORY_BLOCK_SIZE) {
error_setg(&local_err, "Hotplugged memory size must be a multiple of "
@@ -2364,6 +2378,13 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
goto out;
}
+ mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
+ if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
+ error_setg(&local_err, "Memory backend has bad page size. "
+ "Use 'memory-backend-file' with correct mem-path.");
+ goto out;
+ }
+
pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
if (local_err) {
goto out;
@@ -2488,6 +2509,165 @@ void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
return fdt;
}
+static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ CPUCore *cc = CPU_CORE(dev);
+ CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
+
+ core_slot->cpu = NULL;
+ object_unparent(OBJECT(dev));
+}
+
+static void spapr_core_release(DeviceState *dev, void *opaque)
+{
+ HotplugHandler *hotplug_ctrl;
+
+ hotplug_ctrl = qdev_get_hotplug_handler(dev);
+ hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
+}
+
+static
+void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ int index;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ Error *local_err = NULL;
+ CPUCore *cc = CPU_CORE(dev);
+ int smt = kvmppc_smt_threads();
+
+ if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
+ error_setg(errp, "Unable to find CPU core with core-id: %d",
+ cc->core_id);
+ return;
+ }
+ if (index == 0) {
+ error_setg(errp, "Boot CPU core may not be unplugged");
+ return;
+ }
+
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
+ g_assert(drc);
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ drck->detach(drc, dev, spapr_core_release, NULL, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ spapr_hotplug_req_remove_by_index(drc);
+}
+
+static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
+ MachineClass *mc = MACHINE_GET_CLASS(spapr);
+ sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
+ CPUCore *cc = CPU_CORE(dev);
+ CPUState *cs = CPU(core->threads);
+ sPAPRDRConnector *drc;
+ Error *local_err = NULL;
+ void *fdt = NULL;
+ int fdt_offset = 0;
+ int smt = kvmppc_smt_threads();
+ CPUArchId *core_slot;
+ int index;
+
+ core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
+ if (!core_slot) {
+ error_setg(errp, "Unable to find CPU core with core-id: %d",
+ cc->core_id);
+ return;
+ }
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
+
+ g_assert(drc || !mc->has_hotpluggable_cpus);
+
+ /*
+ * Set up CPU DT entries only for hotplugged CPUs. For boot-time or
+ * coldplugged CPUs, DT entries are set up in spapr_build_fdt().
+ */
+ if (dev->hotplugged) {
+ fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
+ }
+
+ if (drc) {
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, &local_err);
+ if (local_err) {
+ g_free(fdt);
+ error_propagate(errp, local_err);
+ return;
+ }
+ }
+
+ if (dev->hotplugged) {
+ /*
+ * Send hotplug notification interrupt to the guest only in case
+ * of hotplugged CPUs.
+ */
+ spapr_hotplug_req_add_by_index(drc);
+ } else {
+ /*
+ * Set the right DRC states for cold plugged CPU.
+ */
+ if (drc) {
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
+ drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
+ }
+ }
+ core_slot->cpu = OBJECT(dev);
+}
+
+static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+ MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
+ Error *local_err = NULL;
+ CPUCore *cc = CPU_CORE(dev);
+ char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
+ const char *type = object_get_typename(OBJECT(dev));
+ CPUArchId *core_slot;
+ int index;
+
+ if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
+ error_setg(&local_err, "CPU hotplug not supported for this machine");
+ goto out;
+ }
+
+ if (strcmp(base_core_type, type)) {
+ error_setg(&local_err, "CPU core type should be %s", base_core_type);
+ goto out;
+ }
+
+ if (cc->core_id % smp_threads) {
+ error_setg(&local_err, "invalid core id %d", cc->core_id);
+ goto out;
+ }
+
+ core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
+ if (!core_slot) {
+ error_setg(&local_err, "core id %d out of range", cc->core_id);
+ goto out;
+ }
+
+ if (core_slot->cpu) {
+ error_setg(&local_err, "core %d already populated", cc->core_id);
+ goto out;
+ }
+
+out:
+ g_free(base_core_type);
+ error_propagate(errp, local_err);
+}
+
static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -2550,7 +2730,7 @@ static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
error_setg(errp, "Memory hot unplug not supported for this guest");
}
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
- if (!mc->query_hotpluggable_cpus) {
+ if (!mc->has_hotpluggable_cpus) {
error_setg(errp, "CPU hot unplug not supported on this machine");
return;
}
@@ -2577,11 +2757,11 @@ static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
error_setg(errp, "Memory hot unplug not supported for this guest");
}
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
- if (!mc->query_hotpluggable_cpus) {
+ if (!mc->has_hotpluggable_cpus) {
error_setg(errp, "CPU hot unplug not supported on this machine");
return;
}
- spapr_core_unplug(hotplug_dev, dev, errp);
+ spapr_core_unplug_request(hotplug_dev, dev, errp);
}
}
@@ -2610,35 +2790,34 @@ static unsigned spapr_cpu_index_to_socket_id(unsigned cpu_index)
return cpu_index / smp_threads / smp_cores;
}
-static HotpluggableCPUList *spapr_query_hotpluggable_cpus(MachineState *machine)
+static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
int i;
- HotpluggableCPUList *head = NULL;
- sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
int spapr_max_cores = max_cpus / smp_threads;
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
- for (i = 0; i < spapr_max_cores; i++) {
- HotpluggableCPUList *list_item = g_new0(typeof(*list_item), 1);
- HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1);
- CpuInstanceProperties *cpu_props = g_new0(typeof(*cpu_props), 1);
+ if (!mc->has_hotpluggable_cpus) {
+ spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
+ }
+ if (machine->possible_cpus) {
+ assert(machine->possible_cpus->len == spapr_max_cores);
+ return machine->possible_cpus;
+ }
+
+ machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * spapr_max_cores);
+ machine->possible_cpus->len = spapr_max_cores;
+ for (i = 0; i < machine->possible_cpus->len; i++) {
+ int core_id = i * smp_threads;
- cpu_item->type = spapr_get_cpu_core_type(machine->cpu_model);
- cpu_item->vcpus_count = smp_threads;
- cpu_props->has_core_id = true;
- cpu_props->core_id = i * smp_threads;
+ machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
+ machine->possible_cpus->cpus[i].arch_id = core_id;
+ machine->possible_cpus->cpus[i].props.has_core_id = true;
+ machine->possible_cpus->cpus[i].props.core_id = core_id;
/* TODO: add 'has_node/node' here to describe
to which node core belongs */
-
- cpu_item->props = cpu_props;
- if (spapr->cores[i]) {
- cpu_item->has_qom_path = true;
- cpu_item->qom_path = object_get_canonical_path(spapr->cores[i]);
- }
- list_item->value = cpu_item;
- list_item->next = head;
- head = list_item;
}
- return head;
+ return machine->possible_cpus;
}
static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
@@ -2724,11 +2903,12 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
hc->plug = spapr_machine_device_plug;
hc->unplug = spapr_machine_device_unplug;
mc->cpu_index_to_socket_id = spapr_cpu_index_to_socket_id;
+ mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
hc->unplug_request = spapr_machine_device_unplug_request;
smc->dr_lmb_enabled = true;
smc->tcg_default_cpu = "POWER8";
- mc->query_hotpluggable_cpus = spapr_query_hotpluggable_cpus;
+ mc->has_hotpluggable_cpus = true;
fwc->get_dev_path = spapr_get_fw_dev_path;
nc->nmi_monitor_handler = spapr_nmi;
smc->phb_placement = spapr_phb_placement;
@@ -2928,7 +3108,7 @@ static void spapr_machine_2_6_instance_options(MachineState *machine)
static void spapr_machine_2_6_class_options(MachineClass *mc)
{
spapr_machine_2_7_class_options(mc);
- mc->query_hotpluggable_cpus = NULL;
+ mc->has_hotpluggable_cpus = false;
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
}
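spapr_possible_cpu_arch_ids() above caches a CPUArchIdList, a structure whose length field is followed by a flexible array of per-core slots indexed by core_id / smp_threads. A minimal stand-alone sketch of the same allocation and lookup scheme, with simplified stand-in types rather than the real QEMU structures:

#include <stdio.h>
#include <stdlib.h>

typedef struct CoreSlot {
    int arch_id;            /* first thread id of the core */
    int vcpus_count;
    void *cpu;              /* NULL while the slot is unpopulated */
} CoreSlot;

typedef struct CoreSlotList {
    int len;
    CoreSlot cpus[];        /* flexible array member */
} CoreSlotList;

static CoreSlotList *make_possible_cpus(int max_cores, int threads)
{
    CoreSlotList *list;

    list = calloc(1, sizeof(*list) + sizeof(CoreSlot) * max_cores);
    if (!list) {
        abort();
    }
    list->len = max_cores;
    for (int i = 0; i < max_cores; i++) {
        list->cpus[i].arch_id = i * threads;   /* core id == first thread id */
        list->cpus[i].vcpus_count = threads;
    }
    return list;
}

/* Look up a slot by core id, mirroring the spapr_find_cpu_slot() logic. */
static CoreSlot *find_slot(CoreSlotList *list, int core_id, int threads)
{
    int index = core_id / threads;

    return index < list->len ? &list->cpus[index] : NULL;
}

int main(void)
{
    CoreSlotList *l = make_possible_cpus(4, 8);
    CoreSlot *slot = find_slot(l, 16, 8);

    printf("core 16 -> slot %ld, vcpus %d\n",
           (long)(slot - l->cpus), slot->vcpus_count);
    free(l);
    return 0;
}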
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 9dddaeb3fa..55cd0456eb 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -109,13 +109,12 @@ char *spapr_get_cpu_core_type(const char *model)
return core_type;
}
-static void spapr_core_release(DeviceState *dev, void *opaque)
+static void spapr_cpu_core_unrealizefn(DeviceState *dev, Error **errp)
{
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
const char *typename = object_class_get_name(scc->cpu_class);
size_t size = object_type_get_instance_size(typename);
- sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
CPUCore *cc = CPU_CORE(dev);
int i;
@@ -129,140 +128,7 @@ static void spapr_core_release(DeviceState *dev, void *opaque)
cpu_remove_sync(cs);
object_unparent(obj);
}
-
- spapr->cores[cc->core_id / smp_threads] = NULL;
-
g_free(sc->threads);
- object_unparent(OBJECT(dev));
-}
-
-void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
-{
- CPUCore *cc = CPU_CORE(dev);
- int smt = kvmppc_smt_threads();
- int index = cc->core_id / smp_threads;
- sPAPRDRConnector *drc =
- spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
- sPAPRDRConnectorClass *drck;
- Error *local_err = NULL;
-
- if (index == 0) {
- error_setg(errp, "Boot CPU core may not be unplugged");
- return;
- }
-
- g_assert(drc);
-
- drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- drck->detach(drc, dev, spapr_core_release, NULL, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
-
- spapr_hotplug_req_remove_by_index(drc);
-}
-
-void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
-{
- sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
- MachineClass *mc = MACHINE_GET_CLASS(spapr);
- sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
- CPUCore *cc = CPU_CORE(dev);
- CPUState *cs = CPU(core->threads);
- sPAPRDRConnector *drc;
- Error *local_err = NULL;
- void *fdt = NULL;
- int fdt_offset = 0;
- int index = cc->core_id / smp_threads;
- int smt = kvmppc_smt_threads();
-
- drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
- spapr->cores[index] = OBJECT(dev);
-
- g_assert(drc || !mc->query_hotpluggable_cpus);
-
- /*
- * Setup CPU DT entries only for hotplugged CPUs. For boot time or
- * coldplugged CPUs DT entries are setup in spapr_build_fdt().
- */
- if (dev->hotplugged) {
- fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
- }
-
- if (drc) {
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, &local_err);
- if (local_err) {
- g_free(fdt);
- spapr->cores[index] = NULL;
- error_propagate(errp, local_err);
- return;
- }
- }
-
- if (dev->hotplugged) {
- /*
- * Send hotplug notification interrupt to the guest only in case
- * of hotplugged CPUs.
- */
- spapr_hotplug_req_add_by_index(drc);
- } else {
- /*
- * Set the right DRC states for cold plugged CPU.
- */
- if (drc) {
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
- drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
- }
- }
-}
-
-void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
-{
- MachineState *machine = MACHINE(OBJECT(hotplug_dev));
- MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
- sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
- int spapr_max_cores = max_cpus / smp_threads;
- int index;
- Error *local_err = NULL;
- CPUCore *cc = CPU_CORE(dev);
- char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
- const char *type = object_get_typename(OBJECT(dev));
-
- if (dev->hotplugged && !mc->query_hotpluggable_cpus) {
- error_setg(&local_err, "CPU hotplug not supported for this machine");
- goto out;
- }
-
- if (strcmp(base_core_type, type)) {
- error_setg(&local_err, "CPU core type should be %s", base_core_type);
- goto out;
- }
-
- if (cc->core_id % smp_threads) {
- error_setg(&local_err, "invalid core id %d", cc->core_id);
- goto out;
- }
-
- index = cc->core_id / smp_threads;
- if (index < 0 || index >= spapr_max_cores) {
- error_setg(&local_err, "core id %d out of range", cc->core_id);
- goto out;
- }
-
- if (spapr->cores[index]) {
- error_setg(&local_err, "core %d already populated", cc->core_id);
- goto out;
- }
-
-out:
- g_free(base_core_type);
- error_propagate(errp, local_err);
}
static void spapr_cpu_core_realize_child(Object *child, Error **errp)
@@ -368,6 +234,7 @@ void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_CLASS(oc);
dc->realize = spapr_cpu_core_realize;
+ dc->unrealize = spapr_cpu_core_unrealizefn;
scc->cpu_class = cpu_class_by_name(TYPE_POWERPC_CPU, data);
g_assert(scc->cpu_class);
}
diff --git a/hw/ppc/spapr_ovec.c b/hw/ppc/spapr_ovec.c
index 3eb1d5976f..41df4c35ba 100644
--- a/hw/ppc/spapr_ovec.c
+++ b/hw/ppc/spapr_ovec.c
@@ -16,18 +16,9 @@
#include "qemu/bitmap.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
+#include "trace.h"
#include <libfdt.h>
-/* #define DEBUG_SPAPR_OVEC */
-
-#ifdef DEBUG_SPAPR_OVEC
-#define DPRINTFN(fmt, ...) \
- do { fprintf(stderr, fmt "\n", ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTFN(fmt, ...) \
- do { } while (0)
-#endif
-
#define OV_MAXBYTES 256 /* not including length byte */
#define OV_MAXBITS (OV_MAXBYTES * BITS_PER_BYTE)
@@ -210,8 +201,7 @@ sPAPROptionVector *spapr_ovec_parse_vector(target_ulong table_addr, int vector)
for (i = 0; i < vector_len; i++) {
uint8_t entry = ldub_phys(&address_space_memory, addr + i);
if (entry) {
- DPRINTFN("read guest vector %2d, byte %3d / %3d: 0x%.2x",
- vector, i + 1, vector_len, entry);
+ trace_spapr_ovec_parse_vector(vector, i + 1, vector_len, entry);
guest_byte_to_bitmap(entry, ov->bitmap, i * BITS_PER_BYTE);
}
}
@@ -245,10 +235,9 @@ int spapr_ovec_populate_dt(void *fdt, int fdt_offset,
for (i = 1; i < vec_len + 1; i++) {
vec[i] = guest_byte_from_bitmap(ov->bitmap, (i - 1) * BITS_PER_BYTE);
if (vec[i]) {
- DPRINTFN("encoding guest vector byte %3d / %3d: 0x%.2x",
- i, vec_len, vec[i]);
+ trace_spapr_ovec_populate_dt(i, vec_len, vec[i]);
}
}
- return fdt_setprop(fdt, fdt_offset, name, vec, vec_len);
+ return fdt_setprop(fdt, fdt_offset, name, vec, vec_len + 1);
}
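The vec_len + 1 fix above matters because the encoded option vector carries its own leading length byte, so the device-tree property must cover that byte plus vec_len data bytes. A minimal sketch (not QEMU code) of serializing such a length-prefixed byte vector, with the exact sPAPR encoding of the length byte simplified:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build a property that is one length byte followed by vec_len data bytes.
 * Writing only vec_len bytes instead of vec_len + 1 would silently drop
 * the last data byte. */
static size_t encode_vector(const uint8_t *data, size_t vec_len,
                            uint8_t *out, size_t out_size)
{
    if (out_size < vec_len + 1) {
        return 0;
    }
    out[0] = (uint8_t)vec_len;          /* leading length byte (simplified) */
    memcpy(&out[1], data, vec_len);
    return vec_len + 1;                 /* total property size */
}

int main(void)
{
    uint8_t data[] = { 0x80, 0x00, 0xC0 };
    uint8_t prop[8];
    size_t n = encode_vector(data, sizeof(data), prop, sizeof(prop));

    printf("property is %zu bytes (1 length byte + %zu data bytes)\n",
           n, sizeof(data));
    return 0;
}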
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index bb19944686..619f32c054 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -334,7 +334,8 @@ static void rtas_ibm_os_term(PowerPCCPU *cpu,
{
target_ulong ret = 0;
- qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, &error_abort);
+ qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, false, NULL,
+ &error_abort);
rtas_st(rets, 0, ret);
}
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index f46995cdb2..43d265f351 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -56,6 +56,10 @@ spapr_drc_realize_child(uint32_t index, char *childname) "drc: 0x%"PRIx32", chil
spapr_drc_realize_complete(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_unrealize(uint32_t index) "drc: 0x%"PRIx32
+# hw/ppc/spapr_ovec.c
+spapr_ovec_parse_vector(int vector, int byte, uint16_t vec_len, uint8_t entry) "read guest vector %2d, byte %3d / %3d: 0x%.2x"
+spapr_ovec_populate_dt(int byte, uint16_t vec_len, uint8_t entry) "encoding guest vector byte %3d / %3d: 0x%.2x"
+
# hw/ppc/spapr_rtas.c
spapr_rtas_set_indicator_invalid(uint32_t index) "sensor index: 0x%"PRIx32
spapr_rtas_set_indicator_not_supported(uint32_t index, uint32_t type) "sensor index: 0x%"PRIx32", type: %"PRIu32
@@ -85,3 +89,11 @@ rs6000mc_presence_read(uint32_t addr, uint32_t val) "read addr=%x val=%x"
rs6000mc_size_read(uint32_t addr, uint32_t val) "read addr=%x val=%x"
rs6000mc_size_write(uint32_t addr, uint32_t val) "write addr=%x val=%x"
rs6000mc_parity_read(uint32_t addr, uint32_t val) "read addr=%x val=%x"
+
+# hw/ppc/mac_newworld.c
+mac99_uninorth_write(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
+mac99_uninorth_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
+
+# hw/ppc/ppc4xx_pci.c
+ppc4xx_pci_map_irq(int32_t devfn, int irq_num, int slot) "devfn %x irq %d -> %d"
+ppc4xx_pci_set_irq(int irq_num) "PCI irq %d"
diff --git a/hw/s390x/css.c b/hw/s390x/css.c
index 0f2580d644..e32b2a4d42 100644
--- a/hw/s390x/css.c
+++ b/hw/s390x/css.c
@@ -368,13 +368,16 @@ static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
ret.cda = be32_to_cpu(tmp1.cda);
} else {
cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
- ret.cmd_code = tmp0.cmd_code;
- ret.flags = tmp0.flags;
- ret.count = be16_to_cpu(tmp0.count);
- ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
- if ((ret.cmd_code & 0x0f) == CCW_CMD_TIC) {
- ret.cmd_code &= 0x0f;
+ if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) {
+ ret.cmd_code = CCW_CMD_TIC;
+ ret.flags = 0;
+ ret.count = 0;
+ } else {
+ ret.cmd_code = tmp0.cmd_code;
+ ret.flags = tmp0.flags;
+ ret.count = be16_to_cpu(tmp0.count);
}
+ ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
}
return ret;
}
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index e9a676797a..4f0d62b2d8 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -63,7 +63,7 @@ static int virtio_ccw_hcall_notify(const uint64_t *args)
if (!sch || !css_subch_visible(sch)) {
return -EINVAL;
}
- if (queue >= VIRTIO_CCW_QUEUE_MAX) {
+ if (queue >= VIRTIO_QUEUE_MAX) {
return -EINVAL;
}
virtio_queue_notify(virtio_ccw_get_vdev(sch), queue);
@@ -336,7 +336,12 @@ static const TypeInfo ccw_machine_info = {
type_init(ccw_machine_register_##suffix)
#define CCW_COMPAT_2_8 \
- HW_COMPAT_2_8
+ HW_COMPAT_2_8 \
+ {\
+ .driver = TYPE_S390_FLIC_COMMON,\
+ .property = "adapter_routes_max_batch",\
+ .value = "64",\
+ },
#define CCW_COMPAT_2_7 \
HW_COMPAT_2_7
diff --git a/hw/s390x/s390-virtio.c b/hw/s390x/s390-virtio.c
index 7a3a7fe5fd..9cfb09057e 100644
--- a/hw/s390x/s390-virtio.c
+++ b/hw/s390x/s390-virtio.c
@@ -44,16 +44,6 @@
#include "hw/s390x/ipl.h"
#include "cpu.h"
-//#define DEBUG_S390
-
-#ifdef DEBUG_S390
-#define DPRINTF(fmt, ...) \
- do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
#define MAX_BLK_DEVS 10
#define S390_TOD_CLOCK_VALUE_MISSING 0x00
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 63c46373fb..00b3bde4e9 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -35,6 +35,8 @@
#include "trace.h"
#include "hw/s390x/css-bridge.h"
+#define NR_CLASSIC_INDICATOR_BITS 64
+
static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
VirtioCcwDevice *dev);
@@ -126,7 +128,7 @@ static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
uint16_t num = info ? info->num : linfo->num;
uint64_t desc = info ? info->desc : linfo->queue;
- if (index >= VIRTIO_CCW_QUEUE_MAX) {
+ if (index >= VIRTIO_QUEUE_MAX) {
return -EINVAL;
}
@@ -162,7 +164,7 @@ static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
virtio_queue_set_vector(vdev, index, index);
}
/* tell notify handler in case of config change */
- vdev->config_vector = VIRTIO_CCW_QUEUE_MAX;
+ vdev->config_vector = VIRTIO_QUEUE_MAX;
return 0;
}
@@ -280,6 +282,15 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
ccw.cmd_code);
check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
+ if (dev->force_revision_1 && dev->revision < 0 &&
+ ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
+ /*
+ * virtio-1 drivers must start by negotiating a revision >= 1,
+ * so post a command reject for all other commands
+ */
+ return -ENOSYS;
+ }
+
/* Look at the command. */
switch (ccw.cmd_code) {
case CCW_CMD_SET_VQ:
@@ -500,6 +511,11 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
ret = -ENOSYS;
break;
}
+ if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
+ /* More queues than indicator bits --> trigger a reject */
+ ret = -ENOSYS;
+ break;
+ }
if (!ccw.cda) {
ret = -EFAULT;
} else {
@@ -549,7 +565,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
ccw.cda,
MEMTXATTRS_UNSPECIFIED,
NULL);
- if (vq_config.index >= VIRTIO_CCW_QUEUE_MAX) {
+ if (vq_config.index >= VIRTIO_QUEUE_MAX) {
ret = -EINVAL;
break;
}
@@ -638,7 +654,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
* need to fetch it here. Nothing to do for now, though.
*/
if (dev->revision >= 0 ||
- revinfo.revision > virtio_ccw_rev_max(dev)) {
+ revinfo.revision > virtio_ccw_rev_max(dev) ||
+ (dev->force_revision_1 && !revinfo.revision)) {
ret = -ENOSYS;
break;
}
@@ -669,6 +686,12 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
if (!sch) {
return;
}
+ if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
+ error_setg(&err, "Invalid value of property max_rev "
+ "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
+ error_propagate(errp, err);
+ return;
+ }
sch->driver_data = dev;
sch->ccw_cb = virtio_ccw_cb;
@@ -878,6 +901,24 @@ static void virtio_ccw_rng_realize(VirtioCcwDevice *ccw_dev, Error **errp)
NULL);
}
+static void virtio_ccw_crypto_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VirtIOCryptoCcw *dev = VIRTIO_CRYPTO_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ object_property_set_link(OBJECT(vdev),
+ OBJECT(dev->vdev.conf.cryptodev), "cryptodev",
+ NULL);
+}
+
/* DeviceState to VirtioCcwDevice. Note: used on datapath,
* be careful and test performance if you change this.
*/
@@ -919,11 +960,11 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
uint64_t indicators;
/* queue indicators + secondary indicators */
- if (vector >= VIRTIO_CCW_QUEUE_MAX + 64) {
+ if (vector >= VIRTIO_QUEUE_MAX + 64) {
return;
}
- if (vector < VIRTIO_CCW_QUEUE_MAX) {
+ if (vector < VIRTIO_QUEUE_MAX) {
if (!dev->indicators) {
return;
}
@@ -1278,15 +1319,22 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
CcwDevice *ccw_dev = CCW_DEVICE(d);
SubchDev *sch = ccw_dev->sch;
int n = virtio_get_num_queues(vdev);
+ S390FLICState *flic = s390_get_flic();
if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
dev->max_rev = 0;
}
- if (virtio_get_num_queues(vdev) > VIRTIO_CCW_QUEUE_MAX) {
+ if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
+ error_setg(errp, "The number of virtqueues %d "
+ "exceeds virtio limit %d", n,
+ VIRTIO_QUEUE_MAX);
+ return;
+ }
+ if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
error_setg(errp, "The number of virtqueues %d "
- "exceeds ccw limit %d", n,
- VIRTIO_CCW_QUEUE_MAX);
+ "exceeds flic adapter route limit %d", n,
+ flic->adapter_routes_max_batch);
return;
}
@@ -1518,6 +1566,48 @@ static const TypeInfo virtio_ccw_rng = {
.class_init = virtio_ccw_rng_class_init,
};
+static Property virtio_ccw_crypto_properties[] = {
+ DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_crypto_instance_init(Object *obj)
+{
+ VirtIOCryptoCcw *dev = VIRTIO_CRYPTO_CCW(obj);
+ VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj);
+
+ ccw_dev->force_revision_1 = true;
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_CRYPTO);
+
+ object_property_add_alias(obj, "cryptodev", OBJECT(&dev->vdev),
+ "cryptodev", &error_abort);
+}
+
+static void virtio_ccw_crypto_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = virtio_ccw_crypto_realize;
+ k->exit = virtio_ccw_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_crypto_properties;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo virtio_ccw_crypto = {
+ .name = TYPE_VIRTIO_CRYPTO_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtIOCryptoCcw),
+ .instance_init = virtio_ccw_crypto_instance_init,
+ .class_init = virtio_ccw_crypto_class_init,
+};
+
static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
@@ -1720,6 +1810,7 @@ static void virtio_ccw_register(void)
#ifdef CONFIG_VHOST_VSOCK
type_register_static(&vhost_vsock_ccw_info);
#endif
+ type_register_static(&virtio_ccw_crypto);
}
type_init(virtio_ccw_register)
diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h
index 77d10f1671..41d4010378 100644
--- a/hw/s390x/virtio-ccw.h
+++ b/hw/s390x/virtio-ccw.h
@@ -22,6 +22,7 @@
#endif
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-rng.h"
+#include "hw/virtio/virtio-crypto.h"
#include "hw/virtio/virtio-bus.h"
#ifdef CONFIG_VHOST_VSOCK
#include "hw/virtio/vhost-vsock.h"
@@ -94,6 +95,7 @@ struct VirtioCcwDevice {
IndAddr *indicators2;
IndAddr *summary_indicator;
uint64_t ind_bit;
+ bool force_revision_1;
};
/* The maximum virtio revision we support. */
@@ -182,6 +184,17 @@ typedef struct VirtIORNGCcw {
VirtIORNG vdev;
} VirtIORNGCcw;
+/* virtio-crypto-ccw */
+
+#define TYPE_VIRTIO_CRYPTO_CCW "virtio-crypto-ccw"
+#define VIRTIO_CRYPTO_CCW(obj) \
+ OBJECT_CHECK(VirtIOCryptoCcw, (obj), TYPE_VIRTIO_CRYPTO_CCW)
+
+typedef struct VirtIOCryptoCcw {
+ VirtioCcwDevice parent_obj;
+ VirtIOCrypto vdev;
+} VirtIOCryptoCcw;
+
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch);
#ifdef CONFIG_VIRTFS
diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c
index 595f88b352..e295d88939 100644
--- a/hw/scsi/esp-pci.c
+++ b/hw/scsi/esp-pci.c
@@ -367,9 +367,6 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
s->irq = pci_allocate_irq(dev);
scsi_bus_new(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info, NULL);
- if (!d->hotplugged) {
- scsi_bus_legacy_handle_cmdline(&s->bus, errp);
- }
}
static void esp_pci_scsi_uninit(PCIDevice *d)
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 5a5a4e946a..eee831efeb 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -690,7 +690,6 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp)
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
SysBusESPState *sysbus = ESP(dev);
ESPState *s = &sysbus->esp;
- Error *err = NULL;
sysbus_init_irq(sbd, &s->irq);
assert(sysbus->it_shift != -1);
@@ -703,11 +702,6 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
- scsi_bus_legacy_handle_cmdline(&s->bus, &err);
- if (err != NULL) {
- error_propagate(errp, err);
- return;
- }
}
static void sysbus_esp_hard_reset(DeviceState *dev)
diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
index feb1191315..595c26017a 100644
--- a/hw/scsi/lsi53c895a.c
+++ b/hw/scsi/lsi53c895a.c
@@ -2216,9 +2216,6 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
QTAILQ_INIT(&s->queue);
scsi_bus_new(&s->bus, sizeof(s->bus), d, &lsi_scsi_info, NULL);
- if (!d->hotplugged) {
- scsi_bus_legacy_handle_cmdline(&s->bus, errp);
- }
}
static void lsi_scsi_unrealize(DeviceState *dev, Error **errp)
@@ -2271,3 +2268,10 @@ static void lsi53c895a_register_types(void)
}
type_init(lsi53c895a_register_types)
+
+void lsi53c895a_create(PCIBus *bus)
+{
+ LSIState *s = LSI53C895A(pci_create_simple(bus, -1, "lsi53c895a"));
+
+ scsi_bus_legacy_handle_cmdline(&s->bus, false);
+}
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index 1a8b04c6d7..e3d59b7c83 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -2325,7 +2325,6 @@ static const struct SCSIBusInfo megasas_scsi_info = {
static void megasas_scsi_realize(PCIDevice *dev, Error **errp)
{
- DeviceState *d = DEVICE(dev);
MegasasState *s = MEGASAS(dev);
MegasasBaseClass *b = MEGASAS_DEVICE_GET_CLASS(s);
uint8_t *pci_conf;
@@ -2426,9 +2425,6 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp)
scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
&megasas_scsi_info, NULL);
- if (!d->hotplugged) {
- scsi_bus_legacy_handle_cmdline(&s->bus, errp);
- }
}
static Property megasas_properties_gen1[] = {
diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
index ad87e78fe2..2e091c0156 100644
--- a/hw/scsi/mptsas.c
+++ b/hw/scsi/mptsas.c
@@ -1271,7 +1271,6 @@ static const struct SCSIBusInfo mptsas_scsi_info = {
static void mptsas_scsi_realize(PCIDevice *dev, Error **errp)
{
- DeviceState *d = DEVICE(dev);
MPTSASState *s = MPT_SAS(dev);
Error *err = NULL;
int ret;
@@ -1326,9 +1325,6 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp)
QTAILQ_INIT(&s->pending);
scsi_bus_new(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info, NULL);
- if (!d->hotplugged) {
- scsi_bus_legacy_handle_cmdline(&s->bus, errp);
- }
}
static void mptsas_scsi_uninit(PCIDevice *dev)
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 5940cb160c..f5574469c8 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -105,6 +105,7 @@ static void scsi_dma_restart_bh(void *opaque)
qemu_bh_delete(s->bh);
s->bh = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
scsi_req_ref(req);
if (req->retry) {
@@ -122,6 +123,7 @@ static void scsi_dma_restart_bh(void *opaque)
}
scsi_req_unref(req);
}
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
void scsi_req_retry(SCSIRequest *req)
@@ -261,12 +263,11 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
return SCSI_DEVICE(dev);
}
-void scsi_bus_legacy_handle_cmdline(SCSIBus *bus, Error **errp)
+void scsi_bus_legacy_handle_cmdline(SCSIBus *bus, bool deprecated)
{
Location loc;
DriveInfo *dinfo;
int unit;
- Error *err = NULL;
loc_push_none(&loc);
for (unit = 0; unit <= bus->info->max_target; unit++) {
@@ -275,16 +276,59 @@ void scsi_bus_legacy_handle_cmdline(SCSIBus *bus, Error **errp)
continue;
}
qemu_opts_loc_restore(dinfo->opts);
- scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
- unit, false, -1, NULL, &err);
- if (err != NULL) {
- error_propagate(errp, err);
- break;
+ if (deprecated) {
+ /* Handling -drive not claimed by machine initialization */
+ if (blk_get_attached_dev(blk_by_legacy_dinfo(dinfo))) {
+ continue; /* claimed */
+ }
+ if (!dinfo->is_default) {
+ error_report("warning: bus=%d,unit=%d is deprecated with this"
+ " machine type",
+ bus->busnr, unit);
+ }
}
+ scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
+ unit, false, -1, NULL, &error_fatal);
}
loc_pop(&loc);
}
+static bool is_scsi_hba_with_legacy_magic(Object *obj)
+{
+ static const char *magic[] = {
+ "am53c974", "dc390", "esp", "lsi53c810", "lsi53c895a",
+ "megasas", "megasas-gen2", "mptsas1068", "spapr-vscsi",
+ "virtio-scsi-device",
+ NULL
+ };
+ const char *typename = object_get_typename(obj);
+ int i;
+
+ for (i = 0; magic[i]; i++)
+ if (!strcmp(typename, magic[i])) {
+ return true;
+ }
+
+ return false;
+}
+
+static int scsi_legacy_handle_cmdline_cb(Object *obj, void *opaque)
+{
+ SCSIBus *bus = (SCSIBus *)object_dynamic_cast(obj, TYPE_SCSI_BUS);
+
+ if (bus && is_scsi_hba_with_legacy_magic(OBJECT(bus->qbus.parent))) {
+ scsi_bus_legacy_handle_cmdline(bus, true);
+ }
+
+ return 0;
+}
+
+void scsi_legacy_handle_cmdline(void)
+{
+ object_child_foreach_recursive(object_get_root(),
+ scsi_legacy_handle_cmdline_cb, NULL);
+}
+
static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index cc06fe5f6c..bbfb5dc289 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -207,6 +207,7 @@ static void scsi_aio_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (scsi_disk_req_check_error(r, ret, true)) {
goto done;
}
@@ -215,6 +216,7 @@ static void scsi_aio_complete(void *opaque, int ret)
scsi_req_complete(&r->req, GOOD);
done:
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
scsi_req_unref(&r->req);
}
@@ -290,12 +292,14 @@ static void scsi_dma_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
} else {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_dma_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_read_complete(void * opaque, int ret)
@@ -306,6 +310,7 @@ static void scsi_read_complete(void * opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (scsi_disk_req_check_error(r, ret, true)) {
goto done;
}
@@ -320,6 +325,7 @@ static void scsi_read_complete(void * opaque, int ret)
done:
scsi_req_unref(&r->req);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Actually issue a read to the block device. */
@@ -364,12 +370,14 @@ static void scsi_do_read_cb(void *opaque, int ret)
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
} else {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_do_read(opaque, ret);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Read more data from scsi device into buffer. */
@@ -489,12 +497,14 @@ static void scsi_write_complete(void * opaque, int ret)
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
} else {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_write_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_write_data(SCSIRequest *req)
@@ -1625,11 +1635,14 @@ static void scsi_unmap_complete(void *opaque, int ret)
{
UnmapCBData *data = opaque;
SCSIDiskReq *r = data->r;
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
scsi_unmap_complete_noio(data, ret);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
@@ -1696,6 +1709,7 @@ static void scsi_write_same_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (scsi_disk_req_check_error(r, ret, true)) {
goto done;
}
@@ -1724,6 +1738,7 @@ done:
scsi_req_unref(&r->req);
qemu_vfree(data->iov.iov_base);
g_free(data);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
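Each completion callback above now brackets its work with aio_context_acquire()/aio_context_release() on the backend's AioContext, so the early exits have to fall through to the final release via the done label. A minimal sketch (not QEMU code) of that shape, with a pthread mutex standing in for the AioContext lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Completion handler: every exit path must reach the final release,
 * so early returns become jumps to the cleanup label. */
static void request_complete(int ret, int canceled)
{
    pthread_mutex_lock(&ctx_lock);

    if (ret < 0 || canceled) {
        printf("request failed or canceled (ret=%d)\n", ret);
        goto done;
    }

    printf("request completed (ret=%d)\n", ret);

done:
    pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
    request_complete(0, 0);
    request_complete(-5, 0);
    return 0;
}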
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index 92f091a613..2933119e7d 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -143,10 +143,14 @@ done:
static void scsi_command_complete(void *opaque, int ret)
{
SCSIGenericReq *r = (SCSIGenericReq *)opaque;
+ SCSIDevice *s = r->req.dev;
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
scsi_command_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
static int execute_command(BlockBackend *blk,
@@ -182,9 +186,11 @@ static void scsi_read_complete(void * opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
if (ret || r->req.io_canceled) {
scsi_command_complete_noio(r, ret);
- return;
+ goto done;
}
len = r->io_header.dxfer_len - r->io_header.resid;
@@ -193,7 +199,7 @@ static void scsi_read_complete(void * opaque, int ret)
r->len = -1;
if (len == 0) {
scsi_command_complete_noio(r, 0);
- return;
+ goto done;
}
/* Snoop READ CAPACITY output to set the blocksize. */
@@ -237,6 +243,9 @@ static void scsi_read_complete(void * opaque, int ret)
}
scsi_req_data(&r->req, len);
scsi_req_unref(&r->req);
+
+done:
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Read more data from scsi device into buffer. */
@@ -272,9 +281,11 @@ static void scsi_write_complete(void * opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
if (ret || r->req.io_canceled) {
scsi_command_complete_noio(r, ret);
- return;
+ goto done;
}
if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
@@ -284,6 +295,9 @@ static void scsi_write_complete(void * opaque, int ret)
}
scsi_command_complete_noio(r, ret);
+
+done:
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Write data to a scsi device. Returns nonzero on failure.
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index 6090a204a0..55ee48c4da 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -1206,9 +1206,6 @@ static void spapr_vscsi_realize(VIOsPAPRDevice *dev, Error **errp)
scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
&vscsi_scsi_info, NULL);
- if (!dev->qdev.hotplugged) {
- scsi_bus_legacy_handle_cmdline(&s->bus, errp);
- }
}
void spapr_vscsi_create(VIOsPAPRBus *bus)
@@ -1218,6 +1215,8 @@ void spapr_vscsi_create(VIOsPAPRBus *bus)
dev = qdev_create(&bus->bus, "spapr-vscsi");
qdev_init_nofail(dev);
+ scsi_bus_legacy_handle_cmdline(&VIO_SPAPR_VSCSI_DEVICE(dev)->bus,
+ false);
}
static int spapr_vscsi_devnode(VIOsPAPRDevice *dev, void *fdt, int node_off)
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 6b8d0f0024..74c95e0e60 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -49,35 +49,35 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
}
}
-static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
+static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
VirtQueue *vq)
{
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
assert(s->ctx && s->dataplane_started);
- virtio_scsi_handle_cmd_vq(s, vq);
+ return virtio_scsi_handle_cmd_vq(s, vq);
}
-static void virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
+static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
VirtQueue *vq)
{
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
assert(s->ctx && s->dataplane_started);
- virtio_scsi_handle_ctrl_vq(s, vq);
+ return virtio_scsi_handle_ctrl_vq(s, vq);
}
-static void virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
+static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
VirtQueue *vq)
{
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
assert(s->ctx && s->dataplane_started);
- virtio_scsi_handle_event_vq(s, vq);
+ return virtio_scsi_handle_event_vq(s, vq);
}
static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
- void (*fn)(VirtIODevice *vdev, VirtQueue *vq))
+ VirtIOHandleAIOOutput fn)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
int rc;
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index ce19efffc8..1dbc4bced9 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -436,13 +436,18 @@ static inline void virtio_scsi_release(VirtIOSCSI *s)
}
}
-void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
+bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req;
+ bool progress = false;
+ virtio_scsi_acquire(s);
while ((req = virtio_scsi_pop_req(s, vq))) {
+ progress = true;
virtio_scsi_handle_ctrl_req(s, req);
}
+ virtio_scsi_release(s);
+ return progress;
}
static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
@@ -591,17 +596,20 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
scsi_req_unref(sreq);
}
-void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
+bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req, *next;
int ret = 0;
+ bool progress = false;
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
+ virtio_scsi_acquire(s);
do {
virtio_queue_set_notification(vq, 0);
while ((req = virtio_scsi_pop_req(s, vq))) {
+ progress = true;
ret = virtio_scsi_handle_cmd_req_prepare(s, req);
if (!ret) {
QTAILQ_INSERT_TAIL(&reqs, req, next);
@@ -624,6 +632,8 @@ void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
virtio_scsi_handle_cmd_req_submit(s, req);
}
+ virtio_scsi_release(s);
+ return progress;
}
static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
@@ -752,11 +762,16 @@ out:
virtio_scsi_release(s);
}
-void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
+bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
+ virtio_scsi_acquire(s);
if (s->events_dropped) {
virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+ virtio_scsi_release(s);
+ return true;
}
+ virtio_scsi_release(s);
+ return false;
}
static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
@@ -889,14 +904,6 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
/* override default SCSI bus hotplug-handler, with virtio-scsi's one */
qbus_set_hotplug_handler(BUS(&s->bus), dev, &error_abort);
- if (!dev->hotplugged) {
- scsi_bus_legacy_handle_cmdline(&s->bus, &err);
- if (err != NULL) {
- error_propagate(errp, err);
- return;
- }
- }
-
virtio_scsi_dataplane_setup(s, errp);
}
diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
index 01fbf228be..da32b5f709 100644
--- a/hw/sd/sdhci.c
+++ b/hw/sd/sdhci.c
@@ -536,7 +536,7 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
boundary_count -= block_size - begin;
}
dma_memory_read(&address_space_memory, s->sdmasysad,
- &s->fifo_buffer[begin], s->data_count);
+ &s->fifo_buffer[begin], s->data_count - begin);
s->sdmasysad += s->data_count - begin;
if (s->data_count == block_size) {
for (n = 0; n < block_size; n++) {
@@ -1253,7 +1253,7 @@ const VMStateDescription sdhci_vmstate = {
VMSTATE_UINT16(data_count, SDHCIState),
VMSTATE_UINT64(admasysaddr, SDHCIState),
VMSTATE_UINT8(stopped_state, SDHCIState),
- VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, 0, buf_maxsz),
+ VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz),
VMSTATE_TIMER_PTR(insert_timer, SDHCIState),
VMSTATE_TIMER_PTR(transfer_timer, SDHCIState),
VMSTATE_END_OF_LIST()
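The sdhci_sdma_transfer_multi_blocks() fix above reads only the bytes still missing from the current block: the destination starts at offset begin, so the length must be data_count - begin rather than the full data_count, and the source offset advances by the same amount. A minimal sketch (not QEMU code) of that resume-in-the-middle copy, with made-up sizes:

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char block[16];
    const unsigned char stream[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    size_t src = 4;           /* running source offset (like s->sdmasysad)   */
    size_t begin = 4;         /* bytes already present in the current block  */
    size_t data_count = 16;   /* block fill level once this transfer is done */

    /* Copy only the missing tail of the block, not data_count bytes,
     * and advance the source by the same amount. */
    memcpy(&block[begin], &stream[src], data_count - begin);
    src += data_count - begin;

    printf("copied %zu bytes, source now at offset %zu\n",
           data_count - begin, src);
    return 0;
}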
diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c
index db373c70c5..6d06968f8b 100644
--- a/hw/sh4/r2d.c
+++ b/hw/sh4/r2d.c
@@ -362,6 +362,7 @@ static void r2d_machine_init(MachineClass *mc)
{
mc->desc = "r2d-plus board";
mc->init = r2d_init;
+ mc->block_default_type = IF_IDE;
}
DEFINE_MACHINE("r2d", r2d_machine_init)
diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c
index f5b6efddf8..61416a6426 100644
--- a/hw/sparc/sun4m.c
+++ b/hw/sparc/sun4m.c
@@ -989,11 +989,6 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,
slavio_misc_init(hwdef->slavio_base, hwdef->aux1_base, hwdef->aux2_base,
slavio_irq[30], fdc_tc);
- if (drive_get_max_bus(IF_SCSI) > 0) {
- fprintf(stderr, "qemu: too many SCSI bus\n");
- exit(1);
- }
-
esp_init(hwdef->esp_base, 2,
espdma_memory_read, espdma_memory_write,
espdma, espdma_irq, &esp_reset, &dma_enable);
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index d1a6bca873..d347b6616d 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -579,6 +579,7 @@ static void sun4u_class_init(ObjectClass *oc, void *data)
mc->desc = "Sun4u platform";
mc->init = sun4u_init;
+ mc->block_default_type = IF_IDE;
mc->max_cpus = 1; /* XXX for now */
mc->is_default = 1;
mc->default_boot_order = "c";
@@ -596,6 +597,7 @@ static void sun4v_class_init(ObjectClass *oc, void *data)
mc->desc = "Sun4v platform";
mc->init = sun4v_init;
+ mc->block_default_type = IF_IDE;
mc->max_cpus = 1; /* XXX for now */
mc->default_boot_order = "c";
}
diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c
index 087b29e8da..cb515730c5 100644
--- a/hw/ssi/aspeed_smc.c
+++ b/hw/ssi/aspeed_smc.c
@@ -475,15 +475,15 @@ static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl,
AspeedSegments seg;
aspeed_smc_reg_to_segment(s->regs[R_SEG_ADDR0 + fl->id], &seg);
- if ((addr & (seg.size - 1)) != addr) {
+ if ((addr % seg.size) != addr) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid address 0x%08x for CS%d segment : "
"[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
s->ctrl->name, addr, fl->id, seg.addr,
seg.addr + seg.size);
+ addr %= seg.size;
}
- addr &= seg.size - 1;
return addr;
}
@@ -536,10 +536,13 @@ static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size)
/*
* Use fake transfers to model dummy bytes. The value should
* be configured to some non-zero value in fast read mode and
- * zero in read mode.
+ * zero in read mode. But, as the HW allows inconsistent
+ * settings, let's check for fast read mode.
*/
- for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) {
- ssi_transfer(fl->controller->spi, 0xFF);
+ if (aspeed_smc_flash_mode(fl) == CTRL_FREADMODE) {
+ for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) {
+ ssi_transfer(fl->controller->spi, 0xFF);
+ }
}
for (i = 0; i < size; i++) {
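The segment check above switches from masking with seg.size - 1 to a modulo because the flash segments are not required to be power-of-two sized; the mask form only wraps addresses correctly when the size is a power of two. A small stand-alone illustration (not QEMU code) with a made-up 24 MiB segment:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint32_t size = 0x1800000;   /* 24 MiB segment: not a power of two */
    uint32_t addr = 0x1900000;   /* 1 MiB past the end of the segment  */

    printf("mask  : 0x%08" PRIx32 "\n", addr & (size - 1)); /* 0x01100000, wrong   */
    printf("modulo: 0x%08" PRIx32 "\n", addr % size);       /* 0x00100000, wrapped */
    return 0;
}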
diff --git a/hw/timer/Makefile.objs b/hw/timer/Makefile.objs
index 71994f2d88..fc9966880f 100644
--- a/hw/timer/Makefile.objs
+++ b/hw/timer/Makefile.objs
@@ -6,6 +6,9 @@ common-obj-$(CONFIG_DS1338) += ds1338.o
common-obj-$(CONFIG_HPET) += hpet.o
common-obj-$(CONFIG_I8254) += i8254_common.o i8254.o
common-obj-$(CONFIG_M48T59) += m48t59.o
+ifeq ($(CONFIG_ISA_BUS),y)
+common-obj-$(CONFIG_M48T59) += m48t59-isa.o
+endif
common-obj-$(CONFIG_PL031) += pl031.o
common-obj-$(CONFIG_PUV3) += puv3_ost.o
common-obj-$(CONFIG_TWL92230) += twl92230.o
diff --git a/hw/timer/m48t59-internal.h b/hw/timer/m48t59-internal.h
new file mode 100644
index 0000000000..32ae957805
--- /dev/null
+++ b/hw/timer/m48t59-internal.h
@@ -0,0 +1,82 @@
+/*
+ * QEMU M48T59 and M48T08 NVRAM emulation (common header)
+ *
+ * Copyright (c) 2003-2005, 2007 Jocelyn Mayer
+ * Copyright (c) 2013 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef HW_M48T59_INTERNAL_H
+#define HW_M48T59_INTERNAL_H 1
+
+//#define DEBUG_NVRAM
+
+#if defined(DEBUG_NVRAM)
+#define NVRAM_PRINTF(fmt, ...) do { printf(fmt , ## __VA_ARGS__); } while (0)
+#else
+#define NVRAM_PRINTF(fmt, ...) do { } while (0)
+#endif
+
+/*
+ * The M48T02, M48T08 and M48T59 chips are very similar. The newer '59 has
+ * an alarm and a watchdog timer and related control registers. On the
+ * PPC platform there is also an NVRAM lock function.
+ */
+
+typedef struct M48txxInfo {
+ const char *bus_name;
+ uint32_t model; /* 2 = m48t02, 8 = m48t08, 59 = m48t59 */
+ uint32_t size;
+} M48txxInfo;
+
+typedef struct M48t59State {
+ /* Hardware parameters */
+ qemu_irq IRQ;
+ MemoryRegion iomem;
+ uint32_t size;
+ int32_t base_year;
+ /* RTC management */
+ time_t time_offset;
+ time_t stop_time;
+ /* Alarm & watchdog */
+ struct tm alarm;
+ QEMUTimer *alrm_timer;
+ QEMUTimer *wd_timer;
+ /* NVRAM storage */
+ uint8_t *buffer;
+ /* Model parameters */
+ uint32_t model; /* 2 = m48t02, 8 = m48t08, 59 = m48t59 */
+ /* NVRAM storage */
+ uint16_t addr;
+ uint8_t lock;
+} M48t59State;
+
+uint32_t m48t59_read(M48t59State *NVRAM, uint32_t addr);
+void m48t59_write(M48t59State *NVRAM, uint32_t addr, uint32_t val);
+void m48t59_reset_common(M48t59State *NVRAM);
+void m48t59_realize_common(M48t59State *s, Error **errp);
+
+static inline void m48t59_toggle_lock(M48t59State *NVRAM, int lock)
+{
+ NVRAM->lock ^= 1 << lock;
+}
+
+extern const MemoryRegionOps m48t59_io_ops;
+
+#endif /* HW_M48T59_INTERNAL_H */
diff --git a/hw/timer/m48t59-isa.c b/hw/timer/m48t59-isa.c
new file mode 100644
index 0000000000..ea1ba703d7
--- /dev/null
+++ b/hw/timer/m48t59-isa.c
@@ -0,0 +1,181 @@
+/*
+ * QEMU M48T59 and M48T08 NVRAM emulation (ISA bus interface)
+ *
+ * Copyright (c) 2003-2005, 2007 Jocelyn Mayer
+ * Copyright (c) 2013 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "hw/isa/isa.h"
+#include "hw/timer/m48t59.h"
+#include "m48t59-internal.h"
+
+#define TYPE_M48TXX_ISA "isa-m48txx"
+#define M48TXX_ISA_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(M48txxISADeviceClass, (obj), TYPE_M48TXX_ISA)
+#define M48TXX_ISA_CLASS(klass) \
+ OBJECT_CLASS_CHECK(M48txxISADeviceClass, (klass), TYPE_M48TXX_ISA)
+#define M48TXX_ISA(obj) \
+ OBJECT_CHECK(M48txxISAState, (obj), TYPE_M48TXX_ISA)
+
+typedef struct M48txxISAState {
+ ISADevice parent_obj;
+ M48t59State state;
+ uint32_t io_base;
+ MemoryRegion io;
+} M48txxISAState;
+
+typedef struct M48txxISADeviceClass {
+ ISADeviceClass parent_class;
+ M48txxInfo info;
+} M48txxISADeviceClass;
+
+static M48txxInfo m48txx_isa_info[] = {
+ {
+ .bus_name = "isa-m48t59",
+ .model = 59,
+ .size = 0x2000,
+ }
+};
+
+Nvram *m48t59_init_isa(ISABus *bus, uint32_t io_base, uint16_t size,
+ int base_year, int model)
+{
+ DeviceState *dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(m48txx_isa_info); i++) {
+ if (m48txx_isa_info[i].size != size ||
+ m48txx_isa_info[i].model != model) {
+ continue;
+ }
+
+ dev = DEVICE(isa_create(bus, m48txx_isa_info[i].bus_name));
+ qdev_prop_set_uint32(dev, "iobase", io_base);
+ qdev_prop_set_int32(dev, "base-year", base_year);
+ qdev_init_nofail(dev);
+ return NVRAM(dev);
+ }
+
+ assert(false);
+ return NULL;
+}
+
+static uint32_t m48txx_isa_read(Nvram *obj, uint32_t addr)
+{
+ M48txxISAState *d = M48TXX_ISA(obj);
+ return m48t59_read(&d->state, addr);
+}
+
+static void m48txx_isa_write(Nvram *obj, uint32_t addr, uint32_t val)
+{
+ M48txxISAState *d = M48TXX_ISA(obj);
+ m48t59_write(&d->state, addr, val);
+}
+
+static void m48txx_isa_toggle_lock(Nvram *obj, int lock)
+{
+ M48txxISAState *d = M48TXX_ISA(obj);
+ m48t59_toggle_lock(&d->state, lock);
+}
+
+static Property m48t59_isa_properties[] = {
+ DEFINE_PROP_INT32("base-year", M48txxISAState, state.base_year, 0),
+ DEFINE_PROP_UINT32("iobase", M48txxISAState, io_base, 0x74),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void m48t59_reset_isa(DeviceState *d)
+{
+ M48txxISAState *isa = M48TXX_ISA(d);
+ M48t59State *NVRAM = &isa->state;
+
+ m48t59_reset_common(NVRAM);
+}
+
+static void m48t59_isa_realize(DeviceState *dev, Error **errp)
+{
+ M48txxISADeviceClass *u = M48TXX_ISA_GET_CLASS(dev);
+ ISADevice *isadev = ISA_DEVICE(dev);
+ M48txxISAState *d = M48TXX_ISA(dev);
+ M48t59State *s = &d->state;
+
+ s->model = u->info.model;
+ s->size = u->info.size;
+ isa_init_irq(isadev, &s->IRQ, 8);
+ m48t59_realize_common(s, errp);
+ memory_region_init_io(&d->io, OBJECT(dev), &m48t59_io_ops, s, "m48t59", 4);
+ if (d->io_base != 0) {
+ isa_register_ioport(isadev, &d->io, d->io_base);
+ }
+}
+
+static void m48txx_isa_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ NvramClass *nc = NVRAM_CLASS(klass);
+
+ dc->realize = m48t59_isa_realize;
+ dc->reset = m48t59_reset_isa;
+ dc->props = m48t59_isa_properties;
+ nc->read = m48txx_isa_read;
+ nc->write = m48txx_isa_write;
+ nc->toggle_lock = m48txx_isa_toggle_lock;
+}
+
+static void m48txx_isa_concrete_class_init(ObjectClass *klass, void *data)
+{
+ M48txxISADeviceClass *u = M48TXX_ISA_CLASS(klass);
+ M48txxInfo *info = data;
+
+ u->info = *info;
+}
+
+static const TypeInfo m48txx_isa_type_info = {
+ .name = TYPE_M48TXX_ISA,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(M48txxISAState),
+ .abstract = true,
+ .class_init = m48txx_isa_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_NVRAM },
+ { }
+ }
+};
+
+static void m48t59_isa_register_types(void)
+{
+ TypeInfo isa_type_info = {
+ .parent = TYPE_M48TXX_ISA,
+ .class_size = sizeof(M48txxISADeviceClass),
+ .class_init = m48txx_isa_concrete_class_init,
+ };
+ int i;
+
+ type_register_static(&m48txx_isa_type_info);
+
+ for (i = 0; i < ARRAY_SIZE(m48txx_isa_info); i++) {
+ isa_type_info.name = m48txx_isa_info[i].bus_name;
+ isa_type_info.class_data = &m48txx_isa_info[i];
+ type_register(&isa_type_info);
+ }
+}
+
+type_init(m48t59_isa_register_types)
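For orientation, a minimal sketch of how board code might use the ISA front end above; the helper name and the base-year value are illustrative, while the io_base, size and model arguments follow the m48t59_init_isa() signature and the single m48txx_isa_info entry in this file:

    #include "hw/isa/isa.h"
    #include "hw/timer/m48t59.h"

    /* Hypothetical board helper: 8 KiB isa-m48t59 at I/O port 0x74. */
    static Nvram *board_create_nvram(ISABus *isa_bus)
    {
        return m48t59_init_isa(isa_bus, 0x0074, 0x2000, 1968, 59);
    }

Note that m48t59_init_isa() asserts if no m48txx_isa_info entry matches the requested size/model pair, so only the 0x2000/59 combination is valid for the ISA variant.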
diff --git a/hw/timer/m48t59.c b/hw/timer/m48t59.c
index e46ca88391..474981a6ac 100644
--- a/hw/timer/m48t59.c
+++ b/hw/timer/m48t59.c
@@ -29,17 +29,10 @@
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
-#include "hw/isa/isa.h"
#include "exec/address-spaces.h"
#include "qemu/bcd.h"
-//#define DEBUG_NVRAM
-
-#if defined(DEBUG_NVRAM)
-#define NVRAM_PRINTF(fmt, ...) do { printf(fmt , ## __VA_ARGS__); } while (0)
-#else
-#define NVRAM_PRINTF(fmt, ...) do { } while (0)
-#endif
+#include "m48t59-internal.h"
#define TYPE_M48TXX_SYS_BUS "sysbus-m48txx"
#define M48TXX_SYS_BUS_GET_CLASS(obj) \
@@ -49,27 +42,6 @@
#define M48TXX_SYS_BUS(obj) \
OBJECT_CHECK(M48txxSysBusState, (obj), TYPE_M48TXX_SYS_BUS)
-#define TYPE_M48TXX_ISA "isa-m48txx"
-#define M48TXX_ISA_GET_CLASS(obj) \
- OBJECT_GET_CLASS(M48txxISADeviceClass, (obj), TYPE_M48TXX_ISA)
-#define M48TXX_ISA_CLASS(klass) \
- OBJECT_CLASS_CHECK(M48txxISADeviceClass, (klass), TYPE_M48TXX_ISA)
-#define M48TXX_ISA(obj) \
- OBJECT_CHECK(M48txxISAState, (obj), TYPE_M48TXX_ISA)
-
-/*
- * The M48T02, M48T08 and M48T59 chips are very similar. The newer '59 has
- * alarm and a watchdog timer and related control registers. In the
- * PPC platform there is also a nvram lock function.
- */
-
-typedef struct M48txxInfo {
- const char *isa_name;
- const char *sysbus_name;
- uint32_t model; /* 2 = m48t02, 8 = m48t08, 59 = m48t59 */
- uint32_t size;
-} M48txxInfo;
-
/*
* Chipset docs:
* http://www.st.com/stonline/products/literature/ds/2410/m48t02.pdf
@@ -77,40 +49,6 @@ typedef struct M48txxInfo {
* http://www.st.com/stonline/products/literature/od/7001/m48t59y.pdf
*/
-typedef struct M48t59State {
- /* Hardware parameters */
- qemu_irq IRQ;
- MemoryRegion iomem;
- uint32_t size;
- int32_t base_year;
- /* RTC management */
- time_t time_offset;
- time_t stop_time;
- /* Alarm & watchdog */
- struct tm alarm;
- QEMUTimer *alrm_timer;
- QEMUTimer *wd_timer;
- /* NVRAM storage */
- uint8_t *buffer;
- /* Model parameters */
- uint32_t model; /* 2 = m48t02, 8 = m48t08, 59 = m48t59 */
- /* NVRAM storage */
- uint16_t addr;
- uint8_t lock;
-} M48t59State;
-
-typedef struct M48txxISAState {
- ISADevice parent_obj;
- M48t59State state;
- uint32_t io_base;
- MemoryRegion io;
-} M48txxISAState;
-
-typedef struct M48txxISADeviceClass {
- ISADeviceClass parent_class;
- M48txxInfo info;
-} M48txxISADeviceClass;
-
typedef struct M48txxSysBusState {
SysBusDevice parent_obj;
M48t59State state;
@@ -122,21 +60,17 @@ typedef struct M48txxSysBusDeviceClass {
M48txxInfo info;
} M48txxSysBusDeviceClass;
-static M48txxInfo m48txx_info[] = {
+static M48txxInfo m48txx_sysbus_info[] = {
{
- .sysbus_name = "sysbus-m48t02",
+ .bus_name = "sysbus-m48t02",
.model = 2,
.size = 0x800,
},{
- .sysbus_name = "sysbus-m48t08",
+ .bus_name = "sysbus-m48t08",
.model = 8,
.size = 0x2000,
},{
- .sysbus_name = "sysbus-m48t59",
- .model = 59,
- .size = 0x2000,
- },{
- .isa_name = "isa-m48t59",
+ .bus_name = "sysbus-m48t59",
.model = 59,
.size = 0x2000,
}
@@ -248,7 +182,7 @@ static void set_up_watchdog(M48t59State *NVRAM, uint8_t value)
}
/* Direct access to NVRAM */
-static void m48t59_write(M48t59State *NVRAM, uint32_t addr, uint32_t val)
+void m48t59_write(M48t59State *NVRAM, uint32_t addr, uint32_t val)
{
struct tm tm;
int tmp;
@@ -413,7 +347,7 @@ static void m48t59_write(M48t59State *NVRAM, uint32_t addr, uint32_t val)
}
}
-static uint32_t m48t59_read(M48t59State *NVRAM, uint32_t addr)
+uint32_t m48t59_read(M48t59State *NVRAM, uint32_t addr)
{
struct tm tm;
uint32_t retval = 0xFF;
@@ -517,11 +451,6 @@ static uint32_t m48t59_read(M48t59State *NVRAM, uint32_t addr)
return retval;
}
-static void m48t59_toggle_lock(M48t59State *NVRAM, int lock)
-{
- NVRAM->lock ^= 1 << lock;
-}
-
/* IO access to NVRAM */
static void NVRAM_writeb(void *opaque, hwaddr addr, uint64_t val,
unsigned size)
@@ -634,12 +563,12 @@ static const VMStateDescription vmstate_m48t59 = {
.fields = (VMStateField[]) {
VMSTATE_UINT8(lock, M48t59State),
VMSTATE_UINT16(addr, M48t59State),
- VMSTATE_VBUFFER_UINT32(buffer, M48t59State, 0, NULL, 0, size),
+ VMSTATE_VBUFFER_UINT32(buffer, M48t59State, 0, NULL, size),
VMSTATE_END_OF_LIST()
}
};
-static void m48t59_reset_common(M48t59State *NVRAM)
+void m48t59_reset_common(M48t59State *NVRAM)
{
NVRAM->addr = 0;
NVRAM->lock = 0;
@@ -650,14 +579,6 @@ static void m48t59_reset_common(M48t59State *NVRAM)
timer_del(NVRAM->wd_timer);
}
-static void m48t59_reset_isa(DeviceState *d)
-{
- M48txxISAState *isa = M48TXX_ISA(d);
- M48t59State *NVRAM = &isa->state;
-
- m48t59_reset_common(NVRAM);
-}
-
static void m48t59_reset_sysbus(DeviceState *d)
{
M48txxSysBusState *sys = M48TXX_SYS_BUS(d);
@@ -666,7 +587,7 @@ static void m48t59_reset_sysbus(DeviceState *d)
m48t59_reset_common(NVRAM);
}
-static const MemoryRegionOps m48t59_io_ops = {
+const MemoryRegionOps m48t59_io_ops = {
.read = NVRAM_readb,
.write = NVRAM_writeb,
.impl = {
@@ -685,14 +606,13 @@ Nvram *m48t59_init(qemu_irq IRQ, hwaddr mem_base,
SysBusDevice *s;
int i;
- for (i = 0; i < ARRAY_SIZE(m48txx_info); i++) {
- if (!m48txx_info[i].sysbus_name ||
- m48txx_info[i].size != size ||
- m48txx_info[i].model != model) {
+ for (i = 0; i < ARRAY_SIZE(m48txx_sysbus_info); i++) {
+ if (m48txx_sysbus_info[i].size != size ||
+ m48txx_sysbus_info[i].model != model) {
continue;
}
- dev = qdev_create(NULL, m48txx_info[i].sysbus_name);
+ dev = qdev_create(NULL, m48txx_sysbus_info[i].bus_name);
qdev_prop_set_int32(dev, "base-year", base_year);
qdev_init_nofail(dev);
s = SYS_BUS_DEVICE(dev);
@@ -712,31 +632,7 @@ Nvram *m48t59_init(qemu_irq IRQ, hwaddr mem_base,
return NULL;
}
-Nvram *m48t59_init_isa(ISABus *bus, uint32_t io_base, uint16_t size,
- int base_year, int model)
-{
- DeviceState *dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(m48txx_info); i++) {
- if (!m48txx_info[i].isa_name ||
- m48txx_info[i].size != size ||
- m48txx_info[i].model != model) {
- continue;
- }
-
- dev = DEVICE(isa_create(bus, m48txx_info[i].isa_name));
- qdev_prop_set_uint32(dev, "iobase", io_base);
- qdev_prop_set_int32(dev, "base-year", base_year);
- qdev_init_nofail(dev);
- return NVRAM(dev);
- }
-
- assert(false);
- return NULL;
-}
-
-static void m48t59_realize_common(M48t59State *s, Error **errp)
+void m48t59_realize_common(M48t59State *s, Error **errp)
{
s->buffer = g_malloc0(s->size);
if (s->model == 59) {
@@ -748,23 +644,6 @@ static void m48t59_realize_common(M48t59State *s, Error **errp)
vmstate_register(NULL, -1, &vmstate_m48t59, s);
}
-static void m48t59_isa_realize(DeviceState *dev, Error **errp)
-{
- M48txxISADeviceClass *u = M48TXX_ISA_GET_CLASS(dev);
- ISADevice *isadev = ISA_DEVICE(dev);
- M48txxISAState *d = M48TXX_ISA(dev);
- M48t59State *s = &d->state;
-
- s->model = u->info.model;
- s->size = u->info.size;
- isa_init_irq(isadev, &s->IRQ, 8);
- m48t59_realize_common(s, errp);
- memory_region_init_io(&d->io, OBJECT(dev), &m48t59_io_ops, s, "m48t59", 4);
- if (d->io_base != 0) {
- isa_register_ioport(isadev, &d->io, d->io_base);
- }
-}
-
static int m48t59_init1(SysBusDevice *dev)
{
M48txxSysBusDeviceClass *u = M48TXX_SYS_BUS_GET_CLASS(dev);
@@ -791,51 +670,6 @@ static int m48t59_init1(SysBusDevice *dev)
return 0;
}
-static uint32_t m48txx_isa_read(Nvram *obj, uint32_t addr)
-{
- M48txxISAState *d = M48TXX_ISA(obj);
- return m48t59_read(&d->state, addr);
-}
-
-static void m48txx_isa_write(Nvram *obj, uint32_t addr, uint32_t val)
-{
- M48txxISAState *d = M48TXX_ISA(obj);
- m48t59_write(&d->state, addr, val);
-}
-
-static void m48txx_isa_toggle_lock(Nvram *obj, int lock)
-{
- M48txxISAState *d = M48TXX_ISA(obj);
- m48t59_toggle_lock(&d->state, lock);
-}
-
-static Property m48t59_isa_properties[] = {
- DEFINE_PROP_INT32("base-year", M48txxISAState, state.base_year, 0),
- DEFINE_PROP_UINT32("iobase", M48txxISAState, io_base, 0x74),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void m48txx_isa_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- NvramClass *nc = NVRAM_CLASS(klass);
-
- dc->realize = m48t59_isa_realize;
- dc->reset = m48t59_reset_isa;
- dc->props = m48t59_isa_properties;
- nc->read = m48txx_isa_read;
- nc->write = m48txx_isa_write;
- nc->toggle_lock = m48txx_isa_toggle_lock;
-}
-
-static void m48txx_isa_concrete_class_init(ObjectClass *klass, void *data)
-{
- M48txxISADeviceClass *u = M48TXX_ISA_CLASS(klass);
- M48txxInfo *info = data;
-
- u->info = *info;
-}
-
static uint32_t m48txx_sysbus_read(Nvram *obj, uint32_t addr)
{
M48txxSysBusState *d = M48TXX_SYS_BUS(obj);
@@ -899,18 +733,6 @@ static const TypeInfo m48txx_sysbus_type_info = {
}
};
-static const TypeInfo m48txx_isa_type_info = {
- .name = TYPE_M48TXX_ISA,
- .parent = TYPE_ISA_DEVICE,
- .instance_size = sizeof(M48txxISAState),
- .abstract = true,
- .class_init = m48txx_isa_class_init,
- .interfaces = (InterfaceInfo[]) {
- { TYPE_NVRAM },
- { }
- }
-};
-
static void m48t59_register_types(void)
{
TypeInfo sysbus_type_info = {
@@ -918,29 +740,15 @@ static void m48t59_register_types(void)
.class_size = sizeof(M48txxSysBusDeviceClass),
.class_init = m48txx_sysbus_concrete_class_init,
};
- TypeInfo isa_type_info = {
- .parent = TYPE_M48TXX_ISA,
- .class_size = sizeof(M48txxISADeviceClass),
- .class_init = m48txx_isa_concrete_class_init,
- };
int i;
type_register_static(&nvram_info);
type_register_static(&m48txx_sysbus_type_info);
- type_register_static(&m48txx_isa_type_info);
- for (i = 0; i < ARRAY_SIZE(m48txx_info); i++) {
- if (m48txx_info[i].sysbus_name) {
- sysbus_type_info.name = m48txx_info[i].sysbus_name;
- sysbus_type_info.class_data = &m48txx_info[i];
- type_register(&sysbus_type_info);
- }
-
- if (m48txx_info[i].isa_name) {
- isa_type_info.name = m48txx_info[i].isa_name;
- isa_type_info.class_data = &m48txx_info[i];
- type_register(&isa_type_info);
- }
+ for (i = 0; i < ARRAY_SIZE(m48txx_sysbus_info); i++) {
+ sysbus_type_info.name = m48txx_sysbus_info[i].bus_name;
+ sysbus_type_info.class_data = &m48txx_sysbus_info[i];
+ type_register(&sysbus_type_info);
}
}
diff --git a/hw/timer/mips_gictimer.c b/hw/timer/mips_gictimer.c
index 3698889475..f5c5806724 100644
--- a/hw/timer/mips_gictimer.c
+++ b/hw/timer/mips_gictimer.c
@@ -14,6 +14,11 @@
#define TIMER_PERIOD 10 /* 10 ns period for 100 MHz frequency */
+uint32_t mips_gictimer_get_freq(MIPSGICTimerState *gic)
+{
+ return NANOSECONDS_PER_SECOND / TIMER_PERIOD;
+}
+
static void gic_vptimer_update(MIPSGICTimerState *gictimer,
uint32_t vp_index, uint64_t now)
{
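The new helper follows directly from the fixed tick period: NANOSECONDS_PER_SECOND / TIMER_PERIOD = 1,000,000,000 / 10 = 100,000,000 counts per second, i.e. the 100 MHz rate noted in the TIMER_PERIOD comment.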
diff --git a/hw/usb/bus.c b/hw/usb/bus.c
index 1dcc35c8f8..efe4b8e1a6 100644
--- a/hw/usb/bus.c
+++ b/hw/usb/bus.c
@@ -136,11 +136,12 @@ USBDevice *usb_device_find_device(USBDevice *dev, uint8_t addr)
return NULL;
}
-static void usb_device_handle_destroy(USBDevice *dev)
+static void usb_device_unrealize(USBDevice *dev, Error **errp)
{
USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev);
- if (klass->handle_destroy) {
- klass->handle_destroy(dev);
+
+ if (klass->unrealize) {
+ klass->unrealize(dev, errp);
}
}
@@ -291,7 +292,7 @@ static void usb_qdev_unrealize(DeviceState *qdev, Error **errp)
if (dev->attached) {
usb_device_detach(dev);
}
- usb_device_handle_destroy(dev);
+ usb_device_unrealize(dev, errp);
if (dev->port) {
usb_release_port(dev);
}
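The hunks that follow repeat one mechanical conversion across the USB device models: the USBDeviceClass hook changes from handle_destroy(USBDevice *) to unrealize(USBDevice *, Error **errp). A sketch of the pattern, with a made-up device name:

    static void usb_foo_unrealize(USBDevice *dev, Error **errp)
    {
        /* release per-device resources here, exactly as handle_destroy() did */
    }

    static void usb_foo_class_init(ObjectClass *klass, void *data)
    {
        USBDeviceClass *uc = USB_DEVICE_CLASS(klass);

        uc->unrealize = usb_foo_unrealize;   /* was: uc->handle_destroy */
    }

The bodies stay the same; only the signature and the class field assignment change, plus the errp argument that usb_device_unrealize() now forwards.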
diff --git a/hw/usb/desc.c b/hw/usb/desc.c
index 7828e52c6f..c36bf30e4f 100644
--- a/hw/usb/desc.c
+++ b/hw/usb/desc.c
@@ -774,6 +774,13 @@ int usb_desc_handle_control(USBDevice *dev, USBPacket *p,
trace_usb_set_device_feature(dev->addr, value, ret);
break;
+ case DeviceOutRequest | USB_REQ_SET_SEL:
+ case DeviceOutRequest | USB_REQ_SET_ISOCH_DELAY:
+ if (dev->speed == USB_SPEED_SUPER) {
+ ret = 0;
+ }
+ break;
+
case InterfaceRequest | USB_REQ_GET_INTERFACE:
if (index < 0 || index >= dev->ninterfaces) {
break;
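SET_SEL and SET_ISOCH_DELAY are USB 3.0 standard device requests, which is why the new cases only acknowledge them (ret = 0) when the device runs at USB_SPEED_SUPER and otherwise fall through to the stall path.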
diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c
index 87cab0a3d1..343345235c 100644
--- a/hw/usb/dev-audio.c
+++ b/hw/usb/dev-audio.c
@@ -617,7 +617,7 @@ static void usb_audio_handle_data(USBDevice *dev, USBPacket *p)
}
}
-static void usb_audio_handle_destroy(USBDevice *dev)
+static void usb_audio_unrealize(USBDevice *dev, Error **errp)
{
USBAudioState *s = USB_AUDIO(dev);
@@ -683,7 +683,7 @@ static void usb_audio_class_init(ObjectClass *klass, void *data)
k->handle_reset = usb_audio_handle_reset;
k->handle_control = usb_audio_handle_control;
k->handle_data = usb_audio_handle_data;
- k->handle_destroy = usb_audio_handle_destroy;
+ k->unrealize = usb_audio_unrealize;
k->set_interface = usb_audio_set_interface;
}
diff --git a/hw/usb/dev-bluetooth.c b/hw/usb/dev-bluetooth.c
index 91a4a0b8b9..443e3c301d 100644
--- a/hw/usb/dev-bluetooth.c
+++ b/hw/usb/dev-bluetooth.c
@@ -496,7 +496,7 @@ static void usb_bt_out_hci_packet_acl(void *opaque,
usb_bt_fifo_enqueue(&s->acl, data, len);
}
-static void usb_bt_handle_destroy(USBDevice *dev)
+static void usb_bt_unrealize(USBDevice *dev, Error **errp)
{
struct USBBtState *s = (struct USBBtState *) dev->opaque;
@@ -559,7 +559,7 @@ static void usb_bt_class_initfn(ObjectClass *klass, void *data)
uc->handle_reset = usb_bt_handle_reset;
uc->handle_control = usb_bt_handle_control;
uc->handle_data = usb_bt_handle_data;
- uc->handle_destroy = usb_bt_handle_destroy;
+ uc->unrealize = usb_bt_unrealize;
dc->vmsd = &vmstate_usb_bt;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c
index 24d05f76f9..c40019df96 100644
--- a/hw/usb/dev-hid.c
+++ b/hw/usb/dev-hid.c
@@ -144,7 +144,7 @@ static const USBDescIface desc_iface_tablet = {
.bInterfaceNumber = 0,
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_HID,
- .bInterfaceProtocol = 0x02,
+ .bInterfaceProtocol = 0x00,
.ndesc = 1,
.descs = (USBDescOther[]) {
{
@@ -174,7 +174,7 @@ static const USBDescIface desc_iface_tablet2 = {
.bInterfaceNumber = 0,
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_HID,
- .bInterfaceProtocol = 0x02,
+ .bInterfaceProtocol = 0x00,
.ndesc = 1,
.descs = (USBDescOther[]) {
{
@@ -487,7 +487,7 @@ static const uint8_t qemu_mouse_hid_report_descriptor[] = {
static const uint8_t qemu_tablet_hid_report_descriptor[] = {
0x05, 0x01, /* Usage Page (Generic Desktop) */
- 0x09, 0x01, /* Usage (Pointer) */
+ 0x09, 0x02, /* Usage (Mouse) */
0xa1, 0x01, /* Collection (Application) */
0x09, 0x01, /* Usage (Pointer) */
0xa1, 0x00, /* Collection (Physical) */
@@ -690,7 +690,7 @@ static void usb_hid_handle_data(USBDevice *dev, USBPacket *p)
}
}
-static void usb_hid_handle_destroy(USBDevice *dev)
+static void usb_hid_unrealize(USBDevice *dev, Error **errp)
{
USBHIDState *us = USB_HID(dev);
@@ -785,7 +785,7 @@ static void usb_hid_class_initfn(ObjectClass *klass, void *data)
uc->handle_reset = usb_hid_handle_reset;
uc->handle_control = usb_hid_handle_control;
uc->handle_data = usb_hid_handle_data;
- uc->handle_destroy = usb_hid_handle_destroy;
+ uc->unrealize = usb_hid_unrealize;
uc->handle_attach = usb_desc_attach;
}
diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c
index a33f21cb38..9fe7333946 100644
--- a/hw/usb/dev-hub.c
+++ b/hw/usb/dev-hub.c
@@ -497,7 +497,7 @@ static void usb_hub_handle_data(USBDevice *dev, USBPacket *p)
}
}
-static void usb_hub_handle_destroy(USBDevice *dev)
+static void usb_hub_unrealize(USBDevice *dev, Error **errp)
{
USBHubState *s = (USBHubState *)dev;
int i;
@@ -575,7 +575,7 @@ static void usb_hub_class_initfn(ObjectClass *klass, void *data)
uc->handle_reset = usb_hub_handle_reset;
uc->handle_control = usb_hub_handle_control;
uc->handle_data = usb_hub_handle_data;
- uc->handle_destroy = usb_hub_handle_destroy;
+ uc->unrealize = usb_hub_unrealize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "hub";
dc->vmsd = &vmstate_usb_hub;
diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c
index c0f1193ba9..85fc81bf43 100644
--- a/hw/usb/dev-network.c
+++ b/hw/usb/dev-network.c
@@ -1324,7 +1324,7 @@ static void usbnet_cleanup(NetClientState *nc)
s->nic = NULL;
}
-static void usb_net_handle_destroy(USBDevice *dev)
+static void usb_net_unrealize(USBDevice *dev, Error **errp)
{
USBNetState *s = (USBNetState *) dev;
@@ -1428,7 +1428,7 @@ static void usb_net_class_initfn(ObjectClass *klass, void *data)
uc->handle_reset = usb_net_handle_reset;
uc->handle_control = usb_net_handle_control;
uc->handle_data = usb_net_handle_data;
- uc->handle_destroy = usb_net_handle_destroy;
+ uc->unrealize = usb_net_unrealize;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->fw_name = "network";
dc->vmsd = &vmstate_usb_net;
diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c
index 89e11b68c4..757b8b3f5a 100644
--- a/hw/usb/dev-smartcard-reader.c
+++ b/hw/usb/dev-smartcard-reader.c
@@ -967,7 +967,7 @@ static void ccid_on_apdu_from_guest(USBCCIDState *s, CCID_XferBlock *recv)
DPRINTF(s, 1, "%s: seq %d, len %d\n", __func__,
recv->hdr.bSeq, len);
ccid_add_pending_answer(s, (CCID_Header *)recv);
- if (s->card) {
+ if (s->card && len <= BULK_OUT_DATA_SIZE) {
ccid_card_apdu_from_guest(s->card, recv->abData, len);
} else {
DPRINTF(s, D_WARN, "warning: discarded apdu\n");
@@ -1001,80 +1001,92 @@ static void ccid_handle_bulk_out(USBCCIDState *s, USBPacket *p)
CCID_Header *ccid_header;
if (p->iov.size + s->bulk_out_pos > BULK_OUT_DATA_SIZE) {
- p->status = USB_RET_STALL;
- return;
+ goto err;
}
- ccid_header = (CCID_Header *)s->bulk_out_data;
usb_packet_copy(p, s->bulk_out_data + s->bulk_out_pos, p->iov.size);
s->bulk_out_pos += p->iov.size;
- if (p->iov.size == CCID_MAX_PACKET_SIZE) {
+ if (s->bulk_out_pos < 10) {
+ DPRINTF(s, 1, "%s: header incomplete\n", __func__);
+ goto err;
+ }
+
+ ccid_header = (CCID_Header *)s->bulk_out_data;
+ if ((s->bulk_out_pos - 10 < ccid_header->dwLength) &&
+ (p->iov.size == CCID_MAX_PACKET_SIZE)) {
DPRINTF(s, D_VERBOSE,
- "usb-ccid: bulk_in: expecting more packets (%zd/%d)\n",
- p->iov.size, ccid_header->dwLength);
+ "usb-ccid: bulk_in: expecting more packets (%d/%d)\n",
+ s->bulk_out_pos - 10, ccid_header->dwLength);
return;
}
- if (s->bulk_out_pos < 10) {
+ if (s->bulk_out_pos - 10 != ccid_header->dwLength) {
DPRINTF(s, 1,
- "%s: bad USB_TOKEN_OUT length, should be at least 10 bytes\n",
- __func__);
- } else {
- DPRINTF(s, D_MORE_INFO, "%s %x %s\n", __func__,
- ccid_header->bMessageType,
- ccid_message_type_to_str(ccid_header->bMessageType));
- switch (ccid_header->bMessageType) {
- case CCID_MESSAGE_TYPE_PC_to_RDR_GetSlotStatus:
- ccid_write_slot_status(s, ccid_header);
- break;
- case CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOn:
- DPRINTF(s, 1, "%s: PowerOn: %d\n", __func__,
+ "usb-ccid: bulk_in: message size mismatch (got %d, expected %d)\n",
+ s->bulk_out_pos - 10, ccid_header->dwLength);
+ goto err;
+ }
+
+ DPRINTF(s, D_MORE_INFO, "%s %x %s\n", __func__,
+ ccid_header->bMessageType,
+ ccid_message_type_to_str(ccid_header->bMessageType));
+ switch (ccid_header->bMessageType) {
+ case CCID_MESSAGE_TYPE_PC_to_RDR_GetSlotStatus:
+ ccid_write_slot_status(s, ccid_header);
+ break;
+ case CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOn:
+ DPRINTF(s, 1, "%s: PowerOn: %d\n", __func__,
((CCID_IccPowerOn *)(ccid_header))->bPowerSelect);
- s->powered = true;
- if (!ccid_card_inserted(s)) {
- ccid_report_error_failed(s, ERROR_ICC_MUTE);
- }
- /* atr is written regardless of error. */
- ccid_write_data_block_atr(s, ccid_header);
- break;
- case CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOff:
- ccid_reset_error_status(s);
- s->powered = false;
- ccid_write_slot_status(s, ccid_header);
- break;
- case CCID_MESSAGE_TYPE_PC_to_RDR_XfrBlock:
- ccid_on_apdu_from_guest(s, (CCID_XferBlock *)s->bulk_out_data);
- break;
- case CCID_MESSAGE_TYPE_PC_to_RDR_SetParameters:
- ccid_reset_error_status(s);
- ccid_set_parameters(s, ccid_header);
- ccid_write_parameters(s, ccid_header);
- break;
- case CCID_MESSAGE_TYPE_PC_to_RDR_ResetParameters:
- ccid_reset_error_status(s);
- ccid_reset_parameters(s);
- ccid_write_parameters(s, ccid_header);
- break;
- case CCID_MESSAGE_TYPE_PC_to_RDR_GetParameters:
- ccid_reset_error_status(s);
- ccid_write_parameters(s, ccid_header);
- break;
- case CCID_MESSAGE_TYPE_PC_to_RDR_Mechanical:
- ccid_report_error_failed(s, 0);
- ccid_write_slot_status(s, ccid_header);
- break;
- default:
- DPRINTF(s, 1,
+ s->powered = true;
+ if (!ccid_card_inserted(s)) {
+ ccid_report_error_failed(s, ERROR_ICC_MUTE);
+ }
+ /* atr is written regardless of error. */
+ ccid_write_data_block_atr(s, ccid_header);
+ break;
+ case CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOff:
+ ccid_reset_error_status(s);
+ s->powered = false;
+ ccid_write_slot_status(s, ccid_header);
+ break;
+ case CCID_MESSAGE_TYPE_PC_to_RDR_XfrBlock:
+ ccid_on_apdu_from_guest(s, (CCID_XferBlock *)s->bulk_out_data);
+ break;
+ case CCID_MESSAGE_TYPE_PC_to_RDR_SetParameters:
+ ccid_reset_error_status(s);
+ ccid_set_parameters(s, ccid_header);
+ ccid_write_parameters(s, ccid_header);
+ break;
+ case CCID_MESSAGE_TYPE_PC_to_RDR_ResetParameters:
+ ccid_reset_error_status(s);
+ ccid_reset_parameters(s);
+ ccid_write_parameters(s, ccid_header);
+ break;
+ case CCID_MESSAGE_TYPE_PC_to_RDR_GetParameters:
+ ccid_reset_error_status(s);
+ ccid_write_parameters(s, ccid_header);
+ break;
+ case CCID_MESSAGE_TYPE_PC_to_RDR_Mechanical:
+ ccid_report_error_failed(s, 0);
+ ccid_write_slot_status(s, ccid_header);
+ break;
+ default:
+ DPRINTF(s, 1,
"handle_data: ERROR: unhandled message type %Xh\n",
ccid_header->bMessageType);
- /*
- * The caller is expecting the device to respond, tell it we
- * don't support the operation.
- */
- ccid_report_error_failed(s, ERROR_CMD_NOT_SUPPORTED);
- ccid_write_slot_status(s, ccid_header);
- break;
- }
+ /*
+ * The caller is expecting the device to respond, tell it we
+ * don't support the operation.
+ */
+ ccid_report_error_failed(s, ERROR_CMD_NOT_SUPPORTED);
+ ccid_write_slot_status(s, ccid_header);
+ break;
}
s->bulk_out_pos = 0;
+ return;
+
+err:
+ p->status = USB_RET_STALL;
+ s->bulk_out_pos = 0;
+ return;
}
static void ccid_bulk_in_copy_to_guest(USBCCIDState *s, USBPacket *p)
@@ -1151,7 +1163,7 @@ static void ccid_handle_data(USBDevice *dev, USBPacket *p)
}
}
-static void ccid_handle_destroy(USBDevice *dev)
+static void ccid_unrealize(USBDevice *dev, Error **errp)
{
USBCCIDState *s = USB_CCID_DEV(dev);
@@ -1458,7 +1470,7 @@ static void ccid_class_initfn(ObjectClass *klass, void *data)
uc->handle_reset = ccid_handle_reset;
uc->handle_control = ccid_handle_control;
uc->handle_data = ccid_handle_data;
- uc->handle_destroy = ccid_handle_destroy;
+ uc->unrealize = ccid_unrealize;
dc->desc = "CCID Rev 1.1 smartcard reader";
dc->vmsd = &ccid_vmstate;
dc->props = ccid_properties;
diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c
index 3a8ff18b1b..3b26655889 100644
--- a/hw/usb/dev-uas.c
+++ b/hw/usb/dev-uas.c
@@ -653,7 +653,8 @@ static void usb_uas_handle_control(USBDevice *dev, USBPacket *p,
if (ret >= 0) {
return;
}
- error_report("%s: unhandled control request", __func__);
+ error_report("%s: unhandled control request (req 0x%x, val 0x%x, idx 0x%x)",
+ __func__, request, value, index);
p->status = USB_RET_STALL;
}
@@ -890,7 +891,7 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p)
}
}
-static void usb_uas_handle_destroy(USBDevice *dev)
+static void usb_uas_unrealize(USBDevice *dev, Error **errp)
{
UASDevice *uas = USB_UAS(dev);
@@ -943,7 +944,7 @@ static void usb_uas_class_initfn(ObjectClass *klass, void *data)
uc->handle_reset = usb_uas_handle_reset;
uc->handle_control = usb_uas_handle_control;
uc->handle_data = usb_uas_handle_data;
- uc->handle_destroy = usb_uas_handle_destroy;
+ uc->unrealize = usb_uas_unrealize;
uc->attached_settable = true;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->fw_name = "storage";
diff --git a/hw/usb/dev-wacom.c b/hw/usb/dev-wacom.c
index c4702dbba0..bf70013059 100644
--- a/hw/usb/dev-wacom.c
+++ b/hw/usb/dev-wacom.c
@@ -329,7 +329,7 @@ static void usb_wacom_handle_data(USBDevice *dev, USBPacket *p)
}
}
-static void usb_wacom_handle_destroy(USBDevice *dev)
+static void usb_wacom_unrealize(USBDevice *dev, Error **errp)
{
USBWacomState *s = (USBWacomState *) dev;
@@ -364,7 +364,7 @@ static void usb_wacom_class_init(ObjectClass *klass, void *data)
uc->handle_reset = usb_wacom_handle_reset;
uc->handle_control = usb_wacom_handle_control;
uc->handle_data = usb_wacom_handle_data;
- uc->handle_destroy = usb_wacom_handle_destroy;
+ uc->unrealize = usb_wacom_unrealize;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
dc->desc = "QEMU PenPartner Tablet";
dc->vmsd = &vmstate_usb_wacom;
diff --git a/hw/usb/hcd-ehci-pci.c b/hw/usb/hcd-ehci-pci.c
index 56577051e2..6dedcb8989 100644
--- a/hw/usb/hcd-ehci-pci.c
+++ b/hw/usb/hcd-ehci-pci.c
@@ -89,6 +89,14 @@ static void usb_ehci_pci_init(Object *obj)
usb_ehci_init(s, DEVICE(obj));
}
+static void usb_ehci_pci_finalize(Object *obj)
+{
+ EHCIPCIState *i = PCI_EHCI(obj);
+ EHCIState *s = &i->ehci;
+
+ usb_ehci_finalize(s);
+}
+
static void usb_ehci_pci_exit(PCIDevice *dev)
{
EHCIPCIState *i = PCI_EHCI(dev);
@@ -159,6 +167,7 @@ static const TypeInfo ehci_pci_type_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(EHCIPCIState),
.instance_init = usb_ehci_pci_init,
+ .instance_finalize = usb_ehci_pci_finalize,
.abstract = true,
.class_init = ehci_class_init,
};
diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c
index 7622a3ae72..50ef817f93 100644
--- a/hw/usb/hcd-ehci.c
+++ b/hw/usb/hcd-ehci.c
@@ -2545,6 +2545,11 @@ void usb_ehci_init(EHCIState *s, DeviceState *dev)
&s->mem_ports);
}
+void usb_ehci_finalize(EHCIState *s)
+{
+ usb_packet_cleanup(&s->ipacket);
+}
+
/*
* vim: expandtab ts=4
*/
diff --git a/hw/usb/hcd-ehci.h b/hw/usb/hcd-ehci.h
index 3fd7038658..938d8aa284 100644
--- a/hw/usb/hcd-ehci.h
+++ b/hw/usb/hcd-ehci.h
@@ -323,6 +323,7 @@ struct EHCIState {
extern const VMStateDescription vmstate_ehci;
void usb_ehci_init(EHCIState *s, DeviceState *dev);
+void usb_ehci_finalize(EHCIState *s);
void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp);
void usb_ehci_unrealize(EHCIState *s, DeviceState *dev, Error **errp);
void ehci_reset(void *opaque);
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index c82a92fff7..fe8406ac64 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -42,6 +42,8 @@
#define OHCI_MAX_PORTS 15
+#define ED_LINK_LIMIT 4
+
static int64_t usb_frame_time;
static int64_t usb_bit_time;
@@ -725,7 +727,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
@@ -999,7 +1001,7 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
if (ohci_read_td(ohci, addr, &td)) {
trace_usb_ohci_td_read_error(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
dir = OHCI_BM(ed->flags, ED_D);
@@ -1184,7 +1186,7 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion)
uint32_t next_ed;
uint32_t cur;
int active;
-
+ uint32_t link_cnt = 0;
active = 0;
if (head == 0)
@@ -1199,6 +1201,11 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion)
next_ed = ed.next & OHCI_DPTR_MASK;
+ if (++link_cnt > ED_LINK_LIMIT) {
+ ohci_die(ohci);
+ return 0;
+ }
+
if ((ed.head & OHCI_ED_H) || (ed.flags & OHCI_ED_K)) {
uint32_t addr;
/* Cancel pending packets for ED that have been paused. */
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index f8106789d8..f0af852709 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -49,11 +49,10 @@
/* Very pessimistic, let's hope it's enough for all cases */
#define EV_QUEUE (((3 * 24) + 16) * MAXSLOTS)
-/* Do not deliver ER Full events. NEC's driver does some things not bound
- * to the specs when it gets them */
-#define ER_FULL_HACK
#define TRB_LINK_LIMIT 4
+#define COMMAND_LIMIT 256
+#define TRANSFER_LIMIT 256
#define LEN_CAP 0x40
#define LEN_OPER (0x400 + 0x10 * MAXPORTS)
@@ -199,7 +198,6 @@ typedef enum TRBType {
ER_DEVICE_NOTIFICATION,
ER_MFINDEX_WRAP,
/* vendor specific bits */
- CR_VENDOR_VIA_CHALLENGE_RESPONSE = 48,
CR_VENDOR_NEC_FIRMWARE_REVISION = 49,
CR_VENDOR_NEC_CHALLENGE_RESPONSE = 50,
} TRBType;
@@ -390,6 +388,7 @@ struct XHCIEPContext {
dma_addr_t pctx;
unsigned int max_psize;
uint32_t state;
+ uint32_t kick_active;
/* streams */
unsigned int max_pstreams;
@@ -430,12 +429,14 @@ typedef struct XHCIInterrupter {
uint32_t erdp_low;
uint32_t erdp_high;
- bool msix_used, er_pcs, er_full;
+ bool msix_used, er_pcs;
dma_addr_t er_start;
uint32_t er_size;
unsigned int er_ep_idx;
+ /* kept for live migration compat only */
+ bool er_full_unused;
XHCIEvent ev_buffer[EV_QUEUE];
unsigned int ev_buffer_put;
unsigned int ev_buffer_get;
@@ -485,9 +486,13 @@ struct XHCIState {
XHCIInterrupter intr[MAXINTRS];
XHCIRing cmd_ring;
+
+ bool nec_quirks;
};
-#define TYPE_XHCI "nec-usb-xhci"
+#define TYPE_XHCI "base-xhci"
+#define TYPE_NEC_XHCI "nec-usb-xhci"
+#define TYPE_QEMU_XHCI "qemu-xhci"
#define XHCI(obj) \
OBJECT_CHECK(XHCIState, (obj), TYPE_XHCI)
@@ -548,7 +553,6 @@ static const char *TRBType_names[] = {
[ER_HOST_CONTROLLER] = "ER_HOST_CONTROLLER",
[ER_DEVICE_NOTIFICATION] = "ER_DEVICE_NOTIFICATION",
[ER_MFINDEX_WRAP] = "ER_MFINDEX_WRAP",
- [CR_VENDOR_VIA_CHALLENGE_RESPONSE] = "CR_VENDOR_VIA_CHALLENGE_RESPONSE",
[CR_VENDOR_NEC_FIRMWARE_REVISION] = "CR_VENDOR_NEC_FIRMWARE_REVISION",
[CR_VENDOR_NEC_CHALLENGE_RESPONSE] = "CR_VENDOR_NEC_CHALLENGE_RESPONSE",
};
@@ -631,6 +635,11 @@ static bool xhci_get_flag(XHCIState *xhci, enum xhci_flags bit)
return xhci->flags & (1 << bit);
}
+static void xhci_set_flag(XHCIState *xhci, enum xhci_flags bit)
+{
+ xhci->flags |= (1 << bit);
+}
+
static uint64_t xhci_mfindex_get(XHCIState *xhci)
{
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
@@ -788,11 +797,15 @@ static void xhci_msix_update(XHCIState *xhci, int v)
static void xhci_intr_raise(XHCIState *xhci, int v)
{
PCIDevice *pci_dev = PCI_DEVICE(xhci);
+ bool pending = (xhci->intr[v].erdp_low & ERDP_EHB);
xhci->intr[v].erdp_low |= ERDP_EHB;
xhci->intr[v].iman |= IMAN_IP;
xhci->usbsts |= USBSTS_EINT;
+ if (pending) {
+ return;
+ }
if (!(xhci->intr[v].iman & IMAN_IE)) {
return;
}
@@ -821,7 +834,7 @@ static void xhci_intr_raise(XHCIState *xhci, int v)
static inline int xhci_running(XHCIState *xhci)
{
- return !(xhci->usbsts & USBSTS_HCH) && !xhci->intr[0].er_full;
+ return !(xhci->usbsts & USBSTS_HCH);
}
static void xhci_die(XHCIState *xhci)
@@ -860,74 +873,6 @@ static void xhci_write_event(XHCIState *xhci, XHCIEvent *event, int v)
}
}
-static void xhci_events_update(XHCIState *xhci, int v)
-{
- XHCIInterrupter *intr = &xhci->intr[v];
- dma_addr_t erdp;
- unsigned int dp_idx;
- bool do_irq = 0;
-
- if (xhci->usbsts & USBSTS_HCH) {
- return;
- }
-
- erdp = xhci_addr64(intr->erdp_low, intr->erdp_high);
- if (erdp < intr->er_start ||
- erdp >= (intr->er_start + TRB_SIZE*intr->er_size)) {
- DPRINTF("xhci: ERDP out of bounds: "DMA_ADDR_FMT"\n", erdp);
- DPRINTF("xhci: ER[%d] at "DMA_ADDR_FMT" len %d\n",
- v, intr->er_start, intr->er_size);
- xhci_die(xhci);
- return;
- }
- dp_idx = (erdp - intr->er_start) / TRB_SIZE;
- assert(dp_idx < intr->er_size);
-
- /* NEC didn't read section 4.9.4 of the spec (v1.0 p139 top Note) and thus
- * deadlocks when the ER is full. Hack it by holding off events until
- * the driver decides to free at least half of the ring */
- if (intr->er_full) {
- int er_free = dp_idx - intr->er_ep_idx;
- if (er_free <= 0) {
- er_free += intr->er_size;
- }
- if (er_free < (intr->er_size/2)) {
- DPRINTF("xhci_events_update(): event ring still "
- "more than half full (hack)\n");
- return;
- }
- }
-
- while (intr->ev_buffer_put != intr->ev_buffer_get) {
- assert(intr->er_full);
- if (((intr->er_ep_idx+1) % intr->er_size) == dp_idx) {
- DPRINTF("xhci_events_update(): event ring full again\n");
-#ifndef ER_FULL_HACK
- XHCIEvent full = {ER_HOST_CONTROLLER, CC_EVENT_RING_FULL_ERROR};
- xhci_write_event(xhci, &full, v);
-#endif
- do_irq = 1;
- break;
- }
- XHCIEvent *event = &intr->ev_buffer[intr->ev_buffer_get];
- xhci_write_event(xhci, event, v);
- intr->ev_buffer_get++;
- do_irq = 1;
- if (intr->ev_buffer_get == EV_QUEUE) {
- intr->ev_buffer_get = 0;
- }
- }
-
- if (do_irq) {
- xhci_intr_raise(xhci, v);
- }
-
- if (intr->er_full && intr->ev_buffer_put == intr->ev_buffer_get) {
- DPRINTF("xhci_events_update(): event ring no longer full\n");
- intr->er_full = 0;
- }
-}
-
static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v)
{
XHCIInterrupter *intr;
@@ -940,19 +885,6 @@ static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v)
}
intr = &xhci->intr[v];
- if (intr->er_full) {
- DPRINTF("xhci_event(): ER full, queueing\n");
- if (((intr->ev_buffer_put+1) % EV_QUEUE) == intr->ev_buffer_get) {
- DPRINTF("xhci: event queue full, dropping event!\n");
- return;
- }
- intr->ev_buffer[intr->ev_buffer_put++] = *event;
- if (intr->ev_buffer_put == EV_QUEUE) {
- intr->ev_buffer_put = 0;
- }
- return;
- }
-
erdp = xhci_addr64(intr->erdp_low, intr->erdp_high);
if (erdp < intr->er_start ||
erdp >= (intr->er_start + TRB_SIZE*intr->er_size)) {
@@ -966,21 +898,12 @@ static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v)
dp_idx = (erdp - intr->er_start) / TRB_SIZE;
assert(dp_idx < intr->er_size);
- if ((intr->er_ep_idx+1) % intr->er_size == dp_idx) {
- DPRINTF("xhci_event(): ER full, queueing\n");
-#ifndef ER_FULL_HACK
+ if ((intr->er_ep_idx + 2) % intr->er_size == dp_idx) {
+ DPRINTF("xhci: ER %d full, send ring full error\n", v);
XHCIEvent full = {ER_HOST_CONTROLLER, CC_EVENT_RING_FULL_ERROR};
- xhci_write_event(xhci, &full);
-#endif
- intr->er_full = 1;
- if (((intr->ev_buffer_put+1) % EV_QUEUE) == intr->ev_buffer_get) {
- DPRINTF("xhci: event queue full, dropping event!\n");
- return;
- }
- intr->ev_buffer[intr->ev_buffer_put++] = *event;
- if (intr->ev_buffer_put == EV_QUEUE) {
- intr->ev_buffer_put = 0;
- }
+ xhci_write_event(xhci, &full, v);
+ } else if ((intr->er_ep_idx + 1) % intr->er_size == dp_idx) {
+ DPRINTF("xhci: ER %d full, drop event\n", v);
} else {
xhci_write_event(xhci, event, v);
}
@@ -1027,6 +950,7 @@ static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb,
return type;
} else {
if (++link_cnt > TRB_LINK_LIMIT) {
+ trace_usb_xhci_enforced_limit("trb-link");
return 0;
}
ring->dequeue = xhci_mask64(trb->parameter);
@@ -1119,7 +1043,6 @@ static void xhci_er_reset(XHCIState *xhci, int v)
intr->er_ep_idx = 0;
intr->er_pcs = 1;
- intr->er_full = 0;
DPRINTF("xhci: event ring[%d]:" DMA_ADDR_FMT " [%d]\n",
v, intr->er_start, intr->er_size);
@@ -1897,7 +1820,7 @@ static int xhci_setup_packet(XHCITransfer *xfer)
return 0;
}
-static int xhci_complete_packet(XHCITransfer *xfer)
+static int xhci_try_complete_packet(XHCITransfer *xfer)
{
if (xfer->packet.status == USB_RET_ASYNC) {
trace_usb_xhci_xfer_async(xfer);
@@ -2001,11 +1924,7 @@ static int xhci_fire_ctl_transfer(XHCIState *xhci, XHCITransfer *xfer)
xfer->packet.parameter = trb_setup->parameter;
usb_handle_packet(xfer->packet.ep->dev, &xfer->packet);
-
- xhci_complete_packet(xfer);
- if (!xfer->running_async && !xfer->running_retry) {
- xhci_kick_epctx(xfer->epctx, 0);
- }
+ xhci_try_complete_packet(xfer);
return 0;
}
@@ -2105,11 +2024,7 @@ static int xhci_submit(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext *epctx
return -1;
}
usb_handle_packet(xfer->packet.ep->dev, &xfer->packet);
-
- xhci_complete_packet(xfer);
- if (!xfer->running_async && !xfer->running_retry) {
- xhci_kick_epctx(xfer->epctx, xfer->streamid);
- }
+ xhci_try_complete_packet(xfer);
return 0;
}
@@ -2139,6 +2054,9 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
return;
}
+ if (epctx->kick_active) {
+ return;
+ }
xhci_kick_epctx(epctx, streamid);
}
@@ -2150,10 +2068,12 @@ static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid)
XHCIRing *ring;
USBEndpoint *ep = NULL;
uint64_t mfindex;
+ unsigned int count = 0;
int length;
int i;
trace_usb_xhci_ep_kick(epctx->slotid, epctx->epid, streamid);
+ assert(!epctx->kick_active);
/* If the device has been detached, but the guest has not noticed this
yet the 2 above checks will succeed, but we must NOT continue */
@@ -2185,7 +2105,7 @@ static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid)
}
usb_handle_packet(xfer->packet.ep->dev, &xfer->packet);
assert(xfer->packet.status != USB_RET_NAK);
- xhci_complete_packet(xfer);
+ xhci_try_complete_packet(xfer);
} else {
/* retry nak'ed transfer */
if (xhci_setup_packet(xfer) < 0) {
@@ -2195,10 +2115,12 @@ static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid)
if (xfer->packet.status == USB_RET_NAK) {
return;
}
- xhci_complete_packet(xfer);
+ xhci_try_complete_packet(xfer);
}
assert(!xfer->running_retry);
- xhci_ep_free_xfer(epctx->retry);
+ if (xfer->complete) {
+ xhci_ep_free_xfer(epctx->retry);
+ }
epctx->retry = NULL;
}
@@ -2223,6 +2145,7 @@ static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid)
}
assert(ring->dequeue != 0);
+ epctx->kick_active++;
while (1) {
length = xhci_ring_chain_length(xhci, ring);
if (length <= 0) {
@@ -2258,7 +2181,12 @@ static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid)
epctx->retry = xfer;
break;
}
+ if (count++ > TRANSFER_LIMIT) {
+ trace_usb_xhci_enforced_limit("transfers");
+ break;
+ }
}
+ epctx->kick_active--;
ep = xhci_epid_to_usbep(epctx);
if (ep) {
@@ -2697,39 +2625,13 @@ static uint32_t xhci_nec_challenge(uint32_t hi, uint32_t lo)
return ~val;
}
-static void xhci_via_challenge(XHCIState *xhci, uint64_t addr)
-{
- PCIDevice *pci_dev = PCI_DEVICE(xhci);
- uint32_t buf[8];
- uint32_t obuf[8];
- dma_addr_t paddr = xhci_mask64(addr);
-
- pci_dma_read(pci_dev, paddr, &buf, 32);
-
- memcpy(obuf, buf, sizeof(obuf));
-
- if ((buf[0] & 0xff) == 2) {
- obuf[0] = 0x49932000 + 0x54dc200 * buf[2] + 0x7429b578 * buf[3];
- obuf[0] |= (buf[2] * buf[3]) & 0xff;
- obuf[1] = 0x0132bb37 + 0xe89 * buf[2] + 0xf09 * buf[3];
- obuf[2] = 0x0066c2e9 + 0x2091 * buf[2] + 0x19bd * buf[3];
- obuf[3] = 0xd5281342 + 0x2cc9691 * buf[2] + 0x2367662 * buf[3];
- obuf[4] = 0x0123c75c + 0x1595 * buf[2] + 0x19ec * buf[3];
- obuf[5] = 0x00f695de + 0x26fd * buf[2] + 0x3e9 * buf[3];
- obuf[6] = obuf[2] ^ obuf[3] ^ 0x29472956;
- obuf[7] = obuf[2] ^ obuf[3] ^ 0x65866593;
- }
-
- pci_dma_write(pci_dev, paddr, &obuf, 32);
-}
-
static void xhci_process_commands(XHCIState *xhci)
{
XHCITRB trb;
TRBType type;
XHCIEvent event = {ER_COMMAND_COMPLETE, CC_SUCCESS};
dma_addr_t addr;
- unsigned int i, slotid = 0;
+ unsigned int i, slotid = 0, count = 0;
DPRINTF("xhci_process_commands()\n");
if (!xhci_running(xhci)) {
@@ -2818,24 +2720,27 @@ static void xhci_process_commands(XHCIState *xhci)
case CR_GET_PORT_BANDWIDTH:
event.ccode = xhci_get_port_bandwidth(xhci, trb.parameter);
break;
- case CR_VENDOR_VIA_CHALLENGE_RESPONSE:
- xhci_via_challenge(xhci, trb.parameter);
- break;
case CR_VENDOR_NEC_FIRMWARE_REVISION:
- event.type = 48; /* NEC reply */
- event.length = 0x3025;
+ if (xhci->nec_quirks) {
+ event.type = 48; /* NEC reply */
+ event.length = 0x3025;
+ } else {
+ event.ccode = CC_TRB_ERROR;
+ }
break;
case CR_VENDOR_NEC_CHALLENGE_RESPONSE:
- {
- uint32_t chi = trb.parameter >> 32;
- uint32_t clo = trb.parameter;
- uint32_t val = xhci_nec_challenge(chi, clo);
- event.length = val & 0xFFFF;
- event.epid = val >> 16;
- slotid = val >> 24;
- event.type = 48; /* NEC reply */
- }
- break;
+ if (xhci->nec_quirks) {
+ uint32_t chi = trb.parameter >> 32;
+ uint32_t clo = trb.parameter;
+ uint32_t val = xhci_nec_challenge(chi, clo);
+ event.length = val & 0xFFFF;
+ event.epid = val >> 16;
+ slotid = val >> 24;
+ event.type = 48; /* NEC reply */
+ } else {
+ event.ccode = CC_TRB_ERROR;
+ }
+ break;
default:
trace_usb_xhci_unimplemented("command", type);
event.ccode = CC_TRB_ERROR;
@@ -2843,6 +2748,11 @@ static void xhci_process_commands(XHCIState *xhci)
}
event.slotid = slotid;
xhci_event(xhci, &event, 0);
+
+ if (count++ > COMMAND_LIMIT) {
+ trace_usb_xhci_enforced_limit("commands");
+ return;
+ }
}
}
@@ -2973,7 +2883,6 @@ static void xhci_reset(DeviceState *dev)
xhci->intr[i].er_ep_idx = 0;
xhci->intr[i].er_pcs = 1;
- xhci->intr[i].er_full = 0;
xhci->intr[i].ev_buffer_put = 0;
xhci->intr[i].ev_buffer_get = 0;
}
@@ -3338,9 +3247,12 @@ static void xhci_runtime_write(void *ptr, hwaddr reg,
intr->erstsz = val & 0xffff;
break;
case 0x10: /* ERSTBA low */
- /* XXX NEC driver bug: it doesn't align this to 64 bytes
- intr->erstba_low = val & 0xffffffc0; */
- intr->erstba_low = val & 0xfffffff0;
+ if (xhci->nec_quirks) {
+ /* NEC driver bug: it doesn't align this to 64 bytes */
+ intr->erstba_low = val & 0xfffffff0;
+ } else {
+ intr->erstba_low = val & 0xffffffc0;
+ }
break;
case 0x14: /* ERSTBA high */
intr->erstba_high = val;
@@ -3351,10 +3263,18 @@ static void xhci_runtime_write(void *ptr, hwaddr reg,
intr->erdp_low &= ~ERDP_EHB;
}
intr->erdp_low = (val & ~ERDP_EHB) | (intr->erdp_low & ERDP_EHB);
+ if (val & ERDP_EHB) {
+ dma_addr_t erdp = xhci_addr64(intr->erdp_low, intr->erdp_high);
+ unsigned int dp_idx = (erdp - intr->er_start) / TRB_SIZE;
+ if (erdp >= intr->er_start &&
+ erdp < (intr->er_start + TRB_SIZE * intr->er_size) &&
+ dp_idx != intr->er_ep_idx) {
+ xhci_intr_raise(xhci, v);
+ }
+ }
break;
case 0x1c: /* ERDP high */
intr->erdp_high = val;
- xhci_events_update(xhci, v);
break;
default:
trace_usb_xhci_unimplemented("oper write", reg);
@@ -3490,7 +3410,7 @@ static void xhci_complete(USBPort *port, USBPacket *packet)
xhci_ep_nuke_one_xfer(xfer, 0);
return;
}
- xhci_complete_packet(xfer);
+ xhci_try_complete_packet(xfer);
xhci_kick_epctx(xfer->epctx, xfer->streamid);
if (xfer->complete) {
xhci_ep_free_xfer(xfer);
@@ -3627,6 +3547,9 @@ static void usb_xhci_realize(struct PCIDevice *dev, Error **errp)
dev->config[PCI_CACHE_LINE_SIZE] = 0x10;
dev->config[0x60] = 0x30; /* release number */
+ if (strcmp(object_get_typename(OBJECT(dev)), TYPE_NEC_XHCI) == 0) {
+ xhci->nec_quirks = true;
+ }
if (xhci->numintrs > MAXINTRS) {
xhci->numintrs = MAXINTRS;
}
@@ -3852,8 +3775,7 @@ static const VMStateDescription vmstate_xhci_event = {
static bool xhci_er_full(void *opaque, int version_id)
{
- struct XHCIInterrupter *intr = opaque;
- return intr->er_full;
+ return false;
}
static const VMStateDescription vmstate_xhci_intr = {
@@ -3877,7 +3799,7 @@ static const VMStateDescription vmstate_xhci_intr = {
VMSTATE_UINT32(er_ep_idx, XHCIInterrupter),
/* event queue (used if ring is full) */
- VMSTATE_BOOL(er_full, XHCIInterrupter),
+ VMSTATE_BOOL(er_full_unused, XHCIInterrupter),
VMSTATE_UINT32_TEST(ev_buffer_put, XHCIInterrupter, xhci_er_full),
VMSTATE_UINT32_TEST(ev_buffer_get, XHCIInterrupter, xhci_er_full),
VMSTATE_STRUCT_ARRAY_TEST(ev_buffer, XHCIInterrupter, EV_QUEUE,
@@ -3922,17 +3844,21 @@ static const VMStateDescription vmstate_xhci = {
}
};
-static Property xhci_properties[] = {
+static Property nec_xhci_properties[] = {
DEFINE_PROP_ON_OFF_AUTO("msi", XHCIState, msi, ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO("msix", XHCIState, msix, ON_OFF_AUTO_AUTO),
DEFINE_PROP_BIT("superspeed-ports-first",
XHCIState, flags, XHCI_FLAG_SS_FIRST, true),
DEFINE_PROP_BIT("force-pcie-endcap", XHCIState, flags,
XHCI_FLAG_FORCE_PCIE_ENDCAP, false),
- DEFINE_PROP_BIT("streams", XHCIState, flags,
- XHCI_FLAG_ENABLE_STREAMS, true),
DEFINE_PROP_UINT32("intrs", XHCIState, numintrs, MAXINTRS),
DEFINE_PROP_UINT32("slots", XHCIState, numslots, MAXSLOTS),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static Property xhci_properties[] = {
+ DEFINE_PROP_BIT("streams", XHCIState, flags,
+ XHCI_FLAG_ENABLE_STREAMS, true),
DEFINE_PROP_UINT32("p2", XHCIState, numports_2, 4),
DEFINE_PROP_UINT32("p3", XHCIState, numports_3, 4),
DEFINE_PROP_END_OF_LIST(),
@@ -3949,10 +3875,7 @@ static void xhci_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_USB, dc->categories);
k->realize = usb_xhci_realize;
k->exit = usb_xhci_exit;
- k->vendor_id = PCI_VENDOR_ID_NEC;
- k->device_id = PCI_DEVICE_ID_NEC_UPD720200;
k->class_id = PCI_CLASS_SERIAL_USB;
- k->revision = 0x03;
k->is_express = 1;
}
@@ -3961,11 +3884,58 @@ static const TypeInfo xhci_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(XHCIState),
.class_init = xhci_class_init,
+ .abstract = true,
+};
+
+static void nec_xhci_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->props = nec_xhci_properties;
+ k->vendor_id = PCI_VENDOR_ID_NEC;
+ k->device_id = PCI_DEVICE_ID_NEC_UPD720200;
+ k->revision = 0x03;
+}
+
+static const TypeInfo nec_xhci_info = {
+ .name = TYPE_NEC_XHCI,
+ .parent = TYPE_XHCI,
+ .class_init = nec_xhci_class_init,
+};
+
+static void qemu_xhci_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->vendor_id = PCI_VENDOR_ID_REDHAT;
+ k->device_id = PCI_DEVICE_ID_REDHAT_XHCI;
+ k->revision = 0x01;
+}
+
+static void qemu_xhci_instance_init(Object *obj)
+{
+ XHCIState *xhci = XHCI(obj);
+
+ xhci->msi = ON_OFF_AUTO_OFF;
+ xhci->msix = ON_OFF_AUTO_AUTO;
+ xhci->numintrs = MAXINTRS;
+ xhci->numslots = MAXSLOTS;
+ xhci_set_flag(xhci, XHCI_FLAG_SS_FIRST);
+}
+
+static const TypeInfo qemu_xhci_info = {
+ .name = TYPE_QEMU_XHCI,
+ .parent = TYPE_XHCI,
+ .class_init = qemu_xhci_class_init,
+ .instance_init = qemu_xhci_instance_init,
};
static void xhci_register_types(void)
{
type_register_static(&xhci_info);
+ type_register_static(&nec_xhci_info);
+ type_register_static(&qemu_xhci_info);
}
type_init(xhci_register_types)
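With the split into an abstract TYPE_XHCI base and two concrete types, a guest can be given either `-device nec-usb-xhci` (keeping the NEC PCI IDs, revision and quirks) or the new `-device qemu-xhci` (Red Hat IDs, MSI off by default, no vendor quirks); the property split above keeps the msi/msix/intrs/slots knobs on the NEC type only, while streams and the p2/p3 port counts stay on the base type.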
diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c
index 7791c6d520..c9876a5b0f 100644
--- a/hw/usb/host-libusb.c
+++ b/hw/usb/host-libusb.c
@@ -1065,7 +1065,7 @@ static void usb_host_instance_init(Object *obj)
&udev->qdev, NULL);
}
-static void usb_host_handle_destroy(USBDevice *udev)
+static void usb_host_unrealize(USBDevice *udev, Error **errp)
{
USBHostDevice *s = USB_HOST_DEVICE(udev);
@@ -1568,7 +1568,7 @@ static void usb_host_class_initfn(ObjectClass *klass, void *data)
uc->handle_data = usb_host_handle_data;
uc->handle_control = usb_host_handle_control;
uc->handle_reset = usb_host_handle_reset;
- uc->handle_destroy = usb_host_handle_destroy;
+ uc->unrealize = usb_host_unrealize;
uc->flush_ep_queue = usb_host_flush_ep_queue;
uc->alloc_streams = usb_host_alloc_streams;
uc->free_streams = usb_host_free_streams;
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index 860f5c35eb..0efe62f725 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -1427,7 +1427,7 @@ static void usbredir_cleanup_device_queues(USBRedirDevice *dev)
}
}
-static void usbredir_handle_destroy(USBDevice *udev)
+static void usbredir_unrealize(USBDevice *udev, Error **errp)
{
USBRedirDevice *dev = USB_REDIRECT(udev);
Chardev *chr = qemu_chr_fe_get_driver(&dev->cs);
@@ -2513,7 +2513,7 @@ static void usbredir_class_initfn(ObjectClass *klass, void *data)
uc->realize = usbredir_realize;
uc->product_desc = "USB Redirection Device";
- uc->handle_destroy = usbredir_handle_destroy;
+ uc->unrealize = usbredir_unrealize;
uc->cancel_packet = usbredir_cancel_packet;
uc->handle_reset = usbredir_handle_reset;
uc->handle_data = usbredir_handle_data;
diff --git a/hw/usb/trace-events b/hw/usb/trace-events
index fdd1d29030..0c323d4cac 100644
--- a/hw/usb/trace-events
+++ b/hw/usb/trace-events
@@ -174,6 +174,7 @@ usb_xhci_xfer_retry(void *xfer) "%p"
usb_xhci_xfer_success(void *xfer, uint32_t bytes) "%p: len %d"
usb_xhci_xfer_error(void *xfer, uint32_t ret) "%p: ret %d"
usb_xhci_unimplemented(const char *item, int nr) "%s (0x%x)"
+usb_xhci_enforced_limit(const char *item) "%s"
# hw/usb/desc.c
usb_desc_device(int addr, int len, int ret) "dev %d query device, len %d, ret %d"
diff --git a/hw/vfio/Makefile.objs b/hw/vfio/Makefile.objs
index c25e32b029..05e7fbb93f 100644
--- a/hw/vfio/Makefile.objs
+++ b/hw/vfio/Makefile.objs
@@ -2,7 +2,7 @@ ifeq ($(CONFIG_LINUX), y)
obj-$(CONFIG_SOFTMMU) += common.o
obj-$(CONFIG_PCI) += pci.o pci-quirks.o
obj-$(CONFIG_SOFTMMU) += platform.o
-obj-$(CONFIG_SOFTMMU) += calxeda-xgmac.o
-obj-$(CONFIG_SOFTMMU) += amd-xgbe.o
+obj-$(CONFIG_VFIO_XGMAC) += calxeda-xgmac.o
+obj-$(CONFIG_VFIO_AMD_XGBE) += amd-xgbe.o
obj-$(CONFIG_SOFTMMU) += spapr.o
endif
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 801578b4b9..f3ba9b9007 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -294,53 +294,78 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
section->offset_within_address_space & (1ULL << 63);
}
-static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+/* Called with rcu_read_lock held. */
+static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
+ bool *read_only)
{
- VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
- VFIOContainer *container = giommu->container;
- hwaddr iova = iotlb->iova + giommu->iommu_offset;
MemoryRegion *mr;
hwaddr xlat;
hwaddr len = iotlb->addr_mask + 1;
- void *vaddr;
- int ret;
-
- trace_vfio_iommu_map_notify(iova, iova + iotlb->addr_mask);
-
- if (iotlb->target_as != &address_space_memory) {
- error_report("Wrong target AS \"%s\", only system memory is allowed",
- iotlb->target_as->name ? iotlb->target_as->name : "none");
- return;
- }
+ bool writable = iotlb->perm & IOMMU_WO;
/*
* The IOMMU TLB entry we have just covers translation through
* this IOMMU to its immediate target. We need to translate
* it the rest of the way through to memory.
*/
- rcu_read_lock();
mr = address_space_translate(&address_space_memory,
iotlb->translated_addr,
- &xlat, &len, iotlb->perm & IOMMU_WO);
+ &xlat, &len, writable);
if (!memory_region_is_ram(mr)) {
error_report("iommu map to non memory area %"HWADDR_PRIx"",
xlat);
- goto out;
+ return false;
}
+
/*
* Translation truncates length to the IOMMU page size,
* check that it did not truncate too much.
*/
if (len & iotlb->addr_mask) {
error_report("iommu has granularity incompatible with target AS");
- goto out;
+ return false;
}
+ *vaddr = memory_region_get_ram_ptr(mr) + xlat;
+ *read_only = !writable || mr->readonly;
+
+ return true;
+}
+
+static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+ VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
+ VFIOContainer *container = giommu->container;
+ hwaddr iova = iotlb->iova + giommu->iommu_offset;
+ bool read_only;
+ void *vaddr;
+ int ret;
+
+ trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
+ iova, iova + iotlb->addr_mask);
+
+ if (iotlb->target_as != &address_space_memory) {
+ error_report("Wrong target AS \"%s\", only system memory is allowed",
+ iotlb->target_as->name ? iotlb->target_as->name : "none");
+ return;
+ }
+
+ rcu_read_lock();
+
if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
- vaddr = memory_region_get_ram_ptr(mr) + xlat;
+ if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
+ goto out;
+ }
+ /*
+ * vaddr is only valid until rcu_read_unlock(). But after
+ * vfio_dma_map has set up the mapping the pages will be
+ * pinned by the kernel. This makes sure that the RAM backend
+ * of vaddr will always be there, even if the memory object is
+ * destroyed and its backing memory munmap-ed.
+ */
ret = vfio_dma_map(container, iova,
iotlb->addr_mask + 1, vaddr,
- !(iotlb->perm & IOMMU_WO) || mr->readonly);
+ read_only);
if (ret) {
error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx", %p) = %d (%m)",
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index 6c771f778b..e995e32dee 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -1041,6 +1041,7 @@ static int igd_gen(VFIOPCIDevice *vdev)
typedef struct VFIOIGDQuirk {
struct VFIOPCIDevice *vdev;
uint32_t index;
+ uint32_t bdsm;
} VFIOIGDQuirk;
#define IGD_GMCH 0x50 /* Graphics Control Register */
@@ -1185,6 +1186,7 @@ static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "VFIO dummy ISA/LPC bridge for IGD assignment";
dc->hotpluggable = false;
k->realize = vfio_pci_igd_lpc_bridge_realize;
@@ -1304,7 +1306,7 @@ static void vfio_igd_quirk_data_write(void *opaque, hwaddr addr,
"BIOS reserved stolen memory. Unsupported BIOS?");
}
- val = base | (data & ((1 << 20) - 1));
+ val = data - igd->bdsm + base;
} else {
val = 0; /* upper 32bits of pte, we only enable below 4G PTEs */
}
@@ -1365,14 +1367,45 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
uint16_t cmd_orig, cmd;
Error *err = NULL;
+ /* This must be an Intel VGA device. */
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
+ !vfio_is_vga(vdev) || nr != 4) {
+ return;
+ }
+
/*
- * This must be an Intel VGA device at address 00:02.0 for us to even
- * consider enabling legacy mode. The vBIOS has dependencies on the
- * PCI bus address.
+ * IGD is not a standard, they like to change their specs often. We
+ * only attempt to support back to SandyBridge and we hope that newer
+ * devices maintain compatibility with generation 8.
*/
- if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
- !vfio_is_vga(vdev) || nr != 4 ||
- &vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev),
+ gen = igd_gen(vdev);
+ if (gen != 6 && gen != 8) {
+ error_report("IGD device %s is unsupported by IGD quirks, "
+ "try SandyBridge or newer", vdev->vbasedev.name);
+ return;
+ }
+
+ /*
+ * Regardless of running in UPT or legacy mode, the guest graphics
+ * driver may attempt to use stolen memory, however only legacy mode
+ * has BIOS support for reserving stolen memory in the guest VM.
+ * Emulate the GMCH register in all cases and zero out the stolen
+ * memory size here. Legacy mode may request allocation and re-write
+ * this below.
+ */
+ gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
+ gmch &= ~((gen < 8 ? 0x1f : 0xff) << (gen < 8 ? 3 : 8));
+
+ /* GMCH is read-only, emulated */
+ pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
+ pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0);
+ pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0);
+
+ /*
+ * This must be at address 00:02.0 for us to even consider enabling
+ * legacy mode. The vBIOS has dependencies on the PCI bus address.
+ */
+ if (&vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev),
0, PCI_DEVFN(0x2, 0))) {
return;
}
@@ -1392,18 +1425,6 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
}
/*
- * IGD is not a standard, they like to change their specs often. We
- * only attempt to support back to SandBridge and we hope that newer
- * devices maintain compatibility with generation 8.
- */
- gen = igd_gen(vdev);
- if (gen != 6 && gen != 8) {
- error_report("IGD device %s is unsupported in legacy mode, "
- "try SandyBridge or newer", vdev->vbasedev.name);
- return;
- }
-
- /*
* Most of what we're doing here is to enable the ROM to run, so if
* there's no ROM, there's no point in setting up this quirk.
* NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support.
@@ -1458,8 +1479,6 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
goto out;
}
- gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
-
/*
* If IGD VGA Disable is clear (expected) and VGA is not already enabled,
* try to enable it. Probably shouldn't be using legacy mode without VGA,
@@ -1503,6 +1522,8 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
igd = quirk->data = g_malloc0(sizeof(*igd));
igd->vdev = vdev;
igd->index = ~0;
+ igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM, 4);
+ igd->bdsm &= ~((1 << 20) - 1); /* 1MB aligned */
memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_index_quirk,
igd, "vfio-igd-index-quirk", 4);
@@ -1528,12 +1549,11 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
* when IVD (IGD VGA Disable) is clear, but the claim is that it's unused,
* so let's not waste VM memory for it.
*/
- gmch &= ~((gen < 8 ? 0x1f : 0xff) << (gen < 8 ? 3 : 8));
-
if (vdev->igd_gms) {
if (vdev->igd_gms <= 0x10) {
gms_mb = vdev->igd_gms * 32;
gmch |= vdev->igd_gms << (gen < 8 ? 3 : 8);
+ pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
} else {
error_report("Unsupported IGD GMS value 0x%x", vdev->igd_gms);
vdev->igd_gms = 0;
@@ -1553,11 +1573,6 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size",
bdsm_size, sizeof(*bdsm_size));
- /* GMCH is read-only, emulated */
- pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
- pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0);
- pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0);
-
/* BDSM is read-write, emulated. The BIOS needs to be able to write it */
pci_set_long(vdev->pdev.config + IGD_BDSM, 0);
pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0);
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 332f41d662..03a3d01549 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -1880,16 +1880,26 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
/*
* Extended capabilities are chained with each pointing to the next, so we
* can drop anything other than the head of the chain simply by modifying
- * the previous next pointer. For the head of the chain, we can modify the
- * capability ID to something that cannot match a valid capability. ID
- * 0 is reserved for this since absence of capabilities is indicated by
- * 0 for the ID, version, AND next pointer. However, pcie_add_capability()
- * uses ID 0 as reserved for list management and will incorrectly match and
- * assert if we attempt to pre-load the head of the chain with this ID.
- * Use ID 0xFFFF temporarily since it is also seems to be reserved in
- * part for identifying absence of capabilities in a root complex register
- * block. If the ID still exists after adding capabilities, switch back to
- * zero. We'll mark this entire first dword as emulated for this purpose.
+ * the previous next pointer. Seed the head of the chain here such that
+ * we can simply skip any capabilities we want to drop below, regardless
+ * of their position in the chain. If this stub capability still exists
+ * after we add the capabilities we want to expose, update the capability
+ * ID to zero. Note that we cannot seed with the capability header being
+ * zero as this conflicts with the definition of an absent capability chain
+ * and prevents capabilities beyond the head of the list from being added.
+ * By replacing the dummy capability ID with zero after walking the device
+ * chain, we also transparently mark extended capabilities as absent if
+ * no capabilities were added. Note that the PCIe spec defines an absence
+ * of extended capabilities to be determined by a value of zero for the
+ * capability ID, version, AND next pointer. A non-zero next pointer
+ * should be sufficient to indicate additional capabilities are present,
+ * which will occur if we call pcie_add_capability() below. The entire
+ * first dword is emulated to support this.
+ *
+ * NB. The kernel side does similar masking, so be prepared that our
+ * view of the device may also contain a capability ID zero in the head
+ * of the chain. Skip it for the same reason that we cannot seed the
+ * chain with a zero capability.
*/
pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
PCI_EXT_CAP(0xFFFF, 0, 0));
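
To make the seeding strategy described in the comment above concrete, here is a condensed sketch of the sequence; the capability walk itself is elided and only names already used in this file or in QEMU's PCI helpers appear:

    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));       /* dummy head, never ID 0 */

    /* ... walk the device's chain, calling pcie_add_capability() for every
     * capability we keep, skipping ID 0 / SR-IOV / ARI as in the switch
     * below ... */

    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        /* Nothing was added on top of the stub: rewrite the ID to zero so
         * the guest sees an empty extended configuration space. */
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }
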
@@ -1915,6 +1925,7 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
PCI_EXT_CAP_NEXT_MASK);
switch (cap_id) {
+ case 0: /* kernel masked capability */
case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
@@ -2506,12 +2517,16 @@ static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
static void vfio_req_notifier_handler(void *opaque)
{
VFIOPCIDevice *vdev = opaque;
+ Error *err = NULL;
if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
return;
}
- qdev_unplug(&vdev->pdev.qdev, NULL);
+ qdev_unplug(&vdev->pdev.qdev, &err);
+ if (err) {
+ error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
+ }
}
static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 8de8281357..2561c6d31a 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -84,7 +84,7 @@ vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s"
# hw/vfio/common.c
vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)"
vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64
-vfio_iommu_map_notify(uint64_t iova_start, uint64_t iova_end) "iommu map @ %"PRIx64" - %"PRIx64
+vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "iommu %s @ %"PRIx64" - %"PRIx64
vfio_listener_region_add_skip(uint64_t start, uint64_t end) "SKIPPING region_add %"PRIx64" - %"PRIx64
vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] %"PRIx64" - %"PRIx64
vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] %"PRIx64" - %"PRIx64" [%p]"
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 63657066e7..23483c752f 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -60,6 +60,13 @@ typedef struct VRingUsed
VRingUsedElem ring[0];
} VRingUsed;
+typedef struct VRingMemoryRegionCaches {
+ struct rcu_head rcu;
+ MemoryRegionCache desc;
+ MemoryRegionCache avail;
+ MemoryRegionCache used;
+} VRingMemoryRegionCaches;
+
typedef struct VRing
{
unsigned int num;
@@ -68,6 +75,7 @@ typedef struct VRing
hwaddr desc;
hwaddr avail;
hwaddr used;
+ VRingMemoryRegionCaches *caches;
} VRing;
struct VirtQueue
@@ -97,13 +105,58 @@ struct VirtQueue
uint16_t vector;
VirtIOHandleOutput handle_output;
- VirtIOHandleOutput handle_aio_output;
+ VirtIOHandleAIOOutput handle_aio_output;
VirtIODevice *vdev;
EventNotifier guest_notifier;
EventNotifier host_notifier;
QLIST_ENTRY(VirtQueue) node;
};
+static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
+{
+ if (!caches) {
+ return;
+ }
+
+ address_space_cache_destroy(&caches->desc);
+ address_space_cache_destroy(&caches->avail);
+ address_space_cache_destroy(&caches->used);
+ g_free(caches);
+}
+
+static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+{
+ VirtQueue *vq = &vdev->vq[n];
+ VRingMemoryRegionCaches *old = vq->vring.caches;
+ VRingMemoryRegionCaches *new;
+ hwaddr addr, size;
+ int event_size;
+
+ event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+ addr = vq->vring.desc;
+ if (!addr) {
+ return;
+ }
+ new = g_new0(VRingMemoryRegionCaches, 1);
+ size = virtio_queue_get_desc_size(vdev, n);
+ address_space_cache_init(&new->desc, vdev->dma_as,
+ addr, size, false);
+
+ size = virtio_queue_get_used_size(vdev, n) + event_size;
+ address_space_cache_init(&new->used, vdev->dma_as,
+ vq->vring.used, size, true);
+
+ size = virtio_queue_get_avail_size(vdev, n) + event_size;
+ address_space_cache_init(&new->avail, vdev->dma_as,
+ vq->vring.avail, size, false);
+
+ atomic_rcu_set(&vq->vring.caches, new);
+ if (old) {
+ call_rcu(old, virtio_free_region_cache, rcu);
+ }
+}
+
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
@@ -117,101 +170,125 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
vring->used = vring_align(vring->avail +
offsetof(VRingAvail, ring[vring->num]),
vring->align);
+ virtio_init_region_cache(vdev, n);
}
+/* Called within rcu_read_lock(). */
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
- hwaddr desc_pa, int i)
+ MemoryRegionCache *cache, int i)
{
- address_space_read(vdev->dma_as, desc_pa + i * sizeof(VRingDesc),
- MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
+ address_space_read_cached(cache, i * sizeof(VRingDesc),
+ desc, sizeof(VRingDesc));
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap32s(vdev, &desc->len);
virtio_tswap16s(vdev, &desc->flags);
virtio_tswap16s(vdev, &desc->next);
}
+/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
- hwaddr pa;
- pa = vq->vring.avail + offsetof(VRingAvail, flags);
- return virtio_lduw_phys(vq->vdev, pa);
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ hwaddr pa = offsetof(VRingAvail, flags);
+ return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
+/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
- hwaddr pa;
- pa = vq->vring.avail + offsetof(VRingAvail, idx);
- vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ hwaddr pa = offsetof(VRingAvail, idx);
+ vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
return vq->shadow_avail_idx;
}
+/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
- hwaddr pa;
- pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
- return virtio_lduw_phys(vq->vdev, pa);
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ hwaddr pa = offsetof(VRingAvail, ring[i]);
+ return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
+/* Called within rcu_read_lock(). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
return vring_avail_ring(vq, vq->vring.num);
}
+/* Called within rcu_read_lock(). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
int i)
{
- hwaddr pa;
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ hwaddr pa = offsetof(VRingUsed, ring[i]);
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
- pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
- address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
- (void *)uelem, sizeof(VRingUsedElem));
+ address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
+ address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}
+/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
- hwaddr pa;
- pa = vq->vring.used + offsetof(VRingUsed, idx);
- return virtio_lduw_phys(vq->vdev, pa);
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ hwaddr pa = offsetof(VRingUsed, idx);
+ return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}
+/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
- hwaddr pa;
- pa = vq->vring.used + offsetof(VRingUsed, idx);
- virtio_stw_phys(vq->vdev, pa, val);
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ hwaddr pa = offsetof(VRingUsed, idx);
+ virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+ address_space_cache_invalidate(&caches->used, pa, sizeof(val));
vq->used_idx = val;
}
+/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
VirtIODevice *vdev = vq->vdev;
- hwaddr pa;
- pa = vq->vring.used + offsetof(VRingUsed, flags);
- virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
+ hwaddr pa = offsetof(VRingUsed, flags);
+ uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+ virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
+ address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
+/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
VirtIODevice *vdev = vq->vdev;
- hwaddr pa;
- pa = vq->vring.used + offsetof(VRingUsed, flags);
- virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
+ hwaddr pa = offsetof(VRingUsed, flags);
+ uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+ virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
+ address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
+/* Called within rcu_read_lock(). */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
+ VRingMemoryRegionCaches *caches;
hwaddr pa;
if (!vq->notification) {
return;
}
- pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
- virtio_stw_phys(vq->vdev, pa, val);
+
+ caches = atomic_rcu_read(&vq->vring.caches);
+ pa = offsetof(VRingUsed, ring[vq->vring.num]);
+ virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
vq->notification = enable;
+
+ rcu_read_lock();
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_avail_event(vq, vring_avail_idx(vq));
} else if (enable) {
@@ -223,6 +300,7 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
/* Expose avail event/used flags before caller checks the avail idx. */
smp_mb();
}
+ rcu_read_unlock();
}
int virtio_queue_ready(VirtQueue *vq)
@@ -231,8 +309,9 @@ int virtio_queue_ready(VirtQueue *vq)
}
/* Fetch avail_idx from VQ memory only when we really need to know if
- * guest has added some buffers. */
-int virtio_queue_empty(VirtQueue *vq)
+ * guest has added some buffers.
+ * Called within rcu_read_lock(). */
+static int virtio_queue_empty_rcu(VirtQueue *vq)
{
if (vq->shadow_avail_idx != vq->last_avail_idx) {
return 0;
@@ -241,6 +320,20 @@ int virtio_queue_empty(VirtQueue *vq)
return vring_avail_idx(vq) == vq->last_avail_idx;
}
+int virtio_queue_empty(VirtQueue *vq)
+{
+ bool empty;
+
+ if (vq->shadow_avail_idx != vq->last_avail_idx) {
+ return 0;
+ }
+
+ rcu_read_lock();
+ empty = vring_avail_idx(vq) == vq->last_avail_idx;
+ rcu_read_unlock();
+ return empty;
+}
+
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
@@ -319,6 +412,7 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
return true;
}
+/* Called within rcu_read_lock(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx)
{
@@ -339,6 +433,7 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
vring_used_write(vq, &uelem, idx);
}
+/* Called within rcu_read_lock(). */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
uint16_t old, new;
@@ -362,10 +457,13 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
+ rcu_read_lock();
virtqueue_fill(vq, elem, len, 0);
virtqueue_flush(vq, 1);
+ rcu_read_unlock();
}
+/* Called within rcu_read_lock(). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
uint16_t num_heads = vring_avail_idx(vq) - idx;
@@ -385,6 +483,7 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
return num_heads;
}
+/* Called within rcu_read_lock(). */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
unsigned int *head)
{
@@ -408,7 +507,7 @@ enum {
};
static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
- hwaddr desc_pa, unsigned int max,
+ MemoryRegionCache *desc_cache, unsigned int max,
unsigned int *next)
{
/* If this descriptor says it doesn't chain, we're done. */
@@ -426,7 +525,7 @@ static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
return VIRTQUEUE_READ_DESC_ERROR;
}
- vring_desc_read(vdev, desc, desc_pa, *next);
+ vring_desc_read(vdev, desc, desc_cache, *next);
return VIRTQUEUE_READ_DESC_MORE;
}
@@ -434,29 +533,38 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
unsigned int *out_bytes,
unsigned max_in_bytes, unsigned max_out_bytes)
{
- unsigned int idx;
+ VirtIODevice *vdev = vq->vdev;
+ unsigned int max, idx;
unsigned int total_bufs, in_total, out_total;
+ VRingMemoryRegionCaches *caches;
+ MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
+ int64_t len = 0;
int rc;
+ rcu_read_lock();
idx = vq->last_avail_idx;
-
total_bufs = in_total = out_total = 0;
+
+ max = vq->vring.num;
+ caches = atomic_rcu_read(&vq->vring.caches);
+ if (caches->desc.len < max * sizeof(VRingDesc)) {
+ virtio_error(vdev, "Cannot map descriptor ring");
+ goto err;
+ }
+
while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
- VirtIODevice *vdev = vq->vdev;
- unsigned int max, num_bufs, indirect = 0;
+ MemoryRegionCache *desc_cache = &caches->desc;
+ unsigned int num_bufs;
VRingDesc desc;
- hwaddr desc_pa;
unsigned int i;
- max = vq->vring.num;
num_bufs = total_bufs;
if (!virtqueue_get_head(vq, idx++, &i)) {
goto err;
}
- desc_pa = vq->vring.desc;
- vring_desc_read(vdev, &desc, desc_pa, i);
+ vring_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
@@ -471,11 +579,18 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
}
/* loop over the indirect descriptor table */
- indirect = 1;
+ len = address_space_cache_init(&indirect_desc_cache,
+ vdev->dma_as,
+ desc.addr, desc.len, false);
+ desc_cache = &indirect_desc_cache;
+ if (len < desc.len) {
+ virtio_error(vdev, "Cannot map indirect buffer");
+ goto err;
+ }
+
max = desc.len / sizeof(VRingDesc);
- desc_pa = desc.addr;
num_bufs = i = 0;
- vring_desc_read(vdev, &desc, desc_pa, i);
+ vring_desc_read(vdev, &desc, desc_cache, i);
}
do {
@@ -494,17 +609,19 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
goto done;
}
- rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
+ rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (rc == VIRTQUEUE_READ_DESC_ERROR) {
goto err;
}
- if (!indirect)
- total_bufs = num_bufs;
- else
+ if (desc_cache == &indirect_desc_cache) {
+ address_space_cache_destroy(&indirect_desc_cache);
total_bufs++;
+ } else {
+ total_bufs = num_bufs;
+ }
}
if (rc < 0) {
@@ -512,12 +629,14 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
}
done:
+ address_space_cache_destroy(&indirect_desc_cache);
if (in_bytes) {
*in_bytes = in_total;
}
if (out_bytes) {
*out_bytes = out_total;
}
+ rcu_read_unlock();
return;
err:
@@ -651,9 +770,12 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
unsigned int i, head, max;
- hwaddr desc_pa = vq->vring.desc;
+ VRingMemoryRegionCaches *caches;
+ MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
+ MemoryRegionCache *desc_cache;
+ int64_t len;
VirtIODevice *vdev = vq->vdev;
- VirtQueueElement *elem;
+ VirtQueueElement *elem = NULL;
unsigned out_num, in_num;
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
@@ -663,8 +785,9 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
if (unlikely(vdev->broken)) {
return NULL;
}
- if (virtio_queue_empty(vq)) {
- return NULL;
+ rcu_read_lock();
+ if (virtio_queue_empty_rcu(vq)) {
+ goto done;
}
/* Needed after virtio_queue_empty(), see comment in
* virtqueue_num_heads(). */
@@ -677,11 +800,11 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
if (vq->inuse >= vq->vring.num) {
virtio_error(vdev, "Virtqueue size exceeded");
- return NULL;
+ goto done;
}
if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
- return NULL;
+ goto done;
}
if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
@@ -689,18 +812,33 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
}
i = head;
- vring_desc_read(vdev, &desc, desc_pa, i);
+
+ caches = atomic_rcu_read(&vq->vring.caches);
+ if (caches->desc.len < max * sizeof(VRingDesc)) {
+ virtio_error(vdev, "Cannot map descriptor ring");
+ goto done;
+ }
+
+ desc_cache = &caches->desc;
+ vring_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
virtio_error(vdev, "Invalid size for indirect buffer table");
- return NULL;
+ goto done;
}
/* loop over the indirect descriptor table */
+ len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
+ desc.addr, desc.len, false);
+ desc_cache = &indirect_desc_cache;
+ if (len < desc.len) {
+ virtio_error(vdev, "Cannot map indirect buffer");
+ goto done;
+ }
+
max = desc.len / sizeof(VRingDesc);
- desc_pa = desc.addr;
i = 0;
- vring_desc_read(vdev, &desc, desc_pa, i);
+ vring_desc_read(vdev, &desc, desc_cache, i);
}
/* Collect all the descriptors */
@@ -731,7 +869,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
goto err_undo_map;
}
- rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
+ rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (rc == VIRTQUEUE_READ_DESC_ERROR) {
@@ -753,11 +891,15 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
+done:
+ address_space_cache_destroy(&indirect_desc_cache);
+ rcu_read_unlock();
+
return elem;
err_undo_map:
virtqueue_undo_map_desc(out_num, in_num, iov);
- return NULL;
+ goto done;
}
/* virtqueue_drop_all:
@@ -1219,6 +1361,7 @@ void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
vdev->vq[n].vring.desc = desc;
vdev->vq[n].vring.avail = avail;
vdev->vq[n].vring.used = used;
+ virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
@@ -1287,14 +1430,16 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
virtio_queue_update_rings(vdev, n);
}
-static void virtio_queue_notify_aio_vq(VirtQueue *vq)
+static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
if (vq->vring.desc && vq->handle_aio_output) {
VirtIODevice *vdev = vq->vdev;
trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
- vq->handle_aio_output(vdev, vq);
+ return vq->handle_aio_output(vdev, vq);
}
+
+ return false;
}
static void virtio_queue_notify_vq(VirtQueue *vq)
@@ -1383,6 +1528,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value)
}
}
+/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
uint16_t old, new;
@@ -1408,7 +1554,12 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
- if (!virtio_should_notify(vdev, vq)) {
+ bool should_notify;
+ rcu_read_lock();
+ should_notify = virtio_should_notify(vdev, vq);
+ rcu_read_unlock();
+
+ if (!should_notify) {
return;
}
@@ -1433,15 +1584,25 @@ void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
event_notifier_set(&vq->guest_notifier);
}
+static void virtio_irq(VirtQueue *vq)
+{
+ virtio_set_isr(vq->vdev, 0x1);
+ virtio_notify_vector(vq->vdev, vq->vector);
+}
+
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
- if (!virtio_should_notify(vdev, vq)) {
+ bool should_notify;
+ rcu_read_lock();
+ should_notify = virtio_should_notify(vdev, vq);
+ rcu_read_unlock();
+
+ if (!should_notify) {
return;
}
trace_virtio_notify(vdev, vq);
- virtio_set_isr(vq->vdev, 0x1);
- virtio_notify_vector(vdev, vq->vector);
+ virtio_irq(vq);
}
void virtio_notify_config(VirtIODevice *vdev)
@@ -1896,6 +2057,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
}
}
+ rcu_read_lock();
for (i = 0; i < num; i++) {
if (vdev->vq[i].vring.desc) {
uint16_t nheads;
@@ -1930,6 +2092,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
}
}
}
+ rcu_read_unlock();
return 0;
}
@@ -1937,9 +2100,6 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
void virtio_cleanup(VirtIODevice *vdev)
{
qemu_del_vm_change_state_handler(vdev->vmstate);
- g_free(vdev->config);
- g_free(vdev->vq);
- g_free(vdev->vector_queues);
}
static void virtio_vmstate_change(void *opaque, int running, RunState state)
@@ -2059,7 +2219,11 @@ void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
- vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
+ rcu_read_lock();
+ if (vdev->vq[n].vring.desc) {
+ vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
+ }
+ rcu_read_unlock();
}
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
@@ -2081,7 +2245,7 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
if (event_notifier_test_and_clear(n)) {
- virtio_notify_vector(vq->vdev, vq->vector);
+ virtio_irq(vq);
}
}
@@ -2125,16 +2289,17 @@ static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
EventNotifier *n = opaque;
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+ bool progress;
if (virtio_queue_empty(vq)) {
return false;
}
- virtio_queue_notify_aio_vq(vq);
+ progress = virtio_queue_notify_aio_vq(vq);
/* In case the handler function re-enabled notifications */
virtio_queue_set_notification(vq, 0);
- return true;
+ return progress;
}
static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
@@ -2146,7 +2311,7 @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
}
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- VirtIOHandleOutput handle_output)
+ VirtIOHandleAIOOutput handle_output)
{
if (handle_output) {
vq->handle_aio_output = handle_output;
@@ -2200,6 +2365,19 @@ void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
}
}
+static void virtio_memory_listener_commit(MemoryListener *listener)
+{
+ VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num == 0) {
+ break;
+ }
+ virtio_init_region_cache(vdev, i);
+ }
+}
+
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -2222,6 +2400,9 @@ static void virtio_device_realize(DeviceState *dev, Error **errp)
error_propagate(errp, err);
return;
}
+
+ vdev->listener.commit = virtio_memory_listener_commit;
+ memory_listener_register(&vdev->listener, vdev->dma_as);
}
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
@@ -2244,6 +2425,36 @@ static void virtio_device_unrealize(DeviceState *dev, Error **errp)
vdev->bus_name = NULL;
}
+static void virtio_device_free_virtqueues(VirtIODevice *vdev)
+{
+ int i;
+ if (!vdev->vq) {
+ return;
+ }
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ VRingMemoryRegionCaches *caches;
+ if (vdev->vq[i].vring.num == 0) {
+ break;
+ }
+ caches = atomic_read(&vdev->vq[i].vring.caches);
+ atomic_set(&vdev->vq[i].vring.caches, NULL);
+ virtio_free_region_cache(caches);
+ }
+ g_free(vdev->vq);
+}
+
+static void virtio_device_instance_finalize(Object *obj)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(obj);
+
+ memory_listener_unregister(&vdev->listener);
+ virtio_device_free_virtqueues(vdev);
+
+ g_free(vdev->config);
+ g_free(vdev->vector_queues);
+}
+
static Property virtio_properties[] = {
DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
DEFINE_PROP_END_OF_LIST(),
@@ -2370,6 +2581,7 @@ static const TypeInfo virtio_device_info = {
.parent = TYPE_DEVICE,
.instance_size = sizeof(VirtIODevice),
.class_init = virtio_device_class_init,
+ .instance_finalize = virtio_device_instance_finalize,
.abstract = true,
.class_size = sizeof(VirtioDeviceClass),
};
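
Because virtqueue_fill() and virtqueue_flush() are now documented as called within rcu_read_lock(), a device model that batches completions instead of calling virtqueue_push() per element must take the read lock around the whole batch, mirroring what virtqueue_push() does above. A minimal sketch; the helper name and calling convention are illustrative and not part of this patch:

    static void complete_batch(VirtQueue *vq, VirtQueueElement **elems,
                               const unsigned *lens, unsigned count)
    {
        unsigned i;

        rcu_read_lock();
        for (i = 0; i < count; i++) {
            virtqueue_fill(vq, elems[i], lens[i], i);
        }
        virtqueue_flush(vq, count);
        rcu_read_unlock();
    }
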
diff --git a/hw/watchdog/Makefile.objs b/hw/watchdog/Makefile.objs
index 72e3ffd93c..9589bed63a 100644
--- a/hw/watchdog/Makefile.objs
+++ b/hw/watchdog/Makefile.objs
@@ -2,3 +2,4 @@ common-obj-y += watchdog.o
common-obj-$(CONFIG_WDT_IB6300ESB) += wdt_i6300esb.o
common-obj-$(CONFIG_WDT_IB700) += wdt_ib700.o
common-obj-$(CONFIG_WDT_DIAG288) += wdt_diag288.o
+common-obj-$(CONFIG_ASPEED_SOC) += wdt_aspeed.o
diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c
new file mode 100644
index 0000000000..8bbe579b6b
--- /dev/null
+++ b/hw/watchdog/wdt_aspeed.c
@@ -0,0 +1,225 @@
+/*
+ * ASPEED Watchdog Controller
+ *
+ * Copyright (C) 2016-2017 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "sysemu/watchdog.h"
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "hw/watchdog/wdt_aspeed.h"
+
+#define WDT_STATUS (0x00 / 4)
+#define WDT_RELOAD_VALUE (0x04 / 4)
+#define WDT_RESTART (0x08 / 4)
+#define WDT_CTRL (0x0C / 4)
+#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
+#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
+#define WDT_CTRL_1MHZ_CLK BIT(4)
+#define WDT_CTRL_WDT_EXT BIT(3)
+#define WDT_CTRL_WDT_INTR BIT(2)
+#define WDT_CTRL_RESET_SYSTEM BIT(1)
+#define WDT_CTRL_ENABLE BIT(0)
+
+#define WDT_TIMEOUT_STATUS (0x10 / 4)
+#define WDT_TIMEOUT_CLEAR (0x14 / 4)
+#define WDT_RESET_WDITH (0x18 / 4)
+
+#define WDT_RESTART_MAGIC 0x4755
+
+static bool aspeed_wdt_is_enabled(const AspeedWDTState *s)
+{
+ return s->regs[WDT_CTRL] & WDT_CTRL_ENABLE;
+}
+
+static uint64_t aspeed_wdt_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AspeedWDTState *s = ASPEED_WDT(opaque);
+
+ offset >>= 2;
+
+ switch (offset) {
+ case WDT_STATUS:
+ return s->regs[WDT_STATUS];
+ case WDT_RELOAD_VALUE:
+ return s->regs[WDT_RELOAD_VALUE];
+ case WDT_RESTART:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: read from write-only reg at offset 0x%"
+ HWADDR_PRIx "\n", __func__, offset);
+ return 0;
+ case WDT_CTRL:
+ return s->regs[WDT_CTRL];
+ case WDT_TIMEOUT_STATUS:
+ case WDT_TIMEOUT_CLEAR:
+ case WDT_RESET_WDITH:
+ qemu_log_mask(LOG_UNIMP,
+ "%s: uninmplemented read at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ return 0;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ return 0;
+ }
+
+}
+
+static void aspeed_wdt_reload(AspeedWDTState *s, bool pclk)
+{
+ uint64_t reload;
+
+ if (pclk) {
+ reload = muldiv64(s->regs[WDT_RELOAD_VALUE], NANOSECONDS_PER_SECOND,
+ s->pclk_freq);
+ } else {
+ reload = s->regs[WDT_RELOAD_VALUE] * 1000ULL;
+ }
+
+ if (aspeed_wdt_is_enabled(s)) {
+ timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + reload);
+ }
+}
+
+static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size)
+{
+ AspeedWDTState *s = ASPEED_WDT(opaque);
+ bool enable = data & WDT_CTRL_ENABLE;
+
+ offset >>= 2;
+
+ switch (offset) {
+ case WDT_STATUS:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: write to read-only reg at offset 0x%"
+ HWADDR_PRIx "\n", __func__, offset);
+ break;
+ case WDT_RELOAD_VALUE:
+ s->regs[WDT_RELOAD_VALUE] = data;
+ break;
+ case WDT_RESTART:
+ if ((data & 0xFFFF) == WDT_RESTART_MAGIC) {
+ s->regs[WDT_STATUS] = s->regs[WDT_RELOAD_VALUE];
+ aspeed_wdt_reload(s, !(data & WDT_CTRL_1MHZ_CLK));
+ }
+ break;
+ case WDT_CTRL:
+ if (enable && !aspeed_wdt_is_enabled(s)) {
+ s->regs[WDT_CTRL] = data;
+ aspeed_wdt_reload(s, !(data & WDT_CTRL_1MHZ_CLK));
+ } else if (!enable && aspeed_wdt_is_enabled(s)) {
+ s->regs[WDT_CTRL] = data;
+ timer_del(s->timer);
+ }
+ break;
+ case WDT_TIMEOUT_STATUS:
+ case WDT_TIMEOUT_CLEAR:
+ case WDT_RESET_WDITH:
+ qemu_log_mask(LOG_UNIMP,
+ "%s: uninmplemented write at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ }
+ return;
+}
+
+static WatchdogTimerModel model = {
+ .wdt_name = TYPE_ASPEED_WDT,
+ .wdt_description = "Aspeed watchdog device",
+};
+
+static const VMStateDescription vmstate_aspeed_wdt = {
+ .name = "vmstate_aspeed_wdt",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_TIMER_PTR(timer, AspeedWDTState),
+ VMSTATE_UINT32_ARRAY(regs, AspeedWDTState, ASPEED_WDT_REGS_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const MemoryRegionOps aspeed_wdt_ops = {
+ .read = aspeed_wdt_read,
+ .write = aspeed_wdt_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .valid.unaligned = false,
+};
+
+static void aspeed_wdt_reset(DeviceState *dev)
+{
+ AspeedWDTState *s = ASPEED_WDT(dev);
+
+ s->regs[WDT_STATUS] = 0x3EF1480;
+ s->regs[WDT_RELOAD_VALUE] = 0x03EF1480;
+ s->regs[WDT_RESTART] = 0;
+ s->regs[WDT_CTRL] = 0;
+
+ timer_del(s->timer);
+}
+
+static void aspeed_wdt_timer_expired(void *dev)
+{
+ AspeedWDTState *s = ASPEED_WDT(dev);
+
+ qemu_log_mask(CPU_LOG_RESET, "Watchdog timer expired.\n");
+ watchdog_perform_action();
+ timer_del(s->timer);
+}
+
+#define PCLK_HZ 24000000
+
+static void aspeed_wdt_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ AspeedWDTState *s = ASPEED_WDT(dev);
+
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, aspeed_wdt_timer_expired, dev);
+
+ /* FIXME: This setting should be derived from the SCU hw strapping
+ * register SCU70
+ */
+ s->pclk_freq = PCLK_HZ;
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_wdt_ops, s,
+ TYPE_ASPEED_WDT, ASPEED_WDT_REGS_MAX * 4);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static void aspeed_wdt_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = aspeed_wdt_realize;
+ dc->reset = aspeed_wdt_reset;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->vmsd = &vmstate_aspeed_wdt;
+}
+
+static const TypeInfo aspeed_wdt_info = {
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .name = TYPE_ASPEED_WDT,
+ .instance_size = sizeof(AspeedWDTState),
+ .class_init = aspeed_wdt_class_init,
+};
+
+static void wdt_aspeed_register_types(void)
+{
+ watchdog_add_model(&model);
+ type_register_static(&aspeed_wdt_info);
+}
+
+type_init(wdt_aspeed_register_types)
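
As a worked example of the reload arithmetic in aspeed_wdt_reload() above, using the reset defaults from aspeed_wdt_reset() and assuming the multiplication is carried out in 64 bits:

    WDT_RELOAD_VALUE = 0x03EF1480 = 66,000,000 ticks

    1MHz clock path (WDT_CTRL_1MHZ_CLK set):
        reload = 66,000,000 * 1000 ns = 66 s
    PCLK path (pclk_freq = PCLK_HZ = 24,000,000):
        reload = muldiv64(66000000, NANOSECONDS_PER_SECOND, 24000000) = 2.75 s

The register value is a tick count; the clock-select bit in the control or restart write only changes the tick period.
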