Diffstat (limited to 'hw')
108 files changed, 4051 insertions, 1116 deletions
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index d24b9a98c8..e890a5d675 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -239,11 +239,11 @@ void acpi_table_add(const QemuOpts *opts, Error **errp)
     char unsigned *blob = NULL;
 
     {
-        OptsVisitor *ov;
+        Visitor *v;
 
-        ov = opts_visitor_new(opts);
-        visit_type_AcpiTableOptions(opts_get_visitor(ov), NULL, &hdrs, &err);
-        opts_visitor_cleanup(ov);
+        v = opts_visitor_new(opts);
+        visit_type_AcpiTableOptions(v, NULL, &hdrs, &err);
+        visit_free(v);
     }
 
     if (err) {
diff --git a/hw/arm/ast2400.c b/hw/arm/ast2400.c
index b14a82fcde..0555843620 100644
--- a/hw/arm/ast2400.c
+++ b/hw/arm/ast2400.c
@@ -23,11 +23,17 @@
 #define AST2400_UART_5_BASE 0x00184000
 #define AST2400_IOMEM_SIZE 0x00200000
 #define AST2400_IOMEM_BASE 0x1E600000
+#define AST2400_SMC_BASE AST2400_IOMEM_BASE /* Legacy SMC */
+#define AST2400_FMC_BASE 0X1E620000
+#define AST2400_SPI_BASE 0X1E630000
 #define AST2400_VIC_BASE 0x1E6C0000
 #define AST2400_SCU_BASE 0x1E6E2000
 #define AST2400_TIMER_BASE 0x1E782000
 #define AST2400_I2C_BASE 0x1E78A000
 
+#define AST2400_FMC_FLASH_BASE 0x20000000
+#define AST2400_SPI_FLASH_BASE 0x30000000
+
 #define AST2400_A0_SILICON_REV 0x02000303
 
 static const int uart_irqs[] = { 9, 32, 33, 34, 10 };
@@ -85,13 +91,21 @@ static void ast2400_init(Object *obj)
                               "hw-strap1", &error_abort);
     object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu),
                               "hw-strap2", &error_abort);
+
+    object_initialize(&s->smc, sizeof(s->smc), "aspeed.smc.fmc");
+    object_property_add_child(obj, "smc", OBJECT(&s->smc), NULL);
+    qdev_set_parent_bus(DEVICE(&s->smc), sysbus_get_default());
+
+    object_initialize(&s->spi, sizeof(s->spi), "aspeed.smc.spi");
+    object_property_add_child(obj, "spi", OBJECT(&s->spi), NULL);
+    qdev_set_parent_bus(DEVICE(&s->spi), sysbus_get_default());
 }
 
 static void ast2400_realize(DeviceState *dev, Error **errp)
 {
     int i;
     AST2400State *s = AST2400(dev);
-    Error *err = NULL;
+    Error *err = NULL, *local_err = NULL;
 
     /* IO space */
     memory_region_init_io(&s->iomem, NULL, &ast2400_io_ops, NULL,
@@ -147,6 +161,30 @@ static void ast2400_realize(DeviceState *dev, Error **errp)
     sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, AST2400_I2C_BASE);
     sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0,
                        qdev_get_gpio_in(DEVICE(&s->vic), 12));
+
+    /* SMC */
+    object_property_set_int(OBJECT(&s->smc), 1, "num-cs", &err);
+    object_property_set_bool(OBJECT(&s->smc), true, "realized", &local_err);
+    error_propagate(&err, local_err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->smc), 0, AST2400_FMC_BASE);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->smc), 1, AST2400_FMC_FLASH_BASE);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->smc), 0,
+                       qdev_get_gpio_in(DEVICE(&s->vic), 19));
+
+    /* SPI */
+    object_property_set_int(OBJECT(&s->spi), 1, "num-cs", &err);
+    object_property_set_bool(OBJECT(&s->spi), true, "realized", &local_err);
+    error_propagate(&err, local_err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi), 0, AST2400_SPI_BASE);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi), 1, AST2400_SPI_FLASH_BASE);
 }
 
 static void ast2400_class_init(ObjectClass *oc, void *data)
diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c
index 1cd749aa4b..b4e358db65 100644
--- a/hw/arm/fsl-imx25.c
+++ b/hw/arm/fsl-imx25.c
@@ -51,7 +51,7 @@ static void fsl_imx25_init(Object *obj)
     }
 
     for (i = 0; i < FSL_IMX25_NUM_GPTS; i++) {
-        object_initialize(&s->gpt[i], sizeof(s->gpt[i]), TYPE_IMX_GPT);
+        object_initialize(&s->gpt[i], sizeof(s->gpt[i]), TYPE_IMX25_GPT);
         qdev_set_parent_bus(DEVICE(&s->gpt[i]), sysbus_get_default());
     }
 
@@ -249,16 +249,16 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp)
     }
 
     /* initialize 2 x 16 KB ROM */
-    memory_region_init_rom_device(&s->rom[0], NULL, NULL, NULL,
-                                  "imx25.rom0", FSL_IMX25_ROM0_SIZE, &err);
+    memory_region_init_rom(&s->rom[0], NULL,
+                           "imx25.rom0", FSL_IMX25_ROM0_SIZE, &err);
     if (err) {
         error_propagate(errp, err);
         return;
     }
     memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM0_ADDR,
                                 &s->rom[0]);
-    memory_region_init_rom_device(&s->rom[1], NULL, NULL, NULL,
-                                  "imx25.rom1", FSL_IMX25_ROM1_SIZE, &err);
+    memory_region_init_rom(&s->rom[1], NULL,
+                           "imx25.rom1", FSL_IMX25_ROM1_SIZE, &err);
     if (err) {
         error_propagate(errp, err);
         return;
diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c
index 31a3a87911..fe204ace62 100644
--- a/hw/arm/fsl-imx31.c
+++ b/hw/arm/fsl-imx31.c
@@ -47,7 +47,7 @@ static void fsl_imx31_init(Object *obj)
         qdev_set_parent_bus(DEVICE(&s->uart[i]), sysbus_get_default());
     }
 
-    object_initialize(&s->gpt, sizeof(s->gpt), TYPE_IMX_GPT);
+    object_initialize(&s->gpt, sizeof(s->gpt), TYPE_IMX31_GPT);
     qdev_set_parent_bus(DEVICE(&s->gpt), sysbus_get_default());
 
     for (i = 0; i < FSL_IMX31_NUM_EPITS; i++) {
@@ -219,9 +219,8 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp)
     }
 
     /* On a real system, the first 16k is a `secure boot rom' */
-    memory_region_init_rom_device(&s->secure_rom, NULL, NULL, NULL,
-                                  "imx31.secure_rom",
-                                  FSL_IMX31_SECURE_ROM_SIZE, &err);
+    memory_region_init_rom(&s->secure_rom, NULL, "imx31.secure_rom",
+                           FSL_IMX31_SECURE_ROM_SIZE, &err);
     if (err) {
         error_propagate(errp, err);
         return;
@@ -230,8 +229,8 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp)
                                 &s->secure_rom);
 
     /* There is also a 16k ROM */
-    memory_region_init_rom_device(&s->rom, NULL, NULL, NULL, "imx31.rom",
-                                  FSL_IMX31_ROM_SIZE, &err);
+    memory_region_init_rom(&s->rom, NULL, "imx31.rom",
+                           FSL_IMX31_ROM_SIZE, &err);
     if (err) {
         error_propagate(errp, err);
         return;
diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c
index 0c00e7a560..6a1bf263a5 100644
--- a/hw/arm/fsl-imx6.c
+++ b/hw/arm/fsl-imx6.c
@@ -67,7 +67,7 @@ static void fsl_imx6_init(Object *obj)
         object_property_add_child(obj, name, OBJECT(&s->uart[i]), NULL);
     }
 
-    object_initialize(&s->gpt, sizeof(s->gpt), TYPE_IMX_GPT);
+    object_initialize(&s->gpt, sizeof(s->gpt), TYPE_IMX6_GPT);
     qdev_set_parent_bus(DEVICE(&s->gpt), sysbus_get_default());
     object_property_add_child(obj, "gpt", OBJECT(&s->gpt), NULL);
 
@@ -399,8 +399,8 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
                                             FSL_IMX6_ENET_MAC_1588_IRQ));
 
     /* ROM memory */
-    memory_region_init_rom_device(&s->rom, NULL, NULL, NULL, "imx6.rom",
-                                  FSL_IMX6_ROM_SIZE, &err);
+    memory_region_init_rom(&s->rom, NULL, "imx6.rom",
+                           FSL_IMX6_ROM_SIZE, &err);
     if (err) {
         error_propagate(errp, err);
         return;
@@ -409,8 +409,8 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
                                 &s->rom);
 
     /* CAAM memory */
-    memory_region_init_rom_device(&s->caam, NULL, NULL, NULL, "imx6.caam",
-                                  FSL_IMX6_CAAM_MEM_SIZE, &err);
+    memory_region_init_rom(&s->caam, NULL, "imx6.caam",
+                           FSL_IMX6_CAAM_MEM_SIZE, &err);
     if (err) {
         error_propagate(errp, err);
         return;
diff --git a/hw/arm/palmetto-bmc.c b/hw/arm/palmetto-bmc.c
index b8eed21348..54e29a865d 100644
--- a/hw/arm/palmetto-bmc.c
+++ b/hw/arm/palmetto-bmc.c
@@ -18,6 +18,8 @@
 #include "hw/arm/ast2400.h"
 #include "hw/boards.h"
 #include "qemu/log.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
 
 static struct arm_boot_info palmetto_bmc_binfo = {
     .loader_start = AST2400_SDRAM_BASE,
@@ -30,6 +32,32 @@ typedef struct PalmettoBMCState {
     MemoryRegion ram;
 } PalmettoBMCState;
 
+static void palmetto_bmc_init_flashes(AspeedSMCState *s, const char *flashtype,
+                                      Error **errp)
+{
+    int i ;
+
+    for (i = 0; i < s->num_cs; ++i) {
+        AspeedSMCFlash *fl = &s->flashes[i];
+        DriveInfo *dinfo = drive_get_next(IF_MTD);
+        qemu_irq cs_line;
+
+        /*
+         * FIXME: check that we are not using a flash module exceeding
+         * the controller segment size
+         */
+        fl->flash = ssi_create_slave_no_init(s->spi, flashtype);
+        if (dinfo) {
+            qdev_prop_set_drive(fl->flash, "drive", blk_by_legacy_dinfo(dinfo),
+                                errp);
+        }
+        qdev_init_nofail(fl->flash);
+
+        cs_line = qdev_get_gpio_in_named(fl->flash, SSI_GPIO_CS, 0);
+        sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line);
+    }
+}
+
 static void palmetto_bmc_init(MachineState *machine)
 {
     PalmettoBMCState *bmc;
@@ -49,6 +77,9 @@ static void palmetto_bmc_init(MachineState *machine)
     object_property_set_bool(OBJECT(&bmc->soc), true, "realized",
                              &error_abort);
 
+    palmetto_bmc_init_flashes(&bmc->soc.smc, "n25q256a", &error_abort);
+    palmetto_bmc_init_flashes(&bmc->soc.spi, "mx25l25635e", &error_abort);
+
     palmetto_bmc_binfo.kernel_filename = machine->kernel_filename;
     palmetto_bmc_binfo.initrd_filename = machine->initrd_filename;
     palmetto_bmc_binfo.kernel_cmdline = machine->kernel_cmdline;
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
index 776c51e398..4e7ac8cc4f 100644
--- a/hw/arm/sabrelite.c
+++ b/hw/arm/sabrelite.c
@@ -86,13 +86,19 @@ static void sabrelite_init(MachineState *machine)
         spi_bus = (SSIBus *)qdev_get_child_bus(DEVICE(spi_dev), "spi");
         if (spi_bus) {
             DeviceState *flash_dev;
-
-            flash_dev = ssi_create_slave(spi_bus, "sst25vf016b");
-            if (flash_dev) {
-                qemu_irq cs_line = qdev_get_gpio_in_named(flash_dev,
-                                                          SSI_GPIO_CS, 0);
-                sysbus_connect_irq(SYS_BUS_DEVICE(spi_dev), 1, cs_line);
+            qemu_irq cs_line;
+            DriveInfo *dinfo = drive_get_next(IF_MTD);
+
+            flash_dev = ssi_create_slave_no_init(spi_bus, "sst25vf016b");
+            if (dinfo) {
+                qdev_prop_set_drive(flash_dev, "drive",
+                                    blk_by_legacy_dinfo(dinfo),
+                                    &error_fatal);
             }
+            qdev_init_nofail(flash_dev);
+
+            cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0);
+            sysbus_connect_irq(SYS_BUS_DEVICE(spi_dev), 1, cs_line);
         }
     }
 }
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index ba40f8302b..41cc2eeeb1 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -598,15 +598,13 @@ static uint32_t spitz_lcdtg_transfer(SSISlave *dev, uint32_t value)
     return 0;
 }
 
-static int spitz_lcdtg_init(SSISlave *dev)
+static void spitz_lcdtg_realize(SSISlave *dev, Error **errp)
 {
     SpitzLCDTG *s = FROM_SSI_SLAVE(SpitzLCDTG, dev);
     spitz_lcdtg = s;
     s->bl_power = 0;
     s->bl_intensity = 0x20;
-
-    return 0;
 }
 
 /* SSP devices */
@@ -666,7 +664,7 @@ static void spitz_adc_temp_on(void *opaque, int line, int level)
     max111x_set_input(max1111, MAX1111_BATT_TEMP, 0);
 }
 
-static int corgi_ssp_init(SSISlave *d)
+static void corgi_ssp_realize(SSISlave *d, Error **errp)
 {
     DeviceState *dev = DEVICE(d);
     CorgiSSPState *s = FROM_SSI_SLAVE(CorgiSSPState, d);
@@ -675,8 +673,6 @@ static int corgi_ssp_init(SSISlave *d)
     s->bus[0] = ssi_create_bus(dev, "ssi0");
     s->bus[1] = ssi_create_bus(dev, "ssi1");
     s->bus[2] = ssi_create_bus(dev, "ssi2");
-
-    return 0;
 }
 
 static void spitz_ssp_attach(PXA2xxState *cpu)
@@ -1121,7 +1117,7 @@ static void corgi_ssp_class_init(ObjectClass *klass, void *data)
     DeviceClass *dc = DEVICE_CLASS(klass);
     SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
 
-    k->init = corgi_ssp_init;
+    k->realize = corgi_ssp_realize;
     k->transfer = corgi_ssp_transfer;
     dc->vmsd = &vmstate_corgi_ssp_regs;
 }
@@ -1150,7 +1146,7 @@ static void spitz_lcdtg_class_init(ObjectClass *klass, void *data)
     DeviceClass *dc = DEVICE_CLASS(klass);
     SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
 
-    k->init = spitz_lcdtg_init;
+    k->realize = spitz_lcdtg_realize;
     k->transfer = spitz_lcdtg_transfer;
     dc->vmsd = &vmstate_spitz_lcdtg_regs;
 }
diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c
index 4e9494f94c..2db66508b5 100644
--- a/hw/arm/tosa.c
+++ b/hw/arm/tosa.c
@@ -127,10 +127,9 @@ static uint32_t tosa_ssp_tansfer(SSISlave *dev, uint32_t value)
     return 0;
 }
 
-static int tosa_ssp_init(SSISlave *dev)
+static void tosa_ssp_realize(SSISlave *dev, Error **errp)
 {
     /* Nothing to do. */
-    return 0;
 }
 
 #define TYPE_TOSA_DAC "tosa_dac"
@@ -283,7 +282,7 @@ static void tosa_ssp_class_init(ObjectClass *klass, void *data)
 {
     SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
 
-    k->init = tosa_ssp_init;
+    k->realize = tosa_ssp_realize;
     k->transfer = tosa_ssp_tansfer;
 }
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index c5c125e920..4dafd42be8 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1021,6 +1021,7 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
     qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
     qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0,
                            nr_pcie_buses - 1);
+    qemu_fdt_setprop(vbi->fdt, nodename, "dma-coherent", NULL, 0);
 
     if (vbi->v2m_phandle) {
         qemu_fdt_setprop_cells(vbi->fdt, nodename, "msi-parent",
@@ -1175,6 +1176,10 @@ static void machvirt_init(MachineState *machine)
     VirtGuestInfoState *guest_info_state = g_malloc0(sizeof *guest_info_state);
     VirtGuestInfo *guest_info = &guest_info_state->info;
     char **cpustr;
+    ObjectClass *oc;
+    const char *typename;
+    CPUClass *cc;
+    Error *err = NULL;
     bool firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0);
 
     if (!cpu_model) {
@@ -1258,26 +1263,24 @@ static void machvirt_init(MachineState *machine)
 
     create_fdt(vbi);
 
-    for (n = 0; n < smp_cpus; n++) {
-        ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpustr[0]);
-        CPUClass *cc = CPU_CLASS(oc);
-        Object *cpuobj;
-        Error *err = NULL;
-        char *cpuopts = g_strdup(cpustr[1]);
-
-        if (!oc) {
-            error_report("Unable to find CPU definition");
-            exit(1);
-        }
-        cpuobj = object_new(object_class_get_name(oc));
+    oc = cpu_class_by_name(TYPE_ARM_CPU, cpustr[0]);
+    if (!oc) {
+        error_report("Unable to find CPU definition");
+        exit(1);
+    }
+    typename = object_class_get_name(oc);
 
-        /* Handle any CPU options specified by the user */
-        cc->parse_features(CPU(cpuobj), cpuopts, &err);
-        g_free(cpuopts);
-        if (err) {
-            error_report_err(err);
-            exit(1);
-        }
+    /* convert -smp CPU options specified by the user into global props */
+    cc = CPU_CLASS(oc);
+    cc->parse_features(typename, cpustr[1], &err);
+    g_strfreev(cpustr);
+    if (err) {
+        error_report_err(err);
+        exit(1);
+    }
+
+    for (n = 0; n < smp_cpus; n++) {
+        Object *cpuobj = object_new(typename);
 
         if (!vms->secure) {
             object_property_set_bool(cpuobj, false, "has_el3", NULL);
@@ -1308,7 +1311,6 @@ static void machvirt_init(MachineState *machine)
         object_property_set_bool(cpuobj, true, "realized", NULL);
     }
-    g_strfreev(cpustr);
 
     fdt_add_timer_nodes(vbi, gic_version);
     fdt_add_cpu_nodes(vbi);
     fdt_add_psci_node(vbi);
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
index aefebcfa6d..7dac20d67d 100644
--- a/hw/arm/xilinx_zynq.c
+++ b/hw/arm/xilinx_zynq.c
@@ -138,7 +138,13 @@ static inline void zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq,
     spi =
(SSIBus *)qdev_get_child_bus(dev, bus_name); for (j = 0; j < num_ss; ++j) { - flash_dev = ssi_create_slave(spi, "n25q128"); + DriveInfo *dinfo = drive_get_next(IF_MTD); + flash_dev = ssi_create_slave_no_init(spi, "n25q128"); + if (dinfo) { + qdev_prop_set_drive(flash_dev, "drive", + blk_by_legacy_dinfo(dinfo), &error_fatal); + } + qdev_init_nofail(flash_dev); cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); sysbus_connect_irq(busdev, i * num_ss + j + 1, cs_line); @@ -294,6 +300,12 @@ static void zynq_init(MachineState *machine) sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - IRQ_OFFSET]); } + dev = qdev_create(NULL, "xlnx.ps7-dev-cfg"); + qdev_init_nofail(dev); + busdev = SYS_BUS_DEVICE(dev); + sysbus_connect_irq(busdev, 0, pic[40 - IRQ_OFFSET]); + sysbus_mmio_map(busdev, 0, 0xF8007000); + zynq_binfo.ram_size = ram_size; zynq_binfo.kernel_filename = kernel_filename; zynq_binfo.kernel_cmdline = kernel_cmdline; diff --git a/hw/arm/xlnx-ep108.c b/hw/arm/xlnx-ep108.c index 34b4641712..4ec590a25d 100644 --- a/hw/arm/xlnx-ep108.c +++ b/hw/arm/xlnx-ep108.c @@ -88,12 +88,19 @@ static void xlnx_ep108_init(MachineState *machine) SSIBus *spi_bus; DeviceState *flash_dev; qemu_irq cs_line; + DriveInfo *dinfo = drive_get_next(IF_MTD); gchar *bus_name = g_strdup_printf("spi%d", i); spi_bus = (SSIBus *)qdev_get_child_bus(DEVICE(&s->soc), bus_name); g_free(bus_name); - flash_dev = ssi_create_slave(spi_bus, "sst25wf080"); + flash_dev = ssi_create_slave_no_init(spi_bus, "sst25wf080"); + if (dinfo) { + qdev_prop_set_drive(flash_dev, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + qdev_init_nofail(flash_dev); + cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi[i]), 1, cs_line); diff --git a/hw/arm/z2.c b/hw/arm/z2.c index aea895a500..68a92f3184 100644 --- a/hw/arm/z2.c +++ b/hw/arm/z2.c @@ -151,14 +151,12 @@ static void z2_lcd_cs(void *opaque, int line, int level) z2_lcd->selected = !level; } -static int zipit_lcd_init(SSISlave *dev) +static void zipit_lcd_realize(SSISlave *dev, Error **errp) { ZipitLCD *z = FROM_SSI_SLAVE(ZipitLCD, dev); z->selected = 0; z->enabled = 0; z->pos = 0; - - return 0; } static VMStateDescription vmstate_zipit_lcd_state = { @@ -181,7 +179,7 @@ static void zipit_lcd_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); SSISlaveClass *k = SSI_SLAVE_CLASS(klass); - k->init = zipit_lcd_init; + k->realize = zipit_lcd_realize; k->transfer = zipit_lcd_transfer; dc->vmsd = &vmstate_zipit_lcd_state; } diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c index 098b17d020..cd95340cd9 100644 --- a/hw/audio/intel-hda.c +++ b/hw/audio/intel-hda.c @@ -191,7 +191,7 @@ struct IntelHDAState { /* properties */ uint32_t debug; - uint32_t msi; + OnOffAuto msi; bool old_msi_addr; }; @@ -256,7 +256,7 @@ static void intel_hda_update_int_sts(IntelHDAState *d) static void intel_hda_update_irq(IntelHDAState *d) { - int msi = d->msi && msi_enabled(&d->pci); + bool msi = msi_enabled(&d->pci); int level; intel_hda_update_int_sts(d); @@ -1132,6 +1132,8 @@ static void intel_hda_realize(PCIDevice *pci, Error **errp) { IntelHDAState *d = INTEL_HDA(pci); uint8_t *conf = d->pci.config; + Error *err = NULL; + int ret; d->name = object_get_typename(OBJECT(d)); @@ -1140,12 +1142,27 @@ static void intel_hda_realize(PCIDevice *pci, Error **errp) /* HDCTL off 0x40 bit 0 selects signaling mode (1-HDA, 0 - Ac97) 18.1.19 */ conf[0x40] = 0x01; + if (d->msi != ON_OFF_AUTO_OFF) { + ret = msi_init(&d->pci, 
d->old_msi_addr ? 0x50 : 0x60, + 1, true, false, &err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!ret || ret == -ENOTSUP); + if (ret && d->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&err, "You have to use msi=auto (default) or " + "msi=off with this machine type.\n"); + error_propagate(errp, err); + return; + } + assert(!err || d->msi == ON_OFF_AUTO_AUTO); + /* With msi=auto, we fall back to MSI off silently */ + error_free(err); + } + memory_region_init_io(&d->mmio, OBJECT(d), &intel_hda_mmio_ops, d, "intel-hda", 0x4000); pci_register_bar(&d->pci, 0, 0, &d->mmio); - if (d->msi) { - msi_init(&d->pci, d->old_msi_addr ? 0x50 : 0x60, 1, true, false); - } hda_codec_bus_init(DEVICE(pci), &d->codecs, sizeof(d->codecs), intel_hda_response, intel_hda_xfer); @@ -1235,7 +1252,7 @@ static const VMStateDescription vmstate_intel_hda = { static Property intel_hda_properties[] = { DEFINE_PROP_UINT32("debug", IntelHDAState, debug, 0), - DEFINE_PROP_UINT32("msi", IntelHDAState, msi, 1), + DEFINE_PROP_ON_OFF_AUTO("msi", IntelHDAState, msi, ON_OFF_AUTO_AUTO), DEFINE_PROP_BOOL("old_msi_addr", IntelHDAState, old_msi_addr, false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index 326b688e83..ca8c12c0f8 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -28,6 +28,7 @@ #include "hw/ssi/ssi.h" #include "qemu/bitops.h" #include "qemu/log.h" +#include "qapi/error.h" #ifndef M25P80_ERR_DEBUG #define M25P80_ERR_DEBUG 0 @@ -389,7 +390,7 @@ typedef struct Flash { uint32_t pos; uint8_t needed_bytes; uint8_t cmd_in_progress; - uint64_t cur_addr; + uint32_t cur_addr; uint32_t nonvolatile_cfg; /* Configuration register for Macronix */ uint32_t volatile_cfg; @@ -446,6 +447,11 @@ static inline Manufacturer get_man(Flash *s) static void blk_sync_complete(void *opaque, int ret) { + QEMUIOVector *iov = opaque; + + qemu_iovec_destroy(iov); + g_free(iov); + /* do nothing. Masters do not directly interact with the backing store, * only the working copy so no mutexing required. 
*/ @@ -453,31 +459,33 @@ static void blk_sync_complete(void *opaque, int ret) static void flash_sync_page(Flash *s, int page) { - QEMUIOVector iov; + QEMUIOVector *iov; if (!s->blk || blk_is_read_only(s->blk)) { return; } - qemu_iovec_init(&iov, 1); - qemu_iovec_add(&iov, s->storage + page * s->pi->page_size, + iov = g_new(QEMUIOVector, 1); + qemu_iovec_init(iov, 1); + qemu_iovec_add(iov, s->storage + page * s->pi->page_size, s->pi->page_size); - blk_aio_pwritev(s->blk, page * s->pi->page_size, &iov, 0, - blk_sync_complete, NULL); + blk_aio_pwritev(s->blk, page * s->pi->page_size, iov, 0, + blk_sync_complete, iov); } static inline void flash_sync_area(Flash *s, int64_t off, int64_t len) { - QEMUIOVector iov; + QEMUIOVector *iov; if (!s->blk || blk_is_read_only(s->blk)) { return; } assert(!(len % BDRV_SECTOR_SIZE)); - qemu_iovec_init(&iov, 1); - qemu_iovec_add(&iov, s->storage + off, len); - blk_aio_pwritev(s->blk, off, &iov, 0, blk_sync_complete, NULL); + iov = g_new(QEMUIOVector, 1); + qemu_iovec_init(iov, 1); + qemu_iovec_add(iov, s->storage + off, len); + blk_aio_pwritev(s->blk, off, iov, 0, blk_sync_complete, iov); } static void flash_erase(Flash *s, int offset, FlashCMD cmd) @@ -530,9 +538,9 @@ static inline void flash_sync_dirty(Flash *s, int64_t newpage) } static inline -void flash_write8(Flash *s, uint64_t addr, uint8_t data) +void flash_write8(Flash *s, uint32_t addr, uint8_t data) { - int64_t page = addr / s->pi->page_size; + uint32_t page = addr / s->pi->page_size; uint8_t prev = s->storage[s->cur_addr]; if (!s->write_enable) { @@ -540,7 +548,7 @@ void flash_write8(Flash *s, uint64_t addr, uint8_t data) } if ((prev ^ data) & data) { - DB_PRINT_L(1, "programming zero to one! addr=%" PRIx64 " %" PRIx8 + DB_PRINT_L(1, "programming zero to one! addr=%" PRIx32 " %" PRIx8 " -> %" PRIx8 "\n", addr, prev, data); } @@ -581,18 +589,16 @@ static inline int get_addr_length(Flash *s) static void complete_collecting_data(Flash *s) { - int i; - - s->cur_addr = 0; + int i, n; - for (i = 0; i < get_addr_length(s); ++i) { + n = get_addr_length(s); + s->cur_addr = (n == 3 ? 
s->ear : 0); + for (i = 0; i < n; ++i) { s->cur_addr <<= 8; s->cur_addr |= s->data[i]; } - if (get_addr_length(s) == 3) { - s->cur_addr += s->ear * MAX_3BYTES_SIZE; - } + s->cur_addr &= s->size - 1; s->state = STATE_IDLE; @@ -1091,17 +1097,17 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx) switch (s->state) { case STATE_PAGE_PROGRAM: - DB_PRINT_L(1, "page program cur_addr=%#" PRIx64 " data=%" PRIx8 "\n", + DB_PRINT_L(1, "page program cur_addr=%#" PRIx32 " data=%" PRIx8 "\n", s->cur_addr, (uint8_t)tx); flash_write8(s, s->cur_addr, (uint8_t)tx); - s->cur_addr++; + s->cur_addr = (s->cur_addr + 1) & (s->size - 1); break; case STATE_READ: r = s->storage[s->cur_addr]; - DB_PRINT_L(1, "READ 0x%" PRIx64 "=%" PRIx8 "\n", s->cur_addr, + DB_PRINT_L(1, "READ 0x%" PRIx32 "=%" PRIx8 "\n", s->cur_addr, (uint8_t)r); - s->cur_addr = (s->cur_addr + 1) % s->size; + s->cur_addr = (s->cur_addr + 1) & (s->size - 1); break; case STATE_COLLECTING_DATA: @@ -1132,9 +1138,8 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx) return r; } -static int m25p80_init(SSISlave *ss) +static void m25p80_realize(SSISlave *ss, Error **errp) { - DriveInfo *dinfo; Flash *s = M25P80(ss); M25P80Class *mc = M25P80_GET_CLASS(s); @@ -1143,28 +1148,19 @@ static int m25p80_init(SSISlave *ss) s->size = s->pi->sector_size * s->pi->n_sectors; s->dirty_page = -1; - /* FIXME use a qdev drive property instead of drive_get_next() */ - dinfo = drive_get_next(IF_MTD); - - if (dinfo) { + if (s->blk) { DB_PRINT_L(0, "Binding to IF_MTD drive\n"); - s->blk = blk_by_legacy_dinfo(dinfo); - blk_attach_dev_nofail(s->blk, s); - s->storage = blk_blockalign(s->blk, s->size); - /* FIXME: Move to late init */ if (blk_pread(s->blk, 0, s->storage, s->size) != s->size) { - fprintf(stderr, "Failed to initialize SPI flash!\n"); - return 1; + error_setg(errp, "failed to read the initial flash content"); + return; } } else { DB_PRINT_L(0, "No BDRV - binding to RAM\n"); s->storage = blk_blockalign(NULL, s->size); memset(s->storage, 0xFF, s->size); } - - return 0; } static void m25p80_reset(DeviceState *d) @@ -1186,6 +1182,7 @@ static Property m25p80_properties[] = { DEFINE_PROP_UINT8("spansion-cr2nv", Flash, spansion_cr2nv, 0x8), DEFINE_PROP_UINT8("spansion-cr3nv", Flash, spansion_cr3nv, 0x2), DEFINE_PROP_UINT8("spansion-cr4nv", Flash, spansion_cr4nv, 0x10), + DEFINE_PROP_DRIVE("drive", Flash, blk), DEFINE_PROP_END_OF_LIST(), }; @@ -1201,7 +1198,8 @@ static const VMStateDescription vmstate_m25p80 = { VMSTATE_UINT32(pos, Flash), VMSTATE_UINT8(needed_bytes, Flash), VMSTATE_UINT8(cmd_in_progress, Flash), - VMSTATE_UINT64(cur_addr, Flash), + VMSTATE_UNUSED(4), + VMSTATE_UINT32(cur_addr, Flash), VMSTATE_BOOL(write_enable, Flash), VMSTATE_BOOL_V(reset_enable, Flash, 2), VMSTATE_UINT8_V(ear, Flash, 2), @@ -1224,7 +1222,7 @@ static void m25p80_class_init(ObjectClass *klass, void *data) SSISlaveClass *k = SSI_SLAVE_CLASS(klass); M25P80Class *mc = M25P80_CLASS(klass); - k->init = m25p80_init; + k->realize = m25p80_realize; k->transfer = m25p80_transfer8; k->set_cs = m25p80_cs; k->cs_polarity = SSI_CS_LOW; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index fb43bbaa46..ae86e944ea 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -384,7 +384,7 @@ static int multireq_compare(const void *a, const void *b) void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) { int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0; - int max_xfer_len = 0; + uint32_t max_transfer; int64_t sector_num = 0; if 
(mrb->num_reqs == 1) { @@ -393,8 +393,7 @@ void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) return; } - max_xfer_len = blk_get_max_transfer_length(mrb->reqs[0]->dev->blk); - max_xfer_len = MIN_NON_ZERO(max_xfer_len, BDRV_REQUEST_MAX_SECTORS); + max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk); qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs), &multireq_compare); @@ -410,8 +409,9 @@ void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) */ if (sector_num + nb_sectors != req->sector_num || niov > blk_get_max_iov(blk) - req->qiov.niov || - req->qiov.size / BDRV_SECTOR_SIZE > max_xfer_len || - nb_sectors > max_xfer_len - req->qiov.size / BDRV_SECTOR_SIZE) { + req->qiov.size > max_transfer || + nb_sectors > (max_transfer - + req->qiov.size) / BDRV_SECTOR_SIZE) { submit_requests(blk, mrb, start, num_reqs, niov); num_reqs = 0; } diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs index 82a9ef84f8..cfd4840397 100644 --- a/hw/core/Makefile.objs +++ b/hw/core/Makefile.objs @@ -15,4 +15,5 @@ common-obj-$(CONFIG_SOFTMMU) += machine.o common-obj-$(CONFIG_SOFTMMU) += null-machine.o common-obj-$(CONFIG_SOFTMMU) += loader.o common-obj-$(CONFIG_SOFTMMU) += qdev-properties-system.o +common-obj-$(CONFIG_SOFTMMU) += register.o common-obj-$(CONFIG_PLATFORM_BUS) += platform-bus.o diff --git a/hw/core/machine.c b/hw/core/machine.c index ccdd5fa3e7..2fe6ff6f30 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -300,20 +300,6 @@ static void machine_set_firmware(Object *obj, const char *value, Error **errp) ms->firmware = g_strdup(value); } -static bool machine_get_iommu(Object *obj, Error **errp) -{ - MachineState *ms = MACHINE(obj); - - return ms->iommu; -} - -static void machine_set_iommu(Object *obj, bool value, Error **errp) -{ - MachineState *ms = MACHINE(obj); - - ms->iommu = value; -} - static void machine_set_suppress_vmdesc(Object *obj, bool value, Error **errp) { MachineState *ms = MACHINE(obj); @@ -493,12 +479,6 @@ static void machine_initfn(Object *obj) object_property_set_description(obj, "firmware", "Firmware image", NULL); - object_property_add_bool(obj, "iommu", - machine_get_iommu, - machine_set_iommu, NULL); - object_property_set_description(obj, "iommu", - "Set on/off to enable/disable Intel IOMMU (VT-d)", - NULL); object_property_add_bool(obj, "suppress-vmdesc", machine_get_suppress_vmdesc, machine_set_suppress_vmdesc, NULL); @@ -580,6 +560,24 @@ static void machine_class_finalize(ObjectClass *klass, void *data) } } +void machine_register_compat_props(MachineState *machine) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + int i; + GlobalProperty *p; + + if (!mc->compat_props) { + return; + } + + for (i = 0; i < mc->compat_props->len; i++) { + p = g_array_index(mc->compat_props, GlobalProperty *, i); + /* Machine compat_props must never cause errors: */ + p->errp = &error_abort; + qdev_prop_register_global(p); + } +} + static const TypeInfo machine_info = { .name = TYPE_MACHINE, .parent = TYPE_OBJECT, diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index 891219ae05..65d9fa9f53 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -1,5 +1,5 @@ /* - * qdev property parsing and global properties + * qdev property parsing * (parts specific for qemu-system-*) * * This file is based on code from hw/qdev-properties.c from @@ -82,7 +82,7 @@ static void parse_drive(DeviceState *dev, const char *str, void **ptr, if (blk_attach_dev(blk, dev) < 0) { DriveInfo *dinfo = 
blk_legacy_dinfo(blk); - if (dinfo->type != IF_NONE) { + if (dinfo && dinfo->type != IF_NONE) { error_setg(errp, "Drive '%s' is already in use because " "it has been automatically connected to another " "device (did you need 'if=none' in the drive options?)", @@ -394,22 +394,3 @@ void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd) } nd->instantiated = 1; } - -static int qdev_add_one_global(void *opaque, QemuOpts *opts, Error **errp) -{ - GlobalProperty *g; - - g = g_malloc0(sizeof(*g)); - g->driver = qemu_opt_get(opts, "driver"); - g->property = qemu_opt_get(opts, "property"); - g->value = qemu_opt_get(opts, "value"); - g->user_provided = true; - qdev_prop_register_global(g); - return 0; -} - -void qemu_add_globals(void) -{ - qemu_opts_foreach(qemu_find_opts("global"), - qdev_add_one_global, NULL, NULL); -} diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index e3b2184a60..3c20c8e4b2 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -1085,10 +1085,14 @@ static void qdev_prop_set_globals_for_type(DeviceState *dev, prop->used = true; object_property_parse(OBJECT(dev), prop->value, prop->property, &err); if (err != NULL) { - assert(prop->user_provided); - error_reportf_err(err, "Warning: global %s.%s=%s ignored: ", - prop->driver, prop->property, prop->value); - return; + error_prepend(&err, "can't apply global %s.%s=%s: ", + prop->driver, prop->property, prop->value); + if (prop->errp) { + error_propagate(prop->errp, err); + } else { + assert(prop->user_provided); + error_reportf_err(err, "Warning: "); + } } } } diff --git a/hw/core/register.c b/hw/core/register.c new file mode 100644 index 0000000000..4bfbc508de --- /dev/null +++ b/hw/core/register.c @@ -0,0 +1,287 @@ +/* + * Register Definition API + * + * Copyright (c) 2016 Xilinx Inc. + * Copyright (c) 2013 Peter Crosthwaite <peter.crosthwaite@xilinx.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "hw/register.h" +#include "hw/qdev.h" +#include "qemu/log.h" + +static inline void register_write_val(RegisterInfo *reg, uint64_t val) +{ + g_assert(reg->data); + + switch (reg->data_size) { + case 1: + *(uint8_t *)reg->data = val; + break; + case 2: + *(uint16_t *)reg->data = val; + break; + case 4: + *(uint32_t *)reg->data = val; + break; + case 8: + *(uint64_t *)reg->data = val; + break; + default: + g_assert_not_reached(); + } +} + +static inline uint64_t register_read_val(RegisterInfo *reg) +{ + switch (reg->data_size) { + case 1: + return *(uint8_t *)reg->data; + case 2: + return *(uint16_t *)reg->data; + case 4: + return *(uint32_t *)reg->data; + case 8: + return *(uint64_t *)reg->data; + default: + g_assert_not_reached(); + } + return 0; /* unreachable */ +} + +void register_write(RegisterInfo *reg, uint64_t val, uint64_t we, + const char *prefix, bool debug) +{ + uint64_t old_val, new_val, test, no_w_mask; + const RegisterAccessInfo *ac; + + assert(reg); + + ac = reg->access; + + if (!ac || !ac->name) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: write to undefined device state " + "(written value: %#" PRIx64 ")\n", prefix, val); + return; + } + + old_val = reg->data ? register_read_val(reg) : ac->reset; + + test = (old_val ^ val) & ac->rsvd; + if (test) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: change of value in reserved bit" + "fields: %#" PRIx64 ")\n", prefix, test); + } + + test = val & ac->unimp; + if (test) { + qemu_log_mask(LOG_UNIMP, + "%s:%s writing %#" PRIx64 " to unimplemented bits:" \ + " %#" PRIx64 "", + prefix, reg->access->name, val, ac->unimp); + } + + /* Create the no write mask based on the read only, write to clear and + * reserved bit masks. + */ + no_w_mask = ac->ro | ac->w1c | ac->rsvd | ~we; + new_val = (val & ~no_w_mask) | (old_val & no_w_mask); + new_val &= ~(val & ac->w1c); + + if (ac->pre_write) { + new_val = ac->pre_write(reg, new_val); + } + + if (debug) { + qemu_log("%s:%s: write of value %#" PRIx64 "\n", prefix, ac->name, + new_val); + } + + register_write_val(reg, new_val); + + if (ac->post_write) { + ac->post_write(reg, new_val); + } +} + +uint64_t register_read(RegisterInfo *reg, uint64_t re, const char* prefix, + bool debug) +{ + uint64_t ret; + const RegisterAccessInfo *ac; + + assert(reg); + + ac = reg->access; + if (!ac || !ac->name) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: read from undefined device state\n", + prefix); + return 0; + } + + ret = reg->data ? 
register_read_val(reg) : ac->reset; + + register_write_val(reg, ret & ~(ac->cor & re)); + + /* Mask based on the read enable size */ + ret &= re; + + if (ac->post_read) { + ret = ac->post_read(reg, ret); + } + + if (debug) { + qemu_log("%s:%s: read of value %#" PRIx64 "\n", prefix, + ac->name, ret); + } + + return ret; +} + +void register_reset(RegisterInfo *reg) +{ + g_assert(reg); + + if (!reg->data || !reg->access) { + return; + } + + register_write_val(reg, reg->access->reset); +} + +void register_init(RegisterInfo *reg) +{ + assert(reg); + + if (!reg->data || !reg->access) { + return; + } + + object_initialize((void *)reg, sizeof(*reg), TYPE_REGISTER); +} + +void register_write_memory(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + RegisterInfo *reg = NULL; + uint64_t we; + int i; + + for (i = 0; i < reg_array->num_elements; i++) { + if (reg_array->r[i]->access->addr == addr) { + reg = reg_array->r[i]; + break; + } + } + + if (!reg) { + qemu_log_mask(LOG_GUEST_ERROR, "Write to unimplemented register at " \ + "address: %#" PRIx64 "\n", addr); + return; + } + + /* Generate appropriate write enable mask */ + if (reg->data_size < size) { + we = MAKE_64BIT_MASK(0, reg->data_size * 8); + } else { + we = MAKE_64BIT_MASK(0, size * 8); + } + + register_write(reg, value, we, reg_array->prefix, + reg_array->debug); +} + +uint64_t register_read_memory(void *opaque, hwaddr addr, + unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + RegisterInfo *reg = NULL; + uint64_t read_val; + int i; + + for (i = 0; i < reg_array->num_elements; i++) { + if (reg_array->r[i]->access->addr == addr) { + reg = reg_array->r[i]; + break; + } + } + + if (!reg) { + qemu_log_mask(LOG_GUEST_ERROR, "Read to unimplemented register at " \ + "address: %#" PRIx64 "\n", addr); + return 0; + } + + read_val = register_read(reg, size * 8, reg_array->prefix, + reg_array->debug); + + return extract64(read_val, 0, size * 8); +} + +RegisterInfoArray *register_init_block32(DeviceState *owner, + const RegisterAccessInfo *rae, + int num, RegisterInfo *ri, + uint32_t *data, + const MemoryRegionOps *ops, + bool debug_enabled, + uint64_t memory_size) +{ + const char *device_prefix = object_get_typename(OBJECT(owner)); + RegisterInfoArray *r_array = g_new0(RegisterInfoArray, 1); + int i; + + r_array->r = g_new0(RegisterInfo *, num); + r_array->num_elements = num; + r_array->debug = debug_enabled; + r_array->prefix = device_prefix; + + for (i = 0; i < num; i++) { + int index = rae[i].addr / 4; + RegisterInfo *r = &ri[index]; + + *r = (RegisterInfo) { + .data = &data[index], + .data_size = sizeof(uint32_t), + .access = &rae[i], + .opaque = owner, + }; + register_init(r); + + r_array->r[i] = r; + } + + memory_region_init_io(&r_array->mem, OBJECT(owner), ops, r_array, + device_prefix, memory_size); + + return r_array; +} + +void register_finalize_block(RegisterInfoArray *r_array) +{ + object_unparent(OBJECT(&r_array->mem)); + g_free(r_array->r); + g_free(r_array); +} + +static const TypeInfo register_info = { + .name = TYPE_REGISTER, + .parent = TYPE_DEVICE, +}; + +static void register_register_types(void) +{ + type_register_static(®ister_info); +} + +type_init(register_register_types) diff --git a/hw/display/ads7846.c b/hw/display/ads7846.c index 05aa2d1e6b..166edade7d 100644 --- a/hw/display/ads7846.c +++ b/hw/display/ads7846.c @@ -133,7 +133,7 @@ static const VMStateDescription vmstate_ads7846 = { } }; -static int ads7846_init(SSISlave *d) +static void ads7846_realize(SSISlave 
*d, Error **errp) { DeviceState *dev = DEVICE(d); ADS7846State *s = FROM_SSI_SLAVE(ADS7846State, d); @@ -152,14 +152,13 @@ static int ads7846_init(SSISlave *d) ads7846_int_update(s); vmstate_register(NULL, -1, &vmstate_ads7846, s); - return 0; } static void ads7846_class_init(ObjectClass *klass, void *data) { SSISlaveClass *k = SSI_SLAVE_CLASS(klass); - k->init = ads7846_init; + k->realize = ads7846_realize; k->transfer = ads7846_transfer; } diff --git a/hw/display/dpcd.c b/hw/display/dpcd.c index 5a36855240..ce92ff6e2a 100644 --- a/hw/display/dpcd.c +++ b/hw/display/dpcd.c @@ -28,7 +28,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" -#include "hw/misc/aux.h" +#include "hw/misc/auxbus.h" #include "hw/display/dpcd.h" #ifndef DEBUG_DPCD diff --git a/hw/display/qxl.c b/hw/display/qxl.c index 919dc5cd36..46cc86690c 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -504,6 +504,7 @@ static void interface_set_compression_level(QXLInstance *sin, int level) qxl_rom_set_dirty(qxl); } +#if SPICE_NEEDS_SET_MM_TIME static void interface_set_mm_time(QXLInstance *sin, uint32_t mm_time) { PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); @@ -517,6 +518,7 @@ static void interface_set_mm_time(QXLInstance *sin, uint32_t mm_time) qxl->rom->mm_clock = cpu_to_le32(mm_time); qxl_rom_set_dirty(qxl); } +#endif static void interface_get_init_info(QXLInstance *sin, QXLDevInitInfo *info) { @@ -893,7 +895,8 @@ static void interface_update_area_complete(QXLInstance *sin, int qxl_i; qemu_mutex_lock(&qxl->ssd.lock); - if (surface_id != 0 || !qxl->render_update_cookie_num) { + if (surface_id != 0 || !num_updated_rects || + !qxl->render_update_cookie_num) { qemu_mutex_unlock(&qxl->ssd.lock); return; } @@ -1068,7 +1071,9 @@ static const QXLInterface qxl_interface = { .attache_worker = interface_attach_worker, .set_compression_level = interface_set_compression_level, +#if SPICE_NEEDS_SET_MM_TIME .set_mm_time = interface_set_mm_time, +#endif .get_init_info = interface_get_init_info, /* the callbacks below are called from spice server thread context */ @@ -1243,6 +1248,7 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, int pci_region; pcibus_t pci_start; pcibus_t pci_end; + MemoryRegion *mr; intptr_t virt_start; QXLDevMemSlot memslot; int i; @@ -1289,11 +1295,11 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, switch (pci_region) { case QXL_RAM_RANGE_INDEX: - virt_start = (intptr_t)memory_region_get_ram_ptr(&d->vga.vram); + mr = &d->vga.vram; break; case QXL_VRAM_RANGE_INDEX: case 4 /* vram 64bit */: - virt_start = (intptr_t)memory_region_get_ram_ptr(&d->vram_bar); + mr = &d->vram_bar; break; default: /* should not happen */ @@ -1301,6 +1307,7 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, return 1; } + virt_start = (intptr_t)memory_region_get_ram_ptr(mr); memslot.slot_id = slot_id; memslot.slot_group_id = MEMSLOT_GROUP_GUEST; /* guest group */ memslot.virt_start = virt_start + (guest_start - pci_start); @@ -1310,7 +1317,8 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, qxl_rom_set_dirty(d); qemu_spice_add_memslot(&d->ssd, &memslot, async); - d->guest_slots[slot_id].ptr = (void*)memslot.virt_start; + d->guest_slots[slot_id].mr = mr; + d->guest_slots[slot_id].offset = memslot.virt_start - virt_start; d->guest_slots[slot_id].size = memslot.virt_end - memslot.virt_start; d->guest_slots[slot_id].delta = delta; d->guest_slots[slot_id].active = 1; @@ -1337,39 +1345,60 @@ static void 
qxl_reset_surfaces(PCIQXLDevice *d) } /* can be also called from spice server thread context */ -void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id) +static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, + uint32_t *s, uint64_t *o) { uint64_t phys = le64_to_cpu(pqxl); uint32_t slot = (phys >> (64 - 8)) & 0xff; uint64_t offset = phys & 0xffffffffffff; - switch (group_id) { - case MEMSLOT_GROUP_HOST: - return (void *)(intptr_t)offset; - case MEMSLOT_GROUP_GUEST: - if (slot >= NUM_MEMSLOTS) { - qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot, - NUM_MEMSLOTS); - return NULL; - } - if (!qxl->guest_slots[slot].active) { - qxl_set_guest_bug(qxl, "inactive slot %d\n", slot); - return NULL; - } - if (offset < qxl->guest_slots[slot].delta) { - qxl_set_guest_bug(qxl, + if (slot >= NUM_MEMSLOTS) { + qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot, + NUM_MEMSLOTS); + return false; + } + if (!qxl->guest_slots[slot].active) { + qxl_set_guest_bug(qxl, "inactive slot %d\n", slot); + return false; + } + if (offset < qxl->guest_slots[slot].delta) { + qxl_set_guest_bug(qxl, "slot %d offset %"PRIu64" < delta %"PRIu64"\n", slot, offset, qxl->guest_slots[slot].delta); - return NULL; - } - offset -= qxl->guest_slots[slot].delta; - if (offset > qxl->guest_slots[slot].size) { - qxl_set_guest_bug(qxl, + return false; + } + offset -= qxl->guest_slots[slot].delta; + if (offset > qxl->guest_slots[slot].size) { + qxl_set_guest_bug(qxl, "slot %d offset %"PRIu64" > size %"PRIu64"\n", slot, offset, qxl->guest_slots[slot].size); + return false; + } + + *s = slot; + *o = offset; + return true; +} + +/* can be also called from spice server thread context */ +void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id) +{ + uint64_t offset; + uint32_t slot; + void *ptr; + + switch (group_id) { + case MEMSLOT_GROUP_HOST: + offset = le64_to_cpu(pqxl) & 0xffffffffffff; + return (void *)(intptr_t)offset; + case MEMSLOT_GROUP_GUEST: + if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset)) { return NULL; } - return qxl->guest_slots[slot].ptr + offset; + ptr = memory_region_get_ram_ptr(qxl->guest_slots[slot].mr); + ptr += qxl->guest_slots[slot].offset; + ptr += offset; + return ptr; } return NULL; } @@ -1784,9 +1813,23 @@ static void qxl_hw_update(void *opaque) qxl_render_update(qxl); } +static void qxl_dirty_one_surface(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, + uint32_t height, int32_t stride) +{ + uint64_t offset; + uint32_t slot, size; + bool rc; + + rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset); + assert(rc == true); + size = height * abs(stride); + trace_qxl_surfaces_dirty(qxl->id, (int)offset, size); + qxl_set_dirty(qxl->guest_slots[slot].mr, + qxl->guest_slots[slot].offset + offset, size); +} + static void qxl_dirty_surfaces(PCIQXLDevice *qxl) { - uintptr_t vram_start; int i; if (qxl->mode != QXL_MODE_NATIVE && qxl->mode != QXL_MODE_COMPAT) { @@ -1794,16 +1837,13 @@ static void qxl_dirty_surfaces(PCIQXLDevice *qxl) } /* dirty the primary surface */ - qxl_set_dirty(&qxl->vga.vram, qxl->shadow_rom.draw_area_offset, - qxl->shadow_rom.surface0_area_size); - - vram_start = (uintptr_t)memory_region_get_ram_ptr(&qxl->vram_bar); + qxl_dirty_one_surface(qxl, qxl->guest_primary.surface.mem, + qxl->guest_primary.surface.height, + qxl->guest_primary.surface.stride); /* dirty the off-screen surfaces */ for (i = 0; i < qxl->ssd.num_surfaces; i++) { QXLSurfaceCmd *cmd; - intptr_t surface_offset; - int surface_size; if (qxl->guest_surfaces.cmds[i] == 0) { continue; 
@@ -1813,15 +1853,9 @@ static void qxl_dirty_surfaces(PCIQXLDevice *qxl) MEMSLOT_GROUP_GUEST); assert(cmd); assert(cmd->type == QXL_SURFACE_CMD_CREATE); - surface_offset = (intptr_t)qxl_phys2virt(qxl, - cmd->u.surface_create.data, - MEMSLOT_GROUP_GUEST); - assert(surface_offset); - surface_offset -= vram_start; - surface_size = cmd->u.surface_create.height * - abs(cmd->u.surface_create.stride); - trace_qxl_surfaces_dirty(qxl->id, i, (int)surface_offset, surface_size); - qxl_set_dirty(&qxl->vram_bar, surface_offset, surface_size); + qxl_dirty_one_surface(qxl, cmd->u.surface_create.data, + cmd->u.surface_create.height, + cmd->u.surface_create.stride); } } @@ -1914,7 +1948,7 @@ static void qxl_init_ramsize(PCIQXLDevice *qxl) /* vram (surfaces, 64bit, bar 4+5) */ if (qxl->vram_size_mb != -1) { - qxl->vram_size = qxl->vram_size_mb * 1024 * 1024; + qxl->vram_size = (uint64_t)qxl->vram_size_mb * 1024 * 1024; } if (qxl->vram_size < qxl->vram32_size) { qxl->vram_size = qxl->vram32_size; @@ -2020,9 +2054,9 @@ static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp) dprint(qxl, 1, "ram/%s: %d MB [region 0]\n", qxl->id == 0 ? "pri" : "sec", qxl->vga.vram_size / (1024*1024)); - dprint(qxl, 1, "vram/32: %d MB [region 1]\n", + dprint(qxl, 1, "vram/32: %" PRIx64 "d MB [region 1]\n", qxl->vram32_size / (1024*1024)); - dprint(qxl, 1, "vram/64: %d MB %s\n", + dprint(qxl, 1, "vram/64: %" PRIx64 "d MB %s\n", qxl->vram_size / (1024*1024), qxl->vram32_size < qxl->vram_size ? "[region 4]" : "[unmapped]"); @@ -2276,7 +2310,7 @@ static VMStateDescription qxl_vmstate = { static Property qxl_properties[] = { DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, 64 * 1024 * 1024), - DEFINE_PROP_UINT32("vram_size", PCIQXLDevice, vram32_size, + DEFINE_PROP_UINT64("vram_size", PCIQXLDevice, vram32_size, 64 * 1024 * 1024), DEFINE_PROP_UINT32("revision", PCIQXLDevice, revision, QXL_DEFAULT_REVISION), diff --git a/hw/display/qxl.h b/hw/display/qxl.h index 2ddf065e1f..fdb619d4a7 100644 --- a/hw/display/qxl.h +++ b/hw/display/qxl.h @@ -53,7 +53,8 @@ typedef struct PCIQXLDevice { struct guest_slots { QXLMemSlot slot; - void *ptr; + MemoryRegion *mr; + uint64_t offset; uint64_t size; uint64_t delta; uint32_t active; @@ -104,9 +105,9 @@ typedef struct PCIQXLDevice { #endif /* vram pci bar */ - uint32_t vram_size; + uint64_t vram_size; MemoryRegion vram_bar; - uint32_t vram32_size; + uint64_t vram32_size; MemoryRegion vram32_bar; /* io bar */ diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c index 14c1bf339c..6d1faf44af 100644 --- a/hw/display/ssd0323.c +++ b/hw/display/ssd0323.c @@ -361,7 +361,7 @@ static const GraphicHwOps ssd0323_ops = { .gfx_update = ssd0323_update_display, }; -static int ssd0323_init(SSISlave *d) +static void ssd0323_realize(SSISlave *d, Error **errp) { DeviceState *dev = DEVICE(d); ssd0323_state *s = FROM_SSI_SLAVE(ssd0323_state, d); @@ -375,14 +375,13 @@ static int ssd0323_init(SSISlave *d) register_savevm(dev, "ssd0323_oled", -1, 1, ssd0323_save, ssd0323_load, s); - return 0; } static void ssd0323_class_init(ObjectClass *klass, void *data) { SSISlaveClass *k = SSI_SLAVE_CLASS(klass); - k->init = ssd0323_init; + k->realize = ssd0323_realize; k->transfer = ssd0323_transfer; k->cs_polarity = SSI_CS_HIGH; } diff --git a/hw/display/trace-events b/hw/display/trace-events index 30bebffd8b..9dd82cecde 100644 --- a/hw/display/trace-events +++ b/hw/display/trace-events @@ -105,7 +105,7 @@ qxl_spice_reset_image_cache(int qid) "%d" qxl_spice_reset_memslots(int qid) "%d" qxl_spice_update_area(int 
qid, uint32_t surface_id, uint32_t left, uint32_t right, uint32_t top, uint32_t bottom) "%d sid=%d [%d,%d,%d,%d]" qxl_spice_update_area_rest(int qid, uint32_t num_dirty_rects, uint32_t clear_dirty_region) "%d #d=%d clear=%d" -qxl_surfaces_dirty(int qid, int surface, int offset, int size) "%d surface=%d offset=%d size=%d" +qxl_surfaces_dirty(int qid, int offset, int size) "%d offset=%d size=%d" qxl_send_events(int qid, uint32_t events) "%d %d" qxl_send_events_vm_stopped(int qid, uint32_t events) "%d %d" qxl_set_guest_bug(int qid) "%d" diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c index 29918a090b..d6c8c6e2dc 100644 --- a/hw/display/virtio-gpu-3d.c +++ b/hw/display/virtio-gpu-3d.c @@ -171,13 +171,14 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g, virgl_renderer_force_ctx_0(); dpy_gl_scanout(g->scanout[ss.scanout_id].con, info.tex_id, info.flags & 1 /* FIXME: Y_0_TOP */, + info.width, info.height, ss.r.x, ss.r.y, ss.r.width, ss.r.height); } else { if (ss.scanout_id != 0) { dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL); } dpy_gl_scanout(g->scanout[ss.scanout_id].con, 0, false, - 0, 0, 0, 0); + 0, 0, 0, 0, 0, 0); } g->scanout[ss.scanout_id].resource_id = ss.resource_id; } @@ -580,7 +581,7 @@ void virtio_gpu_virgl_reset(VirtIOGPU *g) if (i != 0) { dpy_gfx_replace_surface(g->scanout[i].con, NULL); } - dpy_gl_scanout(g->scanout[i].con, 0, false, 0, 0, 0, 0); + dpy_gl_scanout(g->scanout[i].con, 0, false, 0, 0, 0, 0, 0, 0); } } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 136c095b7d..f8b0274752 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -934,8 +934,14 @@ static void virtio_gpu_gl_block(void *opaque, bool block) { VirtIOGPU *g = opaque; - g->renderer_blocked = block; - if (!block) { + if (block) { + g->renderer_blocked++; + } else { + g->renderer_blocked--; + } + assert(g->renderer_blocked >= 0); + + if (g->renderer_blocked == 0) { virtio_gpu_process_cmdq(g); } } diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c index be53b756c3..f43eb09304 100644 --- a/hw/display/xlnx_dp.c +++ b/hw/display/xlnx_dp.c @@ -438,10 +438,10 @@ static void xlnx_dp_aux_clear_tx_fifo(XlnxDPState *s) fifo8_reset(&s->tx_fifo); } -static void xlnx_dp_aux_push_tx_fifo(XlnxDPState *s, uint8_t val, size_t len) +static void xlnx_dp_aux_push_tx_fifo(XlnxDPState *s, uint8_t *buf, size_t len) { DPRINTF("Push %u data in tx_fifo\n", (unsigned)len); - fifo8_push_all(&s->tx_fifo, &val, len); + fifo8_push_all(&s->tx_fifo, buf, len); } static uint8_t xlnx_dp_aux_pop_tx_fifo(XlnxDPState *s) @@ -806,9 +806,11 @@ static void xlnx_dp_write(void *opaque, hwaddr offset, uint64_t value, * TODO: Power down things? 
*/ break; - case DP_AUX_WRITE_FIFO: - xlnx_dp_aux_push_tx_fifo(s, value, 1); + case DP_AUX_WRITE_FIFO: { + uint8_t c = value; + xlnx_dp_aux_push_tx_fifo(s, &c, 1); break; + } case DP_AUX_CLOCK_DIVIDER: break; case DP_AUX_REPLY_COUNT: diff --git a/hw/dma/Makefile.objs b/hw/dma/Makefile.objs index 8b0823e593..087c8e6855 100644 --- a/hw/dma/Makefile.objs +++ b/hw/dma/Makefile.objs @@ -5,6 +5,7 @@ common-obj-$(CONFIG_PL330) += pl330.o common-obj-$(CONFIG_I82374) += i82374.o common-obj-$(CONFIG_I8257) += i8257.o common-obj-$(CONFIG_XILINX_AXI) += xilinx_axidma.o +common-obj-$(CONFIG_ZYNQ_DEVCFG) += xlnx-zynq-devcfg.o common-obj-$(CONFIG_ETRAXFS) += etraxfs_dma.o common-obj-$(CONFIG_STP2000) += sparc32_dma.o common-obj-$(CONFIG_SUN4M) += sun4m_iommu.o diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c new file mode 100644 index 0000000000..3b10523430 --- /dev/null +++ b/hw/dma/xlnx-zynq-devcfg.c @@ -0,0 +1,400 @@ +/* + * QEMU model of the Xilinx Zynq Devcfg Interface + * + * (C) 2011 PetaLogix Pty Ltd + * (C) 2014 Xilinx Inc. + * Written by Peter Crosthwaite <peter.crosthwaite@xilinx.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/dma/xlnx-zynq-devcfg.h" +#include "qemu/bitops.h" +#include "sysemu/sysemu.h" +#include "sysemu/dma.h" +#include "qemu/log.h" + +#define FREQ_HZ 900000000 + +#define BTT_MAX 0x400 + +#ifndef XLNX_ZYNQ_DEVCFG_ERR_DEBUG +#define XLNX_ZYNQ_DEVCFG_ERR_DEBUG 0 +#endif + +#define DB_PRINT(fmt, args...) 
do { \ + if (XLNX_ZYNQ_DEVCFG_ERR_DEBUG) { \ + qemu_log("%s: " fmt, __func__, ## args); \ + } \ +} while (0); + +REG32(CTRL, 0x00) + FIELD(CTRL, FORCE_RST, 31, 1) /* Not supported, wr ignored */ + FIELD(CTRL, PCAP_PR, 27, 1) /* Forced to 0 on bad unlock */ + FIELD(CTRL, PCAP_MODE, 26, 1) + FIELD(CTRL, MULTIBOOT_EN, 24, 1) + FIELD(CTRL, USER_MODE, 15, 1) + FIELD(CTRL, PCFG_AES_FUSE, 12, 1) + FIELD(CTRL, PCFG_AES_EN, 9, 3) + FIELD(CTRL, SEU_EN, 8, 1) + FIELD(CTRL, SEC_EN, 7, 1) + FIELD(CTRL, SPNIDEN, 6, 1) + FIELD(CTRL, SPIDEN, 5, 1) + FIELD(CTRL, NIDEN, 4, 1) + FIELD(CTRL, DBGEN, 3, 1) + FIELD(CTRL, DAP_EN, 0, 3) + +REG32(LOCK, 0x04) +#define AES_FUSE_LOCK 4 +#define AES_EN_LOCK 3 +#define SEU_LOCK 2 +#define SEC_LOCK 1 +#define DBG_LOCK 0 + +/* mapping bits in R_LOCK to what they lock in R_CTRL */ +static const uint32_t lock_ctrl_map[] = { + [AES_FUSE_LOCK] = R_CTRL_PCFG_AES_FUSE_MASK, + [AES_EN_LOCK] = R_CTRL_PCFG_AES_EN_MASK, + [SEU_LOCK] = R_CTRL_SEU_EN_MASK, + [SEC_LOCK] = R_CTRL_SEC_EN_MASK, + [DBG_LOCK] = R_CTRL_SPNIDEN_MASK | R_CTRL_SPIDEN_MASK | + R_CTRL_NIDEN_MASK | R_CTRL_DBGEN_MASK | + R_CTRL_DAP_EN_MASK, +}; + +REG32(CFG, 0x08) + FIELD(CFG, RFIFO_TH, 10, 2) + FIELD(CFG, WFIFO_TH, 8, 2) + FIELD(CFG, RCLK_EDGE, 7, 1) + FIELD(CFG, WCLK_EDGE, 6, 1) + FIELD(CFG, DISABLE_SRC_INC, 5, 1) + FIELD(CFG, DISABLE_DST_INC, 4, 1) +#define R_CFG_RESET 0x50B + +REG32(INT_STS, 0x0C) + FIELD(INT_STS, PSS_GTS_USR_B, 31, 1) + FIELD(INT_STS, PSS_FST_CFG_B, 30, 1) + FIELD(INT_STS, PSS_CFG_RESET_B, 27, 1) + FIELD(INT_STS, RX_FIFO_OV, 18, 1) + FIELD(INT_STS, WR_FIFO_LVL, 17, 1) + FIELD(INT_STS, RD_FIFO_LVL, 16, 1) + FIELD(INT_STS, DMA_CMD_ERR, 15, 1) + FIELD(INT_STS, DMA_Q_OV, 14, 1) + FIELD(INT_STS, DMA_DONE, 13, 1) + FIELD(INT_STS, DMA_P_DONE, 12, 1) + FIELD(INT_STS, P2D_LEN_ERR, 11, 1) + FIELD(INT_STS, PCFG_DONE, 2, 1) +#define R_INT_STS_RSVD ((0x7 << 24) | (0x1 << 19) | (0xF < 7)) + +REG32(INT_MASK, 0x10) + +REG32(STATUS, 0x14) + FIELD(STATUS, DMA_CMD_Q_F, 31, 1) + FIELD(STATUS, DMA_CMD_Q_E, 30, 1) + FIELD(STATUS, DMA_DONE_CNT, 28, 2) + FIELD(STATUS, RX_FIFO_LVL, 20, 5) + FIELD(STATUS, TX_FIFO_LVL, 12, 7) + FIELD(STATUS, PSS_GTS_USR_B, 11, 1) + FIELD(STATUS, PSS_FST_CFG_B, 10, 1) + FIELD(STATUS, PSS_CFG_RESET_B, 5, 1) + +REG32(DMA_SRC_ADDR, 0x18) +REG32(DMA_DST_ADDR, 0x1C) +REG32(DMA_SRC_LEN, 0x20) +REG32(DMA_DST_LEN, 0x24) +REG32(ROM_SHADOW, 0x28) +REG32(SW_ID, 0x30) +REG32(UNLOCK, 0x34) + +#define R_UNLOCK_MAGIC 0x757BDF0D + +REG32(MCTRL, 0x80) + FIELD(MCTRL, PS_VERSION, 28, 4) + FIELD(MCTRL, PCFG_POR_B, 8, 1) + FIELD(MCTRL, INT_PCAP_LPBK, 4, 1) + FIELD(MCTRL, QEMU, 3, 1) + +static void xlnx_zynq_devcfg_update_ixr(XlnxZynqDevcfg *s) +{ + qemu_set_irq(s->irq, ~s->regs[R_INT_MASK] & s->regs[R_INT_STS]); +} + +static void xlnx_zynq_devcfg_reset(DeviceState *dev) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(dev); + int i; + + for (i = 0; i < XLNX_ZYNQ_DEVCFG_R_MAX; ++i) { + register_reset(&s->regs_info[i]); + } +} + +static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s) +{ + do { + uint8_t buf[BTT_MAX]; + XlnxZynqDevcfgDMACmd *dmah = s->dma_cmd_fifo; + uint32_t btt = BTT_MAX; + bool loopback = s->regs[R_MCTRL] & R_MCTRL_INT_PCAP_LPBK_MASK; + + btt = MIN(btt, dmah->src_len); + if (loopback) { + btt = MIN(btt, dmah->dest_len); + } + DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr); + dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt); + dmah->src_len -= btt; + dmah->src_addr += btt; + if (loopback && (dmah->src_len || dmah->dest_len)) { + DB_PRINT("writing %x bytes from %x\n", 
btt, dmah->dest_addr); + dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt); + dmah->dest_len -= btt; + dmah->dest_addr += btt; + } + if (!dmah->src_len && !dmah->dest_len) { + DB_PRINT("dma operation finished\n"); + s->regs[R_INT_STS] |= R_INT_STS_DMA_DONE_MASK | + R_INT_STS_DMA_P_DONE_MASK; + s->dma_cmd_fifo_num--; + memmove(s->dma_cmd_fifo, &s->dma_cmd_fifo[1], + sizeof(s->dma_cmd_fifo) - sizeof(s->dma_cmd_fifo[0])); + } + xlnx_zynq_devcfg_update_ixr(s); + } while (s->dma_cmd_fifo_num); +} + +static void r_ixr_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + + xlnx_zynq_devcfg_update_ixr(s); +} + +static uint64_t r_ctrl_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + int i; + + for (i = 0; i < ARRAY_SIZE(lock_ctrl_map); ++i) { + if (s->regs[R_LOCK] & 1 << i) { + val &= ~lock_ctrl_map[i]; + val |= lock_ctrl_map[i] & s->regs[R_CTRL]; + } + } + return val; +} + +static void r_ctrl_post_write(RegisterInfo *reg, uint64_t val) +{ + const char *device_prefix = object_get_typename(OBJECT(reg->opaque)); + uint32_t aes_en = FIELD_EX32(val, CTRL, PCFG_AES_EN); + + if (aes_en != 0 && aes_en != 7) { + qemu_log_mask(LOG_UNIMP, "%s: warning, aes-en bits inconsistent," + "unimplemented security reset should happen!\n", + device_prefix); + } +} + +static void r_unlock_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + const char *device_prefix = object_get_typename(OBJECT(s)); + + if (val == R_UNLOCK_MAGIC) { + DB_PRINT("successful unlock\n"); + s->regs[R_CTRL] |= R_CTRL_PCAP_PR_MASK; + s->regs[R_CTRL] |= R_CTRL_PCFG_AES_EN_MASK; + memory_region_set_enabled(&s->iomem, true); + } else { /* bad unlock attempt */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed unlock\n", device_prefix); + s->regs[R_CTRL] &= ~R_CTRL_PCAP_PR_MASK; + s->regs[R_CTRL] &= ~R_CTRL_PCFG_AES_EN_MASK; + /* core becomes inaccessible */ + memory_region_set_enabled(&s->iomem, false); + } +} + +static uint64_t r_lock_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + + /* once bits are locked they stay locked */ + return s->regs[R_LOCK] | val; +} + +static void r_dma_dst_len_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + + s->dma_cmd_fifo[s->dma_cmd_fifo_num] = (XlnxZynqDevcfgDMACmd) { + .src_addr = s->regs[R_DMA_SRC_ADDR] & ~0x3UL, + .dest_addr = s->regs[R_DMA_DST_ADDR] & ~0x3UL, + .src_len = s->regs[R_DMA_SRC_LEN] << 2, + .dest_len = s->regs[R_DMA_DST_LEN] << 2, + }; + s->dma_cmd_fifo_num++; + DB_PRINT("dma transfer started; %d total transfers pending\n", + s->dma_cmd_fifo_num); + xlnx_zynq_devcfg_dma_go(s); +} + +static const RegisterAccessInfo xlnx_zynq_devcfg_regs_info[] = { + { .name = "CTRL", .addr = A_CTRL, + .reset = R_CTRL_PCAP_PR_MASK | R_CTRL_PCAP_MODE_MASK | 0x3 << 13, + .rsvd = 0x1 << 28 | 0x3ff << 13 | 0x3 << 13, + .pre_write = r_ctrl_pre_write, + .post_write = r_ctrl_post_write, + }, + { .name = "LOCK", .addr = A_LOCK, + .rsvd = MAKE_64BIT_MASK(5, 64 - 5), + .pre_write = r_lock_pre_write, + }, + { .name = "CFG", .addr = A_CFG, + .reset = R_CFG_RESET, + .rsvd = 0xfffff00f, + }, + { .name = "INT_STS", .addr = A_INT_STS, + .w1c = ~R_INT_STS_RSVD, + .reset = R_INT_STS_PSS_GTS_USR_B_MASK | + R_INT_STS_PSS_CFG_RESET_B_MASK | + R_INT_STS_WR_FIFO_LVL_MASK, + .rsvd = R_INT_STS_RSVD, + .post_write = r_ixr_post_write, + }, + { .name = "INT_MASK", .addr = 
A_INT_MASK, + .reset = ~0, + .rsvd = R_INT_STS_RSVD, + .post_write = r_ixr_post_write, + }, + { .name = "STATUS", .addr = A_STATUS, + .reset = R_STATUS_DMA_CMD_Q_E_MASK | + R_STATUS_PSS_GTS_USR_B_MASK | + R_STATUS_PSS_CFG_RESET_B_MASK, + .ro = ~0, + }, + { .name = "DMA_SRC_ADDR", .addr = A_DMA_SRC_ADDR, }, + { .name = "DMA_DST_ADDR", .addr = A_DMA_DST_ADDR, }, + { .name = "DMA_SRC_LEN", .addr = A_DMA_SRC_LEN, + .ro = MAKE_64BIT_MASK(27, 64 - 27) }, + { .name = "DMA_DST_LEN", .addr = A_DMA_DST_LEN, + .ro = MAKE_64BIT_MASK(27, 64 - 27), + .post_write = r_dma_dst_len_post_write, + }, + { .name = "ROM_SHADOW", .addr = A_ROM_SHADOW, + .rsvd = ~0ull, + }, + { .name = "SW_ID", .addr = A_SW_ID, }, + { .name = "UNLOCK", .addr = A_UNLOCK, + .post_write = r_unlock_post_write, + }, + { .name = "MCTRL", .addr = R_MCTRL * 4, + /* Silicon 3.0 for version field, the mysterious reserved bit 23 + * and QEMU platform identifier. + */ + .reset = 0x2 << R_MCTRL_PS_VERSION_SHIFT | 1 << 23 | R_MCTRL_QEMU_MASK, + .ro = ~R_MCTRL_INT_PCAP_LPBK_MASK, + .rsvd = 0x00f00303, + }, +}; + +static const MemoryRegionOps xlnx_zynq_devcfg_reg_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const VMStateDescription vmstate_xlnx_zynq_devcfg_dma_cmd = { + .name = "xlnx_zynq_devcfg_dma_cmd", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(src_addr, XlnxZynqDevcfgDMACmd), + VMSTATE_UINT32(dest_addr, XlnxZynqDevcfgDMACmd), + VMSTATE_UINT32(src_len, XlnxZynqDevcfgDMACmd), + VMSTATE_UINT32(dest_len, XlnxZynqDevcfgDMACmd), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xlnx_zynq_devcfg = { + .name = "xlnx_zynq_devcfg", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(dma_cmd_fifo, XlnxZynqDevcfg, + XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN, 0, + vmstate_xlnx_zynq_devcfg_dma_cmd, + XlnxZynqDevcfgDMACmd), + VMSTATE_UINT8(dma_cmd_fifo_num, XlnxZynqDevcfg), + VMSTATE_UINT32_ARRAY(regs, XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG_R_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static void xlnx_zynq_devcfg_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(obj); + RegisterInfoArray *reg_array; + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init(&s->iomem, obj, "devcfg", XLNX_ZYNQ_DEVCFG_R_MAX * 4); + reg_array = + register_init_block32(DEVICE(obj), xlnx_zynq_devcfg_regs_info, + ARRAY_SIZE(xlnx_zynq_devcfg_regs_info), + s->regs_info, s->regs, + &xlnx_zynq_devcfg_reg_ops, + XLNX_ZYNQ_DEVCFG_ERR_DEBUG, + XLNX_ZYNQ_DEVCFG_R_MAX); + memory_region_add_subregion(&s->iomem, + A_CTRL, + &reg_array->mem); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xlnx_zynq_devcfg_reset; + dc->vmsd = &vmstate_xlnx_zynq_devcfg; +} + +static const TypeInfo xlnx_zynq_devcfg_info = { + .name = TYPE_XLNX_ZYNQ_DEVCFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqDevcfg), + .instance_init = xlnx_zynq_devcfg_init, + .class_init = xlnx_zynq_devcfg_class_init, +}; + +static void xlnx_zynq_devcfg_register_types(void) +{ + type_register_static(&xlnx_zynq_devcfg_info); +} + +type_init(xlnx_zynq_devcfg_register_types) diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 5a594be8ee..fbba461a87 100644 --- a/hw/i386/acpi-build.c 
+++ b/hw/i386/acpi-build.c @@ -229,26 +229,27 @@ static Object *acpi_get_i386_pci_host(void) return OBJECT(host); } -static void acpi_get_pci_info(PcPciInfo *info) +static void acpi_get_pci_holes(Range *hole, Range *hole64) { Object *pci_host; - pci_host = acpi_get_i386_pci_host(); g_assert(pci_host); - info->w32.begin = object_property_get_int(pci_host, + range_set_bounds1(hole, + object_property_get_int(pci_host, PCI_HOST_PROP_PCI_HOLE_START, - NULL); - info->w32.end = object_property_get_int(pci_host, - PCI_HOST_PROP_PCI_HOLE_END, - NULL); - info->w64.begin = object_property_get_int(pci_host, + NULL), + object_property_get_int(pci_host, + PCI_HOST_PROP_PCI_HOLE_END, + NULL)); + range_set_bounds1(hole64, + object_property_get_int(pci_host, PCI_HOST_PROP_PCI_HOLE64_START, - NULL); - info->w64.end = object_property_get_int(pci_host, - PCI_HOST_PROP_PCI_HOLE64_END, - NULL); + NULL), + object_property_get_int(pci_host, + PCI_HOST_PROP_PCI_HOLE64_END, + NULL)); } #define ACPI_PORT_SMI_CMD 0x00b2 /* TODO: this is APM_CNT_IOPORT */ @@ -1890,7 +1891,7 @@ static Aml *build_q35_osc_method(void) static void build_dsdt(GArray *table_data, BIOSLinker *linker, AcpiPmInfo *pm, AcpiMiscInfo *misc, - PcPciInfo *pci, MachineState *machine) + Range *pci_hole, Range *pci_hole64, MachineState *machine) { CrsRangeEntry *entry; Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; @@ -2047,7 +2048,9 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, AML_CACHEABLE, AML_READ_WRITE, 0, 0x000A0000, 0x000BFFFF, 0, 0x00020000)); - crs_replace_with_free_ranges(mem_ranges, pci->w32.begin, pci->w32.end - 1); + crs_replace_with_free_ranges(mem_ranges, + range_lob(pci_hole), + range_upb(pci_hole)); for (i = 0; i < mem_ranges->len; i++) { entry = g_ptr_array_index(mem_ranges, i); aml_append(crs, @@ -2057,12 +2060,12 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, 0, entry->limit - entry->base + 1)); } - if (pci->w64.begin) { + if (!range_is_empty(pci_hole64)) { aml_append(crs, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, - 0, pci->w64.begin, pci->w64.end - 1, 0, - pci->w64.end - pci->w64.begin)); + 0, range_lob(pci_hole64), range_upb(pci_hole64), 0, + range_upb(pci_hole64) + 1 - range_lob(pci_hole64))); } if (misc->tpm_version != TPM_VERSION_UNSPEC) { @@ -2554,7 +2557,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) AcpiPmInfo pm; AcpiMiscInfo misc; AcpiMcfgInfo mcfg; - PcPciInfo pci; + Range pci_hole, pci_hole64; uint8_t *u; size_t aml_len = 0; GArray *tables_blob = tables->table_data; @@ -2562,7 +2565,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) acpi_get_pm_info(&pm); acpi_get_misc_info(&misc); - acpi_get_pci_info(&pci); + acpi_get_pci_holes(&pci_hole, &pci_hole64); acpi_get_slic_oem(&slic_oem); table_offsets = g_array_new(false, true /* clear */, @@ -2584,7 +2587,8 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) /* DSDT is pointed to by FADT */ dsdt = tables_blob->len; - build_dsdt(tables_blob, tables->linker, &pm, &misc, &pci, machine); + build_dsdt(tables_blob, tables->linker, &pm, &misc, + &pci_hole, &pci_hole64, machine); /* Count the size of the DSDT and SSDT, we will need it for legacy * sizing of ACPI tables. 
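The acpi-build.c hunk above, like the i440fx and q35 host-bridge hunks further down, replaces the open-coded PcPciInfo begin/end pairs with the inclusive-bounds Range helpers. A minimal sketch of that conversion, not part of the patch and assuming the qemu/range.h semantics these hunks rely on (range_set_bounds1() takes an exclusive upper bound, range_lob()/range_upb() return inclusive bounds); the function name and the literal hole bounds are illustrative, with 0xfec00000 standing in for IO_APIC_DEFAULT_ADDRESS:

#include "qemu/osdep.h"
#include "qemu/range.h"

/* Illustrative sketch only; pci_hole_sketch() is not a function in the tree. */
static void pci_hole_sketch(void)
{
    Range hole;

    /* old representation: w32.begin = 0xe0000000, w32.end = 0xfec00000 */
    range_set_bounds1(&hole, 0xe0000000, 0xfec00000);

    /* the new accessors return inclusive bounds */
    assert(range_lob(&hole) == 0xe0000000);
    assert(range_upb(&hole) == 0xfec00000 - 1);

    /* hole length, computed the same way as in build_dsdt() above */
    assert(range_upb(&hole) + 1 - range_lob(&hole) == 0x1ec00000);
}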
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 5eba704477..464f2a0518 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -25,6 +25,7 @@ #include "intel_iommu_internal.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" +#include "hw/i386/pc.h" /*#define DEBUG_INTEL_IOMMU*/ #ifdef DEBUG_INTEL_IOMMU @@ -2026,8 +2027,20 @@ static void vtd_reset(DeviceState *dev) vtd_init(s); } +static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + IntelIOMMUState *s = opaque; + VTDAddressSpace *vtd_as; + + assert(0 <= devfn && devfn <= VTD_PCI_DEVFN_MAX); + + vtd_as = vtd_find_add_as(s, bus, devfn); + return &vtd_as->as; +} + static void vtd_realize(DeviceState *dev, Error **errp) { + PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus; IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); VTD_DPRINTF(GENERAL, ""); @@ -2041,6 +2054,8 @@ static void vtd_realize(DeviceState *dev, Error **errp) s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, g_free, g_free); vtd_init(s); + sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR); + pci_setup_iommu(bus, vtd_host_dma_iommu, dev); } static void vtd_class_init(ObjectClass *klass, void *data) @@ -2051,6 +2066,7 @@ static void vtd_class_init(ObjectClass *klass, void *data) dc->realize = vtd_realize; dc->vmsd = &vtd_vmstate; dc->props = vtd_properties; + dc->hotpluggable = false; } static const TypeInfo vtd_info = { diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 44a8f3bcbd..f56e225a99 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -1039,21 +1039,17 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level) } } -static X86CPU *pc_new_cpu(const char *cpu_model, int64_t apic_id, +static X86CPU *pc_new_cpu(const char *typename, int64_t apic_id, Error **errp) { X86CPU *cpu = NULL; Error *local_err = NULL; - cpu = cpu_x86_create(cpu_model, &local_err); - if (local_err != NULL) { - goto out; - } + cpu = X86_CPU(object_new(typename)); object_property_set_int(OBJECT(cpu), apic_id, "apic-id", &local_err); object_property_set_bool(OBJECT(cpu), true, "realized", &local_err); -out: if (local_err) { error_propagate(errp, local_err); object_unref(OBJECT(cpu)); @@ -1065,7 +1061,8 @@ out: void pc_hot_add_cpu(const int64_t id, Error **errp) { X86CPU *cpu; - MachineState *machine = MACHINE(qdev_get_machine()); + ObjectClass *oc; + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); int64_t apic_id = x86_cpu_apic_id_from_index(id); Error *local_err = NULL; @@ -1093,7 +1090,9 @@ void pc_hot_add_cpu(const int64_t id, Error **errp) return; } - cpu = pc_new_cpu(machine->cpu_model, apic_id, &local_err); + assert(pcms->possible_cpus->cpus[0].cpu); /* BSP is always present */ + oc = OBJECT_CLASS(CPU_GET_CLASS(pcms->possible_cpus->cpus[0].cpu)); + cpu = pc_new_cpu(object_class_get_name(oc), apic_id, &local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -1104,6 +1103,10 @@ void pc_hot_add_cpu(const int64_t id, Error **errp) void pc_cpus_init(PCMachineState *pcms) { int i; + CPUClass *cc; + ObjectClass *oc; + const char *typename; + gchar **model_pieces; X86CPU *cpu = NULL; MachineState *machine = MACHINE(pcms); @@ -1116,6 +1119,22 @@ void pc_cpus_init(PCMachineState *pcms) #endif } + model_pieces = g_strsplit(machine->cpu_model, ",", 2); + if (!model_pieces[0]) { + error_report("Invalid/empty CPU model name"); + exit(1); + } + + oc = cpu_class_by_name(TYPE_X86_CPU, model_pieces[0]); + if (oc == NULL) { + error_report("Unable to find CPU definition: %s", model_pieces[0]); + 
exit(1); + } + typename = object_class_get_name(oc); + cc = CPU_CLASS(oc); + cc->parse_features(typename, model_pieces[1], &error_fatal); + g_strfreev(model_pieces); + /* Calculates the limit to CPU APIC ID values * * Limit for the APIC ID value, so that all @@ -1136,7 +1155,7 @@ void pc_cpus_init(PCMachineState *pcms) pcms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i); pcms->possible_cpus->len++; if (i < smp_cpus) { - cpu = pc_new_cpu(machine->cpu_model, x86_cpu_apic_id_from_index(i), + cpu = pc_new_cpu(typename, x86_cpu_apic_id_from_index(i), &error_fatal); pcms->possible_cpus->cpus[i].cpu = CPU(cpu); object_unref(OBJECT(cpu)); @@ -1147,6 +1166,34 @@ void pc_cpus_init(PCMachineState *pcms) smbios_set_cpuid(cpu->env.cpuid_version, cpu->env.features[FEAT_1_EDX]); } +static void pc_build_feature_control_file(PCMachineState *pcms) +{ + X86CPU *cpu = X86_CPU(pcms->possible_cpus->cpus[0].cpu); + CPUX86State *env = &cpu->env; + uint32_t unused, ecx, edx; + uint64_t feature_control_bits = 0; + uint64_t *val; + + cpu_x86_cpuid(env, 1, 0, &unused, &unused, &ecx, &edx); + if (ecx & CPUID_EXT_VMX) { + feature_control_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; + } + + if ((edx & (CPUID_EXT2_MCE | CPUID_EXT2_MCA)) == + (CPUID_EXT2_MCE | CPUID_EXT2_MCA) && + (env->mcg_cap & MCG_LMCE_P)) { + feature_control_bits |= FEATURE_CONTROL_LMCE; + } + + if (!feature_control_bits) { + return; + } + + val = g_malloc(sizeof(*val)); + *val = cpu_to_le64(feature_control_bits | FEATURE_CONTROL_LOCKED); + fw_cfg_add_file(pcms->fw_cfg, "etc/msr_feature_control", val, sizeof(*val)); +} + static void pc_machine_done(Notifier *notifier, void *data) { @@ -1174,6 +1221,7 @@ void pc_machine_done(Notifier *notifier, void *data) acpi_setup(); if (pcms->fw_cfg) { pc_build_smbios(pcms->fw_cfg); + pc_build_feature_control_file(pcms); } } @@ -1919,7 +1967,7 @@ static void pc_machine_initfn(Object *obj) pc_machine_get_hotplug_memory_region_size, NULL, NULL, NULL, &error_abort); - pcms->max_ram_below_4g = 0xe0000000; /* 3.5G */ + pcms->max_ram_below_4g = 0; /* use default */ object_property_add(obj, PC_MACHINE_MAX_RAM_BELOW_4G, "size", pc_machine_get_max_ram_below_4g, pc_machine_set_max_ram_below_4g, diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index c7d70af253..a07dc816bf 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -108,37 +108,43 @@ static void pc_init1(MachineState *machine, * so legacy non-PAE guests can get as much memory as possible in * the 32bit address space below 4G. * + * - Note that Xen has its own ram setp code in xen_ram_init(), + * called via xen_hvm_init(). 
+ * * Examples: * qemu -M pc-1.7 -m 4G (old default) -> 3584M low, 512M high * qemu -M pc -m 4G (new default) -> 3072M low, 1024M high * qemu -M pc,max-ram-below-4g=2G -m 4G -> 2048M low, 2048M high * qemu -M pc,max-ram-below-4g=4G -m 3968M -> 3968M low (=4G-128M) */ - lowmem = pcms->max_ram_below_4g; - if (machine->ram_size >= pcms->max_ram_below_4g) { - if (pcmc->gigabyte_align) { - if (lowmem > 0xc0000000) { - lowmem = 0xc0000000; - } - if (lowmem & ((1ULL << 30) - 1)) { - error_report("Warning: Large machine and max_ram_below_4g " - "(%" PRIu64 ") not a multiple of 1G; " - "possible bad performance.", - pcms->max_ram_below_4g); + if (xen_enabled()) { + xen_hvm_init(pcms, &ram_memory); + } else { + if (!pcms->max_ram_below_4g) { + pcms->max_ram_below_4g = 0xe0000000; /* default: 3.5G */ + } + lowmem = pcms->max_ram_below_4g; + if (machine->ram_size >= pcms->max_ram_below_4g) { + if (pcmc->gigabyte_align) { + if (lowmem > 0xc0000000) { + lowmem = 0xc0000000; + } + if (lowmem & ((1ULL << 30) - 1)) { + error_report("Warning: Large machine and max_ram_below_4g " + "(%" PRIu64 ") not a multiple of 1G; " + "possible bad performance.", + pcms->max_ram_below_4g); + } } } - } - if (machine->ram_size >= lowmem) { - pcms->above_4g_mem_size = machine->ram_size - lowmem; - pcms->below_4g_mem_size = lowmem; - } else { - pcms->above_4g_mem_size = 0; - pcms->below_4g_mem_size = machine->ram_size; - } - - if (xen_enabled()) { - xen_hvm_init(pcms, &ram_memory); + if (machine->ram_size >= lowmem) { + pcms->above_4g_mem_size = machine->ram_size - lowmem; + pcms->below_4g_mem_size = lowmem; + } else { + pcms->above_4g_mem_size = 0; + pcms->below_4g_mem_size = machine->ram_size; + } } pc_cpus_init(pcms); diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 04b2684d37..c0b9961928 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -94,6 +94,9 @@ static void pc_q35_init(MachineState *machine) /* Handle the machine opt max-ram-below-4g. It is basically doing * min(qemu limit, user limit). */ + if (!pcms->max_ram_below_4g) { + pcms->max_ram_below_4g = 1ULL << 32; /* default: 4G */; + } if (lowmem > pcms->max_ram_below_4g) { lowmem = pcms->max_ram_below_4g; if (machine->ram_size - lowmem > lowmem && @@ -176,7 +179,6 @@ static void pc_q35_init(MachineState *machine) qdev_init_nofail(DEVICE(q35_host)); phb = PCI_HOST_BRIDGE(q35_host); host_bus = phb->bus; - pcms->bus = phb->bus; /* create ISA bus */ lpc = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC), true, @@ -287,6 +289,7 @@ static void pc_q35_machine_options(MachineClass *m) m->default_machine_opts = "firmware=bios-256k.bin"; m->default_display = "std"; m->no_floppy = 1; + m->has_dynamic_sysbus = true; } static void pc_q35_2_7_machine_options(MachineClass *m) diff --git a/hw/ide/ich.c b/hw/ide/ich.c index 0a13334baa..920ec276ed 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -68,7 +68,6 @@ #include <hw/isa/isa.h> #include "sysemu/block-backend.h" #include "sysemu/dma.h" - #include <hw/ide/pci.h> #include <hw/ide/ahci.h> @@ -111,6 +110,7 @@ static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) int sata_cap_offset; uint8_t *sata_cap; d = ICH_AHCI(dev); + int ret; ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6); @@ -146,7 +146,10 @@ static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) /* Although the AHCI 1.3 specification states that the first capability * should be PMCAP, the Intel ICH9 data sheet specifies that the ICH9 * AHCI device puts the MSI capability first, pointing to 0x80. 
*/ - msi_init(dev, ICH9_MSI_CAP_OFFSET, 1, true, false); + ret = msi_init(dev, ICH9_MSI_CAP_OFFSET, 1, true, false, NULL); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error. Fall back to INTx silently on -ENOTSUP */ + assert(!ret || ret == -ENOTSUP); } static void pci_ich9_uninit(PCIDevice *dev) diff --git a/hw/input/hid.c b/hw/input/hid.c index d92c7463ba..5e2850e655 100644 --- a/hw/input/hid.c +++ b/hw/input/hid.c @@ -27,6 +27,7 @@ #include "ui/console.h" #include "qemu/timer.h" #include "hw/input/hid.h" +#include "trace.h" #define HID_USAGE_ERROR_ROLLOVER 0x01 #define HID_USAGE_POSTFAIL 0x02 @@ -234,7 +235,7 @@ static void hid_keyboard_event(DeviceState *dev, QemuConsole *src, key->down, scancodes); if (hs->n + count > QUEUE_LENGTH) { - fprintf(stderr, "usb-kbd: warning: key event queue full\n"); + trace_hid_kbd_queue_full(); return; } for (i = 0; i < count; i++) { diff --git a/hw/input/trace-events b/hw/input/trace-events index 00fcec12b9..f24dff2f8b 100644 --- a/hw/input/trace-events +++ b/hw/input/trace-events @@ -23,3 +23,9 @@ milkymist_softusb_memory_write(uint32_t addr, uint32_t value) "addr %08x value % milkymist_softusb_mevt(uint8_t m) "m %d" milkymist_softusb_kevt(uint8_t m) "m %d" milkymist_softusb_pulse_irq(void) "Pulse IRQ" + +# hw/input/hid.c +hid_kbd_queue_full(void) "queue full" + +# hw/input/virtio +virtio_input_queue_full(void) "queue full" diff --git a/hw/input/virtio-input.c b/hw/input/virtio-input.c index f59749a943..edf69903a6 100644 --- a/hw/input/virtio-input.c +++ b/hw/input/virtio-input.c @@ -7,6 +7,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/iov.h" +#include "trace.h" #include "hw/qdev.h" #include "hw/virtio/virtio.h" @@ -47,7 +48,7 @@ void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event) virtqueue_get_avail_bytes(vinput->evt, &have, NULL, need, 0); if (have < need) { vinput->qindex = 0; - fprintf(stderr, "%s: ENOSPC in vq, dropping events\n", __func__); + trace_virtio_input_queue_full(); return; } diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 890d5d7442..06d8db6bd6 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -187,11 +187,11 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset) case 0x1c: /* SysTick Calibration Value. */ return 10000; case 0xd00: /* CPUID Base. */ - cpu = ARM_CPU(current_cpu); + cpu = ARM_CPU(qemu_get_cpu(0)); return cpu->midr; case 0xd04: /* Interrupt Control State. */ /* VECTACTIVE */ - cpu = ARM_CPU(current_cpu); + cpu = ARM_CPU(qemu_get_cpu(0)); val = cpu->env.v7m.exception; if (val == 1023) { val = 0; @@ -222,7 +222,7 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset) val |= (1 << 31); return val; case 0xd08: /* Vector Table Offset. */ - cpu = ARM_CPU(current_cpu); + cpu = ARM_CPU(qemu_get_cpu(0)); return cpu->env.v7m.vecbase; case 0xd0c: /* Application Interrupt/Reset Control. */ return 0xfa050000; @@ -349,7 +349,7 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) } break; case 0xd08: /* Vector Table Offset. */ - cpu = ARM_CPU(current_cpu); + cpu = ARM_CPU(qemu_get_cpu(0)); cpu->env.v7m.vecbase = value & 0xffffff80; break; case 0xd0c: /* Application Interrupt/Reset Control. 
*/ diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c index 07527b677b..4968bdbb28 100644 --- a/hw/microblaze/petalogix_ml605_mmu.c +++ b/hw/microblaze/petalogix_ml605_mmu.c @@ -191,9 +191,16 @@ petalogix_ml605_init(MachineState *machine) spi = (SSIBus *)qdev_get_child_bus(dev, "spi"); for (i = 0; i < NUM_SPI_FLASHES; i++) { + DriveInfo *dinfo = drive_get_next(IF_MTD); qemu_irq cs_line; - dev = ssi_create_slave(spi, "n25q128"); + dev = ssi_create_slave_no_init(spi, "n25q128"); + if (dinfo) { + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + qdev_init_nofail(dev); + cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0); sysbus_connect_irq(busdev, i+1, cs_line); } diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs index 54020aa06c..4cfbd1024a 100644 --- a/hw/misc/Makefile.objs +++ b/hw/misc/Makefile.objs @@ -51,5 +51,5 @@ obj-$(CONFIG_MIPS_ITU) += mips_itu.o obj-$(CONFIG_PVPANIC) += pvpanic.o obj-$(CONFIG_EDU) += edu.o obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o -obj-$(CONFIG_AUX) += aux.o +obj-$(CONFIG_AUX) += auxbus.o obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o diff --git a/hw/misc/aux.c b/hw/misc/auxbus.c index 25d7712398..e4a7ba41de 100644 --- a/hw/misc/aux.c +++ b/hw/misc/auxbus.c @@ -1,5 +1,5 @@ /* - * aux.c + * auxbus.c * * Copyright 2015 : GreenSocs Ltd * http://www.greensocs.com/ , email: info@greensocs.com @@ -28,7 +28,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" -#include "hw/misc/aux.h" +#include "hw/misc/auxbus.h" #include "hw/i2c/i2c.h" #include "monitor/monitor.h" @@ -153,12 +153,12 @@ AUXReply aux_request(AUXBus *bus, AUXCommand cmd, uint32_t address, case WRITE_I2C_MOT: case READ_I2C_MOT: is_write = cmd == READ_I2C_MOT ? false : true; + ret = AUX_I2C_NACK; if (!i2c_bus_busy(i2c_bus)) { /* * No transactions started.. 
*/ if (i2c_start_transfer(i2c_bus, address, is_write)) { - ret = AUX_I2C_NACK; break; } } else if ((address != bus->last_i2c_address) || @@ -168,22 +168,22 @@ AUXReply aux_request(AUXBus *bus, AUXCommand cmd, uint32_t address, */ i2c_end_transfer(i2c_bus); if (i2c_start_transfer(i2c_bus, address, is_write)) { - ret = AUX_I2C_NACK; break; } } + bus->last_transaction = cmd; + bus->last_i2c_address = address; while (len > 0) { if (i2c_send_recv(i2c_bus, data++, is_write) < 0) { - ret = AUX_I2C_NACK; i2c_end_transfer(i2c_bus); break; } len--; } - bus->last_transaction = cmd; - bus->last_i2c_address = address; - ret = AUX_I2C_ACK; + if (len == 0) { + ret = AUX_I2C_ACK; + } break; default: DPRINTF("Not implemented!\n"); diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c index ec58eef92d..17e15d4c92 100644 --- a/hw/misc/imx6_ccm.c +++ b/hw/misc/imx6_ccm.c @@ -371,6 +371,12 @@ static uint32_t imx6_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) case CLK_32k: freq = CKIL_FREQ; break; + case CLK_HIGH: + freq = 24000000; + break; + case CLK_HIGH_DIV: + freq = 24000000 / 8; + break; default: qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n", TYPE_IMX6_CCM, __func__, clock); diff --git a/hw/misc/max111x.c b/hw/misc/max111x.c index 9014f0f705..2a277bdb86 100644 --- a/hw/misc/max111x.c +++ b/hw/misc/max111x.c @@ -147,14 +147,14 @@ static int max111x_init(SSISlave *d, int inputs) return 0; } -static int max1110_init(SSISlave *dev) +static void max1110_realize(SSISlave *dev, Error **errp) { - return max111x_init(dev, 8); + max111x_init(dev, 8); } -static int max1111_init(SSISlave *dev) +static void max1111_realize(SSISlave *dev, Error **errp) { - return max111x_init(dev, 4); + max111x_init(dev, 4); } void max111x_set_input(DeviceState *dev, int line, uint8_t value) @@ -183,7 +183,7 @@ static void max1110_class_init(ObjectClass *klass, void *data) { SSISlaveClass *k = SSI_SLAVE_CLASS(klass); - k->init = max1110_init; + k->realize = max1110_realize; } static const TypeInfo max1110_info = { @@ -196,7 +196,7 @@ static void max1111_class_init(ObjectClass *klass, void *data) { SSISlaveClass *k = SSI_SLAVE_CLASS(klass); - k->init = max1111_init; + k->realize = max1111_realize; } static const TypeInfo max1111_info = { diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c index 692283fdd7..b4758bc441 100644 --- a/hw/net/e1000e.c +++ b/hw/net/e1000e.c @@ -89,8 +89,7 @@ typedef struct E1000EState { #define E1000E_MSIX_TABLE (0x0000) #define E1000E_MSIX_PBA (0x2000) -#define E1000E_USE_MSI BIT(0) -#define E1000E_USE_MSIX BIT(1) +#define E1000E_USE_MSIX BIT(0) static uint64_t e1000e_mmio_read(void *opaque, hwaddr addr, unsigned size) @@ -264,32 +263,6 @@ static void e1000e_core_realize(E1000EState *s) } static void -e1000e_init_msi(E1000EState *s) -{ - int res; - - res = msi_init(PCI_DEVICE(s), - 0xD0, /* MSI capability offset */ - 1, /* MAC MSI interrupts */ - true, /* 64-bit message addresses supported */ - false); /* Per vector mask supported */ - - if (res > 0) { - s->intr_state |= E1000E_USE_MSI; - } else { - trace_e1000e_msi_init_fail(res); - } -} - -static void -e1000e_cleanup_msi(E1000EState *s) -{ - if (s->intr_state & E1000E_USE_MSI) { - msi_uninit(PCI_DEVICE(s)); - } -} - -static void e1000e_unuse_msix_vectors(E1000EState *s, int num_vectors) { int i; @@ -444,6 +417,7 @@ static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp) static const uint16_t e1000e_dsn_offset = 0x140; E1000EState *s = E1000E(pci_dev); uint8_t *macaddr; + int ret; trace_e1000e_cb_pci_realize(); @@ -493,7 
+467,10 @@ static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp) hw_error("Failed to initialize PCIe capability"); } - e1000e_init_msi(s); + ret = msi_init(PCI_DEVICE(s), 0xD0, 1, true, false, NULL); + if (ret) { + trace_e1000e_msi_init_fail(ret); + } if (e1000e_add_pm_capability(pci_dev, e1000e_pmrb_offset, PCI_PM_CAP_DSI) < 0) { @@ -532,7 +509,7 @@ static void e1000e_pci_uninit(PCIDevice *pci_dev) qemu_del_nic(s->nic); e1000e_cleanup_msix(s); - e1000e_cleanup_msi(s); + msi_uninit(pci_dev); } static void e1000e_qdev_reset(DeviceState *dev) @@ -693,6 +670,7 @@ static void e1000e_class_init(ObjectClass *class, void *data) c->vendor_id = PCI_VENDOR_ID_INTEL; c->device_id = E1000_DEV_ID_82574L; c->revision = 0; + c->romfile = "efi-e1000e.rom"; c->class_id = PCI_CLASS_NETWORK_ETHERNET; c->is_express = 1; diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index 50f4dcd655..11fabc0b0a 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -172,7 +172,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) } r = vhost_dev_init(&net->dev, options->opaque, - options->backend_type); + options->backend_type, options->busyloop_timeout); if (r < 0) { goto fail; } diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 7e6a60aa12..999989934e 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -1542,33 +1542,11 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id) { VirtIONet *n = opaque; VirtIODevice *vdev = VIRTIO_DEVICE(n); - int ret; if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) return -EINVAL; - ret = virtio_load(vdev, f, version_id); - if (ret) { - return ret; - } - - if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { - n->curr_guest_offloads = qemu_get_be64(f); - } else { - n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); - } - - if (peer_has_vnet_hdr(n)) { - virtio_net_apply_guest_offloads(n); - } - - if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && - virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { - n->announce_counter = SELF_ANNOUNCE_ROUNDS; - timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL)); - } - - return 0; + return virtio_load(vdev, f, version_id); } static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f, @@ -1665,6 +1643,16 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f, } } + if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { + n->curr_guest_offloads = qemu_get_be64(f); + } else { + n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); + } + + if (peer_has_vnet_hdr(n)) { + virtio_net_apply_guest_offloads(n); + } + virtio_net_set_queues(n); /* Find the first multicast entry in the saved MAC filter */ @@ -1682,6 +1670,12 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f, qemu_get_subqueue(n->nic, i)->link_down = link_down; } + if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && + virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { + n->announce_counter = SELF_ANNOUNCE_ROUNDS; + timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL)); + } + return 0; } diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 92236d3919..e767fc64b8 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -283,8 +283,6 @@ typedef struct { /* Whether MSI-X support was installed successfully */ bool msix_used; - /* Whether MSI support was installed successfully */ - bool msi_used; hwaddr drv_shmem; hwaddr temp_shared_guest_driver_memory; @@ -366,7 +364,7 @@ 
static bool _vmxnet3_assert_interrupt_line(VMXNET3State *s, uint32_t int_idx) msix_notify(d, int_idx); return false; } - if (s->msi_used && msi_enabled(d)) { + if (msi_enabled(d)) { VMW_IRPRN("Sending MSI notification for vector %u", int_idx); msi_notify(d, int_idx); return false; @@ -390,7 +388,7 @@ static void _vmxnet3_deassert_interrupt_line(VMXNET3State *s, int lidx) * This function should never be called for MSI(X) interrupts * because deassertion never required for message interrupts */ - assert(!s->msi_used || !msi_enabled(d)); + assert(!msi_enabled(d)); VMW_IRPRN("Deasserting line for interrupt %u", lidx); pci_irq_deassert(d); @@ -427,7 +425,7 @@ static void vmxnet3_trigger_interrupt(VMXNET3State *s, int lidx) goto do_automask; } - if (s->msi_used && msi_enabled(d) && s->auto_int_masking) { + if (msi_enabled(d) && s->auto_int_masking) { goto do_automask; } @@ -1425,8 +1423,8 @@ static void vmxnet3_update_features(VMXNET3State *s) static bool vmxnet3_verify_intx(VMXNET3State *s, int intx) { - return s->msix_used || s->msi_used || (intx == - (pci_get_byte(s->parent_obj.config + PCI_INTERRUPT_PIN) - 1)); + return s->msix_used || msi_enabled(PCI_DEVICE(s)) + || intx == pci_get_byte(s->parent_obj.config + PCI_INTERRUPT_PIN) - 1; } static void vmxnet3_validate_interrupt_idx(bool is_msix, int idx) @@ -2216,35 +2214,12 @@ vmxnet3_cleanup_msix(VMXNET3State *s) } } -#define VMXNET3_USE_64BIT (true) -#define VMXNET3_PER_VECTOR_MASK (false) - -static bool -vmxnet3_init_msi(VMXNET3State *s) -{ - PCIDevice *d = PCI_DEVICE(s); - int res; - - res = msi_init(d, VMXNET3_MSI_OFFSET(s), VMXNET3_MAX_NMSIX_INTRS, - VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK); - if (0 > res) { - VMW_WRPRN("Failed to initialize MSI, error %d", res); - s->msi_used = false; - } else { - s->msi_used = true; - } - - return s->msi_used; -} - static void vmxnet3_cleanup_msi(VMXNET3State *s) { PCIDevice *d = PCI_DEVICE(s); - if (s->msi_used) { - msi_uninit(d); - } + msi_uninit(d); } static void @@ -2298,10 +2273,15 @@ static uint64_t vmxnet3_device_serial_num(VMXNET3State *s) return dsn_payload; } + +#define VMXNET3_USE_64BIT (true) +#define VMXNET3_PER_VECTOR_MASK (false) + static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp) { DeviceState *dev = DEVICE(pci_dev); VMXNET3State *s = VMXNET3(pci_dev); + int ret; VMW_CBPRN("Starting init..."); @@ -2325,14 +2305,16 @@ static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp) /* Interrupt pin A */ pci_dev->config[PCI_INTERRUPT_PIN] = 0x01; + ret = msi_init(pci_dev, VMXNET3_MSI_OFFSET(s), VMXNET3_MAX_NMSIX_INTRS, + VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK, NULL); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error. 
Fall back to INTx silently on -ENOTSUP */ + assert(!ret || ret == -ENOTSUP); + if (!vmxnet3_init_msix(s)) { VMW_WRPRN("Failed to initialize MSI-X, configuration is inconsistent."); } - if (!vmxnet3_init_msi(s)) { - VMW_WRPRN("Failed to initialize MSI, configuration is inconsistent."); - } - vmxnet3_net_init(s); if (pci_is_express(pci_dev)) { @@ -2719,6 +2701,7 @@ static void vmxnet3_class_init(ObjectClass *class, void *data) c->vendor_id = PCI_VENDOR_ID_VMWARE; c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3; c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION; + c->romfile = "efi-vmxnet3.rom"; c->class_id = PCI_CLASS_NETWORK_ETHERNET; c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE; c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3; diff --git a/hw/pci-bridge/ioh3420.c b/hw/pci-bridge/ioh3420.c index b4a7806e2e..93c6f0b7a2 100644 --- a/hw/pci-bridge/ioh3420.c +++ b/hw/pci-bridge/ioh3420.c @@ -25,6 +25,7 @@ #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "ioh3420.h" +#include "qapi/error.h" #define PCI_DEVICE_ID_IOH_EPORT 0x3420 /* D0:F0 express mode */ #define PCI_DEVICE_ID_IOH_REV 0x2 @@ -97,6 +98,7 @@ static int ioh3420_initfn(PCIDevice *d) PCIEPort *p = PCIE_PORT(d); PCIESlot *s = PCIE_SLOT(d); int rc; + Error *err = NULL; pci_bridge_initfn(d, TYPE_PCIE_BUS); pcie_port_init_reg(d); @@ -109,8 +111,10 @@ static int ioh3420_initfn(PCIDevice *d) rc = msi_init(d, IOH_EP_MSI_OFFSET, IOH_EP_MSI_NR_VECTOR, IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, - IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT); + IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, &err); if (rc < 0) { + assert(rc == -ENOTSUP); + error_report_err(err); goto err_bridge; } diff --git a/hw/pci-bridge/pci_bridge_dev.c b/hw/pci-bridge/pci_bridge_dev.c index 41ca47b15a..5dbd933cc1 100644 --- a/hw/pci-bridge/pci_bridge_dev.c +++ b/hw/pci-bridge/pci_bridge_dev.c @@ -42,9 +42,10 @@ struct PCIBridgeDev { MemoryRegion bar; uint8_t chassis_nr; -#define PCI_BRIDGE_DEV_F_MSI_REQ 0 -#define PCI_BRIDGE_DEV_F_SHPC_REQ 1 +#define PCI_BRIDGE_DEV_F_SHPC_REQ 0 uint32_t flags; + + OnOffAuto msi; }; typedef struct PCIBridgeDev PCIBridgeDev; @@ -53,6 +54,7 @@ static int pci_bridge_dev_initfn(PCIDevice *dev) PCIBridge *br = PCI_BRIDGE(dev); PCIBridgeDev *bridge_dev = PCI_BRIDGE_DEV(dev); int err; + Error *local_err = NULL; pci_bridge_initfn(dev, TYPE_PCI_BUS); @@ -66,7 +68,7 @@ static int pci_bridge_dev_initfn(PCIDevice *dev) } } else { /* MSI is not applicable without SHPC */ - bridge_dev->flags &= ~(1 << PCI_BRIDGE_DEV_F_MSI_REQ); + bridge_dev->msi = ON_OFF_AUTO_OFF; } err = slotid_cap_init(dev, 0, bridge_dev->chassis_nr, 0); @@ -74,12 +76,23 @@ static int pci_bridge_dev_initfn(PCIDevice *dev) goto slotid_error; } - if ((bridge_dev->flags & (1 << PCI_BRIDGE_DEV_F_MSI_REQ)) && - msi_nonbroken) { - err = msi_init(dev, 0, 1, true, true); - if (err < 0) { + if (bridge_dev->msi != ON_OFF_AUTO_OFF) { + /* it means SHPC exists, because MSI is needed by SHPC */ + + err = msi_init(dev, 0, 1, true, true, &local_err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!err || err == -ENOTSUP); + if (err && bridge_dev->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&local_err, "You have to use msi=auto (default) " + "or msi=off with this machine type.\n"); + error_report_err(local_err); goto msi_error; } + assert(!local_err || bridge_dev->msi == ON_OFF_AUTO_AUTO); + /* With msi=auto, we fall back to MSI off silently */ + 
error_free(local_err); } if (shpc_present(dev)) { @@ -147,8 +160,8 @@ static Property pci_bridge_dev_properties[] = { /* Note: 0 is not a legal chassis number. */ DEFINE_PROP_UINT8(PCI_BRIDGE_DEV_PROP_CHASSIS_NR, PCIBridgeDev, chassis_nr, 0), - DEFINE_PROP_BIT(PCI_BRIDGE_DEV_PROP_MSI, PCIBridgeDev, flags, - PCI_BRIDGE_DEV_F_MSI_REQ, true), + DEFINE_PROP_ON_OFF_AUTO(PCI_BRIDGE_DEV_PROP_MSI, PCIBridgeDev, msi, + ON_OFF_AUTO_AUTO), DEFINE_PROP_BIT(PCI_BRIDGE_DEV_PROP_SHPC, PCIBridgeDev, flags, PCI_BRIDGE_DEV_F_SHPC_REQ, true), DEFINE_PROP_END_OF_LIST(), diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index ba320bd857..ab8612158d 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -149,6 +149,8 @@ static void pxb_host_class_init(ObjectClass *class, void *data) PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(class); dc->fw_name = "pci"; + /* Reason: Internal part of the pxb/pxb-pcie device, not usable by itself */ + dc->cannot_instantiate_with_device_add_yet = true; sbc->explicit_ofw_unit_address = pxb_host_ofw_unit_address; hc->root_bus_path = pxb_host_root_bus_path; } diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c index e6d653de4f..f6149a302d 100644 --- a/hw/pci-bridge/xio3130_downstream.c +++ b/hw/pci-bridge/xio3130_downstream.c @@ -24,6 +24,7 @@ #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "xio3130_downstream.h" +#include "qapi/error.h" #define PCI_DEVICE_ID_TI_XIO3130D 0x8233 /* downstream port */ #define XIO3130_REVISION 0x1 @@ -60,14 +61,17 @@ static int xio3130_downstream_initfn(PCIDevice *d) PCIEPort *p = PCIE_PORT(d); PCIESlot *s = PCIE_SLOT(d); int rc; + Error *err = NULL; pci_bridge_initfn(d, TYPE_PCIE_BUS); pcie_port_init_reg(d); rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR, XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, - XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT); + XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, &err); if (rc < 0) { + assert(rc == -ENOTSUP); + error_report_err(err); goto err_bridge; } diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c index d97684474f..487edacc1d 100644 --- a/hw/pci-bridge/xio3130_upstream.c +++ b/hw/pci-bridge/xio3130_upstream.c @@ -24,6 +24,7 @@ #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "xio3130_upstream.h" +#include "qapi/error.h" #define PCI_DEVICE_ID_TI_XIO3130U 0x8232 /* upstream port */ #define XIO3130_REVISION 0x2 @@ -56,14 +57,17 @@ static int xio3130_upstream_initfn(PCIDevice *d) { PCIEPort *p = PCIE_PORT(d); int rc; + Error *err = NULL; pci_bridge_initfn(d, TYPE_PCIE_BUS); pcie_port_init_reg(d); rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR, XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, - XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT); + XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, &err); if (rc < 0) { + assert(rc == -ENOTSUP); + error_report_err(err); goto err_bridge; } diff --git a/hw/pci-host/piix.c b/hw/pci-host/piix.c index df2b0e26f5..f9218aa952 100644 --- a/hw/pci-host/piix.c +++ b/hw/pci-host/piix.c @@ -48,7 +48,7 @@ typedef struct I440FXState { PCIHostState parent_obj; - PcPciInfo pci_info; + Range pci_hole; uint64_t pci_hole64_size; uint32_t short_root_bus; } I440FXState; @@ -221,8 +221,12 @@ static void i440fx_pcihost_get_pci_hole_start(Object *obj, Visitor *v, Error **errp) { I440FXState *s = I440FX_PCI_HOST_BRIDGE(obj); - uint32_t value = s->pci_info.w32.begin; + uint64_t val64; + 
uint32_t value; + val64 = range_is_empty(&s->pci_hole) ? 0 : range_lob(&s->pci_hole); + value = val64; + assert(value == val64); visit_type_uint32(v, name, &value, errp); } @@ -231,8 +235,12 @@ static void i440fx_pcihost_get_pci_hole_end(Object *obj, Visitor *v, Error **errp) { I440FXState *s = I440FX_PCI_HOST_BRIDGE(obj); - uint32_t value = s->pci_info.w32.end; + uint64_t val64; + uint32_t value; + val64 = range_is_empty(&s->pci_hole) ? 0 : range_upb(&s->pci_hole) + 1; + value = val64; + assert(value == val64); visit_type_uint32(v, name, &value, errp); } @@ -242,10 +250,11 @@ static void i440fx_pcihost_get_pci_hole64_start(Object *obj, Visitor *v, { PCIHostState *h = PCI_HOST_BRIDGE(obj); Range w64; + uint64_t value; pci_bus_get_w64_range(h->bus, &w64); - - visit_type_uint64(v, name, &w64.begin, errp); + value = range_is_empty(&w64) ? 0 : range_lob(&w64); + visit_type_uint64(v, name, &value, errp); } static void i440fx_pcihost_get_pci_hole64_end(Object *obj, Visitor *v, @@ -254,16 +263,16 @@ static void i440fx_pcihost_get_pci_hole64_end(Object *obj, Visitor *v, { PCIHostState *h = PCI_HOST_BRIDGE(obj); Range w64; + uint64_t value; pci_bus_get_w64_range(h->bus, &w64); - - visit_type_uint64(v, name, &w64.end, errp); + value = range_is_empty(&w64) ? 0 : range_upb(&w64) + 1; + visit_type_uint64(v, name, &value, errp); } static void i440fx_pcihost_initfn(Object *obj) { PCIHostState *s = PCI_HOST_BRIDGE(obj); - I440FXState *d = I440FX_PCI_HOST_BRIDGE(obj); memory_region_init_io(&s->conf_mem, obj, &pci_host_conf_le_ops, s, "pci-conf-idx", 4); @@ -285,8 +294,6 @@ static void i440fx_pcihost_initfn(Object *obj) object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_END, "int", i440fx_pcihost_get_pci_hole64_end, NULL, NULL, NULL, NULL); - - d->pci_info.w32.end = IO_APIC_DEFAULT_ADDRESS; } static void i440fx_pcihost_realize(DeviceState *dev, Error **errp) @@ -347,7 +354,8 @@ PCIBus *i440fx_init(const char *host_type, const char *pci_type, f->ram_memory = ram_memory; i440fx = I440FX_PCI_HOST_BRIDGE(dev); - i440fx->pci_info.w32.begin = below_4g_mem_size; + range_set_bounds(&i440fx->pci_hole, below_4g_mem_size, + IO_APIC_DEFAULT_ADDRESS - 1); /* setup pci memory mapping */ pc_pci_as_mapping_init(OBJECT(f), f->system_memory, @@ -865,6 +873,8 @@ static void i440fx_pcihost_class_init(ObjectClass *klass, void *data) dc->realize = i440fx_pcihost_realize; dc->fw_name = "pci"; dc->props = i440fx_props; + /* Reason: needs to be wired up by pc_init1 */ + dc->cannot_instantiate_with_device_add_yet = true; } static const TypeInfo i440fx_pcihost_info = { diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c index 03be05dc0d..344f77b10c 100644 --- a/hw/pci-host/q35.c +++ b/hw/pci-host/q35.c @@ -52,6 +52,7 @@ static void q35_host_realize(DeviceState *dev, Error **errp) pci->bus = pci_bus_new(DEVICE(s), "pcie.0", s->mch.pci_address_space, s->mch.address_space_io, 0, TYPE_PCIE_BUS); + PC_MACHINE(qdev_get_machine())->bus = pci->bus; qdev_set_parent_bus(DEVICE(&s->mch), BUS(pci->bus)); qdev_init_nofail(DEVICE(&s->mch)); } @@ -73,8 +74,13 @@ static void q35_host_get_pci_hole_start(Object *obj, Visitor *v, Error **errp) { Q35PCIHost *s = Q35_HOST_DEVICE(obj); - uint32_t value = s->mch.pci_info.w32.begin; + uint64_t val64; + uint32_t value; + val64 = range_is_empty(&s->mch.pci_hole) + ? 
0 : range_lob(&s->mch.pci_hole); + value = val64; + assert(value == val64); visit_type_uint32(v, name, &value, errp); } @@ -83,8 +89,13 @@ static void q35_host_get_pci_hole_end(Object *obj, Visitor *v, Error **errp) { Q35PCIHost *s = Q35_HOST_DEVICE(obj); - uint32_t value = s->mch.pci_info.w32.end; + uint64_t val64; + uint32_t value; + val64 = range_is_empty(&s->mch.pci_hole) + ? 0 : range_upb(&s->mch.pci_hole) + 1; + value = val64; + assert(value == val64); visit_type_uint32(v, name, &value, errp); } @@ -94,10 +105,11 @@ static void q35_host_get_pci_hole64_start(Object *obj, Visitor *v, { PCIHostState *h = PCI_HOST_BRIDGE(obj); Range w64; + uint64_t value; pci_bus_get_w64_range(h->bus, &w64); - - visit_type_uint64(v, name, &w64.begin, errp); + value = range_is_empty(&w64) ? 0 : range_lob(&w64); + visit_type_uint64(v, name, &value, errp); } static void q35_host_get_pci_hole64_end(Object *obj, Visitor *v, @@ -106,10 +118,11 @@ static void q35_host_get_pci_hole64_end(Object *obj, Visitor *v, { PCIHostState *h = PCI_HOST_BRIDGE(obj); Range w64; + uint64_t value; pci_bus_get_w64_range(h->bus, &w64); - - visit_type_uint64(v, name, &w64.end, errp); + value = range_is_empty(&w64) ? 0 : range_upb(&w64) + 1; + visit_type_uint64(v, name, &value, errp); } static void q35_host_get_mmcfg_size(Object *obj, Visitor *v, const char *name, @@ -142,6 +155,8 @@ static void q35_host_class_init(ObjectClass *klass, void *data) hc->root_bus_path = q35_host_root_bus_path; dc->realize = q35_host_realize; dc->props = mch_props; + /* Reason: needs to be wired up by pc_q35_init */ + dc->cannot_instantiate_with_device_add_yet = true; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->fw_name = "pci"; } @@ -202,9 +217,9 @@ static void q35_host_initfn(Object *obj) * it's not a power of two, which means an MTRR * can't cover it exactly. */ - s->mch.pci_info.w32.begin = MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT + - MCH_HOST_BRIDGE_PCIEXBAR_MAX; - s->mch.pci_info.w32.end = IO_APIC_DEFAULT_ADDRESS; + range_set_bounds(&s->mch.pci_hole, + MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT + MCH_HOST_BRIDGE_PCIEXBAR_MAX, + IO_APIC_DEFAULT_ADDRESS - 1); } static const TypeInfo q35_host_info = { @@ -272,10 +287,7 @@ static void mch_update_pciexbar(MCHPCIState *mch) break; case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_RVD: default: - enable = 0; - length = 0; abort(); - break; } addr = pciexbar & addr_mask; pcie_host_mmcfg_update(pehb, enable, addr, length); @@ -285,9 +297,13 @@ static void mch_update_pciexbar(MCHPCIState *mch) * which means an MTRR can't cover it exactly. 
*/ if (enable) { - mch->pci_info.w32.begin = addr + length; + range_set_bounds(&mch->pci_hole, + addr + length, + IO_APIC_DEFAULT_ADDRESS - 1); } else { - mch->pci_info.w32.begin = MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT; + range_set_bounds(&mch->pci_hole, + MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT, + IO_APIC_DEFAULT_ADDRESS - 1); } } @@ -444,30 +460,6 @@ static void mch_reset(DeviceState *qdev) mch_update(mch); } -static AddressSpace *q35_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) -{ - IntelIOMMUState *s = opaque; - VTDAddressSpace *vtd_as; - - assert(0 <= devfn && devfn <= VTD_PCI_DEVFN_MAX); - - vtd_as = vtd_find_add_as(s, bus, devfn); - return &vtd_as->as; -} - -static void mch_init_dmar(MCHPCIState *mch) -{ - PCIBus *pci_bus = PCI_BUS(qdev_get_parent_bus(DEVICE(mch))); - - mch->iommu = INTEL_IOMMU_DEVICE(qdev_create(NULL, TYPE_INTEL_IOMMU_DEVICE)); - object_property_add_child(OBJECT(mch), "intel-iommu", - OBJECT(mch->iommu), NULL); - qdev_init_nofail(DEVICE(mch->iommu)); - sysbus_mmio_map(SYS_BUS_DEVICE(mch->iommu), 0, Q35_HOST_BRIDGE_IOMMU_ADDR); - - pci_setup_iommu(pci_bus, q35_host_dma_iommu, mch->iommu); -} - static void mch_realize(PCIDevice *d, Error **errp) { int i; @@ -526,10 +518,6 @@ static void mch_realize(PCIDevice *d, Error **errp) mch->pci_address_space, &mch->pam_regions[i+1], PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); } - /* Intel IOMMU (VT-d) */ - if (object_property_get_bool(qdev_get_machine(), "iommu", NULL)) { - mch_init_dmar(mch); - } } uint64_t mch_mcfg_base(void) diff --git a/hw/pci/msi.c b/hw/pci/msi.c index ed792251dd..a87b2278a3 100644 --- a/hw/pci/msi.c +++ b/hw/pci/msi.c @@ -22,6 +22,7 @@ #include "hw/pci/msi.h" #include "hw/xen/xen.h" #include "qemu/range.h" +#include "qapi/error.h" /* PCI_MSI_ADDRESS_LO */ #define PCI_MSI_ADDRESS_LO_MASK (~0x3) @@ -173,7 +174,8 @@ bool msi_enabled(const PCIDevice *dev) * If @msi64bit, make the device capable of sending a 64-bit message * address. * If @msi_per_vector_mask, make the device support per-vector masking. - * Return 0 on success, return -errno on error. + * @errp is for returning errors. + * Return 0 on success; set @errp and return -errno on error. * * -ENOTSUP means lacking msi support for a msi-capable platform. * -EINVAL means capability overlap, happens when @offset is non-zero, @@ -181,7 +183,8 @@ bool msi_enabled(const PCIDevice *dev) * if a real HW is broken. 
*/ int msi_init(struct PCIDevice *dev, uint8_t offset, - unsigned int nr_vectors, bool msi64bit, bool msi_per_vector_mask) + unsigned int nr_vectors, bool msi64bit, + bool msi_per_vector_mask, Error **errp) { unsigned int vectors_order; uint16_t flags; @@ -189,6 +192,7 @@ int msi_init(struct PCIDevice *dev, uint8_t offset, int config_offset; if (!msi_nonbroken) { + error_setg(errp, "MSI is not supported by interrupt controller"); return -ENOTSUP; } @@ -212,7 +216,8 @@ int msi_init(struct PCIDevice *dev, uint8_t offset, } cap_size = msi_cap_sizeof(flags); - config_offset = pci_add_capability(dev, PCI_CAP_ID_MSI, offset, cap_size); + config_offset = pci_add_capability2(dev, PCI_CAP_ID_MSI, offset, + cap_size, errp); if (config_offset < 0) { return config_offset; } diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 4b585f47b6..149994b815 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -78,10 +78,37 @@ static const VMStateDescription vmstate_pcibus = { } }; +static void pci_init_bus_master(PCIDevice *pci_dev) +{ + AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev); + + memory_region_init_alias(&pci_dev->bus_master_enable_region, + OBJECT(pci_dev), "bus master", + dma_as->root, 0, memory_region_size(dma_as->root)); + memory_region_set_enabled(&pci_dev->bus_master_enable_region, false); + address_space_init(&pci_dev->bus_master_as, + &pci_dev->bus_master_enable_region, pci_dev->name); +} + +static void pcibus_machine_done(Notifier *notifier, void *data) +{ + PCIBus *bus = container_of(notifier, PCIBus, machine_done); + int i; + + for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { + if (bus->devices[i]) { + pci_init_bus_master(bus->devices[i]); + } + } +} + static void pci_bus_realize(BusState *qbus, Error **errp) { PCIBus *bus = PCI_BUS(qbus); + bus->machine_done.notify = pcibus_machine_done; + qemu_add_machine_init_done_notifier(&bus->machine_done); + vmstate_register(NULL, -1, &vmstate_pcibus, bus); } @@ -89,6 +116,8 @@ static void pci_bus_unrealize(BusState *qbus, Error **errp) { PCIBus *bus = PCI_BUS(qbus); + qemu_remove_machine_init_done_notifier(&bus->machine_done); + vmstate_unregister(NULL, &vmstate_pcibus, bus); } @@ -920,7 +949,6 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus, PCIConfigReadFunc *config_read = pc->config_read; PCIConfigWriteFunc *config_write = pc->config_write; Error *local_err = NULL; - AddressSpace *dma_as; DeviceState *dev = DEVICE(pci_dev); pci_dev->bus = bus; @@ -961,15 +989,10 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus, pci_dev->devfn = devfn; pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev); - dma_as = pci_device_iommu_address_space(pci_dev); - - memory_region_init_alias(&pci_dev->bus_master_enable_region, - OBJECT(pci_dev), "bus master", - dma_as->root, 0, memory_region_size(dma_as->root)); - memory_region_set_enabled(&pci_dev->bus_master_enable_region, false); - address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_enable_region, - name); + if (qdev_hotplug) { + pci_init_bus_master(pci_dev); + } pstrcpy(pci_dev->name, sizeof(pci_dev->name), name); pci_dev->irq_state = 0; pci_config_alloc(pci_dev); @@ -1051,7 +1074,7 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, uint8_t type, MemoryRegion *memory) { PCIIORegion *r; - uint32_t addr; + uint32_t addr; /* offset in pci config space */ uint64_t wmask; pcibus_t size = memory_region_size(memory); @@ -1067,15 +1090,20 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, r->addr = PCI_BAR_UNMAPPED; r->size = 
size; r->type = type; - r->memory = NULL; + r->memory = memory; + r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO + ? pci_dev->bus->address_space_io + : pci_dev->bus->address_space_mem; wmask = ~(size - 1); - addr = pci_bar(pci_dev, region_num); if (region_num == PCI_ROM_SLOT) { /* ROM enable bit is writable */ wmask |= PCI_ROM_ADDRESS_ENABLE; } + + addr = pci_bar(pci_dev, region_num); pci_set_long(pci_dev->config + addr, type); + if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) && r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_set_quad(pci_dev->wmask + addr, wmask); @@ -1084,11 +1112,6 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff); pci_set_long(pci_dev->cmask + addr, 0xffffffff); } - pci_dev->io_regions[region_num].memory = memory; - pci_dev->io_regions[region_num].address_space - = type & PCI_BASE_ADDRESS_SPACE_IO - ? pci_dev->bus->address_space_io - : pci_dev->bus->address_space_mem; } static void pci_update_vga(PCIDevice *pci_dev) @@ -2510,13 +2533,13 @@ static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) if (limit >= base) { Range pref_range; - pref_range.begin = base; - pref_range.end = limit + 1; + range_set_bounds(&pref_range, base, limit); range_extend(range, &pref_range); } } for (i = 0; i < PCI_NUM_REGIONS; ++i) { PCIIORegion *r = &dev->io_regions[i]; + pcibus_t lob, upb; Range region_range; if (!r->size || @@ -2524,16 +2547,17 @@ static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) { continue; } - region_range.begin = pci_bar_address(dev, i, r->type, r->size); - region_range.end = region_range.begin + r->size; - if (region_range.begin == PCI_BAR_UNMAPPED) { + lob = pci_bar_address(dev, i, r->type, r->size); + upb = lob + r->size - 1; + if (lob == PCI_BAR_UNMAPPED) { continue; } - region_range.begin = MAX(region_range.begin, 0x1ULL << 32); + lob = MAX(lob, 0x1ULL << 32); - if (region_range.end - 1 >= region_range.begin) { + if (upb >= lob) { + range_set_bounds(®ion_range, lob, upb); range_extend(range, ®ion_range); } } @@ -2541,7 +2565,7 @@ static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) void pci_bus_get_w64_range(PCIBus *bus, Range *range) { - range->begin = range->end = 0; + range_make_empty(range); pci_for_each_device_under_bus(bus, pci_dev_get_w64, range); } diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index e2d4e68ba3..048ce6a424 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "sysemu/sysemu.h" #include "qapi/qmp/types.h" +#include "qapi/qmp/qjson.h" #include "monitor/monitor.h" #include "hw/pci/pci_bridge.h" #include "hw/pci/pcie.h" diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs index 5cc6608e50..91a3420f47 100644 --- a/hw/ppc/Makefile.objs +++ b/hw/ppc/Makefile.objs @@ -8,6 +8,7 @@ obj-$(CONFIG_PSERIES) += spapr_cpu_core.o ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy) obj-y += spapr_pci_vfio.o endif +obj-$(CONFIG_PSERIES) += spapr_rtas_ddw.o # PowerPC 4xx boards obj-y += ppc405_boards.o ppc4xx_devs.o ppc405_uc.o ppc440_bamboo.o obj-y += ppc4xx_pci.o diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 32e88b3786..7d2510658d 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -380,6 +380,7 @@ static void ppc_core99_init(MachineState *machine) pci_bus = pci_pmac_init(pic, get_system_memory(), get_system_io()); machine_arch = ARCH_MAC99; } + object_property_set_bool(OBJECT(pci_bus), true, "realized", 
&error_abort); machine->usb |= defaults_enabled() && !machine->usb_disabled; diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 78ebd9ee38..7f33a1b2b5 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1771,6 +1771,13 @@ static void ppc_spapr_init(MachineState *machine) spapr->vrma_adjust = 1; spapr->rma_size = MIN(spapr->rma_size, 0x10000000); } + + /* Actually we don't support unbounded RMA anymore since we + * added proper emulation of HV mode. The max we can get is + * 16G which also happens to be what we configure for PAPR + * mode so make sure we don't do anything bigger than that + */ + spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull); } if (spapr->rma_size > node0_size) { @@ -2489,7 +2496,12 @@ DEFINE_SPAPR_MACHINE(2_7, "2.7", true); * pseries-2.6 */ #define SPAPR_COMPAT_2_6 \ - HW_COMPAT_2_6 + HW_COMPAT_2_6 \ + { \ + .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\ + .property = "ddw",\ + .value = stringify(off),\ + }, static void spapr_machine_2_6_instance_options(MachineState *machine) { diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index a384db5204..70b6b0b5ee 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -259,9 +259,9 @@ out: error_propagate(errp, local_err); } -static int spapr_cpu_core_realize_child(Object *child, void *opaque) +static void spapr_cpu_core_realize_child(Object *child, Error **errp) { - Error **errp = opaque, *local_err = NULL; + Error *local_err = NULL; sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); CPUState *cs = CPU(child); PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -269,15 +269,14 @@ static int spapr_cpu_core_realize_child(Object *child, void *opaque) object_property_set_bool(child, true, "realized", &local_err); if (local_err) { error_propagate(errp, local_err); - return 1; + return; } spapr_cpu_init(spapr, cpu, &local_err); if (local_err) { error_propagate(errp, local_err); - return 1; + return; } - return 0; } static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) @@ -287,13 +286,13 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) const char *typename = object_class_get_name(sc->cpu_class); size_t size = object_type_get_instance_size(typename); Error *local_err = NULL; - Object *obj; - int i; + void *obj; + int i, j; sc->threads = g_malloc0(size * cc->nr_threads); for (i = 0; i < cc->nr_threads; i++) { char id[32]; - void *obj = sc->threads + i * size; + obj = sc->threads + i * size; object_initialize(obj, size, typename); snprintf(id, sizeof(id), "thread[%d]", i); @@ -303,12 +302,16 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) } object_unref(obj); } - object_child_foreach(OBJECT(dev), spapr_cpu_core_realize_child, &local_err); - if (local_err) { - goto err; - } else { - return; + + for (j = 0; j < cc->nr_threads; j++) { + obj = sc->threads + j * size; + + spapr_cpu_core_realize_child(obj, &local_err); + if (local_err) { + goto err; + } } + return; err: while (--i >= 0) { diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index d276db3a72..26a067951c 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -300,7 +300,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name, /* shouldn't ever see an FDT_END_NODE before FDT_BEGIN_NODE */ g_assert(fdt_depth > 0); visit_check_struct(v, &err); - visit_end_struct(v); + visit_end_struct(v, NULL); if (err) { error_propagate(errp, err); return; @@ -323,7 +323,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name, return; } } - visit_end_list(v); + visit_end_list(v, 
NULL); break; } default: diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index e011ed4b66..73af112e1d 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -83,12 +83,12 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr, target_ulong pte_index = args[1]; target_ulong pteh = args[2]; target_ulong ptel = args[3]; - unsigned apshift, spshift; + unsigned apshift; target_ulong raddr; target_ulong index; uint64_t token; - apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift); + apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel); if (!apshift) { /* Bad page size encoding */ return H_PARAMETER; diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index e230bacae1..d57b05d5c0 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -156,6 +156,16 @@ static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu) return 1ULL << tcet->page_shift; } +static void spapr_tce_notify_started(MemoryRegion *iommu) +{ + spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), true); +} + +static void spapr_tce_notify_stopped(MemoryRegion *iommu) +{ + spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), false); +} + static int spapr_tce_table_post_load(void *opaque, int version_id) { sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque); @@ -236,6 +246,8 @@ static const VMStateDescription vmstate_spapr_tce_table = { static MemoryRegionIOMMUOps spapr_iommu_ops = { .translate = spapr_tce_translate_iommu, .get_min_page_size = spapr_tce_get_min_page_size, + .notify_started = spapr_tce_notify_started, + .notify_stopped = spapr_tce_notify_stopped, }; static int spapr_tce_table_realize(DeviceState *dev) diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 8c1e6b17c3..949c44fec8 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -35,6 +35,7 @@ #include "hw/ppc/spapr.h" #include "hw/pci-host/spapr.h" #include "exec/address-spaces.h" +#include "exec/ram_addr.h" #include <libfdt.h> #include "trace.h" #include "qemu/error-report.h" @@ -45,6 +46,7 @@ #include "hw/ppc/spapr_drc.h" #include "sysemu/device_tree.h" #include "sysemu/kvm.h" +#include "sysemu/hostmem.h" #include "hw/vfio/vfio.h" @@ -1087,12 +1089,6 @@ static void spapr_phb_add_pci_device(sPAPRDRConnector *drc, void *fdt = NULL; int fdt_start_offset = 0, fdt_size; - if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) { - sPAPRTCETable *tcet = spapr_tce_find_by_liobn(phb->dma_liobn); - - spapr_tce_set_need_vfio(tcet, true); - } - fdt = create_device_tree(&fdt_size); fdt_start_offset = spapr_create_pci_child_dt(phb, pdev, fdt, 0); if (!fdt_start_offset) { @@ -1310,11 +1306,14 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) PCIBus *bus; uint64_t msi_window_size = 4096; sPAPRTCETable *tcet; + const unsigned windows_supported = + sphb->ddw_enabled ? 
SPAPR_PCI_DMA_MAX_WINDOWS : 1; if (sphb->index != (uint32_t)-1) { hwaddr windows_base; - if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1) + if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn[0] != (uint32_t)-1) + || (sphb->dma_liobn[1] != (uint32_t)-1 && windows_supported == 2) || (sphb->mem_win_addr != (hwaddr)-1) || (sphb->io_win_addr != (hwaddr)-1)) { error_setg(errp, "Either \"index\" or other parameters must" @@ -1329,7 +1328,9 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) } sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index; - sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0); + for (i = 0; i < windows_supported; ++i) { + sphb->dma_liobn[i] = SPAPR_PCI_LIOBN(sphb->index, i); + } windows_base = SPAPR_PCI_WINDOW_BASE + sphb->index * SPAPR_PCI_WINDOW_SPACING; @@ -1342,8 +1343,9 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) return; } - if (sphb->dma_liobn == (uint32_t)-1) { - error_setg(errp, "LIOBN not specified for PHB"); + if ((sphb->dma_liobn[0] == (uint32_t)-1) || + ((sphb->dma_liobn[1] == (uint32_t)-1) && (windows_supported > 1))) { + error_setg(errp, "LIOBN(s) not specified for PHB"); return; } @@ -1462,16 +1464,18 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) } } - tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn); - if (!tcet) { - error_setg(errp, "Unable to create TCE table for %s", - sphb->dtbusname); - return; + /* DMA setup */ + for (i = 0; i < windows_supported; ++i) { + tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn[i]); + if (!tcet) { + error_setg(errp, "Creating window#%d failed for %s", + i, sphb->dtbusname); + return; + } + memory_region_add_subregion_overlap(&sphb->iommu_root, 0, + spapr_tce_get_iommu(tcet), 0); } - memory_region_add_subregion_overlap(&sphb->iommu_root, 0, - spapr_tce_get_iommu(tcet), 0); - sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free); } @@ -1488,13 +1492,19 @@ static int spapr_phb_children_reset(Object *child, void *opaque) void spapr_phb_dma_reset(sPAPRPHBState *sphb) { - sPAPRTCETable *tcet = spapr_tce_find_by_liobn(sphb->dma_liobn); + int i; + sPAPRTCETable *tcet; - if (tcet && tcet->nb_table) { - spapr_tce_table_disable(tcet); + for (i = 0; i < SPAPR_PCI_DMA_MAX_WINDOWS; ++i) { + tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]); + + if (tcet && tcet->nb_table) { + spapr_tce_table_disable(tcet); + } } /* Register default 32bit DMA window */ + tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]); spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr, sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT); } @@ -1516,7 +1526,8 @@ static void spapr_phb_reset(DeviceState *qdev) static Property spapr_phb_properties[] = { DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1), DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1), - DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1), + DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn[0], -1), + DEFINE_PROP_UINT32("liobn64", sPAPRPHBState, dma_liobn[1], -1), DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1), DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size, SPAPR_PCI_MMIO_WIN_SIZE), @@ -1528,6 +1539,11 @@ static Property spapr_phb_properties[] = { /* Default DMA window is 0..1GB */ DEFINE_PROP_UINT64("dma_win_addr", sPAPRPHBState, dma_win_addr, 0), DEFINE_PROP_UINT64("dma_win_size", sPAPRPHBState, dma_win_size, 0x40000000), + DEFINE_PROP_UINT64("dma64_win_addr", sPAPRPHBState, dma64_win_addr, + 0x800000000000000ULL), + 
DEFINE_PROP_BOOL("ddw", sPAPRPHBState, ddw_enabled, true), + DEFINE_PROP_UINT64("pgsz", sPAPRPHBState, page_size_mask, + (1ULL << 12) | (1ULL << 16)), DEFINE_PROP_END_OF_LIST(), }; @@ -1604,7 +1620,7 @@ static const VMStateDescription vmstate_spapr_pci = { .post_load = spapr_pci_post_load, .fields = (VMStateField[]) { VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState), - VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState), + VMSTATE_UINT32_EQUAL(dma_liobn[0], sPAPRPHBState), VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState), VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState), VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState), @@ -1780,6 +1796,15 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb, uint32_t interrupt_map_mask[] = { cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)}; uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7]; + uint32_t ddw_applicable[] = { + cpu_to_be32(RTAS_IBM_QUERY_PE_DMA_WINDOW), + cpu_to_be32(RTAS_IBM_CREATE_PE_DMA_WINDOW), + cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW) + }; + uint32_t ddw_extensions[] = { + cpu_to_be32(1), + cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW) + }; sPAPRTCETable *tcet; PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; sPAPRFDT s_fdt; @@ -1804,6 +1829,14 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb, _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1)); _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS_SPAPR)); + /* Dynamic DMA window */ + if (phb->ddw_enabled) { + _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-applicable", &ddw_applicable, + sizeof(ddw_applicable))); + _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-extensions", + &ddw_extensions, sizeof(ddw_extensions))); + } + /* Build the interrupt-map, this must matches what is done * in pci_spapr_map_irq */ @@ -1827,7 +1860,7 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb, _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map, sizeof(interrupt_map))); - tcet = spapr_tce_find_by_liobn(phb->dma_liobn); + tcet = spapr_tce_find_by_liobn(phb->dma_liobn[0]); if (!tcet) { return -1; } diff --git a/hw/ppc/spapr_rtas_ddw.c b/hw/ppc/spapr_rtas_ddw.c new file mode 100644 index 0000000000..177dcffc9b --- /dev/null +++ b/hw/ppc/spapr_rtas_ddw.c @@ -0,0 +1,295 @@ +/* + * QEMU sPAPR Dynamic DMA windows support + * + * Copyright (c) 2015 Alexey Kardashevskiy, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu/error-report.h" +#include "hw/ppc/spapr.h" +#include "hw/pci-host/spapr.h" +#include "trace.h" + +static int spapr_phb_get_active_win_num_cb(Object *child, void *opaque) +{ + sPAPRTCETable *tcet; + + tcet = (sPAPRTCETable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE); + if (tcet && tcet->nb_table) { + ++*(unsigned *)opaque; + } + return 0; +} + +static unsigned spapr_phb_get_active_win_num(sPAPRPHBState *sphb) +{ + unsigned ret = 0; + + object_child_foreach(OBJECT(sphb), spapr_phb_get_active_win_num_cb, &ret); + + return ret; +} + +static int spapr_phb_get_free_liobn_cb(Object *child, void *opaque) +{ + sPAPRTCETable *tcet; + + tcet = (sPAPRTCETable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE); + if (tcet && !tcet->nb_table) { + *(uint32_t *)opaque = tcet->liobn; + return 1; + } + return 0; +} + +static unsigned spapr_phb_get_free_liobn(sPAPRPHBState *sphb) +{ + uint32_t liobn = 0; + + object_child_foreach(OBJECT(sphb), spapr_phb_get_free_liobn_cb, &liobn); + + return liobn; +} + +static uint32_t spapr_page_mask_to_query_mask(uint64_t page_mask) +{ + int i; + uint32_t mask = 0; + const struct { int shift; uint32_t mask; } masks[] = { + { 12, RTAS_DDW_PGSIZE_4K }, + { 16, RTAS_DDW_PGSIZE_64K }, + { 24, RTAS_DDW_PGSIZE_16M }, + { 25, RTAS_DDW_PGSIZE_32M }, + { 26, RTAS_DDW_PGSIZE_64M }, + { 27, RTAS_DDW_PGSIZE_128M }, + { 28, RTAS_DDW_PGSIZE_256M }, + { 34, RTAS_DDW_PGSIZE_16G }, + }; + + for (i = 0; i < ARRAY_SIZE(masks); ++i) { + if (page_mask & (1ULL << masks[i].shift)) { + mask |= masks[i].mask; + } + } + + return mask; +} + +static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + sPAPRPHBState *sphb; + uint64_t buid, max_window_size; + uint32_t avail, addr, pgmask = 0; + MachineState *machine = MACHINE(spapr); + + if ((nargs != 3) || (nret != 5)) { + goto param_error_exit; + } + + buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); + addr = rtas_ld(args, 0); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb || !sphb->ddw_enabled) { + goto param_error_exit; + } + + /* Translate page mask to LoPAPR format */ + pgmask = spapr_page_mask_to_query_mask(sphb->page_size_mask); + + /* + * This is "Largest contiguous block of TCEs allocated specifically + * for (that is, are reserved for) this PE". + * Return the maximum number as maximum supported RAM size was in 4K pages. 
+ */ + if (machine->ram_size == machine->maxram_size) { + max_window_size = machine->ram_size; + } else { + MemoryHotplugState *hpms = &spapr->hotplug_memory; + + max_window_size = hpms->base + memory_region_size(&hpms->mr); + } + + avail = SPAPR_PCI_DMA_MAX_WINDOWS - spapr_phb_get_active_win_num(sphb); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, avail); + rtas_st(rets, 2, max_window_size >> SPAPR_TCE_PAGE_SHIFT); + rtas_st(rets, 3, pgmask); + rtas_st(rets, 4, 0); /* DMA migration mask, not supported */ + + trace_spapr_iommu_ddw_query(buid, addr, avail, max_window_size, pgmask); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_create_pe_dma_window(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + sPAPRPHBState *sphb; + sPAPRTCETable *tcet = NULL; + uint32_t addr, page_shift, window_shift, liobn; + uint64_t buid, win_addr; + int windows; + + if ((nargs != 5) || (nret != 4)) { + goto param_error_exit; + } + + buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); + addr = rtas_ld(args, 0); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb || !sphb->ddw_enabled) { + goto param_error_exit; + } + + page_shift = rtas_ld(args, 3); + window_shift = rtas_ld(args, 4); + liobn = spapr_phb_get_free_liobn(sphb); + windows = spapr_phb_get_active_win_num(sphb); + + if (!(sphb->page_size_mask & (1ULL << page_shift)) || + (window_shift < page_shift)) { + goto param_error_exit; + } + + if (!liobn || !sphb->ddw_enabled || windows == SPAPR_PCI_DMA_MAX_WINDOWS) { + goto hw_error_exit; + } + + tcet = spapr_tce_find_by_liobn(liobn); + if (!tcet) { + goto hw_error_exit; + } + + win_addr = (windows == 0) ? sphb->dma_win_addr : sphb->dma64_win_addr; + spapr_tce_table_enable(tcet, page_shift, win_addr, + 1ULL << (window_shift - page_shift)); + if (!tcet->nb_table) { + goto hw_error_exit; + } + + trace_spapr_iommu_ddw_create(buid, addr, 1ULL << page_shift, + 1ULL << window_shift, tcet->bus_offset, liobn); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, liobn); + rtas_st(rets, 2, tcet->bus_offset >> 32); + rtas_st(rets, 3, tcet->bus_offset & ((uint32_t) -1)); + + return; + +hw_error_exit: + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_remove_pe_dma_window(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + sPAPRPHBState *sphb; + sPAPRTCETable *tcet; + uint32_t liobn; + + if ((nargs != 1) || (nret != 1)) { + goto param_error_exit; + } + + liobn = rtas_ld(args, 0); + tcet = spapr_tce_find_by_liobn(liobn); + if (!tcet) { + goto param_error_exit; + } + + sphb = SPAPR_PCI_HOST_BRIDGE(OBJECT(tcet)->parent); + if (!sphb || !sphb->ddw_enabled || !tcet->nb_table) { + goto param_error_exit; + } + + spapr_tce_table_disable(tcet); + trace_spapr_iommu_ddw_remove(liobn); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_reset_pe_dma_window(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + sPAPRPHBState *sphb; + uint64_t buid; + uint32_t addr; + + if ((nargs != 3) || (nret != 1)) { + goto param_error_exit; + } + + buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); + addr = rtas_ld(args, 0); 
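To illustrate how the ibm,query-pe-dma-window handler above reports supported IOMMU page sizes, the following standalone sketch (not part of the patch) reimplements the page-mask translation. Only the shift-to-bit mapping mirrors the handler; the RTAS_DDW_PGSIZE_* numeric values are assumptions made for this sketch.

#include <stdint.h>
#include <stdio.h>

/* Query-format page-size bits; the values here are assumed for the sketch. */
#define RTAS_DDW_PGSIZE_4K   0x01
#define RTAS_DDW_PGSIZE_64K  0x02
#define RTAS_DDW_PGSIZE_16M  0x04

/* Same shape as spapr_page_mask_to_query_mask(): turn a bitmap of page
 * shifts (the PHB "pgsz" property) into the bitmap returned to the guest. */
static uint32_t page_mask_to_query_mask(uint64_t page_mask)
{
    static const struct { int shift; uint32_t bit; } map[] = {
        { 12, RTAS_DDW_PGSIZE_4K },
        { 16, RTAS_DDW_PGSIZE_64K },
        { 24, RTAS_DDW_PGSIZE_16M },
    };
    uint32_t mask = 0;
    size_t i;

    for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
        if (page_mask & (1ULL << map[i].shift)) {
            mask |= map[i].bit;
        }
    }
    return mask;
}

int main(void)
{
    uint64_t pgsz = (1ULL << 12) | (1ULL << 16); /* 4K and 64K pages */

    printf("query mask = 0x%x\n", page_mask_to_query_mask(pgsz));
    return 0;
}

With the patch's default "pgsz" property of (1ULL << 12) | (1ULL << 16), a guest querying the window would see the 4K and 64K bits set.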
+ sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb || !sphb->ddw_enabled) { + goto param_error_exit; + } + + spapr_phb_dma_reset(sphb); + trace_spapr_iommu_ddw_reset(buid, addr); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void spapr_rtas_ddw_init(void) +{ + spapr_rtas_register(RTAS_IBM_QUERY_PE_DMA_WINDOW, + "ibm,query-pe-dma-window", + rtas_ibm_query_pe_dma_window); + spapr_rtas_register(RTAS_IBM_CREATE_PE_DMA_WINDOW, + "ibm,create-pe-dma-window", + rtas_ibm_create_pe_dma_window); + spapr_rtas_register(RTAS_IBM_REMOVE_PE_DMA_WINDOW, + "ibm,remove-pe-dma-window", + rtas_ibm_remove_pe_dma_window); + spapr_rtas_register(RTAS_IBM_RESET_PE_DMA_WINDOW, + "ibm,reset-pe-dma-window", + rtas_ibm_reset_pe_dma_window); +} + +type_init(spapr_rtas_ddw_init) diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events index 6da713547f..900679bc9d 100644 --- a/hw/ppc/trace-events +++ b/hw/ppc/trace-events @@ -30,6 +30,10 @@ spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, un spapr_iommu_new_table(uint64_t liobn, void *table, int fd) "liobn=%"PRIx64" table=%p fd=%d" spapr_iommu_pre_save(uint64_t liobn, uint32_t nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" bus_offset=%"PRIx64" ps=%"PRIu32 spapr_iommu_post_load(uint64_t liobn, uint32_t pre_nb, uint32_t post_nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" => %"PRIx32" bus_offset=%"PRIx64" ps=%"PRIu32 +spapr_iommu_ddw_query(uint64_t buid, uint32_t cfgaddr, unsigned wa, uint64_t win_size, uint32_t pgmask) "buid=%"PRIx64" addr=%"PRIx32", %u windows available, max window size=%"PRIx64", mask=%"PRIx32 +spapr_iommu_ddw_create(uint64_t buid, uint32_t cfgaddr, uint64_t pg_size, uint64_t req_size, uint64_t start, uint32_t liobn) "buid=%"PRIx64" addr=%"PRIx32", page size=0x%"PRIx64", requested=0x%"PRIx64", start addr=%"PRIx64", liobn=%"PRIx32 +spapr_iommu_ddw_remove(uint32_t liobn) "liobn=%"PRIx32 +spapr_iommu_ddw_reset(uint64_t buid, uint32_t cfgaddr) "buid=%"PRIx64" addr=%"PRIx32 # hw/ppc/ppc.c ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)" diff --git a/hw/s390x/Makefile.objs b/hw/s390x/Makefile.objs index 220361782d..41ac4ec325 100644 --- a/hw/s390x/Makefile.objs +++ b/hw/s390x/Makefile.objs @@ -8,6 +8,8 @@ obj-y += ipl.o obj-y += css.o obj-y += s390-virtio-ccw.o obj-y += virtio-ccw.o +obj-y += css-bridge.o +obj-y += ccw-device.o obj-y += s390-pci-bus.o s390-pci-inst.o obj-y += s390-skeys.o obj-$(CONFIG_KVM) += s390-skeys-kvm.o diff --git a/hw/s390x/ccw-device.c b/hw/s390x/ccw-device.c new file mode 100644 index 0000000000..28ea20440e --- /dev/null +++ b/hw/s390x/ccw-device.c @@ -0,0 +1,27 @@ +/* + * Common device infrastructure for devices in the virtual css + * + * Copyright 2016 IBM Corp. + * Author(s): Jing Liu <liujbjl@linux.vnet.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ +#include "qemu/osdep.h" +#include "ccw-device.h" + +static const TypeInfo ccw_device_info = { + .name = TYPE_CCW_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CcwDevice), + .class_size = sizeof(CCWDeviceClass), + .abstract = true, +}; + +static void ccw_device_register(void) +{ + type_register_static(&ccw_device_info); +} + +type_init(ccw_device_register) diff --git a/hw/s390x/ccw-device.h b/hw/s390x/ccw-device.h new file mode 100644 index 0000000000..59ba01b6c5 --- /dev/null +++ b/hw/s390x/ccw-device.h @@ -0,0 +1,43 @@ +/* + * Common device infrastructure for devices in the virtual css + * + * Copyright 2016 IBM Corp. + * Author(s): Jing Liu <liujbjl@linux.vnet.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#ifndef HW_S390X_CCW_DEVICE_H +#define HW_S390X_CCW_DEVICE_H +#include "qom/object.h" +#include "hw/qdev-core.h" +#include "hw/s390x/css.h" + +typedef struct CcwDevice { + DeviceState parent_obj; + SubchDev *sch; + /* <cssid>.<ssid>.<device number> */ + CssDevId bus_id; +} CcwDevice; + +typedef struct CCWDeviceClass { + DeviceClass parent_class; + void (*unplug)(HotplugHandler *, DeviceState *, Error **); +} CCWDeviceClass; + +static inline CcwDevice *to_ccw_dev_fast(DeviceState *d) +{ + return container_of(d, CcwDevice, parent_obj); +} + +#define TYPE_CCW_DEVICE "ccw-device" + +#define CCW_DEVICE(obj) OBJECT_CHECK(CcwDevice, (obj), TYPE_CCW_DEVICE) +#define CCW_DEVICE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(CCWDeviceClass, (obj), TYPE_CCW_DEVICE) +#define CCW_DEVICE_CLASS(klass) \ + OBJECT_CLASS_CHECK(CCWDeviceClass, (klass), TYPE_CCW_DEVICE) + +#endif diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c new file mode 100644 index 0000000000..e4c24e21f3 --- /dev/null +++ b/hw/s390x/css-bridge.c @@ -0,0 +1,124 @@ +/* + * css bridge implementation + * + * Copyright 2012,2016 IBM Corp. + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Pierre Morel <pmorel@linux.vnet.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/hotplug.h" +#include "hw/sysbus.h" +#include "qemu/bitops.h" +#include "hw/s390x/css.h" +#include "ccw-device.h" +#include "hw/s390x/css-bridge.h" + +/* + * Invoke device-specific unplug handler, disable the subchannel + * (including sending a channel report to the guest) and remove the + * device from the virtual css bus. + */ +static void ccw_device_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + CcwDevice *ccw_dev = CCW_DEVICE(dev); + CCWDeviceClass *k = CCW_DEVICE_GET_CLASS(ccw_dev); + SubchDev *sch = ccw_dev->sch; + Error *err = NULL; + + if (k->unplug) { + k->unplug(hotplug_dev, dev, &err); + if (err) { + error_propagate(errp, err); + return; + } + } + + /* + * We should arrive here only for device_del, since we don't support + * direct hot(un)plug of channels. + */ + assert(sch != NULL); + /* Subchannel is now disabled and no longer valid. 
*/ + sch->curr_status.pmcw.flags &= ~(PMCW_FLAGS_MASK_ENA | + PMCW_FLAGS_MASK_DNV); + + css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, 1, 0); + + object_unparent(OBJECT(dev)); +} + +static void virtual_css_bus_reset(BusState *qbus) +{ + /* This should actually be modelled via the generic css */ + css_reset(); +} + +static void virtual_css_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->reset = virtual_css_bus_reset; +} + +static const TypeInfo virtual_css_bus_info = { + .name = TYPE_VIRTUAL_CSS_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(VirtualCssBus), + .class_init = virtual_css_bus_class_init, +}; + +VirtualCssBus *virtual_css_bus_init(void) +{ + VirtualCssBus *cbus; + BusState *bus; + DeviceState *dev; + + /* Create bridge device */ + dev = qdev_create(NULL, TYPE_VIRTUAL_CSS_BRIDGE); + qdev_init_nofail(dev); + + /* Create bus on bridge device */ + bus = qbus_create(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css"); + cbus = VIRTUAL_CSS_BUS(bus); + + /* Enable hotplugging */ + qbus_set_hotplug_handler(bus, dev, &error_abort); + + return cbus; + } + +/***************** Virtual-css Bus Bridge Device ********************/ + +static void virtual_css_bridge_class_init(ObjectClass *klass, void *data) +{ + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + hc->unplug = ccw_device_unplug; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo virtual_css_bridge_info = { + .name = TYPE_VIRTUAL_CSS_BRIDGE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SysBusDevice), + .class_init = virtual_css_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static void virtual_css_register(void) +{ + type_register_static(&virtual_css_bridge_info); + type_register_static(&virtual_css_bus_info); +} + +type_init(virtual_css_register) diff --git a/hw/s390x/css.c b/hw/s390x/css.c index 76668814da..54991f5d6f 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -1340,6 +1340,116 @@ SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid) return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid]; } +/** + * Return free device number in subchannel set. + * + * Return index of the first free device number in the subchannel set + * identified by @p cssid and @p ssid, beginning the search at @p + * start and wrapping around at MAX_DEVNO. Return a value exceeding + * MAX_SCHID if there are no free device numbers in the subchannel + * set. + */ +static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid, + uint16_t start) +{ + uint32_t round; + + for (round = 0; round <= MAX_DEVNO; round++) { + uint16_t devno = (start + round) % MAX_DEVNO; + + if (!css_devno_used(cssid, ssid, devno)) { + return devno; + } + } + return MAX_DEVNO + 1; +} + +/** + * Return first free subchannel (id) in subchannel set. + * + * Return index of the first free subchannel in the subchannel set + * identified by @p cssid and @p ssid, if there is any. Return a value + * exceeding MAX_SCHID if there are no free subchannels in the + * subchannel set. 
+ */ +static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid) +{ + uint32_t schid; + + for (schid = 0; schid <= MAX_SCHID; schid++) { + if (!css_find_subch(1, cssid, ssid, schid)) { + return schid; + } + } + return MAX_SCHID + 1; +} + +/** + * Return first free subchannel (id) in subchannel set for a device number + * + * Verify the device number @p devno is not used yet in the subchannel + * set identified by @p cssid and @p ssid. Set @p schid to the index + * of the first free subchannel in the subchannel set, if there is + * any. Return true if everything succeeded and false otherwise. + */ +static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid, + uint16_t devno, uint16_t *schid, + Error **errp) +{ + uint32_t free_schid; + + assert(schid); + if (css_devno_used(cssid, ssid, devno)) { + error_setg(errp, "Device %x.%x.%04x already exists", + cssid, ssid, devno); + return false; + } + free_schid = css_find_free_subch(cssid, ssid); + if (free_schid > MAX_SCHID) { + error_setg(errp, "No free subchannel found for %x.%x.%04x", + cssid, ssid, devno); + return false; + } + *schid = free_schid; + return true; +} + +/** + * Return first free subchannel (id) and device number + * + * Locate the first free subchannel and first free device number in + * any of the subchannel sets of the channel subsystem identified by + * @p cssid. Return false if no free subchannel / device number could + * be found. Otherwise set @p ssid, @p devno and @p schid to identify + * the available subchannel and device number and return true. + * + * May modify @p ssid, @p devno and / or @p schid even if no free + * subchannel / device number could be found. + */ +static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid, + uint16_t *devno, uint16_t *schid, + Error **errp) +{ + uint32_t free_schid, free_devno; + + assert(ssid && devno && schid); + for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) { + free_schid = css_find_free_subch(cssid, *ssid); + if (free_schid > MAX_SCHID) { + continue; + } + free_devno = css_find_free_devno(cssid, *ssid, free_schid); + if (free_devno > MAX_DEVNO) { + continue; + } + *schid = free_schid; + *devno = free_devno; + return true; + } + error_setg(errp, "Virtual channel subsystem is full!"); + return false; +} + bool css_subch_visible(SubchDev *sch) { if (sch->ssid > channel_subsys.max_ssid) { @@ -1762,3 +1872,36 @@ PropertyInfo css_devid_propinfo = { .get = get_css_devid, .set = set_css_devid, }; + +SubchDev *css_create_virtual_sch(CssDevId bus_id, Error **errp) +{ + uint16_t schid = 0; + SubchDev *sch; + + if (bus_id.valid) { + /* Enforce use of virtual cssid. 
*/ + if (bus_id.cssid != VIRTUAL_CSSID) { + error_setg(errp, "cssid %hhx not valid for virtual devices", + bus_id.cssid); + return NULL; + } + if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid, + bus_id.devid, &schid, errp)) { + return NULL; + } + } else { + bus_id.cssid = VIRTUAL_CSSID; + if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid, + &bus_id.devid, &schid, errp)) { + return NULL; + } + } + + sch = g_malloc0(sizeof(*sch)); + sch->cssid = bus_id.cssid; + sch->ssid = bus_id.ssid; + sch->devno = bus_id.devid; + sch->schid = schid; + css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch); + return sch; +} diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c index e6bf7cf7c0..2e2664f22e 100644 --- a/hw/s390x/ipl.c +++ b/hw/s390x/ipl.c @@ -69,8 +69,8 @@ static const VMStateDescription vmstate_ipl = { .version_id = 0, .minimum_version_id = 0, .fields = (VMStateField[]) { - VMSTATE_UINT64(start_addr, S390IPLState), - VMSTATE_UINT64(bios_start_addr, S390IPLState), + VMSTATE_UINT64(compat_start_addr, S390IPLState), + VMSTATE_UINT64(compat_bios_start_addr, S390IPLState), VMSTATE_STRUCT(iplb, S390IPLState, 0, vmstate_iplb, IplParameterBlock), VMSTATE_BOOL(iplb_valid, S390IPLState), VMSTATE_UINT8(cssid, S390IPLState), @@ -192,6 +192,13 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size); } } + /* + * Don't ever use the migrated values, they could come from a different + * BIOS and therefore don't work. But still migrate the values, so + * QEMUs relying on it don't break. + */ + ipl->compat_start_addr = ipl->start_addr; + ipl->compat_bios_start_addr = ipl->bios_start_addr; qemu_register_reset(qdev_reset_all_fn, dev); error: error_propagate(errp, err); @@ -214,10 +221,14 @@ static bool s390_gen_initial_iplb(S390IPLState *ipl) dev_st = get_boot_device(0); if (dev_st) { - VirtioCcwDevice *ccw_dev = (VirtioCcwDevice *) object_dynamic_cast( - OBJECT(qdev_get_parent_bus(dev_st)->parent), + VirtioCcwDevice *virtio_ccw_dev = (VirtioCcwDevice *) + object_dynamic_cast(OBJECT(qdev_get_parent_bus(dev_st)->parent), TYPE_VIRTIO_CCW_DEVICE); - if (ccw_dev) { + SCSIDevice *sd = (SCSIDevice *) object_dynamic_cast(OBJECT(dev_st), + TYPE_SCSI_DEVICE); + if (virtio_ccw_dev) { + CcwDevice *ccw_dev = CCW_DEVICE(virtio_ccw_dev); + ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); ipl->iplb.blk0_len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN); @@ -225,6 +236,22 @@ static bool s390_gen_initial_iplb(S390IPLState *ipl) ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno); ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3; return true; + } else if (sd) { + SCSIBus *bus = scsi_bus_from_device(sd); + VirtIOSCSI *vdev = container_of(bus, VirtIOSCSI, bus); + VirtIOSCSICcw *scsi_ccw = container_of(vdev, VirtIOSCSICcw, vdev); + CcwDevice *ccw_dev = CCW_DEVICE(scsi_ccw); + + ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN); + ipl->iplb.blk0_len = + cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN - S390_IPLB_HEADER_LEN); + ipl->iplb.pbt = S390_IPL_TYPE_QEMU_SCSI; + ipl->iplb.scsi.lun = cpu_to_be32(sd->lun); + ipl->iplb.scsi.target = cpu_to_be16(sd->id); + ipl->iplb.scsi.channel = cpu_to_be16(sd->channel); + ipl->iplb.scsi.devno = cpu_to_be16(ccw_dev->sch->devno); + ipl->iplb.scsi.ssid = ccw_dev->sch->ssid & 3; + return true; } } diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h index 9aa4d942a7..c89109585a 100644 --- a/hw/s390x/ipl.h +++ b/hw/s390x/ipl.h @@ -46,6 +46,16 @@ struct IplBlockFcp { } QEMU_PACKED; typedef struct IplBlockFcp 
IplBlockFcp; +struct IplBlockQemuScsi { + uint32_t lun; + uint16_t target; + uint16_t channel; + uint8_t reserved0[77]; + uint8_t ssid; + uint16_t devno; +} QEMU_PACKED; +typedef struct IplBlockQemuScsi IplBlockQemuScsi; + union IplParameterBlock { struct { uint32_t len; @@ -59,6 +69,7 @@ union IplParameterBlock { union { IplBlockCcw ccw; IplBlockFcp fcp; + IplBlockQemuScsi scsi; }; } QEMU_PACKED; struct { @@ -82,7 +93,9 @@ struct S390IPLState { /*< private >*/ DeviceState parent_obj; uint64_t start_addr; + uint64_t compat_start_addr; uint64_t bios_start_addr; + uint64_t compat_bios_start_addr; bool enforce_bios; IplParameterBlock iplb; bool iplb_valid; @@ -102,10 +115,12 @@ typedef struct S390IPLState S390IPLState; #define S390_IPL_TYPE_FCP 0x00 #define S390_IPL_TYPE_CCW 0x02 +#define S390_IPL_TYPE_QEMU_SCSI 0xff #define S390_IPLB_HEADER_LEN 8 #define S390_IPLB_MIN_CCW_LEN 200 #define S390_IPLB_MIN_FCP_LEN 384 +#define S390_IPLB_MIN_QEMU_SCSI_LEN 200 static inline bool iplb_valid_len(IplParameterBlock *iplb) { diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index a77c10ce9e..640a4eaa61 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -12,6 +12,8 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/visitor.h" #include "qemu-common.h" #include "cpu.h" #include "s390-pci-bus.h" @@ -29,6 +31,19 @@ do { } while (0) #endif +static S390pciState *s390_get_phb(void) +{ + static S390pciState *phb; + + if (!phb) { + phb = S390_PCI_HOST_BRIDGE( + object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); + assert(phb != NULL); + } + + return phb; +} + int chsc_sei_nt2_get_event(void *res) { ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res; @@ -36,12 +51,7 @@ int chsc_sei_nt2_get_event(void *res) PciCcdfErr *eccdf; int rc = 1; SeiContainer *sei_cont; - S390pciState *s = S390_PCI_HOST_BRIDGE( - object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); - - if (!s) { - return rc; - } + S390pciState *s = s390_get_phb(); sei_cont = QTAILQ_FIRST(&s->pending_sei); if (sei_cont) { @@ -76,30 +86,40 @@ int chsc_sei_nt2_get_event(void *res) int chsc_sei_nt2_have_event(void) { - S390pciState *s = S390_PCI_HOST_BRIDGE( - object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); + S390pciState *s = s390_get_phb(); - if (!s) { - return 0; + return !QTAILQ_EMPTY(&s->pending_sei); +} + +S390PCIBusDevice *s390_pci_find_next_avail_dev(S390PCIBusDevice *pbdev) +{ + int idx = 0; + S390PCIBusDevice *dev = NULL; + S390pciState *s = s390_get_phb(); + + if (pbdev) { + idx = (pbdev->fh & FH_MASK_INDEX) + 1; } - return !QTAILQ_EMPTY(&s->pending_sei); + for (; idx < PCI_SLOT_MAX; idx++) { + dev = s->pbdev[idx]; + if (dev && dev->state != ZPCI_FS_RESERVED) { + return dev; + } + } + + return NULL; } S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid) { S390PCIBusDevice *pbdev; int i; - S390pciState *s = S390_PCI_HOST_BRIDGE( - object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); - - if (!s) { - return NULL; - } + S390pciState *s = s390_get_phb(); for (i = 0; i < PCI_SLOT_MAX; i++) { - pbdev = &s->pbdev[i]; - if ((pbdev->fh != 0) && (pbdev->fid == fid)) { + pbdev = s->pbdev[i]; + if (pbdev && pbdev->fid == fid) { return pbdev; } } @@ -118,16 +138,22 @@ void s390_pci_sclp_configure(SCCB *sccb) goto out; } - if (pbdev) { - if (pbdev->configured) { - rc = SCLP_RC_NO_ACTION_REQUIRED; - } else { - pbdev->configured = true; - rc = SCLP_RC_NORMAL_COMPLETION; - } - } else { + if (!pbdev) { DPRINTF("sclp config no dev found\n"); rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED; + goto out; + } + + 
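s390_pci_find_next_avail_dev() above resumes a walk over the host bridge's pbdev[] slots from the index encoded in the previous function handle; list_pci() further down uses it to implement CLP resume tokens. The following self-contained sketch models that pagination pattern with a plain array; the names and sizes are illustrative only, not QEMU API.

#include <stdio.h>

#define SLOT_MAX 8

/* Toy stand-in for the sparse pbdev[] slot table: NULL means "empty". */
static const char *slots[SLOT_MAX] = {
    NULL, "zpci1", NULL, "zpci3", "zpci4", NULL, NULL, "zpci7",
};

/* Return the first occupied slot at or after 'start', or -1 if none;
 * this mirrors the "next available device" walk in the patch. */
static int next_avail(int start)
{
    int i;

    for (i = start; i < SLOT_MAX; i++) {
        if (slots[i]) {
            return i;
        }
    }
    return -1;
}

int main(void)
{
    /* Resume-token style listing: each step restarts just past the
     * previously returned slot. */
    int idx = next_avail(0);

    while (idx >= 0) {
        printf("slot %d: %s\n", idx, slots[idx]);
        idx = next_avail(idx + 1);
    }
    return 0;
}

In the patch the resume token is derived from the handle's low bits (fh & FH_MASK_INDEX), so the guest can continue a truncated list without the host keeping per-caller state.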
switch (pbdev->state) { + case ZPCI_FS_RESERVED: + rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE; + break; + case ZPCI_FS_STANDBY: + pbdev->state = ZPCI_FS_DISABLED; + rc = SCLP_RC_NORMAL_COMPLETION; + break; + default: + rc = SCLP_RC_NO_ACTION_REQUIRED; } out: psccb->header.response_code = cpu_to_be16(rc); @@ -144,81 +170,96 @@ void s390_pci_sclp_deconfigure(SCCB *sccb) goto out; } - if (pbdev) { - if (!pbdev->configured) { - rc = SCLP_RC_NO_ACTION_REQUIRED; - } else { - if (pbdev->summary_ind) { - pci_dereg_irqs(pbdev); - } - if (pbdev->iommu_enabled) { - pci_dereg_ioat(pbdev); - } - pbdev->configured = false; - rc = SCLP_RC_NORMAL_COMPLETION; - } - } else { + if (!pbdev) { DPRINTF("sclp deconfig no dev found\n"); rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED; + goto out; + } + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE; + break; + case ZPCI_FS_STANDBY: + rc = SCLP_RC_NO_ACTION_REQUIRED; + break; + default: + if (pbdev->summary_ind) { + pci_dereg_irqs(pbdev); + } + if (pbdev->iommu_enabled) { + pci_dereg_ioat(pbdev); + } + pbdev->state = ZPCI_FS_STANDBY; + rc = SCLP_RC_NORMAL_COMPLETION; + + if (pbdev->release_timer) { + qdev_unplug(DEVICE(pbdev->pdev), NULL); + } } out: psccb->header.response_code = cpu_to_be16(rc); } -static uint32_t s390_pci_get_pfid(PCIDevice *pdev) +static S390PCIBusDevice *s390_pci_find_dev_by_uid(uint16_t uid) { - return PCI_SLOT(pdev->devfn); -} + int i; + S390PCIBusDevice *pbdev; + S390pciState *s = s390_get_phb(); -static uint32_t s390_pci_get_pfh(PCIDevice *pdev) -{ - return PCI_SLOT(pdev->devfn) | FH_VIRT; + for (i = 0; i < PCI_SLOT_MAX; i++) { + pbdev = s->pbdev[i]; + if (!pbdev) { + continue; + } + + if (pbdev->uid == uid) { + return pbdev; + } + } + + return NULL; } -S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx) +static S390PCIBusDevice *s390_pci_find_dev_by_target(const char *target) { - S390PCIBusDevice *pbdev; int i; - int j = 0; - S390pciState *s = S390_PCI_HOST_BRIDGE( - object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); + S390PCIBusDevice *pbdev; + S390pciState *s = s390_get_phb(); - if (!s) { + if (!target) { return NULL; } for (i = 0; i < PCI_SLOT_MAX; i++) { - pbdev = &s->pbdev[i]; - - if (pbdev->fh == 0) { + pbdev = s->pbdev[i]; + if (!pbdev) { continue; } - if (j == idx) { + if (!strcmp(pbdev->target, target)) { return pbdev; } - j++; } return NULL; } +S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx) +{ + S390pciState *s = s390_get_phb(); + + return s->pbdev[idx & FH_MASK_INDEX]; +} + S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh) { + S390pciState *s = s390_get_phb(); S390PCIBusDevice *pbdev; - int i; - S390pciState *s = S390_PCI_HOST_BRIDGE( - object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); - if (!s || !fh) { - return NULL; - } - - for (i = 0; i < PCI_SLOT_MAX; i++) { - pbdev = &s->pbdev[i]; - if (pbdev->fh == fh) { - return pbdev; - } + pbdev = s->pbdev[fh & FH_MASK_INDEX]; + if (pbdev && pbdev->fh == fh) { + return pbdev; } return NULL; @@ -228,12 +269,7 @@ static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh, uint32_t fid, uint64_t faddr, uint32_t e) { SeiContainer *sei_cont; - S390pciState *s = S390_PCI_HOST_BRIDGE( - object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); - - if (!s) { - return; - } + S390pciState *s = s390_get_phb(); sei_cont = g_malloc0(sizeof(SeiContainer)); sei_cont->fh = fh; @@ -253,9 +289,8 @@ static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh, s390_pci_generate_event(2, pec, fh, fid, 0, 0); } -static void 
s390_pci_generate_error_event(uint16_t pec, uint32_t fh, - uint32_t fid, uint64_t faddr, - uint32_t e) +void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid, + uint64_t faddr, uint32_t e) { s390_pci_generate_event(1, pec, fh, fid, faddr, e); } @@ -357,8 +392,14 @@ static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr, .perm = IOMMU_NONE, }; - if (!pbdev->configured || !pbdev->pdev || - !(pbdev->fh & FH_ENABLED) || !pbdev->iommu_enabled) { + switch (pbdev->state) { + case ZPCI_FS_ENABLED: + case ZPCI_FS_BLOCKED: + if (!pbdev->iommu_enabled) { + return ret; + } + break; + default: return ret; } @@ -377,30 +418,13 @@ static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr, return ret; } - if (!pbdev->g_iota) { - pbdev->error_state = true; - pbdev->lgstg_blocked = true; - s390_pci_generate_error_event(ERR_EVENT_INVALAS, pbdev->fh, pbdev->fid, - addr, 0); - return ret; - } - if (addr < pbdev->pba || addr > pbdev->pal) { - pbdev->error_state = true; - pbdev->lgstg_blocked = true; - s390_pci_generate_error_event(ERR_EVENT_OORANGE, pbdev->fh, pbdev->fid, - addr, 0); return ret; } pte = s390_guest_io_table_walk(s390_pci_get_table_origin(pbdev->g_iota), addr); - if (!pte) { - pbdev->error_state = true; - pbdev->lgstg_blocked = true; - s390_pci_generate_error_event(ERR_EVENT_SERR, pbdev->fh, pbdev->fid, - addr, ERR_EVENT_Q_BIT); return ret; } @@ -426,7 +450,7 @@ static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) { S390pciState *s = opaque; - return &s->pbdev[PCI_SLOT(devfn)].as; + return &s->iommu[PCI_SLOT(devfn)]->as; } static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set) @@ -454,22 +478,22 @@ static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data, { S390PCIBusDevice *pbdev; uint32_t io_int_word; - uint32_t fid = data >> ZPCI_MSI_VEC_BITS; + uint32_t idx = data >> ZPCI_MSI_VEC_BITS; uint32_t vec = data & ZPCI_MSI_VEC_MASK; uint64_t ind_bit; uint32_t sum_bit; uint32_t e = 0; - DPRINTF("write_msix data 0x%" PRIx64 " fid %d vec 0x%x\n", data, fid, vec); + DPRINTF("write_msix data 0x%" PRIx64 " idx %d vec 0x%x\n", data, idx, vec); - pbdev = s390_pci_find_dev_by_fid(fid); + pbdev = s390_pci_find_dev_by_idx(idx); if (!pbdev) { e |= (vec << ERR_EVENT_MVN_OFFSET); - s390_pci_generate_error_event(ERR_EVENT_NOMSI, 0, fid, addr, e); + s390_pci_generate_error_event(ERR_EVENT_NOMSI, idx, 0, addr, e); return; } - if (!(pbdev->fh & FH_ENABLED)) { + if (pbdev->state != ZPCI_FS_ENABLED) { return; } @@ -498,17 +522,15 @@ static const MemoryRegionOps s390_msi_ctrl_ops = { void s390_pci_iommu_enable(S390PCIBusDevice *pbdev) { - uint64_t size = pbdev->pal - pbdev->pba + 1; - - memory_region_init_iommu(&pbdev->iommu_mr, OBJECT(&pbdev->mr), - &s390_iommu_ops, "iommu-s390", size); - memory_region_add_subregion(&pbdev->mr, pbdev->pba, &pbdev->iommu_mr); + memory_region_init_iommu(&pbdev->iommu_mr, OBJECT(&pbdev->iommu->mr), + &s390_iommu_ops, "iommu-s390", pbdev->pal + 1); + memory_region_add_subregion(&pbdev->iommu->mr, 0, &pbdev->iommu_mr); pbdev->iommu_enabled = true; } void s390_pci_iommu_disable(S390PCIBusDevice *pbdev) { - memory_region_del_subregion(&pbdev->mr, &pbdev->iommu_mr); + memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->iommu_mr); object_unparent(OBJECT(&pbdev->iommu_mr)); pbdev->iommu_enabled = false; } @@ -516,13 +538,15 @@ void s390_pci_iommu_disable(S390PCIBusDevice *pbdev) static void s390_pcihost_init_as(S390pciState *s) { int i; - S390PCIBusDevice *pbdev; + S390PCIIOMMU 
*iommu; for (i = 0; i < PCI_SLOT_MAX; i++) { - pbdev = &s->pbdev[i]; - memory_region_init(&pbdev->mr, OBJECT(s), + iommu = g_malloc0(sizeof(S390PCIIOMMU)); + memory_region_init(&iommu->mr, OBJECT(s), "iommu-root-s390", UINT64_MAX); - address_space_init(&pbdev->as, &pbdev->mr, "iommu-pci"); + address_space_init(&iommu->as, &iommu->mr, "iommu-pci"); + + s->iommu[i] = iommu; } memory_region_init_io(&s->msix_notify_mr, OBJECT(s), @@ -549,6 +573,10 @@ static int s390_pcihost_init(SysBusDevice *dev) bus = BUS(b); qbus_set_hotplug_handler(bus, DEVICE(dev), NULL); phb->bus = b; + + s->bus = S390_PCI_BUS(qbus_create(TYPE_S390_PCI_BUS, DEVICE(s), NULL)); + qbus_set_hotplug_handler(BUS(s->bus), DEVICE(s), NULL); + QTAILQ_INIT(&s->pending_sei); return 0; } @@ -581,51 +609,155 @@ static int s390_pcihost_setup_msix(S390PCIBusDevice *pbdev) return 0; } +static S390PCIBusDevice *s390_pci_device_new(const char *target) +{ + DeviceState *dev = NULL; + S390pciState *s = s390_get_phb(); + + dev = qdev_try_create(BUS(s->bus), TYPE_S390_PCI_DEVICE); + if (!dev) { + return NULL; + } + + qdev_prop_set_string(dev, "target", target); + qdev_init_nofail(dev); + + return S390_PCI_DEVICE(dev); +} + static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - PCIDevice *pci_dev = PCI_DEVICE(dev); - S390PCIBusDevice *pbdev; - S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pci_dev) - ->qbus.parent); + PCIDevice *pdev = NULL; + S390PCIBusDevice *pbdev = NULL; + S390pciState *s = s390_get_phb(); - pbdev = &s->pbdev[PCI_SLOT(pci_dev->devfn)]; + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + pdev = PCI_DEVICE(dev); - pbdev->fid = s390_pci_get_pfid(pci_dev); - pbdev->pdev = pci_dev; - pbdev->configured = true; - pbdev->fh = s390_pci_get_pfh(pci_dev); + if (!dev->id) { + /* In the case the PCI device does not define an id */ + /* we generate one based on the PCI address */ + dev->id = g_strdup_printf("auto_%02x:%02x.%01x", + pci_bus_num(pdev->bus), + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + } - s390_pcihost_setup_msix(pbdev); + pbdev = s390_pci_find_dev_by_target(dev->id); + if (!pbdev) { + pbdev = s390_pci_device_new(dev->id); + if (!pbdev) { + error_setg(errp, "create zpci device failed"); + } + } - if (dev->hotplugged) { - s390_pci_generate_plug_event(HP_EVENT_RESERVED_TO_STANDBY, - pbdev->fh, pbdev->fid); - s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED, - pbdev->fh, pbdev->fid); + if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) { + pbdev->fh |= FH_SHM_VFIO; + } else { + pbdev->fh |= FH_SHM_EMUL; + } + + pbdev->pdev = pdev; + pbdev->iommu = s->iommu[PCI_SLOT(pdev->devfn)]; + pbdev->state = ZPCI_FS_STANDBY; + s390_pcihost_setup_msix(pbdev); + + if (dev->hotplugged) { + s390_pci_generate_plug_event(HP_EVENT_RESERVED_TO_STANDBY, + pbdev->fh, pbdev->fid); + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { + int idx; + + pbdev = S390_PCI_DEVICE(dev); + for (idx = 0; idx < PCI_SLOT_MAX; idx++) { + if (!s->pbdev[idx]) { + s->pbdev[idx] = pbdev; + pbdev->fh = idx; + return; + } + } + + error_setg(errp, "no slot for plugging zpci device"); } } +static void s390_pcihost_timer_cb(void *opaque) +{ + S390PCIBusDevice *pbdev = opaque; + + if (pbdev->summary_ind) { + pci_dereg_irqs(pbdev); + } + if (pbdev->iommu_enabled) { + pci_dereg_ioat(pbdev); + } + + pbdev->state = ZPCI_FS_STANDBY; + s390_pci_generate_plug_event(HP_EVENT_CONFIGURED_TO_STBRES, + pbdev->fh, pbdev->fid); + qdev_unplug(DEVICE(pbdev), NULL); +} + static void 
s390_pcihost_hot_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - PCIDevice *pci_dev = PCI_DEVICE(dev); - S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pci_dev) - ->qbus.parent); - S390PCIBusDevice *pbdev = &s->pbdev[PCI_SLOT(pci_dev->devfn)]; + int i; + PCIDevice *pci_dev = NULL; + S390PCIBusDevice *pbdev = NULL; + S390pciState *s = s390_get_phb(); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + pci_dev = PCI_DEVICE(dev); - if (pbdev->configured) { - pbdev->configured = false; - s390_pci_generate_plug_event(HP_EVENT_CONFIGURED_TO_STBRES, + for (i = 0 ; i < PCI_SLOT_MAX; i++) { + if (s->pbdev[i]->pdev == pci_dev) { + pbdev = s->pbdev[i]; + break; + } + } + + if (!pbdev) { + object_unparent(OBJECT(pci_dev)); + return; + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { + pbdev = S390_PCI_DEVICE(dev); + pci_dev = pbdev->pdev; + } + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + goto out; + case ZPCI_FS_STANDBY: + break; + default: + s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST, pbdev->fh, pbdev->fid); + pbdev->release_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + s390_pcihost_timer_cb, + pbdev); + timer_mod(pbdev->release_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + HOT_UNPLUG_TIMEOUT); + return; + } + + if (pbdev->release_timer && timer_pending(pbdev->release_timer)) { + timer_del(pbdev->release_timer); + timer_free(pbdev->release_timer); + pbdev->release_timer = NULL; } s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED, pbdev->fh, pbdev->fid); - pbdev->fh = 0; - pbdev->fid = 0; - pbdev->pdev = NULL; object_unparent(OBJECT(pci_dev)); + pbdev->pdev = NULL; + pbdev->state = ZPCI_FS_RESERVED; +out: + pbdev->fid = 0; + s->pbdev[pbdev->fh & FH_MASK_INDEX] = NULL; + object_unparent(OBJECT(pbdev)); } static void s390_pcihost_class_init(ObjectClass *klass, void *data) @@ -652,9 +784,178 @@ static const TypeInfo s390_pcihost_info = { } }; +static const TypeInfo s390_pcibus_info = { + .name = TYPE_S390_PCI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(S390PCIBus), +}; + +static uint16_t s390_pci_generate_uid(void) +{ + uint16_t uid = 0; + + do { + uid++; + if (!s390_pci_find_dev_by_uid(uid)) { + return uid; + } + } while (uid < ZPCI_MAX_UID); + + return UID_UNDEFINED; +} + +static uint32_t s390_pci_generate_fid(Error **errp) +{ + uint32_t fid = 0; + + while (fid <= ZPCI_MAX_FID) { + if (!s390_pci_find_dev_by_fid(fid)) { + return fid; + } + + if (fid == ZPCI_MAX_FID) { + break; + } + + fid++; + } + + error_setg(errp, "no free fid could be found"); + return 0; +} + +static void s390_pci_device_realize(DeviceState *dev, Error **errp) +{ + S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev); + + if (!zpci->target) { + error_setg(errp, "target must be defined"); + return; + } + + if (s390_pci_find_dev_by_target(zpci->target)) { + error_setg(errp, "target %s already has an associated zpci device", + zpci->target); + return; + } + + if (zpci->uid == UID_UNDEFINED) { + zpci->uid = s390_pci_generate_uid(); + if (!zpci->uid) { + error_setg(errp, "no free uid could be found"); + return; + } + } else if (s390_pci_find_dev_by_uid(zpci->uid)) { + error_setg(errp, "uid %u already in use", zpci->uid); + return; + } + + if (!zpci->fid_defined) { + Error *local_error = NULL; + + zpci->fid = s390_pci_generate_fid(&local_error); + if (local_error) { + error_propagate(errp, local_error); + return; + } + } else if (s390_pci_find_dev_by_fid(zpci->fid)) { + error_setg(errp, "fid %u already in use", zpci->fid); + return; + 
} + + zpci->state = ZPCI_FS_RESERVED; +} + +static void s390_pci_device_reset(DeviceState *dev) +{ + S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev); + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + return; + case ZPCI_FS_STANDBY: + break; + default: + pbdev->fh &= ~FH_MASK_ENABLE; + pbdev->state = ZPCI_FS_DISABLED; + break; + } + + if (pbdev->summary_ind) { + pci_dereg_irqs(pbdev); + } + if (pbdev->iommu_enabled) { + pci_dereg_ioat(pbdev); + } + + pbdev->fmb_addr = 0; +} + +static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint32_t *ptr = qdev_get_prop_ptr(DEVICE(obj), prop); + + visit_type_uint32(v, name, ptr, errp); +} + +static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj); + Property *prop = opaque; + uint32_t *ptr = qdev_get_prop_ptr(dev, prop); + + if (dev->realized) { + qdev_prop_set_after_realize(dev, name, errp); + return; + } + + visit_type_uint32(v, name, ptr, errp); + zpci->fid_defined = true; +} + +static PropertyInfo s390_pci_fid_propinfo = { + .name = "zpci_fid", + .get = s390_pci_get_fid, + .set = s390_pci_set_fid, +}; + +#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \ + DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t) + +static Property s390_pci_device_properties[] = { + DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED), + DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid), + DEFINE_PROP_STRING("target", S390PCIBusDevice, target), + DEFINE_PROP_END_OF_LIST(), +}; + +static void s390_pci_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "zpci device"; + dc->reset = s390_pci_device_reset; + dc->bus_type = TYPE_S390_PCI_BUS; + dc->realize = s390_pci_device_realize; + dc->props = s390_pci_device_properties; +} + +static const TypeInfo s390_pci_device_info = { + .name = TYPE_S390_PCI_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(S390PCIBusDevice), + .class_init = s390_pci_device_class_init, +}; + static void s390_pci_register_types(void) { type_register_static(&s390_pcihost_info); + type_register_static(&s390_pcibus_info); + type_register_static(&s390_pci_device_info); } type_init(s390_pci_register_types) diff --git a/hw/s390x/s390-pci-bus.h b/hw/s390x/s390-pci-bus.h index 2c852d47fa..f1fbd3c1b6 100644 --- a/hw/s390x/s390-pci-bus.h +++ b/hw/s390x/s390-pci-bus.h @@ -21,16 +21,31 @@ #include "hw/s390x/css.h" #define TYPE_S390_PCI_HOST_BRIDGE "s390-pcihost" -#define FH_VIRT 0x00ff0000 -#define ENABLE_BIT_OFFSET 31 -#define FH_ENABLED (1 << ENABLE_BIT_OFFSET) +#define TYPE_S390_PCI_BUS "s390-pcibus" +#define TYPE_S390_PCI_DEVICE "zpci" +#define FH_MASK_ENABLE 0x80000000 +#define FH_MASK_INSTANCE 0x7f000000 +#define FH_MASK_SHM 0x00ff0000 +#define FH_MASK_INDEX 0x0000001f +#define FH_SHM_VFIO 0x00010000 +#define FH_SHM_EMUL 0x00020000 #define S390_PCIPT_ADAPTER 2 +#define ZPCI_MAX_FID 0xffffffff +#define ZPCI_MAX_UID 0xffff +#define UID_UNDEFINED 0 +#define UID_CHECKING_ENABLED 0x01 +#define HOT_UNPLUG_TIMEOUT (NANOSECONDS_PER_SECOND * 60 * 5) #define S390_PCI_HOST_BRIDGE(obj) \ OBJECT_CHECK(S390pciState, (obj), TYPE_S390_PCI_HOST_BRIDGE) +#define S390_PCI_BUS(obj) \ + OBJECT_CHECK(S390PCIBus, (obj), TYPE_S390_PCI_BUS) +#define S390_PCI_DEVICE(obj) \ + OBJECT_CHECK(S390PCIBusDevice, (obj), TYPE_S390_PCI_DEVICE) #define HP_EVENT_TO_CONFIGURED 0x0301 #define 
HP_EVENT_RESERVED_TO_STANDBY 0x0302 +#define HP_EVENT_DECONFIGURE_REQUEST 0x0303 #define HP_EVENT_CONFIGURED_TO_STBRES 0x0304 #define HP_EVENT_STANDBY_TO_RESERVED 0x0308 @@ -150,6 +165,34 @@ enum ZpciIoatDtype { #define ZPCI_TABLE_VALID_MASK 0x20 #define ZPCI_TABLE_PROT_MASK 0x200 +/* PCI Function States + * + * reserved: default; device has just been plugged or is in progress of being + * unplugged + * standby: device is present but not configured; transition from any + * configured state/to this state via sclp configure/deconfigure + * + * The following states make up the "configured" meta-state: + * disabled: device is configured but not enabled; transition between this + * state and enabled via clp enable/disable + * enbaled: device is ready for use; transition to disabled via clp disable; + * may enter an error state + * blocked: ignore all DMA and interrupts; transition back to enabled or from + * error state via mpcifc + * error: an error occured; transition back to enabled via mpcifc + * permanent error: an unrecoverable error occured; transition to standby via + * sclp deconfigure + */ +typedef enum { + ZPCI_FS_RESERVED, + ZPCI_FS_STANDBY, + ZPCI_FS_DISABLED, + ZPCI_FS_ENABLED, + ZPCI_FS_BLOCKED, + ZPCI_FS_ERROR, + ZPCI_FS_PERMANENT_ERROR, +} ZpciState; + typedef struct SeiContainer { QTAILQ_ENTRY(SeiContainer) link; uint32_t fid; @@ -214,14 +257,21 @@ typedef struct S390MsixInfo { uint32_t pba_offset; } S390MsixInfo; +typedef struct S390PCIIOMMU { + AddressSpace as; + MemoryRegion mr; +} S390PCIIOMMU; + typedef struct S390PCIBusDevice { + DeviceState qdev; PCIDevice *pdev; - bool configured; - bool error_state; - bool lgstg_blocked; + ZpciState state; bool iommu_enabled; + char *target; + uint16_t uid; uint32_t fh; uint32_t fid; + bool fid_defined; uint64_t g_iota; uint64_t pba; uint64_t pal; @@ -231,16 +281,22 @@ typedef struct S390PCIBusDevice { uint8_t sum; S390MsixInfo msix; AdapterRoutes routes; - AddressSpace as; - MemoryRegion mr; + S390PCIIOMMU *iommu; MemoryRegion iommu_mr; IndAddr *summary_ind; IndAddr *indicator; + QEMUTimer *release_timer; } S390PCIBusDevice; +typedef struct S390PCIBus { + BusState qbus; +} S390PCIBus; + typedef struct S390pciState { PCIHostState parent_obj; - S390PCIBusDevice pbdev[PCI_SLOT_MAX]; + S390PCIBus *bus; + S390PCIBusDevice *pbdev[PCI_SLOT_MAX]; + S390PCIIOMMU *iommu[PCI_SLOT_MAX]; AddressSpace msix_notify_as; MemoryRegion msix_notify_mr; QTAILQ_HEAD(, SeiContainer) pending_sei; @@ -252,8 +308,11 @@ void s390_pci_sclp_configure(SCCB *sccb); void s390_pci_sclp_deconfigure(SCCB *sccb); void s390_pci_iommu_enable(S390PCIBusDevice *pbdev); void s390_pci_iommu_disable(S390PCIBusDevice *pbdev); +void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid, + uint64_t faddr, uint32_t e); S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx); S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh); S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid); +S390PCIBusDevice *s390_pci_find_next_avail_dev(S390PCIBusDevice *pbdev); #endif diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c index 479375f65d..331bc4cfde 100644 --- a/hw/s390x/s390-pci-inst.c +++ b/hw/s390x/s390-pci-inst.c @@ -37,9 +37,9 @@ static void s390_set_status_code(CPUS390XState *env, static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) { - S390PCIBusDevice *pbdev; - uint32_t res_code, initial_l2, g_l2, finish; - int rc, idx; + S390PCIBusDevice *pbdev = NULL; + uint32_t res_code, initial_l2, g_l2; + int rc, i; uint64_t resume_token; rc = 0; @@ -56,8 
+56,7 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) } if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 || - ldq_p(&rrb->request.reserved1) != 0 || - ldq_p(&rrb->request.reserved2) != 0) { + ldq_p(&rrb->request.reserved1) != 0) { res_code = CLP_RC_RESNOT0; rc = -EINVAL; goto out; @@ -72,6 +71,8 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) rc = -EINVAL; goto out; } + } else { + pbdev = s390_pci_find_next_avail_dev(NULL); } if (lduw_p(&rrb->response.hdr.len) < 48) { @@ -91,43 +92,40 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) stl_p(&rrb->response.fmt, 0); stq_p(&rrb->response.reserved1, 0); - stq_p(&rrb->response.reserved2, 0); - stl_p(&rrb->response.mdd, FH_VIRT); + stl_p(&rrb->response.mdd, FH_MASK_SHM); stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS); + rrb->response.flags = UID_CHECKING_ENABLED; rrb->response.entry_size = sizeof(ClpFhListEntry); - finish = 0; - idx = resume_token; + + i = 0; g_l2 = LIST_PCI_HDR_LEN; - do { - pbdev = s390_pci_find_dev_by_idx(idx); - if (!pbdev) { - finish = 1; - break; - } - stw_p(&rrb->response.fh_list[idx - resume_token].device_id, + while (g_l2 < initial_l2 && pbdev) { + stw_p(&rrb->response.fh_list[i].device_id, pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID)); - stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id, + stw_p(&rrb->response.fh_list[i].vendor_id, pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID)); - stl_p(&rrb->response.fh_list[idx - resume_token].config, - pbdev->configured << 31); - stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid); - stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh); + /* Ignore RESERVED devices. */ + stl_p(&rrb->response.fh_list[i].config, + pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31); + stl_p(&rrb->response.fh_list[i].fid, pbdev->fid); + stl_p(&rrb->response.fh_list[i].fh, pbdev->fh); g_l2 += sizeof(ClpFhListEntry); /* Add endian check for DPRINTF? 
*/ DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n", - g_l2, - lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id), - lduw_p(&rrb->response.fh_list[idx - resume_token].device_id), - ldl_p(&rrb->response.fh_list[idx - resume_token].fid), - ldl_p(&rrb->response.fh_list[idx - resume_token].fh)); - idx++; - } while (g_l2 < initial_l2); - - if (finish == 1) { + g_l2, + lduw_p(&rrb->response.fh_list[i].vendor_id), + lduw_p(&rrb->response.fh_list[i].device_id), + ldl_p(&rrb->response.fh_list[i].fid), + ldl_p(&rrb->response.fh_list[i].fh)); + pbdev = s390_pci_find_next_avail_dev(pbdev); + i++; + } + + if (!pbdev) { resume_token = 0; } else { - resume_token = idx; + resume_token = pbdev->fh & FH_MASK_INDEX; } stq_p(&rrb->response.resume_token, resume_token); stw_p(&rrb->response.hdr.len, g_l2); @@ -212,14 +210,35 @@ int clp_service_call(S390CPU *cpu, uint8_t r2) switch (reqsetpci->oc) { case CLP_SET_ENABLE_PCI_FN: - pbdev->fh = pbdev->fh | FH_ENABLED; + switch (reqsetpci->ndas) { + case 0: + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS); + goto out; + case 1: + break; + default: + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES); + goto out; + } + + if (pbdev->fh & FH_MASK_ENABLE) { + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); + goto out; + } + + pbdev->fh |= FH_MASK_ENABLE; + pbdev->state = ZPCI_FS_ENABLED; stl_p(&ressetpci->fh, pbdev->fh); stw_p(&ressetpci->hdr.rsp, CLP_RC_OK); break; case CLP_SET_DISABLE_PCI_FN: - pbdev->fh = pbdev->fh & ~FH_ENABLED; - pbdev->error_state = false; - pbdev->lgstg_blocked = false; + if (!(pbdev->fh & FH_MASK_ENABLE)) { + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); + goto out; + } + device_reset(DEVICE(pbdev)); + pbdev->fh &= ~FH_MASK_ENABLE; + pbdev->state = ZPCI_FS_DISABLED; stl_p(&ressetpci->fh, pbdev->fh); stw_p(&ressetpci->hdr.rsp, CLP_RC_OK); break; @@ -256,9 +275,10 @@ int clp_service_call(S390CPU *cpu, uint8_t r2) stq_p(&resquery->sdma, ZPCI_SDMA_ADDR); stq_p(&resquery->edma, ZPCI_EDMA_ADDR); + stl_p(&resquery->fid, pbdev->fid); stw_p(&resquery->pchid, 0); stw_p(&resquery->ug, 1); - stl_p(&resquery->uid, pbdev->fid); + stl_p(&resquery->uid, pbdev->uid); stw_p(&resquery->hdr.rsp, CLP_RC_OK); break; } @@ -317,16 +337,25 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2) offset = env->regs[r2 + 1]; pbdev = s390_pci_find_dev_by_fh(fh); - if (!pbdev || !(pbdev->fh & FH_ENABLED)) { + if (!pbdev) { DPRINTF("pcilg no pci dev\n"); setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } - if (pbdev->lgstg_blocked) { + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + case ZPCI_FS_DISABLED: + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + case ZPCI_FS_ERROR: setcc(cpu, ZPCI_PCI_LS_ERR); s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED); return 0; + default: + break; } if (pcias < 6) { @@ -390,7 +419,8 @@ static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset, msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL; - val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS); + val = pci_get_long(msg_data) | + ((pbdev->fh & FH_MASK_INDEX) << ZPCI_MSI_VEC_BITS); pci_set_long(msg_data, val); DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data); } @@ -434,16 +464,25 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2) offset = env->regs[r2 + 1]; pbdev = s390_pci_find_dev_by_fh(fh); - if (!pbdev || !(pbdev->fh & FH_ENABLED)) { + if (!pbdev) { DPRINTF("pcistg no pci dev\n"); setcc(cpu, 
ZPCI_PCI_LS_INVAL_HANDLE); return 0; } - if (pbdev->lgstg_blocked) { + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + case ZPCI_FS_DISABLED: + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + case ZPCI_FS_ERROR: setcc(cpu, ZPCI_PCI_LS_ERR); s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED); return 0; + default: + break; } data = env->regs[r1]; @@ -525,18 +564,55 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2) end = start + env->regs[r2 + 1]; pbdev = s390_pci_find_dev_by_fh(fh); - if (!pbdev || !(pbdev->fh & FH_ENABLED)) { + if (!pbdev) { DPRINTF("rpcit no pci dev\n"); setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); goto out; } + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + case ZPCI_FS_DISABLED: + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + case ZPCI_FS_ERROR: + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER); + return 0; + default: + break; + } + + if (!pbdev->g_iota) { + pbdev->state = ZPCI_FS_ERROR; + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES); + s390_pci_generate_error_event(ERR_EVENT_INVALAS, pbdev->fh, pbdev->fid, + start, 0); + goto out; + } + + if (end < pbdev->pba || start > pbdev->pal) { + pbdev->state = ZPCI_FS_ERROR; + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES); + s390_pci_generate_error_event(ERR_EVENT_OORANGE, pbdev->fh, pbdev->fid, + start, 0); + goto out; + } + mr = &pbdev->iommu_mr; while (start < end) { entry = mr->iommu_ops->translate(mr, start, 0); if (!entry.translated_addr) { + pbdev->state = ZPCI_FS_ERROR; setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES); + s390_pci_generate_error_event(ERR_EVENT_SERR, pbdev->fh, pbdev->fid, + start, ERR_EVENT_Q_BIT); goto out; } @@ -589,16 +665,25 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, } pbdev = s390_pci_find_dev_by_fh(fh); - if (!pbdev || !(pbdev->fh & FH_ENABLED)) { + if (!pbdev) { DPRINTF("pcistb no pci dev fh 0x%x\n", fh); setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } - if (pbdev->lgstg_blocked) { + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + case ZPCI_FS_DISABLED: + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + case ZPCI_FS_ERROR: setcc(cpu, ZPCI_PCI_LS_ERR); s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED); return 0; + default: + break; } mr = pbdev->pdev->io_regions[pcias].memory; @@ -742,12 +827,23 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) } pbdev = s390_pci_find_dev_by_fh(fh); - if (!pbdev || !(pbdev->fh & FH_ENABLED)) { + if (!pbdev) { DPRINTF("mpcifc no pci dev fh 0x%x\n", fh); setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + case ZPCI_FS_DISABLED: + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + default: + break; + } + if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { return 0; } @@ -814,11 +910,25 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) } break; case ZPCI_MOD_FC_RESET_ERROR: - pbdev->error_state = false; - pbdev->lgstg_blocked = false; + switch (pbdev->state) { + case ZPCI_FS_BLOCKED: + case ZPCI_FS_ERROR: + pbdev->state = ZPCI_FS_ENABLED; + break; + default: + cc = ZPCI_PCI_LS_ERR; + 
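/*
 * Illustrative sketch only, not part of this patch: the ZpciState checks that
 * the hunks above repeat in pcilg, pcistg and pcistb could be factored into a
 * helper along these lines. The name and signature are hypothetical; the types
 * and macros are the ones the surrounding files define.
 */
static int zpci_check_fn_state(S390CPU *cpu, S390PCIBusDevice *pbdev,
                               uint8_t r)
{
    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        /* Function is not usable through this handle. */
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return -1;
    case ZPCI_FS_ERROR:
        /* Function is in the error state: report it as blocked. */
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(&cpu->env, r, ZPCI_PCI_ST_BLOCKED);
        return -1;
    default:
        /* ZPCI_FS_ENABLED and ZPCI_FS_BLOCKED fall through to the access. */
        return 0;
    }
}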
s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } break; case ZPCI_MOD_FC_RESET_BLOCK: - pbdev->lgstg_blocked = false; + switch (pbdev->state) { + case ZPCI_FS_ERROR: + pbdev->state = ZPCI_FS_BLOCKED; + break; + default: + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } break; case ZPCI_MOD_FC_SET_MEASURE: pbdev->fmb_addr = ldq_p(&fib.fmb_addr); @@ -835,6 +945,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) { CPUS390XState *env = &cpu->env; + uint8_t dmaas; uint32_t fh; ZpciFib fib; S390PCIBusDevice *pbdev; @@ -847,19 +958,59 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) } fh = env->regs[r1] >> 32; + dmaas = (env->regs[r1] >> 16) & 0xff; + + if (dmaas) { + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS); + return 0; + } if (fiba & 0x7) { program_interrupt(env, PGM_SPECIFICATION, 6); return 0; } - pbdev = s390_pci_find_dev_by_fh(fh); + pbdev = s390_pci_find_dev_by_idx(fh & FH_MASK_INDEX); if (!pbdev) { setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } memset(&fib, 0, sizeof(fib)); + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + case ZPCI_FS_DISABLED: + if (fh & FH_MASK_ENABLE) { + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + goto out; + /* BLOCKED bit is set to one coincident with the setting of ERROR bit. + * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */ + case ZPCI_FS_ERROR: + fib.fc |= 0x20; + case ZPCI_FS_BLOCKED: + fib.fc |= 0x40; + case ZPCI_FS_ENABLED: + fib.fc |= 0x80; + if (pbdev->iommu_enabled) { + fib.fc |= 0x10; + } + if (!(fh & FH_MASK_ENABLE)) { + env->regs[r1] |= 1ULL << 63; + } + break; + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR); + return 0; + } + stq_p(&fib.pba, pbdev->pba); stq_p(&fib.pal, pbdev->pal); stq_p(&fib.iota, pbdev->g_iota); @@ -872,22 +1023,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset; stl_p(&fib.data, data); - if (pbdev->fh & FH_ENABLED) { - fib.fc |= 0x80; - } - - if (pbdev->error_state) { - fib.fc |= 0x40; - } - - if (pbdev->lgstg_blocked) { - fib.fc |= 0x20; - } - - if (pbdev->g_iota) { - fib.fc |= 0x10; - } - +out: if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { return 0; } diff --git a/hw/s390x/s390-pci-inst.h b/hw/s390x/s390-pci-inst.h index b084f2346b..e1c2ee1529 100644 --- a/hw/s390x/s390-pci-inst.h +++ b/hw/s390x/s390-pci-inst.h @@ -104,7 +104,7 @@ typedef struct ClpRspListPci { uint64_t resume_token; uint32_t mdd; uint16_t max_fn; - uint8_t reserved2; + uint8_t flags; uint8_t entry_size; ClpFhListEntry fh_list[CLP_FH_LIST_NR_ENTRIES]; } QEMU_PACKED ClpRspListPci; @@ -249,6 +249,11 @@ typedef struct ClpReqRspQueryPciGrp { #define ZPCI_MOD_FC_RESET_BLOCK 9 #define ZPCI_MOD_FC_SET_MEASURE 10 +/* Store PCI Function Controls status codes */ +#define ZPCI_STPCIFC_ST_PERM_ERROR 8 +#define ZPCI_STPCIFC_ST_INVAL_DMAAS 28 +#define ZPCI_STPCIFC_ST_ERROR_RECOVER 40 + /* FIB function controls */ #define ZPCI_FIB_FC_ENABLED 0x80 #define ZPCI_FIB_FC_ERROR 0x40 diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 52f079a884..caf0a682a7 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ 
b/hw/s390x/s390-virtio-ccw.c @@ -27,9 +27,10 @@ #include "hw/compat.h" #include "ipl.h" #include "hw/s390x/s390-virtio-ccw.h" +#include "hw/s390x/css-bridge.h" static const char *const reset_dev_types[] = { - "virtual-css-bridge", + TYPE_VIRTUAL_CSS_BRIDGE, "s390-sclp-event-facility", "s390-flic", "diag288", diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 8b709e362e..a554a24d06 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -33,31 +33,11 @@ #include "hw/s390x/css.h" #include "virtio-ccw.h" #include "trace.h" +#include "hw/s390x/css-bridge.h" static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, VirtioCcwDevice *dev); -static void virtual_css_bus_reset(BusState *qbus) -{ - /* This should actually be modelled via the generic css */ - css_reset(); -} - - -static void virtual_css_bus_class_init(ObjectClass *klass, void *data) -{ - BusClass *k = BUS_CLASS(klass); - - k->reset = virtual_css_bus_reset; -} - -static const TypeInfo virtual_css_bus_info = { - .name = TYPE_VIRTUAL_CSS_BUS, - .parent = TYPE_BUS, - .instance_size = sizeof(VirtualCssBus), - .class_init = virtual_css_bus_class_init, -}; - VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch) { VirtIODevice *vdev = NULL; @@ -117,32 +97,13 @@ static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, int n, bool assign) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - SubchDev *sch = dev->sch; + CcwDevice *ccw_dev = CCW_DEVICE(dev); + SubchDev *sch = ccw_dev->sch; uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid; return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign); } -VirtualCssBus *virtual_css_bus_init(void) -{ - VirtualCssBus *cbus; - BusState *bus; - DeviceState *dev; - - /* Create bridge device */ - dev = qdev_create(NULL, "virtual-css-bridge"); - qdev_init_nofail(dev); - - /* Create bus on bridge device */ - bus = qbus_create(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css"); - cbus = VIRTUAL_CSS_BUS(bus); - - /* Enable hotplugging */ - qbus_set_hotplug_handler(bus, dev, &error_abort); - - return cbus; -} - /* Communication blocks used by several channel commands. */ typedef struct VqInfoBlockLegacy { uint64_t queue; @@ -234,6 +195,8 @@ static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info, static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev) { + CcwDevice *ccw_dev = CCW_DEVICE(dev); + virtio_ccw_stop_ioeventfd(dev); virtio_reset(vdev); if (dev->indicators) { @@ -248,7 +211,7 @@ static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev) release_indicator(&dev->routes.adapter, dev->summary_indicator); dev->summary_indicator = NULL; } - dev->sch->thinint_active = false; + ccw_dev->sch->thinint_active = false; } static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len, @@ -703,116 +666,28 @@ static void virtio_sch_disable_cb(SubchDev *sch) static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) { - unsigned int schid; - bool found = false; - SubchDev *sch; - Error *err = NULL; VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev); + CcwDevice *ccw_dev = CCW_DEVICE(dev); + SubchDev *sch = css_create_virtual_sch(ccw_dev->bus_id, errp); + Error *err = NULL; - sch = g_malloc0(sizeof(SubchDev)); - - sch->driver_data = dev; - dev->sch = sch; - - dev->indicators = NULL; - - /* Initialize subchannel structure. */ - sch->channel_prog = 0x0; - sch->last_cmd_valid = false; - sch->thinint_active = false; - /* - * Use a device number if provided. 
Otherwise, fall back to subchannel - * number. - */ - if (dev->bus_id.valid) { - /* Enforce use of virtual cssid. */ - if (dev->bus_id.cssid != VIRTUAL_CSSID) { - error_setg(errp, "cssid %x not valid for virtio devices", - dev->bus_id.cssid); - goto out_err; - } - if (css_devno_used(dev->bus_id.cssid, dev->bus_id.ssid, - dev->bus_id.devid)) { - error_setg(errp, "Device %x.%x.%04x already exists", - dev->bus_id.cssid, dev->bus_id.ssid, - dev->bus_id.devid); - goto out_err; - } - sch->cssid = dev->bus_id.cssid; - sch->ssid = dev->bus_id.ssid; - sch->devno = dev->bus_id.devid; - - /* Find the next free id. */ - for (schid = 0; schid <= MAX_SCHID; schid++) { - if (!css_find_subch(1, sch->cssid, sch->ssid, schid)) { - sch->schid = schid; - css_subch_assign(sch->cssid, sch->ssid, sch->schid, - sch->devno, sch); - found = true; - break; - } - } - if (!found) { - error_setg(errp, "No free subchannel found for %x.%x.%04x", - sch->cssid, sch->ssid, sch->devno); - goto out_err; - } - trace_virtio_ccw_new_device(sch->cssid, sch->ssid, sch->schid, - sch->devno, "user-configured"); - } else { - unsigned int cssid = VIRTUAL_CSSID, ssid, devno; - - for (ssid = 0; ssid <= MAX_SSID; ssid++) { - for (schid = 0; schid <= MAX_SCHID; schid++) { - if (!css_find_subch(1, cssid, ssid, schid)) { - sch->cssid = cssid; - sch->ssid = ssid; - sch->schid = schid; - devno = schid; - /* - * If the devno is already taken, look further in this - * subchannel set. - */ - while (css_devno_used(cssid, ssid, devno)) { - if (devno == MAX_SCHID) { - devno = 0; - } else if (devno == schid - 1) { - error_setg(errp, "No free devno found"); - goto out_err; - } else { - devno++; - } - } - sch->devno = devno; - css_subch_assign(cssid, ssid, schid, devno, sch); - found = true; - break; - } - } - if (found) { - break; - } - } - if (!found) { - error_setg(errp, "Virtual channel subsystem is full!"); - goto out_err; - } - trace_virtio_ccw_new_device(cssid, ssid, schid, devno, - "auto-configured"); + if (!sch) { + return; } - /* Build initial schib. */ - css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE); - + sch->driver_data = dev; sch->ccw_cb = virtio_ccw_cb; sch->disable_cb = virtio_sch_disable_cb; - - /* Build senseid data. */ - memset(&sch->id, 0, sizeof(SenseId)); sch->id.reserved = 0xff; sch->id.cu_type = VIRTIO_CCW_CU_TYPE; - + ccw_dev->sch = sch; + dev->indicators = NULL; dev->revision = -1; + css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE); + + trace_virtio_ccw_new_device( + sch->cssid, sch->ssid, sch->schid, sch->devno, + ccw_dev->bus_id.valid ? 
"user-configured" : "auto-configured"); if (k->realize) { k->realize(dev, &err); @@ -820,19 +695,15 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) if (err) { error_propagate(errp, err); css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); - goto out_err; + ccw_dev->sch = NULL; + g_free(sch); } - - return; - -out_err: - dev->sch = NULL; - g_free(sch); } static int virtio_ccw_exit(VirtioCcwDevice *dev) { - SubchDev *sch = dev->sch; + CcwDevice *ccw_dev = CCW_DEVICE(dev); + SubchDev *sch = ccw_dev->sch; if (sch) { css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); @@ -1013,7 +884,9 @@ static void virtio_ccw_rng_realize(VirtioCcwDevice *ccw_dev, Error **errp) */ static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d) { - return container_of(d, VirtioCcwDevice, parent_obj); + CcwDevice *ccw_dev = to_ccw_dev_fast(d); + + return container_of(ccw_dev, VirtioCcwDevice, parent_obj); } static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc, @@ -1042,7 +915,8 @@ static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc, static void virtio_ccw_notify(DeviceState *d, uint16_t vector) { VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d); - SubchDev *sch = dev->sch; + CcwDevice *ccw_dev = to_ccw_dev_fast(d); + SubchDev *sch = ccw_dev->sch; uint64_t indicators; /* queue indicators + secondary indicators */ @@ -1100,9 +974,10 @@ static void virtio_ccw_reset(DeviceState *d) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + CcwDevice *ccw_dev = CCW_DEVICE(d); virtio_ccw_reset_virtio(dev, vdev); - css_reset_sch(dev->sch); + css_reset_sch(ccw_dev->sch); } static void virtio_ccw_vmstate_change(DeviceState *d, bool running) @@ -1118,7 +993,7 @@ static void virtio_ccw_vmstate_change(DeviceState *d, bool running) static bool virtio_ccw_query_guest_notifiers(DeviceState *d) { - VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + CcwDevice *dev = CCW_DEVICE(d); return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA); } @@ -1126,8 +1001,9 @@ static bool virtio_ccw_query_guest_notifiers(DeviceState *d) static int virtio_ccw_get_mappings(VirtioCcwDevice *dev) { int r; + CcwDevice *ccw_dev = CCW_DEVICE(dev); - if (!dev->sch->thinint_active) { + if (!ccw_dev->sch->thinint_active) { return -EINVAL; } @@ -1249,7 +1125,8 @@ static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs, { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); - bool with_irqfd = dev->sch->thinint_active && kvm_irqfds_enabled(); + CcwDevice *ccw_dev = CCW_DEVICE(d); + bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled(); int r, n; if (with_irqfd && assigned) { @@ -1308,7 +1185,8 @@ static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f) static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - SubchDev *s = dev->sch; + CcwDevice *ccw_dev = CCW_DEVICE(d); + SubchDev *s = ccw_dev->sch; VirtIODevice *vdev = virtio_ccw_get_vdev(s); subch_device_save(s, f); @@ -1342,7 +1220,8 @@ static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - SubchDev *s = dev->sch; + CcwDevice *ccw_dev = CCW_DEVICE(d); + SubchDev *s = ccw_dev->sch; VirtIODevice *vdev = virtio_ccw_get_vdev(s); int len; @@ -1387,7 +1266,8 @@ static void 
virtio_ccw_device_plugged(DeviceState *d, Error **errp) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); - SubchDev *sch = dev->sch; + CcwDevice *ccw_dev = CCW_DEVICE(d); + SubchDev *sch = ccw_dev->sch; int n = virtio_get_num_queues(vdev); if (virtio_get_num_queues(vdev) > VIRTIO_CCW_QUEUE_MAX) { @@ -1431,7 +1311,7 @@ static void virtio_ccw_device_unplugged(DeviceState *d) /**************** Virtio-ccw Bus Device Descriptions *******************/ static Property virtio_ccw_net_properties[] = { - DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id), DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, @@ -1460,7 +1340,7 @@ static const TypeInfo virtio_ccw_net = { }; static Property virtio_ccw_blk_properties[] = { - DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id), DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, @@ -1489,7 +1369,7 @@ static const TypeInfo virtio_ccw_blk = { }; static Property virtio_ccw_serial_properties[] = { - DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id), DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, @@ -1518,7 +1398,7 @@ static const TypeInfo virtio_ccw_serial = { }; static Property virtio_ccw_balloon_properties[] = { - DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id), DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, @@ -1547,7 +1427,7 @@ static const TypeInfo virtio_ccw_balloon = { }; static Property virtio_ccw_scsi_properties[] = { - DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id), DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, @@ -1577,7 +1457,7 @@ static const TypeInfo virtio_ccw_scsi = { #ifdef CONFIG_VHOST_SCSI static Property vhost_ccw_scsi_properties[] = { - DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), DEFINE_PROP_END_OF_LIST(), @@ -1615,7 +1495,7 @@ static void virtio_ccw_rng_instance_init(Object *obj) } static Property virtio_ccw_rng_properties[] = { - DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id), DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, @@ -1662,29 +1542,17 @@ static int virtio_ccw_busdev_exit(DeviceState *dev) static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev; - SubchDev *sch = _dev->sch; + VirtioCcwDevice *_dev = 
to_virtio_ccw_dev_fast(dev); virtio_ccw_stop_ioeventfd(_dev); - - /* - * We should arrive here only for device_del, since we don't support - * direct hot(un)plug of channels, but only through virtio. - */ - assert(sch != NULL); - /* Subchannel is now disabled and no longer valid. */ - sch->curr_status.pmcw.flags &= ~(PMCW_FLAGS_MASK_ENA | - PMCW_FLAGS_MASK_DNV); - - css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, 1, 0); - - object_unparent(OBJECT(dev)); } static void virtio_ccw_device_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + CCWDeviceClass *k = CCW_DEVICE_CLASS(dc); + k->unplug = virtio_ccw_busdev_unplug; dc->realize = virtio_ccw_busdev_realize; dc->exit = virtio_ccw_busdev_exit; dc->bus_type = TYPE_VIRTUAL_CSS_BUS; @@ -1692,44 +1560,13 @@ static void virtio_ccw_device_class_init(ObjectClass *klass, void *data) static const TypeInfo virtio_ccw_device_info = { .name = TYPE_VIRTIO_CCW_DEVICE, - .parent = TYPE_DEVICE, + .parent = TYPE_CCW_DEVICE, .instance_size = sizeof(VirtioCcwDevice), .class_init = virtio_ccw_device_class_init, .class_size = sizeof(VirtIOCCWDeviceClass), .abstract = true, }; -/***************** Virtual-css Bus Bridge Device ********************/ -/* Only required to have the virtio bus as child in the system bus */ - -static int virtual_css_bridge_init(SysBusDevice *dev) -{ - /* nothing */ - return 0; -} - -static void virtual_css_bridge_class_init(ObjectClass *klass, void *data) -{ - SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); - HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); - DeviceClass *dc = DEVICE_CLASS(klass); - - k->init = virtual_css_bridge_init; - hc->unplug = virtio_ccw_busdev_unplug; - set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); -} - -static const TypeInfo virtual_css_bridge_info = { - .name = "virtual-css-bridge", - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SysBusDevice), - .class_init = virtual_css_bridge_class_init, - .interfaces = (InterfaceInfo[]) { - { TYPE_HOTPLUG_HANDLER }, - { } - } -}; - /* virtio-ccw-bus */ static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, @@ -1775,7 +1612,7 @@ static const TypeInfo virtio_ccw_bus_info = { #ifdef CONFIG_VIRTFS static Property virtio_ccw_9p_properties[] = { - DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id), DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, @@ -1824,7 +1661,6 @@ static const TypeInfo virtio_ccw_9p_info = { static void virtio_ccw_register(void) { type_register_static(&virtio_ccw_bus_info); - type_register_static(&virtual_css_bus_info); type_register_static(&virtio_ccw_device_info); type_register_static(&virtio_ccw_serial); type_register_static(&virtio_ccw_blk); @@ -1835,7 +1671,6 @@ static void virtio_ccw_register(void) type_register_static(&vhost_ccw_scsi); #endif type_register_static(&virtio_ccw_rng); - type_register_static(&virtual_css_bridge_info); #ifdef CONFIG_VIRTFS type_register_static(&virtio_ccw_9p_info); #endif diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h index 0bfb5d93c6..3f76443fba 100644 --- a/hw/s390x/virtio-ccw.h +++ b/hw/s390x/virtio-ccw.h @@ -26,8 +26,8 @@ #include <hw/s390x/s390_flic.h> #include <hw/s390x/css.h> - -#define VIRTUAL_CSSID 0xfe +#include "ccw-device.h" +#include "hw/s390x/css-bridge.h" #define VIRTIO_CCW_CU_TYPE 0x3832 #define VIRTIO_CCW_CHPID_TYPE 0x32 @@ -67,7 +67,7 
@@ typedef struct VirtioBusClass VirtioCcwBusClass; typedef struct VirtioCcwDevice VirtioCcwDevice; typedef struct VirtIOCCWDeviceClass { - DeviceClass parent_class; + CCWDeviceClass parent_class; void (*realize)(VirtioCcwDevice *dev, Error **errp); int (*exit)(VirtioCcwDevice *dev); } VirtIOCCWDeviceClass; @@ -78,9 +78,7 @@ typedef struct VirtIOCCWDeviceClass { #define VIRTIO_CCW_FLAG_USE_IOEVENTFD (1 << VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT) struct VirtioCcwDevice { - DeviceState parent_obj; - SubchDev *sch; - CssDevId bus_id; + CcwDevice parent_obj; int revision; uint32_t max_rev; VirtioBusState bus; @@ -103,15 +101,6 @@ static inline int virtio_ccw_rev_max(VirtioCcwDevice *dev) return dev->max_rev; } -/* virtual css bus type */ -typedef struct VirtualCssBus { - BusState parent_obj; -} VirtualCssBus; - -#define TYPE_VIRTUAL_CSS_BUS "virtual-css-bus" -#define VIRTUAL_CSS_BUS(obj) \ - OBJECT_CHECK(VirtualCssBus, (obj), TYPE_VIRTUAL_CSS_BUS) - /* virtio-scsi-ccw */ #define TYPE_VIRTIO_SCSI_CCW "virtio-scsi-ccw" @@ -191,7 +180,6 @@ typedef struct VirtIORNGCcw { VirtIORNG vdev; } VirtIORNGCcw; -VirtualCssBus *virtual_css_bus_init(void); void virtio_ccw_device_update_status(SubchDev *sch); VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch); diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index d1772183cf..52a41239cf 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -29,7 +29,7 @@ #include "hw/scsi/scsi.h" #include "block/scsi.h" #include "trace.h" - +#include "qapi/error.h" #include "mfi.h" #define MEGASAS_VERSION_GEN1 "1.70" @@ -48,11 +48,7 @@ #define MEGASAS_FLAG_USE_JBOD 0 #define MEGASAS_MASK_USE_JBOD (1 << MEGASAS_FLAG_USE_JBOD) -#define MEGASAS_FLAG_USE_MSI 1 -#define MEGASAS_MASK_USE_MSI (1 << MEGASAS_FLAG_USE_MSI) -#define MEGASAS_FLAG_USE_MSIX 2 -#define MEGASAS_MASK_USE_MSIX (1 << MEGASAS_FLAG_USE_MSIX) -#define MEGASAS_FLAG_USE_QUEUE64 3 +#define MEGASAS_FLAG_USE_QUEUE64 1 #define MEGASAS_MASK_USE_QUEUE64 (1 << MEGASAS_FLAG_USE_QUEUE64) static const char *mfi_frame_desc[] = { @@ -96,6 +92,8 @@ typedef struct MegasasState { int busy; int diag; int adp_reset; + OnOffAuto msi; + OnOffAuto msix; MegasasCmd *event_cmd; int event_locale; @@ -157,14 +155,9 @@ static bool megasas_use_queue64(MegasasState *s) return s->flags & MEGASAS_MASK_USE_QUEUE64; } -static bool megasas_use_msi(MegasasState *s) -{ - return s->flags & MEGASAS_MASK_USE_MSI; -} - static bool megasas_use_msix(MegasasState *s) { - return s->flags & MEGASAS_MASK_USE_MSIX; + return s->msix != ON_OFF_AUTO_OFF; } static bool megasas_is_jbod(MegasasState *s) @@ -2309,9 +2302,7 @@ static void megasas_scsi_uninit(PCIDevice *d) if (megasas_use_msix(s)) { msix_uninit(d, &s->mmio_io, &s->mmio_io); } - if (megasas_use_msi(s)) { - msi_uninit(d); - } + msi_uninit(d); } static const struct SCSIBusInfo megasas_scsi_info = { @@ -2332,6 +2323,8 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp) MegasasBaseClass *b = MEGASAS_DEVICE_GET_CLASS(s); uint8_t *pci_conf; int i, bar_type; + Error *err = NULL; + int ret; pci_conf = dev->config; @@ -2340,6 +2333,24 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp) /* Interrupt pin 1 */ pci_conf[PCI_INTERRUPT_PIN] = 0x01; + if (s->msi != ON_OFF_AUTO_OFF) { + ret = msi_init(dev, 0x50, 1, true, false, &err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!ret || ret == -ENOTSUP); + if (ret && s->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&err, "You 
have to use msi=auto (default) or " + "msi=off with this machine type.\n"); + error_propagate(errp, err); + return; + } else if (ret) { + /* With msi=auto, we fall back to MSI off silently */ + s->msi = ON_OFF_AUTO_OFF; + error_free(err); + } + } + memory_region_init_io(&s->mmio_io, OBJECT(s), &megasas_mmio_ops, s, "megasas-mmio", 0x4000); memory_region_init_io(&s->port_io, OBJECT(s), &megasas_port_ops, s, @@ -2347,14 +2358,10 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp) memory_region_init_io(&s->queue_io, OBJECT(s), &megasas_queue_ops, s, "megasas-queue", 0x40000); - if (megasas_use_msi(s) && - msi_init(dev, 0x50, 1, true, false)) { - s->flags &= ~MEGASAS_MASK_USE_MSI; - } if (megasas_use_msix(s) && msix_init(dev, 15, &s->mmio_io, b->mmio_bar, 0x2000, &s->mmio_io, b->mmio_bar, 0x3800, 0x68)) { - s->flags &= ~MEGASAS_MASK_USE_MSIX; + s->msix = ON_OFF_AUTO_OFF; } if (pci_is_express(dev)) { pcie_endpoint_cap_init(dev, 0xa0); @@ -2422,10 +2429,8 @@ static Property megasas_properties_gen1[] = { MEGASAS_DEFAULT_FRAMES), DEFINE_PROP_STRING("hba_serial", MegasasState, hba_serial), DEFINE_PROP_UINT64("sas_address", MegasasState, sas_addr, 0), - DEFINE_PROP_BIT("use_msi", MegasasState, flags, - MEGASAS_FLAG_USE_MSI, false), - DEFINE_PROP_BIT("use_msix", MegasasState, flags, - MEGASAS_FLAG_USE_MSIX, false), + DEFINE_PROP_ON_OFF_AUTO("msi", MegasasState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO), DEFINE_PROP_BIT("use_jbod", MegasasState, flags, MEGASAS_FLAG_USE_JBOD, false), DEFINE_PROP_END_OF_LIST(), @@ -2438,10 +2443,8 @@ static Property megasas_properties_gen2[] = { MEGASAS_GEN2_DEFAULT_FRAMES), DEFINE_PROP_STRING("hba_serial", MegasasState, hba_serial), DEFINE_PROP_UINT64("sas_address", MegasasState, sas_addr, 0), - DEFINE_PROP_BIT("use_msi", MegasasState, flags, - MEGASAS_FLAG_USE_MSI, true), - DEFINE_PROP_BIT("use_msix", MegasasState, flags, - MEGASAS_FLAG_USE_MSIX, true), + DEFINE_PROP_ON_OFF_AUTO("msi", MegasasState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO), DEFINE_PROP_BIT("use_jbod", MegasasState, flags, MEGASAS_FLAG_USE_JBOD, false), DEFINE_PROP_END_OF_LIST(), diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index be88e161a9..1ae32fb7d3 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -32,7 +32,7 @@ #include "hw/scsi/scsi.h" #include "block/scsi.h" #include "trace.h" - +#include "qapi/error.h" #include "mptsas.h" #include "mpi.h" @@ -63,7 +63,7 @@ static void mptsas_update_interrupt(MPTSASState *s) PCIDevice *pci = (PCIDevice *) s; uint32_t state = s->intr_status & ~(s->intr_mask | MPI_HIS_IOP_DOORBELL_STATUS); - if (s->msi_in_use && msi_enabled(pci)) { + if (msi_enabled(pci)) { if (state) { trace_mptsas_irq_msi(s); msi_notify(pci, 0); @@ -1273,10 +1273,30 @@ static void mptsas_scsi_init(PCIDevice *dev, Error **errp) { DeviceState *d = DEVICE(dev); MPTSASState *s = MPT_SAS(dev); + Error *err = NULL; + int ret; dev->config[PCI_LATENCY_TIMER] = 0; dev->config[PCI_INTERRUPT_PIN] = 0x01; + if (s->msi != ON_OFF_AUTO_OFF) { + ret = msi_init(dev, 0, 1, true, false, &err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!ret || ret == -ENOTSUP); + if (ret && s->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&err, "You have to use msi=auto (default) or " + "msi=off with this machine type.\n"); + error_propagate(errp, err); + return; + } + assert(!err 
|| s->msi == ON_OFF_AUTO_AUTO); + /* With msi=auto, we fall back to MSI off silently */ + error_free(err); + + } + memory_region_init_io(&s->mmio_io, OBJECT(s), &mptsas_mmio_ops, s, "mptsas-mmio", 0x4000); memory_region_init_io(&s->port_io, OBJECT(s), &mptsas_port_ops, s, @@ -1284,11 +1304,6 @@ static void mptsas_scsi_init(PCIDevice *dev, Error **errp) memory_region_init_io(&s->diag_io, OBJECT(s), &mptsas_diag_ops, s, "mptsas-diag", 0x10000); - if (s->msi_available && - msi_init(dev, 0, 1, true, false) >= 0) { - s->msi_in_use = true; - } - pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->port_io); pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32, &s->mmio_io); @@ -1319,9 +1334,7 @@ static void mptsas_scsi_uninit(PCIDevice *dev) MPTSASState *s = MPT_SAS(dev); qemu_bh_delete(s->request_bh); - if (s->msi_in_use) { - msi_uninit(dev); - } + msi_uninit(dev); } static void mptsas_reset(DeviceState *dev) @@ -1357,7 +1370,6 @@ static const VMStateDescription vmstate_mptsas = { .post_load = mptsas_post_load, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(dev, MPTSASState), - VMSTATE_BOOL(msi_in_use, MPTSASState), VMSTATE_UINT32(state, MPTSASState), VMSTATE_UINT8(who_init, MPTSASState), @@ -1403,7 +1415,7 @@ static const VMStateDescription vmstate_mptsas = { static Property mptsas_properties[] = { DEFINE_PROP_UINT64("sas_address", MPTSASState, sas_addr, 0), /* TODO: test MSI support under Windows */ - DEFINE_PROP_BIT("msi", MPTSASState, msi_available, 0, true), + DEFINE_PROP_ON_OFF_AUTO("msi", MPTSASState, msi, ON_OFF_AUTO_AUTO), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/scsi/mptsas.h b/hw/scsi/mptsas.h index 595f81fb5b..da014a397e 100644 --- a/hw/scsi/mptsas.h +++ b/hw/scsi/mptsas.h @@ -27,11 +27,10 @@ struct MPTSASState { MemoryRegion diag_io; QEMUBH *request_bh; - uint32_t msi_available; + /* properties */ + OnOffAuto msi; uint64_t sas_addr; - bool msi_in_use; - /* Doorbell register */ uint32_t state; uint8_t who_init; diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index 6a2d89afba..7a588a7ad4 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -225,13 +225,14 @@ static void scsi_read_complete(void * opaque, int ret) if (s->type == TYPE_DISK && r->req.cmd.buf[0] == INQUIRY && r->req.cmd.buf[2] == 0xb0) { - uint32_t max_xfer_len = blk_get_max_transfer_length(s->conf.blk); - if (max_xfer_len) { - stl_be_p(&r->buf[8], max_xfer_len); - /* Also take care of the opt xfer len. */ - if (ldl_be_p(&r->buf[12]) > max_xfer_len) { - stl_be_p(&r->buf[12], max_xfer_len); - } + uint32_t max_transfer = + blk_get_max_transfer(s->conf.blk) / s->blocksize; + + assert(max_transfer); + stl_be_p(&r->buf[8], max_transfer); + /* Also take care of the opt xfer len. 
*/ + if (ldl_be_p(&r->buf[12]) > max_transfer) { + stl_be_p(&r->buf[12], max_transfer); } } scsi_req_data(&r->req, len); diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c index 9261d51da7..2a00f2f3c8 100644 --- a/hw/scsi/vhost-scsi.c +++ b/hw/scsi/vhost-scsi.c @@ -248,7 +248,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) s->dev.backend_features = 0; ret = vhost_dev_init(&s->dev, (void *)(uintptr_t)vhostfd, - VHOST_BACKEND_TYPE_KERNEL); + VHOST_BACKEND_TYPE_KERNEL, 0); if (ret < 0) { error_setg(errp, "vhost-scsi: vhost initialization failed: %s", strerror(-ret)); diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index 2d7528d1dd..da71c8c8a5 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -121,8 +121,7 @@ typedef struct { uint8_t msg_ring_info_valid; /* Whether message ring initialized */ uint8_t use_msg; /* Whether to use message ring */ - uint8_t msi_used; /* Whether MSI support was installed successfully */ - + uint8_t msi_used; /* For migration compatibility */ PVSCSIRingInfo rings; /* Data transfer rings manager */ uint32_t resetting; /* Reset in progress */ @@ -362,7 +361,7 @@ pvscsi_update_irq_status(PVSCSIState *s) trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled, s->reg_interrupt_status); - if (s->msi_used && msi_enabled(d)) { + if (msi_enabled(d)) { if (should_raise) { trace_pvscsi_update_irq_msi(); msi_notify(d, PVSCSI_VECTOR_COMPLETION); @@ -1056,22 +1055,20 @@ pvscsi_io_read(void *opaque, hwaddr addr, unsigned size) } -static bool +static void pvscsi_init_msi(PVSCSIState *s) { int res; PCIDevice *d = PCI_DEVICE(s); res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS, - PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK); + PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK, NULL); if (res < 0) { trace_pvscsi_init_msi_fail(res); s->msi_used = false; } else { s->msi_used = true; } - - return s->msi_used; } static void @@ -1079,9 +1076,7 @@ pvscsi_cleanup_msi(PVSCSIState *s) { PCIDevice *d = PCI_DEVICE(s); - if (s->msi_used) { - msi_uninit(d); - } + msi_uninit(d); } static const MemoryRegionOps pvscsi_ops = { diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index 075e4ed5df..3ff0886dd5 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -15,6 +15,7 @@ #include "sysemu/blockdev.h" #include "hw/ssi/ssi.h" #include "hw/sd/sd.h" +#include "qapi/error.h" //#define DEBUG_SSI_SD 1 @@ -249,7 +250,7 @@ static int ssi_sd_load(QEMUFile *f, void *opaque, int version_id) return 0; } -static int ssi_sd_init(SSISlave *d) +static void ssi_sd_realize(SSISlave *d, Error **errp) { DeviceState *dev = DEVICE(d); ssi_sd_state *s = FROM_SSI_SLAVE(ssi_sd_state, d); @@ -260,17 +261,17 @@ static int ssi_sd_init(SSISlave *d) dinfo = drive_get_next(IF_SD); s->sd = sd_init(dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, true); if (s->sd == NULL) { - return -1; + error_setg(errp, "Device initialization failed."); + return; } register_savevm(dev, "ssi_sd", -1, 1, ssi_sd_save, ssi_sd_load, s); - return 0; } static void ssi_sd_class_init(ObjectClass *klass, void *data) { SSISlaveClass *k = SSI_SLAVE_CLASS(klass); - k->init = ssi_sd_init; + k->realize = ssi_sd_realize; k->transfer = ssi_sd_transfer; k->cs_polarity = SSI_CS_LOW; } diff --git a/hw/ssi/Makefile.objs b/hw/ssi/Makefile.objs index fcbb79ef01..c79a8dcd86 100644 --- a/hw/ssi/Makefile.objs +++ b/hw/ssi/Makefile.objs @@ -2,6 +2,7 @@ common-obj-$(CONFIG_PL022) += pl022.o common-obj-$(CONFIG_SSI) += ssi.o common-obj-$(CONFIG_XILINX_SPI) += xilinx_spi.o common-obj-$(CONFIG_XILINX_SPIPS) += xilinx_spips.o +common-obj-$(CONFIG_ASPEED_SOC) += aspeed_smc.o obj-$(CONFIG_OMAP) += omap_spi.o obj-$(CONFIG_IMX) += imx_spi.o diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c new file mode 100644 index 0000000000..a371e302d4 --- /dev/null +++ b/hw/ssi/aspeed_smc.c @@ -0,0 +1,470 @@ +/* + * ASPEED AST2400 SMC Controller (SPI Flash Only) + * + * Copyright (C) 2016 IBM Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "sysemu/sysemu.h" +#include "qemu/log.h" +#include "include/qemu/error-report.h" +#include "exec/address-spaces.h" + +#include "hw/ssi/aspeed_smc.h" + +/* CE Type Setting Register */ +#define R_CONF (0x00 / 4) +#define CONF_LEGACY_DISABLE (1 << 31) +#define CONF_ENABLE_W4 20 +#define CONF_ENABLE_W3 19 +#define CONF_ENABLE_W2 18 +#define CONF_ENABLE_W1 17 +#define CONF_ENABLE_W0 16 +#define CONF_FLASH_TYPE4 9 +#define CONF_FLASH_TYPE3 7 +#define CONF_FLASH_TYPE2 5 +#define CONF_FLASH_TYPE1 3 +#define CONF_FLASH_TYPE0 1 + +/* CE Control Register */ +#define R_CE_CTRL (0x04 / 4) +#define CTRL_EXTENDED4 4 /* 32 bit addressing for SPI */ +#define CTRL_EXTENDED3 3 /* 32 bit addressing for SPI */ +#define CTRL_EXTENDED2 2 /* 32 bit addressing for SPI */ +#define CTRL_EXTENDED1 1 /* 32 bit addressing for SPI */ +#define CTRL_EXTENDED0 0 /* 32 bit addressing for SPI */ + +/* Interrupt Control and Status Register */ +#define R_INTR_CTRL (0x08 / 4) +#define INTR_CTRL_DMA_STATUS (1 << 11) +#define INTR_CTRL_CMD_ABORT_STATUS (1 << 10) +#define INTR_CTRL_WRITE_PROTECT_STATUS (1 << 9) +#define INTR_CTRL_DMA_EN (1 << 3) +#define INTR_CTRL_CMD_ABORT_EN (1 << 2) +#define INTR_CTRL_WRITE_PROTECT_EN (1 << 1) + +/* CEx Control Register */ +#define R_CTRL0 (0x10 / 4) +#define CTRL_CMD_SHIFT 16 +#define CTRL_CMD_MASK 0xff +#define CTRL_CE_STOP_ACTIVE (1 << 2) +#define CTRL_CMD_MODE_MASK 0x3 +#define CTRL_READMODE 0x0 +#define CTRL_FREADMODE 0x1 +#define CTRL_WRITEMODE 0x2 +#define CTRL_USERMODE 0x3 +#define R_CTRL1 (0x14 / 4) +#define R_CTRL2 (0x18 / 4) +#define R_CTRL3 (0x1C / 4) +#define R_CTRL4 (0x20 / 4) + +/* CEx Segment Address Register */ +#define R_SEG_ADDR0 (0x30 / 4) +#define SEG_SIZE_SHIFT 24 /* 8MB units */ +#define SEG_SIZE_MASK 0x7f +#define SEG_START_SHIFT 16 /* address bit [A29-A23] */ +#define SEG_START_MASK 0x7f +#define R_SEG_ADDR1 (0x34 / 4) +#define R_SEG_ADDR2 (0x38 / 4) +#define R_SEG_ADDR3 (0x3C / 4) +#define R_SEG_ADDR4 (0x40 / 4) + +/* Misc Control Register #1 */ +#define R_MISC_CTRL1 (0x50 / 4) + +/* Misc Control Register #2 */ +#define R_MISC_CTRL2 (0x54 / 4) + +/* DMA Control/Status Register */ +#define R_DMA_CTRL (0x80 / 4) +#define DMA_CTRL_DELAY_MASK 0xf +#define DMA_CTRL_DELAY_SHIFT 8 +#define DMA_CTRL_FREQ_MASK 0xf +#define DMA_CTRL_FREQ_SHIFT 4 +#define DMA_CTRL_MODE (1 << 3) +#define DMA_CTRL_CKSUM (1 << 2) +#define DMA_CTRL_DIR (1 << 1) +#define DMA_CTRL_EN (1 << 0) + +/* DMA Flash Side Address */ +#define R_DMA_FLASH_ADDR (0x84 / 4) + +/* DMA DRAM Side Address */ +#define R_DMA_DRAM_ADDR (0x88 / 4) + +/* DMA Length Register */ +#define R_DMA_LEN (0x8C / 4) + +/* Checksum Calculation Result */ +#define R_DMA_CHECKSUM (0x90 / 4) + +/* Misc Control Register #2 */ +#define R_TIMINGS (0x94 / 4) + +/* SPI controller registers and bits */ +#define R_SPI_CONF (0x00 / 4) +#define SPI_CONF_ENABLE_W0 0 +#define R_SPI_CTRL0 (0x4 / 4) +#define R_SPI_MISC_CTRL (0x10 / 4) +#define R_SPI_TIMINGS (0x14 / 4) + +/* + * Default segments mapping addresses and size for each slave per + * controller. These can be changed when the board is initialized with the + * Segment Address Registers but they don't seem to be used in the + * field.
+ */ +static const AspeedSegments aspeed_segments_legacy[] = { + { 0x10000000, 32 * 1024 * 1024 }, +}; + +static const AspeedSegments aspeed_segments_fmc[] = { + { 0x20000000, 64 * 1024 * 1024 }, + { 0x24000000, 32 * 1024 * 1024 }, + { 0x26000000, 32 * 1024 * 1024 }, + { 0x28000000, 32 * 1024 * 1024 }, + { 0x2A000000, 32 * 1024 * 1024 } +}; + +static const AspeedSegments aspeed_segments_spi[] = { + { 0x30000000, 64 * 1024 * 1024 }, +}; + +static const AspeedSMCController controllers[] = { + { "aspeed.smc.smc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS, + CONF_ENABLE_W0, 5, aspeed_segments_legacy, 0x6000000 }, + { "aspeed.smc.fmc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS, + CONF_ENABLE_W0, 5, aspeed_segments_fmc, 0x10000000 }, + { "aspeed.smc.spi", R_SPI_CONF, 0xff, R_SPI_CTRL0, R_SPI_TIMINGS, + SPI_CONF_ENABLE_W0, 1, aspeed_segments_spi, 0x10000000 }, +}; + +static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr, + unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u" + PRIx64 "\n", __func__, addr, size); + return 0; +} + +static void aspeed_smc_flash_default_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u: 0x%" + PRIx64 "\n", __func__, addr, size, data); +} + +static const MemoryRegionOps aspeed_smc_flash_default_ops = { + .read = aspeed_smc_flash_default_read, + .write = aspeed_smc_flash_default_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static inline int aspeed_smc_flash_mode(const AspeedSMCState *s, int cs) +{ + return s->regs[s->r_ctrl0 + cs] & CTRL_CMD_MODE_MASK; +} + +static inline bool aspeed_smc_is_usermode(const AspeedSMCState *s, int cs) +{ + return aspeed_smc_flash_mode(s, cs) == CTRL_USERMODE; +} + +static inline bool aspeed_smc_is_writable(const AspeedSMCState *s, int cs) +{ + return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + cs)); +} + +static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size) +{ + AspeedSMCFlash *fl = opaque; + const AspeedSMCState *s = fl->controller; + uint64_t ret = 0; + int i; + + if (aspeed_smc_is_usermode(s, fl->id)) { + for (i = 0; i < size; i++) { + ret |= ssi_transfer(s->spi, 0x0) << (8 * i); + } + } else { + qemu_log_mask(LOG_UNIMP, "%s: usermode not implemented\n", + __func__); + ret = -1; + } + + return ret; +} + +static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + AspeedSMCFlash *fl = opaque; + const AspeedSMCState *s = fl->controller; + int i; + + if (!aspeed_smc_is_writable(s, fl->id)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%" + HWADDR_PRIx "\n", __func__, addr); + return; + } + + if (!aspeed_smc_is_usermode(s, fl->id)) { + qemu_log_mask(LOG_UNIMP, "%s: usermode not implemented\n", + __func__); + return; + } + + for (i = 0; i < size; i++) { + ssi_transfer(s->spi, (data >> (8 * i)) & 0xff); + } +} + +static const MemoryRegionOps aspeed_smc_flash_ops = { + .read = aspeed_smc_flash_read, + .write = aspeed_smc_flash_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static bool aspeed_smc_is_ce_stop_active(const AspeedSMCState *s, int cs) +{ + return s->regs[s->r_ctrl0 + cs] & CTRL_CE_STOP_ACTIVE; +} + +static void aspeed_smc_update_cs(const AspeedSMCState *s) +{ + int i; + + for (i = 0; i < s->num_cs; ++i) { + qemu_set_irq(s->cs_lines[i], aspeed_smc_is_ce_stop_active(s, 
i)); + } +} + +static void aspeed_smc_reset(DeviceState *d) +{ + AspeedSMCState *s = ASPEED_SMC(d); + int i; + + memset(s->regs, 0, sizeof s->regs); + + /* Unselect all slaves */ + for (i = 0; i < s->num_cs; ++i) { + s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE; + } + + aspeed_smc_update_cs(s); +} + +static bool aspeed_smc_is_implemented(AspeedSMCState *s, hwaddr addr) +{ + return (addr == s->r_conf || addr == s->r_timings || addr == s->r_ce_ctrl || + (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs)); +} + +static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size) +{ + AspeedSMCState *s = ASPEED_SMC(opaque); + + addr >>= 2; + + if (addr >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at 0x%" HWADDR_PRIx "\n", + __func__, addr); + return 0; + } + + if (!aspeed_smc_is_implemented(s, addr)) { + qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n", + __func__, addr); + return 0; + } + + return s->regs[addr]; +} + +static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + AspeedSMCState *s = ASPEED_SMC(opaque); + uint32_t value = data; + + addr >>= 2; + + if (addr >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at 0x%" HWADDR_PRIx "\n", + __func__, addr); + return; + } + + if (!aspeed_smc_is_implemented(s, addr)) { + qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n", + __func__, addr); + return; + } + + /* + * Not much to do apart from storing the value and set the cs + * lines if the register is a controlling one. + */ + s->regs[addr] = value; + if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) { + aspeed_smc_update_cs(s); + } +} + +static const MemoryRegionOps aspeed_smc_ops = { + .read = aspeed_smc_read, + .write = aspeed_smc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.unaligned = true, +}; + +static void aspeed_smc_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedSMCState *s = ASPEED_SMC(dev); + AspeedSMCClass *mc = ASPEED_SMC_GET_CLASS(s); + int i; + char name[32]; + hwaddr offset = 0; + + s->ctrl = mc->ctrl; + + /* keep a copy under AspeedSMCState to speed up accesses */ + s->r_conf = s->ctrl->r_conf; + s->r_ce_ctrl = s->ctrl->r_ce_ctrl; + s->r_ctrl0 = s->ctrl->r_ctrl0; + s->r_timings = s->ctrl->r_timings; + s->conf_enable_w0 = s->ctrl->conf_enable_w0; + + /* Enforce some real HW limits */ + if (s->num_cs > s->ctrl->max_slaves) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: num_cs cannot exceed: %d\n", + __func__, s->ctrl->max_slaves); + s->num_cs = s->ctrl->max_slaves; + } + + s->spi = ssi_create_bus(dev, "spi"); + + /* Setup cs_lines for slaves */ + sysbus_init_irq(sbd, &s->irq); + s->cs_lines = g_new0(qemu_irq, s->num_cs); + ssi_auto_connect_slaves(dev, s->cs_lines, s->spi); + + for (i = 0; i < s->num_cs; ++i) { + sysbus_init_irq(sbd, &s->cs_lines[i]); + } + + aspeed_smc_reset(dev); + + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s, + s->ctrl->name, ASPEED_SMC_R_MAX * 4); + sysbus_init_mmio(sbd, &s->mmio); + + /* + * Memory region where flash modules are remapped + */ + snprintf(name, sizeof(name), "%s.flash", s->ctrl->name); + + memory_region_init_io(&s->mmio_flash, OBJECT(s), + &aspeed_smc_flash_default_ops, s, name, + s->ctrl->mapping_window_size); + sysbus_init_mmio(sbd, &s->mmio_flash); + + s->flashes = g_new0(AspeedSMCFlash, s->num_cs); + + for (i = 0; i < s->num_cs; ++i) { + AspeedSMCFlash *fl = &s->flashes[i]; + + snprintf(name, 
sizeof(name), "%s.%d", s->ctrl->name, i); + + fl->id = i; + fl->controller = s; + fl->size = s->ctrl->segments[i].size; + memory_region_init_io(&fl->mmio, OBJECT(s), &aspeed_smc_flash_ops, + fl, name, fl->size); + memory_region_add_subregion(&s->mmio_flash, offset, &fl->mmio); + offset += fl->size; + } +} + +static const VMStateDescription vmstate_aspeed_smc = { + .name = "aspeed.smc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static Property aspeed_smc_properties[] = { + DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_smc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *mc = ASPEED_SMC_CLASS(klass); + + dc->realize = aspeed_smc_realize; + dc->reset = aspeed_smc_reset; + dc->props = aspeed_smc_properties; + dc->vmsd = &vmstate_aspeed_smc; + mc->ctrl = data; +} + +static const TypeInfo aspeed_smc_info = { + .name = TYPE_ASPEED_SMC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSMCState), + .class_size = sizeof(AspeedSMCClass), + .abstract = true, +}; + +static void aspeed_smc_register_types(void) +{ + int i; + + type_register_static(&aspeed_smc_info); + for (i = 0; i < ARRAY_SIZE(controllers); ++i) { + TypeInfo ti = { + .name = controllers[i].name, + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_smc_class_init, + .class_data = (void *)&controllers[i], + }; + type_register(&ti); + } +} + +type_init(aspeed_smc_register_types) diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c index 9791c0d947..7eaaf565fd 100644 --- a/hw/ssi/ssi.c +++ b/hw/ssi/ssi.c @@ -54,7 +54,7 @@ static uint32_t ssi_transfer_raw_default(SSISlave *dev, uint32_t val) return 0; } -static int ssi_slave_init(DeviceState *dev) +static void ssi_slave_realize(DeviceState *dev, Error **errp) { SSISlave *s = SSI_SLAVE(dev); SSISlaveClass *ssc = SSI_SLAVE_GET_CLASS(s); @@ -64,7 +64,7 @@ static int ssi_slave_init(DeviceState *dev) qdev_init_gpio_in_named(dev, ssi_cs_default, SSI_GPIO_CS, 1); } - return ssc->init(s); + ssc->realize(s, errp); } static void ssi_slave_class_init(ObjectClass *klass, void *data) @@ -72,7 +72,7 @@ static void ssi_slave_class_init(ObjectClass *klass, void *data) SSISlaveClass *ssc = SSI_SLAVE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); - dc->init = ssi_slave_init; + dc->realize = ssi_slave_realize; dc->bus_type = TYPE_SSI_BUS; if (!ssc->transfer_raw) { ssc->transfer_raw = ssi_transfer_raw_default; diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c index 3c2f01ab99..82bc73cb86 100644 --- a/hw/timer/imx_gpt.c +++ b/hw/timer/imx_gpt.c @@ -14,7 +14,6 @@ #include "qemu/osdep.h" #include "hw/timer/imx_gpt.h" -#include "hw/misc/imx_ccm.h" #include "qemu/main-loop.h" #include "qemu/log.h" @@ -81,7 +80,18 @@ static const VMStateDescription vmstate_imx_timer_gpt = { } }; -static const IMXClk imx_gpt_clocks[] = { +static const IMXClk imx25_gpt_clocks[] = { + CLK_NONE, /* 000 No clock source */ + CLK_IPG, /* 001 ipg_clk, 532MHz*/ + CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */ + CLK_NONE, /* 011 not defined */ + CLK_32k, /* 100 ipg_clk_32k */ + CLK_32k, /* 101 ipg_clk_32k */ + CLK_32k, /* 110 ipg_clk_32k */ + CLK_32k, /* 111 ipg_clk_32k */ +}; + +static const IMXClk imx31_gpt_clocks[] = { CLK_NONE, /* 000 No clock source */ CLK_IPG, /* 001 ipg_clk, 532MHz*/ CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */ @@ -92,12 +102,23 @@ static const IMXClk 
imx_gpt_clocks[] = { CLK_NONE, /* 111 not defined */ }; +static const IMXClk imx6_gpt_clocks[] = { + CLK_NONE, /* 000 No clock source */ + CLK_IPG, /* 001 ipg_clk, 532MHz*/ + CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */ + CLK_EXT, /* 011 External clock */ + CLK_32k, /* 100 ipg_clk_32k */ + CLK_HIGH_DIV, /* 101 reference clock / 8 */ + CLK_NONE, /* 110 not defined */ + CLK_HIGH, /* 111 reference clock */ +}; + static void imx_gpt_set_freq(IMXGPTState *s) { uint32_t clksrc = extract32(s->cr, GPT_CR_CLKSRC_SHIFT, 3); s->freq = imx_ccm_get_clock_frequency(s->ccm, - imx_gpt_clocks[clksrc]) / (1 + s->pr); + s->clocks[clksrc]) / (1 + s->pr); DPRINTF("Setting clksrc %d to frequency %d\n", clksrc, s->freq); @@ -453,16 +474,52 @@ static void imx_gpt_class_init(ObjectClass *klass, void *data) dc->desc = "i.MX general timer"; } -static const TypeInfo imx_gpt_info = { - .name = TYPE_IMX_GPT, +static void imx25_gpt_init(Object *obj) +{ + IMXGPTState *s = IMX_GPT(obj); + + s->clocks = imx25_gpt_clocks; +} + +static void imx31_gpt_init(Object *obj) +{ + IMXGPTState *s = IMX_GPT(obj); + + s->clocks = imx31_gpt_clocks; +} + +static void imx6_gpt_init(Object *obj) +{ + IMXGPTState *s = IMX_GPT(obj); + + s->clocks = imx6_gpt_clocks; +} + +static const TypeInfo imx25_gpt_info = { + .name = TYPE_IMX25_GPT, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(IMXGPTState), + .instance_init = imx25_gpt_init, .class_init = imx_gpt_class_init, }; +static const TypeInfo imx31_gpt_info = { + .name = TYPE_IMX31_GPT, + .parent = TYPE_IMX25_GPT, + .instance_init = imx31_gpt_init, +}; + +static const TypeInfo imx6_gpt_info = { + .name = TYPE_IMX6_GPT, + .parent = TYPE_IMX25_GPT, + .instance_init = imx6_gpt_init, +}; + static void imx_gpt_register_types(void) { - type_register_static(&imx_gpt_info); + type_register_static(&imx25_gpt_info); + type_register_static(&imx31_gpt_info); + type_register_static(&imx6_gpt_info); } type_init(imx_gpt_register_types) diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index 43ba61599a..976bfb0659 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -26,6 +26,7 @@ #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "trace.h" +#include "qapi/error.h" //#define DEBUG_XHCI //#define DEBUG_DATA @@ -461,6 +462,8 @@ struct XHCIState { uint32_t numslots; uint32_t flags; uint32_t max_pstreams_mask; + OnOffAuto msi; + OnOffAuto msix; /* Operational Registers */ uint32_t usbcmd; @@ -498,9 +501,7 @@ typedef struct XHCIEvRingSeg { } XHCIEvRingSeg; enum xhci_flags { - XHCI_FLAG_USE_MSI = 1, - XHCI_FLAG_USE_MSI_X, - XHCI_FLAG_SS_FIRST, + XHCI_FLAG_SS_FIRST = 1, XHCI_FLAG_FORCE_PCIE_ENDCAP, XHCI_FLAG_ENABLE_STREAMS, }; @@ -2363,6 +2364,8 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, slot->uport = uport; slot->ctx = octx; + /* Make sure device is in USB_STATE_DEFAULT state */ + usb_device_reset(dev); if (bsr) { slot_ctx[3] = SLOT_DEFAULT << SLOT_STATE_SHIFT; } else { @@ -2370,7 +2373,6 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, uint8_t buf[1]; slot_ctx[3] = (SLOT_ADDRESSED << SLOT_STATE_SHIFT) | slotid; - usb_device_reset(dev); memset(&p, 0, sizeof(p)); usb_packet_addbuf(&p, buf, sizeof(buf)); usb_packet_setup(&p, USB_TOKEN_OUT, @@ -3581,6 +3583,7 @@ static void usb_xhci_init(XHCIState *xhci) static void usb_xhci_realize(struct PCIDevice *dev, Error **errp) { int i, ret; + Error *err = NULL; XHCIState *xhci = XHCI(dev); @@ -3591,6 +3594,23 @@ static void usb_xhci_realize(struct PCIDevice *dev, Error **errp) usb_xhci_init(xhci); + if 
(xhci->msi != ON_OFF_AUTO_OFF) { + ret = msi_init(dev, 0x70, xhci->numintrs, true, false, &err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!ret || ret == -ENOTSUP); + if (ret && xhci->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&err, "You have to use msi=auto (default) or " + "msi=off with this machine type.\n"); + error_propagate(errp, err); + return; + } + assert(!err || xhci->msi == ON_OFF_AUTO_AUTO); + /* With msi=auto, we fall back to MSI off silently */ + error_free(err); + } + if (xhci->numintrs > MAXINTRS) { xhci->numintrs = MAXINTRS; } @@ -3648,10 +3668,8 @@ static void usb_xhci_realize(struct PCIDevice *dev, Error **errp) assert(ret >= 0); } - if (xhci_get_flag(xhci, XHCI_FLAG_USE_MSI)) { - msi_init(dev, 0x70, xhci->numintrs, true, false); - } - if (xhci_get_flag(xhci, XHCI_FLAG_USE_MSI_X)) { + if (xhci->msix != ON_OFF_AUTO_OFF) { + /* TODO check for errors */ msix_init(dev, xhci->numintrs, &xhci->mem, 0, OFF_MSIX_TABLE, &xhci->mem, 0, OFF_MSIX_PBA, @@ -3872,8 +3890,8 @@ static const VMStateDescription vmstate_xhci = { }; static Property xhci_properties[] = { - DEFINE_PROP_BIT("msi", XHCIState, flags, XHCI_FLAG_USE_MSI, true), - DEFINE_PROP_BIT("msix", XHCIState, flags, XHCI_FLAG_USE_MSI_X, true), + DEFINE_PROP_ON_OFF_AUTO("msi", XHCIState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO("msix", XHCIState, msix, ON_OFF_AUTO_AUTO), DEFINE_PROP_BIT("superspeed-ports-first", XHCIState, flags, XHCI_FLAG_SS_FIRST, true), DEFINE_PROP_BIT("force-pcie-endcap", XHCIState, flags, diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c index 0fd34c62c4..7bed0cebe3 100644 --- a/hw/usb/xen-usb.c +++ b/hw/usb/xen-usb.c @@ -253,7 +253,8 @@ static int usbback_init_packet(struct usbback_req *usbback_req) case USBIF_PIPE_TYPE_CTRL: packet->parameter = *(uint64_t *)usbback_req->req.u.ctrl; - TR_REQ(xendev, "ctrl parameter: %lx, buflen: %x\n", packet->parameter, + TR_REQ(xendev, "ctrl parameter: %"PRIx64", buflen: %x\n", + packet->parameter, usbback_req->req.buffer_length); break; diff --git a/hw/vfio/Makefile.objs b/hw/vfio/Makefile.objs index ceddbb8f99..c25e32b029 100644 --- a/hw/vfio/Makefile.objs +++ b/hw/vfio/Makefile.objs @@ -4,4 +4,5 @@ obj-$(CONFIG_PCI) += pci.o pci-quirks.o obj-$(CONFIG_SOFTMMU) += platform.o obj-$(CONFIG_SOFTMMU) += calxeda-xgmac.o obj-$(CONFIG_SOFTMMU) += amd-xgbe.o +obj-$(CONFIG_SOFTMMU) += spapr.o endif diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 7be638e0e3..f3c0522e7e 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -28,6 +28,7 @@ #include "exec/memory.h" #include "hw/hw.h" #include "qemu/error-report.h" +#include "qemu/range.h" #include "sysemu/kvm.h" #ifdef CONFIG_KVM #include "linux/kvm.h" @@ -241,6 +242,44 @@ static int vfio_dma_map(VFIOContainer *container, hwaddr iova, return -errno; } +static void vfio_host_win_add(VFIOContainer *container, + hwaddr min_iova, hwaddr max_iova, + uint64_t iova_pgsizes) +{ + VFIOHostDMAWindow *hostwin; + + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (ranges_overlap(hostwin->min_iova, + hostwin->max_iova - hostwin->min_iova + 1, + min_iova, + max_iova - min_iova + 1)) { + hw_error("%s: Overlapped IOMMU are not enabled", __func__); + } + } + + hostwin = g_malloc0(sizeof(*hostwin)); + + hostwin->min_iova = min_iova; + hostwin->max_iova = max_iova; + hostwin->iova_pgsizes = iova_pgsizes; + QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next); +} + +static int 
vfio_host_win_del(VFIOContainer *container, hwaddr min_iova, + hwaddr max_iova) +{ + VFIOHostDMAWindow *hostwin; + + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) { + QLIST_REMOVE(hostwin, hostwin_next); + return 0; + } + } + + return -1; +} + static bool vfio_listener_skipped_section(MemoryRegionSection *section) { return (!memory_region_is_ram(section->mr) && @@ -329,6 +368,8 @@ static void vfio_listener_region_add(MemoryListener *listener, Int128 llend, llsize; void *vaddr; int ret; + VFIOHostDMAWindow *hostwin; + bool hostwin_found; if (vfio_listener_skipped_section(section)) { trace_vfio_listener_region_add_skip( @@ -354,7 +395,40 @@ static void vfio_listener_region_add(MemoryListener *listener, } end = int128_get64(int128_sub(llend, int128_one())); - if ((iova < container->min_iova) || (end > container->max_iova)) { + if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + VFIOHostDMAWindow *hostwin; + hwaddr pgsize = 0; + + /* For now intersections are not allowed, we may relax this later */ + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (ranges_overlap(hostwin->min_iova, + hostwin->max_iova - hostwin->min_iova + 1, + section->offset_within_address_space, + int128_get64(section->size))) { + ret = -1; + goto fail; + } + } + + ret = vfio_spapr_create_window(container, section, &pgsize); + if (ret) { + goto fail; + } + + vfio_host_win_add(container, section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(section->size) - 1, pgsize); + } + + hostwin_found = false; + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { + hostwin_found = true; + break; + } + } + + if (!hostwin_found) { error_report("vfio: IOMMU container %p can't map guest IOVA region" " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end); @@ -369,10 +443,6 @@ static void vfio_listener_region_add(MemoryListener *listener, trace_vfio_listener_region_add_iommu(iova, end); /* - * FIXME: We should do some checking to see if the - * capabilities of the host VFIO IOMMU are adequate to model - * the guest IOMMU - * * FIXME: For VFIO iommu types which have KVM acceleration to * avoid bouncing all map/unmaps through qemu this way, this * would be the right place to wire that up (tell the KVM @@ -493,6 +563,18 @@ static void vfio_listener_region_del(MemoryListener *listener, "0x%"HWADDR_PRIx") = %d (%m)", container, iova, int128_get64(llsize), ret); } + + if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + vfio_spapr_remove_window(container, + section->offset_within_address_space); + if (vfio_host_win_del(container, + section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(section->size) - 1) < 0) { + hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx, + __func__, section->offset_within_address_space); + } + } } static const MemoryListener vfio_memory_listener = { @@ -503,6 +585,9 @@ static const MemoryListener vfio_memory_listener = { static void vfio_listener_release(VFIOContainer *container) { memory_listener_unregister(&container->listener); + if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + memory_listener_unregister(&container->prereg_listener); + } } static struct vfio_info_cap_header * @@ -861,8 +946,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as) goto free_container_exit; } - ret = ioctl(fd, 
VFIO_SET_IOMMU, - v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU); + container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU; + ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type); if (ret) { error_report("vfio: failed to set iommu for container: %m"); ret = -errno; @@ -876,19 +961,18 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as) * existing Type1 IOMMUs generally support any IOVA we're * going to actually try in practice. */ - container->min_iova = 0; - container->max_iova = (hwaddr)-1; - - /* Assume just 4K IOVA page size */ - container->iova_pgsizes = 0x1000; info.argsz = sizeof(info); ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info); /* Ignore errors */ - if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) { - container->iova_pgsizes = info.iova_pgsizes; + if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) { + /* Assume 4k IOVA page size */ + info.iova_pgsizes = 4096; } - } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) { + vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes); + } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) || + ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) { struct vfio_iommu_spapr_tce_info info; + bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU); ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd); if (ret) { @@ -896,7 +980,9 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as) ret = -errno; goto free_container_exit; } - ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU); + container->iommu_type = + v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU; + ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type); if (ret) { error_report("vfio: failed to set iommu for container: %m"); ret = -errno; @@ -908,30 +994,54 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as) * when container fd is closed so we do not call it explicitly * in this file. */ - ret = ioctl(fd, VFIO_IOMMU_ENABLE); - if (ret) { - error_report("vfio: failed to enable container: %m"); - ret = -errno; - goto free_container_exit; + if (!v2) { + ret = ioctl(fd, VFIO_IOMMU_ENABLE); + if (ret) { + error_report("vfio: failed to enable container: %m"); + ret = -errno; + goto free_container_exit; + } + } else { + container->prereg_listener = vfio_prereg_listener; + + memory_listener_register(&container->prereg_listener, + &address_space_memory); + if (container->error) { + memory_listener_unregister(&container->prereg_listener); + error_report("vfio: RAM memory listener initialization failed for container"); + goto free_container_exit; + } } - /* - * This only considers the host IOMMU's 32-bit window. At - * some point we need to add support for the optional 64-bit - * window and dynamic windows - */ info.argsz = sizeof(info); ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); if (ret) { error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m"); ret = -errno; + if (v2) { + memory_listener_unregister(&container->prereg_listener); + } goto free_container_exit; } - container->min_iova = info.dma32_window_start; - container->max_iova = container->min_iova + info.dma32_window_size - 1; - /* Assume just 4K IOVA pages for now */ - container->iova_pgsizes = 0x1000; + if (v2) { + /* + * There is a default window in just created container. + * To make region_add/del simpler, we better remove this + * window now and let those iommu_listener callbacks + * create/remove them when needed. 
+ */ + ret = vfio_spapr_remove_window(container, info.dma32_window_start); + if (ret) { + goto free_container_exit; + } + } else { + /* The default table uses 4K pages */ + vfio_host_win_add(container, info.dma32_window_start, + info.dma32_window_start + + info.dma32_window_size - 1, + 0x1000); + } } else { error_report("vfio: No available IOMMU models"); ret = -EINVAL; diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index f2c679e47c..44783c50ab 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -31,6 +31,7 @@ #include "sysemu/sysemu.h" #include "pci.h" #include "trace.h" +#include "qapi/error.h" #define MSIX_CAP_LENGTH 12 @@ -1170,6 +1171,7 @@ static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos) uint16_t ctrl; bool msi_64bit, msi_maskbit; int ret, entries; + Error *err = NULL; if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl), vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { @@ -1183,12 +1185,13 @@ static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos) trace_vfio_msi_setup(vdev->vbasedev.name, pos); - ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit); + ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err); if (ret < 0) { if (ret == -ENOTSUP) { return 0; } - error_report("vfio: msi_init failed"); + error_prepend(&err, "vfio: msi_init failed: "); + error_report_err(err); return ret; } vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0); diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c new file mode 100644 index 0000000000..0af342332c --- /dev/null +++ b/hw/vfio/spapr.c @@ -0,0 +1,210 @@ +/* + * DMA memory preregistration + * + * Authors: + * Alexey Kardashevskiy <aik@ozlabs.ru> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include <sys/ioctl.h> +#include <linux/vfio.h> + +#include "hw/vfio/vfio-common.h" +#include "hw/hw.h" +#include "qemu/error-report.h" +#include "trace.h" + +static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section) +{ + if (memory_region_is_iommu(section->mr)) { + hw_error("Cannot possibly preregister IOMMU memory"); + } + + return !memory_region_is_ram(section->mr) || + memory_region_is_skip_dump(section->mr); +} + +static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa) +{ + return memory_region_get_ram_ptr(section->mr) + + section->offset_within_region + + (gpa - section->offset_within_address_space); +} + +static void vfio_prereg_listener_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, + prereg_listener); + const hwaddr gpa = section->offset_within_address_space; + hwaddr end; + int ret; + hwaddr page_mask = qemu_real_host_page_mask; + struct vfio_iommu_spapr_register_memory reg = { + .argsz = sizeof(reg), + .flags = 0, + }; + + if (vfio_prereg_listener_skipped_section(section)) { + trace_vfio_prereg_listener_region_add_skip( + section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(int128_sub(section->size, int128_one()))); + return; + } + + if (unlikely((section->offset_within_address_space & ~page_mask) || + (section->offset_within_region & ~page_mask) || + (int128_get64(section->size) & ~page_mask))) { + error_report("%s received unaligned region", __func__); + return; + } + + end = section->offset_within_address_space + int128_get64(section->size); + if (gpa >= end) { + return; + } + + memory_region_ref(section->mr); + + reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa); + reg.size = end - gpa; + + ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, ®); + trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0); + if (ret) { + /* + * On the initfn path, store the first error in the container so we + * can gracefully fail. Runtime, there's not much we can do other + * than throw a hardware error. 
+ */ + if (!container->initialized) { + if (!container->error) { + container->error = ret; + } + } else { + hw_error("vfio: Memory registering failed, unable to continue"); + } + } +} + +static void vfio_prereg_listener_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, + prereg_listener); + const hwaddr gpa = section->offset_within_address_space; + hwaddr end; + int ret; + hwaddr page_mask = qemu_real_host_page_mask; + struct vfio_iommu_spapr_register_memory reg = { + .argsz = sizeof(reg), + .flags = 0, + }; + + if (vfio_prereg_listener_skipped_section(section)) { + trace_vfio_prereg_listener_region_del_skip( + section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(int128_sub(section->size, int128_one()))); + return; + } + + if (unlikely((section->offset_within_address_space & ~page_mask) || + (section->offset_within_region & ~page_mask) || + (int128_get64(section->size) & ~page_mask))) { + error_report("%s received unaligned region", __func__); + return; + } + + end = section->offset_within_address_space + int128_get64(section->size); + if (gpa >= end) { + return; + } + + reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa); + reg.size = end - gpa; + + ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, ®); + trace_vfio_prereg_unregister(reg.vaddr, reg.size, ret ? -errno : 0); +} + +const MemoryListener vfio_prereg_listener = { + .region_add = vfio_prereg_listener_region_add, + .region_del = vfio_prereg_listener_region_del, +}; + +int vfio_spapr_create_window(VFIOContainer *container, + MemoryRegionSection *section, + hwaddr *pgsize) +{ + int ret; + unsigned pagesize = memory_region_iommu_get_min_page_size(section->mr); + unsigned entries, pages; + struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) }; + + /* + * FIXME: For VFIO iommu types which have KVM acceleration to + * avoid bouncing all map/unmaps through qemu this way, this + * would be the right place to wire that up (tell the KVM + * device emulation the VFIO iommu handles to use). + */ + create.window_size = int128_get64(section->size); + create.page_shift = ctz64(pagesize); + /* + * SPAPR host supports multilevel TCE tables, there is some + * heuristic to decide how many levels we want for our table: + * 0..64 = 1; 65..4096 = 2; 4097..262144 = 3; 262145.. 
= 4 + */ + entries = create.window_size >> create.page_shift; + pages = MAX((entries * sizeof(uint64_t)) / getpagesize(), 1); + pages = MAX(pow2ceil(pages) - 1, 1); /* Round up */ + create.levels = ctz64(pages) / 6 + 1; + + ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); + if (ret) { + error_report("Failed to create a window, ret = %d (%m)", ret); + return -errno; + } + + if (create.start_addr != section->offset_within_address_space) { + vfio_spapr_remove_window(container, create.start_addr); + + error_report("Host doesn't support DMA window at %"HWADDR_PRIx", must be %"PRIx64, + section->offset_within_address_space, + (uint64_t)create.start_addr); + return -EINVAL; + } + trace_vfio_spapr_create_window(create.page_shift, + create.window_size, + create.start_addr); + *pgsize = pagesize; + + return 0; +} + +int vfio_spapr_remove_window(VFIOContainer *container, + hwaddr offset_within_address_space) +{ + struct vfio_iommu_spapr_tce_remove remove = { + .argsz = sizeof(remove), + .start_addr = offset_within_address_space, + }; + int ret; + + ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove); + if (ret) { + error_report("Failed to remove window at %"PRIx64, + (uint64_t)remove.start_addr); + return -errno; + } + + trace_vfio_spapr_remove_window(offset_within_address_space); + + return 0; +} diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index a768fb54ec..4bb7690c46 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -115,3 +115,11 @@ vfio_platform_populate_interrupts(int pin, int count, int flags) "- IRQ index %d vfio_intp_interrupt_set_pending(int index) "irq %d is set PENDING" vfio_platform_start_level_irqfd_injection(int index, int fd, int resamplefd) "IRQ index=%d, fd = %d, resamplefd = %d" vfio_platform_start_edge_irqfd_injection(int index, int fd) "IRQ index=%d, fd = %d" + +# hw/vfio/spapr.c +vfio_prereg_listener_region_add_skip(uint64_t start, uint64_t end) "%"PRIx64" - %"PRIx64 +vfio_prereg_listener_region_del_skip(uint64_t start, uint64_t end) "%"PRIx64" - %"PRIx64 +vfio_prereg_register(uint64_t va, uint64_t size, int ret) "va=%"PRIx64" size=%"PRIx64" ret=%d" +vfio_prereg_unregister(uint64_t va, uint64_t size, int ret) "va=%"PRIx64" size=%"PRIx64" ret=%d" +vfio_spapr_create_window(int ps, uint64_t ws, uint64_t off) "pageshift=0x%x winsize=0x%"PRIx64" offset=0x%"PRIx64 +vfio_spapr_remove_window(uint64_t off) "offset=%"PRIx64 diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c index b35890289f..d62372e597 100644 --- a/hw/virtio/vhost-backend.c +++ b/hw/virtio/vhost-backend.c @@ -138,6 +138,12 @@ static int vhost_kernel_set_vring_call(struct vhost_dev *dev, return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file); } +static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev, + struct vhost_vring_state *s) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s); +} + static int vhost_kernel_set_features(struct vhost_dev *dev, uint64_t features) { @@ -185,6 +191,8 @@ static const VhostOps kernel_ops = { .vhost_get_vring_base = vhost_kernel_get_vring_base, .vhost_set_vring_kick = vhost_kernel_set_vring_kick, .vhost_set_vring_call = vhost_kernel_set_vring_call, + .vhost_set_vring_busyloop_timeout = + vhost_kernel_set_vring_busyloop_timeout, .vhost_set_features = vhost_kernel_set_features, .vhost_get_features = vhost_kernel_get_features, .vhost_set_owner = vhost_kernel_set_owner, diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index a01394d5ac..ec3abda9d5 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -960,6 +960,28 @@ static void vhost_eventfd_del(MemoryListener *listener, { } +static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, + int n, uint32_t timeout) +{ + int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); + struct vhost_vring_state state = { + .index = vhost_vq_index, + .num = timeout, + }; + int r; + + if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) { + return -EINVAL; + } + + r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state); + if (r) { + return r; + } + + return 0; +} + static int vhost_virtqueue_init(struct vhost_dev *dev, struct vhost_virtqueue *vq, int n) { @@ -990,7 +1012,7 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) } int vhost_dev_init(struct vhost_dev *hdev, void *opaque, - VhostBackendType backend_type) + VhostBackendType backend_type, uint32_t busyloop_timeout) { uint64_t features; int i, r; @@ -1031,6 +1053,17 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, goto fail_vq; } } + + if (busyloop_timeout) { + for (i = 0; i < hdev->nvqs; ++i) { + r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, + busyloop_timeout); + if (r < 0) { + goto fail_busyloop; + } + } + } + hdev->features = features; hdev->memory_listener = (MemoryListener) { @@ -1073,6 +1106,11 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, hdev->memory_changed = false; memory_listener_register(&hdev->memory_listener, &address_space_memory); return 0; +fail_busyloop: + while (--i >= 0) { + vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); + } + i = hdev->nvqs; fail_vq: while (--i >= 0) { vhost_virtqueue_cleanup(hdev->vqs + i); diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 557d3f9e0c..1a22e6d993 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -139,13 +139,13 @@ static void balloon_stats_get_all(Object *obj, Visitor *v, const char *name, } visit_check_struct(v, &err); out_nested: - visit_end_struct(v); + visit_end_struct(v, NULL); if (!err) { visit_check_struct(v, &err); } out_end: - visit_end_struct(v); + visit_end_struct(v, NULL); out: error_propagate(errp, err); } diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c index 131376027b..a85b7c8abe 100644 --- a/hw/virtio/virtio-bus.c +++ b/hw/virtio/virtio-bus.c @@ -176,8 +176,8 @@ static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus, return r; } } else { - virtio_queue_set_host_notifier_fd_handler(vq, false, false); k->ioeventfd_assign(proxy, notifier, n, assign); + virtio_queue_set_host_notifier_fd_handler(vq, false, false); event_notifier_cleanup(notifier); } return r; @@ -251,31 +251,25 @@ int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign) { VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); DeviceState *proxy = DEVICE(BUS(bus)->parent); - VirtIODevice *vdev = virtio_bus_get_device(bus); - VirtQueue *vq = virtio_get_queue(vdev, n); if (!k->ioeventfd_started) { return -ENOSYS; } + k->ioeventfd_set_disabled(proxy, assign); if (assign) { /* * Stop using the generic ioeventfd, we are doing eventfd handling * ourselves below + * + * FIXME: We should just switch the handler and not deassign the + * ioeventfd. + * Otherwise, there's a window where we don't have an + * ioeventfd and we may end up with a notification where + * we don't expect one. 
*/ - k->ioeventfd_set_disabled(proxy, true); - } - /* - * Just switch the handler, don't deassign the ioeventfd. - * Otherwise, there's a window where we don't have an - * ioeventfd and we may end up with a notification where - * we don't expect one. - */ - virtio_queue_set_host_notifier_fd_handler(vq, assign, !assign); - if (!assign) { - /* Use generic ioeventfd handler again. */ - k->ioeventfd_set_disabled(proxy, false); + virtio_bus_stop_ioeventfd(bus); } - return 0; + return set_host_notifier_internal(proxy, bus, n, assign, false); } static char *virtio_bus_get_dev_path(DeviceState *dev) diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 7ed06eafa6..18153d5a39 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -1499,6 +1499,16 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) } qemu_get_be32s(f, &features); + /* + * Temporarily set guest_features low bits - needed by + * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS + * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ. + * + * Note: devices should always test host features in future - don't create + * new dependencies like this. + */ + vdev->guest_features = features; + config_len = qemu_get_be32(f); /* |
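The hcd-xhci.c hunk earlier in this diff replaces the msi/msix bit properties with OnOffAuto properties and the Error-returning msi_init(): msi=off skips MSI entirely, msi=on turns an msi_init() failure into a realize failure, and msi=auto falls back to INTx silently when the board cannot do MSI (-ENOTSUP). The standalone sketch below illustrates only that on/off/auto fallback policy; OnOffAuto, try_enable_msi() and the plain int return codes are simplified stand-ins for illustration, not the real QEMU msi_init()/Error APIs.

#include <stdio.h>

typedef enum { ON_OFF_AUTO_AUTO, ON_OFF_AUTO_ON, ON_OFF_AUTO_OFF } OnOffAuto;

/* Hypothetical stand-in for msi_init(): 0 on success, -1 when the
 * board/machine cannot provide MSI at all (the -ENOTSUP case). */
static int try_enable_msi(int board_supports_msi)
{
    return board_supports_msi ? 0 : -1;
}

/* msi=off: skip. msi=on: propagate the failure so realize fails.
 * msi=auto: fall back to INTx silently. Returns 0 if realize may continue. */
static int setup_msi(OnOffAuto msi, int board_supports_msi, int *msi_enabled)
{
    *msi_enabled = 0;
    if (msi == ON_OFF_AUTO_OFF) {
        return 0;
    }
    if (try_enable_msi(board_supports_msi) == 0) {
        *msi_enabled = 1;
        return 0;
    }
    if (msi == ON_OFF_AUTO_ON) {
        fprintf(stderr, "MSI requested but unavailable; "
                "use msi=auto (default) or msi=off\n");
        return -1;
    }
    return 0; /* auto: no MSI, but the device still realizes */
}

int main(void)
{
    int enabled;

    /* auto on a board without MSI: succeeds, MSI simply stays off */
    printf("auto: ret=%d enabled=%d\n",
           setup_msi(ON_OFF_AUTO_AUTO, 0, &enabled), enabled);
    /* explicit msi=on on a board without MSI: realize would fail */
    printf("on:   ret=%d enabled=%d\n",
           setup_msi(ON_OFF_AUTO_ON, 0, &enabled), enabled);
    return 0;
}

The vfio/pci.c hunk applies the same convention: vfio_msi_setup() treats -ENOTSUP from the new msi_init() signature as a non-fatal "no MSI" case and only reports other errors.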