Diffstat (limited to 'hw')
93 files changed, 4858 insertions, 1034 deletions
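Much of this series replaces scattered per-SoC base-address #defines with lookup tables indexed by a device enum: the Aspeed hunks below introduce aspeed_soc_ast2400_memmap/irqmap and an aspeed_soc_get_irq() helper, and the new sbsa-ref board uses the same memmap/irqmap pairing. The following is a minimal standalone sketch of that table-driven pattern; all names here (DEV_*, demo_memmap, demo_irqmap, demo_map_device) are illustrative stand-ins, not QEMU identifiers.

/*
 * Sketch of the table-driven SoC layout pattern used in this series:
 * one enum names each controller, and per-SoC arrays map the enum to
 * a base address and an interrupt line.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;

enum {
    DEV_UART,
    DEV_TIMER,
    DEV_SDRAM,
    DEV_COUNT,
};

static const hwaddr demo_memmap[DEV_COUNT] = {
    [DEV_UART]  = 0x1e784000,
    [DEV_TIMER] = 0x1e782000,
    [DEV_SDRAM] = 0x80000000,
};

static const int demo_irqmap[DEV_COUNT] = {
    [DEV_UART]  = 10,
    [DEV_TIMER] = 16,
    [DEV_SDRAM] = -1,   /* no interrupt line */
};

/* A realize function then looks devices up by index instead of using
 * hard-coded base-address macros. */
static void demo_map_device(int dev)
{
    printf("dev %d -> base 0x%" PRIx64 ", irq %d\n",
           dev, demo_memmap[dev], demo_irqmap[dev]);
}

int main(void)
{
    for (int i = 0; i < DEV_COUNT; i++) {
        demo_map_device(i);
    }
    return 0;
}

This mirrors what the hunks below do: aspeed_soc_realize() indexes sc->info->memmap[] and aspeed_soc_get_irq() indexes sc->info->irqmap[] rather than referencing ASPEED_SOC_*_BASE constants, which is what lets one realize path serve both the AST2400 and AST2500 layouts, and sbsa-ref follows the same scheme with sbsa_ref_memmap[] and sbsa_ref_irqmap[].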
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 9aced9d54d..ab65ecd216 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -184,6 +184,20 @@ config REALVIEW select DS1338 # I2C RTC+NVRAM select USB_OHCI +config SBSA_REF + bool + imply PCI_DEVICES + select AHCI + select ARM_SMMUV3 + select GPIO_KEY + select PCI_EXPRESS + select PCI_EXPRESS_GENERIC_BRIDGE + select PFLASH_CFI01 + select PL011 # UART + select PL031 # RTC + select PL061 # GPIO + select USB_EHCI_SYSBUS + config SABRELITE bool select FSL_IMX6 diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs index 994e67dd0d..43ce8d5b19 100644 --- a/hw/arm/Makefile.objs +++ b/hw/arm/Makefile.objs @@ -19,6 +19,7 @@ obj-$(CONFIG_SPITZ) += spitz.o obj-$(CONFIG_TOSA) += tosa.o obj-$(CONFIG_Z2) += z2.o obj-$(CONFIG_REALVIEW) += realview.o +obj-$(CONFIG_SBSA_REF) += sbsa-ref.o obj-$(CONFIG_STELLARIS) += stellaris.o obj-$(CONFIG_COLLIE) += collie.o obj-$(CONFIG_VERSATILE) += versatilepb.o diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index d2ad2da24b..8b6d304247 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -22,17 +22,18 @@ #include "hw/misc/tmp105.h" #include "qemu/log.h" #include "sysemu/block-backend.h" +#include "sysemu/sysemu.h" #include "hw/loader.h" #include "qemu/error-report.h" #include "qemu/units.h" static struct arm_boot_info aspeed_board_binfo = { .board_id = -1, /* device-tree-only board */ - .nb_cpus = 1, }; struct AspeedBoardState { AspeedSoCState soc; + MemoryRegion ram_container; MemoryRegion ram; MemoryRegion max_ram; }; @@ -72,6 +73,17 @@ struct AspeedBoardState { SCU_AST2500_HW_STRAP_ACPI_ENABLE | \ SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER)) +/* Swift hardware value: 0xF11AD206 */ +#define SWIFT_BMC_HW_STRAP1 ( \ + AST2500_HW_STRAP1_DEFAULTS | \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_H_PLL_BYPASS_EN | \ + SCU_AST2500_HW_STRAP_ACPI_ENABLE | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER)) + /* Witherspoon hardware value: 0xF10AD216 (but use romulus definition) */ #define WITHERSPOON_BMC_HW_STRAP1 ROMULUS_BMC_HW_STRAP1 @@ -159,6 +171,10 @@ static void aspeed_board_init(MachineState *machine, ram_addr_t max_ram_size; bmc = g_new0(AspeedBoardState, 1); + + memory_region_init(&bmc->ram_container, NULL, "aspeed-ram-container", + UINT32_MAX); + object_initialize_child(OBJECT(machine), "soc", &bmc->soc, (sizeof(bmc->soc)), cfg->soc_name, &error_abort, NULL); @@ -171,6 +187,8 @@ static void aspeed_board_init(MachineState *machine, &error_abort); object_property_set_int(OBJECT(&bmc->soc), cfg->num_cs, "num-cs", &error_abort); + object_property_set_int(OBJECT(&bmc->soc), smp_cpus, "num-cpus", + &error_abort); if (machine->kernel_filename) { /* * When booting with a -kernel command line there is no u-boot @@ -191,18 +209,16 @@ static void aspeed_board_init(MachineState *machine, &error_abort); memory_region_allocate_system_memory(&bmc->ram, NULL, "ram", ram_size); - memory_region_add_subregion(get_system_memory(), sc->info->sdram_base, - &bmc->ram); - object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram), - &error_abort); + memory_region_add_subregion(&bmc->ram_container, 0, &bmc->ram); + memory_region_add_subregion(get_system_memory(), + sc->info->memmap[ASPEED_SDRAM], + &bmc->ram_container); max_ram_size = object_property_get_uint(OBJECT(&bmc->soc), "max-ram-size", &error_abort); memory_region_init_io(&bmc->max_ram, NULL, &max_ram_ops, NULL, "max_ram", 
max_ram_size - ram_size); - memory_region_add_subregion(get_system_memory(), - sc->info->sdram_base + ram_size, - &bmc->max_ram); + memory_region_add_subregion(&bmc->ram_container, ram_size, &bmc->max_ram); aspeed_board_init_flashes(&bmc->soc.fmc, cfg->fmc_model, &error_abort); aspeed_board_init_flashes(&bmc->soc.spi[0], cfg->spi_model, &error_abort); @@ -229,7 +245,8 @@ static void aspeed_board_init(MachineState *machine, aspeed_board_binfo.initrd_filename = machine->initrd_filename; aspeed_board_binfo.kernel_cmdline = machine->kernel_cmdline; aspeed_board_binfo.ram_size = ram_size; - aspeed_board_binfo.loader_start = sc->info->sdram_base; + aspeed_board_binfo.loader_start = sc->info->memmap[ASPEED_SDRAM]; + aspeed_board_binfo.nb_cpus = bmc->soc.num_cpus; if (cfg->i2c_init) { cfg->i2c_init(bmc); @@ -286,6 +303,35 @@ static void romulus_bmc_i2c_init(AspeedBoardState *bmc) i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 11), "ds1338", 0x32); } +static void swift_bmc_i2c_init(AspeedBoardState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 3), "pca9552", 0x60); + + /* The swift board expects a TMP275 but a TMP105 is compatible */ + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 7), "tmp105", 0x48); + /* The swift board expects a pca9551 but a pca9552 is compatible */ + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 7), "pca9552", 0x60); + + /* The swift board expects an Epson RX8900 RTC but a ds1338 is compatible */ + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 8), "ds1338", 0x32); + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 8), "pca9552", 0x60); + + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 9), "tmp423", 0x4c); + /* The swift board expects a pca9539 but a pca9552 is compatible */ + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 9), "pca9552", 0x74); + + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 10), "tmp423", 0x4c); + /* The swift board expects a pca9539 but a pca9552 is compatible */ + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 10), "pca9552", + 0x74); + + /* The swift board expects a TMP275 but a TMP105 is compatible */ + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 12), "tmp105", 0x48); + i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 12), "tmp105", 0x4a); +} + static void witherspoon_bmc_i2c_init(AspeedBoardState *bmc) { AspeedSoCState *soc = &bmc->soc; @@ -326,7 +372,7 @@ static void aspeed_machine_class_init(ObjectClass *oc, void *data) mc->desc = board->desc; mc->init = aspeed_machine_init; - mc->max_cpus = 1; + mc->max_cpus = ASPEED_CPUS_NUM; mc->no_sdcard = 1; mc->no_floppy = 1; mc->no_cdrom = 1; @@ -377,6 +423,16 @@ static const AspeedBoardConfig aspeed_boards[] = { .i2c_init = romulus_bmc_i2c_init, .ram = 512 * MiB, }, { + .name = MACHINE_TYPE_NAME("swift-bmc"), + .desc = "OpenPOWER Swift BMC (ARM1176)", + .soc_name = "ast2500-a1", + .hw_strap1 = SWIFT_BMC_HW_STRAP1, + .fmc_model = "mx66l1g45g", + .spi_model = "mx66l1g45g", + .num_cs = 2, + .i2c_init = swift_bmc_i2c_init, + .ram = 512 * MiB, + }, { .name = MACHINE_TYPE_NAME("witherspoon-bmc"), .desc = "OpenPOWER Witherspoon BMC (ARM1176)", .soc_name = "ast2500-a1", diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c index a2ea8c7484..c6fb3700f2 100644 --- a/hw/arm/aspeed_soc.c +++ b/hw/arm/aspeed_soc.c @@ -19,36 +19,99 @@ #include "hw/char/serial.h" #include "qemu/log.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "hw/i2c/aspeed_i2c.h" 
#include "net/net.h" -#define ASPEED_SOC_UART_5_BASE 0x00184000 #define ASPEED_SOC_IOMEM_SIZE 0x00200000 -#define ASPEED_SOC_IOMEM_BASE 0x1E600000 -#define ASPEED_SOC_FMC_BASE 0x1E620000 -#define ASPEED_SOC_SPI_BASE 0x1E630000 -#define ASPEED_SOC_SPI2_BASE 0x1E631000 -#define ASPEED_SOC_VIC_BASE 0x1E6C0000 -#define ASPEED_SOC_SDMC_BASE 0x1E6E0000 -#define ASPEED_SOC_SCU_BASE 0x1E6E2000 -#define ASPEED_SOC_SRAM_BASE 0x1E720000 -#define ASPEED_SOC_TIMER_BASE 0x1E782000 -#define ASPEED_SOC_WDT_BASE 0x1E785000 -#define ASPEED_SOC_I2C_BASE 0x1E78A000 -#define ASPEED_SOC_ETH1_BASE 0x1E660000 -#define ASPEED_SOC_ETH2_BASE 0x1E680000 - -static const int uart_irqs[] = { 9, 32, 33, 34, 10 }; -static const int timer_irqs[] = { 16, 17, 18, 35, 36, 37, 38, 39, }; - -#define AST2400_SDRAM_BASE 0x40000000 -#define AST2500_SDRAM_BASE 0x80000000 - -static const hwaddr aspeed_soc_ast2400_spi_bases[] = { ASPEED_SOC_SPI_BASE }; -static const char *aspeed_soc_ast2400_typenames[] = { "aspeed.smc.spi" }; -static const hwaddr aspeed_soc_ast2500_spi_bases[] = { ASPEED_SOC_SPI_BASE, - ASPEED_SOC_SPI2_BASE}; +static const hwaddr aspeed_soc_ast2400_memmap[] = { + [ASPEED_IOMEM] = 0x1E600000, + [ASPEED_FMC] = 0x1E620000, + [ASPEED_SPI1] = 0x1E630000, + [ASPEED_VIC] = 0x1E6C0000, + [ASPEED_SDMC] = 0x1E6E0000, + [ASPEED_SCU] = 0x1E6E2000, + [ASPEED_XDMA] = 0x1E6E7000, + [ASPEED_ADC] = 0x1E6E9000, + [ASPEED_SRAM] = 0x1E720000, + [ASPEED_GPIO] = 0x1E780000, + [ASPEED_RTC] = 0x1E781000, + [ASPEED_TIMER1] = 0x1E782000, + [ASPEED_WDT] = 0x1E785000, + [ASPEED_PWM] = 0x1E786000, + [ASPEED_LPC] = 0x1E789000, + [ASPEED_IBT] = 0x1E789140, + [ASPEED_I2C] = 0x1E78A000, + [ASPEED_ETH1] = 0x1E660000, + [ASPEED_ETH2] = 0x1E680000, + [ASPEED_UART1] = 0x1E783000, + [ASPEED_UART5] = 0x1E784000, + [ASPEED_VUART] = 0x1E787000, + [ASPEED_SDRAM] = 0x40000000, +}; + +static const hwaddr aspeed_soc_ast2500_memmap[] = { + [ASPEED_IOMEM] = 0x1E600000, + [ASPEED_FMC] = 0x1E620000, + [ASPEED_SPI1] = 0x1E630000, + [ASPEED_SPI2] = 0x1E631000, + [ASPEED_VIC] = 0x1E6C0000, + [ASPEED_SDMC] = 0x1E6E0000, + [ASPEED_SCU] = 0x1E6E2000, + [ASPEED_XDMA] = 0x1E6E7000, + [ASPEED_ADC] = 0x1E6E9000, + [ASPEED_SRAM] = 0x1E720000, + [ASPEED_GPIO] = 0x1E780000, + [ASPEED_RTC] = 0x1E781000, + [ASPEED_TIMER1] = 0x1E782000, + [ASPEED_WDT] = 0x1E785000, + [ASPEED_PWM] = 0x1E786000, + [ASPEED_LPC] = 0x1E789000, + [ASPEED_IBT] = 0x1E789140, + [ASPEED_I2C] = 0x1E78A000, + [ASPEED_ETH1] = 0x1E660000, + [ASPEED_ETH2] = 0x1E680000, + [ASPEED_UART1] = 0x1E783000, + [ASPEED_UART5] = 0x1E784000, + [ASPEED_VUART] = 0x1E787000, + [ASPEED_SDRAM] = 0x80000000, +}; + +static const int aspeed_soc_ast2400_irqmap[] = { + [ASPEED_UART1] = 9, + [ASPEED_UART2] = 32, + [ASPEED_UART3] = 33, + [ASPEED_UART4] = 34, + [ASPEED_UART5] = 10, + [ASPEED_VUART] = 8, + [ASPEED_FMC] = 19, + [ASPEED_SDMC] = 0, + [ASPEED_SCU] = 21, + [ASPEED_ADC] = 31, + [ASPEED_GPIO] = 20, + [ASPEED_RTC] = 22, + [ASPEED_TIMER1] = 16, + [ASPEED_TIMER2] = 17, + [ASPEED_TIMER3] = 18, + [ASPEED_TIMER4] = 35, + [ASPEED_TIMER5] = 36, + [ASPEED_TIMER6] = 37, + [ASPEED_TIMER7] = 38, + [ASPEED_TIMER8] = 39, + [ASPEED_WDT] = 27, + [ASPEED_PWM] = 28, + [ASPEED_LPC] = 8, + [ASPEED_IBT] = 8, /* LPC */ + [ASPEED_I2C] = 12, + [ASPEED_ETH1] = 2, + [ASPEED_ETH2] = 3, + [ASPEED_XDMA] = 6, +}; + +#define aspeed_soc_ast2500_irqmap aspeed_soc_ast2400_irqmap + +static const char *aspeed_soc_ast2400_typenames[] = { "aspeed.smc.spi" }; static const char *aspeed_soc_ast2500_typenames[] = { "aspeed.smc.ast2500-spi1", "aspeed.smc.ast2500-spi2" 
}; @@ -57,57 +120,71 @@ static const AspeedSoCInfo aspeed_socs[] = { .name = "ast2400-a0", .cpu_type = ARM_CPU_TYPE_NAME("arm926"), .silicon_rev = AST2400_A0_SILICON_REV, - .sdram_base = AST2400_SDRAM_BASE, .sram_size = 0x8000, .spis_num = 1, - .spi_bases = aspeed_soc_ast2400_spi_bases, .fmc_typename = "aspeed.smc.fmc", .spi_typename = aspeed_soc_ast2400_typenames, .wdts_num = 2, + .irqmap = aspeed_soc_ast2400_irqmap, + .memmap = aspeed_soc_ast2400_memmap, + .num_cpus = 1, }, { .name = "ast2400-a1", .cpu_type = ARM_CPU_TYPE_NAME("arm926"), .silicon_rev = AST2400_A1_SILICON_REV, - .sdram_base = AST2400_SDRAM_BASE, .sram_size = 0x8000, .spis_num = 1, - .spi_bases = aspeed_soc_ast2400_spi_bases, .fmc_typename = "aspeed.smc.fmc", .spi_typename = aspeed_soc_ast2400_typenames, .wdts_num = 2, + .irqmap = aspeed_soc_ast2400_irqmap, + .memmap = aspeed_soc_ast2400_memmap, + .num_cpus = 1, }, { .name = "ast2400", .cpu_type = ARM_CPU_TYPE_NAME("arm926"), .silicon_rev = AST2400_A0_SILICON_REV, - .sdram_base = AST2400_SDRAM_BASE, .sram_size = 0x8000, .spis_num = 1, - .spi_bases = aspeed_soc_ast2400_spi_bases, .fmc_typename = "aspeed.smc.fmc", .spi_typename = aspeed_soc_ast2400_typenames, .wdts_num = 2, + .irqmap = aspeed_soc_ast2400_irqmap, + .memmap = aspeed_soc_ast2400_memmap, + .num_cpus = 1, }, { .name = "ast2500-a1", .cpu_type = ARM_CPU_TYPE_NAME("arm1176"), .silicon_rev = AST2500_A1_SILICON_REV, - .sdram_base = AST2500_SDRAM_BASE, .sram_size = 0x9000, .spis_num = 2, - .spi_bases = aspeed_soc_ast2500_spi_bases, .fmc_typename = "aspeed.smc.ast2500-fmc", .spi_typename = aspeed_soc_ast2500_typenames, .wdts_num = 3, + .irqmap = aspeed_soc_ast2500_irqmap, + .memmap = aspeed_soc_ast2500_memmap, + .num_cpus = 1, }, }; +static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + + return qdev_get_gpio_in(DEVICE(&s->vic), sc->info->irqmap[ctrl]); +} + static void aspeed_soc_init(Object *obj) { AspeedSoCState *s = ASPEED_SOC(obj); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); int i; - object_initialize_child(obj, "cpu", OBJECT(&s->cpu), sizeof(s->cpu), - sc->info->cpu_type, &error_abort, NULL); + for (i = 0; i < sc->info->num_cpus; i++) { + object_initialize_child(obj, "cpu[*]", OBJECT(&s->cpu[i]), + sizeof(s->cpu[i]), sc->info->cpu_type, + &error_abort, NULL); + } sysbus_init_child_obj(obj, "scu", OBJECT(&s->scu), sizeof(s->scu), TYPE_ASPEED_SCU); @@ -123,6 +200,9 @@ static void aspeed_soc_init(Object *obj) sysbus_init_child_obj(obj, "vic", OBJECT(&s->vic), sizeof(s->vic), TYPE_ASPEED_VIC); + sysbus_init_child_obj(obj, "rtc", OBJECT(&s->rtc), sizeof(s->rtc), + TYPE_ASPEED_RTC); + sysbus_init_child_obj(obj, "timerctrl", OBJECT(&s->timerctrl), sizeof(s->timerctrl), TYPE_ASPEED_TIMER); object_property_add_const_link(OBJECT(&s->timerctrl), "scu", @@ -155,10 +235,17 @@ static void aspeed_soc_init(Object *obj) sizeof(s->wdt[i]), TYPE_ASPEED_WDT); qdev_prop_set_uint32(DEVICE(&s->wdt[i]), "silicon-rev", sc->info->silicon_rev); + object_property_add_const_link(OBJECT(&s->wdt[i]), "scu", + OBJECT(&s->scu), &error_abort); } - sysbus_init_child_obj(obj, "ftgmac100", OBJECT(&s->ftgmac100), - sizeof(s->ftgmac100), TYPE_FTGMAC100); + for (i = 0; i < ASPEED_MACS_NUM; i++) { + sysbus_init_child_obj(obj, "ftgmac100[*]", OBJECT(&s->ftgmac100[i]), + sizeof(s->ftgmac100[i]), TYPE_FTGMAC100); + } + + sysbus_init_child_obj(obj, "xdma", OBJECT(&s->xdma), sizeof(s->xdma), + TYPE_ASPEED_XDMA); } static void aspeed_soc_realize(DeviceState *dev, Error **errp) @@ -169,14 +256,22 
@@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) Error *err = NULL, *local_err = NULL; /* IO space */ - create_unimplemented_device("aspeed_soc.io", - ASPEED_SOC_IOMEM_BASE, ASPEED_SOC_IOMEM_SIZE); + create_unimplemented_device("aspeed_soc.io", sc->info->memmap[ASPEED_IOMEM], + ASPEED_SOC_IOMEM_SIZE); + + if (s->num_cpus > sc->info->num_cpus) { + warn_report("%s: invalid number of CPUs %d, using default %d", + sc->info->name, s->num_cpus, sc->info->num_cpus); + s->num_cpus = sc->info->num_cpus; + } /* CPU */ - object_property_set_bool(OBJECT(&s->cpu), true, "realized", &err); - if (err) { - error_propagate(errp, err); - return; + for (i = 0; i < s->num_cpus; i++) { + object_property_set_bool(OBJECT(&s->cpu[i]), true, "realized", &err); + if (err) { + error_propagate(errp, err); + return; + } } /* SRAM */ @@ -186,8 +281,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) error_propagate(errp, err); return; } - memory_region_add_subregion(get_system_memory(), ASPEED_SOC_SRAM_BASE, - &s->sram); + memory_region_add_subregion(get_system_memory(), + sc->info->memmap[ASPEED_SRAM], &s->sram); /* SCU */ object_property_set_bool(OBJECT(&s->scu), true, "realized", &err); @@ -195,7 +290,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) error_propagate(errp, err); return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, ASPEED_SOC_SCU_BASE); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, sc->info->memmap[ASPEED_SCU]); /* VIC */ object_property_set_bool(OBJECT(&s->vic), true, "realized", &err); @@ -203,29 +298,39 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) error_propagate(errp, err); return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, ASPEED_SOC_VIC_BASE); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, sc->info->memmap[ASPEED_VIC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0, qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1, qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ)); + /* RTC */ + object_property_set_bool(OBJECT(&s->rtc), true, "realized", &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, sc->info->memmap[ASPEED_RTC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, + aspeed_soc_get_irq(s, ASPEED_RTC)); + /* Timer */ object_property_set_bool(OBJECT(&s->timerctrl), true, "realized", &err); if (err) { error_propagate(errp, err); return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, ASPEED_SOC_TIMER_BASE); - for (i = 0; i < ARRAY_SIZE(timer_irqs); i++) { - qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->vic), timer_irqs[i]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, + sc->info->memmap[ASPEED_TIMER1]); + for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { + qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_TIMER1 + i); sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } /* UART - attach an 8250 to the IO space as our UART5 */ if (serial_hd(0)) { - qemu_irq uart5 = qdev_get_gpio_in(DEVICE(&s->vic), uart_irqs[4]); - serial_mm_init(get_system_memory(), - ASPEED_SOC_IOMEM_BASE + ASPEED_SOC_UART_5_BASE, 2, + qemu_irq uart5 = aspeed_soc_get_irq(s, ASPEED_UART5); + serial_mm_init(get_system_memory(), sc->info->memmap[ASPEED_UART5], 2, uart5, 38400, serial_hd(0), DEVICE_LITTLE_ENDIAN); } @@ -235,21 +340,27 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) error_propagate(errp, err); return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, ASPEED_SOC_I2C_BASE); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 
0, sc->info->memmap[ASPEED_I2C]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0, - qdev_get_gpio_in(DEVICE(&s->vic), 12)); + aspeed_soc_get_irq(s, ASPEED_I2C)); /* FMC, The number of CS is set at the board level */ + object_property_set_int(OBJECT(&s->fmc), sc->info->memmap[ASPEED_SDRAM], + "sdram-base", &err); + if (err) { + error_propagate(errp, err); + return; + } object_property_set_bool(OBJECT(&s->fmc), true, "realized", &err); if (err) { error_propagate(errp, err); return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, ASPEED_SOC_FMC_BASE); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->info->memmap[ASPEED_FMC]); sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1, s->fmc.ctrl->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, - qdev_get_gpio_in(DEVICE(&s->vic), 19)); + aspeed_soc_get_irq(s, ASPEED_FMC)); /* SPI */ for (i = 0; i < sc->info->spis_num; i++) { @@ -261,7 +372,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) error_propagate(errp, err); return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, sc->info->spi_bases[i]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, + sc->info->memmap[ASPEED_SPI1 + i]); sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1, s->spi[i].ctrl->flash_window_base); } @@ -272,7 +384,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) error_propagate(errp, err); return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, ASPEED_SOC_SDMC_BASE); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, sc->info->memmap[ASPEED_SDMC]); /* Watch dog */ for (i = 0; i < sc->info->wdts_num; i++) { @@ -282,23 +394,42 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, - ASPEED_SOC_WDT_BASE + i * 0x20); + sc->info->memmap[ASPEED_WDT] + i * 0x20); } /* Net */ - qdev_set_nic_properties(DEVICE(&s->ftgmac100), &nd_table[0]); - object_property_set_bool(OBJECT(&s->ftgmac100), true, "aspeed", &err); - object_property_set_bool(OBJECT(&s->ftgmac100), true, "realized", - &local_err); - error_propagate(&err, local_err); + for (i = 0; i < nb_nics; i++) { + qdev_set_nic_properties(DEVICE(&s->ftgmac100[i]), &nd_table[i]); + object_property_set_bool(OBJECT(&s->ftgmac100[i]), true, "aspeed", + &err); + object_property_set_bool(OBJECT(&s->ftgmac100[i]), true, "realized", + &local_err); + error_propagate(&err, local_err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + sc->info->memmap[ASPEED_ETH1 + i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + aspeed_soc_get_irq(s, ASPEED_ETH1 + i)); + } + + /* XDMA */ + object_property_set_bool(OBJECT(&s->xdma), true, "realized", &err); if (err) { error_propagate(errp, err); return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100), 0, ASPEED_SOC_ETH1_BASE); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100), 0, - qdev_get_gpio_in(DEVICE(&s->vic), 2)); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->xdma), 0, + sc->info->memmap[ASPEED_XDMA]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0, + aspeed_soc_get_irq(s, ASPEED_XDMA)); } +static Property aspeed_soc_properties[] = { + DEFINE_PROP_UINT32("num-cpus", AspeedSoCState, num_cpus, 0), + DEFINE_PROP_END_OF_LIST(), +}; static void aspeed_soc_class_init(ObjectClass *oc, void *data) { @@ -309,6 +440,7 @@ static void aspeed_soc_class_init(ObjectClass *oc, void *data) dc->realize = aspeed_soc_realize; /* Reason: Uses serial_hds and nd_table in realize() directly */ dc->user_creatable = false; + dc->props = 
aspeed_soc_properties; } static const TypeInfo aspeed_soc_type_info = { diff --git a/hw/arm/boot.c b/hw/arm/boot.c index b2f93f6bef..1fb24fbef2 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -1109,10 +1109,11 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, info->initrd_filename); exit(1); } - if (info->initrd_start + initrd_size > info->ram_size) { + if (info->initrd_start + initrd_size > ram_end) { error_report("could not load initrd '%s': " "too big to fit into RAM after the kernel", info->initrd_filename); + exit(1); } } else { initrd_size = 0; diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c index b7e3526b4f..2eddf3f25c 100644 --- a/hw/arm/fsl-imx7.c +++ b/hw/arm/fsl-imx7.c @@ -526,6 +526,17 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) */ create_unimplemented_device("lcdif", FSL_IMX7_LCDIF_ADDR, FSL_IMX7_LCDIF_SIZE); + + /* + * DMA APBH + */ + create_unimplemented_device("dma-apbh", FSL_IMX7_DMA_APBH_ADDR, + FSL_IMX7_DMA_APBH_SIZE); + /* + * PCIe PHY + */ + create_unimplemented_device("pcie-phy", FSL_IMX7_PCIE_PHY_ADDR, + FSL_IMX7_PCIE_PHY_SIZE); } static void fsl_imx7_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c index 8c550a8bdd..2c9984bb3b 100644 --- a/hw/arm/msf2-som.c +++ b/hw/arm/msf2-som.c @@ -53,6 +53,7 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { error_report("This board can only be used with CPU %s", mc->default_cpu_type); + exit(1); } memory_region_init_ram(ddr, NULL, "ddr-ram", DDR_SIZE, diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c new file mode 100644 index 0000000000..ee53f0ff60 --- /dev/null +++ b/hw/arm/sbsa-ref.c @@ -0,0 +1,806 @@ +/* + * ARM SBSA Reference Platform emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Hongbo Zhang <hongbo.zhang@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/units.h" +#include "sysemu/device_tree.h" +#include "sysemu/numa.h" +#include "sysemu/sysemu.h" +#include "exec/address-spaces.h" +#include "exec/hwaddr.h" +#include "kvm_arm.h" +#include "hw/arm/boot.h" +#include "hw/block/flash.h" +#include "hw/boards.h" +#include "hw/ide/internal.h" +#include "hw/ide/ahci_internal.h" +#include "hw/intc/arm_gicv3_common.h" +#include "hw/loader.h" +#include "hw/pci-host/gpex.h" +#include "hw/usb.h" +#include "net/net.h" + +#define RAMLIMIT_GB 8192 +#define RAMLIMIT_BYTES (RAMLIMIT_GB * GiB) + +#define NUM_IRQS 256 +#define NUM_SMMU_IRQS 4 +#define NUM_SATA_PORTS 6 + +#define VIRTUAL_PMU_IRQ 7 +#define ARCH_GIC_MAINT_IRQ 9 +#define ARCH_TIMER_VIRT_IRQ 11 +#define ARCH_TIMER_S_EL1_IRQ 13 +#define ARCH_TIMER_NS_EL1_IRQ 14 +#define ARCH_TIMER_NS_EL2_IRQ 10 + +enum { + SBSA_FLASH, + SBSA_MEM, + SBSA_CPUPERIPHS, + SBSA_GIC_DIST, + SBSA_GIC_REDIST, + SBSA_SMMU, + SBSA_UART, + SBSA_RTC, + SBSA_PCIE, + SBSA_PCIE_MMIO, + SBSA_PCIE_MMIO_HIGH, + SBSA_PCIE_PIO, + SBSA_PCIE_ECAM, + SBSA_GPIO, + SBSA_SECURE_UART, + SBSA_SECURE_UART_MM, + SBSA_SECURE_MEM, + SBSA_AHCI, + SBSA_EHCI, +}; + +typedef struct MemMapEntry { + hwaddr base; + hwaddr size; +} MemMapEntry; + +typedef struct { + MachineState parent; + struct arm_boot_info bootinfo; + int smp_cpus; + void *fdt; + int fdt_size; + int psci_conduit; + PFlashCFI01 *flash[2]; +} SBSAMachineState; + +#define TYPE_SBSA_MACHINE MACHINE_TYPE_NAME("sbsa-ref") +#define SBSA_MACHINE(obj) \ + OBJECT_CHECK(SBSAMachineState, (obj), TYPE_SBSA_MACHINE) + +static const MemMapEntry sbsa_ref_memmap[] = { + /* 512M boot ROM */ + [SBSA_FLASH] = { 0, 0x20000000 }, + /* 512M secure memory */ + [SBSA_SECURE_MEM] = { 0x20000000, 0x20000000 }, + /* Space reserved for CPU peripheral devices */ + [SBSA_CPUPERIPHS] = { 0x40000000, 0x00040000 }, + [SBSA_GIC_DIST] = { 0x40060000, 0x00010000 }, + [SBSA_GIC_REDIST] = { 0x40080000, 0x04000000 }, + [SBSA_UART] = { 0x60000000, 0x00001000 }, + [SBSA_RTC] = { 0x60010000, 0x00001000 }, + [SBSA_GPIO] = { 0x60020000, 0x00001000 }, + [SBSA_SECURE_UART] = { 0x60030000, 0x00001000 }, + [SBSA_SECURE_UART_MM] = { 0x60040000, 0x00001000 }, + [SBSA_SMMU] = { 0x60050000, 0x00020000 }, + /* Space here reserved for more SMMUs */ + [SBSA_AHCI] = { 0x60100000, 0x00010000 }, + [SBSA_EHCI] = { 0x60110000, 0x00010000 }, + /* Space here reserved for other devices */ + [SBSA_PCIE_PIO] = { 0x7fff0000, 0x00010000 }, + /* 32-bit address PCIE MMIO space */ + [SBSA_PCIE_MMIO] = { 0x80000000, 0x70000000 }, + /* 256M PCIE ECAM space */ + [SBSA_PCIE_ECAM] = { 0xf0000000, 0x10000000 }, + /* ~1TB PCIE MMIO space (4GB to 1024GB boundary) */ + [SBSA_PCIE_MMIO_HIGH] = { 0x100000000ULL, 0xFF00000000ULL }, + [SBSA_MEM] = { 0x10000000000ULL, RAMLIMIT_BYTES }, +}; + +static const int sbsa_ref_irqmap[] = { + [SBSA_UART] = 1, + [SBSA_RTC] = 2, + [SBSA_PCIE] = 3, /* ... to 6 */ + [SBSA_GPIO] = 7, + [SBSA_SECURE_UART] = 8, + [SBSA_SECURE_UART_MM] = 9, + [SBSA_AHCI] = 10, + [SBSA_EHCI] = 11, +}; + +/* + * Firmware on this machine only uses ACPI table to load OS, these limited + * device tree nodes are just to let firmware know the info which varies from + * command line parameters, so it is not necessary to be fully compatible + * with the kernel CPU and NUMA binding rules. 
+ */ +static void create_fdt(SBSAMachineState *sms) +{ + void *fdt = create_device_tree(&sms->fdt_size); + const MachineState *ms = MACHINE(sms); + int cpu; + + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + sms->fdt = fdt; + + qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,sbsa-ref"); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2); + + if (have_numa_distance) { + int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t); + uint32_t *matrix = g_malloc0(size); + int idx, i, j; + + for (i = 0; i < nb_numa_nodes; i++) { + for (j = 0; j < nb_numa_nodes; j++) { + idx = (i * nb_numa_nodes + j) * 3; + matrix[idx + 0] = cpu_to_be32(i); + matrix[idx + 1] = cpu_to_be32(j); + matrix[idx + 2] = cpu_to_be32(numa_info[i].distance[j]); + } + } + + qemu_fdt_add_subnode(fdt, "/distance-map"); + qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix", + matrix, size); + g_free(matrix); + } + + qemu_fdt_add_subnode(sms->fdt, "/cpus"); + + for (cpu = sms->smp_cpus - 1; cpu >= 0; cpu--) { + char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu); + ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); + CPUState *cs = CPU(armcpu); + + qemu_fdt_add_subnode(sms->fdt, nodename); + + if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) { + qemu_fdt_setprop_cell(sms->fdt, nodename, "numa-node-id", + ms->possible_cpus->cpus[cs->cpu_index].props.node_id); + } + + g_free(nodename); + } +} + +#define SBSA_FLASH_SECTOR_SIZE (256 * KiB) + +static PFlashCFI01 *sbsa_flash_create1(SBSAMachineState *sms, + const char *name, + const char *alias_prop_name) +{ + /* + * Create a single flash device. We use the same parameters as + * the flash devices on the Versatile Express board. + */ + DeviceState *dev = qdev_create(NULL, TYPE_PFLASH_CFI01); + + qdev_prop_set_uint64(dev, "sector-length", SBSA_FLASH_SECTOR_SIZE); + qdev_prop_set_uint8(dev, "width", 4); + qdev_prop_set_uint8(dev, "device-width", 2); + qdev_prop_set_bit(dev, "big-endian", false); + qdev_prop_set_uint16(dev, "id0", 0x89); + qdev_prop_set_uint16(dev, "id1", 0x18); + qdev_prop_set_uint16(dev, "id2", 0x00); + qdev_prop_set_uint16(dev, "id3", 0x00); + qdev_prop_set_string(dev, "name", name); + object_property_add_child(OBJECT(sms), name, OBJECT(dev), + &error_abort); + object_property_add_alias(OBJECT(sms), alias_prop_name, + OBJECT(dev), "drive", &error_abort); + return PFLASH_CFI01(dev); +} + +static void sbsa_flash_create(SBSAMachineState *sms) +{ + sms->flash[0] = sbsa_flash_create1(sms, "sbsa.flash0", "pflash0"); + sms->flash[1] = sbsa_flash_create1(sms, "sbsa.flash1", "pflash1"); +} + +static void sbsa_flash_map1(PFlashCFI01 *flash, + hwaddr base, hwaddr size, + MemoryRegion *sysmem) +{ + DeviceState *dev = DEVICE(flash); + + assert(size % SBSA_FLASH_SECTOR_SIZE == 0); + assert(size / SBSA_FLASH_SECTOR_SIZE <= UINT32_MAX); + qdev_prop_set_uint32(dev, "num-blocks", size / SBSA_FLASH_SECTOR_SIZE); + qdev_init_nofail(dev); + + memory_region_add_subregion(sysmem, base, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), + 0)); +} + +static void sbsa_flash_map(SBSAMachineState *sms, + MemoryRegion *sysmem, + MemoryRegion *secure_sysmem) +{ + /* + * Map two flash devices to fill the SBSA_FLASH space in the memmap. + * sysmem is the system memory space. secure_sysmem is the secure view + * of the system, and the first flash device should be made visible only + * there. The second flash device is visible to both secure and nonsecure. 
+ * If sysmem == secure_sysmem this means there is no separate Secure + * address space and both flash devices are generally visible. + */ + hwaddr flashsize = sbsa_ref_memmap[SBSA_FLASH].size / 2; + hwaddr flashbase = sbsa_ref_memmap[SBSA_FLASH].base; + + sbsa_flash_map1(sms->flash[0], flashbase, flashsize, + secure_sysmem); + sbsa_flash_map1(sms->flash[1], flashbase + flashsize, flashsize, + sysmem); +} + +static bool sbsa_firmware_init(SBSAMachineState *sms, + MemoryRegion *sysmem, + MemoryRegion *secure_sysmem) +{ + int i; + BlockBackend *pflash_blk0; + + /* Map legacy -drive if=pflash to machine properties */ + for (i = 0; i < ARRAY_SIZE(sms->flash); i++) { + pflash_cfi01_legacy_drive(sms->flash[i], + drive_get(IF_PFLASH, 0, i)); + } + + sbsa_flash_map(sms, sysmem, secure_sysmem); + + pflash_blk0 = pflash_cfi01_get_blk(sms->flash[0]); + + if (bios_name) { + char *fname; + MemoryRegion *mr; + int image_size; + + if (pflash_blk0) { + error_report("The contents of the first flash device may be " + "specified with -bios or with -drive if=pflash... " + "but you cannot use both options at once"); + exit(1); + } + + /* Fall back to -bios */ + + fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!fname) { + error_report("Could not find ROM image '%s'", bios_name); + exit(1); + } + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(sms->flash[0]), 0); + image_size = load_image_mr(fname, mr); + g_free(fname); + if (image_size < 0) { + error_report("Could not load ROM image '%s'", bios_name); + exit(1); + } + } + + return pflash_blk0 || bios_name; +} + +static void create_secure_ram(SBSAMachineState *sms, + MemoryRegion *secure_sysmem) +{ + MemoryRegion *secram = g_new(MemoryRegion, 1); + hwaddr base = sbsa_ref_memmap[SBSA_SECURE_MEM].base; + hwaddr size = sbsa_ref_memmap[SBSA_SECURE_MEM].size; + + memory_region_init_ram(secram, NULL, "sbsa-ref.secure-ram", size, + &error_fatal); + memory_region_add_subregion(secure_sysmem, base, secram); +} + +static void create_gic(SBSAMachineState *sms, qemu_irq *pic) +{ + DeviceState *gicdev; + SysBusDevice *gicbusdev; + const char *gictype; + uint32_t redist0_capacity, redist0_count; + int i; + + gictype = gicv3_class_name(); + + gicdev = qdev_create(NULL, gictype); + qdev_prop_set_uint32(gicdev, "revision", 3); + qdev_prop_set_uint32(gicdev, "num-cpu", smp_cpus); + /* + * Note that the num-irq property counts both internal and external + * interrupts; there are always 32 of the former (mandated by GIC spec). + */ + qdev_prop_set_uint32(gicdev, "num-irq", NUM_IRQS + 32); + qdev_prop_set_bit(gicdev, "has-security-extensions", true); + + redist0_capacity = + sbsa_ref_memmap[SBSA_GIC_REDIST].size / GICV3_REDIST_SIZE; + redist0_count = MIN(smp_cpus, redist0_capacity); + + qdev_prop_set_uint32(gicdev, "len-redist-region-count", 1); + qdev_prop_set_uint32(gicdev, "redist-region-count[0]", redist0_count); + + qdev_init_nofail(gicdev); + gicbusdev = SYS_BUS_DEVICE(gicdev); + sysbus_mmio_map(gicbusdev, 0, sbsa_ref_memmap[SBSA_GIC_DIST].base); + sysbus_mmio_map(gicbusdev, 1, sbsa_ref_memmap[SBSA_GIC_REDIST].base); + + /* + * Wire the outputs from each CPU's generic timer and the GICv3 + * maintenance interrupt signal to the appropriate GIC PPI inputs, + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. 
+ */ + for (i = 0; i < smp_cpus; i++) { + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); + int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; + int irq; + /* + * Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs used for this board. + */ + const int timer_irq[] = { + [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, + [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, + [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, + [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, + }; + + for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { + qdev_connect_gpio_out(cpudev, irq, + qdev_get_gpio_in(gicdev, + ppibase + timer_irq[irq])); + } + + qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0, + qdev_get_gpio_in(gicdev, ppibase + + ARCH_GIC_MAINT_IRQ)); + qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, + qdev_get_gpio_in(gicdev, ppibase + + VIRTUAL_PMU_IRQ)); + + sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(gicbusdev, i + smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + } + + for (i = 0; i < NUM_IRQS; i++) { + pic[i] = qdev_get_gpio_in(gicdev, i); + } +} + +static void create_uart(const SBSAMachineState *sms, qemu_irq *pic, int uart, + MemoryRegion *mem, Chardev *chr) +{ + hwaddr base = sbsa_ref_memmap[uart].base; + int irq = sbsa_ref_irqmap[uart]; + DeviceState *dev = qdev_create(NULL, "pl011"); + SysBusDevice *s = SYS_BUS_DEVICE(dev); + + qdev_prop_set_chr(dev, "chardev", chr); + qdev_init_nofail(dev); + memory_region_add_subregion(mem, base, + sysbus_mmio_get_region(s, 0)); + sysbus_connect_irq(s, 0, pic[irq]); +} + +static void create_rtc(const SBSAMachineState *sms, qemu_irq *pic) +{ + hwaddr base = sbsa_ref_memmap[SBSA_RTC].base; + int irq = sbsa_ref_irqmap[SBSA_RTC]; + + sysbus_create_simple("pl031", base, pic[irq]); +} + +static DeviceState *gpio_key_dev; +static void sbsa_ref_powerdown_req(Notifier *n, void *opaque) +{ + /* use gpio Pin 3 for power button event */ + qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1); +} + +static Notifier sbsa_ref_powerdown_notifier = { + .notify = sbsa_ref_powerdown_req +}; + +static void create_gpio(const SBSAMachineState *sms, qemu_irq *pic) +{ + DeviceState *pl061_dev; + hwaddr base = sbsa_ref_memmap[SBSA_GPIO].base; + int irq = sbsa_ref_irqmap[SBSA_GPIO]; + + pl061_dev = sysbus_create_simple("pl061", base, pic[irq]); + + gpio_key_dev = sysbus_create_simple("gpio-key", -1, + qdev_get_gpio_in(pl061_dev, 3)); + + /* connect powerdown request */ + qemu_register_powerdown_notifier(&sbsa_ref_powerdown_notifier); +} + +static void create_ahci(const SBSAMachineState *sms, qemu_irq *pic) +{ + hwaddr base = sbsa_ref_memmap[SBSA_AHCI].base; + int irq = sbsa_ref_irqmap[SBSA_AHCI]; + DeviceState *dev; + DriveInfo *hd[NUM_SATA_PORTS]; + SysbusAHCIState *sysahci; + AHCIState *ahci; + int i; + + dev = qdev_create(NULL, "sysbus-ahci"); + qdev_prop_set_uint32(dev, "num-ports", NUM_SATA_PORTS); + qdev_init_nofail(dev); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]); + + sysahci = SYSBUS_AHCI(dev); + ahci = &sysahci->ahci; + ide_drive_get(hd, ARRAY_SIZE(hd)); + for (i = 0; i < ahci->ports; i++) { + if (hd[i] == NULL) { + continue; + } + ide_create_drive(&ahci->dev[i].port, 0, hd[i]); + } +} + +static void create_ehci(const SBSAMachineState *sms, qemu_irq *pic) +{ + hwaddr 
base = sbsa_ref_memmap[SBSA_EHCI].base; + int irq = sbsa_ref_irqmap[SBSA_EHCI]; + + sysbus_create_simple("platform-ehci-usb", base, pic[irq]); +} + +static void create_smmu(const SBSAMachineState *sms, qemu_irq *pic, + PCIBus *bus) +{ + hwaddr base = sbsa_ref_memmap[SBSA_SMMU].base; + int irq = sbsa_ref_irqmap[SBSA_SMMU]; + DeviceState *dev; + int i; + + dev = qdev_create(NULL, "arm-smmuv3"); + + object_property_set_link(OBJECT(dev), OBJECT(bus), "primary-bus", + &error_abort); + qdev_init_nofail(dev); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + for (i = 0; i < NUM_SMMU_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]); + } +} + +static void create_pcie(SBSAMachineState *sms, qemu_irq *pic) +{ + hwaddr base_ecam = sbsa_ref_memmap[SBSA_PCIE_ECAM].base; + hwaddr size_ecam = sbsa_ref_memmap[SBSA_PCIE_ECAM].size; + hwaddr base_mmio = sbsa_ref_memmap[SBSA_PCIE_MMIO].base; + hwaddr size_mmio = sbsa_ref_memmap[SBSA_PCIE_MMIO].size; + hwaddr base_mmio_high = sbsa_ref_memmap[SBSA_PCIE_MMIO_HIGH].base; + hwaddr size_mmio_high = sbsa_ref_memmap[SBSA_PCIE_MMIO_HIGH].size; + hwaddr base_pio = sbsa_ref_memmap[SBSA_PCIE_PIO].base; + int irq = sbsa_ref_irqmap[SBSA_PCIE]; + MemoryRegion *mmio_alias, *mmio_alias_high, *mmio_reg; + MemoryRegion *ecam_alias, *ecam_reg; + DeviceState *dev; + PCIHostState *pci; + int i; + + dev = qdev_create(NULL, TYPE_GPEX_HOST); + qdev_init_nofail(dev); + + /* Map ECAM space */ + ecam_alias = g_new0(MemoryRegion, 1); + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam", + ecam_reg, 0, size_ecam); + memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias); + + /* Map the MMIO space */ + mmio_alias = g_new0(MemoryRegion, 1); + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", + mmio_reg, base_mmio, size_mmio); + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); + + /* Map the MMIO_HIGH space */ + mmio_alias_high = g_new0(MemoryRegion, 1); + memory_region_init_alias(mmio_alias_high, OBJECT(dev), "pcie-mmio-high", + mmio_reg, base_mmio_high, size_mmio_high); + memory_region_add_subregion(get_system_memory(), base_mmio_high, + mmio_alias_high); + + /* Map IO port space */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]); + gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); + } + + pci = PCI_HOST_BRIDGE(dev); + if (pci->bus) { + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + + if (!nd->model) { + nd->model = g_strdup("e1000e"); + } + + pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); + } + } + + pci_create_simple(pci->bus, -1, "VGA"); + + create_smmu(sms, pic, pci->bus); +} + +static void *sbsa_ref_dtb(const struct arm_boot_info *binfo, int *fdt_size) +{ + const SBSAMachineState *board = container_of(binfo, SBSAMachineState, + bootinfo); + + *fdt_size = board->fdt_size; + return board->fdt; +} + +static void sbsa_ref_init(MachineState *machine) +{ + SBSAMachineState *sms = SBSA_MACHINE(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *secure_sysmem = NULL; + MemoryRegion *ram = g_new(MemoryRegion, 1); + bool firmware_loaded; + const CPUArchIdList *possible_cpus; + int n, sbsa_max_cpus; + qemu_irq pic[NUM_IRQS]; + + if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a57"))) { + 
error_report("sbsa-ref: CPU type other than the built-in " + "cortex-a57 not supported"); + exit(1); + } + + if (kvm_enabled()) { + error_report("sbsa-ref: KVM is not supported for this machine"); + exit(1); + } + + /* + * The Secure view of the world is the same as the NonSecure, + * but with a few extra devices. Create it as a container region + * containing the system memory at low priority; any secure-only + * devices go in at higher priority and take precedence. + */ + secure_sysmem = g_new(MemoryRegion, 1); + memory_region_init(secure_sysmem, OBJECT(machine), "secure-memory", + UINT64_MAX); + memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1); + + firmware_loaded = sbsa_firmware_init(sms, sysmem, + secure_sysmem ?: sysmem); + + if (machine->kernel_filename && firmware_loaded) { + error_report("sbsa-ref: No fw_cfg device on this machine, " + "so -kernel option is not supported when firmware loaded, " + "please load OS from hard disk instead"); + exit(1); + } + + /* + * This machine has EL3 enabled, external firmware should supply PSCI + * implementation, so the QEMU's internal PSCI is disabled. + */ + sms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; + + sbsa_max_cpus = sbsa_ref_memmap[SBSA_GIC_REDIST].size / GICV3_REDIST_SIZE; + + if (max_cpus > sbsa_max_cpus) { + error_report("Number of SMP CPUs requested (%d) exceeds max CPUs " + "supported by machine 'sbsa-ref' (%d)", + max_cpus, sbsa_max_cpus); + exit(1); + } + + sms->smp_cpus = smp_cpus; + + if (machine->ram_size > sbsa_ref_memmap[SBSA_MEM].size) { + error_report("sbsa-ref: cannot model more than %dGB RAM", RAMLIMIT_GB); + exit(1); + } + + possible_cpus = mc->possible_cpu_arch_ids(machine); + for (n = 0; n < possible_cpus->len; n++) { + Object *cpuobj; + CPUState *cs; + + if (n >= smp_cpus) { + break; + } + + cpuobj = object_new(possible_cpus->cpus[n].type); + object_property_set_int(cpuobj, possible_cpus->cpus[n].arch_id, + "mp-affinity", NULL); + + cs = CPU(cpuobj); + cs->cpu_index = n; + + numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj), + &error_fatal); + + if (object_property_find(cpuobj, "reset-cbar", NULL)) { + object_property_set_int(cpuobj, + sbsa_ref_memmap[SBSA_CPUPERIPHS].base, + "reset-cbar", &error_abort); + } + + object_property_set_link(cpuobj, OBJECT(sysmem), "memory", + &error_abort); + + object_property_set_link(cpuobj, OBJECT(secure_sysmem), + "secure-memory", &error_abort); + + object_property_set_bool(cpuobj, true, "realized", &error_fatal); + object_unref(cpuobj); + } + + memory_region_allocate_system_memory(ram, NULL, "sbsa-ref.ram", + machine->ram_size); + memory_region_add_subregion(sysmem, sbsa_ref_memmap[SBSA_MEM].base, ram); + + create_fdt(sms); + + create_secure_ram(sms, secure_sysmem); + + create_gic(sms, pic); + + create_uart(sms, pic, SBSA_UART, sysmem, serial_hd(0)); + create_uart(sms, pic, SBSA_SECURE_UART, secure_sysmem, serial_hd(1)); + /* Second secure UART for RAS and MM from EL0 */ + create_uart(sms, pic, SBSA_SECURE_UART_MM, secure_sysmem, serial_hd(2)); + + create_rtc(sms, pic); + + create_gpio(sms, pic); + + create_ahci(sms, pic); + + create_ehci(sms, pic); + + create_pcie(sms, pic); + + sms->bootinfo.ram_size = machine->ram_size; + sms->bootinfo.kernel_filename = machine->kernel_filename; + sms->bootinfo.nb_cpus = smp_cpus; + sms->bootinfo.board_id = -1; + sms->bootinfo.loader_start = sbsa_ref_memmap[SBSA_MEM].base; + sms->bootinfo.get_dtb = sbsa_ref_dtb; + sms->bootinfo.firmware_loaded = firmware_loaded; + arm_load_kernel(ARM_CPU(first_cpu), 
&sms->bootinfo); +} + +static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx) +{ + uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER; + return arm_cpu_mp_affinity(idx, clustersz); +} + +static const CPUArchIdList *sbsa_ref_possible_cpu_arch_ids(MachineState *ms) +{ + SBSAMachineState *sms = SBSA_MACHINE(ms); + int n; + + if (ms->possible_cpus) { + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (n = 0; n < ms->possible_cpus->len; n++) { + ms->possible_cpus->cpus[n].type = ms->cpu_type; + ms->possible_cpus->cpus[n].arch_id = + sbsa_ref_cpu_mp_affinity(sms, n); + ms->possible_cpus->cpus[n].props.has_thread_id = true; + ms->possible_cpus->cpus[n].props.thread_id = n; + } + return ms->possible_cpus; +} + +static CpuInstanceProperties +sbsa_ref_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +static int64_t +sbsa_ref_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + return idx % nb_numa_nodes; +} + +static void sbsa_ref_instance_init(Object *obj) +{ + SBSAMachineState *sms = SBSA_MACHINE(obj); + + sbsa_flash_create(sms); +} + +static void sbsa_ref_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = sbsa_ref_init; + mc->desc = "QEMU 'SBSA Reference' ARM Virtual Machine"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a57"); + mc->max_cpus = 512; + mc->pci_allow_0_address = true; + mc->minimum_page_bits = 12; + mc->block_default_type = IF_IDE; + mc->no_cdrom = 1; + mc->default_ram_size = 1 * GiB; + mc->default_cpus = 4; + mc->possible_cpu_arch_ids = sbsa_ref_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = sbsa_ref_cpu_index_to_props; + mc->get_default_cpu_node_id = sbsa_ref_get_default_cpu_node_id; +} + +static const TypeInfo sbsa_ref_info = { + .name = TYPE_SBSA_MACHINE, + .parent = TYPE_MACHINE, + .instance_init = sbsa_ref_instance_init, + .class_init = sbsa_ref_class_init, + .instance_size = sizeof(SBSAMachineState), +}; + +static void sbsa_ref_machine_init(void) +{ + type_register_static(&sbsa_ref_info); +} + +type_init(sbsa_ref_machine_init); diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 431e2900fd..ed009fa447 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -176,6 +176,7 @@ static const int a15irqmap[] = { }; static const char *valid_cpus[] = { + ARM_CPU_TYPE_NAME("cortex-a7"), ARM_CPU_TYPE_NAME("cortex-a15"), ARM_CPU_TYPE_NAME("cortex-a53"), ARM_CPU_TYPE_NAME("cortex-a57"), diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c index 35080d915f..db4a246b22 100644 --- a/hw/block/pflash_cfi01.c +++ b/hw/block/pflash_cfi01.c @@ -248,7 +248,6 @@ static uint32_t pflash_data_read(PFlashCFI01 *pfl, hwaddr offset, switch (width) { case 1: ret = p[offset]; - trace_pflash_data_read8(offset, ret); break; case 2: if (be) { @@ -258,7 +257,6 @@ static uint32_t pflash_data_read(PFlashCFI01 *pfl, hwaddr offset, ret = p[offset]; ret |= p[offset + 1] << 8; } - trace_pflash_data_read16(offset, ret); break; case 4: if (be) { @@ -272,12 +270,12 @@ static uint32_t pflash_data_read(PFlashCFI01 *pfl, hwaddr offset, ret |= p[offset + 2] << 16; ret |= p[offset + 3] << 24; } - trace_pflash_data_read32(offset, ret); break; default: 
DPRINTF("BUG in %s\n", __func__); abort(); } + trace_pflash_data_read(offset, width << 1, ret); return ret; } @@ -288,7 +286,6 @@ static uint32_t pflash_read(PFlashCFI01 *pfl, hwaddr offset, uint32_t ret; ret = -1; - trace_pflash_read(offset, pfl->cmd, width, pfl->wcycle); switch (pfl->cmd) { default: /* This should never happen : reset state & treat it as a read */ @@ -391,6 +388,8 @@ static uint32_t pflash_read(PFlashCFI01 *pfl, hwaddr offset, break; } + trace_pflash_io_read(offset, width, width << 1, ret, pfl->cmd, pfl->wcycle); + return ret; } @@ -414,7 +413,7 @@ static inline void pflash_data_write(PFlashCFI01 *pfl, hwaddr offset, { uint8_t *p = pfl->storage; - trace_pflash_data_write(offset, value, width, pfl->counter); + trace_pflash_data_write(offset, width << 1, value, pfl->counter); switch (width) { case 1: p[offset] = value; @@ -453,7 +452,7 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset, cmd = value; - trace_pflash_write(offset, value, width, pfl->wcycle); + trace_pflash_io_write(offset, width, width << 1, value, pfl->wcycle); if (!pfl->wcycle) { /* Set the device in I/O access mode */ memory_region_rom_device_set_romd(&pfl->mem, false); diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c index eb106f4996..5392290c72 100644 --- a/hw/block/pflash_cfi02.c +++ b/hw/block/pflash_cfi02.c @@ -29,10 +29,7 @@ * - CFI queries * * It does not support flash interleaving. - * It does not implement boot blocs with reduced size * It does not implement software data protection as found in many real chips - * It does not implement erase suspend/resume commands - * It does not implement multiple sectors erase */ #include "qemu/osdep.h" @@ -40,6 +37,7 @@ #include "hw/block/block.h" #include "hw/block/flash.h" #include "qapi/error.h" +#include "qemu/bitmap.h" #include "qemu/timer.h" #include "sysemu/block-backend.h" #include "qemu/host-utils.h" @@ -47,26 +45,40 @@ #include "hw/sysbus.h" #include "trace.h" -//#define PFLASH_DEBUG -#ifdef PFLASH_DEBUG +#define PFLASH_DEBUG false #define DPRINTF(fmt, ...) \ do { \ - fprintf(stderr, "PFLASH: " fmt , ## __VA_ARGS__); \ + if (PFLASH_DEBUG) { \ + fprintf(stderr, "PFLASH: " fmt, ## __VA_ARGS__); \ + } \ } while (0) -#else -#define DPRINTF(fmt, ...) do { } while (0) -#endif #define PFLASH_LAZY_ROMD_THRESHOLD 42 +/* + * The size of the cfi_table indirectly depends on this and the start of the + * PRI table directly depends on it. 4 is the maximum size (and also what + * seems common) without changing the PRT table address. + */ +#define PFLASH_MAX_ERASE_REGIONS 4 + +/* Special write cycles for CFI queries. */ +enum { + WCYCLE_CFI = 7, + WCYCLE_AUTOSELECT_CFI = 8, +}; + struct PFlashCFI02 { /*< private >*/ SysBusDevice parent_obj; /*< public >*/ BlockBackend *blk; - uint32_t sector_len; - uint32_t nb_blocs; + uint32_t uniform_nb_blocs; + uint32_t uniform_sector_len; + uint32_t total_sectors; + uint32_t nb_blocs[PFLASH_MAX_ERASE_REGIONS]; + uint32_t sector_len[PFLASH_MAX_ERASE_REGIONS]; uint32_t chip_len; uint8_t mappings; uint8_t width; @@ -83,7 +95,7 @@ struct PFlashCFI02 { uint16_t ident3; uint16_t unlock_addr0; uint16_t unlock_addr1; - uint8_t cfi_table[0x52]; + uint8_t cfi_table[0x4d]; QEMUTimer timer; /* The device replicates the flash memory across its memory space. 
Emulate * that by having a container (.mem) filled with an array of aliases @@ -94,11 +106,63 @@ struct PFlashCFI02 { MemoryRegion orig_mem; int rom_mode; int read_counter; /* used for lazy switch-back to rom mode */ + int sectors_to_erase; + uint64_t erase_time_remaining; + unsigned long *sector_erase_map; char *name; void *storage; }; /* + * Toggle status bit DQ7. + */ +static inline void toggle_dq7(PFlashCFI02 *pfl) +{ + pfl->status ^= 0x80; +} + +/* + * Set status bit DQ7 to bit 7 of value. + */ +static inline void set_dq7(PFlashCFI02 *pfl, uint8_t value) +{ + pfl->status &= 0x7F; + pfl->status |= value & 0x80; +} + +/* + * Toggle status bit DQ6. + */ +static inline void toggle_dq6(PFlashCFI02 *pfl) +{ + pfl->status ^= 0x40; +} + +/* + * Turn on DQ3. + */ +static inline void assert_dq3(PFlashCFI02 *pfl) +{ + pfl->status |= 0x08; +} + +/* + * Turn off DQ3. + */ +static inline void reset_dq3(PFlashCFI02 *pfl) +{ + pfl->status &= ~0x08; +} + +/* + * Toggle status bit DQ2. + */ +static inline void toggle_dq2(PFlashCFI02 *pfl) +{ + pfl->status ^= 0x04; +} + +/* * Set up replicated mappings of the same region. */ static void pflash_setup_mappings(PFlashCFI02 *pfl) @@ -121,13 +185,63 @@ static void pflash_register_memory(PFlashCFI02 *pfl, int rom_mode) pfl->rom_mode = rom_mode; } -static void pflash_timer (void *opaque) +static size_t pflash_regions_count(PFlashCFI02 *pfl) +{ + return pfl->cfi_table[0x2c]; +} + +/* + * Returns the time it takes to erase the number of sectors scheduled for + * erasure based on CFI address 0x21 which is "Typical timeout per individual + * block erase 2^N ms." + */ +static uint64_t pflash_erase_time(PFlashCFI02 *pfl) +{ + /* + * If there are no sectors to erase (which can happen if all of the sectors + * to be erased are protected), then erase takes 100 us. Protected sectors + * aren't supported so this should never happen. + */ + return ((1ULL << pfl->cfi_table[0x21]) * pfl->sectors_to_erase) * SCALE_US; +} + +/* + * Returns true if the device is currently in erase suspend mode. + */ +static inline bool pflash_erase_suspend_mode(PFlashCFI02 *pfl) +{ + return pfl->erase_time_remaining > 0; +} + +static void pflash_timer(void *opaque) { PFlashCFI02 *pfl = opaque; trace_pflash_timer_expired(pfl->cmd); + if (pfl->cmd == 0x30) { + /* + * Sector erase. If DQ3 is 0 when the timer expires, then the 50 + * us erase timeout has expired so we need to start the timer for the + * sector erase algorithm. Otherwise, the erase completed and we should + * go back to read array mode. + */ + if ((pfl->status & 0x08) == 0) { + assert_dq3(pfl); + uint64_t timeout = pflash_erase_time(pfl); + timer_mod(&pfl->timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout); + DPRINTF("%s: erase timeout fired; erasing %d sectors\n", + __func__, pfl->sectors_to_erase); + return; + } + DPRINTF("%s: sector erase complete\n", __func__); + bitmap_zero(pfl->sector_erase_map, pfl->total_sectors); + pfl->sectors_to_erase = 0; + reset_dq3(pfl); + } + /* Reset flash */ - pfl->status ^= 0x80; + toggle_dq7(pfl); if (pfl->bypass) { pfl->wcycle = 2; } else { @@ -137,15 +251,63 @@ static void pflash_timer (void *opaque) pfl->cmd = 0; } -static uint32_t pflash_read(PFlashCFI02 *pfl, hwaddr offset, - int width, int be) +/* + * Read data from flash. + */ +static uint64_t pflash_data_read(PFlashCFI02 *pfl, hwaddr offset, + unsigned int width) +{ + uint8_t *p = (uint8_t *)pfl->storage + offset; + uint64_t ret = pfl->be ? 
ldn_be_p(p, width) : ldn_le_p(p, width); + trace_pflash_data_read(offset, width << 1, ret); + return ret; +} + +typedef struct { + uint32_t len; + uint32_t num; +} SectorInfo; + +/* + * offset should be a byte offset of the QEMU device and _not_ a device + * offset. + */ +static SectorInfo pflash_sector_info(PFlashCFI02 *pfl, hwaddr offset) { + assert(offset < pfl->chip_len); + hwaddr addr = 0; + uint32_t sector_num = 0; + for (int i = 0; i < pflash_regions_count(pfl); ++i) { + uint64_t region_size = (uint64_t)pfl->nb_blocs[i] * pfl->sector_len[i]; + if (addr <= offset && offset < addr + region_size) { + return (SectorInfo) { + .len = pfl->sector_len[i], + .num = sector_num + (offset - addr) / pfl->sector_len[i], + }; + } + sector_num += pfl->nb_blocs[i]; + addr += region_size; + } + abort(); +} + +/* + * Returns true if the offset refers to a flash sector that is currently being + * erased. + */ +static bool pflash_sector_is_erasing(PFlashCFI02 *pfl, hwaddr offset) +{ + long sector_num = pflash_sector_info(pfl, offset).num; + return test_bit(sector_num, pfl->sector_erase_map); +} + +static uint64_t pflash_read(void *opaque, hwaddr offset, unsigned int width) +{ + PFlashCFI02 *pfl = opaque; hwaddr boff; - uint32_t ret; - uint8_t *p; + uint64_t ret; ret = -1; - trace_pflash_read(offset, pfl->cmd, width, pfl->wcycle); /* Lazy reset to ROMD mode after a certain amount of read accesses */ if (!pfl->rom_mode && pfl->wcycle == 0 && ++pfl->read_counter > PFLASH_LAZY_ROMD_THRESHOLD) { @@ -153,10 +315,9 @@ static uint32_t pflash_read(PFlashCFI02 *pfl, hwaddr offset, } offset &= pfl->chip_len - 1; boff = offset & 0xFF; - if (pfl->width == 2) + if (pfl->width == 2) { boff = boff >> 1; - else if (pfl->width == 4) - boff = boff >> 2; + } switch (pfl->cmd) { default: /* This should never happen : reset state & treat it as a read*/ @@ -164,45 +325,22 @@ static uint32_t pflash_read(PFlashCFI02 *pfl, hwaddr offset, pfl->wcycle = 0; pfl->cmd = 0; /* fall through to the read code */ - case 0x80: + case 0x80: /* Erase (unlock) */ /* We accept reads during second unlock sequence... */ case 0x00: - flash_read: - /* Flash area read */ - p = pfl->storage; - switch (width) { - case 1: - ret = p[offset]; - trace_pflash_data_read8(offset, ret); - break; - case 2: - if (be) { - ret = p[offset] << 8; - ret |= p[offset + 1]; - } else { - ret = p[offset]; - ret |= p[offset + 1] << 8; - } - trace_pflash_data_read16(offset, ret); - break; - case 4: - if (be) { - ret = p[offset] << 24; - ret |= p[offset + 1] << 16; - ret |= p[offset + 2] << 8; - ret |= p[offset + 3]; - } else { - ret = p[offset]; - ret |= p[offset + 1] << 8; - ret |= p[offset + 2] << 16; - ret |= p[offset + 3] << 24; - } - trace_pflash_data_read32(offset, ret); + if (pflash_erase_suspend_mode(pfl) && + pflash_sector_is_erasing(pfl, offset)) { + /* Toggle bit 2, but not 6. */ + toggle_dq2(pfl); + /* Status register read */ + ret = pfl->status; + DPRINTF("%s: status %" PRIx64 "\n", __func__, ret); break; } + /* Flash area read */ + ret = pflash_data_read(pfl, offset, width); break; - case 0x90: - /* flash ID read */ + case 0x90: /* flash ID read */ switch (boff) { case 0x00: case 0x01: @@ -214,23 +352,25 @@ static uint32_t pflash_read(PFlashCFI02 *pfl, hwaddr offset, case 0x0E: case 0x0F: ret = boff & 0x01 ? pfl->ident3 : pfl->ident2; - if (ret == (uint8_t)-1) { - goto flash_read; + if (ret != (uint8_t)-1) { + break; } - break; + /* Fall through to data read. 
*/ default: - goto flash_read; + ret = pflash_data_read(pfl, offset, width); } - DPRINTF("%s: ID " TARGET_FMT_plx " %x\n", __func__, boff, ret); + DPRINTF("%s: ID " TARGET_FMT_plx " %" PRIx64 "\n", __func__, boff, ret); break; - case 0xA0: - case 0x10: - case 0x30: + case 0x10: /* Chip Erase */ + case 0x30: /* Sector Erase */ + /* Toggle bit 2 during erase, but not program. */ + toggle_dq2(pfl); + case 0xA0: /* Program */ + /* Toggle bit 6 */ + toggle_dq6(pfl); /* Status register read */ ret = pfl->status; - DPRINTF("%s: status %x\n", __func__, ret); - /* Toggle bit 6 */ - pfl->status ^= 0x40; + DPRINTF("%s: status %" PRIx64 "\n", __func__, ret); break; case 0x98: /* CFI query mode */ @@ -241,13 +381,13 @@ static uint32_t pflash_read(PFlashCFI02 *pfl, hwaddr offset, } break; } + trace_pflash_io_read(offset, width, width << 1, ret, pfl->cmd, pfl->wcycle); return ret; } /* update flash content on disk */ -static void pflash_update(PFlashCFI02 *pfl, int offset, - int size) +static void pflash_update(PFlashCFI02 *pfl, int offset, int size) { int offset_end; if (pfl->blk) { @@ -260,31 +400,56 @@ static void pflash_update(PFlashCFI02 *pfl, int offset, } } -static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, - uint32_t value, int width, int be) +static void pflash_sector_erase(PFlashCFI02 *pfl, hwaddr offset) { + SectorInfo sector_info = pflash_sector_info(pfl, offset); + uint64_t sector_len = sector_info.len; + offset &= ~(sector_len - 1); + DPRINTF("%s: start sector erase at %0*" PRIx64 "-%0*" PRIx64 "\n", + __func__, pfl->width * 2, offset, + pfl->width * 2, offset + sector_len - 1); + if (!pfl->ro) { + uint8_t *p = pfl->storage; + memset(p + offset, 0xff, sector_len); + pflash_update(pfl, offset, sector_len); + } + set_dq7(pfl, 0x00); + ++pfl->sectors_to_erase; + set_bit(sector_info.num, pfl->sector_erase_map); + /* Set (or reset) the 50 us timer for additional erase commands. */ + timer_mod(&pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 50000); +} + +static void pflash_write(void *opaque, hwaddr offset, uint64_t value, + unsigned int width) +{ + PFlashCFI02 *pfl = opaque; hwaddr boff; uint8_t *p; uint8_t cmd; + trace_pflash_io_write(offset, width, width << 1, value, pfl->wcycle); cmd = value; - if (pfl->cmd != 0xA0 && cmd == 0xF0) { -#if 0 - DPRINTF("%s: flash reset asked (%02x %02x)\n", - __func__, pfl->cmd, cmd); -#endif - goto reset_flash; + if (pfl->cmd != 0xA0) { + /* Reset does nothing during chip erase and sector erase. */ + if (cmd == 0xF0 && pfl->cmd != 0x10 && pfl->cmd != 0x30) { + if (pfl->wcycle == WCYCLE_AUTOSELECT_CFI) { + /* Return to autoselect mode. */ + pfl->wcycle = 3; + pfl->cmd = 0x90; + return; + } + goto reset_flash; + } } - trace_pflash_write(offset, value, width, pfl->wcycle); offset &= pfl->chip_len - 1; - DPRINTF("%s: offset " TARGET_FMT_plx " %08x %d\n", __func__, - offset, value, width); - boff = offset & (pfl->sector_len - 1); - if (pfl->width == 2) + boff = offset; + if (pfl->width == 2) { boff = boff >> 1; - else if (pfl->width == 4) - boff = boff >> 2; + } + /* Only the least-significant 11 bits are used in most cases. 
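/*
 * Standalone sketch of the command-address decode shown above: the bus
 * offset is wrapped to the chip size, shifted down to a word address on a
 * 16-bit wide part, and only the low 11 bits take part in the comparison
 * against unlock_addr0/unlock_addr1. The 0x555/0x2AA values below are the
 * usual AMD-style unlock addresses and are only an example.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t command_address(uint64_t offset, uint64_t chip_len,
                                unsigned device_width)
{
    offset &= chip_len - 1;     /* chip_len is a power of two */
    if (device_width == 2) {
        offset >>= 1;           /* byte offset -> word offset */
    }
    return offset & 0x7FF;      /* only 11 bits are compared */
}

int main(void)
{
    uint64_t chip_len = 4 * 1024 * 1024;

    /* First unlock write of the AMD command sequence on a x16 part. */
    uint16_t boff = command_address(0xAAA, chip_len, 2);
    printf("boff = 0x%03X, matches unlock address 0x555: %s\n",
           boff, boff == 0x555 ? "yes" : "no");
    return 0;
}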
*/ + boff &= 0x7FF; switch (pfl->wcycle) { case 0: /* Set the device in I/O access mode if required */ @@ -294,12 +459,30 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, /* We're in read mode */ check_unlock0: if (boff == 0x55 && cmd == 0x98) { - enter_CFI_mode: /* Enter CFI query mode */ - pfl->wcycle = 7; + pfl->wcycle = WCYCLE_CFI; pfl->cmd = 0x98; return; } + /* Handle erase resume in erase suspend mode, otherwise reset. */ + if (cmd == 0x30) { /* Erase Resume */ + if (pflash_erase_suspend_mode(pfl)) { + /* Resume the erase. */ + timer_mod(&pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + pfl->erase_time_remaining); + pfl->erase_time_remaining = 0; + pfl->wcycle = 6; + pfl->cmd = 0x30; + set_dq7(pfl, 0x00); + assert_dq3(pfl); + return; + } + goto reset_flash; + } + /* Ignore erase suspend. */ + if (cmd == 0xB0) { /* Erase Suspend */ + return; + } if (boff != pfl->unlock_addr0 || cmd != 0xAA) { DPRINTF("%s: unlock0 failed " TARGET_FMT_plx " %02x %04x\n", __func__, boff, cmd, pfl->unlock_addr0); @@ -328,9 +511,9 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, case 0x20: pfl->bypass = 1; goto do_bypass; - case 0x80: - case 0x90: - case 0xA0: + case 0x80: /* Erase */ + case 0x90: /* Autoselect */ + case 0xA0: /* Program */ pfl->cmd = cmd; DPRINTF("%s: starting command %02x\n", __func__, cmd); break; @@ -341,57 +524,54 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, break; case 3: switch (pfl->cmd) { - case 0x80: + case 0x80: /* Erase */ /* We need another unlock sequence */ goto check_unlock0; - case 0xA0: - trace_pflash_data_write(offset, value, width, 0); - p = pfl->storage; + case 0xA0: /* Program */ + if (pflash_erase_suspend_mode(pfl) && + pflash_sector_is_erasing(pfl, offset)) { + /* Ignore writes to erasing sectors. */ + if (pfl->bypass) { + goto do_bypass; + } + goto reset_flash; + } + trace_pflash_data_write(offset, width << 1, value, 0); if (!pfl->ro) { - switch (width) { - case 1: - p[offset] &= value; - pflash_update(pfl, offset, 1); - break; - case 2: - if (be) { - p[offset] &= value >> 8; - p[offset + 1] &= value; - } else { - p[offset] &= value; - p[offset + 1] &= value >> 8; - } - pflash_update(pfl, offset, 2); - break; - case 4: - if (be) { - p[offset] &= value >> 24; - p[offset + 1] &= value >> 16; - p[offset + 2] &= value >> 8; - p[offset + 3] &= value; - } else { - p[offset] &= value; - p[offset + 1] &= value >> 8; - p[offset + 2] &= value >> 16; - p[offset + 3] &= value >> 24; - } - pflash_update(pfl, offset, 4); - break; + p = (uint8_t *)pfl->storage + offset; + if (pfl->be) { + uint64_t current = ldn_be_p(p, width); + stn_be_p(p, width, current & value); + } else { + uint64_t current = ldn_le_p(p, width); + stn_le_p(p, width, current & value); } + pflash_update(pfl, offset, width); } - pfl->status = 0x00 | ~(value & 0x80); + /* + * While programming, status bit DQ7 should hold the opposite + * value from how it was programmed. + */ + set_dq7(pfl, ~value); /* Let's pretend write is immediate */ if (pfl->bypass) goto do_bypass; goto reset_flash; - case 0x90: + case 0x90: /* Autoselect */ if (pfl->bypass && cmd == 0x00) { /* Unlock bypass reset */ goto reset_flash; } - /* We can enter CFI query mode from autoselect mode */ - if (boff == 0x55 && cmd == 0x98) - goto enter_CFI_mode; + /* + * We can enter CFI query mode from autoselect mode, but we must + * return to autoselect mode after a reset. 
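/*
 * Minimal sketch of the program path above: programming NOR flash can only
 * clear bits, so the new cell value is (old & data), and while the operation
 * is pending DQ7 reads back as the complement of bit 7 of the data being
 * programmed. The cell and data values are arbitrary examples.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t cell = 0xF0;    /* current (partly programmed) cell contents */
    uint8_t data = 0x3C;    /* value written by the guest */

    uint8_t programmed = cell & data;       /* bits only go from 1 to 0 */
    uint8_t dq7_busy   = (~data) & 0x80;    /* DQ7 while "programming" */
    uint8_t dq7_done   = programmed & 0x80; /* DQ7 once complete */

    printf("cell 0x%02X & data 0x%02X -> 0x%02X\n", cell, data, programmed);
    printf("DQ7 during program: %u, after: %u\n",
           dq7_busy >> 7, dq7_done >> 7);
    return 0;
}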
+ */ + if (boff == 0x55 && cmd == 0x98) { + /* Enter autoselect CFI query mode */ + pfl->wcycle = WCYCLE_AUTOSELECT_CFI; + pfl->cmd = 0x98; + return; + } /* No break here */ default: DPRINTF("%s: invalid write for command %02x\n", @@ -400,11 +580,11 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, } case 4: switch (pfl->cmd) { - case 0xA0: + case 0xA0: /* Program */ /* Ignore writes while flash data write is occurring */ /* As we suppose write is immediate, this should never happen */ return; - case 0x80: + case 0x80: /* Erase */ goto check_unlock1; default: /* Should never happen */ @@ -414,8 +594,12 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, } break; case 5: + if (pflash_erase_suspend_mode(pfl)) { + /* Erasing is not supported in erase suspend mode. */ + goto reset_flash; + } switch (cmd) { - case 0x10: + case 0x10: /* Chip Erase */ if (boff != pfl->unlock_addr0) { DPRINTF("%s: chip erase: invalid address " TARGET_FMT_plx "\n", __func__, offset); @@ -424,28 +608,16 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, /* Chip erase */ DPRINTF("%s: start chip erase\n", __func__); if (!pfl->ro) { - memset(pfl->storage, 0xFF, pfl->chip_len); + memset(pfl->storage, 0xff, pfl->chip_len); pflash_update(pfl, 0, pfl->chip_len); } - pfl->status = 0x00; - /* Let's wait 5 seconds before chip erase is done */ + set_dq7(pfl, 0x00); + /* Wait the time specified at CFI address 0x22. */ timer_mod(&pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - (NANOSECONDS_PER_SECOND * 5)); + (1ULL << pfl->cfi_table[0x22]) * SCALE_MS); break; - case 0x30: - /* Sector erase */ - p = pfl->storage; - offset &= ~(pfl->sector_len - 1); - DPRINTF("%s: start sector erase at " TARGET_FMT_plx "\n", __func__, - offset); - if (!pfl->ro) { - memset(p + offset, 0xFF, pfl->sector_len); - pflash_update(pfl, offset, pfl->sector_len); - } - pfl->status = 0x00; - /* Let's wait 1/2 second before sector erase is done */ - timer_mod(&pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - (NANOSECONDS_PER_SECOND / 2)); + case 0x30: /* Sector erase */ + pflash_sector_erase(pfl, offset); break; default: DPRINTF("%s: invalid command %02x (wc 5)\n", __func__, cmd); @@ -455,11 +627,47 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, break; case 6: switch (pfl->cmd) { - case 0x10: + case 0x10: /* Chip Erase */ /* Ignore writes during chip erase */ return; - case 0x30: - /* Ignore writes during sector erase */ + case 0x30: /* Sector erase */ + if (cmd == 0xB0) { + /* + * If erase suspend happens during the erase timeout (so DQ3 is + * 0), then the device suspends erasing immediately. Set the + * remaining time to be the total time to erase. Otherwise, + * there is a maximum amount of time it can take to enter + * suspend mode. Let's ignore that and suspend immediately and + * set the remaining time to the actual time remaining on the + * timer. + */ + if ((pfl->status & 0x08) == 0) { + pfl->erase_time_remaining = pflash_erase_time(pfl); + } else { + int64_t delta = timer_expire_time_ns(&pfl->timer) - + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + /* Make sure we have a positive time remaining. */ + pfl->erase_time_remaining = delta <= 0 ? 1 : delta; + } + reset_dq3(pfl); + timer_del(&pfl->timer); + pfl->wcycle = 0; + pfl->cmd = 0; + return; + } + /* + * If DQ3 is 0, additional sector erase commands can be + * written and anything else (other than an erase suspend) resets + * the device. 
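/*
 * Worked example of the CFI timing encoding used above: byte 0x21 holds the
 * typical single-sector erase timeout as 2^N ms and byte 0x22 the typical
 * full-chip erase timeout, also as 2^N ms. The N values below are plausible
 * sample values, not the ones the emulated chip advertises.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t cfi_0x21 = 0x0A;        /* 2^10 ms = ~1 s per sector */
    uint8_t cfi_0x22 = 0x0D;        /* 2^13 ms = ~8 s whole chip */
    int sectors_to_erase = 3;

    uint64_t sector_erase_ms = (1ULL << cfi_0x21) * sectors_to_erase;
    uint64_t chip_erase_ms   = 1ULL << cfi_0x22;

    printf("erase %d sectors: %" PRIu64 " ms\n",
           sectors_to_erase, sector_erase_ms);
    printf("chip erase:      %" PRIu64 " ms\n", chip_erase_ms);
    return 0;
}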
+ */ + if ((pfl->status & 0x08) == 0) { + if (cmd == 0x30) { + pflash_sector_erase(pfl, offset); + } else { + goto reset_flash; + } + } + /* Ignore writes during the actual erase. */ return; default: /* Should never happen */ @@ -468,7 +676,9 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, goto reset_flash; } break; - case 7: /* Special value for CFI queries */ + /* Special values for CFI queries */ + case WCYCLE_CFI: + case WCYCLE_AUTOSELECT_CFI: DPRINTF("%s: invalid write in CFI query mode\n", __func__); goto reset_flash; default: @@ -493,39 +703,10 @@ static void pflash_write(PFlashCFI02 *pfl, hwaddr offset, pfl->cmd = 0; } -static uint64_t pflash_be_readfn(void *opaque, hwaddr addr, unsigned size) -{ - return pflash_read(opaque, addr, size, 1); -} - -static void pflash_be_writefn(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - pflash_write(opaque, addr, value, size, 1); -} - -static uint64_t pflash_le_readfn(void *opaque, hwaddr addr, unsigned size) -{ - return pflash_read(opaque, addr, size, 0); -} - -static void pflash_le_writefn(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - pflash_write(opaque, addr, value, size, 0); -} - -static const MemoryRegionOps pflash_cfi02_ops_be = { - .read = pflash_be_readfn, - .write = pflash_be_writefn, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static const MemoryRegionOps pflash_cfi02_ops_le = { - .read = pflash_le_readfn, - .write = pflash_le_writefn, +static const MemoryRegionOps pflash_cfi02_ops = { + .read = pflash_read, + .write = pflash_write, + .impl.max_access_size = 2, .valid.min_access_size = 1, .valid.max_access_size = 4, .endianness = DEVICE_NATIVE_ENDIAN, @@ -534,15 +715,14 @@ static const MemoryRegionOps pflash_cfi02_ops_le = { static void pflash_cfi02_realize(DeviceState *dev, Error **errp) { PFlashCFI02 *pfl = PFLASH_CFI02(dev); - uint32_t chip_len; int ret; Error *local_err = NULL; - if (pfl->sector_len == 0) { + if (pfl->uniform_sector_len == 0 && pfl->sector_len[0] == 0) { error_setg(errp, "attribute \"sector-length\" not specified or zero."); return; } - if (pfl->nb_blocs == 0) { + if (pfl->uniform_nb_blocs == 0 && pfl->nb_blocs[0] == 0) { error_setg(errp, "attribute \"num-blocks\" not specified or zero."); return; } @@ -551,18 +731,64 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) return; } - chip_len = pfl->sector_len * pfl->nb_blocs; + int nb_regions; + pfl->chip_len = 0; + pfl->total_sectors = 0; + for (nb_regions = 0; nb_regions < PFLASH_MAX_ERASE_REGIONS; ++nb_regions) { + if (pfl->nb_blocs[nb_regions] == 0) { + break; + } + pfl->total_sectors += pfl->nb_blocs[nb_regions]; + uint64_t sector_len_per_device = pfl->sector_len[nb_regions]; + + /* + * The size of each flash sector must be a power of 2 and it must be + * aligned at the same power of 2. 
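/*
 * Standalone check mirroring the sector-geometry rules stated just above and
 * enforced by the realize code that follows: a sector length must be a power
 * of two, a multiple of 256 (CFI stores it divided by 256) and smaller than
 * 2^24. The candidate lengths below are arbitrary.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sector_len_ok(uint64_t len)
{
    bool power_of_2 = len && !(len & (len - 1));
    return power_of_2 && !(len & 0xff) && len < (1 << 24);
}

int main(void)
{
    uint64_t candidates[] = { 0x80, 0x1000, 0x30000, 0x1000000 };

    for (unsigned i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
        printf("sector length 0x%llx: %s\n",
               (unsigned long long)candidates[i],
               sector_len_ok(candidates[i]) ? "ok" : "rejected");
    }
    return 0;
}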
+ */ + if (sector_len_per_device & 0xff || + sector_len_per_device >= (1 << 24) || + !is_power_of_2(sector_len_per_device)) + { + error_setg(errp, "unsupported configuration: " + "sector length[%d] per device = %" PRIx64 ".", + nb_regions, sector_len_per_device); + return; + } + if (pfl->chip_len & (sector_len_per_device - 1)) { + error_setg(errp, "unsupported configuration: " + "flash region %d not correctly aligned.", + nb_regions); + return; + } + + pfl->chip_len += (uint64_t)pfl->sector_len[nb_regions] * + pfl->nb_blocs[nb_regions]; + } + + uint64_t uniform_len = (uint64_t)pfl->uniform_nb_blocs * + pfl->uniform_sector_len; + if (nb_regions == 0) { + nb_regions = 1; + pfl->nb_blocs[0] = pfl->uniform_nb_blocs; + pfl->sector_len[0] = pfl->uniform_sector_len; + pfl->chip_len = uniform_len; + pfl->total_sectors = pfl->uniform_nb_blocs; + } else if (uniform_len != 0 && uniform_len != pfl->chip_len) { + error_setg(errp, "\"num-blocks\"*\"sector-length\" " + "different from \"num-blocks0\"*\'sector-length0\" + ... + " + "\"num-blocks3\"*\"sector-length3\""); + return; + } - memory_region_init_rom_device(&pfl->orig_mem, OBJECT(pfl), pfl->be ? - &pflash_cfi02_ops_be : &pflash_cfi02_ops_le, - pfl, pfl->name, chip_len, &local_err); + memory_region_init_rom_device(&pfl->orig_mem, OBJECT(pfl), + &pflash_cfi02_ops, pfl, pfl->name, + pfl->chip_len, &local_err); if (local_err) { error_propagate(errp, local_err); return; } pfl->storage = memory_region_get_ram_ptr(&pfl->orig_mem); - pfl->chip_len = chip_len; if (pfl->blk) { uint64_t perm; @@ -577,13 +803,20 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) } if (pfl->blk) { - if (!blk_check_size_and_read_all(pfl->blk, pfl->storage, chip_len, - errp)) { + if (!blk_check_size_and_read_all(pfl->blk, pfl->storage, + pfl->chip_len, errp)) { vmstate_unregister_ram(&pfl->orig_mem, DEVICE(pfl)); return; } } + /* Only 11 bits are used in the comparison. */ + pfl->unlock_addr0 &= 0x7FF; + pfl->unlock_addr1 &= 0x7FF; + + /* Allocate memory for a bitmap for sectors being erased. 
*/ + pfl->sector_erase_map = bitmap_new(pfl->total_sectors); + pflash_setup_mappings(pfl); pfl->rom_mode = 1; sysbus_init_mmio(SYS_BUS_DEVICE(dev), &pfl->mem); @@ -592,7 +825,9 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) pfl->wcycle = 0; pfl->cmd = 0; pfl->status = 0; + /* Hardcoded CFI table (mostly from SG29 Spansion flash) */ + const uint16_t pri_ofs = 0x40; /* Standard "QRY" string */ pfl->cfi_table[0x10] = 'Q'; pfl->cfi_table[0x11] = 'R'; @@ -601,8 +836,8 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) pfl->cfi_table[0x13] = 0x02; pfl->cfi_table[0x14] = 0x00; /* Primary extended table address */ - pfl->cfi_table[0x15] = 0x31; - pfl->cfi_table[0x16] = 0x00; + pfl->cfi_table[0x15] = pri_ofs; + pfl->cfi_table[0x16] = pri_ofs >> 8; /* Alternate command set (none) */ pfl->cfi_table[0x17] = 0x00; pfl->cfi_table[0x18] = 0x00; @@ -617,7 +852,7 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) pfl->cfi_table[0x1D] = 0x00; /* Vpp max (no Vpp pin) */ pfl->cfi_table[0x1E] = 0x00; - /* Reserved */ + /* Timeout per single byte/word write (128 ms) */ pfl->cfi_table[0x1F] = 0x07; /* Timeout for min size buffer write (NA) */ pfl->cfi_table[0x20] = 0x00; @@ -634,7 +869,7 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) /* Max timeout for chip erase */ pfl->cfi_table[0x26] = 0x0D; /* Device size */ - pfl->cfi_table[0x27] = ctz32(chip_len); + pfl->cfi_table[0x27] = ctz32(pfl->chip_len); /* Flash device interface (8 & 16 bits) */ pfl->cfi_table[0x28] = 0x02; pfl->cfi_table[0x29] = 0x00; @@ -643,37 +878,60 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) // pfl->cfi_table[0x2A] = 0x05; pfl->cfi_table[0x2A] = 0x00; pfl->cfi_table[0x2B] = 0x00; - /* Number of erase block regions (uniform) */ - pfl->cfi_table[0x2C] = 0x01; - /* Erase block region 1 */ - pfl->cfi_table[0x2D] = pfl->nb_blocs - 1; - pfl->cfi_table[0x2E] = (pfl->nb_blocs - 1) >> 8; - pfl->cfi_table[0x2F] = pfl->sector_len >> 8; - pfl->cfi_table[0x30] = pfl->sector_len >> 16; + /* Number of erase block regions */ + pfl->cfi_table[0x2c] = nb_regions; + /* Erase block regions */ + for (int i = 0; i < nb_regions; ++i) { + uint32_t sector_len_per_device = pfl->sector_len[i]; + pfl->cfi_table[0x2d + 4 * i] = pfl->nb_blocs[i] - 1; + pfl->cfi_table[0x2e + 4 * i] = (pfl->nb_blocs[i] - 1) >> 8; + pfl->cfi_table[0x2f + 4 * i] = sector_len_per_device >> 8; + pfl->cfi_table[0x30 + 4 * i] = sector_len_per_device >> 16; + } + assert(0x2c + 4 * nb_regions < pri_ofs); /* Extended */ - pfl->cfi_table[0x31] = 'P'; - pfl->cfi_table[0x32] = 'R'; - pfl->cfi_table[0x33] = 'I'; - - pfl->cfi_table[0x34] = '1'; - pfl->cfi_table[0x35] = '0'; - - pfl->cfi_table[0x36] = 0x00; - pfl->cfi_table[0x37] = 0x00; - pfl->cfi_table[0x38] = 0x00; - pfl->cfi_table[0x39] = 0x00; - - pfl->cfi_table[0x3a] = 0x00; - - pfl->cfi_table[0x3b] = 0x00; - pfl->cfi_table[0x3c] = 0x00; + pfl->cfi_table[0x00 + pri_ofs] = 'P'; + pfl->cfi_table[0x01 + pri_ofs] = 'R'; + pfl->cfi_table[0x02 + pri_ofs] = 'I'; + + /* Extended version 1.0 */ + pfl->cfi_table[0x03 + pri_ofs] = '1'; + pfl->cfi_table[0x04 + pri_ofs] = '0'; + + /* Address sensitive unlock required. */ + pfl->cfi_table[0x05 + pri_ofs] = 0x00; + /* Erase suspend to read/write. */ + pfl->cfi_table[0x06 + pri_ofs] = 0x02; + /* Sector protect not supported. */ + pfl->cfi_table[0x07 + pri_ofs] = 0x00; + /* Temporary sector unprotect not supported. */ + pfl->cfi_table[0x08 + pri_ofs] = 0x00; + + /* Sector protect/unprotect scheme. 
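/*
 * Worked example of the CFI erase-block-region encoding filled in above:
 * byte 0x2c holds the region count and each region takes four bytes, a
 * little-endian (block count - 1) followed by a little-endian
 * (block size / 256). The two-region layout here is made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t nb_blocs[]   = { 8, 63 };
    uint32_t sector_len[] = { 8 * 1024, 64 * 1024 };
    uint8_t cfi[0x40] = { 0 };

    cfi[0x2c] = 2;      /* number of erase block regions */
    for (int i = 0; i < 2; i++) {
        cfi[0x2d + 4 * i] = (nb_blocs[i] - 1) & 0xff;
        cfi[0x2e + 4 * i] = (nb_blocs[i] - 1) >> 8;
        cfi[0x2f + 4 * i] = (sector_len[i] >> 8) & 0xff;
        cfi[0x30 + 4 * i] = sector_len[i] >> 16;
    }

    for (int addr = 0x2c; addr <= 0x34; addr++) {
        printf("cfi[0x%02x] = 0x%02x\n", addr, cfi[addr]);
    }
    return 0;
}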
*/ + pfl->cfi_table[0x09 + pri_ofs] = 0x00; + + /* Simultaneous operation not supported. */ + pfl->cfi_table[0x0a + pri_ofs] = 0x00; + /* Burst mode not supported. */ + pfl->cfi_table[0x0b + pri_ofs] = 0x00; + /* Page mode not supported. */ + pfl->cfi_table[0x0c + pri_ofs] = 0x00; + assert(0x0c + pri_ofs < ARRAY_SIZE(pfl->cfi_table)); } static Property pflash_cfi02_properties[] = { DEFINE_PROP_DRIVE("drive", PFlashCFI02, blk), - DEFINE_PROP_UINT32("num-blocks", PFlashCFI02, nb_blocs, 0), - DEFINE_PROP_UINT32("sector-length", PFlashCFI02, sector_len, 0), + DEFINE_PROP_UINT32("num-blocks", PFlashCFI02, uniform_nb_blocs, 0), + DEFINE_PROP_UINT32("sector-length", PFlashCFI02, uniform_sector_len, 0), + DEFINE_PROP_UINT32("num-blocks0", PFlashCFI02, nb_blocs[0], 0), + DEFINE_PROP_UINT32("sector-length0", PFlashCFI02, sector_len[0], 0), + DEFINE_PROP_UINT32("num-blocks1", PFlashCFI02, nb_blocs[1], 0), + DEFINE_PROP_UINT32("sector-length1", PFlashCFI02, sector_len[1], 0), + DEFINE_PROP_UINT32("num-blocks2", PFlashCFI02, nb_blocs[2], 0), + DEFINE_PROP_UINT32("sector-length2", PFlashCFI02, sector_len[2], 0), + DEFINE_PROP_UINT32("num-blocks3", PFlashCFI02, nb_blocs[3], 0), + DEFINE_PROP_UINT32("sector-length3", PFlashCFI02, sector_len[3], 0), DEFINE_PROP_UINT8("width", PFlashCFI02, width, 0), DEFINE_PROP_UINT8("mappings", PFlashCFI02, mappings, 0), DEFINE_PROP_UINT8("big-endian", PFlashCFI02, be, 0), @@ -691,6 +949,7 @@ static void pflash_cfi02_unrealize(DeviceState *dev, Error **errp) { PFlashCFI02 *pfl = PFLASH_CFI02(dev); timer_del(&pfl->timer); + g_free(pfl->sector_erase_map); } static void pflash_cfi02_class_init(ObjectClass *klass, void *data) diff --git a/hw/block/trace-events b/hw/block/trace-events index 97a17838ed..13d1b21dd4 100644 --- a/hw/block/trace-events +++ b/hw/block/trace-events @@ -7,13 +7,11 @@ fdc_ioport_write(uint8_t reg, uint8_t value) "write reg 0x%02x val 0x%02x" # pflash_cfi02.c # pflash_cfi01.c pflash_reset(void) "reset" -pflash_read(uint64_t offset, uint8_t cmd, int width, uint8_t wcycle) "offset:0x%04"PRIx64" cmd:0x%02x width:%d wcycle:%u" -pflash_write(uint64_t offset, uint32_t value, int width, uint8_t wcycle) "offset:0x%04"PRIx64" value:0x%03x width:%d wcycle:%u" pflash_timer_expired(uint8_t cmd) "command 0x%02x done" -pflash_data_read8(uint64_t offset, uint32_t value) "data offset:0x%04"PRIx64" value:0x%02x" -pflash_data_read16(uint64_t offset, uint32_t value) "data offset:0x%04"PRIx64" value:0x%04x" -pflash_data_read32(uint64_t offset, uint32_t value) "data offset:0x%04"PRIx64" value:0x%08x" -pflash_data_write(uint64_t offset, uint32_t value, int width, uint64_t counter) "data offset:0x%04"PRIx64" value:0x%08x width:%d counter:0x%016"PRIx64 +pflash_io_read(uint64_t offset, int width, int fmt_width, uint32_t value, uint8_t cmd, uint8_t wcycle) "offset:0x%04"PRIx64" width:%d value:0x%0*x cmd:0x%02x wcycle:%u" +pflash_io_write(uint64_t offset, int width, int fmt_width, uint32_t value, uint8_t wcycle) "offset:0x%04"PRIx64" width:%d value:0x%0*x wcycle:%u" +pflash_data_read(uint64_t offset, int width, uint32_t value) "data offset:0x%04"PRIx64" value:0x%0*x" +pflash_data_write(uint64_t offset, int width, uint32_t value, uint64_t counter) "data offset:0x%04"PRIx64" value:0x%0*x counter:0x%016"PRIx64 pflash_manufacturer_id(uint16_t id) "Read Manufacturer ID: 0x%04x" pflash_device_id(uint16_t id) "Read Device ID: 0x%04x" pflash_device_info(uint64_t offset) "Read Device Information offset:0x%04"PRIx64 diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 
9cb61336a6..85bc4017e7 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -191,7 +191,7 @@ static void vhost_user_blk_stop(VirtIODevice *vdev) static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserBlk *s = VHOST_USER_BLK(vdev); - bool should_start = vdev->started; + bool should_start = virtio_device_started(vdev, status); int ret; if (!vdev->vm_running) { @@ -317,7 +317,7 @@ static int vhost_user_blk_connect(DeviceState *dev) } /* restore vhost state */ - if (vdev->started) { + if (virtio_device_started(vdev, vdev->status)) { ret = vhost_user_blk_start(vdev); if (ret < 0) { error_report("vhost-user-blk: vhost start failed: %s", diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c index 8f224ef81d..69d73196e2 100644 --- a/hw/block/xen-block.c +++ b/hw/block/xen-block.c @@ -11,7 +11,7 @@ #include "qemu/option.h" #include "qapi/error.h" #include "qapi/qapi-commands-block-core.h" -#include "qapi/qapi-commands-misc.h" +#include "qapi/qapi-commands-qom.h" #include "qapi/qapi-visit-block-core.h" #include "qapi/qobject-input-visitor.h" #include "qapi/visitor.h" diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs index a799c83815..585b734358 100644 --- a/hw/core/Makefile.objs +++ b/hw/core/Makefile.objs @@ -22,3 +22,7 @@ common-obj-$(CONFIG_SOFTMMU) += split-irq.o common-obj-$(CONFIG_PLATFORM_BUS) += platform-bus.o common-obj-$(CONFIG_SOFTMMU) += generic-loader.o common-obj-$(CONFIG_SOFTMMU) += null-machine.o + +obj-$(CONFIG_SOFTMMU) += machine-qmp-cmds.o +obj-$(CONFIG_SOFTMMU) += numa.o +common-obj-$(CONFIG_SOFTMMU) += machine-hmp-cmds.o diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c new file mode 100644 index 0000000000..7fa6075f1e --- /dev/null +++ b/hw/core/machine-hmp-cmds.c @@ -0,0 +1,164 @@ +/* + * HMP commands related to machines and CPUs + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
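/*
 * Sketch of the start/stop decision the vhost-user-blk change above hands to
 * virtio_device_started(): with the "use-started" flag turned off for 4.0
 * and older machine types (see the hw_compat_4_0 entry further down), the
 * DRIVER_OK status bit decides; otherwise the device's own started flag
 * does. The structure below is a stand-in, not the real VirtIODevice.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_CONFIG_S_DRIVER_OK 4

typedef struct {
    bool use_started;
    bool started;
} FakeVirtIODevice;

static bool device_started(const FakeVirtIODevice *vdev, uint8_t status)
{
    return vdev->use_started ? vdev->started
                             : (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
}

int main(void)
{
    FakeVirtIODevice legacy = { .use_started = false, .started = false };
    FakeVirtIODevice modern = { .use_started = true,  .started = true  };

    printf("legacy machine, DRIVER_OK set: %d\n",
           device_started(&legacy, VIRTIO_CONFIG_S_DRIVER_OK));
    printf("modern machine, status ignored: %d\n",
           device_started(&modern, 0));
    return 0;
}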
+ */ + +#include "qemu/osdep.h" +#include "monitor/hmp.h" +#include "monitor/monitor.h" +#include "qapi/error.h" +#include "qapi/qapi-builtin-visit.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/qmp/qdict.h" +#include "qapi/string-output-visitor.h" +#include "qemu/error-report.h" +#include "sysemu/numa.h" + +void hmp_info_cpus(Monitor *mon, const QDict *qdict) +{ + CpuInfoFastList *cpu_list, *cpu; + + cpu_list = qmp_query_cpus_fast(NULL); + + for (cpu = cpu_list; cpu; cpu = cpu->next) { + int active = ' '; + + if (cpu->value->cpu_index == monitor_get_cpu_index()) { + active = '*'; + } + + monitor_printf(mon, "%c CPU #%" PRId64 ":", active, + cpu->value->cpu_index); + monitor_printf(mon, " thread_id=%" PRId64 "\n", cpu->value->thread_id); + } + + qapi_free_CpuInfoFastList(cpu_list); +} + +void hmp_cpu_add(Monitor *mon, const QDict *qdict) +{ + int cpuid; + Error *err = NULL; + + error_report("cpu_add is deprecated, please use device_add instead"); + + cpuid = qdict_get_int(qdict, "id"); + qmp_cpu_add(cpuid, &err); + hmp_handle_error(mon, &err); +} + +void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + HotpluggableCPUList *l = qmp_query_hotpluggable_cpus(&err); + HotpluggableCPUList *saved = l; + CpuInstanceProperties *c; + + if (err != NULL) { + hmp_handle_error(mon, &err); + return; + } + + monitor_printf(mon, "Hotpluggable CPUs:\n"); + while (l) { + monitor_printf(mon, " type: \"%s\"\n", l->value->type); + monitor_printf(mon, " vcpus_count: \"%" PRIu64 "\"\n", + l->value->vcpus_count); + if (l->value->has_qom_path) { + monitor_printf(mon, " qom_path: \"%s\"\n", l->value->qom_path); + } + + c = l->value->props; + monitor_printf(mon, " CPUInstance Properties:\n"); + if (c->has_node_id) { + monitor_printf(mon, " node-id: \"%" PRIu64 "\"\n", c->node_id); + } + if (c->has_socket_id) { + monitor_printf(mon, " socket-id: \"%" PRIu64 "\"\n", c->socket_id); + } + if (c->has_core_id) { + monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id); + } + if (c->has_thread_id) { + monitor_printf(mon, " thread-id: \"%" PRIu64 "\"\n", c->thread_id); + } + + l = l->next; + } + + qapi_free_HotpluggableCPUList(saved); +} + +void hmp_info_memdev(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + MemdevList *memdev_list = qmp_query_memdev(&err); + MemdevList *m = memdev_list; + Visitor *v; + char *str; + + while (m) { + v = string_output_visitor_new(false, &str); + visit_type_uint16List(v, NULL, &m->value->host_nodes, NULL); + monitor_printf(mon, "memory backend: %s\n", m->value->id); + monitor_printf(mon, " size: %" PRId64 "\n", m->value->size); + monitor_printf(mon, " merge: %s\n", + m->value->merge ? "true" : "false"); + monitor_printf(mon, " dump: %s\n", + m->value->dump ? "true" : "false"); + monitor_printf(mon, " prealloc: %s\n", + m->value->prealloc ? 
"true" : "false"); + monitor_printf(mon, " policy: %s\n", + HostMemPolicy_str(m->value->policy)); + visit_complete(v, &str); + monitor_printf(mon, " host nodes: %s\n", str); + + g_free(str); + visit_free(v); + m = m->next; + } + + monitor_printf(mon, "\n"); + + qapi_free_MemdevList(memdev_list); + hmp_handle_error(mon, &err); +} + +void hmp_info_numa(Monitor *mon, const QDict *qdict) +{ + int i; + NumaNodeMem *node_mem; + CpuInfoList *cpu_list, *cpu; + + cpu_list = qmp_query_cpus(&error_abort); + node_mem = g_new0(NumaNodeMem, nb_numa_nodes); + + query_numa_node_mem(node_mem); + monitor_printf(mon, "%d nodes\n", nb_numa_nodes); + for (i = 0; i < nb_numa_nodes; i++) { + monitor_printf(mon, "node %d cpus:", i); + for (cpu = cpu_list; cpu; cpu = cpu->next) { + if (cpu->value->has_props && cpu->value->props->has_node_id && + cpu->value->props->node_id == i) { + monitor_printf(mon, " %" PRIi64, cpu->value->CPU); + } + } + monitor_printf(mon, "\n"); + monitor_printf(mon, "node %d size: %" PRId64 " MB\n", i, + node_mem[i].node_mem >> 20); + monitor_printf(mon, "node %d plugged: %" PRId64 " MB\n", i, + node_mem[i].node_plugged_mem >> 20); + } + qapi_free_CpuInfoList(cpu_list); + g_free(node_mem); +} diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c new file mode 100644 index 0000000000..1e08252af7 --- /dev/null +++ b/hw/core/machine-qmp-cmds.c @@ -0,0 +1,328 @@ +/* + * QMP commands related to machines and CPUs + * + * Copyright (C) 2014 Red Hat Inc + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/boards.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/qmp/qerror.h" +#include "sysemu/hostmem.h" +#include "sysemu/hw_accel.h" +#include "sysemu/numa.h" +#include "sysemu/sysemu.h" + +CpuInfoList *qmp_query_cpus(Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + CpuInfoList *head = NULL, *cur_item = NULL; + CPUState *cpu; + + CPU_FOREACH(cpu) { + CpuInfoList *info; +#if defined(TARGET_I386) + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; +#elif defined(TARGET_PPC) + PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu); + CPUPPCState *env = &ppc_cpu->env; +#elif defined(TARGET_SPARC) + SPARCCPU *sparc_cpu = SPARC_CPU(cpu); + CPUSPARCState *env = &sparc_cpu->env; +#elif defined(TARGET_RISCV) + RISCVCPU *riscv_cpu = RISCV_CPU(cpu); + CPURISCVState *env = &riscv_cpu->env; +#elif defined(TARGET_MIPS) + MIPSCPU *mips_cpu = MIPS_CPU(cpu); + CPUMIPSState *env = &mips_cpu->env; +#elif defined(TARGET_TRICORE) + TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu); + CPUTriCoreState *env = &tricore_cpu->env; +#elif defined(TARGET_S390X) + S390CPU *s390_cpu = S390_CPU(cpu); + CPUS390XState *env = &s390_cpu->env; +#endif + + cpu_synchronize_state(cpu); + + info = g_malloc0(sizeof(*info)); + info->value = g_malloc0(sizeof(*info->value)); + info->value->CPU = cpu->cpu_index; + info->value->current = (cpu == first_cpu); + info->value->halted = cpu->halted; + info->value->qom_path = object_get_canonical_path(OBJECT(cpu)); + info->value->thread_id = cpu->thread_id; +#if defined(TARGET_I386) + info->value->arch = CPU_INFO_ARCH_X86; + info->value->u.x86.pc = env->eip + env->segs[R_CS].base; +#elif defined(TARGET_PPC) + info->value->arch = CPU_INFO_ARCH_PPC; + info->value->u.ppc.nip = env->nip; +#elif defined(TARGET_SPARC) + info->value->arch = 
CPU_INFO_ARCH_SPARC; + info->value->u.q_sparc.pc = env->pc; + info->value->u.q_sparc.npc = env->npc; +#elif defined(TARGET_MIPS) + info->value->arch = CPU_INFO_ARCH_MIPS; + info->value->u.q_mips.PC = env->active_tc.PC; +#elif defined(TARGET_TRICORE) + info->value->arch = CPU_INFO_ARCH_TRICORE; + info->value->u.tricore.PC = env->PC; +#elif defined(TARGET_S390X) + info->value->arch = CPU_INFO_ARCH_S390; + info->value->u.s390.cpu_state = env->cpu_state; +#elif defined(TARGET_RISCV) + info->value->arch = CPU_INFO_ARCH_RISCV; + info->value->u.riscv.pc = env->pc; +#else + info->value->arch = CPU_INFO_ARCH_OTHER; +#endif + info->value->has_props = !!mc->cpu_index_to_instance_props; + if (info->value->has_props) { + CpuInstanceProperties *props; + props = g_malloc0(sizeof(*props)); + *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index); + info->value->props = props; + } + + /* XXX: waiting for the qapi to support GSList */ + if (!cur_item) { + head = cur_item = info; + } else { + cur_item->next = info; + cur_item = info; + } + } + + return head; +} + +static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target) +{ + /* + * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the + * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script. + */ + switch (target) { + case SYS_EMU_TARGET_I386: + case SYS_EMU_TARGET_X86_64: + return CPU_INFO_ARCH_X86; + + case SYS_EMU_TARGET_PPC: + case SYS_EMU_TARGET_PPC64: + return CPU_INFO_ARCH_PPC; + + case SYS_EMU_TARGET_SPARC: + case SYS_EMU_TARGET_SPARC64: + return CPU_INFO_ARCH_SPARC; + + case SYS_EMU_TARGET_MIPS: + case SYS_EMU_TARGET_MIPSEL: + case SYS_EMU_TARGET_MIPS64: + case SYS_EMU_TARGET_MIPS64EL: + return CPU_INFO_ARCH_MIPS; + + case SYS_EMU_TARGET_TRICORE: + return CPU_INFO_ARCH_TRICORE; + + case SYS_EMU_TARGET_S390X: + return CPU_INFO_ARCH_S390; + + case SYS_EMU_TARGET_RISCV32: + case SYS_EMU_TARGET_RISCV64: + return CPU_INFO_ARCH_RISCV; + + default: + return CPU_INFO_ARCH_OTHER; + } +} + +static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu) +{ +#ifdef TARGET_S390X + S390CPU *s390_cpu = S390_CPU(cpu); + CPUS390XState *env = &s390_cpu->env; + + info->cpu_state = env->cpu_state; +#else + abort(); +#endif +} + +/* + * fast means: we NEVER interrupt vCPU threads to retrieve + * information from KVM. 
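/*
 * Small standalone example of the head/cur_item pattern that
 * qmp_query_cpus() above uses to build its QAPI list in CPU order: keep the
 * head for the caller and the tail for O(1) appends. The IntList type is
 * invented for the illustration.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct IntList {
    int value;
    struct IntList *next;
} IntList;

int main(void)
{
    IntList *head = NULL, *cur_item = NULL;

    for (int i = 0; i < 4; i++) {
        IntList *info = calloc(1, sizeof(*info));
        info->value = i * 10;
        if (!cur_item) {
            head = cur_item = info;     /* first element becomes the head */
        } else {
            cur_item->next = info;      /* append at the tail */
            cur_item = info;
        }
    }

    for (IntList *p = head; p; p = p->next) {
        printf("%d ", p->value);
    }
    printf("\n");

    while (head) {                      /* free the list */
        IntList *next = head->next;
        free(head);
        head = next;
    }
    return 0;
}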
+ */ +CpuInfoFastList *qmp_query_cpus_fast(Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + CpuInfoFastList *head = NULL, *cur_item = NULL; + SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME, + -1, &error_abort); + CPUState *cpu; + + CPU_FOREACH(cpu) { + CpuInfoFastList *info = g_malloc0(sizeof(*info)); + info->value = g_malloc0(sizeof(*info->value)); + + info->value->cpu_index = cpu->cpu_index; + info->value->qom_path = object_get_canonical_path(OBJECT(cpu)); + info->value->thread_id = cpu->thread_id; + + info->value->has_props = !!mc->cpu_index_to_instance_props; + if (info->value->has_props) { + CpuInstanceProperties *props; + props = g_malloc0(sizeof(*props)); + *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index); + info->value->props = props; + } + + info->value->arch = sysemu_target_to_cpuinfo_arch(target); + info->value->target = target; + if (target == SYS_EMU_TARGET_S390X) { + cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu); + } + + if (!cur_item) { + head = cur_item = info; + } else { + cur_item->next = info; + cur_item = info; + } + } + + return head; +} + +MachineInfoList *qmp_query_machines(Error **errp) +{ + GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false); + MachineInfoList *mach_list = NULL; + + for (el = machines; el; el = el->next) { + MachineClass *mc = el->data; + MachineInfoList *entry; + MachineInfo *info; + + info = g_malloc0(sizeof(*info)); + if (mc->is_default) { + info->has_is_default = true; + info->is_default = true; + } + + if (mc->alias) { + info->has_alias = true; + info->alias = g_strdup(mc->alias); + } + + info->name = g_strdup(mc->name); + info->cpu_max = !mc->max_cpus ? 1 : mc->max_cpus; + info->hotpluggable_cpus = mc->has_hotpluggable_cpus; + + entry = g_malloc0(sizeof(*entry)); + entry->value = info; + entry->next = mach_list; + mach_list = entry; + } + + g_slist_free(machines); + return mach_list; +} + +CurrentMachineParams *qmp_query_current_machine(Error **errp) +{ + CurrentMachineParams *params = g_malloc0(sizeof(*params)); + params->wakeup_suspend_support = qemu_wakeup_suspend_enabled(); + + return params; +} + +HotpluggableCPUList *qmp_query_hotpluggable_cpus(Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if (!mc->has_hotpluggable_cpus) { + error_setg(errp, QERR_FEATURE_DISABLED, "query-hotpluggable-cpus"); + return NULL; + } + + return machine_query_hotpluggable_cpus(ms); +} + +void qmp_cpu_add(int64_t id, Error **errp) +{ + MachineClass *mc; + + mc = MACHINE_GET_CLASS(current_machine); + if (mc->hot_add_cpu) { + mc->hot_add_cpu(id, errp); + } else { + error_setg(errp, "Not supported"); + } +} + +void qmp_set_numa_node(NumaOptions *cmd, Error **errp) +{ + if (!runstate_check(RUN_STATE_PRECONFIG)) { + error_setg(errp, "The command is permitted only in '%s' state", + RunState_str(RUN_STATE_PRECONFIG)); + return; + } + + set_numa_options(MACHINE(qdev_get_machine()), cmd, errp); +} + +static int query_memdev(Object *obj, void *opaque) +{ + MemdevList **list = opaque; + MemdevList *m = NULL; + + if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { + m = g_malloc0(sizeof(*m)); + + m->value = g_malloc0(sizeof(*m->value)); + + m->value->id = object_get_canonical_path_component(obj); + m->value->has_id = !!m->value->id; + + m->value->size = object_property_get_uint(obj, "size", + &error_abort); + m->value->merge = object_property_get_bool(obj, "merge", + &error_abort); + 
m->value->dump = object_property_get_bool(obj, "dump", + &error_abort); + m->value->prealloc = object_property_get_bool(obj, + "prealloc", + &error_abort); + m->value->policy = object_property_get_enum(obj, + "policy", + "HostMemPolicy", + &error_abort); + object_property_get_uint16List(obj, "host-nodes", + &m->value->host_nodes, + &error_abort); + + m->next = *list; + *list = m; + } + + return 0; +} + +MemdevList *qmp_query_memdev(Error **errp) +{ + Object *obj = object_get_objects_root(); + MemdevList *list = NULL; + + object_child_foreach(obj, query_memdev, &list); + return list; +} diff --git a/hw/core/machine.c b/hw/core/machine.c index ea5a01aa49..ea84bd6788 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -30,6 +30,7 @@ GlobalProperty hw_compat_4_0[] = { { "bochs-display", "edid", "false" }, { "virtio-vga", "edid", "false" }, { "virtio-gpu-pci", "edid", "false" }, + { "virtio-device", "use-started", "false" }, }; const size_t hw_compat_4_0_len = G_N_ELEMENTS(hw_compat_4_0); diff --git a/hw/core/numa.c b/hw/core/numa.c new file mode 100644 index 0000000000..66119d181b --- /dev/null +++ b/hw/core/numa.c @@ -0,0 +1,605 @@ +/* + * NUMA parameter parsing routines + * + * Copyright (c) 2014 Fujitsu Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "sysemu/numa.h" +#include "exec/cpu-common.h" +#include "exec/ramlist.h" +#include "qemu/bitmap.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/opts-visitor.h" +#include "qapi/qapi-visit-machine.h" +#include "hw/mem/pc-dimm.h" +#include "hw/mem/memory-device.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/cutils.h" + +QemuOptsList qemu_numa_opts = { + .name = "numa", + .implied_opt_name = "type", + .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head), + .desc = { { 0 } } /* validated with OptsVisitor */ +}; + +static int have_memdevs = -1; +static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one. 
+ * For all nodes, nodeid < max_numa_nodeid + */ +int nb_numa_nodes; +bool have_numa_distance; +NodeInfo numa_info[MAX_NODES]; + + +static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, + Error **errp) +{ + Error *err = NULL; + uint16_t nodenr; + uint16List *cpus = NULL; + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if (node->has_nodeid) { + nodenr = node->nodeid; + } else { + nodenr = nb_numa_nodes; + } + + if (nodenr >= MAX_NODES) { + error_setg(errp, "Max number of NUMA nodes reached: %" + PRIu16 "", nodenr); + return; + } + + if (numa_info[nodenr].present) { + error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr); + return; + } + + if (!mc->cpu_index_to_instance_props || !mc->get_default_cpu_node_id) { + error_setg(errp, "NUMA is not supported by this machine-type"); + return; + } + for (cpus = node->cpus; cpus; cpus = cpus->next) { + CpuInstanceProperties props; + if (cpus->value >= max_cpus) { + error_setg(errp, + "CPU index (%" PRIu16 ")" + " should be smaller than maxcpus (%d)", + cpus->value, max_cpus); + return; + } + props = mc->cpu_index_to_instance_props(ms, cpus->value); + props.node_id = nodenr; + props.has_node_id = true; + machine_set_cpu_numa_node(ms, &props, &err); + if (err) { + error_propagate(errp, err); + return; + } + } + + if (node->has_mem && node->has_memdev) { + error_setg(errp, "cannot specify both mem= and memdev="); + return; + } + + if (have_memdevs == -1) { + have_memdevs = node->has_memdev; + } + if (node->has_memdev != have_memdevs) { + error_setg(errp, "memdev option must be specified for either " + "all or no nodes"); + return; + } + + if (node->has_mem) { + numa_info[nodenr].node_mem = node->mem; + } + if (node->has_memdev) { + Object *o; + o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL); + if (!o) { + error_setg(errp, "memdev=%s is ambiguous", node->memdev); + return; + } + + object_ref(o); + numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL); + numa_info[nodenr].node_memdev = MEMORY_BACKEND(o); + } + numa_info[nodenr].present = true; + max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); + nb_numa_nodes++; +} + +static void parse_numa_distance(NumaDistOptions *dist, Error **errp) +{ + uint16_t src = dist->src; + uint16_t dst = dist->dst; + uint8_t val = dist->val; + + if (src >= MAX_NODES || dst >= MAX_NODES) { + error_setg(errp, "Parameter '%s' expects an integer between 0 and %d", + src >= MAX_NODES ? "src" : "dst", MAX_NODES - 1); + return; + } + + if (!numa_info[src].present || !numa_info[dst].present) { + error_setg(errp, "Source/Destination NUMA node is missing. 
" + "Please use '-numa node' option to declare it first."); + return; + } + + if (val < NUMA_DISTANCE_MIN) { + error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, " + "it shouldn't be less than %d.", + val, NUMA_DISTANCE_MIN); + return; + } + + if (src == dst && val != NUMA_DISTANCE_MIN) { + error_setg(errp, "Local distance of node %d should be %d.", + src, NUMA_DISTANCE_MIN); + return; + } + + numa_info[src].distance[dst] = val; + have_numa_distance = true; +} + +void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp) +{ + Error *err = NULL; + + switch (object->type) { + case NUMA_OPTIONS_TYPE_NODE: + parse_numa_node(ms, &object->u.node, &err); + if (err) { + goto end; + } + break; + case NUMA_OPTIONS_TYPE_DIST: + parse_numa_distance(&object->u.dist, &err); + if (err) { + goto end; + } + break; + case NUMA_OPTIONS_TYPE_CPU: + if (!object->u.cpu.has_node_id) { + error_setg(&err, "Missing mandatory node-id property"); + goto end; + } + if (!numa_info[object->u.cpu.node_id].present) { + error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be " + "defined with -numa node,nodeid=ID before it's used with " + "-numa cpu,node-id=ID", object->u.cpu.node_id); + goto end; + } + + machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu), + &err); + break; + default: + abort(); + } + +end: + error_propagate(errp, err); +} + +static int parse_numa(void *opaque, QemuOpts *opts, Error **errp) +{ + NumaOptions *object = NULL; + MachineState *ms = MACHINE(opaque); + Error *err = NULL; + Visitor *v = opts_visitor_new(opts); + + visit_type_NumaOptions(v, NULL, &object, &err); + visit_free(v); + if (err) { + goto end; + } + + /* Fix up legacy suffix-less format */ + if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) { + const char *mem_str = qemu_opt_get(opts, "mem"); + qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem); + } + + set_numa_options(ms, object, &err); + +end: + qapi_free_NumaOptions(object); + if (err) { + error_propagate(errp, err); + return -1; + } + + return 0; +} + +/* If all node pair distances are symmetric, then only distances + * in one direction are enough. If there is even one asymmetric + * pair, though, then all distances must be provided. The + * distance from a node to itself is always NUMA_DISTANCE_MIN, + * so providing it is never necessary. 
+ */ +static void validate_numa_distance(void) +{ + int src, dst; + bool is_asymmetrical = false; + + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = src; dst < nb_numa_nodes; dst++) { + if (numa_info[src].distance[dst] == 0 && + numa_info[dst].distance[src] == 0) { + if (src != dst) { + error_report("The distance between node %d and %d is " + "missing, at least one distance value " + "between each nodes should be provided.", + src, dst); + exit(EXIT_FAILURE); + } + } + + if (numa_info[src].distance[dst] != 0 && + numa_info[dst].distance[src] != 0 && + numa_info[src].distance[dst] != + numa_info[dst].distance[src]) { + is_asymmetrical = true; + } + } + } + + if (is_asymmetrical) { + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + if (src != dst && numa_info[src].distance[dst] == 0) { + error_report("At least one asymmetrical pair of " + "distances is given, please provide distances " + "for both directions of all node pairs."); + exit(EXIT_FAILURE); + } + } + } + } +} + +static void complete_init_numa_distance(void) +{ + int src, dst; + + /* Fixup NUMA distance by symmetric policy because if it is an + * asymmetric distance table, it should be a complete table and + * there would not be any missing distance except local node, which + * is verified by validate_numa_distance above. + */ + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + if (numa_info[src].distance[dst] == 0) { + if (src == dst) { + numa_info[src].distance[dst] = NUMA_DISTANCE_MIN; + } else { + numa_info[src].distance[dst] = numa_info[dst].distance[src]; + } + } + } + } +} + +void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, + int nb_nodes, ram_addr_t size) +{ + int i; + uint64_t usedmem = 0; + + /* Align each node according to the alignment + * requirements of the machine class + */ + + for (i = 0; i < nb_nodes - 1; i++) { + nodes[i].node_mem = (size / nb_nodes) & + ~((1 << mc->numa_mem_align_shift) - 1); + usedmem += nodes[i].node_mem; + } + nodes[i].node_mem = size - usedmem; +} + +void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, + int nb_nodes, ram_addr_t size) +{ + int i; + uint64_t usedmem = 0, node_mem; + uint64_t granularity = size / nb_nodes; + uint64_t propagate = 0; + + for (i = 0; i < nb_nodes - 1; i++) { + node_mem = (granularity + propagate) & + ~((1 << mc->numa_mem_align_shift) - 1); + propagate = granularity + propagate - node_mem; + nodes[i].node_mem = node_mem; + usedmem += node_mem; + } + nodes[i].node_mem = size - usedmem; +} + +void numa_complete_configuration(MachineState *ms) +{ + int i; + MachineClass *mc = MACHINE_GET_CLASS(ms); + + /* + * If memory hotplug is enabled (slots > 0) but without '-numa' + * options explicitly on CLI, guestes will break. + * + * Windows: won't enable memory hotplug without SRAT table at all + * + * Linux: if QEMU is started with initial memory all below 4Gb + * and no SRAT table present, guest kernel will use nommu DMA ops, + * which breaks 32bit hw drivers when memory is hotplugged and + * guest tries to use it with that drivers. + * + * Enable NUMA implicitly by adding a new NUMA node automatically. 
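/*
 * Worked example of the distance-table completion performed by
 * complete_init_numa_distance() above, on an invented three-node table:
 * only one direction of each pair is specified, the local distance defaults
 * to NUMA_DISTANCE_MIN (10), and missing entries are filled by mirroring.
 */
#include <stdio.h>

#define NODES 3
#define NUMA_DISTANCE_MIN 10

int main(void)
{
    /* 0 means "not specified on the command line". */
    int d[NODES][NODES] = {
        { 0, 21, 31 },
        { 0,  0, 21 },
        { 0,  0,  0 },
    };

    for (int src = 0; src < NODES; src++) {
        for (int dst = 0; dst < NODES; dst++) {
            if (d[src][dst] == 0) {
                d[src][dst] = (src == dst) ? NUMA_DISTANCE_MIN : d[dst][src];
            }
        }
    }

    for (int src = 0; src < NODES; src++) {
        for (int dst = 0; dst < NODES; dst++) {
            printf("%4d", d[src][dst]);
        }
        printf("\n");
    }
    return 0;
}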
+ */ + if (ms->ram_slots > 0 && nb_numa_nodes == 0 && + mc->auto_enable_numa_with_memhp) { + NumaNodeOptions node = { }; + parse_numa_node(ms, &node, &error_abort); + } + + assert(max_numa_nodeid <= MAX_NODES); + + /* No support for sparse NUMA node IDs yet: */ + for (i = max_numa_nodeid - 1; i >= 0; i--) { + /* Report large node IDs first, to make mistakes easier to spot */ + if (!numa_info[i].present) { + error_report("numa: Node ID missing: %d", i); + exit(1); + } + } + + /* This must be always true if all nodes are present: */ + assert(nb_numa_nodes == max_numa_nodeid); + + if (nb_numa_nodes > 0) { + uint64_t numa_total; + + if (nb_numa_nodes > MAX_NODES) { + nb_numa_nodes = MAX_NODES; + } + + /* If no memory size is given for any node, assume the default case + * and distribute the available memory equally across all nodes + */ + for (i = 0; i < nb_numa_nodes; i++) { + if (numa_info[i].node_mem != 0) { + break; + } + } + if (i == nb_numa_nodes) { + assert(mc->numa_auto_assign_ram); + mc->numa_auto_assign_ram(mc, numa_info, nb_numa_nodes, ram_size); + } + + numa_total = 0; + for (i = 0; i < nb_numa_nodes; i++) { + numa_total += numa_info[i].node_mem; + } + if (numa_total != ram_size) { + error_report("total memory for NUMA nodes (0x%" PRIx64 ")" + " should equal RAM size (0x" RAM_ADDR_FMT ")", + numa_total, ram_size); + exit(1); + } + + /* QEMU needs at least all unique node pair distances to build + * the whole NUMA distance table. QEMU treats the distance table + * as symmetric by default, i.e. distance A->B == distance B->A. + * Thus, QEMU is able to complete the distance table + * initialization even though only distance A->B is provided and + * distance B->A is not. QEMU knows the distance of a node to + * itself is always 10, so A->A distances may be omitted. When + * the distances of two nodes of a pair differ, i.e. distance + * A->B != distance B->A, then that means the distance table is + * asymmetric. In this case, the distances for both directions + * of all node pairs are required. + */ + if (have_numa_distance) { + /* Validate enough NUMA distance information was provided. */ + validate_numa_distance(); + + /* Validation succeeded, now fill in any missing distances. */ + complete_init_numa_distance(); + } + } +} + +void parse_numa_opts(MachineState *ms) +{ + qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, &error_fatal); +} + +void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp) +{ + int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort); + + if (node_id == CPU_UNSET_NUMA_NODE_ID) { + /* due to bug in libvirt, it doesn't pass node-id from props on + * device_add as expected, so we have to fix it up here */ + if (slot->props.has_node_id) { + object_property_set_int(OBJECT(dev), slot->props.node_id, + "node-id", errp); + } + } else if (node_id != slot->props.node_id) { + error_setg(errp, "invalid node-id, must be %"PRId64, + slot->props.node_id); + } +} + +static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner, + const char *name, + uint64_t ram_size) +{ + if (mem_path) { +#ifdef __linux__ + Error *err = NULL; + memory_region_init_ram_from_file(mr, owner, name, ram_size, 0, 0, + mem_path, &err); + if (err) { + error_report_err(err); + if (mem_prealloc) { + exit(1); + } + error_report("falling back to regular RAM allocation."); + + /* Legacy behavior: if allocation failed, fall back to + * regular RAM allocation. 
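/*
 * Worked example of the default split implemented by
 * numa_default_auto_assign_ram() above: each node gets roughly size/nodes
 * rounded down to the machine alignment, the rounding error is carried
 * forward, and the last node takes the remainder. The 1 GiB / 3 nodes /
 * 8 MiB-alignment numbers are assumptions for the example only.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    enum { NB_NODES = 3 };
    const uint64_t size = 1024ULL << 20;    /* 1 GiB of guest RAM */
    const int align_shift = 23;             /* 8 MiB alignment */
    uint64_t node_mem[NB_NODES];

    uint64_t granularity = size / NB_NODES;
    uint64_t usedmem = 0, propagate = 0;
    int i;

    for (i = 0; i < NB_NODES - 1; i++) {
        uint64_t mem = (granularity + propagate) &
                       ~((1ULL << align_shift) - 1);
        propagate = granularity + propagate - mem;
        node_mem[i] = mem;
        usedmem += mem;
    }
    node_mem[i] = size - usedmem;           /* last node gets the rest */

    for (i = 0; i < NB_NODES; i++) {
        printf("node %d: %" PRIu64 " MiB\n", i, node_mem[i] >> 20);
    }
    return 0;
}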
+ */ + mem_path = NULL; + memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal); + } +#else + fprintf(stderr, "-mem-path not supported on this host\n"); + exit(1); +#endif + } else { + memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal); + } + vmstate_register_ram_global(mr); +} + +void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner, + const char *name, + uint64_t ram_size) +{ + uint64_t addr = 0; + int i; + + if (nb_numa_nodes == 0 || !have_memdevs) { + allocate_system_memory_nonnuma(mr, owner, name, ram_size); + return; + } + + memory_region_init(mr, owner, name, ram_size); + for (i = 0; i < nb_numa_nodes; i++) { + uint64_t size = numa_info[i].node_mem; + HostMemoryBackend *backend = numa_info[i].node_memdev; + if (!backend) { + continue; + } + MemoryRegion *seg = host_memory_backend_get_memory(backend); + + if (memory_region_is_mapped(seg)) { + char *path = object_get_canonical_path_component(OBJECT(backend)); + error_report("memory backend %s is used multiple times. Each " + "-numa option must use a different memdev value.", + path); + g_free(path); + exit(1); + } + + host_memory_backend_set_mapped(backend, true); + memory_region_add_subregion(mr, addr, seg); + vmstate_register_ram_global(seg); + addr += size; + } +} + +static void numa_stat_memory_devices(NumaNodeMem node_mem[]) +{ + MemoryDeviceInfoList *info_list = qmp_memory_device_list(); + MemoryDeviceInfoList *info; + PCDIMMDeviceInfo *pcdimm_info; + VirtioPMEMDeviceInfo *vpi; + + for (info = info_list; info; info = info->next) { + MemoryDeviceInfo *value = info->value; + + if (value) { + switch (value->type) { + case MEMORY_DEVICE_INFO_KIND_DIMM: + case MEMORY_DEVICE_INFO_KIND_NVDIMM: + pcdimm_info = value->type == MEMORY_DEVICE_INFO_KIND_DIMM ? 
+ value->u.dimm.data : value->u.nvdimm.data; + node_mem[pcdimm_info->node].node_mem += pcdimm_info->size; + node_mem[pcdimm_info->node].node_plugged_mem += + pcdimm_info->size; + break; + case MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM: + vpi = value->u.virtio_pmem.data; + /* TODO: once we support numa, assign to right node */ + node_mem[0].node_mem += vpi->size; + node_mem[0].node_plugged_mem += vpi->size; + break; + default: + g_assert_not_reached(); + } + } + } + qapi_free_MemoryDeviceInfoList(info_list); +} + +void query_numa_node_mem(NumaNodeMem node_mem[]) +{ + int i; + + if (nb_numa_nodes <= 0) { + return; + } + + numa_stat_memory_devices(node_mem); + for (i = 0; i < nb_numa_nodes; i++) { + node_mem[i].node_mem += numa_info[i].node_mem; + } +} + +void ram_block_notifier_add(RAMBlockNotifier *n) +{ + QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next); +} + +void ram_block_notifier_remove(RAMBlockNotifier *n) +{ + QLIST_REMOVE(n, next); +} + +void ram_block_notify_add(void *host, size_t size) +{ + RAMBlockNotifier *notifier; + + QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { + notifier->ram_block_added(notifier, host, size); + } +} + +void ram_block_notify_remove(void *host, size_t size) +{ + RAMBlockNotifier *notifier; + + QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { + notifier->ram_block_removed(notifier, host, size); + } +} diff --git a/hw/core/qdev.c b/hw/core/qdev.c index f9b6efe509..94ebc0a4a1 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -29,7 +29,7 @@ #include "hw/qdev.h" #include "sysemu/sysemu.h" #include "qapi/error.h" -#include "qapi/qapi-events-misc.h" +#include "qapi/qapi-events-qdev.h" #include "qapi/qmp/qerror.h" #include "qapi/visitor.h" #include "qemu/error-report.h" diff --git a/hw/display/Kconfig b/hw/display/Kconfig index 910dccb2f7..cbdf7b1a67 100644 --- a/hw/display/Kconfig +++ b/hw/display/Kconfig @@ -130,3 +130,5 @@ config ATI_VGA default y if PCI_DEVICES depends on PCI select VGA + select BITBANG_I2C + select DDC diff --git a/hw/display/ati.c b/hw/display/ati.c index 76595d9511..a747c4cc98 100644 --- a/hw/display/ati.c +++ b/hw/display/ati.c @@ -26,6 +26,7 @@ #include "qapi/error.h" #include "hw/hw.h" #include "ui/console.h" +#include "hw/display/i2c-ddc.h" #include "trace.h" #define ATI_DEBUG_HW_CURSOR 0 @@ -88,6 +89,7 @@ static void ati_vga_switch_mode(ATIVGAState *s) DPRINTF("Switching to %dx%d %d %d @ %x\n", h, v, stride, bpp, offs); vbe_ioport_write_index(&s->vga, 0, VBE_DISPI_INDEX_ENABLE); vbe_ioport_write_data(&s->vga, 0, VBE_DISPI_DISABLED); + s->vga.big_endian_fb = false; /* reset VBE regs then set up mode */ s->vga.vbe_regs[VBE_DISPI_INDEX_XRES] = h; s->vga.vbe_regs[VBE_DISPI_INDEX_YRES] = v; @@ -215,6 +217,24 @@ static void ati_cursor_draw_line(VGACommonState *vga, uint8_t *d, int scr_y) } } +static uint64_t ati_i2c(bitbang_i2c_interface *i2c, uint64_t data, int base) +{ + bool c = (data & BIT(base + 17) ? !!(data & BIT(base + 1)) : 1); + bool d = (data & BIT(base + 16) ? 
!!(data & BIT(base)) : 1); + + bitbang_i2c_set(i2c, BITBANG_I2C_SCL, c); + d = bitbang_i2c_set(i2c, BITBANG_I2C_SDA, d); + + data &= ~0xf00ULL; + if (c) { + data |= BIT(base + 9); + } + if (d) { + data |= BIT(base + 8); + } + return data; +} + static inline uint64_t ati_reg_read_offs(uint32_t reg, int offs, unsigned int size) { @@ -266,7 +286,16 @@ static uint64_t ati_mm_read(void *opaque, hwaddr addr, unsigned int size) case DAC_CNTL: val = s->regs.dac_cntl; break; -/* case GPIO_MONID: FIXME hook up DDC I2C here */ + case GPIO_VGA_DDC: + val = s->regs.gpio_vga_ddc; + break; + case GPIO_DVI_DDC: + val = s->regs.gpio_dvi_ddc; + break; + case GPIO_MONID ... GPIO_MONID + 3: + val = ati_reg_read_offs(s->regs.gpio_monid, + addr - GPIO_MONID, size); + break; case PALETTE_INDEX: /* FIXME unaligned access */ val = vga_ioport_read(&s->vga, VGA_PEL_IR) << 16; @@ -391,9 +420,15 @@ static uint64_t ati_mm_read(void *opaque, hwaddr addr, unsigned int size) break; case DEFAULT_OFFSET: val = s->regs.default_offset; + if (s->dev_id != PCI_DEVICE_ID_ATI_RAGE128_PF) { + val >>= 10; + val |= s->regs.default_pitch << 16; + val |= s->regs.default_tile << 30; + } break; case DEFAULT_PITCH: val = s->regs.default_pitch; + val |= s->regs.default_tile << 16; break; case DEFAULT_SC_BOTTOM_RIGHT: val = s->regs.default_sc_bottom_right; @@ -497,7 +532,33 @@ static void ati_mm_write(void *opaque, hwaddr addr, s->regs.dac_cntl = data & 0xffffe3ff; s->vga.dac_8bit = !!(data & DAC_8BIT_EN); break; -/* case GPIO_MONID: FIXME hook up DDC I2C here */ + case GPIO_VGA_DDC: + if (s->dev_id != PCI_DEVICE_ID_ATI_RAGE128_PF) { + /* FIXME: Maybe add a property to select VGA or DVI port? */ + } + break; + case GPIO_DVI_DDC: + if (s->dev_id != PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.gpio_dvi_ddc = ati_i2c(&s->bbi2c, data, 0); + } + break; + case GPIO_MONID ... GPIO_MONID + 3: + /* FIXME What does Radeon have here? */ + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + ati_reg_write_offs(&s->regs.gpio_monid, + addr - GPIO_MONID, data, size); + /* + * Rage128p accesses DDC used to get EDID via these bits. + * Only touch i2c when write overlaps 3rd byte because some + * drivers access this reg via multiple partial writes and + * without this spurious bits would be sent. + */ + if ((s->regs.gpio_monid & BIT(25)) && + addr <= GPIO_MONID + 2 && addr + size > GPIO_MONID + 2) { + s->regs.gpio_monid = ati_i2c(&s->bbi2c, s->regs.gpio_monid, 1); + } + } + break; case PALETTE_INDEX ... 
PALETTE_INDEX + 3: if (size == 4) { vga_ioport_write(&s->vga, VGA_PEL_IR, (data >> 16) & 0xff); @@ -629,10 +690,10 @@ static void ati_mm_write(void *opaque, hwaddr addr, case SRC_PITCH_OFFSET: if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { s->regs.src_offset = (data & 0x1fffff) << 5; - s->regs.src_pitch = (data >> 21) & 0x3ff; + s->regs.src_pitch = (data & 0x7fe00000) >> 21; s->regs.src_tile = data >> 31; } else { - s->regs.src_offset = (data & 0x3fffff) << 11; + s->regs.src_offset = (data & 0x3fffff) << 10; s->regs.src_pitch = (data & 0x3fc00000) >> 16; s->regs.src_tile = (data >> 30) & 1; } @@ -640,10 +701,10 @@ static void ati_mm_write(void *opaque, hwaddr addr, case DST_PITCH_OFFSET: if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { s->regs.dst_offset = (data & 0x1fffff) << 5; - s->regs.dst_pitch = (data >> 21) & 0x3ff; + s->regs.dst_pitch = (data & 0x7fe00000) >> 21; s->regs.dst_tile = data >> 31; } else { - s->regs.dst_offset = (data & 0x3fffff) << 11; + s->regs.dst_offset = (data & 0x3fffff) << 10; s->regs.dst_pitch = (data & 0x3fc00000) >> 16; s->regs.dst_tile = data >> 30; } @@ -723,13 +784,19 @@ static void ati_mm_write(void *opaque, hwaddr addr, s->regs.dp_write_mask = data; break; case DEFAULT_OFFSET: - data &= (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF ? - 0x03fffc00 : 0xfffffc00); - s->regs.default_offset = data; + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.default_offset = data & 0xfffffff0; + } else { + /* Radeon has DEFAULT_PITCH_OFFSET here like DST_PITCH_OFFSET */ + s->regs.default_offset = (data & 0x3fffff) << 10; + s->regs.default_pitch = (data & 0x3fc00000) >> 16; + s->regs.default_tile = data >> 30; + } break; case DEFAULT_PITCH: if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { - s->regs.default_pitch = data & 0x103ff; + s->regs.default_pitch = data & 0x3fff; + s->regs.default_tile = (data >> 16) & 1; } break; case DEFAULT_SC_BOTTOM_RIGHT: @@ -788,6 +855,12 @@ static void ati_vga_realize(PCIDevice *dev, Error **errp) vga->cursor_draw_line = ati_cursor_draw_line; } + /* ddc, edid */ + I2CBus *i2cbus = i2c_init_bus(DEVICE(s), "ati-vga.ddc"); + bitbang_i2c_init(&s->bbi2c, i2cbus); + I2CSlave *i2cddc = I2C_SLAVE(qdev_create(BUS(i2cbus), TYPE_I2CDDC)); + i2c_set_slave_address(i2cddc, 0x50); + /* mmio register space */ memory_region_init_io(&s->mm, OBJECT(s), &ati_mm_ops, s, "ati.mmregs", 0x4000); @@ -837,7 +910,7 @@ static void ati_vga_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_DISPLAY_VGA; k->vendor_id = PCI_VENDOR_ID_ATI; k->device_id = PCI_DEVICE_ID_ATI_RAGE128_PF; - k->romfile = "vgabios-stdvga.bin"; + k->romfile = "vgabios-ati.bin"; k->realize = ati_vga_realize; k->exit = ati_vga_exit; } diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c index d83c29c6d9..42e82311eb 100644 --- a/hw/display/ati_2d.c +++ b/hw/display/ati_2d.c @@ -42,6 +42,8 @@ static int ati_bpp_from_datatype(ATIVGAState *s) } } +#define DEFAULT_CNTL (s->regs.dp_gui_master_cntl & GMC_DST_PITCH_OFFSET_CNTL) + void ati_2d_blt(ATIVGAState *s) { /* FIXME it is probably more complex than this and may need to be */ @@ -51,53 +53,88 @@ void ati_2d_blt(ATIVGAState *s) s->vga.vbe_start_addr, surface_data(ds), surface_stride(ds), surface_bits_per_pixel(ds), (s->regs.dp_mix & GMC_ROP3_MASK) >> 16); - DPRINTF("%d %d, %d %d, (%d,%d) -> (%d,%d) %dx%d\n", s->regs.src_offset, - s->regs.dst_offset, s->regs.src_pitch, s->regs.dst_pitch, + int dst_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? 
+ s->regs.dst_x : s->regs.dst_x + 1 - s->regs.dst_width); + int dst_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? + s->regs.dst_y : s->regs.dst_y + 1 - s->regs.dst_height); + int bpp = ati_bpp_from_datatype(s); + int dst_stride = DEFAULT_CNTL ? s->regs.dst_pitch : s->regs.default_pitch; + uint8_t *dst_bits = s->vga.vram_ptr + (DEFAULT_CNTL ? + s->regs.dst_offset : s->regs.default_offset); + + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + dst_bits += s->regs.crtc_offset & 0x07ffffff; + dst_stride *= bpp; + } + uint8_t *end = s->vga.vram_ptr + s->vga.vram_size; + if (dst_bits >= end || dst_bits + dst_x + (dst_y + s->regs.dst_height) * + dst_stride >= end) { + qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n"); + return; + } + DPRINTF("%d %d %d, %d %d %d, (%d,%d) -> (%d,%d) %dx%d %c %c\n", + s->regs.src_offset, s->regs.dst_offset, s->regs.default_offset, + s->regs.src_pitch, s->regs.dst_pitch, s->regs.default_pitch, s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y, - s->regs.dst_width, s->regs.dst_height); + s->regs.dst_width, s->regs.dst_height, + (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? '>' : '<'), + (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? 'v' : '^')); switch (s->regs.dp_mix & GMC_ROP3_MASK) { case ROP3_SRCCOPY: { - uint8_t *src_bits, *dst_bits, *end; - int src_stride, dst_stride, bpp = ati_bpp_from_datatype(s); - src_bits = s->vga.vram_ptr + s->regs.src_offset; - dst_bits = s->vga.vram_ptr + s->regs.dst_offset; - src_stride = s->regs.src_pitch; - dst_stride = s->regs.dst_pitch; + int src_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? + s->regs.src_x : s->regs.src_x + 1 - s->regs.dst_width); + int src_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? + s->regs.src_y : s->regs.src_y + 1 - s->regs.dst_height); + int src_stride = DEFAULT_CNTL ? + s->regs.src_pitch : s->regs.default_pitch; + uint8_t *src_bits = s->vga.vram_ptr + (DEFAULT_CNTL ? 
+ s->regs.src_offset : s->regs.default_offset); if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { src_bits += s->regs.crtc_offset & 0x07ffffff; - dst_bits += s->regs.crtc_offset & 0x07ffffff; src_stride *= bpp; - dst_stride *= bpp; } + if (src_bits >= end || src_bits + src_x + + (src_y + s->regs.dst_height) * src_stride >= end) { + qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n"); + return; + } + src_stride /= sizeof(uint32_t); dst_stride /= sizeof(uint32_t); - DPRINTF("pixman_blt(%p, %p, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)\n", src_bits, dst_bits, src_stride, dst_stride, bpp, bpp, - s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y, + src_x, src_y, dst_x, dst_y, s->regs.dst_width, s->regs.dst_height); - end = s->vga.vram_ptr + s->vga.vram_size; - if (src_bits >= end || dst_bits >= end || - src_bits + s->regs.src_x + (s->regs.src_y + s->regs.dst_height) * - src_stride * sizeof(uint32_t) >= end || - dst_bits + s->regs.dst_x + (s->regs.dst_y + s->regs.dst_height) * - dst_stride * sizeof(uint32_t) >= end) { - qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n"); - return; + if (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT && + s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM) { + pixman_blt((uint32_t *)src_bits, (uint32_t *)dst_bits, + src_stride, dst_stride, bpp, bpp, + src_x, src_y, dst_x, dst_y, + s->regs.dst_width, s->regs.dst_height); + } else { + /* FIXME: We only really need a temporary if src and dst overlap */ + int llb = s->regs.dst_width * (bpp / 8); + int tmp_stride = DIV_ROUND_UP(llb, sizeof(uint32_t)); + uint32_t *tmp = g_malloc(tmp_stride * sizeof(uint32_t) * + s->regs.dst_height); + pixman_blt((uint32_t *)src_bits, tmp, + src_stride, tmp_stride, bpp, bpp, + src_x, src_y, 0, 0, + s->regs.dst_width, s->regs.dst_height); + pixman_blt(tmp, (uint32_t *)dst_bits, + tmp_stride, dst_stride, bpp, bpp, + 0, 0, dst_x, dst_y, + s->regs.dst_width, s->regs.dst_height); + g_free(tmp); } - pixman_blt((uint32_t *)src_bits, (uint32_t *)dst_bits, - src_stride, dst_stride, bpp, bpp, - s->regs.src_x, s->regs.src_y, - s->regs.dst_x, s->regs.dst_y, - s->regs.dst_width, s->regs.dst_height); if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr && dst_bits < s->vga.vram_ptr + s->vga.vbe_start_addr + s->vga.vbe_regs[VBE_DISPI_INDEX_YRES] * s->vga.vbe_line_offset) { memory_region_set_dirty(&s->vga.vram, s->vga.vbe_start_addr + s->regs.dst_offset + - s->regs.dst_y * surface_stride(ds), + dst_y * surface_stride(ds), s->regs.dst_height * surface_stride(ds)); } s->regs.dst_x += s->regs.dst_width; @@ -108,54 +145,38 @@ void ati_2d_blt(ATIVGAState *s) case ROP3_BLACKNESS: case ROP3_WHITENESS: { - uint8_t *dst_bits, *end; - int dst_stride, bpp = ati_bpp_from_datatype(s); uint32_t filler = 0; - dst_bits = s->vga.vram_ptr + s->regs.dst_offset; - dst_stride = s->regs.dst_pitch; - - if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { - dst_bits += s->regs.crtc_offset & 0x07ffffff; - dst_stride *= bpp; - } - dst_stride /= sizeof(uint32_t); switch (s->regs.dp_mix & GMC_ROP3_MASK) { case ROP3_PATCOPY: - filler = bswap32(s->regs.dp_brush_frgd_clr); + filler = s->regs.dp_brush_frgd_clr; break; case ROP3_BLACKNESS: - filler = rgb_to_pixel32(s->vga.palette[0], s->vga.palette[1], - s->vga.palette[2]) << 8 | 0xff; + filler = 0xffUL << 24 | rgb_to_pixel32(s->vga.palette[0], + s->vga.palette[1], s->vga.palette[2]); break; case ROP3_WHITENESS: - filler = rgb_to_pixel32(s->vga.palette[3], s->vga.palette[4], - s->vga.palette[5]) << 8 | 0xff; + filler = 0xffUL << 24 | rgb_to_pixel32(s->vga.palette[3], + 
s->vga.palette[4], s->vga.palette[5]); break; } + dst_stride /= sizeof(uint32_t); DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n", dst_bits, dst_stride, bpp, s->regs.dst_x, s->regs.dst_y, s->regs.dst_width, s->regs.dst_height, filler); - end = s->vga.vram_ptr + s->vga.vram_size; - if (dst_bits >= end || - dst_bits + s->regs.dst_x + (s->regs.dst_y + s->regs.dst_height) * - dst_stride * sizeof(uint32_t) >= end) { - qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n"); - return; - } pixman_fill((uint32_t *)dst_bits, dst_stride, bpp, - s->regs.dst_x, s->regs.dst_y, - s->regs.dst_width, s->regs.dst_height, - filler); + s->regs.dst_x, s->regs.dst_y, + s->regs.dst_width, s->regs.dst_height, + filler); if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr && dst_bits < s->vga.vram_ptr + s->vga.vbe_start_addr + s->vga.vbe_regs[VBE_DISPI_INDEX_YRES] * s->vga.vbe_line_offset) { memory_region_set_dirty(&s->vga.vram, s->vga.vbe_start_addr + s->regs.dst_offset + - s->regs.dst_y * surface_stride(ds), + dst_y * surface_stride(ds), s->regs.dst_height * surface_stride(ds)); } s->regs.dst_y += s->regs.dst_height; diff --git a/hw/display/ati_dbg.c b/hw/display/ati_dbg.c index b045f81d06..88b3a11315 100644 --- a/hw/display/ati_dbg.c +++ b/hw/display/ati_dbg.c @@ -19,6 +19,8 @@ static struct ati_regdesc ati_reg_names[] = { {"CRTC_GEN_CNTL", 0x0050}, {"CRTC_EXT_CNTL", 0x0054}, {"DAC_CNTL", 0x0058}, + {"GPIO_VGA_DDC", 0x0060}, + {"GPIO_DVI_DDC", 0x0064}, {"GPIO_MONID", 0x0068}, {"I2C_CNTL_1", 0x0094}, {"PALETTE_INDEX", 0x00b0}, diff --git a/hw/display/ati_int.h b/hw/display/ati_int.h index 2f426064cf..31a1927b3e 100644 --- a/hw/display/ati_int.h +++ b/hw/display/ati_int.h @@ -10,6 +10,7 @@ #define ATI_INT_H #include "hw/pci/pci.h" +#include "hw/i2c/bitbang_i2c.h" #include "vga_int.h" /*#define DEBUG_ATI*/ @@ -35,6 +36,9 @@ typedef struct ATIVGARegs { uint32_t crtc_gen_cntl; uint32_t crtc_ext_cntl; uint32_t dac_cntl; + uint32_t gpio_vga_ddc; + uint32_t gpio_dvi_ddc; + uint32_t gpio_monid; uint32_t crtc_h_total_disp; uint32_t crtc_h_sync_strt_wid; uint32_t crtc_v_total_disp; @@ -70,6 +74,7 @@ typedef struct ATIVGARegs { uint32_t dp_write_mask; uint32_t default_offset; uint32_t default_pitch; + uint32_t default_tile; uint32_t default_sc_bottom_right; } ATIVGARegs; @@ -83,6 +88,7 @@ typedef struct ATIVGAState { uint16_t cursor_size; uint32_t cursor_offset; QEMUCursor *cursor; + bitbang_i2c_interface bbi2c; MemoryRegion io; MemoryRegion mm; ATIVGARegs regs; diff --git a/hw/display/ati_regs.h b/hw/display/ati_regs.h index 923bfd33ce..d7155c93d5 100644 --- a/hw/display/ati_regs.h +++ b/hw/display/ati_regs.h @@ -37,6 +37,8 @@ #define CRTC_GEN_CNTL 0x0050 #define CRTC_EXT_CNTL 0x0054 #define DAC_CNTL 0x0058 +#define GPIO_VGA_DDC 0x0060 +#define GPIO_DVI_DDC 0x0064 #define GPIO_MONID 0x0068 #define I2C_CNTL_1 0x0094 #define PALETTE_INDEX 0x00b0 @@ -368,8 +370,8 @@ #define BRUSH_SOLIDCOLOR 0x00000d00 /* DP_GUI_MASTER_CNTL bit constants */ -#define GMC_SRC_PITCH_OFFSET_DEFAULT 0x00000000 -#define GMC_DST_PITCH_OFFSET_DEFAULT 0x00000000 +#define GMC_SRC_PITCH_OFFSET_CNTL 0x00000001 +#define GMC_DST_PITCH_OFFSET_CNTL 0x00000002 #define GMC_SRC_CLIP_DEFAULT 0x00000000 #define GMC_DST_CLIP_DEFAULT 0x00000000 #define GMC_BRUSH_SOLIDCOLOR 0x000000d0 diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 2b0f66b1d6..25d9e327fc 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -1002,6 +1002,11 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, resource_id = 
qemu_get_be32(f); while (resource_id != 0) { + res = virtio_gpu_find_resource(g, resource_id); + if (res) { + return -EINVAL; + } + res = g_new0(struct virtio_gpu_simple_resource, 1); res->resource_id = resource_id; res->width = qemu_get_be32(f); @@ -1048,9 +1053,9 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, if (res->iov[i].iov_base) { dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base, - res->iov[i].iov_len, + len, DMA_DIRECTION_TO_DEVICE, - res->iov[i].iov_len); + 0); } /* ...and the mappings for previous loop iterations */ res->iov_cnt = i; diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c index 5dfc72d9d7..60c7a9be0b 100644 --- a/hw/i2c/bitbang_i2c.c +++ b/hw/i2c/bitbang_i2c.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" #include "hw/hw.h" -#include "bitbang_i2c.h" +#include "hw/i2c/bitbang_i2c.h" #include "hw/sysbus.h" #include "qemu/module.h" @@ -25,39 +25,6 @@ do { printf("bitbang_i2c: " fmt , ## __VA_ARGS__); } while (0) #define DPRINTF(fmt, ...) do {} while(0) #endif -typedef enum bitbang_i2c_state { - STOPPED = 0, - SENDING_BIT7, - SENDING_BIT6, - SENDING_BIT5, - SENDING_BIT4, - SENDING_BIT3, - SENDING_BIT2, - SENDING_BIT1, - SENDING_BIT0, - WAITING_FOR_ACK, - RECEIVING_BIT7, - RECEIVING_BIT6, - RECEIVING_BIT5, - RECEIVING_BIT4, - RECEIVING_BIT3, - RECEIVING_BIT2, - RECEIVING_BIT1, - RECEIVING_BIT0, - SENDING_ACK, - SENT_NACK -} bitbang_i2c_state; - -struct bitbang_i2c_interface { - I2CBus *bus; - bitbang_i2c_state state; - int last_data; - int last_clock; - int device_out; - uint8_t buffer; - int current_addr; -}; - static void bitbang_i2c_enter_stop(bitbang_i2c_interface *i2c) { DPRINTF("STOP\n"); @@ -184,18 +151,12 @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level) abort(); } -bitbang_i2c_interface *bitbang_i2c_init(I2CBus *bus) +void bitbang_i2c_init(bitbang_i2c_interface *s, I2CBus *bus) { - bitbang_i2c_interface *s; - - s = g_malloc0(sizeof(bitbang_i2c_interface)); - s->bus = bus; s->last_data = 1; s->last_clock = 1; s->device_out = 1; - - return s; } /* GPIO interface. 
*/ @@ -207,7 +168,7 @@ typedef struct GPIOI2CState { SysBusDevice parent_obj; MemoryRegion dummy_iomem; - bitbang_i2c_interface *bitbang; + bitbang_i2c_interface bitbang; int last_level; qemu_irq out; } GPIOI2CState; @@ -216,7 +177,7 @@ static void bitbang_i2c_gpio_set(void *opaque, int irq, int level) { GPIOI2CState *s = opaque; - level = bitbang_i2c_set(s->bitbang, irq, level); + level = bitbang_i2c_set(&s->bitbang, irq, level); if (level != s->last_level) { s->last_level = level; qemu_set_irq(s->out, level); @@ -234,7 +195,7 @@ static void gpio_i2c_init(Object *obj) sysbus_init_mmio(sbd, &s->dummy_iomem); bus = i2c_init_bus(dev, "i2c"); - s->bitbang = bitbang_i2c_init(bus); + bitbang_i2c_init(&s->bitbang, bus); qdev_init_gpio_in(dev, bitbang_i2c_gpio_set, 2); qdev_init_gpio_out(dev, &s->out, 1); diff --git a/hw/i2c/bitbang_i2c.h b/hw/i2c/bitbang_i2c.h deleted file mode 100644 index 9443021710..0000000000 --- a/hw/i2c/bitbang_i2c.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef BITBANG_I2C_H -#define BITBANG_I2C_H - -#include "hw/i2c/i2c.h" - -#define BITBANG_I2C_SDA 0 -#define BITBANG_I2C_SCL 1 - -bitbang_i2c_interface *bitbang_i2c_init(I2CBus *bus); -int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level); - -#endif diff --git a/hw/i2c/ppc4xx_i2c.c b/hw/i2c/ppc4xx_i2c.c index d606d3dbeb..462729db4e 100644 --- a/hw/i2c/ppc4xx_i2c.c +++ b/hw/i2c/ppc4xx_i2c.c @@ -30,7 +30,6 @@ #include "cpu.h" #include "hw/hw.h" #include "hw/i2c/ppc4xx_i2c.h" -#include "bitbang_i2c.h" #define PPC4xx_I2C_MEM_SIZE 18 @@ -312,9 +311,9 @@ static void ppc4xx_i2c_writeb(void *opaque, hwaddr addr, uint64_t value, case IIC_DIRECTCNTL: i2c->directcntl = value & (IIC_DIRECTCNTL_SDAC & IIC_DIRECTCNTL_SCLC); i2c->directcntl |= (value & IIC_DIRECTCNTL_SCLC ? 1 : 0); - bitbang_i2c_set(i2c->bitbang, BITBANG_I2C_SCL, + bitbang_i2c_set(&i2c->bitbang, BITBANG_I2C_SCL, i2c->directcntl & IIC_DIRECTCNTL_MSCL); - i2c->directcntl |= bitbang_i2c_set(i2c->bitbang, BITBANG_I2C_SDA, + i2c->directcntl |= bitbang_i2c_set(&i2c->bitbang, BITBANG_I2C_SDA, (value & IIC_DIRECTCNTL_SDAC) != 0) << 1; break; default: @@ -348,7 +347,7 @@ static void ppc4xx_i2c_init(Object *o) sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); s->bus = i2c_init_bus(DEVICE(s), "i2c"); - s->bitbang = bitbang_i2c_init(s->bus); + bitbang_i2c_init(&s->bitbang, s->bus); } static void ppc4xx_i2c_class_init(ObjectClass *klass, void *data) diff --git a/hw/i2c/versatile_i2c.c b/hw/i2c/versatile_i2c.c index e07be9890c..1ac2a6f59a 100644 --- a/hw/i2c/versatile_i2c.c +++ b/hw/i2c/versatile_i2c.c @@ -23,7 +23,7 @@ #include "qemu/osdep.h" #include "hw/sysbus.h" -#include "bitbang_i2c.h" +#include "hw/i2c/bitbang_i2c.h" #include "qemu/log.h" #include "qemu/module.h" @@ -35,7 +35,7 @@ typedef struct VersatileI2CState { SysBusDevice parent_obj; MemoryRegion iomem; - bitbang_i2c_interface *bitbang; + bitbang_i2c_interface bitbang; int out; int in; } VersatileI2CState; @@ -70,8 +70,8 @@ static void versatile_i2c_write(void *opaque, hwaddr offset, qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%x\n", __func__, (int)offset); } - bitbang_i2c_set(s->bitbang, BITBANG_I2C_SCL, (s->out & 1) != 0); - s->in = bitbang_i2c_set(s->bitbang, BITBANG_I2C_SDA, (s->out & 2) != 0); + bitbang_i2c_set(&s->bitbang, BITBANG_I2C_SCL, (s->out & 1) != 0); + s->in = bitbang_i2c_set(&s->bitbang, BITBANG_I2C_SDA, (s->out & 2) != 0); } static const MemoryRegionOps versatile_i2c_ops = { @@ -88,7 +88,7 @@ static void versatile_i2c_init(Object *obj) I2CBus *bus; 
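The bitbang_i2c hunks around this point replace a heap-allocated bitbang_i2c_interface returned by bitbang_i2c_init() with a struct embedded directly in each controller's state and initialized in place. A minimal standalone sketch of that ownership change follows; the demo_* struct and functions are invented for illustration and are not QEMU types.

/*
 * Standalone sketch (not QEMU code): contrast the old allocate-and-return
 * init style with the new initialize-in-place style used by the conversion
 * above.  Field names are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct demo_i2c_interface {
    int last_data;
    int last_clock;
    int device_out;
} demo_i2c_interface;

/* Old style: the helper owns the allocation, the caller stores a pointer. */
static demo_i2c_interface *demo_i2c_create(void)
{
    demo_i2c_interface *s = calloc(1, sizeof(*s));
    s->last_data = s->last_clock = s->device_out = 1;
    return s;
}

/* New style: the caller embeds the struct and the helper only fills it in. */
static void demo_i2c_init(demo_i2c_interface *s)
{
    memset(s, 0, sizeof(*s));
    s->last_data = s->last_clock = s->device_out = 1;
}

int main(void)
{
    struct { demo_i2c_interface bb; } controller;   /* embedded, no free() needed */
    demo_i2c_interface *old = demo_i2c_create();    /* heap-allocated, must be freed */

    demo_i2c_init(&controller.bb);
    printf("embedded: %d, allocated: %d\n",
           controller.bb.device_out, old->device_out);
    free(old);
    return 0;
}

Embedding removes a separate allocation whose lifetime must be tracked: tearing down the controller state now covers the bit-bang state automatically.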
bus = i2c_init_bus(dev, "i2c"); - s->bitbang = bitbang_i2c_init(bus); + bitbang_i2c_init(&s->bitbang, bus); memory_region_init_io(&s->iomem, obj, &versatile_i2c_ops, s, "versatile_i2c", 0x1000); sysbus_init_mmio(sbd, &s->iomem); diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index 9817888216..4ddf2a9c55 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -30,6 +30,7 @@ config PC # For ACPI builder: select SERIAL_ISA select ACPI_VMGENID + select VIRTIO_PMEM_SUPPORTED config PC_PCI bool diff --git a/hw/i386/pc.c b/hw/i386/pc.c index e96360b47a..b380bd7d74 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -79,6 +79,8 @@ #include "hw/i386/intel_iommu.h" #include "hw/net/ne2000-isa.h" #include "standard-headers/asm-x86/bootparam.h" +#include "hw/virtio/virtio-pmem-pci.h" +#include "hw/mem/memory-device.h" /* debug PC/ISA interrupts */ //#define DEBUG_IRQ @@ -913,14 +915,6 @@ bool e820_get_entry(int idx, uint32_t type, uint64_t *address, uint64_t *length) return false; } -/* Enables contiguous-apic-ID mode, for compatibility */ -static bool compat_apic_id_mode; - -void enable_compat_apic_id_mode(void) -{ - compat_apic_id_mode = true; -} - /* Calculates initial APIC ID for a specific CPU index * * Currently we need to be able to calculate the APIC ID from the CPU index @@ -928,13 +922,15 @@ void enable_compat_apic_id_mode(void) * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of * all CPUs up to max_cpus. */ -static uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index) +static uint32_t x86_cpu_apic_id_from_index(PCMachineState *pcms, + unsigned int cpu_index) { + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); uint32_t correct_id; static bool warned; correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index); - if (compat_apic_id_mode) { + if (pcmc->compat_apic_id_mode) { if (cpu_index != correct_id && !warned && !qtest_enabled()) { error_report("APIC IDs set in compatibility mode, " "CPU topology won't match the configuration"); @@ -1533,7 +1529,8 @@ static void pc_new_cpu(const char *typename, int64_t apic_id, Error **errp) void pc_hot_add_cpu(const int64_t id, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); - int64_t apic_id = x86_cpu_apic_id_from_index(id); + PCMachineState *pcms = PC_MACHINE(ms); + int64_t apic_id = x86_cpu_apic_id_from_index(pcms, id); Error *local_err = NULL; if (id < 0) { @@ -1569,7 +1566,7 @@ void pc_cpus_init(PCMachineState *pcms) * * This is used for FW_CFG_MAX_CPUS. See comments on bochs_bios_init(). */ - pcms->apic_id_limit = x86_cpu_apic_id_from_index(max_cpus - 1) + 1; + pcms->apic_id_limit = x86_cpu_apic_id_from_index(pcms, max_cpus - 1) + 1; possible_cpus = mc->possible_cpu_arch_ids(ms); for (i = 0; i < smp_cpus; i++) { pc_new_cpu(possible_cpus->cpus[i].type, possible_cpus->cpus[i].arch_id, @@ -2395,6 +2392,65 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, numa_cpu_pre_plug(cpu_slot, dev, errp); } +static void pc_virtio_pmem_pci_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev); + Error *local_err = NULL; + + if (!hotplug_dev2) { + /* + * Without a bus hotplug handler, we cannot control the plug/unplug + * order. This should never be the case on x86, however better add + * a safety net. + */ + error_setg(errp, "virtio-pmem-pci not supported on this bus."); + return; + } + /* + * First, see if we can plug this memory device at all. If that + * succeeds, branch of to the actual hotplug handler. 
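The virtio-pmem-pci pre-plug and plug handlers in this hunk follow a two-phase pattern described in their comments: handle the memory-device part first, then branch off to the bus hotplug handler, and undo the memory-device part if that handler fails. Below is a minimal standalone sketch of the same reserve/delegate/rollback shape; reserve_region(), bus_plug() and release_region() are invented names, not the QEMU memory-device API.

/*
 * Standalone sketch of the two-phase plug pattern: reserve the easy-to-undo
 * resource first, delegate to the second handler, roll back on failure.
 */
#include <stdbool.h>
#include <stdio.h>

static bool reserve_region(const char *dev, char *err, size_t errlen)
{
    /* pretend the address space always has room in this toy model */
    (void)dev; (void)err; (void)errlen;
    return true;
}

static void release_region(const char *dev) { (void)dev; }

static bool bus_plug(const char *dev, bool simulate_failure)
{
    (void)dev;
    return !simulate_failure;
}

static int plug_device(const char *dev, bool simulate_failure)
{
    char err[128];

    if (!reserve_region(dev, err, sizeof(err))) {
        return -1;                      /* pre-plug failed, nothing to undo */
    }
    if (!bus_plug(dev, simulate_failure)) {
        release_region(dev);            /* undo the easy part on failure */
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("plug ok:   %d\n", plug_device("pmem0", false));
    printf("plug fail: %d\n", plug_device("pmem1", true));
    return 0;
}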
+ */ + memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev), NULL, + &local_err); + if (!local_err) { + hotplug_handler_pre_plug(hotplug_dev2, dev, &local_err); + } + error_propagate(errp, local_err); +} + +static void pc_virtio_pmem_pci_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev); + Error *local_err = NULL; + + /* + * Plug the memory device first and then branch off to the actual + * hotplug handler. If that one fails, we can easily undo the memory + * device bits. + */ + memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev)); + hotplug_handler_plug(hotplug_dev2, dev, &local_err); + if (local_err) { + memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev)); + } + error_propagate(errp, local_err); +} + +static void pc_virtio_pmem_pci_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + /* We don't support virtio pmem hot unplug */ + error_setg(errp, "virtio pmem device unplug not supported."); +} + +static void pc_virtio_pmem_pci_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + /* We don't support virtio pmem hot unplug */ +} + static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { @@ -2402,6 +2458,8 @@ static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, pc_memory_pre_plug(hotplug_dev, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { pc_cpu_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI)) { + pc_virtio_pmem_pci_pre_plug(hotplug_dev, dev, errp); } } @@ -2412,6 +2470,8 @@ static void pc_machine_device_plug_cb(HotplugHandler *hotplug_dev, pc_memory_plug(hotplug_dev, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { pc_cpu_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI)) { + pc_virtio_pmem_pci_plug(hotplug_dev, dev, errp); } } @@ -2422,6 +2482,8 @@ static void pc_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev, pc_memory_unplug_request(hotplug_dev, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { pc_cpu_unplug_request_cb(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI)) { + pc_virtio_pmem_pci_unplug_request(hotplug_dev, dev, errp); } else { error_setg(errp, "acpi: device unplug request for not supported device" " type: %s", object_get_typename(OBJECT(dev))); @@ -2435,6 +2497,8 @@ static void pc_machine_device_unplug_cb(HotplugHandler *hotplug_dev, pc_memory_unplug(hotplug_dev, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { pc_cpu_unplug_cb(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI)) { + pc_virtio_pmem_pci_unplug(hotplug_dev, dev, errp); } else { error_setg(errp, "acpi: device unplug for not supported device" " type: %s", object_get_typename(OBJECT(dev))); @@ -2445,7 +2509,8 @@ static HotplugHandler *pc_get_hotplug_handler(MachineState *machine, DeviceState *dev) { if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || - object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + object_dynamic_cast(OBJECT(dev), TYPE_CPU) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI)) { return HOTPLUG_HANDLER(machine); } @@ -2660,6 +2725,7 @@ static int64_t pc_get_default_cpu_node_id(const MachineState *ms, int idx) static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *ms) 
{ + PCMachineState *pcms = PC_MACHINE(ms); int i; if (ms->possible_cpus) { @@ -2679,7 +2745,7 @@ static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *ms) ms->possible_cpus->cpus[i].type = ms->cpu_type; ms->possible_cpus->cpus[i].vcpus_count = 1; - ms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i); + ms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(pcms, i); x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id, smp_cores, smp_threads, &topo); ms->possible_cpus->cpus[i].props.has_socket_id = true; diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index c07c4a5b38..f29de58636 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -358,7 +358,6 @@ static void pc_compat_1_4_fn(MachineState *machine) static void pc_compat_1_3(MachineState *machine) { pc_compat_1_4_fn(machine); - enable_compat_apic_id_mode(); } /* PC compat function for pc-0.14 to pc-1.2 */ @@ -708,6 +707,7 @@ DEFINE_I440FX_MACHINE(v1_4, "pc-i440fx-1.4", pc_compat_1_4_fn, static void pc_i440fx_1_3_machine_options(MachineClass *m) { + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); static GlobalProperty compat[] = { PC_CPU_MODEL_IDS("1.3.0") { "usb-tablet", "usb_version", "1" }, @@ -718,6 +718,7 @@ static void pc_i440fx_1_3_machine_options(MachineClass *m) pc_i440fx_1_4_machine_options(m); m->hw_version = "1.3.0"; + pcmc->compat_apic_id_mode = true; compat_props_add(m->compat_props, compat, G_N_ELEMENTS(compat)); } diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index b8ede30b3c..9f8f0d3ff5 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -812,15 +812,45 @@ void armv7m_nvic_get_pending_irq_info(void *opaque, int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure) { NVICState *s = (NVICState *)opaque; - VecInfo *vec; + VecInfo *vec = NULL; int ret; assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); - if (secure && exc_is_banked(irq)) { - vec = &s->sec_vectors[irq]; - } else { - vec = &s->vectors[irq]; + /* + * For negative priorities, v8M will forcibly deactivate the appropriate + * NMI or HardFault regardless of what interrupt we're being asked to + * deactivate (compare the DeActivate() pseudocode). This is a guard + * against software returning from NMI or HardFault with a corrupted + * IPSR and leaving the CPU in a negative-priority state. + * v7M does not do this, but simply deactivates the requested interrupt. + */ + if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { + switch (armv7m_nvic_raw_execution_priority(s)) { + case -1: + if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { + vec = &s->vectors[ARMV7M_EXCP_HARD]; + } else { + vec = &s->sec_vectors[ARMV7M_EXCP_HARD]; + } + break; + case -2: + vec = &s->vectors[ARMV7M_EXCP_NMI]; + break; + case -3: + vec = &s->sec_vectors[ARMV7M_EXCP_HARD]; + break; + default: + break; + } + } + + if (!vec) { + if (secure && exc_is_banked(irq)) { + vec = &s->sec_vectors[irq]; + } else { + vec = &s->vectors[irq]; + } } trace_nvic_complete_irq(irq, secure); @@ -830,7 +860,19 @@ int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure) return -1; } - ret = nvic_rettobase(s); + /* + * If this is a configurable exception and it is currently + * targeting the opposite security state from the one we're trying + * to complete it for, this counts as an illegal exception return. + * We still need to deactivate whatever vector the logic above has + * selected, though, as it might not be the same as the one for the + * requested exception number. 
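The switch added to armv7m_nvic_complete_irq above forces which vector gets deactivated when the raw execution priority is negative: -1 selects HardFault (banked according to BFHFNMINS), -2 selects NMI, -3 selects Secure HardFault, and anything else falls through to the exception the guest requested. A standalone sketch of that mapping, with the security plumbing reduced to a single flag, follows; it is illustration only, not the NVIC model's code.

/*
 * Toy mapping of v8M negative raw execution priority to the forcibly
 * deactivated vector, following the hunk above.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *forced_vector(int raw_priority, bool bfhfnmins_set)
{
    switch (raw_priority) {
    case -1:
        /* Non-secure HardFault when BFHFNMINS routes it there, else secure */
        return bfhfnmins_set ? "NS HardFault" : "Secure HardFault";
    case -2:
        return "NMI";
    case -3:
        return "Secure HardFault";
    default:
        return NULL;    /* non-negative priority: use the requested vector */
    }
}

int main(void)
{
    for (int prio = -3; prio <= 0; prio++) {
        const char *v = forced_vector(prio, true);
        printf("raw priority %d -> %s\n", prio, v ? v : "requested irq");
    }
    return 0;
}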
+ */ + if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) { + ret = -1; + } else { + ret = nvic_rettobase(s); + } vec->active = 0; if (vec->level) { diff --git a/hw/intc/aspeed_vic.c b/hw/intc/aspeed_vic.c index 927638d532..266a309f3b 100644 --- a/hw/intc/aspeed_vic.c +++ b/hw/intc/aspeed_vic.c @@ -104,54 +104,63 @@ static void aspeed_vic_set_irq(void *opaque, int irq, int level) static uint64_t aspeed_vic_read(void *opaque, hwaddr offset, unsigned size) { - uint64_t val; - const bool high = !!(offset & 0x4); - hwaddr n_offset = (offset & ~0x4); AspeedVICState *s = (AspeedVICState *)opaque; + hwaddr n_offset; + uint64_t val; + bool high; if (offset < AVIC_NEW_BASE_OFFSET) { - qemu_log_mask(LOG_UNIMP, "%s: Ignoring read from legacy registers " - "at 0x%" HWADDR_PRIx "[%u]\n", __func__, offset, size); - return 0; + high = false; + n_offset = offset; + } else { + high = !!(offset & 0x4); + n_offset = (offset & ~0x4); } - n_offset -= AVIC_NEW_BASE_OFFSET; - switch (n_offset) { - case 0x0: /* IRQ Status */ + case 0x80: /* IRQ Status */ + case 0x00: val = s->raw & ~s->select & s->enable; break; - case 0x08: /* FIQ Status */ + case 0x88: /* FIQ Status */ + case 0x04: val = s->raw & s->select & s->enable; break; - case 0x10: /* Raw Interrupt Status */ + case 0x90: /* Raw Interrupt Status */ + case 0x08: val = s->raw; break; - case 0x18: /* Interrupt Selection */ + case 0x98: /* Interrupt Selection */ + case 0x0c: val = s->select; break; - case 0x20: /* Interrupt Enable */ + case 0xa0: /* Interrupt Enable */ + case 0x10: val = s->enable; break; - case 0x30: /* Software Interrupt */ + case 0xb0: /* Software Interrupt */ + case 0x18: val = s->trigger; break; - case 0x40: /* Interrupt Sensitivity */ + case 0xc0: /* Interrupt Sensitivity */ + case 0x24: val = s->sense; break; - case 0x48: /* Interrupt Both Edge Trigger Control */ + case 0xc8: /* Interrupt Both Edge Trigger Control */ + case 0x28: val = s->dual_edge; break; - case 0x50: /* Interrupt Event */ + case 0xd0: /* Interrupt Event */ + case 0x2c: val = s->event; break; - case 0x60: /* Edge Triggered Interrupt Status */ + case 0xe0: /* Edge Triggered Interrupt Status */ val = s->raw & ~s->sense; break; /* Illegal */ - case 0x28: /* Interrupt Enable Clear */ - case 0x38: /* Software Interrupt Clear */ - case 0x58: /* Edge Triggered Interrupt Clear */ + case 0xa8: /* Interrupt Enable Clear */ + case 0xb8: /* Software Interrupt Clear */ + case 0xd8: /* Edge Triggered Interrupt Clear */ qemu_log_mask(LOG_GUEST_ERROR, "%s: Read of write-only register with offset 0x%" HWADDR_PRIx "\n", __func__, offset); @@ -166,6 +175,8 @@ static uint64_t aspeed_vic_read(void *opaque, hwaddr offset, unsigned size) } if (high) { val = extract64(val, 32, 19); + } else { + val = extract64(val, 0, 32); } trace_aspeed_vic_read(offset, size, val); return val; @@ -174,19 +185,18 @@ static uint64_t aspeed_vic_read(void *opaque, hwaddr offset, unsigned size) static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data, unsigned size) { - const bool high = !!(offset & 0x4); - hwaddr n_offset = (offset & ~0x4); AspeedVICState *s = (AspeedVICState *)opaque; + hwaddr n_offset; + bool high; if (offset < AVIC_NEW_BASE_OFFSET) { - qemu_log_mask(LOG_UNIMP, - "%s: Ignoring write to legacy registers at 0x%" - HWADDR_PRIx "[%u] <- 0x%" PRIx64 "\n", __func__, offset, - size, data); - return; + high = false; + n_offset = offset; + } else { + high = !!(offset & 0x4); + n_offset = (offset & ~0x4); } - n_offset -= AVIC_NEW_BASE_OFFSET; trace_aspeed_vic_write(offset, 
size, data); /* Given we have members using separate enable/clear registers, deposit64() @@ -201,7 +211,8 @@ static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data, } switch (n_offset) { - case 0x18: /* Interrupt Selection */ + case 0x98: /* Interrupt Selection */ + case 0x0c: /* Register has deposit64() semantics - overwrite requested 32 bits */ if (high) { s->select &= AVIC_L_MASK; @@ -210,21 +221,25 @@ static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data, } s->select |= data; break; - case 0x20: /* Interrupt Enable */ + case 0xa0: /* Interrupt Enable */ + case 0x10: s->enable |= data; break; - case 0x28: /* Interrupt Enable Clear */ + case 0xa8: /* Interrupt Enable Clear */ + case 0x14: s->enable &= ~data; break; - case 0x30: /* Software Interrupt */ + case 0xb0: /* Software Interrupt */ + case 0x18: qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. " "IRQs requested: 0x%016" PRIx64 "\n", __func__, data); break; - case 0x38: /* Software Interrupt Clear */ + case 0xb8: /* Software Interrupt Clear */ + case 0x1c: qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. " "IRQs to be cleared: 0x%016" PRIx64 "\n", __func__, data); break; - case 0x50: /* Interrupt Event */ + case 0xd0: /* Interrupt Event */ /* Register has deposit64() semantics - overwrite the top four valid * IRQ bits, as only the top four IRQs (GPIOs) can change their event * type */ @@ -236,15 +251,21 @@ static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data, "Ignoring invalid write to interrupt event register"); } break; - case 0x58: /* Edge Triggered Interrupt Clear */ + case 0xd8: /* Edge Triggered Interrupt Clear */ + case 0x38: s->raw &= ~(data & ~s->sense); break; - case 0x00: /* IRQ Status */ - case 0x08: /* FIQ Status */ - case 0x10: /* Raw Interrupt Status */ - case 0x40: /* Interrupt Sensitivity */ - case 0x48: /* Interrupt Both Edge Trigger Control */ - case 0x60: /* Edge Triggered Interrupt Status */ + case 0x80: /* IRQ Status */ + case 0x00: + case 0x88: /* FIQ Status */ + case 0x04: + case 0x90: /* Raw Interrupt Status */ + case 0x08: + case 0xc0: /* Interrupt Sensitivity */ + case 0x24: + case 0xc8: /* Interrupt Both Edge Trigger Control */ + case 0x28: + case 0xe0: /* Edge Triggered Interrupt Status */ qemu_log_mask(LOG_GUEST_ERROR, "%s: Write of read-only register with offset 0x%" HWADDR_PRIx "\n", __func__, offset); diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c index a55c2bbc88..4dc92ef1e3 100644 --- a/hw/intc/pnv_xive.c +++ b/hw/intc/pnv_xive.c @@ -169,7 +169,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, vsd = ldq_be_dma(&address_space_memory, vsd_addr); if (!(vsd & VSD_ADDRESS_MASK)) { - xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0); + xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); return 0; } @@ -190,7 +190,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, vsd = ldq_be_dma(&address_space_memory, vsd_addr); if (!(vsd & VSD_ADDRESS_MASK)) { - xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0); + xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); return 0; } @@ -294,8 +294,12 @@ static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, word_number); } -static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx) +static int pnv_xive_end_update(PnvXive *xive) { + uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); + uint32_t idx = 
GETFIELD(VC_EQC_CWATCH_OFFSET, + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); int i; uint64_t eqc_watch[4]; @@ -307,6 +311,24 @@ static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx) XIVE_VST_WORD_ALL); } +static void pnv_xive_end_cache_load(PnvXive *xive) +{ + uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); + uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); + uint64_t eqc_watch[4] = { 0 }; + int i; + + if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) { + xive_error(xive, "VST: no END entry %x/%x !?", blk, idx); + } + + for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { + xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]); + } +} + static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, XiveNVT *nvt) { @@ -320,8 +342,12 @@ static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, word_number); } -static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx) +static int pnv_xive_nvt_update(PnvXive *xive) { + uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); + uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); int i; uint64_t vpc_watch[8]; @@ -333,6 +359,24 @@ static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx) XIVE_VST_WORD_ALL); } +static void pnv_xive_nvt_cache_load(PnvXive *xive) +{ + uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); + uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); + uint64_t vpc_watch[8] = { 0 }; + int i; + + if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) { + xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx); + } + + for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { + xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]); + } +} + static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, XiveEAS *eas) { @@ -346,12 +390,6 @@ static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); } -static int pnv_xive_eas_update(PnvXive *xive, uint8_t blk, uint32_t idx) -{ - /* All done. */ - return 0; -} - static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -781,8 +819,7 @@ static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, * support recently though) */ if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { - object_property_set_int(OBJECT(&xive->ipi_source), - XIVE_SRC_STORE_EOI, "flags", &error_fatal); + xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI; } break; @@ -951,28 +988,43 @@ static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, * XIVE PC & VC cache updates for EAS, NVT and END */ case VC_IVC_SCRUB_MASK: - break; case VC_IVC_SCRUB_TRIG: - pnv_xive_eas_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val), - GETFIELD(VC_SCRUB_OFFSET, val)); break; - case VC_EQC_SCRUB_MASK: case VC_EQC_CWATCH_SPEC: - case VC_EQC_CWATCH_DAT0 ... VC_EQC_CWATCH_DAT3: + val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */ + break; + case VC_EQC_CWATCH_DAT1 ... 
VC_EQC_CWATCH_DAT3: break; + case VC_EQC_CWATCH_DAT0: + /* writing to DATA0 triggers the cache write */ + xive->regs[reg] = val; + pnv_xive_end_update(xive); + break; + case VC_EQC_SCRUB_MASK: case VC_EQC_SCRUB_TRIG: - pnv_xive_end_update(xive, GETFIELD(VC_SCRUB_BLOCK_ID, val), - GETFIELD(VC_SCRUB_OFFSET, val)); + /* + * The scrubbing registers flush the cache in RAM and can also + * invalidate. + */ break; - case PC_VPC_SCRUB_MASK: case PC_VPC_CWATCH_SPEC: - case PC_VPC_CWATCH_DAT0 ... PC_VPC_CWATCH_DAT7: + val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */ + break; + case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: break; + case PC_VPC_CWATCH_DAT0: + /* writing to DATA0 triggers the cache write */ + xive->regs[reg] = val; + pnv_xive_nvt_update(xive); + break; + case PC_VPC_SCRUB_MASK: case PC_VPC_SCRUB_TRIG: - pnv_xive_nvt_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val), - GETFIELD(PC_SCRUB_OFFSET, val)); + /* + * The scrubbing registers flush the cache in RAM and can also + * invalidate. + */ break; @@ -1023,15 +1075,6 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) case PC_GLOBAL_CONFIG: case PC_VPC_SCRUB_MASK: - case PC_VPC_CWATCH_SPEC: - case PC_VPC_CWATCH_DAT0: - case PC_VPC_CWATCH_DAT1: - case PC_VPC_CWATCH_DAT2: - case PC_VPC_CWATCH_DAT3: - case PC_VPC_CWATCH_DAT4: - case PC_VPC_CWATCH_DAT5: - case PC_VPC_CWATCH_DAT6: - case PC_VPC_CWATCH_DAT7: case VC_GLOBAL_CONFIG: case VC_AIB_TX_ORDER_TAG2: @@ -1044,12 +1087,6 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) case VC_IRQ_CONFIG_IPI_CASC: case VC_EQC_SCRUB_MASK: - case VC_EQC_CWATCH_DAT0: - case VC_EQC_CWATCH_DAT1: - case VC_EQC_CWATCH_DAT2: - case VC_EQC_CWATCH_DAT3: - - case VC_EQC_CWATCH_SPEC: case VC_IVC_SCRUB_MASK: case VC_SBC_CONFIG: case VC_AT_MACRO_KILL_MASK: @@ -1081,6 +1118,38 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) /* * XIVE PC & VC cache updates for EAS, NVT and END */ + case VC_EQC_CWATCH_SPEC: + xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT); + val = xive->regs[reg]; + break; + case VC_EQC_CWATCH_DAT0: + /* + * Load DATA registers from cache with data requested by the + * SPEC register + */ + pnv_xive_end_cache_load(xive); + val = xive->regs[reg]; + break; + case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: + val = xive->regs[reg]; + break; + + case PC_VPC_CWATCH_SPEC: + xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT); + val = xive->regs[reg]; + break; + case PC_VPC_CWATCH_DAT0: + /* + * Load DATA registers from cache with data requested by the + * SPEC register + */ + pnv_xive_nvt_cache_load(xive); + val = xive->regs[reg]; + break; + case PC_VPC_CWATCH_DAT1 ... 
PC_VPC_CWATCH_DAT7: + val = xive->regs[reg]; + break; + case PC_VPC_SCRUB_TRIG: case VC_IVC_SCRUB_TRIG: case VC_EQC_SCRUB_TRIG: diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 58c2e5d890..3ae311d9ff 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -194,13 +194,6 @@ void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon) } } -void spapr_xive_map_mmio(SpaprXive *xive) -{ - sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base); - sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base); - sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base); -} - void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable) { memory_region_set_enabled(&xive->source.esb_mmio, enable); @@ -305,6 +298,7 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp) error_propagate(errp, local_err); return; } + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio); /* * Initialize the END ESB source @@ -318,6 +312,7 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp) error_propagate(errp, local_err); return; } + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio); /* Set the mapping address of the END ESB pages after the source ESBs */ xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs; @@ -333,31 +328,18 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp) qemu_register_reset(spapr_xive_reset, dev); - /* Define all XIVE MMIO regions on SysBus */ - sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio); - sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio); - sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio); -} - -void spapr_xive_init(SpaprXive *xive, Error **errp) -{ - XiveSource *xsrc = &xive->source; - - /* - * The emulated XIVE device can only be initialized once. If the - * ESB memory region has been already mapped, it means we have been - * through there. - */ - if (memory_region_is_mapped(&xsrc->esb_mmio)) { - return; - } - /* TIMA initialization */ memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive, "xive.tima", 4ull << TM_SHIFT); + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio); - /* Map all regions */ - spapr_xive_map_mmio(xive); + /* + * Map all regions. These will be enabled or disabled at reset and + * can also be overridden by KVM memory regions if active + */ + sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base); + sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base); + sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base); } static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk, diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c index b48f135838..3bf8e7a20e 100644 --- a/hw/intc/spapr_xive_kvm.c +++ b/hw/intc/spapr_xive_kvm.c @@ -724,12 +724,13 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp) xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, &local_err); if (local_err) { - error_propagate(errp, local_err); - return; + goto fail; } - memory_region_init_ram_device_ptr(&xsrc->esb_mmio, OBJECT(xsrc), + memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc), "xive.esb", esb_len, xsrc->esb_mmap); + memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0, + &xsrc->esb_mmio_kvm, 1); /* * 2. 
END ESB pages (No KVM support yet) @@ -741,11 +742,12 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp) xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, &local_err); if (local_err) { - error_propagate(errp, local_err); - return; + goto fail; } - memory_region_init_ram_device_ptr(&xive->tm_mmio, OBJECT(xive), + memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive), "xive.tima", tima_len, xive->tm_mmap); + memory_region_add_subregion_overlap(&xive->tm_mmio, 0, + &xive->tm_mmio_kvm, 1); xive->change = qemu_add_vm_change_state_handler( kvmppc_xive_change_state_handler, xive); @@ -756,24 +758,24 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp) kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err); if (local_err) { - error_propagate(errp, local_err); - return; + goto fail; } } /* Update the KVM sources */ kvmppc_xive_source_reset(xsrc, &local_err); if (local_err) { - error_propagate(errp, local_err); - return; + goto fail; } kvm_kernel_irqchip = true; kvm_msi_via_irqfd_allowed = true; kvm_gsi_direct_mapping = true; + return; - /* Map all regions */ - spapr_xive_map_mmio(xive); +fail: + error_propagate(errp, local_err); + kvmppc_xive_disconnect(xive, NULL); } void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp) @@ -795,21 +797,29 @@ void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp) xsrc = &xive->source; esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs; - sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 0); - munmap(xsrc->esb_mmap, esb_len); - - sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 1); + if (xsrc->esb_mmap) { + memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm); + object_unparent(OBJECT(&xsrc->esb_mmio_kvm)); + munmap(xsrc->esb_mmap, esb_len); + xsrc->esb_mmap = NULL; + } - sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 2); - munmap(xive->tm_mmap, 4ull << TM_SHIFT); + if (xive->tm_mmap) { + memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm); + object_unparent(OBJECT(&xive->tm_mmio_kvm)); + munmap(xive->tm_mmap, 4ull << TM_SHIFT); + xive->tm_mmap = NULL; + } /* * When the KVM device fd is closed, the KVM device is destroyed * and removed from the list of devices of the VM. The VCPU * presenters are also detached from the device. 
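The kvmppc_xive_connect/disconnect changes around this point rework teardown so that every step is guarded (mmap pointer non-NULL, fd != -1, change-state handler present), which lets the connect path jump to a single failure label and call the same disconnect routine on a half-initialized device. A minimal standalone sketch of that guarded-teardown pattern follows; struct conn and its fields are invented for illustration.

/*
 * Standalone sketch: a teardown routine whose checks all tolerate
 * "never set up", so it doubles as the error path of setup.
 */
#include <stdio.h>
#include <stdlib.h>

struct conn {
    int fd;         /* -1 when not open */
    void *mapping;  /* NULL when not mapped */
};

static void conn_disconnect(struct conn *c)
{
    if (c->mapping) {
        free(c->mapping);       /* stands in for munmap() + unparent */
        c->mapping = NULL;
    }
    if (c->fd != -1) {
        c->fd = -1;             /* stands in for close(fd) */
    }
}

static int conn_connect(struct conn *c, int fail_midway)
{
    c->fd = 42;                             /* pretend open succeeded */
    if (fail_midway) {
        conn_disconnect(c);                 /* single cleanup path */
        return -1;
    }
    c->mapping = malloc(16);
    return 0;
}

int main(void)
{
    struct conn c = { .fd = -1, .mapping = NULL };

    printf("connect (fail): %d\n", conn_connect(&c, 1));
    printf("connect (ok):   %d\n", conn_connect(&c, 0));
    conn_disconnect(&c);        /* safe to call again, all guards hold */
    conn_disconnect(&c);
    return 0;
}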
*/ - close(xive->fd); - xive->fd = -1; + if (xive->fd != -1) { + close(xive->fd); + xive->fd = -1; + } kvm_kernel_irqchip = false; kvm_msi_via_irqfd_allowed = false; @@ -819,5 +829,8 @@ void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp) kvm_cpu_disable_all(); /* VM Change state handler is not needed anymore */ - qemu_del_vm_change_state_handler(xive->change); + if (xive->change) { + qemu_del_vm_change_state_handler(xive->change); + xive->change = NULL; + } } diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 29f7d39781..faa976e2f8 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -267,7 +267,14 @@ static int icp_post_load(void *opaque, int version_id) ICPState *icp = opaque; if (kvm_irqchip_in_kernel()) { - return icp_set_kvm_state(icp); + Error *local_err = NULL; + int ret; + + ret = icp_set_kvm_state(icp, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } } return 0; @@ -300,7 +307,12 @@ static void icp_reset_handler(void *dev) qemu_set_irq(icp->output, 0); if (kvm_irqchip_in_kernel()) { - icp_set_kvm_state(ICP(dev)); + Error *local_err = NULL; + + icp_set_kvm_state(ICP(dev), &local_err); + if (local_err) { + error_report_err(local_err); + } } } @@ -351,6 +363,7 @@ static void icp_realize(DeviceState *dev, Error **errp) return; } + /* Connect the presenter to the VCPU (required for CPU hotplug) */ if (kvm_irqchip_in_kernel()) { icp_kvm_realize(dev, &err); if (err) { @@ -563,7 +576,12 @@ static void ics_simple_reset(DeviceState *dev) icsc->parent_reset(dev); if (kvm_irqchip_in_kernel()) { - ics_set_kvm_state(ICS_BASE(dev)); + Error *local_err = NULL; + + ics_set_kvm_state(ICS_BASE(dev), &local_err); + if (local_err) { + error_report_err(local_err); + } } } @@ -679,7 +697,14 @@ static int ics_base_post_load(void *opaque, int version_id) ICSState *ics = opaque; if (kvm_irqchip_in_kernel()) { - return ics_set_kvm_state(ics); + Error *local_err = NULL; + int ret; + + ret = ics_set_kvm_state(ics, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } } return 0; @@ -765,8 +790,13 @@ void ics_set_irq_type(ICSState *ics, int srcno, bool lsi) lsi ? 
XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI; if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + ics_reset_irq(ics->irqs + srcno); - ics_set_kvm_state_one(ics, srcno); + ics_set_kvm_state_one(ics, srcno, &local_err); + if (local_err) { + error_report_err(local_err); + } } } diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c index 5ba5b77561..51433b19b0 100644 --- a/hw/intc/xics_kvm.c +++ b/hw/intc/xics_kvm.c @@ -106,7 +106,7 @@ void icp_synchronize_state(ICPState *icp) } } -int icp_set_kvm_state(ICPState *icp) +int icp_set_kvm_state(ICPState *icp, Error **errp) { uint64_t state; int ret; @@ -126,10 +126,11 @@ int icp_set_kvm_state(ICPState *icp) | ((uint64_t)icp->pending_priority << KVM_REG_PPC_ICP_PPRI_SHIFT); ret = kvm_set_one_reg(icp->cs, KVM_REG_PPC_ICP_STATE, &state); - if (ret != 0) { - error_report("Unable to restore KVM interrupt controller state (0x%" - PRIx64 ") for CPU %ld: %s", state, kvm_arch_vcpu_id(icp->cs), - strerror(errno)); + if (ret < 0) { + error_setg_errno(errp, -ret, + "Unable to restore KVM interrupt controller state (0x%" + PRIx64 ") for CPU %ld", state, + kvm_arch_vcpu_id(icp->cs)); return ret; } @@ -240,10 +241,9 @@ void ics_synchronize_state(ICSState *ics) ics_get_kvm_state(ics); } -int ics_set_kvm_state_one(ICSState *ics, int srcno) +int ics_set_kvm_state_one(ICSState *ics, int srcno, Error **errp) { uint64_t state; - Error *local_err = NULL; ICSIRQState *irq = &ics->irqs[srcno]; int ret; @@ -278,16 +278,15 @@ int ics_set_kvm_state_one(ICSState *ics, int srcno) } ret = kvm_device_access(kernel_xics_fd, KVM_DEV_XICS_GRP_SOURCES, - srcno + ics->offset, &state, true, &local_err); - if (local_err) { - error_report_err(local_err); + srcno + ics->offset, &state, true, errp); + if (ret < 0) { return ret; } return 0; } -int ics_set_kvm_state(ICSState *ics) +int ics_set_kvm_state(ICSState *ics, Error **errp) { int i; @@ -297,10 +296,12 @@ int ics_set_kvm_state(ICSState *ics) } for (i = 0; i < ics->nr_irqs; i++) { + Error *local_err = NULL; int ret; - ret = ics_set_kvm_state_one(ics, i); - if (ret) { + ret = ics_set_kvm_state_one(ics, i, &local_err); + if (ret < 0) { + error_propagate(errp, local_err); return ret; } } @@ -331,16 +332,7 @@ void ics_kvm_set_irq(ICSState *ics, int srcno, int val) } } -static void rtas_dummy(PowerPCCPU *cpu, SpaprMachineState *spapr, - uint32_t token, - uint32_t nargs, target_ulong args, - uint32_t nret, target_ulong rets) -{ - error_report("pseries: %s must never be called for in-kernel XICS", - __func__); -} - -int xics_kvm_init(SpaprMachineState *spapr, Error **errp) +int xics_kvm_connect(SpaprMachineState *spapr, Error **errp) { int rc; CPUState *cs; @@ -357,42 +349,41 @@ int xics_kvm_init(SpaprMachineState *spapr, Error **errp) if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) { error_setg(errp, "KVM and IRQ_XICS capability must be present for in-kernel XICS"); - goto fail; + return -1; } - spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_dummy); - spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_dummy); - spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_dummy); - spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_dummy); - rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_SET_XIVE, "ibm,set-xive"); if (rc < 0) { - error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,set-xive"); + error_setg_errno(&local_err, -rc, + "kvmppc_define_rtas_kernel_token: ibm,set-xive"); goto fail; } rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_GET_XIVE, "ibm,get-xive"); if (rc < 0) { - 
error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,get-xive"); + error_setg_errno(&local_err, -rc, + "kvmppc_define_rtas_kernel_token: ibm,get-xive"); goto fail; } rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_ON, "ibm,int-on"); if (rc < 0) { - error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,int-on"); + error_setg_errno(&local_err, -rc, + "kvmppc_define_rtas_kernel_token: ibm,int-on"); goto fail; } rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_OFF, "ibm,int-off"); if (rc < 0) { - error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,int-off"); + error_setg_errno(&local_err, -rc, + "kvmppc_define_rtas_kernel_token: ibm,int-off"); goto fail; } /* Create the KVM XICS device */ rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false); if (rc < 0) { - error_setg_errno(errp, -rc, "Error on KVM_CREATE_DEVICE for XICS"); + error_setg_errno(&local_err, -rc, "Error on KVM_CREATE_DEVICE for XICS"); goto fail; } @@ -407,27 +398,30 @@ int xics_kvm_init(SpaprMachineState *spapr, Error **errp) icp_kvm_realize(DEVICE(spapr_cpu_state(cpu)->icp), &local_err); if (local_err) { - error_propagate(errp, local_err); goto fail; } } /* Update the KVM sources */ - ics_set_kvm_state(spapr->ics); + ics_set_kvm_state(spapr->ics, &local_err); + if (local_err) { + goto fail; + } /* Connect the presenters to the initial VCPUs of the machine */ CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); - icp_set_kvm_state(spapr_cpu_state(cpu)->icp); + icp_set_kvm_state(spapr_cpu_state(cpu)->icp, &local_err); + if (local_err) { + goto fail; + } } return 0; fail: - kvmppc_define_rtas_kernel_token(0, "ibm,set-xive"); - kvmppc_define_rtas_kernel_token(0, "ibm,get-xive"); - kvmppc_define_rtas_kernel_token(0, "ibm,int-on"); - kvmppc_define_rtas_kernel_token(0, "ibm,int-off"); + error_propagate(errp, local_err); + xics_kvm_disconnect(spapr, NULL); return -1; } @@ -451,13 +445,10 @@ void xics_kvm_disconnect(SpaprMachineState *spapr, Error **errp) * removed from the list of devices of the VM. The VCPU presenters * are also detached from the device. */ - close(kernel_xics_fd); - kernel_xics_fd = -1; - - spapr_rtas_unregister(RTAS_IBM_SET_XIVE); - spapr_rtas_unregister(RTAS_IBM_GET_XIVE); - spapr_rtas_unregister(RTAS_IBM_INT_OFF); - spapr_rtas_unregister(RTAS_IBM_INT_ON); + if (kernel_xics_fd != -1) { + close(kernel_xics_fd); + kernel_xics_fd = -1; + } kvmppc_define_rtas_kernel_token(0, "ibm,set-xive"); kvmppc_define_rtas_kernel_token(0, "ibm,get-xive"); @@ -471,3 +462,33 @@ void xics_kvm_disconnect(SpaprMachineState *spapr, Error **errp) /* Clear the presenter from the VCPUs */ kvm_disable_icps(); } + +/* + * This is a heuristic to detect older KVMs on POWER9 hosts that don't + * support destruction of a KVM XICS device while the VM is running. + * Required to start a spapr machine with ic-mode=dual,kernel-irqchip=on. + */ +bool xics_kvm_has_broken_disconnect(SpaprMachineState *spapr) +{ + int rc; + + rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false); + if (rc < 0) { + /* + * The error is ignored on purpose. The KVM XICS setup code + * will catch it again anyway. The goal here is to see if + * close() actually destroys the device or not. 
+ */ + return false; + } + + close(rc); + + rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false); + if (rc >= 0) { + close(rc); + return false; + } + + return errno == EEXIST; +} diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c index 5a1835e8b1..7cd3c93d71 100644 --- a/hw/intc/xics_spapr.c +++ b/hw/intc/xics_spapr.c @@ -41,11 +41,32 @@ * Guest interfaces */ +static bool check_emulated_xics(SpaprMachineState *spapr, const char *func) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) || + kvm_irqchip_in_kernel()) { + error_report("pseries: %s must only be called for emulated XICS", + func); + return false; + } + + return true; +} + +#define CHECK_EMULATED_XICS_HCALL(spapr) \ + do { \ + if (!check_emulated_xics((spapr), __func__)) { \ + return H_HARDWARE; \ + } \ + } while (0) + static target_ulong h_cppr(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong opcode, target_ulong *args) { target_ulong cppr = args[0]; + CHECK_EMULATED_XICS_HCALL(spapr); + icp_set_cppr(spapr_cpu_state(cpu)->icp, cppr); return H_SUCCESS; } @@ -56,6 +77,8 @@ static target_ulong h_ipi(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong mfrr = args[1]; ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]); + CHECK_EMULATED_XICS_HCALL(spapr); + if (!icp) { return H_PARAMETER; } @@ -69,6 +92,8 @@ static target_ulong h_xirr(PowerPCCPU *cpu, SpaprMachineState *spapr, { uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp); + CHECK_EMULATED_XICS_HCALL(spapr); + args[0] = xirr; return H_SUCCESS; } @@ -78,6 +103,8 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, SpaprMachineState *spapr, { uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp); + CHECK_EMULATED_XICS_HCALL(spapr); + args[0] = xirr; args[1] = cpu_get_host_ticks(); return H_SUCCESS; @@ -88,6 +115,8 @@ static target_ulong h_eoi(PowerPCCPU *cpu, SpaprMachineState *spapr, { target_ulong xirr = args[0]; + CHECK_EMULATED_XICS_HCALL(spapr); + icp_eoi(spapr_cpu_state(cpu)->icp, xirr); return H_SUCCESS; } @@ -99,6 +128,8 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, SpaprMachineState *spapr, uint32_t mfrr; uint32_t xirr; + CHECK_EMULATED_XICS_HCALL(spapr); + if (!icp) { return H_PARAMETER; } @@ -111,6 +142,14 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, SpaprMachineState *spapr, return H_SUCCESS; } +#define CHECK_EMULATED_XICS_RTAS(spapr, rets) \ + do { \ + if (!check_emulated_xics((spapr), __func__)) { \ + rtas_st((rets), 0, RTAS_OUT_HW_ERROR); \ + return; \ + } \ + } while (0) + static void rtas_set_xive(PowerPCCPU *cpu, SpaprMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, @@ -119,6 +158,8 @@ static void rtas_set_xive(PowerPCCPU *cpu, SpaprMachineState *spapr, ICSState *ics = spapr->ics; uint32_t nr, srcno, server, priority; + CHECK_EMULATED_XICS_RTAS(spapr, rets); + if ((nargs != 3) || (nret != 1)) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; @@ -152,6 +193,8 @@ static void rtas_get_xive(PowerPCCPU *cpu, SpaprMachineState *spapr, ICSState *ics = spapr->ics; uint32_t nr, srcno; + CHECK_EMULATED_XICS_RTAS(spapr, rets); + if ((nargs != 1) || (nret != 3)) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; @@ -182,6 +225,8 @@ static void rtas_int_off(PowerPCCPU *cpu, SpaprMachineState *spapr, ICSState *ics = spapr->ics; uint32_t nr, srcno; + CHECK_EMULATED_XICS_RTAS(spapr, rets); + if ((nargs != 1) || (nret != 1)) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; @@ -213,6 +258,8 @@ static void rtas_int_on(PowerPCCPU *cpu, SpaprMachineState *spapr, ICSState *ics = spapr->ics; uint32_t nr, srcno; + 
CHECK_EMULATED_XICS_RTAS(spapr, rets); + if ((nargs != 1) || (nret != 1)) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; @@ -239,14 +286,6 @@ static void rtas_int_on(PowerPCCPU *cpu, SpaprMachineState *spapr, void xics_spapr_init(SpaprMachineState *spapr) { - /* Emulated mode can only be initialized once. */ - if (spapr->ics->init) { - return; - } - - spapr->ics->init = true; - - /* Registration of global state belongs into realize */ spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive); spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive); spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off); diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 6250c0414d..cf77bdb7d3 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -132,6 +132,11 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) xive_tctx_notify(tctx, ring); } +static inline uint32_t xive_tctx_word2(uint8_t *ring) +{ + return *((uint32_t *) &ring[TM_WORD2]); +} + /* * XIVE Thread Interrupt Management Area (TIMA) */ @@ -150,11 +155,12 @@ static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size) static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset, unsigned size) { - uint64_t ret; + uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t qw2w2; - ret = tctx->regs[TM_QW2_HV_POOL + TM_WORD2] & TM_QW2W2_POOL_CAM; - tctx->regs[TM_QW2_HV_POOL + TM_WORD2] &= ~TM_QW2W2_POOL_CAM; - return ret; + qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0); + memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); + return qw2w2; } static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset, @@ -182,31 +188,31 @@ static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size) */ static const uint8_t xive_tm_hw_view[] = { - /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, - /* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0, - /* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, - /* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 3, 3, 3, 0, + 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ + 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ + 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ }; static const uint8_t xive_tm_hv_view[] = { - /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, - /* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0, - /* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, - /* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ + 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ + 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ }; static const uint8_t xive_tm_os_view[] = { - /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, - /* QW-1 OS */ 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, - /* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ + 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ }; static const uint8_t xive_tm_user_view[] = { - /* 
QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* QW-1 OS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ }; /* @@ -484,11 +490,6 @@ const MemoryRegionOps xive_tm_ops = { }, }; -static inline uint32_t xive_tctx_word2(uint8_t *ring) -{ - return *((uint32_t *) &ring[TM_WORD2]); -} - static char *xive_tctx_ring_print(uint8_t *ring) { uint32_t w2 = xive_tctx_word2(ring); @@ -1229,27 +1230,16 @@ XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs) } /* - * By default on P9, the HW CAM line (23bits) is hardwired to : - * - * 0x000||0b1||4Bit chip number||7Bit Thread number. + * Encode the HW CAM line in the block group mode format : * - * When the block grouping is enabled, the CAM line is changed to : - * - * 4Bit chip number||0x001||7Bit Thread number. + * chip << 19 | 0000000 0 0001 thread (7Bit) */ -static uint32_t hw_cam_line(uint8_t chip_id, uint8_t tid) -{ - return 1 << 11 | (chip_id & 0xf) << 7 | (tid & 0x7f); -} - -static bool xive_presenter_tctx_match_hw(XiveTCTX *tctx, - uint8_t nvt_blk, uint32_t nvt_idx) +static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx) { CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; uint32_t pir = env->spr_cb[SPR_PIR].default_value; - return hw_cam_line((pir >> 8) & 0xf, pir & 0x7f) == - hw_cam_line(nvt_blk, nvt_idx); + return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f)); } /* @@ -1285,7 +1275,7 @@ static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format, /* PHYS ring */ if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) && - xive_presenter_tctx_match_hw(tctx, nvt_blk, nvt_idx)) { + cam == xive_tctx_hw_cam_line(tctx)) { return TM_QW3_HV_PHYS; } diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig index cdc07e59b6..62aa01b29e 100644 --- a/hw/mips/Kconfig +++ b/hw/mips/Kconfig @@ -1,14 +1,44 @@ config R4K bool + select ISA_BUS + select SERIAL_ISA + select I8259 + select I8254 + select MC146818RTC + imply VGA_ISA + imply NE2000_ISA + select IDE_ISA + select PCKBD + select PFLASH_CFI01 config MALTA bool config MIPSSIM bool + select ISA_BUS + select SERIAL_ISA + select MIPSNET config JAZZ bool + select ISA_BUS + select RC4030 + select I8259 + select I8254 + select I8257 + select PCSPK + select VGA_ISA_MM + select G364FB + select DP8393X + select ESP + select FDC + select MC146818RTC + select PCKBD + select SERIAL + select PARALLEL + select DS1225Y + select JAZZ_LED config FULONG bool diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs index 77b9df9796..e9aab519a1 100644 --- a/hw/misc/Makefile.objs +++ b/hw/misc/Makefile.objs @@ -74,6 +74,7 @@ obj-$(CONFIG_ARMSSE_MHU) += armsse-mhu.o obj-$(CONFIG_PVPANIC) += pvpanic.o obj-$(CONFIG_AUX) += auxbus.o +obj-$(CONFIG_ASPEED_SOC) += aspeed_xdma.o obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o obj-$(CONFIG_MSF2) += msf2-sysreg.o obj-$(CONFIG_NRF51_SOC) += nrf51_rng.o diff --git a/hw/misc/aspeed_xdma.c b/hw/misc/aspeed_xdma.c new file mode 100644 index 0000000000..eebd4ad540 --- /dev/null +++ b/hw/misc/aspeed_xdma.c @@ -0,0 +1,165 @@ +/* + * ASPEED XDMA Controller + * Eddie James <eajames@linux.ibm.com> + * + * Copyright (C) 2019 IBM Corp + * SPDX-License-Identifer: GPL-2.0-or-later + */ 
+ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "hw/misc/aspeed_xdma.h" +#include "qapi/error.h" + +#include "trace.h" + +#define XDMA_BMC_CMDQ_ADDR 0x10 +#define XDMA_BMC_CMDQ_ENDP 0x14 +#define XDMA_BMC_CMDQ_WRP 0x18 +#define XDMA_BMC_CMDQ_W_MASK 0x0003FFFF +#define XDMA_BMC_CMDQ_RDP 0x1C +#define XDMA_BMC_CMDQ_RDP_MAGIC 0xEE882266 +#define XDMA_IRQ_ENG_CTRL 0x20 +#define XDMA_IRQ_ENG_CTRL_US_COMP BIT(4) +#define XDMA_IRQ_ENG_CTRL_DS_COMP BIT(5) +#define XDMA_IRQ_ENG_CTRL_W_MASK 0xBFEFF07F +#define XDMA_IRQ_ENG_STAT 0x24 +#define XDMA_IRQ_ENG_STAT_US_COMP BIT(4) +#define XDMA_IRQ_ENG_STAT_DS_COMP BIT(5) +#define XDMA_IRQ_ENG_STAT_RESET 0xF8000000 +#define XDMA_MEM_SIZE 0x1000 + +#define TO_REG(addr) ((addr) / sizeof(uint32_t)) + +static uint64_t aspeed_xdma_read(void *opaque, hwaddr addr, unsigned int size) +{ + uint32_t val = 0; + AspeedXDMAState *xdma = opaque; + + if (addr < ASPEED_XDMA_REG_SIZE) { + val = xdma->regs[TO_REG(addr)]; + } + + return (uint64_t)val; +} + +static void aspeed_xdma_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + unsigned int idx; + uint32_t val32 = (uint32_t)val; + AspeedXDMAState *xdma = opaque; + + if (addr >= ASPEED_XDMA_REG_SIZE) { + return; + } + + switch (addr) { + case XDMA_BMC_CMDQ_ENDP: + xdma->regs[TO_REG(addr)] = val32 & XDMA_BMC_CMDQ_W_MASK; + break; + case XDMA_BMC_CMDQ_WRP: + idx = TO_REG(addr); + xdma->regs[idx] = val32 & XDMA_BMC_CMDQ_W_MASK; + xdma->regs[TO_REG(XDMA_BMC_CMDQ_RDP)] = xdma->regs[idx]; + + trace_aspeed_xdma_write(addr, val); + + if (xdma->bmc_cmdq_readp_set) { + xdma->bmc_cmdq_readp_set = 0; + } else { + xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] |= + XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP; + + if (xdma->regs[TO_REG(XDMA_IRQ_ENG_CTRL)] & + (XDMA_IRQ_ENG_CTRL_US_COMP | XDMA_IRQ_ENG_CTRL_DS_COMP)) + qemu_irq_raise(xdma->irq); + } + break; + case XDMA_BMC_CMDQ_RDP: + trace_aspeed_xdma_write(addr, val); + + if (val32 == XDMA_BMC_CMDQ_RDP_MAGIC) { + xdma->bmc_cmdq_readp_set = 1; + } + break; + case XDMA_IRQ_ENG_CTRL: + xdma->regs[TO_REG(addr)] = val32 & XDMA_IRQ_ENG_CTRL_W_MASK; + break; + case XDMA_IRQ_ENG_STAT: + trace_aspeed_xdma_write(addr, val); + + idx = TO_REG(addr); + if (val32 & (XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP)) { + xdma->regs[idx] &= + ~(XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP); + qemu_irq_lower(xdma->irq); + } + break; + default: + xdma->regs[TO_REG(addr)] = val32; + break; + } +} + +static const MemoryRegionOps aspeed_xdma_ops = { + .read = aspeed_xdma_read, + .write = aspeed_xdma_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void aspeed_xdma_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedXDMAState *xdma = ASPEED_XDMA(dev); + + sysbus_init_irq(sbd, &xdma->irq); + memory_region_init_io(&xdma->iomem, OBJECT(xdma), &aspeed_xdma_ops, xdma, + TYPE_ASPEED_XDMA, XDMA_MEM_SIZE); + sysbus_init_mmio(sbd, &xdma->iomem); +} + +static void aspeed_xdma_reset(DeviceState *dev) +{ + AspeedXDMAState *xdma = ASPEED_XDMA(dev); + + xdma->bmc_cmdq_readp_set = 0; + memset(xdma->regs, 0, ASPEED_XDMA_REG_SIZE); + xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] = XDMA_IRQ_ENG_STAT_RESET; + + qemu_irq_lower(xdma->irq); +} + +static const VMStateDescription aspeed_xdma_vmstate = { + .name = TYPE_ASPEED_XDMA, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedXDMAState, ASPEED_XDMA_NUM_REGS), + 
VMSTATE_END_OF_LIST(), + }, +}; + +static void aspeed_xdma_class_init(ObjectClass *classp, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(classp); + + dc->realize = aspeed_xdma_realize; + dc->reset = aspeed_xdma_reset; + dc->vmsd = &aspeed_xdma_vmstate; +} + +static const TypeInfo aspeed_xdma_info = { + .name = TYPE_ASPEED_XDMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedXDMAState), + .class_init = aspeed_xdma_class_init, +}; + +static void aspeed_xdma_register_type(void) +{ + type_register_static(&aspeed_xdma_info); +} +type_init(aspeed_xdma_register_type); diff --git a/hw/misc/trace-events b/hw/misc/trace-events index 47e1bccf71..c1ea1aa437 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -140,3 +140,6 @@ armsse_cpuid_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 CPU_I # armsse-mhu.c armsse_mhu_read(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" armsse_mhu_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" + +# aspeed_xdma.c +aspeed_xdma_write(uint64_t offset, uint64_t data) "XDMA write: offset 0x%" PRIx64 " data 0x%" PRIx64 diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c index eb760472e5..d2cded5e94 100644 --- a/hw/net/ftgmac100.c +++ b/hw/net/ftgmac100.c @@ -1017,8 +1017,6 @@ static void ftgmac100_realize(DeviceState *dev, Error **errp) sysbus_init_irq(sbd, &s->irq); qemu_macaddr_default_if_unset(&s->conf.macaddr); - s->conf.peers.ncs[0] = nd_table[0].netdev; - s->nic = qemu_new_nic(&net_ftgmac100_info, &s->conf, object_get_typename(OBJECT(dev)), DEVICE(dev)->id, s); diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c index 1ebaee3c82..8b8603e696 100644 --- a/hw/net/sunhme.c +++ b/hw/net/sunhme.c @@ -44,6 +44,7 @@ #define HME_SEBI_STAT 0x100 #define HME_SEBI_STAT_LINUXBUG 0x108 #define HME_SEB_STAT_RXTOHOST 0x10000 +#define HME_SEB_STAT_NORXD 0x20000 #define HME_SEB_STAT_MIFIRQ 0x800000 #define HME_SEB_STAT_HOSTTOTX 0x1000000 #define HME_SEB_STAT_TXALL 0x2000000 @@ -209,6 +210,8 @@ static void sunhme_update_irq(SunHMEState *s) } level = (seb ? 
1 : 0); + trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level); + pci_set_irq(d, level); } @@ -371,10 +374,20 @@ static void sunhme_mac_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { SunHMEState *s = SUNHME(opaque); + uint64_t oldval = s->macregs[addr >> 2]; trace_sunhme_mac_write(addr, val); s->macregs[addr >> 2] = val; + + switch (addr) { + case HME_MACI_RXCFG: + if (!(oldval & HME_MAC_RXCFG_ENABLE) && + (val & HME_MAC_RXCFG_ENABLE)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + } } static uint64_t sunhme_mac_read(void *opaque, hwaddr addr, @@ -647,7 +660,7 @@ static int sunhme_can_receive(NetClientState *nc) { SunHMEState *s = qemu_get_nic_opaque(nc); - return s->macregs[HME_MAC_RXCFG_ENABLE >> 2] & HME_MAC_RXCFG_ENABLE; + return s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE; } static void sunhme_link_status_changed(NetClientState *nc) @@ -716,7 +729,7 @@ static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf, /* Do nothing if MAC RX disabled */ if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) { - return -1; + return 0; } trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2], @@ -745,14 +758,14 @@ static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf, /* Didn't match hash filter */ trace_sunhme_rx_filter_hash_nomatch(); trace_sunhme_rx_filter_reject(); - return 0; + return -1; } else { trace_sunhme_rx_filter_hash_match(); } } else { /* Not for us */ trace_sunhme_rx_filter_reject(); - return 0; + return -1; } } else { trace_sunhme_rx_filter_promisc_match(); @@ -775,6 +788,14 @@ static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf, pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4); pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4); + /* If we don't own the current descriptor then indicate overflow error */ + if (!(status & HME_XD_OWN)) { + s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD; + sunhme_update_irq(s); + trace_sunhme_rx_norxd(); + return -1; + } + rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >> HME_ERX_CFG_BYTEOFFSET_SHIFT; diff --git a/hw/net/trace-events b/hw/net/trace-events index 3cd9e122df..58665655cc 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -359,6 +359,8 @@ sunhme_rx_filter_reject(void) "rejecting incoming frame" sunhme_rx_filter_accept(void) "accepting incoming frame" sunhme_rx_desc(uint32_t addr, int offset, uint32_t status, int len, int cr, int nr) "addr 0x%"PRIx32"(+0x%x) status 0x%"PRIx32 " len %d (ring %d/%d)" sunhme_rx_xsum_calc(uint16_t xsum) "calculated incoming xsum as 0x%x" +sunhme_rx_norxd(void) "no free rx descriptors available" +sunhme_update_irq(uint32_t mifmask, uint32_t mif, uint32_t sebmask, uint32_t seb, int level) "mifmask: 0x%x mif: 0x%x sebmask: 0x%x seb: 0x%x level: %d" # virtio-net.c virtio_net_announce_notify(void) "" diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index c3f5fccfd1..b9e1cd71cf 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -2360,7 +2360,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id) timer_mod(n->announce_timer.tm, qemu_clock_get_ms(n->announce_timer.type)); } else { - qemu_announce_timer_del(&n->announce_timer); + qemu_announce_timer_del(&n->announce_timer, false); } } @@ -2784,7 +2784,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp) virtio_net_del_queue(n, i); } - qemu_announce_timer_del(&n->announce_timer); + qemu_announce_timer_del(&n->announce_timer, false); g_free(n->vqs); 
qemu_del_nic(n->nic); virtio_net_rsc_cleanup(n); diff --git a/hw/pci-bridge/pcie_root_port.c b/hw/pci-bridge/pcie_root_port.c index 92f253c924..09019ca05d 100644 --- a/hw/pci-bridge/pcie_root_port.c +++ b/hw/pci-bridge/pcie_root_port.c @@ -31,10 +31,13 @@ static void rp_write_config(PCIDevice *d, uint32_t address, { uint32_t root_cmd = pci_get_long(d->config + d->exp.aer_cap + PCI_ERR_ROOT_COMMAND); + uint16_t slt_ctl, slt_sta; + + pcie_cap_slot_get(d, &slt_ctl, &slt_sta); pci_bridge_write_config(d, address, val, len); rp_aer_vector_update(d); - pcie_cap_slot_write_config(d, address, val, len); + pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len); pcie_aer_write_config(d, address, val, len); pcie_aer_root_write_config(d, address, val, len, root_cmd); } diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c index 264e37d6a6..899b0fd6c9 100644 --- a/hw/pci-bridge/xio3130_downstream.c +++ b/hw/pci-bridge/xio3130_downstream.c @@ -41,9 +41,12 @@ static void xio3130_downstream_write_config(PCIDevice *d, uint32_t address, uint32_t val, int len) { + uint16_t slt_ctl, slt_sta; + + pcie_cap_slot_get(d, &slt_sta, &slt_ctl); pci_bridge_write_config(d, address, val, len); pcie_cap_flr_write_config(d, address, val, len); - pcie_cap_slot_write_config(d, address, val, len); + pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len); pcie_aer_write_config(d, address, val, len); } diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c index 0fdfff5784..9ae8c0deb7 100644 --- a/hw/pci-host/designware.c +++ b/hw/pci-host/designware.c @@ -51,6 +51,8 @@ #define DESIGNWARE_PCIE_ATU_DEVFN(x) (((x) >> 16) & 0xff) #define DESIGNWARE_PCIE_ATU_UPPER_TARGET 0x91C +#define DESIGNWARE_PCIE_IRQ_MSI 3 + static DesignwarePCIEHost * designware_pcie_root_to_host(DesignwarePCIERoot *root) { @@ -67,7 +69,7 @@ static void designware_pcie_root_msi_write(void *opaque, hwaddr addr, root->msi.intr[0].status |= BIT(val) & root->msi.intr[0].enable; if (root->msi.intr[0].status & ~root->msi.intr[0].mask) { - qemu_set_irq(host->pci.irqs[0], 1); + qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 1); } } @@ -290,23 +292,19 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address, case DESIGNWARE_PCIE_MSI_ADDR_LO: root->msi.base &= 0xFFFFFFFF00000000ULL; root->msi.base |= val; + designware_pcie_root_update_msi_mapping(root); break; case DESIGNWARE_PCIE_MSI_ADDR_HI: root->msi.base &= 0x00000000FFFFFFFFULL; root->msi.base |= (uint64_t)val << 32; + designware_pcie_root_update_msi_mapping(root); break; - case DESIGNWARE_PCIE_MSI_INTR0_ENABLE: { - const bool update_msi_mapping = !root->msi.intr[0].enable ^ !!val; - + case DESIGNWARE_PCIE_MSI_INTR0_ENABLE: root->msi.intr[0].enable = val; - - if (update_msi_mapping) { - designware_pcie_root_update_msi_mapping(root); - } + designware_pcie_root_update_msi_mapping(root); break; - } case DESIGNWARE_PCIE_MSI_INTR0_MASK: root->msi.intr[0].mask = val; @@ -315,7 +313,7 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address, case DESIGNWARE_PCIE_MSI_INTR0_STATUS: root->msi.intr[0].status ^= val; if (!root->msi.intr[0].status) { - qemu_set_irq(host->pci.irqs[0], 0); + qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 0); } break; diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 88c30ff74c..a6beb567bd 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -383,7 +383,7 @@ static void pcie_cap_slot_event(PCIDevice *dev, PCIExpressHotPlugEvent event) { /* Minor optimization: if nothing changed 
- no event is needed. */ if (pci_word_test_and_set_mask(dev->config + dev->exp.exp_cap + - PCI_EXP_SLTSTA, event)) { + PCI_EXP_SLTSTA, event) == event) { return; } hotplug_event_notify(dev); @@ -594,7 +594,16 @@ void pcie_cap_slot_reset(PCIDevice *dev) hotplug_event_update_event_status(dev); } +void pcie_cap_slot_get(PCIDevice *dev, uint16_t *slt_ctl, uint16_t *slt_sta) +{ + uint32_t pos = dev->exp.exp_cap; + uint8_t *exp_cap = dev->config + pos; + *slt_ctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL); + *slt_sta = pci_get_word(exp_cap + PCI_EXP_SLTSTA); +} + void pcie_cap_slot_write_config(PCIDevice *dev, + uint16_t old_slt_ctl, uint16_t old_slt_sta, uint32_t addr, uint32_t val, int len) { uint32_t pos = dev->exp.exp_cap; @@ -602,6 +611,25 @@ void pcie_cap_slot_write_config(PCIDevice *dev, uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA); if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) { + /* + * Guests tend to clears all bits during init. + * If they clear bits that weren't set this is racy and will lose events: + * not a big problem for manual button presses, but a problem for us. + * As a work-around, detect this and revert status to what it was + * before the write. + * + * Note: in theory this can be detected as a duplicate button press + * which cancels the previous press. Does not seem to happen in + * practice as guests seem to only have this bug during init. + */ +#define PCIE_SLOT_EVENTS (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | \ + PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | \ + PCI_EXP_SLTSTA_CC) + + if (val & ~old_slt_sta & PCIE_SLOT_EVENTS) { + sltsta = (sltsta & ~PCIE_SLOT_EVENTS) | (old_slt_sta & PCIE_SLOT_EVENTS); + pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta); + } hotplug_event_clear(dev); } @@ -619,11 +647,17 @@ void pcie_cap_slot_write_config(PCIDevice *dev, } /* - * If the slot is polulated, power indicator is off and power + * If the slot is populated, power indicator is off and power * controller is off, it is safe to detach the devices. + * + * Note: don't detach if condition was already true: + * this is a work around for guests that overwrite + * control of powered off slots before powering them on. 
*/ if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) && - ((val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF)) { + (val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF && + (!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) || + (old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) { PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev)); pci_for_each_device(sec_bus, pci_bus_num(sec_bus), pcie_unplug_device, NULL); diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 4d835f32b5..c8d3245524 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -437,13 +437,11 @@ static void ppc_core99_init(MachineState *machine) } /* The NewWorld NVRAM is not located in the MacIO device */ -#ifdef CONFIG_KVM if (kvm_enabled() && getpagesize() > 4096) { /* We can't combine read-write and read-only in a single page, so move the NVRAM out of ROM again for KVM */ nvram_addr = 0xFFE00000; } -#endif dev = qdev_create(NULL, TYPE_MACIO_NVRAM); qdev_prop_set_uint32(dev, "size", 0x2000); qdev_prop_set_uint32(dev, "it_shift", 1); @@ -488,14 +486,12 @@ static void ppc_core99_init(MachineState *machine) fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); if (kvm_enabled()) { -#ifdef CONFIG_KVM uint8_t *hypercall; hypercall = g_malloc(16); kvmppc_get_hypercall(env, hypercall, 16); fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); -#endif } fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq); /* Mac OS X requires a "known good" clock-frequency value; pass it one. */ diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index eddd005a7c..da751addc4 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -345,14 +345,12 @@ static void ppc_heathrow_init(MachineState *machine) fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); if (kvm_enabled()) { -#ifdef CONFIG_KVM uint8_t *hypercall; hypercall = g_malloc(16); kvmppc_get_hypercall(env, hypercall, 16); fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); -#endif } fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq); /* Mac OS X requires a "known good" clock-frequency value; pass it one. 
*/ diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 9db43916ac..b87e01e5b9 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -860,6 +860,14 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) Pnv8Psi *psi8 = &chip8->psi; Error *local_err = NULL; + /* XSCOM bridge is first */ + pnv_xscom_realize(chip, PNV_XSCOM_SIZE, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV_XSCOM_BASE(chip)); + pcc->parent_realize(dev, &local_err); if (local_err) { error_propagate(errp, local_err); @@ -916,7 +924,6 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data) k->isa_create = pnv_chip_power8_isa_create; k->dt_populate = pnv_chip_power8_dt_populate; k->pic_print_info = pnv_chip_power8_pic_print_info; - k->xscom_base = 0x003fc0000000000ull; dc->desc = "PowerNV Chip POWER8E"; device_class_set_parent_realize(dc, pnv_chip_power8_realize, @@ -936,7 +943,6 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data) k->isa_create = pnv_chip_power8_isa_create; k->dt_populate = pnv_chip_power8_dt_populate; k->pic_print_info = pnv_chip_power8_pic_print_info; - k->xscom_base = 0x003fc0000000000ull; dc->desc = "PowerNV Chip POWER8"; device_class_set_parent_realize(dc, pnv_chip_power8_realize, @@ -956,7 +962,6 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data) k->isa_create = pnv_chip_power8nvl_isa_create; k->dt_populate = pnv_chip_power8_dt_populate; k->pic_print_info = pnv_chip_power8_pic_print_info; - k->xscom_base = 0x003fc0000000000ull; dc->desc = "PowerNV Chip POWER8NVL"; device_class_set_parent_realize(dc, pnv_chip_power8_realize, @@ -1024,6 +1029,14 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) Pnv9Psi *psi9 = &chip9->psi; Error *local_err = NULL; + /* XSCOM bridge is first */ + pnv_xscom_realize(chip, PNV9_XSCOM_SIZE, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV9_XSCOM_BASE(chip)); + pcc->parent_realize(dev, &local_err); if (local_err) { error_propagate(errp, local_err); @@ -1099,7 +1112,6 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data) k->isa_create = pnv_chip_power9_isa_create; k->dt_populate = pnv_chip_power9_dt_populate; k->pic_print_info = pnv_chip_power9_pic_print_info; - k->xscom_base = 0x00603fc00000000ull; dc->desc = "PowerNV Chip POWER9"; device_class_set_parent_realize(dc, pnv_chip_power9_realize, @@ -1136,11 +1148,6 @@ static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp) } } -static void pnv_chip_instance_init(Object *obj) -{ - PNV_CHIP(obj)->xscom_base = PNV_CHIP_GET_CLASS(obj)->xscom_base; -} - static void pnv_chip_core_realize(PnvChip *chip, Error **errp) { Error *error = NULL; @@ -1206,14 +1213,6 @@ static void pnv_chip_realize(DeviceState *dev, Error **errp) PnvChip *chip = PNV_CHIP(dev); Error *error = NULL; - /* XSCOM bridge */ - pnv_xscom_realize(chip, &error); - if (error) { - error_propagate(errp, error); - return; - } - sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV_XSCOM_BASE(chip)); - /* Cores */ pnv_chip_core_realize(chip, &error); if (error) { @@ -1398,7 +1397,6 @@ static const TypeInfo types[] = { .name = TYPE_PNV_CHIP, .parent = TYPE_SYS_BUS_DEVICE, .class_init = pnv_chip_class_init, - .instance_init = pnv_chip_instance_init, .instance_size = sizeof(PnvChip), .class_size = sizeof(PnvChipClass), .abstract = true, diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c index 
4e52885c9e..2b81c75f56 100644 --- a/hw/ppc/pnv_xscom.c +++ b/hw/ppc/pnv_xscom.c @@ -213,17 +213,17 @@ const MemoryRegionOps pnv_xscom_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -void pnv_xscom_realize(PnvChip *chip, Error **errp) +void pnv_xscom_realize(PnvChip *chip, uint64_t size, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(chip); char *name; name = g_strdup_printf("xscom-%x", chip->chip_id); memory_region_init_io(&chip->xscom_mmio, OBJECT(chip), &pnv_xscom_ops, - chip, name, PNV_XSCOM_SIZE); + chip, name, size); sysbus_init_mmio(sbd, &chip->xscom_mmio); - memory_region_init(&chip->xscom, OBJECT(chip), name, PNV_XSCOM_SIZE); + memory_region_init(&chip->xscom, OBJECT(chip), name, size); address_space_init(&chip->xscom_as, &chip->xscom, name); g_free(name); } @@ -265,12 +265,19 @@ static const char compat_p9[] = "ibm,power9-xscom\0ibm,xscom"; int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset) { - uint64_t reg[] = { cpu_to_be64(PNV_XSCOM_BASE(chip)), - cpu_to_be64(PNV_XSCOM_SIZE) }; + uint64_t reg[2]; int xscom_offset; ForeachPopulateArgs args; char *name; + if (pnv_chip_is_power9(chip)) { + reg[0] = cpu_to_be64(PNV9_XSCOM_BASE(chip)); + reg[1] = cpu_to_be64(PNV9_XSCOM_SIZE); + } else { + reg[0] = cpu_to_be64(PNV_XSCOM_BASE(chip)); + reg[1] = cpu_to_be64(PNV_XSCOM_SIZE); + } + name = g_strdup_printf("xscom@%" PRIx64, be64_to_cpu(reg[0])); xscom_offset = fdt_add_subnode(fdt, root_offset, name); _FDT(xscom_offset); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index 9d91e8481b..a9e508c496 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -80,9 +80,7 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) } if (old_pending != env->pending_interrupts) { -#ifdef CONFIG_KVM kvmppc_set_interrupt(cpu, n_IRQ, level); -#endif } @@ -1036,10 +1034,7 @@ static void timebase_load(PPCTimebase *tb) CPU_FOREACH(cpu) { PowerPCCPU *pcpu = POWERPC_CPU(cpu); pcpu->env.tb_env->tb_offset = tb_off_adj; -#if defined(CONFIG_KVM) - kvm_set_one_reg(cpu, KVM_REG_PPC_TB_OFFSET, - &pcpu->env.tb_env->tb_offset); -#endif + kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset); } } diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index 2a8009e20b..a248ce480d 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -780,7 +780,6 @@ static void ibm_40p_init(MachineState *machine) fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); if (kvm_enabled()) { -#ifdef CONFIG_KVM uint8_t *hypercall; fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, kvmppc_get_tbfreq()); @@ -788,7 +787,6 @@ static void ibm_40p_init(MachineState *machine) kvmppc_get_hypercall(env, hypercall, 16); fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); -#endif } else { fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, NANOSECONDS_PER_SECOND); } diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 3156daf093..ff3df0bbd8 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -62,7 +62,7 @@ void spapr_irq_msi_reset(SpaprMachineState *spapr) bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr); } -static void spapr_irq_init_device(SpaprMachineState *spapr, +static void spapr_irq_init_kvm(SpaprMachineState *spapr, SpaprIrq *irq, Error **errp) { MachineState *machine = MACHINE(spapr); @@ -88,8 +88,6 @@ static void spapr_irq_init_device(SpaprMachineState *spapr, error_prepend(&local_err, "kernel_irqchip allowed but unavailable: "); warn_report_err(local_err); } - - irq->init_emu(spapr, errp); } /* @@ -114,6 +112,8 @@ static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs, } spapr->ics 
= ICS_BASE(obj); + + xics_spapr_init(spapr); } #define ICS_IRQ_FREE(ics, srcno) \ @@ -222,7 +222,7 @@ static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp) { Error *local_err = NULL; - spapr_irq_init_device(spapr, &spapr_irq_xics, &local_err); + spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -234,15 +234,10 @@ static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr) return XICS_NODENAME; } -static void spapr_irq_init_emu_xics(SpaprMachineState *spapr, Error **errp) -{ - xics_spapr_init(spapr); -} - static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp) { if (kvm_enabled()) { - xics_kvm_init(spapr, errp); + xics_kvm_connect(spapr, errp); } } @@ -266,7 +261,6 @@ SpaprIrq spapr_irq_xics = { .reset = spapr_irq_reset_xics, .set_irq = spapr_irq_set_irq_xics, .get_nodename = spapr_irq_get_nodename_xics, - .init_emu = spapr_irq_init_emu_xics, .init_kvm = spapr_irq_init_kvm_xics, }; @@ -384,7 +378,7 @@ static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp) spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx); } - spapr_irq_init_device(spapr, &spapr_irq_xive, &local_err); + spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -410,11 +404,6 @@ static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr) return spapr->xive->nodename; } -static void spapr_irq_init_emu_xive(SpaprMachineState *spapr, Error **errp) -{ - spapr_xive_init(spapr->xive, errp); -} - static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp) { if (kvm_enabled()) { @@ -446,7 +435,6 @@ SpaprIrq spapr_irq_xive = { .reset = spapr_irq_reset_xive, .set_irq = spapr_irq_set_irq_xive, .get_nodename = spapr_irq_get_nodename_xive, - .init_emu = spapr_irq_init_emu_xive, .init_kvm = spapr_irq_init_kvm_xive, }; @@ -624,7 +612,6 @@ SpaprIrq spapr_irq_dual = { .reset = spapr_irq_reset_dual, .set_irq = spapr_irq_set_irq_dual, .get_nodename = spapr_irq_get_nodename_dual, - .init_emu = NULL, /* should not be used */ .init_kvm = NULL, /* should not be used */ }; @@ -668,6 +655,19 @@ static void spapr_irq_check(SpaprMachineState *spapr, Error **errp) return; } } + + /* + * On a POWER9 host, some older KVM XICS devices cannot be destroyed and + * re-created. Detect that early to avoid QEMU to exit later when the + * guest reboots. 
+ */ + if (kvm_enabled() && + spapr->irq == &spapr_irq_dual && + machine_kernel_irqchip_required(machine) && + xics_kvm_has_broken_disconnect(spapr)) { + error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on"); + return; + } } /* @@ -827,6 +827,5 @@ SpaprIrq spapr_irq_xics_legacy = { .reset = spapr_irq_reset_xics, .set_irq = spapr_irq_set_irq_xics, .get_nodename = spapr_irq_get_nodename_xics, - .init_emu = spapr_irq_init_emu_xics, .init_kvm = spapr_irq_init_kvm_xics, }; diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 957ae88bbd..9003fe9010 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1343,6 +1343,7 @@ static void spapr_dt_pci_device_cb(PCIBus *bus, PCIDevice *pdev, static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus, void *fdt, int offset) { + Object *owner; PciWalkFdt cbinfo = { .fdt = fdt, .offset = offset, @@ -1356,15 +1357,20 @@ static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus, _FDT(fdt_setprop_cell(fdt, offset, "#size-cells", RESOURCE_CELLS_SIZE)); - if (bus) { - pci_for_each_device_reverse(bus, pci_bus_num(bus), - spapr_dt_pci_device_cb, &cbinfo); - if (cbinfo.err) { - return cbinfo.err; - } + assert(bus); + pci_for_each_device_reverse(bus, pci_bus_num(bus), + spapr_dt_pci_device_cb, &cbinfo); + if (cbinfo.err) { + return cbinfo.err; } - ret = spapr_dt_drc(fdt, offset, OBJECT(bus->parent_dev), + if (pci_bus_is_root(bus)) { + owner = OBJECT(sphb); + } else { + owner = OBJECT(pci_bridge_get_device(bus)); + } + + ret = spapr_dt_drc(fdt, offset, owner, SPAPR_DR_CONNECTOR_TYPE_PCI); if (ret) { return ret; @@ -1782,6 +1788,12 @@ static void spapr_phb_unrealize(DeviceState *dev, Error **errp) memory_region_del_subregion(&sphb->iommu_root, &sphb->msiwindow); + /* + * An attached PCI device may have memory listeners, eg. VFIO PCI. We have + * unmapped all sections. Remove the listeners now, before destroying the + * address space. + */ + address_space_remove_listeners(&sphb->iommu_as); address_space_destroy(&sphb->iommu_as); qbus_set_hotplug_handler(BUS(phb->bus), NULL, &error_abort); @@ -1945,11 +1957,9 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) * For KVM we want to ensure that this memory is a full page so that * our memory slot is of page size granularity. */ -#ifdef CONFIG_KVM if (kvm_enabled()) { msi_window_size = getpagesize(); } -#endif memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr, "msi", msi_window_size); diff --git a/hw/ppc/spapr_rtc.c b/hw/ppc/spapr_rtc.c index af1ef30a53..6cf0113b34 100644 --- a/hw/ppc/spapr_rtc.c +++ b/hw/ppc/spapr_rtc.c @@ -32,7 +32,7 @@ #include "sysemu/sysemu.h" #include "hw/ppc/spapr.h" #include "qapi/error.h" -#include "qapi/qapi-events-target.h" +#include "qapi/qapi-events-misc-target.h" #include "qemu/cutils.h" #include "qemu/module.h" diff --git a/hw/riscv/Makefile.objs b/hw/riscv/Makefile.objs index a65027304a..eb9d4f9ffc 100644 --- a/hw/riscv/Makefile.objs +++ b/hw/riscv/Makefile.objs @@ -1,3 +1,4 @@ +obj-y += boot.o obj-$(CONFIG_SPIKE) += riscv_htif.o obj-$(CONFIG_HART) += riscv_hart.o obj-$(CONFIG_SIFIVE_E) += sifive_e.o diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c new file mode 100644 index 0000000000..ff023f42d0 --- /dev/null +++ b/hw/riscv/boot.c @@ -0,0 +1,105 @@ +/* + * QEMU RISC-V Boot Helper + * + * Copyright (c) 2017 SiFive, Inc. 
+ * Copyright (c) 2019 Alistair Francis <alistair.francis@wdc.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "exec/cpu-defs.h" +#include "hw/loader.h" +#include "hw/riscv/boot.h" +#include "hw/boards.h" +#include "elf.h" + +#if defined(TARGET_RISCV32) +# define KERNEL_BOOT_ADDRESS 0x80400000 +#else +# define KERNEL_BOOT_ADDRESS 0x80200000 +#endif + +target_ulong riscv_load_firmware(const char *firmware_filename, + hwaddr firmware_load_addr) +{ + uint64_t firmware_entry, firmware_start, firmware_end; + + if (load_elf(firmware_filename, NULL, NULL, NULL, &firmware_entry, + &firmware_start, &firmware_end, 0, EM_RISCV, 1, 0) > 0) { + return firmware_entry; + } + + if (load_image_targphys_as(firmware_filename, firmware_load_addr, + ram_size, NULL) > 0) { + return firmware_load_addr; + } + + error_report("could not load firmware '%s'", firmware_filename); + exit(1); +} + +target_ulong riscv_load_kernel(const char *kernel_filename) +{ + uint64_t kernel_entry, kernel_high; + + if (load_elf(kernel_filename, NULL, NULL, NULL, + &kernel_entry, NULL, &kernel_high, 0, EM_RISCV, 1, 0) > 0) { + return kernel_entry; + } + + if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL, + NULL, NULL, NULL) > 0) { + return kernel_entry; + } + + if (load_image_targphys_as(kernel_filename, KERNEL_BOOT_ADDRESS, + ram_size, NULL) > 0) { + return KERNEL_BOOT_ADDRESS; + } + + error_report("could not load kernel '%s'", kernel_filename); + exit(1); +} + +hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size, + uint64_t kernel_entry, hwaddr *start) +{ + int size; + + /* + * We want to put the initrd far enough into RAM that when the + * kernel is uncompressed it will not clobber the initrd. However + * on boards without much RAM we must ensure that we still leave + * enough room for a decent sized initrd, and on boards with large + * amounts of RAM we must avoid the initrd being so far up in RAM + * that it is outside lowmem and inaccessible to the kernel. + * So for boards with less than 256MB of RAM we put the initrd + * halfway into RAM, and for boards with 256MB of RAM or more we put + * the initrd at 128MB. 
+ */ + *start = kernel_entry + MIN(mem_size / 2, 128 * MiB); + + size = load_ramdisk(filename, *start, mem_size - *start); + if (size == -1) { + size = load_image_targphys(filename, *start, mem_size - *start); + if (size == -1) { + error_report("could not load ramdisk '%s'", filename); + exit(1); + } + } + + return *start + size; +} diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c index 80ac56fa7d..d27f626529 100644 --- a/hw/riscv/sifive_e.c +++ b/hw/riscv/sifive_e.c @@ -44,10 +44,10 @@ #include "hw/riscv/sifive_prci.h" #include "hw/riscv/sifive_uart.h" #include "hw/riscv/sifive_e.h" +#include "hw/riscv/boot.h" #include "chardev/char.h" #include "sysemu/arch_init.h" #include "exec/address-spaces.h" -#include "elf.h" static const struct MemmapEntry { hwaddr base; @@ -74,19 +74,6 @@ static const struct MemmapEntry { [SIFIVE_E_DTIM] = { 0x80000000, 0x4000 } }; -static target_ulong load_kernel(const char *kernel_filename) -{ - uint64_t kernel_entry, kernel_high; - - if (load_elf(kernel_filename, NULL, NULL, NULL, - &kernel_entry, NULL, &kernel_high, - 0, EM_RISCV, 1, 0) < 0) { - error_report("could not load kernel '%s'", kernel_filename); - exit(1); - } - return kernel_entry; -} - static void sifive_mmio_emulate(MemoryRegion *parent, const char *name, uintptr_t offset, uintptr_t length) { @@ -131,7 +118,7 @@ static void riscv_sifive_e_init(MachineState *machine) memmap[SIFIVE_E_MROM].base, &address_space_memory); if (machine->kernel_filename) { - load_kernel(machine->kernel_filename); + riscv_load_kernel(machine->kernel_filename); } } @@ -158,17 +145,15 @@ static void riscv_sifive_e_soc_realize(DeviceState *dev, Error **errp) SiFiveESoCState *s = RISCV_E_SOC(dev); MemoryRegion *sys_mem = get_system_memory(); - MemoryRegion *xip_mem = g_new(MemoryRegion, 1); - MemoryRegion *mask_rom = g_new(MemoryRegion, 1); object_property_set_bool(OBJECT(&s->cpus), true, "realized", &error_abort); /* Mask ROM */ - memory_region_init_rom(mask_rom, NULL, "riscv.sifive.e.mrom", + memory_region_init_rom(&s->mask_rom, NULL, "riscv.sifive.e.mrom", memmap[SIFIVE_E_MROM].size, &error_fatal); memory_region_add_subregion(sys_mem, - memmap[SIFIVE_E_MROM].base, mask_rom); + memmap[SIFIVE_E_MROM].base, &s->mask_rom); /* MMIO */ s->plic = sifive_plic_create(memmap[SIFIVE_E_PLIC].base, @@ -228,10 +213,11 @@ static void riscv_sifive_e_soc_realize(DeviceState *dev, Error **errp) memmap[SIFIVE_E_PWM2].base, memmap[SIFIVE_E_PWM2].size); /* Flash memory */ - memory_region_init_ram(xip_mem, NULL, "riscv.sifive.e.xip", + memory_region_init_ram(&s->xip_mem, NULL, "riscv.sifive.e.xip", memmap[SIFIVE_E_XIP].size, &error_fatal); - memory_region_set_readonly(xip_mem, true); - memory_region_add_subregion(sys_mem, memmap[SIFIVE_E_XIP].base, xip_mem); + memory_region_set_readonly(&s->xip_mem, true); + memory_region_add_subregion(sys_mem, memmap[SIFIVE_E_XIP].base, + &s->xip_mem); } static void riscv_sifive_e_machine_init(MachineClass *mc) diff --git a/hw/riscv/sifive_prci.c b/hw/riscv/sifive_prci.c index fa136b5a9f..f406682c91 100644 --- a/hw/riscv/sifive_prci.c +++ b/hw/riscv/sifive_prci.c @@ -24,15 +24,18 @@ #include "target/riscv/cpu.h" #include "hw/riscv/sifive_prci.h" -/* currently implements enough to mock freedom-e-sdk BSP clock programming */ - static uint64_t sifive_prci_read(void *opaque, hwaddr addr, unsigned int size) { - if (addr == 0 /* PRCI_HFROSCCFG */) { - return 1 << 31; /* ROSC_RDY */ - } - if (addr == 8 /* PRCI_PLLCFG */) { - return 1 << 31; /* PLL_LOCK */ + SiFivePRCIState *s = opaque; + switch (addr) { + case 
SIFIVE_PRCI_HFROSCCFG: + return s->hfrosccfg; + case SIFIVE_PRCI_HFXOSCCFG: + return s->hfxosccfg; + case SIFIVE_PRCI_PLLCFG: + return s->pllcfg; + case SIFIVE_PRCI_PLLOUTDIV: + return s->plloutdiv; } hw_error("%s: read: addr=0x%x\n", __func__, (int)addr); return 0; @@ -41,7 +44,30 @@ static uint64_t sifive_prci_read(void *opaque, hwaddr addr, unsigned int size) static void sifive_prci_write(void *opaque, hwaddr addr, uint64_t val64, unsigned int size) { - /* discard writes */ + SiFivePRCIState *s = opaque; + switch (addr) { + case SIFIVE_PRCI_HFROSCCFG: + s->hfrosccfg = (uint32_t) val64; + /* OSC stays ready */ + s->hfrosccfg |= SIFIVE_PRCI_HFROSCCFG_RDY; + break; + case SIFIVE_PRCI_HFXOSCCFG: + s->hfxosccfg = (uint32_t) val64; + /* OSC stays ready */ + s->hfxosccfg |= SIFIVE_PRCI_HFXOSCCFG_RDY; + break; + case SIFIVE_PRCI_PLLCFG: + s->pllcfg = (uint32_t) val64; + /* PLL stays locked */ + s->pllcfg |= SIFIVE_PRCI_PLLCFG_LOCK; + break; + case SIFIVE_PRCI_PLLOUTDIV: + s->plloutdiv = (uint32_t) val64; + break; + default: + hw_error("%s: bad write: addr=0x%x v=0x%x\n", + __func__, (int)addr, (int)val64); + } } static const MemoryRegionOps sifive_prci_ops = { @@ -61,6 +87,13 @@ static void sifive_prci_init(Object *obj) memory_region_init_io(&s->mmio, obj, &sifive_prci_ops, s, TYPE_SIFIVE_PRCI, 0x8000); sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + s->hfrosccfg = (SIFIVE_PRCI_HFROSCCFG_RDY | SIFIVE_PRCI_HFROSCCFG_EN); + s->hfxosccfg = (SIFIVE_PRCI_HFROSCCFG_RDY | SIFIVE_PRCI_HFROSCCFG_EN); + s->pllcfg = (SIFIVE_PRCI_PLLCFG_REFSEL | SIFIVE_PRCI_PLLCFG_BYPASS | + SIFIVE_PRCI_PLLCFG_LOCK); + s->plloutdiv = SIFIVE_PRCI_PLLOUTDIV_DIV1; + } static const TypeInfo sifive_prci_info = { diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index 5ecc47cea3..4208671552 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -41,11 +41,11 @@ #include "hw/riscv/sifive_uart.h" #include "hw/riscv/sifive_prci.h" #include "hw/riscv/sifive_u.h" +#include "hw/riscv/boot.h" #include "chardev/char.h" #include "sysemu/arch_init.h" #include "sysemu/device_tree.h" #include "exec/address-spaces.h" -#include "elf.h" #include <libfdt.h> @@ -65,19 +65,6 @@ static const struct MemmapEntry { #define GEM_REVISION 0x10070109 -static target_ulong load_kernel(const char *kernel_filename) -{ - uint64_t kernel_entry, kernel_high; - - if (load_elf(kernel_filename, NULL, NULL, NULL, - &kernel_entry, NULL, &kernel_high, - 0, EM_RISCV, 1, 0) < 0) { - error_report("could not load kernel '%s'", kernel_filename); - exit(1); - } - return kernel_entry; -} - static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, uint64_t mem_size, const char *cmdline) { @@ -86,7 +73,7 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, uint32_t *cells; char *nodename; char ethclk_names[] = "pclk\0hclk\0tx_clk"; - uint32_t plic_phandle, ethclk_phandle; + uint32_t plic_phandle, ethclk_phandle, phandle = 1; fdt = s->fdt = create_device_tree(&s->fdt_size); if (!fdt) { @@ -121,6 +108,7 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); for (cpu = s->soc.cpus.num_harts - 1; cpu >= 0; cpu--) { + int cpu_phandle = phandle++; nodename = g_strdup_printf("/cpus/cpu@%d", cpu); char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); char *isa = riscv_isa_string(&s->soc.cpus.harts[cpu]); @@ -134,8 +122,8 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, qemu_fdt_setprop_cell(fdt, 
nodename, "reg", cpu); qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu"); qemu_fdt_add_subnode(fdt, intc); - qemu_fdt_setprop_cell(fdt, intc, "phandle", 1); - qemu_fdt_setprop_cell(fdt, intc, "linux,phandle", 1); + qemu_fdt_setprop_cell(fdt, intc, "phandle", cpu_phandle); + qemu_fdt_setprop_cell(fdt, intc, "linux,phandle", cpu_phandle); qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc"); qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0); qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1); @@ -167,6 +155,7 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, g_free(cells); g_free(nodename); + plic_phandle = phandle++; cells = g_new0(uint32_t, s->soc.cpus.num_harts * 4); for (cpu = 0; cpu < s->soc.cpus.num_harts; cpu++) { nodename = @@ -192,20 +181,21 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, qemu_fdt_setprop_string(fdt, nodename, "reg-names", "control"); qemu_fdt_setprop_cell(fdt, nodename, "riscv,max-priority", 7); qemu_fdt_setprop_cell(fdt, nodename, "riscv,ndev", 0x35); - qemu_fdt_setprop_cells(fdt, nodename, "phandle", 2); - qemu_fdt_setprop_cells(fdt, nodename, "linux,phandle", 2); + qemu_fdt_setprop_cells(fdt, nodename, "phandle", plic_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "linux,phandle", plic_phandle); plic_phandle = qemu_fdt_get_phandle(fdt, nodename); g_free(cells); g_free(nodename); + ethclk_phandle = phandle++; nodename = g_strdup_printf("/soc/ethclk"); qemu_fdt_add_subnode(fdt, nodename); qemu_fdt_setprop_string(fdt, nodename, "compatible", "fixed-clock"); qemu_fdt_setprop_cell(fdt, nodename, "#clock-cells", 0x0); qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", SIFIVE_U_GEM_CLOCK_FREQ); - qemu_fdt_setprop_cell(fdt, nodename, "phandle", 3); - qemu_fdt_setprop_cell(fdt, nodename, "linux,phandle", 3); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", ethclk_phandle); + qemu_fdt_setprop_cell(fdt, nodename, "linux,phandle", ethclk_phandle); ethclk_phandle = qemu_fdt_get_phandle(fdt, nodename); g_free(nodename); @@ -279,8 +269,12 @@ static void riscv_sifive_u_init(MachineState *machine) /* create device tree */ create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline); + if (machine->firmware) { + riscv_load_firmware(machine->firmware, memmap[SIFIVE_U_DRAM].base); + } + if (machine->kernel_filename) { - load_kernel(machine->kernel_filename); + riscv_load_kernel(machine->kernel_filename); } /* reset vector */ @@ -341,6 +335,8 @@ static void riscv_sifive_u_soc_realize(DeviceState *dev, Error **errp) MemoryRegion *system_memory = get_system_memory(); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); qemu_irq plic_gpios[SIFIVE_U_PLIC_NUM_SOURCES]; + char *plic_hart_config; + size_t plic_hart_config_len; int i; Error *err = NULL; NICInfo *nd = &nd_table[0]; @@ -354,9 +350,21 @@ static void riscv_sifive_u_soc_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(system_memory, memmap[SIFIVE_U_MROM].base, mask_rom); + /* create PLIC hart topology configuration string */ + plic_hart_config_len = (strlen(SIFIVE_U_PLIC_HART_CONFIG) + 1) * smp_cpus; + plic_hart_config = g_malloc0(plic_hart_config_len); + for (i = 0; i < smp_cpus; i++) { + if (i != 0) { + strncat(plic_hart_config, ",", plic_hart_config_len); + } + strncat(plic_hart_config, SIFIVE_U_PLIC_HART_CONFIG, + plic_hart_config_len); + plic_hart_config_len -= (strlen(SIFIVE_U_PLIC_HART_CONFIG) + 1); + } + /* MMIO */ s->plic = sifive_plic_create(memmap[SIFIVE_U_PLIC].base, - (char 
*)SIFIVE_U_PLIC_HART_CONFIG, + plic_hart_config, SIFIVE_U_PLIC_NUM_SOURCES, SIFIVE_U_PLIC_NUM_PRIORITIES, SIFIVE_U_PLIC_PRIORITY_BASE, diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index 5b33d4be3b..e68be00a5f 100644 --- a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -36,12 +36,12 @@ #include "hw/riscv/riscv_hart.h" #include "hw/riscv/sifive_clint.h" #include "hw/riscv/spike.h" +#include "hw/riscv/boot.h" #include "chardev/char.h" #include "sysemu/arch_init.h" #include "sysemu/device_tree.h" #include "sysemu/qtest.h" #include "exec/address-spaces.h" -#include "elf.h" #include <libfdt.h> @@ -54,19 +54,6 @@ static const struct MemmapEntry { [SPIKE_DRAM] = { 0x80000000, 0x0 }, }; -static target_ulong load_kernel(const char *kernel_filename) -{ - uint64_t kernel_entry, kernel_high; - - if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL, - &kernel_entry, NULL, &kernel_high, 0, EM_RISCV, 1, 0, - NULL, true, htif_symbol_callback) < 0) { - error_report("could not load kernel '%s'", kernel_filename); - exit(1); - } - return kernel_entry; -} - static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap, uint64_t mem_size, const char *cmdline) { @@ -199,7 +186,7 @@ static void spike_board_init(MachineState *machine) mask_rom); if (machine->kernel_filename) { - load_kernel(machine->kernel_filename); + riscv_load_kernel(machine->kernel_filename); } /* reset vector */ @@ -287,7 +274,7 @@ static void spike_v1_10_0_board_init(MachineState *machine) mask_rom); if (machine->kernel_filename) { - load_kernel(machine->kernel_filename); + riscv_load_kernel(machine->kernel_filename); } /* reset vector */ @@ -372,7 +359,7 @@ static void spike_v1_09_1_board_init(MachineState *machine) mask_rom); if (machine->kernel_filename) { - load_kernel(machine->kernel_filename); + riscv_load_kernel(machine->kernel_filename); } /* reset vector */ diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index 84d94d0c42..d8181a4ff1 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -34,13 +34,13 @@ #include "hw/riscv/sifive_clint.h" #include "hw/riscv/sifive_test.h" #include "hw/riscv/virt.h" +#include "hw/riscv/boot.h" #include "chardev/char.h" #include "sysemu/arch_init.h" #include "sysemu/device_tree.h" #include "exec/address-spaces.h" #include "hw/pci/pci.h" #include "hw/pci-host/gpex.h" -#include "elf.h" #include <libfdt.h> @@ -61,47 +61,6 @@ static const struct MemmapEntry { [VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 }, }; -static target_ulong load_kernel(const char *kernel_filename) -{ - uint64_t kernel_entry, kernel_high; - - if (load_elf(kernel_filename, NULL, NULL, NULL, - &kernel_entry, NULL, &kernel_high, - 0, EM_RISCV, 1, 0) < 0) { - error_report("could not load kernel '%s'", kernel_filename); - exit(1); - } - return kernel_entry; -} - -static hwaddr load_initrd(const char *filename, uint64_t mem_size, - uint64_t kernel_entry, hwaddr *start) -{ - int size; - - /* We want to put the initrd far enough into RAM that when the - * kernel is uncompressed it will not clobber the initrd. However - * on boards without much RAM we must ensure that we still leave - * enough room for a decent sized initrd, and on boards with large - * amounts of RAM we must avoid the initrd being so far up in RAM - * that it is outside lowmem and inaccessible to the kernel. - * So for boards with less than 256MB of RAM we put the initrd - * halfway into RAM, and for boards with 256MB of RAM or more we put - * the initrd at 128MB. 
- */ - *start = kernel_entry + MIN(mem_size / 2, 128 * MiB); - - size = load_ramdisk(filename, *start, mem_size - *start); - if (size == -1) { - size = load_image_targphys(filename, *start, mem_size - *start); - if (size == -1) { - error_report("could not load ramdisk '%s'", filename); - exit(1); - } - } - return *start + size; -} - static void create_pcie_irq_map(void *fdt, char *nodename, uint32_t plic_phandle) { @@ -191,6 +150,7 @@ static void *create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap, for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) { int cpu_phandle = phandle++; + int intc_phandle; nodename = g_strdup_printf("/cpus/cpu@%d", cpu); char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); char *isa = riscv_isa_string(&s->soc.harts[cpu]); @@ -203,9 +163,12 @@ static void *create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap, qemu_fdt_setprop_string(fdt, nodename, "status", "okay"); qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu); qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu"); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", cpu_phandle); + qemu_fdt_setprop_cell(fdt, nodename, "linux,phandle", cpu_phandle); + intc_phandle = phandle++; qemu_fdt_add_subnode(fdt, intc); - qemu_fdt_setprop_cell(fdt, intc, "phandle", cpu_phandle); - qemu_fdt_setprop_cell(fdt, intc, "linux,phandle", cpu_phandle); + qemu_fdt_setprop_cell(fdt, intc, "phandle", intc_phandle); + qemu_fdt_setprop_cell(fdt, intc, "linux,phandle", intc_phandle); qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc"); qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0); qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1); @@ -214,6 +177,20 @@ static void *create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap, g_free(nodename); } + /* Add cpu-topology node */ + qemu_fdt_add_subnode(fdt, "/cpus/cpu-map"); + qemu_fdt_add_subnode(fdt, "/cpus/cpu-map/cluster0"); + for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) { + char *core_nodename = g_strdup_printf("/cpus/cpu-map/cluster0/core%d", + cpu); + char *cpu_nodename = g_strdup_printf("/cpus/cpu@%d", cpu); + uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, cpu_nodename); + qemu_fdt_add_subnode(fdt, core_nodename); + qemu_fdt_setprop_cell(fdt, core_nodename, "cpu", intc_phandle); + g_free(core_nodename); + g_free(cpu_nodename); + } + cells = g_new0(uint32_t, s->soc.num_harts * 4); for (cpu = 0; cpu < s->soc.num_harts; cpu++) { nodename = @@ -298,7 +275,7 @@ static void *create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap, qemu_fdt_setprop_string(fdt, nodename, "device_type", "pci"); qemu_fdt_setprop_cell(fdt, nodename, "linux,pci-domain", 0); qemu_fdt_setprop_cells(fdt, nodename, "bus-range", 0, - memmap[VIRT_PCIE_ECAM].base / + memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1); qemu_fdt_setprop(fdt, nodename, "dma-coherent", NULL, 0); qemu_fdt_setprop_cells(fdt, nodename, "reg", 0, memmap[VIRT_PCIE_ECAM].base, @@ -421,14 +398,18 @@ static void riscv_virt_board_init(MachineState *machine) memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base, mask_rom); + if (machine->firmware) { + riscv_load_firmware(machine->firmware, memmap[VIRT_DRAM].base); + } + if (machine->kernel_filename) { - uint64_t kernel_entry = load_kernel(machine->kernel_filename); + uint64_t kernel_entry = riscv_load_kernel(machine->kernel_filename); if (machine->initrd_filename) { hwaddr start; - hwaddr end = load_initrd(machine->initrd_filename, - machine->ram_size, kernel_entry, - &start); + hwaddr 
end = riscv_load_initrd(machine->initrd_filename, + machine->ram_size, kernel_entry, + &start); qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", start); qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", diff --git a/hw/s390x/css.c b/hw/s390x/css.c index ad310b9f94..b92395f165 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -22,6 +22,7 @@ #include "trace.h" #include "hw/s390x/s390_flic.h" #include "hw/s390x/s390-virtio-ccw.h" +#include "hw/s390x/s390-ccw.h" typedef struct CrwContainer { CRW crw; @@ -1205,6 +1206,26 @@ static void sch_handle_start_func_virtual(SubchDev *sch) } +static void sch_handle_halt_func_passthrough(SubchDev *sch) +{ + int ret; + + ret = s390_ccw_halt(sch); + if (ret == -ENOSYS) { + sch_handle_halt_func(sch); + } +} + +static void sch_handle_clear_func_passthrough(SubchDev *sch) +{ + int ret; + + ret = s390_ccw_clear(sch); + if (ret == -ENOSYS) { + sch_handle_clear_func(sch); + } +} + static IOInstEnding sch_handle_start_func_passthrough(SubchDev *sch) { SCHIB *schib = &sch->curr_status; @@ -1244,11 +1265,9 @@ IOInstEnding do_subchannel_work_passthrough(SubchDev *sch) SCHIB *schib = &sch->curr_status; if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { - /* TODO: Clear handling */ - sch_handle_clear_func(sch); + sch_handle_clear_func_passthrough(sch); } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) { - /* TODO: Halt handling */ - sch_handle_halt_func(sch); + sch_handle_halt_func_passthrough(sch); } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) { return sch_handle_start_func_passthrough(sch); } diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c index 8403f0e3e9..22c6878b84 100644 --- a/hw/s390x/s390-ccw.c +++ b/hw/s390x/s390-ccw.c @@ -30,6 +30,26 @@ IOInstEnding s390_ccw_cmd_request(SubchDev *sch) return cdc->handle_request(sch); } +int s390_ccw_halt(SubchDev *sch) +{ + S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(sch->driver_data); + + if (!cdc->handle_halt) { + return -ENOSYS; + } + return cdc->handle_halt(sch); +} + +int s390_ccw_clear(SubchDev *sch) +{ + S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(sch->driver_data); + + if (!cdc->handle_clear) { + return -ENOSYS; + } + return cdc->handle_clear(sch); +} + static void s390_ccw_get_dev_info(S390CCWDevice *cdev, char *sysfsdev, Error **errp) diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c index daac936698..e5bd92c0c7 100644 --- a/hw/s390x/s390-skeys.c +++ b/hw/s390x/s390-skeys.c @@ -14,7 +14,7 @@ #include "hw/boards.h" #include "hw/s390x/storage-keys.h" #include "qapi/error.h" -#include "qapi/qapi-commands-target.h" +#include "qapi/qapi-commands-misc-target.h" #include "qapi/qmp/qdict.h" #include "qemu/error-report.h" #include "sysemu/kvm.h" diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index 7e4f61fc3e..99f53e87f7 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -1406,6 +1406,7 @@ static void ss5_class_init(ObjectClass *oc, void *data) mc->is_default = 1; mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Fujitsu-MB86904"); + mc->default_display = "tcx"; } static const TypeInfo ss5_type = { @@ -1424,6 +1425,7 @@ static void ss10_class_init(ObjectClass *oc, void *data) mc->max_cpus = 4; mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-SuperSparc-II"); + mc->default_display = "tcx"; } static const TypeInfo ss10_type = { @@ -1442,6 +1444,7 @@ static void ss600mp_class_init(ObjectClass *oc, void *data) mc->max_cpus = 4; mc->default_boot_order = "c"; mc->default_cpu_type = 
SPARC_CPU_TYPE_NAME("TI-SuperSparc-II"); + mc->default_display = "tcx"; } static const TypeInfo ss600mp_type = { @@ -1460,6 +1463,7 @@ static void ss20_class_init(ObjectClass *oc, void *data) mc->max_cpus = 4; mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-SuperSparc-II"); + mc->default_display = "tcx"; } static const TypeInfo ss20_type = { @@ -1477,6 +1481,7 @@ static void voyager_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_SCSI; mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Fujitsu-MB86904"); + mc->default_display = "tcx"; } static const TypeInfo voyager_type = { @@ -1494,6 +1499,7 @@ static void ss_lx_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_SCSI; mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-MicroSparc-I"); + mc->default_display = "tcx"; } static const TypeInfo ss_lx_type = { @@ -1511,6 +1517,7 @@ static void ss4_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_SCSI; mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Fujitsu-MB86904"); + mc->default_display = "tcx"; } static const TypeInfo ss4_type = { @@ -1528,6 +1535,7 @@ static void scls_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_SCSI; mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-MicroSparc-I"); + mc->default_display = "tcx"; } static const TypeInfo scls_type = { @@ -1545,6 +1553,7 @@ static void sbook_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_SCSI; mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-MicroSparc-I"); + mc->default_display = "tcx"; } static const TypeInfo sbook_type = { diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 9eda0d720b..81f2fb7f70 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -913,6 +913,7 @@ static const VMStateDescription vmstate_aspeed_smc = { static Property aspeed_smc_properties[] = { DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1), + DEFINE_PROP_UINT64("sdram-base", AspeedSMCState, sdram_base, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/timer/Makefile.objs b/hw/timer/Makefile.objs index 0e9a4530f8..123d92c969 100644 --- a/hw/timer/Makefile.objs +++ b/hw/timer/Makefile.objs @@ -41,7 +41,7 @@ obj-$(CONFIG_MC146818RTC) += mc146818rtc.o obj-$(CONFIG_ALLWINNER_A10_PIT) += allwinner-a10-pit.o common-obj-$(CONFIG_STM32F2XX_TIMER) += stm32f2xx_timer.o -common-obj-$(CONFIG_ASPEED_SOC) += aspeed_timer.o +common-obj-$(CONFIG_ASPEED_SOC) += aspeed_timer.o aspeed_rtc.o common-obj-$(CONFIG_SUN4V_RTC) += sun4v-rtc.o common-obj-$(CONFIG_CMSDK_APB_TIMER) += cmsdk-apb-timer.o diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c index a17317ce2f..94640743b5 100644 --- a/hw/timer/armv7m_systick.c +++ b/hw/timer/armv7m_systick.c @@ -75,11 +75,17 @@ static void systick_timer_tick(void *opaque) } } -static uint64_t systick_read(void *opaque, hwaddr addr, unsigned size) +static MemTxResult systick_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) { SysTickState *s = opaque; uint32_t val; + if (attrs.user) { + /* Generate BusFault for unprivileged accesses */ + return MEMTX_ERROR; + } + switch (addr) { case 0x0: /* SysTick Control and Status. 
*/ val = s->control; @@ -121,14 +127,21 @@ static uint64_t systick_read(void *opaque, hwaddr addr, unsigned size) } trace_systick_read(addr, val, size); - return val; + *data = val; + return MEMTX_OK; } -static void systick_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) +static MemTxResult systick_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) { SysTickState *s = opaque; + if (attrs.user) { + /* Generate BusFault for unprivileged accesses */ + return MEMTX_ERROR; + } + trace_systick_write(addr, value, size); switch (addr) { @@ -172,11 +185,12 @@ static void systick_write(void *opaque, hwaddr addr, qemu_log_mask(LOG_GUEST_ERROR, "SysTick: Bad write offset 0x%" HWADDR_PRIx "\n", addr); } + return MEMTX_OK; } static const MemoryRegionOps systick_ops = { - .read = systick_read, - .write = systick_write, + .read_with_attrs = systick_read, + .write_with_attrs = systick_write, .endianness = DEVICE_NATIVE_ENDIAN, .valid.min_access_size = 4, .valid.max_access_size = 4, diff --git a/hw/timer/aspeed_rtc.c b/hw/timer/aspeed_rtc.c new file mode 100644 index 0000000000..19f061c846 --- /dev/null +++ b/hw/timer/aspeed_rtc.c @@ -0,0 +1,180 @@ +/* + * ASPEED Real Time Clock + * Joel Stanley <joel@jms.id.au> + * + * Copyright 2019 IBM Corp + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/timer/aspeed_rtc.h" +#include "qemu/log.h" +#include "qemu/timer.h" + +#include "trace.h" + +#define COUNTER1 (0x00 / 4) +#define COUNTER2 (0x04 / 4) +#define ALARM (0x08 / 4) +#define CONTROL (0x10 / 4) +#define ALARM_STATUS (0x14 / 4) + +#define RTC_UNLOCKED BIT(1) +#define RTC_ENABLED BIT(0) + +static void aspeed_rtc_calc_offset(AspeedRtcState *rtc) +{ + struct tm tm; + uint32_t year, cent; + uint32_t reg1 = rtc->reg[COUNTER1]; + uint32_t reg2 = rtc->reg[COUNTER2]; + + tm.tm_mday = (reg1 >> 24) & 0x1f; + tm.tm_hour = (reg1 >> 16) & 0x1f; + tm.tm_min = (reg1 >> 8) & 0x3f; + tm.tm_sec = (reg1 >> 0) & 0x3f; + + cent = (reg2 >> 16) & 0x1f; + year = (reg2 >> 8) & 0x7f; + tm.tm_mon = ((reg2 >> 0) & 0x0f) - 1; + tm.tm_year = year + (cent * 100) - 1900; + + rtc->offset = qemu_timedate_diff(&tm); +} + +static uint32_t aspeed_rtc_get_counter(AspeedRtcState *rtc, int r) +{ + uint32_t year, cent; + struct tm now; + + qemu_get_timedate(&now, rtc->offset); + + switch (r) { + case COUNTER1: + return (now.tm_mday << 24) | (now.tm_hour << 16) | + (now.tm_min << 8) | now.tm_sec; + case COUNTER2: + cent = (now.tm_year + 1900) / 100; + year = now.tm_year % 100; + return ((cent & 0x1f) << 16) | ((year & 0x7f) << 8) | + ((now.tm_mon + 1) & 0xf); + default: + g_assert_not_reached(); + } +} + +static uint64_t aspeed_rtc_read(void *opaque, hwaddr addr, + unsigned size) +{ + AspeedRtcState *rtc = opaque; + uint64_t val; + uint32_t r = addr >> 2; + + switch (r) { + case COUNTER1: + case COUNTER2: + if (rtc->reg[CONTROL] & RTC_ENABLED) { + rtc->reg[r] = aspeed_rtc_get_counter(rtc, r); + } + /* fall through */ + case CONTROL: + val = rtc->reg[r]; + break; + case ALARM: + case ALARM_STATUS: + default: + qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx "\n", __func__, addr); + return 0; + } + + trace_aspeed_rtc_read(addr, val); + + return val; +} + +static void aspeed_rtc_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + AspeedRtcState *rtc = opaque; + uint32_t r = addr >> 2; + + switch (r) { + case COUNTER1: + case COUNTER2: + if (!(rtc->reg[CONTROL] & RTC_UNLOCKED)) { + break; + } + /* fall through */ + case 
CONTROL: + rtc->reg[r] = val; + aspeed_rtc_calc_offset(rtc); + break; + case ALARM: + case ALARM_STATUS: + default: + qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx "\n", __func__, addr); + break; + } + trace_aspeed_rtc_write(addr, val); +} + +static void aspeed_rtc_reset(DeviceState *d) +{ + AspeedRtcState *rtc = ASPEED_RTC(d); + + rtc->offset = 0; + memset(rtc->reg, 0, sizeof(rtc->reg)); +} + +static const MemoryRegionOps aspeed_rtc_ops = { + .read = aspeed_rtc_read, + .write = aspeed_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_aspeed_rtc = { + .name = TYPE_ASPEED_RTC, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, AspeedRtcState, 0x18), + VMSTATE_INT32(offset, AspeedRtcState), + VMSTATE_INT32(offset, AspeedRtcState), + VMSTATE_END_OF_LIST() + } +}; + +static void aspeed_rtc_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedRtcState *s = ASPEED_RTC(dev); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_rtc_ops, s, + "aspeed-rtc", 0x18ULL); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void aspeed_rtc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_rtc_realize; + dc->vmsd = &vmstate_aspeed_rtc; + dc->reset = aspeed_rtc_reset; +} + +static const TypeInfo aspeed_rtc_info = { + .name = TYPE_ASPEED_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedRtcState), + .class_init = aspeed_rtc_class_init, +}; + +static void aspeed_rtc_register_types(void) +{ + type_register_static(&aspeed_rtc_info); +} + +type_init(aspeed_rtc_register_types) diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c index 2c3a4d0fe7..29cc5e8070 100644 --- a/hw/timer/aspeed_timer.c +++ b/hw/timer/aspeed_timer.c @@ -107,39 +107,49 @@ static inline uint64_t calculate_time(struct AspeedTimer *t, uint32_t ticks) return t->start + delta_ns; } +static inline uint32_t calculate_match(struct AspeedTimer *t, int i) +{ + return t->match[i] < t->reload ? t->match[i] : 0; +} + static uint64_t calculate_next(struct AspeedTimer *t) { - uint64_t next = 0; - uint32_t rate = calculate_rate(t); + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint64_t next; - while (!next) { - /* We don't know the relationship between the values in the match - * registers, so sort using MAX/MIN/zero. We sort in that order as the - * timer counts down to zero. */ - uint64_t seq[] = { - calculate_time(t, MAX(t->match[0], t->match[1])), - calculate_time(t, MIN(t->match[0], t->match[1])), - calculate_time(t, 0), - }; - uint64_t reload_ns; - uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - - if (now < seq[0]) { - next = seq[0]; - } else if (now < seq[1]) { - next = seq[1]; - } else if (now < seq[2]) { - next = seq[2]; - } else if (t->reload) { - reload_ns = muldiv64(t->reload, NANOSECONDS_PER_SECOND, rate); - t->start = now - ((now - t->start) % reload_ns); - } else { - /* no reload value, return 0 */ - break; - } + /* + * We don't know the relationship between the values in the match + * registers, so sort using MAX/MIN/zero. We sort in that order as + * the timer counts down to zero. 
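The comment ending just above is the key to the rewritten calculate_next(): a down-counting timer reaches the larger match value first, so the candidate deadlines are tried as MAX(match0, match1), then MIN(match0, match1), then zero, and calculate_match() discards matches at or above the reload value. A standalone illustration with made-up tick values (not QEMU code):

    #include <inttypes.h>
    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Matches at or above the reload value can never be hit while counting down. */
    static uint32_t effective_match(uint32_t match, uint32_t reload)
    {
        return match < reload ? match : 0;
    }

    int main(void)
    {
        uint32_t reload = 1000, match0 = 600, match1 = 2000;  /* made-up values */
        uint32_t m0 = effective_match(match0, reload);        /* 600 */
        uint32_t m1 = effective_match(match1, reload);        /* 0: out of range */

        /* Counting down from 1000, the timer hits 600 first, then 0. */
        printf("first deadline at tick %" PRIu32 "\n", MAX(m0, m1));  /* 600 */
        printf("then at tick %" PRIu32 "\n", MIN(m0, m1));            /* 0   */
        return 0;
    }
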
+ */ + + next = calculate_time(t, MAX(calculate_match(t, 0), calculate_match(t, 1))); + if (now < next) { + return next; + } + + next = calculate_time(t, MIN(calculate_match(t, 0), calculate_match(t, 1))); + if (now < next) { + return next; } - return next; + next = calculate_time(t, 0); + if (now < next) { + return next; + } + + /* We've missed all deadlines, fire interrupt and try again */ + timer_del(&t->timer); + + if (timer_overflow_interrupt(t)) { + t->level = !t->level; + qemu_set_irq(t->irq, t->level); + } + + next = MAX(MAX(calculate_match(t, 0), calculate_match(t, 1)), 0); + t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + return calculate_time(t, next); } static void aspeed_timer_mod(AspeedTimer *t) @@ -184,7 +194,11 @@ static uint64_t aspeed_timer_get_value(AspeedTimer *t, int reg) switch (reg) { case TIMER_REG_STATUS: - value = calculate_ticks(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + if (timer_enabled(t)) { + value = calculate_ticks(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + } else { + value = t->reload; + } break; case TIMER_REG_RELOAD: value = t->reload; @@ -261,7 +275,11 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg, int64_t delta = (int64_t) value - (int64_t) calculate_ticks(t, now); uint32_t rate = calculate_rate(t); - t->start += muldiv64(delta, NANOSECONDS_PER_SECOND, rate); + if (delta >= 0) { + t->start += muldiv64(delta, NANOSECONDS_PER_SECOND, rate); + } else { + t->start -= muldiv64(-delta, NANOSECONDS_PER_SECOND, rate); + } aspeed_timer_mod(t); } break; diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c index 0d79e000d2..ce4550b6f2 100644 --- a/hw/timer/mc146818rtc.c +++ b/hw/timer/mc146818rtc.c @@ -33,8 +33,8 @@ #include "sysemu/replay.h" #include "hw/timer/mc146818rtc.h" #include "qapi/error.h" -#include "qapi/qapi-commands-target.h" -#include "qapi/qapi-events-target.h" +#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qapi-events-misc-target.h" #include "qapi/visitor.h" #include "exec/address-spaces.h" diff --git a/hw/timer/trace-events b/hw/timer/trace-events index dcaf3d6da6..db02a9142c 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -66,6 +66,10 @@ cmsdk_apb_dualtimer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK A cmsdk_apb_dualtimer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_dualtimer_reset(void) "CMSDK APB dualtimer: reset" +# hw/timer/aspeed-rtc.c +aspeed_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 +aspeed_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 + # sun4v-rtc.c sun4v_rtc_read(uint64_t addr, uint64_t value) "read: addr 0x%" PRIx64 " value 0x%" PRIx64 sun4v_rtc_write(uint64_t addr, uint64_t value) "write: addr 0x%" PRIx64 " value 0x%" PRIx64 diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c index 03a2becb3e..6d0296fe4d 100644 --- a/hw/vfio/ccw.c +++ b/hw/vfio/ccw.c @@ -2,9 +2,12 @@ * vfio based subchannel assignment support * * Copyright 2017 IBM Corp. + * Copyright 2019 Red Hat, Inc. + * * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> * Pierre Morel <pmorel@linux.vnet.ibm.com> + * Cornelia Huck <cohuck@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. 
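Further up in the same aspeed_timer hunk, aspeed_timer_set_value() now handles negative deltas explicitly: muldiv64() operates on unsigned 64-bit values, so feeding it a negative int64_t first wraps to a huge number, and because the scaling involves a division the result is not simply the negated offset. A standalone sketch of the failure mode, using a simplified stand-in for muldiv64() (illustrative, not the QEMU implementation):

    #include <inttypes.h>
    #include <stdio.h>

    /* Simplified stand-in for QEMU's muldiv64(): a * b / c on unsigned operands
     * (the real helper uses a 128-bit intermediate; __int128 is a GCC/Clang
     * extension). */
    static uint64_t muldiv64_demo(uint64_t a, uint32_t b, uint32_t c)
    {
        return (unsigned __int128)a * b / c;
    }

    int main(void)
    {
        int64_t delta = -100;              /* counter moved 100 ticks backwards */
        uint32_t ns_per_sec = 1000000000;
        uint32_t rate = 32768;             /* a rate that does not divide 1e9 */

        /* Naive: delta wraps to ~1.8e19 before the multiply/divide, and the
         * division makes the result a garbage offset, not a wrapped -offset. */
        printf("naive offset:   %" PRIu64 " ns\n",
               muldiv64_demo(delta, ns_per_sec, rate));

        /* What the patch does: negate first, then subtract the result. */
        printf("correct offset: -%" PRIu64 " ns\n",
               muldiv64_demo(-delta, ns_per_sec, rate));   /* 3051757 ns */
        return 0;
    }
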
See the COPYING file in the top-level @@ -33,6 +36,9 @@ struct VFIOCCWDevice { uint64_t io_region_size; uint64_t io_region_offset; struct ccw_io_region *io_region; + uint64_t async_cmd_region_size; + uint64_t async_cmd_region_offset; + struct ccw_cmd_region *async_cmd_region; EventNotifier io_notifier; bool force_orb_pfch; bool warned_orb_pfch; @@ -115,6 +121,87 @@ again: } } +static int vfio_ccw_handle_clear(SubchDev *sch) +{ + S390CCWDevice *cdev = sch->driver_data; + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + struct ccw_cmd_region *region = vcdev->async_cmd_region; + int ret; + + if (!vcdev->async_cmd_region) { + /* Async command region not available, fall back to emulation */ + return -ENOSYS; + } + + memset(region, 0, sizeof(*region)); + region->command = VFIO_CCW_ASYNC_CMD_CSCH; + +again: + ret = pwrite(vcdev->vdev.fd, region, + vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset); + if (ret != vcdev->async_cmd_region_size) { + if (errno == EAGAIN) { + goto again; + } + error_report("vfio-ccw: write cmd region failed with errno=%d", errno); + ret = -errno; + } else { + ret = region->ret_code; + } + switch (ret) { + case 0: + case -ENODEV: + case -EACCES: + return 0; + case -EFAULT: + default: + sch_gen_unit_exception(sch); + css_inject_io_interrupt(sch); + return 0; + } +} + +static int vfio_ccw_handle_halt(SubchDev *sch) +{ + S390CCWDevice *cdev = sch->driver_data; + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + struct ccw_cmd_region *region = vcdev->async_cmd_region; + int ret; + + if (!vcdev->async_cmd_region) { + /* Async command region not available, fall back to emulation */ + return -ENOSYS; + } + + memset(region, 0, sizeof(*region)); + region->command = VFIO_CCW_ASYNC_CMD_HSCH; + +again: + ret = pwrite(vcdev->vdev.fd, region, + vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset); + if (ret != vcdev->async_cmd_region_size) { + if (errno == EAGAIN) { + goto again; + } + error_report("vfio-ccw: write cmd region failed with errno=%d", errno); + ret = -errno; + } else { + ret = region->ret_code; + } + switch (ret) { + case 0: + case -EBUSY: + case -ENODEV: + case -EACCES: + return 0; + case -EFAULT: + default: + sch_gen_unit_exception(sch); + css_inject_io_interrupt(sch); + return 0; + } +} + static void vfio_ccw_reset(DeviceState *dev) { CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev); @@ -198,9 +285,8 @@ static void vfio_ccw_register_io_notifier(VFIOCCWDevice *vcdev, Error **errp) { VFIODevice *vdev = &vcdev->vdev; struct vfio_irq_info *irq_info; - struct vfio_irq_set *irq_set; size_t argsz; - int32_t *pfd; + int fd; if (vdev->num_irqs < VFIO_CCW_IO_IRQ_INDEX + 1) { error_setg(errp, "vfio: unexpected number of io irqs %u", @@ -224,56 +310,32 @@ static void vfio_ccw_register_io_notifier(VFIOCCWDevice *vcdev, Error **errp) goto out_free_info; } - argsz = sizeof(*irq_set) + sizeof(*pfd); - irq_set = g_malloc0(argsz); - irq_set->argsz = argsz; - irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | - VFIO_IRQ_SET_ACTION_TRIGGER; - irq_set->index = VFIO_CCW_IO_IRQ_INDEX; - irq_set->start = 0; - irq_set->count = 1; - pfd = (int32_t *) &irq_set->data; - - *pfd = event_notifier_get_fd(&vcdev->io_notifier); - qemu_set_fd_handler(*pfd, vfio_ccw_io_notifier_handler, NULL, vcdev); - if (ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) { - error_setg(errp, "vfio: Failed to set up io notification"); - qemu_set_fd_handler(*pfd, NULL, NULL, vcdev); + fd = event_notifier_get_fd(&vcdev->io_notifier); + qemu_set_fd_handler(fd, 
vfio_ccw_io_notifier_handler, NULL, vcdev); + + if (vfio_set_irq_signaling(vdev, VFIO_CCW_IO_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) { + qemu_set_fd_handler(fd, NULL, NULL, vcdev); event_notifier_cleanup(&vcdev->io_notifier); } - g_free(irq_set); - out_free_info: g_free(irq_info); } static void vfio_ccw_unregister_io_notifier(VFIOCCWDevice *vcdev) { - struct vfio_irq_set *irq_set; - size_t argsz; - int32_t *pfd; - - argsz = sizeof(*irq_set) + sizeof(*pfd); - irq_set = g_malloc0(argsz); - irq_set->argsz = argsz; - irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | - VFIO_IRQ_SET_ACTION_TRIGGER; - irq_set->index = VFIO_CCW_IO_IRQ_INDEX; - irq_set->start = 0; - irq_set->count = 1; - pfd = (int32_t *) &irq_set->data; - *pfd = -1; + Error *err = NULL; - if (ioctl(vcdev->vdev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) { - error_report("vfio: Failed to de-assign device io fd: %m"); + vfio_set_irq_signaling(&vcdev->vdev, VFIO_CCW_IO_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err); + if (err) { + error_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name); } qemu_set_fd_handler(event_notifier_get_fd(&vcdev->io_notifier), NULL, NULL, vcdev); event_notifier_cleanup(&vcdev->io_notifier); - - g_free(irq_set); } static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) @@ -288,9 +350,13 @@ static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) return; } + /* + * We always expect at least the I/O region to be present. We also + * may have a variable number of regions governed by capabilities. + */ if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) { - error_setg(errp, "vfio: Unexpected number of the I/O region %u", - vdev->num_regions); + error_setg(errp, "vfio: too few regions (%u), expected at least %u", + vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1); return; } @@ -310,11 +376,27 @@ static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) vcdev->io_region_offset = info->offset; vcdev->io_region = g_malloc0(info->size); + /* check for the optional async command region */ + ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW, + VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info); + if (!ret) { + vcdev->async_cmd_region_size = info->size; + if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) { + error_setg(errp, "vfio: Unexpected size of the async cmd region"); + g_free(vcdev->io_region); + g_free(info); + return; + } + vcdev->async_cmd_region_offset = info->offset; + vcdev->async_cmd_region = g_malloc0(info->size); + } + g_free(info); } static void vfio_ccw_put_region(VFIOCCWDevice *vcdev) { + g_free(vcdev->async_cmd_region); g_free(vcdev->io_region); } @@ -487,6 +569,8 @@ static void vfio_ccw_class_init(ObjectClass *klass, void *data) dc->reset = vfio_ccw_reset; cdc->handle_request = vfio_ccw_handle_request; + cdc->handle_halt = vfio_ccw_handle_halt; + cdc->handle_clear = vfio_ccw_handle_clear; } static const TypeInfo vfio_ccw_info = { diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index ce3fe96efe..d7a4e1875c 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -551,9 +551,12 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) */ if (vector->virq >= 0) { int32_t fd = event_notifier_get_fd(&vector->interrupt); + Error *err = NULL; - vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr, - VFIO_IRQ_SET_ACTION_TRIGGER, fd, NULL); + if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, 
vdev->vbasedev.name); + } } } diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index e0452de4ba..3724ff8bac 100644 --- a/hw/virtio/Kconfig +++ b/hw/virtio/Kconfig @@ -29,3 +29,13 @@ config VIRTIO_CRYPTO bool default y depends on VIRTIO + +config VIRTIO_PMEM_SUPPORTED + bool + +config VIRTIO_PMEM + bool + default y + depends on VIRTIO + depends on VIRTIO_PMEM_SUPPORTED + select MEM_DEVICE diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs index 5570ea8df8..964ce78607 100644 --- a/hw/virtio/Makefile.objs +++ b/hw/virtio/Makefile.objs @@ -12,6 +12,8 @@ common-obj-$(CONFIG_VIRTIO_MMIO) += virtio-mmio.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio-balloon.o obj-$(CONFIG_VIRTIO_CRYPTO) += virtio-crypto.o obj-$(call land,$(CONFIG_VIRTIO_CRYPTO),$(CONFIG_VIRTIO_PCI)) += virtio-crypto-pci.o +obj-$(CONFIG_VIRTIO_PMEM) += virtio-pmem.o +common-obj-$(call land,$(CONFIG_VIRTIO_PMEM),$(CONFIG_VIRTIO_PCI)) += virtio-pmem-pci.o obj-$(CONFIG_VHOST_VSOCK) += vhost-vsock.o ifeq ($(CONFIG_VIRTIO_PCI),y) diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index e6d5467e54..ce928f2429 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -1913,13 +1913,6 @@ static void virtio_pci_generic_class_init(ObjectClass *klass, void *data) dc->props = virtio_pci_generic_properties; } -/* Used when the generic type and the base type is the same */ -static void virtio_pci_generic_base_class_init(ObjectClass *klass, void *data) -{ - virtio_pci_base_class_init(klass, data); - virtio_pci_generic_class_init(klass, NULL); -} - static void virtio_pci_transitional_instance_init(Object *obj) { VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); @@ -1938,15 +1931,15 @@ static void virtio_pci_non_transitional_instance_init(Object *obj) void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) { + char *base_name = NULL; TypeInfo base_type_info = { .name = t->base_name, .parent = t->parent ? 
t->parent : TYPE_VIRTIO_PCI, .instance_size = t->instance_size, .instance_init = t->instance_init, .class_size = t->class_size, - .class_init = virtio_pci_base_class_init, - .class_data = (void *)t, .abstract = true, + .interfaces = t->interfaces, }; TypeInfo generic_type_info = { .name = t->generic_name, @@ -1961,13 +1954,20 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) if (!base_type_info.name) { /* No base type -> register a single generic device type */ - base_type_info.name = t->generic_name; - base_type_info.class_init = virtio_pci_generic_base_class_init; - base_type_info.interfaces = generic_type_info.interfaces; - base_type_info.abstract = false; - generic_type_info.name = NULL; + /* use intermediate %s-base-type to add generic device props */ + base_name = g_strdup_printf("%s-base-type", t->generic_name); + base_type_info.name = base_name; + base_type_info.class_init = virtio_pci_generic_class_init; + + generic_type_info.parent = base_name; + generic_type_info.class_init = virtio_pci_base_class_init; + generic_type_info.class_data = (void *)t; + assert(!t->non_transitional_name); assert(!t->transitional_name); + } else { + base_type_info.class_init = virtio_pci_base_class_init; + base_type_info.class_data = (void *)t; } type_register(&base_type_info); @@ -2005,6 +2005,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) }; type_register(&transitional_type_info); } + g_free(base_name); } /* virtio-pci-bus */ diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h index bfea2892a5..619d9098c1 100644 --- a/hw/virtio/virtio-pci.h +++ b/hw/virtio/virtio-pci.h @@ -252,6 +252,7 @@ typedef struct VirtioPCIDeviceTypeInfo { size_t class_size; void (*instance_init)(Object *obj); void (*class_init)(ObjectClass *klass, void *data); + InterfaceInfo *interfaces; } VirtioPCIDeviceTypeInfo; /* Register virtio-pci type(s). @t must be static. */ diff --git a/hw/virtio/virtio-pmem-pci.c b/hw/virtio/virtio-pmem-pci.c new file mode 100644 index 0000000000..8b2d0dbccc --- /dev/null +++ b/hw/virtio/virtio-pmem-pci.c @@ -0,0 +1,131 @@ +/* + * Virtio PMEM PCI device + * + * Copyright (C) 2018-2019 Red Hat, Inc. + * + * Authors: + * Pankaj Gupta <pagupta@redhat.com> + * David Hildenbrand <david@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. 
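The virtio_pci_types_register() rework above mainly changes devices that set only .generic_name: rather than letting the concrete type double as its own base class, the helper now synthesizes an abstract "<generic_name>-base-type" parent and derives the generic type from it, so .interfaces and the generic properties end up on separate classes. A sketch of such a generic-only registration for a made-up device (the virtio-foo names and the VirtIOFooPCI type are hypothetical):

    /* Hypothetical generic-only device: no base/transitional/non-transitional
     * variants, just "virtio-foo-pci". Assumes VirtIOFooPCI and its callbacks
     * exist and that hw/virtio/virtio-pci.h is included. */
    static const VirtioPCIDeviceTypeInfo virtio_foo_pci_info = {
        .generic_name  = "virtio-foo-pci",
        .instance_size = sizeof(VirtIOFooPCI),
        .instance_init = virtio_foo_pci_instance_init,
        .class_init    = virtio_foo_pci_class_init,
    };

    static void virtio_foo_pci_register_types(void)
    {
        /* With the patch, this registers an abstract "virtio-foo-pci-base-type"
         * parent plus the concrete "virtio-foo-pci" type. */
        virtio_pci_types_register(&virtio_foo_pci_info);
    }
    type_init(virtio_foo_pci_register_types)

The virtio-pmem-pci registration below takes the other path, with an explicit .base_name plus transitional and non-transitional variants.
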
+ */ + +#include "qemu/osdep.h" + +#include "virtio-pmem-pci.h" +#include "hw/mem/memory-device.h" +#include "qapi/error.h" + +static void virtio_pmem_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOPMEMPCI *pmem_pci = VIRTIO_PMEM_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&pmem_pci->vdev); + + qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); + object_property_set_bool(OBJECT(vdev), true, "realized", errp); +} + +static void virtio_pmem_pci_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), addr, VIRTIO_PMEM_ADDR_PROP, errp); +} + +static uint64_t virtio_pmem_pci_get_addr(const MemoryDeviceState *md) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_PMEM_ADDR_PROP, + &error_abort); +} + +static MemoryRegion *virtio_pmem_pci_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); + VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); + + return vpc->get_memory_region(pmem, errp); +} + +static uint64_t virtio_pmem_pci_get_plugged_size(const MemoryDeviceState *md, + Error **errp) +{ + VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); + VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); + MemoryRegion *mr = vpc->get_memory_region(pmem, errp); + + /* the plugged size corresponds to the region size */ + return mr ? 0 : memory_region_size(mr); +} + +static void virtio_pmem_pci_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + VirtioPMEMDeviceInfo *vi = g_new0(VirtioPMEMDeviceInfo, 1); + VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); + VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); + DeviceState *dev = DEVICE(md); + + if (dev->id) { + vi->has_id = true; + vi->id = g_strdup(dev->id); + } + + /* let the real device handle everything else */ + vpc->fill_device_info(pmem, vi); + + info->u.virtio_pmem.data = vi; + info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM; +} + +static void virtio_pmem_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(klass); + + k->realize = virtio_pmem_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_PMEM; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_OTHERS; + + mdc->get_addr = virtio_pmem_pci_get_addr; + mdc->set_addr = virtio_pmem_pci_set_addr; + mdc->get_plugged_size = virtio_pmem_pci_get_plugged_size; + mdc->get_memory_region = virtio_pmem_pci_get_memory_region; + mdc->fill_device_info = virtio_pmem_pci_fill_device_info; +} + +static void virtio_pmem_pci_instance_init(Object *obj) +{ + VirtIOPMEMPCI *dev = VIRTIO_PMEM_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_PMEM); +} + +static const VirtioPCIDeviceTypeInfo virtio_pmem_pci_info = { + .base_name = TYPE_VIRTIO_PMEM_PCI, + .generic_name = "virtio-pmem-pci", + .transitional_name = "virtio-pmem-pci-transitional", + .non_transitional_name = "virtio-pmem-pci-non-transitional", + .instance_size = sizeof(VirtIOPMEMPCI), + .instance_init = virtio_pmem_pci_instance_init, + .class_init = virtio_pmem_pci_class_init, + .interfaces = (InterfaceInfo[]) { 
+ { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void virtio_pmem_pci_register_types(void) +{ + virtio_pci_types_register(&virtio_pmem_pci_info); +} +type_init(virtio_pmem_pci_register_types) diff --git a/hw/virtio/virtio-pmem-pci.h b/hw/virtio/virtio-pmem-pci.h new file mode 100644 index 0000000000..616abef093 --- /dev/null +++ b/hw/virtio/virtio-pmem-pci.h @@ -0,0 +1,34 @@ +/* + * Virtio PMEM PCI device + * + * Copyright (C) 2018-2019 Red Hat, Inc. + * + * Authors: + * Pankaj Gupta <pagupta@redhat.com> + * David Hildenbrand <david@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_VIRTIO_PMEM_PCI_H +#define QEMU_VIRTIO_PMEM_PCI_H + +#include "hw/virtio/virtio-pci.h" +#include "hw/virtio/virtio-pmem.h" + +typedef struct VirtIOPMEMPCI VirtIOPMEMPCI; + +/* + * virtio-pmem-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_PMEM_PCI "virtio-pmem-pci-base" +#define VIRTIO_PMEM_PCI(obj) \ + OBJECT_CHECK(VirtIOPMEMPCI, (obj), TYPE_VIRTIO_PMEM_PCI) + +struct VirtIOPMEMPCI { + VirtIOPCIProxy parent_obj; + VirtIOPMEM vdev; +}; + +#endif /* QEMU_VIRTIO_PMEM_PCI_H */ diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c new file mode 100644 index 0000000000..adbfb603ab --- /dev/null +++ b/hw/virtio/virtio-pmem.c @@ -0,0 +1,189 @@ +/* + * Virtio PMEM device + * + * Copyright (C) 2018-2019 Red Hat, Inc. + * + * Authors: + * Pankaj Gupta <pagupta@redhat.com> + * David Hildenbrand <david@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/error-report.h" +#include "hw/virtio/virtio-pmem.h" +#include "hw/virtio/virtio-access.h" +#include "standard-headers/linux/virtio_ids.h" +#include "standard-headers/linux/virtio_pmem.h" +#include "block/aio.h" +#include "block/thread-pool.h" + +typedef struct VirtIODeviceRequest { + VirtQueueElement elem; + int fd; + VirtIOPMEM *pmem; + VirtIODevice *vdev; + struct virtio_pmem_req req; + struct virtio_pmem_resp resp; +} VirtIODeviceRequest; + +static int worker_cb(void *opaque) +{ + VirtIODeviceRequest *req_data = opaque; + int err = 0; + + /* flush raw backing image */ + err = fsync(req_data->fd); + if (err != 0) { + err = 1; + } + + virtio_stw_p(req_data->vdev, &req_data->resp.ret, err); + + return 0; +} + +static void done_cb(void *opaque, int ret) +{ + VirtIODeviceRequest *req_data = opaque; + int len = iov_from_buf(req_data->elem.in_sg, req_data->elem.in_num, 0, + &req_data->resp, sizeof(struct virtio_pmem_resp)); + + /* Callbacks are serialized, so no need to use atomic ops. 
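The worker_cb()/done_cb() pair above is the core of virtio-pmem: a guest flush request is completed by fsync()ing the file descriptor behind the memory backend, making guest stores to the mapped region durable on the host. A standalone sketch of that host-side effect (the path and size are made up; this is not the QEMU code path):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        /* stand-in for the memory-backend-file backing a virtio-pmem device */
        int fd = open("/tmp/pmem-backing.img", O_RDWR | O_CREAT, 0600);
        if (fd < 0 || ftruncate(fd, 4096) < 0) {
            perror("backing file");
            return 1;
        }

        char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* guest stores land in the shared mapping... */
        strcpy(map, "hello, persistent memory");

        /* ...and a virtio-pmem flush request boils down to this on the host */
        if (fsync(fd) < 0) {
            perror("fsync");
            return 1;
        }

        munmap(map, 4096);
        close(fd);
        return 0;
    }
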
*/ + virtqueue_push(req_data->pmem->rq_vq, &req_data->elem, len); + virtio_notify((VirtIODevice *)req_data->pmem, req_data->pmem->rq_vq); + g_free(req_data); +} + +static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIODeviceRequest *req_data; + VirtIOPMEM *pmem = VIRTIO_PMEM(vdev); + HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev); + ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); + + req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest)); + if (!req_data) { + virtio_error(vdev, "virtio-pmem missing request data"); + return; + } + + if (req_data->elem.out_num < 1 || req_data->elem.in_num < 1) { + virtio_error(vdev, "virtio-pmem request not proper"); + g_free(req_data); + return; + } + req_data->fd = memory_region_get_fd(&backend->mr); + req_data->pmem = pmem; + req_data->vdev = vdev; + thread_pool_submit_aio(pool, worker_cb, req_data, done_cb, req_data); +} + +static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIOPMEM *pmem = VIRTIO_PMEM(vdev); + struct virtio_pmem_config *pmemcfg = (struct virtio_pmem_config *) config; + + virtio_stq_p(vdev, &pmemcfg->start, pmem->start); + virtio_stq_p(vdev, &pmemcfg->size, memory_region_size(&pmem->memdev->mr)); +} + +static uint64_t virtio_pmem_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + return features; +} + +static void virtio_pmem_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOPMEM *pmem = VIRTIO_PMEM(dev); + + if (!pmem->memdev) { + error_setg(errp, "virtio-pmem memdev not set"); + return; + } + + if (host_memory_backend_is_mapped(pmem->memdev)) { + char *path = object_get_canonical_path_component(OBJECT(pmem->memdev)); + error_setg(errp, "can't use already busy memdev: %s", path); + g_free(path); + return; + } + + host_memory_backend_set_mapped(pmem->memdev, true); + virtio_init(vdev, TYPE_VIRTIO_PMEM, VIRTIO_ID_PMEM, + sizeof(struct virtio_pmem_config)); + pmem->rq_vq = virtio_add_queue(vdev, 128, virtio_pmem_flush); +} + +static void virtio_pmem_unrealize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOPMEM *pmem = VIRTIO_PMEM(dev); + + host_memory_backend_set_mapped(pmem->memdev, false); + virtio_cleanup(vdev); +} + +static void virtio_pmem_fill_device_info(const VirtIOPMEM *pmem, + VirtioPMEMDeviceInfo *vi) +{ + vi->memaddr = pmem->start; + vi->size = pmem->memdev ? 
memory_region_size(&pmem->memdev->mr) : 0; + vi->memdev = object_get_canonical_path(OBJECT(pmem->memdev)); +} + +static MemoryRegion *virtio_pmem_get_memory_region(VirtIOPMEM *pmem, + Error **errp) +{ + if (!pmem->memdev) { + error_setg(errp, "'%s' property must be set", VIRTIO_PMEM_MEMDEV_PROP); + return NULL; + } + + return &pmem->memdev->mr; +} + +static Property virtio_pmem_properties[] = { + DEFINE_PROP_UINT64(VIRTIO_PMEM_ADDR_PROP, VirtIOPMEM, start, 0), + DEFINE_PROP_LINK(VIRTIO_PMEM_MEMDEV_PROP, VirtIOPMEM, memdev, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_pmem_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VirtIOPMEMClass *vpc = VIRTIO_PMEM_CLASS(klass); + + dc->props = virtio_pmem_properties; + + vdc->realize = virtio_pmem_realize; + vdc->unrealize = virtio_pmem_unrealize; + vdc->get_config = virtio_pmem_get_config; + vdc->get_features = virtio_pmem_get_features; + + vpc->fill_device_info = virtio_pmem_fill_device_info; + vpc->get_memory_region = virtio_pmem_get_memory_region; +} + +static TypeInfo virtio_pmem_info = { + .name = TYPE_VIRTIO_PMEM, + .parent = TYPE_VIRTIO_DEVICE, + .class_size = sizeof(VirtIOPMEMClass), + .class_init = virtio_pmem_class_init, + .instance_size = sizeof(VirtIOPMEM), +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_pmem_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index e1e90fcfd6..18f9f4c372 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -1162,9 +1162,10 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val) } } } - vdev->started = val & VIRTIO_CONFIG_S_DRIVER_OK; - if (unlikely(vdev->start_on_kick && vdev->started)) { - vdev->start_on_kick = false; + + if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) != + (val & VIRTIO_CONFIG_S_DRIVER_OK)) { + virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK); } if (k->set_status) { @@ -1214,8 +1215,7 @@ void virtio_reset(void *opaque) k->reset(vdev); } - vdev->start_on_kick = (virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1) && - !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)); + vdev->start_on_kick = false; vdev->started = false; vdev->broken = false; vdev->guest_features = 0; @@ -1536,8 +1536,7 @@ static bool virtio_queue_notify_aio_vq(VirtQueue *vq) ret = vq->handle_aio_output(vdev, vq); if (unlikely(vdev->start_on_kick)) { - vdev->started = true; - vdev->start_on_kick = false; + virtio_set_started(vdev, true); } } @@ -1557,8 +1556,7 @@ static void virtio_queue_notify_vq(VirtQueue *vq) vq->handle_output(vdev, vq); if (unlikely(vdev->start_on_kick)) { - vdev->started = true; - vdev->start_on_kick = false; + virtio_set_started(vdev, true); } } } @@ -1576,11 +1574,10 @@ void virtio_queue_notify(VirtIODevice *vdev, int n) event_notifier_set(&vq->host_notifier); } else if (vq->handle_output) { vq->handle_output(vdev, vq); - } - if (unlikely(vdev->start_on_kick)) { - vdev->started = true; - vdev->start_on_kick = false; + if (unlikely(vdev->start_on_kick)) { + virtio_set_started(vdev, true); + } } } @@ -2069,14 +2066,21 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val) return -EINVAL; } ret = virtio_set_features_nocheck(vdev, val); - if (!ret && virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { - /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. 
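The virtio.c hunks above route all changes of the started state through virtio_set_started() and test virtio_device_started() instead of reading vdev->started directly, tied to the new "use-started" property so older machine types can keep the previous migration behaviour. Those helpers live in the virtio core header rather than in this diff; they are assumed to behave roughly as follows (an assumption inferred from their use here, not code taken from this patch):

    /* Assumed behaviour of the helpers used above (virtio core header). */
    static inline void virtio_set_started(VirtIODevice *vdev, bool started)
    {
        if (started) {
            vdev->start_on_kick = false;  /* a kick or DRIVER_OK really started us */
        }
        if (vdev->use_started) {
            vdev->started = started;      /* only tracked when "use-started" is on */
        }
    }

    static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
    {
        /* fall back to the status bit when "use-started" is disabled */
        return vdev->use_started ? vdev->started
                                 : (status & VIRTIO_CONFIG_S_DRIVER_OK);
    }
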
*/ - int i; - for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { - if (vdev->vq[i].vring.num != 0) { - virtio_init_region_cache(vdev, i); + if (!ret) { + if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { + /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */ + int i; + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (vdev->vq[i].vring.num != 0) { + virtio_init_region_cache(vdev, i); + } } } + + if (!virtio_device_started(vdev, vdev->status) && + !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + vdev->start_on_kick = true; + } } return ret; } @@ -2228,6 +2232,11 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) } } + if (!virtio_device_started(vdev, vdev->status) && + !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + vdev->start_on_kick = true; + } + rcu_read_lock(); for (i = 0; i < num; i++) { if (vdev->vq[i].vring.desc) { @@ -2291,7 +2300,7 @@ static void virtio_vmstate_change(void *opaque, int running, RunState state) VirtIODevice *vdev = opaque; BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - bool backend_run = running && vdev->started; + bool backend_run = running && virtio_device_started(vdev, vdev->status); vdev->vm_running = running; if (backend_run) { @@ -2330,8 +2339,7 @@ void virtio_init(VirtIODevice *vdev, const char *name, g_malloc0(sizeof(*vdev->vector_queues) * nvectors); } - vdev->start_on_kick = (virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1) && - !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)); + vdev->start_on_kick = false; vdev->started = false; vdev->device_id = device_id; vdev->status = 0; @@ -2669,6 +2677,7 @@ static void virtio_device_instance_finalize(Object *obj) static Property virtio_properties[] = { DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features), + DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c index 4a8409f0da..57fe24ae6b 100644 --- a/hw/watchdog/wdt_aspeed.c +++ b/hw/watchdog/wdt_aspeed.c @@ -44,6 +44,9 @@ #define WDT_RESTART_MAGIC 0x4755 +#define SCU_RESET_CONTROL1 (0x04 / 4) +#define SCU_RESET_SDRAM BIT(0) + static bool aspeed_wdt_is_enabled(const AspeedWDTState *s) { return s->regs[WDT_CTRL] & WDT_CTRL_ENABLE; @@ -222,6 +225,13 @@ static void aspeed_wdt_timer_expired(void *dev) { AspeedWDTState *s = ASPEED_WDT(dev); + /* Do not reset on SDRAM controller reset */ + if (s->scu->regs[SCU_RESET_CONTROL1] & SCU_RESET_SDRAM) { + timer_del(s->timer); + s->regs[WDT_CTRL] = 0; + return; + } + qemu_log_mask(CPU_LOG_RESET, "Watchdog timer expired.\n"); watchdog_perform_action(); timer_del(s->timer); @@ -233,6 +243,16 @@ static void aspeed_wdt_realize(DeviceState *dev, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AspeedWDTState *s = ASPEED_WDT(dev); + Error *err = NULL; + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "scu", &err); + if (!obj) { + error_propagate(errp, err); + error_prepend(errp, "required link 'scu' not found: "); + return; + } + s->scu = ASPEED_SCU(obj); if (!is_supported_silicon_rev(s->silicon_rev)) { error_setg(errp, "Unknown silicon revision: 0x%" PRIx32, |