61 files changed, 1316 insertions, 236 deletions
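Before the per-file hunks: the Raspberry Pi 2 changes below thread an "sd-bus" property alias up through bcm2835_peripherals and bcm2836 so that the board code in hw/arm/raspi.c can create an SD card and plug it onto the SoC's SDHCI controller. A minimal sketch of that board-side pattern, lifted from the raspi2_init hunk below, is shown here; the helper name plug_default_sd_card is hypothetical, and the sketch assumes the usual raspi.c includes ("sysemu/blockdev.h", "hw/sd/sd.h", "qapi/error.h", "qemu/error-report.h") and a SoC device that exposes the new "sd-bus" alias.

    /* Sketch only: plug the default IF_SD drive into the SoC's "sd-bus",
     * mirroring the raspi2_init hunk in hw/arm/raspi.c below. */
    static void plug_default_sd_card(DeviceState *soc)
    {
        DriveInfo *di = drive_get_next(IF_SD);   /* e.g. -drive if=sd,file=... or -sd */
        BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL;
        BusState *bus = qdev_get_child_bus(soc, "sd-bus");
        DeviceState *carddev;

        if (bus == NULL) {
            error_report("No SD bus found in SOC object");
            exit(1);
        }
        carddev = qdev_create(bus, TYPE_SD_CARD);
        qdev_prop_set_drive(carddev, "drive", blk, &error_fatal);
        object_property_set_bool(OBJECT(carddev), true, "realized", &error_fatal);
    }

In the actual hunk this runs inline in raspi2_init(), after the SoC has been realized, which is why the "sd-bus" aliases added in bcm2836.c and bcm2835_peripherals.c are needed in the first place.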
diff --git a/MAINTAINERS b/MAINTAINERS index 606d9c08b5..caa5260e7c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -363,6 +363,7 @@ M: Dmitry Solodkiy <d.solodkiy@samsung.com> L: qemu-arm@nongnu.org S: Maintained F: hw/*/exynos* +F: include/hw/arm/exynos4210.h Calxeda Highbank M: Rob Herring <robh@kernel.org> @@ -390,6 +391,7 @@ L: qemu-arm@nongnu.org S: Odd fixes F: hw/*/imx* F: hw/arm/kzm.c +F: include/hw/arm/fsl-imx31.h Integrator CP M: Peter Maydell <peter.maydell@linaro.org> @@ -432,6 +434,7 @@ F: hw/arm/spitz.c F: hw/arm/tosa.c F: hw/arm/z2.c F: hw/*/pxa2xx* +F: include/hw/arm/pxa.h Stellaris M: Peter Maydell <peter.maydell@linaro.org> @@ -768,6 +771,7 @@ OMAP M: Peter Maydell <peter.maydell@linaro.org> S: Maintained F: hw/*/omap* +F: include/hw/arm/omap.h IPack M: Alberto Garcia <berto@igalia.com> @@ -1241,6 +1245,7 @@ F: include/migration/ F: migration/ F: scripts/vmstate-static-checker.py F: tests/vmstate-static-checker-data/ +F: docs/migration.txt Seccomp M: Eduardo Otubo <eduardo.otubo@profitbricks.com> diff --git a/disas/mips.c b/disas/mips.c index 0e488d8578..249931b735 100644 --- a/disas/mips.c +++ b/disas/mips.c @@ -1405,6 +1405,10 @@ const struct mips_opcode mips_builtin_opcodes[] = {"cmp.sor.d", "D,S,T", 0x46a00019, 0xffe0003f, RD_S|RD_T|WR_D|FP_D, 0, I32R6}, {"cmp.sune.d", "D,S,T", 0x46a0001a, 0xffe0003f, RD_S|RD_T|WR_D|FP_D, 0, I32R6}, {"cmp.sne.d", "D,S,T", 0x46a0001b, 0xffe0003f, RD_S|RD_T|WR_D|FP_D, 0, I32R6}, +{"dvp", "", 0x41600024, 0xffffffff, TRAP, 0, I32R6}, +{"dvp", "t", 0x41600024, 0xffe0ffff, TRAP|WR_t, 0, I32R6}, +{"evp", "", 0x41600004, 0xffffffff, TRAP, 0, I32R6}, +{"evp", "t", 0x41600004, 0xffe0ffff, TRAP|WR_t, 0, I32R6}, /* MSA */ {"sll.b", "+d,+e,+f", 0x7800000d, 0xffe0003f, WR_VD|RD_VS|RD_VT, 0, MSA}, diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index 72467fd907..6d66fa0280 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -182,6 +182,13 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, INTERRUPT_ARASANSDIO)); + object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->sdhci), "sd-bus", + &err); + if (err) { + error_propagate(errp, err); + return; + } + } static void bcm2835_peripherals_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c index 15c7622ad1..032143905e 100644 --- a/hw/arm/bcm2836.c +++ b/hw/arm/bcm2836.c @@ -73,6 +73,13 @@ static void bcm2836_realize(DeviceState *dev, Error **errp) return; } + object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->peripherals), + "sd-bus", &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->peripherals), 0, BCM2836_PERI_BASE, 1); diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c index 48d014c8d3..65822792fe 100644 --- a/hw/arm/raspi.c +++ b/hw/arm/raspi.c @@ -113,6 +113,10 @@ static void setup_boot(MachineState *machine, int version, size_t ram_size) static void raspi2_init(MachineState *machine) { RasPiState *s = g_new0(RasPiState, 1); + DriveInfo *di; + BlockBackend *blk; + BusState *bus; + DeviceState *carddev; object_initialize(&s->soc, sizeof(s->soc), TYPE_BCM2836); object_property_add_child(OBJECT(machine), "soc", OBJECT(&s->soc), @@ -133,6 +137,18 @@ static void raspi2_init(MachineState *machine) &error_abort); object_property_set_bool(OBJECT(&s->soc), true, "realized", &error_abort); + /* Create and plug in the SD cards 
*/ + di = drive_get_next(IF_SD); + blk = di ? blk_by_legacy_dinfo(di) : NULL; + bus = qdev_get_child_bus(DEVICE(&s->soc), "sd-bus"); + if (bus == NULL) { + error_report("No SD bus found in SOC object"); + exit(1); + } + carddev = qdev_create(bus, TYPE_SD_CARD); + qdev_prop_set_drive(carddev, "drive", blk, &error_fatal); + object_property_set_bool(OBJECT(carddev), true, "realized", &error_fatal); + setup_boot(machine, 2, machine->ram_size); } diff --git a/hw/core/machine.c b/hw/core/machine.c index 6d1a0d8eeb..a8c4680b0c 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -312,6 +312,21 @@ static bool machine_get_suppress_vmdesc(Object *obj, Error **errp) return ms->suppress_vmdesc; } +static void machine_set_enforce_config_section(Object *obj, bool value, + Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->enforce_config_section = value; +} + +static bool machine_get_enforce_config_section(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->enforce_config_section; +} + static int error_on_sysbus_device(SysBusDevice *sbdev, void *opaque) { error_report("Option '-device %s' cannot be handled by this machine", @@ -467,6 +482,12 @@ static void machine_initfn(Object *obj) object_property_set_description(obj, "suppress-vmdesc", "Set on to disable self-describing migration", NULL); + object_property_add_bool(obj, "enforce-config-section", + machine_get_enforce_config_section, + machine_set_enforce_config_section, NULL); + object_property_set_description(obj, "enforce-config-section", + "Set on to enforce configuration section migration", + NULL); /* Register notifier when init is done for sysbus sanity checks */ ms->sysbus_notifier.notify = machine_init_notify; diff --git a/hw/gpio/pl061.c b/hw/gpio/pl061.c index 5ece8b068e..29dc7fc38e 100644 --- a/hw/gpio/pl061.c +++ b/hw/gpio/pl061.c @@ -60,6 +60,7 @@ typedef struct PL061State { qemu_irq irq; qemu_irq out[8]; const unsigned char *id; + uint32_t rsvd_start; /* reserved area: [rsvd_start, 0xfcc] */ } PL061State; static const VMStateDescription vmstate_pl061 = { @@ -152,12 +153,15 @@ static uint64_t pl061_read(void *opaque, hwaddr offset, { PL061State *s = (PL061State *)opaque; - if (offset >= 0xfd0 && offset < 0x1000) { - return s->id[(offset - 0xfd0) >> 2]; - } if (offset < 0x400) { return s->data & (offset >> 2); } + if (offset >= s->rsvd_start && offset <= 0xfcc) { + goto err_out; + } + if (offset >= 0xfd0 && offset < 0x1000) { + return s->id[(offset - 0xfd0) >> 2]; + } switch (offset) { case 0x400: /* Direction */ return s->dir; @@ -198,10 +202,12 @@ static uint64_t pl061_read(void *opaque, hwaddr offset, case 0x528: /* Analog mode select */ return s->amsel; default: - qemu_log_mask(LOG_GUEST_ERROR, - "pl061_read: Bad offset %x\n", (int)offset); - return 0; + break; } +err_out: + qemu_log_mask(LOG_GUEST_ERROR, + "pl061_read: Bad offset %x\n", (int)offset); + return 0; } static void pl061_write(void *opaque, hwaddr offset, @@ -216,6 +222,9 @@ static void pl061_write(void *opaque, hwaddr offset, pl061_update(s); return; } + if (offset >= s->rsvd_start) { + goto err_out; + } switch (offset) { case 0x400: /* Direction */ s->dir = value & 0xff; @@ -274,10 +283,13 @@ static void pl061_write(void *opaque, hwaddr offset, s->amsel = value & 0xff; break; default: - qemu_log_mask(LOG_GUEST_ERROR, - "pl061_write: Bad offset %x\n", (int)offset); + goto err_out; } pl061_update(s); + return; +err_out: + qemu_log_mask(LOG_GUEST_ERROR, + "pl061_write: Bad offset %x\n", (int)offset); } static void 
pl061_reset(DeviceState *dev) @@ -347,6 +359,7 @@ static void pl061_luminary_init(Object *obj) PL061State *s = PL061(obj); s->id = pl061_id_luminary; + s->rsvd_start = 0x52c; } static void pl061_init(Object *obj) @@ -354,6 +367,7 @@ static void pl061_init(Object *obj) PL061State *s = PL061(obj); s->id = pl061_id; + s->rsvd_start = 0x424; } static void pl061_class_init(ObjectClass *klass, void *data) diff --git a/hw/intc/xics.c b/hw/intc/xics.c index e66ae32881..213a370925 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -712,7 +712,7 @@ static int ics_find_free_block(ICSState *ics, int num, int alignnum) return -1; } -int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi) +int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp) { ICSState *ics = &icp->ics[src]; int irq; @@ -720,14 +720,14 @@ int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi) if (irq_hint) { assert(src == xics_find_source(icp, irq_hint)); if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) { - trace_xics_alloc_failed_hint(src, irq_hint); + error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint); return -1; } irq = irq_hint; } else { irq = ics_find_free_block(ics, 1, 1); if (irq < 0) { - trace_xics_alloc_failed_no_left(src); + error_setg(errp, "can't allocate IRQ: no IRQ left"); return -1; } irq += ics->offset; @@ -743,7 +743,8 @@ int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi) * Allocate block of consecutive IRQs, and return the number of the first IRQ in the block. * If align==true, aligns the first IRQ number to num. */ -int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align) +int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align, + Error **errp) { int i, first = -1; ICSState *ics = &icp->ics[src]; @@ -763,6 +764,10 @@ int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align) } else { first = ics_find_free_block(ics, num, 1); } + if (first < 0) { + error_setg(errp, "can't find a free %d-IRQ block", num); + return -1; + } if (first >= 0) { for (i = first; i < first + num; ++i) { diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index d81dea7b0d..6051f17dbd 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -557,11 +557,13 @@ void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq, DBDMA_DPRINTF("DBDMA_register_channel 0x%x\n", nchan); + assert(rw); + assert(flush); + ch->irq = irq; ch->rw = rw; ch->flush = flush; ch->io.opaque = opaque; - ch->io.channel = ch; } static void @@ -775,6 +777,20 @@ static void dbdma_reset(void *opaque) memset(s->channels[i].regs, 0, DBDMA_SIZE); } +static void dbdma_unassigned_rw(DBDMA_io *io) +{ + DBDMA_channel *ch = io->channel; + qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", + __func__, ch->channel); +} + +static void dbdma_unassigned_flush(DBDMA_io *io) +{ + DBDMA_channel *ch = io->channel; + qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", + __func__, ch->channel); +} + void* DBDMA_init (MemoryRegion **dbdma_mem) { DBDMAState *s; @@ -784,8 +800,13 @@ void* DBDMA_init (MemoryRegion **dbdma_mem) for (i = 0; i < DBDMA_CHANNELS; i++) { DBDMA_io *io = &s->channels[i].io; + DBDMA_channel *ch = &s->channels[i]; qemu_iovec_init(&io->iov, 1); - s->channels[i].channel = i; + + ch->rw = dbdma_unassigned_rw; + ch->flush = dbdma_unassigned_flush; + ch->channel = i; + ch->io.channel = ch; } memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000); diff --git 
a/hw/ppc/spapr.c b/hw/ppc/spapr.c index c119f55824..e9d4abf06a 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -2427,6 +2427,7 @@ static void spapr_machine_2_3_instance_options(MachineState *machine) spapr_machine_2_4_instance_options(machine); savevm_skip_section_footers(); global_state_set_optional(); + savevm_skip_configuration(); } static void spapr_machine_2_3_class_options(MachineClass *mc) @@ -2452,6 +2453,7 @@ DEFINE_SPAPR_MACHINE(2_3, "2.3", false); static void spapr_machine_2_2_instance_options(MachineState *machine) { spapr_machine_2_3_instance_options(machine); + machine->suppress_vmdesc = true; } static void spapr_machine_2_2_class_options(MachineClass *mc) diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c index f5eac4b544..39f4682f95 100644 --- a/hw/ppc/spapr_events.c +++ b/hw/ppc/spapr_events.c @@ -588,7 +588,8 @@ out_no_events: void spapr_events_init(sPAPRMachineState *spapr) { QTAILQ_INIT(&spapr->pending_events); - spapr->check_exception_irq = xics_alloc(spapr->icp, 0, 0, false); + spapr->check_exception_irq = xics_alloc(spapr->icp, 0, 0, false, + &error_fatal); spapr->epow_notifier.notify = spapr_powerdown_req; qemu_register_powerdown_notifier(&spapr->epow_notifier); spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception", diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index cca9257fec..e8edad3ab7 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -275,11 +275,12 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */ unsigned int seq_num = rtas_ld(args, 5); unsigned int ret_intr_type; - unsigned int irq, max_irqs = 0, num = 0; + unsigned int irq, max_irqs = 0; sPAPRPHBState *phb = NULL; PCIDevice *pdev = NULL; spapr_pci_msi *msi; int *config_addr_key; + Error *err = NULL; switch (func) { case RTAS_CHANGE_MSI_FN: @@ -305,9 +306,10 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, return; } + msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr); + /* Releasing MSIs */ if (!req_num) { - msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr); if (!msi) { trace_spapr_pci_msi("Releasing wrong config", config_addr); rtas_st(rets, 0, RTAS_OUT_HW_ERROR); @@ -316,10 +318,10 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, xics_free(spapr->icp, msi->first_irq, msi->num); if (msi_present(pdev)) { - spapr_msi_setmsg(pdev, 0, false, 0, num); + spapr_msi_setmsg(pdev, 0, false, 0, 0); } if (msix_present(pdev)) { - spapr_msi_setmsg(pdev, 0, true, 0, num); + spapr_msi_setmsg(pdev, 0, true, 0, 0); } g_hash_table_remove(phb->msi, &config_addr); @@ -353,13 +355,20 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, /* Allocate MSIs */ irq = xics_alloc_block(spapr->icp, 0, req_num, false, - ret_intr_type == RTAS_TYPE_MSI); - if (!irq) { - error_report("Cannot allocate MSIs for device %x", config_addr); + ret_intr_type == RTAS_TYPE_MSI, &err); + if (err) { + error_reportf_err(err, "Can't allocate MSIs for device %x: ", + config_addr); rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } + /* Release previous MSIs */ + if (msi) { + xics_free(spapr->icp, msi->first_irq, msi->num); + g_hash_table_remove(phb->msi, &config_addr); + } + /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */ spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX, irq, req_num); @@ -1360,10 +1369,12 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) 
/* Initialize the LSI table */ for (i = 0; i < PCI_NUM_PINS; i++) { uint32_t irq; + Error *local_err = NULL; - irq = xics_alloc_block(spapr->icp, 0, 1, true, false); - if (!irq) { - error_setg(errp, "spapr_allocate_lsi failed"); + irq = xics_alloc_block(spapr->icp, 0, 1, true, false, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_prepend(errp, "can't allocate LSIs: "); return; } diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c index 8484fcf547..a39d472b66 100644 --- a/hw/ppc/spapr_rng.c +++ b/hw/ppc/spapr_rng.c @@ -170,6 +170,7 @@ static void spapr_rng_class_init(ObjectClass *oc, void *data) dc->realize = spapr_rng_realize; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->props = spapr_rng_properties; + dc->hotpluggable = false; } static const TypeInfo spapr_rng_info = { diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c index ac6666a90b..0f61a550cb 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -431,6 +431,7 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp) VIOsPAPRDevice *dev = (VIOsPAPRDevice *)qdev; VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); char *id; + Error *local_err = NULL; if (dev->reg != -1) { /* @@ -463,9 +464,9 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp) dev->qdev.id = id; } - dev->irq = xics_alloc(spapr->icp, 0, dev->irq, false); - if (!dev->irq) { - error_setg(errp, "can't allocate IRQ"); + dev->irq = xics_alloc(spapr->icp, 0, dev->irq, false, &local_err); + if (local_err) { + error_propagate(errp, local_err); return; } diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 73e7c87fbf..e087c17ad7 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -198,14 +198,13 @@ static void sdhci_reset(SDHCIState *s) * initialization */ memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad); - if (!s->noeject_quirk) { - /* Reset other state based on current card insertion/readonly status */ - sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus)); - sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus)); - } + /* Reset other state based on current card insertion/readonly status */ + sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus)); + sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus)); s->data_count = 0; s->stopped_state = sdhc_not_stopped; + s->pending_insert_state = false; } static void sdhci_data_transfer(void *opaque); @@ -1097,6 +1096,13 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) } else { s->norintsts &= ~SDHC_NIS_ERR; } + /* Quirk for Raspberry Pi: pending card insert interrupt + * appears when first enabled after power on */ + if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) { + assert(s->pending_insert_quirk); + s->norintsts |= SDHC_NIS_INSERT; + s->pending_insert_state = false; + } sdhci_update_irq(s); break; case SDHC_NORINTSIGEN: @@ -1183,6 +1189,24 @@ static void sdhci_uninitfn(SDHCIState *s) s->fifo_buffer = NULL; } +static bool sdhci_pending_insert_vmstate_needed(void *opaque) +{ + SDHCIState *s = opaque; + + return s->pending_insert_state; +} + +static const VMStateDescription sdhci_pending_insert_vmstate = { + .name = "sdhci/pending-insert", + .version_id = 1, + .minimum_version_id = 1, + .needed = sdhci_pending_insert_vmstate_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(pending_insert_state, SDHCIState), + VMSTATE_END_OF_LIST() + }, +}; + const VMStateDescription sdhci_vmstate = { .name = "sdhci", .version_id = 1, @@ -1217,7 +1241,11 @@ const VMStateDescription sdhci_vmstate = 
{ VMSTATE_TIMER_PTR(insert_timer, SDHCIState), VMSTATE_TIMER_PTR(transfer_timer, SDHCIState), VMSTATE_END_OF_LIST() - } + }, + .subsections = (const VMStateDescription*[]) { + &sdhci_pending_insert_vmstate, + NULL + }, }; /* Capabilities registers provide information on supported features of this @@ -1275,7 +1303,8 @@ static Property sdhci_sysbus_properties[] = { DEFINE_PROP_UINT32("capareg", SDHCIState, capareg, SDHC_CAPAB_REG_DEFAULT), DEFINE_PROP_UINT32("maxcurr", SDHCIState, maxcurr, 0), - DEFINE_PROP_BOOL("noeject-quirk", SDHCIState, noeject_quirk, false), + DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk, + false), DEFINE_PROP_END_OF_LIST(), }; @@ -1303,6 +1332,10 @@ static void sdhci_sysbus_realize(DeviceState *dev, Error ** errp) memory_region_init_io(&s->iomem, OBJECT(s), &sdhci_mmio_ops, s, "sdhci", SDHC_REGISTERS_MAP_SIZE); sysbus_init_mmio(sbd, &s->iomem); + + if (s->pending_insert_quirk) { + s->pending_insert_state = true; + } } static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) diff --git a/include/hw/boards.h b/include/hw/boards.h index de3b3bdafd..b5d7eae3f3 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -127,6 +127,7 @@ struct MachineState { char *firmware; bool iommu; bool suppress_vmdesc; + bool enforce_config_section; ram_addr_t ram_size; ram_addr_t maxram_size; diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h index 355a96623c..f60b06ae82 100644 --- a/include/hw/ppc/xics.h +++ b/include/hw/ppc/xics.h @@ -161,8 +161,9 @@ struct ICSIRQState { qemu_irq xics_get_qirq(XICSState *icp, int irq); void xics_set_irq_type(XICSState *icp, int irq, bool lsi); -int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi); -int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align); +int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp); +int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align, + Error **errp); void xics_free(XICSState *icp, int irq, int num); void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu); diff --git a/include/hw/sd/sdhci.h b/include/hw/sd/sdhci.h index 607a83e855..0f0c3f1e64 100644 --- a/include/hw/sd/sdhci.h +++ b/include/hw/sd/sdhci.h @@ -76,7 +76,8 @@ typedef struct SDHCIState { uint32_t buf_maxsz; uint16_t data_count; /* current element in FIFO buffer */ uint8_t stopped_state;/* Current SDHC state */ - bool noeject_quirk;/* Quirk to disable card insert/remove interrupts */ + bool pending_insert_quirk;/* Quirk for Raspberry Pi card insert int */ + bool pending_insert_state; /* Buffer Data Port Register - virtual access point to R and W buffers */ /* Software Reset Register - always reads as 0 */ /* Force Event Auto CMD12 Error Interrupt Reg - write only */ diff --git a/include/migration/migration.h b/include/migration/migration.h index 85b6026d10..ac2c12c2a5 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -104,6 +104,8 @@ struct MigrationIncomingState { QemuMutex rp_mutex; /* We send replies from multiple threads */ void *postcopy_tmp_page; + QEMUBH *bh; + int state; /* See savevm.c */ LoadStateEntry_Head loadvm_handlers; diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index 7246f29afe..84ee355ceb 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -88,21 +88,101 @@ struct VMStateInfo { }; enum VMStateFlags { + /* Ignored */ VMS_SINGLE = 0x001, + + /* The struct member at opaque + VMStateField.offset is a pointer + * to the actual field (e.g. 
struct a { uint8_t *b; + * }). Dereference the pointer before using it as basis for + * further pointer arithmetic (see e.g. VMS_ARRAY). Does not + * affect the meaning of VMStateField.num_offset or + * VMStateField.size_offset; see VMS_VARRAY* and VMS_VBUFFER for + * those. */ VMS_POINTER = 0x002, + + /* The field is an array of fixed size. VMStateField.num contains + * the number of entries in the array. The size of each entry is + * given by VMStateField.size and / or opaque + + * VMStateField.size_offset; see VMS_VBUFFER and + * VMS_MULTIPLY. Each array entry will be processed individually + * (VMStateField.info.get()/put() if VMS_STRUCT is not set, + * recursion into VMStateField.vmsd if VMS_STRUCT is set). May not + * be combined with VMS_VARRAY*. */ VMS_ARRAY = 0x004, + + /* The field is itself a struct, containing one or more + * fields. Recurse into VMStateField.vmsd. Most useful in + * combination with VMS_ARRAY / VMS_VARRAY*, recursing into each + * array entry. */ VMS_STRUCT = 0x008, - VMS_VARRAY_INT32 = 0x010, /* Array with size in int32_t field*/ - VMS_BUFFER = 0x020, /* static sized buffer */ + + /* The field is an array of variable size. The int32_t at opaque + + * VMStateField.num_offset contains the number of entries in the + * array. See the VMS_ARRAY description regarding array handling + * in general. May not be combined with VMS_ARRAY or any other + * VMS_VARRAY*. */ + VMS_VARRAY_INT32 = 0x010, + + /* Ignored */ + VMS_BUFFER = 0x020, + + /* The field is a (fixed-size or variable-size) array of pointers + * (e.g. struct a { uint8_t *b[]; }). Dereference each array entry + * before using it. Note: Does not imply any one of VMS_ARRAY / + * VMS_VARRAY*; these need to be set explicitly. */ VMS_ARRAY_OF_POINTER = 0x040, - VMS_VARRAY_UINT16 = 0x080, /* Array with size in uint16_t field */ - VMS_VBUFFER = 0x100, /* Buffer with size in int32_t field */ - VMS_MULTIPLY = 0x200, /* multiply "size" field by field_size */ - VMS_VARRAY_UINT8 = 0x400, /* Array with size in uint8_t field*/ - VMS_VARRAY_UINT32 = 0x800, /* Array with size in uint32_t field*/ - VMS_MUST_EXIST = 0x1000, /* Field must exist in input */ - VMS_ALLOC = 0x2000, /* Alloc a buffer on the destination */ - VMS_MULTIPLY_ELEMENTS = 0x4000, /* multiply varray size by field->num */ + + /* The field is an array of variable size. The uint16_t at opaque + * + VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS) + * contains the number of entries in the array. See the VMS_ARRAY + * description regarding array handling in general. May not be + * combined with VMS_ARRAY or any other VMS_VARRAY*. */ + VMS_VARRAY_UINT16 = 0x080, + + /* The size of the individual entries (a single array entry if + * VMS_ARRAY or any of VMS_VARRAY* are set, or the field itself if + * neither is set) is variable (i.e. not known at compile-time), + * but the same for all entries. Use the int32_t at opaque + + * VMStateField.size_offset (subject to VMS_MULTIPLY) to determine + * the size of each (and every) entry. */ + VMS_VBUFFER = 0x100, + + /* Multiply the entry size given by the int32_t at opaque + + * VMStateField.size_offset (see VMS_VBUFFER description) with + * VMStateField.size to determine the number of bytes to be + * allocated. Only valid in combination with VMS_VBUFFER. */ + VMS_MULTIPLY = 0x200, + + /* The field is an array of variable size. The uint8_t at opaque + + * VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS) + * contains the number of entries in the array. 
See the VMS_ARRAY + * description regarding array handling in general. May not be + * combined with VMS_ARRAY or any other VMS_VARRAY*. */ + VMS_VARRAY_UINT8 = 0x400, + + /* The field is an array of variable size. The uint32_t at opaque + * + VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS) + * contains the number of entries in the array. See the VMS_ARRAY + * description regarding array handling in general. May not be + * combined with VMS_ARRAY or any other VMS_VARRAY*. */ + VMS_VARRAY_UINT32 = 0x800, + + /* Fail loading the serialised VM state if this field is missing + * from the input. */ + VMS_MUST_EXIST = 0x1000, + + /* When loading serialised VM state, allocate memory for the + * (entire) field. Only valid in combination with + * VMS_POINTER. Note: Not all combinations with other flags are + * currently supported, e.g. VMS_ALLOC|VMS_ARRAY_OF_POINTER won't + * cause the individual entries to be allocated. */ + VMS_ALLOC = 0x2000, + + /* Multiply the number of entries given by the integer at opaque + + * VMStateField.num_offset (see VMS_VARRAY*) with VMStateField.num + * to determine the number of entries in the array. Only valid in + * combination with one of VMS_VARRAY*. */ + VMS_MULTIPLY_ELEMENTS = 0x4000, }; typedef struct { diff --git a/linux-user/arm/nwfpe/fpa11.h b/linux-user/arm/nwfpe/fpa11.h index 7e114eee8a..0b072843da 100644 --- a/linux-user/arm/nwfpe/fpa11.h +++ b/linux-user/arm/nwfpe/fpa11.h @@ -105,7 +105,7 @@ static inline void writeRegister(unsigned int x, unsigned int y) static inline void writeConditionCodes(unsigned int x) { - cpsr_write(user_registers,x,CPSR_NZCV); + cpsr_write(user_registers, x, CPSR_NZCV, CPSRWriteByInstr); } #define ARM_REG_PC 15 diff --git a/linux-user/main.c b/linux-user/main.c index 2a692e0f0b..700724effe 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -513,7 +513,7 @@ static void arm_kernel_cmpxchg64_helper(CPUARMState *env) env->regs[0] = -1; cpsr &= ~CPSR_C; } - cpsr_write(env, cpsr, CPSR_C); + cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr); end_exclusive(); return; @@ -562,7 +562,7 @@ do_kernel_trap(CPUARMState *env) env->regs[0] = -1; cpsr &= ~CPSR_C; } - cpsr_write(env, cpsr, CPSR_C); + cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr); end_exclusive(); break; case 0xffff0fe0: /* __kernel_get_tls */ @@ -4446,7 +4446,8 @@ int main(int argc, char **argv, char **envp) #elif defined(TARGET_ARM) { int i; - cpsr_write(env, regs->uregs[16], 0xffffffff); + cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC, + CPSRWriteByInstr); for(i = 0; i < 16; i++) { env->regs[i] = regs->uregs[i]; } diff --git a/linux-user/signal.c b/linux-user/signal.c index 327c03254c..962111cfdf 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -1611,7 +1611,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, env->regs[13] = frame_addr; env->regs[14] = retcode; env->regs[15] = handler & (thumb ? 
~1 : ~3); - cpsr_write(env, cpsr, 0xffffffff); + cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr); } static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) @@ -1843,7 +1843,7 @@ restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) __get_user(env->regs[15], &sc->arm_pc); #ifdef TARGET_CONFIG_CPU_32 __get_user(cpsr, &sc->arm_cpsr); - cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC); + cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); #endif err |= !valid_user_regs(env); diff --git a/migration/migration.c b/migration/migration.c index fc5e50b0be..0129d9f420 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -323,10 +323,56 @@ void qemu_start_incoming_migration(const char *uri, Error **errp) } } +static void process_incoming_migration_bh(void *opaque) +{ + Error *local_err = NULL; + MigrationIncomingState *mis = opaque; + + /* Make sure all file formats flush their mutable metadata */ + bdrv_invalidate_cache_all(&local_err); + if (local_err) { + migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, + MIGRATION_STATUS_FAILED); + error_report_err(local_err); + migrate_decompress_threads_join(); + exit(EXIT_FAILURE); + } + + /* + * This must happen after all error conditions are dealt with and + * we're sure the VM is going to be running on this host. + */ + qemu_announce_self(); + + /* If global state section was not received or we are in running + state, we need to obey autostart. Any other state is set with + runstate_set. */ + + if (!global_state_received() || + global_state_get_runstate() == RUN_STATE_RUNNING) { + if (autostart) { + vm_start(); + } else { + runstate_set(RUN_STATE_PAUSED); + } + } else { + runstate_set(global_state_get_runstate()); + } + migrate_decompress_threads_join(); + /* + * This must happen after any state changes since as soon as an external + * observer sees this event they might start to prod at the VM assuming + * it's ready to use. + */ + migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, + MIGRATION_STATUS_COMPLETED); + qemu_bh_delete(mis->bh); + migration_incoming_state_destroy(); +} + static void process_incoming_migration_co(void *opaque) { QEMUFile *f = opaque; - Error *local_err = NULL; MigrationIncomingState *mis; PostcopyState ps; int ret; @@ -369,45 +415,8 @@ static void process_incoming_migration_co(void *opaque) exit(EXIT_FAILURE); } - /* Make sure all file formats flush their mutable metadata */ - bdrv_invalidate_cache_all(&local_err); - if (local_err) { - migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, - MIGRATION_STATUS_FAILED); - error_report_err(local_err); - migrate_decompress_threads_join(); - exit(EXIT_FAILURE); - } - - /* - * This must happen after all error conditions are dealt with and - * we're sure the VM is going to be running on this host. - */ - qemu_announce_self(); - - /* If global state section was not received or we are in running - state, we need to obey autostart. Any other state is set with - runstate_set. */ - - if (!global_state_received() || - global_state_get_runstate() == RUN_STATE_RUNNING) { - if (autostart) { - vm_start(); - } else { - runstate_set(RUN_STATE_PAUSED); - } - } else { - runstate_set(global_state_get_runstate()); - } - migrate_decompress_threads_join(); - /* - * This must happen after any state changes since as soon as an external - * observer sees this event they might start to prod at the VM assuming - * it's ready to use. 
- */ - migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, - MIGRATION_STATUS_COMPLETED); - migration_incoming_state_destroy(); + mis->bh = qemu_bh_new(process_incoming_migration_bh, mis); + qemu_bh_schedule(mis->bh); } void process_incoming_migration(QEMUFile *f) diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 254c629d48..fbd0064fce 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -52,14 +52,14 @@ struct PostcopyDiscardState { #if defined(__linux__) #include <poll.h> -#include <sys/eventfd.h> #include <sys/mman.h> #include <sys/ioctl.h> #include <sys/syscall.h> #include <asm/types.h> /* for __u64 */ #endif -#if defined(__linux__) && defined(__NR_userfaultfd) +#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD) +#include <sys/eventfd.h> #include <linux/userfaultfd.h> static bool ufd_version_check(int ufd) diff --git a/migration/savevm.c b/migration/savevm.c index 94f2894243..96e7db5967 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -878,13 +878,19 @@ bool qemu_savevm_state_blocked(Error **errp) return false; } +static bool enforce_config_section(void) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + return machine->enforce_config_section; +} + void qemu_savevm_state_header(QEMUFile *f) { trace_savevm_state_header(); qemu_put_be32(f, QEMU_VM_FILE_MAGIC); qemu_put_be32(f, QEMU_VM_FILE_VERSION); - if (!savevm_state.skip_configuration) { + if (!savevm_state.skip_configuration || enforce_config_section()) { qemu_put_byte(f, QEMU_VM_CONFIGURATION); vmstate_save_state(f, &vmstate_configuration, &savevm_state, 0); } @@ -1088,12 +1094,11 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only) json_prop_int(vmdesc, "instance_id", se->instance_id); save_section_header(f, se, QEMU_VM_SECTION_FULL); - vmstate_save(f, se, vmdesc); - - json_end_object(vmdesc); trace_savevm_section_end(se->idstr, se->section_id, 0); save_section_footer(f, se); + + json_end_object(vmdesc); } if (!in_postcopy) { @@ -1496,17 +1501,10 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) return 0; } -/* After all discards we can start running and asking for pages */ -static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) +static void loadvm_postcopy_handle_run_bh(void *opaque) { - PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING); Error *local_err = NULL; - - trace_loadvm_postcopy_handle_run(); - if (ps != POSTCOPY_INCOMING_LISTENING) { - error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps); - return -1; - } + MigrationIncomingState *mis = opaque; /* TODO we should move all of this lot into postcopy_ram.c or a shared code * in migration.c @@ -1519,7 +1517,6 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) bdrv_invalidate_cache_all(&local_err); if (local_err) { error_report_err(local_err); - return -1; } trace_loadvm_postcopy_handle_run_cpu_sync(); @@ -1535,6 +1532,23 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) runstate_set(RUN_STATE_PAUSED); } + qemu_bh_delete(mis->bh); +} + +/* After all discards we can start running and asking for pages */ +static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) +{ + PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING); + + trace_loadvm_postcopy_handle_run(); + if (ps != POSTCOPY_INCOMING_LISTENING) { + error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps); + return -1; + } + + mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, 
NULL); + qemu_bh_schedule(mis->bh); + /* We need to finish reading the stream from the package * and also stop reading anything more from the stream that loaded the * package (since it's now being read by the listener thread). @@ -1875,7 +1889,7 @@ int qemu_loadvm_state(QEMUFile *f) return -ENOTSUP; } - if (!savevm_state.skip_configuration) { + if (!savevm_state.skip_configuration || enforce_config_section()) { if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) { error_report("Configuration section missing"); return -EINVAL; diff --git a/pc-bios/openbios-ppc b/pc-bios/openbios-ppc Binary files differindex e44c0b3098..4a883843e5 100644 --- a/pc-bios/openbios-ppc +++ b/pc-bios/openbios-ppc diff --git a/pc-bios/openbios-sparc32 b/pc-bios/openbios-sparc32 Binary files differindex c75c835d54..e288624c7e 100644 --- a/pc-bios/openbios-sparc32 +++ b/pc-bios/openbios-sparc32 diff --git a/pc-bios/openbios-sparc64 b/pc-bios/openbios-sparc64 Binary files differindex b3304b44e7..f69e56c780 100644 --- a/pc-bios/openbios-sparc64 +++ b/pc-bios/openbios-sparc64 diff --git a/qemu-options.hx b/qemu-options.hx index 599db9474c..144e6a9b76 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -43,7 +43,8 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \ " aes-key-wrap=on|off controls support for AES key wrapping (default=on)\n" " dea-key-wrap=on|off controls support for DEA key wrapping (default=on)\n" " suppress-vmdesc=on|off disables self-describing migration (default=off)\n" - " nvdimm=on|off controls NVDIMM support (default=off)\n", + " nvdimm=on|off controls NVDIMM support (default=off)\n" + " enforce-config-section=on|off enforce configuration section migration (default=off)\n", QEMU_ARCH_ALL) STEXI @item -machine [type=]@var{name}[,prop=@var{value}[,...]] diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 9589b2d634..9f51faea80 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -550,31 +550,24 @@ GuestFileWrite *qmp_guest_file_write(int64_t handle, const char *buf_b64, } struct GuestFileSeek *qmp_guest_file_seek(int64_t handle, int64_t offset, - int64_t whence_code, Error **errp) + GuestFileWhence *whence_code, + Error **errp) { GuestFileHandle *gfh = guest_file_handle_find(handle, errp); GuestFileSeek *seek_data = NULL; FILE *fh; int ret; int whence; + Error *err = NULL; if (!gfh) { return NULL; } /* We stupidly exposed 'whence':'int' in our qapi */ - switch (whence_code) { - case QGA_SEEK_SET: - whence = SEEK_SET; - break; - case QGA_SEEK_CUR: - whence = SEEK_CUR; - break; - case QGA_SEEK_END: - whence = SEEK_END; - break; - default: - error_setg(errp, "invalid whence code %"PRId64, whence_code); + whence = ga_parse_whence(whence_code, &err); + if (err) { + error_propagate(errp, err); return NULL; } diff --git a/qga/commands-win32.c b/qga/commands-win32.c index cf0757cd0f..d76327f5a3 100644 --- a/qga/commands-win32.c +++ b/qga/commands-win32.c @@ -385,7 +385,8 @@ done: } GuestFileSeek *qmp_guest_file_seek(int64_t handle, int64_t offset, - int64_t whence_code, Error **errp) + GuestFileWhence *whence_code, + Error **errp) { GuestFileHandle *gfh; GuestFileSeek *seek_data; @@ -394,6 +395,7 @@ GuestFileSeek *qmp_guest_file_seek(int64_t handle, int64_t offset, off_pos.QuadPart = offset; BOOL res; int whence; + Error *err = NULL; gfh = guest_file_handle_find(handle, errp); if (!gfh) { @@ -401,18 +403,9 @@ GuestFileSeek *qmp_guest_file_seek(int64_t handle, int64_t offset, } /* We stupidly exposed 'whence':'int' in our qapi */ - switch (whence_code) { - case QGA_SEEK_SET: - whence = 
SEEK_SET; - break; - case QGA_SEEK_CUR: - whence = SEEK_CUR; - break; - case QGA_SEEK_END: - whence = SEEK_END; - break; - default: - error_setg(errp, "invalid whence code %"PRId64, whence_code); + whence = ga_parse_whence(whence_code, &err); + if (err) { + error_propagate(errp, err); return NULL; } @@ -1230,7 +1223,71 @@ void qmp_guest_set_time(bool has_time, int64_t time_ns, Error **errp) GuestLogicalProcessorList *qmp_guest_get_vcpus(Error **errp) { - error_setg(errp, QERR_UNSUPPORTED); + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION pslpi, ptr; + DWORD length; + GuestLogicalProcessorList *head, **link; + Error *local_err = NULL; + int64_t current; + + ptr = pslpi = NULL; + length = 0; + current = 0; + head = NULL; + link = &head; + + if ((GetLogicalProcessorInformation(pslpi, &length) == FALSE) && + (GetLastError() == ERROR_INSUFFICIENT_BUFFER) && + (length > sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION))) { + ptr = pslpi = g_malloc0(length); + if (GetLogicalProcessorInformation(pslpi, &length) == FALSE) { + error_setg(&local_err, "Failed to get processor information: %d", + (int)GetLastError()); + } + } else { + error_setg(&local_err, + "Failed to get processor information buffer length: %d", + (int)GetLastError()); + } + + while ((local_err == NULL) && (length > 0)) { + if (pslpi->Relationship == RelationProcessorCore) { + ULONG_PTR cpu_bits = pslpi->ProcessorMask; + + while (cpu_bits > 0) { + if (!!(cpu_bits & 1)) { + GuestLogicalProcessor *vcpu; + GuestLogicalProcessorList *entry; + + vcpu = g_malloc0(sizeof *vcpu); + vcpu->logical_id = current++; + vcpu->online = true; + vcpu->has_can_offline = false; + + entry = g_malloc0(sizeof *entry); + entry->value = vcpu; + + *link = entry; + link = &entry->next; + } + cpu_bits >>= 1; + } + } + length -= sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); + pslpi++; /* next entry */ + } + + g_free(ptr); + + if (local_err == NULL) { + if (head != NULL) { + return head; + } + /* there's no guest with zero VCPUs */ + error_setg(&local_err, "Guest reported zero VCPUs"); + } + + qapi_free_GuestLogicalProcessorList(head); + error_propagate(errp, local_err); return NULL; } @@ -1246,11 +1303,12 @@ get_net_error_message(gint error) HMODULE module = NULL; gchar *retval = NULL; wchar_t *msg = NULL; - int flags, nchars; + int flags; + size_t nchars; - flags = FORMAT_MESSAGE_ALLOCATE_BUFFER - |FORMAT_MESSAGE_IGNORE_INSERTS - |FORMAT_MESSAGE_FROM_SYSTEM; + flags = FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_IGNORE_INSERTS | + FORMAT_MESSAGE_FROM_SYSTEM; if (error >= NERR_BASE && error <= MAX_NERR) { module = LoadLibraryExW(L"netmsg.dll", NULL, LOAD_LIBRARY_AS_DATAFILE); @@ -1265,8 +1323,10 @@ get_net_error_message(gint error) if (msg != NULL) { nchars = wcslen(msg); - if (nchars > 2 && msg[nchars-1] == '\n' && msg[nchars-2] == '\r') { - msg[nchars-2] = '\0'; + if (nchars >= 2 && + msg[nchars - 1] == L'\n' && + msg[nchars - 2] == L'\r') { + msg[nchars - 2] = L'\0'; } retval = g_utf16_to_utf8(msg, -1, NULL, NULL, NULL); @@ -1289,8 +1349,9 @@ void qmp_guest_set_user_password(const char *username, NET_API_STATUS nas; char *rawpasswddata = NULL; size_t rawpasswdlen; - wchar_t *user, *wpass; + wchar_t *user = NULL, *wpass = NULL; USER_INFO_1003 pi1003 = { 0, }; + GError *gerr = NULL; if (crypted) { error_setg(errp, QERR_UNSUPPORTED); @@ -1304,8 +1365,15 @@ void qmp_guest_set_user_password(const char *username, rawpasswddata = g_renew(char, rawpasswddata, rawpasswdlen + 1); rawpasswddata[rawpasswdlen] = '\0'; - user = g_utf8_to_utf16(username, -1, NULL, NULL, NULL); - 
wpass = g_utf8_to_utf16(rawpasswddata, -1, NULL, NULL, NULL); + user = g_utf8_to_utf16(username, -1, NULL, NULL, &gerr); + if (!user) { + goto done; + } + + wpass = g_utf8_to_utf16(rawpasswddata, -1, NULL, NULL, &gerr); + if (!wpass) { + goto done; + } pi1003.usri1003_password = wpass; nas = NetUserSetInfo(NULL, user, @@ -1318,6 +1386,11 @@ void qmp_guest_set_user_password(const char *username, g_free(msg); } +done: + if (gerr) { + error_setg(errp, QERR_QGA_COMMAND_FAILED, gerr->message); + g_error_free(gerr); + } g_free(user); g_free(wpass); g_free(rawpasswddata); @@ -1347,7 +1420,7 @@ GList *ga_command_blacklist_init(GList *blacklist) { const char *list_unsupported[] = { "guest-suspend-hybrid", - "guest-get-vcpus", "guest-set-vcpus", + "guest-set-vcpus", "guest-get-memory-blocks", "guest-set-memory-blocks", "guest-get-memory-block-size", "guest-fsfreeze-freeze-list", diff --git a/qga/commands.c b/qga/commands.c index 5b56786ef6..e091ee1af1 100644 --- a/qga/commands.c +++ b/qga/commands.c @@ -473,3 +473,24 @@ done: return ge; } + +/* Convert GuestFileWhence (either a raw integer or an enum value) into + * the guest's SEEK_ constants. */ +int ga_parse_whence(GuestFileWhence *whence, Error **errp) +{ + /* Exploit the fact that we picked values to match QGA_SEEK_*. */ + if (whence->type == QTYPE_QSTRING) { + whence->type = QTYPE_QINT; + whence->u.value = whence->u.name; + } + switch (whence->u.value) { + case QGA_SEEK_SET: + return SEEK_SET; + case QGA_SEEK_CUR: + return SEEK_CUR; + case QGA_SEEK_END: + return SEEK_END; + } + error_setg(errp, "invalid whence code %"PRId64, whence->u.value); + return -1; +} diff --git a/qga/guest-agent-core.h b/qga/guest-agent-core.h index 238dc6b08d..0a49516045 100644 --- a/qga/guest-agent-core.h +++ b/qga/guest-agent-core.h @@ -12,16 +12,10 @@ */ #include "qapi/qmp/dispatch.h" #include "qemu-common.h" +#include "qga-qmp-commands.h" #define QGA_READ_COUNT_DEFAULT 4096 -/* Mapping of whence codes used by guest-file-seek. */ -enum { - QGA_SEEK_SET = 0, - QGA_SEEK_CUR = 1, - QGA_SEEK_END = 2, -}; - typedef struct GAState GAState; typedef struct GACommandState GACommandState; extern GAState *ga_state; @@ -44,6 +38,7 @@ void ga_set_frozen(GAState *s); void ga_unset_frozen(GAState *s); const char *ga_fsfreeze_hook(GAState *s); int64_t ga_get_fd_handle(GAState *s, Error **errp); +int ga_parse_whence(GuestFileWhence *whence, Error **errp); #ifndef _WIN32 void reopen_fd_to_null(int fd); diff --git a/qga/installer/qemu-ga.wxs b/qga/installer/qemu-ga.wxs index 9473875723..7f9289122f 100644 --- a/qga/installer/qemu-ga.wxs +++ b/qga/installer/qemu-ga.wxs @@ -41,7 +41,7 @@ <Product Name="QEMU guest agent" - Id="*" + Id="{DF9974AD-E41A-4304-81AD-69AA8F299766}" UpgradeCode="{EB6B8302-C06E-4BEC-ADAC-932C68A3A98D}" Manufacturer="$(env.QEMU_GA_MANUFACTURER)" Version="$(env.QEMU_GA_VERSION)" diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json index 01c9ee48d8..c21f3084dc 100644 --- a/qga/qapi-schema.json +++ b/qga/qapi-schema.json @@ -314,6 +314,34 @@ 'data': { 'position': 'int', 'eof': 'bool' } } ## +# @QGASeek: +# +# Symbolic names for use in @guest-file-seek +# +# @set: Set to the specified offset (same effect as 'whence':0) +# @cur: Add offset to the current location (same effect as 'whence':1) +# @end: Add offset to the end of the file (same effect as 'whence':2) +# +# Since: 2.6 +## +{ 'enum': 'QGASeek', 'data': [ 'set', 'cur', 'end' ] } + +## +# @GuestFileWhence: +# +# Controls the meaning of offset to @guest-file-seek. 
+# +# @value: Integral value (0 for set, 1 for cur, 2 for end), available +# for historical reasons, and might differ from the host's or +# guest's SEEK_* values (since: 0.15) +# @name: Symbolic name, and preferred interface +# +# Since: 2.6 +## +{ 'alternate': 'GuestFileWhence', + 'data': { 'value': 'int', 'name': 'QGASeek' } } + +## # @guest-file-seek: # # Seek to a position in the file, as with fseek(), and return the @@ -324,14 +352,15 @@ # # @offset: bytes to skip over in the file stream # -# @whence: 0 for SEEK_SET, 1 for SEEK_CUR, or 2 for SEEK_END +# @whence: Symbolic or numeric code for interpreting offset # # Returns: @GuestFileSeek on success. # # Since: 0.15.0 ## { 'command': 'guest-file-seek', - 'data': { 'handle': 'int', 'offset': 'int', 'whence': 'int' }, + 'data': { 'handle': 'int', 'offset': 'int', + 'whence': 'GuestFileWhence' }, 'returns': 'GuestFileSeek' } ## diff --git a/qga/vss-win32.c b/qga/vss-win32.c index 5182e3be91..9a0e46356a 100644 --- a/qga/vss-win32.c +++ b/qga/vss-win32.c @@ -150,7 +150,7 @@ void qga_vss_fsfreeze(int *nr_volume, Error **errp, bool freeze) const char *func_name = freeze ? "requester_freeze" : "requester_thaw"; QGAVSSRequesterFunc func; ErrorSet errset = { - .error_setg_win32 = error_setg_win32_internal, + .error_setg_win32_wrapper = error_setg_win32_internal, .errp = errp, }; diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp index b0e4426c72..cd9cdb4a24 100644 --- a/qga/vss-win32/install.cpp +++ b/qga/vss-win32/install.cpp @@ -10,8 +10,7 @@ * See the COPYING file in the top-level directory. */ -#include <stdio.h> -#include <string.h> +#include "qemu/osdep.h" #include "vss-common.h" #include "inc/win2003/vscoordint.h" diff --git a/qga/vss-win32/provider.cpp b/qga/vss-win32/provider.cpp index d5129f8f65..d977393e33 100644 --- a/qga/vss-win32/provider.cpp +++ b/qga/vss-win32/provider.cpp @@ -10,7 +10,7 @@ * See the COPYING file in the top-level directory. */ -#include <stdio.h> +#include "qemu/osdep.h" #include "vss-common.h" #include "inc/win2003/vscoordint.h" #include "inc/win2003/vsprov.h" diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp index 9b3e310971..b57d5170e8 100644 --- a/qga/vss-win32/requester.cpp +++ b/qga/vss-win32/requester.cpp @@ -10,7 +10,7 @@ * See the COPYING file in the top-level directory. */ -#include <stdio.h> +#include "qemu/osdep.h" #include "vss-common.h" #include "requester.h" #include "assert.h" @@ -23,9 +23,9 @@ /* Call QueryStatus every 10 ms while waiting for frozen event */ #define VSS_TIMEOUT_EVENT_MSEC 10 -#define err_set(e, err, fmt, ...) \ - ((e)->error_setg_win32((e)->errp, __FILE__, __LINE__, __func__, \ - err, fmt, ## __VA_ARGS__)) +#define err_set(e, err, fmt, ...) \ + ((e)->error_setg_win32_wrapper((e)->errp, __FILE__, __LINE__, __func__, \ + err, fmt, ## __VA_ARGS__)) /* Bad idea, works only when (e)->errp != NULL: */ #define err_is_set(e) ((e)->errp && *(e)->errp) /* To lift this restriction, error_propagate(), like we do in QEMU code */ diff --git a/qga/vss-win32/requester.h b/qga/vss-win32/requester.h index ad2bf3df61..2a39d734a2 100644 --- a/qga/vss-win32/requester.h +++ b/qga/vss-win32/requester.h @@ -27,7 +27,7 @@ typedef void (*ErrorSetFunc)(struct Error **errp, int win32_err, const char *fmt, ...) 
GCC_FMT_ATTR(6, 7); typedef struct ErrorSet { - ErrorSetFunc error_setg_win32; + ErrorSetFunc error_setg_win32_wrapper; struct Error **errp; /* restriction: must not be null */ } ErrorSet; diff --git a/roms/openbios b/roms/openbios -Subproject bd95e4c193905d5ed867e96f1a720ce4cb53b59 +Subproject 0dbda5d935f95391d16431cd3c079fbf53d668d diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h index 1cc4502fc4..1061c08a10 100644 --- a/target-arm/cpu-qom.h +++ b/target-arm/cpu-qom.h @@ -155,6 +155,7 @@ typedef struct ARMCPU { uint32_t id_mmfr1; uint32_t id_mmfr2; uint32_t id_mmfr3; + uint32_t id_mmfr4; uint32_t id_isar0; uint32_t id_isar1; uint32_t id_isar2; diff --git a/target-arm/cpu.h b/target-arm/cpu.h index 3cbda73578..744f052a67 100644 --- a/target-arm/cpu.h +++ b/target-arm/cpu.h @@ -598,6 +598,7 @@ void pmccntr_sync(CPUARMState *env); #define MDCR_EDAD (1U << 20) #define MDCR_SPME (1U << 17) #define MDCR_SDD (1U << 16) +#define MDCR_SPD (3U << 14) #define MDCR_TDRA (1U << 11) #define MDCR_TDOSA (1U << 10) #define MDCR_TDA (1U << 9) @@ -606,6 +607,9 @@ void pmccntr_sync(CPUARMState *env); #define MDCR_TPM (1U << 6) #define MDCR_TPMCR (1U << 5) +/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */ +#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD) + #define CPSR_M (0x1fU) #define CPSR_T (1U << 5) #define CPSR_F (1U << 6) @@ -718,8 +722,17 @@ static inline void pstate_write(CPUARMState *env, uint32_t val) /* Return the current CPSR value. */ uint32_t cpsr_read(CPUARMState *env); -/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */ -void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask); + +typedef enum CPSRWriteType { + CPSRWriteByInstr = 0, /* from guest MSR or CPS */ + CPSRWriteExceptionReturn = 1, /* from guest exception return insn */ + CPSRWriteRaw = 2, /* trust values, do not switch reg banks */ + CPSRWriteByGDBStub = 3, /* from the GDB stub */ +} CPSRWriteType; + +/* Set the CPSR. Note that some bits of mask must be all-set or all-clear.*/ +void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, + CPSRWriteType write_type); /* Return the current xPSR value. */ static inline uint32_t xpsr_read(CPUARMState *env) diff --git a/target-arm/gdbstub.c b/target-arm/gdbstub.c index 08b91a4861..3ba9aadd48 100644 --- a/target-arm/gdbstub.c +++ b/target-arm/gdbstub.c @@ -94,7 +94,7 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 4; case 25: /* CPSR */ - cpsr_write(env, tmp, 0xffffffff); + cpsr_write(env, tmp, 0xffffffff, CPSRWriteByGDBStub); return 4; } /* Unknown register. */ diff --git a/target-arm/helper.c b/target-arm/helper.c index 5a0447b93a..18c82967d3 100644 --- a/target-arm/helper.c +++ b/target-arm/helper.c @@ -439,6 +439,24 @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +/* Check for traps to performance monitor registers, which are controlled + * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. 
+ */ +static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + + if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) + && !arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = arm_env_get_cpu(env); @@ -774,11 +792,22 @@ static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* Performance monitor registers user accessibility is controlled - * by PMUSERENR. + * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable + * trapping to EL2 or EL3 for other accesses. */ - if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) { + int el = arm_current_el(env); + + if (el == 0 && !env->cp15.c9_pmuserenr) { return CP_ACCESS_TRAP; } + if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) + && !arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; } @@ -1101,28 +1130,28 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0, .accessfn = pmreg_access }, { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, - .access = PL0_R | PL1_RW, + .access = PL0_R | PL1_RW, .accessfn = access_tpm, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), .resetvalue = 0, .writefn = pmuserenr_write, .raw_writefn = raw_write }, { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, - .access = PL0_R | PL1_RW, .type = ARM_CP_ALIAS, + .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), .resetvalue = 0, .writefn = pmuserenr_write, .raw_writefn = raw_write }, { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, - .access = PL1_RW, + .access = PL1_RW, .accessfn = access_tpm, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), .resetvalue = 0, .writefn = pmintenset_write, .raw_writefn = raw_write }, { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, - .access = PL1_RW, .type = ARM_CP_ALIAS, + .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), .writefn = pmintenclr_write, }, { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, - .access = PL1_RW, .type = ARM_CP_ALIAS, + .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), .writefn = pmintenclr_write }, { .name = "VBAR", .state = ARM_CP_STATE_BOTH, @@ -3037,6 +3066,12 @@ static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; +} + static const ARMCPRegInfo v8_cp_reginfo[] = { /* Minimal set of EL0-visible registers. This will need to be expanded * significantly for system emulation of AArch64 CPUs. 
@@ -3331,6 +3366,15 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, + { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, + .resetvalue = 0, + .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, + { .name = "SDCR", .type = ARM_CP_ALIAS, + .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, + .access = PL1_RW, .accessfn = access_trap_aa32s_el1, + .writefn = sdcr_write, + .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, REGINFO_SENTINEL }; @@ -3628,7 +3672,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, - .type = ARM_CP_IO, .access = PL2_RW, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, .resetfn = gt_hyp_timer_reset, .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, @@ -3688,14 +3732,6 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { .access = PL1_RW, .accessfn = access_trap_aa32s_el1, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), .writefn = scr_write }, - { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, - .resetvalue = 0, - .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, - { .name = "SDCR", .type = ARM_CP_ALIAS, - .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, - .access = PL1_RW, .accessfn = access_trap_aa32s_el1, - .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, .access = PL3_RW, .resetvalue = 0, @@ -4280,12 +4316,14 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_isar5 }, - /* 6..7 are as yet unallocated and must RAZ */ - { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2, - .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = 0 }, - { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2, - .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, + { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_mmfr4 }, + /* 7 is as yet unallocated and must RAZ */ + { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; @@ -4339,7 +4377,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, not_v7_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V8)) { - /* AArch64 ID registers, which all have impdef reset values */ + /* AArch64 ID registers, which all have impdef reset values. + * Note that within the ID register ranges the unused slots + * must all RAZ, not UNDEF; future architecture versions may + * define new registers here. 
+ */ ARMCPRegInfo v8_idregs[] = { { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, @@ -4349,6 +4391,30 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64pfr1}, + { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, @@ -4362,6 +4428,14 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64dfr1 }, + { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, @@ -4370,6 +4444,14 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64afr1 }, + { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, @@ -4378,6 +4460,30 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64isar1 }, + { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, + .access = 
PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, @@ -4386,6 +4492,30 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64mmfr1 }, + { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, @@ -4398,6 +4528,26 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->mvfr2 }, + { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 
0, .crn = 9, .crm = 12, .opc2 = 6, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, @@ -5200,23 +5350,47 @@ void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) /* Helper coprocessor reset function for do-nothing-on-reset registers */ } -static int bad_mode_switch(CPUARMState *env, int mode) +static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) { /* Return true if it is not valid for us to switch to * this CPU mode (ie all the UNPREDICTABLE cases in * the ARM ARM CPSRWriteByInstr pseudocode). */ + + /* Changes to or from Hyp via MSR and CPS are illegal. */ + if (write_type == CPSRWriteByInstr && + ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || + mode == ARM_CPU_MODE_HYP)) { + return 1; + } + switch (mode) { case ARM_CPU_MODE_USR: + return 0; case ARM_CPU_MODE_SYS: case ARM_CPU_MODE_SVC: case ARM_CPU_MODE_ABT: case ARM_CPU_MODE_UND: case ARM_CPU_MODE_IRQ: case ARM_CPU_MODE_FIQ: + /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 + * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) + */ + /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR + * and CPS are treated as illegal mode changes. + */ + if (write_type == CPSRWriteByInstr && + (env->cp15.hcr_el2 & HCR_TGE) && + (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && + !arm_is_secure_below_el3(env)) { + return 1; + } return 0; + case ARM_CPU_MODE_HYP: + return !arm_feature(env, ARM_FEATURE_EL2) + || arm_current_el(env) < 2 || arm_is_secure(env); case ARM_CPU_MODE_MON: - return !arm_is_secure(env); + return arm_current_el(env) < 3; default: return 1; } @@ -5233,7 +5407,8 @@ uint32_t cpsr_read(CPUARMState *env) | (env->GE << 16) | (env->daif & CPSR_AIF); } -void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) +void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, + CPSRWriteType write_type) { uint32_t changed_daif; @@ -5267,7 +5442,7 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) * In a V8 implementation, it is permitted for privileged software to * change the CPSR A/F bits regardless of the SCR.AW/FW bits. */ - if (!arm_feature(env, ARM_FEATURE_V8) && + if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && arm_feature(env, ARM_FEATURE_EL3) && !arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure(env)) { @@ -5314,13 +5489,24 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) env->daif &= ~(CPSR_AIF & mask); env->daif |= val & CPSR_AIF & mask; - if ((env->uncached_cpsr ^ val) & mask & CPSR_M) { - if (bad_mode_switch(env, val & CPSR_M)) { - /* Attempt to switch to an invalid mode: this is UNPREDICTABLE. - * We choose to ignore the attempt and leave the CPSR M field - * untouched. + if (write_type != CPSRWriteRaw && + (env->uncached_cpsr & CPSR_M) != CPSR_USER && + ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { + if (bad_mode_switch(env, val & CPSR_M, write_type)) { + /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in + * v7, and has defined behaviour in v8: + * + leave CPSR.M untouched + * + allow changes to the other CPSR fields + * + set PSTATE.IL + * For user changes via the GDB stub, we don't set PSTATE.IL, + * as this would be unnecessarily harsh for a user error. 
*/ mask &= ~CPSR_M; + if (write_type != CPSRWriteByGDBStub && + arm_feature(env, ARM_FEATURE_V8)) { + mask |= CPSR_IL; + val |= CPSR_IL; + } } else { switch_mode(env, val & CPSR_M); } diff --git a/target-arm/helper.h b/target-arm/helper.h index c98e9cea3d..ea13202b17 100644 --- a/target-arm/helper.h +++ b/target-arm/helper.h @@ -57,6 +57,7 @@ DEF_HELPER_2(pre_smc, void, env, i32) DEF_HELPER_1(check_breakpoints, void, env) DEF_HELPER_3(cpsr_write, void, env, i32, i32) +DEF_HELPER_2(cpsr_write_eret, void, env, i32) DEF_HELPER_1(cpsr_read, i32, env) DEF_HELPER_3(v7m_msr, void, env, i32, i32) diff --git a/target-arm/kvm32.c b/target-arm/kvm32.c index ea01932a65..d44a7f92b6 100644 --- a/target-arm/kvm32.c +++ b/target-arm/kvm32.c @@ -428,7 +428,7 @@ int kvm_arch_get_registers(CPUState *cs) if (ret) { return ret; } - cpsr_write(env, cpsr, 0xffffffff); + cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw); /* Make sure the current mode regs are properly set */ mode = env->uncached_cpsr & CPSR_M; diff --git a/target-arm/kvm64.c b/target-arm/kvm64.c index 0f1b4d6a00..e8527bf0cc 100644 --- a/target-arm/kvm64.c +++ b/target-arm/kvm64.c @@ -722,8 +722,7 @@ int kvm_arch_get_registers(CPUState *cs) if (is_a64(env)) { pstate_write(env, val); } else { - env->uncached_cpsr = val & CPSR_M; - cpsr_write(env, val, 0xffffffff); + cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); } /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the diff --git a/target-arm/machine.c b/target-arm/machine.c index ed1925ae3e..03a73d950e 100644 --- a/target-arm/machine.c +++ b/target-arm/machine.c @@ -173,9 +173,7 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size) return 0; } - /* Avoid mode switch when restoring CPSR */ - env->uncached_cpsr = val & CPSR_M; - cpsr_write(env, val, 0xffffffff); + cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); return 0; } diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c index 538887ce0c..4881e34177 100644 --- a/target-arm/op_helper.c +++ b/target-arm/op_helper.c @@ -422,7 +422,13 @@ uint32_t HELPER(cpsr_read)(CPUARMState *env) void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask) { - cpsr_write(env, val, mask); + cpsr_write(env, val, mask, CPSRWriteByInstr); +} + +/* Write the CPSR for a 32-bit exception return */ +void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) +{ + cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn); } /* Access to user mode registers from privileged modes. */ @@ -773,8 +779,11 @@ void HELPER(exception_return)(CPUARMState *env) if (!return_to_aa64) { env->aarch64 = 0; - env->uncached_cpsr = spsr & CPSR_M; - cpsr_write(env, spsr, ~0); + /* We do a raw CPSR write because aarch64_sync_64_to_32() + * will sort the register banks out for us, and we've already + * caught all the bad-mode cases in el_from_spsr(). + */ + cpsr_write(env, spsr, ~0, CPSRWriteRaw); if (!arm_singlestep_active(env)) { env->uncached_cpsr &= ~PSTATE_SS; } diff --git a/target-arm/translate.c b/target-arm/translate.c index e69145d401..413f7de686 100644 --- a/target-arm/translate.c +++ b/target-arm/translate.c @@ -4094,7 +4094,7 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc) TCGv_i32 tmp; store_reg(s, 15, pc); tmp = load_cpu_field(spsr); - gen_set_cpsr(tmp, CPSR_ERET_MASK); + gen_helper_cpsr_write_eret(cpu_env, tmp); tcg_temp_free_i32(tmp); s->is_jmp = DISAS_JUMP; } @@ -4102,7 +4102,7 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc) /* Generate a v6 exception return. Marks both values as dead. 
*/ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr) { - gen_set_cpsr(cpsr, CPSR_ERET_MASK); + gen_helper_cpsr_write_eret(cpu_env, cpsr); tcg_temp_free_i32(cpsr); store_reg(s, 15, pc); s->is_jmp = DISAS_JUMP; @@ -9094,7 +9094,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) if (exc_return) { /* Restore CPSR from SPSR. */ tmp = load_cpu_field(spsr); - gen_set_cpsr(tmp, CPSR_ERET_MASK); + gen_helper_cpsr_write_eret(cpu_env, tmp); tcg_temp_free_i32(tmp); s->is_jmp = DISAS_JUMP; } diff --git a/target-mips/cpu.c b/target-mips/cpu.c index 0b3f130cf2..7dc3a44a15 100644 --- a/target-mips/cpu.c +++ b/target-mips/cpu.c @@ -77,6 +77,15 @@ static bool mips_cpu_has_work(CPUState *cs) has_work = false; } } + /* MIPS Release 6 has the ability to halt the CPU. */ + if (env->CP0_Config5 & (1 << CP0C5_VP)) { + if (cs->interrupt_request & CPU_INTERRUPT_WAKE) { + has_work = true; + } + if (!mips_vp_active(env)) { + has_work = false; + } + } return has_work; } diff --git a/target-mips/cpu.h b/target-mips/cpu.h index bd23c2a054..1e2b070cc3 100644 --- a/target-mips/cpu.h +++ b/target-mips/cpu.h @@ -237,6 +237,8 @@ struct CPUMIPSState { int32_t CP0_Index; /* CP0_MVP* are per MVP registers. */ + int32_t CP0_VPControl; +#define CP0VPCtl_DIS 0 int32_t CP0_Random; int32_t CP0_VPEControl; #define CP0VPECo_YSI 21 @@ -286,6 +288,8 @@ struct CPUMIPSState { # define CP0EnLo_RI 31 # define CP0EnLo_XI 30 #endif + int32_t CP0_GlobalNumber; +#define CP0GN_VPId 0 target_ulong CP0_Context; target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM]; int32_t CP0_PageMask; @@ -471,6 +475,7 @@ struct CPUMIPSState { #define CP0C5_XNP 13 #define CP0C5_UFE 9 #define CP0C5_FRE 8 +#define CP0C5_VP 7 #define CP0C5_SBRI 6 #define CP0C5_MVH 5 #define CP0C5_LLB 4 @@ -858,6 +863,26 @@ static inline int mips_vpe_active(CPUMIPSState *env) return active; } +static inline int mips_vp_active(CPUMIPSState *env) +{ + CPUState *other_cs = first_cpu; + + /* Check if the VP disabled other VPs (which means the VP is enabled) */ + if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) { + return 1; + } + + /* Check if the virtual processor is disabled due to a DVP */ + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + if ((&other_cpu->env != env) && + ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) { + return 0; + } + } + return 1; +} + #include "exec/exec-all.h" static inline void compute_hflags(CPUMIPSState *env) diff --git a/target-mips/helper.h b/target-mips/helper.h index 95b9149d89..1bc8bb20d1 100644 --- a/target-mips/helper.h +++ b/target-mips/helper.h @@ -176,6 +176,10 @@ DEF_HELPER_0(dmt, tl) DEF_HELPER_0(emt, tl) DEF_HELPER_1(dvpe, tl, env) DEF_HELPER_1(evpe, tl, env) + +/* R6 Multi-threading */ +DEF_HELPER_1(dvp, tl, env) +DEF_HELPER_1(evp, tl, env) #endif /* !CONFIG_USER_ONLY */ /* microMIPS functions */ diff --git a/target-mips/kvm.c b/target-mips/kvm.c index a8b8b32c26..950bc05b7c 100644 --- a/target-mips/kvm.c +++ b/target-mips/kvm.c @@ -30,6 +30,9 @@ #define DPRINTF(fmt, ...) 
\ do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0) +static int kvm_mips_fpu_cap; +static int kvm_mips_msa_cap; + const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO }; @@ -46,16 +49,39 @@ int kvm_arch_init(MachineState *ms, KVMState *s) /* MIPS has 128 signals */ kvm_set_sigmask_len(s, 16); + kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU); + kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA); + DPRINTF("%s\n", __func__); return 0; } int kvm_arch_init_vcpu(CPUState *cs) { + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; int ret = 0; qemu_add_vm_change_state_handler(kvm_mips_update_state, cs); + if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) { + ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0); + if (ret < 0) { + /* mark unsupported so it gets disabled on reset */ + kvm_mips_fpu_cap = 0; + ret = 0; + } + } + + if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) { + ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0); + if (ret < 0) { + /* mark unsupported so it gets disabled on reset */ + kvm_mips_msa_cap = 0; + ret = 0; + } + } + DPRINTF("%s\n", __func__); return ret; } @@ -64,10 +90,14 @@ void kvm_mips_reset_vcpu(MIPSCPU *cpu) { CPUMIPSState *env = &cpu->env; - if (env->CP0_Config1 & (1 << CP0C1_FP)) { - fprintf(stderr, "Warning: FPU not supported with KVM, disabling\n"); + if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) { + fprintf(stderr, "Warning: KVM does not support FPU, disabling\n"); env->CP0_Config1 &= ~(1 << CP0C1_FP); } + if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) { + fprintf(stderr, "Warning: KVM does not support MSA, disabling\n"); + env->CP0_Config3 &= ~(1 << CP0C3_MSAP); + } DPRINTF("%s\n", __func__); } @@ -88,7 +118,6 @@ static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu) { CPUMIPSState *env = &cpu->env; - DPRINTF("%s: %#x\n", __func__, env->CP0_Cause & (1 << (2 + CP0Ca_IP))); return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP)); } @@ -117,7 +146,6 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) { - DPRINTF("%s\n", __func__); return MEMTXATTRS_UNSPECIFIED; } @@ -230,6 +258,13 @@ int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level) #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0) +#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0) +#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) +#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) +#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) +#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) +#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4) +#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5) #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id, @@ -243,6 +278,17 @@ static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id, return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg); } +static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id, + uint32_t *addr) +{ + struct kvm_one_reg cp0reg = { + .id = reg_id, + .addr = (uintptr_t)addr + }; + + return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg); +} + static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id, target_ulong *addr) { @@ -256,7 +302,18 @@ static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id, } static inline int 
kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id, - uint64_t *addr) + int64_t *addr) +{ + struct kvm_one_reg cp0reg = { + .id = reg_id, + .addr = (uintptr_t)addr + }; + + return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg); +} + +static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id, + uint64_t *addr) { struct kvm_one_reg cp0reg = { .id = reg_id, @@ -277,6 +334,17 @@ static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id, return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); } +static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id, + uint32_t *addr) +{ + struct kvm_one_reg cp0reg = { + .id = reg_id, + .addr = (uintptr_t)addr + }; + + return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); +} + static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id, target_ulong *addr) { @@ -295,7 +363,7 @@ static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id, } static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id, - uint64_t *addr) + int64_t *addr) { struct kvm_one_reg cp0reg = { .id = reg_id, @@ -305,6 +373,50 @@ static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id, return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); } +static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id, + uint64_t *addr) +{ + struct kvm_one_reg cp0reg = { + .id = reg_id, + .addr = (uintptr_t)addr + }; + + return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); +} + +#define KVM_REG_MIPS_CP0_CONFIG_MASK (1U << CP0C0_M) +#define KVM_REG_MIPS_CP0_CONFIG1_MASK ((1U << CP0C1_M) | \ + (1U << CP0C1_FP)) +#define KVM_REG_MIPS_CP0_CONFIG2_MASK (1U << CP0C2_M) +#define KVM_REG_MIPS_CP0_CONFIG3_MASK ((1U << CP0C3_M) | \ + (1U << CP0C3_MSAP)) +#define KVM_REG_MIPS_CP0_CONFIG4_MASK (1U << CP0C4_M) +#define KVM_REG_MIPS_CP0_CONFIG5_MASK ((1U << CP0C5_MSAEn) | \ + (1U << CP0C5_UFE) | \ + (1U << CP0C5_FRE) | \ + (1U << CP0C5_UFR)) + +static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id, + int32_t *addr, int32_t mask) +{ + int err; + int32_t tmp, change; + + err = kvm_mips_get_one_reg(cs, reg_id, &tmp); + if (err < 0) { + return err; + } + + /* only change bits in mask */ + change = (*addr ^ tmp) & mask; + if (!change) { + return 0; + } + + tmp = tmp ^ change; + return kvm_mips_put_one_reg(cs, reg_id, &tmp); +} + /* * We freeze the KVM timer when either the VM clock is stopped or the state is * saved (the state is dirty). 
@@ -322,13 +434,13 @@ static int kvm_mips_save_count(CPUState *cs) int err, ret = 0; /* freeze KVM timer */ - err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); + err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); if (err < 0) { DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err); ret = err; } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) { count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC; - err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); + err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); if (err < 0) { DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err); ret = err; @@ -364,14 +476,14 @@ static int kvm_mips_restore_count(CPUState *cs) int err_dc, err, ret = 0; /* check the timer is frozen */ - err_dc = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); + err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); if (err_dc < 0) { DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc); ret = err_dc; } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) { /* freeze timer (sets COUNT_RESUME for us) */ count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC; - err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); + err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); if (err < 0) { DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err); ret = err; @@ -395,7 +507,7 @@ static int kvm_mips_restore_count(CPUState *cs) /* resume KVM timer */ if (err_dc >= 0) { count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC; - err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); + err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); if (err < 0) { DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err); ret = err; @@ -428,8 +540,8 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state) } else { /* Set clock restore time to now */ count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); - ret = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_RESUME, - &count_resume); + ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME, + &count_resume); if (ret < 0) { fprintf(stderr, "Failed setting COUNT_RESUME\n"); return; @@ -444,6 +556,167 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state) } } +static int kvm_mips_put_fpu_registers(CPUState *cs, int level) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + int err, ret = 0; + unsigned int i; + + /* Only put FPU state if we're emulating a CPU with an FPU */ + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + /* FPU Control Registers */ + if (level == KVM_PUT_FULL_STATE) { + err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR, + &env->active_fpu.fcr0); + if (err < 0) { + DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err); + ret = err; + } + } + err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR, + &env->active_fpu.fcr31); + if (err < 0) { + DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err); + ret = err; + } + + /* + * FPU register state is a subset of MSA vector state, so don't put FPU + * registers if we're emulating a CPU with MSA. 
+ */ + if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) { + /* Floating point registers */ + for (i = 0; i < 32; ++i) { + if (env->CP0_Status & (1 << CP0St_FR)) { + err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i), + &env->active_fpu.fpr[i].d); + } else { + err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i), + &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]); + } + if (err < 0) { + DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err); + ret = err; + } + } + } + } + + /* Only put MSA state if we're emulating a CPU with MSA */ + if (env->CP0_Config3 & (1 << CP0C3_MSAP)) { + /* MSA Control Registers */ + if (level == KVM_PUT_FULL_STATE) { + err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR, + &env->msair); + if (err < 0) { + DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err); + ret = err; + } + } + err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR, + &env->active_tc.msacsr); + if (err < 0) { + DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err); + ret = err; + } + + /* Vector registers (includes FP registers) */ + for (i = 0; i < 32; ++i) { + /* Big endian MSA not supported by QEMU yet anyway */ + err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i), + env->active_fpu.fpr[i].wr.d); + if (err < 0) { + DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err); + ret = err; + } + } + } + + return ret; +} + +static int kvm_mips_get_fpu_registers(CPUState *cs) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + int err, ret = 0; + unsigned int i; + + /* Only get FPU state if we're emulating a CPU with an FPU */ + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + /* FPU Control Registers */ + err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR, + &env->active_fpu.fcr0); + if (err < 0) { + DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR, + &env->active_fpu.fcr31); + if (err < 0) { + DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err); + ret = err; + } else { + restore_fp_status(env); + } + + /* + * FPU register state is a subset of MSA vector state, so don't save FPU + * registers if we're emulating a CPU with MSA.
+ */ + if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) { + /* Floating point registers */ + for (i = 0; i < 32; ++i) { + if (env->CP0_Status & (1 << CP0St_FR)) { + err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i), + &env->active_fpu.fpr[i].d); + } else { + err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i), + &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]); + } + if (err < 0) { + DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err); + ret = err; + } + } + } + } + + /* Only get MSA state if we're emulating a CPU with MSA */ + if (env->CP0_Config3 & (1 << CP0C3_MSAP)) { + /* MSA Control Registers */ + err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR, + &env->msair); + if (err < 0) { + DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR, + &env->active_tc.msacsr); + if (err < 0) { + DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err); + ret = err; + } else { + restore_msa_fp_status(env); + } + + /* Vector registers (includes FP registers) */ + for (i = 0; i < 32; ++i) { + /* Big endian MSA not supported by QEMU yet anyway */ + err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i), + env->active_fpu.fpr[i].wr.d); + if (err < 0) { + DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err); + ret = err; + } + } + } + + return ret; +} + + static int kvm_mips_put_cp0_registers(CPUState *cs, int level) { MIPSCPU *cpu = MIPS_CPU(cs); @@ -522,6 +795,53 @@ static int kvm_mips_put_cp0_registers(CPUState *cs, int level) DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err); ret = err; } + err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid); + if (err < 0) { + DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, + &env->CP0_Config0, + KVM_REG_MIPS_CP0_CONFIG_MASK); + if (err < 0) { + DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, + &env->CP0_Config1, + KVM_REG_MIPS_CP0_CONFIG1_MASK); + if (err < 0) { + DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, + &env->CP0_Config2, + KVM_REG_MIPS_CP0_CONFIG2_MASK); + if (err < 0) { + DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, + &env->CP0_Config3, + KVM_REG_MIPS_CP0_CONFIG3_MASK); + if (err < 0) { + DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, + &env->CP0_Config4, + KVM_REG_MIPS_CP0_CONFIG4_MASK); + if (err < 0) { + DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, + &env->CP0_Config5, + KVM_REG_MIPS_CP0_CONFIG5_MASK); + if (err < 0) { + DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err); + ret = err; + } err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC, &env->CP0_ErrorEPC); if (err < 0) { @@ -608,6 +928,41 @@ static int kvm_mips_get_cp0_registers(CPUState *cs) DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err); ret = err; } + err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid); + if (err < 0) { + DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_get_one_reg(cs, 
KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0); + if (err < 0) { + DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1); + if (err < 0) { + DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2); + if (err < 0) { + DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3); + if (err < 0) { + DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4); + if (err < 0) { + DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err); + ret = err; + } + err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5); + if (err < 0) { + DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err); + ret = err; + } err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC, &env->CP0_ErrorEPC); if (err < 0) { @@ -646,6 +1001,11 @@ int kvm_arch_put_registers(CPUState *cs, int level) return ret; } + ret = kvm_mips_put_fpu_registers(cs, level); + if (ret < 0) { + return ret; + } + return ret; } @@ -673,6 +1033,7 @@ int kvm_arch_get_registers(CPUState *cs) env->active_tc.PC = regs.pc; kvm_mips_get_cp0_registers(cs); + kvm_mips_get_fpu_registers(cs); return ret; } diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c index 684ec92c12..7c5669cc96 100644 --- a/target-mips/op_helper.c +++ b/target-mips/op_helper.c @@ -571,6 +571,14 @@ static bool mips_vpe_is_wfi(MIPSCPU *c) return cpu->halted && mips_vpe_active(env); } +static bool mips_vp_is_wfi(MIPSCPU *c) +{ + CPUState *cpu = CPU(c); + CPUMIPSState *env = &c->env; + + return cpu->halted && mips_vp_active(env); +} + static inline void mips_vpe_wake(MIPSCPU *c) { /* Dont set ->halted = 0 directly, let it be done via cpu_has_work @@ -1840,6 +1848,46 @@ target_ulong helper_yield(CPUMIPSState *env, target_ulong arg) return env->CP0_YQMask; } +/* R6 Multi-threading */ +#ifndef CONFIG_USER_ONLY +target_ulong helper_dvp(CPUMIPSState *env) +{ + CPUState *other_cs = first_cpu; + target_ulong prev = env->CP0_VPControl; + + if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) { + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + /* Turn off all VPs except the one executing the dvp. */ + if (&other_cpu->env != env) { + mips_vpe_sleep(other_cpu); + } + } + env->CP0_VPControl |= (1 << CP0VPCtl_DIS); + } + return prev; +} + +target_ulong helper_evp(CPUMIPSState *env) +{ + CPUState *other_cs = first_cpu; + target_ulong prev = env->CP0_VPControl; + + if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) { + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) { + /* If the VP is WFI, don't disturb its sleep. + * Otherwise, wake it up. 
*/ + mips_vpe_wake(other_cpu); + } + } + env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS); + } + return prev; +} +#endif /* !CONFIG_USER_ONLY */ + #ifndef CONFIG_USER_ONLY /* TLB management */ static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first) diff --git a/target-mips/translate.c b/target-mips/translate.c index 658926d594..a16656931b 100644 --- a/target-mips/translate.c +++ b/target-mips/translate.c @@ -894,6 +894,8 @@ enum { OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0, OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0, OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0, + OPC_DVP = 0x04 | (0 << 3) | (1 << 5) | (0 << 11) | OPC_MFMC0, + OPC_EVP = 0x04 | (0 << 3) | (0 << 5) | (0 << 11) | OPC_MFMC0, }; /* Coprocessor 0 (with rs == C0) */ @@ -1429,6 +1431,7 @@ typedef struct DisasContext { bool mvh; int CP0_LLAddr_shift; bool ps; + bool vp; } DisasContext; enum { @@ -4950,6 +4953,11 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mfc0_mvpconf1(arg, cpu_env); rn = "MVPConf1"; break; + case 4: + CP0_CHECK(ctx->vp); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl)); + rn = "VPControl"; + break; default: goto cp0_unimplemented; } @@ -5077,6 +5085,11 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) } rn = "EntryLo1"; break; + case 1: + CP0_CHECK(ctx->vp); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber)); + rn = "GlobalNumber"; + break; default: goto cp0_unimplemented; } @@ -5597,6 +5610,11 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) /* ignored */ rn = "MVPConf1"; break; + case 4: + CP0_CHECK(ctx->vp); + /* ignored */ + rn = "VPControl"; + break; default: goto cp0_unimplemented; } @@ -5699,6 +5717,11 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mtc0_entrylo1(cpu_env, arg); rn = "EntryLo1"; break; + case 1: + CP0_CHECK(ctx->vp); + /* ignored */ + rn = "GlobalNumber"; + break; default: goto cp0_unimplemented; } @@ -6234,6 +6257,11 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mfc0_mvpconf1(arg, cpu_env); rn = "MVPConf1"; break; + case 4: + CP0_CHECK(ctx->vp); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl)); + rn = "VPControl"; + break; default: goto cp0_unimplemented; } @@ -6335,6 +6363,11 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); rn = "EntryLo1"; break; + case 1: + CP0_CHECK(ctx->vp); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber)); + rn = "GlobalNumber"; + break; default: goto cp0_unimplemented; } @@ -6841,6 +6874,11 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) /* ignored */ rn = "MVPConf1"; break; + case 4: + CP0_CHECK(ctx->vp); + /* ignored */ + rn = "VPControl"; + break; default: goto cp0_unimplemented; } @@ -6941,6 +6979,11 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_dmtc0_entrylo1(cpu_env, arg); rn = "EntryLo1"; break; + case 1: + CP0_CHECK(ctx->vp); + /* ignored */ + rn = "GlobalNumber"; + break; default: goto cp0_unimplemented; } @@ -19080,6 +19123,20 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) gen_helper_evpe(t0, cpu_env); gen_store_gpr(t0, rt); break; + case OPC_DVP: + check_insn(ctx, ISA_MIPS32R6); + if (ctx->vp) { + gen_helper_dvp(t0, cpu_env); + gen_store_gpr(t0, rt); + } + break; + case OPC_EVP: + check_insn(ctx, ISA_MIPS32R6); + if (ctx->vp) { + gen_helper_evp(t0, cpu_env); + gen_store_gpr(t0, rt); 
+ } + break; case OPC_DI: check_insn(ctx, ISA_MIPS32R2); save_cpu_state(ctx, 1); @@ -19611,6 +19668,7 @@ void gen_intermediate_code(CPUMIPSState *env, struct TranslationBlock *tb) ctx.ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1; ctx.ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) || (env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)); + ctx.vp = (env->CP0_Config5 >> CP0C5_VP) & 1; restore_cpu_state(env, &ctx); #ifdef CONFIG_USER_ONLY ctx.mem_idx = MIPS_HFLAG_UM; @@ -19996,6 +20054,7 @@ void cpu_state_reset(CPUMIPSState *env) env->CP0_Random = env->tlb->nb_tlb - 1; env->tlb->tlb_in_use = env->tlb->nb_tlb; env->CP0_Wired = 0; + env->CP0_GlobalNumber = (cs->cpu_index & 0xFF) << CP0GN_VPId; env->CP0_EBase = (cs->cpu_index & 0x3FF); if (kvm_enabled()) { env->CP0_EBase |= 0x40000000; diff --git a/target-mips/translate_init.c b/target-mips/translate_init.c index bb33c7cfeb..cdef59d952 100644 --- a/target-mips/translate_init.c +++ b/target-mips/translate_init.c @@ -665,7 +665,8 @@ static const mips_def_t mips_defs[] = (1 << CP0C3_RXI) | (1 << CP0C3_LPA), .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) | (0xfc << CP0C4_KScrExist), - .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) | + (1 << CP0C5_LLB), .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | (1 << CP0C5_UFE), .CP0_LLAddr_rw_bitmask = 0, diff --git a/tests/test-qga.c b/tests/test-qga.c index 0973b487d2..72a89dec23 100644 --- a/tests/test-qga.c +++ b/tests/test-qga.c @@ -6,7 +6,6 @@ #include <sys/un.h> #include "libqtest.h" -#include "qga/guest-agent-core.h" typedef struct { char *test_dir; @@ -450,8 +449,8 @@ static void test_qga_file_ops(gconstpointer fix) /* seek */ cmd = g_strdup_printf("{'execute': 'guest-file-seek'," " 'arguments': { 'handle': %" PRId64 ", " - " 'offset': %d, 'whence': %d } }", - id, 6, QGA_SEEK_SET); + " 'offset': %d, 'whence': '%s' } }", + id, 6, "set"); ret = qmp_fd(fixture->fd, cmd); qmp_assert_no_error(ret); val = qdict_get_qdict(ret, "return"); @@ -543,8 +542,8 @@ static void test_qga_file_write_read(gconstpointer fix) /* seek to 0 */ cmd = g_strdup_printf("{'execute': 'guest-file-seek'," " 'arguments': { 'handle': %" PRId64 ", " - " 'offset': %d, 'whence': %d } }", - id, 0, QGA_SEEK_SET); + " 'offset': %d, 'whence': '%s' } }", + id, 0, "set"); ret = qmp_fd(fixture->fd, cmd); qmp_assert_no_error(ret); val = qdict_get_qdict(ret, "return"); diff --git a/trace-events b/trace-events index 61a133f6ee..075ec27100 100644 --- a/trace-events +++ b/trace-events @@ -1409,8 +1409,6 @@ xics_ics_write_xive(int nr, int srcno, int server, uint8_t priority) "ics_write_ xics_ics_reject(int nr, int srcno) "reject irq %#x [src %d]" xics_ics_eoi(int nr) "ics_eoi: irq %#x" xics_alloc(int src, int irq) "source#%d, irq %d" -xics_alloc_failed_hint(int src, int irq) "source#%d, irq %d is already in use" -xics_alloc_failed_no_left(int src) "source#%d, no irq left" xics_alloc_block(int src, int first, int num, bool lsi, int align) "source#%d, first irq %d, %d irqs, lsi=%d, alignnum %d" xics_ics_free(int src, int irq, int num) "Source#%d, first irq %d, %d irqs" xics_ics_free_warn(int src, int irq) "Source#%d, irq %d is already free" |