Diffstat (limited to 'hw')
37 files changed, 2865 insertions, 1151 deletions
diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c index 6e8293aac9..3ac2045a95 100644 --- a/hw/acpi/cpu_hotplug.c +++ b/hw/acpi/cpu_hotplug.c @@ -128,7 +128,7 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine, Aml *one = aml_int(1); MachineClass *mc = MACHINE_GET_CLASS(machine); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine); - PCMachineState *pcms = PC_MACHINE(machine); + X86MachineState *x86ms = X86_MACHINE(machine); /* * _MAT method - creates an madt apic buffer @@ -236,9 +236,9 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine, /* The current AML generator can cover the APIC ID range [0..255], * inclusive, for VCPU hotplug. */ QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256); - if (pcms->apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) { + if (x86ms->apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) { error_report("max_cpus is too large. APIC ID of last CPU is %u", - pcms->apic_id_limit - 1); + x86ms->apic_id_limit - 1); exit(1); } @@ -315,8 +315,8 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine, * ith up to 255 elements. Windows guests up to win2k8 fail when * VarPackageOp is used. */ - pkg = pcms->apic_id_limit <= 255 ? aml_package(pcms->apic_id_limit) : - aml_varpackage(pcms->apic_id_limit); + pkg = x86ms->apic_id_limit <= 255 ? aml_package(x86ms->apic_id_limit) : + aml_varpackage(x86ms->apic_id_limit); for (i = 0, apic_idx = 0; i < apic_ids->len; i++) { int apic_id = apic_ids->cpus[i].arch_id; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index 14e9f85b8b..9fa2eaf890 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -42,9 +42,9 @@ */ static VirtIOFeature feature_sizes[] = { {.flags = 1ULL << VIRTIO_BLK_F_DISCARD, - .end = virtio_endof(struct virtio_blk_config, discard_sector_alignment)}, + .end = endof(struct virtio_blk_config, discard_sector_alignment)}, {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, - .end = virtio_endof(struct virtio_blk_config, write_zeroes_may_unmap)}, + .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, {} }; @@ -1052,7 +1052,7 @@ static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f) qemu_put_be32(f, virtio_get_queue_index(req->vq)); } - qemu_put_virtqueue_element(f, &req->elem); + qemu_put_virtqueue_element(vdev, f, &req->elem); req = req->next; } qemu_put_sbyte(f, 0); @@ -1206,10 +1206,15 @@ static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOBlock *s = VIRTIO_BLK(dev); + VirtIOBlkConf *conf = &s->conf; + unsigned i; blk_drain(s->blk); virtio_blk_data_plane_destroy(s->dataplane); s->dataplane = NULL; + for (i = 0; i < conf->num_queues; i++) { + virtio_del_queue(vdev, i); + } qemu_del_vm_change_state_handler(s->change); blockdev_mark_auto_del(s->blk); virtio_cleanup(vdev); diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index 4e0ed829ae..33259042a9 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -708,7 +708,7 @@ static void virtio_serial_save_device(VirtIODevice *vdev, QEMUFile *f) if (elem_popped) { qemu_put_be32s(f, &port->iov_idx); qemu_put_be64s(f, &port->iov_offset); - qemu_put_virtqueue_element(f, port->elem); + qemu_put_virtqueue_element(vdev, f, port->elem); } } } diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index c5c9d4900e..b25bb6d78a 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -92,6 +92,16 @@ config Q35 select SMBIOS select FW_CFG_DMA +config MICROVM + bool + imply SERIAL_ISA + select 
ISA_BUS + select APIC + select IOAPIC + select I8259 + select MC146818RTC + select VIRTIO_MMIO + config VTD bool diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs index d3374e0831..0d195b5210 100644 --- a/hw/i386/Makefile.objs +++ b/hw/i386/Makefile.objs @@ -1,8 +1,10 @@ obj-$(CONFIG_KVM) += kvm/ obj-y += e820_memory_layout.o multiboot.o +obj-y += x86.o obj-y += pc.o obj-$(CONFIG_I440FX) += pc_piix.o obj-$(CONFIG_Q35) += pc_q35.o +obj-$(CONFIG_MICROVM) += microvm.o obj-y += fw_cfg.o pc_sysfw.o obj-y += x86-iommu.o obj-$(CONFIG_VTD) += intel_iommu.o diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index d9435ba0b3..9dd3dbb16c 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -361,6 +361,7 @@ static void build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms) { MachineClass *mc = MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(pcms); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(pcms)); int madt_start = table_data->len; AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev); @@ -390,7 +391,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms) io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS); io_apic->interrupt = cpu_to_le32(0); - if (pcms->apic_xrupt_override) { + if (x86ms->apic_xrupt_override) { intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; intsrcovr->length = sizeof(*intsrcovr); @@ -1831,6 +1832,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, CrsRangeSet crs_range_set; PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(machine); + X86MachineState *x86ms = X86_MACHINE(machine); AcpiMcfgInfo mcfg; uint32_t nr_mem = machine->ram_slots; int root_bus_limit = 0xFF; @@ -2103,7 +2105,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, * with half of the 16-bit control register. Hence, the total size * of the i/o region used is FW_CFG_CTL_SIZE; when using DMA, the * DMA control register is located at FW_CFG_DMA_IO_BASE + 4 */ - uint8_t io_size = object_property_get_bool(OBJECT(pcms->fw_cfg), + uint8_t io_size = object_property_get_bool(OBJECT(x86ms->fw_cfg), "dma_enabled", NULL) ? 
ROUND_UP(FW_CFG_CTL_SIZE, 4) + sizeof(dma_addr_t) : FW_CFG_CTL_SIZE; @@ -2336,6 +2338,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) int srat_start, numa_start, slots; uint64_t mem_len, mem_base, next_base; MachineClass *mc = MACHINE_GET_CLASS(machine); + X86MachineState *x86ms = X86_MACHINE(machine); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine); PCMachineState *pcms = PC_MACHINE(machine); ram_addr_t hotplugabble_address_space_size = @@ -2406,16 +2409,16 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) } /* Cut out the ACPI_PCI hole */ - if (mem_base <= pcms->below_4g_mem_size && - next_base > pcms->below_4g_mem_size) { - mem_len -= next_base - pcms->below_4g_mem_size; + if (mem_base <= x86ms->below_4g_mem_size && + next_base > x86ms->below_4g_mem_size) { + mem_len -= next_base - x86ms->below_4g_mem_size; if (mem_len > 0) { numamem = acpi_data_push(table_data, sizeof *numamem); build_srat_memory(numamem, mem_base, mem_len, i - 1, MEM_AFFINITY_ENABLED); } mem_base = 1ULL << 32; - mem_len = next_base - pcms->below_4g_mem_size; + mem_len = next_base - x86ms->below_4g_mem_size; next_base = mem_base + mem_len; } @@ -2634,6 +2637,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) { PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(machine); GArray *table_offsets; unsigned facs, dsdt, rsdt, fadt; AcpiPmInfo pm; @@ -2795,7 +2799,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) */ int legacy_aml_len = pcmc->legacy_acpi_table_size + - ACPI_BUILD_LEGACY_CPU_AML_SIZE * pcms->apic_id_limit; + ACPI_BUILD_LEGACY_CPU_AML_SIZE * x86ms->apic_id_limit; int legacy_table_size = ROUND_UP(tables_blob->len - aml_len + legacy_aml_len, ACPI_BUILD_ALIGN_SIZE); @@ -2885,13 +2889,14 @@ void acpi_setup(void) { PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(pcms); AcpiBuildTables tables; AcpiBuildState *build_state; Object *vmgenid_dev; TPMIf *tpm; static FwCfgTPMConfig tpm_config; - if (!pcms->fw_cfg) { + if (!x86ms->fw_cfg) { ACPI_BUILD_DPRINTF("No fw cfg. 
Bailing out.\n"); return; } @@ -2922,7 +2927,7 @@ void acpi_setup(void) acpi_add_rom_blob(acpi_build_update, build_state, tables.linker->cmd_blob, "etc/table-loader", 0); - fw_cfg_add_file(pcms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, + fw_cfg_add_file(x86ms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data, acpi_data_len(tables.tcpalog)); tpm = tpm_find(); @@ -2932,13 +2937,13 @@ void acpi_setup(void) .tpm_version = tpm_get_version(tpm), .tpmppi_version = TPM_PPI_VERSION_1_30 }; - fw_cfg_add_file(pcms->fw_cfg, "etc/tpm/config", + fw_cfg_add_file(x86ms->fw_cfg, "etc/tpm/config", &tpm_config, sizeof tpm_config); } vmgenid_dev = find_vmgenid_dev(); if (vmgenid_dev) { - vmgenid_add_fw_cfg(VMGENID(vmgenid_dev), pcms->fw_cfg, + vmgenid_add_fw_cfg(VMGENID(vmgenid_dev), x86ms->fw_cfg, tables.vmgenid); } @@ -2951,7 +2956,7 @@ void acpi_setup(void) uint32_t rsdp_size = acpi_data_len(tables.rsdp); build_state->rsdp = g_memdup(tables.rsdp->data, rsdp_size); - fw_cfg_add_file_callback(pcms->fw_cfg, ACPI_BUILD_RSDP_FILE, + fw_cfg_add_file_callback(x86ms->fw_cfg, ACPI_BUILD_RSDP_FILE, acpi_build_update, NULL, build_state, build_state->rsdp, rsdp_size, true); build_state->rsdp_mr = NULL; diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index d3726361dd..d55dbf07fc 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -1540,6 +1540,7 @@ static void amdvi_realize(DeviceState *dev, Error **err) X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); + X86MachineState *x86ms = X86_MACHINE(ms); PCIBus *bus = pcms->bus; s->iotlb = g_hash_table_new_full(amdvi_uint64_hash, @@ -1568,7 +1569,7 @@ static void amdvi_realize(DeviceState *dev, Error **err) } /* Pseudo address space under root PCI bus. */ - pcms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); + x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); /* set up MMIO */ memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio", diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 771bed25c9..14e4e6ad62 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -3733,6 +3733,7 @@ static void vtd_realize(DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); + X86MachineState *x86ms = X86_MACHINE(ms); PCIBus *bus = pcms->bus; IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); @@ -3773,7 +3774,7 @@ static void vtd_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR); pci_setup_iommu(bus, vtd_host_dma_iommu, dev); /* Pseudo address space under root PCI bus. */ - pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); + x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); qemu_add_machine_init_done_notifier(&vtd_machine_done_notify); } diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c new file mode 100644 index 0000000000..8aacd6c8d1 --- /dev/null +++ b/hw/i386/microvm.c @@ -0,0 +1,572 @@ +/* + * Copyright (c) 2018 Intel Corporation + * Copyright (c) 2019 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/cutils.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qapi/qapi-visit-common.h" +#include "sysemu/sysemu.h" +#include "sysemu/cpus.h" +#include "sysemu/numa.h" +#include "sysemu/reset.h" + +#include "hw/loader.h" +#include "hw/irq.h" +#include "hw/kvm/clock.h" +#include "hw/i386/microvm.h" +#include "hw/i386/x86.h" +#include "hw/i386/pc.h" +#include "target/i386/cpu.h" +#include "hw/timer/i8254.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/char/serial.h" +#include "hw/i386/topology.h" +#include "hw/i386/e820_memory_layout.h" +#include "hw/i386/fw_cfg.h" +#include "hw/virtio/virtio-mmio.h" + +#include "cpu.h" +#include "elf.h" +#include "kvm_i386.h" +#include "hw/xen/start_info.h" + +#define MICROVM_BIOS_FILENAME "bios-microvm.bin" + +static void microvm_set_rtc(MicrovmMachineState *mms, ISADevice *s) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + int val; + + val = MIN(x86ms->below_4g_mem_size / KiB, 640); + rtc_set_memory(s, 0x15, val); + rtc_set_memory(s, 0x16, val >> 8); + /* extended memory (next 64MiB) */ + if (x86ms->below_4g_mem_size > 1 * MiB) { + val = (x86ms->below_4g_mem_size - 1 * MiB) / KiB; + } else { + val = 0; + } + if (val > 65535) { + val = 65535; + } + rtc_set_memory(s, 0x17, val); + rtc_set_memory(s, 0x18, val >> 8); + rtc_set_memory(s, 0x30, val); + rtc_set_memory(s, 0x31, val >> 8); + /* memory between 16MiB and 4GiB */ + if (x86ms->below_4g_mem_size > 16 * MiB) { + val = (x86ms->below_4g_mem_size - 16 * MiB) / (64 * KiB); + } else { + val = 0; + } + if (val > 65535) { + val = 65535; + } + rtc_set_memory(s, 0x34, val); + rtc_set_memory(s, 0x35, val >> 8); + /* memory above 4GiB */ + val = x86ms->above_4g_mem_size / 65536; + rtc_set_memory(s, 0x5b, val); + rtc_set_memory(s, 0x5c, val >> 8); + rtc_set_memory(s, 0x5d, val >> 16); +} + +static void microvm_gsi_handler(void *opaque, int n, int level) +{ + GSIState *s = opaque; + + qemu_set_irq(s->ioapic_irq[n], level); +} + +static void microvm_devices_init(MicrovmMachineState *mms) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + ISABus *isa_bus; + ISADevice *rtc_state; + GSIState *gsi_state; + int i; + + /* Core components */ + + gsi_state = g_malloc0(sizeof(*gsi_state)); + if (mms->pic == ON_OFF_AUTO_ON || mms->pic == ON_OFF_AUTO_AUTO) { + x86ms->gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS); + } else { + x86ms->gsi = qemu_allocate_irqs(microvm_gsi_handler, + gsi_state, GSI_NUM_PINS); + } + + isa_bus = isa_bus_new(NULL, get_system_memory(), get_system_io(), + &error_abort); + isa_bus_irqs(isa_bus, x86ms->gsi); + + ioapic_init_gsi(gsi_state, "machine"); + + kvmclock_create(); + + for (i = 0; i < VIRTIO_NUM_TRANSPORTS; i++) { + sysbus_create_simple("virtio-mmio", + VIRTIO_MMIO_BASE + i * 512, + x86ms->gsi[VIRTIO_IRQ_BASE + i]); + } + + /* Optional and legacy devices */ + + if (mms->pic == ON_OFF_AUTO_ON || mms->pic == ON_OFF_AUTO_AUTO) { + qemu_irq *i8259; + + i8259 = i8259_init(isa_bus, pc_allocate_cpu_irq()); + for (i = 0; i < ISA_NUM_IRQS; i++) { + gsi_state->i8259_irq[i] = i8259[i]; + 
} + g_free(i8259); + } + + if (mms->pit == ON_OFF_AUTO_ON || mms->pit == ON_OFF_AUTO_AUTO) { + if (kvm_pit_in_kernel()) { + kvm_pit_init(isa_bus, 0x40); + } else { + i8254_pit_init(isa_bus, 0x40, 0, NULL); + } + } + + if (mms->rtc == ON_OFF_AUTO_ON || + (mms->rtc == ON_OFF_AUTO_AUTO && !kvm_enabled())) { + rtc_state = mc146818_rtc_init(isa_bus, 2000, NULL); + microvm_set_rtc(mms, rtc_state); + } + + if (mms->isa_serial) { + serial_hds_isa_init(isa_bus, 0, 1); + } + + if (bios_name == NULL) { + bios_name = MICROVM_BIOS_FILENAME; + } + x86_bios_rom_init(get_system_memory(), true); +} + +static void microvm_memory_init(MicrovmMachineState *mms) +{ + MachineState *machine = MACHINE(mms); + X86MachineState *x86ms = X86_MACHINE(mms); + MemoryRegion *ram, *ram_below_4g, *ram_above_4g; + MemoryRegion *system_memory = get_system_memory(); + FWCfgState *fw_cfg; + ram_addr_t lowmem; + int i; + + /* + * Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory + * and 256 Mbytes for PCI Express Enhanced Configuration Access Mapping + * also known as MMCFG). + * If it doesn't, we need to split it in chunks below and above 4G. + * In any case, try to make sure that guest addresses aligned at + * 1G boundaries get mapped to host addresses aligned at 1G boundaries. + */ + if (machine->ram_size >= 0xb0000000) { + lowmem = 0x80000000; + } else { + lowmem = 0xb0000000; + } + + /* + * Handle the machine opt max-ram-below-4g. It is basically doing + * min(qemu limit, user limit). + */ + if (!x86ms->max_ram_below_4g) { + x86ms->max_ram_below_4g = 4 * GiB; + } + if (lowmem > x86ms->max_ram_below_4g) { + lowmem = x86ms->max_ram_below_4g; + if (machine->ram_size - lowmem > lowmem && + lowmem & (1 * GiB - 1)) { + warn_report("There is possibly poor performance as the ram size " + " (0x%" PRIx64 ") is more then twice the size of" + " max-ram-below-4g (%"PRIu64") and" + " max-ram-below-4g is not a multiple of 1G.", + (uint64_t)machine->ram_size, x86ms->max_ram_below_4g); + } + } + + if (machine->ram_size > lowmem) { + x86ms->above_4g_mem_size = machine->ram_size - lowmem; + x86ms->below_4g_mem_size = lowmem; + } else { + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = machine->ram_size; + } + + ram = g_malloc(sizeof(*ram)); + memory_region_allocate_system_memory(ram, NULL, "microvm.ram", + machine->ram_size); + + ram_below_4g = g_malloc(sizeof(*ram_below_4g)); + memory_region_init_alias(ram_below_4g, NULL, "ram-below-4g", ram, + 0, x86ms->below_4g_mem_size); + memory_region_add_subregion(system_memory, 0, ram_below_4g); + + e820_add_entry(0, x86ms->below_4g_mem_size, E820_RAM); + + if (x86ms->above_4g_mem_size > 0) { + ram_above_4g = g_malloc(sizeof(*ram_above_4g)); + memory_region_init_alias(ram_above_4g, NULL, "ram-above-4g", ram, + x86ms->below_4g_mem_size, + x86ms->above_4g_mem_size); + memory_region_add_subregion(system_memory, 0x100000000ULL, + ram_above_4g); + e820_add_entry(0x100000000ULL, x86ms->above_4g_mem_size, E820_RAM); + } + + fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, + &address_space_memory); + + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, machine->smp.cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, machine->smp.max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size); + fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, kvm_allows_irq0_override()); + fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE, + &e820_reserve, sizeof(e820_reserve)); + fw_cfg_add_file(fw_cfg, "etc/e820", e820_table, + sizeof(struct e820_entry) * e820_get_num_entries()); + + 
rom_set_fw(fw_cfg); + + if (machine->kernel_filename != NULL) { + x86_load_linux(x86ms, fw_cfg, 0, true, true); + } + + if (mms->option_roms) { + for (i = 0; i < nb_option_roms; i++) { + rom_add_option(option_rom[i].name, option_rom[i].bootindex); + } + } + + x86ms->fw_cfg = fw_cfg; + x86ms->ioapic_as = &address_space_memory; +} + +static gchar *microvm_get_mmio_cmdline(gchar *name) +{ + gchar *cmdline; + gchar *separator; + long int index; + int ret; + + separator = g_strrstr(name, "."); + if (!separator) { + return NULL; + } + + if (qemu_strtol(separator + 1, NULL, 10, &index) != 0) { + return NULL; + } + + cmdline = g_malloc0(VIRTIO_CMDLINE_MAXLEN); + ret = g_snprintf(cmdline, VIRTIO_CMDLINE_MAXLEN, + " virtio_mmio.device=512@0x%lx:%ld", + VIRTIO_MMIO_BASE + index * 512, + VIRTIO_IRQ_BASE + index); + if (ret < 0 || ret >= VIRTIO_CMDLINE_MAXLEN) { + g_free(cmdline); + return NULL; + } + + return cmdline; +} + +static void microvm_fix_kernel_cmdline(MachineState *machine) +{ + X86MachineState *x86ms = X86_MACHINE(machine); + BusState *bus; + BusChild *kid; + char *cmdline; + + /* + * Find MMIO transports with attached devices, and add them to the kernel + * command line. + * + * Yes, this is a hack, but one that heavily improves the UX without + * introducing any significant issues. + */ + cmdline = g_strdup(machine->kernel_cmdline); + bus = sysbus_get_default(); + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + ObjectClass *class = object_get_class(OBJECT(dev)); + + if (class == object_class_by_name(TYPE_VIRTIO_MMIO)) { + VirtIOMMIOProxy *mmio = VIRTIO_MMIO(OBJECT(dev)); + VirtioBusState *mmio_virtio_bus = &mmio->bus; + BusState *mmio_bus = &mmio_virtio_bus->parent_obj; + + if (!QTAILQ_EMPTY(&mmio_bus->children)) { + gchar *mmio_cmdline = microvm_get_mmio_cmdline(mmio_bus->name); + if (mmio_cmdline) { + char *newcmd = g_strjoin(NULL, cmdline, mmio_cmdline, NULL); + g_free(mmio_cmdline); + g_free(cmdline); + cmdline = newcmd; + } + } + } + } + + fw_cfg_modify_i32(x86ms->fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(cmdline) + 1); + fw_cfg_modify_string(x86ms->fw_cfg, FW_CFG_CMDLINE_DATA, cmdline); +} + +static void microvm_machine_state_init(MachineState *machine) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(machine); + X86MachineState *x86ms = X86_MACHINE(machine); + Error *local_err = NULL; + + microvm_memory_init(mms); + + x86_cpus_init(x86ms, CPU_VERSION_LATEST); + if (local_err) { + error_report_err(local_err); + exit(1); + } + + microvm_devices_init(mms); +} + +static void microvm_machine_reset(MachineState *machine) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(machine); + CPUState *cs; + X86CPU *cpu; + + if (machine->kernel_filename != NULL && + mms->auto_kernel_cmdline && !mms->kernel_cmdline_fixed) { + microvm_fix_kernel_cmdline(machine); + mms->kernel_cmdline_fixed = true; + } + + qemu_devices_reset(); + + CPU_FOREACH(cs) { + cpu = X86_CPU(cs); + + if (cpu->apic_state) { + device_reset(cpu->apic_state); + } + } +} + +static void microvm_machine_get_pic(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + OnOffAuto pic = mms->pic; + + visit_type_OnOffAuto(v, name, &pic, errp); +} + +static void microvm_machine_set_pic(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &mms->pic, errp); +} + +static void microvm_machine_get_pit(Object *obj, Visitor *v, const char 
*name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + OnOffAuto pit = mms->pit; + + visit_type_OnOffAuto(v, name, &pit, errp); +} + +static void microvm_machine_set_pit(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &mms->pit, errp); +} + +static void microvm_machine_get_rtc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + OnOffAuto rtc = mms->rtc; + + visit_type_OnOffAuto(v, name, &rtc, errp); +} + +static void microvm_machine_set_rtc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &mms->rtc, errp); +} + +static bool microvm_machine_get_isa_serial(Object *obj, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + return mms->isa_serial; +} + +static void microvm_machine_set_isa_serial(Object *obj, bool value, + Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + mms->isa_serial = value; +} + +static bool microvm_machine_get_option_roms(Object *obj, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + return mms->option_roms; +} + +static void microvm_machine_set_option_roms(Object *obj, bool value, + Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + mms->option_roms = value; +} + +static bool microvm_machine_get_auto_kernel_cmdline(Object *obj, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + return mms->auto_kernel_cmdline; +} + +static void microvm_machine_set_auto_kernel_cmdline(Object *obj, bool value, + Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + mms->auto_kernel_cmdline = value; +} + +static void microvm_machine_initfn(Object *obj) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + /* Configuration */ + mms->pic = ON_OFF_AUTO_AUTO; + mms->pit = ON_OFF_AUTO_AUTO; + mms->rtc = ON_OFF_AUTO_AUTO; + mms->isa_serial = true; + mms->option_roms = true; + mms->auto_kernel_cmdline = true; + + /* State */ + mms->kernel_cmdline_fixed = false; +} + +static void microvm_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = microvm_machine_state_init; + + mc->family = "microvm_i386"; + mc->desc = "microvm (i386)"; + mc->units_per_default_bus = 1; + mc->no_floppy = 1; + mc->max_cpus = 288; + mc->has_hotpluggable_cpus = false; + mc->auto_enable_numa_with_memhp = false; + mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE; + mc->nvdimm_supported = false; + + /* Avoid relying too much on kernel components */ + mc->default_kernel_irqchip_split = true; + + /* Machine class handlers */ + mc->reset = microvm_machine_reset; + + object_class_property_add(oc, MICROVM_MACHINE_PIC, "OnOffAuto", + microvm_machine_get_pic, + microvm_machine_set_pic, + NULL, NULL, &error_abort); + object_class_property_set_description(oc, MICROVM_MACHINE_PIC, + "Enable i8259 PIC", &error_abort); + + object_class_property_add(oc, MICROVM_MACHINE_PIT, "OnOffAuto", + microvm_machine_get_pit, + microvm_machine_set_pit, + NULL, NULL, &error_abort); + object_class_property_set_description(oc, MICROVM_MACHINE_PIT, + "Enable i8254 PIT", &error_abort); + + object_class_property_add(oc, MICROVM_MACHINE_RTC, "OnOffAuto", + microvm_machine_get_rtc, + microvm_machine_set_rtc, + NULL, NULL, &error_abort); + 
object_class_property_set_description(oc, MICROVM_MACHINE_RTC, + "Enable MC146818 RTC", &error_abort); + + object_class_property_add_bool(oc, MICROVM_MACHINE_ISA_SERIAL, + microvm_machine_get_isa_serial, + microvm_machine_set_isa_serial, + &error_abort); + object_class_property_set_description(oc, MICROVM_MACHINE_ISA_SERIAL, + "Set off to disable the instantiation an ISA serial port", + &error_abort); + + object_class_property_add_bool(oc, MICROVM_MACHINE_OPTION_ROMS, + microvm_machine_get_option_roms, + microvm_machine_set_option_roms, + &error_abort); + object_class_property_set_description(oc, MICROVM_MACHINE_OPTION_ROMS, + "Set off to disable loading option ROMs", &error_abort); + + object_class_property_add_bool(oc, MICROVM_MACHINE_AUTO_KERNEL_CMDLINE, + microvm_machine_get_auto_kernel_cmdline, + microvm_machine_set_auto_kernel_cmdline, + &error_abort); + object_class_property_set_description(oc, + MICROVM_MACHINE_AUTO_KERNEL_CMDLINE, + "Set off to disable adding virtio-mmio devices to the kernel cmdline", + &error_abort); +} + +static const TypeInfo microvm_machine_info = { + .name = TYPE_MICROVM_MACHINE, + .parent = TYPE_X86_MACHINE, + .instance_size = sizeof(MicrovmMachineState), + .instance_init = microvm_machine_initfn, + .class_size = sizeof(MicrovmMachineClass), + .class_init = microvm_class_init, + .interfaces = (InterfaceInfo[]) { + { } + }, +}; + +static void microvm_machine_init(void) +{ + type_register_static(&microvm_machine_info); +} +type_init(microvm_machine_init); diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 51b72439b4..96715f8a3f 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "hw/i386/x86.h" #include "hw/i386/pc.h" #include "hw/char/serial.h" #include "hw/char/parallel.h" @@ -68,6 +69,7 @@ #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/option.h" +#include "qemu/cutils.h" #include "hw/acpi/acpi.h" #include "hw/acpi/cpu_hotplug.h" #include "hw/boards.h" @@ -77,7 +79,6 @@ #include "qapi/qapi-visit-common.h" #include "qapi/visitor.h" #include "hw/core/cpu.h" -#include "hw/nmi.h" #include "hw/usb.h" #include "hw/i386/intel_iommu.h" #include "hw/net/ne2000-isa.h" @@ -102,9 +103,6 @@ struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX}; -/* Physical Address of PVH entry point read from kernel ELF NOTE */ -static size_t pvh_start_addr; - GlobalProperty pc_compat_4_1[] = {}; const size_t pc_compat_4_1_len = G_N_ELEMENTS(pc_compat_4_1); @@ -357,6 +355,21 @@ void gsi_handler(void *opaque, int n, int level) qemu_set_irq(s->ioapic_irq[n], level); } +GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled) +{ + GSIState *s; + + s = g_new0(GSIState, 1); + if (kvm_ioapic_in_kernel()) { + kvm_pc_setup_irq_routing(pci_enabled); + *irqs = qemu_allocate_irqs(kvm_pc_gsi_handler, s, GSI_NUM_PINS); + } else { + *irqs = qemu_allocate_irqs(gsi_handler, s, GSI_NUM_PINS); + } + + return s; +} + static void ioport80_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { @@ -368,23 +381,12 @@ static uint64_t ioport80_read(void *opaque, hwaddr addr, unsigned size) } /* MSDOS compatibility mode FPU exception support */ -static qemu_irq ferr_irq; - -void pc_register_ferr_irq(qemu_irq irq) -{ - ferr_irq = irq; -} - -/* XXX: add IGNNE support */ -void cpu_set_ferr(CPUX86State *s) -{ - qemu_irq_raise(ferr_irq); -} - static void ioportF0_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { - qemu_irq_lower(ferr_irq); + if (tcg_enabled()) { + cpu_set_ignne(); + } } static uint64_t
ioportF0_read(void *opaque, hwaddr addr, unsigned size) @@ -681,17 +683,18 @@ void pc_cmos_init(PCMachineState *pcms, { int val; static pc_cmos_init_late_arg arg; + X86MachineState *x86ms = X86_MACHINE(pcms); /* various important CMOS locations needed by PC/Bochs bios */ /* memory size */ /* base memory (first MiB) */ - val = MIN(pcms->below_4g_mem_size / KiB, 640); + val = MIN(x86ms->below_4g_mem_size / KiB, 640); rtc_set_memory(s, 0x15, val); rtc_set_memory(s, 0x16, val >> 8); /* extended memory (next 64MiB) */ - if (pcms->below_4g_mem_size > 1 * MiB) { - val = (pcms->below_4g_mem_size - 1 * MiB) / KiB; + if (x86ms->below_4g_mem_size > 1 * MiB) { + val = (x86ms->below_4g_mem_size - 1 * MiB) / KiB; } else { val = 0; } @@ -702,8 +705,8 @@ void pc_cmos_init(PCMachineState *pcms, rtc_set_memory(s, 0x30, val); rtc_set_memory(s, 0x31, val >> 8); /* memory between 16MiB and 4GiB */ - if (pcms->below_4g_mem_size > 16 * MiB) { - val = (pcms->below_4g_mem_size - 16 * MiB) / (64 * KiB); + if (x86ms->below_4g_mem_size > 16 * MiB) { + val = (x86ms->below_4g_mem_size - 16 * MiB) / (64 * KiB); } else { val = 0; } @@ -712,14 +715,14 @@ void pc_cmos_init(PCMachineState *pcms, rtc_set_memory(s, 0x34, val); rtc_set_memory(s, 0x35, val >> 8); /* memory above 4GiB */ - val = pcms->above_4g_mem_size / 65536; + val = x86ms->above_4g_mem_size / 65536; rtc_set_memory(s, 0x5b, val); rtc_set_memory(s, 0x5c, val >> 8); rtc_set_memory(s, 0x5d, val >> 16); object_property_add_link(OBJECT(pcms), "rtc_state", TYPE_ISA_DEVICE, - (Object **)&pcms->rtc, + (Object **)&x86ms->rtc, object_property_allow_set_link, OBJ_PROP_LINK_STRONG, &error_abort); object_property_set_link(OBJECT(pcms), OBJECT(s), @@ -866,478 +869,6 @@ static void handle_a20_line_change(void *opaque, int irq, int level) x86_cpu_set_a20(cpu, level); } -/* Calculates initial APIC ID for a specific CPU index - * - * Currently we need to be able to calculate the APIC ID from the CPU index - * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have - * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of - * all CPUs up to max_cpus. - */ -static uint32_t x86_cpu_apic_id_from_index(PCMachineState *pcms, - unsigned int cpu_index) -{ - MachineState *ms = MACHINE(pcms); - PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); - uint32_t correct_id; - static bool warned; - - correct_id = x86_apicid_from_cpu_idx(pcms->smp_dies, ms->smp.cores, - ms->smp.threads, cpu_index); - if (pcmc->compat_apic_id_mode) { - if (cpu_index != correct_id && !warned && !qtest_enabled()) { - error_report("APIC IDs set in compatibility mode, " - "CPU topology won't match the configuration"); - warned = true; - } - return cpu_index; - } else { - return correct_id; - } -} - -static long get_file_size(FILE *f) -{ - long where, size; - - /* XXX: on Unix systems, using fstat() probably makes more sense */ - - where = ftell(f); - fseek(f, 0, SEEK_END); - size = ftell(f); - fseek(f, where, SEEK_SET); - - return size; -} - -struct setup_data { - uint64_t next; - uint32_t type; - uint32_t len; - uint8_t data[0]; -} __attribute__((packed)); - - -/* - * The entry point into the kernel for PVH boot is different from - * the native entry point. The PVH entry is defined by the x86/HVM - * direct boot ABI and is available in an ELFNOTE in the kernel binary. 
- * - * This function is passed to load_elf() when it is called from - * load_elfboot() which then additionally checks for an ELF Note of - * type XEN_ELFNOTE_PHYS32_ENTRY and passes it to this function to - * parse the PVH entry address from the ELF Note. - * - * Due to trickery in elf_opts.h, load_elf() is actually available as - * load_elf32() or load_elf64() and this routine needs to be able - * to deal with being called as 32 or 64 bit. - * - * The address of the PVH entry point is saved to the 'pvh_start_addr' - * global variable. (although the entry point is 32-bit, the kernel - * binary can be either 32-bit or 64-bit). - */ -static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64) -{ - size_t *elf_note_data_addr; - - /* Check if ELF Note header passed in is valid */ - if (arg1 == NULL) { - return 0; - } - - if (is64) { - struct elf64_note *nhdr64 = (struct elf64_note *)arg1; - uint64_t nhdr_size64 = sizeof(struct elf64_note); - uint64_t phdr_align = *(uint64_t *)arg2; - uint64_t nhdr_namesz = nhdr64->n_namesz; - - elf_note_data_addr = - ((void *)nhdr64) + nhdr_size64 + - QEMU_ALIGN_UP(nhdr_namesz, phdr_align); - } else { - struct elf32_note *nhdr32 = (struct elf32_note *)arg1; - uint32_t nhdr_size32 = sizeof(struct elf32_note); - uint32_t phdr_align = *(uint32_t *)arg2; - uint32_t nhdr_namesz = nhdr32->n_namesz; - - elf_note_data_addr = - ((void *)nhdr32) + nhdr_size32 + - QEMU_ALIGN_UP(nhdr_namesz, phdr_align); - } - - pvh_start_addr = *elf_note_data_addr; - - return pvh_start_addr; -} - -static bool load_elfboot(const char *kernel_filename, - int kernel_file_size, - uint8_t *header, - size_t pvh_xen_start_addr, - FWCfgState *fw_cfg) -{ - uint32_t flags = 0; - uint32_t mh_load_addr = 0; - uint32_t elf_kernel_size = 0; - uint64_t elf_entry; - uint64_t elf_low, elf_high; - int kernel_size; - - if (ldl_p(header) != 0x464c457f) { - return false; /* no elfboot */ - } - - bool elf_is64 = header[EI_CLASS] == ELFCLASS64; - flags = elf_is64 ? 
- ((Elf64_Ehdr *)header)->e_flags : ((Elf32_Ehdr *)header)->e_flags; - - if (flags & 0x00010004) { /* LOAD_ELF_HEADER_HAS_ADDR */ - error_report("elfboot unsupported flags = %x", flags); - exit(1); - } - - uint64_t elf_note_type = XEN_ELFNOTE_PHYS32_ENTRY; - kernel_size = load_elf(kernel_filename, read_pvh_start_addr, - NULL, &elf_note_type, &elf_entry, - &elf_low, &elf_high, 0, I386_ELF_MACHINE, - 0, 0); - - if (kernel_size < 0) { - error_report("Error while loading elf kernel"); - exit(1); - } - mh_load_addr = elf_low; - elf_kernel_size = elf_high - elf_low; - - if (pvh_start_addr == 0) { - error_report("Error loading uncompressed kernel without PVH ELF Note"); - exit(1); - } - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, pvh_start_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, elf_kernel_size); - - return true; -} - -static void load_linux(PCMachineState *pcms, - FWCfgState *fw_cfg) -{ - uint16_t protocol; - int setup_size, kernel_size, cmdline_size; - int dtb_size, setup_data_offset; - uint32_t initrd_max; - uint8_t header[8192], *setup, *kernel; - hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0; - FILE *f; - char *vmode; - MachineState *machine = MACHINE(pcms); - PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); - struct setup_data *setup_data; - const char *kernel_filename = machine->kernel_filename; - const char *initrd_filename = machine->initrd_filename; - const char *dtb_filename = machine->dtb; - const char *kernel_cmdline = machine->kernel_cmdline; - - /* Align to 16 bytes as a paranoia measure */ - cmdline_size = (strlen(kernel_cmdline)+16) & ~15; - - /* load the kernel header */ - f = fopen(kernel_filename, "rb"); - if (!f || !(kernel_size = get_file_size(f)) || - fread(header, 1, MIN(ARRAY_SIZE(header), kernel_size), f) != - MIN(ARRAY_SIZE(header), kernel_size)) { - fprintf(stderr, "qemu: could not load kernel '%s': %s\n", - kernel_filename, strerror(errno)); - exit(1); - } - - /* kernel protocol version */ -#if 0 - fprintf(stderr, "header magic: %#x\n", ldl_p(header+0x202)); -#endif - if (ldl_p(header+0x202) == 0x53726448) { - protocol = lduw_p(header+0x206); - } else { - /* - * This could be a multiboot kernel. If it is, let's stop treating it - * like a Linux kernel. - * Note: some multiboot images could be in the ELF format (the same of - * PVH), so we try multiboot first since we check the multiboot magic - * header before to load it. - */ - if (load_multiboot(fw_cfg, f, kernel_filename, initrd_filename, - kernel_cmdline, kernel_size, header)) { - return; - } - /* - * Check if the file is an uncompressed kernel file (ELF) and load it, - * saving the PVH entry point used by the x86/HVM direct boot ABI. - * If load_elfboot() is successful, populate the fw_cfg info. 
- */ - if (pcmc->pvh_enabled && - load_elfboot(kernel_filename, kernel_size, - header, pvh_start_addr, fw_cfg)) { - fclose(f); - - fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, - strlen(kernel_cmdline) + 1); - fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); - - fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, sizeof(header)); - fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, - header, sizeof(header)); - - /* load initrd */ - if (initrd_filename) { - GMappedFile *mapped_file; - gsize initrd_size; - gchar *initrd_data; - GError *gerr = NULL; - - mapped_file = g_mapped_file_new(initrd_filename, false, &gerr); - if (!mapped_file) { - fprintf(stderr, "qemu: error reading initrd %s: %s\n", - initrd_filename, gerr->message); - exit(1); - } - pcms->initrd_mapped_file = mapped_file; - - initrd_data = g_mapped_file_get_contents(mapped_file); - initrd_size = g_mapped_file_get_length(mapped_file); - initrd_max = pcms->below_4g_mem_size - pcmc->acpi_data_size - 1; - if (initrd_size >= initrd_max) { - fprintf(stderr, "qemu: initrd is too large, cannot support." - "(max: %"PRIu32", need %"PRId64")\n", - initrd_max, (uint64_t)initrd_size); - exit(1); - } - - initrd_addr = (initrd_max - initrd_size) & ~4095; - - fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); - fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, - initrd_size); - } - - option_rom[nb_option_roms].bootindex = 0; - option_rom[nb_option_roms].name = "pvh.bin"; - nb_option_roms++; - - return; - } - protocol = 0; - } - - if (protocol < 0x200 || !(header[0x211] & 0x01)) { - /* Low kernel */ - real_addr = 0x90000; - cmdline_addr = 0x9a000 - cmdline_size; - prot_addr = 0x10000; - } else if (protocol < 0x202) { - /* High but ancient kernel */ - real_addr = 0x90000; - cmdline_addr = 0x9a000 - cmdline_size; - prot_addr = 0x100000; - } else { - /* High and recent kernel */ - real_addr = 0x10000; - cmdline_addr = 0x20000; - prot_addr = 0x100000; - } - -#if 0 - fprintf(stderr, - "qemu: real_addr = 0x" TARGET_FMT_plx "\n" - "qemu: cmdline_addr = 0x" TARGET_FMT_plx "\n" - "qemu: prot_addr = 0x" TARGET_FMT_plx "\n", - real_addr, - cmdline_addr, - prot_addr); -#endif - - /* highest address for loading the initrd */ - if (protocol >= 0x20c && - lduw_p(header+0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) { - /* - * Linux has supported initrd up to 4 GB for a very long time (2007, - * long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013), - * though it only sets initrd_max to 2 GB to "work around bootloader - * bugs". Luckily, QEMU firmware(which does something like bootloader) - * has supported this. - * - * It's believed that if XLF_CAN_BE_LOADED_ABOVE_4G is set, initrd can - * be loaded into any address. - * - * In addition, initrd_max is uint32_t simply because QEMU doesn't - * support the 64-bit boot protocol (specifically the ext_ramdisk_image - * field). - * - * Therefore here just limit initrd_max to UINT32_MAX simply as well. 
- */ - initrd_max = UINT32_MAX; - } else if (protocol >= 0x203) { - initrd_max = ldl_p(header+0x22c); - } else { - initrd_max = 0x37ffffff; - } - - if (initrd_max >= pcms->below_4g_mem_size - pcmc->acpi_data_size) { - initrd_max = pcms->below_4g_mem_size - pcmc->acpi_data_size - 1; - } - - fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline)+1); - fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); - - if (protocol >= 0x202) { - stl_p(header+0x228, cmdline_addr); - } else { - stw_p(header+0x20, 0xA33F); - stw_p(header+0x22, cmdline_addr-real_addr); - } - - /* handle vga= parameter */ - vmode = strstr(kernel_cmdline, "vga="); - if (vmode) { - unsigned int video_mode; - /* skip "vga=" */ - vmode += 4; - if (!strncmp(vmode, "normal", 6)) { - video_mode = 0xffff; - } else if (!strncmp(vmode, "ext", 3)) { - video_mode = 0xfffe; - } else if (!strncmp(vmode, "ask", 3)) { - video_mode = 0xfffd; - } else { - video_mode = strtol(vmode, NULL, 0); - } - stw_p(header+0x1fa, video_mode); - } - - /* loader type */ - /* High nybble = B reserved for QEMU; low nybble is revision number. - If this code is substantially changed, you may want to consider - incrementing the revision. */ - if (protocol >= 0x200) { - header[0x210] = 0xB0; - } - /* heap */ - if (protocol >= 0x201) { - header[0x211] |= 0x80; /* CAN_USE_HEAP */ - stw_p(header+0x224, cmdline_addr-real_addr-0x200); - } - - /* load initrd */ - if (initrd_filename) { - GMappedFile *mapped_file; - gsize initrd_size; - gchar *initrd_data; - GError *gerr = NULL; - - if (protocol < 0x200) { - fprintf(stderr, "qemu: linux kernel too old to load a ram disk\n"); - exit(1); - } - - mapped_file = g_mapped_file_new(initrd_filename, false, &gerr); - if (!mapped_file) { - fprintf(stderr, "qemu: error reading initrd %s: %s\n", - initrd_filename, gerr->message); - exit(1); - } - pcms->initrd_mapped_file = mapped_file; - - initrd_data = g_mapped_file_get_contents(mapped_file); - initrd_size = g_mapped_file_get_length(mapped_file); - if (initrd_size >= initrd_max) { - fprintf(stderr, "qemu: initrd is too large, cannot support." 
- "(max: %"PRIu32", need %"PRId64")\n", - initrd_max, (uint64_t)initrd_size); - exit(1); - } - - initrd_addr = (initrd_max-initrd_size) & ~4095; - - fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); - fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size); - - stl_p(header+0x218, initrd_addr); - stl_p(header+0x21c, initrd_size); - } - - /* load kernel and setup */ - setup_size = header[0x1f1]; - if (setup_size == 0) { - setup_size = 4; - } - setup_size = (setup_size+1)*512; - if (setup_size > kernel_size) { - fprintf(stderr, "qemu: invalid kernel header\n"); - exit(1); - } - kernel_size -= setup_size; - - setup = g_malloc(setup_size); - kernel = g_malloc(kernel_size); - fseek(f, 0, SEEK_SET); - if (fread(setup, 1, setup_size, f) != setup_size) { - fprintf(stderr, "fread() failed\n"); - exit(1); - } - if (fread(kernel, 1, kernel_size, f) != kernel_size) { - fprintf(stderr, "fread() failed\n"); - exit(1); - } - fclose(f); - - /* append dtb to kernel */ - if (dtb_filename) { - if (protocol < 0x209) { - fprintf(stderr, "qemu: Linux kernel too old to load a dtb\n"); - exit(1); - } - - dtb_size = get_image_size(dtb_filename); - if (dtb_size <= 0) { - fprintf(stderr, "qemu: error reading dtb %s: %s\n", - dtb_filename, strerror(errno)); - exit(1); - } - - setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16); - kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size; - kernel = g_realloc(kernel, kernel_size); - - stq_p(header+0x250, prot_addr + setup_data_offset); - - setup_data = (struct setup_data *)(kernel + setup_data_offset); - setup_data->next = 0; - setup_data->type = cpu_to_le32(SETUP_DTB); - setup_data->len = cpu_to_le32(dtb_size); - - load_image_size(dtb_filename, setup_data->data, dtb_size); - } - - memcpy(setup, header, MIN(sizeof(header), setup_size)); - - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); - fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); - - fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); - fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size); - - option_rom[nb_option_roms].bootindex = 0; - option_rom[nb_option_roms].name = "linuxboot.bin"; - if (pcmc->linuxboot_dma_enabled && fw_cfg_dma_enabled(fw_cfg)) { - option_rom[nb_option_roms].name = "linuxboot_dma.bin"; - } - nb_option_roms++; -} - #define NE2000_NB_MAX 6 static const int ne2000_io[NE2000_NB_MAX] = { 0x300, 0x320, 0x340, 0x360, @@ -1374,31 +905,13 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level) } } -static void pc_new_cpu(PCMachineState *pcms, int64_t apic_id, Error **errp) -{ - Object *cpu = NULL; - Error *local_err = NULL; - CPUX86State *env = NULL; - - cpu = object_new(MACHINE(pcms)->cpu_type); - - env = &X86_CPU(cpu)->env; - env->nr_dies = pcms->smp_dies; - - object_property_set_uint(cpu, apic_id, "apic-id", &local_err); - object_property_set_bool(cpu, true, "realized", &local_err); - - object_unref(cpu); - error_propagate(errp, local_err); -} - /* * This function is very similar to smp_parse() * in hw/core/machine.c but includes CPU die support. 
*/ void pc_smp_parse(MachineState *ms, QemuOpts *opts) { - PCMachineState *pcms = PC_MACHINE(ms); + X86MachineState *x86ms = X86_MACHINE(ms); if (opts) { unsigned cpus = qemu_opt_get_number(opts, "cpus", 0); @@ -1462,7 +975,7 @@ void pc_smp_parse(MachineState *ms, QemuOpts *opts) ms->smp.cpus = cpus; ms->smp.cores = cores; ms->smp.threads = threads; - pcms->smp_dies = dies; + x86ms->smp_dies = dies; } if (ms->smp.cpus > 1) { @@ -1474,8 +987,8 @@ void pc_smp_parse(MachineState *ms, QemuOpts *opts) void pc_hot_add_cpu(MachineState *ms, const int64_t id, Error **errp) { - PCMachineState *pcms = PC_MACHINE(ms); - int64_t apic_id = x86_cpu_apic_id_from_index(pcms, id); + X86MachineState *x86ms = X86_MACHINE(ms); + int64_t apic_id = x86_cpu_apic_id_from_index(x86ms, id); Error *local_err = NULL; if (id < 0) { @@ -1490,38 +1003,14 @@ void pc_hot_add_cpu(MachineState *ms, const int64_t id, Error **errp) return; } - pc_new_cpu(PC_MACHINE(ms), apic_id, &local_err); + + x86_cpu_new(X86_MACHINE(ms), apic_id, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } -void pc_cpus_init(PCMachineState *pcms) -{ - int i; - const CPUArchIdList *possible_cpus; - MachineState *ms = MACHINE(pcms); - MachineClass *mc = MACHINE_GET_CLASS(pcms); - PCMachineClass *pcmc = PC_MACHINE_CLASS(mc); - - x86_cpu_set_default_version(pcmc->default_cpu_version); - - /* Calculates the limit to CPU APIC ID values - * - * Limit for the APIC ID value, so that all - * CPU APIC IDs are < pcms->apic_id_limit. - * - * This is used for FW_CFG_MAX_CPUS. See comments on fw_cfg_arch_create(). - */ - pcms->apic_id_limit = x86_cpu_apic_id_from_index(pcms, - ms->smp.max_cpus - 1) + 1; - possible_cpus = mc->possible_cpu_arch_ids(ms); - for (i = 0; i < ms->smp.cpus; i++) { - pc_new_cpu(pcms, possible_cpus->cpus[i].arch_id, &error_fatal); - } -} - static void rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count) { if (cpus_count > 0xff) { @@ -1540,10 +1029,11 @@ void pc_machine_done(Notifier *notifier, void *data) { PCMachineState *pcms = container_of(notifier, PCMachineState, machine_done); + X86MachineState *x86ms = X86_MACHINE(pcms); PCIBus *bus = pcms->bus; /* set the number of CPUs */ - rtc_set_cpus_count(pcms->rtc, pcms->boot_cpus); + rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); if (bus) { int extra_hosts = 0; @@ -1554,23 +1044,23 @@ void pc_machine_done(Notifier *notifier, void *data) extra_hosts++; } } - if (extra_hosts && pcms->fw_cfg) { + if (extra_hosts && x86ms->fw_cfg) { uint64_t *val = g_malloc(sizeof(*val)); *val = cpu_to_le64(extra_hosts); - fw_cfg_add_file(pcms->fw_cfg, + fw_cfg_add_file(x86ms->fw_cfg, "etc/extra-pci-roots", val, sizeof(*val)); } } acpi_setup(); - if (pcms->fw_cfg) { - fw_cfg_build_smbios(MACHINE(pcms), pcms->fw_cfg); - fw_cfg_build_feature_control(MACHINE(pcms), pcms->fw_cfg); + if (x86ms->fw_cfg) { + fw_cfg_build_smbios(MACHINE(pcms), x86ms->fw_cfg); + fw_cfg_build_feature_control(MACHINE(pcms), x86ms->fw_cfg); /* update FW_CFG_NB_CPUS to account for -device added CPUs */ - fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus); + fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); } - if (pcms->apic_id_limit > 255 && !xen_enabled()) { + if (x86ms->apic_id_limit > 255 && !xen_enabled()) { IntelIOMMUState *iommu = INTEL_IOMMU_DEVICE(x86_iommu_get_default()); if (!iommu || !x86_iommu_ir_supported(X86_IOMMU_DEVICE(iommu)) || @@ -1588,8 +1078,9 @@ void pc_guest_info_init(PCMachineState *pcms) { int i; MachineState *ms = MACHINE(pcms); + X86MachineState *x86ms 
= X86_MACHINE(pcms); - pcms->apic_xrupt_override = kvm_allows_irq0_override(); + x86ms->apic_xrupt_override = kvm_allows_irq0_override(); pcms->numa_nodes = ms->numa_state->num_nodes; pcms->node_mem = g_malloc0(pcms->numa_nodes * sizeof *pcms->node_mem); @@ -1614,14 +1105,17 @@ void xen_load_linux(PCMachineState *pcms) { int i; FWCfgState *fw_cfg; + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(pcms); assert(MACHINE(pcms)->kernel_filename != NULL); fw_cfg = fw_cfg_init_io(FW_CFG_IO_BASE); - fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); rom_set_fw(fw_cfg); - load_linux(pcms, fw_cfg); + x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, + pcmc->pvh_enabled, pcmc->linuxboot_dma_enabled); for (i = 0; i < nb_option_roms; i++) { assert(!strcmp(option_rom[i].name, "linuxboot.bin") || !strcmp(option_rom[i].name, "linuxboot_dma.bin") || @@ -1629,7 +1123,7 @@ void xen_load_linux(PCMachineState *pcms) !strcmp(option_rom[i].name, "multiboot.bin")); rom_add_option(option_rom[i].name, option_rom[i].bootindex); } - pcms->fw_cfg = fw_cfg; + x86ms->fw_cfg = fw_cfg; } void pc_memory_init(PCMachineState *pcms, @@ -1644,9 +1138,10 @@ void pc_memory_init(PCMachineState *pcms, MachineState *machine = MACHINE(pcms); MachineClass *mc = MACHINE_GET_CLASS(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(pcms); - assert(machine->ram_size == pcms->below_4g_mem_size + - pcms->above_4g_mem_size); + assert(machine->ram_size == x86ms->below_4g_mem_size + + x86ms->above_4g_mem_size); linux_boot = (machine->kernel_filename != NULL); @@ -1660,17 +1155,17 @@ void pc_memory_init(PCMachineState *pcms, *ram_memory = ram; ram_below_4g = g_malloc(sizeof(*ram_below_4g)); memory_region_init_alias(ram_below_4g, NULL, "ram-below-4g", ram, - 0, pcms->below_4g_mem_size); + 0, x86ms->below_4g_mem_size); memory_region_add_subregion(system_memory, 0, ram_below_4g); - e820_add_entry(0, pcms->below_4g_mem_size, E820_RAM); - if (pcms->above_4g_mem_size > 0) { + e820_add_entry(0, x86ms->below_4g_mem_size, E820_RAM); + if (x86ms->above_4g_mem_size > 0) { ram_above_4g = g_malloc(sizeof(*ram_above_4g)); memory_region_init_alias(ram_above_4g, NULL, "ram-above-4g", ram, - pcms->below_4g_mem_size, - pcms->above_4g_mem_size); + x86ms->below_4g_mem_size, + x86ms->above_4g_mem_size); memory_region_add_subregion(system_memory, 0x100000000ULL, ram_above_4g); - e820_add_entry(0x100000000ULL, pcms->above_4g_mem_size, E820_RAM); + e820_add_entry(0x100000000ULL, x86ms->above_4g_mem_size, E820_RAM); } if (!pcmc->has_reserved_memory && @@ -1704,7 +1199,7 @@ void pc_memory_init(PCMachineState *pcms, } machine->device_memory->base = - ROUND_UP(0x100000000ULL + pcms->above_4g_mem_size, 1 * GiB); + ROUND_UP(0x100000000ULL + x86ms->above_4g_mem_size, 1 * GiB); if (pcmc->enforce_aligned_dimm) { /* size device region assuming 1G page max alignment per slot */ @@ -1739,7 +1234,7 @@ void pc_memory_init(PCMachineState *pcms, 1); fw_cfg = fw_cfg_arch_create(machine, - pcms->boot_cpus, pcms->apic_id_limit); + x86ms->boot_cpus, x86ms->apic_id_limit); rom_set_fw(fw_cfg); @@ -1756,16 +1251,17 @@ void pc_memory_init(PCMachineState *pcms, } if (linux_boot) { - load_linux(pcms, fw_cfg); + x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, + pcmc->pvh_enabled, pcmc->linuxboot_dma_enabled); } for (i = 0; i < nb_option_roms; i++) { rom_add_option(option_rom[i].name, option_rom[i].bootindex); } - pcms->fw_cfg = 
fw_cfg; + x86ms->fw_cfg = fw_cfg; /* Init default IOAPIC address space */ - pcms->ioapic_as = &address_space_memory; + x86ms->ioapic_as = &address_space_memory; /* Init ACPI memory hotplug IO base address */ pcms->memhp_io_base = ACPI_MEMORY_HOTPLUG_BASE; @@ -1780,6 +1276,7 @@ uint64_t pc_pci_hole64_start(void) PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); MachineState *ms = MACHINE(pcms); + X86MachineState *x86ms = X86_MACHINE(pcms); uint64_t hole64_start = 0; if (pcmc->has_reserved_memory && ms->device_memory->base) { @@ -1788,7 +1285,7 @@ uint64_t pc_pci_hole64_start(void) hole64_start += memory_region_size(&ms->device_memory->mr); } } else { - hole64_start = 0x100000000ULL + pcms->above_4g_mem_size; + hole64_start = 0x100000000ULL + x86ms->above_4g_mem_size; } return ROUND_UP(hole64_start, 1 * GiB); @@ -1966,6 +1463,25 @@ void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) rom_reset_order_override(); } +void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs) +{ + qemu_irq *i8259; + + if (kvm_pic_in_kernel()) { + i8259 = kvm_i8259_init(isa_bus); + } else if (xen_enabled()) { + i8259 = xen_interrupt_controller_init(); + } else { + i8259 = i8259_init(isa_bus, pc_allocate_cpu_irq()); + } + + for (size_t i = 0; i < ISA_NUM_IRQS; i++) { + i8259_irqs[i] = i8259[i]; + } + + g_free(i8259); +} + void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name) { DeviceState *dev; @@ -2127,6 +1643,7 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev, Error *local_err = NULL; X86CPU *cpu = X86_CPU(dev); PCMachineState *pcms = PC_MACHINE(hotplug_dev); + X86MachineState *x86ms = X86_MACHINE(pcms); if (pcms->acpi_dev) { hotplug_handler_plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err); @@ -2136,12 +1653,12 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev, } /* increment the number of CPUs */ - pcms->boot_cpus++; - if (pcms->rtc) { - rtc_set_cpus_count(pcms->rtc, pcms->boot_cpus); + x86ms->boot_cpus++; + if (x86ms->rtc) { + rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); } - if (pcms->fw_cfg) { - fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus); + if (x86ms->fw_cfg) { + fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); } found_cpu = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, NULL); @@ -2187,6 +1704,7 @@ static void pc_cpu_unplug_cb(HotplugHandler *hotplug_dev, Error *local_err = NULL; X86CPU *cpu = X86_CPU(dev); PCMachineState *pcms = PC_MACHINE(hotplug_dev); + X86MachineState *x86ms = X86_MACHINE(pcms); hotplug_handler_unplug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err); if (local_err) { @@ -2198,10 +1716,10 @@ static void pc_cpu_unplug_cb(HotplugHandler *hotplug_dev, object_property_set_bool(OBJECT(dev), false, "realized", NULL); /* decrement the number of CPUs */ - pcms->boot_cpus--; + x86ms->boot_cpus--; /* Update the number of CPUs in CMOS */ - rtc_set_cpus_count(pcms->rtc, pcms->boot_cpus); - fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus); + rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); + fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); out: error_propagate(errp, local_err); } @@ -2217,6 +1735,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, CPUX86State *env = &cpu->env; MachineState *ms = MACHINE(hotplug_dev); PCMachineState *pcms = PC_MACHINE(hotplug_dev); + X86MachineState *x86ms = X86_MACHINE(pcms); unsigned int smp_cores = ms->smp.cores; unsigned int smp_threads = ms->smp.threads; @@ -2226,7 
+1745,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, return; } - env->nr_dies = pcms->smp_dies; + env->nr_dies = x86ms->smp_dies; /* * If APIC ID is not set, @@ -2234,13 +1753,13 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, */ if (cpu->apic_id == UNASSIGNED_APIC_ID) { int max_socket = (ms->smp.max_cpus - 1) / - smp_threads / smp_cores / pcms->smp_dies; + smp_threads / smp_cores / x86ms->smp_dies; /* * die-id was optional in QEMU 4.0 and older, so keep it optional * if there's only one die per socket. */ - if (cpu->die_id < 0 && pcms->smp_dies == 1) { + if (cpu->die_id < 0 && x86ms->smp_dies == 1) { cpu->die_id = 0; } @@ -2255,9 +1774,9 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, if (cpu->die_id < 0) { error_setg(errp, "CPU die-id is not set"); return; - } else if (cpu->die_id > pcms->smp_dies - 1) { + } else if (cpu->die_id > x86ms->smp_dies - 1) { error_setg(errp, "Invalid CPU die-id: %u must be in range 0:%u", - cpu->die_id, pcms->smp_dies - 1); + cpu->die_id, x86ms->smp_dies - 1); return; } if (cpu->core_id < 0) { @@ -2281,7 +1800,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, topo.die_id = cpu->die_id; topo.core_id = cpu->core_id; topo.smt_id = cpu->thread_id; - cpu->apic_id = apicid_from_topo_ids(pcms->smp_dies, smp_cores, + cpu->apic_id = apicid_from_topo_ids(x86ms->smp_dies, smp_cores, smp_threads, &topo); } @@ -2289,7 +1808,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, if (!cpu_slot) { MachineState *ms = MACHINE(pcms); - x86_topo_ids_from_apicid(cpu->apic_id, pcms->smp_dies, + x86_topo_ids_from_apicid(cpu->apic_id, x86ms->smp_dies, smp_cores, smp_threads, &topo); error_setg(errp, "Invalid CPU [socket: %u, die: %u, core: %u, thread: %u] with" @@ -2311,7 +1830,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev, /* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn() * once -smp refactoring is complete and there will be CPU private * CPUState::nr_cores and CPUState::nr_threads fields instead of globals */ - x86_topo_ids_from_apicid(cpu->apic_id, pcms->smp_dies, + x86_topo_ids_from_apicid(cpu->apic_id, x86ms->smp_dies, smp_cores, smp_threads, &topo); if (cpu->socket_id != -1 && cpu->socket_id != topo.pkg_id) { error_setg(errp, "property socket-id: %u doesn't match set apic-id:" @@ -2493,45 +2012,6 @@ pc_machine_get_device_memory_region_size(Object *obj, Visitor *v, visit_type_int(v, name, &value, errp); } -static void pc_machine_get_max_ram_below_4g(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - PCMachineState *pcms = PC_MACHINE(obj); - uint64_t value = pcms->max_ram_below_4g; - - visit_type_size(v, name, &value, errp); -} - -static void pc_machine_set_max_ram_below_4g(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - PCMachineState *pcms = PC_MACHINE(obj); - Error *error = NULL; - uint64_t value; - - visit_type_size(v, name, &value, &error); - if (error) { - error_propagate(errp, error); - return; - } - if (value > 4 * GiB) { - error_setg(&error, - "Machine option 'max-ram-below-4g=%"PRIu64 - "' expects size less than or equal to 4G", value); - error_propagate(errp, error); - return; - } - - if (value < 1 * MiB) { - warn_report("Only %" PRIu64 " bytes of RAM below the 4GiB boundary," - "BIOS may not work with less than 1MiB", value); - } - - pcms->max_ram_below_4g = value; -} - static void pc_machine_get_vmport(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -2637,7 +2117,6 @@ static void 
pc_machine_initfn(Object *obj) { PCMachineState *pcms = PC_MACHINE(obj); - pcms->max_ram_below_4g = 0; /* use default */ pcms->smm = ON_OFF_AUTO_AUTO; #ifdef CONFIG_VMPORT pcms->vmport = ON_OFF_AUTO_AUTO; @@ -2649,7 +2128,6 @@ static void pc_machine_initfn(Object *obj) pcms->smbus_enabled = true; pcms->sata_enabled = true; pcms->pit_enabled = true; - pcms->smp_dies = 1; pc_system_flash_create(pcms); } @@ -2680,86 +2158,6 @@ static void pc_machine_wakeup(MachineState *machine) cpu_synchronize_all_post_reset(); } -static CpuInstanceProperties -pc_cpu_index_to_props(MachineState *ms, unsigned cpu_index) -{ - MachineClass *mc = MACHINE_GET_CLASS(ms); - const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); - - assert(cpu_index < possible_cpus->len); - return possible_cpus->cpus[cpu_index].props; -} - -static int64_t pc_get_default_cpu_node_id(const MachineState *ms, int idx) -{ - X86CPUTopoInfo topo; - PCMachineState *pcms = PC_MACHINE(ms); - - assert(idx < ms->possible_cpus->len); - x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id, - pcms->smp_dies, ms->smp.cores, - ms->smp.threads, &topo); - return topo.pkg_id % ms->numa_state->num_nodes; -} - -static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *ms) -{ - PCMachineState *pcms = PC_MACHINE(ms); - int i; - unsigned int max_cpus = ms->smp.max_cpus; - - if (ms->possible_cpus) { - /* - * make sure that max_cpus hasn't changed since the first use, i.e. - * -smp hasn't been parsed after it - */ - assert(ms->possible_cpus->len == max_cpus); - return ms->possible_cpus; - } - - ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + - sizeof(CPUArchId) * max_cpus); - ms->possible_cpus->len = max_cpus; - for (i = 0; i < ms->possible_cpus->len; i++) { - X86CPUTopoInfo topo; - - ms->possible_cpus->cpus[i].type = ms->cpu_type; - ms->possible_cpus->cpus[i].vcpus_count = 1; - ms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(pcms, i); - x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id, - pcms->smp_dies, ms->smp.cores, - ms->smp.threads, &topo); - ms->possible_cpus->cpus[i].props.has_socket_id = true; - ms->possible_cpus->cpus[i].props.socket_id = topo.pkg_id; - if (pcms->smp_dies > 1) { - ms->possible_cpus->cpus[i].props.has_die_id = true; - ms->possible_cpus->cpus[i].props.die_id = topo.die_id; - } - ms->possible_cpus->cpus[i].props.has_core_id = true; - ms->possible_cpus->cpus[i].props.core_id = topo.core_id; - ms->possible_cpus->cpus[i].props.has_thread_id = true; - ms->possible_cpus->cpus[i].props.thread_id = topo.smt_id; - } - return ms->possible_cpus; -} - -static void x86_nmi(NMIState *n, int cpu_index, Error **errp) -{ - /* cpu index isn't used */ - CPUState *cs; - - CPU_FOREACH(cs) { - X86CPU *cpu = X86_CPU(cs); - - if (!cpu->apic_state) { - cpu_interrupt(cs, CPU_INTERRUPT_NMI); - } else { - apic_deliver_nmi(cpu->apic_state); - } - } -} - - static bool pc_hotplug_allowed(MachineState *ms, DeviceState *dev, Error **errp) { X86IOMMUState *iommu = x86_iommu_get_default(); @@ -2784,7 +2182,6 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) MachineClass *mc = MACHINE_CLASS(oc); PCMachineClass *pcmc = PC_MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); - NMIClass *nc = NMI_CLASS(oc); pcmc->pci_enabled = true; pcmc->has_acpi_build = true; @@ -2804,9 +2201,9 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) assert(!mc->get_hotplug_handler); mc->get_hotplug_handler = pc_get_hotplug_handler; mc->hotplug_allowed = pc_hotplug_allowed; - 
mc->cpu_index_to_instance_props = pc_cpu_index_to_props; - mc->get_default_cpu_node_id = pc_get_default_cpu_node_id; - mc->possible_cpu_arch_ids = pc_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = x86_cpu_index_to_props; + mc->get_default_cpu_node_id = x86_get_default_cpu_node_id; + mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids; mc->auto_enable_numa_with_memhp = true; mc->has_hotpluggable_cpus = true; mc->default_boot_order = "cad"; @@ -2820,7 +2217,6 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) hc->plug = pc_machine_device_plug_cb; hc->unplug_request = pc_machine_device_unplug_request_cb; hc->unplug = pc_machine_device_unplug_cb; - nc->nmi_monitor_handler = x86_nmi; mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE; mc->nvdimm_supported = true; mc->numa_mem_supported = true; @@ -2829,13 +2225,6 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) pc_machine_get_device_memory_region_size, NULL, NULL, NULL, &error_abort); - object_class_property_add(oc, PC_MACHINE_MAX_RAM_BELOW_4G, "size", - pc_machine_get_max_ram_below_4g, pc_machine_set_max_ram_below_4g, - NULL, NULL, &error_abort); - - object_class_property_set_description(oc, PC_MACHINE_MAX_RAM_BELOW_4G, - "Maximum ram below the 4G boundary (32bit boundary)", &error_abort); - object_class_property_add(oc, PC_MACHINE_SMM, "OnOffAuto", pc_machine_get_smm, pc_machine_set_smm, NULL, NULL, &error_abort); @@ -2860,7 +2249,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) static const TypeInfo pc_machine_info = { .name = TYPE_PC_MACHINE, - .parent = TYPE_MACHINE, + .parent = TYPE_X86_MACHINE, .abstract = true, .instance_size = sizeof(PCMachineState), .instance_init = pc_machine_initfn, @@ -2868,7 +2257,6 @@ static const TypeInfo pc_machine_info = { .class_init = pc_machine_class_init, .interfaces = (InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, - { TYPE_NMI }, { } }, }; diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 6824b72124..c15929a1f5 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -27,6 +27,7 @@ #include "qemu/units.h" #include "hw/loader.h" +#include "hw/i386/x86.h" #include "hw/i386/pc.h" #include "hw/i386/apic.h" #include "hw/display/ramfb.h" @@ -56,7 +57,6 @@ #endif #include "migration/global_state.h" #include "migration/misc.h" -#include "kvm_i386.h" #include "sysemu/numa.h" #define MAX_IDE_BUS 2 @@ -73,6 +73,7 @@ static void pc_init1(MachineState *machine, { PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *system_io = get_system_io(); int i; @@ -80,7 +81,6 @@ static void pc_init1(MachineState *machine, ISABus *isa_bus; PCII440FXState *i440fx_state; int piix3_devfn = -1; - qemu_irq *i8259; qemu_irq smi_irq; GSIState *gsi_state; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; @@ -125,11 +125,11 @@ static void pc_init1(MachineState *machine, if (xen_enabled()) { xen_hvm_init(pcms, &ram_memory); } else { - if (!pcms->max_ram_below_4g) { - pcms->max_ram_below_4g = 0xe0000000; /* default: 3.5G */ + if (!x86ms->max_ram_below_4g) { + x86ms->max_ram_below_4g = 0xe0000000; /* default: 3.5G */ } - lowmem = pcms->max_ram_below_4g; - if (machine->ram_size >= pcms->max_ram_below_4g) { + lowmem = x86ms->max_ram_below_4g; + if (machine->ram_size >= x86ms->max_ram_below_4g) { if (pcmc->gigabyte_align) { if (lowmem > 0xc0000000) { lowmem = 0xc0000000; @@ -138,21 +138,21 @@ static void pc_init1(MachineState 
*machine, warn_report("Large machine and max_ram_below_4g " "(%" PRIu64 ") not a multiple of 1G; " "possible bad performance.", - pcms->max_ram_below_4g); + x86ms->max_ram_below_4g); } } } if (machine->ram_size >= lowmem) { - pcms->above_4g_mem_size = machine->ram_size - lowmem; - pcms->below_4g_mem_size = lowmem; + x86ms->above_4g_mem_size = machine->ram_size - lowmem; + x86ms->below_4g_mem_size = lowmem; } else { - pcms->above_4g_mem_size = 0; - pcms->below_4g_mem_size = machine->ram_size; + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = machine->ram_size; } } - pc_cpus_init(pcms); + x86_cpus_init(x86ms, pcmc->default_cpu_version); if (kvm_enabled() && pcmc->kvmclock_enabled) { kvmclock_create(); @@ -187,22 +187,15 @@ static void pc_init1(MachineState *machine, xen_load_linux(pcms); } - gsi_state = g_malloc0(sizeof(*gsi_state)); - if (kvm_ioapic_in_kernel()) { - kvm_pc_setup_irq_routing(pcmc->pci_enabled); - pcms->gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state, - GSI_NUM_PINS); - } else { - pcms->gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS); - } + gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled); if (pcmc->pci_enabled) { pci_bus = i440fx_init(host_type, pci_type, - &i440fx_state, &piix3_devfn, &isa_bus, pcms->gsi, + &i440fx_state, &piix3_devfn, &isa_bus, x86ms->gsi, system_memory, system_io, machine->ram_size, - pcms->below_4g_mem_size, - pcms->above_4g_mem_size, + x86ms->below_4g_mem_size, + x86ms->above_4g_mem_size, pci_memory, ram_memory); pcms->bus = pci_bus; } else { @@ -212,25 +205,17 @@ static void pc_init1(MachineState *machine, &error_abort); no_hpet = 1; } - isa_bus_irqs(isa_bus, pcms->gsi); + isa_bus_irqs(isa_bus, x86ms->gsi); - if (kvm_pic_in_kernel()) { - i8259 = kvm_i8259_init(isa_bus); - } else if (xen_enabled()) { - i8259 = xen_interrupt_controller_init(); - } else { - i8259 = i8259_init(isa_bus, pc_allocate_cpu_irq()); - } + pc_i8259_create(isa_bus, gsi_state->i8259_irq); - for (i = 0; i < ISA_NUM_IRQS; i++) { - gsi_state->i8259_irq[i] = i8259[i]; - } - g_free(i8259); if (pcmc->pci_enabled) { ioapic_init_gsi(gsi_state, "i440fx"); } - pc_register_ferr_irq(pcms->gsi[13]); + if (tcg_enabled()) { + x86_register_ferr_irq(x86ms->gsi[13]); + } pc_vga_init(isa_bus, pcmc->pci_enabled ? pci_bus : NULL); @@ -240,7 +225,7 @@ static void pc_init1(MachineState *machine, } /* init basic PC hardware */ - pc_basic_device_init(isa_bus, pcms->gsi, &rtc_state, true, + pc_basic_device_init(isa_bus, x86ms->gsi, &rtc_state, true, (pcms->vmport != ON_OFF_AUTO_ON), pcms->pit_enabled, 0x4); @@ -287,7 +272,7 @@ else { smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0); /* TODO: Populate SPD eeprom data. 
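The pc_gsi_create()/pc_i8259_create() pairing used in pc_init1() above replaces the per-board open-coded IRQ setup: every GSI now funnels through one dispatch routine that mirrors the first 16 lines onto the legacy 8259 pair while always forwarding to the IOAPIC. A minimal sketch of that dispatch, mirroring the gsi_handler() this series consolidates (tracing omitted; shown for orientation, not as new API):

void gsi_handler(void *opaque, int n, int level)
{
    GSIState *s = opaque;

    /* GSIs 0..15 are shared with the legacy 8259 pair... */
    if (n < ISA_NUM_IRQS) {
        qemu_set_irq(s->i8259_irq[n], level);
    }
    /* ...and every GSI also reaches the IOAPIC pin of the same number. */
    qemu_set_irq(s->ioapic_irq[n], level);
}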
*/ pcms->smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, - pcms->gsi[9], smi_irq, + x86ms->gsi[9], smi_irq, pc_machine_is_smm_enabled(pcms), &piix4_pm); smbus_eeprom_init(pcms->smbus, 8, NULL, 0); @@ -303,7 +288,7 @@ else { if (machine->nvdimms_state->is_enabled) { nvdimm_init_acpi_state(machine->nvdimms_state, system_io, - pcms->fw_cfg, OBJECT(pcms)); + x86ms->fw_cfg, OBJECT(pcms)); } } @@ -728,7 +713,7 @@ DEFINE_I440FX_MACHINE(v1_4, "pc-i440fx-1.4", pc_compat_1_4_fn, static void pc_i440fx_1_3_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + X86MachineClass *x86mc = X86_MACHINE_CLASS(m); static GlobalProperty compat[] = { PC_CPU_MODEL_IDS("1.3.0") { "usb-tablet", "usb_version", "1" }, @@ -739,7 +724,7 @@ static void pc_i440fx_1_3_machine_options(MachineClass *m) pc_i440fx_1_4_machine_options(m); m->hw_version = "1.3.0"; - pcmc->compat_apic_id_mode = true; + x86mc->compat_apic_id_mode = true; compat_props_add(m->compat_props, compat, G_N_ELEMENTS(compat)); } diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 748fc2ee15..d51f524727 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -36,11 +36,11 @@ #include "hw/rtc/mc146818rtc.h" #include "hw/xen/xen.h" #include "sysemu/kvm.h" -#include "kvm_i386.h" #include "hw/kvm/clock.h" #include "hw/pci-host/q35.h" #include "hw/qdev-properties.h" #include "exec/address-spaces.h" +#include "hw/i386/x86.h" #include "hw/i386/pc.h" #include "hw/i386/ich9.h" #include "hw/i386/amd_iommu.h" @@ -115,6 +115,7 @@ static void pc_q35_init(MachineState *machine) { PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(machine); Q35PCIHost *q35_host; PCIHostState *phb; PCIBus *host_bus; @@ -128,7 +129,6 @@ static void pc_q35_init(MachineState *machine) MemoryRegion *ram_memory; GSIState *gsi_state; ISABus *isa_bus; - qemu_irq *i8259; int i; ICH9LPCState *ich9_lpc; PCIDevice *ahci; @@ -152,34 +152,34 @@ static void pc_q35_init(MachineState *machine) /* Handle the machine opt max-ram-below-4g. It is basically doing * min(qemu limit, user limit). 
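The clamping performed in the hunk that follows is plain min-and-split arithmetic. A worked example with illustrative values (assuming the gigabyte-aligned 2 GiB q35 low-memory default; the exact default depends on the machine class):

/* 8 GiB guest, lowmem clamped to 2 GiB */
uint64_t ram_size = 8 * GiB;
uint64_t lowmem   = 0x80000000;            /* 2 GiB */

uint64_t below_4g = lowmem;                /* RAM at [0, 2G)     */
uint64_t above_4g = ram_size - lowmem;     /* 6 GiB at [4G, 10G) */

The gap between below_4g and the 4 GiB boundary is where the PCI hole, the IOAPIC window and the firmware mappings live.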
*/ - if (!pcms->max_ram_below_4g) { - pcms->max_ram_below_4g = 1ULL << 32; /* default: 4G */; + if (!x86ms->max_ram_below_4g) { + x86ms->max_ram_below_4g = 4 * GiB; } - if (lowmem > pcms->max_ram_below_4g) { - lowmem = pcms->max_ram_below_4g; + if (lowmem > x86ms->max_ram_below_4g) { + lowmem = x86ms->max_ram_below_4g; if (machine->ram_size - lowmem > lowmem && lowmem & (1 * GiB - 1)) { warn_report("There is possibly poor performance as the ram size " " (0x%" PRIx64 ") is more than twice the size of" " max-ram-below-4g (%"PRIu64") and" " max-ram-below-4g is not a multiple of 1G.", - (uint64_t)machine->ram_size, pcms->max_ram_below_4g); + (uint64_t)machine->ram_size, x86ms->max_ram_below_4g); } } if (machine->ram_size >= lowmem) { - pcms->above_4g_mem_size = machine->ram_size - lowmem; - pcms->below_4g_mem_size = lowmem; + x86ms->above_4g_mem_size = machine->ram_size - lowmem; + x86ms->below_4g_mem_size = lowmem; } else { - pcms->above_4g_mem_size = 0; - pcms->below_4g_mem_size = machine->ram_size; + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = machine->ram_size; } if (xen_enabled()) { xen_hvm_init(pcms, &ram_memory); } - pc_cpus_init(pcms); + x86_cpus_init(x86ms, pcmc->default_cpu_version); kvmclock_create(); @@ -209,16 +209,6 @@ static void pc_q35_init(MachineState *machine) rom_memory, &ram_memory); } - /* irq lines */ - gsi_state = g_malloc0(sizeof(*gsi_state)); - if (kvm_ioapic_in_kernel()) { - kvm_pc_setup_irq_routing(pcmc->pci_enabled); - pcms->gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state, - GSI_NUM_PINS); - } else { - pcms->gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS); - } - /* create pci host bus */ q35_host = Q35_HOST_DEVICE(qdev_create(NULL, TYPE_Q35_HOST_DEVICE)); @@ -231,9 +221,9 @@ static void pc_q35_init(MachineState *machine) MCH_HOST_PROP_SYSTEM_MEM, NULL); object_property_set_link(OBJECT(q35_host), OBJECT(system_io), MCH_HOST_PROP_IO_MEM, NULL); - object_property_set_int(OBJECT(q35_host), pcms->below_4g_mem_size, + object_property_set_int(OBJECT(q35_host), x86ms->below_4g_mem_size, PCI_HOST_BELOW_4G_MEM_SIZE, NULL); - object_property_set_int(OBJECT(q35_host), pcms->above_4g_mem_size, + object_property_set_int(OBJECT(q35_host), x86ms->above_4g_mem_size, PCI_HOST_ABOVE_4G_MEM_SIZE, NULL); /* pci */ qdev_init_nofail(DEVICE(q35_host)); @@ -252,34 +242,28 @@ static void pc_q35_init(MachineState *machine) object_property_set_link(OBJECT(machine), OBJECT(lpc), PC_MACHINE_ACPI_DEVICE_PROP, &error_abort); + /* irq lines */ + gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled); + ich9_lpc = ICH9_LPC_DEVICE(lpc); lpc_dev = DEVICE(lpc); for (i = 0; i < GSI_NUM_PINS; i++) { - qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, pcms->gsi[i]); + qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]); } pci_bus_irqs(host_bus, ich9_lpc_set_irq, ich9_lpc_map_irq, ich9_lpc, ICH9_LPC_NB_PIRQS); pci_bus_set_route_irq_fn(host_bus, ich9_route_intx_pin_to_irq); isa_bus = ich9_lpc->isa_bus; - if (kvm_pic_in_kernel()) { - i8259 = kvm_i8259_init(isa_bus); - } else if (xen_enabled()) { - i8259 = xen_interrupt_controller_init(); - } else { - i8259 = i8259_init(isa_bus, pc_allocate_cpu_irq()); - } - - for (i = 0; i < ISA_NUM_IRQS; i++) { - gsi_state->i8259_irq[i] = i8259[i]; - } - g_free(i8259); + pc_i8259_create(isa_bus, gsi_state->i8259_irq); if (pcmc->pci_enabled) { ioapic_init_gsi(gsi_state, "q35"); } - pc_register_ferr_irq(pcms->gsi[13]); + if (tcg_enabled()) { + x86_register_ferr_irq(x86ms->gsi[13]); + } assert(pcms->vmport !=
ON_OFF_AUTO__MAX); if (pcms->vmport == ON_OFF_AUTO_AUTO) { @@ -287,7 +271,7 @@ static void pc_q35_init(MachineState *machine) } /* init basic PC hardware */ - pc_basic_device_init(isa_bus, pcms->gsi, &rtc_state, !mc->no_floppy, + pc_basic_device_init(isa_bus, x86ms->gsi, &rtc_state, !mc->no_floppy, (pcms->vmport != ON_OFF_AUTO_ON), pcms->pit_enabled, 0xff0104); @@ -330,7 +314,7 @@ static void pc_q35_init(MachineState *machine) if (machine->nvdimms_state->is_enabled) { nvdimm_init_acpi_state(machine->nvdimms_state, system_io, - pcms->fw_cfg, OBJECT(pcms)); + x86ms->fw_cfg, OBJECT(pcms)); } } diff --git a/hw/i386/pc_sysfw.c b/hw/i386/pc_sysfw.c index a9983f0bfb..f5f3f466b0 100644 --- a/hw/i386/pc_sysfw.c +++ b/hw/i386/pc_sysfw.c @@ -31,6 +31,7 @@ #include "qemu/option.h" #include "qemu/units.h" #include "hw/sysbus.h" +#include "hw/i386/x86.h" #include "hw/i386/pc.h" #include "hw/loader.h" #include "hw/qdev-properties.h" @@ -38,8 +39,6 @@ #include "hw/block/flash.h" #include "sysemu/kvm.h" -#define BIOS_FILENAME "bios.bin" - /* * We don't have a theoretically justifiable exact lower bound on the base * address of any flash mapping. In practice, the IO-APIC MMIO range is @@ -211,59 +210,6 @@ static void pc_system_flash_map(PCMachineState *pcms, } } -static void old_pc_system_rom_init(MemoryRegion *rom_memory, bool isapc_ram_fw) -{ - char *filename; - MemoryRegion *bios, *isa_bios; - int bios_size, isa_bios_size; - int ret; - - /* BIOS load */ - if (bios_name == NULL) { - bios_name = BIOS_FILENAME; - } - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); - if (filename) { - bios_size = get_image_size(filename); - } else { - bios_size = -1; - } - if (bios_size <= 0 || - (bios_size % 65536) != 0) { - goto bios_error; - } - bios = g_malloc(sizeof(*bios)); - memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal); - if (!isapc_ram_fw) { - memory_region_set_readonly(bios, true); - } - ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1); - if (ret != 0) { - bios_error: - fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name); - exit(1); - } - g_free(filename); - - /* map the last 128KB of the BIOS in ISA space */ - isa_bios_size = MIN(bios_size, 128 * KiB); - isa_bios = g_malloc(sizeof(*isa_bios)); - memory_region_init_alias(isa_bios, NULL, "isa-bios", bios, - bios_size - isa_bios_size, isa_bios_size); - memory_region_add_subregion_overlap(rom_memory, - 0x100000 - isa_bios_size, - isa_bios, - 1); - if (!isapc_ram_fw) { - memory_region_set_readonly(isa_bios, true); - } - - /* map all the bios at the top of memory */ - memory_region_add_subregion(rom_memory, - (uint32_t)(-bios_size), - bios); -} - void pc_system_firmware_init(PCMachineState *pcms, MemoryRegion *rom_memory) { @@ -272,7 +218,7 @@ void pc_system_firmware_init(PCMachineState *pcms, BlockBackend *pflash_blk[ARRAY_SIZE(pcms->flash)]; if (!pcmc->pci_enabled) { - old_pc_system_rom_init(rom_memory, true); + x86_bios_rom_init(rom_memory, true); return; } @@ -293,7 +239,7 @@ void pc_system_firmware_init(PCMachineState *pcms, if (!pflash_blk[0]) { /* Machine property pflash0 not set, use ROM mode */ - old_pc_system_rom_init(rom_memory, false); + x86_bios_rom_init(rom_memory, false); } else { if (kvm_enabled() && !kvm_readonly_mem_enabled()) { /* diff --git a/hw/i386/x86.c b/hw/i386/x86.c new file mode 100644 index 0000000000..fd84b23124 --- /dev/null +++ b/hw/i386/x86.c @@ -0,0 +1,795 @@ +/* + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2019 Red Hat, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "qemu/cutils.h" +#include "qemu/units.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "qapi/qmp/qerror.h" +#include "qapi/qapi-visit-common.h" +#include "qapi/visitor.h" +#include "sysemu/qtest.h" +#include "sysemu/numa.h" +#include "sysemu/replay.h" +#include "sysemu/sysemu.h" + +#include "hw/i386/x86.h" +#include "target/i386/cpu.h" +#include "hw/i386/topology.h" +#include "hw/i386/fw_cfg.h" + +#include "hw/acpi/cpu_hotplug.h" +#include "hw/nmi.h" +#include "hw/loader.h" +#include "multiboot.h" +#include "elf.h" +#include "standard-headers/asm-x86/bootparam.h" + +#define BIOS_FILENAME "bios.bin" + +/* Physical Address of PVH entry point read from kernel ELF NOTE */ +static size_t pvh_start_addr; + +/* + * Calculates initial APIC ID for a specific CPU index + * + * Currently we need to be able to calculate the APIC ID from the CPU index + * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have + * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of + * all CPUs up to max_cpus. 
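The function defined just below ultimately packs the thread/core/die/package coordinates into dedicated bit fields, each wide enough for the configured count. A self-contained sketch of that packing (a paraphrase of x86_apicid_from_cpu_idx() from hw/i386/topology.h; counts are assumed to be >= 1, and this is orientation, not a new API):

#include <stdint.h>

/* ceil(log2(count)); 0 when count <= 1 */
static unsigned field_width(unsigned count)
{
    unsigned w = 0;
    while ((1u << w) < count) {
        w++;
    }
    return w;
}

static uint32_t apicid_from_index(unsigned dies, unsigned cores,
                                  unsigned threads, unsigned idx)
{
    unsigned smt_id  = idx % threads;
    unsigned core_id = (idx / threads) % cores;
    unsigned die_id  = (idx / (threads * cores)) % dies;
    unsigned pkg_id  = idx / (threads * cores * dies);

    return smt_id
         | core_id << field_width(threads)
         | die_id  << (field_width(threads) + field_width(cores))
         | pkg_id  << (field_width(threads) + field_width(cores)
                       + field_width(dies));
}

With threads=1, cores=3, dies=1 this yields APIC IDs 0, 1, 2, 4, 5, 6, ... — the gaps are exactly why compat_apic_id_mode (set for pc-i440fx-1.3 and older in pc_piix.c above) keeps returning the raw cpu_index instead.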
+ */ +uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms, + unsigned int cpu_index) +{ + MachineState *ms = MACHINE(x86ms); + X86MachineClass *x86mc = X86_MACHINE_GET_CLASS(x86ms); + uint32_t correct_id; + static bool warned; + + correct_id = x86_apicid_from_cpu_idx(x86ms->smp_dies, ms->smp.cores, + ms->smp.threads, cpu_index); + if (x86mc->compat_apic_id_mode) { + if (cpu_index != correct_id && !warned && !qtest_enabled()) { + error_report("APIC IDs set in compatibility mode, " + "CPU topology won't match the configuration"); + warned = true; + } + return cpu_index; + } else { + return correct_id; + } +} + + +void x86_cpu_new(X86MachineState *x86ms, int64_t apic_id, Error **errp) +{ + Object *cpu = NULL; + Error *local_err = NULL; + CPUX86State *env = NULL; + + cpu = object_new(MACHINE(x86ms)->cpu_type); + + env = &X86_CPU(cpu)->env; + env->nr_dies = x86ms->smp_dies; + + object_property_set_uint(cpu, apic_id, "apic-id", &local_err); + object_property_set_bool(cpu, true, "realized", &local_err); + + object_unref(cpu); + error_propagate(errp, local_err); +} + +void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version) +{ + int i; + const CPUArchIdList *possible_cpus; + MachineState *ms = MACHINE(x86ms); + MachineClass *mc = MACHINE_GET_CLASS(x86ms); + + x86_cpu_set_default_version(default_cpu_version); + + /* + * Calculates the limit to CPU APIC ID values + * + * Limit for the APIC ID value, so that all + * CPU APIC IDs are < x86ms->apic_id_limit. + * + * This is used for FW_CFG_MAX_CPUS. See comments on fw_cfg_arch_create(). + */ + x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms, + ms->smp.max_cpus - 1) + 1; + possible_cpus = mc->possible_cpu_arch_ids(ms); + for (i = 0; i < ms->smp.cpus; i++) { + x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal); + } +} + +CpuInstanceProperties +x86_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + X86CPUTopoInfo topo; + X86MachineState *x86ms = X86_MACHINE(ms); + + assert(idx < ms->possible_cpus->len); + x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id, + x86ms->smp_dies, ms->smp.cores, + ms->smp.threads, &topo); + return topo.pkg_id % ms->numa_state->num_nodes; +} + +const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms) +{ + X86MachineState *x86ms = X86_MACHINE(ms); + int i; + unsigned int max_cpus = ms->smp.max_cpus; + + if (ms->possible_cpus) { + /* + * make sure that max_cpus hasn't changed since the first use, i.e. 
+ * -smp hasn't been parsed after it + */ + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (i = 0; i < ms->possible_cpus->len; i++) { + X86CPUTopoInfo topo; + + ms->possible_cpus->cpus[i].type = ms->cpu_type; + ms->possible_cpus->cpus[i].vcpus_count = 1; + ms->possible_cpus->cpus[i].arch_id = + x86_cpu_apic_id_from_index(x86ms, i); + x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id, + x86ms->smp_dies, ms->smp.cores, + ms->smp.threads, &topo); + ms->possible_cpus->cpus[i].props.has_socket_id = true; + ms->possible_cpus->cpus[i].props.socket_id = topo.pkg_id; + if (x86ms->smp_dies > 1) { + ms->possible_cpus->cpus[i].props.has_die_id = true; + ms->possible_cpus->cpus[i].props.die_id = topo.die_id; + } + ms->possible_cpus->cpus[i].props.has_core_id = true; + ms->possible_cpus->cpus[i].props.core_id = topo.core_id; + ms->possible_cpus->cpus[i].props.has_thread_id = true; + ms->possible_cpus->cpus[i].props.thread_id = topo.smt_id; + } + return ms->possible_cpus; +} + +static void x86_nmi(NMIState *n, int cpu_index, Error **errp) +{ + /* cpu index isn't used */ + CPUState *cs; + + CPU_FOREACH(cs) { + X86CPU *cpu = X86_CPU(cs); + + if (!cpu->apic_state) { + cpu_interrupt(cs, CPU_INTERRUPT_NMI); + } else { + apic_deliver_nmi(cpu->apic_state); + } + } +} + +static long get_file_size(FILE *f) +{ + long where, size; + + /* XXX: on Unix systems, using fstat() probably makes more sense */ + + where = ftell(f); + fseek(f, 0, SEEK_END); + size = ftell(f); + fseek(f, where, SEEK_SET); + + return size; +} + +struct setup_data { + uint64_t next; + uint32_t type; + uint32_t len; + uint8_t data[0]; +} __attribute__((packed)); + + +/* + * The entry point into the kernel for PVH boot is different from + * the native entry point. The PVH entry is defined by the x86/HVM + * direct boot ABI and is available in an ELFNOTE in the kernel binary. + * + * This function is passed to load_elf() when it is called from + * load_elfboot() which then additionally checks for an ELF Note of + * type XEN_ELFNOTE_PHYS32_ENTRY and passes it to this function to + * parse the PVH entry address from the ELF Note. + * + * Due to trickery in elf_opts.h, load_elf() is actually available as + * load_elf32() or load_elf64() and this routine needs to be able + * to deal with being called as 32 or 64 bit. + * + * The address of the PVH entry point is saved to the 'pvh_start_addr' + * global variable. (although the entry point is 32-bit, the kernel + * binary can be either 32-bit or 64-bit). 
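For orientation, the descriptor that read_pvh_start_addr() (defined right after this comment) extracts sits immediately past the padded name field of a standard ELF note; the layout below is illustrative for a 64-bit kernel:

/*
 *   n_namesz   4 bytes   e.g. 4 for the "Xen\0" owner name
 *   n_descsz   4 bytes   size of the descriptor
 *   n_type     4 bytes   XEN_ELFNOTE_PHYS32_ENTRY
 *   name       n_namesz bytes, padded up to the phdr alignment
 *   desc       the 32-bit physical entry point saved above
 *
 * so the code computes:
 *   desc = (char *)nhdr + sizeof(*nhdr) + QEMU_ALIGN_UP(n_namesz, align);
 */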
+ */ +static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64) +{ + size_t *elf_note_data_addr; + + /* Check if ELF Note header passed in is valid */ + if (arg1 == NULL) { + return 0; + } + + if (is64) { + struct elf64_note *nhdr64 = (struct elf64_note *)arg1; + uint64_t nhdr_size64 = sizeof(struct elf64_note); + uint64_t phdr_align = *(uint64_t *)arg2; + uint64_t nhdr_namesz = nhdr64->n_namesz; + + elf_note_data_addr = + ((void *)nhdr64) + nhdr_size64 + + QEMU_ALIGN_UP(nhdr_namesz, phdr_align); + } else { + struct elf32_note *nhdr32 = (struct elf32_note *)arg1; + uint32_t nhdr_size32 = sizeof(struct elf32_note); + uint32_t phdr_align = *(uint32_t *)arg2; + uint32_t nhdr_namesz = nhdr32->n_namesz; + + elf_note_data_addr = + ((void *)nhdr32) + nhdr_size32 + + QEMU_ALIGN_UP(nhdr_namesz, phdr_align); + } + + pvh_start_addr = *elf_note_data_addr; + + return pvh_start_addr; +} + +static bool load_elfboot(const char *kernel_filename, + int kernel_file_size, + uint8_t *header, + size_t pvh_xen_start_addr, + FWCfgState *fw_cfg) +{ + uint32_t flags = 0; + uint32_t mh_load_addr = 0; + uint32_t elf_kernel_size = 0; + uint64_t elf_entry; + uint64_t elf_low, elf_high; + int kernel_size; + + if (ldl_p(header) != 0x464c457f) { + return false; /* no elfboot */ + } + + bool elf_is64 = header[EI_CLASS] == ELFCLASS64; + flags = elf_is64 ? + ((Elf64_Ehdr *)header)->e_flags : ((Elf32_Ehdr *)header)->e_flags; + + if (flags & 0x00010004) { /* LOAD_ELF_HEADER_HAS_ADDR */ + error_report("elfboot unsupported flags = %x", flags); + exit(1); + } + + uint64_t elf_note_type = XEN_ELFNOTE_PHYS32_ENTRY; + kernel_size = load_elf(kernel_filename, read_pvh_start_addr, + NULL, &elf_note_type, &elf_entry, + &elf_low, &elf_high, 0, I386_ELF_MACHINE, + 0, 0); + + if (kernel_size < 0) { + error_report("Error while loading elf kernel"); + exit(1); + } + mh_load_addr = elf_low; + elf_kernel_size = elf_high - elf_low; + + if (pvh_start_addr == 0) { + error_report("Error loading uncompressed kernel without PVH ELF Note"); + exit(1); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, pvh_start_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, elf_kernel_size); + + return true; +} + +void x86_load_linux(X86MachineState *x86ms, + FWCfgState *fw_cfg, + int acpi_data_size, + bool pvh_enabled, + bool linuxboot_dma_enabled) +{ + uint16_t protocol; + int setup_size, kernel_size, cmdline_size; + int dtb_size, setup_data_offset; + uint32_t initrd_max; + uint8_t header[8192], *setup, *kernel; + hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0; + FILE *f; + char *vmode; + MachineState *machine = MACHINE(x86ms); + struct setup_data *setup_data; + const char *kernel_filename = machine->kernel_filename; + const char *initrd_filename = machine->initrd_filename; + const char *dtb_filename = machine->dtb; + const char *kernel_cmdline = machine->kernel_cmdline; + + /* Align to 16 bytes as a paranoia measure */ + cmdline_size = (strlen(kernel_cmdline) + 16) & ~15; + + /* load the kernel header */ + f = fopen(kernel_filename, "rb"); + if (!f) { + fprintf(stderr, "qemu: could not open kernel file '%s': %s\n", + kernel_filename, strerror(errno)); + exit(1); + } + + kernel_size = get_file_size(f); + if (!kernel_size || + fread(header, 1, MIN(ARRAY_SIZE(header), kernel_size), f) != + MIN(ARRAY_SIZE(header), kernel_size)) { + fprintf(stderr, "qemu: could not load kernel '%s': %s\n", + kernel_filename, strerror(errno)); + exit(1); + } + + /* kernel protocol version */ + if 
(ldl_p(header + 0x202) == 0x53726448) { + protocol = lduw_p(header + 0x206); + } else { + /* + * This could be a multiboot kernel. If it is, let's stop treating it + * like a Linux kernel. + * Note: some multiboot images could be in the ELF format (the same as + * PVH), so we try multiboot first since we check the multiboot magic + * header before loading it. + */ + if (load_multiboot(fw_cfg, f, kernel_filename, initrd_filename, + kernel_cmdline, kernel_size, header)) { + return; + } + /* + * Check if the file is an uncompressed kernel file (ELF) and load it, + * saving the PVH entry point used by the x86/HVM direct boot ABI. + * If load_elfboot() is successful, populate the fw_cfg info. + */ + if (pvh_enabled && + load_elfboot(kernel_filename, kernel_size, + header, pvh_start_addr, fw_cfg)) { + fclose(f); + + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); + + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, sizeof(header)); + fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, + header, sizeof(header)); + + /* load initrd */ + if (initrd_filename) { + GMappedFile *mapped_file; + gsize initrd_size; + gchar *initrd_data; + GError *gerr = NULL; + + mapped_file = g_mapped_file_new(initrd_filename, false, &gerr); + if (!mapped_file) { + fprintf(stderr, "qemu: error reading initrd %s: %s\n", + initrd_filename, gerr->message); + exit(1); + } + x86ms->initrd_mapped_file = mapped_file; + + initrd_data = g_mapped_file_get_contents(mapped_file); + initrd_size = g_mapped_file_get_length(mapped_file); + initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1; + if (initrd_size >= initrd_max) { + fprintf(stderr, "qemu: initrd is too large, cannot support. " + "(max: %"PRIu32", need %"PRId64")\n", + initrd_max, (uint64_t)initrd_size); + exit(1); + } + + initrd_addr = (initrd_max - initrd_size) & ~4095; + + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, + initrd_size); + } + + option_rom[nb_option_roms].bootindex = 0; + option_rom[nb_option_roms].name = "pvh.bin"; + nb_option_roms++; + + return; + } + protocol = 0; + } + + if (protocol < 0x200 || !(header[0x211] & 0x01)) { + /* Low kernel */ + real_addr = 0x90000; + cmdline_addr = 0x9a000 - cmdline_size; + prot_addr = 0x10000; + } else if (protocol < 0x202) { + /* High but ancient kernel */ + real_addr = 0x90000; + cmdline_addr = 0x9a000 - cmdline_size; + prot_addr = 0x100000; + } else { + /* High and recent kernel */ + real_addr = 0x10000; + cmdline_addr = 0x20000; + prot_addr = 0x100000; + } + + /* highest address for loading the initrd */ + if (protocol >= 0x20c && + lduw_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) { + /* + * Linux has supported initrd up to 4 GB for a very long time (2007, + * long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013), + * though it only sets initrd_max to 2 GB to "work around bootloader + * bugs". Luckily, QEMU firmware (which acts like a bootloader) + * has supported this. + * + * It's believed that if XLF_CAN_BE_LOADED_ABOVE_4G is set, initrd can + * be loaded at any address. + * + * In addition, initrd_max is uint32_t simply because QEMU doesn't + * support the 64-bit boot protocol (specifically the ext_ramdisk_image + * field). + * + * Therefore, simply limit initrd_max to UINT32_MAX here as well.
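Once initrd_max is settled, placement (both in the PVH path above and in the legacy path further down) is a single expression: the image lands at the highest page-aligned address below the limit. A worked example with assumed sizes (MiB is the qemu/units.h constant):

/* assumed: initrd_max = 0x7fffffff, 16 MiB initrd */
uint32_t max  = 0x7fffffff;
uint32_t size = 16 * MiB;

/* (0x7fffffff - 0x01000000) & ~4095 == 0x7efff000 */
uint32_t addr = (max - size) & ~4095;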
+ */ + initrd_max = UINT32_MAX; + } else if (protocol >= 0x203) { + initrd_max = ldl_p(header + 0x22c); + } else { + initrd_max = 0x37ffffff; + } + + if (initrd_max >= x86ms->below_4g_mem_size - acpi_data_size) { + initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1; + } + + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); + + if (protocol >= 0x202) { + stl_p(header + 0x228, cmdline_addr); + } else { + stw_p(header + 0x20, 0xA33F); + stw_p(header + 0x22, cmdline_addr - real_addr); + } + + /* handle vga= parameter */ + vmode = strstr(kernel_cmdline, "vga="); + if (vmode) { + unsigned int video_mode; + int ret; + /* skip "vga=" */ + vmode += 4; + if (!strncmp(vmode, "normal", 6)) { + video_mode = 0xffff; + } else if (!strncmp(vmode, "ext", 3)) { + video_mode = 0xfffe; + } else if (!strncmp(vmode, "ask", 3)) { + video_mode = 0xfffd; + } else { + ret = qemu_strtoui(vmode, NULL, 0, &video_mode); + if (ret != 0) { + fprintf(stderr, "qemu: can't parse 'vga' parameter: %s\n", + strerror(-ret)); + exit(1); + } + } + stw_p(header + 0x1fa, video_mode); + } + + /* loader type */ + /* + * High nybble = B reserved for QEMU; low nybble is revision number. + * If this code is substantially changed, you may want to consider + * incrementing the revision. + */ + if (protocol >= 0x200) { + header[0x210] = 0xB0; + } + /* heap */ + if (protocol >= 0x201) { + header[0x211] |= 0x80; /* CAN_USE_HEAP */ + stw_p(header + 0x224, cmdline_addr - real_addr - 0x200); + } + + /* load initrd */ + if (initrd_filename) { + GMappedFile *mapped_file; + gsize initrd_size; + gchar *initrd_data; + GError *gerr = NULL; + + if (protocol < 0x200) { + fprintf(stderr, "qemu: linux kernel too old to load a ram disk\n"); + exit(1); + } + + mapped_file = g_mapped_file_new(initrd_filename, false, &gerr); + if (!mapped_file) { + fprintf(stderr, "qemu: error reading initrd %s: %s\n", + initrd_filename, gerr->message); + exit(1); + } + x86ms->initrd_mapped_file = mapped_file; + + initrd_data = g_mapped_file_get_contents(mapped_file); + initrd_size = g_mapped_file_get_length(mapped_file); + if (initrd_size >= initrd_max) { + fprintf(stderr, "qemu: initrd is too large, cannot support." 
+ "(max: %"PRIu32", need %"PRId64")\n", + initrd_max, (uint64_t)initrd_size); + exit(1); + } + + initrd_addr = (initrd_max - initrd_size) & ~4095; + + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size); + + stl_p(header + 0x218, initrd_addr); + stl_p(header + 0x21c, initrd_size); + } + + /* load kernel and setup */ + setup_size = header[0x1f1]; + if (setup_size == 0) { + setup_size = 4; + } + setup_size = (setup_size + 1) * 512; + if (setup_size > kernel_size) { + fprintf(stderr, "qemu: invalid kernel header\n"); + exit(1); + } + kernel_size -= setup_size; + + setup = g_malloc(setup_size); + kernel = g_malloc(kernel_size); + fseek(f, 0, SEEK_SET); + if (fread(setup, 1, setup_size, f) != setup_size) { + fprintf(stderr, "fread() failed\n"); + exit(1); + } + if (fread(kernel, 1, kernel_size, f) != kernel_size) { + fprintf(stderr, "fread() failed\n"); + exit(1); + } + fclose(f); + + /* append dtb to kernel */ + if (dtb_filename) { + if (protocol < 0x209) { + fprintf(stderr, "qemu: Linux kernel too old to load a dtb\n"); + exit(1); + } + + dtb_size = get_image_size(dtb_filename); + if (dtb_size <= 0) { + fprintf(stderr, "qemu: error reading dtb %s: %s\n", + dtb_filename, strerror(errno)); + exit(1); + } + + setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16); + kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size; + kernel = g_realloc(kernel, kernel_size); + + stq_p(header + 0x250, prot_addr + setup_data_offset); + + setup_data = (struct setup_data *)(kernel + setup_data_offset); + setup_data->next = 0; + setup_data->type = cpu_to_le32(SETUP_DTB); + setup_data->len = cpu_to_le32(dtb_size); + + load_image_size(dtb_filename, setup_data->data, dtb_size); + } + + memcpy(setup, header, MIN(sizeof(header), setup_size)); + + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); + + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size); + + option_rom[nb_option_roms].bootindex = 0; + option_rom[nb_option_roms].name = "linuxboot.bin"; + if (linuxboot_dma_enabled && fw_cfg_dma_enabled(fw_cfg)) { + option_rom[nb_option_roms].name = "linuxboot_dma.bin"; + } + nb_option_roms++; +} + +void x86_bios_rom_init(MemoryRegion *rom_memory, bool isapc_ram_fw) +{ + char *filename; + MemoryRegion *bios, *isa_bios; + int bios_size, isa_bios_size; + int ret; + + /* BIOS load */ + if (bios_name == NULL) { + bios_name = BIOS_FILENAME; + } + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (filename) { + bios_size = get_image_size(filename); + } else { + bios_size = -1; + } + if (bios_size <= 0 || + (bios_size % 65536) != 0) { + goto bios_error; + } + bios = g_malloc(sizeof(*bios)); + memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal); + if (!isapc_ram_fw) { + memory_region_set_readonly(bios, true); + } + ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1); + if (ret != 0) { + bios_error: + fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name); + exit(1); + } + g_free(filename); + + /* map the last 128KB of the BIOS in ISA space */ + isa_bios_size = MIN(bios_size, 128 * KiB); + isa_bios = g_malloc(sizeof(*isa_bios)); + memory_region_init_alias(isa_bios, NULL, 
"isa-bios", bios, + bios_size - isa_bios_size, isa_bios_size); + memory_region_add_subregion_overlap(rom_memory, + 0x100000 - isa_bios_size, + isa_bios, + 1); + if (!isapc_ram_fw) { + memory_region_set_readonly(isa_bios, true); + } + + /* map all the bios at the top of memory */ + memory_region_add_subregion(rom_memory, + (uint32_t)(-bios_size), + bios); +} + +static void x86_machine_get_max_ram_below_4g(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + uint64_t value = x86ms->max_ram_below_4g; + + visit_type_size(v, name, &value, errp); +} + +static void x86_machine_set_max_ram_below_4g(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + Error *error = NULL; + uint64_t value; + + visit_type_size(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + if (value > 4 * GiB) { + error_setg(&error, + "Machine option 'max-ram-below-4g=%"PRIu64 + "' expects size less than or equal to 4G", value); + error_propagate(errp, error); + return; + } + + if (value < 1 * MiB) { + warn_report("Only %" PRIu64 " bytes of RAM below the 4GiB boundary," + "BIOS may not work with less than 1MiB", value); + } + + x86ms->max_ram_below_4g = value; +} + +static void x86_machine_initfn(Object *obj) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + + x86ms->max_ram_below_4g = 0; /* use default */ + x86ms->smp_dies = 1; +} + +static void x86_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + + mc->cpu_index_to_instance_props = x86_cpu_index_to_props; + mc->get_default_cpu_node_id = x86_get_default_cpu_node_id; + mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids; + x86mc->compat_apic_id_mode = false; + nc->nmi_monitor_handler = x86_nmi; + + object_class_property_add(oc, X86_MACHINE_MAX_RAM_BELOW_4G, "size", + x86_machine_get_max_ram_below_4g, x86_machine_set_max_ram_below_4g, + NULL, NULL, &error_abort); + + object_class_property_set_description(oc, X86_MACHINE_MAX_RAM_BELOW_4G, + "Maximum ram below the 4G boundary (32bit boundary)", &error_abort); +} + +static const TypeInfo x86_machine_info = { + .name = TYPE_X86_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(X86MachineState), + .instance_init = x86_machine_initfn, + .class_size = sizeof(X86MachineClass), + .class_init = x86_machine_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NMI }, + { } + }, +}; + +static void x86_machine_register_types(void) +{ + type_register_static(&x86_machine_info); +} + +type_init(x86_machine_register_types) diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 6b5e5bb7f5..95f23a263c 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -197,11 +197,13 @@ qemu_irq *xen_interrupt_controller_init(void) static void xen_ram_init(PCMachineState *pcms, ram_addr_t ram_size, MemoryRegion **ram_memory_p) { + X86MachineState *x86ms = X86_MACHINE(pcms); MemoryRegion *sysmem = get_system_memory(); ram_addr_t block_len; - uint64_t user_lowmem = object_property_get_uint(qdev_get_machine(), - PC_MACHINE_MAX_RAM_BELOW_4G, - &error_abort); + uint64_t user_lowmem = + object_property_get_uint(qdev_get_machine(), + X86_MACHINE_MAX_RAM_BELOW_4G, + &error_abort); /* Handle the machine opt max-ram-below-4g. It is basically doing * min(xen limit, user limit). 
@@ -214,20 +216,20 @@ static void xen_ram_init(PCMachineState *pcms, } if (ram_size >= user_lowmem) { - pcms->above_4g_mem_size = ram_size - user_lowmem; - pcms->below_4g_mem_size = user_lowmem; + x86ms->above_4g_mem_size = ram_size - user_lowmem; + x86ms->below_4g_mem_size = user_lowmem; } else { - pcms->above_4g_mem_size = 0; - pcms->below_4g_mem_size = ram_size; + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = ram_size; } - if (!pcms->above_4g_mem_size) { + if (!x86ms->above_4g_mem_size) { block_len = ram_size; } else { /* * Xen does not allocate the memory continuously, it keeps a * hole of the size computed above or passed in. */ - block_len = (1ULL << 32) + pcms->above_4g_mem_size; + block_len = (1ULL << 32) + x86ms->above_4g_mem_size; } memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len, &error_fatal); @@ -244,12 +246,12 @@ static void xen_ram_init(PCMachineState *pcms, */ memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", &ram_memory, 0xc0000, - pcms->below_4g_mem_size - 0xc0000); + x86ms->below_4g_mem_size - 0xc0000); memory_region_add_subregion(sysmem, 0xc0000, &ram_lo); - if (pcms->above_4g_mem_size > 0) { + if (x86ms->above_4g_mem_size > 0) { memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", &ram_memory, 0x100000000ULL, - pcms->above_4g_mem_size); + x86ms->above_4g_mem_size); memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi); } } @@ -265,7 +267,7 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, /* RAM already populated in Xen */ fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", - __func__, size, ram_addr); + __func__, size, ram_addr); return; } diff --git a/hw/intc/apic.c b/hw/intc/apic.c index bce89911dc..2a74f7b4bf 100644 --- a/hw/intc/apic.c +++ b/hw/intc/apic.c @@ -610,7 +610,7 @@ int apic_accept_pic_intr(DeviceState *dev) if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 || (lvt0 & APIC_LVT_MASKED) == 0) - return 1; + return isa_pic != NULL; return 0; } diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c index 1ede055387..ead14e1888 100644 --- a/hw/intc/ioapic.c +++ b/hw/intc/ioapic.c @@ -89,7 +89,7 @@ static void ioapic_entry_parse(uint64_t entry, struct ioapic_entry_info *info) static void ioapic_service(IOAPICCommonState *s) { - AddressSpace *ioapic_as = PC_MACHINE(qdev_get_machine())->ioapic_as; + AddressSpace *ioapic_as = X86_MACHINE(qdev_get_machine())->ioapic_as; struct ioapic_entry_info info; uint8_t i; uint32_t mask; diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c index cedccba8a9..c9ee80eaae 100644 --- a/hw/intc/s390_flic_kvm.c +++ b/hw/intc/s390_flic_kvm.c @@ -25,7 +25,7 @@ #include "migration/qemu-file-types.h" #include "trace.h" -#define FLIC_SAVE_INITIAL_SIZE getpagesize() +#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size #define FLIC_FAILED (-1UL) #define FLIC_SAVEVM_VERSION 1 diff --git a/hw/mem/Makefile.objs b/hw/mem/Makefile.objs index 3e2f7c5ca2..56345befd0 100644 --- a/hw/mem/Makefile.objs +++ b/hw/mem/Makefile.objs @@ -1,3 +1,3 @@ common-obj-$(CONFIG_DIMM) += pc-dimm.o -common-obj-$(CONFIG_MEM_DEVICE) += memory-device.o +common-obj-y += memory-device.o common-obj-$(CONFIG_NVDIMM) += nvdimm.o diff --git a/hw/misc/tmp421.c b/hw/misc/tmp421.c index 9f044705fa..c0bc150bca 100644 --- a/hw/misc/tmp421.c +++ b/hw/misc/tmp421.c @@ -120,7 +120,7 @@ static void tmp421_get_temperature(Object *obj, Visitor *v, const char *name, int tempid; if (sscanf(name, "temperature%d", &tempid) != 1) { - error_setg(errp, "error 
reading %s: %m", name); + error_setg(errp, "error reading %s: %s", name, g_strerror(errno)); return; } @@ -160,7 +160,7 @@ static void tmp421_set_temperature(Object *obj, Visitor *v, const char *name, } if (sscanf(name, "temperature%d", &tempid) != 1) { - error_setg(errp, "error reading %s: %m", name); + error_setg(errp, "error reading %s: %s", name, g_strerror(errno)); return; } diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index e975700f95..6b82803fa7 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -49,6 +49,7 @@ static const int kernel_feature_bits[] = { VIRTIO_F_VERSION_1, VIRTIO_NET_F_MTU, VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_PACKED, VHOST_INVALID_FEATURE_BIT }; @@ -74,6 +75,7 @@ static const int user_feature_bits[] = { VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_MTU, VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_PACKED, /* This bit implies RARP isn't sent by QEMU out of band */ VIRTIO_NET_F_GUEST_ANNOUNCE, diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 9f11422337..2c4909c5f9 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -90,15 +90,15 @@ static inline __virtio16 *virtio_net_rsc_ext_num_dupacks( static VirtIOFeature feature_sizes[] = { {.flags = 1ULL << VIRTIO_NET_F_MAC, - .end = virtio_endof(struct virtio_net_config, mac)}, + .end = endof(struct virtio_net_config, mac)}, {.flags = 1ULL << VIRTIO_NET_F_STATUS, - .end = virtio_endof(struct virtio_net_config, status)}, + .end = endof(struct virtio_net_config, status)}, {.flags = 1ULL << VIRTIO_NET_F_MQ, - .end = virtio_endof(struct virtio_net_config, max_virtqueue_pairs)}, + .end = endof(struct virtio_net_config, max_virtqueue_pairs)}, {.flags = 1ULL << VIRTIO_NET_F_MTU, - .end = virtio_endof(struct virtio_net_config, mtu)}, + .end = endof(struct virtio_net_config, mtu)}, {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX, - .end = virtio_endof(struct virtio_net_config, duplex)}, + .end = endof(struct virtio_net_config, duplex)}, {} }; diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 7dc3ac378e..aef1727250 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -690,6 +690,15 @@ void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value) fw_cfg_add_bytes(s, key, g_memdup(value, sz), sz); } +void fw_cfg_modify_string(FWCfgState *s, uint16_t key, const char *value) +{ + size_t sz = strlen(value) + 1; + char *old; + + old = fw_cfg_modify_bytes_read(s, key, g_memdup(value, sz), sz); + g_free(old); +} + void fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value) { uint16_t *copy; @@ -720,6 +729,16 @@ void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value) fw_cfg_add_bytes(s, key, copy, sizeof(value)); } +void fw_cfg_modify_i32(FWCfgState *s, uint16_t key, uint32_t value) +{ + uint32_t *copy, *old; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le32(value); + old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value)); + g_free(old); +} + void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value) { uint64_t *copy; @@ -730,6 +749,16 @@ void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value) fw_cfg_add_bytes(s, key, copy, sizeof(value)); } +void fw_cfg_modify_i64(FWCfgState *s, uint16_t key, uint64_t value) +{ + uint64_t *copy, *old; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le64(value); + old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value)); + g_free(old); +} + void fw_cfg_set_order_override(FWCfgState *s, int order) { assert(s->fw_cfg_order_override == 0); diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 
c5bbcc7433..3594517f0c 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -439,7 +439,7 @@ static void ppc_core99_init(MachineState *machine) } /* The NewWorld NVRAM is not located in the MacIO device */ - if (kvm_enabled() && getpagesize() > 4096) { + if (kvm_enabled() && qemu_real_host_page_size > 4096) { /* We can't combine read-write and read-only in a single page, so move the NVRAM out of ROM again for KVM */ nvram_addr = 0xFFE00000; diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index cc0e7829b6..f6fbcf99ed 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1942,7 +1942,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) * our memory slot is of page size granularity. */ if (kvm_enabled()) { - msi_window_size = getpagesize(); + msi_window_size = qemu_real_host_page_size; } memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr, diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c index 3e36e13013..3722d9e772 100644 --- a/hw/rdma/vmw/pvrdma_main.c +++ b/hw/rdma/vmw/pvrdma_main.c @@ -601,7 +601,7 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp) rdma_info_report("Initializing device %s %x.%x", pdev->name, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); - if (TARGET_PAGE_SIZE != getpagesize()) { + if (TARGET_PAGE_SIZE != qemu_real_host_page_size) { error_setg(errp, "Target page size must be the same as host page size"); return; } diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index fb19b2df3a..b12660b9f8 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -36,4 +36,5 @@ config RISCV_VIRT select SERIAL select VIRTIO_MMIO select PCI_EXPRESS_GENERIC_BRIDGE + select PFLASH_CFI01 select SIFIVE diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c index 2e92fb0680..7fee98d2f8 100644 --- a/hw/riscv/boot.c +++ b/hw/riscv/boot.c @@ -38,7 +38,7 @@ void riscv_find_and_load_firmware(MachineState *machine, const char *default_machine_firmware, hwaddr firmware_load_addr) { - char *firmware_filename; + char *firmware_filename = NULL; if (!machine->firmware) { /* @@ -70,14 +70,11 @@ void riscv_find_and_load_firmware(MachineState *machine, * if no -bios option is set without breaking anything. 
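The riscv_find_and_load_firmware() rewrite completed in the hunk below collapses -bios handling into three cases, with the final strcmp() against "none" replaced by a NULL check:

/* Behaviour after this change (sketch):
 *   (no -bios)   firmware_filename = riscv_find_firmware(default);
 *   -bios none   firmware_filename stays NULL -> nothing is loaded;
 *   -bios FILE   firmware_filename = riscv_find_firmware(FILE);
 */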
*/ firmware_filename = riscv_find_firmware(default_machine_firmware); - } else { - firmware_filename = machine->firmware; - if (strcmp(firmware_filename, "none")) { - firmware_filename = riscv_find_firmware(firmware_filename); - } + } else if (strcmp(machine->firmware, "none")) { + firmware_filename = riscv_find_firmware(machine->firmware); } - if (strcmp(firmware_filename, "none")) { + if (firmware_filename) { /* If not "none" load the firmware */ riscv_load_firmware(firmware_filename, firmware_load_addr); g_free(firmware_filename); diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index 9f8e84bf2e..9552abf4dd 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -65,11 +65,13 @@ static const struct MemmapEntry { [SIFIVE_U_DEBUG] = { 0x0, 0x100 }, [SIFIVE_U_MROM] = { 0x1000, 0x11000 }, [SIFIVE_U_CLINT] = { 0x2000000, 0x10000 }, + [SIFIVE_U_L2LIM] = { 0x8000000, 0x2000000 }, [SIFIVE_U_PLIC] = { 0xc000000, 0x4000000 }, [SIFIVE_U_PRCI] = { 0x10000000, 0x1000 }, [SIFIVE_U_UART0] = { 0x10010000, 0x1000 }, [SIFIVE_U_UART1] = { 0x10011000, 0x1000 }, [SIFIVE_U_OTP] = { 0x10070000, 0x1000 }, + [SIFIVE_U_FLASH0] = { 0x20000000, 0x10000000 }, [SIFIVE_U_DRAM] = { 0x80000000, 0x0 }, [SIFIVE_U_GEM] = { 0x10090000, 0x2000 }, [SIFIVE_U_GEM_MGMT] = { 0x100a0000, 0x1000 }, @@ -151,8 +153,6 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); char *isa; qemu_fdt_add_subnode(fdt, nodename); - qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", - SIFIVE_U_CLOCK_FREQ); /* cpu 0 is the management hart that does not have mmu */ if (cpu != 0) { qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48"); @@ -272,6 +272,10 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, s->soc.gem.conf.macaddr.a, ETH_ALEN); qemu_fdt_setprop_cell(fdt, nodename, "#address-cells", 1); qemu_fdt_setprop_cell(fdt, nodename, "#size-cells", 0); + + qemu_fdt_add_subnode(fdt, "/aliases"); + qemu_fdt_setprop_string(fdt, "/aliases", "ethernet0", nodename); + g_free(nodename); nodename = g_strdup_printf("/soc/ethernet@%lx/ethernet-phy@0", @@ -299,7 +303,6 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); } - qemu_fdt_add_subnode(fdt, "/aliases"); qemu_fdt_setprop_string(fdt, "/aliases", "serial0", nodename); g_free(nodename); @@ -308,10 +311,11 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap, static void riscv_sifive_u_init(MachineState *machine) { const struct MemmapEntry *memmap = sifive_u_memmap; - - SiFiveUState *s = g_new0(SiFiveUState, 1); + SiFiveUState *s = RISCV_U_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *main_mem = g_new(MemoryRegion, 1); + MemoryRegion *flash0 = g_new(MemoryRegion, 1); + target_ulong start_addr = memmap[SIFIVE_U_DRAM].base; int i; /* Initialize SoC */ @@ -327,6 +331,12 @@ static void riscv_sifive_u_init(MachineState *machine) memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DRAM].base, main_mem); + /* register QSPI0 Flash */ + memory_region_init_ram(flash0, NULL, "riscv.sifive.u.flash0", + memmap[SIFIVE_U_FLASH0].size, &error_fatal); + memory_region_add_subregion(system_memory, memmap[SIFIVE_U_FLASH0].base, + flash0); + /* create device tree */ create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline); @@ -348,6 +358,10 @@ static void riscv_sifive_u_init(MachineState *machine) } } + if 
(s->start_in_flash) { + start_addr = memmap[SIFIVE_U_FLASH0].base; + } + /* reset vector */ uint32_t reset_vec[8] = { 0x00000297, /* 1: auipc t0, %pcrel_hi(dtb) */ @@ -360,7 +374,7 @@ static void riscv_sifive_u_init(MachineState *machine) #endif 0x00028067, /* jr t0 */ 0x00000000, - memmap[SIFIVE_U_DRAM].base, /* start: .dword DRAM_BASE */ + start_addr, /* start: .dword */ 0x00000000, /* dtb: */ }; @@ -424,6 +438,33 @@ static void riscv_sifive_u_soc_init(Object *obj) TYPE_CADENCE_GEM); } +static bool sifive_u_get_start_in_flash(Object *obj, Error **errp) +{ + SiFiveUState *s = RISCV_U_MACHINE(obj); + + return s->start_in_flash; +} + +static void sifive_u_set_start_in_flash(Object *obj, bool value, Error **errp) +{ + SiFiveUState *s = RISCV_U_MACHINE(obj); + + s->start_in_flash = value; +} + +static void riscv_sifive_u_machine_instance_init(Object *obj) +{ + SiFiveUState *s = RISCV_U_MACHINE(obj); + + s->start_in_flash = false; + object_property_add_bool(obj, "start-in-flash", sifive_u_get_start_in_flash, + sifive_u_set_start_in_flash, NULL); + object_property_set_description(obj, "start-in-flash", + "Set to 'on' to tell QEMU's ROM to jump to " \ "flash. Otherwise QEMU will jump to DRAM", + NULL); +} + static void riscv_sifive_u_soc_realize(DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); @@ -431,6 +472,7 @@ static void riscv_sifive_u_soc_realize(DeviceState *dev, Error **errp) const struct MemmapEntry *memmap = sifive_u_memmap; MemoryRegion *system_memory = get_system_memory(); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); + MemoryRegion *l2lim_mem = g_new(MemoryRegion, 1); qemu_irq plic_gpios[SIFIVE_U_PLIC_NUM_SOURCES]; char *plic_hart_config; size_t plic_hart_config_len; @@ -459,6 +501,20 @@ static void riscv_sifive_u_soc_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(system_memory, memmap[SIFIVE_U_MROM].base, mask_rom); + /* + * Add L2-LIM at reset size. + * This should be reduced in size as the L2 Cache Controller WayEnable + * register is incremented. Unfortunately I don't see a nice (or any) way + * to handle reducing or blocking out the L2 LIM while still allowing it + * to be returned to fully enabled after a reset. For the time being, just + * leave it enabled all the time. This won't break anything, but will be + * too generous to misbehaving guests.
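With the property registered in riscv_sifive_u_machine_instance_init() above, booting from the flash region is a one-line machine option (hypothetical invocation; providing a meaningful image at the flash base is left to the user):

qemu-system-riscv64 -M sifive_u,start-in-flash=on ...

start_addr in the reset vector then resolves to memmap[SIFIVE_U_FLASH0].base (0x20000000) instead of the DRAM base.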
+ */ + memory_region_init_ram(l2lim_mem, NULL, "riscv.sifive.u.l2lim", + memmap[SIFIVE_U_L2LIM].size, &error_fatal); + memory_region_add_subregion(system_memory, memmap[SIFIVE_U_L2LIM].base, + l2lim_mem); + /* create PLIC hart topology configuration string */ plic_hart_config_len = (strlen(SIFIVE_U_PLIC_HART_CONFIG) + 1) * ms->smp.cpus; @@ -522,17 +578,6 @@ static void riscv_sifive_u_soc_realize(DeviceState *dev, Error **errp) memmap[SIFIVE_U_GEM_MGMT].base, memmap[SIFIVE_U_GEM_MGMT].size); } -static void riscv_sifive_u_machine_init(MachineClass *mc) -{ - mc->desc = "RISC-V Board compatible with SiFive U SDK"; - mc->init = riscv_sifive_u_init; - mc->max_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + SIFIVE_U_COMPUTE_CPU_COUNT; - mc->min_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + 1; - mc->default_cpus = mc->min_cpus; -} - -DEFINE_MACHINE("sifive_u", riscv_sifive_u_machine_init) - static void riscv_sifive_u_soc_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -556,3 +601,29 @@ static void riscv_sifive_u_soc_register_types(void) } type_init(riscv_sifive_u_soc_register_types) + +static void riscv_sifive_u_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "RISC-V Board compatible with SiFive U SDK"; + mc->init = riscv_sifive_u_init; + mc->max_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + SIFIVE_U_COMPUTE_CPU_COUNT; + mc->min_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + 1; + mc->default_cpus = mc->min_cpus; +} + +static const TypeInfo riscv_sifive_u_machine_typeinfo = { + .name = MACHINE_TYPE_NAME("sifive_u"), + .parent = TYPE_MACHINE, + .class_init = riscv_sifive_u_machine_class_init, + .instance_init = riscv_sifive_u_machine_instance_init, + .instance_size = sizeof(SiFiveUState), +}; + +static void riscv_sifive_u_machine_init_register_types(void) +{ + type_register_static(&riscv_sifive_u_machine_typeinfo); +} + +type_init(riscv_sifive_u_machine_init_register_types) diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index d60415d190..8bbffbcd0f 100644 --- a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -102,8 +102,6 @@ static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap, char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); char *isa = riscv_isa_string(&s->soc.harts[cpu]); qemu_fdt_add_subnode(fdt, nodename); - qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", - SPIKE_CLOCK_FREQ); qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48"); qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa); qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv"); diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index d36f5625ec..cc8f311e6b 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -26,6 +26,7 @@ #include "hw/boards.h" #include "hw/loader.h" #include "hw/sysbus.h" +#include "hw/qdev-properties.h" #include "hw/char/serial.h" #include "target/riscv/cpu.h" #include "hw/riscv/riscv_hart.h" @@ -61,12 +62,77 @@ static const struct MemmapEntry { [VIRT_PLIC] = { 0xc000000, 0x4000000 }, [VIRT_UART0] = { 0x10000000, 0x100 }, [VIRT_VIRTIO] = { 0x10001000, 0x1000 }, + [VIRT_FLASH] = { 0x20000000, 0x2000000 }, [VIRT_DRAM] = { 0x80000000, 0x0 }, [VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 }, [VIRT_PCIE_PIO] = { 0x03000000, 0x00010000 }, [VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 }, }; +#define VIRT_FLASH_SECTOR_SIZE (256 * KiB) + +static PFlashCFI01 *virt_flash_create1(RISCVVirtState *s, + const char *name, + const char *alias_prop_name) +{ + /* + * Create a single flash device. 
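For scale, a standalone sketch of the sizing rule that virt_flash_map1() enforces below: the 0x2000000-byte VIRT_FLASH window is split into two banks, and num-blocks is derived from the 256 KiB sector size. The window constant is copied from virt_memmap above; the checked values are illustrative.

    #include <assert.h>
    #include <stdint.h>

    #define KiB                    1024
    #define VIRT_FLASH_SECTOR_SIZE (256 * KiB)
    #define VIRT_FLASH_WINDOW      0x2000000u  /* virt_memmap[VIRT_FLASH].size */

    int main(void)
    {
        uint32_t bank = VIRT_FLASH_WINDOW / 2;        /* one pflash device */
        assert(bank % VIRT_FLASH_SECTOR_SIZE == 0);   /* map1()'s precondition */
        assert(bank / VIRT_FLASH_SECTOR_SIZE == 64);  /* its "num-blocks" */
        return 0;
    }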
We use the same parameters as + * the flash devices on the ARM virt board. + */ + DeviceState *dev = qdev_create(NULL, TYPE_PFLASH_CFI01); + + qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE); + qdev_prop_set_uint8(dev, "width", 4); + qdev_prop_set_uint8(dev, "device-width", 2); + qdev_prop_set_bit(dev, "big-endian", false); + qdev_prop_set_uint16(dev, "id0", 0x89); + qdev_prop_set_uint16(dev, "id1", 0x18); + qdev_prop_set_uint16(dev, "id2", 0x00); + qdev_prop_set_uint16(dev, "id3", 0x00); + qdev_prop_set_string(dev, "name", name); + + object_property_add_child(OBJECT(s), name, OBJECT(dev), + &error_abort); + object_property_add_alias(OBJECT(s), alias_prop_name, + OBJECT(dev), "drive", &error_abort); + + return PFLASH_CFI01(dev); +} + +static void virt_flash_create(RISCVVirtState *s) +{ + s->flash[0] = virt_flash_create1(s, "virt.flash0", "pflash0"); + s->flash[1] = virt_flash_create1(s, "virt.flash1", "pflash1"); +} + +static void virt_flash_map1(PFlashCFI01 *flash, + hwaddr base, hwaddr size, + MemoryRegion *sysmem) +{ + DeviceState *dev = DEVICE(flash); + + assert(size % VIRT_FLASH_SECTOR_SIZE == 0); + assert(size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX); + qdev_prop_set_uint32(dev, "num-blocks", size / VIRT_FLASH_SECTOR_SIZE); + qdev_init_nofail(dev); + + memory_region_add_subregion(sysmem, base, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), + 0)); +} + +static void virt_flash_map(RISCVVirtState *s, + MemoryRegion *sysmem) +{ + hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + + virt_flash_map1(s->flash[0], flashbase, flashsize, + sysmem); + virt_flash_map1(s->flash[1], flashbase + flashsize, flashsize, + sysmem); +} + static void create_pcie_irq_map(void *fdt, char *nodename, uint32_t plic_phandle) { @@ -121,6 +187,8 @@ static void create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap, char *nodename; uint32_t plic_phandle, phandle = 1; int i; + hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = virt_memmap[VIRT_FLASH].base; fdt = s->fdt = create_device_tree(&s->fdt_size); if (!fdt) { @@ -161,8 +229,6 @@ static void create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap, char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); char *isa = riscv_isa_string(&s->soc.harts[cpu]); qemu_fdt_add_subnode(fdt, nodename); - qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", - VIRT_CLOCK_FREQ); qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48"); qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa); qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv"); @@ -316,6 +382,15 @@ static void create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap, qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); } g_free(nodename); + + nodename = g_strdup_printf("/flash@%" PRIx64, flashbase); + qemu_fdt_add_subnode(s->fdt, nodename); + qemu_fdt_setprop_string(s->fdt, nodename, "compatible", "cfi-flash"); + qemu_fdt_setprop_sized_cells(s->fdt, nodename, "reg", + 2, flashbase, 2, flashsize, + 2, flashbase + flashsize, 2, flashsize); + qemu_fdt_setprop_cell(s->fdt, nodename, "bank-width", 4); + g_free(nodename); } @@ -362,13 +437,13 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, static void riscv_virt_board_init(MachineState *machine) { const struct MemmapEntry *memmap = virt_memmap; - - RISCVVirtState *s = g_new0(RISCVVirtState, 1); + RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); MemoryRegion *system_memory = 
get_system_memory(); MemoryRegion *main_mem = g_new(MemoryRegion, 1); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); char *plic_hart_config; size_t plic_hart_config_len; + target_ulong start_addr = memmap[VIRT_DRAM].base; int i; unsigned int smp_cpus = machine->smp.cpus; @@ -415,6 +490,14 @@ static void riscv_virt_board_init(MachineState *machine) } } + if (drive_get(IF_PFLASH, 0, 0)) { + /* + * Pflash was supplied, let's overwrite the address we jump to after + * reset to the base of the flash. + */ + start_addr = virt_memmap[VIRT_FLASH].base; + } + /* reset vector */ uint32_t reset_vec[8] = { 0x00000297, /* 1: auipc t0, %pcrel_hi(dtb) */ @@ -427,7 +510,7 @@ static void riscv_virt_board_init(MachineState *machine) #endif 0x00028067, /* jr t0 */ 0x00000000, - memmap[VIRT_DRAM].base, /* start: .dword memmap[VIRT_DRAM].base */ + start_addr, /* start: .dword */ 0x00000000, /* dtb: */ }; @@ -496,15 +579,43 @@ static void riscv_virt_board_init(MachineState *machine) 0, qdev_get_gpio_in(DEVICE(s->plic), UART0_IRQ), 399193, serial_hd(0), DEVICE_LITTLE_ENDIAN); + virt_flash_create(s); + + for (i = 0; i < ARRAY_SIZE(s->flash); i++) { + /* Map legacy -drive if=pflash to machine properties */ + pflash_cfi01_legacy_drive(s->flash[i], + drive_get(IF_PFLASH, 0, i)); + } + virt_flash_map(s, system_memory); + g_free(plic_hart_config); } -static void riscv_virt_board_machine_init(MachineClass *mc) +static void riscv_virt_machine_instance_init(Object *obj) { - mc->desc = "RISC-V VirtIO Board (Privileged ISA v1.10)"; +} + +static void riscv_virt_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "RISC-V VirtIO board"; mc->init = riscv_virt_board_init; - mc->max_cpus = 8; /* hardcoded limit in BBL */ + mc->max_cpus = 8; mc->default_cpu_type = VIRT_CPU; } -DEFINE_MACHINE("virt", riscv_virt_board_machine_init) +static const TypeInfo riscv_virt_machine_typeinfo = { + .name = MACHINE_TYPE_NAME("virt"), + .parent = TYPE_MACHINE, + .class_init = riscv_virt_machine_class_init, + .instance_init = riscv_virt_machine_instance_init, + .instance_size = sizeof(RISCVVirtState), +}; + +static void riscv_virt_machine_init_register_types(void) +{ + type_register_static(&riscv_virt_machine_typeinfo); +} + +type_init(riscv_virt_machine_init_register_types) diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c index 9d4ed54f65..ee6bf82b40 100644 --- a/hw/rtc/mc146818rtc.c +++ b/hw/rtc/mc146818rtc.c @@ -38,12 +38,13 @@ #include "hw/rtc/mc146818rtc_regs.h" #include "migration/vmstate.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" #include "qapi/qapi-events-misc-target.h" #include "qapi/visitor.h" #include "exec/address-spaces.h" +#include "hw/rtc/mc146818rtc_regs.h" #ifdef TARGET_I386 +#include "qapi/qapi-commands-misc-target.h" #include "hw/i386/apic.h" #endif @@ -72,36 +73,6 @@ #define RTC_CLOCK_RATE 32768 #define UIP_HOLD_LENGTH (8 * NANOSECONDS_PER_SECOND / 32768) -#define MC146818_RTC(obj) OBJECT_CHECK(RTCState, (obj), TYPE_MC146818_RTC) - -typedef struct RTCState { - ISADevice parent_obj; - - MemoryRegion io; - MemoryRegion coalesced_io; - uint8_t cmos_data[128]; - uint8_t cmos_index; - int32_t base_year; - uint64_t base_rtc; - uint64_t last_update; - int64_t offset; - qemu_irq irq; - int it_shift; - /* periodic timer */ - QEMUTimer *periodic_timer; - int64_t next_periodic_time; - /* update-ended timer */ - QEMUTimer *update_timer; - uint64_t next_alarm_time; - uint16_t irq_reinject_on_ack_count; - uint32_t irq_coalesced; - uint32_t period; 
- QEMUTimer *coalesced_timer; - LostTickPolicy lost_tick_policy; - Notifier suspend_notifier; - QLIST_ENTRY(RTCState) link; -} RTCState; - static void rtc_set_time(RTCState *s); static void rtc_update_time(RTCState *s); static void rtc_set_cmos(RTCState *s, const struct tm *tm); @@ -204,24 +175,28 @@ periodic_timer_update(RTCState *s, int64_t current_time, uint32_t old_period) period = rtc_periodic_clock_ticks(s); - if (period) { - /* compute 32 khz clock */ - cur_clock = - muldiv64(current_time, RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND); + if (!period) { + s->irq_coalesced = 0; + timer_del(s->periodic_timer); + return; + } - /* - * if the periodic timer's update is due to period re-configuration, - * we should count the clock since last interrupt. - */ - if (old_period) { - int64_t last_periodic_clock, next_periodic_clock; - - next_periodic_clock = muldiv64(s->next_periodic_time, - RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND); - last_periodic_clock = next_periodic_clock - old_period; - lost_clock = cur_clock - last_periodic_clock; - assert(lost_clock >= 0); - } + /* compute 32 khz clock */ + cur_clock = + muldiv64(current_time, RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND); + + /* + * if the periodic timer's update is due to period re-configuration, + * we should count the clock since last interrupt. + */ + if (old_period) { + int64_t last_periodic_clock, next_periodic_clock; + + next_periodic_clock = muldiv64(s->next_periodic_time, + RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND); + last_periodic_clock = next_periodic_clock - old_period; + lost_clock = cur_clock - last_periodic_clock; + assert(lost_clock >= 0); /* * s->irq_coalesced can change for two reasons: @@ -252,22 +227,19 @@ periodic_timer_update(RTCState *s, int64_t current_time, uint32_t old_period) rtc_coalesced_timer_update(s); } } else { - /* + /* * no way to compensate the interrupt if LOST_TICK_POLICY_SLEW * is not used, we should make the time progress anyway. 
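As a worked illustration of the arithmetic in periodic_timer_update() (a standalone sketch: __uint128_t stands in for QEMU's overflow-safe muldiv64(), and the 1024 Hz figure is only an example), the current time is converted to 32.768 kHz ticks and the next interrupt lands period - lost_clock ticks later.

    #include <stdint.h>

    #define RTC_CLOCK_RATE         32768
    #define NANOSECONDS_PER_SECOND 1000000000ULL

    /* ns -> 32.768 kHz ticks, kept overflow-safe like muldiv64() */
    static uint64_t to_ticks(uint64_t ns)
    {
        return (__uint128_t)ns * RTC_CLOCK_RATE / NANOSECONDS_PER_SECOND;
    }

    /* e.g. a 1024 Hz periodic rate is period = 32 ticks; if 5 ticks were
     * lost, the next IRQ is scheduled 27 ticks after the current clock. */
    static uint64_t next_irq_tick(uint64_t now_ns, uint32_t period,
                                  uint32_t lost_clock)
    {
        return to_ticks(now_ns) + period - lost_clock;
    }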
*/ lost_clock = MIN(lost_clock, period); } + } - assert(lost_clock >= 0 && lost_clock <= period); + assert(lost_clock >= 0 && lost_clock <= period); - next_irq_clock = cur_clock + period - lost_clock; - s->next_periodic_time = periodic_clock_to_ns(next_irq_clock) + 1; - timer_mod(s->periodic_timer, s->next_periodic_time); - } else { - s->irq_coalesced = 0; - timer_del(s->periodic_timer); - } + next_irq_clock = cur_clock + period - lost_clock; + s->next_periodic_time = periodic_clock_to_ns(next_irq_clock) + 1; + timer_mod(s->periodic_timer, s->next_periodic_time); } static void rtc_periodic_timer(void *opaque) @@ -993,17 +965,16 @@ static void rtc_realizefn(DeviceState *dev, Error **errp) object_property_add_tm(OBJECT(s), "date", rtc_get_date, NULL); qdev_init_gpio_out(dev, &s->irq, 1); + QLIST_INSERT_HEAD(&rtc_devices, s, link); } ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq) { DeviceState *dev; ISADevice *isadev; - RTCState *s; isadev = isa_create(bus, TYPE_MC146818_RTC); dev = DEVICE(isadev); - s = MC146818_RTC(isadev); qdev_prop_set_int32(dev, "base_year", base_year); qdev_init_nofail(dev); if (intercept_irq) { @@ -1011,9 +982,8 @@ ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq) } else { isa_connect_gpio_out(isadev, 0, RTC_ISA_IRQ); } - QLIST_INSERT_HEAD(&rtc_devices, s, link); - object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(s), + object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(isadev), "date", NULL); return isadev; @@ -1045,8 +1015,6 @@ static void rtc_class_initfn(ObjectClass *klass, void *data) dc->reset = rtc_resetdev; dc->vmsd = &vmstate_rtc; dc->props = mc146818rtc_properties; - /* Reason: needs to be wired up by rtc_init() */ - dc->user_creatable = false; } static const TypeInfo mc146818rtc_info = { diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index ee52aa7d17..e8b2b64d09 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -190,11 +190,12 @@ static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq) { VirtIOSCSIReq *req = sreq->hba_private; VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev); + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); uint32_t n = virtio_get_queue_index(req->vq) - 2; assert(n < vs->conf.num_queues); qemu_put_be32s(f, &n); - qemu_put_virtqueue_element(f, &req->elem); + qemu_put_virtqueue_element(vdev, f, &req->elem); } static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq) diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index e853eebe11..33692fc86f 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -196,14 +196,15 @@ int vfio_spapr_create_window(VFIOContainer *container, * bits_per_level is a safe guess of how much we can allocate per level: * 8 is the current minimum for CONFIG_FORCE_MAX_ZONEORDER and MAX_ORDER * is usually bigger than that. - * Below we look at getpagesize() as TCEs are allocated from system pages. + * Below we look at qemu_real_host_page_size as TCEs are allocated from + * system pages. 
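A standalone sketch of the sizing rule the vfio comment describes (ctz64_demo mirrors ctz64 from qemu/host-utils.h; the 4 KiB example is illustrative): each TCE table level covers the page-size bits plus 8, and the level count is the ceiling of bits_total over that.

    #include <stdint.h>

    static unsigned ctz64_demo(uint64_t v) { return __builtin_ctzll(v); }

    static unsigned tce_levels(unsigned bits_total, uint64_t host_page_size)
    {
        unsigned bits_per_level = ctz64_demo(host_page_size) + 8;
        /* 4 KiB pages: 12 + 8 = 20 bits/level, so 32 bits need 2 levels */
        return (bits_total + bits_per_level - 1) / bits_per_level;
    }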
*/ - bits_per_level = ctz64(getpagesize()) + 8; + bits_per_level = ctz64(qemu_real_host_page_size) + 8; create.levels = bits_total / bits_per_level; if (bits_total % bits_per_level) { ++create.levels; } - max_levels = (64 - create.page_shift) / ctz64(getpagesize()); + max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size); for ( ; create.levels <= max_levels; ++create.levels) { ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); if (!ret) { diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 3d5ca0f667..94d934c44b 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -29,57 +29,11 @@ #include "qemu/host-utils.h" #include "qemu/module.h" #include "sysemu/kvm.h" -#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-mmio.h" #include "qemu/error-report.h" #include "qemu/log.h" #include "trace.h" -/* QOM macros */ -/* virtio-mmio-bus */ -#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus" -#define VIRTIO_MMIO_BUS(obj) \ - OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS) -#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \ - OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS) -#define VIRTIO_MMIO_BUS_CLASS(klass) \ - OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS) - -/* virtio-mmio */ -#define TYPE_VIRTIO_MMIO "virtio-mmio" -#define VIRTIO_MMIO(obj) \ - OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO) - -#define VIRT_MAGIC 0x74726976 /* 'virt' */ -#define VIRT_VERSION 2 -#define VIRT_VERSION_LEGACY 1 -#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */ - -typedef struct VirtIOMMIOQueue { - uint16_t num; - bool enabled; - uint32_t desc[2]; - uint32_t avail[2]; - uint32_t used[2]; -} VirtIOMMIOQueue; - -typedef struct { - /* Generic */ - SysBusDevice parent_obj; - MemoryRegion iomem; - qemu_irq irq; - bool legacy; - /* Guest accessible state needing migration and reset */ - uint32_t host_features_sel; - uint32_t guest_features_sel; - uint32_t guest_page_shift; - /* virtio-bus */ - VirtioBusState bus; - bool format_transport_address; - /* Fields only used for non-legacy (v2) devices */ - uint32_t guest_features[2]; - VirtIOMMIOQueue vqs[VIRTIO_QUEUE_MAX]; -} VirtIOMMIOProxy; - static bool virtio_mmio_ioeventfd_enabled(DeviceState *d) { return kvm_eventfds_enabled(); diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c index e93bed020f..b498a20332 100644 --- a/hw/virtio/virtio-rng.c +++ b/hw/virtio/virtio-rng.c @@ -238,6 +238,7 @@ static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp) qemu_del_vm_change_state_handler(vrng->vmstate); timer_del(vrng->rate_limit_timer); timer_free(vrng->rate_limit_timer); + virtio_del_queue(vdev, 0); virtio_cleanup(vdev); } diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 527df03bfd..2e91dec567 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -43,6 +43,13 @@ typedef struct VRingDesc uint16_t next; } VRingDesc; +typedef struct VRingPackedDesc { + uint64_t addr; + uint32_t len; + uint16_t id; + uint16_t flags; +} VRingPackedDesc; + typedef struct VRingAvail { uint16_t flags; @@ -81,17 +88,26 @@ typedef struct VRing VRingMemoryRegionCaches *caches; } VRing; +typedef struct VRingPackedDescEvent { + uint16_t off_wrap; + uint16_t flags; +} VRingPackedDescEvent ; + struct VirtQueue { VRing vring; + VirtQueueElement *used_elems; /* Next head to pop */ uint16_t last_avail_idx; + bool last_avail_wrap_counter; /* Last avail_idx read from VQ. 
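The VRingPackedDesc introduced here is a fixed 16-byte structure with the field order the virtio 1.1 spec prescribes; a standalone compile-time check of that layout (demo type, C11 static_assert):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
    } PackedDescDemo;

    static_assert(offsetof(PackedDescDemo, len) == 8, "len at byte 8");
    static_assert(offsetof(PackedDescDemo, id) == 12, "id at byte 12");
    static_assert(offsetof(PackedDescDemo, flags) == 14, "flags at byte 14");
    static_assert(sizeof(PackedDescDemo) == 16, "no padding anywhere");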
*/ uint16_t shadow_avail_idx; + bool shadow_avail_wrap_counter; uint16_t used_idx; + bool used_wrap_counter; /* Last used index value we have signalled on */ uint16_t signalled_used; @@ -144,10 +160,9 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n) VRingMemoryRegionCaches *old = vq->vring.caches; VRingMemoryRegionCaches *new = NULL; hwaddr addr, size; - int event_size; int64_t len; + bool packed; - event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; addr = vq->vring.desc; if (!addr) { @@ -155,14 +170,16 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n) } new = g_new0(VRingMemoryRegionCaches, 1); size = virtio_queue_get_desc_size(vdev, n); + packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? + true : false; len = address_space_cache_init(&new->desc, vdev->dma_as, - addr, size, false); + addr, size, packed); if (len < size) { virtio_error(vdev, "Cannot map desc"); goto err_desc; } - size = virtio_queue_get_used_size(vdev, n) + event_size; + size = virtio_queue_get_used_size(vdev, n); len = address_space_cache_init(&new->used, vdev->dma_as, vq->vring.used, size, true); if (len < size) { @@ -170,7 +187,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n) goto err_used; } - size = virtio_queue_get_avail_size(vdev, n) + event_size; + size = virtio_queue_get_avail_size(vdev, n); len = address_space_cache_init(&new->avail, vdev->dma_as, vq->vring.avail, size, false); if (len < size) { @@ -212,8 +229,8 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n) } /* Called within rcu_read_lock(). */ -static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc, - MemoryRegionCache *cache, int i) +static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc, + MemoryRegionCache *cache, int i) { address_space_read_cached(cache, i * sizeof(VRingDesc), desc, sizeof(VRingDesc)); @@ -223,6 +240,45 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc, virtio_tswap16s(vdev, &desc->next); } +static void vring_packed_event_read(VirtIODevice *vdev, + MemoryRegionCache *cache, + VRingPackedDescEvent *e) +{ + hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap); + hwaddr off_flags = offsetof(VRingPackedDescEvent, flags); + + address_space_read_cached(cache, off_flags, &e->flags, + sizeof(e->flags)); + /* Make sure flags is seen before off_wrap */ + smp_rmb(); + address_space_read_cached(cache, off_off, &e->off_wrap, + sizeof(e->off_wrap)); + virtio_tswap16s(vdev, &e->off_wrap); + virtio_tswap16s(vdev, &e->flags); +} + +static void vring_packed_off_wrap_write(VirtIODevice *vdev, + MemoryRegionCache *cache, + uint16_t off_wrap) +{ + hwaddr off = offsetof(VRingPackedDescEvent, off_wrap); + + virtio_tswap16s(vdev, &off_wrap); + address_space_write_cached(cache, off, &off_wrap, sizeof(off_wrap)); + address_space_cache_invalidate(cache, off, sizeof(off_wrap)); +} + +static void vring_packed_flags_write(VirtIODevice *vdev, + MemoryRegionCache *cache, uint16_t flags) +{ + hwaddr off = offsetof(VRingPackedDescEvent, flags); + + virtio_tswap16s(vdev, &flags); + address_space_write_cached(cache, off, &flags, sizeof(flags)); + address_space_cache_invalidate(cache, off, sizeof(flags)); +} + +/* Called within rcu_read_lock(). 
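The smp_rmb() in vring_packed_event_read() pairs with the peer writing off_wrap before publishing flags; distilled into C11 atomics (an illustrative model, not QEMU code; 0x2 is VRING_PACKED_EVENT_FLAG_DESC):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint16_t ev_flags;   /* the notification-suppress word */
    static uint16_t ev_off_wrap;        /* event index plus wrap bit */

    /* writer: publish the event index, then arm descriptor-based events */
    static void arm_event(uint16_t off_wrap)
    {
        ev_off_wrap = off_wrap;
        atomic_store_explicit(&ev_flags, 0x2, memory_order_release);
    }

    /* reader: off_wrap is only meaningful once flags says DESC */
    static int read_event(uint16_t *off_wrap)
    {
        if (atomic_load_explicit(&ev_flags, memory_order_acquire) != 0x2) {
            return 0;
        }
        *off_wrap = ev_off_wrap;  /* acquire makes the earlier store visible */
        return 1;
    }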
*/ static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq) { VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); @@ -329,14 +385,8 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val) address_space_cache_invalidate(&caches->used, pa, sizeof(val)); } -void virtio_queue_set_notification(VirtQueue *vq, int enable) +static void virtio_queue_split_set_notification(VirtQueue *vq, int enable) { - vq->notification = enable; - - if (!vq->vring.desc) { - return; - } - rcu_read_lock(); if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { vring_set_avail_event(vq, vring_avail_idx(vq)); @@ -352,11 +402,145 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable) rcu_read_unlock(); } +static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable) +{ + uint16_t off_wrap; + VRingPackedDescEvent e; + VRingMemoryRegionCaches *caches; + + rcu_read_lock(); + caches = vring_get_region_caches(vq); + vring_packed_event_read(vq->vdev, &caches->used, &e); + + if (!enable) { + e.flags = VRING_PACKED_EVENT_FLAG_DISABLE; + } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { + off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15; + vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap); + /* Make sure off_wrap is written before flags */ + smp_wmb(); + e.flags = VRING_PACKED_EVENT_FLAG_DESC; + } else { + e.flags = VRING_PACKED_EVENT_FLAG_ENABLE; + } + + vring_packed_flags_write(vq->vdev, &caches->used, e.flags); + if (enable) { + /* Expose avail event/used flags before caller checks the avail idx. */ + smp_mb(); + } + rcu_read_unlock(); +} + +void virtio_queue_set_notification(VirtQueue *vq, int enable) +{ + vq->notification = enable; + + if (!vq->vring.desc) { + return; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtio_queue_packed_set_notification(vq, enable); + } else { + virtio_queue_split_set_notification(vq, enable); + } +} + int virtio_queue_ready(VirtQueue *vq) { return vq->vring.avail != 0; } +static void vring_packed_desc_read_flags(VirtIODevice *vdev, + uint16_t *flags, + MemoryRegionCache *cache, + int i) +{ + address_space_read_cached(cache, + i * sizeof(VRingPackedDesc) + + offsetof(VRingPackedDesc, flags), + flags, sizeof(*flags)); + virtio_tswap16s(vdev, flags); +} + +static void vring_packed_desc_read(VirtIODevice *vdev, + VRingPackedDesc *desc, + MemoryRegionCache *cache, + int i, bool strict_order) +{ + hwaddr off = i * sizeof(VRingPackedDesc); + + vring_packed_desc_read_flags(vdev, &desc->flags, cache, i); + + if (strict_order) { + /* Make sure flags is read before the rest of the fields.
+ */ + smp_rmb(); + } + + address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr), + &desc->addr, sizeof(desc->addr)); + address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id), + &desc->id, sizeof(desc->id)); + address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len), + &desc->len, sizeof(desc->len)); + virtio_tswap64s(vdev, &desc->addr); + virtio_tswap16s(vdev, &desc->id); + virtio_tswap32s(vdev, &desc->len); +} + +static void vring_packed_desc_write_data(VirtIODevice *vdev, + VRingPackedDesc *desc, + MemoryRegionCache *cache, + int i) +{ + hwaddr off_id = i * sizeof(VRingPackedDesc) + + offsetof(VRingPackedDesc, id); + hwaddr off_len = i * sizeof(VRingPackedDesc) + + offsetof(VRingPackedDesc, len); + + virtio_tswap32s(vdev, &desc->len); + virtio_tswap16s(vdev, &desc->id); + address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id)); + address_space_cache_invalidate(cache, off_id, sizeof(desc->id)); + address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len)); + address_space_cache_invalidate(cache, off_len, sizeof(desc->len)); +} + +static void vring_packed_desc_write_flags(VirtIODevice *vdev, + VRingPackedDesc *desc, + MemoryRegionCache *cache, + int i) +{ + hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags); + + virtio_tswap16s(vdev, &desc->flags); + address_space_write_cached(cache, off, &desc->flags, sizeof(desc->flags)); + address_space_cache_invalidate(cache, off, sizeof(desc->flags)); +} + +static void vring_packed_desc_write(VirtIODevice *vdev, + VRingPackedDesc *desc, + MemoryRegionCache *cache, + int i, bool strict_order) +{ + vring_packed_desc_write_data(vdev, desc, cache, i); + if (strict_order) { + /* Make sure data is written before flags. */ + smp_wmb(); + } + vring_packed_desc_write_flags(vdev, desc, cache, i); +} + +static inline bool is_desc_avail(uint16_t flags, bool wrap_counter) +{ + bool avail, used; + + avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL)); + used = !!(flags & (1 << VRING_PACKED_DESC_F_USED)); + return (avail != used) && (avail == wrap_counter); +} + /* Fetch avail_idx from VQ memory only when we really need to know if * guest has added some buffers. * Called within rcu_read_lock().
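is_desc_avail() above encodes the packed-ring ownership rule: a descriptor belongs to the device only while its AVAIL bit differs from its USED bit and AVAIL matches the ring's current wrap counter. A standalone check using the spec's bit positions (7 and 15):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool demo_is_desc_avail(uint16_t flags, bool wrap)
    {
        bool avail = flags & (1 << 7), used = flags & (1 << 15);
        return (avail != used) && (avail == wrap);
    }

    int main(void)
    {
        assert(demo_is_desc_avail(1 << 7, true));    /* first pass: ours */
        assert(!demo_is_desc_avail(1 << 7, false));  /* next pass: stale */
        assert(!demo_is_desc_avail((1 << 7) | (1 << 15), true)); /* consumed */
        return 0;
    }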
*/ @@ -377,7 +561,7 @@ static int virtio_queue_empty_rcu(VirtQueue *vq) return vring_avail_idx(vq) == vq->last_avail_idx; } -int virtio_queue_empty(VirtQueue *vq) +static int virtio_queue_split_empty(VirtQueue *vq) { bool empty; @@ -399,6 +583,41 @@ int virtio_queue_empty(VirtQueue *vq) return empty; } +static int virtio_queue_packed_empty_rcu(VirtQueue *vq) +{ + struct VRingPackedDesc desc; + VRingMemoryRegionCaches *cache; + + if (unlikely(!vq->vring.desc)) { + return 1; + } + + cache = vring_get_region_caches(vq); + vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc, + vq->last_avail_idx); + + return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter); +} + +static int virtio_queue_packed_empty(VirtQueue *vq) +{ + bool empty; + + rcu_read_lock(); + empty = virtio_queue_packed_empty_rcu(vq); + rcu_read_unlock(); + return empty; +} + +int virtio_queue_empty(VirtQueue *vq) +{ + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + return virtio_queue_packed_empty(vq); + } else { + return virtio_queue_split_empty(vq); + } +} + static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { @@ -436,10 +655,25 @@ static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { - vq->inuse--; + vq->inuse -= elem->ndescs; virtqueue_unmap_sg(vq, elem, len); } +static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num) +{ + vq->last_avail_idx -= num; +} + +static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num) +{ + if (vq->last_avail_idx < num) { + vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num; + vq->last_avail_wrap_counter ^= 1; + } else { + vq->last_avail_idx -= num; + } +} + /* virtqueue_unpop: * @vq: The #VirtQueue * @elem: The #VirtQueueElement @@ -451,7 +685,13 @@ void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem, void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { - vq->last_avail_idx--; + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_rewind(vq, 1); + } else { + virtqueue_split_rewind(vq, 1); + } + virtqueue_detach_element(vq, elem, len); } @@ -472,25 +712,21 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num) if (num > vq->inuse) { return false; } - vq->last_avail_idx -= num; + vq->inuse -= num; + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_rewind(vq, num); + } else { + virtqueue_split_rewind(vq, num); + } return true; } -/* Called within rcu_read_lock(). 
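Worth spelling out about the two rewind paths just above: the split index is free-running modulo 2^16, so it simply decrements, while the packed index lives in [0, vring.num) and must flip the wrap counter whenever it crosses slot 0. A sketch of the packed case (demo struct, not QEMU's VirtQueue):

    #include <stdbool.h>
    #include <stdint.h>

    struct demo_vq { uint16_t last_avail_idx; bool wrap; uint16_t num; };

    static void packed_rewind(struct demo_vq *vq, unsigned num)
    {
        if (vq->last_avail_idx < num) {
            /* e.g. idx 1, ring of 256, rewind 3 -> idx 254, wrap flips */
            vq->last_avail_idx = vq->num + vq->last_avail_idx - num;
            vq->wrap ^= 1;
        } else {
            vq->last_avail_idx -= num;
        }
    }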
*/ -void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, +static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len, unsigned int idx) { VRingUsedElem uelem; - trace_virtqueue_fill(vq, elem, len, idx); - - virtqueue_unmap_sg(vq, elem, len); - - if (unlikely(vq->vdev->broken)) { - return; - } - if (unlikely(!vq->vring.used)) { return; } @@ -502,16 +738,72 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, vring_used_write(vq, &uelem, idx); } +static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len, unsigned int idx) +{ + vq->used_elems[idx].index = elem->index; + vq->used_elems[idx].len = len; + vq->used_elems[idx].ndescs = elem->ndescs; +} + +static void virtqueue_packed_fill_desc(VirtQueue *vq, + const VirtQueueElement *elem, + unsigned int idx, + bool strict_order) +{ + uint16_t head; + VRingMemoryRegionCaches *caches; + VRingPackedDesc desc = { + .id = elem->index, + .len = elem->len, + }; + bool wrap_counter = vq->used_wrap_counter; + + if (unlikely(!vq->vring.desc)) { + return; + } + + head = vq->used_idx + idx; + if (head >= vq->vring.num) { + head -= vq->vring.num; + wrap_counter ^= 1; + } + if (wrap_counter) { + desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL); + desc.flags |= (1 << VRING_PACKED_DESC_F_USED); + } else { + desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL); + desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED); + } + + caches = vring_get_region_caches(vq); + vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order); +} + /* Called within rcu_read_lock(). */ -void virtqueue_flush(VirtQueue *vq, unsigned int count) +void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len, unsigned int idx) { - uint16_t old, new; + trace_virtqueue_fill(vq, elem, len, idx); + + virtqueue_unmap_sg(vq, elem, len); if (unlikely(vq->vdev->broken)) { - vq->inuse -= count; return; } + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_fill(vq, elem, len, idx); + } else { + virtqueue_split_fill(vq, elem, len, idx); + } +} + +/* Called within rcu_read_lock(). 
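Either way, the fill/flush split stays behind the same device-facing surface: virtqueue_push() remains virtqueue_fill(vq, elem, len, 0) followed by virtqueue_flush(vq, 1). For orientation, the canonical completion path a backend follows (a sketch against QEMU's public API; demo_complete_one and the 4-byte length are illustrative):

    /* Illustrative backend completion path, not part of this patch. */
    static void demo_complete_one(VirtIODevice *vdev, VirtQueue *vq)
    {
        VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));

        if (!elem) {
            return;                   /* queue empty (or device broken) */
        }
        /* ... fill elem->in_sg with, say, 4 bytes of response here ... */
        virtqueue_push(vq, elem, 4);  /* fill + flush under RCU */
        virtio_notify(vdev, vq);      /* raises IRQ only if the guest asked */
        g_free(elem);
    }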
*/ +static void virtqueue_split_flush(VirtQueue *vq, unsigned int count) +{ + uint16_t old, new; + if (unlikely(!vq->vring.used)) { return; } @@ -527,6 +819,43 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count) vq->signalled_used_valid = false; } +static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count) +{ + unsigned int i, ndescs = 0; + + if (unlikely(!vq->vring.desc)) { + return; + } + + for (i = 1; i < count; i++) { + virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false); + ndescs += vq->used_elems[i].ndescs; + } + virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true); + ndescs += vq->used_elems[0].ndescs; + + vq->inuse -= ndescs; + vq->used_idx += ndescs; + if (vq->used_idx >= vq->vring.num) { + vq->used_idx -= vq->vring.num; + vq->used_wrap_counter ^= 1; + } +} + +void virtqueue_flush(VirtQueue *vq, unsigned int count) +{ + if (unlikely(vq->vdev->broken)) { + vq->inuse -= count; + return; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_flush(vq, count); + } else { + virtqueue_split_flush(vq, count); + } +} + void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { @@ -579,9 +908,9 @@ enum { VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */ }; -static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, - MemoryRegionCache *desc_cache, unsigned int max, - unsigned int *next) +static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, + MemoryRegionCache *desc_cache, + unsigned int max, unsigned int *next) { /* If this descriptor says it doesn't chain, we're done. */ if (!(desc->flags & VRING_DESC_F_NEXT)) { @@ -598,13 +927,13 @@ static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, return VIRTQUEUE_READ_DESC_ERROR; } - vring_desc_read(vdev, desc, desc_cache, *next); + vring_split_desc_read(vdev, desc, desc_cache, *next); return VIRTQUEUE_READ_DESC_MORE; } -void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, - unsigned int *out_bytes, - unsigned max_in_bytes, unsigned max_out_bytes) +static void virtqueue_split_get_avail_bytes(VirtQueue *vq, + unsigned int *in_bytes, unsigned int *out_bytes, + unsigned max_in_bytes, unsigned max_out_bytes) { VirtIODevice *vdev = vq->vdev; unsigned int max, idx; @@ -614,27 +943,12 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, int64_t len = 0; int rc; - if (unlikely(!vq->vring.desc)) { - if (in_bytes) { - *in_bytes = 0; - } - if (out_bytes) { - *out_bytes = 0; - } - return; - } - rcu_read_lock(); idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; max = vq->vring.num; caches = vring_get_region_caches(vq); - if (caches->desc.len < max * sizeof(VRingDesc)) { - virtio_error(vdev, "Cannot map descriptor ring"); - goto err; - } - while ((rc = virtqueue_num_heads(vq, idx)) > 0) { MemoryRegionCache *desc_cache = &caches->desc; unsigned int num_bufs; @@ -647,7 +961,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, goto err; } - vring_desc_read(vdev, &desc, desc_cache, i); + vring_split_desc_read(vdev, &desc, desc_cache, i); if (desc.flags & VRING_DESC_F_INDIRECT) { if (!desc.len || (desc.len % sizeof(VRingDesc))) { @@ -673,7 +987,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, max = desc.len / sizeof(VRingDesc); num_bufs = i = 0; - vring_desc_read(vdev, &desc, desc_cache, i); + vring_split_desc_read(vdev, &desc, desc_cache, i); } do { @@ -692,7 +1006,7 @@ void 
virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, goto done; } - rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i); + rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i); } while (rc == VIRTQUEUE_READ_DESC_MORE); if (rc == VIRTQUEUE_READ_DESC_ERROR) { @@ -727,6 +1041,186 @@ err: goto done; } +static int virtqueue_packed_read_next_desc(VirtQueue *vq, + VRingPackedDesc *desc, + MemoryRegionCache + *desc_cache, + unsigned int max, + unsigned int *next, + bool indirect) +{ + /* If this descriptor says it doesn't chain, we're done. */ + if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) { + return VIRTQUEUE_READ_DESC_DONE; + } + + ++*next; + if (*next == max) { + if (indirect) { + return VIRTQUEUE_READ_DESC_DONE; + } else { + (*next) -= vq->vring.num; + } + } + + vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false); + return VIRTQUEUE_READ_DESC_MORE; +} + +static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, + unsigned int *in_bytes, + unsigned int *out_bytes, + unsigned max_in_bytes, + unsigned max_out_bytes) +{ + VirtIODevice *vdev = vq->vdev; + unsigned int max, idx; + unsigned int total_bufs, in_total, out_total; + MemoryRegionCache *desc_cache; + VRingMemoryRegionCaches *caches; + MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; + int64_t len = 0; + VRingPackedDesc desc; + bool wrap_counter; + + rcu_read_lock(); + idx = vq->last_avail_idx; + wrap_counter = vq->last_avail_wrap_counter; + total_bufs = in_total = out_total = 0; + + max = vq->vring.num; + caches = vring_get_region_caches(vq); + + for (;;) { + unsigned int num_bufs = total_bufs; + unsigned int i = idx; + int rc; + + desc_cache = &caches->desc; + vring_packed_desc_read(vdev, &desc, desc_cache, idx, true); + if (!is_desc_avail(desc.flags, wrap_counter)) { + break; + } + + if (desc.flags & VRING_DESC_F_INDIRECT) { + if (desc.len % sizeof(VRingPackedDesc)) { + virtio_error(vdev, "Invalid size for indirect buffer table"); + goto err; + } + + /* If we've got too many, that implies a descriptor loop. */ + if (num_bufs >= max) { + virtio_error(vdev, "Looped descriptor"); + goto err; + } + + /* loop over the indirect descriptor table */ + len = address_space_cache_init(&indirect_desc_cache, + vdev->dma_as, + desc.addr, desc.len, false); + desc_cache = &indirect_desc_cache; + if (len < desc.len) { + virtio_error(vdev, "Cannot map indirect buffer"); + goto err; + } + + max = desc.len / sizeof(VRingPackedDesc); + num_bufs = i = 0; + vring_packed_desc_read(vdev, &desc, desc_cache, i, false); + } + + do { + /* If we've got too many, that implies a descriptor loop. 
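The loop check used throughout these walks relies on a counting argument rather than cycle detection: a well-formed chain can never contain more entries than the table it lives in, so stepping more than max times proves a cycle. In isolation (0xffff is an arbitrary end-of-chain marker for the demo):

    #include <stdint.h>

    /* Returns chain length, or -1 if the next[] links form a loop. */
    static int demo_chain_len(const uint16_t *next, unsigned max, unsigned head)
    {
        unsigned i = head, n = 1;

        while (next[i] != 0xffff) {
            if (++n > max) {
                return -1;   /* more hops than table entries: a loop */
            }
            i = next[i];
        }
        return n;
    }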
*/ + if (++num_bufs > max) { + virtio_error(vdev, "Looped descriptor"); + goto err; + } + + if (desc.flags & VRING_DESC_F_WRITE) { + in_total += desc.len; + } else { + out_total += desc.len; + } + if (in_total >= max_in_bytes && out_total >= max_out_bytes) { + goto done; + } + + rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, + &i, desc_cache == + &indirect_desc_cache); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + + if (desc_cache == &indirect_desc_cache) { + address_space_cache_destroy(&indirect_desc_cache); + total_bufs++; + idx++; + } else { + idx += num_bufs - total_bufs; + total_bufs = num_bufs; + } + + if (idx >= vq->vring.num) { + idx -= vq->vring.num; + wrap_counter ^= 1; + } + } + + /* Record the index and wrap counter for a kick we want */ + vq->shadow_avail_idx = idx; + vq->shadow_avail_wrap_counter = wrap_counter; +done: + address_space_cache_destroy(&indirect_desc_cache); + if (in_bytes) { + *in_bytes = in_total; + } + if (out_bytes) { + *out_bytes = out_total; + } + rcu_read_unlock(); + return; + +err: + in_total = out_total = 0; + goto done; +} + +void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, + unsigned int *out_bytes, + unsigned max_in_bytes, unsigned max_out_bytes) +{ + uint16_t desc_size; + VRingMemoryRegionCaches *caches; + + if (unlikely(!vq->vring.desc)) { + goto err; + } + + caches = vring_get_region_caches(vq); + desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? + sizeof(VRingPackedDesc) : sizeof(VRingDesc); + if (caches->desc.len < vq->vring.num * desc_size) { + virtio_error(vq->vdev, "Cannot map descriptor ring"); + goto err; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes, + max_in_bytes, max_out_bytes); + } else { + virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes, + max_in_bytes, max_out_bytes); + } + + return; +err: + if (in_bytes) { + *in_bytes = 0; + } + if (out_bytes) { + *out_bytes = 0; + } +} + int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, unsigned int out_bytes) { @@ -851,7 +1345,7 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu return elem; } -void *virtqueue_pop(VirtQueue *vq, size_t sz) +static void *virtqueue_split_pop(VirtQueue *vq, size_t sz) { unsigned int i, head, max; VRingMemoryRegionCaches *caches; @@ -866,9 +1360,6 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) VRingDesc desc; int rc; - if (unlikely(vdev->broken)) { - return NULL; - } rcu_read_lock(); if (virtio_queue_empty_rcu(vq)) { goto done; @@ -904,7 +1395,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) } desc_cache = &caches->desc; - vring_desc_read(vdev, &desc, desc_cache, i); + vring_split_desc_read(vdev, &desc, desc_cache, i); if (desc.flags & VRING_DESC_F_INDIRECT) { if (!desc.len || (desc.len % sizeof(VRingDesc))) { virtio_error(vdev, "Invalid size for indirect buffer table"); @@ -922,7 +1413,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) max = desc.len / sizeof(VRingDesc); i = 0; - vring_desc_read(vdev, &desc, desc_cache, i); + vring_split_desc_read(vdev, &desc, desc_cache, i); } /* Collect all the descriptors */ @@ -953,7 +1444,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) goto err_undo_map; } - rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i); + rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i); } while (rc == VIRTQUEUE_READ_DESC_MORE); if (rc == VIRTQUEUE_READ_DESC_ERROR) { @@ -963,6 +1454,7 @@ void *virtqueue_pop(VirtQueue *vq, 
size_t sz) /* Now copy what we have collected and mapped */ elem = virtqueue_alloc_element(sz, out_num, in_num); elem->index = head; + elem->ndescs = 1; for (i = 0; i < out_num; i++) { elem->out_addr[i] = addr[i]; elem->out_sg[i] = iov[i]; @@ -986,23 +1478,204 @@ err_undo_map: goto done; } -/* virtqueue_drop_all: - * @vq: The #VirtQueue - * Drops all queued buffers and indicates them to the guest - * as if they are done. Useful when buffers can not be - * processed but must be returned to the guest. - */ -unsigned int virtqueue_drop_all(VirtQueue *vq) +static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz) { + unsigned int i, max; + VRingMemoryRegionCaches *caches; + MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; + MemoryRegionCache *desc_cache; + int64_t len; + VirtIODevice *vdev = vq->vdev; + VirtQueueElement *elem = NULL; + unsigned out_num, in_num, elem_entries; + hwaddr addr[VIRTQUEUE_MAX_SIZE]; + struct iovec iov[VIRTQUEUE_MAX_SIZE]; + VRingPackedDesc desc; + uint16_t id; + int rc; + + rcu_read_lock(); + if (virtio_queue_packed_empty_rcu(vq)) { + goto done; + } + + /* When we start there are none of either input nor output. */ + out_num = in_num = elem_entries = 0; + + max = vq->vring.num; + + if (vq->inuse >= vq->vring.num) { + virtio_error(vdev, "Virtqueue size exceeded"); + goto done; + } + + i = vq->last_avail_idx; + + caches = vring_get_region_caches(vq); + if (caches->desc.len < max * sizeof(VRingDesc)) { + virtio_error(vdev, "Cannot map descriptor ring"); + goto done; + } + + desc_cache = &caches->desc; + vring_packed_desc_read(vdev, &desc, desc_cache, i, true); + id = desc.id; + if (desc.flags & VRING_DESC_F_INDIRECT) { + if (desc.len % sizeof(VRingPackedDesc)) { + virtio_error(vdev, "Invalid size for indirect buffer table"); + goto done; + } + + /* loop over the indirect descriptor table */ + len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, + desc.addr, desc.len, false); + desc_cache = &indirect_desc_cache; + if (len < desc.len) { + virtio_error(vdev, "Cannot map indirect buffer"); + goto done; + } + + max = desc.len / sizeof(VRingPackedDesc); + i = 0; + vring_packed_desc_read(vdev, &desc, desc_cache, i, false); + } + + /* Collect all the descriptors */ + do { + bool map_ok; + + if (desc.flags & VRING_DESC_F_WRITE) { + map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num, + iov + out_num, + VIRTQUEUE_MAX_SIZE - out_num, true, + desc.addr, desc.len); + } else { + if (in_num) { + virtio_error(vdev, "Incorrect order for descriptors"); + goto err_undo_map; + } + map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov, + VIRTQUEUE_MAX_SIZE, false, + desc.addr, desc.len); + } + if (!map_ok) { + goto err_undo_map; + } + + /* If we've got too many, that implies a descriptor loop. */ + if (++elem_entries > max) { + virtio_error(vdev, "Looped descriptor"); + goto err_undo_map; + } + + rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i, + desc_cache == + &indirect_desc_cache); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + + /* Now copy what we have collected and mapped */ + elem = virtqueue_alloc_element(sz, out_num, in_num); + for (i = 0; i < out_num; i++) { + elem->out_addr[i] = addr[i]; + elem->out_sg[i] = iov[i]; + } + for (i = 0; i < in_num; i++) { + elem->in_addr[i] = addr[out_num + i]; + elem->in_sg[i] = iov[out_num + i]; + } + + elem->index = id; + elem->ndescs = (desc_cache == &indirect_desc_cache) ? 
1 : elem_entries; + vq->last_avail_idx += elem->ndescs; + vq->inuse += elem->ndescs; + + if (vq->last_avail_idx >= vq->vring.num) { + vq->last_avail_idx -= vq->vring.num; + vq->last_avail_wrap_counter ^= 1; + } + + vq->shadow_avail_idx = vq->last_avail_idx; + vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter; + + trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); +done: + address_space_cache_destroy(&indirect_desc_cache); + rcu_read_unlock(); + + return elem; + +err_undo_map: + virtqueue_undo_map_desc(out_num, in_num, iov); + goto done; +} + +void *virtqueue_pop(VirtQueue *vq, size_t sz) +{ + if (unlikely(vq->vdev->broken)) { + return NULL; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + return virtqueue_packed_pop(vq, sz); + } else { + return virtqueue_split_pop(vq, sz); + } +} + +static unsigned int virtqueue_packed_drop_all(VirtQueue *vq) +{ + VRingMemoryRegionCaches *caches; + MemoryRegionCache *desc_cache; unsigned int dropped = 0; VirtQueueElement elem = {}; VirtIODevice *vdev = vq->vdev; - bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + VRingPackedDesc desc; - if (unlikely(vdev->broken)) { - return 0; + caches = vring_get_region_caches(vq); + desc_cache = &caches->desc; + + virtio_queue_set_notification(vq, 0); + + while (vq->inuse < vq->vring.num) { + unsigned int idx = vq->last_avail_idx; + /* + * works similar to virtqueue_pop but does not map buffers + * and does not allocate any memory. + */ + vring_packed_desc_read(vdev, &desc, desc_cache, + vq->last_avail_idx , true); + if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) { + break; + } + elem.index = desc.id; + elem.ndescs = 1; + while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache, + vq->vring.num, &idx, false)) { + ++elem.ndescs; + } + /* + * immediately push the element, nothing to unmap + * as both in_num and out_num are set to 0. + */ + virtqueue_push(vq, &elem, 0); + dropped++; + vq->last_avail_idx += elem.ndescs; + if (vq->last_avail_idx >= vq->vring.num) { + vq->last_avail_idx -= vq->vring.num; + vq->last_avail_wrap_counter ^= 1; + } } + return dropped; +} + +static unsigned int virtqueue_split_drop_all(VirtQueue *vq) +{ + unsigned int dropped = 0; + VirtQueueElement elem = {}; + VirtIODevice *vdev = vq->vdev; + bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) { /* works similar to virtqueue_pop but does not map buffers * and does not allocate any memory */ @@ -1024,6 +1697,27 @@ unsigned int virtqueue_drop_all(VirtQueue *vq) return dropped; } +/* virtqueue_drop_all: + * @vq: The #VirtQueue + * Drops all queued buffers and indicates them to the guest + * as if they are done. Useful when buffers can not be + * processed but must be returned to the guest. + */ +unsigned int virtqueue_drop_all(VirtQueue *vq) +{ + struct VirtIODevice *vdev = vq->vdev; + + if (unlikely(vdev->broken)) { + return 0; + } + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return virtqueue_packed_drop_all(vq); + } else { + return virtqueue_split_drop_all(vq); + } +} + /* Reading and writing a structure directly to QEMUFile is *awful*, but * it is what QEMU has always done by mistake. We can change it sooner * or later by bumping the version number of the affected vm states. 
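One property of the element save/load changes worth spelling out: the extra ndescs field is appended only when VIRTIO_F_RING_PACKED was negotiated, so the wire format is unchanged for split-ring machines, and the writer and reader must gate on the same predicate and keep the field at the same position in the stream. Condensed into a sketch (the fixed VirtQueueElementOld body is elided):

    static void demo_put(VirtIODevice *vdev, QEMUFile *f, VirtQueueElement *elem)
    {
        /* ... VirtQueueElementOld body as before ... */
        if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
            qemu_put_be32s(f, &elem->ndescs);   /* packed-only extension */
        }
    }

    static void demo_get(VirtIODevice *vdev, QEMUFile *f, VirtQueueElement *elem)
    {
        /* ... VirtQueueElementOld body as before ... */
        if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
            qemu_get_be32s(f, &elem->ndescs);   /* must mirror the writer */
        }
    }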
@@ -1080,11 +1774,16 @@ void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz) elem->out_sg[i].iov_len = data.out_sg[i].iov_len; } + if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + qemu_get_be32s(f, &elem->ndescs); + } + virtqueue_map(vdev, elem); return elem; } -void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem) +void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, + VirtQueueElement *elem) { VirtQueueElementOld data; int i; @@ -1112,6 +1811,11 @@ void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem) /* Do not save iov_base as above. */ data.out_sg[i].iov_len = elem->out_sg[i].iov_len; } + + if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + qemu_put_be32s(f, &elem->ndescs); + } + qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld)); } @@ -1236,6 +1940,9 @@ void virtio_reset(void *opaque) vdev->vq[i].last_avail_idx = 0; vdev->vq[i].shadow_avail_idx = 0; vdev->vq[i].used_idx = 0; + vdev->vq[i].last_avail_wrap_counter = true; + vdev->vq[i].shadow_avail_wrap_counter = true; + vdev->vq[i].used_wrap_counter = true; virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR); vdev->vq[i].signalled_used = 0; vdev->vq[i].signalled_used_valid = false; @@ -1626,6 +2333,8 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; vdev->vq[i].handle_output = handle_output; vdev->vq[i].handle_aio_output = NULL; + vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) * + queue_size); return &vdev->vq[i]; } @@ -1640,6 +2349,7 @@ void virtio_del_queue(VirtIODevice *vdev, int n) vdev->vq[n].vring.num_default = 0; vdev->vq[n].handle_output = NULL; vdev->vq[n].handle_aio_output = NULL; + g_free(vdev->vq[n].used_elems); } static void virtio_set_isr(VirtIODevice *vdev, int value) @@ -1654,8 +2364,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value) } } -/* Called within rcu_read_lock(). */ -static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) +static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq) { uint16_t old, new; bool v; @@ -1678,6 +2387,54 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) return !v || vring_need_event(vring_get_used_event(vq), new, old); } +static bool vring_packed_need_event(VirtQueue *vq, bool wrap, + uint16_t off_wrap, uint16_t new, + uint16_t old) +{ + int off = off_wrap & ~(1 << 15); + + if (wrap != off_wrap >> 15) { + off -= vq->vring.num; + } + + return vring_need_event(off, new, old); +} + +static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq) +{ + VRingPackedDescEvent e; + uint16_t old, new; + bool v; + VRingMemoryRegionCaches *caches; + + caches = vring_get_region_caches(vq); + vring_packed_event_read(vdev, &caches->avail, &e); + + old = vq->signalled_used; + new = vq->signalled_used = vq->used_idx; + v = vq->signalled_used_valid; + vq->signalled_used_valid = true; + + if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) { + return false; + } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) { + return true; + } + + return !v || vring_packed_need_event(vq, vq->used_wrap_counter, + e.off_wrap, new, old); +} + +/* Called within rcu_read_lock(). 
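vring_packed_need_event() above strips the wrap bit from off_wrap and, when the device's wrap state disagrees with it, slides the event index down by one full ring so the classic free-running window test still applies. The window test itself, for reference (identical in form to the spec's vring_need_event; the standalone wrapper is a sketch):

    #include <stdbool.h>
    #include <stdint.h>

    /* true iff new crossed the armed event index since old */
    static bool need_event(uint16_t event_idx, uint16_t new, uint16_t old)
    {
        return (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old);
    }

    static bool packed_need_event(uint16_t num, bool dev_wrap,
                                  uint16_t off_wrap, uint16_t new, uint16_t old)
    {
        int off = off_wrap & ~(1 << 15);

        if (dev_wrap != (bool)(off_wrap >> 15)) {
            off -= num;   /* event was armed on the ring's other wrap */
        }
        return need_event(off, new, old);
    }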
*/ +static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return virtio_packed_should_notify(vdev, vq); + } else { + return virtio_split_should_notify(vdev, vq); + } +} + void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) { bool should_notify; @@ -1767,6 +2524,13 @@ static bool virtio_virtqueue_needed(void *opaque) return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1); } +static bool virtio_packed_virtqueue_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + + return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED); +} + static bool virtio_ringsize_needed(void *opaque) { VirtIODevice *vdev = opaque; @@ -1815,6 +2579,20 @@ static const VMStateDescription vmstate_virtqueue = { } }; +static const VMStateDescription vmstate_packed_virtqueue = { + .name = "packed_virtqueue_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(last_avail_idx, struct VirtQueue), + VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue), + VMSTATE_UINT16(used_idx, struct VirtQueue), + VMSTATE_BOOL(used_wrap_counter, struct VirtQueue), + VMSTATE_UINT32(inuse, struct VirtQueue), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_virtio_virtqueues = { .name = "virtio/virtqueues", .version_id = 1, @@ -1827,6 +2605,18 @@ static const VMStateDescription vmstate_virtio_virtqueues = { } }; +static const VMStateDescription vmstate_virtio_packed_virtqueues = { + .name = "virtio/packed_virtqueues", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_packed_virtqueue_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, + VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_ringsize = { .name = "ringsize_state", .version_id = 1, @@ -1959,6 +2749,7 @@ static const VMStateDescription vmstate_virtio = { &vmstate_virtio_broken, &vmstate_virtio_extra_state, &vmstate_virtio_started, + &vmstate_virtio_packed_virtqueues, NULL } }; @@ -2258,6 +3049,13 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) virtio_queue_update_rings(vdev, i); } + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx; + vdev->vq[i].shadow_avail_wrap_counter = + vdev->vq[i].last_avail_wrap_counter; + continue; + } + nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; /* Check it isn't doing strange things with descriptor numbers. */ if (nheads > vdev->vq[i].vring.num) { @@ -2399,28 +3197,98 @@ hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n) hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n) { + int s; + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return sizeof(struct VRingPackedDescEvent); + } + + s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; return offsetof(VRingAvail, ring) + - sizeof(uint16_t) * vdev->vq[n].vring.num; + sizeof(uint16_t) * vdev->vq[n].vring.num + s; } hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n) { + int s; + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return sizeof(struct VRingPackedDescEvent); + } + + s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 
2 : 0; return offsetof(VRingUsed, ring) + - sizeof(VRingUsedElem) * vdev->vq[n].vring.num; + sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s; } -uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) +static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev, + int n) +{ + unsigned int avail, used; + + avail = vdev->vq[n].last_avail_idx; + avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15; + + used = vdev->vq[n].used_idx; + used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15; + + return avail | used << 16; +} + +static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev, + int n) { return vdev->vq[n].last_avail_idx; } -void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx) +unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) { - vdev->vq[n].last_avail_idx = idx; - vdev->vq[n].shadow_avail_idx = idx; + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return virtio_queue_packed_get_last_avail_idx(vdev, n); + } else { + return virtio_queue_split_get_last_avail_idx(vdev, n); + } } -void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n) +static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev, + int n, unsigned int idx) +{ + struct VirtQueue *vq = &vdev->vq[n]; + + vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff; + vq->last_avail_wrap_counter = + vq->shadow_avail_wrap_counter = !!(idx & 0x8000); + idx >>= 16; + vq->used_idx = idx & 0x7fff; + vq->used_wrap_counter = !!(idx & 0x8000); +} + +static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev, + int n, unsigned int idx) +{ + vdev->vq[n].last_avail_idx = idx; + vdev->vq[n].shadow_avail_idx = idx; +} + +void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, + unsigned int idx) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + virtio_queue_packed_set_last_avail_idx(vdev, n, idx); + } else { + virtio_queue_split_set_last_avail_idx(vdev, n, idx); + } +} + +static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev, + int n) +{ + /* We don't have a reference like avail idx in shared memory */ + return; +} + +static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev, + int n) { rcu_read_lock(); if (vdev->vq[n].vring.desc) { @@ -2430,7 +3298,22 @@ void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n) rcu_read_unlock(); } -void virtio_queue_update_used_idx(VirtIODevice *vdev, int n) +void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + virtio_queue_packed_restore_last_avail_idx(vdev, n); + } else { + virtio_queue_split_restore_last_avail_idx(vdev, n); + } +} + +static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n) +{ + /* used idx was updated through set_last_avail_idx() */ + return; +} + +static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n) { rcu_read_lock(); if (vdev->vq[n].vring.desc) { @@ -2439,6 +3322,15 @@ void virtio_queue_update_used_idx(VirtIODevice *vdev, int n) rcu_read_unlock(); } +void virtio_queue_update_used_idx(VirtIODevice *vdev, int n) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return virtio_queue_packed_update_used_idx(vdev, n); + } else { + return virtio_queue_split_update_used_idx(vdev, n); + } +} + void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n) { vdev->vq[n].signalled_used_valid = false; @@ -2773,14 +3665,6 @@ static void
virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev) } } -void virtio_device_stop_ioeventfd(VirtIODevice *vdev) -{ - BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); - VirtioBusState *vbus = VIRTIO_BUS(qbus); - - virtio_bus_stop_ioeventfd(vbus); -} - int virtio_device_grab_ioeventfd(VirtIODevice *vdev) { BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
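Finally, virtio_queue_get/set_last_avail_idx() now round-trip four values through one 32-bit word for migration: the avail index and its wrap bit in the low half, the used index and its wrap bit in the high half (hence the 15-bit 0x7fff masks on decode). A standalone encode/decode check of that packing:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t pack_idx(uint16_t avail, bool aw, uint16_t used, bool uw)
    {
        return (avail | (uint32_t)aw << 15) |
               ((uint32_t)(used | (uint32_t)uw << 15) << 16);
    }

    int main(void)
    {
        uint32_t v = pack_idx(5, true, 3, false);

        assert((v & 0x7fff) == 5);           /* last_avail_idx */
        assert(v & 0x8000);                  /* last_avail_wrap_counter */
        assert(((v >> 16) & 0x7fff) == 3);   /* used_idx */
        assert(!((v >> 16) & 0x8000));       /* used_wrap_counter */
        return 0;
    }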