Diffstat (limited to 'hw')
-rw-r--r--  hw/arm/boot.c              |  18
-rw-r--r--  hw/arm/virt-acpi-build.c   |  20
-rw-r--r--  hw/core/machine.c          |   5
-rw-r--r--  hw/dma/xlnx-zdma.c         |  10
-rw-r--r--  hw/hppa/dino.c             |   3
-rw-r--r--  hw/i386/pc_piix.c          |   8
-rw-r--r--  hw/i386/pc_q35.c           |   8
-rw-r--r--  hw/i386/xen/trace-events   |   3
-rw-r--r--  hw/i386/xen/xen-hvm.c      | 205
-rw-r--r--  hw/i386/xen/xen-mapcache.c |   2
-rw-r--r--  hw/intc/arm_gic_kvm.c      |   1
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c  |  12
-rw-r--r--  hw/intc/arm_gicv3_kvm.c    |   2
-rw-r--r--  hw/nvram/fw_cfg.c          |  12
-rw-r--r--  hw/ppc/spapr.c             |  14
-rw-r--r--  hw/s390x/s390-pci-inst.c   |   3
-rw-r--r--  hw/s390x/s390-virtio-ccw.c |  10
-rw-r--r--  hw/scsi/esp.c              |   3
-rw-r--r--  hw/tpm/tpm_emulator.c      | 323
-rw-r--r--  hw/tpm/tpm_tis.c           |  52
-rw-r--r--  hw/tpm/trace-events        |   9
-rw-r--r--  hw/vfio/common.c           |   3
-rw-r--r--  hw/virtio/vhost.c          |   3
-rw-r--r--  hw/xen/xen_pt_msi.c        |   3
24 files changed, 607 insertions, 125 deletions
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 9496f331a8..1e481662ad 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -926,6 +926,15 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
static const ARMInsnFixup *primary_loader;
AddressSpace *as = arm_boot_address_space(cpu, info);
+ /* CPU objects (unlike devices) are not automatically reset on system
+ * reset, so we must always register a handler to do so. If we're
+ * actually loading a kernel, the handler is also responsible for
+ * arranging that we start it correctly.
+ */
+ for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
+ qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
+ }
+
/* The board code is not supposed to set secure_board_setup unless
* running its code in secure mode is actually possible, and KVM
* doesn't support secure.
@@ -1143,15 +1152,6 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
ARM_CPU(cs)->env.boot_info = info;
}
- /* CPU objects (unlike devices) are not automatically reset on system
- * reset, so we must always register a handler to do so. If we're
- * actually loading a kernel, the handler is also responsible for
- * arranging that we start it correctly.
- */
- for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
- qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
- }
-
if (!info->skip_dtb_autoload && have_dtb(info)) {
if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
exit(1);
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 92ceee9c0f..74f5744e87 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -400,7 +400,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
AcpiIortItsGroup *its;
AcpiIortTable *iort;
AcpiIortSmmu3 *smmu;
- size_t node_size, iort_length, smmu_offset = 0;
+ size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
AcpiIortRC *rc;
iort = acpi_data_push(table_data, sizeof(*iort));
@@ -413,7 +413,12 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
iort_length = sizeof(*iort);
iort->node_count = cpu_to_le32(nb_nodes);
- iort->node_offset = cpu_to_le32(sizeof(*iort));
+ /*
+ * Use a copy in case table_data->data moves during acpi_data_push
+ * operations.
+ */
+ iort_node_offset = sizeof(*iort);
+ iort->node_offset = cpu_to_le32(iort_node_offset);
/* ITS group node */
node_size = sizeof(*its) + sizeof(uint32_t);
@@ -429,7 +434,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
int irq = vms->irqmap[VIRT_SMMU];
/* SMMUv3 node */
- smmu_offset = iort->node_offset + node_size;
+ smmu_offset = iort_node_offset + node_size;
node_size = sizeof(*smmu) + sizeof(*idmap);
iort_length += node_size;
smmu = acpi_data_push(table_data, node_size);
@@ -450,7 +455,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
idmap->id_count = cpu_to_le32(0xFFFF);
idmap->output_base = 0;
/* output IORT node is the ITS group node (the first node) */
- idmap->output_reference = cpu_to_le32(iort->node_offset);
+ idmap->output_reference = cpu_to_le32(iort_node_offset);
}
/* Root Complex Node */
@@ -479,9 +484,14 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
idmap->output_reference = cpu_to_le32(smmu_offset);
} else {
/* output IORT node is the ITS group node (the first node) */
- idmap->output_reference = cpu_to_le32(iort->node_offset);
+ idmap->output_reference = cpu_to_le32(iort_node_offset);
}
+ /*
+ * Update the pointer address in case table_data->data moves during above
+ * acpi_data_push operations.
+ */
+ iort = (AcpiIortTable *)(table_data->data + iort_start);
iort->length = cpu_to_le32(iort_length);
build_header(linker, table_data, (void *)(table_data->data + iort_start),
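The IORT fix above replaces cached pointers with offsets because acpi_data_push() grows the table's GArray and may reallocate table_data->data, leaving earlier pointers dangling. A minimal standalone sketch of the same offset-then-rederive pattern with a plain GLib GArray (the push() helper below is a stand-in for acpi_data_push(), not the QEMU function):

#include <glib.h>
#include <stdio.h>

typedef struct {
    guint32 length;              /* stand-in for the ACPI header length field */
} Header;

/* Grow the byte array and return the offset of the appended region.
 * g_array_set_size() may reallocate array->data, so only the offset is
 * safe to keep across further pushes. */
static gsize push(GArray *blob, gsize size)
{
    gsize offset = blob->len;
    g_array_set_size(blob, blob->len + size);
    return offset;
}

int main(void)
{
    GArray *blob = g_array_new(FALSE, TRUE, 1);
    gsize hdr_offset = push(blob, sizeof(Header));

    push(blob, 4096);            /* blob->data may move here */

    /* Re-derive the pointer from data + offset only when it is needed,
     * just as the patch re-derives 'iort' before writing iort->length. */
    Header *hdr = (Header *)(blob->data + hdr_offset);
    hdr->length = blob->len;

    printf("table length: %u bytes\n", hdr->length);
    g_array_free(blob, TRUE);
    return 0;
}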
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 2040177664..617e5f8d75 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -737,7 +737,7 @@ static char *cpu_slot_to_string(const CPUArchId *cpu)
return g_string_free(s, false);
}
-static void machine_numa_finish_init(MachineState *machine)
+static void machine_numa_finish_cpu_init(MachineState *machine)
{
int i;
bool default_mapping;
@@ -792,7 +792,8 @@ void machine_run_board_init(MachineState *machine)
MachineClass *machine_class = MACHINE_GET_CLASS(machine);
if (nb_numa_nodes) {
- machine_numa_finish_init(machine);
+ numa_complete_configuration(machine);
+ machine_numa_finish_cpu_init(machine);
}
/* If the machine supports the valid_cpu_types check and the user
diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c
index 14d86c254b..8eea757aff 100644
--- a/hw/dma/xlnx-zdma.c
+++ b/hw/dma/xlnx-zdma.c
@@ -302,7 +302,7 @@ static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, void *buf)
qemu_log_mask(LOG_GUEST_ERROR,
"zdma: unaligned descriptor at %" PRIx64,
addr);
- memset(buf, 0xdeadbeef, sizeof(XlnxZDMADescr));
+ memset(buf, 0x0, sizeof(XlnxZDMADescr));
s->error = true;
return false;
}
@@ -707,9 +707,11 @@ static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
RegisterInfo *r = &s->regs_info[addr / 4];
if (!r->data) {
+ gchar *path = object_get_canonical_path(OBJECT(s));
qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
- object_get_canonical_path(OBJECT(s)),
+ path,
addr);
+ g_free(path);
ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
zdma_ch_imr_update_irq(s);
return 0;
@@ -724,9 +726,11 @@ static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
RegisterInfo *r = &s->regs_info[addr / 4];
if (!r->data) {
+ gchar *path = object_get_canonical_path(OBJECT(s));
qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
- object_get_canonical_path(OBJECT(s)),
+ path,
addr, value);
+ g_free(path);
ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
zdma_ch_imr_update_irq(s);
return;
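The zdma_read()/zdma_write() changes above plug a memory leak: object_get_canonical_path() returns a newly allocated string, and passing it straight into qemu_log() loses the only reference. A tiny standalone sketch of the ownership rule with GLib (make_path() is a hypothetical stand-in for object_get_canonical_path()):

#include <glib.h>
#include <stdio.h>

/* Hypothetical helper: like object_get_canonical_path(), it returns a
 * freshly allocated string that the caller must free. */
static gchar *make_path(const char *name)
{
    return g_strdup_printf("/machine/unattached/%s", name);
}

int main(void)
{
    /* Explicit g_free() after use, mirroring the pattern in the patch. */
    gchar *path = make_path("zdma0");
    fprintf(stderr, "%s: decode error\n", path);
    g_free(path);

    /* Alternative: scope-based cleanup (GLib >= 2.44). */
    {
        g_autofree gchar *p = make_path("zdma1");
        fprintf(stderr, "%s: decode error\n", p);
    }                            /* p is freed automatically here */
    return 0;
}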
diff --git a/hw/hppa/dino.c b/hw/hppa/dino.c
index c5dcf3104d..26f2704cd5 100644
--- a/hw/hppa/dino.c
+++ b/hw/hppa/dino.c
@@ -137,7 +137,8 @@ static void gsc_to_pci_forwarding(DinoState *s)
}
static bool dino_chip_mem_valid(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
switch (addr) {
case DINO_IAR0:
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index e36c7bbb40..b4c5b03274 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -425,19 +425,19 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->default_display = "std";
}
-static void pc_i440fx_2_13_machine_options(MachineClass *m)
+static void pc_i440fx_3_0_machine_options(MachineClass *m)
{
pc_i440fx_machine_options(m);
m->alias = "pc";
m->is_default = 1;
}
-DEFINE_I440FX_MACHINE(v2_13, "pc-i440fx-2.13", NULL,
- pc_i440fx_2_13_machine_options);
+DEFINE_I440FX_MACHINE(v3_0, "pc-i440fx-3.0", NULL,
+ pc_i440fx_3_0_machine_options);
static void pc_i440fx_2_12_machine_options(MachineClass *m)
{
- pc_i440fx_2_13_machine_options(m);
+ pc_i440fx_3_0_machine_options(m);
m->is_default = 0;
m->alias = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_12);
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 2372457c6a..83d6d75efa 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -308,18 +308,18 @@ static void pc_q35_machine_options(MachineClass *m)
m->max_cpus = 288;
}
-static void pc_q35_2_13_machine_options(MachineClass *m)
+static void pc_q35_3_0_machine_options(MachineClass *m)
{
pc_q35_machine_options(m);
m->alias = "q35";
}
-DEFINE_Q35_MACHINE(v2_13, "pc-q35-2.13", NULL,
- pc_q35_2_13_machine_options);
+DEFINE_Q35_MACHINE(v3_0, "pc-q35-3.0", NULL,
+ pc_q35_3_0_machine_options);
static void pc_q35_2_12_machine_options(MachineClass *m)
{
- pc_q35_2_13_machine_options(m);
+ pc_q35_3_0_machine_options(m);
m->alias = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_12);
}
diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events
index 8dab7bcfe0..8a9077cd4e 100644
--- a/hw/i386/xen/trace-events
+++ b/hw/i386/xen/trace-events
@@ -15,6 +15,9 @@ cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64
cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
+xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p"
+cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
+cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
# xen-mapcache.c
xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index 6ffa3c22cc..935a3676c8 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -12,6 +12,7 @@
#include "cpu.h"
#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
@@ -86,6 +87,14 @@ typedef struct XenPhysmap {
QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;
+static QLIST_HEAD(, XenPhysmap) xen_physmap;
+
+typedef struct XenPciDevice {
+ PCIDevice *pci_dev;
+ uint32_t sbdf;
+ QLIST_ENTRY(XenPciDevice) entry;
+} XenPciDevice;
+
typedef struct XenIOState {
ioservid_t ioservid;
shared_iopage_t *shared_page;
@@ -106,8 +115,8 @@ typedef struct XenIOState {
struct xs_handle *xenstore;
MemoryListener memory_listener;
MemoryListener io_listener;
+ QLIST_HEAD(, XenPciDevice) dev_list;
DeviceListener device_listener;
- QLIST_HEAD(, XenPhysmap) physmap;
hwaddr free_phys_offset;
const XenPhysmap *log_for_dirtybit;
@@ -274,14 +283,13 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
g_free(pfn_list);
}
-static XenPhysmap *get_physmapping(XenIOState *state,
- hwaddr start_addr, ram_addr_t size)
+static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
{
XenPhysmap *physmap = NULL;
start_addr &= TARGET_PAGE_MASK;
- QLIST_FOREACH(physmap, &state->physmap, list) {
+ QLIST_FOREACH(physmap, &xen_physmap, list) {
if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
return physmap;
}
@@ -289,23 +297,21 @@ static XenPhysmap *get_physmapping(XenIOState *state,
return NULL;
}
-#ifdef XEN_COMPAT_PHYSMAP
-static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
- ram_addr_t size, void *opaque)
+static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size)
{
- hwaddr addr = start_addr & TARGET_PAGE_MASK;
- XenIOState *xen_io_state = opaque;
+ hwaddr addr = phys_offset & TARGET_PAGE_MASK;
XenPhysmap *physmap = NULL;
- QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
+ QLIST_FOREACH(physmap, &xen_physmap, list) {
if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
- return physmap->start_addr;
+ return physmap->start_addr + (phys_offset - physmap->phys_offset);
}
}
- return start_addr;
+ return phys_offset;
}
+#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
char path[80], value[17];
@@ -355,7 +361,7 @@ static int xen_add_to_physmap(XenIOState *state,
hwaddr phys_offset = memory_region_get_ram_addr(mr);
const char *mr_name;
- if (get_physmapping(state, start_addr, size)) {
+ if (get_physmapping(start_addr, size)) {
return 0;
}
if (size <= 0) {
@@ -384,7 +390,7 @@ go_physmap:
physmap->name = mr_name;
physmap->phys_offset = phys_offset;
- QLIST_INSERT_HEAD(&state->physmap, physmap, list);
+ QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
if (runstate_check(RUN_STATE_INMIGRATE)) {
/* Now when we have a physmap entry we can replace a dummy mapping with
@@ -428,7 +434,7 @@ static int xen_remove_from_physmap(XenIOState *state,
XenPhysmap *physmap = NULL;
hwaddr phys_offset = 0;
- physmap = get_physmapping(state, start_addr, size);
+ physmap = get_physmapping(start_addr, size);
if (physmap == NULL) {
return -1;
}
@@ -570,6 +576,12 @@ static void xen_device_realize(DeviceListener *listener,
if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
PCIDevice *pci_dev = PCI_DEVICE(dev);
+ XenPciDevice *xendev = g_new(XenPciDevice, 1);
+
+ xendev->pci_dev = pci_dev;
+ xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
+ pci_dev->devfn);
+ QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);
xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
}
@@ -582,8 +594,17 @@ static void xen_device_unrealize(DeviceListener *listener,
if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
PCIDevice *pci_dev = PCI_DEVICE(dev);
+ XenPciDevice *xendev, *next;
xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);
+
+ QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
+ if (xendev->pci_dev == pci_dev) {
+ QLIST_REMOVE(xendev, entry);
+ g_free(xendev);
+ break;
+ }
+ }
}
}
@@ -597,7 +618,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
int rc, i, j;
const XenPhysmap *physmap = NULL;
- physmap = get_physmapping(state, start_addr, size);
+ physmap = get_physmapping(start_addr, size);
if (physmap == NULL) {
/* not handled */
return;
@@ -904,6 +925,62 @@ static void cpu_ioreq_move(ioreq_t *req)
}
}
+static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
+{
+ uint32_t sbdf = req->addr >> 32;
+ uint32_t reg = req->addr;
+ XenPciDevice *xendev;
+
+ if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
+ req->size != sizeof(uint32_t)) {
+ hw_error("PCI config access: bad size (%u)", req->size);
+ }
+
+ if (req->count != 1) {
+ hw_error("PCI config access: bad count (%u)", req->count);
+ }
+
+ QLIST_FOREACH(xendev, &state->dev_list, entry) {
+ if (xendev->sbdf != sbdf) {
+ continue;
+ }
+
+ if (!req->data_is_ptr) {
+ if (req->dir == IOREQ_READ) {
+ req->data = pci_host_config_read_common(
+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
+ req->size);
+ trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
+ req->size, req->data);
+ } else if (req->dir == IOREQ_WRITE) {
+ trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
+ req->size, req->data);
+ pci_host_config_write_common(
+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
+ req->data, req->size);
+ }
+ } else {
+ uint32_t tmp;
+
+ if (req->dir == IOREQ_READ) {
+ tmp = pci_host_config_read_common(
+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
+ req->size);
+ trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
+ req->size, tmp);
+ write_phys_req_item(req->data, req, 0, &tmp);
+ } else if (req->dir == IOREQ_WRITE) {
+ read_phys_req_item(req->data, req, 0, &tmp);
+ trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
+ req->size, tmp);
+ pci_host_config_write_common(
+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
+ tmp, req->size);
+ }
+ }
+ }
+}
+
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
X86CPU *cpu;
@@ -976,27 +1053,9 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req)
case IOREQ_TYPE_INVALIDATE:
xen_invalidate_map_cache();
break;
- case IOREQ_TYPE_PCI_CONFIG: {
- uint32_t sbdf = req->addr >> 32;
- uint32_t val;
-
- /* Fake a write to port 0xCF8 so that
- * the config space access will target the
- * correct device model.
- */
- val = (1u << 31) |
- ((req->addr & 0x0f00) << 16) |
- ((sbdf & 0xffff) << 8) |
- (req->addr & 0xfc);
- do_outp(0xcf8, 4, val);
-
- /* Now issue the config space access via
- * port 0xCFC
- */
- req->addr = 0xcfc | (req->addr & 0x03);
- cpu_ioreq_pio(req);
+ case IOREQ_TYPE_PCI_CONFIG:
+ cpu_ioreq_config(state, req);
break;
- }
default:
hw_error("Invalid ioreq type 0x%x\n", req->type);
}
@@ -1222,7 +1281,7 @@ static void xen_read_physmap(XenIOState *state)
xen_domid, entries[i]);
physmap->name = xs_read(state->xenstore, 0, path, &len);
- QLIST_INSERT_HEAD(&state->physmap, physmap, list);
+ QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
}
free(entries);
}
@@ -1239,13 +1298,39 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data)
static int xen_map_ioreq_server(XenIOState *state)
{
+ void *addr = NULL;
+ xenforeignmemory_resource_handle *fres;
xen_pfn_t ioreq_pfn;
xen_pfn_t bufioreq_pfn;
evtchn_port_t bufioreq_evtchn;
int rc;
+ /*
+ * Attempt to map using the resource API and fall back to normal
+ * foreign mapping if this is not supported.
+ */
+ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
+ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
+ fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
+ XENMEM_resource_ioreq_server,
+ state->ioservid, 0, 2,
+ &addr,
+ PROT_READ | PROT_WRITE, 0);
+ if (fres != NULL) {
+ trace_xen_map_resource_ioreq(state->ioservid, addr);
+ state->buffered_io_page = addr;
+ state->shared_page = addr + TARGET_PAGE_SIZE;
+ } else if (errno != EOPNOTSUPP) {
+ error_report("failed to map ioreq server resources: error %d handle=%p",
+ errno, xen_xc);
+ return -1;
+ }
+
rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
- &ioreq_pfn, &bufioreq_pfn,
+ (state->shared_page == NULL) ?
+ &ioreq_pfn : NULL,
+ (state->buffered_io_page == NULL) ?
+ &bufioreq_pfn : NULL,
&bufioreq_evtchn);
if (rc < 0) {
error_report("failed to get ioreq server info: error %d handle=%p",
@@ -1253,27 +1338,37 @@ static int xen_map_ioreq_server(XenIOState *state)
return rc;
}
- DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
- DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
- DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
-
- state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &ioreq_pfn, NULL);
if (state->shared_page == NULL) {
- error_report("map shared IO page returned error %d handle=%p",
- errno, xen_xc);
- return -1;
+ DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
+
+ state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &ioreq_pfn, NULL);
+ if (state->shared_page == NULL) {
+ error_report("map shared IO page returned error %d handle=%p",
+ errno, xen_xc);
+ }
}
- state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &bufioreq_pfn, NULL);
if (state->buffered_io_page == NULL) {
- error_report("map buffered IO page returned error %d", errno);
+ DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
+
+ state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &bufioreq_pfn,
+ NULL);
+ if (state->buffered_io_page == NULL) {
+ error_report("map buffered IO page returned error %d", errno);
+ return -1;
+ }
+ }
+
+ if (state->shared_page == NULL || state->buffered_io_page == NULL) {
return -1;
}
+ DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
+
state->bufioreq_remote_port = bufioreq_evtchn;
return 0;
@@ -1374,7 +1469,6 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
state->memory_listener = xen_memory_listener;
- QLIST_INIT(&state->physmap);
memory_listener_register(&state->memory_listener, &address_space_memory);
state->log_for_dirtybit = NULL;
@@ -1382,6 +1476,7 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
memory_listener_register(&state->io_listener, &address_space_io);
state->device_listener = xen_device_listener;
+ QLIST_INIT(&state->dev_list);
device_listener_register(&state->device_listener);
/* Initialize backend core & drivers */
@@ -1390,6 +1485,8 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
goto err;
}
xen_be_register_common();
+
+ QLIST_INIT(&xen_physmap);
xen_read_physmap(state);
/* Disable ACPI build because Xen handles it */
@@ -1461,6 +1558,8 @@ void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
int rc;
ram_addr_t start_pfn, nb_pages;
+ start = xen_phys_offset_to_gaddr(start, length);
+
if (length == 0) {
length = TARGET_PAGE_SIZE;
}
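In the new cpu_ioreq_config() above, the upper 32 bits of req->addr carry the SBDF that xen_device_realize() recorded via PCI_BUILD_BDF(), and the low bits carry the config-space register offset. A short standalone sketch of that decoding, assuming the usual bus:8 / device:5 / function:3 packing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example ioreq address: BDF 03:02.0 in the high word, register 0x04. */
    uint64_t ioreq_addr = ((uint64_t)0x0310 << 32) | 0x04;

    uint32_t sbdf = ioreq_addr >> 32;       /* as packed by PCI_BUILD_BDF() */
    uint32_t reg  = (uint32_t)ioreq_addr;   /* offset into config space */

    unsigned bus = (sbdf >> 8) & 0xff;
    unsigned dev = (sbdf >> 3) & 0x1f;
    unsigned fn  = sbdf & 0x7;

    printf("config access to %02x:%02x.%x, register 0x%x\n",
           bus, dev, fn, reg);
    return 0;
}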
diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
index 541b7693b3..628b813a11 100644
--- a/hw/i386/xen/xen-mapcache.c
+++ b/hw/i386/xen/xen-mapcache.c
@@ -318,7 +318,7 @@ tryagain:
mapcache->last_entry = NULL;
#ifdef XEN_COMPAT_PHYSMAP
if (!translated && mapcache->phys_offset_to_gaddr) {
- phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
+ phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size);
translated = true;
goto tryagain;
}
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 6f467e68a8..204369d0e2 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -572,7 +572,6 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
if (kvm_has_gsi_routing()) {
/* set up irq routing */
- kvm_init_irq_routing(kvm_state);
for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
}
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index cb9a3a542d..5c89be1af0 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -427,7 +427,7 @@ static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
int regno = ri->opc2 & 3;
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
uint64_t value = cs->ich_apr[grp][regno];
trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
@@ -439,7 +439,7 @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
GICv3CPUState *cs = icc_cs_from_env(env);
int regno = ri->opc2 & 3;
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
@@ -1461,7 +1461,7 @@ static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
uint64_t value;
int regno = ri->opc2 & 3;
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
+ int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
return icv_ap_read(env, ri);
@@ -1483,7 +1483,7 @@ static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int regno = ri->opc2 & 3;
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
+ int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
icv_ap_write(env, ri, value);
@@ -2292,7 +2292,7 @@ static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
int regno = ri->opc2 & 3;
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
uint64_t value;
value = cs->ich_apr[grp][regno];
@@ -2305,7 +2305,7 @@ static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
GICv3CPUState *cs = icc_cs_from_env(env);
int regno = ri->opc2 & 3;
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index ec371772b3..0279b86cd9 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -243,6 +243,7 @@ static void kvm_dist_putbmp(GICv3State *s, uint32_t offset,
if (clroffset != 0) {
reg = 0;
kvm_gicd_access(s, clroffset, &reg, true);
+ clroffset += 4;
}
reg = *gic_bmp_ptr32(bmp, irq);
kvm_gicd_access(s, offset, &reg, true);
@@ -760,7 +761,6 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
if (kvm_has_gsi_routing()) {
/* set up irq routing */
- kvm_init_irq_routing(kvm_state);
for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
}
diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
index 2a0739d0e9..b23e7f64a8 100644
--- a/hw/nvram/fw_cfg.c
+++ b/hw/nvram/fw_cfg.c
@@ -420,14 +420,16 @@ static void fw_cfg_dma_mem_write(void *opaque, hwaddr addr,
}
static bool fw_cfg_dma_mem_valid(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return !is_write || ((size == 4 && (addr == 0 || addr == 4)) ||
(size == 8 && addr == 0));
}
static bool fw_cfg_data_mem_valid(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return addr == 0;
}
@@ -439,7 +441,8 @@ static void fw_cfg_ctl_mem_write(void *opaque, hwaddr addr,
}
static bool fw_cfg_ctl_mem_valid(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return is_write && size == 2;
}
@@ -458,7 +461,8 @@ static void fw_cfg_comb_write(void *opaque, hwaddr addr,
}
static bool fw_cfg_comb_valid(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return (size == 1) || (is_write && size == 2);
}
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 2e910428f3..2375cbee12 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -4070,18 +4070,18 @@ static const TypeInfo spapr_machine_info = {
type_init(spapr_machine_register_##suffix)
/*
- * pseries-2.13
+ * pseries-3.0
*/
-static void spapr_machine_2_13_instance_options(MachineState *machine)
+static void spapr_machine_3_0_instance_options(MachineState *machine)
{
}
-static void spapr_machine_2_13_class_options(MachineClass *mc)
+static void spapr_machine_3_0_class_options(MachineClass *mc)
{
/* Defaults for the latest behaviour inherited from the base class */
}
-DEFINE_SPAPR_MACHINE(2_13, "2.13", true);
+DEFINE_SPAPR_MACHINE(3_0, "3.0", true);
/*
* pseries-2.12
@@ -4090,18 +4090,18 @@ DEFINE_SPAPR_MACHINE(2_13, "2.13", true);
HW_COMPAT_2_12 \
{ \
.driver = TYPE_POWERPC_CPU, \
- .property = "pre-2.13-migration", \
+ .property = "pre-3.0-migration", \
.value = "on", \
},
static void spapr_machine_2_12_instance_options(MachineState *machine)
{
- spapr_machine_2_13_instance_options(machine);
+ spapr_machine_3_0_instance_options(machine);
}
static void spapr_machine_2_12_class_options(MachineClass *mc)
{
- spapr_machine_2_13_class_options(mc);
+ spapr_machine_3_0_class_options(mc);
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_12);
}
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index 02a815fd31..d1a5f79678 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -762,7 +762,8 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
mr = s390_get_subregion(mr, offset, len);
offset -= mr->addr;
- if (!memory_region_access_valid(mr, offset, len, true)) {
+ if (!memory_region_access_valid(mr, offset, len, true,
+ MEMTXATTRS_UNSPECIFIED)) {
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return 0;
}
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index e548d341a0..7ae5fb38dd 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -812,23 +812,23 @@ bool css_migration_enabled(void)
.value = "0",\
},
-static void ccw_machine_2_13_instance_options(MachineState *machine)
+static void ccw_machine_3_0_instance_options(MachineState *machine)
{
}
-static void ccw_machine_2_13_class_options(MachineClass *mc)
+static void ccw_machine_3_0_class_options(MachineClass *mc)
{
}
-DEFINE_CCW_MACHINE(2_13, "2.13", true);
+DEFINE_CCW_MACHINE(3_0, "3.0", true);
static void ccw_machine_2_12_instance_options(MachineState *machine)
{
- ccw_machine_2_13_instance_options(machine);
+ ccw_machine_3_0_instance_options(machine);
}
static void ccw_machine_2_12_class_options(MachineClass *mc)
{
- ccw_machine_2_13_class_options(mc);
+ ccw_machine_3_0_class_options(mc);
SET_MACHINE_COMPAT(mc, CCW_COMPAT_2_12);
}
DEFINE_CCW_MACHINE(2_12, "2.12", false);
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 64ec285826..9ed9727744 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -564,7 +564,8 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
}
static bool esp_mem_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return (size == 1) || (is_write && size == 4);
}
diff --git a/hw/tpm/tpm_emulator.c b/hw/tpm/tpm_emulator.c
index 6418ef0831..10bc20dbec 100644
--- a/hw/tpm/tpm_emulator.c
+++ b/hw/tpm/tpm_emulator.c
@@ -4,7 +4,7 @@
* Copyright (c) 2017 Intel Corporation
* Author: Amarnath Valluri <amarnath.valluri@intel.com>
*
- * Copyright (c) 2010 - 2013 IBM Corporation
+ * Copyright (c) 2010 - 2013, 2018 IBM Corporation
* Authors:
* Stefan Berger <stefanb@us.ibm.com>
*
@@ -49,6 +49,19 @@
#define TPM_EMULATOR_IMPLEMENTS_ALL_CAPS(S, cap) (((S)->caps & (cap)) == (cap))
/* data structures */
+
+/* blobs from the TPM; part of VM state when migrating */
+typedef struct TPMBlobBuffers {
+ uint32_t permanent_flags;
+ TPMSizedBuffer permanent;
+
+ uint32_t volatil_flags;
+ TPMSizedBuffer volatil;
+
+ uint32_t savestate_flags;
+ TPMSizedBuffer savestate;
+} TPMBlobBuffers;
+
typedef struct TPMEmulator {
TPMBackend parent;
@@ -64,6 +77,8 @@ typedef struct TPMEmulator {
unsigned int established_flag:1;
unsigned int established_flag_cached:1;
+
+ TPMBlobBuffers state_blobs;
} TPMEmulator;
@@ -293,7 +308,8 @@ static int tpm_emulator_set_buffer_size(TPMBackend *tb,
return 0;
}
-static int tpm_emulator_startup_tpm(TPMBackend *tb, size_t buffersize)
+static int tpm_emulator_startup_tpm_resume(TPMBackend *tb, size_t buffersize,
+ bool is_resume)
{
TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
ptm_init init = {
@@ -301,12 +317,17 @@ static int tpm_emulator_startup_tpm(TPMBackend *tb, size_t buffersize)
};
ptm_res res;
+ trace_tpm_emulator_startup_tpm_resume(is_resume, buffersize);
+
if (buffersize != 0 &&
tpm_emulator_set_buffer_size(tb, buffersize, NULL) < 0) {
goto err_exit;
}
- trace_tpm_emulator_startup_tpm();
+ if (is_resume) {
+ init.u.req.init_flags |= cpu_to_be32(PTM_INIT_FLAG_DELETE_VOLATILE);
+ }
+
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_INIT, &init, sizeof(init),
sizeof(init)) < 0) {
error_report("tpm-emulator: could not send INIT: %s",
@@ -325,6 +346,11 @@ err_exit:
return -1;
}
+static int tpm_emulator_startup_tpm(TPMBackend *tb, size_t buffersize)
+{
+ return tpm_emulator_startup_tpm_resume(tb, buffersize, false);
+}
+
static bool tpm_emulator_get_tpm_established_flag(TPMBackend *tb)
{
TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
@@ -423,16 +449,21 @@ static size_t tpm_emulator_get_buffer_size(TPMBackend *tb)
static int tpm_emulator_block_migration(TPMEmulator *tpm_emu)
{
Error *err = NULL;
+ ptm_cap caps = PTM_CAP_GET_STATEBLOB | PTM_CAP_SET_STATEBLOB |
+ PTM_CAP_STOP;
- error_setg(&tpm_emu->migration_blocker,
- "Migration disabled: TPM emulator not yet migratable");
- migrate_add_blocker(tpm_emu->migration_blocker, &err);
- if (err) {
- error_report_err(err);
- error_free(tpm_emu->migration_blocker);
- tpm_emu->migration_blocker = NULL;
+ if (!TPM_EMULATOR_IMPLEMENTS_ALL_CAPS(tpm_emu, caps)) {
+ error_setg(&tpm_emu->migration_blocker,
+ "Migration disabled: TPM emulator does not support "
+ "migration");
+ migrate_add_blocker(tpm_emu->migration_blocker, &err);
+ if (err) {
+ error_report_err(err);
+ error_free(tpm_emu->migration_blocker);
+ tpm_emu->migration_blocker = NULL;
- return -1;
+ return -1;
+ }
}
return 0;
@@ -570,6 +601,267 @@ static const QemuOptDesc tpm_emulator_cmdline_opts[] = {
{ /* end of list */ },
};
+/*
+ * Transfer a TPM state blob from the TPM into a provided buffer.
+ *
+ * @tpm_emu: TPMEmulator
+ * @type: the type of blob to transfer
+ * @tsb: the TPMSizeBuffer to fill with the blob
+ * @flags: the flags to return to the caller
+ */
+static int tpm_emulator_get_state_blob(TPMEmulator *tpm_emu,
+ uint8_t type,
+ TPMSizedBuffer *tsb,
+ uint32_t *flags)
+{
+ ptm_getstate pgs;
+ ptm_res res;
+ ssize_t n;
+ uint32_t totlength, length;
+
+ tpm_sized_buffer_reset(tsb);
+
+ pgs.u.req.state_flags = cpu_to_be32(PTM_STATE_FLAG_DECRYPTED);
+ pgs.u.req.type = cpu_to_be32(type);
+ pgs.u.req.offset = 0;
+
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_STATEBLOB,
+ &pgs, sizeof(pgs.u.req),
+ offsetof(ptm_getstate, u.resp.data)) < 0) {
+ error_report("tpm-emulator: could not get state blob type %d : %s",
+ type, strerror(errno));
+ return -1;
+ }
+
+ res = be32_to_cpu(pgs.u.resp.tpm_result);
+ if (res != 0 && (res & 0x800) == 0) {
+ error_report("tpm-emulator: Getting the stateblob (type %d) failed "
+ "with a TPM error 0x%x", type, res);
+ return -1;
+ }
+
+ totlength = be32_to_cpu(pgs.u.resp.totlength);
+ length = be32_to_cpu(pgs.u.resp.length);
+ if (totlength != length) {
+ error_report("tpm-emulator: Expecting to read %u bytes "
+ "but would get %u", totlength, length);
+ return -1;
+ }
+
+ *flags = be32_to_cpu(pgs.u.resp.state_flags);
+
+ if (totlength > 0) {
+ tsb->buffer = g_try_malloc(totlength);
+ if (!tsb->buffer) {
+ error_report("tpm-emulator: Out of memory allocating %u bytes",
+ totlength);
+ return -1;
+ }
+
+ n = qemu_chr_fe_read_all(&tpm_emu->ctrl_chr, tsb->buffer, totlength);
+ if (n != totlength) {
+ error_report("tpm-emulator: Could not read stateblob (type %d); "
+ "expected %u bytes, got %zd",
+ type, totlength, n);
+ return -1;
+ }
+ }
+ tsb->size = totlength;
+
+ trace_tpm_emulator_get_state_blob(type, tsb->size, *flags);
+
+ return 0;
+}
+
+static int tpm_emulator_get_state_blobs(TPMEmulator *tpm_emu)
+{
+ TPMBlobBuffers *state_blobs = &tpm_emu->state_blobs;
+
+ if (tpm_emulator_get_state_blob(tpm_emu, PTM_BLOB_TYPE_PERMANENT,
+ &state_blobs->permanent,
+ &state_blobs->permanent_flags) < 0 ||
+ tpm_emulator_get_state_blob(tpm_emu, PTM_BLOB_TYPE_VOLATILE,
+ &state_blobs->volatil,
+ &state_blobs->volatil_flags) < 0 ||
+ tpm_emulator_get_state_blob(tpm_emu, PTM_BLOB_TYPE_SAVESTATE,
+ &state_blobs->savestate,
+ &state_blobs->savestate_flags) < 0) {
+ goto err_exit;
+ }
+
+ return 0;
+
+ err_exit:
+ tpm_sized_buffer_reset(&state_blobs->volatil);
+ tpm_sized_buffer_reset(&state_blobs->permanent);
+ tpm_sized_buffer_reset(&state_blobs->savestate);
+
+ return -1;
+}
+
+/*
+ * Transfer a TPM state blob to the TPM emulator.
+ *
+ * @tpm_emu: TPMEmulator
+ * @type: the type of TPM state blob to transfer
+ * @tsb: TPMSizedBuffer containing the TPM state blob
+ * @flags: Flags describing the (encryption) state of the TPM state blob
+ */
+static int tpm_emulator_set_state_blob(TPMEmulator *tpm_emu,
+ uint32_t type,
+ TPMSizedBuffer *tsb,
+ uint32_t flags)
+{
+ ssize_t n;
+ ptm_setstate pss;
+ ptm_res tpm_result;
+
+ if (tsb->size == 0) {
+ return 0;
+ }
+
+ pss = (ptm_setstate) {
+ .u.req.state_flags = cpu_to_be32(flags),
+ .u.req.type = cpu_to_be32(type),
+ .u.req.length = cpu_to_be32(tsb->size),
+ };
+
+ /* write the header only */
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_STATEBLOB, &pss,
+ offsetof(ptm_setstate, u.req.data), 0) < 0) {
+ error_report("tpm-emulator: could not set state blob type %d : %s",
+ type, strerror(errno));
+ return -1;
+ }
+
+ /* now the body */
+ n = qemu_chr_fe_write_all(&tpm_emu->ctrl_chr, tsb->buffer, tsb->size);
+ if (n != tsb->size) {
+ error_report("tpm-emulator: Writing the stateblob (type %d) "
+ "failed; could not write %u bytes, but only %zd",
+ type, tsb->size, n);
+ return -1;
+ }
+
+ /* now get the result */
+ n = qemu_chr_fe_read_all(&tpm_emu->ctrl_chr,
+ (uint8_t *)&pss, sizeof(pss.u.resp));
+ if (n != sizeof(pss.u.resp)) {
+ error_report("tpm-emulator: Reading response from writing stateblob "
+ "(type %d) failed; expected %zu bytes, got %zd", type,
+ sizeof(pss.u.resp), n);
+ return -1;
+ }
+
+ tpm_result = be32_to_cpu(pss.u.resp.tpm_result);
+ if (tpm_result != 0) {
+ error_report("tpm-emulator: Setting the stateblob (type %d) failed "
+ "with a TPM error 0x%x", type, tpm_result);
+ return -1;
+ }
+
+ trace_tpm_emulator_set_state_blob(type, tsb->size, flags);
+
+ return 0;
+}
+
+/*
+ * Set all the TPM state blobs.
+ *
+ * Returns a negative errno code in case of error.
+ */
+static int tpm_emulator_set_state_blobs(TPMBackend *tb)
+{
+ TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
+ TPMBlobBuffers *state_blobs = &tpm_emu->state_blobs;
+
+ trace_tpm_emulator_set_state_blobs();
+
+ if (tpm_emulator_stop_tpm(tb) < 0) {
+ trace_tpm_emulator_set_state_blobs_error("Could not stop TPM");
+ return -EIO;
+ }
+
+ if (tpm_emulator_set_state_blob(tpm_emu, PTM_BLOB_TYPE_PERMANENT,
+ &state_blobs->permanent,
+ state_blobs->permanent_flags) < 0 ||
+ tpm_emulator_set_state_blob(tpm_emu, PTM_BLOB_TYPE_VOLATILE,
+ &state_blobs->volatil,
+ state_blobs->volatil_flags) < 0 ||
+ tpm_emulator_set_state_blob(tpm_emu, PTM_BLOB_TYPE_SAVESTATE,
+ &state_blobs->savestate,
+ state_blobs->savestate_flags) < 0) {
+ return -EIO;
+ }
+
+ trace_tpm_emulator_set_state_blobs_done();
+
+ return 0;
+}
+
+static int tpm_emulator_pre_save(void *opaque)
+{
+ TPMBackend *tb = opaque;
+ TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
+
+ trace_tpm_emulator_pre_save();
+
+ tpm_backend_finish_sync(tb);
+
+ /* get the state blobs from the TPM */
+ return tpm_emulator_get_state_blobs(tpm_emu);
+}
+
+/*
+ * Load the TPM state blobs into the TPM.
+ *
+ * Returns negative errno codes in case of error.
+ */
+static int tpm_emulator_post_load(void *opaque, int version_id)
+{
+ TPMBackend *tb = opaque;
+ int ret;
+
+ ret = tpm_emulator_set_state_blobs(tb);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (tpm_emulator_startup_tpm_resume(tb, 0, true) < 0) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_tpm_emulator = {
+ .name = "tpm-emulator",
+ .version_id = 0,
+ .pre_save = tpm_emulator_pre_save,
+ .post_load = tpm_emulator_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(state_blobs.permanent_flags, TPMEmulator),
+ VMSTATE_UINT32(state_blobs.permanent.size, TPMEmulator),
+ VMSTATE_VBUFFER_ALLOC_UINT32(state_blobs.permanent.buffer,
+ TPMEmulator, 0, 0,
+ state_blobs.permanent.size),
+
+ VMSTATE_UINT32(state_blobs.volatil_flags, TPMEmulator),
+ VMSTATE_UINT32(state_blobs.volatil.size, TPMEmulator),
+ VMSTATE_VBUFFER_ALLOC_UINT32(state_blobs.volatil.buffer,
+ TPMEmulator, 0, 0,
+ state_blobs.volatil.size),
+
+ VMSTATE_UINT32(state_blobs.savestate_flags, TPMEmulator),
+ VMSTATE_UINT32(state_blobs.savestate.size, TPMEmulator),
+ VMSTATE_VBUFFER_ALLOC_UINT32(state_blobs.savestate.buffer,
+ TPMEmulator, 0, 0,
+ state_blobs.savestate.size),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static void tpm_emulator_inst_init(Object *obj)
{
TPMEmulator *tpm_emu = TPM_EMULATOR(obj);
@@ -579,6 +871,8 @@ static void tpm_emulator_inst_init(Object *obj)
tpm_emu->options = g_new0(TPMEmulatorOptions, 1);
tpm_emu->cur_locty_number = ~0;
qemu_mutex_init(&tpm_emu->mutex);
+
+ vmstate_register(NULL, -1, &vmstate_tpm_emulator, obj);
}
/*
@@ -600,6 +894,7 @@ static void tpm_emulator_shutdown(TPMEmulator *tpm_emu)
static void tpm_emulator_inst_finalize(Object *obj)
{
TPMEmulator *tpm_emu = TPM_EMULATOR(obj);
+ TPMBlobBuffers *state_blobs = &tpm_emu->state_blobs;
tpm_emulator_shutdown(tpm_emu);
@@ -614,7 +909,13 @@ static void tpm_emulator_inst_finalize(Object *obj)
error_free(tpm_emu->migration_blocker);
}
+ tpm_sized_buffer_reset(&state_blobs->volatil);
+ tpm_sized_buffer_reset(&state_blobs->permanent);
+ tpm_sized_buffer_reset(&state_blobs->savestate);
+
qemu_mutex_destroy(&tpm_emu->mutex);
+
+ vmstate_unregister(NULL, &vmstate_tpm_emulator, obj);
}
static void tpm_emulator_class_init(ObjectClass *klass, void *data)
diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c
index 2ac7e74307..12f5c9a759 100644
--- a/hw/tpm/tpm_tis.c
+++ b/hw/tpm/tpm_tis.c
@@ -894,9 +894,57 @@ static void tpm_tis_reset(DeviceState *dev)
tpm_backend_startup_tpm(s->be_driver, s->be_buffer_size);
}
+/* persistent state handling */
+
+static int tpm_tis_pre_save(void *opaque)
+{
+ TPMState *s = opaque;
+ uint8_t locty = s->active_locty;
+
+ trace_tpm_tis_pre_save(locty, s->rw_offset);
+
+ if (DEBUG_TIS) {
+ tpm_tis_dump_state(opaque, 0);
+ }
+
+ /*
+ * Synchronize with backend completion.
+ */
+ tpm_backend_finish_sync(s->be_driver);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_locty = {
+ .name = "tpm-tis/locty",
+ .version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(state, TPMLocality),
+ VMSTATE_UINT32(inte, TPMLocality),
+ VMSTATE_UINT32(ints, TPMLocality),
+ VMSTATE_UINT8(access, TPMLocality),
+ VMSTATE_UINT32(sts, TPMLocality),
+ VMSTATE_UINT32(iface_id, TPMLocality),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
static const VMStateDescription vmstate_tpm_tis = {
- .name = "tpm",
- .unmigratable = 1,
+ .name = "tpm-tis",
+ .version_id = 0,
+ .pre_save = tpm_tis_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_BUFFER(buffer, TPMState),
+ VMSTATE_UINT16(rw_offset, TPMState),
+ VMSTATE_UINT8(active_locty, TPMState),
+ VMSTATE_UINT8(aborting_locty, TPMState),
+ VMSTATE_UINT8(next_locty, TPMState),
+
+ VMSTATE_STRUCT_ARRAY(loc, TPMState, TPM_TIS_NUM_LOCALITIES, 0,
+ vmstate_locty, TPMLocality),
+
+ VMSTATE_END_OF_LIST()
+ }
};
static Property tpm_tis_properties[] = {
diff --git a/hw/tpm/trace-events b/hw/tpm/trace-events
index 9a65384088..25bee0cecf 100644
--- a/hw/tpm/trace-events
+++ b/hw/tpm/trace-events
@@ -20,13 +20,19 @@ tpm_emulator_set_locality(uint8_t locty) "setting locality to %d"
tpm_emulator_handle_request(void) "processing TPM command"
tpm_emulator_probe_caps(uint64_t caps) "capabilities: 0x%"PRIx64
tpm_emulator_set_buffer_size(uint32_t buffersize, uint32_t minsize, uint32_t maxsize) "buffer size: %u, min: %u, max: %u"
-tpm_emulator_startup_tpm(void) "startup"
+tpm_emulator_startup_tpm_resume(bool is_resume, size_t buffersize) "is_resume: %d, buffer size: %zu"
tpm_emulator_get_tpm_established_flag(uint8_t flag) "got established flag: %d"
tpm_emulator_cancel_cmd_not_supt(void) "Backend does not support CANCEL_TPM_CMD"
tpm_emulator_handle_device_opts_tpm12(void) "TPM Version 1.2"
tpm_emulator_handle_device_opts_tpm2(void) "TPM Version 2"
tpm_emulator_handle_device_opts_unspec(void) "TPM Version Unspecified"
tpm_emulator_handle_device_opts_startup_error(void) "Startup error"
+tpm_emulator_get_state_blob(uint8_t type, uint32_t size, uint32_t flags) "got state blob type %d, %u bytes, flags 0x%08x"
+tpm_emulator_set_state_blob(uint8_t type, uint32_t size, uint32_t flags) "set state blob type %d, %u bytes, flags 0x%08x"
+tpm_emulator_set_state_blobs(void) "setting state blobs"
+tpm_emulator_set_state_blobs_error(const char *msg) "error while setting state blobs: %s"
+tpm_emulator_set_state_blobs_done(void) "Done setting state blobs"
+tpm_emulator_pre_save(void) ""
tpm_emulator_inst_init(void) ""
# hw/tpm/tpm_tis.c
@@ -44,3 +50,4 @@ tpm_tis_mmio_write_locty_seized(uint8_t locty, uint8_t active) "Locality %d seiz
tpm_tis_mmio_write_init_abort(void) "Initiating abort"
tpm_tis_mmio_write_lowering_irq(void) "Lowering IRQ"
tpm_tis_mmio_write_data2send(uint32_t value, unsigned size) "Data to send to TPM: 0x%08x (size=%d)"
+tpm_tis_pre_save(uint8_t locty, uint32_t rw_offset) "locty: %d, rw_offset = %u"
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 07ffa0ba10..8e57265edf 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -324,7 +324,8 @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
*/
mr = address_space_translate(&address_space_memory,
iotlb->translated_addr,
- &xlat, &len, writable);
+ &xlat, &len, writable,
+ MEMTXATTRS_UNSPECIFIED);
if (!memory_region_is_ram(mr)) {
error_report("iommu map to non memory area %"HWADDR_PRIx"",
xlat);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index b08290036d..4565b69f83 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -897,7 +897,8 @@ int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
trace_vhost_iotlb_miss(dev, 1);
iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
- iova, write);
+ iova, write,
+ MEMTXATTRS_UNSPECIFIED);
if (iotlb.target_as != NULL) {
ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
&uaddr, &len);
diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c
index 6d1e3bdeb4..cc514f9157 100644
--- a/hw/xen/xen_pt_msi.c
+++ b/hw/xen/xen_pt_msi.c
@@ -498,7 +498,8 @@ static uint64_t pci_msix_read(void *opaque, hwaddr addr,
}
static bool pci_msix_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return !(addr & (size - 1));
}