author     Richard Henderson <richard.henderson@linaro.org>   2022-06-16 11:25:01 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2022-06-16 11:25:01 -0700
commit     a28498b1f9591e12dcbfdf06dc8f54e15926760e (patch)
tree       161861ed5de8397d2b174738f6f7de50cc7c21eb /hw
parent     213fda642dd5c2c132ebb7898d96e2991d0bd891 (diff)
parent     8c97e4deeca9ad791ab369d3879ebfb0267b24ca (diff)
Merge tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
virtio,pc,pci: fixes,cleanups,features

more CXL patches
RSA support for crypto
fixes, cleanups all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmKrYLMPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpwpwH/2IS+V7wS3q/XXPz1HndJLpUP/z+mkeu9W6+
# X1U9CJ+66Ag4eD5T/jzoN0JEjiTeET/3xM+PY5NYZCh6QTAmA7EfFZv99oNWpGd1
# +nyxOdaMDPSscOKjLfDziVTi/QYIZBtU6TeixL9whkipYCqmgbs5gXV8ynltmKyF
# bIJVeaXm5yQLcCTGzKzdXf+HmTErpEGDCDHFjzrLVjICRDdekElGVwYTn+ycl7p7
# oLsWWVDgqo0p86BITlrHUXUrxTXF3wyg2B59cT7Ilbb3o+Fa2GsP+o9IXMuVoNNp
# A+zrq1QZ49UO3XwkS03xDDioUQ1T/V0L4w9dEfaGvpY4Horv0HI=
# =PvmT
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 16 Jun 2022 09:56:19 AM PDT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  acpi/erst: fix fallthrough code upon validation failure
  vhost: also check queue state in the vhost_dev_set_log error routine
  crypto: Introduce RSA algorithm
  virtio-iommu: Add an assert check in translate routine
  virtio-iommu: Use recursive lock to avoid deadlock
  virtio-iommu: Add bypass mode support to assigned device
  virtio/vhost-user: Fix wrong vhost notifier GPtrArray size
  docs/cxl: Add switch documentation
  pci-bridge/cxl_downstream: Add a CXL switch downstream port
  pci-bridge/cxl_upstream: Add a CXL switch upstream port

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
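The headline addition here is the pair of new pci-bridge devices that let an emulated CXL switch sit between a root port and a type 3 device. As a rough, unverified sketch of how they might be wired on the command line (the "cxl-downstream" name comes from this diff; "cxl-upstream" and the surrounding pxb-cxl/cxl-rp/cxl-type3 options follow the switch documentation added by this merge, with the cxl-mem0/cxl-lsa0 memory backends assumed to be defined elsewhere):

    -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
    -device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=2 \
    -device cxl-upstream,bus=root_port0,id=us0 \
    -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \
    -device cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0

This mirrors the single level of switching that cxl_cfmws_find_device() is taught to traverse in the hw/cxl/cxl-host.c hunk below.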
Diffstat (limited to 'hw')
-rw-r--r--  hw/acpi/erst.c                    3
-rw-r--r--  hw/cxl/cxl-host.c                43
-rw-r--r--  hw/pci-bridge/cxl_downstream.c  249
-rw-r--r--  hw/pci-bridge/cxl_upstream.c    216
-rw-r--r--  hw/pci-bridge/meson.build         2
-rw-r--r--  hw/virtio/trace-events            1
-rw-r--r--  hw/virtio/vhost-user.c            2
-rw-r--r--  hw/virtio/vhost.c                 4
-rw-r--r--  hw/virtio/virtio-crypto.c       323
-rw-r--r--  hw/virtio/virtio-iommu.c        135
10 files changed, 899 insertions, 79 deletions
diff --git a/hw/acpi/erst.c b/hw/acpi/erst.c
index de509c2b48..df856b2669 100644
--- a/hw/acpi/erst.c
+++ b/hw/acpi/erst.c
@@ -440,6 +440,7 @@ static void check_erst_backend_storage(ERSTDeviceState *s, Error **errp)
(record_size >= 4096) /* PAGE_SIZE */
)) {
error_setg(errp, "ERST record_size %u is invalid", record_size);
+ return;
}
/* Validity check header */
@@ -450,6 +451,7 @@ static void check_erst_backend_storage(ERSTDeviceState *s, Error **errp)
(le16_to_cpu(header->reserved) == 0)
)) {
error_setg(errp, "ERST backend storage header is invalid");
+ return;
}
/* Check storage_size against record_size */
@@ -457,6 +459,7 @@ static void check_erst_backend_storage(ERSTDeviceState *s, Error **errp)
(record_size > s->storage_size)) {
error_setg(errp, "ACPI ERST requires storage size be multiple of "
"record size (%uKiB)", record_size);
+ return;
}
/* Compute offset of first and last record storage slot */
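For readers less familiar with the QEMU error API used above: error_setg() only records the error in *errp, it does not return from the caller, so each failed validation also needs an explicit return to stop the function from acting on the invalid value, which is what the three added return statements do. A minimal standalone sketch of the idiom, with a hypothetical function name rather than the actual ERST code:

    #include "qemu/osdep.h"
    #include "qapi/error.h"

    static void check_config(uint32_t record_size, Error **errp)
    {
        if (record_size < 4096) {
            error_setg(errp, "record_size %u is invalid", record_size);
            return; /* without this, later checks would run on the bad value */
        }
        /* further validation only runs once record_size is known to be sane */
    }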
diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c
index efa14908d8..483d8eb13f 100644
--- a/hw/cxl/cxl-host.c
+++ b/hw/cxl/cxl-host.c
@@ -129,8 +129,9 @@ static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr,
static PCIDevice *cxl_cfmws_find_device(CXLFixedWindow *fw, hwaddr addr)
{
- CXLComponentState *hb_cstate;
+ CXLComponentState *hb_cstate, *usp_cstate;
PCIHostState *hb;
+ CXLUpstreamPort *usp;
int rb_index;
uint32_t *cache_mem;
uint8_t target;
@@ -164,8 +165,46 @@ static PCIDevice *cxl_cfmws_find_device(CXLFixedWindow *fw, hwaddr addr)
}
d = pci_bridge_get_sec_bus(PCI_BRIDGE(rp))->devices[0];
+ if (!d) {
+ return NULL;
+ }
+
+ if (object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) {
+ return d;
+ }
+
+ /*
+ * Could also be a switch. Note only one level of switching currently
+ * supported.
+ */
+ if (!object_dynamic_cast(OBJECT(d), TYPE_CXL_USP)) {
+ return NULL;
+ }
+ usp = CXL_USP(d);
+
+ usp_cstate = cxl_usp_to_cstate(usp);
+ if (!usp_cstate) {
+ return NULL;
+ }
+
+ cache_mem = usp_cstate->crb.cache_mem_registers;
+
+ target_found = cxl_hdm_find_target(cache_mem, addr, &target);
+ if (!target_found) {
+ return NULL;
+ }
+
+ d = pcie_find_port_by_pn(&PCI_BRIDGE(d)->sec_bus, target);
+ if (!d) {
+ return NULL;
+ }
+
+ d = pci_bridge_get_sec_bus(PCI_BRIDGE(d))->devices[0];
+ if (!d) {
+ return NULL;
+ }
- if (!d || !object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) {
+ if (!object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) {
return NULL;
}
diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c
new file mode 100644
index 0000000000..a361e519d0
--- /dev/null
+++ b/hw/pci-bridge/cxl_downstream.c
@@ -0,0 +1,249 @@
+/*
+ * Emulated CXL Switch Downstream Port
+ *
+ * Copyright (c) 2022 Huawei Technologies.
+ *
+ * Based on xio3130_downstream.c
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/pcie.h"
+#include "hw/pci/pcie_port.h"
+#include "qapi/error.h"
+
+typedef struct CXLDownstreamPort {
+ /*< private >*/
+ PCIESlot parent_obj;
+
+ /*< public >*/
+ CXLComponentState cxl_cstate;
+} CXLDownstreamPort;
+
+#define TYPE_CXL_DSP "cxl-downstream"
+DECLARE_INSTANCE_CHECKER(CXLDownstreamPort, CXL_DSP, TYPE_CXL_DSP)
+
+#define CXL_DOWNSTREAM_PORT_MSI_OFFSET 0x70
+#define CXL_DOWNSTREAM_PORT_MSI_NR_VECTOR 1
+#define CXL_DOWNSTREAM_PORT_EXP_OFFSET 0x90
+#define CXL_DOWNSTREAM_PORT_AER_OFFSET 0x100
+#define CXL_DOWNSTREAM_PORT_DVSEC_OFFSET \
+ (CXL_DOWNSTREAM_PORT_AER_OFFSET + PCI_ERR_SIZEOF)
+
+static void latch_registers(CXLDownstreamPort *dsp)
+{
+ uint32_t *reg_state = dsp->cxl_cstate.crb.cache_mem_registers;
+ uint32_t *write_msk = dsp->cxl_cstate.crb.cache_mem_regs_write_mask;
+
+ cxl_component_register_init_common(reg_state, write_msk,
+ CXL2_DOWNSTREAM_PORT);
+}
+
+/* TODO: Look at sharing this code across all CXL port types */
+static void cxl_dsp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
+ uint32_t val, int len)
+{
+ CXLDownstreamPort *dsp = CXL_DSP(dev);
+ CXLComponentState *cxl_cstate = &dsp->cxl_cstate;
+
+ if (range_contains(&cxl_cstate->dvsecs[EXTENSIONS_PORT_DVSEC], addr)) {
+ uint8_t *reg = &dev->config[addr];
+ addr -= cxl_cstate->dvsecs[EXTENSIONS_PORT_DVSEC].lob;
+ if (addr == PORT_CONTROL_OFFSET) {
+ if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) {
+ /* unmask SBR */
+ qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n");
+ }
+ if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) {
+ /* Alt Memory & ID Space Enable */
+ qemu_log_mask(LOG_UNIMP,
+ "Alt Memory & ID space is not supported\n");
+
+ }
+ }
+ }
+}
+
+static void cxl_dsp_config_write(PCIDevice *d, uint32_t address,
+ uint32_t val, int len)
+{
+ uint16_t slt_ctl, slt_sta;
+
+ pcie_cap_slot_get(d, &slt_ctl, &slt_sta);
+ pci_bridge_write_config(d, address, val, len);
+ pcie_cap_flr_write_config(d, address, val, len);
+ pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len);
+ pcie_aer_write_config(d, address, val, len);
+
+ cxl_dsp_dvsec_write_config(d, address, val, len);
+}
+
+static void cxl_dsp_reset(DeviceState *qdev)
+{
+ PCIDevice *d = PCI_DEVICE(qdev);
+ CXLDownstreamPort *dsp = CXL_DSP(qdev);
+
+ pcie_cap_deverr_reset(d);
+ pcie_cap_slot_reset(d);
+ pcie_cap_arifwd_reset(d);
+ pci_bridge_reset(qdev);
+
+ latch_registers(dsp);
+}
+
+static void build_dvsecs(CXLComponentState *cxl)
+{
+ uint8_t *dvsec;
+
+ dvsec = (uint8_t *)&(CXLDVSECPortExtensions){ 0 };
+ cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
+ EXTENSIONS_PORT_DVSEC_LENGTH,
+ EXTENSIONS_PORT_DVSEC,
+ EXTENSIONS_PORT_DVSEC_REVID, dvsec);
+
+ dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
+ .cap = 0x27, /* Cache, IO, Mem, non-MLD */
+ .ctrl = 0x02, /* IO always enabled */
+ .status = 0x26, /* same */
+ .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
+ };
+ cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
+ PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
+ PCIE_FLEXBUS_PORT_DVSEC,
+ PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
+
+ dvsec = (uint8_t *)&(CXLDVSECPortGPF){
+ .rsvd = 0,
+ .phase1_ctrl = 1, /* 1μs timeout */
+ .phase2_ctrl = 1, /* 1μs timeout */
+ };
+ cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
+ GPF_PORT_DVSEC_LENGTH, GPF_PORT_DVSEC,
+ GPF_PORT_DVSEC_REVID, dvsec);
+
+ dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
+ .rsvd = 0,
+ .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
+ .reg0_base_hi = 0,
+ };
+ cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
+ REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
+ REG_LOC_DVSEC_REVID, dvsec);
+}
+
+static void cxl_dsp_realize(PCIDevice *d, Error **errp)
+{
+ PCIEPort *p = PCIE_PORT(d);
+ PCIESlot *s = PCIE_SLOT(d);
+ CXLDownstreamPort *dsp = CXL_DSP(d);
+ CXLComponentState *cxl_cstate = &dsp->cxl_cstate;
+ ComponentRegisters *cregs = &cxl_cstate->crb;
+ MemoryRegion *component_bar = &cregs->component_registers;
+ int rc;
+
+ pci_bridge_initfn(d, TYPE_PCIE_BUS);
+ pcie_port_init_reg(d);
+
+ rc = msi_init(d, CXL_DOWNSTREAM_PORT_MSI_OFFSET,
+ CXL_DOWNSTREAM_PORT_MSI_NR_VECTOR,
+ true, true, errp);
+ if (rc) {
+ assert(rc == -ENOTSUP);
+ goto err_bridge;
+ }
+
+ rc = pcie_cap_init(d, CXL_DOWNSTREAM_PORT_EXP_OFFSET,
+ PCI_EXP_TYPE_DOWNSTREAM, p->port,
+ errp);
+ if (rc < 0) {
+ goto err_msi;
+ }
+
+ pcie_cap_flr_init(d);
+ pcie_cap_deverr_init(d);
+ pcie_cap_slot_init(d, s);
+ pcie_cap_arifwd_init(d);
+
+ pcie_chassis_create(s->chassis);
+ rc = pcie_chassis_add_slot(s);
+ if (rc < 0) {
+ error_setg(errp, "Can't add chassis slot, error %d", rc);
+ goto err_pcie_cap;
+ }
+
+ rc = pcie_aer_init(d, PCI_ERR_VER, CXL_DOWNSTREAM_PORT_AER_OFFSET,
+ PCI_ERR_SIZEOF, errp);
+ if (rc < 0) {
+ goto err_chassis;
+ }
+
+ cxl_cstate->dvsec_offset = CXL_DOWNSTREAM_PORT_DVSEC_OFFSET;
+ cxl_cstate->pdev = d;
+ build_dvsecs(cxl_cstate);
+ cxl_component_register_block_init(OBJECT(d), cxl_cstate, TYPE_CXL_DSP);
+ pci_register_bar(d, CXL_COMPONENT_REG_BAR_IDX,
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64,
+ component_bar);
+
+ return;
+
+ err_chassis:
+ pcie_chassis_del_slot(s);
+ err_pcie_cap:
+ pcie_cap_exit(d);
+ err_msi:
+ msi_uninit(d);
+ err_bridge:
+ pci_bridge_exitfn(d);
+}
+
+static void cxl_dsp_exitfn(PCIDevice *d)
+{
+ PCIESlot *s = PCIE_SLOT(d);
+
+ pcie_aer_exit(d);
+ pcie_chassis_del_slot(s);
+ pcie_cap_exit(d);
+ msi_uninit(d);
+ pci_bridge_exitfn(d);
+}
+
+static void cxl_dsp_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);
+
+ k->is_bridge = true;
+ k->config_write = cxl_dsp_config_write;
+ k->realize = cxl_dsp_realize;
+ k->exit = cxl_dsp_exitfn;
+ k->vendor_id = 0x19e5; /* Huawei */
+ k->device_id = 0xa129; /* Emulated CXL Switch Downstream Port */
+ k->revision = 0;
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ dc->desc = "CXL Switch Downstream Port";
+ dc->reset = cxl_dsp_reset;
+}
+
+static const TypeInfo cxl_dsp_info = {
+ .name = TYPE_CXL_DSP,
+ .instance_size = sizeof(CXLDownstreamPort),
+ .parent = TYPE_PCIE_SLOT,
+ .class_init = cxl_dsp_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { INTERFACE_CXL_DEVICE },
+ { }
+ },
+};
+
+static void cxl_dsp_register_type(void)
+{
+ type_register_static(&cxl_dsp_info);
+}
+
+type_init(cxl_dsp_register_type);
diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c
new file mode 100644
index 0000000000..a83a3e81e4
--- /dev/null
+++ b/hw/pci-bridge/cxl_upstream.c
@@ -0,0 +1,216 @@
+/*
+ * Emulated CXL Switch Upstream Port
+ *
+ * Copyright (c) 2022 Huawei Technologies.
+ *
+ * Based on xio3130_upstream.c
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/pcie.h"
+#include "hw/pci/pcie_port.h"
+
+#define CXL_UPSTREAM_PORT_MSI_NR_VECTOR 1
+
+#define CXL_UPSTREAM_PORT_MSI_OFFSET 0x70
+#define CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET 0x90
+#define CXL_UPSTREAM_PORT_AER_OFFSET 0x100
+#define CXL_UPSTREAM_PORT_DVSEC_OFFSET \
+ (CXL_UPSTREAM_PORT_AER_OFFSET + PCI_ERR_SIZEOF)
+
+typedef struct CXLUpstreamPort {
+ /*< private >*/
+ PCIEPort parent_obj;
+
+ /*< public >*/
+ CXLComponentState cxl_cstate;
+} CXLUpstreamPort;
+
+CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp)
+{
+ return &usp->cxl_cstate;
+}
+
+static void cxl_usp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
+ uint32_t val, int len)
+{
+ CXLUpstreamPort *usp = CXL_USP(dev);
+
+ if (range_contains(&usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC], addr)) {
+ uint8_t *reg = &dev->config[addr];
+ addr -= usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC].lob;
+ if (addr == PORT_CONTROL_OFFSET) {
+ if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) {
+ /* unmask SBR */
+ qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n");
+ }
+ if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) {
+ /* Alt Memory & ID Space Enable */
+ qemu_log_mask(LOG_UNIMP,
+ "Alt Memory & ID space is not supported\n");
+ }
+ }
+ }
+}
+
+static void cxl_usp_write_config(PCIDevice *d, uint32_t address,
+ uint32_t val, int len)
+{
+ pci_bridge_write_config(d, address, val, len);
+ pcie_cap_flr_write_config(d, address, val, len);
+ pcie_aer_write_config(d, address, val, len);
+
+ cxl_usp_dvsec_write_config(d, address, val, len);
+}
+
+static void latch_registers(CXLUpstreamPort *usp)
+{
+ uint32_t *reg_state = usp->cxl_cstate.crb.cache_mem_registers;
+ uint32_t *write_msk = usp->cxl_cstate.crb.cache_mem_regs_write_mask;
+
+ cxl_component_register_init_common(reg_state, write_msk,
+ CXL2_UPSTREAM_PORT);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 8);
+}
+
+static void cxl_usp_reset(DeviceState *qdev)
+{
+ PCIDevice *d = PCI_DEVICE(qdev);
+ CXLUpstreamPort *usp = CXL_USP(qdev);
+
+ pci_bridge_reset(qdev);
+ pcie_cap_deverr_reset(d);
+ latch_registers(usp);
+}
+
+static void build_dvsecs(CXLComponentState *cxl)
+{
+ uint8_t *dvsec;
+
+ dvsec = (uint8_t *)&(CXLDVSECPortExtensions){
+ .status = 0x1, /* Port Power Management Init Complete */
+ };
+ cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
+ EXTENSIONS_PORT_DVSEC_LENGTH,
+ EXTENSIONS_PORT_DVSEC,
+ EXTENSIONS_PORT_DVSEC_REVID, dvsec);
+ dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
+ .cap = 0x27, /* Cache, IO, Mem, non-MLD */
+ .ctrl = 0x27, /* Cache, IO, Mem */
+ .status = 0x26, /* same */
+ .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
+ };
+ cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
+ PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
+ PCIE_FLEXBUS_PORT_DVSEC,
+ PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
+
+ dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
+ .rsvd = 0,
+ .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
+ .reg0_base_hi = 0,
+ };
+ cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
+ REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
+ REG_LOC_DVSEC_REVID, dvsec);
+}
+
+static void cxl_usp_realize(PCIDevice *d, Error **errp)
+{
+ PCIEPort *p = PCIE_PORT(d);
+ CXLUpstreamPort *usp = CXL_USP(d);
+ CXLComponentState *cxl_cstate = &usp->cxl_cstate;
+ ComponentRegisters *cregs = &cxl_cstate->crb;
+ MemoryRegion *component_bar = &cregs->component_registers;
+ int rc;
+
+ pci_bridge_initfn(d, TYPE_PCIE_BUS);
+ pcie_port_init_reg(d);
+
+ rc = msi_init(d, CXL_UPSTREAM_PORT_MSI_OFFSET,
+ CXL_UPSTREAM_PORT_MSI_NR_VECTOR, true, true, errp);
+ if (rc) {
+ assert(rc == -ENOTSUP);
+ goto err_bridge;
+ }
+
+ rc = pcie_cap_init(d, CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET,
+ PCI_EXP_TYPE_UPSTREAM, p->port, errp);
+ if (rc < 0) {
+ goto err_msi;
+ }
+
+ pcie_cap_flr_init(d);
+ pcie_cap_deverr_init(d);
+ rc = pcie_aer_init(d, PCI_ERR_VER, CXL_UPSTREAM_PORT_AER_OFFSET,
+ PCI_ERR_SIZEOF, errp);
+ if (rc) {
+ goto err_cap;
+ }
+
+ cxl_cstate->dvsec_offset = CXL_UPSTREAM_PORT_DVSEC_OFFSET;
+ cxl_cstate->pdev = d;
+ build_dvsecs(cxl_cstate);
+ cxl_component_register_block_init(OBJECT(d), cxl_cstate, TYPE_CXL_USP);
+ pci_register_bar(d, CXL_COMPONENT_REG_BAR_IDX,
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64,
+ component_bar);
+
+ return;
+
+err_cap:
+ pcie_cap_exit(d);
+err_msi:
+ msi_uninit(d);
+err_bridge:
+ pci_bridge_exitfn(d);
+}
+
+static void cxl_usp_exitfn(PCIDevice *d)
+{
+ pcie_aer_exit(d);
+ pcie_cap_exit(d);
+ msi_uninit(d);
+ pci_bridge_exitfn(d);
+}
+
+static void cxl_upstream_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);
+
+ k->is_bridge = true;
+ k->config_write = cxl_usp_write_config;
+ k->realize = cxl_usp_realize;
+ k->exit = cxl_usp_exitfn;
+ k->vendor_id = 0x19e5; /* Huawei */
+ k->device_id = 0xa128; /* Emulated CXL Switch Upstream Port */
+ k->revision = 0;
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ dc->desc = "CXL Switch Upstream Port";
+ dc->reset = cxl_usp_reset;
+}
+
+static const TypeInfo cxl_usp_info = {
+ .name = TYPE_CXL_USP,
+ .parent = TYPE_PCIE_PORT,
+ .instance_size = sizeof(CXLUpstreamPort),
+ .class_init = cxl_upstream_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { INTERFACE_CXL_DEVICE },
+ { }
+ },
+};
+
+static void cxl_usp_register_type(void)
+{
+ type_register_static(&cxl_usp_info);
+}
+
+type_init(cxl_usp_register_type);
diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build
index fdbe2e07c5..243ceeda50 100644
--- a/hw/pci-bridge/meson.build
+++ b/hw/pci-bridge/meson.build
@@ -6,7 +6,7 @@ pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pci
pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c'),
if_false: files('pci_expander_bridge_stubs.c'))
pci_ss.add(when: 'CONFIG_XIO3130', if_true: files('xio3130_upstream.c', 'xio3130_downstream.c'))
-pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c'))
+pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c', 'cxl_upstream.c', 'cxl_downstream.c'))
# NewWorld PowerMac
pci_ss.add(when: 'CONFIG_DEC_PCI', if_true: files('dec.c'))
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index ab8e095b73..20af2e7ebd 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -124,6 +124,7 @@ virtio_iommu_remap(const char *name, uint64_t virt_start, uint64_t virt_end, uin
virtio_iommu_set_page_size_mask(const char *name, uint64_t old, uint64_t new) "mr=%s old_mask=0x%"PRIx64" new_mask=0x%"PRIx64
virtio_iommu_notify_flag_add(const char *name) "add notifier to mr %s"
virtio_iommu_notify_flag_del(const char *name) "del notifier from mr %s"
+virtio_iommu_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"
# virtio-mem.c
virtio_mem_send_response(uint16_t type) "type=%" PRIu16
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 0594178224..4b9be26e84 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -1525,7 +1525,7 @@ static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
{
VhostUserHostNotifier *n = NULL;
if (idx >= u->notifiers->len) {
- g_ptr_array_set_size(u->notifiers, idx);
+ g_ptr_array_set_size(u->notifiers, idx + 1);
}
n = g_ptr_array_index(u->notifiers, idx);
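The one-character change above matters because g_ptr_array_set_size(array, n) makes only indices 0..n-1 valid, so growing the array to idx still leaves g_ptr_array_index(array, idx) reading one slot past the end. A standalone GLib sketch (illustration only, not QEMU code) of the corrected sizing:

    #include <glib.h>

    int main(void)
    {
        GPtrArray *notifiers = g_ptr_array_new();
        guint idx = 3;

        if (idx >= notifiers->len) {
            /* length must be idx + 1 so that index idx itself is in range */
            g_ptr_array_set_size(notifiers, idx + 1);
        }
        /* slots added by g_ptr_array_set_size() are initialised to NULL */
        g_assert(g_ptr_array_index(notifiers, idx) == NULL);

        g_ptr_array_free(notifiers, TRUE);
        return 0;
    }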
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index dd3263df56..6c41fa13e3 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -886,6 +886,10 @@ static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
err_vq:
for (; i >= 0; --i) {
idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
+ addr = virtio_queue_get_desc_addr(dev->vdev, idx);
+ if (!addr) {
+ continue;
+ }
vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
dev->log_enabled);
}
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index c3829e7498..c1243c3f93 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -83,7 +83,8 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
struct iovec *iov, unsigned int out_num)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
- CryptoDevBackendSymSessionInfo info;
+ CryptoDevBackendSessionInfo info;
+ CryptoDevBackendSymSessionInfo *sym_info;
int64_t session_id;
int queue_index;
uint32_t op_type;
@@ -92,11 +93,13 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
memset(&info, 0, sizeof(info));
op_type = ldl_le_p(&sess_req->op_type);
- info.op_type = op_type;
info.op_code = opcode;
+ sym_info = &info.u.sym_sess_info;
+ sym_info->op_type = op_type;
+
if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
- ret = virtio_crypto_cipher_session_helper(vdev, &info,
+ ret = virtio_crypto_cipher_session_helper(vdev, sym_info,
&sess_req->u.cipher.para,
&iov, &out_num);
if (ret < 0) {
@@ -105,47 +108,47 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
} else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
size_t s;
/* cipher part */
- ret = virtio_crypto_cipher_session_helper(vdev, &info,
+ ret = virtio_crypto_cipher_session_helper(vdev, sym_info,
&sess_req->u.chain.para.cipher_param,
&iov, &out_num);
if (ret < 0) {
goto err;
}
/* hash part */
- info.alg_chain_order = ldl_le_p(
+ sym_info->alg_chain_order = ldl_le_p(
&sess_req->u.chain.para.alg_chain_order);
- info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
- info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
- if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
- info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
- info.auth_key_len = ldl_le_p(
+ sym_info->add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
+ sym_info->hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
+ if (sym_info->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
+ sym_info->hash_alg =
+ ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
+ sym_info->auth_key_len = ldl_le_p(
&sess_req->u.chain.para.u.mac_param.auth_key_len);
- info.hash_result_len = ldl_le_p(
+ sym_info->hash_result_len = ldl_le_p(
&sess_req->u.chain.para.u.mac_param.hash_result_len);
- if (info.auth_key_len > vcrypto->conf.max_auth_key_len) {
+ if (sym_info->auth_key_len > vcrypto->conf.max_auth_key_len) {
error_report("virtio-crypto length of auth key is too big: %u",
- info.auth_key_len);
+ sym_info->auth_key_len);
ret = -VIRTIO_CRYPTO_ERR;
goto err;
}
/* get auth key */
- if (info.auth_key_len > 0) {
- DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len);
- info.auth_key = g_malloc(info.auth_key_len);
- s = iov_to_buf(iov, out_num, 0, info.auth_key,
- info.auth_key_len);
- if (unlikely(s != info.auth_key_len)) {
+ if (sym_info->auth_key_len > 0) {
+ sym_info->auth_key = g_malloc(sym_info->auth_key_len);
+ s = iov_to_buf(iov, out_num, 0, sym_info->auth_key,
+ sym_info->auth_key_len);
+ if (unlikely(s != sym_info->auth_key_len)) {
virtio_error(vdev,
"virtio-crypto authenticated key incorrect");
ret = -EFAULT;
goto err;
}
- iov_discard_front(&iov, &out_num, info.auth_key_len);
+ iov_discard_front(&iov, &out_num, sym_info->auth_key_len);
}
- } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
- info.hash_alg = ldl_le_p(
+ } else if (sym_info->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
+ sym_info->hash_alg = ldl_le_p(
&sess_req->u.chain.para.u.hash_param.algo);
- info.hash_result_len = ldl_le_p(
+ sym_info->hash_result_len = ldl_le_p(
&sess_req->u.chain.para.u.hash_param.hash_result_len);
} else {
/* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
@@ -161,13 +164,10 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
}
queue_index = virtio_crypto_vq2q(queue_id);
- session_id = cryptodev_backend_sym_create_session(
+ session_id = cryptodev_backend_create_session(
vcrypto->cryptodev,
&info, queue_index, &local_err);
if (session_id >= 0) {
- DPRINTF("create session_id=%" PRIu64 " successfully\n",
- session_id);
-
ret = session_id;
} else {
if (local_err) {
@@ -177,11 +177,78 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
}
err:
- g_free(info.cipher_key);
- g_free(info.auth_key);
+ g_free(sym_info->cipher_key);
+ g_free(sym_info->auth_key);
return ret;
}
+static int64_t
+virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_akcipher_create_session_req *sess_req,
+ uint32_t queue_id, uint32_t opcode,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ CryptoDevBackendSessionInfo info = {0};
+ CryptoDevBackendAsymSessionInfo *asym_info;
+ int64_t session_id;
+ int queue_index;
+ uint32_t algo, keytype, keylen;
+ g_autofree uint8_t *key = NULL;
+ Error *local_err = NULL;
+
+ algo = ldl_le_p(&sess_req->para.algo);
+ keytype = ldl_le_p(&sess_req->para.keytype);
+ keylen = ldl_le_p(&sess_req->para.keylen);
+
+ if ((keytype != VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC)
+ && (keytype != VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE)) {
+ error_report("unsupported asym keytype: %d", keytype);
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+
+ if (keylen) {
+ key = g_malloc(keylen);
+ if (iov_to_buf(iov, out_num, 0, key, keylen) != keylen) {
+ virtio_error(vdev, "virtio-crypto asym key incorrect");
+ return -EFAULT;
+ }
+ iov_discard_front(&iov, &out_num, keylen);
+ }
+
+ info.op_code = opcode;
+ asym_info = &info.u.asym_sess_info;
+ asym_info->algo = algo;
+ asym_info->keytype = keytype;
+ asym_info->keylen = keylen;
+ asym_info->key = key;
+ switch (asym_info->algo) {
+ case VIRTIO_CRYPTO_AKCIPHER_RSA:
+ asym_info->u.rsa.padding_algo =
+ ldl_le_p(&sess_req->para.u.rsa.padding_algo);
+ asym_info->u.rsa.hash_algo =
+ ldl_le_p(&sess_req->para.u.rsa.hash_algo);
+ break;
+
+ /* TODO DSA&ECDSA handling */
+
+ default:
+ return -VIRTIO_CRYPTO_ERR;
+ }
+
+ queue_index = virtio_crypto_vq2q(queue_id);
+ session_id = cryptodev_backend_create_session(vcrypto->cryptodev, &info,
+ queue_index, &local_err);
+ if (session_id < 0) {
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ return -VIRTIO_CRYPTO_ERR;
+ }
+
+ return session_id;
+}
+
static uint8_t
virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
struct virtio_crypto_destroy_session_req *close_sess_req,
@@ -195,7 +262,7 @@ virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
session_id = ldq_le_p(&close_sess_req->session_id);
DPRINTF("close session, id=%" PRIu64 "\n", session_id);
- ret = cryptodev_backend_sym_close_session(
+ ret = cryptodev_backend_close_session(
vcrypto->cryptodev, session_id, queue_id, &local_err);
if (ret == 0) {
status = VIRTIO_CRYPTO_OK;
@@ -260,13 +327,22 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
opcode = ldl_le_p(&ctrl.header.opcode);
queue_id = ldl_le_p(&ctrl.header.queue_id);
+ memset(&input, 0, sizeof(input));
switch (opcode) {
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
- memset(&input, 0, sizeof(input));
session_id = virtio_crypto_create_sym_session(vcrypto,
&ctrl.u.sym_create_session,
queue_id, opcode,
out_iov, out_num);
+ goto check_session;
+
+ case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
+ session_id = virtio_crypto_create_asym_session(vcrypto,
+ &ctrl.u.akcipher_create_session,
+ queue_id, opcode,
+ out_iov, out_num);
+
+check_session:
/* Serious errors, need to reset virtio crypto device */
if (session_id == -EFAULT) {
virtqueue_detach_element(vq, elem, 0);
@@ -290,10 +366,12 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
virtqueue_push(vq, elem, sizeof(input));
virtio_notify(vdev, vq);
break;
+
case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
+ case VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION:
status = virtio_crypto_handle_close_session(vcrypto,
&ctrl.u.destroy_session, queue_id);
/* The status only occupy one byte, we can directly use it */
@@ -311,7 +389,6 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
default:
error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
- memset(&input, 0, sizeof(input));
stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
if (unlikely(s != sizeof(input))) {
@@ -339,28 +416,39 @@ static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
req->in_num = 0;
req->in_len = 0;
req->flags = CRYPTODEV_BACKEND_ALG__MAX;
- req->u.sym_op_info = NULL;
+ memset(&req->op_info, 0x00, sizeof(req->op_info));
}
static void virtio_crypto_free_request(VirtIOCryptoReq *req)
{
- if (req) {
- if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
- size_t max_len;
- CryptoDevBackendSymOpInfo *op_info = req->u.sym_op_info;
-
- max_len = op_info->iv_len +
- op_info->aad_len +
- op_info->src_len +
- op_info->dst_len +
- op_info->digest_result_len;
-
- /* Zeroize and free request data structure */
- memset(op_info, 0, sizeof(*op_info) + max_len);
+ if (!req) {
+ return;
+ }
+
+ if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ size_t max_len;
+ CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info;
+
+ max_len = op_info->iv_len +
+ op_info->aad_len +
+ op_info->src_len +
+ op_info->dst_len +
+ op_info->digest_result_len;
+
+ /* Zeroize and free request data structure */
+ memset(op_info, 0, sizeof(*op_info) + max_len);
+ g_free(op_info);
+ } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
+ CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info;
+ if (op_info) {
+ g_free(op_info->src);
+ g_free(op_info->dst);
+ memset(op_info, 0, sizeof(*op_info));
g_free(op_info);
}
- g_free(req);
}
+
+ g_free(req);
}
static void
@@ -397,6 +485,35 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
}
}
+static void
+virtio_crypto_akcipher_input_data_helper(VirtIODevice *vdev,
+ VirtIOCryptoReq *req, int32_t status,
+ CryptoDevBackendAsymOpInfo *asym_op_info)
+{
+ size_t s, len;
+
+ if (status != VIRTIO_CRYPTO_OK) {
+ return;
+ }
+
+ len = asym_op_info->dst_len;
+ if (!len) {
+ return;
+ }
+
+ s = iov_from_buf(req->in_iov, req->in_num, 0, asym_op_info->dst, len);
+ if (s != len) {
+ virtio_error(vdev, "virtio-crypto asym dest data incorrect");
+ return;
+ }
+
+ iov_discard_front(&req->in_iov, &req->in_num, len);
+
+ /* For akcipher, dst_len may be changed after operation */
+ req->in_len = sizeof(struct virtio_crypto_inhdr) + asym_op_info->dst_len;
+}
+
+
static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
{
VirtIOCrypto *vcrypto = req->vcrypto;
@@ -404,7 +521,10 @@ static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
virtio_crypto_sym_input_data_helper(vdev, req, status,
- req->u.sym_op_info);
+ req->op_info.u.sym_op_info);
+ } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
+ virtio_crypto_akcipher_input_data_helper(vdev, req, status,
+ req->op_info.u.asym_op_info);
}
stb_p(&req->in->status, status);
virtqueue_push(req->vq, &req->elem, req->in_len);
@@ -543,42 +663,101 @@ err:
static int
virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
struct virtio_crypto_sym_data_req *req,
- CryptoDevBackendSymOpInfo **sym_op_info,
+ CryptoDevBackendOpInfo *op_info,
struct iovec *iov, unsigned int out_num)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ CryptoDevBackendSymOpInfo *sym_op_info;
uint32_t op_type;
- CryptoDevBackendSymOpInfo *op_info;
op_type = ldl_le_p(&req->op_type);
-
if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
- op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
+ sym_op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
NULL, iov, out_num);
- if (!op_info) {
+ if (!sym_op_info) {
return -EFAULT;
}
- op_info->op_type = op_type;
} else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
- op_info = virtio_crypto_sym_op_helper(vdev, NULL,
+ sym_op_info = virtio_crypto_sym_op_helper(vdev, NULL,
&req->u.chain.para,
iov, out_num);
- if (!op_info) {
+ if (!sym_op_info) {
return -EFAULT;
}
- op_info->op_type = op_type;
} else {
/* VIRTIO_CRYPTO_SYM_OP_NONE */
error_report("virtio-crypto unsupported cipher type");
return -VIRTIO_CRYPTO_NOTSUPP;
}
- *sym_op_info = op_info;
+ sym_op_info->op_type = op_type;
+ op_info->u.sym_op_info = sym_op_info;
return 0;
}
static int
+virtio_crypto_handle_asym_req(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_akcipher_data_req *req,
+ CryptoDevBackendOpInfo *op_info,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ CryptoDevBackendAsymOpInfo *asym_op_info;
+ uint32_t src_len;
+ uint32_t dst_len;
+ uint32_t len;
+ uint8_t *src = NULL;
+ uint8_t *dst = NULL;
+
+ asym_op_info = g_malloc0(sizeof(CryptoDevBackendAsymOpInfo));
+ src_len = ldl_le_p(&req->para.src_data_len);
+ dst_len = ldl_le_p(&req->para.dst_data_len);
+
+ if (src_len > 0) {
+ src = g_malloc0(src_len);
+ len = iov_to_buf(iov, out_num, 0, src, src_len);
+ if (unlikely(len != src_len)) {
+            virtio_error(vdev, "virtio-crypto asym src data incorrect: "
+                         "expected %u, actual %u", src_len, len);
+ goto err;
+ }
+
+ iov_discard_front(&iov, &out_num, src_len);
+ }
+
+ if (dst_len > 0) {
+ dst = g_malloc0(dst_len);
+
+ if (op_info->op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
+ len = iov_to_buf(iov, out_num, 0, dst, dst_len);
+ if (unlikely(len != dst_len)) {
+                virtio_error(vdev, "virtio-crypto asym dst data incorrect: "
+                             "expected %u, actual %u", dst_len, len);
+ goto err;
+ }
+
+ iov_discard_front(&iov, &out_num, dst_len);
+ }
+ }
+
+ asym_op_info->src_len = src_len;
+ asym_op_info->dst_len = dst_len;
+ asym_op_info->src = src;
+ asym_op_info->dst = dst;
+ op_info->u.asym_op_info = asym_op_info;
+
+ return 0;
+
+ err:
+ g_free(asym_op_info);
+ g_free(src);
+ g_free(dst);
+
+ return -EFAULT;
+}
+
+static int
virtio_crypto_handle_request(VirtIOCryptoReq *request)
{
VirtIOCrypto *vcrypto = request->vcrypto;
@@ -595,8 +774,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
unsigned out_num;
uint32_t opcode;
uint8_t status = VIRTIO_CRYPTO_ERR;
- uint64_t session_id;
- CryptoDevBackendSymOpInfo *sym_op_info = NULL;
+ CryptoDevBackendOpInfo *op_info = &request->op_info;
Error *local_err = NULL;
if (elem->out_num < 1 || elem->in_num < 1) {
@@ -639,15 +817,28 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
request->in_iov = in_iov;
opcode = ldl_le_p(&req.header.opcode);
- session_id = ldq_le_p(&req.header.session_id);
+ op_info->session_id = ldq_le_p(&req.header.session_id);
+ op_info->op_code = opcode;
switch (opcode) {
case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+ op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_SYM;
ret = virtio_crypto_handle_sym_req(vcrypto,
- &req.u.sym_req,
- &sym_op_info,
+ &req.u.sym_req, op_info,
+ out_iov, out_num);
+ goto check_result;
+
+ case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
+ case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
+ case VIRTIO_CRYPTO_AKCIPHER_SIGN:
+ case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
+ op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_ASYM;
+ ret = virtio_crypto_handle_asym_req(vcrypto,
+ &req.u.akcipher_req, op_info,
out_iov, out_num);
+
+check_result:
/* Serious errors, need to reset virtio crypto device */
if (ret == -EFAULT) {
return -1;
@@ -655,11 +846,8 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
virtio_crypto_free_request(request);
} else {
- sym_op_info->session_id = session_id;
/* Set request's parameter */
- request->flags = CRYPTODEV_BACKEND_ALG_SYM;
- request->u.sym_op_info = sym_op_info;
ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
request, queue_index, &local_err);
if (ret < 0) {
@@ -674,6 +862,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
virtio_crypto_free_request(request);
}
break;
+
case VIRTIO_CRYPTO_HASH:
case VIRTIO_CRYPTO_MAC:
case VIRTIO_CRYPTO_AEAD_ENCRYPT:
@@ -779,6 +968,7 @@ static void virtio_crypto_init_config(VirtIODevice *vdev)
vcrypto->conf.mac_algo_l = vcrypto->conf.cryptodev->conf.mac_algo_l;
vcrypto->conf.mac_algo_h = vcrypto->conf.cryptodev->conf.mac_algo_h;
vcrypto->conf.aead_algo = vcrypto->conf.cryptodev->conf.aead_algo;
+ vcrypto->conf.akcipher_algo = vcrypto->conf.cryptodev->conf.akcipher_algo;
vcrypto->conf.max_cipher_key_len =
vcrypto->conf.cryptodev->conf.max_cipher_key_len;
vcrypto->conf.max_auth_key_len =
@@ -891,6 +1081,7 @@ static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config)
stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len);
stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len);
stq_le_p(&crypto_cfg.max_size, c->conf.max_size);
+ stl_le_p(&crypto_cfg.akcipher_algo, c->conf.akcipher_algo);
memcpy(config, &crypto_cfg, c->config_size);
}
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 2597e166f9..7c122ab957 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -69,6 +69,77 @@ static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}
+static bool virtio_iommu_device_bypassed(IOMMUDevice *sdev)
+{
+ uint32_t sid;
+ bool bypassed;
+ VirtIOIOMMU *s = sdev->viommu;
+ VirtIOIOMMUEndpoint *ep;
+
+ sid = virtio_iommu_get_bdf(sdev);
+
+ qemu_rec_mutex_lock(&s->mutex);
+ /* need to check bypass before system reset */
+ if (!s->endpoints) {
+ bypassed = s->config.bypass;
+ goto unlock;
+ }
+
+ ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
+ if (!ep || !ep->domain) {
+ bypassed = s->config.bypass;
+ } else {
+ bypassed = ep->domain->bypass;
+ }
+
+unlock:
+ qemu_rec_mutex_unlock(&s->mutex);
+ return bypassed;
+}
+
+/* Return whether the device is using IOMMU translation. */
+static bool virtio_iommu_switch_address_space(IOMMUDevice *sdev)
+{
+ bool use_remapping;
+
+ assert(sdev);
+
+ use_remapping = !virtio_iommu_device_bypassed(sdev);
+
+ trace_virtio_iommu_switch_address_space(pci_bus_num(sdev->bus),
+ PCI_SLOT(sdev->devfn),
+ PCI_FUNC(sdev->devfn),
+ use_remapping);
+
+    /* Turn the old region off before turning the new one on */
+ if (use_remapping) {
+ memory_region_set_enabled(&sdev->bypass_mr, false);
+ memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), true);
+ } else {
+ memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), false);
+ memory_region_set_enabled(&sdev->bypass_mr, true);
+ }
+
+ return use_remapping;
+}
+
+static void virtio_iommu_switch_address_space_all(VirtIOIOMMU *s)
+{
+ GHashTableIter iter;
+ IOMMUPciBus *iommu_pci_bus;
+ int i;
+
+ g_hash_table_iter_init(&iter, s->as_by_busptr);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
+ for (i = 0; i < PCI_DEVFN_MAX; i++) {
+ if (!iommu_pci_bus->pbdev[i]) {
+ continue;
+ }
+ virtio_iommu_switch_address_space(iommu_pci_bus->pbdev[i]);
+ }
+ }
+}
+
/**
* The bus number is used for lookup when SID based operations occur.
* In that case we lazily populate the IOMMUPciBus array from the bus hash
@@ -213,6 +284,7 @@ static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value,
static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
{
VirtIOIOMMUDomain *domain = ep->domain;
+ IOMMUDevice *sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);
if (!ep->domain) {
return;
@@ -221,6 +293,7 @@ static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
ep->iommu_mr);
QLIST_REMOVE(ep, next);
ep->domain = NULL;
+ virtio_iommu_switch_address_space(sdev);
}
static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
@@ -323,12 +396,39 @@ static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
trace_virtio_iommu_init_iommu_mr(name);
+ memory_region_init(&sdev->root, OBJECT(s), name, UINT64_MAX);
+ address_space_init(&sdev->as, &sdev->root, TYPE_VIRTIO_IOMMU);
+
+    /*
+     * Build the IOMMU-disabled container with aliases to the
+     * shared MRs. Aliasing to a shared memory region helps the
+     * memory API detect identical FlatViews, so devices in bypass
+     * mode (either because the virtio-iommu driver is not
+     * configured or because of "iommu=pt") can share the same
+     * FlatView. This greatly reduces the total number of FlatViews
+     * in the system and makes the VM run faster.
+     */
+ memory_region_init_alias(&sdev->bypass_mr, OBJECT(s),
+ "system", get_system_memory(), 0,
+ memory_region_size(get_system_memory()));
+
memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
TYPE_VIRTIO_IOMMU_MEMORY_REGION,
OBJECT(s), name,
UINT64_MAX);
- address_space_init(&sdev->as,
- MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU);
+
+    /*
+     * Hook both containers under the root container; we switch
+     * between the iommu and bypass MRs by enabling/disabling the
+     * corresponding sub-containers.
+     */
+ memory_region_add_subregion_overlap(&sdev->root, 0,
+ MEMORY_REGION(&sdev->iommu_mr),
+ 0);
+ memory_region_add_subregion_overlap(&sdev->root, 0,
+ &sdev->bypass_mr, 0);
+
+ virtio_iommu_switch_address_space(sdev);
g_free(name);
}
return &sdev->as;
@@ -342,6 +442,7 @@ static int virtio_iommu_attach(VirtIOIOMMU *s,
uint32_t flags = le32_to_cpu(req->flags);
VirtIOIOMMUDomain *domain;
VirtIOIOMMUEndpoint *ep;
+ IOMMUDevice *sdev;
trace_virtio_iommu_attach(domain_id, ep_id);
@@ -375,6 +476,8 @@ static int virtio_iommu_attach(VirtIOIOMMU *s,
QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);
ep->domain = domain;
+ sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);
+ virtio_iommu_switch_address_space(sdev);
/* Replay domain mappings on the associated memory region */
g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb,
@@ -642,7 +745,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
tail.status = VIRTIO_IOMMU_S_DEVERR;
goto out;
}
- qemu_mutex_lock(&s->mutex);
+ qemu_rec_mutex_lock(&s->mutex);
switch (head.type) {
case VIRTIO_IOMMU_T_ATTACH:
tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
@@ -671,7 +774,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
default:
tail.status = VIRTIO_IOMMU_S_UNSUPP;
}
- qemu_mutex_unlock(&s->mutex);
+ qemu_rec_mutex_unlock(&s->mutex);
out:
sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
@@ -759,9 +862,13 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
sid = virtio_iommu_get_bdf(sdev);
trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
- qemu_mutex_lock(&s->mutex);
+ qemu_rec_mutex_lock(&s->mutex);
ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
+
+ if (bypass_allowed)
+ assert(ep && ep->domain && !ep->domain->bypass);
+
if (!ep) {
if (!bypass_allowed) {
error_report_once("%s sid=%d is not known!!", __func__, sid);
@@ -843,7 +950,7 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);
unlock:
- qemu_mutex_unlock(&s->mutex);
+ qemu_rec_mutex_unlock(&s->mutex);
return entry;
}
@@ -887,6 +994,7 @@ static void virtio_iommu_set_config(VirtIODevice *vdev,
return;
}
dev_config->bypass = in_config->bypass;
+ virtio_iommu_switch_address_space_all(dev);
}
trace_virtio_iommu_set_config(in_config->bypass);
@@ -931,7 +1039,7 @@ static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
sid = virtio_iommu_get_bdf(sdev);
- qemu_mutex_lock(&s->mutex);
+ qemu_rec_mutex_lock(&s->mutex);
if (!s->endpoints) {
goto unlock;
@@ -945,7 +1053,7 @@ static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr);
unlock:
- qemu_mutex_unlock(&s->mutex);
+ qemu_rec_mutex_unlock(&s->mutex);
}
static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
@@ -1026,6 +1134,8 @@ static void virtio_iommu_system_reset(void *opaque)
* system reset
*/
s->config.bypass = s->boot_bypass;
+ virtio_iommu_switch_address_space_all(s);
+
}
static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
@@ -1041,6 +1151,11 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
virtio_iommu_handle_command);
s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);
+ /*
+ * config.bypass is needed to get initial address space early, such as
+ * in vfio realize
+ */
+ s->config.bypass = s->boot_bypass;
s->config.page_size_mask = TARGET_PAGE_MASK;
s->config.input_range.end = UINT64_MAX;
s->config.domain_range.end = UINT32_MAX;
@@ -1056,7 +1171,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS_CONFIG);
- qemu_mutex_init(&s->mutex);
+ qemu_rec_mutex_init(&s->mutex);
s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);
@@ -1084,6 +1199,8 @@ static void virtio_iommu_device_unrealize(DeviceState *dev)
g_tree_destroy(s->endpoints);
}
+ qemu_rec_mutex_destroy(&s->mutex);
+
virtio_delete_queue(s->req_vq);
virtio_delete_queue(s->event_vq);
virtio_cleanup(vdev);