-rw-r--r--  MAINTAINERS | 17
-rw-r--r--  backends/Makefile.objs | 3
-rw-r--r--  backends/cryptodev-builtin.c | 361
-rw-r--r--  backends/cryptodev.c | 245
-rw-r--r--  block/backup.c | 5
-rw-r--r--  block/commit.c | 10
-rw-r--r--  block/gluster.c | 124
-rw-r--r--  block/mirror.c | 30
-rw-r--r--  block/rbd.c | 25
-rw-r--r--  block/replication.c | 14
-rw-r--r--  block/stream.c | 9
-rw-r--r--  block/trace-events | 5
-rw-r--r--  blockdev.c | 74
-rw-r--r--  blockjob.c | 113
-rw-r--r--  cpu-exec.c | 2
-rw-r--r--  docs/specs/acpi_mem_hotplug.txt | 3
-rw-r--r--  docs/specs/acpi_nvdimm.txt | 58
-rw-r--r--  exec.c | 2
-rw-r--r--  hw/acpi/Makefile.objs | 2
-rw-r--r--  hw/acpi/ipmi.c | 1
-rw-r--r--  hw/acpi/memory_hotplug.c | 31
-rw-r--r--  hw/acpi/nvdimm.c | 468
-rw-r--r--  hw/block/dataplane/virtio-blk.c | 73
-rw-r--r--  hw/block/dataplane/virtio-blk.h | 6
-rw-r--r--  hw/block/virtio-blk.c | 15
-rw-r--r--  hw/core/hotplug.c | 11
-rw-r--r--  hw/core/qdev.c | 20
-rw-r--r--  hw/i386/acpi-build.c | 9
-rw-r--r--  hw/i386/pc.c | 31
-rw-r--r--  hw/ipmi/Makefile.objs | 2
-rw-r--r--  hw/ipmi/ipmi.c | 10
-rw-r--r--  hw/ipmi/ipmi_bmc_extern.c | 12
-rw-r--r--  hw/ipmi/ipmi_bmc_sim.c | 7
-rw-r--r--  hw/mem/nvdimm.c | 4
-rw-r--r--  hw/s390x/virtio-ccw.c | 44
-rw-r--r--  hw/s390x/virtio-ccw.h | 2
-rw-r--r--  hw/scsi/virtio-scsi-dataplane.c | 56
-rw-r--r--  hw/scsi/virtio-scsi.c | 24
-rw-r--r--  hw/virtio/Makefile.objs | 2
-rw-r--r--  hw/virtio/vhost.c | 5
-rw-r--r--  hw/virtio/virtio-balloon.c | 31
-rw-r--r--  hw/virtio/virtio-bus.c | 154
-rw-r--r--  hw/virtio/virtio-crypto-pci.c | 77
-rw-r--r--  hw/virtio/virtio-crypto.c | 898
-rw-r--r--  hw/virtio/virtio-mmio.c | 35
-rw-r--r--  hw/virtio/virtio-pci.c | 40
-rw-r--r--  hw/virtio/virtio-pci.h | 17
-rw-r--r--  hw/virtio/virtio.c | 153
-rw-r--r--  include/block/block.h | 3
-rw-r--r--  include/block/block_int.h | 26
-rw-r--r--  include/block/blockjob.h | 256
-rw-r--r--  include/block/blockjob_int.h | 239
-rw-r--r--  include/hw/acpi/acpi_dev_interface.h | 1
-rw-r--r--  include/hw/hotplug.h | 10
-rw-r--r--  include/hw/mem/nvdimm.h | 27
-rw-r--r--  include/hw/virtio/virtio-bus.h | 27
-rw-r--r--  include/hw/virtio/virtio-crypto.h | 101
-rw-r--r--  include/hw/virtio/virtio-scsi.h | 6
-rw-r--r--  include/hw/virtio/virtio.h | 15
-rw-r--r--  include/qemu/log.h | 16
-rw-r--r--  include/standard-headers/linux/virtio_crypto.h | 429
-rw-r--r--  include/standard-headers/linux/virtio_ids.h | 2
-rw-r--r--  include/sysemu/cryptodev.h | 298
-rw-r--r--  include/sysemu/os-posix.h | 12
-rw-r--r--  include/sysemu/os-win32.h | 15
-rw-r--r--  qapi/block-core.json | 2
-rw-r--r--  qemu-img.c | 5
-rw-r--r--  qemu-options.hx | 18
-rw-r--r--  target-alpha/translate.c | 2
-rw-r--r--  target-arm/translate-a64.c | 2
-rw-r--r--  target-arm/translate.c | 2
-rw-r--r--  target-cris/translate.c | 27
-rw-r--r--  target-i386/translate.c | 4
-rw-r--r--  target-lm32/translate.c | 2
-rw-r--r--  target-m68k/translate.c | 2
-rw-r--r--  target-microblaze/translate.c | 72
-rw-r--r--  target-mips/translate.c | 2
-rw-r--r--  target-openrisc/translate.c | 9
-rw-r--r--  target-ppc/translate.c | 2
-rw-r--r--  target-s390x/translate.c | 2
-rw-r--r--  target-sh4/translate.c | 2
-rw-r--r--  target-sparc/translate.c | 2
-rw-r--r--  target-tilegx/translate.c | 6
-rw-r--r--  target-tricore/translate.c | 2
-rw-r--r--  target-unicore32/translate.c | 2
-rw-r--r--  target-xtensa/translate.c | 2
-rw-r--r--  tcg/tcg-op.c | 45
-rw-r--r--  tcg/tcg-op.h | 4
-rw-r--r--  tcg/tcg.c | 8
-rw-r--r--  tcg/tcg.h | 38
-rw-r--r--  tests/ipmi-bt-test.c | 2
-rw-r--r--  tests/test-blockjob-txn.c | 5
-rw-r--r--  tests/test-blockjob.c | 4
-rw-r--r--  translate-all.c | 2
94 files changed, 4112 insertions, 988 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 3fecf458c0..82c814a919 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1016,6 +1016,13 @@ F: include/sysemu/rng*.h
F: backends/rng*.c
F: tests/virtio-rng-test.c
+virtio-crypto
+M: Gonglei <arei.gonglei@huawei.com>
+S: Supported
+F: hw/virtio/virtio-crypto.c
+F: hw/virtio/virtio-crypto-pci.c
+F: include/hw/virtio/virtio-crypto.h
+
nvme
M: Keith Busch <keith.busch@intel.com>
L: qemu-block@nongnu.org
@@ -1261,6 +1268,12 @@ S: Maintained
F: backends/hostmem*.c
F: include/sysemu/hostmem.h
+Cryptodev Backends
+M: Gonglei <arei.gonglei@huawei.com>
+S: Maintained
+F: include/sysemu/cryptodev*.h
+F: backends/cryptodev*.c
+
QAPI
M: Markus Armbruster <armbru@redhat.com>
M: Michael Roth <mdroth@linux.vnet.ibm.com>
@@ -1504,8 +1517,8 @@ F: tcg/mips/
F: disas/mips.c
PPC
-M: Vassili Karpov (malc) <av1474@comtv.ru>
-S: Maintained
+M: Richard Henderson <rth@twiddle.net>
+S: Odd Fixes
F: tcg/ppc/
F: disas/ppc.c
diff --git a/backends/Makefile.objs b/backends/Makefile.objs
index 31a3a894f5..18469980e6 100644
--- a/backends/Makefile.objs
+++ b/backends/Makefile.objs
@@ -9,3 +9,6 @@ common-obj-$(CONFIG_TPM) += tpm.o
common-obj-y += hostmem.o hostmem-ram.o
common-obj-$(CONFIG_LINUX) += hostmem-file.o
+
+common-obj-y += cryptodev.o
+common-obj-y += cryptodev-builtin.o
diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c
new file mode 100644
index 0000000000..eda954b2a2
--- /dev/null
+++ b/backends/cryptodev-builtin.c
@@ -0,0 +1,361 @@
+/*
+ * QEMU Cryptodev backend for QEMU cipher APIs
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/cryptodev.h"
+#include "hw/boards.h"
+#include "qapi/error.h"
+#include "standard-headers/linux/virtio_crypto.h"
+#include "crypto/cipher.h"
+
+
+/**
+ * @TYPE_CRYPTODEV_BACKEND_BUILTIN:
+ * name of backend that uses QEMU cipher API
+ */
+#define TYPE_CRYPTODEV_BACKEND_BUILTIN "cryptodev-backend-builtin"
+
+#define CRYPTODEV_BACKEND_BUILTIN(obj) \
+ OBJECT_CHECK(CryptoDevBackendBuiltin, \
+ (obj), TYPE_CRYPTODEV_BACKEND_BUILTIN)
+
+typedef struct CryptoDevBackendBuiltin
+ CryptoDevBackendBuiltin;
+
+typedef struct CryptoDevBackendBuiltinSession {
+ QCryptoCipher *cipher;
+ uint8_t direction; /* encryption or decryption */
+ uint8_t type; /* cipher? hash? aead? */
+ QTAILQ_ENTRY(CryptoDevBackendBuiltinSession) next;
+} CryptoDevBackendBuiltinSession;
+
+/* Max number of symmetric sessions */
+#define MAX_NUM_SESSIONS 256
+
+#define CRYPTODEV_BUILTIN_MAX_AUTH_KEY_LEN 512
+#define CRYPTODEV_BUILTIN_MAX_CIPHER_KEY_LEN 64
+
+struct CryptoDevBackendBuiltin {
+ CryptoDevBackend parent_obj;
+
+ CryptoDevBackendBuiltinSession *sessions[MAX_NUM_SESSIONS];
+};
+
+static void cryptodev_builtin_init(
+ CryptoDevBackend *backend, Error **errp)
+{
+ /* Only support one queue */
+ int queues = backend->conf.peers.queues;
+ CryptoDevBackendClient *cc;
+
+ if (queues != 1) {
+ error_setg(errp,
+                   "Only support one queue in cryptodev-builtin backend");
+ return;
+ }
+
+ cc = cryptodev_backend_new_client(
+ "cryptodev-builtin", NULL);
+ cc->info_str = g_strdup_printf("cryptodev-builtin0");
+ cc->queue_index = 0;
+ backend->conf.peers.ccs[0] = cc;
+
+ backend->conf.crypto_services =
+ 1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
+ 1u << VIRTIO_CRYPTO_SERVICE_HASH |
+ 1u << VIRTIO_CRYPTO_SERVICE_MAC;
+ backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
+    /*
+     * Set the maximum length of a crypto request. This value is chosen
+     * to avoid integer overflow during memory allocation for each
+     * crypto request.
+     */
+ backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendSymOpInfo);
+    backend->conf.max_cipher_key_len = CRYPTODEV_BUILTIN_MAX_CIPHER_KEY_LEN;
+    backend->conf.max_auth_key_len = CRYPTODEV_BUILTIN_MAX_AUTH_KEY_LEN;
+}
+
+static int
+cryptodev_builtin_get_unused_session_index(
+ CryptoDevBackendBuiltin *builtin)
+{
+ size_t i;
+
+ for (i = 0; i < MAX_NUM_SESSIONS; i++) {
+ if (builtin->sessions[i] == NULL) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static int
+cryptodev_builtin_get_aes_algo(uint32_t key_len, Error **errp)
+{
+ int algo;
+
+ if (key_len == 128 / 8) {
+ algo = QCRYPTO_CIPHER_ALG_AES_128;
+ } else if (key_len == 192 / 8) {
+ algo = QCRYPTO_CIPHER_ALG_AES_192;
+ } else if (key_len == 256 / 8) {
+ algo = QCRYPTO_CIPHER_ALG_AES_256;
+ } else {
+        error_setg(errp, "Unsupported key length: %u", key_len);
+ return -1;
+ }
+
+ return algo;
+}
+
+static int cryptodev_builtin_create_cipher_session(
+ CryptoDevBackendBuiltin *builtin,
+ CryptoDevBackendSymSessionInfo *sess_info,
+ Error **errp)
+{
+ int algo;
+ int mode;
+ QCryptoCipher *cipher;
+ int index;
+ CryptoDevBackendBuiltinSession *sess;
+
+ if (sess_info->op_type != VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+        error_setg(errp, "Unsupported optype: %u", sess_info->op_type);
+ return -1;
+ }
+
+ index = cryptodev_builtin_get_unused_session_index(builtin);
+ if (index < 0) {
+ error_setg(errp, "Total number of sessions created exceeds %u",
+ MAX_NUM_SESSIONS);
+ return -1;
+ }
+
+ switch (sess_info->cipher_alg) {
+ case VIRTIO_CRYPTO_CIPHER_AES_ECB:
+ algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
+ errp);
+ if (algo < 0) {
+ return -1;
+ }
+ mode = QCRYPTO_CIPHER_MODE_ECB;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_AES_CBC:
+ algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
+ errp);
+ if (algo < 0) {
+ return -1;
+ }
+ mode = QCRYPTO_CIPHER_MODE_CBC;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_AES_CTR:
+ algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
+ errp);
+ if (algo < 0) {
+ return -1;
+ }
+ mode = QCRYPTO_CIPHER_MODE_CTR;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_DES_ECB:
+ algo = QCRYPTO_CIPHER_ALG_DES_RFB;
+ mode = QCRYPTO_CIPHER_MODE_ECB;
+ break;
+ default:
+        error_setg(errp, "Unsupported cipher alg: %u",
+ sess_info->cipher_alg);
+ return -1;
+ }
+
+ cipher = qcrypto_cipher_new(algo, mode,
+ sess_info->cipher_key,
+ sess_info->key_len,
+ errp);
+ if (!cipher) {
+ return -1;
+ }
+
+ sess = g_new0(CryptoDevBackendBuiltinSession, 1);
+ sess->cipher = cipher;
+ sess->direction = sess_info->direction;
+ sess->type = sess_info->op_type;
+
+ builtin->sessions[index] = sess;
+
+ return index;
+}
+
+static int64_t cryptodev_builtin_sym_create_session(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymSessionInfo *sess_info,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendBuiltin *builtin =
+ CRYPTODEV_BACKEND_BUILTIN(backend);
+ int64_t session_id = -1;
+ int ret;
+
+ switch (sess_info->op_code) {
+ case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
+ ret = cryptodev_builtin_create_cipher_session(
+ builtin, sess_info, errp);
+ if (ret < 0) {
+ return ret;
+ } else {
+ session_id = ret;
+ }
+ break;
+ case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
+ case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
+ default:
+        error_setg(errp, "Unsupported opcode: %" PRIu32 "",
+ sess_info->op_code);
+ return -1;
+ }
+
+ return session_id;
+}
+
+static int cryptodev_builtin_sym_close_session(
+ CryptoDevBackend *backend,
+ uint64_t session_id,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendBuiltin *builtin =
+ CRYPTODEV_BACKEND_BUILTIN(backend);
+
+ if (session_id >= MAX_NUM_SESSIONS ||
+ builtin->sessions[session_id] == NULL) {
+ error_setg(errp, "Cannot find a valid session id: %" PRIu64 "",
+ session_id);
+ return -1;
+ }
+
+ qcrypto_cipher_free(builtin->sessions[session_id]->cipher);
+ g_free(builtin->sessions[session_id]);
+ builtin->sessions[session_id] = NULL;
+ return 0;
+}
+
+static int cryptodev_builtin_sym_operation(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymOpInfo *op_info,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendBuiltin *builtin =
+ CRYPTODEV_BACKEND_BUILTIN(backend);
+ CryptoDevBackendBuiltinSession *sess;
+ int ret;
+
+ if (op_info->session_id >= MAX_NUM_SESSIONS ||
+ builtin->sessions[op_info->session_id] == NULL) {
+ error_setg(errp, "Cannot find a valid session id: %" PRIu64 "",
+ op_info->session_id);
+ return -VIRTIO_CRYPTO_INVSESS;
+ }
+
+ if (op_info->op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ error_setg(errp,
+                   "Algorithm chaining is unsupported for cryptodev-builtin");
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+
+ sess = builtin->sessions[op_info->session_id];
+
+ ret = qcrypto_cipher_setiv(sess->cipher, op_info->iv,
+ op_info->iv_len, errp);
+ if (ret < 0) {
+ return -VIRTIO_CRYPTO_ERR;
+ }
+
+ if (sess->direction == VIRTIO_CRYPTO_OP_ENCRYPT) {
+ ret = qcrypto_cipher_encrypt(sess->cipher, op_info->src,
+ op_info->dst, op_info->src_len, errp);
+ if (ret < 0) {
+ return -VIRTIO_CRYPTO_ERR;
+ }
+ } else {
+ ret = qcrypto_cipher_decrypt(sess->cipher, op_info->src,
+ op_info->dst, op_info->src_len, errp);
+ if (ret < 0) {
+ return -VIRTIO_CRYPTO_ERR;
+ }
+ }
+ return VIRTIO_CRYPTO_OK;
+}
+
+static void cryptodev_builtin_cleanup(
+ CryptoDevBackend *backend,
+ Error **errp)
+{
+ CryptoDevBackendBuiltin *builtin =
+ CRYPTODEV_BACKEND_BUILTIN(backend);
+ size_t i;
+ int queues = backend->conf.peers.queues;
+ CryptoDevBackendClient *cc;
+
+ for (i = 0; i < MAX_NUM_SESSIONS; i++) {
+ if (builtin->sessions[i] != NULL) {
+ cryptodev_builtin_sym_close_session(
+ backend, i, 0, errp);
+ }
+ }
+
+ assert(queues == 1);
+
+ for (i = 0; i < queues; i++) {
+ cc = backend->conf.peers.ccs[i];
+ if (cc) {
+ cryptodev_backend_free_client(cc);
+ backend->conf.peers.ccs[i] = NULL;
+ }
+ }
+}
+
+static void
+cryptodev_builtin_class_init(ObjectClass *oc, void *data)
+{
+ CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);
+
+ bc->init = cryptodev_builtin_init;
+ bc->cleanup = cryptodev_builtin_cleanup;
+ bc->create_session = cryptodev_builtin_sym_create_session;
+ bc->close_session = cryptodev_builtin_sym_close_session;
+ bc->do_sym_op = cryptodev_builtin_sym_operation;
+}
+
+static const TypeInfo cryptodev_builtin_info = {
+ .name = TYPE_CRYPTODEV_BACKEND_BUILTIN,
+ .parent = TYPE_CRYPTODEV_BACKEND,
+ .class_init = cryptodev_builtin_class_init,
+ .instance_size = sizeof(CryptoDevBackendBuiltin),
+};
+
+static void
+cryptodev_builtin_register_types(void)
+{
+ type_register_static(&cryptodev_builtin_info);
+}
+
+type_init(cryptodev_builtin_register_types);
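
For reference, the builtin backend always drives the QEMU cipher API through
the same create / set-IV / encrypt-or-decrypt / free sequence seen above. A
minimal sketch of that lifecycle, using only the qcrypto_* calls exercised by
this patch (the example_encrypt() helper, the AES-128-CBC choice and the
16-byte sizes are illustrative, not part of the patch):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "crypto/cipher.h"

    /* One AES-128-CBC encryption pass, mirroring what
     * cryptodev_builtin_create_cipher_session() plus
     * cryptodev_builtin_sym_operation() do per request. */
    static int example_encrypt(const uint8_t *key, const uint8_t *iv,
                               const uint8_t *src, uint8_t *dst,
                               size_t len, Error **errp)
    {
        QCryptoCipher *cipher;

        cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128,
                                    QCRYPTO_CIPHER_MODE_CBC,
                                    key, 16, errp);
        if (!cipher) {
            return -1;
        }
        if (qcrypto_cipher_setiv(cipher, iv, 16, errp) < 0 ||
            qcrypto_cipher_encrypt(cipher, src, dst, len, errp) < 0) {
            qcrypto_cipher_free(cipher);
            return -1;
        }
        qcrypto_cipher_free(cipher);
        return 0;
    }

The backend amortizes this by caching the QCryptoCipher in a session slot at
session-create time, so each data request only pays for setiv plus the cipher
operation itself.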
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
new file mode 100644
index 0000000000..4a49f9762f
--- /dev/null
+++ b/backends/cryptodev.c
@@ -0,0 +1,245 @@
+/*
+ * QEMU Crypto Device Implementation
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/cryptodev.h"
+#include "hw/boards.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qapi-types.h"
+#include "qapi-visit.h"
+#include "qemu/config-file.h"
+#include "qom/object_interfaces.h"
+#include "hw/virtio/virtio-crypto.h"
+
+
+static QTAILQ_HEAD(, CryptoDevBackendClient) crypto_clients;
+
+
+CryptoDevBackendClient *
+cryptodev_backend_new_client(const char *model,
+ const char *name)
+{
+ CryptoDevBackendClient *cc;
+
+ cc = g_malloc0(sizeof(CryptoDevBackendClient));
+ cc->model = g_strdup(model);
+ if (name) {
+ cc->name = g_strdup(name);
+ }
+
+ QTAILQ_INSERT_TAIL(&crypto_clients, cc, next);
+
+ return cc;
+}
+
+void cryptodev_backend_free_client(
+ CryptoDevBackendClient *cc)
+{
+ QTAILQ_REMOVE(&crypto_clients, cc, next);
+ g_free(cc->name);
+ g_free(cc->model);
+ g_free(cc->info_str);
+ g_free(cc);
+}
+
+void cryptodev_backend_cleanup(
+ CryptoDevBackend *backend,
+ Error **errp)
+{
+ CryptoDevBackendClass *bc =
+ CRYPTODEV_BACKEND_GET_CLASS(backend);
+
+ if (bc->cleanup) {
+ bc->cleanup(backend, errp);
+ }
+
+ backend->ready = false;
+}
+
+int64_t cryptodev_backend_sym_create_session(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymSessionInfo *sess_info,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendClass *bc =
+ CRYPTODEV_BACKEND_GET_CLASS(backend);
+
+ if (bc->create_session) {
+ return bc->create_session(backend, sess_info, queue_index, errp);
+ }
+
+ return -1;
+}
+
+int cryptodev_backend_sym_close_session(
+ CryptoDevBackend *backend,
+ uint64_t session_id,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendClass *bc =
+ CRYPTODEV_BACKEND_GET_CLASS(backend);
+
+ if (bc->close_session) {
+ return bc->close_session(backend, session_id, queue_index, errp);
+ }
+
+ return -1;
+}
+
+static int cryptodev_backend_sym_operation(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymOpInfo *op_info,
+ uint32_t queue_index, Error **errp)
+{
+ CryptoDevBackendClass *bc =
+ CRYPTODEV_BACKEND_GET_CLASS(backend);
+
+ if (bc->do_sym_op) {
+ return bc->do_sym_op(backend, op_info, queue_index, errp);
+ }
+
+ return -VIRTIO_CRYPTO_ERR;
+}
+
+int cryptodev_backend_crypto_operation(
+ CryptoDevBackend *backend,
+ void *opaque,
+ uint32_t queue_index, Error **errp)
+{
+ VirtIOCryptoReq *req = opaque;
+
+ if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ CryptoDevBackendSymOpInfo *op_info;
+ op_info = req->u.sym_op_info;
+
+ return cryptodev_backend_sym_operation(backend,
+ op_info, queue_index, errp);
+ } else {
+ error_setg(errp, "Unsupported cryptodev alg type: %" PRIu32 "",
+ req->flags);
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+
+ return -VIRTIO_CRYPTO_ERR;
+}
+
+static void
+cryptodev_backend_get_queues(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ uint32_t value = backend->conf.peers.queues;
+
+ visit_type_uint32(v, name, &value, errp);
+}
+
+static void
+cryptodev_backend_set_queues(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ Error *local_err = NULL;
+ uint32_t value;
+
+ visit_type_uint32(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ if (!value) {
+ error_setg(&local_err, "Property '%s.%s' doesn't take value '%"
+ PRIu32 "'", object_get_typename(obj), name, value);
+ goto out;
+ }
+ backend->conf.peers.queues = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+static void
+cryptodev_backend_complete(UserCreatable *uc, Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(uc);
+ CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(uc);
+ Error *local_err = NULL;
+
+ if (bc->init) {
+ bc->init(backend, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ }
+ backend->ready = true;
+ return;
+
+out:
+ backend->ready = false;
+ error_propagate(errp, local_err);
+}
+
+static void cryptodev_backend_instance_init(Object *obj)
+{
+ object_property_add(obj, "queues", "int",
+ cryptodev_backend_get_queues,
+ cryptodev_backend_set_queues,
+ NULL, NULL, NULL);
+ /* Initialize devices' queues property to 1 */
+ object_property_set_int(obj, 1, "queues", NULL);
+}
+
+static void cryptodev_backend_finalize(Object *obj)
+{
+
+}
+
+static void
+cryptodev_backend_class_init(ObjectClass *oc, void *data)
+{
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
+
+ ucc->complete = cryptodev_backend_complete;
+
+ QTAILQ_INIT(&crypto_clients);
+}
+
+static const TypeInfo cryptodev_backend_info = {
+ .name = TYPE_CRYPTODEV_BACKEND,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(CryptoDevBackend),
+ .instance_init = cryptodev_backend_instance_init,
+ .instance_finalize = cryptodev_backend_finalize,
+ .class_size = sizeof(CryptoDevBackendClass),
+ .class_init = cryptodev_backend_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_USER_CREATABLE },
+ { }
+ }
+};
+
+static void
+cryptodev_backend_register_types(void)
+{
+ type_register_static(&cryptodev_backend_info);
+}
+
+type_init(cryptodev_backend_register_types);
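
Because the backend implements TYPE_USER_CREATABLE, it is instantiated with
-object and then referenced by the virtio-crypto device added later in this
series. An illustrative invocation (the id values are arbitrary; see the
qemu-options.hx change in this series for the authoritative syntax):

    qemu-system-x86_64 ... \
        -object cryptodev-backend-builtin,id=cryptodev0 \
        -device virtio-crypto-pci,id=crypto0,cryptodev=cryptodev0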
diff --git a/block/backup.c b/block/backup.c
index 44c7ff3d16..7b5d8a3757 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -16,7 +16,7 @@
#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
-#include "block/blockjob.h"
+#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
@@ -543,6 +543,7 @@ void backup_start(const char *job_id, BlockDriverState *bs,
bool compress,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
+ int creation_flags,
BlockCompletionFunc *cb, void *opaque,
BlockJobTxn *txn, Error **errp)
{
@@ -612,7 +613,7 @@ void backup_start(const char *job_id, BlockDriverState *bs,
}
job = block_job_create(job_id, &backup_job_driver, bs, speed,
- cb, opaque, errp);
+ creation_flags, cb, opaque, errp);
if (!job) {
goto error;
}
diff --git a/block/commit.c b/block/commit.c
index a5e17f610f..e1eda8908b 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -15,7 +15,7 @@
#include "qemu/osdep.h"
#include "trace.h"
#include "block/block_int.h"
-#include "block/blockjob.h"
+#include "block/blockjob_int.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
@@ -209,8 +209,8 @@ static const BlockJobDriver commit_job_driver = {
void commit_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, BlockDriverState *top, int64_t speed,
- BlockdevOnError on_error, BlockCompletionFunc *cb,
- void *opaque, const char *backing_file_str, Error **errp)
+ BlockdevOnError on_error, const char *backing_file_str,
+ Error **errp)
{
CommitBlockJob *s;
BlockReopenQueue *reopen_queue = NULL;
@@ -234,7 +234,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
}
s = block_job_create(job_id, &commit_job_driver, bs, speed,
- cb, opaque, errp);
+ BLOCK_JOB_DEFAULT, NULL, NULL, errp);
if (!s) {
return;
}
@@ -290,7 +290,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
s->on_error = on_error;
s->common.co = qemu_coroutine_create(commit_run, s);
- trace_commit_start(bs, base, top, s, s->common.co, opaque);
+ trace_commit_start(bs, base, top, s, s->common.co);
qemu_coroutine_enter(s->common.co);
}
diff --git a/block/gluster.c b/block/gluster.c
index af76d7d59a..0ce15f7adc 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -14,6 +14,7 @@
#include "qapi/qmp/qerror.h"
#include "qemu/uri.h"
#include "qemu/error-report.h"
+#include "qemu/cutils.h"
#define GLUSTER_OPT_FILENAME "filename"
#define GLUSTER_OPT_VOLUME "volume"
@@ -56,6 +57,19 @@ typedef struct BDRVGlusterReopenState {
} BDRVGlusterReopenState;
+typedef struct GlfsPreopened {
+ char *volume;
+ glfs_t *fs;
+ int ref;
+} GlfsPreopened;
+
+typedef struct ListElement {
+ QLIST_ENTRY(ListElement) list;
+ GlfsPreopened saved;
+} ListElement;
+
+static QLIST_HEAD(glfs_list, ListElement) glfs_list;
+
static QemuOptsList qemu_gluster_create_opts = {
.name = "qemu-gluster-create-opts",
.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -172,7 +186,7 @@ static QemuOptsList runtime_tcp_opts = {
},
{
.name = GLUSTER_OPT_PORT,
- .type = QEMU_OPT_NUMBER,
+ .type = QEMU_OPT_STRING,
.help = "port number on which glusterd is listening (default 24007)",
},
{
@@ -194,6 +208,57 @@ static QemuOptsList runtime_tcp_opts = {
},
};
+static void glfs_set_preopened(const char *volume, glfs_t *fs)
+{
+ ListElement *entry = NULL;
+
+ entry = g_new(ListElement, 1);
+
+ entry->saved.volume = g_strdup(volume);
+
+ entry->saved.fs = fs;
+ entry->saved.ref = 1;
+
+ QLIST_INSERT_HEAD(&glfs_list, entry, list);
+}
+
+static glfs_t *glfs_find_preopened(const char *volume)
+{
+ ListElement *entry = NULL;
+
+ QLIST_FOREACH(entry, &glfs_list, list) {
+ if (strcmp(entry->saved.volume, volume) == 0) {
+ entry->saved.ref++;
+ return entry->saved.fs;
+ }
+ }
+
+ return NULL;
+}
+
+static void glfs_clear_preopened(glfs_t *fs)
+{
+ ListElement *entry = NULL;
+
+ if (fs == NULL) {
+ return;
+ }
+
+ QLIST_FOREACH(entry, &glfs_list, list) {
+ if (entry->saved.fs == fs) {
+ if (--entry->saved.ref) {
+ return;
+ }
+
+ QLIST_REMOVE(entry, list);
+
+ glfs_fini(entry->saved.fs);
+ g_free(entry->saved.volume);
+ g_free(entry);
+ }
+ }
+}
+
static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
{
char *p, *q;
@@ -330,22 +395,37 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
int ret;
int old_errno;
GlusterServerList *server;
+ unsigned long long port;
+
+ glfs = glfs_find_preopened(gconf->volume);
+ if (glfs) {
+ return glfs;
+ }
glfs = glfs_new(gconf->volume);
if (!glfs) {
goto out;
}
+ glfs_set_preopened(gconf->volume, glfs);
+
for (server = gconf->server; server; server = server->next) {
if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
ret = glfs_set_volfile_server(glfs,
GlusterTransport_lookup[server->value->type],
server->value->u.q_unix.path, 0);
} else {
+ if (parse_uint_full(server->value->u.tcp.port, &port, 10) < 0 ||
+ port > 65535) {
+ error_setg(errp, "'%s' is not a valid port number",
+ server->value->u.tcp.port);
+ errno = EINVAL;
+ goto out;
+ }
ret = glfs_set_volfile_server(glfs,
GlusterTransport_lookup[server->value->type],
server->value->u.tcp.host,
- atoi(server->value->u.tcp.port));
+ (int)port);
}
if (ret < 0) {
@@ -387,7 +467,7 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
out:
if (glfs) {
old_errno = errno;
- glfs_fini(glfs);
+ glfs_clear_preopened(glfs);
errno = old_errno;
}
return NULL;
@@ -668,7 +748,10 @@ static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
*/
static bool qemu_gluster_test_seek(struct glfs_fd *fd)
{
- off_t ret, eof;
+ off_t ret = 0;
+
+#if defined SEEK_HOLE && defined SEEK_DATA
+ off_t eof;
eof = glfs_lseek(fd, 0, SEEK_END);
if (eof < 0) {
@@ -678,6 +761,8 @@ static bool qemu_gluster_test_seek(struct glfs_fd *fd)
/* this should always fail with ENXIO if SEEK_DATA is supported */
ret = glfs_lseek(fd, eof, SEEK_DATA);
+#endif
+
return (ret < 0) && (errno == ENXIO);
}
@@ -762,9 +847,9 @@ out:
if (s->fd) {
glfs_close(s->fd);
}
- if (s->glfs) {
- glfs_fini(s->glfs);
- }
+
+ glfs_clear_preopened(s->glfs);
+
return ret;
}
@@ -831,9 +916,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state)
if (s->fd) {
glfs_close(s->fd);
}
- if (s->glfs) {
- glfs_fini(s->glfs);
- }
+
+ glfs_clear_preopened(s->glfs);
/* use the newly opened image / connection */
s->fd = reop_s->fd;
@@ -858,9 +942,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
glfs_close(reop_s->fd);
}
- if (reop_s->glfs) {
- glfs_fini(reop_s->glfs);
- }
+ glfs_clear_preopened(reop_s->glfs);
g_free(state->opaque);
state->opaque = NULL;
@@ -984,9 +1066,7 @@ static int qemu_gluster_create(const char *filename,
out:
g_free(tmp);
qapi_free_BlockdevOptionsGluster(gconf);
- if (glfs) {
- glfs_fini(glfs);
- }
+ glfs_clear_preopened(glfs);
return ret;
}
@@ -1059,7 +1139,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
glfs_close(s->fd);
s->fd = NULL;
}
- glfs_fini(s->glfs);
+ glfs_clear_preopened(s->glfs);
}
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
@@ -1178,12 +1258,14 @@ static int find_allocation(BlockDriverState *bs, off_t start,
off_t *data, off_t *hole)
{
BDRVGlusterState *s = bs->opaque;
- off_t offs;
if (!s->supports_seek_data) {
- return -ENOTSUP;
+ goto exit;
}
+#if defined SEEK_HOLE && defined SEEK_DATA
+ off_t offs;
+
/*
* SEEK_DATA cases:
* D1. offs == start: start is in data
@@ -1247,6 +1329,10 @@ static int find_allocation(BlockDriverState *bs, off_t start,
/* D1 and H1 */
return -EBUSY;
+#endif
+
+exit:
+ return -ENOTSUP;
}
/*
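
The helpers added above give block/gluster.c a refcounted, per-volume cache
of glfs_t connections, so the reopen and create paths reuse a live connection
instead of paying for a second glfs_init(). A sketch of the calling pattern,
condensed from qemu_gluster_glfs_init() in this hunk:

    glfs_t *fs = glfs_find_preopened(gconf->volume);  /* ref++ on a hit */
    if (!fs) {
        fs = glfs_new(gconf->volume);                 /* first user ...    */
        glfs_set_preopened(gconf->volume, fs);        /* ... cached, ref=1 */
    }
    /* Every user, including error and close paths, drops its reference: */
    glfs_clear_preopened(fs);                         /* glfs_fini() at ref 0 */

Note that glfs_clear_preopened(NULL) is a no-op, which is why the callers
above can drop their "if (s->glfs)" guards.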
diff --git a/block/mirror.c b/block/mirror.c
index 7e99f3a880..b2c1fb855b 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "trace.h"
-#include "block/blockjob.h"
+#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
@@ -937,9 +937,9 @@ static const BlockJobDriver commit_active_job_driver = {
};
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
- BlockDriverState *target, const char *replaces,
- int64_t speed, uint32_t granularity,
- int64_t buf_size,
+ int creation_flags, BlockDriverState *target,
+ const char *replaces, int64_t speed,
+ uint32_t granularity, int64_t buf_size,
BlockMirrorBackingMode backing_mode,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
@@ -967,7 +967,8 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
buf_size = DEFAULT_MIRROR_BUF_SIZE;
}
- s = block_job_create(job_id, driver, bs, speed, cb, opaque, errp);
+ s = block_job_create(job_id, driver, bs, speed, creation_flags,
+ cb, opaque, errp);
if (!s) {
return;
}
@@ -1017,9 +1018,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
- bool unmap,
- BlockCompletionFunc *cb,
- void *opaque, Error **errp)
+ bool unmap, Error **errp)
{
bool is_none_mode;
BlockDriverState *base;
@@ -1030,17 +1029,16 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
}
is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
- mirror_start_job(job_id, bs, target, replaces,
+ mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
speed, granularity, buf_size, backing_mode,
- on_source_error, on_target_error, unmap, cb, opaque, errp,
+ on_source_error, on_target_error, unmap, NULL, NULL, errp,
&mirror_job_driver, is_none_mode, base, false);
}
void commit_active_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, int64_t speed,
- BlockdevOnError on_error,
- BlockCompletionFunc *cb,
- void *opaque, Error **errp,
+ BlockDriverState *base, int creation_flags,
+ int64_t speed, BlockdevOnError on_error,
+ BlockCompletionFunc *cb, void *opaque, Error **errp,
bool auto_complete)
{
int64_t length, base_length;
@@ -1079,9 +1077,9 @@ void commit_active_start(const char *job_id, BlockDriverState *bs,
}
}
- mirror_start_job(job_id, bs, base, NULL, speed, 0, 0,
+ mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
MIRROR_LEAVE_BACKING_CHAIN,
- on_error, on_error, false, cb, opaque, &local_err,
+ on_error, on_error, true, cb, opaque, &local_err,
&commit_active_job_driver, false, base, auto_complete);
if (local_err) {
error_propagate(errp, local_err);
diff --git a/block/rbd.c b/block/rbd.c
index f6e1d4bc11..a57b3e3c5d 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -365,45 +365,44 @@ static int qemu_rbd_create(const char *filename, QemuOpts *opts, Error **errp)
rados_conf_read_file(cluster, NULL);
} else if (conf[0] != '\0' &&
qemu_rbd_set_conf(cluster, conf, true, &local_err) < 0) {
- rados_shutdown(cluster);
error_propagate(errp, local_err);
- return -EIO;
+ ret = -EIO;
+ goto shutdown;
}
if (conf[0] != '\0' &&
qemu_rbd_set_conf(cluster, conf, false, &local_err) < 0) {
- rados_shutdown(cluster);
error_propagate(errp, local_err);
- return -EIO;
+ ret = -EIO;
+ goto shutdown;
}
if (qemu_rbd_set_auth(cluster, secretid, errp) < 0) {
- rados_shutdown(cluster);
- return -EIO;
+ ret = -EIO;
+ goto shutdown;
}
ret = rados_connect(cluster);
if (ret < 0) {
error_setg_errno(errp, -ret, "error connecting");
- rados_shutdown(cluster);
- return ret;
+ goto shutdown;
}
ret = rados_ioctx_create(cluster, pool, &io_ctx);
if (ret < 0) {
error_setg_errno(errp, -ret, "error opening pool %s", pool);
- rados_shutdown(cluster);
- return ret;
+ goto shutdown;
}
ret = rbd_create(io_ctx, name, bytes, &obj_order);
- rados_ioctx_destroy(io_ctx);
- rados_shutdown(cluster);
if (ret < 0) {
error_setg_errno(errp, -ret, "error rbd create");
- return ret;
}
+ rados_ioctx_destroy(io_ctx);
+
+shutdown:
+ rados_shutdown(cluster);
return ret;
}
diff --git a/block/replication.c b/block/replication.c
index 02aeaaf7d0..d5e2b0f497 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -508,10 +508,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
bdrv_op_block_all(top_bs, s->blocker);
bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
- backup_start("replication-backup", s->secondary_disk->bs,
- s->hidden_disk->bs, 0, MIRROR_SYNC_MODE_NONE, NULL, false,
+ backup_start(NULL, s->secondary_disk->bs, s->hidden_disk->bs, 0,
+ MIRROR_SYNC_MODE_NONE, NULL, false,
BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
- backup_job_completed, bs, NULL, &local_err);
+ BLOCK_JOB_INTERNAL, backup_job_completed, bs,
+ NULL, &local_err);
if (local_err) {
error_propagate(errp, local_err);
backup_job_cleanup(bs);
@@ -633,10 +634,9 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
}
s->replication_state = BLOCK_REPLICATION_FAILOVER;
- commit_active_start("replication-commit", s->active_disk->bs,
- s->secondary_disk->bs, 0, BLOCKDEV_ON_ERROR_REPORT,
- replication_done,
- bs, errp, true);
+ commit_active_start(NULL, s->active_disk->bs, s->secondary_disk->bs,
+ BLOCK_JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT,
+ replication_done, bs, errp, true);
break;
default:
aio_context_release(aio_context);
diff --git a/block/stream.c b/block/stream.c
index b8ab89a105..b05856bd65 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -14,7 +14,7 @@
#include "qemu/osdep.h"
#include "trace.h"
#include "block/block_int.h"
-#include "block/blockjob.h"
+#include "block/blockjob_int.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
@@ -222,15 +222,14 @@ static const BlockJobDriver stream_job_driver = {
void stream_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, const char *backing_file_str,
- int64_t speed, BlockdevOnError on_error,
- BlockCompletionFunc *cb, void *opaque, Error **errp)
+ int64_t speed, BlockdevOnError on_error, Error **errp)
{
StreamBlockJob *s;
BlockDriverState *iter;
int orig_bs_flags;
s = block_job_create(job_id, &stream_job_driver, bs, speed,
- cb, opaque, errp);
+ BLOCK_JOB_DEFAULT, NULL, NULL, errp);
if (!s) {
return;
}
@@ -256,6 +255,6 @@ void stream_start(const char *job_id, BlockDriverState *bs,
s->on_error = on_error;
s->common.co = qemu_coroutine_create(stream_run, s);
- trace_stream_start(bs, base, s, s->common.co, opaque);
+ trace_stream_start(bs, base, s, s->common.co);
qemu_coroutine_enter(s->common.co);
}
diff --git a/block/trace-events b/block/trace-events
index aff8a9674d..882c9034c2 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -19,11 +19,11 @@ bdrv_co_do_copy_on_readv(void *bs, int64_t offset, unsigned int bytes, int64_t c
# block/stream.c
stream_one_iteration(void *s, int64_t sector_num, int nb_sectors, int is_allocated) "s %p sector_num %"PRId64" nb_sectors %d is_allocated %d"
-stream_start(void *bs, void *base, void *s, void *co, void *opaque) "bs %p base %p s %p co %p opaque %p"
+stream_start(void *bs, void *base, void *s, void *co) "bs %p base %p s %p co %p"
# block/commit.c
commit_one_iteration(void *s, int64_t sector_num, int nb_sectors, int is_allocated) "s %p sector_num %"PRId64" nb_sectors %d is_allocated %d"
-commit_start(void *bs, void *base, void *top, void *s, void *co, void *opaque) "bs %p base %p top %p s %p co %p opaque %p"
+commit_start(void *bs, void *base, void *top, void *s, void *co) "bs %p base %p top %p s %p co %p"
# block/mirror.c
mirror_start(void *bs, void *s, void *co, void *opaque) "bs %p s %p co %p opaque %p"
@@ -51,7 +51,6 @@ qmp_block_job_cancel(void *job) "job %p"
qmp_block_job_pause(void *job) "job %p"
qmp_block_job_resume(void *job) "job %p"
qmp_block_job_complete(void *job) "job %p"
-block_job_cb(void *bs, void *job, int ret) "bs %p job %p ret %d"
qmp_block_stream(void *bs, void *job) "bs %p job %p"
# block/raw-win32.c
diff --git a/blockdev.c b/blockdev.c
index ded13268f7..102ca9fe01 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -2905,31 +2905,6 @@ out:
aio_context_release(aio_context);
}
-static void block_job_cb(void *opaque, int ret)
-{
- /* Note that this function may be executed from another AioContext besides
- * the QEMU main loop. If you need to access anything that assumes the
- * QEMU global mutex, use a BH or introduce a mutex.
- */
-
- BlockDriverState *bs = opaque;
- const char *msg = NULL;
-
- trace_block_job_cb(bs, bs->job, ret);
-
- assert(bs->job);
-
- if (ret < 0) {
- msg = strerror(-ret);
- }
-
- if (block_job_is_cancelled(bs->job)) {
- block_job_event_cancelled(bs->job);
- } else {
- block_job_event_completed(bs->job, msg);
- }
-}
-
void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
bool has_base, const char *base,
bool has_base_node, const char *base_node,
@@ -3005,7 +2980,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
base_name = has_backing_file ? backing_file : base_name;
stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name,
- has_speed ? speed : 0, on_error, block_job_cb, bs, &local_err);
+ has_speed ? speed : 0, on_error, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto out;
@@ -3110,16 +3085,17 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
" but 'top' is the active layer");
goto out;
}
- commit_active_start(has_job_id ? job_id : NULL, bs, base_bs, speed,
- on_error, block_job_cb, bs, &local_err, false);
+ commit_active_start(has_job_id ? job_id : NULL, bs, base_bs,
+ BLOCK_JOB_DEFAULT, speed, on_error, NULL, NULL,
+ &local_err, false);
} else {
BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs);
if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
goto out;
}
commit_start(has_job_id ? job_id : NULL, bs, base_bs, top_bs, speed,
- on_error, block_job_cb, bs,
- has_backing_file ? backing_file : NULL, &local_err);
+ on_error, has_backing_file ? backing_file : NULL,
+ &local_err);
}
if (local_err != NULL) {
error_propagate(errp, local_err);
@@ -3239,7 +3215,8 @@ static void do_drive_backup(DriveBackup *backup, BlockJobTxn *txn, Error **errp)
backup_start(backup->job_id, bs, target_bs, backup->speed, backup->sync,
bmap, backup->compress, backup->on_source_error,
- backup->on_target_error, block_job_cb, bs, txn, &local_err);
+ backup->on_target_error, BLOCK_JOB_DEFAULT,
+ NULL, NULL, txn, &local_err);
bdrv_unref(target_bs);
if (local_err != NULL) {
error_propagate(errp, local_err);
@@ -3309,7 +3286,8 @@ void do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn, Error **errp)
}
backup_start(backup->job_id, bs, target_bs, backup->speed, backup->sync,
NULL, backup->compress, backup->on_source_error,
- backup->on_target_error, block_job_cb, bs, txn, &local_err);
+ backup->on_target_error, BLOCK_JOB_DEFAULT,
+ NULL, NULL, txn, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
}
@@ -3388,8 +3366,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
mirror_start(job_id, bs, target,
has_replaces ? replaces : NULL,
speed, granularity, buf_size, sync, backing_mode,
- on_source_error, on_target_error, unmap,
- block_job_cb, bs, errp);
+ on_source_error, on_target_error, unmap, errp);
}
void qmp_drive_mirror(DriveMirror *arg, Error **errp)
@@ -3633,7 +3610,7 @@ void qmp_block_job_cancel(const char *device,
force = false;
}
- if (job->user_paused && !force) {
+ if (block_job_user_paused(job) && !force) {
error_setg(errp, "The block job for device '%s' is currently paused",
device);
goto out;
@@ -3650,13 +3627,12 @@ void qmp_block_job_pause(const char *device, Error **errp)
AioContext *aio_context;
BlockJob *job = find_block_job(device, &aio_context, errp);
- if (!job || job->user_paused) {
+ if (!job || block_job_user_paused(job)) {
return;
}
- job->user_paused = true;
trace_qmp_block_job_pause(job);
- block_job_pause(job);
+ block_job_user_pause(job);
aio_context_release(aio_context);
}
@@ -3665,14 +3641,13 @@ void qmp_block_job_resume(const char *device, Error **errp)
AioContext *aio_context;
BlockJob *job = find_block_job(device, &aio_context, errp);
- if (!job || !job->user_paused) {
+ if (!job || !block_job_user_paused(job)) {
return;
}
- job->user_paused = false;
trace_qmp_block_job_resume(job);
block_job_iostatus_reset(job);
- block_job_resume(job);
+ block_job_user_resume(job);
aio_context_release(aio_context);
}
@@ -3946,13 +3921,22 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
BlockJob *job;
for (job = block_job_next(NULL); job; job = block_job_next(job)) {
- BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1);
- AioContext *aio_context = blk_get_aio_context(job->blk);
+ BlockJobInfoList *elem;
+ AioContext *aio_context;
+ if (block_job_is_internal(job)) {
+ continue;
+ }
+ elem = g_new0(BlockJobInfoList, 1);
+ aio_context = blk_get_aio_context(job->blk);
aio_context_acquire(aio_context);
- elem->value = block_job_query(job);
+ elem->value = block_job_query(job, errp);
aio_context_release(aio_context);
-
+ if (!elem->value) {
+ g_free(elem);
+ qapi_free_BlockJobInfoList(head);
+ return NULL;
+ }
*p_next = elem;
p_next = &elem->next;
}
diff --git a/blockjob.c b/blockjob.c
index 422851fde5..4aa14a4974 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -27,7 +27,7 @@
#include "qemu-common.h"
#include "trace.h"
#include "block/block.h"
-#include "block/blockjob.h"
+#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
@@ -38,6 +38,9 @@
#include "qemu/timer.h"
#include "qapi-event.h"
+static void block_job_event_cancelled(BlockJob *job);
+static void block_job_event_completed(BlockJob *job, const char *msg);
+
/* Transactional group of block jobs */
struct BlockJobTxn {
@@ -66,7 +69,7 @@ BlockJob *block_job_get(const char *id)
BlockJob *job;
QLIST_FOREACH(job, &block_jobs, job_list) {
- if (!strcmp(id, job->id)) {
+ if (job->id && !strcmp(id, job->id)) {
return job;
}
}
@@ -121,19 +124,18 @@ void block_job_add_bdrv(BlockJob *job, BlockDriverState *bs)
}
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
- BlockDriverState *bs, int64_t speed,
+ BlockDriverState *bs, int64_t speed, int flags,
BlockCompletionFunc *cb, void *opaque, Error **errp)
{
BlockBackend *blk;
BlockJob *job;
- assert(cb);
if (bs->job) {
error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
return NULL;
}
- if (job_id == NULL) {
+ if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
job_id = bdrv_get_device_name(bs);
if (!*job_id) {
error_setg(errp, "An explicit job ID is required for this node");
@@ -141,14 +143,21 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
}
}
- if (!id_wellformed(job_id)) {
- error_setg(errp, "Invalid job ID '%s'", job_id);
- return NULL;
- }
+ if (job_id) {
+ if (flags & BLOCK_JOB_INTERNAL) {
+ error_setg(errp, "Cannot specify job ID for internal block job");
+ return NULL;
+ }
- if (block_job_get(job_id)) {
- error_setg(errp, "Job ID '%s' already in use", job_id);
- return NULL;
+ if (!id_wellformed(job_id)) {
+ error_setg(errp, "Invalid job ID '%s'", job_id);
+ return NULL;
+ }
+
+ if (block_job_get(job_id)) {
+ error_setg(errp, "Job ID '%s' already in use", job_id);
+ return NULL;
+ }
}
blk = blk_new();
@@ -188,6 +197,11 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
return job;
}
+bool block_job_is_internal(BlockJob *job)
+{
+ return (job->id == NULL);
+}
+
void block_job_ref(BlockJob *job)
{
++job->refcnt;
@@ -227,7 +241,20 @@ static void block_job_completed_single(BlockJob *job)
job->driver->abort(job);
}
}
- job->cb(job->opaque, job->ret);
+
+ if (job->cb) {
+ job->cb(job->opaque, job->ret);
+ }
+ if (block_job_is_cancelled(job)) {
+ block_job_event_cancelled(job);
+ } else {
+ const char *msg = NULL;
+ if (job->ret < 0) {
+ msg = strerror(-job->ret);
+ }
+ block_job_event_completed(job, msg);
+ }
+
if (job->txn) {
block_job_txn_unref(job->txn);
}
@@ -330,6 +357,8 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
void block_job_complete(BlockJob *job, Error **errp)
{
+ /* Should not be reachable via external interface for internal jobs */
+ assert(job->id);
if (job->pause_count || job->cancelled || !job->driver->complete) {
error_setg(errp, "The active block job '%s' cannot be completed",
job->id);
@@ -344,11 +373,22 @@ void block_job_pause(BlockJob *job)
job->pause_count++;
}
+void block_job_user_pause(BlockJob *job)
+{
+ job->user_paused = true;
+ block_job_pause(job);
+}
+
static bool block_job_should_pause(BlockJob *job)
{
return job->pause_count > 0;
}
+bool block_job_user_paused(BlockJob *job)
+{
+ return job ? job->user_paused : 0;
+}
+
void coroutine_fn block_job_pause_point(BlockJob *job)
{
if (!block_job_should_pause(job)) {
@@ -385,6 +425,14 @@ void block_job_resume(BlockJob *job)
block_job_enter(job);
}
+void block_job_user_resume(BlockJob *job)
+{
+ if (job && job->user_paused && job->pause_count > 0) {
+ job->user_paused = false;
+ block_job_resume(job);
+ }
+}
+
void block_job_enter(BlockJob *job)
{
if (job->co && !job->busy) {
@@ -510,9 +558,15 @@ void block_job_yield(BlockJob *job)
block_job_pause_point(job);
}
-BlockJobInfo *block_job_query(BlockJob *job)
+BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
- BlockJobInfo *info = g_new0(BlockJobInfo, 1);
+ BlockJobInfo *info;
+
+ if (block_job_is_internal(job)) {
+ error_setg(errp, "Cannot query QEMU internal jobs");
+ return NULL;
+ }
+ info = g_new0(BlockJobInfo, 1);
info->type = g_strdup(BlockJobType_lookup[job->driver->job_type]);
info->device = g_strdup(job->id);
info->len = job->len;
@@ -533,8 +587,12 @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
}
}
-void block_job_event_cancelled(BlockJob *job)
+static void block_job_event_cancelled(BlockJob *job)
{
+ if (block_job_is_internal(job)) {
+ return;
+ }
+
qapi_event_send_block_job_cancelled(job->driver->job_type,
job->id,
job->len,
@@ -543,8 +601,12 @@ void block_job_event_cancelled(BlockJob *job)
&error_abort);
}
-void block_job_event_completed(BlockJob *job, const char *msg)
+static void block_job_event_completed(BlockJob *job, const char *msg)
{
+ if (block_job_is_internal(job)) {
+ return;
+ }
+
qapi_event_send_block_job_completed(job->driver->job_type,
job->id,
job->len,
@@ -559,6 +621,10 @@ void block_job_event_ready(BlockJob *job)
{
job->ready = true;
+ if (block_job_is_internal(job)) {
+ return;
+ }
+
qapi_event_send_block_job_ready(job->driver->job_type,
job->id,
job->len,
@@ -589,14 +655,15 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
default:
abort();
}
- qapi_event_send_block_job_error(job->id,
- is_read ? IO_OPERATION_TYPE_READ :
- IO_OPERATION_TYPE_WRITE,
- action, &error_abort);
+ if (!block_job_is_internal(job)) {
+ qapi_event_send_block_job_error(job->id,
+ is_read ? IO_OPERATION_TYPE_READ :
+ IO_OPERATION_TYPE_WRITE,
+ action, &error_abort);
+ }
if (action == BLOCK_ERROR_ACTION_STOP) {
/* make the pause user visible, which will be resumed from QMP. */
- job->user_paused = true;
- block_job_pause(job);
+ block_job_user_pause(job);
block_job_iostatus_set_err(job, error);
}
return action;
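
The net effect is that completion and cancellation events, plus the optional
callback, are now emitted centrally from block_job_completed_single(), and
callers pick job visibility via the new flags argument. Condensed to its two
modes (signature as introduced by this patch; the driver and callback names
are placeholders):

    /* User-visible job: needs an ID (explicit or derived from the device
     * name) and emits BLOCK_JOB_COMPLETED/CANCELLED QMP events. */
    job = block_job_create(job_id, &some_job_driver, bs, speed,
                           BLOCK_JOB_DEFAULT, NULL, NULL, errp);

    /* Internal job (e.g. block/replication.c): job_id must be NULL; it is
     * skipped by query-block-jobs and emits no QMP events. */
    job = block_job_create(NULL, &some_job_driver, bs, speed,
                           BLOCK_JOB_INTERNAL, completion_cb, opaque, errp);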
diff --git a/cpu-exec.c b/cpu-exec.c
index 3e408865a8..4188fed3c6 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -150,11 +150,13 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
&& qemu_log_in_addr_range(itb->pc)) {
+ qemu_log_lock();
#if defined(TARGET_I386)
log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
log_cpu_state(cpu, 0);
#endif
+ qemu_log_unlock();
}
#endif /* DEBUG_DISAS */
diff --git a/docs/specs/acpi_mem_hotplug.txt b/docs/specs/acpi_mem_hotplug.txt
index 3df3620ce4..cb26dd27c4 100644
--- a/docs/specs/acpi_mem_hotplug.txt
+++ b/docs/specs/acpi_mem_hotplug.txt
@@ -4,6 +4,9 @@ QEMU<->ACPI BIOS memory hotplug interface
ACPI BIOS GPE.3 handler is dedicated for notifying OS about memory hot-add
and hot-remove events.
+ACPI BIOS GPE.4 handler is dedicated for notifying OS about nvdimm device
+hot-add and hot-remove events.
+
Memory hot-plug interface (IO port 0xa00-0xa17, 1-4 byte access):
---------------------------------------------------------------
0xa00:
diff --git a/docs/specs/acpi_nvdimm.txt b/docs/specs/acpi_nvdimm.txt
index 0fdd251fc0..4aa5e3de29 100644
--- a/docs/specs/acpi_nvdimm.txt
+++ b/docs/specs/acpi_nvdimm.txt
@@ -127,6 +127,58 @@ _DSM process diagram:
| result from the page | | |
+--------------------------+ +--------------+
- _FIT implementation
- -------------------
- TODO (will fill it when nvdimm hotplug is introduced)
+Device Handle Reservation
+-------------------------
+As mentioned above, bytes 0 ~ 3 in the DSM memory hold the NVDIMM device
+handle. The handle is a purely QEMU-internal value: values in the range
+[0, 0xFFFF] indicate an nvdimm device (0 means the nvdimm root device,
+named NVDR), and other values are reserved for other purposes.
+
+Currently reserved handle:
+0x10000 is reserved for the QEMU-internal DSM function called on the root
+device.
+
+QEMU internal use only _DSM function
+------------------------------------
+The UUID 648B9CF2-CDA1-4312-8AD9-49C4AF32BD62 is reserved for the
+QEMU-internal DSM function.
+
+The following function is introduced by QEMU and used only internally:
+
+1) Read FIT
+   As only one page is reserved for NVDIMM ACPI, it is impossible to map
+   the whole FIT data into the guest's address space. This function is
+   used by the _FIT method to read a piece of FIT data from QEMU.
+
+ Input parameters:
+ Arg0 - UUID {set to 648B9CF2-CDA1-4312-8AD9-49C4AF32BD62}
+ Arg1 - Revision ID (set to 1)
+ Arg2 - Function Index, 0x1
+ Arg3 - A package containing a buffer whose layout is as follows:
+
+ +----------+-------------+-------------+-----------------------------------+
+ | Field    | Byte Length | Byte Offset | Description                       |
+ +----------+-------------+-------------+-----------------------------------+
+ | offset   | 4           | 0           | the offset of the FIT buffer      |
+ +----------+-------------+-------------+-----------------------------------+
+
+ Output:
+ +----------+-------------+-------------+-----------------------------------+
+ | Field    | Byte Length | Byte Offset | Description                       |
+ +----------+-------------+-------------+-----------------------------------+
+ |          |             |             | return status codes               |
+ |          |             |             | 0x100 indicates the FIT has been  |
+ | status   | 4           | 0           | updated                           |
+ |          |             |             | others follow Chapter 3 in DSM    |
+ |          |             |             | Spec Rev1                         |
+ +----------+-------------+-------------+-----------------------------------+
+ | fit data | Varies      | 4           | FIT data                          |
+ |          |             |             |                                   |
+ +----------+-------------+-------------+-----------------------------------+
+
+ The FIT offset is maintained by the caller itself; the current offset
+ plus the length returned by the function is the next offset to read.
+ When all the FIT data has been read out, zero length is returned.
+
+ If it returns 0x100, OSPM should restart reading the FIT (from offset 0
+ again).
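
Putting the pieces together, the read loop expected of the _FIT caller looks
roughly like the following pseudocode (RFIT stands for the function-index-0x1
call described above; the helper names are illustrative):

    offset = 0;
    clear(fit);
    for (;;) {
        out = RFIT(offset);            /* QEMU internal _DSM, function 0x1 */
        if (out.status == 0x100) {     /* FIT changed while being read */
            offset = 0;                /* restart from the beginning */
            clear(fit);
            continue;
        }
        if (out.status != 0)           /* per Chapter 3 of DSM Spec Rev1 */
            fail();
        if (length(out.fit_data) == 0)
            break;                     /* the whole FIT has been read */
        append(fit, out.fit_data);
        offset += length(out.fit_data);
    }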
diff --git a/exec.c b/exec.c
index f3c2770d54..3d867f166c 100644
--- a/exec.c
+++ b/exec.c
@@ -911,11 +911,13 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...)
fprintf(stderr, "\n");
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
if (qemu_log_separate()) {
+ qemu_log_lock();
qemu_log("qemu: fatal: ");
qemu_log_vprintf(fmt, ap2);
qemu_log("\n");
log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
qemu_log_flush();
+ qemu_log_unlock();
qemu_log_close();
}
va_end(ap2);
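
Both hunks apply the same new pattern from include/qemu/log.h: a multi-line
burst of log output is bracketed by qemu_log_lock()/qemu_log_unlock() so that
concurrent threads cannot interleave their lines in the log file. The shape
of the pattern, as used above (the logged content here is illustrative):

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
        qemu_log_lock();               /* hold the log across all lines */
        qemu_log("qemu: fatal: ...\n");
        log_cpu_state(cpu, 0);         /* may emit many lines itself */
        qemu_log_unlock();
    }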
diff --git a/hw/acpi/Makefile.objs b/hw/acpi/Makefile.objs
index 4b7da6639f..489e63bb75 100644
--- a/hw/acpi/Makefile.objs
+++ b/hw/acpi/Makefile.objs
@@ -3,7 +3,7 @@ common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu.o
-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
+common-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o
common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
common-obj-$(CONFIG_ACPI) += aml-build.o
diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c
index 7e74ce4460..651e2e94ea 100644
--- a/hw/acpi/ipmi.c
+++ b/hw/acpi/ipmi.c
@@ -99,6 +99,7 @@ void build_acpi_ipmi_devices(Aml *scope, BusState *bus)
ii = IPMI_INTERFACE(obj);
iic = IPMI_INTERFACE_GET_CLASS(obj);
+ memset(&info, 0, sizeof(info));
iic->get_fwinfo(ii, &info);
aml_append(scope, aml_ipmi_device(&info));
}
diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c
index ec4e64b361..70f64517fd 100644
--- a/hw/acpi/memory_hotplug.c
+++ b/hw/acpi/memory_hotplug.c
@@ -2,6 +2,7 @@
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/pc-hotplug.h"
#include "hw/mem/pc-dimm.h"
+#include "hw/mem/nvdimm.h"
#include "hw/boards.h"
#include "hw/qdev-core.h"
#include "trace.h"
@@ -232,11 +233,8 @@ void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
DeviceState *dev, Error **errp)
{
MemStatus *mdev;
- DeviceClass *dc = DEVICE_GET_CLASS(dev);
-
- if (!dc->hotpluggable) {
- return;
- }
+ AcpiEventStatusBits event;
+ bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
mdev = acpi_memory_slot_status(mem_st, dev, errp);
if (!mdev) {
@@ -244,10 +242,23 @@ void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
}
mdev->dimm = dev;
- mdev->is_enabled = true;
+
+ /*
+     * Do not set is_enabled and is_inserting if the slot is plugged with
+     * an nvdimm device, to stop OSPM from inquiring about the slot's
+     * memory region.
+ */
+ if (is_nvdimm) {
+ event = ACPI_NVDIMM_HOTPLUG_STATUS;
+ } else {
+ mdev->is_enabled = true;
+ event = ACPI_MEMORY_HOTPLUG_STATUS;
+ }
+
if (dev->hotplugged) {
- mdev->is_inserting = true;
- acpi_send_event(DEVICE(hotplug_dev), ACPI_MEMORY_HOTPLUG_STATUS);
+ if (!is_nvdimm) {
+ mdev->is_inserting = true;
+ }
+ acpi_send_event(DEVICE(hotplug_dev), event);
}
}
@@ -262,6 +273,8 @@ void acpi_memory_unplug_request_cb(HotplugHandler *hotplug_dev,
return;
}
+ /* nvdimm device hot unplug is not supported yet. */
+ assert(!object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM));
mdev->is_removing = true;
acpi_send_event(DEVICE(hotplug_dev), ACPI_MEMORY_HOTPLUG_STATUS);
}
@@ -276,6 +289,8 @@ void acpi_memory_unplug_cb(MemHotplugState *mem_st,
return;
}
+ /* nvdimm device hot unplug is not supported yet. */
+ assert(!object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM));
mdev->is_enabled = false;
mdev->dimm = NULL;
}
diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index e486128aa1..602ec54485 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -289,8 +289,6 @@ static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
NvdimmNfitMemDev *nfit_memdev;
- uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
- NULL);
uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
NULL);
int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
@@ -314,7 +312,8 @@ nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
/* The memory region on the device. */
nfit_memdev->region_len = cpu_to_le64(size);
- nfit_memdev->region_dpa = cpu_to_le64(addr);
+ /* The device address starts from 0. */
+ nfit_memdev->region_dpa = cpu_to_le64(0);
/* Only one interleave for PMEM. */
nfit_memdev->interleave_ways = cpu_to_le16(1);
@@ -349,8 +348,9 @@ static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
(DSM) in DSM Spec Rev1.*/);
}
-static GArray *nvdimm_build_device_structure(GSList *device_list)
+static GArray *nvdimm_build_device_structure(void)
{
+ GSList *device_list = nvdimm_get_plugged_device_list();
GArray *structures = g_array_new(false, true /* clear */, 1);
for (; device_list; device_list = device_list->next) {
@@ -368,28 +368,58 @@ static GArray *nvdimm_build_device_structure(GSList *device_list)
/* build NVDIMM Control Region Structure. */
nvdimm_build_structure_dcr(structures, dev);
}
+ g_slist_free(device_list);
return structures;
}
-static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
+static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
+{
+ qemu_mutex_init(&fit_buf->lock);
+ fit_buf->fit = g_array_new(false, true /* clear */, 1);
+}
+
+static void nvdimm_build_fit_buffer(NvdimmFitBuffer *fit_buf)
+{
+ qemu_mutex_lock(&fit_buf->lock);
+ g_array_free(fit_buf->fit, true);
+ fit_buf->fit = nvdimm_build_device_structure();
+ fit_buf->dirty = true;
+ qemu_mutex_unlock(&fit_buf->lock);
+}
+
+void nvdimm_acpi_hotplug(AcpiNVDIMMState *state)
+{
+ nvdimm_build_fit_buffer(&state->fit_buf);
+}
+
+static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets,
GArray *table_data, BIOSLinker *linker)
{
- GArray *structures = nvdimm_build_device_structure(device_list);
+ NvdimmFitBuffer *fit_buf = &state->fit_buf;
unsigned int header;
+ qemu_mutex_lock(&fit_buf->lock);
+
+ /* No NVDIMM device plugged? Then there is no NFIT to build. */
+ if (!fit_buf->fit->len) {
+ goto exit;
+ }
+
acpi_add_table(table_offsets, table_data);
/* NFIT header. */
header = table_data->len;
acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
/* NVDIMM device structures. */
- g_array_append_vals(table_data, structures->data, structures->len);
+ g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);
build_header(linker, table_data,
(void *)(table_data->data + header), "NFIT",
- sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL);
- g_array_free(structures, true);
+ sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL);
+
+exit:
+ qemu_mutex_unlock(&fit_buf->lock);
}
struct NvdimmDsmIn {
@@ -466,6 +496,22 @@ typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
offsetof(NvdimmDsmIn, arg3) > 4096);
+struct NvdimmFuncReadFITIn {
+ uint32_t offset; /* the offset into the FIT buffer. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
+ offsetof(NvdimmDsmIn, arg3) > 4096);
+
+struct NvdimmFuncReadFITOut {
+ /* the size of the buffer filled by QEMU. */
+ uint32_t len;
+ uint32_t func_ret_status; /* return status code. */
+ uint8_t fit[0]; /* the FIT data. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > 4096);
+
static void
nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
{
@@ -486,6 +532,74 @@ nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
}
+#define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000
+
+/* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
+static void nvdimm_dsm_func_read_fit(AcpiNVDIMMState *state, NvdimmDsmIn *in,
+ hwaddr dsm_mem_addr)
+{
+ NvdimmFitBuffer *fit_buf = &state->fit_buf;
+ NvdimmFuncReadFITIn *read_fit;
+ NvdimmFuncReadFITOut *read_fit_out;
+ GArray *fit;
+ uint32_t read_len = 0, func_ret_status;
+ int size;
+
+ read_fit = (NvdimmFuncReadFITIn *)in->arg3;
+ le32_to_cpus(&read_fit->offset);
+
+ qemu_mutex_lock(&fit_buf->lock);
+ fit = fit_buf->fit;
+
+ nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n",
+ read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No");
+
+ if (read_fit->offset > fit->len) {
+ func_ret_status = 3 /* Invalid Input Parameters */;
+ goto exit;
+ }
+
+ /* This is the first read of the FIT. */
+ if (!read_fit->offset) {
+ fit_buf->dirty = false;
+ } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */
+ func_ret_status = 0x100 /* fit changed */;
+ goto exit;
+ }
+
+ func_ret_status = 0 /* Success */;
+ read_len = MIN(fit->len - read_fit->offset,
+ 4096 - sizeof(NvdimmFuncReadFITOut));
+
+exit:
+ size = sizeof(NvdimmFuncReadFITOut) + read_len;
+ read_fit_out = g_malloc(size);
+
+ read_fit_out->len = cpu_to_le32(size);
+ read_fit_out->func_ret_status = cpu_to_le32(func_ret_status);
+ memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len);
+
+ cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size);
+
+ g_free(read_fit_out);
+ qemu_mutex_unlock(&fit_buf->lock);
+}
+
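
The read length computed in nvdimm_dsm_func_read_fit() caps each reply at one 4 KiB page minus the reply header (the two uint32_t fields of NvdimmFuncReadFITOut). A self-contained sketch of that arithmetic, assuming the same 4096-byte page limit:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE 4096u
    #define HDR  (2 * sizeof(uint32_t))   /* len + func_ret_status */

    /* Caller has already checked that offset <= fit_len. */
    static uint32_t rfit_chunk(uint32_t fit_len, uint32_t offset)
    {
        uint32_t room = PAGE - HDR;
        uint32_t left = fit_len - offset;

        return left < room ? left : room;
    }

    int main(void)
    {
        printf("%u\n", rfit_chunk(10000, 0));     /* 4088: full payload */
        printf("%u\n", rfit_chunk(10000, 8176));  /* 1824: the tail */
        return 0;
    }
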
+static void nvdimm_dsm_reserved_root(AcpiNVDIMMState *state, NvdimmDsmIn *in,
+ hwaddr dsm_mem_addr)
+{
+ switch (in->function) {
+ case 0x0:
+ nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
+ return;
+ case 0x1 /* Read FIT */:
+ nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
+ return;
+ }
+
+ nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+}
+
static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
/*
@@ -643,8 +757,8 @@ static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
return;
}
- assert(sizeof(*in) + sizeof(*set_label_data) + set_label_data->length <=
- 4096);
+ assert(offsetof(NvdimmDsmIn, arg3) +
+ sizeof(*set_label_data) + set_label_data->length <= 4096);
nvc->write_label_data(nvdimm, set_label_data->in_buf,
set_label_data->length, set_label_data->offset);
@@ -712,6 +826,7 @@ nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
static void
nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
+ AcpiNVDIMMState *state = opaque;
NvdimmDsmIn *in;
hwaddr dsm_mem_addr = val;
@@ -739,6 +854,11 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
goto exit;
}
+ if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) {
+ nvdimm_dsm_reserved_root(state, in, dsm_mem_addr);
+ goto exit;
+ }
+
/* Handle 0 is reserved for NVDIMM Root Device. */
if (!in->handle) {
nvdimm_dsm_root(in, dsm_mem_addr);
@@ -772,23 +892,105 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
state->dsm_mem->len);
+
+ nvdimm_init_fit_buffer(&state->fit_buf);
}
-#define NVDIMM_COMMON_DSM "NCAL"
-#define NVDIMM_ACPI_MEM_ADDR "MEMA"
+#define NVDIMM_COMMON_DSM "NCAL"
+#define NVDIMM_ACPI_MEM_ADDR "MEMA"
+
+#define NVDIMM_DSM_MEMORY "NRAM"
+#define NVDIMM_DSM_IOPORT "NPIO"
+
+#define NVDIMM_DSM_NOTIFY "NTFI"
+#define NVDIMM_DSM_HANDLE "HDLE"
+#define NVDIMM_DSM_REVISION "REVS"
+#define NVDIMM_DSM_FUNCTION "FUNC"
+#define NVDIMM_DSM_ARG3 "FARG"
+
+#define NVDIMM_DSM_OUT_BUF_SIZE "RLEN"
+#define NVDIMM_DSM_OUT_BUF "ODAT"
+
+#define NVDIMM_DSM_RFIT_STATUS "RSTA"
+
+#define NVDIMM_QEMU_RSVD_UUID "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"
static void nvdimm_build_common_dsm(Aml *dev)
{
- Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *result_size;
+ Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2;
Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
- Aml *pckg, *pckg_index, *pckg_buf;
+ Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size;
uint8_t byte_list[1];
method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
uuid = aml_arg(0);
function = aml_arg(2);
handle = aml_arg(4);
- dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);
+ dsm_mem = aml_local(6);
+ dsm_out_buf = aml_local(7);
+
+ aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem));
+
+ /* map DSM memory and IO into ACPI namespace. */
+ aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, AML_SYSTEM_IO,
+ aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
+ aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY,
+ AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn)));
+
+ /*
+ * DSM notifier:
+ * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to
+ * emulate the access.
+ *
+ * It is an IO port, so accessing it causes a VM-exit and control is
+ * transferred to QEMU.
+ */
+ field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY,
+ sizeof(uint32_t) * BITS_PER_BYTE));
+ aml_append(method, field);
+
+ /*
+ * DSM input:
+ * NVDIMM_DSM_HANDLE: store the device's handle; it is zero if the
+ * _DSM call happens on the NVDIMM Root Device.
+ * NVDIMM_DSM_REVISION: store the Arg1 of _DSM call.
+ * NVDIMM_DSM_FUNCTION: store the Arg2 of _DSM call.
+ * NVDIMM_DSM_ARG3: store the Arg3 of _DSM call which is a Package
+ * containing function-specific arguments.
+ *
+ * They are backed by RAM on the host, so accessing them never causes
+ * a VM-exit.
+ */
+ field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE,
+ sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_REVISION,
+ sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION,
+ sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_ARG3,
+ (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
+ aml_append(method, field);
+
+ /*
+ * DSM output:
+ * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU.
+ * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result.
+ *
+ * Since the page is reused for both input and output, the input data
+ * is lost once a new result is stored into ODAT, so we must fetch
+ * all the input data before writing the result.
+ */
+ field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE,
+ sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF,
+ (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
+ aml_append(method, field);
/*
* do not support any method if DSM memory address has not been
@@ -804,9 +1006,15 @@ static void nvdimm_build_common_dsm(Aml *dev)
/* UUID for NVDIMM Root Device */, expected_uuid));
aml_append(method, ifctx);
elsectx = aml_else();
- aml_append(elsectx, aml_store(
+ ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)));
+ aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID
+ /* UUID for QEMU internal use */), expected_uuid));
+ aml_append(elsectx, ifctx);
+ elsectx2 = aml_else();
+ aml_append(elsectx2, aml_store(
aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
/* UUID for NVDIMM Devices */, expected_uuid));
+ aml_append(elsectx, elsectx2);
aml_append(method, elsectx);
uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));
@@ -832,9 +1040,9 @@ static void nvdimm_build_common_dsm(Aml *dev)
* it reserves 0 for root device and is the handle for NVDIMM devices.
* See the comments in nvdimm_slot_to_handle().
*/
- aml_append(method, aml_store(handle, aml_name("HDLE")));
- aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
- aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));
+ aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE)));
+ aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION)));
+ aml_append(method, aml_store(aml_arg(2), aml_name(NVDIMM_DSM_FUNCTION)));
/*
* The fourth parameter (Arg3) of _DSM is a package which contains
@@ -852,24 +1060,26 @@ static void nvdimm_build_common_dsm(Aml *dev)
pckg_buf = aml_local(3);
aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
- aml_append(ifctx, aml_store(pckg_buf, aml_name("ARG3")));
+ aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3)));
aml_append(method, ifctx);
/*
* tell QEMU the real address of the DSM memory, so that QEMU
* takes control and fills the result into the DSM memory.
*/
- aml_append(method, aml_store(dsm_mem, aml_name("NTFI")));
-
- result_size = aml_local(1);
- aml_append(method, aml_store(aml_name("RLEN"), result_size));
- aml_append(method, aml_store(aml_shiftleft(result_size, aml_int(3)),
- result_size));
- aml_append(method, aml_create_field(aml_name("ODAT"), aml_int(0),
- result_size, "OBUF"));
+ aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY)));
+
+ dsm_out_buf_size = aml_local(1);
+ /* RLEN is not included in the payload returned to guest. */
+ aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE),
+ aml_int(4), dsm_out_buf_size));
+ aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)),
+ dsm_out_buf_size));
+ aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF),
+ aml_int(0), dsm_out_buf_size, "OBUF"));
aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
- aml_arg(6)));
- aml_append(method, aml_return(aml_arg(6)));
+ dsm_out_buf));
+ aml_append(method, aml_return(dsm_out_buf));
aml_append(dev, method);
}
@@ -884,12 +1094,110 @@ static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
aml_append(dev, method);
}
-static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
+static void nvdimm_build_fit(Aml *dev)
{
- for (; device_list; device_list = device_list->next) {
- DeviceState *dev = device_list->data;
- int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
- NULL);
+ Aml *method, *pkg, *buf, *buf_size, *offset, *call_result;
+ Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit;
+
+ buf = aml_local(0);
+ buf_size = aml_local(1);
+ fit = aml_local(2);
+
+ aml_append(dev, aml_create_dword_field(aml_buffer(4, NULL),
+ aml_int(0), NVDIMM_DSM_RFIT_STATUS));
+
+ /* build helper function, RFIT. */
+ method = aml_method("RFIT", 1, AML_SERIALIZED);
+ aml_append(method, aml_create_dword_field(aml_buffer(4, NULL),
+ aml_int(0), "OFST"));
+
+ /* prepare input package. */
+ pkg = aml_package(1);
+ aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
+ aml_append(pkg, aml_name("OFST"));
+
+ /* call Read_FIT function. */
+ call_result = aml_call5(NVDIMM_COMMON_DSM,
+ aml_touuid(NVDIMM_QEMU_RSVD_UUID),
+ aml_int(1) /* Revision 1 */,
+ aml_int(0x1) /* Read FIT */,
+ pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT));
+ aml_append(method, aml_store(call_result, buf));
+
+ /* handle _DSM result. */
+ aml_append(method, aml_create_dword_field(buf,
+ aml_int(0) /* offset at byte 0 */, "STAU"));
+
+ aml_append(method, aml_store(aml_name("STAU"),
+ aml_name(NVDIMM_DSM_RFIT_STATUS)));
+
+ /* if something went wrong during _DSM. */
+ ifcond = aml_equal(aml_int(0 /* Success */), aml_name("STAU"));
+ ifctx = aml_if(aml_lnot(ifcond));
+ aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_store(aml_sizeof(buf), buf_size));
+ aml_append(method, aml_subtract(buf_size,
+ aml_int(4) /* the size of "STAU" */,
+ buf_size));
+
+ /* if we have reached the end of the FIT. */
+ ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
+ aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_store(aml_shiftleft(buf_size, aml_int(3)),
+ buf_size));
+ aml_append(method, aml_create_field(buf,
+ aml_int(4 * BITS_PER_BYTE), /* offset at byte 4. */
+ buf_size, "BUFF"));
+ aml_append(method, aml_return(aml_name("BUFF")));
+ aml_append(dev, method);
+
+ /* build _FIT. */
+ method = aml_method("_FIT", 0, AML_SERIALIZED);
+ offset = aml_local(3);
+
+ aml_append(method, aml_store(aml_buffer(0, NULL), fit));
+ aml_append(method, aml_store(aml_int(0), offset));
+
+ whilectx = aml_while(aml_int(1));
+ aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf));
+ aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size));
+
+ /*
+ * if the FIT buffer was changed during RFIT, read again from the
+ * beginning.
+ */
+ ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS),
+ aml_int(0x100 /* fit changed */)));
+ aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit));
+ aml_append(ifctx, aml_store(aml_int(0), offset));
+ aml_append(whilectx, ifctx);
+
+ elsectx = aml_else();
+
+ /* finish the FIT read if no data was read out. */
+ ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
+ aml_append(ifctx, aml_return(fit));
+ aml_append(elsectx, ifctx);
+
+ /* update the offset. */
+ aml_append(elsectx, aml_add(offset, buf_size, offset));
+ /* append the data we read out to the fit buffer. */
+ aml_append(elsectx, aml_concatenate(fit, buf, fit));
+ aml_append(whilectx, elsectx);
+ aml_append(method, whilectx);
+
+ aml_append(dev, method);
+}
+
+static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
+{
+ uint32_t slot;
+
+ for (slot = 0; slot < ram_slots; slot++) {
uint32_t handle = nvdimm_slot_to_handle(slot);
Aml *nvdimm_dev;
@@ -910,11 +1218,11 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
}
}
-static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
- GArray *table_data, BIOSLinker *linker,
- GArray *dsm_dma_arrea)
+static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
+ BIOSLinker *linker, GArray *dsm_dma_arrea,
+ uint32_t ram_slots)
{
- Aml *ssdt, *sb_scope, *dev, *field;
+ Aml *ssdt, *sb_scope, *dev;
int mem_addr_offset, nvdimm_ssdt;
acpi_add_table(table_offsets, table_data);
@@ -939,69 +1247,13 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
*/
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
- /* map DSM memory and IO into ACPI namespace. */
- aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO,
- aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
- aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
- aml_name(NVDIMM_ACPI_MEM_ADDR), sizeof(NvdimmDsmIn)));
-
- /*
- * DSM notifier:
- * NTFI: write the address of DSM memory and notify QEMU to emulate
- * the access.
- *
- * It is the IO port so that accessing them will cause VM-exit, the
- * control will be transferred to QEMU.
- */
- field = aml_field("NPIO", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("NTFI",
- sizeof(uint32_t) * BITS_PER_BYTE));
- aml_append(dev, field);
-
- /*
- * DSM input:
- * HDLE: store device's handle, it's zero if the _DSM call happens
- * on NVDIMM Root Device.
- * REVS: store the Arg1 of _DSM call.
- * FUNC: store the Arg2 of _DSM call.
- * ARG3: store the Arg3 of _DSM call.
- *
- * They are RAM mapping on host so that these accesses never cause
- * VM-EXIT.
- */
- field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("HDLE",
- sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("REVS",
- sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("FUNC",
- sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("ARG3",
- (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
- aml_append(dev, field);
-
- /*
- * DSM output:
- * RLEN: the size of the buffer filled by QEMU.
- * ODAT: the buffer QEMU uses to store the result.
- *
- * Since the page is reused by both input and out, the input data
- * will be lost after storing new result into ODAT so we should fetch
- * all the input data before writing the result.
- */
- field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("RLEN",
- sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("ODAT",
- (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
- aml_append(dev, field);
-
nvdimm_build_common_dsm(dev);
/* 0 is reserved for root device. */
nvdimm_build_device_dsm(dev, 0);
+ nvdimm_build_fit(dev);
- nvdimm_build_nvdimm_devices(device_list, dev);
+ nvdimm_build_nvdimm_devices(dev, ram_slots);
aml_append(sb_scope, dev);
aml_append(ssdt, sb_scope);
@@ -1026,17 +1278,17 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
}
void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
- BIOSLinker *linker, GArray *dsm_dma_arrea)
+ BIOSLinker *linker, AcpiNVDIMMState *state,
+ uint32_t ram_slots)
{
- GSList *device_list;
+ nvdimm_build_nfit(state, table_offsets, table_data, linker);
- /* no NVDIMM device is plugged. */
- device_list = nvdimm_get_plugged_device_list();
- if (!device_list) {
- return;
+ /*
+ * An NVDIMM device is allowed to be plugged only if there is an
+ * available slot.
+ */
+ if (ram_slots) {
+ nvdimm_build_ssdt(table_offsets, table_data, linker, state->dsm_mem,
+ ram_slots);
}
- nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
- nvdimm_build_ssdt(device_list, table_offsets, table_data, linker,
- dsm_dma_arrea);
- g_slist_free(device_list);
}
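
Taken together, RFIT and _FIT implement a paged read that restarts whenever the host reports status 0x100 (FIT changed) part-way through. A self-contained C model of the guest-side loop under that protocol; the rfit_fn callback and fake_fit backing store are stand-ins for the real _DSM call, not AML:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define ST_SUCCESS     0x0
    #define ST_FIT_CHANGED 0x100

    /* Stand-in for the RFIT _DSM call: fills 'out' with up to 'cap'
     * bytes of FIT data starting at 'offset', returns a status code. */
    typedef uint32_t (*rfit_fn)(uint32_t offset, uint8_t *out, uint32_t cap,
                                uint32_t *got);

    static uint8_t *read_whole_fit(rfit_fn rfit, uint32_t *total)
    {
        uint8_t *fit = NULL;
        uint8_t chunk[4088];          /* one page minus the reply header */
        uint32_t off = 0, got;

        for (;;) {
            uint32_t st = rfit(off, chunk, sizeof(chunk), &got);

            if (st == ST_FIT_CHANGED) {   /* hotplug raced us: restart */
                free(fit);
                fit = NULL;
                off = 0;
                continue;
            }
            if (st != ST_SUCCESS) {
                free(fit);
                return NULL;
            }
            if (got == 0) {               /* end of the FIT */
                *total = off;
                return fit;
            }
            fit = realloc(fit, off + got);
            memcpy(fit + off, chunk, got);
            off += got;
        }
    }

    static uint8_t fake_fit[6000];        /* pretend host-side FIT data */

    static uint32_t fake_rfit(uint32_t offset, uint8_t *out, uint32_t cap,
                              uint32_t *got)
    {
        uint32_t left = sizeof(fake_fit) - offset;

        *got = left < cap ? left : cap;
        memcpy(out, fake_fit + offset, *got);
        return ST_SUCCESS;
    }

    int main(void)
    {
        uint32_t total = 0;
        uint8_t *fit = read_whole_fit(fake_rfit, &total);

        printf("read %u bytes\n", total);
        free(fit);
        return 0;
    }
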
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 704a763603..90ef557c8c 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -88,23 +88,28 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
*dataplane = NULL;
- if (!conf->iothread) {
- return;
- }
+ if (conf->iothread) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
+ error_setg(errp,
+ "device is incompatible with iothread "
+ "(transport does not support notifiers)");
+ return;
+ }
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ error_setg(errp, "ioeventfd is required for iothread");
+ return;
+ }
- /* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->ioeventfd_started) {
- error_setg(errp,
- "device is incompatible with dataplane "
- "(transport does not support notifiers)");
- return;
+ /* If dataplane is (re-)enabled while the guest is running, there
+ * could be block jobs that conflict.
+ */
+ if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
+ error_prepend(errp, "cannot start virtio-blk dataplane: ");
+ return;
+ }
}
-
- /* If dataplane is (re-)enabled while the guest is running there could be
- * block jobs that can conflict.
- */
- if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
- error_prepend(errp, "cannot start dataplane thread: ");
+ /* Don't try if transport does not support notifiers. */
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
return;
}
@@ -112,9 +117,13 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
s->vdev = vdev;
s->conf = conf;
- s->iothread = conf->iothread;
- object_ref(OBJECT(s->iothread));
- s->ctx = iothread_get_aio_context(s->iothread);
+ if (conf->iothread) {
+ s->iothread = conf->iothread;
+ object_ref(OBJECT(s->iothread));
+ s->ctx = iothread_get_aio_context(s->iothread);
+ } else {
+ s->ctx = qemu_get_aio_context();
+ }
s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
s->batch_notify_vqs = bitmap_new(conf->num_queues);
@@ -124,14 +133,19 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
+ VirtIOBlock *vblk;
+
if (!s) {
return;
}
- virtio_blk_data_plane_stop(s);
+ vblk = VIRTIO_BLK(s->vdev);
+ assert(!vblk->dataplane_started);
g_free(s->batch_notify_vqs);
qemu_bh_delete(s->bh);
- object_unref(OBJECT(s->iothread));
+ if (s->iothread) {
+ object_unref(OBJECT(s->iothread));
+ }
g_free(s);
}
@@ -147,17 +161,18 @@ static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
}
/* Context: QEMU global mutex held */
-void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
+int virtio_blk_data_plane_start(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
+ VirtIOBlock *vblk = VIRTIO_BLK(vdev);
+ VirtIOBlockDataPlane *s = vblk->dataplane;
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
unsigned i;
unsigned nvqs = s->conf->num_queues;
int r;
if (vblk->dataplane_started || s->starting) {
- return;
+ return 0;
}
s->starting = true;
@@ -204,20 +219,22 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
virtio_blk_data_plane_handle_output);
}
aio_context_release(s->ctx);
- return;
+ return 0;
fail_guest_notifiers:
vblk->dataplane_disabled = true;
s->starting = false;
vblk->dataplane_started = true;
+ return -ENOSYS;
}
/* Context: QEMU global mutex held */
-void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
+void virtio_blk_data_plane_stop(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
+ VirtIOBlock *vblk = VIRTIO_BLK(vdev);
+ VirtIOBlockDataPlane *s = vblk->dataplane;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vblk));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
unsigned i;
unsigned nvqs = s->conf->num_queues;
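
virtio_blk_data_plane_start() now returns -ENOSYS on failure while still marking the dataplane started-but-disabled, so queue handlers fall back to the main loop instead of retrying on every kick. A reduced sketch of that tri-state contract, with stand-in types rather than the QEMU structs:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        bool started;   /* queues are being serviced (somewhere) */
        bool disabled;  /* dataplane setup failed; service in main loop */
    } DataPlaneState;

    static int dataplane_start(DataPlaneState *s, bool notifiers_ok)
    {
        if (s->started) {
            return 0;
        }
        if (!notifiers_ok) {
            /* Mark started-but-disabled so handlers run in the main
             * loop and we never retry on every queue kick. */
            s->disabled = true;
            s->started = true;
            return -ENOSYS;
        }
        s->started = true;
        return 0;
    }

    int main(void)
    {
        DataPlaneState s = { false, false };
        int rc = dataplane_start(&s, false);

        printf("rc=%d disabled=%d\n", rc, s.disabled);
        return 0;
    }
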
diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h
index b1f0b95b32..db3f47b173 100644
--- a/hw/block/dataplane/virtio-blk.h
+++ b/hw/block/dataplane/virtio-blk.h
@@ -23,9 +23,9 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
VirtIOBlockDataPlane **dataplane,
Error **errp);
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq);
+int virtio_blk_data_plane_start(VirtIODevice *vdev);
+void virtio_blk_data_plane_stop(VirtIODevice *vdev);
+
#endif /* HW_DATAPLANE_VIRTIO_BLK_H */
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 37fe72bdcd..0c5fd27593 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -611,7 +611,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* dataplane here instead of waiting for .set_status().
*/
- virtio_blk_data_plane_start(s->dataplane);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_disabled) {
return;
}
@@ -687,11 +687,9 @@ static void virtio_blk_reset(VirtIODevice *vdev)
virtio_blk_free_request(req);
}
- if (s->dataplane) {
- virtio_blk_data_plane_stop(s->dataplane);
- }
aio_context_release(ctx);
+ assert(!s->dataplane_started);
blk_set_enable_write_cache(s->blk, s->original_wce);
}
@@ -789,9 +787,8 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
- if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
- VIRTIO_CONFIG_S_DRIVER_OK))) {
- virtio_blk_data_plane_stop(s->dataplane);
+ if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
+ assert(!s->dataplane_started);
}
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
@@ -919,7 +916,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
for (i = 0; i < conf->num_queues; i++) {
- virtio_add_queue_aio(vdev, 128, virtio_blk_handle_output);
+ virtio_add_queue(vdev, 128, virtio_blk_handle_output);
}
virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
if (err != NULL) {
@@ -1002,6 +999,8 @@ static void virtio_blk_class_init(ObjectClass *klass, void *data)
vdc->reset = virtio_blk_reset;
vdc->save = virtio_blk_save_device;
vdc->load = virtio_blk_load_device;
+ vdc->start_ioeventfd = virtio_blk_data_plane_start;
+ vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
}
static const TypeInfo virtio_blk_info = {
diff --git a/hw/core/hotplug.c b/hw/core/hotplug.c
index 17ac986685..ab34c19461 100644
--- a/hw/core/hotplug.c
+++ b/hw/core/hotplug.c
@@ -35,6 +35,17 @@ void hotplug_handler_plug(HotplugHandler *plug_handler,
}
}
+void hotplug_handler_post_plug(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev,
+ Error **errp)
+{
+ HotplugHandlerClass *hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler);
+
+ if (hdc->post_plug) {
+ hdc->post_plug(plug_handler, plugged_dev, errp);
+ }
+}
+
void hotplug_handler_unplug_request(HotplugHandler *plug_handler,
DeviceState *plugged_dev,
Error **errp)
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index 57834423b9..d835e6259a 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -945,10 +945,21 @@ static void device_set_realized(Object *obj, bool value, Error **errp)
goto child_realize_fail;
}
}
+
if (dev->hotplugged) {
device_reset(dev);
}
dev->pending_deleted_event = false;
+ dev->realized = value;
+
+ if (hotplug_ctrl) {
+ hotplug_handler_post_plug(hotplug_ctrl, dev, &local_err);
+ }
+
+ if (local_err != NULL) {
+ dev->realized = value;
+ goto post_realize_fail;
+ }
} else if (!value && dev->realized) {
Error **local_errp = NULL;
QLIST_FOREACH(bus, &dev->child_bus, sibling) {
@@ -965,13 +976,14 @@ static void device_set_realized(Object *obj, bool value, Error **errp)
}
dev->pending_deleted_event = true;
DEVICE_LISTENER_CALL(unrealize, Reverse, dev);
- }
- if (local_err != NULL) {
- goto fail;
+ if (local_err != NULL) {
+ goto fail;
+ }
+
+ dev->realized = value;
}
- dev->realized = value;
return;
child_realize_fail:
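
The realize path now sets dev->realized before invoking the new post_plug hook, so the hook observes a fully realized device; that ordering is what lets nvdimm_acpi_hotplug() rebuild the FIT safely. A minimal standalone model of the ordering, with function pointers standing in for QOM class hooks:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Device Device;
    struct Device {
        bool realized;
        void (*plug)(Device *);       /* runs before realized is set */
        void (*post_plug)(Device *);  /* runs after realized is set */
    };

    static void device_set_realized(Device *dev)
    {
        if (dev->plug) {
            dev->plug(dev);
        }
        dev->realized = true;         /* now visible to late hooks */
        if (dev->post_plug) {
            dev->post_plug(dev);
        }
    }

    static void nvdimm_post_plug(Device *dev)
    {
        assert(dev->realized);        /* safe to rebuild the FIT here */
        puts("rebuild FIT");
    }

    int main(void)
    {
        Device d = { false, NULL, nvdimm_post_plug };

        device_set_realized(&d);
        return 0;
    }
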
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 5cd1da9a87..ce9cc93af4 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -2068,6 +2068,13 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
method = aml_method("_E03", 0, AML_NOTSERIALIZED);
aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH));
aml_append(scope, method);
+
+ if (pcms->acpi_nvdimm_state.is_enabled) {
+ method = aml_method("_E04", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_notify(aml_name("\\_SB.NVDR"),
+ aml_int(0x80)));
+ aml_append(scope, method);
+ }
}
aml_append(dsdt, scope);
@@ -2810,7 +2817,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
}
if (pcms->acpi_nvdimm_state.is_enabled) {
nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
- pcms->acpi_nvdimm_state.dsm_mem);
+ &pcms->acpi_nvdimm_state, machine->ram_slots);
}
/* Add tables supplied by user (if any) */
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index f56ea0f87b..c011552ac4 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1721,6 +1721,16 @@ out:
error_propagate(errp, local_err);
}
+static void pc_dimm_post_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(hotplug_dev);
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ nvdimm_acpi_hotplug(&pcms->acpi_nvdimm_state);
+ }
+}
+
static void pc_dimm_unplug_request(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -1734,6 +1744,12 @@ static void pc_dimm_unplug_request(HotplugHandler *hotplug_dev,
goto out;
}
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ error_setg(&local_err,
+ "nvdimm device hot unplug is not supported yet.");
+ goto out;
+ }
+
hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
hhc->unplug_request(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
@@ -1751,6 +1767,12 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
HotplugHandlerClass *hhc;
Error *local_err = NULL;
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ error_setg(&local_err,
+ "nvdimm device hot unplug is not supported yet.");
+ goto out;
+ }
+
hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
hhc->unplug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
@@ -1986,6 +2008,14 @@ static void pc_machine_device_plug_cb(HotplugHandler *hotplug_dev,
}
}
+static void pc_machine_device_post_plug_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ pc_dimm_post_plug(hotplug_dev, dev, errp);
+ }
+}
+
static void pc_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -2290,6 +2320,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
mc->reset = pc_machine_reset;
hc->pre_plug = pc_machine_device_pre_plug_cb;
hc->plug = pc_machine_device_plug_cb;
+ hc->post_plug = pc_machine_device_post_plug_cb;
hc->unplug_request = pc_machine_device_unplug_request_cb;
hc->unplug = pc_machine_device_unplug_cb;
nc->nmi_monitor_handler = x86_nmi;
diff --git a/hw/ipmi/Makefile.objs b/hw/ipmi/Makefile.objs
index a90318d5ba..1b422bbee0 100644
--- a/hw/ipmi/Makefile.objs
+++ b/hw/ipmi/Makefile.objs
@@ -1,5 +1,5 @@
common-obj-$(CONFIG_IPMI) += ipmi.o
common-obj-$(CONFIG_IPMI_LOCAL) += ipmi_bmc_sim.o
-common-obj-$(CONFIG_IPMI_LOCAL) += ipmi_bmc_extern.o
+common-obj-$(CONFIG_IPMI_EXTERN) += ipmi_bmc_extern.o
common-obj-$(CONFIG_ISA_IPMI_KCS) += isa_ipmi_kcs.o
common-obj-$(CONFIG_ISA_IPMI_BT) += isa_ipmi_bt.o
diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c
index f09f217e78..5cf1caa88a 100644
--- a/hw/ipmi/ipmi.c
+++ b/hw/ipmi/ipmi.c
@@ -51,7 +51,7 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
if (checkonly) {
return 0;
}
- qemu_system_powerdown_request();
+ qemu_system_shutdown_request();
return 0;
case IPMI_SEND_NMI:
@@ -61,9 +61,15 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
qmp_inject_nmi(NULL);
return 0;
+ case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP:
+ if (checkonly) {
+ return 0;
+ }
+ qemu_system_powerdown_request();
+ return 0;
+
case IPMI_POWERCYCLE_CHASSIS:
case IPMI_PULSE_DIAG_IRQ:
- case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP:
case IPMI_POWERON_CHASSIS:
default:
return IPMI_CC_COMMAND_NOT_SUPPORTED;
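
The IPMI change splits "graceful shutdown" (ACPI overtemp, which raises a guest-visible powerdown request) from the hard chassis power-off. A standalone sketch of the resulting dispatch, with the QEMU request functions replaced by printouts:

    #include <stdio.h>

    enum ipmi_op {
        IPMI_POWEROFF_CHASSIS,
        IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP,
    };

    /* checkonly probes capability without acting, as in ipmi_do_hw_op(). */
    static int do_hw_op(enum ipmi_op op, int checkonly)
    {
        switch (op) {
        case IPMI_POWEROFF_CHASSIS:
            if (!checkonly) {
                puts("qemu_system_shutdown_request(): hard stop");
            }
            return 0;
        case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP:
            if (!checkonly) {
                puts("qemu_system_powerdown_request(): guest-cooperative");
            }
            return 0;
        default:
            return -1; /* IPMI_CC_COMMAND_NOT_SUPPORTED */
        }
    }

    int main(void)
    {
        /* Capability probe first, then the graceful path. */
        if (do_hw_op(IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 1) == 0) {
            do_hw_op(IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0);
        }
        return 0;
    }
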
diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c
index 4b310e5eff..e8e3d250b6 100644
--- a/hw/ipmi/ipmi_bmc_extern.c
+++ b/hw/ipmi/ipmi_bmc_extern.c
@@ -54,7 +54,8 @@
#define VM_CAPABILITIES_IRQ 0x04
#define VM_CAPABILITIES_NMI 0x08
#define VM_CAPABILITIES_ATTN 0x10
-#define VM_CMD_FORCEOFF 0x09
+#define VM_CAPABILITIES_GRACEFUL_SHUTDOWN 0x20
+#define VM_CMD_GRACEFUL_SHUTDOWN 0x09
#define TYPE_IPMI_BMC_EXTERN "ipmi-bmc-extern"
#define IPMI_BMC_EXTERN(obj) OBJECT_CHECK(IPMIBmcExtern, (obj), \
@@ -276,8 +277,8 @@ static void handle_hw_op(IPMIBmcExtern *ibe, unsigned char hw_op)
k->do_hw_op(s, IPMI_SEND_NMI, 0);
break;
- case VM_CMD_FORCEOFF:
- qemu_system_shutdown_request();
+ case VM_CMD_GRACEFUL_SHUTDOWN:
+ k->do_hw_op(s, IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0);
break;
}
}
@@ -401,6 +402,10 @@ static void chr_event(void *opaque, int event)
if (k->do_hw_op(ibe->parent.intf, IPMI_POWEROFF_CHASSIS, 1) == 0) {
v |= VM_CAPABILITIES_POWER;
}
+ if (k->do_hw_op(ibe->parent.intf, IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 1)
+ == 0) {
+ v |= VM_CAPABILITIES_GRACEFUL_SHUTDOWN;
+ }
if (k->do_hw_op(ibe->parent.intf, IPMI_RESET_CHASSIS, 1) == 0) {
v |= VM_CAPABILITIES_RESET;
}
@@ -512,6 +517,7 @@ static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data)
bk->handle_command = ipmi_bmc_extern_handle_command;
bk->handle_reset = ipmi_bmc_extern_handle_reset;
+ dc->hotpluggable = false;
dc->realize = ipmi_bmc_extern_realize;
dc->props = ipmi_bmc_extern_properties;
}
diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c
index 17c7c0ea07..c7883d6f5e 100644
--- a/hw/ipmi/ipmi_bmc_sim.c
+++ b/hw/ipmi/ipmi_bmc_sim.c
@@ -217,7 +217,6 @@ struct IPMIBmcSim {
/* Odd netfns are for responses, so we only need the even ones. */
const IPMINetfn *netfns[MAX_NETFNS / 2];
- QemuMutex lock;
/* We allow one event in the buffer */
uint8_t evtbuf[16];
@@ -940,7 +939,6 @@ static void get_msg(IPMIBmcSim *ibs,
{
IPMIRcvBufEntry *msg;
- qemu_mutex_lock(&ibs->lock);
if (QTAILQ_EMPTY(&ibs->rcvbufs)) {
rsp_buffer_set_error(rsp, 0x80); /* Queue empty */
goto out;
@@ -960,7 +958,6 @@ static void get_msg(IPMIBmcSim *ibs,
}
out:
- qemu_mutex_unlock(&ibs->lock);
return;
}
@@ -1055,11 +1052,9 @@ static void send_msg(IPMIBmcSim *ibs,
end_msg:
msg->buf[msg->len] = ipmb_checksum(msg->buf, msg->len, 0);
msg->len++;
- qemu_mutex_lock(&ibs->lock);
QTAILQ_INSERT_TAIL(&ibs->rcvbufs, msg, entry);
ibs->msg_flags |= IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE;
k->set_atn(s, 1, attn_irq_enabled(ibs));
- qemu_mutex_unlock(&ibs->lock);
}
static void do_watchdog_reset(IPMIBmcSim *ibs)
@@ -1753,7 +1748,6 @@ static void ipmi_sim_realize(DeviceState *dev, Error **errp)
unsigned int i;
IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b);
- qemu_mutex_init(&ibs->lock);
QTAILQ_INIT(&ibs->rcvbufs);
ibs->bmc_global_enables = (1 << IPMI_BMC_EVENT_LOG_BIT);
@@ -1791,6 +1785,7 @@ static void ipmi_sim_class_init(ObjectClass *oc, void *data)
DeviceClass *dc = DEVICE_CLASS(oc);
IPMIBmcClass *bk = IPMI_BMC_CLASS(oc);
+ dc->hotpluggable = false;
dc->realize = ipmi_sim_realize;
bk->handle_command = ipmi_sim_handle_command;
}
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
index 7895805a23..db896b0bb6 100644
--- a/hw/mem/nvdimm.c
+++ b/hw/mem/nvdimm.c
@@ -148,13 +148,9 @@ static MemoryRegion *nvdimm_get_vmstate_memory_region(PCDIMMDevice *dimm)
static void nvdimm_class_init(ObjectClass *oc, void *data)
{
- DeviceClass *dc = DEVICE_CLASS(oc);
PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc);
NVDIMMClass *nvc = NVDIMM_CLASS(oc);
- /* nvdimm hotplug has not been supported yet. */
- dc->hotpluggable = false;
-
ddc->realize = nvdimm_realize;
ddc->get_memory_region = nvdimm_get_memory_region;
ddc->get_vmstate_memory_region = nvdimm_get_vmstate_memory_region;
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index ee136ab940..7d7f8f6e19 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -59,38 +59,11 @@ static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
virtio_bus_stop_ioeventfd(&dev->bus);
}
-static bool virtio_ccw_ioeventfd_started(DeviceState *d)
+static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
- return dev->ioeventfd_started;
-}
-
-static void virtio_ccw_ioeventfd_set_started(DeviceState *d, bool started,
- bool err)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- dev->ioeventfd_started = started;
- if (err) {
- /* Disable ioeventfd for this device. */
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- }
-}
-
-static bool virtio_ccw_ioeventfd_disabled(DeviceState *d)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- return dev->ioeventfd_disabled ||
- !(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD);
-}
-
-static void virtio_ccw_ioeventfd_set_disabled(DeviceState *d, bool disabled)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- dev->ioeventfd_disabled = disabled;
+ return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
@@ -709,6 +682,10 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
sch->cssid, sch->ssid, sch->schid, sch->devno,
ccw_dev->bus_id.valid ? "user-configured" : "auto-configured");
+ if (!kvm_eventfds_enabled()) {
+ dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
+ }
+
if (k->realize) {
k->realize(dev, &err);
}
@@ -1311,10 +1288,6 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
return;
}
- if (!kvm_eventfds_enabled()) {
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- }
-
sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
@@ -1616,10 +1589,7 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->pre_plugged = virtio_ccw_pre_plugged;
k->device_plugged = virtio_ccw_device_plugged;
k->device_unplugged = virtio_ccw_device_unplugged;
- k->ioeventfd_started = virtio_ccw_ioeventfd_started;
- k->ioeventfd_set_started = virtio_ccw_ioeventfd_set_started;
- k->ioeventfd_disabled = virtio_ccw_ioeventfd_disabled;
- k->ioeventfd_set_disabled = virtio_ccw_ioeventfd_set_disabled;
+ k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}
diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h
index 565094e4fb..77d10f1671 100644
--- a/hw/s390x/virtio-ccw.h
+++ b/hw/s390x/virtio-ccw.h
@@ -86,8 +86,6 @@ struct VirtioCcwDevice {
int revision;
uint32_t max_rev;
VirtioBusState bus;
- bool ioeventfd_started;
- bool ioeventfd_disabled;
uint32_t flags;
uint8_t thinint_isc;
AdapterRoutes routes;
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 9424f0e057..f2ea29dbc3 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/virtio/virtio-scsi.h"
#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
@@ -21,20 +22,30 @@
#include "hw/virtio/virtio-access.h"
/* Context: QEMU global mutex held */
-void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
+void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- assert(!s->ctx);
- s->ctx = iothread_get_aio_context(vs->conf.iothread);
-
- /* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->ioeventfd_started) {
- fprintf(stderr, "virtio-scsi: Failed to set iothread "
- "(transport does not support notifiers)");
- exit(1);
+ if (vs->conf.iothread) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
+ error_setg(errp,
+ "device is incompatible with iothread "
+ "(transport does not support notifiers)");
+ return;
+ }
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ error_setg(errp, "ioeventfd is required for iothread");
+ return;
+ }
+ s->ctx = iothread_get_aio_context(vs->conf.iothread);
+ } else {
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ return;
+ }
+ s->ctx = qemu_get_aio_context();
}
}
@@ -105,19 +116,19 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
}
/* Context: QEMU global mutex held */
-void virtio_scsi_dataplane_start(VirtIOSCSI *s)
+int virtio_scsi_dataplane_start(VirtIODevice *vdev)
{
int i;
int rc;
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
+ VirtIOSCSI *s = VIRTIO_SCSI(vdev);
if (s->dataplane_started ||
s->dataplane_starting ||
- s->dataplane_fenced ||
- s->ctx != iothread_get_aio_context(vs->conf.iothread)) {
- return;
+ s->dataplane_fenced) {
+ return 0;
}
s->dataplane_starting = true;
@@ -152,7 +163,7 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
s->dataplane_starting = false;
s->dataplane_started = true;
aio_context_release(s->ctx);
- return;
+ return 0;
fail_vrings:
virtio_scsi_clear_aio(s);
@@ -165,14 +176,16 @@ fail_guest_notifiers:
s->dataplane_fenced = true;
s->dataplane_starting = false;
s->dataplane_started = true;
+ return -ENOSYS;
}
/* Context: QEMU global mutex held */
-void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
+void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
+ VirtIOSCSI *s = VIRTIO_SCSI(vdev);
int i;
if (!s->dataplane_started || s->dataplane_stopping) {
@@ -186,7 +199,6 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
return;
}
s->dataplane_stopping = true;
- assert(s->ctx == iothread_get_aio_context(vs->conf.iothread));
aio_context_acquire(s->ctx);
virtio_scsi_clear_aio(s);
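
virtio_scsi_dataplane_setup() validates the iothread configuration once at realize time and reports failure through Error ** instead of calling exit(1). A reduced sketch of the same validation shape; the Error type and error_setg_sketch() are simplified stand-ins for QEMU's error API:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { const char *msg; } Error;

    static void error_setg_sketch(Error **errp, const char *msg)
    {
        if (errp) {
            static Error e;
            e.msg = msg;
            *errp = &e;
        }
    }

    /* Pick an AioContext at setup time; NULL means "no dataplane". */
    static const char *dataplane_setup(bool have_iothread, bool notifiers,
                                       bool ioeventfd, Error **errp)
    {
        if (have_iothread) {
            if (!notifiers) {
                error_setg_sketch(errp, "transport lacks notifiers");
                return NULL;
            }
            if (!ioeventfd) {
                error_setg_sketch(errp, "ioeventfd required for iothread");
                return NULL;
            }
            return "iothread-context";
        }
        return ioeventfd ? "main-loop-context" : NULL;
    }

    int main(void)
    {
        Error *err = NULL;
        const char *ctx = dataplane_setup(true, true, false, &err);

        printf("ctx=%s err=%s\n", ctx ? ctx : "(none)",
               err ? err->msg : "-");
        return 0;
    }
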
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 4762f05274..3e5ae6ac0f 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -434,7 +434,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -610,7 +610,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -669,9 +669,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
- if (s->ctx) {
- virtio_scsi_dataplane_stop(s);
- }
+ assert(!s->dataplane_started);
s->resetting++;
qbus_reset_all(&s->bus.qbus);
s->resetting--;
@@ -749,7 +747,7 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -848,14 +846,10 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
- s->ctrl_vq = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
- s->event_vq = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
+ s->ctrl_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
+ s->event_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
for (i = 0; i < s->conf.num_queues; i++) {
- s->cmd_vqs[i] = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
- }
-
- if (s->conf.iothread) {
- virtio_scsi_set_iothread(VIRTIO_SCSI(s), s->conf.iothread);
+ s->cmd_vqs[i] = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
}
}
@@ -885,6 +879,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
return;
}
}
+
+ virtio_scsi_dataplane_setup(s, errp);
}
static void virtio_scsi_instance_init(Object *obj)
@@ -957,6 +953,8 @@ static void virtio_scsi_class_init(ObjectClass *klass, void *data)
vdc->set_config = virtio_scsi_set_config;
vdc->get_features = virtio_scsi_get_features;
vdc->reset = virtio_scsi_reset;
+ vdc->start_ioeventfd = virtio_scsi_dataplane_start;
+ vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
hc->plug = virtio_scsi_hotplug;
hc->unplug = virtio_scsi_hotunplug;
}
diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs
index e71630812e..95c4c30ea1 100644
--- a/hw/virtio/Makefile.objs
+++ b/hw/virtio/Makefile.objs
@@ -7,3 +7,5 @@ obj-y += virtio.o virtio-balloon.o
obj-$(CONFIG_LINUX) += vhost.o vhost-backend.o vhost-user.o
obj-$(CONFIG_VHOST_VSOCK) += vhost-vsock.o
+obj-y += virtio-crypto.o
+obj-$(CONFIG_VIRTIO_PCI) += virtio-crypto-pci.o
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index bd051ab2e1..131f1643b2 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1190,12 +1190,13 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r, e;
- if (!k->ioeventfd_started) {
+ if (!k->ioeventfd_assign) {
error_report("binding does not support host notifiers");
r = -ENOSYS;
goto fail;
}
+ virtio_device_stop_ioeventfd(vdev);
for (i = 0; i < hdev->nvqs; ++i) {
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
true);
@@ -1215,6 +1216,7 @@ fail_vq:
}
assert (e >= 0);
}
+ virtio_device_start_ioeventfd(vdev);
fail:
return r;
}
@@ -1237,6 +1239,7 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
}
assert (r >= 0);
}
+ virtio_device_start_ioeventfd(vdev);
}
/* Test and clear event pending status.
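
vhost_dev_enable_notifiers() now stops the device's generic ioeventfd handling before claiming the host notifiers, and both exit paths restart it afterwards. A standalone sketch of that bracketing; the start/stop functions are stand-ins for virtio_device_start_ioeventfd() and virtio_device_stop_ioeventfd():

    #include <stdbool.h>
    #include <stdio.h>

    static bool generic_ioeventfd;    /* device services queues itself */

    static void start_ioeventfd(void) { generic_ioeventfd = true;  }
    static void stop_ioeventfd(void)  { generic_ioeventfd = false; }

    static int vhost_enable_notifiers(void)
    {
        /* Hand the eventfds to the vhost backend first, so there is no
         * window where both QEMU and vhost poll the same notifier. */
        stop_ioeventfd();
        /* ... assign host notifiers to the vhost backend here ... */
        return 0;
    }

    static void vhost_disable_notifiers(void)
    {
        /* ... deassign host notifiers from the vhost backend ... */
        start_ioeventfd();            /* device resumes its queues */
    }

    int main(void)
    {
        start_ioeventfd();
        vhost_enable_notifiers();
        printf("during vhost: generic=%d\n", generic_ioeventfd);
        vhost_disable_notifiers();
        printf("after vhost:  generic=%d\n", generic_ioeventfd);
        return 0;
    }
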
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index 1d77028236..cfba053280 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -394,21 +394,9 @@ static void virtio_balloon_to_target(void *opaque, ram_addr_t target)
trace_virtio_balloon_to_target(target, dev->num_pages);
}
-static void virtio_balloon_save_device(VirtIODevice *vdev, QEMUFile *f)
+static int virtio_balloon_post_load_device(void *opaque, int version_id)
{
- VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
-
- qemu_put_be32(f, s->num_pages);
- qemu_put_be32(f, s->actual);
-}
-
-static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
- int version_id)
-{
- VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
-
- s->num_pages = qemu_get_be32(f);
- s->actual = qemu_get_be32(f);
+ VirtIOBalloon *s = VIRTIO_BALLOON(opaque);
if (balloon_stats_enabled(s)) {
balloon_stats_change_timer(s, s->stats_poll_interval);
@@ -416,6 +404,18 @@ static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
return 0;
}
+static const VMStateDescription vmstate_virtio_balloon_device = {
+ .name = "virtio-balloon-device",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = virtio_balloon_post_load_device,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(num_pages, VirtIOBalloon),
+ VMSTATE_UINT32(actual, VirtIOBalloon),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -517,9 +517,8 @@ static void virtio_balloon_class_init(ObjectClass *klass, void *data)
vdc->get_config = virtio_balloon_get_config;
vdc->set_config = virtio_balloon_set_config;
vdc->get_features = virtio_balloon_get_features;
- vdc->save = virtio_balloon_save_device;
- vdc->load = virtio_balloon_load_device;
vdc->set_status = virtio_balloon_set_status;
+ vdc->vmsd = &vmstate_virtio_balloon_device;
}
static const TypeInfo virtio_balloon_info = {
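
The balloon conversion replaces the hand-written save/load pair with a field table plus a post_load hook; keeping the fields in the old qemu_put order is what preserves the wire format. A standalone sketch of table-driven serialization in the same spirit (this is not the real VMState machinery, just a model of it):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint32_t num_pages;
        uint32_t actual;
    } Balloon;

    /* Table-driven serialization: field order defines the wire format,
     * just as the VMSTATE_UINT32 entries do in a VMStateDescription. */
    static const size_t fields[] = {
        offsetof(Balloon, num_pages),
        offsetof(Balloon, actual),
    };

    static void save(const Balloon *b, uint32_t *out)
    {
        for (size_t i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
            out[i] = *(const uint32_t *)((const char *)b + fields[i]);
        }
    }

    static int post_load(Balloon *b)
    {
        /* Hook point: restart the stats timer once fields are restored. */
        (void)b;
        return 0;
    }

    int main(void)
    {
        Balloon b = { 512, 256 };
        uint32_t wire[2];

        save(&b, wire);
        post_load(&b);
        printf("%u %u\n", wire[0], wire[1]);
        return 0;
    }
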
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index 11f65bd225..bf61f66a04 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -147,131 +147,97 @@ void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
}
}
-/*
- * This function handles both assigning the ioeventfd handler and
- * registering it with the kernel.
- * assign: register/deregister ioeventfd with the kernel
- * set_handler: use the generic ioeventfd handler
- */
-static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
- int n, bool assign, bool set_handler)
+int virtio_bus_start_ioeventfd(VirtioBusState *bus)
{
- VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
- VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
- int r = 0;
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ int r;
- if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %s (%d)",
- __func__, strerror(-r), r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
- r = k->ioeventfd_assign(proxy, notifier, n, assign);
- if (r < 0) {
- error_report("%s: unable to assign ioeventfd: %d", __func__, r);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
- return r;
- }
- } else {
- k->ioeventfd_assign(proxy, notifier, n, assign);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
+ if (!k->ioeventfd_assign || !k->ioeventfd_enabled(proxy)) {
+ return -ENOSYS;
}
- return r;
+ if (bus->ioeventfd_started) {
+ return 0;
+ }
+ r = vdc->start_ioeventfd(vdev);
+ if (r < 0) {
+ error_report("%s: failed. Fallback to userspace (slower).", __func__);
+ return r;
+ }
+ bus->ioeventfd_started = true;
+ return 0;
}
-void virtio_bus_start_ioeventfd(VirtioBusState *bus)
+void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
{
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
- DeviceState *proxy = DEVICE(BUS(bus)->parent);
VirtIODevice *vdev;
- int n, r;
+ VirtioDeviceClass *vdc;
- if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) {
+ if (!bus->ioeventfd_started) {
return;
}
- if (k->ioeventfd_disabled(proxy)) {
- return;
- }
- vdev = virtio_bus_get_device(bus);
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = set_host_notifier_internal(proxy, bus, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
- }
- k->ioeventfd_set_started(proxy, true, false);
- return;
-
-assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = set_host_notifier_internal(proxy, bus, n, false, false);
- assert(r >= 0);
- }
- k->ioeventfd_set_started(proxy, false, true);
- error_report("%s: failed. Fallback to userspace (slower).", __func__);
+ vdev = virtio_bus_get_device(bus);
+ vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ vdc->stop_ioeventfd(vdev);
+ bus->ioeventfd_started = false;
}
-void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
+bool virtio_bus_ioeventfd_enabled(VirtioBusState *bus)
{
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
DeviceState *proxy = DEVICE(BUS(bus)->parent);
- VirtIODevice *vdev;
- int n, r;
- if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) {
- return;
- }
- vdev = virtio_bus_get_device(bus);
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = set_host_notifier_internal(proxy, bus, n, false, false);
- assert(r >= 0);
- }
- k->ioeventfd_set_started(proxy, false, false);
+ return k->ioeventfd_assign && k->ioeventfd_enabled(proxy);
}
/*
- * This function switches from/to the generic ioeventfd handler.
- * assign==false means 'use generic ioeventfd handler'.
+ * This function switches ioeventfd on/off in the device.
+ * The caller must set or clear the handlers for the EventNotifier.
*/
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ int r = 0;
- if (!k->ioeventfd_started) {
+ if (!k->ioeventfd_assign) {
return -ENOSYS;
}
- k->ioeventfd_set_disabled(proxy, assign);
+
if (assign) {
- /*
- * Stop using the generic ioeventfd, we are doing eventfd handling
- * ourselves below
- *
- * FIXME: We should just switch the handler and not deassign the
- * ioeventfd.
- * Otherwise, there's a window where we don't have an
- * ioeventfd and we may end up with a notification where
- * we don't expect one.
- */
- virtio_bus_stop_ioeventfd(bus);
+ assert(!bus->ioeventfd_started);
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ error_report("%s: unable to init event notifier: %s (%d)",
+ __func__, strerror(-r), r);
+ return r;
+ }
+ r = k->ioeventfd_assign(proxy, notifier, n, true);
+ if (r < 0) {
+ error_report("%s: unable to assign ioeventfd: %d", __func__, r);
+ goto cleanup_event_notifier;
+ }
+ return 0;
+ } else {
+ if (!bus->ioeventfd_started) {
+ return 0;
+ }
+ k->ioeventfd_assign(proxy, notifier, n, false);
}
- return set_host_notifier_internal(proxy, bus, n, assign, false);
+
+cleanup_event_notifier:
+ /* Test and clear the notifier after disabling the event,
+ * in case the poll callback didn't have time to run.
+ */
+ virtio_queue_host_notifier_read(notifier);
+ event_notifier_cleanup(notifier);
+ return r;
}
static char *virtio_bus_get_dev_path(DeviceState *dev)
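
The reworked bus keeps a single ioeventfd_started flag and delegates per-queue setup to the device class, so starting is idempotent and a device failure means falling back to userspace. A stand-in model of that contract, with plain structs instead of QOM classes:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Bus Bus;
    struct Bus {
        bool started;              /* the single bus-level flag */
        bool (*enabled)(Bus *);    /* transport's ioeventfd_enabled() */
        int  (*dev_start)(Bus *);  /* device class start_ioeventfd() */
        void (*dev_stop)(Bus *);   /* device class stop_ioeventfd() */
    };

    static int bus_start_ioeventfd(Bus *b)
    {
        if (!b->enabled(b)) {
            return -ENOSYS;        /* transport cannot do ioeventfd */
        }
        if (b->started) {
            return 0;              /* idempotent */
        }
        if (b->dev_start(b) < 0) {
            return -1;             /* device falls back to userspace */
        }
        b->started = true;
        return 0;
    }

    static void bus_stop_ioeventfd(Bus *b)
    {
        if (b->started) {
            b->dev_stop(b);
            b->started = false;
        }
    }

    static bool t_enabled(Bus *b) { (void)b; return true; }
    static int  t_start(Bus *b)   { (void)b; return 0; }
    static void t_stop(Bus *b)    { (void)b; }

    int main(void)
    {
        Bus b = { false, t_enabled, t_start, t_stop };
        int rc = bus_start_ioeventfd(&b);

        printf("rc=%d started=%d\n", rc, b.started);
        bus_stop_ioeventfd(&b);
        return 0;
    }
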
diff --git a/hw/virtio/virtio-crypto-pci.c b/hw/virtio/virtio-crypto-pci.c
new file mode 100644
index 0000000000..21d998401a
--- /dev/null
+++ b/hw/virtio/virtio-crypto-pci.c
@@ -0,0 +1,77 @@
+/*
+ * Virtio crypto device
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-crypto.h"
+#include "qapi/error.h"
+
+static Property virtio_crypto_pci_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOCryptoPCI *vcrypto = VIRTIO_CRYPTO_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&vcrypto->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ virtio_pci_force_virtio_1(vpci_dev);
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+ object_property_set_link(OBJECT(vcrypto),
+ OBJECT(vcrypto->vdev.conf.cryptodev), "cryptodev",
+ NULL);
+}
+
+static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = virtio_crypto_pci_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->props = virtio_crypto_pci_properties;
+
+ pcidev_k->class_id = PCI_CLASS_OTHERS;
+}
+
+static void virtio_crypto_initfn(Object *obj)
+{
+ VirtIOCryptoPCI *dev = VIRTIO_CRYPTO_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_CRYPTO);
+ object_property_add_alias(obj, "cryptodev", OBJECT(&dev->vdev),
+ "cryptodev", &error_abort);
+}
+
+static const TypeInfo virtio_crypto_pci_info = {
+ .name = TYPE_VIRTIO_CRYPTO_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOCryptoPCI),
+ .instance_init = virtio_crypto_initfn,
+ .class_init = virtio_crypto_pci_class_init,
+};
+
+static void virtio_crypto_pci_register_types(void)
+{
+ type_register_static(&virtio_crypto_pci_info);
+}
+type_init(virtio_crypto_pci_register_types)
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
new file mode 100644
index 0000000000..170114f52b
--- /dev/null
+++ b/hw/virtio/virtio-crypto.c
@@ -0,0 +1,898 @@
+/*
+ * Virtio crypto Support
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/iov.h"
+#include "hw/qdev.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-crypto.h"
+#include "hw/virtio/virtio-access.h"
+#include "standard-headers/linux/virtio_ids.h"
+
+#define VIRTIO_CRYPTO_VM_VERSION 1
+
+/*
+ * Translate a virtqueue index into a crypto queue index.
+ * The control virtqueue comes after the data virtqueues,
+ * so the input value doesn't need to be adjusted.
+ */
+static inline int virtio_crypto_vq2q(int queue_index)
+{
+ return queue_index;
+}
+
+static int
+virtio_crypto_cipher_session_helper(VirtIODevice *vdev,
+ CryptoDevBackendSymSessionInfo *info,
+ struct virtio_crypto_cipher_session_para *cipher_para,
+ struct iovec **iov, unsigned int *out_num)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ unsigned int num = *out_num;
+
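+    /* Session parameters are little-endian on the wire, per VIRTIO 1.0 */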
+ info->cipher_alg = ldl_le_p(&cipher_para->algo);
+ info->key_len = ldl_le_p(&cipher_para->keylen);
+ info->direction = ldl_le_p(&cipher_para->op);
+ DPRINTF("cipher_alg=%" PRIu32 ", info->direction=%" PRIu32 "\n",
+ info->cipher_alg, info->direction);
+
+ if (info->key_len > vcrypto->conf.max_cipher_key_len) {
+ error_report("virtio-crypto length of cipher key is too big: %u",
+ info->key_len);
+ return -VIRTIO_CRYPTO_ERR;
+ }
+ /* Get cipher key */
+ if (info->key_len > 0) {
+ size_t s;
+ DPRINTF("keylen=%" PRIu32 "\n", info->key_len);
+
+ info->cipher_key = g_malloc(info->key_len);
+ s = iov_to_buf(*iov, num, 0, info->cipher_key, info->key_len);
+ if (unlikely(s != info->key_len)) {
+ virtio_error(vdev, "virtio-crypto cipher key incorrect");
+ return -EFAULT;
+ }
+ iov_discard_front(iov, &num, info->key_len);
+ *out_num = num;
+ }
+
+ return 0;
+}
+
+static int64_t
+virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_sym_create_session_req *sess_req,
+ uint32_t queue_id,
+ uint32_t opcode,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ CryptoDevBackendSymSessionInfo info;
+ int64_t session_id;
+ int queue_index;
+ uint32_t op_type;
+ Error *local_err = NULL;
+ int ret;
+
+ memset(&info, 0, sizeof(info));
+ op_type = ldl_le_p(&sess_req->op_type);
+ info.op_type = op_type;
+ info.op_code = opcode;
+
+ if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ ret = virtio_crypto_cipher_session_helper(vdev, &info,
+ &sess_req->u.cipher.para,
+ &iov, &out_num);
+ if (ret < 0) {
+ goto err;
+ }
+ } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ size_t s;
+ /* cipher part */
+ ret = virtio_crypto_cipher_session_helper(vdev, &info,
+ &sess_req->u.chain.para.cipher_param,
+ &iov, &out_num);
+ if (ret < 0) {
+ goto err;
+ }
+ /* hash part */
+ info.alg_chain_order = ldl_le_p(
+ &sess_req->u.chain.para.alg_chain_order);
+ info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
+ info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
+ if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
+ info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
+ info.auth_key_len = ldl_le_p(
+ &sess_req->u.chain.para.u.mac_param.auth_key_len);
+ info.hash_result_len = ldl_le_p(
+ &sess_req->u.chain.para.u.mac_param.hash_result_len);
+ if (info.auth_key_len > vcrypto->conf.max_auth_key_len) {
+ error_report("virtio-crypto length of auth key is too big: %u",
+ info.auth_key_len);
+ ret = -VIRTIO_CRYPTO_ERR;
+ goto err;
+ }
+ /* get auth key */
+ if (info.auth_key_len > 0) {
+ DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len);
+ info.auth_key = g_malloc(info.auth_key_len);
+ s = iov_to_buf(iov, out_num, 0, info.auth_key,
+ info.auth_key_len);
+ if (unlikely(s != info.auth_key_len)) {
+ virtio_error(vdev,
+ "virtio-crypto authenticated key incorrect");
+ ret = -EFAULT;
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, info.auth_key_len);
+ }
+ } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
+ info.hash_alg = ldl_le_p(
+ &sess_req->u.chain.para.u.hash_param.algo);
+ info.hash_result_len = ldl_le_p(
+ &sess_req->u.chain.para.u.hash_param.hash_result_len);
+ } else {
+ /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ error_report("unsupported hash mode");
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ goto err;
+ }
+ } else {
+ /* VIRTIO_CRYPTO_SYM_OP_NONE */
+ error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE");
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ goto err;
+ }
+
+ queue_index = virtio_crypto_vq2q(queue_id);
+ session_id = cryptodev_backend_sym_create_session(
+ vcrypto->cryptodev,
+ &info, queue_index, &local_err);
+ if (session_id >= 0) {
+ DPRINTF("create session_id=%" PRIu64 " successfully\n",
+ session_id);
+
+ ret = session_id;
+ } else {
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ ret = -VIRTIO_CRYPTO_ERR;
+ }
+
+err:
+ g_free(info.cipher_key);
+ g_free(info.auth_key);
+ return ret;
+}
+
+static uint8_t
+virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_destroy_session_req *close_sess_req,
+ uint32_t queue_id)
+{
+ int ret;
+ uint64_t session_id;
+ uint32_t status;
+ Error *local_err = NULL;
+
+ session_id = ldq_le_p(&close_sess_req->session_id);
+ DPRINTF("close session, id=%" PRIu64 "\n", session_id);
+
+ ret = cryptodev_backend_sym_close_session(
+ vcrypto->cryptodev, session_id, queue_id, &local_err);
+ if (ret == 0) {
+ status = VIRTIO_CRYPTO_OK;
+ } else {
+ if (local_err) {
+ error_report_err(local_err);
+ } else {
+ error_report("destroy session failed");
+ }
+ status = VIRTIO_CRYPTO_ERR;
+ }
+
+ return status;
+}
+
+static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ struct virtio_crypto_op_ctrl_req ctrl;
+ VirtQueueElement *elem;
+ struct iovec *in_iov;
+ struct iovec *out_iov;
+ unsigned in_num;
+ unsigned out_num;
+ uint32_t queue_id;
+ uint32_t opcode;
+ struct virtio_crypto_session_input input;
+ int64_t session_id;
+ uint8_t status;
+ size_t s;
+
+ for (;;) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+ if (elem->out_num < 1 || elem->in_num < 1) {
+ virtio_error(vdev, "virtio-crypto ctrl missing headers");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
+ }
+
+ out_num = elem->out_num;
+ out_iov = elem->out_sg;
+ in_num = elem->in_num;
+ in_iov = elem->in_sg;
+ if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl))
+ != sizeof(ctrl))) {
+ virtio_error(vdev, "virtio-crypto request ctrl_hdr too short");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
+ }
+ iov_discard_front(&out_iov, &out_num, sizeof(ctrl));
+
+ opcode = ldl_le_p(&ctrl.header.opcode);
+ queue_id = ldl_le_p(&ctrl.header.queue_id);
+
+ switch (opcode) {
+ case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
+ memset(&input, 0, sizeof(input));
+ session_id = virtio_crypto_create_sym_session(vcrypto,
+ &ctrl.u.sym_create_session,
+ queue_id, opcode,
+ out_iov, out_num);
+            /* Serious errors; the virtio crypto device needs a reset */
+ if (session_id == -EFAULT) {
+ virtqueue_detach_element(vq, elem, 0);
+ break;
+ } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) {
+ stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
+ } else if (session_id == -VIRTIO_CRYPTO_ERR) {
+ stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
+ } else {
+ /* Set the session id */
+ stq_le_p(&input.session_id, session_id);
+ stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
+ }
+
+ s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
+ if (unlikely(s != sizeof(input))) {
+ virtio_error(vdev, "virtio-crypto input incorrect");
+ virtqueue_detach_element(vq, elem, 0);
+ break;
+ }
+ virtqueue_push(vq, elem, sizeof(input));
+ virtio_notify(vdev, vq);
+ break;
+ case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
+ case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
+ case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
+ case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
+ status = virtio_crypto_handle_close_session(vcrypto,
+ &ctrl.u.destroy_session, queue_id);
+            /* The status occupies only one byte; we can use it directly */
+ s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
+ if (unlikely(s != sizeof(status))) {
+ virtio_error(vdev, "virtio-crypto status incorrect");
+ virtqueue_detach_element(vq, elem, 0);
+ break;
+ }
+ virtqueue_push(vq, elem, sizeof(status));
+ virtio_notify(vdev, vq);
+ break;
+ case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
+ case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
+ case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
+ default:
+ error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
+ memset(&input, 0, sizeof(input));
+ stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
+ s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
+ if (unlikely(s != sizeof(input))) {
+ virtio_error(vdev, "virtio-crypto input incorrect");
+ virtqueue_detach_element(vq, elem, 0);
+ break;
+ }
+ virtqueue_push(vq, elem, sizeof(input));
+ virtio_notify(vdev, vq);
+
+ break;
+ } /* end switch case */
+
+ g_free(elem);
+ } /* end for loop */
+}
+
+static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
+ VirtIOCryptoReq *req)
+{
+ req->vcrypto = vcrypto;
+ req->vq = vq;
+ req->in = NULL;
+ req->in_iov = NULL;
+ req->in_num = 0;
+ req->in_len = 0;
+ req->flags = CRYPTODEV_BACKEND_ALG__MAX;
+ req->u.sym_op_info = NULL;
+}
+
+static void virtio_crypto_free_request(VirtIOCryptoReq *req)
+{
+ if (req) {
+ if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ g_free(req->u.sym_op_info);
+ }
+ g_free(req);
+ }
+}
+
+static void
+virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
+ VirtIOCryptoReq *req,
+ uint32_t status,
+ CryptoDevBackendSymOpInfo *sym_op_info)
+{
+ size_t s, len;
+
+ if (status != VIRTIO_CRYPTO_OK) {
+ return;
+ }
+
+ len = sym_op_info->dst_len;
+ /* Save the cipher result */
+ s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
+ if (s != len) {
+ virtio_error(vdev, "virtio-crypto dest data incorrect");
+ return;
+ }
+
+ iov_discard_front(&req->in_iov, &req->in_num, len);
+
+ if (sym_op_info->op_type ==
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ /* Save the digest result */
+ s = iov_from_buf(req->in_iov, req->in_num, 0,
+ sym_op_info->digest_result,
+ sym_op_info->digest_result_len);
+ if (s != sym_op_info->digest_result_len) {
+ virtio_error(vdev, "virtio-crypto digest result incorrect");
+ }
+ }
+}
+
+static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
+{
+ VirtIOCrypto *vcrypto = req->vcrypto;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+
+ if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ virtio_crypto_sym_input_data_helper(vdev, req, status,
+ req->u.sym_op_info);
+ }
+ stb_p(&req->in->status, status);
+ virtqueue_push(req->vq, &req->elem, req->in_len);
+ virtio_notify(vdev, req->vq);
+}
+
+static VirtIOCryptoReq *
+virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
+{
+ VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
+
+ if (req) {
+ virtio_crypto_init_request(s, vq, req);
+ }
+ return req;
+}
+
+static CryptoDevBackendSymOpInfo *
+virtio_crypto_sym_op_helper(VirtIODevice *vdev,
+ struct virtio_crypto_cipher_para *cipher_para,
+ struct virtio_crypto_alg_chain_data_para *alg_chain_para,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ CryptoDevBackendSymOpInfo *op_info;
+ uint32_t src_len = 0, dst_len = 0;
+ uint32_t iv_len = 0;
+ uint32_t aad_len = 0, hash_result_len = 0;
+ uint32_t hash_start_src_offset = 0, len_to_hash = 0;
+ uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;
+
+ size_t max_len, curr_size = 0;
+ size_t s;
+
+ /* Plain cipher */
+ if (cipher_para) {
+ iv_len = ldl_le_p(&cipher_para->iv_len);
+ src_len = ldl_le_p(&cipher_para->src_data_len);
+ dst_len = ldl_le_p(&cipher_para->dst_data_len);
+ } else if (alg_chain_para) { /* Algorithm chain */
+ iv_len = ldl_le_p(&alg_chain_para->iv_len);
+ src_len = ldl_le_p(&alg_chain_para->src_data_len);
+ dst_len = ldl_le_p(&alg_chain_para->dst_data_len);
+
+ aad_len = ldl_le_p(&alg_chain_para->aad_len);
+ hash_result_len = ldl_le_p(&alg_chain_para->hash_result_len);
+ hash_start_src_offset = ldl_le_p(
+ &alg_chain_para->hash_start_src_offset);
+ cipher_start_src_offset = ldl_le_p(
+ &alg_chain_para->cipher_start_src_offset);
+ len_to_cipher = ldl_le_p(&alg_chain_para->len_to_cipher);
+ len_to_hash = ldl_le_p(&alg_chain_para->len_to_hash);
+ } else {
+ return NULL;
+ }
+
+ max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
+ if (unlikely(max_len > vcrypto->conf.max_size)) {
+ virtio_error(vdev, "virtio-crypto too big length");
+ return NULL;
+ }
+
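+    /* op_info and its payload share a single allocation: iv, aad_data, src,
+     * dst and digest_result are carved out of op_info->data in that order,
+     * tracked by curr_size. */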
+ op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
+ op_info->iv_len = iv_len;
+ op_info->src_len = src_len;
+ op_info->dst_len = dst_len;
+ op_info->aad_len = aad_len;
+ op_info->digest_result_len = hash_result_len;
+ op_info->hash_start_src_offset = hash_start_src_offset;
+ op_info->len_to_hash = len_to_hash;
+ op_info->cipher_start_src_offset = cipher_start_src_offset;
+ op_info->len_to_cipher = len_to_cipher;
+    /* Handle the initialization vector */
+ if (op_info->iv_len > 0) {
+ DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
+ op_info->iv = op_info->data + curr_size;
+
+ s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
+ if (unlikely(s != op_info->iv_len)) {
+ virtio_error(vdev, "virtio-crypto iv incorrect");
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, op_info->iv_len);
+ curr_size += op_info->iv_len;
+ }
+
+    /* Handle the additional authenticated data, if present */
+ if (op_info->aad_len > 0) {
+ DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
+ op_info->aad_data = op_info->data + curr_size;
+
+ s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
+ if (unlikely(s != op_info->aad_len)) {
+ virtio_error(vdev, "virtio-crypto additional auth data incorrect");
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, op_info->aad_len);
+
+ curr_size += op_info->aad_len;
+ }
+
+ /* Handle the source data */
+ if (op_info->src_len > 0) {
+ DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
+ op_info->src = op_info->data + curr_size;
+
+ s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
+ if (unlikely(s != op_info->src_len)) {
+ virtio_error(vdev, "virtio-crypto source data incorrect");
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, op_info->src_len);
+
+ curr_size += op_info->src_len;
+ }
+
+ /* Handle the destination data */
+ op_info->dst = op_info->data + curr_size;
+ curr_size += op_info->dst_len;
+
+ DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
+
+ /* Handle the hash digest result */
+ if (hash_result_len > 0) {
+ DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
+ op_info->digest_result = op_info->data + curr_size;
+ }
+
+ return op_info;
+
+err:
+ g_free(op_info);
+ return NULL;
+}
+
+static int
+virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_sym_data_req *req,
+ CryptoDevBackendSymOpInfo **sym_op_info,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ uint32_t op_type;
+ CryptoDevBackendSymOpInfo *op_info;
+
+ op_type = ldl_le_p(&req->op_type);
+
+ if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
+ NULL, iov, out_num);
+ if (!op_info) {
+ return -EFAULT;
+ }
+ op_info->op_type = op_type;
+ } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ op_info = virtio_crypto_sym_op_helper(vdev, NULL,
+ &req->u.chain.para,
+ iov, out_num);
+ if (!op_info) {
+ return -EFAULT;
+ }
+ op_info->op_type = op_type;
+ } else {
+ /* VIRTIO_CRYPTO_SYM_OP_NONE */
+ error_report("virtio-crypto unsupported cipher type");
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+
+ *sym_op_info = op_info;
+
+ return 0;
+}
+
+static int
+virtio_crypto_handle_request(VirtIOCryptoReq *request)
+{
+ VirtIOCrypto *vcrypto = request->vcrypto;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ VirtQueueElement *elem = &request->elem;
+ int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
+ struct virtio_crypto_op_data_req req;
+ int ret;
+ struct iovec *in_iov;
+ struct iovec *out_iov;
+ unsigned in_num;
+ unsigned out_num;
+ uint32_t opcode;
+ uint8_t status = VIRTIO_CRYPTO_ERR;
+ uint64_t session_id;
+ CryptoDevBackendSymOpInfo *sym_op_info = NULL;
+ Error *local_err = NULL;
+
+ if (elem->out_num < 1 || elem->in_num < 1) {
+ virtio_error(vdev, "virtio-crypto dataq missing headers");
+ return -1;
+ }
+
+ out_num = elem->out_num;
+ out_iov = elem->out_sg;
+ in_num = elem->in_num;
+ in_iov = elem->in_sg;
+ if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
+ != sizeof(req))) {
+ virtio_error(vdev, "virtio-crypto request outhdr too short");
+ return -1;
+ }
+ iov_discard_front(&out_iov, &out_num, sizeof(req));
+
+ if (in_iov[in_num - 1].iov_len <
+ sizeof(struct virtio_crypto_inhdr)) {
+ virtio_error(vdev, "virtio-crypto request inhdr too short");
+ return -1;
+ }
+ /* We always touch the last byte, so just see how big in_iov is. */
+ request->in_len = iov_size(in_iov, in_num);
+ request->in = (void *)in_iov[in_num - 1].iov_base
+ + in_iov[in_num - 1].iov_len
+ - sizeof(struct virtio_crypto_inhdr);
+ iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));
+
+    /*
+     * in_iov/in_num now cover only the operation result, i.e. the
+     * dest_data plus the digest_result, if present.
+     */
+ request->in_num = in_num;
+ request->in_iov = in_iov;
+
+ opcode = ldl_le_p(&req.header.opcode);
+ session_id = ldq_le_p(&req.header.session_id);
+
+ switch (opcode) {
+ case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
+ case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+ ret = virtio_crypto_handle_sym_req(vcrypto,
+ &req.u.sym_req,
+ &sym_op_info,
+ out_iov, out_num);
+        /* Serious errors; the virtio crypto device needs a reset */
+ if (ret == -EFAULT) {
+ return -1;
+ } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
+ virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
+ virtio_crypto_free_request(request);
+ } else {
+ sym_op_info->session_id = session_id;
+
+            /* Set the request's parameters */
+ request->flags = CRYPTODEV_BACKEND_ALG_SYM;
+ request->u.sym_op_info = sym_op_info;
+ ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
+ request, queue_index, &local_err);
+ if (ret < 0) {
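+                /* Negative returns encode a VIRTIO_CRYPTO_* status value */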
+ status = -ret;
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ } else { /* ret == VIRTIO_CRYPTO_OK */
+ status = ret;
+ }
+ virtio_crypto_req_complete(request, status);
+ virtio_crypto_free_request(request);
+ }
+ break;
+ case VIRTIO_CRYPTO_HASH:
+ case VIRTIO_CRYPTO_MAC:
+ case VIRTIO_CRYPTO_AEAD_ENCRYPT:
+ case VIRTIO_CRYPTO_AEAD_DECRYPT:
+ default:
+ error_report("virtio-crypto unsupported dataq opcode: %u",
+ opcode);
+ virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
+ virtio_crypto_free_request(request);
+ }
+
+ return 0;
+}
+
+static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ VirtIOCryptoReq *req;
+
+ while ((req = virtio_crypto_get_request(vcrypto, vq))) {
+ if (virtio_crypto_handle_request(req) < 0) {
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_crypto_free_request(req);
+ break;
+ }
+ }
+}
+
+static void virtio_crypto_dataq_bh(void *opaque)
+{
+ VirtIOCryptoQueue *q = opaque;
+ VirtIOCrypto *vcrypto = q->vcrypto;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+
+    /* This happens when the device was stopped but the BH wasn't. */
+ if (!vdev->vm_running) {
+ return;
+ }
+
+    /* Just in case the driver is not ready anymore */
+ if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
+ return;
+ }
+
+ virtio_crypto_handle_dataq(vdev, q->dataq);
+ virtio_queue_set_notification(q->dataq, 1);
+}
+
+static void
+virtio_crypto_handle_dataq_bh(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ VirtIOCryptoQueue *q =
+ &vcrypto->vqs[virtio_crypto_vq2q(virtio_get_queue_index(vq))];
+
+    /* This happens when the device was stopped but the VCPU wasn't. */
+ if (!vdev->vm_running) {
+ return;
+ }
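+    /* Disable further notifications and let the bottom half drain the
+     * queue, batching request processing. */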
+ virtio_queue_set_notification(vq, 0);
+ qemu_bh_schedule(q->dataq_bh);
+}
+
+static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
+ uint64_t features,
+ Error **errp)
+{
+ return features;
+}
+
+static void virtio_crypto_reset(VirtIODevice *vdev)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ /* multiqueue is disabled by default */
+ vcrypto->curr_queues = 1;
+ if (!vcrypto->cryptodev->ready) {
+ vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
+ } else {
+ vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
+ }
+}
+
+static void virtio_crypto_init_config(VirtIODevice *vdev)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+
+ vcrypto->conf.crypto_services =
+ vcrypto->conf.cryptodev->conf.crypto_services;
+ vcrypto->conf.cipher_algo_l =
+ vcrypto->conf.cryptodev->conf.cipher_algo_l;
+ vcrypto->conf.cipher_algo_h =
+ vcrypto->conf.cryptodev->conf.cipher_algo_h;
+ vcrypto->conf.hash_algo = vcrypto->conf.cryptodev->conf.hash_algo;
+ vcrypto->conf.mac_algo_l = vcrypto->conf.cryptodev->conf.mac_algo_l;
+ vcrypto->conf.mac_algo_h = vcrypto->conf.cryptodev->conf.mac_algo_h;
+ vcrypto->conf.aead_algo = vcrypto->conf.cryptodev->conf.aead_algo;
+ vcrypto->conf.max_cipher_key_len =
+ vcrypto->conf.cryptodev->conf.max_cipher_key_len;
+ vcrypto->conf.max_auth_key_len =
+ vcrypto->conf.cryptodev->conf.max_auth_key_len;
+ vcrypto->conf.max_size = vcrypto->conf.cryptodev->conf.max_size;
+}
+
+static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
+ int i;
+
+ vcrypto->cryptodev = vcrypto->conf.cryptodev;
+ if (vcrypto->cryptodev == NULL) {
+ error_setg(errp, "'cryptodev' parameter expects a valid object");
+ return;
+ }
+
+ vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1);
+ if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) {
+ error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
+ "must be a postive integer less than %d.",
+ vcrypto->max_queues, VIRTIO_QUEUE_MAX);
+ return;
+ }
+
+ virtio_init(vdev, "virtio-crypto", VIRTIO_ID_CRYPTO, vcrypto->config_size);
+ vcrypto->curr_queues = 1;
+ vcrypto->vqs = g_malloc0(sizeof(VirtIOCryptoQueue) * vcrypto->max_queues);
+ for (i = 0; i < vcrypto->max_queues; i++) {
+ vcrypto->vqs[i].dataq =
+ virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
+ vcrypto->vqs[i].dataq_bh =
+ qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]);
+ vcrypto->vqs[i].vcrypto = vcrypto;
+ }
+
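+    /* The control queue is added after the data queues, which is what
+     * virtio_crypto_vq2q() relies on. */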
+ vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
+ if (!vcrypto->cryptodev->ready) {
+ vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
+ } else {
+ vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
+ }
+
+ virtio_crypto_init_config(vdev);
+}
+
+static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
+ VirtIOCryptoQueue *q;
+ int i, max_queues;
+
+ max_queues = vcrypto->multiqueue ? vcrypto->max_queues : 1;
+ for (i = 0; i < max_queues; i++) {
+ virtio_del_queue(vdev, i);
+ q = &vcrypto->vqs[i];
+ qemu_bh_delete(q->dataq_bh);
+ }
+
+ g_free(vcrypto->vqs);
+
+ virtio_cleanup(vdev);
+}
+
+static const VMStateDescription vmstate_virtio_crypto = {
+ .name = "virtio-crypto",
+ .minimum_version_id = VIRTIO_CRYPTO_VM_VERSION,
+ .version_id = VIRTIO_CRYPTO_VM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static Property virtio_crypto_properties[] = {
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VirtIOCrypto *c = VIRTIO_CRYPTO(vdev);
+ struct virtio_crypto_config crypto_cfg;
+
+ /*
+     * The virtio-crypto device conforms to VIRTIO 1.0, which is always
+     * little-endian, so we can use the LE accessors directly.
+ */
+ stl_le_p(&crypto_cfg.status, c->status);
+ stl_le_p(&crypto_cfg.max_dataqueues, c->max_queues);
+ stl_le_p(&crypto_cfg.crypto_services, c->conf.crypto_services);
+ stl_le_p(&crypto_cfg.cipher_algo_l, c->conf.cipher_algo_l);
+ stl_le_p(&crypto_cfg.cipher_algo_h, c->conf.cipher_algo_h);
+ stl_le_p(&crypto_cfg.hash_algo, c->conf.hash_algo);
+ stl_le_p(&crypto_cfg.mac_algo_l, c->conf.mac_algo_l);
+ stl_le_p(&crypto_cfg.mac_algo_h, c->conf.mac_algo_h);
+ stl_le_p(&crypto_cfg.aead_algo, c->conf.aead_algo);
+ stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len);
+ stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len);
+ stq_le_p(&crypto_cfg.max_size, c->conf.max_size);
+
+ memcpy(config, &crypto_cfg, c->config_size);
+}
+
+static void virtio_crypto_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ dc->props = virtio_crypto_properties;
+ dc->vmsd = &vmstate_virtio_crypto;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ vdc->realize = virtio_crypto_device_realize;
+ vdc->unrealize = virtio_crypto_device_unrealize;
+ vdc->get_config = virtio_crypto_get_config;
+ vdc->get_features = virtio_crypto_get_features;
+ vdc->reset = virtio_crypto_reset;
+}
+
+static void virtio_crypto_instance_init(Object *obj)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(obj);
+
+ /*
+ * The default config_size is sizeof(struct virtio_crypto_config).
+     * Can be overridden with virtio_crypto_set_config_size.
+ */
+ vcrypto->config_size = sizeof(struct virtio_crypto_config);
+
+ object_property_add_link(obj, "cryptodev",
+ TYPE_CRYPTODEV_BACKEND,
+ (Object **)&vcrypto->conf.cryptodev,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
+}
+
+static const TypeInfo virtio_crypto_info = {
+ .name = TYPE_VIRTIO_CRYPTO,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIOCrypto),
+ .instance_init = virtio_crypto_instance_init,
+ .class_init = virtio_crypto_class_init,
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_crypto_info);
+}
+
+type_init(virtio_register_types)
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 13798b3cb8..a30270f902 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -89,38 +89,12 @@ typedef struct {
uint32_t guest_page_shift;
/* virtio-bus */
VirtioBusState bus;
- bool ioeventfd_disabled;
- bool ioeventfd_started;
bool format_transport_address;
} VirtIOMMIOProxy;
-static bool virtio_mmio_ioeventfd_started(DeviceState *d)
+static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-
- return proxy->ioeventfd_started;
-}
-
-static void virtio_mmio_ioeventfd_set_started(DeviceState *d, bool started,
- bool err)
-{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-
- proxy->ioeventfd_started = started;
-}
-
-static bool virtio_mmio_ioeventfd_disabled(DeviceState *d)
-{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-
- return !kvm_eventfds_enabled() || proxy->ioeventfd_disabled;
-}
-
-static void virtio_mmio_ioeventfd_set_disabled(DeviceState *d, bool disabled)
-{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-
- proxy->ioeventfd_disabled = disabled;
+ return kvm_eventfds_enabled();
}
static int virtio_mmio_ioeventfd_assign(DeviceState *d,
@@ -557,10 +531,7 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
k->save_config = virtio_mmio_save_config;
k->load_config = virtio_mmio_load_config;
k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
- k->ioeventfd_started = virtio_mmio_ioeventfd_started;
- k->ioeventfd_set_started = virtio_mmio_ioeventfd_set_started;
- k->ioeventfd_disabled = virtio_mmio_ioeventfd_disabled;
- k->ioeventfd_set_disabled = virtio_mmio_ioeventfd_set_disabled;
+ k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
k->has_variable_vring_alignment = true;
bus_class->max_dev = 1;
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 06831de5ff..62001b46d7 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -262,34 +262,11 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
return 0;
}
-static bool virtio_pci_ioeventfd_started(DeviceState *d)
+static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
- return proxy->ioeventfd_started;
-}
-
-static void virtio_pci_ioeventfd_set_started(DeviceState *d, bool started,
- bool err)
-{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- proxy->ioeventfd_started = started;
-}
-
-static bool virtio_pci_ioeventfd_disabled(DeviceState *d)
-{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- return proxy->ioeventfd_disabled ||
- !(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD);
-}
-
-static void virtio_pci_ioeventfd_set_disabled(DeviceState *d, bool disabled)
-{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- proxy->ioeventfd_disabled = disabled;
+ return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}
#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
@@ -1719,10 +1696,6 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
}
-
- if (!kvm_has_many_ioeventfds()) {
- proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
- }
}
static void virtio_pci_device_unplugged(DeviceState *d)
@@ -1751,6 +1724,10 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
bool pcie_port = pci_bus_is_express(pci_dev->bus) &&
!pci_bus_is_root(pci_dev->bus);
+ if (!kvm_has_many_ioeventfds()) {
+ proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
+ }
+
/*
* virtio pci bar layout used by default.
* subclasses can re-arrange things if needed.
@@ -2539,10 +2516,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->device_plugged = virtio_pci_device_plugged;
k->device_unplugged = virtio_pci_device_unplugged;
k->query_nvectors = virtio_pci_query_nvectors;
- k->ioeventfd_started = virtio_pci_ioeventfd_started;
- k->ioeventfd_set_started = virtio_pci_ioeventfd_set_started;
- k->ioeventfd_disabled = virtio_pci_ioeventfd_disabled;
- k->ioeventfd_set_disabled = virtio_pci_ioeventfd_set_disabled;
+ k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
}
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index b4edea6987..b2a996fa83 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -25,6 +25,8 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-input.h"
#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-crypto.h"
+
#ifdef CONFIG_VIRTFS
#include "hw/9pfs/virtio-9p.h"
#endif
@@ -48,6 +50,7 @@ typedef struct VirtIOInputHIDPCI VirtIOInputHIDPCI;
typedef struct VirtIOInputHostPCI VirtIOInputHostPCI;
typedef struct VirtIOGPUPCI VirtIOGPUPCI;
typedef struct VHostVSockPCI VHostVSockPCI;
+typedef struct VirtIOCryptoPCI VirtIOCryptoPCI;
/* virtio-pci-bus */
@@ -158,8 +161,6 @@ struct VirtIOPCIProxy {
uint32_t guest_features[2];
VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX];
- bool ioeventfd_disabled;
- bool ioeventfd_started;
VirtIOIRQFD *vector_irqfd;
int nvqs_with_notifiers;
VirtioBusState bus;
@@ -352,6 +353,18 @@ struct VHostVSockPCI {
};
#endif
+/*
+ * virtio-crypto-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_CRYPTO_PCI "virtio-crypto-pci"
+#define VIRTIO_CRYPTO_PCI(obj) \
+ OBJECT_CHECK(VirtIOCryptoPCI, (obj), TYPE_VIRTIO_CRYPTO_PCI)
+
+struct VirtIOCryptoPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOCrypto vdev;
+};
+
/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION 0
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index d48d1a98a7..bcbcfe063c 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -97,7 +97,6 @@ struct VirtQueue
uint16_t vector;
VirtIOHandleOutput handle_output;
VirtIOHandleOutput handle_aio_output;
- bool use_aio;
VirtIODevice *vdev;
EventNotifier guest_notifier;
EventNotifier host_notifier;
@@ -1287,9 +1286,8 @@ void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
}
}
-static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
- VirtIOHandleOutput handle_output,
- bool use_aio)
+VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
+ VirtIOHandleOutput handle_output)
{
int i;
@@ -1306,28 +1304,10 @@ static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
vdev->vq[i].handle_output = handle_output;
vdev->vq[i].handle_aio_output = NULL;
- vdev->vq[i].use_aio = use_aio;
return &vdev->vq[i];
}
-/* Add a virt queue and mark AIO.
- * An AIO queue will use the AioContext based event interface instead of the
- * default IOHandler and EventNotifier interface.
- */
-VirtQueue *virtio_add_queue_aio(VirtIODevice *vdev, int queue_size,
- VirtIOHandleOutput handle_output)
-{
- return virtio_add_queue_internal(vdev, queue_size, handle_output, true);
-}
-
-/* Add a normal virt queue (on the contrary to the AIO version above. */
-VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
- VirtIOHandleOutput handle_output)
-{
- return virtio_add_queue_internal(vdev, queue_size, handle_output, false);
-}
-
void virtio_del_queue(VirtIODevice *vdev, int n)
{
if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
@@ -1635,6 +1615,10 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
vdc->save(vdev, f);
}
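+    /* Devices converted to VMState describe their state via vdc->vmsd */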
+ if (vdc->vmsd) {
+ vmstate_save_state(f, vdc->vmsd, vdev, NULL);
+ }
+
/* Subsections */
vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
@@ -1781,6 +1765,13 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
}
}
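+    /* Counterpart of the optional vdc->vmsd section written by virtio_save() */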
+ if (vdc->vmsd) {
+ ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
+ if (ret) {
+ return ret;
+ }
+ }
+
/* Subsections */
ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
if (ret) {
@@ -2051,7 +2042,7 @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
}
}
-static void virtio_queue_host_notifier_read(EventNotifier *n)
+void virtio_queue_host_notifier_read(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
if (event_notifier_test_and_clear(n)) {
@@ -2059,32 +2050,6 @@ static void virtio_queue_host_notifier_read(EventNotifier *n)
}
}
-void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
- bool set_handler)
-{
- AioContext *ctx = qemu_get_aio_context();
- if (assign && set_handler) {
- if (vq->use_aio) {
- aio_set_event_notifier(ctx, &vq->host_notifier, true,
- virtio_queue_host_notifier_read);
- } else {
- event_notifier_set_handler(&vq->host_notifier, true,
- virtio_queue_host_notifier_read);
- }
- } else {
- if (vq->use_aio) {
- aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
- } else {
- event_notifier_set_handler(&vq->host_notifier, true, NULL);
- }
- }
- if (!assign) {
- /* Test and clear notifier before after disabling event,
- * in case poll callback didn't have time to run. */
- virtio_queue_host_notifier_read(&vq->host_notifier);
- }
-}
-
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
return &vq->host_notifier;
@@ -2118,6 +2083,9 @@ static void virtio_device_realize(DeviceState *dev, Error **errp)
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
Error *err = NULL;
+ /* Devices should either use vmsd or the load/save methods */
+ assert(!vdc->vmsd || !vdc->load);
+
if (vdc->realize != NULL) {
vdc->realize(dev, &err);
if (err != NULL) {
@@ -2158,15 +2126,102 @@ static Property virtio_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
+static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
+{
+ VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ int n, r, err;
+
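+    /* First pass: wire up a host notifier for every populated queue */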
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ VirtQueue *vq = &vdev->vq[n];
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = virtio_bus_set_host_notifier(qbus, n, true);
+ if (r < 0) {
+ err = r;
+ goto assign_error;
+ }
+ event_notifier_set_handler(&vq->host_notifier, true,
+ virtio_queue_host_notifier_read);
+ }
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ /* Kick right away to begin processing requests already in vring */
+ VirtQueue *vq = &vdev->vq[n];
+ if (!vq->vring.num) {
+ continue;
+ }
+ event_notifier_set(&vq->host_notifier);
+ }
+ return 0;
+
+assign_error:
+ while (--n >= 0) {
+ VirtQueue *vq = &vdev->vq[n];
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ event_notifier_set_handler(&vq->host_notifier, true, NULL);
+ r = virtio_bus_set_host_notifier(qbus, n, false);
+ assert(r >= 0);
+ }
+ return err;
+}
+
+int virtio_device_start_ioeventfd(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+
+ return virtio_bus_start_ioeventfd(vbus);
+}
+
+static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
+{
+ VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ int n, r;
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ VirtQueue *vq = &vdev->vq[n];
+
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ event_notifier_set_handler(&vq->host_notifier, true, NULL);
+ r = virtio_bus_set_host_notifier(qbus, n, false);
+ assert(r >= 0);
+ }
+}
+
+void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+
+ virtio_bus_stop_ioeventfd(vbus);
+}
+
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
/* Set the default value here. */
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = virtio_device_realize;
dc->unrealize = virtio_device_unrealize;
dc->bus_type = TYPE_VIRTIO_BUS;
dc->props = virtio_properties;
+ vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
+ vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
+}
+
+bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+
+ return virtio_bus_ioeventfd_enabled(vbus);
}
static const TypeInfo virtio_device_info = {
diff --git a/include/block/block.h b/include/block/block.h
index b81a3e35ce..49bb0b239a 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -7,16 +7,15 @@
#include "qemu/coroutine.h"
#include "block/accounting.h"
#include "block/dirty-bitmap.h"
+#include "block/blockjob.h"
#include "qapi/qmp/qobject.h"
#include "qapi-types.h"
#include "qemu/hbitmap.h"
/* block.c */
typedef struct BlockDriver BlockDriver;
-typedef struct BlockJob BlockJob;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildRole BdrvChildRole;
-typedef struct BlockJobTxn BlockJobTxn;
typedef struct BlockDriverInfo {
/* in bytes, 0 if irrelevant */
diff --git a/include/block/block_int.h b/include/block/block_int.h
index e7ff58419c..b02abbd618 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -665,8 +665,6 @@ int is_windows_drive(const char *filename);
* the new backing file if the job completes. Ignored if @base is %NULL.
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @on_error: The action to take upon error.
- * @cb: Completion function for the job.
- * @opaque: Opaque pointer value passed to @cb.
* @errp: Error object.
*
* Start a streaming operation on @bs. Clusters that are unallocated
@@ -678,8 +676,7 @@ int is_windows_drive(const char *filename);
*/
void stream_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, const char *backing_file_str,
- int64_t speed, BlockdevOnError on_error,
- BlockCompletionFunc *cb, void *opaque, Error **errp);
+ int64_t speed, BlockdevOnError on_error, Error **errp);
/**
* commit_start:
@@ -690,22 +687,22 @@ void stream_start(const char *job_id, BlockDriverState *bs,
* @base: Block device that will be written into, and become the new top.
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @on_error: The action to take upon error.
- * @cb: Completion function for the job.
- * @opaque: Opaque pointer value passed to @cb.
* @backing_file_str: String to use as the backing file in @top's overlay
* @errp: Error object.
*
*/
void commit_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, BlockDriverState *top, int64_t speed,
- BlockdevOnError on_error, BlockCompletionFunc *cb,
- void *opaque, const char *backing_file_str, Error **errp);
+ BlockdevOnError on_error, const char *backing_file_str,
+ Error **errp);
/**
* commit_active_start:
* @job_id: The id of the newly-created job, or %NULL to use the
* device name of @bs.
* @bs: Active block device to be committed.
* @base: Block device that will be written into, and become the new top.
+ * @creation_flags: Flags that control the behavior of the job lifetime.
+ *                  See @BlockJobCreateFlags.
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @on_error: The action to take upon error.
* @cb: Completion function for the job.
@@ -715,8 +712,8 @@ void commit_start(const char *job_id, BlockDriverState *bs,
*
*/
void commit_active_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, int64_t speed,
- BlockdevOnError on_error,
+ BlockDriverState *base, int creation_flags,
+ int64_t speed, BlockdevOnError on_error,
BlockCompletionFunc *cb,
void *opaque, Error **errp, bool auto_complete);
/*
@@ -735,8 +732,6 @@ void commit_active_start(const char *job_id, BlockDriverState *bs,
* @on_source_error: The action to take upon error reading from the source.
* @on_target_error: The action to take upon error writing to the target.
* @unmap: Whether to unmap target where source sectors only contain zeroes.
- * @cb: Completion function for the job.
- * @opaque: Opaque pointer value passed to @cb.
* @errp: Error object.
*
* Start a mirroring operation on @bs. Clusters that are allocated
@@ -750,9 +745,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
- bool unmap,
- BlockCompletionFunc *cb,
- void *opaque, Error **errp);
+ bool unmap, Error **errp);
/*
* backup_start:
@@ -765,6 +758,8 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
* @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
* @on_source_error: The action to take upon error reading from the source.
* @on_target_error: The action to take upon error writing to the target.
+ * @creation_flags: Flags that control the behavior of the job lifetime.
+ *                  See @BlockJobCreateFlags.
* @cb: Completion function for the job.
* @opaque: Opaque pointer value passed to @cb.
* @txn: Transaction that this job is part of (may be NULL).
@@ -778,6 +773,7 @@ void backup_start(const char *job_id, BlockDriverState *bs,
bool compress,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
+ int creation_flags,
BlockCompletionFunc *cb, void *opaque,
BlockJobTxn *txn, Error **errp);
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 4dfb16b43f..356cacf004 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -28,85 +28,15 @@
#include "block/block.h"
-/**
- * BlockJobDriver:
- *
- * A class type for block job driver.
- */
-typedef struct BlockJobDriver {
- /** Derived BlockJob struct size */
- size_t instance_size;
-
- /** String describing the operation, part of query-block-jobs QMP API */
- BlockJobType job_type;
-
- /** Optional callback for job types that support setting a speed limit */
- void (*set_speed)(BlockJob *job, int64_t speed, Error **errp);
-
- /** Optional callback for job types that need to forward I/O status reset */
- void (*iostatus_reset)(BlockJob *job);
-
- /**
- * Optional callback for job types whose completion must be triggered
- * manually.
- */
- void (*complete)(BlockJob *job, Error **errp);
-
- /**
- * If the callback is not NULL, it will be invoked when all the jobs
- * belonging to the same transaction complete; or upon this job's
- * completion if it is not in a transaction. Skipped if NULL.
- *
- * All jobs will complete with a call to either .commit() or .abort() but
- * never both.
- */
- void (*commit)(BlockJob *job);
-
- /**
- * If the callback is not NULL, it will be invoked when any job in the
- * same transaction fails; or upon this job's failure (due to error or
- * cancellation) if it is not in a transaction. Skipped if NULL.
- *
- * All jobs will complete with a call to either .commit() or .abort() but
- * never both.
- */
- void (*abort)(BlockJob *job);
-
- /**
- * If the callback is not NULL, it will be invoked when the job transitions
- * into the paused state. Paused jobs must not perform any asynchronous
- * I/O or event loop activity. This callback is used to quiesce jobs.
- */
- void coroutine_fn (*pause)(BlockJob *job);
-
- /**
- * If the callback is not NULL, it will be invoked when the job transitions
- * out of the paused state. Any asynchronous I/O or event loop activity
- * should be restarted from this callback.
- */
- void coroutine_fn (*resume)(BlockJob *job);
-
- /*
- * If the callback is not NULL, it will be invoked before the job is
- * resumed in a new AioContext. This is the place to move any resources
- * besides job->blk to the new AioContext.
- */
- void (*attached_aio_context)(BlockJob *job, AioContext *new_context);
-
- /*
- * If the callback is not NULL, it will be invoked when the job has to be
- * synchronously cancelled or completed; it should drain BlockDriverStates
- * as required to ensure progress.
- */
- void (*drain)(BlockJob *job);
-} BlockJobDriver;
+typedef struct BlockJobDriver BlockJobDriver;
+typedef struct BlockJobTxn BlockJobTxn;
/**
* BlockJob:
*
* Long-running operation on a BlockDriverState.
*/
-struct BlockJob {
+typedef struct BlockJob {
/** The job type, including the job vtable. */
const BlockJobDriver *driver;
@@ -114,7 +44,7 @@ struct BlockJob {
BlockBackend *blk;
/**
- * The ID of the block job.
+ * The ID of the block job. May be NULL for internal jobs.
*/
char *id;
@@ -208,7 +138,12 @@ struct BlockJob {
/** Non-NULL if this job is part of a transaction */
BlockJobTxn *txn;
QLIST_ENTRY(BlockJob) txn_list;
-};
+} BlockJob;
+
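+/* Creation flags for block_job_create(); BLOCK_JOB_INTERNAL jobs are not
+ * visible to the management layer (see block_job_is_internal()). */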
+typedef enum BlockJobCreateFlags {
+ BLOCK_JOB_DEFAULT = 0x00,
+ BLOCK_JOB_INTERNAL = 0x01,
+} BlockJobCreateFlags;
/**
* block_job_next:
@@ -232,30 +167,6 @@ BlockJob *block_job_next(BlockJob *job);
BlockJob *block_job_get(const char *id);
/**
- * block_job_create:
- * @job_id: The id of the newly-created job, or %NULL to have one
- * generated automatically.
- * @job_type: The class object for the newly-created job.
- * @bs: The block
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @cb: Completion function for the job.
- * @opaque: Opaque pointer value passed to @cb.
- * @errp: Error object.
- *
- * Create a new long-running block device job and return it. The job
- * will call @cb asynchronously when the job completes. Note that
- * @bs may have been closed at the time the @cb it is called. If
- * this is the case, the job may be reported as either cancelled or
- * completed.
- *
- * This function is not part of the public job interface; it should be
- * called from a wrapper that is specific to the job type.
- */
-void *block_job_create(const char *job_id, const BlockJobDriver *driver,
- BlockDriverState *bs, int64_t speed,
- BlockCompletionFunc *cb, void *opaque, Error **errp);
-
-/**
* block_job_add_bdrv:
* @job: A block job
* @bs: A BlockDriverState that is involved in @job
@@ -267,52 +178,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
void block_job_add_bdrv(BlockJob *job, BlockDriverState *bs);
/**
- * block_job_sleep_ns:
- * @job: The job that calls the function.
- * @clock: The clock to sleep on.
- * @ns: How many nanoseconds to stop for.
- *
- * Put the job to sleep (assuming that it wasn't canceled) for @ns
- * nanoseconds. Canceling the job will interrupt the wait immediately.
- */
-void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns);
-
-/**
- * block_job_yield:
- * @job: The job that calls the function.
- *
- * Yield the block job coroutine.
- */
-void block_job_yield(BlockJob *job);
-
-/**
- * block_job_ref:
- * @bs: The block device.
- *
- * Grab a reference to the block job. Should be paired with block_job_unref.
- */
-void block_job_ref(BlockJob *job);
-
-/**
- * block_job_unref:
- * @bs: The block device.
- *
- * Release reference to the block job and release resources if it is the last
- * reference.
- */
-void block_job_unref(BlockJob *job);
-
-/**
- * block_job_completed:
- * @job: The job being completed.
- * @ret: The status code.
- *
- * Call the completion function that was registered at creation time, and
- * free @job.
- */
-void block_job_completed(BlockJob *job, int ret);
-
-/**
* block_job_set_speed:
* @job: The job to set the speed for.
* @speed: The new value
@@ -341,29 +206,12 @@ void block_job_cancel(BlockJob *job);
void block_job_complete(BlockJob *job, Error **errp);
/**
- * block_job_is_cancelled:
- * @job: The job being queried.
- *
- * Returns whether the job is scheduled for cancellation.
- */
-bool block_job_is_cancelled(BlockJob *job);
-
-/**
* block_job_query:
* @job: The job to get information about.
*
* Return information about a job.
*/
-BlockJobInfo *block_job_query(BlockJob *job);
-
-/**
- * block_job_pause_point:
- * @job: The job that is ready to pause.
- *
- * Pause now if block_job_pause() has been called. Block jobs that perform
- * lots of I/O must call this between requests so that the job can be paused.
- */
-void coroutine_fn block_job_pause_point(BlockJob *job);
+BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
/**
* block_job_pause:
@@ -374,45 +222,38 @@ void coroutine_fn block_job_pause_point(BlockJob *job);
void block_job_pause(BlockJob *job);
/**
- * block_job_resume:
- * @job: The job to be resumed.
- *
- * Resume the specified job. Must be paired with a preceding block_job_pause.
- */
-void block_job_resume(BlockJob *job);
-
-/**
- * block_job_enter:
- * @job: The job to enter.
+ * block_job_user_pause:
+ * @job: The job to be paused.
*
- * Continue the specified job by entering the coroutine.
+ * Asynchronously pause the specified job.
+ * Do not allow a resume until a matching call to block_job_user_resume.
*/
-void block_job_enter(BlockJob *job);
+void block_job_user_pause(BlockJob *job);
/**
- * block_job_event_cancelled:
- * @job: The job whose information is requested.
+ * block_job_user_paused:
+ * @job: The job to query.
*
- * Send a BLOCK_JOB_CANCELLED event for the specified job.
+ * Returns true if the job is user-paused.
*/
-void block_job_event_cancelled(BlockJob *job);
+bool block_job_user_paused(BlockJob *job);
/**
- * block_job_ready:
- * @job: The job which is now ready to complete.
- * @msg: Error message. Only present on failure.
+ * block_job_resume:
+ * @job: The job to be resumed.
*
- * Send a BLOCK_JOB_COMPLETED event for the specified job.
+ * Resume the specified job. Must be paired with a preceding block_job_pause.
*/
-void block_job_event_completed(BlockJob *job, const char *msg);
+void block_job_resume(BlockJob *job);
/**
- * block_job_ready:
- * @job: The job which is now ready to complete.
+ * block_job_user_resume:
+ * @job: The job to be resumed.
*
- * Send a BLOCK_JOB_READY event for the specified job.
+ * Resume the specified job.
+ * Must be paired with a preceding block_job_user_pause.
*/
-void block_job_event_ready(BlockJob *job);
+void block_job_user_resume(BlockJob *job);
/**
* block_job_cancel_sync:
@@ -460,37 +301,6 @@ int block_job_complete_sync(BlockJob *job, Error **errp);
void block_job_iostatus_reset(BlockJob *job);
/**
- * block_job_error_action:
- * @job: The job to signal an error for.
- * @on_err: The error action setting.
- * @is_read: Whether the operation was a read.
- * @error: The error that was reported.
- *
- * Report an I/O error for a block job and possibly stop the VM. Return the
- * action that was selected based on @on_err and @error.
- */
-BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
- int is_read, int error);
-
-typedef void BlockJobDeferToMainLoopFn(BlockJob *job, void *opaque);
-
-/**
- * block_job_defer_to_main_loop:
- * @job: The job
- * @fn: The function to run in the main loop
- * @opaque: The opaque value that is passed to @fn
- *
- * Execute a given function in the main loop with the BlockDriverState
- * AioContext acquired. Block jobs must call bdrv_unref(), bdrv_close(), and
- * anything that uses bdrv_drain_all() in the main loop.
- *
- * The @job AioContext is held while @fn executes.
- */
-void block_job_defer_to_main_loop(BlockJob *job,
- BlockJobDeferToMainLoopFn *fn,
- void *opaque);
-
-/**
* block_job_txn_new:
*
* Allocate and return a new block job transaction. Jobs can be added to the
@@ -525,4 +335,12 @@ void block_job_txn_unref(BlockJobTxn *txn);
*/
void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job);
+/**
+ * block_job_is_internal:
+ * @job: The job being queried.
+ *
+ * Returns true if the job should not be visible to the management layer.
+ */
+bool block_job_is_internal(BlockJob *job);
+
#endif
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
new file mode 100644
index 0000000000..40275e4437
--- /dev/null
+++ b/include/block/blockjob_int.h
@@ -0,0 +1,239 @@
+/*
+ * Declarations for long-running block device operations
+ *
+ * Copyright (c) 2011 IBM Corp.
+ * Copyright (c) 2012 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef BLOCKJOB_INT_H
+#define BLOCKJOB_INT_H
+
+#include "block/blockjob.h"
+#include "block/block.h"
+
+/**
+ * BlockJobDriver:
+ *
+ * A class type for block job drivers.
+ */
+struct BlockJobDriver {
+ /** Derived BlockJob struct size */
+ size_t instance_size;
+
+ /** String describing the operation, part of query-block-jobs QMP API */
+ BlockJobType job_type;
+
+ /** Optional callback for job types that support setting a speed limit */
+ void (*set_speed)(BlockJob *job, int64_t speed, Error **errp);
+
+ /** Optional callback for job types that need to forward I/O status reset */
+ void (*iostatus_reset)(BlockJob *job);
+
+ /**
+ * Optional callback for job types whose completion must be triggered
+ * manually.
+ */
+ void (*complete)(BlockJob *job, Error **errp);
+
+ /**
+ * If the callback is not NULL, it will be invoked when all the jobs
+ * belonging to the same transaction complete; or upon this job's
+ * completion if it is not in a transaction. Skipped if NULL.
+ *
+ * All jobs will complete with a call to either .commit() or .abort() but
+ * never both.
+ */
+ void (*commit)(BlockJob *job);
+
+ /**
+ * If the callback is not NULL, it will be invoked when any job in the
+ * same transaction fails; or upon this job's failure (due to error or
+ * cancellation) if it is not in a transaction. Skipped if NULL.
+ *
+ * All jobs will complete with a call to either .commit() or .abort() but
+ * never both.
+ */
+ void (*abort)(BlockJob *job);
+
+ /**
+ * If the callback is not NULL, it will be invoked when the job transitions
+ * into the paused state. Paused jobs must not perform any asynchronous
+ * I/O or event loop activity. This callback is used to quiesce jobs.
+ */
+ void coroutine_fn (*pause)(BlockJob *job);
+
+ /**
+ * If the callback is not NULL, it will be invoked when the job transitions
+ * out of the paused state. Any asynchronous I/O or event loop activity
+ * should be restarted from this callback.
+ */
+ void coroutine_fn (*resume)(BlockJob *job);
+
+ /*
+ * If the callback is not NULL, it will be invoked before the job is
+ * resumed in a new AioContext. This is the place to move any resources
+ * besides job->blk to the new AioContext.
+ */
+ void (*attached_aio_context)(BlockJob *job, AioContext *new_context);
+
+ /*
+ * If the callback is not NULL, it will be invoked when the job has to be
+ * synchronously cancelled or completed; it should drain BlockDriverStates
+ * as required to ensure progress.
+ */
+ void (*drain)(BlockJob *job);
+};
+
+/**
+ * block_job_create:
+ * @job_id: The id of the newly-created job, or %NULL to have one
+ * generated automatically.
+ * @driver: The class object for the newly-created job.
+ * @bs: The block device on which the job operates.
+ * @flags: Creation flags for the job (e.g. BLOCK_JOB_DEFAULT).
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @errp: Error object.
+ *
+ * Create a new long-running block device job and return it. The job
+ * will call @cb asynchronously when the job completes. Note that
+ * @bs may have been closed by the time @cb is called. If
+ * this is the case, the job may be reported as either cancelled or
+ * completed.
+ *
+ * This function is not part of the public job interface; it should be
+ * called from a wrapper that is specific to the job type.
+ */
+void *block_job_create(const char *job_id, const BlockJobDriver *driver,
+ BlockDriverState *bs, int64_t speed, int flags,
+ BlockCompletionFunc *cb, void *opaque, Error **errp);
+
+/**
+ * block_job_sleep_ns:
+ * @job: The job that calls the function.
+ * @type: The type of clock to sleep on.
+ * @ns: How many nanoseconds to stop for.
+ *
+ * Put the job to sleep (assuming that it wasn't canceled) for @ns
+ * nanoseconds. Canceling the job will interrupt the wait immediately.
+ */
+void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns);
+
+/**
+ * block_job_yield:
+ * @job: The job that calls the function.
+ *
+ * Yield the block job coroutine.
+ */
+void block_job_yield(BlockJob *job);
+
+/**
+ * block_job_ref:
+ * @job: The block job.
+ *
+ * Grab a reference to the block job. Should be paired with block_job_unref.
+ */
+void block_job_ref(BlockJob *job);
+
+/**
+ * block_job_unref:
+ * @job: The block job.
+ *
+ * Release reference to the block job and release resources if it is the last
+ * reference.
+ */
+void block_job_unref(BlockJob *job);
+
+/**
+ * block_job_completed:
+ * @job: The job being completed.
+ * @ret: The status code.
+ *
+ * Call the completion function that was registered at creation time, and
+ * free @job.
+ */
+void block_job_completed(BlockJob *job, int ret);
+
+/**
+ * block_job_is_cancelled:
+ * @job: The job being queried.
+ *
+ * Returns whether the job is scheduled for cancellation.
+ */
+bool block_job_is_cancelled(BlockJob *job);
+
+/**
+ * block_job_pause_point:
+ * @job: The job that is ready to pause.
+ *
+ * Pause now if block_job_pause() has been called. Block jobs that perform
+ * lots of I/O must call this between requests so that the job can be paused.
+ */
+void coroutine_fn block_job_pause_point(BlockJob *job);
+
+/**
+ * block_job_enter:
+ * @job: The job to enter.
+ *
+ * Continue the specified job by entering the coroutine.
+ */
+void block_job_enter(BlockJob *job);
+
+/**
+ * block_job_event_ready:
+ * @job: The job which is now ready to be completed.
+ *
+ * Send a BLOCK_JOB_READY event for the specified job.
+ */
+void block_job_event_ready(BlockJob *job);
+
+/**
+ * block_job_error_action:
+ * @job: The job to signal an error for.
+ * @on_err: The error action setting.
+ * @is_read: Whether the operation was a read.
+ * @error: The error that was reported.
+ *
+ * Report an I/O error for a block job and possibly stop the VM. Return the
+ * action that was selected based on @on_err and @error.
+ */
+BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
+ int is_read, int error);
+
+typedef void BlockJobDeferToMainLoopFn(BlockJob *job, void *opaque);
+
+/**
+ * block_job_defer_to_main_loop:
+ * @job: The job
+ * @fn: The function to run in the main loop
+ * @opaque: The opaque value that is passed to @fn
+ *
+ * Execute a given function in the main loop with the BlockDriverState
+ * AioContext acquired. Block jobs must call bdrv_unref(), bdrv_close(), and
+ * anything that uses bdrv_drain_all() in the main loop.
+ *
+ * The @job AioContext is held while @fn executes.
+ */
+void block_job_defer_to_main_loop(BlockJob *job,
+ BlockJobDeferToMainLoopFn *fn,
+ void *opaque);
+
+#endif
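For orientation, here is a minimal sketch of how a job type might plug into the driver interface above. The CopyJob type, its callback, and the wrapper are hypothetical, and BLOCK_JOB_TYPE_COMMIT is only assumed as a plausible BlockJobType value:

    /* Hypothetical job type built on the BlockJobDriver interface above. */
    typedef struct CopyJob {
        BlockJob common;            /* base class, must come first */
        bool should_complete;
    } CopyJob;

    static void copy_job_complete(BlockJob *job, Error **errp)
    {
        CopyJob *s = (CopyJob *)job;
        s->should_complete = true;  /* picked up by the job coroutine */
    }

    static const BlockJobDriver copy_job_driver = {
        .instance_size = sizeof(CopyJob),
        .job_type      = BLOCK_JOB_TYPE_COMMIT,   /* assumed enum value */
        .complete      = copy_job_complete,
    };

    /* Creation goes through the internal helper; a real wrapper would
     * live in block/, next to the other job types. */
    static CopyJob *copy_job_start(BlockDriverState *bs, Error **errp)
    {
        return block_job_create("copy0", &copy_job_driver, bs, 0,
                                BLOCK_JOB_DEFAULT, NULL, NULL, errp);
    }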
diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h
index da4ef7fbd3..901a4ae876 100644
--- a/include/hw/acpi/acpi_dev_interface.h
+++ b/include/hw/acpi/acpi_dev_interface.h
@@ -10,6 +10,7 @@ typedef enum {
ACPI_PCI_HOTPLUG_STATUS = 2,
ACPI_CPU_HOTPLUG_STATUS = 4,
ACPI_MEMORY_HOTPLUG_STATUS = 8,
+ ACPI_NVDIMM_HOTPLUG_STATUS = 16,
} AcpiEventStatusBits;
#define TYPE_ACPI_DEVICE_IF "acpi-device-interface"
diff --git a/include/hw/hotplug.h b/include/hw/hotplug.h
index c0db869f85..10ca5b6504 100644
--- a/include/hw/hotplug.h
+++ b/include/hw/hotplug.h
@@ -47,6 +47,7 @@ typedef void (*hotplug_fn)(HotplugHandler *plug_handler,
* @parent: Opaque parent interface.
* @pre_plug: pre plug callback called at start of device.realize(true)
* @plug: plug callback called at end of device.realize(true).
+ * @post_plug: post plug callback called after device is successfully plugged.
* @unplug_request: unplug request callback.
* Used as a means to initiate device unplug for devices that
* require asynchronous unplug handling.
@@ -61,6 +62,7 @@ typedef struct HotplugHandlerClass {
/* <public> */
hotplug_fn pre_plug;
hotplug_fn plug;
+ hotplug_fn post_plug;
hotplug_fn unplug_request;
hotplug_fn unplug;
} HotplugHandlerClass;
@@ -83,6 +85,14 @@ void hotplug_handler_pre_plug(HotplugHandler *plug_handler,
DeviceState *plugged_dev,
Error **errp);
+/**
+ * hotplug_handler_post_plug:
+ *
+ * Call #HotplugHandlerClass.post_plug callback of @plug_handler.
+ */
+void hotplug_handler_post_plug(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev,
+ Error **errp);
/**
* hotplug_handler_unplug_request:
diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h
index 1cfe9e01c4..33cd421ace 100644
--- a/include/hw/mem/nvdimm.h
+++ b/include/hw/mem/nvdimm.h
@@ -98,12 +98,35 @@ typedef struct NVDIMMClass NVDIMMClass;
#define NVDIMM_ACPI_IO_BASE 0x0a18
#define NVDIMM_ACPI_IO_LEN 4
+/*
+ * The buffer, @fit, stores the FIT info for all present NVDIMM
+ * devices and is updated whenever an NVDIMM device is plugged or
+ * unplugged.
+ *
+ * Rules for using the buffer:
+ * 1) the user should hold @lock to access the buffer.
+ * 2) mark @dirty whenever the buffer is updated.
+ *
+ * These rules prevent the NVDIMM ACPI _FIT method from reading
+ * incomplete or stale FIT info when a FIT update happens across
+ * multiple RFIT calls.
+ */
+struct NvdimmFitBuffer {
+ QemuMutex lock;
+ GArray *fit;
+ bool dirty;
+};
+typedef struct NvdimmFitBuffer NvdimmFitBuffer;
+
struct AcpiNVDIMMState {
/* detect if NVDIMM support is enabled. */
bool is_enabled;
/* the data of the fw_cfg file NVDIMM_DSM_MEM_FILE. */
GArray *dsm_mem;
+
+ NvdimmFitBuffer fit_buf;
+
/* the IO region used by OSPM to transfer control to QEMU. */
MemoryRegion io_mr;
};
@@ -112,5 +135,7 @@ typedef struct AcpiNVDIMMState AcpiNVDIMMState;
void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
FWCfgState *fw_cfg, Object *owner);
void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
- BIOSLinker *linker, GArray *dsm_dma_arrea);
+ BIOSLinker *linker, AcpiNVDIMMState *state,
+ uint32_t ram_slots);
+void nvdimm_acpi_hotplug(AcpiNVDIMMState *state);
#endif
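A sketch of the locking rules spelled out in the comment above; nvdimm_fit_buffer_update() is a hypothetical helper, and the construction of the new FIT GArray is elided:

    /* Swap in a rebuilt FIT under the buffer lock and mark it dirty,
     * following rules 1) and 2) above. */
    static void nvdimm_fit_buffer_update(NvdimmFitBuffer *fit_buf,
                                         GArray *new_fit)
    {
        qemu_mutex_lock(&fit_buf->lock);
        g_array_free(fit_buf->fit, true);
        fit_buf->fit = new_fit;
        fit_buf->dirty = true;   /* readers must restart their RFIT loop */
        qemu_mutex_unlock(&fit_buf->lock);
    }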
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 2e4b67ea50..fdf7fdab81 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -70,21 +70,11 @@ typedef struct VirtioBusClass {
void (*device_unplugged)(DeviceState *d);
int (*query_nvectors)(DeviceState *d);
/*
- * ioeventfd handling: if the transport implements ioeventfd_started,
- * it must implement the other ioeventfd callbacks as well
+ * ioeventfd handling: if the transport implements ioeventfd_assign,
+ * it must implement ioeventfd_enabled as well.
*/
- /* Returns true if the ioeventfd has been started for the device. */
- bool (*ioeventfd_started)(DeviceState *d);
- /*
- * Sets the 'ioeventfd started' state after the ioeventfd has been
- * started/stopped for the device. err signifies whether an error
- * had occurred.
- */
- void (*ioeventfd_set_started)(DeviceState *d, bool started, bool err);
- /* Returns true if the ioeventfd has been disabled for the device. */
- bool (*ioeventfd_disabled)(DeviceState *d);
- /* Sets the 'ioeventfd disabled' state for the device. */
- void (*ioeventfd_set_disabled)(DeviceState *d, bool disabled);
+ /* Returns true if the ioeventfd is enabled for the device. */
+ bool (*ioeventfd_enabled)(DeviceState *d);
/*
* Assigns/deassigns the ioeventfd backing for the transport on
* the device for queue number n. Returns an error value on
@@ -102,6 +92,11 @@ typedef struct VirtioBusClass {
struct VirtioBusState {
BusState parent_obj;
+
+ /*
+ * Set if ioeventfd has been started.
+ */
+ bool ioeventfd_started;
};
void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp);
@@ -130,8 +125,10 @@ static inline VirtIODevice *virtio_bus_get_device(VirtioBusState *bus)
return (VirtIODevice *)qdev;
}
+/* Return whether the proxy allows ioeventfd. */
+bool virtio_bus_ioeventfd_enabled(VirtioBusState *bus);
/* Start the ioeventfd. */
-void virtio_bus_start_ioeventfd(VirtioBusState *bus);
+int virtio_bus_start_ioeventfd(VirtioBusState *bus);
/* Stop the ioeventfd. */
void virtio_bus_stop_ioeventfd(VirtioBusState *bus);
/* Switch from/to the generic ioeventfd handler */
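Since virtio_bus_start_ioeventfd() now returns an int, callers can fail over when host notifier assignment does not work. A hedged sketch of the calling pattern (the function name and fallback message are illustrative only):

    /* Try to start ioeventfd; fall back to userspace I/O handlers if
     * assignment fails. */
    static void example_device_start(VirtioBusState *bus)
    {
        if (virtio_bus_ioeventfd_enabled(bus)) {
            int r = virtio_bus_start_ioeventfd(bus);
            if (r < 0) {
                error_report("ioeventfd startup failed (%d), "
                             "falling back to userspace I/O", r);
            }
        }
    }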
diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
new file mode 100644
index 0000000000..a00a0bfaba
--- /dev/null
+++ b/include/hw/virtio/virtio-crypto.h
@@ -0,0 +1,101 @@
+/*
+ * Virtio crypto Support
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef _QEMU_VIRTIO_CRYPTO_H
+#define _QEMU_VIRTIO_CRYPTO_H
+
+#include "standard-headers/linux/virtio_crypto.h"
+#include "hw/virtio/virtio.h"
+#include "sysemu/iothread.h"
+#include "sysemu/cryptodev.h"
+
+
+#define DEBUG_VIRTIO_CRYPTO 0
+
+#define DPRINTF(fmt, ...) \
+do { \
+ if (DEBUG_VIRTIO_CRYPTO) { \
+ fprintf(stderr, "virtio_crypto: " fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+
+#define TYPE_VIRTIO_CRYPTO "virtio-crypto-device"
+#define VIRTIO_CRYPTO(obj) \
+ OBJECT_CHECK(VirtIOCrypto, (obj), TYPE_VIRTIO_CRYPTO)
+#define VIRTIO_CRYPTO_GET_PARENT_CLASS(obj) \
+ OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_CRYPTO)
+
+
+typedef struct VirtIOCryptoConf {
+ CryptoDevBackend *cryptodev;
+
+ /* Supported service mask */
+ uint32_t crypto_services;
+
+ /* Detailed algorithms mask */
+ uint32_t cipher_algo_l;
+ uint32_t cipher_algo_h;
+ uint32_t hash_algo;
+ uint32_t mac_algo_l;
+ uint32_t mac_algo_h;
+ uint32_t aead_algo;
+
+ /* Maximum length of cipher key */
+ uint32_t max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ uint32_t max_auth_key_len;
+ /* Maximum size of each crypto request's content */
+ uint64_t max_size;
+} VirtIOCryptoConf;
+
+struct VirtIOCrypto;
+
+typedef struct VirtIOCryptoReq {
+ VirtQueueElement elem;
+ /* flags of operation, such as type of algorithm */
+ uint32_t flags;
+ struct virtio_crypto_inhdr *in;
+ struct iovec *in_iov; /* Head address of dest iovec */
+ unsigned int in_num; /* Number of dest iovec */
+ size_t in_len;
+ VirtQueue *vq;
+ struct VirtIOCrypto *vcrypto;
+ union {
+ CryptoDevBackendSymOpInfo *sym_op_info;
+ } u;
+} VirtIOCryptoReq;
+
+typedef struct VirtIOCryptoQueue {
+ VirtQueue *dataq;
+ QEMUBH *dataq_bh;
+ struct VirtIOCrypto *vcrypto;
+} VirtIOCryptoQueue;
+
+typedef struct VirtIOCrypto {
+ VirtIODevice parent_obj;
+
+ VirtQueue *ctrl_vq;
+ VirtIOCryptoQueue *vqs;
+ VirtIOCryptoConf conf;
+ CryptoDevBackend *cryptodev;
+
+ uint32_t max_queues;
+ uint32_t status;
+
+ int multiqueue;
+ uint32_t curr_queues;
+ size_t config_size;
+} VirtIOCrypto;
+
+#endif /* _QEMU_VIRTIO_CRYPTO_H */
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index a1e0cfb449..9fbc7d7475 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -134,9 +134,9 @@ void virtio_scsi_free_req(VirtIOSCSIReq *req);
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
uint32_t event, uint32_t reason);
-void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread);
-void virtio_scsi_dataplane_start(VirtIOSCSI *s);
-void virtio_scsi_dataplane_stop(VirtIOSCSI *s);
+void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp);
+int virtio_scsi_dataplane_start(VirtIODevice *s);
+void virtio_scsi_dataplane_stop(VirtIODevice *s);
void virtio_scsi_dataplane_notify(VirtIODevice *vdev, VirtIOSCSIReq *req);
#endif /* QEMU_VIRTIO_SCSI_H */
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index b913aac455..ac65d6a594 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -125,8 +125,14 @@ typedef struct VirtioDeviceClass {
* must mask in frontend instead.
*/
void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
+ int (*start_ioeventfd)(VirtIODevice *vdev);
+ void (*stop_ioeventfd)(VirtIODevice *vdev);
+ /* Saving and loading of a device; trying to deprecate save/load
+ * use vmsd for new devices.
+ */
void (*save)(VirtIODevice *vdev, QEMUFile *f);
int (*load)(VirtIODevice *vdev, QEMUFile *f, int version_id);
+ const VMStateDescription *vmsd;
} VirtioDeviceClass;
void virtio_instance_init_common(Object *proxy_obj, void *data,
@@ -146,9 +152,6 @@ typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *);
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
VirtIOHandleOutput handle_output);
-VirtQueue *virtio_add_queue_aio(VirtIODevice *vdev, int queue_size,
- VirtIOHandleOutput handle_output);
-
void virtio_del_queue(VirtIODevice *vdev, int n);
void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num);
@@ -265,9 +268,11 @@ uint16_t virtio_get_queue_index(VirtQueue *vq);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
bool with_irqfd);
+int virtio_device_start_ioeventfd(VirtIODevice *vdev);
+void virtio_device_stop_ioeventfd(VirtIODevice *vdev);
+bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
-void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
- bool set_handler);
+void virtio_queue_host_notifier_read(EventNotifier *n);
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
void (*fn)(VirtIODevice *,
VirtQueue *));
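A sketch of how a device class is expected to wire the new start_ioeventfd/stop_ioeventfd hooks; the class_init function here is illustrative, but the virtio-scsi entry points are the ones retyped in this series:

    /* Route ioeventfd start/stop through the device's dataplane code. */
    static void example_class_init(ObjectClass *klass, void *data)
    {
        VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

        vdc->start_ioeventfd = virtio_scsi_dataplane_start; /* int (*)(VirtIODevice *) */
        vdc->stop_ioeventfd  = virtio_scsi_dataplane_stop;  /* void (*)(VirtIODevice *) */
    }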
diff --git a/include/qemu/log.h b/include/qemu/log.h
index 00bf37fc0f..a50e994c21 100644
--- a/include/qemu/log.h
+++ b/include/qemu/log.h
@@ -51,6 +51,22 @@ static inline bool qemu_loglevel_mask(int mask)
return (qemu_loglevel & mask) != 0;
}
+/* Lock output for a series of related logs. Since this is not needed
+ * for a single qemu_log / qemu_log_mask / qemu_log_mask_and_addr, we
+ * assume that qemu_loglevel_mask has already been tested, and that
+ * qemu_loglevel is never set when qemu_logfile is unset.
+ */
+
+static inline void qemu_log_lock(void)
+{
+ qemu_flockfile(qemu_logfile);
+}
+
+static inline void qemu_log_unlock(void)
+{
+ qemu_funlockfile(qemu_logfile);
+}
+
/* Logging functions: */
/* main logging function
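The intended usage pattern, as applied throughout the target-*/translate.c hunks below: test the log level once, then hold the lock around the whole group of related log calls so output from concurrent translation threads cannot interleave:

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log_lock();
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, size, flags);
        qemu_log("\n");
        qemu_log_unlock();
    }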
diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h
new file mode 100644
index 0000000000..82275a84d8
--- /dev/null
+++ b/include/standard-headers/linux/virtio_crypto.h
@@ -0,0 +1,429 @@
+#ifndef _LINUX_VIRTIO_CRYPTO_H
+#define _LINUX_VIRTIO_CRYPTO_H
+/* This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+#include "standard-headers/linux/types.h"
+#include "standard-headers/linux/virtio_config.h"
+#include "standard-headers/linux/virtio_types.h"
+
+
+#define VIRTIO_CRYPTO_SERVICE_CIPHER 0
+#define VIRTIO_CRYPTO_SERVICE_HASH 1
+#define VIRTIO_CRYPTO_SERVICE_MAC 2
+#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+
+#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
+
+struct virtio_crypto_ctrl_header {
+#define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02)
+#define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03)
+#define VIRTIO_CRYPTO_HASH_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02)
+#define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03)
+#define VIRTIO_CRYPTO_MAC_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02)
+#define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03)
+#define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
+#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+ __virtio32 opcode;
+ __virtio32 algo;
+ __virtio32 flag;
+ /* data virtqueue id */
+ __virtio32 queue_id;
+};
+
+struct virtio_crypto_cipher_session_para {
+#define VIRTIO_CRYPTO_NO_CIPHER 0
+#define VIRTIO_CRYPTO_CIPHER_ARC4 1
+#define VIRTIO_CRYPTO_CIPHER_AES_ECB 2
+#define VIRTIO_CRYPTO_CIPHER_AES_CBC 3
+#define VIRTIO_CRYPTO_CIPHER_AES_CTR 4
+#define VIRTIO_CRYPTO_CIPHER_DES_ECB 5
+#define VIRTIO_CRYPTO_CIPHER_DES_CBC 6
+#define VIRTIO_CRYPTO_CIPHER_3DES_ECB 7
+#define VIRTIO_CRYPTO_CIPHER_3DES_CBC 8
+#define VIRTIO_CRYPTO_CIPHER_3DES_CTR 9
+#define VIRTIO_CRYPTO_CIPHER_KASUMI_F8 10
+#define VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2 11
+#define VIRTIO_CRYPTO_CIPHER_AES_F8 12
+#define VIRTIO_CRYPTO_CIPHER_AES_XTS 13
+#define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3 14
+ __virtio32 algo;
+ /* length of key */
+ __virtio32 keylen;
+
+#define VIRTIO_CRYPTO_OP_ENCRYPT 1
+#define VIRTIO_CRYPTO_OP_DECRYPT 2
+ /* encrypt or decrypt */
+ __virtio32 op;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_session_input {
+ /* Device-writable part */
+ __virtio64 session_id;
+ __virtio32 status;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_cipher_session_req {
+ struct virtio_crypto_cipher_session_para para;
+};
+
+struct virtio_crypto_hash_session_para {
+#define VIRTIO_CRYPTO_NO_HASH 0
+#define VIRTIO_CRYPTO_HASH_MD5 1
+#define VIRTIO_CRYPTO_HASH_SHA1 2
+#define VIRTIO_CRYPTO_HASH_SHA_224 3
+#define VIRTIO_CRYPTO_HASH_SHA_256 4
+#define VIRTIO_CRYPTO_HASH_SHA_384 5
+#define VIRTIO_CRYPTO_HASH_SHA_512 6
+#define VIRTIO_CRYPTO_HASH_SHA3_224 7
+#define VIRTIO_CRYPTO_HASH_SHA3_256 8
+#define VIRTIO_CRYPTO_HASH_SHA3_384 9
+#define VIRTIO_CRYPTO_HASH_SHA3_512 10
+#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11
+#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12
+ __virtio32 algo;
+ /* hash result length */
+ __virtio32 hash_result_len;
+};
+
+struct virtio_crypto_hash_create_session_req {
+ struct virtio_crypto_hash_session_para para;
+};
+
+struct virtio_crypto_mac_session_para {
+#define VIRTIO_CRYPTO_NO_MAC 0
+#define VIRTIO_CRYPTO_MAC_HMAC_MD5 1
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA1 2
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_224 3
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_256 4
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_384 5
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_512 6
+#define VIRTIO_CRYPTO_MAC_CMAC_3DES 25
+#define VIRTIO_CRYPTO_MAC_CMAC_AES 26
+#define VIRTIO_CRYPTO_MAC_KASUMI_F9 27
+#define VIRTIO_CRYPTO_MAC_SNOW3G_UIA2 28
+#define VIRTIO_CRYPTO_MAC_GMAC_AES 41
+#define VIRTIO_CRYPTO_MAC_GMAC_TWOFISH 42
+#define VIRTIO_CRYPTO_MAC_CBCMAC_AES 49
+#define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 50
+#define VIRTIO_CRYPTO_MAC_XCBC_AES 53
+ __virtio32 algo;
+ /* hash result length */
+ __virtio32 hash_result_len;
+ /* length of authenticated key */
+ __virtio32 auth_key_len;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_mac_create_session_req {
+ struct virtio_crypto_mac_session_para para;
+};
+
+struct virtio_crypto_aead_session_para {
+#define VIRTIO_CRYPTO_NO_AEAD 0
+#define VIRTIO_CRYPTO_AEAD_GCM 1
+#define VIRTIO_CRYPTO_AEAD_CCM 2
+#define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305 3
+ __virtio32 algo;
+ /* length of key */
+ __virtio32 key_len;
+ /* digest result length */
+ __virtio32 digest_result_len;
+ /* length of the additional authenticated data (AAD) in bytes */
+ __virtio32 aad_len;
+ /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */
+ __virtio32 op;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_aead_create_session_req {
+ struct virtio_crypto_aead_session_para para;
+};
+
+struct virtio_crypto_alg_chain_session_para {
+#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
+#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
+ __virtio32 alg_chain_order;
+/* Plain hash */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN 1
+/* Authenticated hash (mac) */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH 2
+/* Nested hash */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED 3
+ __virtio32 hash_mode;
+ struct virtio_crypto_cipher_session_para cipher_param;
+ union {
+ struct virtio_crypto_hash_session_para hash_param;
+ struct virtio_crypto_mac_session_para mac_param;
+ } u;
+ /* length of the additional authenticated data (AAD) in bytes */
+ __virtio32 aad_len;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_alg_chain_session_req {
+ struct virtio_crypto_alg_chain_session_para para;
+};
+
+struct virtio_crypto_sym_create_session_req {
+ union {
+ struct virtio_crypto_cipher_session_req cipher;
+ struct virtio_crypto_alg_chain_session_req chain;
+ } u;
+
+ /* Device-readable part */
+
+/* No operation */
+#define VIRTIO_CRYPTO_SYM_OP_NONE 0
+/* Cipher only operation on the data */
+#define VIRTIO_CRYPTO_SYM_OP_CIPHER 1
+/* Chain any cipher with any hash or mac operation. The order
+ depends on the value of alg_chain_order param */
+#define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING 2
+ __virtio32 op_type;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_destroy_session_req {
+ /* Device-readable part */
+ __virtio64 session_id;
+};
+
+/* The request of the control virtqueue's packet */
+struct virtio_crypto_op_ctrl_req {
+ struct virtio_crypto_ctrl_header header;
+
+ union {
+ struct virtio_crypto_sym_create_session_req sym_create_session;
+ struct virtio_crypto_hash_create_session_req hash_create_session;
+ struct virtio_crypto_mac_create_session_req mac_create_session;
+ struct virtio_crypto_aead_create_session_req aead_create_session;
+ struct virtio_crypto_destroy_session_req destroy_session;
+ } u;
+};
+
+struct virtio_crypto_op_header {
+#define VIRTIO_CRYPTO_CIPHER_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00)
+#define VIRTIO_CRYPTO_CIPHER_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01)
+#define VIRTIO_CRYPTO_HASH \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00)
+#define VIRTIO_CRYPTO_MAC \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00)
+#define VIRTIO_CRYPTO_AEAD_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
+#define VIRTIO_CRYPTO_AEAD_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+ __virtio32 opcode;
+ /* algo should be a service-specific algorithm */
+ __virtio32 algo;
+ /* session_id is the id of a previously created session */
+ __virtio64 session_id;
+ /* control flag to control the request */
+ __virtio32 flag;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_cipher_para {
+ /*
+ * Byte Length of valid IV/Counter
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for
+ * SNOW3G in UEA2 mode, this is the length of the IV (which
+ * must be the same as the block length of the cipher).
+ * - For block ciphers in CTR mode, this is the length of the counter
+ * (which must be the same as the block length of the cipher).
+ * - For AES-XTS, this is the 128-bit tweak, i, from IEEE Std 1619-2007.
+ *
+ * The IV/Counter will be updated after every partial cryptographic
+ * operation.
+ */
+ __virtio32 iv_len;
+ /* length of source data */
+ __virtio32 src_data_len;
+ /* length of dst data */
+ __virtio32 dst_data_len;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_hash_para {
+ /* length of source data */
+ __virtio32 src_data_len;
+ /* hash result length */
+ __virtio32 hash_result_len;
+};
+
+struct virtio_crypto_mac_para {
+ struct virtio_crypto_hash_para hash;
+};
+
+struct virtio_crypto_aead_para {
+ /*
+ * Byte Length of valid IV data pointed to by the below iv_addr
+ * parameter.
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which
+ * case iv_addr points to J0.
+ * - For CCM mode, this is the length of the nonce, which can be in the
+ * range 7 to 13 inclusive.
+ */
+ __virtio32 iv_len;
+ /* length of additional auth data */
+ __virtio32 aad_len;
+ /* length of source data */
+ __virtio32 src_data_len;
+ /* length of dst data */
+ __virtio32 dst_data_len;
+};
+
+struct virtio_crypto_cipher_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_cipher_para para;
+};
+
+struct virtio_crypto_hash_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_hash_para para;
+};
+
+struct virtio_crypto_mac_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_mac_para para;
+};
+
+struct virtio_crypto_alg_chain_data_para {
+ __virtio32 iv_len;
+ /* Length of source data */
+ __virtio32 src_data_len;
+ /* Length of destination data */
+ __virtio32 dst_data_len;
+ /* Starting point for cipher processing in source data */
+ __virtio32 cipher_start_src_offset;
+ /* Length of the source data that the cipher will be computed on */
+ __virtio32 len_to_cipher;
+ /* Starting point for hash processing in source data */
+ __virtio32 hash_start_src_offset;
+ /* Length of the source data that the hash will be computed on */
+ __virtio32 len_to_hash;
+ /* Length of the additional auth data */
+ __virtio32 aad_len;
+ /* Length of the hash result */
+ __virtio32 hash_result_len;
+ __virtio32 reserved;
+};
+
+struct virtio_crypto_alg_chain_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_alg_chain_data_para para;
+};
+
+struct virtio_crypto_sym_data_req {
+ union {
+ struct virtio_crypto_cipher_data_req cipher;
+ struct virtio_crypto_alg_chain_data_req chain;
+ } u;
+
+ /* See above VIRTIO_CRYPTO_SYM_OP_* */
+ __virtio32 op_type;
+ __virtio32 padding;
+};
+
+struct virtio_crypto_aead_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_aead_para para;
+};
+
+/* The request of the data virtqueue's packet */
+struct virtio_crypto_op_data_req {
+ struct virtio_crypto_op_header header;
+
+ union {
+ struct virtio_crypto_sym_data_req sym_req;
+ struct virtio_crypto_hash_data_req hash_req;
+ struct virtio_crypto_mac_data_req mac_req;
+ struct virtio_crypto_aead_data_req aead_req;
+ } u;
+};
+
+#define VIRTIO_CRYPTO_OK 0
+#define VIRTIO_CRYPTO_ERR 1
+#define VIRTIO_CRYPTO_BADMSG 2
+#define VIRTIO_CRYPTO_NOTSUPP 3
+#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
+
+/* The accelerator hardware is ready */
+#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
+#define VIRTIO_CRYPTO_S_STARTED (1 << 1)
+
+struct virtio_crypto_config {
+ /* See VIRTIO_CRYPTO_* above */
+ __virtio32 status;
+
+ /*
+ * Maximum number of data virtqueues; legal values are between 1 and 0x8000
+ */
+ __virtio32 max_dataqueues;
+
+ /* Specifies the services mask which the device supports,
+ see VIRTIO_CRYPTO_SERVICE_* above */
+ __virtio32 crypto_services;
+
+ /* Detailed algorithms mask */
+ __virtio32 cipher_algo_l;
+ __virtio32 cipher_algo_h;
+ __virtio32 hash_algo;
+ __virtio32 mac_algo_l;
+ __virtio32 mac_algo_h;
+ __virtio32 aead_algo;
+
+ /* Maximum length of cipher key */
+ uint32_t max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ uint32_t max_auth_key_len;
+
+ __virtio32 reserve;
+
+ /* The maximum size of each request's content */
+ __virtio64 max_size;
+};
+
+struct virtio_crypto_inhdr {
+ /* See VIRTIO_CRYPTO_* above */
+ uint8_t status;
+};
+
+#endif /* _LINUX_VIRTIO_CRYPTO_H */
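To make the nesting of these structures concrete, a sketch of filling a control-queue request that creates an AES-CBC encrypt session. Endianness handling (the __virtio32 fields want the virtio byte-order helpers) is deliberately omitted, and the function name is hypothetical:

    #include <string.h>

    static void fill_create_session_req(struct virtio_crypto_op_ctrl_req *req)
    {
        memset(req, 0, sizeof(*req));
        req->header.opcode   = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
        req->header.algo     = VIRTIO_CRYPTO_CIPHER_AES_CBC;
        req->header.queue_id = 0;                         /* data virtqueue 0 */

        req->u.sym_create_session.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        req->u.sym_create_session.u.cipher.para.algo   = VIRTIO_CRYPTO_CIPHER_AES_CBC;
        req->u.sym_create_session.u.cipher.para.keylen = 16;  /* AES-128 */
        req->u.sym_create_session.u.cipher.para.op     = VIRTIO_CRYPTO_OP_ENCRYPT;
    }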
diff --git a/include/standard-headers/linux/virtio_ids.h b/include/standard-headers/linux/virtio_ids.h
index 3228d58223..fe74e422d4 100644
--- a/include/standard-headers/linux/virtio_ids.h
+++ b/include/standard-headers/linux/virtio_ids.h
@@ -42,5 +42,5 @@
#define VIRTIO_ID_GPU 16 /* virtio GPU */
#define VIRTIO_ID_INPUT 18 /* virtio input */
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
-
+#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h
new file mode 100644
index 0000000000..84526c0d35
--- /dev/null
+++ b/include/sysemu/cryptodev.h
@@ -0,0 +1,298 @@
+/*
+ * QEMU Crypto Device Implementation
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef CRYPTODEV_H
+#define CRYPTODEV_H
+
+#include "qom/object.h"
+#include "qemu-common.h"
+
+/**
+ * CryptoDevBackend:
+ *
+ * The CryptoDevBackend object is an interface
+ * for different cryptodev backends, which provides crypto
+ * operation wrapper.
+ *
+ */
+
+#define TYPE_CRYPTODEV_BACKEND "cryptodev-backend"
+
+#define CRYPTODEV_BACKEND(obj) \
+ OBJECT_CHECK(CryptoDevBackend, \
+ (obj), TYPE_CRYPTODEV_BACKEND)
+#define CRYPTODEV_BACKEND_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(CryptoDevBackendClass, \
+ (obj), TYPE_CRYPTODEV_BACKEND)
+#define CRYPTODEV_BACKEND_CLASS(klass) \
+ OBJECT_CLASS_CHECK(CryptoDevBackendClass, \
+ (klass), TYPE_CRYPTODEV_BACKEND)
+
+
+#define MAX_CRYPTO_QUEUE_NUM 64
+
+typedef struct CryptoDevBackendConf CryptoDevBackendConf;
+typedef struct CryptoDevBackendPeers CryptoDevBackendPeers;
+typedef struct CryptoDevBackendClient
+ CryptoDevBackendClient;
+typedef struct CryptoDevBackend CryptoDevBackend;
+
+enum CryptoDevBackendAlgType {
+ CRYPTODEV_BACKEND_ALG_SYM,
+ CRYPTODEV_BACKEND_ALG__MAX,
+};
+
+/**
+ * CryptoDevBackendSymSessionInfo:
+ *
+ * @op_code: operation code (refer to virtio_crypto.h)
+ * @cipher_alg: algorithm type of CIPHER
+ * @key_len: byte length of cipher key
+ * @hash_alg: algorithm type of HASH/MAC
+ * @hash_result_len: byte length of HASH operation result
+ * @auth_key_len: byte length of authenticated key
+ * @add_len: byte length of additional authenticated data
+ * @op_type: operation type (refer to virtio_crypto.h)
+ * @direction: encryption or decryption for CIPHER
+ * @hash_mode: HASH mode for HASH operation (refer to virtio_crypto.h)
+ * @alg_chain_order: order of algorithm chaining (CIPHER then HASH,
+ * or HASH then CIPHER)
+ * @cipher_key: pointer to the CIPHER key
+ * @auth_key: pointer to the authentication key for MAC
+ *
+ */
+typedef struct CryptoDevBackendSymSessionInfo {
+ /* corresponding with virtio crypto spec */
+ uint32_t op_code;
+ uint32_t cipher_alg;
+ uint32_t key_len;
+ uint32_t hash_alg;
+ uint32_t hash_result_len;
+ uint32_t auth_key_len;
+ uint32_t add_len;
+ uint8_t op_type;
+ uint8_t direction;
+ uint8_t hash_mode;
+ uint8_t alg_chain_order;
+ uint8_t *cipher_key;
+ uint8_t *auth_key;
+} CryptoDevBackendSymSessionInfo;
+
+/**
+ * CryptoDevBackendSymOpInfo:
+ *
+ * @session_id: id of the session which was previously
+ * created by cryptodev_backend_sym_create_session()
+ * @aad_len: byte length of additional authenticated data
+ * @iv_len: byte length of initialization vector or counter
+ * @src_len: byte length of source data
+ * @dst_len: byte length of destination data
+ * @digest_result_len: byte length of hash digest result
+ * @hash_start_src_offset: Starting point for hash processing, specified
+ * as number of bytes from start of packet in source data, only used for
+ * algorithm chain
+ * @cipher_start_src_offset: Starting point for cipher processing, specified
+ * as number of bytes from start of packet in source data, only used for
+ * algorithm chain
+ * @len_to_hash: byte length of source data on which the hash
+ * operation will be computed, only used for algorithm chain
+ * @len_to_cipher: byte length of source data on which the cipher
+ * operation will be computed, only used for algorithm chain
+ * @op_type: operation type (refer to virtio_crypto.h)
+ * @iv: pointer to the initialization vector or counter
+ * @src: pointer to the source data
+ * @dst: pointer to the destination data
+ * @aad_data: pointer to the additional authenticated data
+ * @digest_result: pointer to the digest result data
+ * @data[0]: pointer to extra memory obtained in the same allocation
+ *
+ */
+typedef struct CryptoDevBackendSymOpInfo {
+ uint64_t session_id;
+ uint32_t aad_len;
+ uint32_t iv_len;
+ uint32_t src_len;
+ uint32_t dst_len;
+ uint32_t digest_result_len;
+ uint32_t hash_start_src_offset;
+ uint32_t cipher_start_src_offset;
+ uint32_t len_to_hash;
+ uint32_t len_to_cipher;
+ uint8_t op_type;
+ uint8_t *iv;
+ uint8_t *src;
+ uint8_t *dst;
+ uint8_t *aad_data;
+ uint8_t *digest_result;
+ uint8_t data[0];
+} CryptoDevBackendSymOpInfo;
+
+typedef struct CryptoDevBackendClass {
+ ObjectClass parent_class;
+
+ void (*init)(CryptoDevBackend *backend, Error **errp);
+ void (*cleanup)(CryptoDevBackend *backend, Error **errp);
+
+ int64_t (*create_session)(CryptoDevBackend *backend,
+ CryptoDevBackendSymSessionInfo *sess_info,
+ uint32_t queue_index, Error **errp);
+ int (*close_session)(CryptoDevBackend *backend,
+ uint64_t session_id,
+ uint32_t queue_index, Error **errp);
+ int (*do_sym_op)(CryptoDevBackend *backend,
+ CryptoDevBackendSymOpInfo *op_info,
+ uint32_t queue_index, Error **errp);
+} CryptoDevBackendClass;
+
+
+struct CryptoDevBackendClient {
+ char *model;
+ char *name;
+ char *info_str;
+ unsigned int queue_index;
+ QTAILQ_ENTRY(CryptoDevBackendClient) next;
+};
+
+struct CryptoDevBackendPeers {
+ CryptoDevBackendClient *ccs[MAX_CRYPTO_QUEUE_NUM];
+ uint32_t queues;
+};
+
+struct CryptoDevBackendConf {
+ CryptoDevBackendPeers peers;
+
+ /* Supported service mask */
+ uint32_t crypto_services;
+
+ /* Detailed algorithms mask */
+ uint32_t cipher_algo_l;
+ uint32_t cipher_algo_h;
+ uint32_t hash_algo;
+ uint32_t mac_algo_l;
+ uint32_t mac_algo_h;
+ uint32_t aead_algo;
+ /* Maximum length of cipher key */
+ uint32_t max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ uint32_t max_auth_key_len;
+ /* Maximum size of each crypto request's content */
+ uint64_t max_size;
+};
+
+struct CryptoDevBackend {
+ Object parent_obj;
+
+ bool ready;
+ CryptoDevBackendConf conf;
+};
+
+/**
+ * cryptodev_backend_new_client:
+ * @model: the cryptodev backend model
+ * @name: the cryptodev backend name, can be NULL
+ *
+ * Creates a new cryptodev backend client object
+ * with the @name in the model @model.
+ *
+ * The returned object must be released with
+ * cryptodev_backend_free_client() when no
+ * longer required.
+ *
+ * Returns: a new cryptodev backend client object
+ */
+CryptoDevBackendClient *
+cryptodev_backend_new_client(const char *model,
+ const char *name);
+/**
+ * cryptodev_backend_free_client:
+ * @cc: the cryptodev backend client object
+ *
+ * Release the memory associated with @cc that
+ * was previously allocated by cryptodev_backend_new_client()
+ */
+void cryptodev_backend_free_client(
+ CryptoDevBackendClient *cc);
+
+/**
+ * cryptodev_backend_cleanup:
+ * @backend: the cryptodev backend object
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Clean up the resources associated with @backend that were
+ * allocated by the specific backend's init() callback
+ */
+void cryptodev_backend_cleanup(
+ CryptoDevBackend *backend,
+ Error **errp);
+
+/**
+ * cryptodev_backend_sym_create_session:
+ * @backend: the cryptodev backend object
+ * @sess_info: parameters needed to create the session
+ * @queue_index: queue index of cryptodev backend client
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Create a session for symmetric algorithms
+ *
+ * Returns: session id on success, or -1 on error
+ */
+int64_t cryptodev_backend_sym_create_session(
+ CryptoDevBackend *backend,
+ CryptoDevBackendSymSessionInfo *sess_info,
+ uint32_t queue_index, Error **errp);
+
+/**
+ * cryptodev_backend_sym_close_session:
+ * @backend: the cryptodev backend object
+ * @session_id: the session id
+ * @queue_index: queue index of cryptodev backend client
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Close a session for symmetric algorithms which was previously
+ * created by cryptodev_backend_sym_create_session()
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int cryptodev_backend_sym_close_session(
+ CryptoDevBackend *backend,
+ uint64_t session_id,
+ uint32_t queue_index, Error **errp);
+
+/**
+ * cryptodev_backend_crypto_operation:
+ * @backend: the cryptodev backend object
+ * @opaque: pointer to a VirtIOCryptoReq object
+ * @queue_index: queue index of cryptodev backend client
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Perform a crypto operation, such as encryption
+ * or decryption
+ *
+ * Returns: VIRTIO_CRYPTO_OK on success,
+ * or -VIRTIO_CRYPTO_* on error
+ */
+int cryptodev_backend_crypto_operation(
+ CryptoDevBackend *backend,
+ void *opaque,
+ uint32_t queue_index, Error **errp);
+
+#endif /* CRYPTODEV_H */
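The expected call flow against this interface, sketched with a hypothetical wrapper name; error handling and key material are abbreviated:

    /* Open an AES-CBC encrypt session on queue 0 and return its id
     * (or -1), to be used later in CryptoDevBackendSymOpInfo and
     * finally closed with cryptodev_backend_sym_close_session(). */
    static int64_t example_open_cipher_session(CryptoDevBackend *backend,
                                               uint8_t *key, Error **errp)
    {
        CryptoDevBackendSymSessionInfo info = {
            .op_code    = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION,
            .cipher_alg = VIRTIO_CRYPTO_CIPHER_AES_CBC,
            .key_len    = 16,
            .cipher_key = key,
            .op_type    = VIRTIO_CRYPTO_SYM_OP_CIPHER,
            .direction  = VIRTIO_CRYPTO_OP_ENCRYPT,
        };

        return cryptodev_backend_sym_create_session(backend, &info, 0, errp);
    }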
diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h
index 3cfedbc28b..b0a6c0695b 100644
--- a/include/sysemu/os-posix.h
+++ b/include/sysemu/os-posix.h
@@ -87,4 +87,16 @@ void *qemu_alloc_stack(size_t *sz);
*/
void qemu_free_stack(void *stack, size_t sz);
+/* POSIX and Mingw32 differ in the name of the stdio lock functions. */
+
+static inline void qemu_flockfile(FILE *f)
+{
+ flockfile(f);
+}
+
+static inline void qemu_funlockfile(FILE *f)
+{
+ funlockfile(f);
+}
+
#endif
diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h
index 17aad3b20f..ff18b23db1 100644
--- a/include/sysemu/os-win32.h
+++ b/include/sysemu/os-win32.h
@@ -103,6 +103,21 @@ static inline char *realpath(const char *path, char *resolved_path)
return resolved_path;
}
+/* ??? Mingw appears to export _lock_file and _unlock_file as the functions
+ * with which to lock a stdio handle. But something is wrong in the markup,
+ * either in the header or the library, such that we get undefined references
+ * to "_imp___lock_file" etc when linking. Since we seem to have no other
+ * alternative, and the usage within the logging functions isn't critical,
+ * ignore FILE locking.
+ */
+
+static inline void qemu_flockfile(FILE *f)
+{
+}
+
+static inline void qemu_funlockfile(FILE *f)
+{
+}
/* We wrap all the sockets functions so that we can
* set errno based on WSAGetLastError()
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 5af040b740..bcd3b9effe 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -2197,7 +2197,7 @@
#
# @debug-level: #optional libgfapi log level (default '4' which is Error)
#
-# @logfile: #optional libgfapi log file (default /dev/stderr)
+# @logfile: #optional libgfapi log file (default /dev/stderr) (Since 2.8)
#
# Since: 2.7
##
diff --git a/qemu-img.c b/qemu-img.c
index ac7f40d91a..6949b73ca5 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -933,8 +933,9 @@ static int img_commit(int argc, char **argv)
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
- commit_active_start("commit", bs, base_bs, 0, BLOCKDEV_ON_ERROR_REPORT,
- common_block_job_cb, &cbi, &local_err, false);
+ commit_active_start("commit", bs, base_bs, BLOCK_JOB_DEFAULT, 0,
+ BLOCKDEV_ON_ERROR_REPORT, common_block_job_cb, &cbi,
+ &local_err, false);
aio_context_release(aio_context);
if (local_err) {
goto done;
diff --git a/qemu-options.hx b/qemu-options.hx
index 95332cc05b..4536e18ac0 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -3948,6 +3948,24 @@ secondary:
If you want to know the detail of above command line, you can read
the colo-compare git log.
+@item -object cryptodev-backend-builtin,id=@var{id}[,queues=@var{queues}]
+
+Creates a cryptodev backend which executes crypto operations using
+the QEMU cipher APIs. The @var{id} parameter is
+a unique ID that will be used to reference this cryptodev backend from
+the @option{virtio-crypto} device. The optional @var{queues} parameter
+specifies the number of queues for the cryptodev backend; the default
+is 1.
+
+@example
+
+ # qemu-system-x86_64 \
+ [...] \
+ -object cryptodev-backend-builtin,id=cryptodev0 \
+ -device virtio-crypto-pci,id=crypto0,cryptodev=cryptodev0 \
+ [...]
+@end example
+
@item -object secret,id=@var{id},data=@var{string},format=@var{raw|base64}[,keyid=@var{secretid},iv=@var{string}]
@item -object secret,id=@var{id},file=@var{filename},format=@var{raw|base64}[,keyid=@var{secretid},iv=@var{string}]
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 03e47765ed..114927b751 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2994,9 +2994,11 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index ded924a0a9..de48747376 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -11420,11 +11420,13 @@ done_generating:
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start,
4 | (bswap_code(dc->sctlr_b) ? 2 : 0));
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
tb->size = dc->pc - pc_start;
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 718f7d06f3..0ad9070b45 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -11963,11 +11963,13 @@ done_generating:
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start,
dc->thumb | (dc->sctlr_b << 1));
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
tb->size = dc->pc - pc_start;
diff --git a/target-cris/translate.c b/target-cris/translate.c
index b5ab0a5fb2..b91042743f 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -3135,29 +3135,6 @@ void gen_intermediate_code(CPUCRISState *env, struct TranslationBlock *tb)
dc->cpustate_changed = 0;
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
- qemu_log(
- "pc=%x %x flg=%" PRIx64 " bt=%x ds=%u ccs=%x\n"
- "pid=%x usp=%x\n"
- "%x.%x.%x.%x\n"
- "%x.%x.%x.%x\n"
- "%x.%x.%x.%x\n"
- "%x.%x.%x.%x\n",
- dc->pc, dc->ppc,
- (uint64_t)tb->flags,
- env->btarget, (unsigned)tb->flags & 7,
- env->pregs[PR_CCS],
- env->pregs[PR_PID], env->pregs[PR_USP],
- env->regs[0], env->regs[1], env->regs[2], env->regs[3],
- env->regs[4], env->regs[5], env->regs[6], env->regs[7],
- env->regs[8], env->regs[9],
- env->regs[10], env->regs[11],
- env->regs[12], env->regs[13],
- env->regs[14], env->regs[15]);
- qemu_log("--------------\n");
- qemu_log("IN: %s\n", lookup_symbol(pc_start));
- }
-
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
@@ -3313,10 +3290,14 @@ void gen_intermediate_code(CPUCRISState *env, struct TranslationBlock *tb)
#if !DISAS_CRIS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("--------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start,
env->pregs[PR_VR]);
qemu_log("\nisize=%d osize=%d\n",
dc->pc - pc_start, tcg_op_buf_count());
+ qemu_log_unlock();
}
#endif
#endif
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 927b366534..324103c885 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -2432,11 +2432,13 @@ static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
if (qemu_loglevel_mask(LOG_UNIMP)) {
target_ulong pc = s->pc_start, end = s->pc;
+ qemu_log_lock();
qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
for (; pc < end; ++pc) {
qemu_log(" %02x", cpu_ldub_code(env, pc));
}
qemu_log("\n");
+ qemu_log_unlock();
}
}
@@ -8470,6 +8472,7 @@ done_generating:
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
int disas_flags;
+ qemu_log_lock();
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
@@ -8480,6 +8483,7 @@ done_generating:
disas_flags = !dc->code32;
log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
diff --git a/target-lm32/translate.c b/target-lm32/translate.c
index 842af63a98..692882f447 100644
--- a/target-lm32/translate.c
+++ b/target-lm32/translate.c
@@ -1148,10 +1148,12 @@ void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("\n");
log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
qemu_log("\nisize=%d osize=%d\n",
dc->pc - pc_start, tcg_op_buf_count());
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index ee0ffe3e07..9ad974f86a 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -3549,10 +3549,12 @@ void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
tb->size = dc->pc - pc_start;
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index 80098ece15..de2090ac71 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -581,50 +581,10 @@ static void dec_msr(DisasContext *dc)
}
}
-/* 64-bit signed mul, lower result in d and upper in d2. */
-static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
-{
- TCGv_i64 t0, t1;
-
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
-
- tcg_gen_ext_i32_i64(t0, a);
- tcg_gen_ext_i32_i64(t1, b);
- tcg_gen_mul_i64(t0, t0, t1);
-
- tcg_gen_extrl_i64_i32(d, t0);
- tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_extrl_i64_i32(d2, t0);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
-}
-
-/* 64-bit unsigned muls, lower result in d and upper in d2. */
-static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
-{
- TCGv_i64 t0, t1;
-
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
-
- tcg_gen_extu_i32_i64(t0, a);
- tcg_gen_extu_i32_i64(t1, b);
- tcg_gen_mul_i64(t0, t0, t1);
-
- tcg_gen_extrl_i64_i32(d, t0);
- tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_extrl_i64_i32(d2, t0);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
-}
-
/* Multiplier unit. */
static void dec_mul(DisasContext *dc)
{
- TCGv d[2];
+ TCGv tmp;
unsigned int subcode;
if ((dc->tb_flags & MSR_EE_FLAG)
@@ -636,13 +596,11 @@ static void dec_mul(DisasContext *dc)
}
subcode = dc->imm & 3;
- d[0] = tcg_temp_new();
- d[1] = tcg_temp_new();
if (dc->type_b) {
LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
- t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
- goto done;
+ tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ return;
}
/* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
@@ -651,30 +609,29 @@ static void dec_mul(DisasContext *dc)
/* nop??? */
}
+ tmp = tcg_temp_new();
switch (subcode) {
case 0:
LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
- t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 1:
LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
- t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 2:
LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
- t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 3:
LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
- t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
break;
default:
cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
break;
}
-done:
- tcg_temp_free(d[0]);
- tcg_temp_free(d[1]);
+ tcg_temp_free(tmp);
}
/* Div unit. */
@@ -1670,13 +1627,6 @@ void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
}
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
-#if !SIM_COMPAT
- qemu_log("--------------\n");
- log_cpu_state(CPU(cpu), 0);
-#endif
- }
-
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
@@ -1820,12 +1770,14 @@ void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
#if !SIM_COMPAT
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
- qemu_log("\n");
+ qemu_log_lock();
+ qemu_log("--------------\n");
#if DISAS_GNU
log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
qemu_log("\nisize=%d osize=%d\n",
dc->pc - pc_start, tcg_op_buf_count());
+ qemu_log_unlock();
}
#endif
#endif
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 55c2ca0c7b..d8dde7a2f5 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -20043,9 +20043,11 @@ done_generating:
LOG_DISAS("\n");
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index 28c944657c..229361aed1 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -1651,10 +1651,6 @@ void gen_intermediate_code(CPUOpenRISCState *env, struct TranslationBlock *tb)
dc->synced_flags = dc->tb_flags = tb->flags;
dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
dc->singlestep_enabled = cs->singlestep_enabled;
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
- qemu_log("-----------------------------------------\n");
- log_cpu_state(CPU(cpu), 0);
- }
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
@@ -1754,10 +1750,13 @@ void gen_intermediate_code(CPUOpenRISCState *env, struct TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
- qemu_log("\n");
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
qemu_log("\nisize=%d osize=%d\n",
dc->pc - pc_start, tcg_op_buf_count());
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 43505a936c..54f35e9904 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -7211,9 +7211,11 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
int flags;
flags = env->bfd_mach;
flags |= ctx.le_mode << 16;
+ qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, ctx.nip - pc_start, flags);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index 1a07d70b21..02bc7058fd 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -5432,9 +5432,11 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
#if defined(S390X_DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index ca80cf70ca..c89a14733f 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -1927,9 +1927,11 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index a13b76ebd9..2205f89837 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -5796,10 +5796,12 @@ void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("--------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-tilegx/translate.c b/target-tilegx/translate.c
index 11c9732389..9c734eeba3 100644
--- a/target-tilegx/translate.c
+++ b/target-tilegx/translate.c
@@ -2391,6 +2391,7 @@ void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
TCGV_UNUSED_I64(dc->zero);
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
}
if (!max_insns) {
@@ -2429,7 +2430,10 @@ void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
}
void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb,
diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index 9a50df9a88..36f734a662 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -8789,9 +8789,11 @@ void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-unicore32/translate.c b/target-unicore32/translate.c
index 09354f92d2..514d460408 100644
--- a/target-unicore32/translate.c
+++ b/target-unicore32/translate.c
@@ -2024,10 +2024,12 @@ done_generating:
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
tb->size = dc->pc - pc_start;
diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
index fb0fa56f1e..0858c296ea 100644
--- a/target-xtensa/translate.c
+++ b/target-xtensa/translate.c
@@ -3155,10 +3155,12 @@ void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc.pc - pc_start, 0);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
tb->size = dc.pc - pc_start;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index bb2bfeef3c..6e2fb3522f 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -678,6 +678,33 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
}
}
+void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
+ /* Adjust for negative input for the signed arg1. */
+ tcg_gen_sari_i32(t2, arg1, 31);
+ tcg_gen_and_i32(t2, t2, arg2);
+ tcg_gen_sub_i32(rh, t1, t2);
+ tcg_gen_mov_i32(rl, t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(t0, arg1);
+ tcg_gen_extu_i32_i64(t1, arg2);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_extr_i64_i32(rl, rh, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
if (TCG_TARGET_HAS_ext8s_i32) {
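
tcg_gen_mulsu2_i32 computes the full 64-bit product of a signed by an unsigned 32-bit operand. Reinterpreting the signed arg1 as unsigned adds 2^32 * arg2 to the product whenever arg1 is negative, so the generated code performs the unsigned widening multiply and then subtracts arg2 from the high word under arg1's sign, which is exactly what the sari/and/sub sequence does. A standalone host-side check of that identity (plain C sketch, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t  a = -7;            /* signed operand (arg1) */
        uint32_t b = 0x80000001u;   /* unsigned operand (arg2) */

        /* Step 1: unsigned widening multiply, as tcg_gen_mulu2_i32. */
        uint64_t u  = (uint64_t)(uint32_t)a * b;
        uint32_t lo = (uint32_t)u;
        uint32_t hi = (uint32_t)(u >> 32);

        /* Step 2: the adjustment.  sari(a, 31) & b yields b when a < 0
           and 0 otherwise; written as a conditional here to stay within
           well-defined C. */
        hi -= (a < 0) ? b : 0;

        /* Reference: the mathematically widened signed * unsigned product. */
        int64_t ref = (int64_t)a * (int64_t)b;
        assert(lo == (uint32_t)ref);
        assert(hi == (uint32_t)((uint64_t)ref >> 32));
        return 0;
    }

The 64-bit variant below applies the identical identity with a shift count of 63.
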
@@ -790,7 +817,7 @@ void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), 31);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}
void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
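
The one-line ld8s change is a genuine bug fix for 32-bit hosts, where an i64 variable is a pair of i32 halves: after the sign-extending byte load fills the low half, the high half must be derived from the low half's sign bit, but the old code arithmetic-shifted the high half itself, which still held stale data at that point. The corrected computation, expressed as a standalone C sketch (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int8_t  byte = -5;
        int32_t low  = byte;                 /* ld8s: sign-extending load */
        int32_t high = (low < 0) ? -1 : 0;   /* sari(low, 31), not sari(high, 31) */

        int64_t val = (int64_t)(((uint64_t)(uint32_t)high << 32)
                                | (uint32_t)low);
        assert(val == -5);
        return 0;
    }
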
@@ -1748,6 +1775,22 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
}
}
+void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
+ /* Adjust for negative input for the signed arg1. */
+ tcg_gen_sari_i64(t2, arg1, 63);
+ tcg_gen_and_i64(t2, t2, arg2);
+ tcg_gen_sub_i64(rh, t1, t2);
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
/* Size changing operations. */
void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 89b59e867a..6d044b7c5b 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -306,6 +306,7 @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
@@ -482,6 +483,7 @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg);
@@ -956,6 +958,7 @@ void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
#define tcg_gen_sub2_tl tcg_gen_sub2_i64
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64
#define tcg_gen_muls2_tl tcg_gen_muls2_i64
+#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64
#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i64
#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i64
#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i64
@@ -1043,6 +1046,7 @@ void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
#define tcg_gen_sub2_tl tcg_gen_sub2_i32
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32
#define tcg_gen_muls2_tl tcg_gen_muls2_i32
+#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32
#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i32
#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i32
#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i32
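
Both halves of the header gain the new alias because the tcg_gen_*_tl names resolve per target word size. Condensed, the selection mechanism (keyed off TARGET_LONG_BITS, per the convention used elsewhere in this header) amounts to:

    #if TARGET_LONG_BITS == 64
    #define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64   /* 64-bit targets */
    #else
    #define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32   /* 32-bit targets */
    #endif
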
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 2d3e498bc2..aabf94f365 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -412,10 +412,12 @@ void tcg_prologue_init(TCGContext *s)
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
+ qemu_log_lock();
qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
log_disas(buf0, prologue_size);
qemu_log("\n");
qemu_log_flush();
+ qemu_log_unlock();
}
#endif
}
@@ -2542,9 +2544,11 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
&& qemu_log_in_addr_range(tb->pc))) {
+ qemu_log_lock();
qemu_log("OP:\n");
tcg_dump_ops(s);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
@@ -2570,9 +2574,11 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
&& qemu_log_in_addr_range(tb->pc))) {
+ qemu_log_lock();
qemu_log("OP before indirect lowering:\n");
tcg_dump_ops(s);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
/* Replace indirect temps with direct temps. */
@@ -2590,9 +2596,11 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
&& qemu_log_in_addr_range(tb->pc))) {
+ qemu_log_lock();
qemu_log("OP after optimization and liveness analysis:\n");
tcg_dump_ops(s);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
diff --git a/tcg/tcg.h b/tcg/tcg.h
index dc1281fb4e..a35e4c4fd4 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -376,14 +376,36 @@ static inline unsigned get_alignment_bits(TCGMemOp memop)
typedef tcg_target_ulong TCGArg;
-/* Define a type and accessor macros for variables. Using pointer types
- is nice because it gives some level of type safely. Converting to and
- from intptr_t rather than int reduces the number of sign-extension
- instructions that get implied on 64-bit hosts. Users of tcg_gen_* don't
- need to know about any of this, and should treat TCGv as an opaque type.
- In addition we do typechecking for different types of variables. TCGv_i32
- and TCGv_i64 are 32/64-bit variables respectively. TCGv and TCGv_ptr
- are aliases for target_ulong and host pointer sized values respectively. */
+/* Define type and accessor macros for TCG variables.
+
+ TCG variables are the inputs and outputs of TCG ops, as described
+ in tcg/README. Target CPU front-end code uses these types to deal
+ with TCG variables as it emits TCG code via the tcg_gen_* functions.
+ They come in several flavours:
+ * TCGv_i32 : 32 bit integer type
+ * TCGv_i64 : 64 bit integer type
+ * TCGv_ptr : a host pointer type
+ * TCGv : an integer type the same size as target_ulong
+ (an alias for either TCGv_i32 or TCGv_i64)
+ The compiler's type checking will complain if you mix them
+ up and pass the wrong sized TCGv to a function.
+
+ Users of tcg_gen_* don't need to know about any of the internal
+ details of these, and should treat them as opaque types.
+ You won't be able to look inside them in a debugger either.
+
+ Internal implementation details follow:
+
+ Note that there is no definition of the structs TCGv_i32_d etc anywhere.
+ This is deliberate, because the values we store in variables of type
+ TCGv_i32 are not really pointers-to-structures. They're just small
+ integers, but keeping them in pointer types like this means that the
+ compiler will complain if you accidentally pass a TCGv_i32 to a
+ function which takes a TCGv_i64, and so on. Only the internals of
+ TCG need to care about the actual contents of the types, and they always
+ box and unbox via the MAKE_TCGV_* and GET_TCGV_* functions.
+ Converting to and from intptr_t rather than int reduces the number
+ of sign-extension instructions that get implied on 64-bit hosts. */
typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
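
The rewritten comment is worth pairing with a concrete illustration of the idiom it describes. The sketch below (hypothetical names, not QEMU code) shows how pointers to never-defined structs turn same-sized integer handles into distinct compile-time types:

    #include <stdint.h>

    typedef struct Handle32_d *Handle32;   /* struct Handle32_d is never defined */
    typedef struct Handle64_d *Handle64;   /* distinct, equally opaque type */

    /* Box/unbox through intptr_t, as the MAKE_TCGV_*/GET_TCGV_* helpers do. */
    static inline Handle32 make_h32(intptr_t i) { return (Handle32)i; }
    static inline intptr_t get_h32(Handle32 h)  { return (intptr_t)h; }

    static void consume32(Handle32 h) { (void)get_h32(h); }

    int main(void)
    {
        Handle32 a = make_h32(3);   /* the "pointer" is really a small integer */
        consume32(a);               /* OK: types match */
        /* Handle64 b = a; -- rejected: incompatible pointer types */
        return 0;
    }
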
diff --git a/tests/ipmi-bt-test.c b/tests/ipmi-bt-test.c
index ad1e97e91e..e84dd6889b 100644
--- a/tests/ipmi-bt-test.c
+++ b/tests/ipmi-bt-test.c
@@ -309,7 +309,7 @@ static void test_connect(void)
uint8_t msg[100];
unsigned int msglen;
static uint8_t exp1[] = { 0xff, 0x01, 0xa1 }; /* A protocol version */
- static uint8_t exp2[] = { 0x08, 0x1f, 0xa1 }; /* A capabilities cmd */
+ static uint8_t exp2[] = { 0x08, 0x3f, 0xa1 }; /* A capabilities cmd */
FD_ZERO(&readfds);
FD_SET(emu_lfd, &readfds);
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index d049cba8a3..f9afc3be41 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
-#include "block/blockjob.h"
+#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
typedef struct {
@@ -98,7 +98,8 @@ static BlockJob *test_block_job_start(unsigned int iterations,
bs = bdrv_new();
snprintf(job_id, sizeof(job_id), "job%u", counter++);
s = block_job_create(job_id, &test_block_job_driver, bs, 0,
- test_block_job_cb, data, &error_abort);
+ BLOCK_JOB_DEFAULT, test_block_job_cb,
+ data, &error_abort);
s->iterations = iterations;
s->use_timer = use_timer;
s->rc = rc;
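
Both test updates track the widened block_job_create() signature, which now takes a flags argument between the speed and completion-callback parameters; BLOCK_JOB_DEFAULT requests the existing behaviour (presumably the flag exists so other callers in this series can mark jobs differently, though that is inferred rather than shown in these hunks). The adjusted call shape, annotated (identifiers as used in the test; parameter meanings are my reading of the diff):

    s = block_job_create(job_id,                 /* user-visible job id   */
                         &test_block_job_driver, /* job callbacks         */
                         bs,                     /* block driver state    */
                         0,                      /* speed: no rate limit  */
                         BLOCK_JOB_DEFAULT,      /* new: creation flags   */
                         test_block_job_cb, data, &error_abort);
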
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index 5b0e934a0c..60b78a3342 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -13,7 +13,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
-#include "block/blockjob.h"
+#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
static const BlockJobDriver test_block_job_driver = {
@@ -31,7 +31,7 @@ static BlockJob *do_test_id(BlockBackend *blk, const char *id,
Error *errp = NULL;
job = block_job_create(id, &test_block_job_driver, blk_bs(blk), 0,
- block_job_cb, NULL, &errp);
+ BLOCK_JOB_DEFAULT, block_job_cb, NULL, &errp);
if (should_succeed) {
g_assert_null(errp);
g_assert_nonnull(job);
diff --git a/translate-all.c b/translate-all.c
index e6a8b07363..3dd9214904 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1355,10 +1355,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(tb->pc)) {
+ qemu_log_lock();
qemu_log("OUT: [size=%d]\n", gen_code_size);
log_disas(tb->tc_ptr, gen_code_size);
qemu_log("\n");
qemu_log_flush();
+ qemu_log_unlock();
}
#endif