author    Dr. David Alan Gilbert <dgilbert@redhat.com>    2018-06-13 20:17:51 +0100
committer Dr. David Alan Gilbert <dgilbert@redhat.com>    2020-01-23 16:41:36 +0000
commit    e4c55a3c144493b436e40031e2eed61a84eca47b (patch)
tree      7c91f7c1a479572a13c5b1d33699769626047365
parent    f2cef5fb9ae20136ca18d16328787b69b3abfa18 (diff)
virtiofsd: Start queue threads
Start a thread for each queue when we get notified it's been started.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
fix by:
Signed-off-by: Jun Piao <piaojun@huawei.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
-rw-r--r--    tools/virtiofsd/fuse_virtio.c    89
1 file changed, 89 insertions(+), 0 deletions(-)
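The kick_fd stored per queue below is the eventfd the vhost-user front-end signals when new requests land on that queue; the thread started by this patch will eventually block on it (the loop body is still a TODO here). A minimal sketch of what such a wait loop could look like, assuming plain poll()/read() on the eventfd (wait_for_kicks is a hypothetical name, not part of this patch):

#include <poll.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical sketch: block until the front-end kicks the queue.
 * A successful eventfd read always transfers exactly 8 bytes. */
static void wait_for_kicks(int kick_fd)
{
    struct pollfd pf = { .fd = kick_fd, .events = POLLIN };
    uint64_t event;

    for (;;) {
        if (poll(&pf, 1, -1) <= 0) {
            break;              /* error or signal */
        }
        if (read(kick_fd, &event, sizeof(event)) != sizeof(event)) {
            break;
        }
        /* ...pop and service virtqueue elements here... */
    }
}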
diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index 4819e56568..2a94bb3cca 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -11,6 +11,7 @@
  * See the file COPYING.LIB
  */
+#include "qemu/osdep.h"
 #include "fuse_virtio.h"
 #include "fuse_i.h"
 #include "standard-headers/linux/fuse.h"
@@ -30,6 +31,15 @@
#include "contrib/libvhost-user/libvhost-user.h"
+struct fv_QueueInfo {
+ pthread_t thread;
+ struct fv_VuDev *virtio_dev;
+
+ /* Our queue index, corresponds to array position */
+ int qidx;
+ int kick_fd;
+};
+
/*
* We pass the dev element into libvhost-user
* and then use it to get back to the outer
@@ -38,6 +48,13 @@
 struct fv_VuDev {
     VuDev dev;
     struct fuse_session *se;
+
+    /*
+     * The following pair of fields are only accessed in the main
+     * virtio_loop
+     */
+    size_t nqueues;
+    struct fv_QueueInfo **qi;
 };
 
 /* From spec */
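Because the VuDev is embedded as a field of fv_VuDev, callbacks that are handed the inner VuDev pointer can recover the outer structure with container_of() (pulled in via qemu/osdep.h), as fv_queue_set_started() does below. As a standalone illustration (to_fv_vudev is a hypothetical helper, not in this patch):

/* Sketch of the back-pointer pattern: libvhost-user hands back the
 * embedded VuDev; container_of() recovers the enclosing fv_VuDev. */
static struct fv_VuDev *to_fv_vudev(VuDev *dev)
{
    return container_of(dev, struct fv_VuDev, dev);
}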
@@ -83,6 +100,75 @@ static void fv_panic(VuDev *dev, const char *err)
     exit(EXIT_FAILURE);
 }
 
+static void *fv_queue_thread(void *opaque)
+{
+    struct fv_QueueInfo *qi = opaque;
+    fuse_log(FUSE_LOG_INFO, "%s: Start for queue %d kick_fd %d\n", __func__,
+             qi->qidx, qi->kick_fd);
+    while (1) {
+        /* TODO */
+    }
+
+    return NULL;
+}
+
+/* Callback from libvhost-user on start or stop of a queue */
+static void fv_queue_set_started(VuDev *dev, int qidx, bool started)
+{
+    struct fv_VuDev *vud = container_of(dev, struct fv_VuDev, dev);
+    struct fv_QueueInfo *ourqi;
+
+    fuse_log(FUSE_LOG_INFO, "%s: qidx=%d started=%d\n", __func__, qidx,
+             started);
+    assert(qidx >= 0);
+
+    /*
+     * Ignore additional request queues for now. passthrough_ll.c must be
+     * audited for thread-safety issues first. It was written with a
+     * well-behaved client in mind and may not protect against all types of
+     * races yet.
+     */
+    if (qidx > 1) {
+        fuse_log(FUSE_LOG_ERR,
+                 "%s: multiple request queues not yet implemented, please only "
+                 "configure 1 request queue\n",
+                 __func__);
+        exit(EXIT_FAILURE);
+    }
+
+    if (started) {
+        /* Fire up a thread to watch this queue */
+        if (qidx >= vud->nqueues) {
+            vud->qi = realloc(vud->qi, (qidx + 1) * sizeof(vud->qi[0]));
+            assert(vud->qi);
+            memset(vud->qi + vud->nqueues, 0,
+                   sizeof(vud->qi[0]) * (1 + (qidx - vud->nqueues)));
+            vud->nqueues = qidx + 1;
+        }
+        if (!vud->qi[qidx]) {
+            vud->qi[qidx] = calloc(1, sizeof(struct fv_QueueInfo));
+            assert(vud->qi[qidx]);
+            vud->qi[qidx]->virtio_dev = vud;
+            vud->qi[qidx]->qidx = qidx;
+        } else {
+            /* Shouldn't have been started */
+            assert(vud->qi[qidx]->kick_fd == -1);
+        }
+        ourqi = vud->qi[qidx];
+        ourqi->kick_fd = dev->vq[qidx].kick_fd;
+        if (pthread_create(&ourqi->thread, NULL, fv_queue_thread, ourqi)) {
+            fuse_log(FUSE_LOG_ERR, "%s: Failed to create thread for queue %d\n",
+                     __func__, qidx);
+            assert(0);
+        }
+    } else {
+        /* TODO: Kill the thread */
+        assert(qidx < vud->nqueues);
+        ourqi = vud->qi[qidx];
+        ourqi->kick_fd = -1;
+    }
+}
+
 static bool fv_queue_order(VuDev *dev, int qidx)
 {
     return false;
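The stop path above only records kick_fd = -1 and leaves killing the thread as a TODO. One plausible completion, sketched under the assumption of an extra per-queue kill_fd eventfd field on fv_QueueInfo that the queue thread also polls (none of this is in the patch):

/* Hypothetical teardown: wake the queue thread via an assumed kill_fd
 * eventfd, then reap it with pthread_join(). */
static void fv_queue_cleanup_thread(struct fv_QueueInfo *ourqi)
{
    const uint64_t value = 1;
    int ret;

    if (write(ourqi->kill_fd, &value, sizeof(value)) != sizeof(value)) {
        fuse_log(FUSE_LOG_ERR, "%s: failed to write kill_fd\n", __func__);
    }
    ret = pthread_join(ourqi->thread, NULL);
    if (ret) {
        fuse_log(FUSE_LOG_ERR, "%s: pthread_join failed (%d)\n", __func__, ret);
    }
}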
@@ -92,6 +178,9 @@ static const VuDevIface fv_iface = {
     .get_features = fv_get_features,
     .set_features = fv_set_features,
 
+    /* No process_msg handler needed; no device-specific vhost-user messages */
+    .queue_set_started = fv_queue_set_started,
+
     .queue_is_processed_in_order = fv_queue_order,
 };
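For reference on how these callbacks get driven: libvhost-user invokes queue_set_started() while it processes the front-end's vring setup and teardown messages, so the whole table above runs from the vhost-user dispatch loop. A rough sketch of such a loop, assuming libvhost-user's vu_dispatch() (which handles one message and returns false on disconnect or error); in this file that role belongs to virtio_loop(), whose actual implementation is not part of this patch:

/* Sketch: drive vhost-user message processing; fv_iface callbacks such
 * as fv_queue_set_started() fire from inside vu_dispatch(). */
static void dispatch_loop(VuDev *dev)
{
    while (vu_dispatch(dev)) {
        /* keep handling messages until disconnect or error */
    }
}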