author     Antonios Motakis <antonios.motakis@huawei.com>   2019-10-07 17:02:45 +0200
committer  Greg Kurz <groug@kaod.org>                        2019-10-10 11:36:14 +0200
commit     f3fe4a2d92bb4ee5b599b8b1eb781b2ae68af36c
tree       18644ef2c8f83406ba6208976a76feb9af906166   /hw/9pfs/9p.c
parent     1a6ed33cc56997479bbe5b48337ff8da44585bd4
9p: stat_to_qid: implement slow path
stat_to_qid attempts, via qid_path_prefixmap, to map unique files (which are identified by a 64 bit inode number and a 32 bit device id) to a 64 bit QID path value. However, this implementation makes some assumptions about inode number generation on the host.

If qid_path_prefixmap fails, we still have 48 bits available in the QID path to fall back to a less memory efficient full mapping.

Signed-off-by: Antonios Motakis <antonios.motakis@huawei.com>
[CS: - Rebased to https://github.com/gkurz/qemu/commits/9p-next (SHA1 7fc4c49e91).
     - Updated hash calls to new xxhash API.
     - Removed unnecessary parentheses in qpf_lookup_func().
     - Removed unnecessary g_malloc0() result checks.
     - Log error message when running out of prefixes in qid_path_fullmap().
     - Log warning message about potential degraded performance in qid_path_prefixmap().
     - Wrapped qpf_table initialization into a dedicated qpf_table_init() function.
     - Fixed typo in comment.]
Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
Signed-off-by: Greg Kurz <groug@kaod.org>
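For orientation, here is a minimal sketch of the QID path layout the two mappers work with: on the fast path the top 16 bits are a per-(device, inode prefix) value handed out by qid_path_prefixmap() and the low 48 bits come straight from the host inode number, while the new slow path hands out sequential values from the remaining 48 bit space. The helper names below and the exact definition of QPATH_INO_MASK are illustrative assumptions, not quotes from the patch.

#include <stdint.h>
#include <sys/types.h>

/* Assumed definition: mask selecting the low 48 bits of a QID path. */
#define QPATH_INO_MASK ((1ULL << 48) - 1)

/* Fast path (qid_path_prefixmap): 16 bit prefix in the top bits,
 * low 48 bits taken directly from the host inode number. */
static uint64_t qid_path_fast(uint16_t qp_prefix, ino_t host_ino)
{
    return ((uint64_t)qp_prefix << 48) | ((uint64_t)host_ino & QPATH_INO_MASK);
}

/* Slow path (qid_path_fullmap): one sequential 48 bit value per
 * (device, inode) pair, remembered in a hash table. */
static uint64_t qid_path_slow(uint64_t *qp_fullpath_next)
{
    uint64_t path = (*qp_fullpath_next)++;
    *qp_fullpath_next &= QPATH_INO_MASK;
    return path;
}

Since qp_prefix_next starts at 1 (see the realize hunk below), fast-path values always have a nonzero top 16 bits and therefore cannot collide with slow-path values, whose top 16 bits are zero.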
Diffstat (limited to 'hw/9pfs/9p.c')
-rw-r--r--  hw/9pfs/9p.c  74
1 file changed, 67 insertions, 7 deletions
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 8eb89c5c7d..d9be2d45d3 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -579,23 +579,34 @@ static uint32_t qpp_hash(QppEntry e)
return qemu_xxhash7(e.ino_prefix, e.dev, 0, 0, 0);
}
+static uint32_t qpf_hash(QpfEntry e)
+{
+ return qemu_xxhash7(e.ino, e.dev, 0, 0, 0);
+}
+
static bool qpp_lookup_func(const void *obj, const void *userp)
{
const QppEntry *e1 = obj, *e2 = userp;
return e1->dev == e2->dev && e1->ino_prefix == e2->ino_prefix;
}
-static void qpp_table_remove(void *p, uint32_t h, void *up)
+static bool qpf_lookup_func(const void *obj, const void *userp)
+{
+ const QpfEntry *e1 = obj, *e2 = userp;
+ return e1->dev == e2->dev && e1->ino == e2->ino;
+}
+
+static void qp_table_remove(void *p, uint32_t h, void *up)
{
g_free(p);
}
-static void qpp_table_destroy(struct qht *ht)
+static void qp_table_destroy(struct qht *ht)
{
if (!ht || !ht->map) {
return;
}
- qht_iter(ht, qpp_table_remove, NULL);
+ qht_iter(ht, qp_table_remove, NULL);
qht_destroy(ht);
}
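The new qpf_hash() and qpf_lookup_func() key the slow-path table on a QpfEntry, while the existing fast path keys its table on a QppEntry; both types are declared in hw/9pfs/9p.h rather than in this file. A rough sketch of the layouts this patch assumes (field comments are mine; see the header for the authoritative definitions):

#include <stdint.h>
#include <sys/types.h>

typedef struct QppEntry {
    dev_t    dev;         /* host device id */
    uint16_t ino_prefix;  /* top 16 bits of the host inode number */
    uint16_t qp_prefix;   /* prefix placed in the top 16 bits of the QID path */
} QppEntry;

typedef struct QpfEntry {
    dev_t    dev;         /* host device id */
    ino_t    ino;         /* full 64 bit host inode number */
    uint64_t path;        /* 48 bit QID path assigned to this file */
} QpfEntry;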
@@ -604,6 +615,50 @@ static void qpp_table_init(struct qht *ht)
qht_init(ht, qpp_lookup_func, 1, QHT_MODE_AUTO_RESIZE);
}
+static void qpf_table_init(struct qht *ht)
+{
+ qht_init(ht, qpf_lookup_func, 1 << 16, QHT_MODE_AUTO_RESIZE);
+}
+
+static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
+ uint64_t *path)
+{
+ QpfEntry lookup = {
+ .dev = stbuf->st_dev,
+ .ino = stbuf->st_ino
+ }, *val;
+ uint32_t hash = qpf_hash(lookup);
+
+ /* most users won't need the fullmap, so init the table lazily */
+ if (!pdu->s->qpf_table.map) {
+ qpf_table_init(&pdu->s->qpf_table);
+ }
+
+ val = qht_lookup(&pdu->s->qpf_table, &lookup, hash);
+
+ if (!val) {
+ if (pdu->s->qp_fullpath_next == 0) {
+ /* no more files can be mapped :'( */
+ error_report_once(
+ "9p: No more prefixes available for remapping inodes from "
+ "host to guest."
+ );
+ return -ENFILE;
+ }
+
+ val = g_malloc0(sizeof(QpfEntry));
+ *val = lookup;
+
+ /* new unique inode and device combo */
+ val->path = pdu->s->qp_fullpath_next++;
+ pdu->s->qp_fullpath_next &= QPATH_INO_MASK;
+ qht_insert(&pdu->s->qpf_table, val, hash, NULL);
+ }
+
+ *path = val->path;
+ return 0;
+}
+
/*
* stat_to_qid needs to map inode number (64 bits) and device id (32 bits)
* to a unique QID path (64 bits). To avoid having to map and keep track
@@ -629,9 +684,8 @@ static int qid_path_prefixmap(V9fsPDU *pdu, const struct stat *stbuf,
if (!val) {
if (pdu->s->qp_prefix_next == 0) {
/* we ran out of prefixes */
- error_report_once(
- "9p: No more prefixes available for remapping inodes from "
- "host to guest."
+ warn_report_once(
+ "9p: Potential degraded performance of inode remapping"
);
return -ENFILE;
}
@@ -656,6 +710,10 @@ static int stat_to_qid(V9fsPDU *pdu, const struct stat *stbuf, V9fsQID *qidp)
if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
/* map inode+device to qid path (fast path) */
err = qid_path_prefixmap(pdu, stbuf, &qidp->path);
+ if (err == -ENFILE) {
+ /* fast path didn't work, fall back to full map */
+ err = qid_path_fullmap(pdu, stbuf, &qidp->path);
+ }
if (err) {
return err;
}
@@ -3820,6 +3878,7 @@ int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t,
qpp_table_init(&s->qpp_table);
s->qp_prefix_next = 1; /* reserve 0 to detect overflow */
+ s->qp_fullpath_next = 1;
s->ctx.fst = &fse->fst;
fsdev_throttle_init(s->ctx.fst);
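Both counters are seeded with 1 here so that a later wrap back to 0 can serve as the "no more IDs" signal checked in qid_path_prefixmap() and qid_path_fullmap(). A compact, self-contained illustration of that pattern for the full-map counter (helper name hypothetical, mask definition assumed):

#include <errno.h>
#include <stdint.h>

#define QPATH_INO_MASK ((1ULL << 48) - 1)

/* Hypothetical helper: hand out the next full-map ID, treating a wrap
 * back to 0 as exhaustion of the 48 bit ID space. */
static int alloc_fullmap_id(uint64_t *next, uint64_t *id)
{
    if (*next == 0) {
        return -ENFILE;          /* counter wrapped: no IDs left */
    }
    *id = (*next)++;
    *next &= QPATH_INO_MASK;     /* keep the counter within 48 bits */
    return 0;
}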
@@ -3842,7 +3901,8 @@ void v9fs_device_unrealize_common(V9fsState *s, Error **errp)
fsdev_throttle_cleanup(s->ctx.fst);
}
g_free(s->tag);
- qpp_table_destroy(&s->qpp_table);
+ qp_table_destroy(&s->qpp_table);
+ qp_table_destroy(&s->qpf_table);
g_free(s->ctx.fs_root);
}