path: root/hw/9pfs/9p.c
author    Christian Schoenebeck <qemu_oss@crudebyte.com>    2019-10-07 17:02:45 +0200
committer Greg Kurz <groug@kaod.org>                        2019-10-10 11:36:23 +0200
commit    6b6aa8285d7ecc172ed59903bb61f26c19ba2538 (patch)
tree      d4705bbc262e7c218c60b13fef30361fd07ae776 /hw/9pfs/9p.c
parent    f3fe4a2d92bb4ee5b599b8b1eb781b2ae68af36c (diff)
9p: Use variable length suffixes for inode remapping
Use variable length suffixes for inode remapping instead of the fixed
16 bit size prefixes used before. With this change the inode numbers
on guest will typically be much smaller (e.g. around >2^1 .. >2^7
instead of >2^48 with the previous fixed size inode remapping).

Additionally this solution is more efficient, since inode numbers in
practice can take almost their entire 64 bit range on guest as well,
so there is less likely a need for generating and tracking additional
suffixes, which might also be beneficial for nested virtualization
where each level of virtualization would shift up the inode bits and
increase the chance of expensive remapping actions.

The "Exponential Golomb" algorithm is used as the basis for generating
the variable length suffixes. The algorithm has a parameter k which
controls the distribution of bits on increasing indices (minimum bits
at low index vs. maximum bits at high index). With k=0 the generated
suffixes look like:

    Index Dec/Bin  ->  Generated Suffix Bin
    1     [1]      ->  [1]       (1 bits)
    2     [10]     ->  [010]     (3 bits)
    3     [11]     ->  [110]     (3 bits)
    4     [100]    ->  [00100]   (5 bits)
    5     [101]    ->  [10100]   (5 bits)
    6     [110]    ->  [01100]   (5 bits)
    7     [111]    ->  [11100]   (5 bits)
    8     [1000]   ->  [0001000] (7 bits)
    9     [1001]   ->  [1001000] (7 bits)
    10    [1010]   ->  [0101000] (7 bits)
    11    [1011]   ->  [1101000] (7 bits)
    12    [1100]   ->  [0011000] (7 bits)
    ...
    65533 [1111111111111101] -> [1011111111111111000000000000000] (31 bits)
    65534 [1111111111111110] -> [0111111111111111000000000000000] (31 bits)
    65535 [1111111111111111] -> [1111111111111111000000000000000] (31 bits)

Hence minBits=1 maxBits=31

And with k=5 they would look like:

    Index Dec/Bin  ->  Generated Suffix Bin
    1     [1]      ->  [000001] (6 bits)
    2     [10]     ->  [100001] (6 bits)
    3     [11]     ->  [010001] (6 bits)
    4     [100]    ->  [110001] (6 bits)
    5     [101]    ->  [001001] (6 bits)
    6     [110]    ->  [101001] (6 bits)
    7     [111]    ->  [011001] (6 bits)
    8     [1000]   ->  [111001] (6 bits)
    9     [1001]   ->  [000101] (6 bits)
    10    [1010]   ->  [100101] (6 bits)
    11    [1011]   ->  [010101] (6 bits)
    12    [1100]   ->  [110101] (6 bits)
    ...
    65533 [1111111111111101] -> [0011100000000000100000000000] (28 bits)
    65534 [1111111111111110] -> [1011100000000000100000000000] (28 bits)
    65535 [1111111111111111] -> [0111100000000000100000000000] (28 bits)

Hence minBits=6 maxBits=28

Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
Signed-off-by: Greg Kurz <groug@kaod.org>
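For illustration only (not part of the patch), the following standalone C
sketch reproduces the suffix column of the k=0 table above:
exp_golomb_prefix() follows the patch's expGolombEncode(), while
reverse_bits() is a plain loop standing in for the patch's
mirror8bit()/mirror64bit() based invertAffix(); all names in the sketch
are invented for this example.

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include <math.h>

    typedef struct {
        uint64_t value; /* numeric value of the affix */
        int bits;       /* number of significant bits in 'value' */
    } Affix;

    /* Exp. Golomb prefix for index n >= 1 and parameter k */
    static Affix exp_golomb_prefix(uint64_t n, int k)
    {
        const uint64_t value = n + (1ULL << k) - 1;
        const int bits = (int) log2(value) + 1;
        const int extra = (bits - 1 - k) > 0 ? (bits - 1 - k) : 0;
        return (Affix) { .value = value, .bits = bits + extra };
    }

    /* Reverse the lowest 'bits' bits, turning the prefix into a suffix */
    static uint64_t reverse_bits(uint64_t value, int bits)
    {
        uint64_t r = 0;
        for (int i = 0; i < bits; i++) {
            r |= ((value >> i) & 1) << (bits - 1 - i);
        }
        return r;
    }

    int main(void) /* build with e.g.: cc golomb.c -lm */
    {
        for (uint64_t n = 1; n <= 12; n++) {
            Affix prefix = exp_golomb_prefix(n, 0 /* k */);
            uint64_t suffix = reverse_bits(prefix.value, prefix.bits);
            printf("%2" PRIu64 " -> [", n);
            for (int i = prefix.bits - 1; i >= 0; i--) {
                putchar((suffix >> i) & 1 ? '1' : '0');
            }
            printf("] (%d bits)\n", prefix.bits);
        }
        return 0;
    }

In the patch itself, a suffix generated this way is then placed into the
low bits of the guest inode number, with the remaining host inode bits
shifted up (see qid_path_suffixmap() in the diff below):

    *path = (stbuf->st_ino << val->qp_affix.bits) | val->qp_affix.value;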
Diffstat (limited to 'hw/9pfs/9p.c')
-rw-r--r--  hw/9pfs/9p.c  268
1 file changed, 238 insertions(+), 30 deletions(-)
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index d9be2d45d3..37abcdb71e 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -27,6 +27,7 @@
#include "migration/blocker.h"
#include "sysemu/qtest.h"
#include "qemu/xxhash.h"
+#include <math.h>
int open_fd_hw;
int total_open_fd;
@@ -573,6 +574,116 @@ static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
P9_STAT_MODE_NAMED_PIPE | \
P9_STAT_MODE_SOCKET)
+/* Mirrors all bits of a byte. So e.g. binary 10100000 would become 00000101. */
+static inline uint8_t mirror8bit(uint8_t byte)
+{
+ return (byte * 0x0202020202ULL & 0x010884422010ULL) % 1023;
+}
+
+/* Same as mirror8bit() just for a 64 bit data type instead of a byte. */
+static inline uint64_t mirror64bit(uint64_t value)
+{
+ return ((uint64_t)mirror8bit(value & 0xff) << 56) |
+ ((uint64_t)mirror8bit((value >> 8) & 0xff) << 48) |
+ ((uint64_t)mirror8bit((value >> 16) & 0xff) << 40) |
+ ((uint64_t)mirror8bit((value >> 24) & 0xff) << 32) |
+ ((uint64_t)mirror8bit((value >> 32) & 0xff) << 24) |
+ ((uint64_t)mirror8bit((value >> 40) & 0xff) << 16) |
+ ((uint64_t)mirror8bit((value >> 48) & 0xff) << 8) |
+ ((uint64_t)mirror8bit((value >> 56) & 0xff));
+}
+
+/**
+ * @brief Parameter k for the Exponential Golomb algorithm to be used.
+ *
+ * The smaller this value, the smaller the minimum bit count of the Exp.
+ * Golomb generated affixes will be (at the lowest index), at the price of
+ * a higher maximum bit count of generated affixes (at the highest index).
+ * Likewise, increasing this parameter yields a smaller maximum bit count
+ * at the price of a higher minimum bit count.
+ *
+ * In practice that means: a good value for k depends on the expected number
+ * of devices to be exposed by one export. For a small number of devices k
+ * should be small, for a large number of devices k might be increased
+ * instead. The default of k=0 should be fine for most users though.
+ *
+ * @b IMPORTANT: In case this ever becomes a runtime parameter, the value of
+ * k must not change as long as the guest is still running, because that
+ * would cause completely different inode numbers to be generated on guest.
+ */
+#define EXP_GOLOMB_K 0
+
+/**
+ * @brief Exponential Golomb algorithm for arbitrary k (including k=0).
+ *
+ * The Exponential Golomb algorithm generates @b prefixes (@b not suffixes!)
+ * with growing length and with the mathematical property of being
+ * "prefix-free". The latter means the generated prefixes can be prepended
+ * in front of arbitrary numbers and the resulting concatenated numbers are
+ * guaranteed to be always unique.
+ *
+ * This is a minor adjustment to the original Exp. Golomb algorithm in the
+ * sense that the lowest allowed index (@param n) starts at 1, not at zero.
+ *
+ * @param n - natural number (or index) of the prefix to be generated
+ * (1, 2, 3, ...)
+ * @param k - parameter k of Exp. Golomb algorithm to be used
+ * (see comment on EXP_GOLOMB_K macro for details about k)
+ */
+static VariLenAffix expGolombEncode(uint64_t n, int k)
+{
+ const uint64_t value = n + (1 << k) - 1;
+ const int bits = (int) log2(value) + 1;
+ return (VariLenAffix) {
+ .type = AffixType_Prefix,
+ .value = value,
+ .bits = bits + MAX((bits - 1 - k), 0)
+ };
+}
+
+/**
+ * @brief Converts a suffix into a prefix, or a prefix into a suffix.
+ *
+ * Simply mirrors all bits of the affix value, in order to preserve the
+ * mathematical "prefix-free" or "suffix-free" property respectively
+ * after the conversion.
+ *
+ * If a passed prefix is suitable to create unique numbers, then the
+ * returned suffix is suitable to create unique numbers as well (and vice
+ * versa).
+ */
+static VariLenAffix invertAffix(const VariLenAffix *affix)
+{
+ return (VariLenAffix) {
+ .type =
+ (affix->type == AffixType_Suffix) ?
+ AffixType_Prefix : AffixType_Suffix,
+ .value =
+ mirror64bit(affix->value) >>
+ ((sizeof(affix->value) * 8) - affix->bits),
+ .bits = affix->bits
+ };
+}
+
+/**
+ * @brief Generates suffix numbers with "suffix-free" property.
+ *
+ * This is just a wrapper function on top of the Exp. Golomb algorithm.
+ *
+ * Since the Exp. Golomb algorithm generates prefixes, but we need suffixes,
+ * this function converts the Exp. Golomb prefixes into appropriate suffixes
+ * which are still suitable for generating unique numbers.
+ *
+ * @param index - natural number (or index) of the suffix to be generated
+ *                (1, 2, 3, ...)
+ */
+static VariLenAffix affixForIndex(uint64_t index)
+{
+ VariLenAffix prefix;
+ prefix = expGolombEncode(index, EXP_GOLOMB_K);
+ return invertAffix(&prefix); /* convert prefix to suffix */
+}
+
/* creative abuse of tb_hash_func7, which is based on xxhash */
static uint32_t qpp_hash(QppEntry e)
{
@@ -584,13 +695,19 @@ static uint32_t qpf_hash(QpfEntry e)
return qemu_xxhash7(e.ino, e.dev, 0, 0, 0);
}
-static bool qpp_lookup_func(const void *obj, const void *userp)
+static bool qpd_cmp_func(const void *obj, const void *userp)
+{
+ const QpdEntry *e1 = obj, *e2 = userp;
+ return e1->dev == e2->dev;
+}
+
+static bool qpp_cmp_func(const void *obj, const void *userp)
{
const QppEntry *e1 = obj, *e2 = userp;
return e1->dev == e2->dev && e1->ino_prefix == e2->ino_prefix;
}
-static bool qpf_lookup_func(const void *obj, const void *userp)
+static bool qpf_cmp_func(const void *obj, const void *userp)
{
const QpfEntry *e1 = obj, *e2 = userp;
return e1->dev == e2->dev && e1->ino == e2->ino;
@@ -610,16 +727,70 @@ static void qp_table_destroy(struct qht *ht)
qht_destroy(ht);
}
+static void qpd_table_init(struct qht *ht)
+{
+ qht_init(ht, qpd_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
+}
+
static void qpp_table_init(struct qht *ht)
{
- qht_init(ht, qpp_lookup_func, 1, QHT_MODE_AUTO_RESIZE);
+ qht_init(ht, qpp_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
}
static void qpf_table_init(struct qht *ht)
{
- qht_init(ht, qpf_lookup_func, 1 << 16, QHT_MODE_AUTO_RESIZE);
+ qht_init(ht, qpf_cmp_func, 1 << 16, QHT_MODE_AUTO_RESIZE);
+}
+
+/*
+ * Returns how many (high end) bits of inode numbers of the passed fs
+ * device shall be used (in combination with the device number) to
+ * generate hash values for qpp_table entries.
+ *
+ * This function is required if variable length suffixes are used for inode
+ * number mapping on guest level. Since a device may end up having multiple
+ * entries in qpp_table, each entry most probably with a different suffix
+ * length, we need this function in conjunction with qpd_table to "agree"
+ * on a fixed amount of bits (per device) to be always used for generating
+ * hash values, in order to get consistent behaviour when accessing
+ * qpp_table.
+ */
+static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev)
+{
+ QpdEntry lookup = {
+ .dev = dev
+ }, *val;
+ uint32_t hash = dev;
+ VariLenAffix affix;
+
+ val = qht_lookup(&pdu->s->qpd_table, &lookup, hash);
+ if (!val) {
+ val = g_malloc0(sizeof(QpdEntry));
+ *val = lookup;
+ affix = affixForIndex(pdu->s->qp_affix_next);
+ val->prefix_bits = affix.bits;
+ qht_insert(&pdu->s->qpd_table, val, hash, NULL);
+ pdu->s->qp_ndevices++;
+ }
+ return val->prefix_bits;
}
+/**
+ * @brief Slow / full mapping host inode nr -> guest inode nr.
+ *
+ * This function performs a slower and much more costly remapping of an
+ * original file inode number on host to an appropriate different inode
+ * number on guest. For every (dev, inode) combination on host a new
+ * sequential number is generated, cached and exposed as inode number on
+ * guest.
+ *
+ * This is just a "last resort" fallback solution if the much faster/cheaper
+ * qid_path_suffixmap() failed. In practice this slow / full mapping is not
+ * expected ever to be used at all though.
+ *
+ * @see qid_path_suffixmap() for details
+ *
+ */
static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
uint64_t *path)
{
@@ -628,11 +799,7 @@ static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
.ino = stbuf->st_ino
}, *val;
uint32_t hash = qpf_hash(lookup);
-
- /* most users won't need the fullmap, so init the table lazily */
- if (!pdu->s->qpf_table.map) {
- qpf_table_init(&pdu->s->qpf_table);
- }
+ VariLenAffix affix;
val = qht_lookup(&pdu->s->qpf_table, &lookup, hash);
@@ -650,8 +817,11 @@ static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
*val = lookup;
/* new unique inode and device combo */
- val->path = pdu->s->qp_fullpath_next++;
- pdu->s->qp_fullpath_next &= QPATH_INO_MASK;
+ affix = affixForIndex(
+ 1ULL << (sizeof(pdu->s->qp_affix_next) * 8)
+ );
+ val->path = (pdu->s->qp_fullpath_next++ << affix.bits) | affix.value;
+ pdu->s->qp_fullpath_next &= ((1ULL << (64 - affix.bits)) - 1);
qht_insert(&pdu->s->qpf_table, val, hash, NULL);
}
@@ -659,31 +829,60 @@ static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
return 0;
}
-/*
- * stat_to_qid needs to map inode number (64 bits) and device id (32 bits)
- * to a unique QID path (64 bits). To avoid having to map and keep track
- * of up to 2^64 objects, we map only the 16 highest bits of the inode plus
- * the device id to the 16 highest bits of the QID path. The 48 lowest bits
- * of the QID path equal to the lowest bits of the inode number.
+/**
+ * @brief Quick mapping host inode nr -> guest inode nr.
*
- * This takes advantage of the fact that inode number are usually not
- * random but allocated sequentially, so we have fewer items to keep
- * track of.
+ * This function performs quick remapping of an original file inode number
+ * on host to an appropriate different inode number on guest. This remapping
+ * of inodes is required to avoid inode nr collisions on guest which would
+ * happen if the 9p export contains more than 1 exported file system (or
+ * more than 1 file system data set), because unlike on host level where the
+ * files would have different device nrs, all files exported by 9p would
+ * share the same device nr on guest (the device nr of the virtual 9p device
+ * that is).
+ *
+ * Inode remapping is performed by chopping off high end bits of the original
+ * inode number from host, shifting the result upwards and then assigning a
+ * generated suffix number for the low end bits, where the same suffix number
+ * will be shared by all inodes with the same device id AND the same high end
+ * bits that have been chopped off. That approach utilizes the fact that inode
+ * numbers very likely share the same high end bits (i.e. due to their common
+ * sequential generation by file systems) and hence in practice we only have
+ * to generate and track a very limited number of suffixes.
+ *
+ * We generate variable size suffixes for that purpose. The 1st generated
+ * suffix will only have 1 bit and hence we only need to chop off 1 bit from
+ * the original inode number. The subsequently generated suffixes will grow
+ * in (bit) size, i.e. the 2nd and 3rd generated suffix will have 3 bits and
+ * hence we have to chop off 3 bits from their original inodes, and so on.
+ * That approach of using variable length suffixes (instead of fixed size
+ * ones) utilizes the fact that in practice only a very limited number of
+ * devices are shared by the same export (e.g. typically less than 2 dozen
+ * devices per 9p export), so in practice we need to chop off fewer bits
+ * than with fixed size prefixes, yet remain flexible to add new devices
+ * below the host's export directory at any time at runtime, without having
+ * to reboot the guest nor requiring to reconfigure it. And due to the very
+ * limited amount of original high end bits that we chop off that way, the
+ * total number of suffixes we need to generate is smaller than with fixed
+ * size prefixes, which also improves performance of the inode remapping
+ * algorithm, and finally has the nice side effect that the inode numbers
+ * on guest will be much smaller & human friendly. ;-)
*/
-static int qid_path_prefixmap(V9fsPDU *pdu, const struct stat *stbuf,
+static int qid_path_suffixmap(V9fsPDU *pdu, const struct stat *stbuf,
uint64_t *path)
{
+ const int ino_hash_bits = qid_inode_prefix_hash_bits(pdu, stbuf->st_dev);
QppEntry lookup = {
.dev = stbuf->st_dev,
- .ino_prefix = (uint16_t) (stbuf->st_ino >> 48)
+ .ino_prefix = (uint16_t) (stbuf->st_ino >> (64 - ino_hash_bits))
}, *val;
uint32_t hash = qpp_hash(lookup);
val = qht_lookup(&pdu->s->qpp_table, &lookup, hash);
if (!val) {
- if (pdu->s->qp_prefix_next == 0) {
- /* we ran out of prefixes */
+ if (pdu->s->qp_affix_next == 0) {
+ /* we ran out of affixes */
warn_report_once(
"9p: Potential degraded performance of inode remapping"
);
@@ -693,12 +892,13 @@ static int qid_path_prefixmap(V9fsPDU *pdu, const struct stat *stbuf,
val = g_malloc0(sizeof(QppEntry));
*val = lookup;
- /* new unique inode prefix and device combo */
- val->qp_prefix = pdu->s->qp_prefix_next++;
+ /* new unique inode affix and device combo */
+ val->qp_affix_index = pdu->s->qp_affix_next++;
+ val->qp_affix = affixForIndex(val->qp_affix_index);
qht_insert(&pdu->s->qpp_table, val, hash, NULL);
}
-
- *path = ((uint64_t)val->qp_prefix << 48) | (stbuf->st_ino & QPATH_INO_MASK);
+ /* assuming generated affix to be suffix type, not prefix */
+ *path = (stbuf->st_ino << val->qp_affix.bits) | val->qp_affix.value;
return 0;
}
@@ -709,7 +909,7 @@ static int stat_to_qid(V9fsPDU *pdu, const struct stat *stbuf, V9fsQID *qidp)
if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
/* map inode+device to qid path (fast path) */
- err = qid_path_prefixmap(pdu, stbuf, &qidp->path);
+ err = qid_path_suffixmap(pdu, stbuf, &qidp->path);
if (err == -ENFILE) {
/* fast path didn't work, fall back to full map */
err = qid_path_fullmap(pdu, stbuf, &qidp->path);
@@ -3876,8 +4076,15 @@ int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t,
s->dev_id = stat.st_dev;
+    /* init inode remapping: */
+ /* hash table for variable length inode suffixes */
+ qpd_table_init(&s->qpd_table);
+ /* hash table for slow/full inode remapping (most users won't need it) */
+ qpf_table_init(&s->qpf_table);
+ /* hash table for quick inode remapping */
qpp_table_init(&s->qpp_table);
- s->qp_prefix_next = 1; /* reserve 0 to detect overflow */
+ s->qp_ndevices = 0;
+ s->qp_affix_next = 1; /* reserve 0 to detect overflow */
s->qp_fullpath_next = 1;
s->ctx.fst = &fse->fst;
@@ -3901,6 +4108,7 @@ void v9fs_device_unrealize_common(V9fsState *s, Error **errp)
fsdev_throttle_cleanup(s->ctx.fst);
}
g_free(s->tag);
+ qp_table_destroy(&s->qpd_table);
qp_table_destroy(&s->qpp_table);
qp_table_destroy(&s->qpf_table);
g_free(s->ctx.fs_root);