author     Peter Maydell <peter.maydell@linaro.org>   2018-06-21 17:54:26 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2018-06-21 17:54:26 +0100
commit     33836a731562e3d07b3a83f26e81c6b1482d216c
tree       a089ff809414c0ccfb142cd2936749bca2bf38a4 /util
parent     46012db666990ff2eed1d3dc199ab8006439a93b
parent     9f754620651d3432114f4bb89c7f12cbea814b3e
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20180615' into staging
TCG patch queue:
Work around macOS assembler lossage.
Eliminate tb_lock.
Fix TB code generation overflow.
# gpg: Signature made Fri 15 Jun 2018 20:40:56 BST
# gpg: using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F
* remotes/rth/tags/pull-tcg-20180615:
tcg: Reduce max TB opcode count
tcg: remove tb_lock
translate-all: remove tb_lock mention from cpu_restore_state_from_tb
cputlb: remove tb_lock from tlb_flush functions
translate-all: protect TB jumps with a per-destination-TB lock
translate-all: discard TB when tb_link_page returns an existing matching TB
translate-all: introduce assert_no_pages_locked
translate-all: add page_locked assertions
translate-all: use per-page locking in !user-mode
translate-all: move tb_invalidate_phys_page_range up in the file
translate-all: work page-by-page in tb_invalidate_phys_range_1
translate-all: remove hole in PageDesc
translate-all: make l1_map lockless
translate-all: iterate over TBs in a page with PAGE_FOR_EACH_TB
tcg: move tb_ctx.tb_phys_invalidate_count to tcg_ctx
tcg: track TBs with per-region BST's
qht: return existing entry when qht_insert fails
qht: require a default comparison function
tcg/i386: Use byte form of xgetbv instruction
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'util')
-rw-r--r--  util/qht.c  41
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/util/qht.c b/util/qht.c
index 55b0738cd1..c138777a9c 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -351,11 +351,14 @@ static struct qht_map *qht_map_create(size_t n_buckets)
     return map;
 }
 
-void qht_init(struct qht *ht, size_t n_elems, unsigned int mode)
+void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
+              unsigned int mode)
 {
     struct qht_map *map;
     size_t n_buckets = qht_elems_to_buckets(n_elems);
 
+    g_assert(cmp);
+    ht->cmp = cmp;
     ht->mode = mode;
     qemu_mutex_init(&ht->lock);
     map = qht_map_create(n_buckets);
@@ -479,8 +482,8 @@ void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
     return ret;
 }
 
-void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
-                 uint32_t hash)
+void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
+                        qht_lookup_func_t func)
 {
     struct qht_bucket *b;
     struct qht_map *map;
@@ -502,10 +505,15 @@ void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
     return qht_lookup__slowpath(b, func, userp, hash);
 }
 
+void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash)
+{
+    return qht_lookup_custom(ht, userp, hash, ht->cmp);
+}
+
 /* call with head->lock held */
-static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
-                               struct qht_bucket *head, void *p, uint32_t hash,
-                               bool *needs_resize)
+static void *qht_insert__locked(struct qht *ht, struct qht_map *map,
+                                struct qht_bucket *head, void *p, uint32_t hash,
+                                bool *needs_resize)
 {
     struct qht_bucket *b = head;
     struct qht_bucket *prev = NULL;
@@ -515,8 +523,9 @@ static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
     do {
         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
             if (b->pointers[i]) {
-                if (unlikely(b->pointers[i] == p)) {
-                    return false;
+                if (unlikely(b->hashes[i] == hash &&
+                             ht->cmp(b->pointers[i], p))) {
+                    return b->pointers[i];
                 }
             } else {
                 goto found;
@@ -545,7 +554,7 @@ static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
     atomic_set(&b->hashes[i], hash);
     atomic_set(&b->pointers[i], p);
     seqlock_write_end(&head->sequence);
-    return true;
+    return NULL;
 }
 
 static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
@@ -569,25 +578,31 @@ static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
     qemu_mutex_unlock(&ht->lock);
 }
 
-bool qht_insert(struct qht *ht, void *p, uint32_t hash)
+bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
 {
     struct qht_bucket *b;
     struct qht_map *map;
     bool needs_resize = false;
-    bool ret;
+    void *prev;
 
     /* NULL pointers are not supported */
     qht_debug_assert(p);
 
     b = qht_bucket_lock__no_stale(ht, hash, &map);
-    ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
+    prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
     qht_bucket_debug__locked(b);
     qemu_spin_unlock(&b->lock);
 
     if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
         qht_grow_maybe(ht);
     }
-    return ret;
+    if (likely(prev == NULL)) {
+        return true;
+    }
+    if (existing) {
+        *existing = prev;
+    }
+    return false;
 }
 
 static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
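For context, a minimal caller-side sketch of the qht API after this merge. The qht_init(), qht_lookup() and qht_insert() signatures are taken from the diff above; struct my_entry, my_entry_cmp() and intern_entry() are hypothetical names used only for illustration, and the sketch assumes it is compiled inside the QEMU tree where "qemu/qht.h" is available.

```c
/*
 * Hypothetical caller of the post-merge qht API. The qht_init(),
 * qht_lookup() and qht_insert() signatures come from the diff above;
 * struct my_entry, my_entry_cmp() and intern_entry() are illustrative
 * names only, not QEMU code.
 */
#include "qemu/osdep.h"
#include "qemu/qht.h"

struct my_entry {
    uint32_t hash;
    int key;
};

/* The default comparison function that qht_init() now requires. */
static bool my_entry_cmp(const void *a, const void *b)
{
    const struct my_entry *ea = a;
    const struct my_entry *eb = b;

    return ea->key == eb->key;
}

/* Insert e, or free it and return the entry that beat us to it. */
static struct my_entry *intern_entry(struct qht *ht, struct my_entry *e)
{
    void *existing;

    /*
     * qht_insert() now reports a matching entry through the new
     * "existing" out-parameter instead of merely returning false.
     */
    if (qht_insert(ht, e, e->hash, &existing)) {
        return e;       /* we won: the table now references e */
    }
    g_free(e);          /* we lost the race: reuse the winner */
    return existing;
}

static void example(void)
{
    struct qht ht;
    struct my_entry needle = { .hash = 0x1234, .key = 42 };
    struct my_entry *found;

    /* The comparison function is now fixed at init time... */
    qht_init(&ht, my_entry_cmp, 1 << 10, QHT_MODE_AUTO_RESIZE);

    /* ...so lookups no longer pass a comparator on every call. */
    found = qht_lookup(&ht, &needle, needle.hash);
    (void)found;
}
```

The "existing" out-parameter makes a lost insertion race cheap to handle: rather than performing a second lookup after a failed insert, the caller receives the winning entry directly, which is what "translate-all: discard TB when tb_link_page returns an existing matching TB" above relies on. Callers that still need a one-off comparator can use qht_lookup_custom().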