Diffstat (limited to 'util')
-rw-r--r--  util/oslib-posix.c  27
-rw-r--r--  util/oslib-win32.c   7
-rw-r--r--  util/qht.c          65
3 files changed, 69 insertions, 30 deletions
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index aaec1891f5..8ec99ccb4f 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -46,6 +46,7 @@
 
 #ifdef __FreeBSD__
 #include <sys/sysctl.h>
+#include <libutil.h>
 #endif
 
 #include "qemu/mmap-alloc.h"
@@ -434,6 +435,32 @@ int qemu_read_password(char *buf, int buf_size)
 }
 
 
+char *qemu_get_pid_name(pid_t pid)
+{
+    char *name = NULL;
+
+#if defined(__FreeBSD__)
+    /* BSDs don't have /proc, but they provide a nice substitute */
+    struct kinfo_proc *proc = kinfo_getproc(pid);
+
+    if (proc) {
+        name = g_strdup(proc->ki_comm);
+        free(proc);
+    }
+#else
+    /* Assume a system with reasonable procfs */
+    char *pid_path;
+    size_t len;
+
+    pid_path = g_strdup_printf("/proc/%d/cmdline", pid);
+    g_file_get_contents(pid_path, &name, &len, NULL);
+    g_free(pid_path);
+#endif
+
+    return name;
+}
+
+
 pid_t qemu_fork(Error **errp)
 {
     sigset_t oldmask, newmask;
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index 4c1dcf1e66..d09863cc9d 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -575,6 +575,13 @@ int qemu_read_password(char *buf, int buf_size)
 }
 
 
+char *qemu_get_pid_name(pid_t pid)
+{
+    /* XXX Implement me */
+    abort();
+}
+
+
 pid_t qemu_fork(Error **errp)
 {
     errno = ENOSYS;
diff --git a/util/qht.c b/util/qht.c
index 16a8d7950e..ff4d2e6974 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -133,7 +133,8 @@ struct qht_map {
 /* trigger a resize when n_added_buckets > n_buckets / div */
 #define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
 
-static void qht_do_resize(struct qht *ht, struct qht_map *new);
+static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
+                                bool reset);
 static void qht_grow_maybe(struct qht *ht);
 
 #ifdef QHT_DEBUG
@@ -379,7 +380,7 @@ static void qht_bucket_reset__locked(struct qht_bucket *head)
             if (b->pointers[i] == NULL) {
                 goto done;
             }
-            b->hashes[i] = 0;
+            atomic_set(&b->hashes[i], 0);
             atomic_set(&b->pointers[i], NULL);
         }
         b = b->next;
@@ -408,12 +409,21 @@ void qht_reset(struct qht *ht)
     qht_map_unlock_buckets(map);
 }
 
+static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
+{
+    qht_do_resize_reset(ht, new, false);
+}
+
+static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
+{
+    qht_do_resize_reset(ht, new, true);
+}
+
 bool qht_reset_size(struct qht *ht, size_t n_elems)
 {
-    struct qht_map *new;
+    struct qht_map *new = NULL;
     struct qht_map *map;
     size_t n_buckets;
-    bool resize = false;
 
     n_buckets = qht_elems_to_buckets(n_elems);
 
@@ -421,18 +431,11 @@ bool qht_reset_size(struct qht *ht, size_t n_elems)
     map = ht->map;
     if (n_buckets != map->n_buckets) {
         new = qht_map_create(n_buckets);
-        resize = true;
-    }
-
-    qht_map_lock_buckets(map);
-    qht_map_reset__all_locked(map);
-    if (resize) {
-        qht_do_resize(ht, new);
     }
-    qht_map_unlock_buckets(map);
+    qht_do_resize_and_reset(ht, new);
     qemu_mutex_unlock(&ht->lock);
 
-    return resize;
+    return !!new;
 }
 
 static inline
@@ -444,7 +447,7 @@ void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
     do {
         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
-            if (b->hashes[i] == hash) {
+            if (atomic_read(&b->hashes[i]) == hash) {
                 /* The pointer is dereferenced before seqlock_read_retry,
                  * so (unlike qht_insert__locked) we need to use
                  * atomic_rcu_read here.
                  */
@@ -538,8 +541,8 @@ static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
     if (new) {
         atomic_rcu_set(&prev->next, b);
     }
-    b->hashes[i] = hash;
     /* smp_wmb() implicit in seqlock_write_begin.  */
+    atomic_set(&b->hashes[i], hash);
     atomic_set(&b->pointers[i], p);
     seqlock_write_end(&head->sequence);
     return true;
@@ -561,9 +564,7 @@ static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
     if (qht_map_needs_resize(map)) {
         struct qht_map *new = qht_map_create(map->n_buckets * 2);
 
-        qht_map_lock_buckets(map);
         qht_do_resize(ht, new);
-        qht_map_unlock_buckets(map);
     }
     qemu_mutex_unlock(&ht->lock);
 }
@@ -607,10 +608,10 @@ qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
     qht_debug_assert(to->pointers[i]);
     qht_debug_assert(from->pointers[j]);
 
-    to->hashes[i] = from->hashes[j];
+    atomic_set(&to->hashes[i], from->hashes[j]);
     atomic_set(&to->pointers[i], from->pointers[j]);
 
-    from->hashes[j] = 0;
+    atomic_set(&from->hashes[j], 0);
     atomic_set(&from->pointers[j], NULL);
 }
 
@@ -739,24 +740,31 @@ static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
 }
 
 /*
- * Call with ht->lock and all bucket locks held.
- *
- * Creating the @new map here would add unnecessary delay while all the locks
- * are held--holding up the bucket locks is particularly bad, since no writes
- * can occur while these are held. Thus, we let callers create the new map,
- * hopefully without the bucket locks held.
+ * Atomically perform a resize and/or reset.
+ * Call with ht->lock held.
  */
-static void qht_do_resize(struct qht *ht, struct qht_map *new)
+static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
 {
     struct qht_map *old;
 
     old = ht->map;
-    g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);
+    qht_map_lock_buckets(old);
 
+    if (reset) {
+        qht_map_reset__all_locked(old);
+    }
+
+    if (new == NULL) {
+        qht_map_unlock_buckets(old);
+        return;
+    }
+
+    g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);
     qht_map_iter__all_locked(ht, old, qht_map_copy, new);
     qht_map_debug__all_locked(new);
 
     atomic_rcu_set(&ht->map, new);
+    qht_map_unlock_buckets(old);
     call_rcu(old, qht_map_destroy, rcu);
 }
 
@@ -768,12 +776,9 @@ bool qht_resize(struct qht *ht, size_t n_elems)
     qemu_mutex_lock(&ht->lock);
     if (n_buckets != ht->map->n_buckets) {
         struct qht_map *new;
-        struct qht_map *old = ht->map;
 
         new = qht_map_create(n_buckets);
-        qht_map_lock_buckets(old);
         qht_do_resize(ht, new);
-        qht_map_unlock_buckets(old);
         ret = true;
     }
     qemu_mutex_unlock(&ht->lock);
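A minimal usage sketch for the helper introduced above, assuming it is called from QEMU code that already pulls in "qemu/osdep.h"; the report_child() wrapper and its messages are hypothetical and not part of the patch. qemu_get_pid_name() returns a heap-allocated string, or NULL on failure, and the caller must g_free() it; on the procfs path the buffer holds the raw cmdline, so read as a C string it yields argv[0].

#include "qemu/osdep.h"

/* Hypothetical caller: log which program a child process is running. */
static void report_child(pid_t child)
{
    char *name = qemu_get_pid_name(child);   /* may be NULL */

    if (name) {
        fprintf(stderr, "child %d is running '%s'\n", (int)child, name);
        g_free(name);   /* both backends allocate the string with glib */
    } else {
        fprintf(stderr, "could not resolve the name of child %d\n", (int)child);
    }
}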