author     Peter Maydell <peter.maydell@linaro.org>   2020-09-24 18:48:45 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-09-24 18:48:45 +0100
commit     8c1c07929feae876202ba26f07a540c5115c18cd (patch)
tree       20f6c8e2ac556bfb3c88a98c0d0cb2689de0263e /tests
parent     1bd5556f6686365e76f7ff67fe67260c449e8345 (diff)
parent     d73415a315471ac0b127ed3fad45c8ec5d711de1 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request
This includes the atomic_ -> qatomic_ rename that touches many files and is
prone to conflicts.
# gpg: Signature made Wed 23 Sep 2020 17:08:43 BST
# gpg: using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg: aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8
* remotes/stefanha/tags/block-pull-request:
  qemu/atomic.h: rename atomic_ to qatomic_
  tests: add test-fdmon-epoll
  fdmon-poll: reset npfd when upgrading to fdmon-epoll
  gitmodules: add qemu.org vbootrom submodule
  gitmodules: switch to qemu.org meson mirror
  gitmodules: switch to qemu.org qboot mirror
  docs/system: clarify deprecation schedule
  virtio-crypto: don't modify elem->in/out_sg
  virtio-blk: undo destructive iov_discard_*() operations
  util/iov: add iov_discard_undo()
  virtio: add vhost-user-fs-ccw device
  libvhost-user: handle endianness as mandated by the spec
  MAINTAINERS: add Stefan Hajnoczi as block/nvme.c maintainer
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
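
The dominant change in this pull is the mechanical atomic_ -> qatomic_ rename of the
qemu/atomic.h wrappers; call sites keep their semantics and only gain the q prefix.
As a minimal before/after sketch, lifted from the thread_func() startup loop of
tests/atomic64-bench.c in the diff below:

    /* before: old qemu/atomic.h names */
    atomic_inc(&n_ready_threads);
    while (!atomic_read(&test_start)) {
        cpu_relax();
    }

    /* after: same semantics, qatomic_ prefix */
    qatomic_inc(&n_ready_threads);
    while (!qatomic_read(&test_start)) {
        cpu_relax();
    }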
Diffstat (limited to 'tests')
-rw-r--r--  tests/atomic64-bench.c        |  14
-rw-r--r--  tests/atomic_add-bench.c      |  14
-rw-r--r--  tests/iothread.c              |   2
-rw-r--r--  tests/meson.build             |   3
-rw-r--r--  tests/qht-bench.c             |  12
-rw-r--r--  tests/rcutorture.c            |  24
-rw-r--r--  tests/test-aio-multithread.c  |  52
-rw-r--r--  tests/test-fdmon-epoll.c      |  73
-rw-r--r--  tests/test-iov.c              | 165
-rw-r--r--  tests/test-logging.c          |   4
-rw-r--r--  tests/test-rcu-list.c         |  38
-rw-r--r--  tests/test-thread-pool.c      |  10
12 files changed, 327 insertions, 84 deletions
diff --git a/tests/atomic64-bench.c b/tests/atomic64-bench.c
index 121a8c14f4..e474753d34 100644
--- a/tests/atomic64-bench.c
+++ b/tests/atomic64-bench.c
@@ -56,17 +56,17 @@ static void *thread_func(void *arg)
 {
     struct thread_info *info = arg;
 
-    atomic_inc(&n_ready_threads);
-    while (!atomic_read(&test_start)) {
+    qatomic_inc(&n_ready_threads);
+    while (!qatomic_read(&test_start)) {
         cpu_relax();
     }
 
-    while (!atomic_read(&test_stop)) {
+    while (!qatomic_read(&test_stop)) {
         unsigned int index;
 
         info->r = xorshift64star(info->r);
         index = info->r & (range - 1);
-        atomic_read_i64(&counts[index].i64);
+        qatomic_read_i64(&counts[index].i64);
         info->accesses++;
     }
     return NULL;
@@ -76,13 +76,13 @@ static void run_test(void)
 {
     unsigned int i;
 
-    while (atomic_read(&n_ready_threads) != n_threads) {
+    while (qatomic_read(&n_ready_threads) != n_threads) {
         cpu_relax();
     }
 
-    atomic_set(&test_start, true);
+    qatomic_set(&test_start, true);
     g_usleep(duration * G_USEC_PER_SEC);
-    atomic_set(&test_stop, true);
+    qatomic_set(&test_stop, true);
 
     for (i = 0; i < n_threads; i++) {
         qemu_thread_join(&threads[i]);
diff --git a/tests/atomic_add-bench.c b/tests/atomic_add-bench.c
index 5666f6bbff..f05471ab45 100644
--- a/tests/atomic_add-bench.c
+++ b/tests/atomic_add-bench.c
@@ -53,12 +53,12 @@ static void *thread_func(void *arg)
 {
     struct thread_info *info = arg;
 
-    atomic_inc(&n_ready_threads);
-    while (!atomic_read(&test_start)) {
+    qatomic_inc(&n_ready_threads);
+    while (!qatomic_read(&test_start)) {
         cpu_relax();
     }
 
-    while (!atomic_read(&test_stop)) {
+    while (!qatomic_read(&test_stop)) {
         unsigned int index;
 
         info->r = xorshift64star(info->r);
@@ -68,7 +68,7 @@ static void *thread_func(void *arg)
             counts[index].val += 1;
             qemu_mutex_unlock(&counts[index].lock);
         } else {
-            atomic_inc(&counts[index].val);
+            qatomic_inc(&counts[index].val);
         }
     }
     return NULL;
@@ -78,13 +78,13 @@ static void run_test(void)
 {
     unsigned int i;
 
-    while (atomic_read(&n_ready_threads) != n_threads) {
+    while (qatomic_read(&n_ready_threads) != n_threads) {
         cpu_relax();
     }
 
-    atomic_set(&test_start, true);
+    qatomic_set(&test_start, true);
     g_usleep(duration * G_USEC_PER_SEC);
-    atomic_set(&test_stop, true);
+    qatomic_set(&test_stop, true);
 
     for (i = 0; i < n_threads; i++) {
         qemu_thread_join(&threads[i]);
diff --git a/tests/iothread.c b/tests/iothread.c
index d3a2ee9a01..afde12b4ef 100644
--- a/tests/iothread.c
+++ b/tests/iothread.c
@@ -74,7 +74,7 @@ static void *iothread_run(void *opaque)
     qemu_cond_signal(&iothread->init_done_cond);
     qemu_mutex_unlock(&iothread->init_done_lock);
 
-    while (!atomic_read(&iothread->stopping)) {
+    while (!qatomic_read(&iothread->stopping)) {
         aio_poll(iothread->ctx, true);
     }
 
diff --git a/tests/meson.build b/tests/meson.build
index 8c3e930687..04072a64ca 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -170,6 +170,9 @@ if have_block
   if 'CONFIG_NETTLE' in config_host or 'CONFIG_GCRYPT' in config_host
     tests += {'test-crypto-pbkdf': [io]}
   endif
+  if 'CONFIG_EPOLL_CREATE1' in config_host
+    tests += {'test-fdmon-epoll': [testblock]}
+  endif
   benchs += {
     'benchmark-crypto-hash': [crypto],
     'benchmark-crypto-hmac': [crypto],
diff --git a/tests/qht-bench.c b/tests/qht-bench.c
index 362f03cb03..2e5b70ccd0 100644
--- a/tests/qht-bench.c
+++ b/tests/qht-bench.c
@@ -209,13 +209,13 @@ static void *thread_func(void *p)
 
     rcu_register_thread();
 
-    atomic_inc(&n_ready_threads);
-    while (!atomic_read(&test_start)) {
+    qatomic_inc(&n_ready_threads);
+    while (!qatomic_read(&test_start)) {
         cpu_relax();
     }
 
     rcu_read_lock();
-    while (!atomic_read(&test_stop)) {
+    while (!qatomic_read(&test_stop)) {
         info->seed = xorshift64star(info->seed);
         info->func(info);
     }
@@ -423,13 +423,13 @@ static void run_test(void)
 {
     int i;
 
-    while (atomic_read(&n_ready_threads) != n_rw_threads + n_rz_threads) {
+    while (qatomic_read(&n_ready_threads) != n_rw_threads + n_rz_threads) {
         cpu_relax();
     }
 
-    atomic_set(&test_start, true);
+    qatomic_set(&test_start, true);
     g_usleep(duration * G_USEC_PER_SEC);
-    atomic_set(&test_stop, true);
+    qatomic_set(&test_stop, true);
 
     for (i = 0; i < n_rw_threads; i++) {
         qemu_thread_join(&rw_threads[i]);
diff --git a/tests/rcutorture.c b/tests/rcutorture.c
index 732f03abda..de6f649058 100644
--- a/tests/rcutorture.c
+++ b/tests/rcutorture.c
@@ -123,7 +123,7 @@ static void *rcu_read_perf_test(void *arg)
     rcu_register_thread();
     *(struct rcu_reader_data **)arg = &rcu_reader;
 
-    atomic_inc(&nthreadsrunning);
+    qatomic_inc(&nthreadsrunning);
     while (goflag == GOFLAG_INIT) {
         g_usleep(1000);
     }
@@ -149,7 +149,7 @@ static void *rcu_update_perf_test(void *arg)
     rcu_register_thread();
     *(struct rcu_reader_data **)arg = &rcu_reader;
 
-    atomic_inc(&nthreadsrunning);
+    qatomic_inc(&nthreadsrunning);
     while (goflag == GOFLAG_INIT) {
         g_usleep(1000);
     }
@@ -172,7 +172,7 @@ static void perftestinit(void)
 
 static void perftestrun(int nthreads, int duration, int nreaders, int nupdaters)
 {
-    while (atomic_read(&nthreadsrunning) < nthreads) {
+    while (qatomic_read(&nthreadsrunning) < nthreads) {
         g_usleep(1000);
     }
     goflag = GOFLAG_RUN;
@@ -259,8 +259,8 @@ static void *rcu_read_stress_test(void *arg)
     }
     while (goflag == GOFLAG_RUN) {
         rcu_read_lock();
-        p = atomic_rcu_read(&rcu_stress_current);
-        if (atomic_read(&p->mbtest) == 0) {
+        p = qatomic_rcu_read(&rcu_stress_current);
+        if (qatomic_read(&p->mbtest) == 0) {
             n_mberror++;
         }
         rcu_read_lock();
@@ -268,7 +268,7 @@ static void *rcu_read_stress_test(void *arg)
             garbage++;
         }
         rcu_read_unlock();
-        pc = atomic_read(&p->age);
+        pc = qatomic_read(&p->age);
         rcu_read_unlock();
         if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
             pc = RCU_STRESS_PIPE_LEN;
@@ -301,7 +301,7 @@ static void *rcu_read_stress_test(void *arg)
 static void *rcu_update_stress_test(void *arg)
 {
     int i, rcu_stress_idx = 0;
-    struct rcu_stress *cp = atomic_read(&rcu_stress_current);
+    struct rcu_stress *cp = qatomic_read(&rcu_stress_current);
 
     rcu_register_thread();
     *(struct rcu_reader_data **)arg = &rcu_reader;
@@ -319,11 +319,11 @@ static void *rcu_update_stress_test(void *arg)
         p = &rcu_stress_array[rcu_stress_idx];
         /* catching up with ourselves would be a bug */
         assert(p != cp);
-        atomic_set(&p->mbtest, 0);
+        qatomic_set(&p->mbtest, 0);
         smp_mb();
-        atomic_set(&p->age, 0);
-        atomic_set(&p->mbtest, 1);
-        atomic_rcu_set(&rcu_stress_current, p);
+        qatomic_set(&p->age, 0);
+        qatomic_set(&p->mbtest, 1);
+        qatomic_rcu_set(&rcu_stress_current, p);
         cp = p;
         /*
          * New RCU structure is now live, update pipe counts on old
@@ -331,7 +331,7 @@ static void *rcu_update_stress_test(void *arg)
          */
         for (i = 0; i < RCU_STRESS_PIPE_LEN; i++) {
             if (i != rcu_stress_idx) {
-                atomic_set(&rcu_stress_array[i].age,
+                qatomic_set(&rcu_stress_array[i].age,
                            rcu_stress_array[i].age + 1);
             }
         }
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
index d3144be7e0..a555cc8835 100644
--- a/tests/test-aio-multithread.c
+++ b/tests/test-aio-multithread.c
@@ -118,16 +118,16 @@ static bool schedule_next(int n)
 {
     Coroutine *co;
 
-    co = atomic_xchg(&to_schedule[n], NULL);
+    co = qatomic_xchg(&to_schedule[n], NULL);
     if (!co) {
-        atomic_inc(&count_retry);
+        qatomic_inc(&count_retry);
         return false;
     }
 
     if (n == id) {
-        atomic_inc(&count_here);
+        qatomic_inc(&count_here);
     } else {
-        atomic_inc(&count_other);
+        qatomic_inc(&count_other);
     }
 
     aio_co_schedule(ctx[n], co);
@@ -143,13 +143,13 @@ static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
 {
     g_assert(to_schedule[id] == NULL);
 
-    while (!atomic_mb_read(&now_stopping)) {
+    while (!qatomic_mb_read(&now_stopping)) {
         int n;
 
         n = g_test_rand_int_range(0, NUM_CONTEXTS);
         schedule_next(n);
 
-        atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
+        qatomic_mb_set(&to_schedule[id], qemu_coroutine_self());
         qemu_coroutine_yield();
         g_assert(to_schedule[id] == NULL);
     }
@@ -171,7 +171,7 @@ static void test_multi_co_schedule(int seconds)
 
     g_usleep(seconds * 1000000);
 
-    atomic_mb_set(&now_stopping, true);
+    qatomic_mb_set(&now_stopping, true);
     for (i = 0; i < NUM_CONTEXTS; i++) {
         ctx_run(i, finish_cb, NULL);
         to_schedule[i] = NULL;
@@ -202,7 +202,7 @@ static CoMutex comutex;
 
 static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
 {
-    while (!atomic_mb_read(&now_stopping)) {
+    while (!qatomic_mb_read(&now_stopping)) {
         qemu_co_mutex_lock(&comutex);
         counter++;
         qemu_co_mutex_unlock(&comutex);
@@ -212,9 +212,9 @@ static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
          * exits before the coroutine is woken up, causing a spurious
          * assertion failure.
          */
-        atomic_inc(&atomic_counter);
+        qatomic_inc(&atomic_counter);
     }
-    atomic_dec(&running);
+    qatomic_dec(&running);
 }
 
 static void test_multi_co_mutex(int threads, int seconds)
@@ -236,7 +236,7 @@ static void test_multi_co_mutex(int threads, int seconds)
 
     g_usleep(seconds * 1000000);
 
-    atomic_mb_set(&now_stopping, true);
+    qatomic_mb_set(&now_stopping, true);
     while (running > 0) {
         g_usleep(100000);
     }
@@ -296,9 +296,9 @@ static void mcs_mutex_lock(void)
 
     nodes[id].next = -1;
     nodes[id].locked = 1;
-    prev = atomic_xchg(&mutex_head, id);
+    prev = qatomic_xchg(&mutex_head, id);
     if (prev != -1) {
-        atomic_set(&nodes[prev].next, id);
+        qatomic_set(&nodes[prev].next, id);
         qemu_futex_wait(&nodes[id].locked, 1);
     }
 }
@@ -306,13 +306,13 @@ static void mcs_mutex_lock(void)
 static void mcs_mutex_unlock(void)
 {
     int next;
-    if (atomic_read(&nodes[id].next) == -1) {
-        if (atomic_read(&mutex_head) == id &&
-            atomic_cmpxchg(&mutex_head, id, -1) == id) {
+    if (qatomic_read(&nodes[id].next) == -1) {
+        if (qatomic_read(&mutex_head) == id &&
+            qatomic_cmpxchg(&mutex_head, id, -1) == id) {
            /* Last item in the list, exit. */
            return;
        }
-        while (atomic_read(&nodes[id].next) == -1) {
+        while (qatomic_read(&nodes[id].next) == -1) {
            /* mcs_mutex_lock did the xchg, but has not updated
             * nodes[prev].next yet.
             */
@@ -320,20 +320,20 @@ static void mcs_mutex_unlock(void)
     }
 
     /* Wake up the next in line. */
-    next = atomic_read(&nodes[id].next);
+    next = qatomic_read(&nodes[id].next);
     nodes[next].locked = 0;
     qemu_futex_wake(&nodes[next].locked, 1);
 }
 
 static void test_multi_fair_mutex_entry(void *opaque)
 {
-    while (!atomic_mb_read(&now_stopping)) {
+    while (!qatomic_mb_read(&now_stopping)) {
         mcs_mutex_lock();
         counter++;
         mcs_mutex_unlock();
-        atomic_inc(&atomic_counter);
+        qatomic_inc(&atomic_counter);
     }
-    atomic_dec(&running);
+    qatomic_dec(&running);
 }
 
 static void test_multi_fair_mutex(int threads, int seconds)
@@ -355,7 +355,7 @@ static void test_multi_fair_mutex(int threads, int seconds)
 
     g_usleep(seconds * 1000000);
 
-    atomic_mb_set(&now_stopping, true);
+    qatomic_mb_set(&now_stopping, true);
     while (running > 0) {
         g_usleep(100000);
     }
@@ -383,13 +383,13 @@ static QemuMutex mutex;
 
 static void test_multi_mutex_entry(void *opaque)
 {
-    while (!atomic_mb_read(&now_stopping)) {
+    while (!qatomic_mb_read(&now_stopping)) {
         qemu_mutex_lock(&mutex);
         counter++;
         qemu_mutex_unlock(&mutex);
-        atomic_inc(&atomic_counter);
+        qatomic_inc(&atomic_counter);
     }
-    atomic_dec(&running);
+    qatomic_dec(&running);
 }
 
 static void test_multi_mutex(int threads, int seconds)
@@ -411,7 +411,7 @@ static void test_multi_mutex(int threads, int seconds)
 
     g_usleep(seconds * 1000000);
 
-    atomic_mb_set(&now_stopping, true);
+    qatomic_mb_set(&now_stopping, true);
     while (running > 0) {
         g_usleep(100000);
     }
diff --git a/tests/test-fdmon-epoll.c b/tests/test-fdmon-epoll.c
new file mode 100644
index 0000000000..11fd8a2fa9
--- /dev/null
+++ b/tests/test-fdmon-epoll.c
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * fdmon-epoll tests
+ *
+ * Copyright (c) 2020 Red Hat, Inc.
+ */
+
+#include "qemu/osdep.h"
+#include "block/aio.h"
+#include "qapi/error.h"
+#include "qemu/main-loop.h"
+
+static AioContext *ctx;
+
+static void dummy_fd_handler(EventNotifier *notifier)
+{
+    event_notifier_test_and_clear(notifier);
+}
+
+static void add_event_notifiers(EventNotifier *notifiers, size_t n)
+{
+    for (size_t i = 0; i < n; i++) {
+        event_notifier_init(&notifiers[i], false);
+        aio_set_event_notifier(ctx, &notifiers[i], false,
+                               dummy_fd_handler, NULL);
+    }
+}
+
+static void remove_event_notifiers(EventNotifier *notifiers, size_t n)
+{
+    for (size_t i = 0; i < n; i++) {
+        aio_set_event_notifier(ctx, &notifiers[i], false, NULL, NULL);
+        event_notifier_cleanup(&notifiers[i]);
+    }
+}
+
+/* Check that fd handlers work when external clients are disabled */
+static void test_external_disabled(void)
+{
+    EventNotifier notifiers[100];
+
+    /* fdmon-epoll is only enabled when many fd handlers are registered */
+    add_event_notifiers(notifiers, G_N_ELEMENTS(notifiers));
+
+    event_notifier_set(&notifiers[0]);
+    assert(aio_poll(ctx, true));
+
+    aio_disable_external(ctx);
+    event_notifier_set(&notifiers[0]);
+    assert(aio_poll(ctx, true));
+    aio_enable_external(ctx);
+
+    remove_event_notifiers(notifiers, G_N_ELEMENTS(notifiers));
+}
+
+int main(int argc, char **argv)
+{
+    /*
+     * This code relies on the fact that fdmon-io_uring disables itself when
+     * the glib main loop is in use. The main loop uses fdmon-poll and upgrades
+     * to fdmon-epoll when the number of fds exceeds a threshold.
+     */
+    qemu_init_main_loop(&error_fatal);
+    ctx = qemu_get_aio_context();
+
+    while (g_main_context_iteration(NULL, false)) {
+        /* Do nothing */
+    }
+
+    g_test_init(&argc, &argv, NULL);
+    g_test_add_func("/fdmon-epoll/external-disabled", test_external_disabled);
+    return g_test_run();
+}
diff --git a/tests/test-iov.c b/tests/test-iov.c
index 458ca25099..9c415e2f1f 100644
--- a/tests/test-iov.c
+++ b/tests/test-iov.c
@@ -26,6 +26,12 @@ static void iov_free(struct iovec *iov, unsigned niov)
     g_free(iov);
 }
 
+static bool iov_equals(const struct iovec *a, const struct iovec *b,
+                       unsigned niov)
+{
+    return memcmp(a, b, sizeof(a[0]) * niov) == 0;
+}
+
 static void test_iov_bytes(struct iovec *iov, unsigned niov,
                            size_t offset, size_t bytes)
 {
@@ -335,6 +341,87 @@ static void test_discard_front(void)
     iov_free(iov, iov_cnt);
 }
 
+static void test_discard_front_undo(void)
+{
+    IOVDiscardUndo undo;
+    struct iovec *iov;
+    struct iovec *iov_tmp;
+    struct iovec *iov_orig;
+    unsigned int iov_cnt;
+    unsigned int iov_cnt_tmp;
+    size_t size;
+
+    /* Discard zero bytes */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_tmp = iov;
+    iov_cnt_tmp = iov_cnt;
+    iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, 0, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard more bytes than vector size */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_tmp = iov;
+    iov_cnt_tmp = iov_cnt;
+    size = iov_size(iov, iov_cnt);
+    iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, size + 1, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard entire vector */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_tmp = iov;
+    iov_cnt_tmp = iov_cnt;
+    size = iov_size(iov, iov_cnt);
+    iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, size, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard within first element */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_tmp = iov;
+    iov_cnt_tmp = iov_cnt;
+    size = g_test_rand_int_range(1, iov->iov_len);
+    iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, size, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard entire first element */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_tmp = iov;
+    iov_cnt_tmp = iov_cnt;
+    iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, iov->iov_len, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard within second element */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_tmp = iov;
+    iov_cnt_tmp = iov_cnt;
+    size = iov->iov_len + g_test_rand_int_range(1, iov[1].iov_len);
+    iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, size, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+}
+
 static void test_discard_back(void)
 {
     struct iovec *iov;
@@ -404,6 +491,82 @@ static void test_discard_back(void)
     iov_free(iov, iov_cnt);
 }
 
+static void test_discard_back_undo(void)
+{
+    IOVDiscardUndo undo;
+    struct iovec *iov;
+    struct iovec *iov_orig;
+    unsigned int iov_cnt;
+    unsigned int iov_cnt_tmp;
+    size_t size;
+
+    /* Discard zero bytes */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_cnt_tmp = iov_cnt;
+    iov_discard_back_undoable(iov, &iov_cnt_tmp, 0, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard more bytes than vector size */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_cnt_tmp = iov_cnt;
+    size = iov_size(iov, iov_cnt);
+    iov_discard_back_undoable(iov, &iov_cnt_tmp, size + 1, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard entire vector */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_cnt_tmp = iov_cnt;
+    size = iov_size(iov, iov_cnt);
+    iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard within last element */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_cnt_tmp = iov_cnt;
+    size = g_test_rand_int_range(1, iov[iov_cnt - 1].iov_len);
+    iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard entire last element */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_cnt_tmp = iov_cnt;
+    size = iov[iov_cnt - 1].iov_len;
+    iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+
+    /* Discard within second-to-last element */
+    iov_random(&iov, &iov_cnt);
+    iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt);
+    iov_cnt_tmp = iov_cnt;
+    size = iov[iov_cnt - 1].iov_len +
+           g_test_rand_int_range(1, iov[iov_cnt - 2].iov_len);
+    iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo);
+    iov_discard_undo(&undo);
+    assert(iov_equals(iov, iov_orig, iov_cnt));
+    g_free(iov_orig);
+    iov_free(iov, iov_cnt);
+}
+
 int main(int argc, char **argv)
 {
     g_test_init(&argc, &argv, NULL);
@@ -412,5 +575,7 @@ int main(int argc, char **argv)
     g_test_add_func("/basic/iov/io", test_io);
     g_test_add_func("/basic/iov/discard-front", test_discard_front);
     g_test_add_func("/basic/iov/discard-back", test_discard_back);
+    g_test_add_func("/basic/iov/discard-front-undo", test_discard_front_undo);
+    g_test_add_func("/basic/iov/discard-back-undo", test_discard_back_undo);
     return g_test_run();
 }
diff --git a/tests/test-logging.c b/tests/test-logging.c
index 8b1522cfed..ccb819f193 100644
--- a/tests/test-logging.c
+++ b/tests/test-logging.c
@@ -133,7 +133,7 @@ static void test_logfile_write(gconstpointer data)
      */
     qemu_set_log_filename(file_path, &error_abort);
     rcu_read_lock();
-    logfile = atomic_rcu_read(&qemu_logfile);
+    logfile = qatomic_rcu_read(&qemu_logfile);
     orig_fd = logfile->fd;
     g_assert(logfile && logfile->fd);
     fprintf(logfile->fd, "%s 1st write to file\n", __func__);
@@ -141,7 +141,7 @@ static void test_logfile_write(gconstpointer data)
 
     /* Change the logfile and ensure that the handle is still valid. */
     qemu_set_log_filename(file_path1, &error_abort);
-    logfile2 = atomic_rcu_read(&qemu_logfile);
+    logfile2 = qatomic_rcu_read(&qemu_logfile);
     g_assert(logfile->fd == orig_fd);
     g_assert(logfile2->fd != logfile->fd);
     fprintf(logfile->fd, "%s 2nd write to file\n", __func__);
diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
index 92be51ec50..49641e1936 100644
--- a/tests/test-rcu-list.c
+++ b/tests/test-rcu-list.c
@@ -106,7 +106,7 @@ static void reclaim_list_el(struct rcu_head *prcu)
     struct list_element *el = container_of(prcu, struct list_element, rcu);
     g_free(el);
     /* Accessed only from call_rcu thread. */
-    atomic_set_i64(&n_reclaims, n_reclaims + 1);
+    qatomic_set_i64(&n_reclaims, n_reclaims + 1);
 }
 
 #if TEST_LIST_TYPE == 1
@@ -172,16 +172,16 @@ static void *rcu_q_reader(void *arg)
     rcu_register_thread();
     *(struct rcu_reader_data **)arg = &rcu_reader;
 
-    atomic_inc(&nthreadsrunning);
-    while (atomic_read(&goflag) == GOFLAG_INIT) {
+    qatomic_inc(&nthreadsrunning);
+    while (qatomic_read(&goflag) == GOFLAG_INIT) {
         g_usleep(1000);
     }
 
-    while (atomic_read(&goflag) == GOFLAG_RUN) {
+    while (qatomic_read(&goflag) == GOFLAG_RUN) {
         rcu_read_lock();
         TEST_LIST_FOREACH_RCU(el, &Q_list_head, entry) {
             n_reads_local++;
-            if (atomic_read(&goflag) == GOFLAG_STOP) {
+            if (qatomic_read(&goflag) == GOFLAG_STOP) {
                 break;
             }
         }
@@ -207,12 +207,12 @@ static void *rcu_q_updater(void *arg)
     struct list_element *el, *prev_el;
 
     *(struct rcu_reader_data **)arg = &rcu_reader;
-    atomic_inc(&nthreadsrunning);
-    while (atomic_read(&goflag) == GOFLAG_INIT) {
+    qatomic_inc(&nthreadsrunning);
+    while (qatomic_read(&goflag) == GOFLAG_INIT) {
         g_usleep(1000);
     }
 
-    while (atomic_read(&goflag) == GOFLAG_RUN) {
+    while (qatomic_read(&goflag) == GOFLAG_RUN) {
         target_el = select_random_el(RCU_Q_LEN);
         j = 0;
         /* FOREACH_RCU could work here but let's use both macros */
@@ -226,7 +226,7 @@ static void *rcu_q_updater(void *arg)
                 break;
             }
         }
-        if (atomic_read(&goflag) == GOFLAG_STOP) {
+        if (qatomic_read(&goflag) == GOFLAG_STOP) {
             break;
         }
         target_el = select_random_el(RCU_Q_LEN);
@@ -248,7 +248,7 @@ static void *rcu_q_updater(void *arg)
     qemu_mutex_lock(&counts_mutex);
     n_nodes += n_nodes_local;
     n_updates += n_updates_local;
-    atomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
+    qatomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
     qemu_mutex_unlock(&counts_mutex);
     return NULL;
 }
@@ -271,13 +271,13 @@ static void rcu_qtest_init(void)
 
 static void rcu_qtest_run(int duration, int nreaders)
 {
     int nthreads = nreaders + 1;
-    while (atomic_read(&nthreadsrunning) < nthreads) {
+    while (qatomic_read(&nthreadsrunning) < nthreads) {
         g_usleep(1000);
     }
 
-    atomic_set(&goflag, GOFLAG_RUN);
+    qatomic_set(&goflag, GOFLAG_RUN);
     sleep(duration);
-    atomic_set(&goflag, GOFLAG_STOP);
+    qatomic_set(&goflag, GOFLAG_STOP);
     wait_all_threads();
 }
@@ -302,21 +302,23 @@ static void rcu_qtest(const char *test, int duration, int nreaders)
         n_removed_local++;
     }
     qemu_mutex_lock(&counts_mutex);
-    atomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
+    qatomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
     qemu_mutex_unlock(&counts_mutex);
     synchronize_rcu();
-    while (atomic_read_i64(&n_nodes_removed) > atomic_read_i64(&n_reclaims)) {
+    while (qatomic_read_i64(&n_nodes_removed) >
+           qatomic_read_i64(&n_reclaims)) {
         g_usleep(100);
         synchronize_rcu();
     }
     if (g_test_in_charge) {
-        g_assert_cmpint(atomic_read_i64(&n_nodes_removed), ==,
-                        atomic_read_i64(&n_reclaims));
+        g_assert_cmpint(qatomic_read_i64(&n_nodes_removed), ==,
+                        qatomic_read_i64(&n_reclaims));
     } else {
         printf("%s: %d readers; 1 updater; nodes read: " \
                "%lld, nodes removed: %"PRIi64"; nodes reclaimed: %"PRIi64"\n",
               test, nthreadsrunning - 1, n_reads,
-               atomic_read_i64(&n_nodes_removed), atomic_read_i64(&n_reclaims));
+               qatomic_read_i64(&n_nodes_removed),
+               qatomic_read_i64(&n_reclaims));
         exit(0);
     }
 }
diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c
index 0b675923f6..70dc6314a1 100644
--- a/tests/test-thread-pool.c
+++ b/tests/test-thread-pool.c
@@ -21,15 +21,15 @@ typedef struct {
 static int worker_cb(void *opaque)
 {
     WorkerTestData *data = opaque;
-    return atomic_fetch_inc(&data->n);
+    return qatomic_fetch_inc(&data->n);
 }
 
 static int long_cb(void *opaque)
 {
     WorkerTestData *data = opaque;
-    if (atomic_cmpxchg(&data->n, 0, 1) == 0) {
+    if (qatomic_cmpxchg(&data->n, 0, 1) == 0) {
         g_usleep(2000000);
-        atomic_or(&data->n, 2);
+        qatomic_or(&data->n, 2);
     }
     return 0;
 }
@@ -172,7 +172,7 @@ static void do_test_cancel(bool sync)
     /* Cancel the jobs that haven't been started yet. */
     num_canceled = 0;
     for (i = 0; i < 100; i++) {
-        if (atomic_cmpxchg(&data[i].n, 0, 4) == 0) {
+        if (qatomic_cmpxchg(&data[i].n, 0, 4) == 0) {
             data[i].ret = -ECANCELED;
             if (sync) {
                 bdrv_aio_cancel(data[i].aiocb);
@@ -186,7 +186,7 @@ static void do_test_cancel(bool sync)
     g_assert_cmpint(num_canceled, <, 100);
 
     for (i = 0; i < 100; i++) {
-        if (data[i].aiocb && atomic_read(&data[i].n) < 4) {
+        if (data[i].aiocb && qatomic_read(&data[i].n) < 4) {
             if (sync) {
                 /* Canceling the others will be a blocking operation. */
                 bdrv_aio_cancel(data[i].aiocb);
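
The iov_discard_undo() API added by util/iov and exercised in test-iov.c above pairs
each destructive iov_discard_front/back_undoable() call with a later
iov_discard_undo() that restores the vector. A minimal caller sketch, assuming only
the declarations visible in the tests; the qemu/iov.h header location and
process_without_header()/process_payload() are illustrative, not taken from this
commit:

    #include "qemu/osdep.h"
    #include "qemu/iov.h"   /* assumed location of IOVDiscardUndo and friends */

    /* hypothetical consumer of the trimmed vector, not a QEMU API */
    static void process_payload(struct iovec *iov, unsigned int iov_cnt)
    {
    }

    static void process_without_header(struct iovec *iov, unsigned int iov_cnt,
                                       size_t header_len)
    {
        IOVDiscardUndo undo;
        struct iovec *cur = iov;
        unsigned int cur_cnt = iov_cnt;

        /* destructively strip header_len bytes from the front of the vector */
        iov_discard_front_undoable(&cur, &cur_cnt, header_len, &undo);

        process_payload(cur, cur_cnt);

        /* put iov/iov_cnt back exactly as the caller handed them in */
        iov_discard_undo(&undo);
    }

This save-then-restore shape is what the virtio-blk and virtio-crypto fixes in this
pull rely on: a request's original elem->in_sg/out_sg must survive a temporary
iov_discard_*() so the element can be completed intact.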