author     Xiao Guangrong <xiaoguangrong@tencent.com>  2018-08-21 16:10:20 +0800
committer  Juan Quintela <quintela@redhat.com>         2018-08-22 12:34:11 +0200
commit     1d58872a910cdfe8df79be5ffb4c4c6c13768f40 (patch)
tree       e9512fe2fb45ad8555dcda61f2ee40314889e790 /migration/ram.c
parent     923709896b1b01fb982c93492ad01b233e6b6023 (diff)
migration: do not wait for free thread
Instead of putting the main thread to sleep to wait for a free compression thread, we can post the page out directly as a normal page, which reduces latency and uses CPUs more efficiently.

A parameter, compress-wait-thread, is introduced; it can be enabled if the user really wants the old behavior.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
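The change implements a common worker-pool handoff pattern: scan for an idle compression thread, hand it the page if one is found, and otherwise either block on a condition variable (the old behavior) or fall back to sending the page uncompressed. Below is a minimal sketch of that pattern, not the commit's code: it uses plain pthreads rather than QEMU's qemu_mutex/qemu_cond wrappers, and all names (struct worker, queue_compressed_page, NUM_WORKERS) are hypothetical stand-ins.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define NUM_WORKERS 4

/* Hypothetical per-worker state, standing in for QEMU's comp_param[].
 * Each mutex/cond is assumed to be initialized during setup with
 * pthread_mutex_init()/pthread_cond_init(); a worker sets done = true
 * and signals done_cond when it finishes compressing its page. */
struct worker {
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
    bool            done;   /* worker is idle; safe to hand it a page */
    const void     *page;   /* page currently assigned to this worker */
};

static struct worker workers[NUM_WORKERS];
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done_cond = PTHREAD_COND_INITIALIZER;

/*
 * Try to queue @page on an idle worker.  Returns 1 on success, -1 if
 * every worker is busy.  With @wait set (the old behavior, kept behind
 * the new compress-wait-thread parameter), sleep on done_cond until a
 * worker finishes, then rescan.
 */
static int queue_compressed_page(const void *page, bool wait)
{
    int pages = -1;

    pthread_mutex_lock(&done_lock);
retry:
    for (int i = 0; i < NUM_WORKERS; i++) {
        if (workers[i].done) {
            workers[i].done = false;
            pthread_mutex_lock(&workers[i].mutex);
            workers[i].page = page;                /* hand off the work */
            pthread_cond_signal(&workers[i].cond); /* wake the worker */
            pthread_mutex_unlock(&workers[i].mutex);
            pages = 1;
            break;
        }
    }
    if (pages < 0 && wait) {
        /* old behavior: block the main thread until a worker is free */
        pthread_cond_wait(&done_cond, &done_lock);
        goto retry;
    }
    pthread_mutex_unlock(&done_lock);
    return pages;
}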
Diffstat (limited to 'migration/ram.c')
-rw-r--r--  migration/ram.c  45
1 file changed, 26 insertions(+), 19 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 2f9e8bd7e0..498834bdbe 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1896,30 +1896,34 @@ static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                                            ram_addr_t offset)
 {
     int idx, thread_count, bytes_xmit = -1, pages = -1;
+    bool wait = migrate_compress_wait_thread();
 
     thread_count = migrate_compress_threads();
     qemu_mutex_lock(&comp_done_lock);
-    while (true) {
-        for (idx = 0; idx < thread_count; idx++) {
-            if (comp_param[idx].done) {
-                comp_param[idx].done = false;
-                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
-                qemu_mutex_lock(&comp_param[idx].mutex);
-                set_compress_params(&comp_param[idx], block, offset);
-                qemu_cond_signal(&comp_param[idx].cond);
-                qemu_mutex_unlock(&comp_param[idx].mutex);
-                pages = 1;
-                ram_counters.normal++;
-                ram_counters.transferred += bytes_xmit;
-                break;
-            }
-        }
-        if (pages > 0) {
+retry:
+    for (idx = 0; idx < thread_count; idx++) {
+        if (comp_param[idx].done) {
+            comp_param[idx].done = false;
+            bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
+            qemu_mutex_lock(&comp_param[idx].mutex);
+            set_compress_params(&comp_param[idx], block, offset);
+            qemu_cond_signal(&comp_param[idx].cond);
+            qemu_mutex_unlock(&comp_param[idx].mutex);
+            pages = 1;
+            ram_counters.normal++;
+            ram_counters.transferred += bytes_xmit;
             break;
-        } else {
-            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
         }
     }
+
+    /*
+     * wait for the free thread if the user specifies 'compress-wait-thread',
+     * otherwise we will post the page out in the main thread as normal page.
+     */
+    if (pages < 0 && wait) {
+        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
+        goto retry;
+    }
     qemu_mutex_unlock(&comp_done_lock);
 
     return pages;
@@ -2233,7 +2237,10 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
      * CPU resource.
      */
     if (block == rs->last_sent_block && save_page_use_compression(rs)) {
-        return compress_page_with_multi_thread(rs, block, offset);
+        res = compress_page_with_multi_thread(rs, block, offset);
+        if (res > 0) {
+            return res;
+        }
     } else if (migrate_use_multifd()) {
         return ram_save_multifd_page(rs, block, offset);
     }
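The second hunk is the caller-side half of the change: a negative result from compress_page_with_multi_thread() no longer means failure, it means no thread was free while compress-wait-thread is disabled, so ram_save_target_page() falls through to its normal-page path instead of returning. Continuing the hypothetical sketch above (send_normal_page() is an assumed stand-in for the uncompressed path, not a QEMU function):

/* Hypothetical stand-in for posting the page out uncompressed. */
static int send_normal_page(const void *page);

/* Caller-side fallback, mirroring the ram_save_target_page() hunk. */
static int save_target_page(const void *page, bool wait)
{
    int res = queue_compressed_page(page, wait);
    if (res > 0) {
        return res;  /* the page was queued on a compression thread */
    }
    /* No idle worker and waiting is disabled: send the page
     * uncompressed on the main thread rather than sleeping. */
    return send_normal_page(page);
}

The knob itself is exposed as the compress-wait-thread migration parameter (since QEMU 3.1), settable for example via migrate_set_parameter in HMP.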