path: root/migration/ram.c
author    Juan Quintela <quintela@redhat.com>  2016-01-14 16:52:55 +0100
committer Juan Quintela <quintela@redhat.com>  2017-09-22 14:11:22 +0200
commit    f986c3d2569c6aa5cd03a6b8bbb5371e4b608d3c (patch)
tree      1580e728ca8e0636f89e14f993c6319b29306bce /migration/ram.c
parent    0fb86605eac50d488b1a8d4a9d6986defc3adca9 (diff)
migration: Create multifd migration threads
Creation of the threads, nothing inside yet.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

--

Use pointers instead of long array names
Move to semaphores instead of condition variables, as Paolo suggested
Put all the state inside one struct
Use a counter for the number of threads created; needed during cancellation
Add error return to thread creation
Add id field
Rename functions to multifd_save/load_setup/cleanup
Change recv parameters to a pointer to struct
Change back to a struct
Use Error * for _cleanup
Diffstat (limited to 'migration/ram.c')
-rw-r--r--  migration/ram.c  202
1 file changed, 202 insertions(+), 0 deletions(-)
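
The changelog above notes the move from condition variables to semaphores for waking the worker threads. As a reading aid for the patch below, here is a minimal, self-contained sketch of that quit/wakeup handshake. The Worker type and the worker_* names are illustrative only, not part of this patch; the qemu-thread calls are the same ones the patch uses.

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    typedef struct {
        QemuThread thread;
        QemuSemaphore sem;
        QemuMutex mutex;
        bool quit;
    } Worker;

    static void *worker_thread(void *opaque)
    {
        Worker *w = opaque;

        while (true) {
            qemu_mutex_lock(&w->mutex);
            if (w->quit) {
                /* quit is only read/written under the mutex */
                qemu_mutex_unlock(&w->mutex);
                break;
            }
            qemu_mutex_unlock(&w->mutex);
            qemu_sem_wait(&w->sem);    /* sleep until posted */
        }
        return NULL;
    }

    static void worker_start(Worker *w)
    {
        qemu_mutex_init(&w->mutex);
        qemu_sem_init(&w->sem, 0);
        w->quit = false;
        qemu_thread_create(&w->thread, "worker", worker_thread, w,
                           QEMU_THREAD_JOINABLE);
    }

    static void worker_stop(Worker *w)
    {
        qemu_mutex_lock(&w->mutex);
        w->quit = true;
        qemu_mutex_unlock(&w->mutex);
        qemu_sem_post(&w->sem);    /* wake the thread so it sees quit */
        qemu_thread_join(&w->thread);
    }

Unlike a condition variable, a semaphore post is not lost when the thread is not yet waiting, which keeps the cancellation path simple: set quit under the mutex, post the semaphore, join.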
diff --git a/migration/ram.c b/migration/ram.c
index e18b3e2d4f..bb369e72d4 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -356,6 +356,208 @@ static void compress_threads_save_setup(void)
}
}
+/* Multiple fds */
+
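+/* State of one send thread: sem wakes it up, quit (accessed under
+ * mutex) tells it to exit */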
+struct MultiFDSendParams {
+ uint8_t id;
+ char *name;
+ QemuThread thread;
+ QemuSemaphore sem;
+ QemuMutex mutex;
+ bool quit;
+};
+typedef struct MultiFDSendParams MultiFDSendParams;
+
+struct {
+ MultiFDSendParams *params;
+ /* number of created threads */
+ int count;
+} *multifd_send_state;
+
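+/* Ask every send thread to exit; they are joined afterwards in
+ * multifd_save_cleanup() */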
+static void terminate_multifd_send_threads(Error *errp)
+{
+ int i;
+
+ for (i = 0; i < multifd_send_state->count; i++) {
+ MultiFDSendParams *p = &multifd_send_state->params[i];
+
+ qemu_mutex_lock(&p->mutex);
+ p->quit = true;
+ qemu_sem_post(&p->sem);
+ qemu_mutex_unlock(&p->mutex);
+ }
+}
+
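+/* Join all send threads and free the shared multifd send state */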
+int multifd_save_cleanup(Error **errp)
+{
+ int i;
+ int ret = 0;
+
+ if (!migrate_use_multifd()) {
+ return 0;
+ }
+ terminate_multifd_send_threads(NULL);
+ for (i = 0; i < multifd_send_state->count; i++) {
+ MultiFDSendParams *p = &multifd_send_state->params[i];
+
+ qemu_thread_join(&p->thread);
+ qemu_mutex_destroy(&p->mutex);
+ qemu_sem_destroy(&p->sem);
+ g_free(p->name);
+ p->name = NULL;
+ }
+ g_free(multifd_send_state->params);
+ multifd_send_state->params = NULL;
+ g_free(multifd_send_state);
+ multifd_send_state = NULL;
+ return ret;
+}
+
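+/* Thread body: no work to do yet, just sleep on the semaphore until
+ * asked to quit */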
+static void *multifd_send_thread(void *opaque)
+{
+ MultiFDSendParams *p = opaque;
+
+ while (true) {
+ qemu_mutex_lock(&p->mutex);
+ if (p->quit) {
+ qemu_mutex_unlock(&p->mutex);
+ break;
+ }
+ qemu_mutex_unlock(&p->mutex);
+ qemu_sem_wait(&p->sem);
+ }
+
+ return NULL;
+}
+
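+/* Create one send thread per multifd channel */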
+int multifd_save_setup(void)
+{
+ int thread_count;
+ uint8_t i;
+
+ if (!migrate_use_multifd()) {
+ return 0;
+ }
+ thread_count = migrate_multifd_channels();
+ multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
+ multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
+ multifd_send_state->count = 0;
+ for (i = 0; i < thread_count; i++) {
+ MultiFDSendParams *p = &multifd_send_state->params[i];
+
+ qemu_mutex_init(&p->mutex);
+ qemu_sem_init(&p->sem, 0);
+ p->quit = false;
+ p->id = i;
+ p->name = g_strdup_printf("multifdsend_%d", i);
+ qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
+ QEMU_THREAD_JOINABLE);
+
+ multifd_send_state->count++;
+ }
+ return 0;
+}
+
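+/* State of one receive thread, mirroring MultiFDSendParams */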
+struct MultiFDRecvParams {
+ uint8_t id;
+ char *name;
+ QemuThread thread;
+ QemuSemaphore sem;
+ QemuMutex mutex;
+ bool quit;
+};
+typedef struct MultiFDRecvParams MultiFDRecvParams;
+
+struct {
+ MultiFDRecvParams *params;
+ /* number of created threads */
+ int count;
+} *multifd_recv_state;
+
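+/* Ask every receive thread to exit; they are joined afterwards in
+ * multifd_load_cleanup() */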
+static void terminate_multifd_recv_threads(Error *errp)
+{
+ int i;
+
+ for (i = 0; i < multifd_recv_state->count; i++) {
+ MultiFDRecvParams *p = &multifd_recv_state->params[i];
+
+ qemu_mutex_lock(&p->mutex);
+ p->quit = true;
+ qemu_sem_post(&p->sem);
+ qemu_mutex_unlock(&p->mutex);
+ }
+}
+
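+/* Join all receive threads and free the shared multifd receive state */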
+int multifd_load_cleanup(Error **errp)
+{
+ int i;
+ int ret = 0;
+
+ if (!migrate_use_multifd()) {
+ return 0;
+ }
+ terminate_multifd_recv_threads(NULL);
+ for (i = 0; i < multifd_recv_state->count; i++) {
+ MultiFDRecvParams *p = &multifd_recv_state->params[i];
+
+ qemu_thread_join(&p->thread);
+ qemu_mutex_destroy(&p->mutex);
+ qemu_sem_destroy(&p->sem);
+ g_free(p->name);
+ p->name = NULL;
+ }
+ g_free(multifd_recv_state->params);
+ multifd_recv_state->params = NULL;
+ g_free(multifd_recv_state);
+ multifd_recv_state = NULL;
+
+ return ret;
+}
+
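+/* Thread body: placeholder loop until receive work is added */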
+static void *multifd_recv_thread(void *opaque)
+{
+ MultiFDRecvParams *p = opaque;
+
+ while (true) {
+ qemu_mutex_lock(&p->mutex);
+ if (p->quit) {
+ qemu_mutex_unlock(&p->mutex);
+ break;
+ }
+ qemu_mutex_unlock(&p->mutex);
+ qemu_sem_wait(&p->sem);
+ }
+
+ return NULL;
+}
+
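+/* Create one receive thread per multifd channel */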
+int multifd_load_setup(void)
+{
+ int thread_count;
+ uint8_t i;
+
+ if (!migrate_use_multifd()) {
+ return 0;
+ }
+ thread_count = migrate_multifd_channels();
+ multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
+ multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
+ multifd_recv_state->count = 0;
+ for (i = 0; i < thread_count; i++) {
+ MultiFDRecvParams *p = &multifd_recv_state->params[i];
+
+ qemu_mutex_init(&p->mutex);
+ qemu_sem_init(&p->sem, 0);
+ p->quit = false;
+ p->id = i;
+ p->name = g_strdup_printf("multifdrecv_%d", i);
+ qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
+ QEMU_THREAD_JOINABLE);
+ multifd_recv_state->count++;
+ }
+ return 0;
+}
+
/**
* save_page_header: write page header to wire
*