author | Dr. David Alan Gilbert <dgilbert@redhat.com> | 2018-03-12 17:21:07 +0000
committer | Michael S. Tsirkin <mst@redhat.com> | 2018-03-20 05:03:28 +0200
commit | 55d754b307f6e10503dcf2e2136e6e0aef8c80f5 (patch)
tree | e5cf51b84a2fdc9c1ae8f1d465f1da7a2ae99d36 /contrib/libvhost-user
parent | 6864a7b5aced6d8d9b287b92db8d7a996ea2e8a3 (diff)
postcopy+vhost-user: Split set_mem_table for postcopy
Split the set_mem_table routines in both qemu and libvhost-user,
because the postcopy versions will diverge significantly once the
later patches in this series are applied. This patch introduces no
functional change; it only performs the split.
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
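The change below has a simple shape: the existing set_mem_table handler checks whether the front end has switched the back end into postcopy listening mode and, if so, hands the whole message to a postcopy-specific twin of itself. The standalone sketch below only illustrates that shape; the Dev and Msg types and the handler bodies are stand-ins, not code from this patch (the real handlers are in the diff that follows).

/* Minimal standalone sketch of the dispatch shape this patch introduces.
 * Dev and Msg are stand-ins for VuDev and VhostUserMsg; only the
 * postcopy_listening check mirrors the real code. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool postcopy_listening;  /* set once the front end enters postcopy listen mode */
} Dev;

typedef struct {
    int nregions;             /* stand-in for the memory-table payload */
} Msg;

static bool set_mem_table_postcopy(Dev *dev, Msg *msg)
{
    /* Postcopy-specific mapping would go here; later patches in the
     * series make this path diverge from the normal one. */
    printf("postcopy path: %d region(s)\n", msg->nregions);
    return false;             /* no reply needed, as in libvhost-user */
}

static bool set_mem_table(Dev *dev, Msg *msg)
{
    if (dev->postcopy_listening) {
        return set_mem_table_postcopy(dev, msg);
    }
    printf("normal path: %d region(s)\n", msg->nregions);
    return false;
}

int main(void)
{
    Dev dev = { .postcopy_listening = true };
    Msg msg = { .nregions = 2 };
    set_mem_table(&dev, &msg);
    return 0;
}

Keeping the check at the top of the normal handler lets the later patches grow the postcopy path without touching the precopy one.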
Diffstat (limited to 'contrib/libvhost-user')
-rw-r--r-- | contrib/libvhost-user/libvhost-user.c | 53
1 file changed, 53 insertions, 0 deletions
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index e53b1953df..b2de8ed0a8 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -449,6 +449,55 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
 }
 
 static bool
+vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
+{
+    int i;
+    VhostUserMemory *memory = &vmsg->payload.memory;
+    dev->nregions = memory->nregions;
+    /* TODO: Postcopy specific code */
+    DPRINT("Nregions: %d\n", memory->nregions);
+    for (i = 0; i < dev->nregions; i++) {
+        void *mmap_addr;
+        VhostUserMemoryRegion *msg_region = &memory->regions[i];
+        VuDevRegion *dev_region = &dev->regions[i];
+
+        DPRINT("Region %d\n", i);
+        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
+               msg_region->guest_phys_addr);
+        DPRINT("    memory_size:     0x%016"PRIx64"\n",
+               msg_region->memory_size);
+        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
+               msg_region->userspace_addr);
+        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
+               msg_region->mmap_offset);
+
+        dev_region->gpa = msg_region->guest_phys_addr;
+        dev_region->size = msg_region->memory_size;
+        dev_region->qva = msg_region->userspace_addr;
+        dev_region->mmap_offset = msg_region->mmap_offset;
+
+        /* We don't use offset argument of mmap() since the
+         * mapped address has to be page aligned, and we use huge
+         * pages. */
+        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
+                         PROT_READ | PROT_WRITE, MAP_SHARED,
+                         vmsg->fds[i], 0);
+
+        if (mmap_addr == MAP_FAILED) {
+            vu_panic(dev, "region mmap error: %s", strerror(errno));
+        } else {
+            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
+            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
+                   dev_region->mmap_addr);
+        }
+
+        close(vmsg->fds[i]);
+    }
+
+    return false;
+}
+
+static bool
 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
     int i;
@@ -464,6 +513,10 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
     }
     dev->nregions = memory->nregions;
 
+    if (dev->postcopy_listening) {
+        return vu_set_mem_table_exec_postcopy(dev, vmsg);
+    }
+
     DPRINT("Nregions: %d\n", memory->nregions);
     for (i = 0; i < dev->nregions; i++) {
         void *mmap_addr;
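A detail the postcopy variant inherits from the existing mapping code: the whole size + mmap_offset span is mapped from file offset 0, because the mapped address has to be page aligned and huge pages may be in use, so the usable guest memory starts mmap_offset bytes into the mapping. The standalone sketch below shows the address arithmetic that follows from that layout; Region is a local stand-in for VuDevRegion and region_gpa_to_va is an illustrative helper, not libvhost-user's API.

/* Standalone sketch (not part of the patch): shows how a guest physical
 * address is translated to a local pointer for a region mapped the way
 * the handlers above map it, i.e. (size + mmap_offset) bytes from file
 * offset 0. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t gpa;          /* guest physical address of the region  */
    uint64_t size;         /* region size in bytes                  */
    uint64_t mmap_offset;  /* offset of guest memory inside the fd  */
    uint64_t mmap_addr;    /* local address returned by mmap()      */
} Region;

/* The usable guest memory starts mmap_offset bytes into the mapping,
 * so translation adds both mmap_offset and the offset within the
 * region's guest physical range. */
static void *region_gpa_to_va(const Region *r, uint64_t gpa)
{
    if (gpa < r->gpa || gpa - r->gpa >= r->size) {
        return NULL;       /* address not covered by this region */
    }
    return (void *)(uintptr_t)(r->mmap_addr + r->mmap_offset +
                               (gpa - r->gpa));
}

int main(void)
{
    /* Pretend mmap() returned 0x7f0000000000 for a 2 MB region whose
     * guest memory begins 0x1000 bytes into the shared file. */
    Region r = { .gpa = 0x40000000, .size = 0x200000,
                 .mmap_offset = 0x1000, .mmap_addr = 0x7f0000000000ULL };

    printf("gpa 0x40001234 -> %p\n", region_gpa_to_va(&r, 0x40001234));
    return 0;
}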