Diffstat (limited to 'contrib/libvhost-user/libvhost-user.c')
-rw-r--r--  contrib/libvhost-user/libvhost-user.c | 103
1 file changed, 103 insertions, 0 deletions
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 9f039b707e..d8ee7a23a3 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -138,6 +138,7 @@ vu_request_to_string(unsigned int req)
REQ(VHOST_USER_GPU_SET_SOCKET),
REQ(VHOST_USER_VRING_KICK),
REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
+ REQ(VHOST_USER_ADD_MEM_REG),
REQ(VHOST_USER_MAX),
};
#undef REQ
@@ -663,6 +664,106 @@ generate_faults(VuDev *dev) {
}
static bool
+vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
+ int i;
+ bool track_ramblocks = dev->postcopy_listening;
+ VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
+ VuDevRegion *dev_region = &dev->regions[dev->nregions];
+ void *mmap_addr;
+
+ /*
+ * If we are in postcopy mode and we receive a u64 payload with a 0 value,
+ * we know all the postcopy client bases have been received, and we
+ * should start generating faults.
+ */
+ if (track_ramblocks &&
+ vmsg->size == sizeof(vmsg->payload.u64) &&
+ vmsg->payload.u64 == 0) {
+ (void)generate_faults(dev);
+ return false;
+ }
+
+ DPRINT("Adding region: %d\n", dev->nregions);
+ DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
+ msg_region->guest_phys_addr);
+ DPRINT(" memory_size: 0x%016"PRIx64"\n",
+ msg_region->memory_size);
+ DPRINT(" userspace_addr 0x%016"PRIx64"\n",
+ msg_region->userspace_addr);
+ DPRINT(" mmap_offset 0x%016"PRIx64"\n",
+ msg_region->mmap_offset);
+
+ dev_region->gpa = msg_region->guest_phys_addr;
+ dev_region->size = msg_region->memory_size;
+ dev_region->qva = msg_region->userspace_addr;
+ dev_region->mmap_offset = msg_region->mmap_offset;
+
+ /*
+ * We don't use the offset argument of mmap() since the
+ * mapped address has to be page aligned, and we use huge
+ * pages.
+ */
+ if (track_ramblocks) {
+ /*
+ * In postcopy we're using PROT_NONE here to catch anyone
+ * accessing it before we userfault.
+ */
+ mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
+ PROT_NONE, MAP_SHARED,
+ vmsg->fds[0], 0);
+ } else {
+ mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
+ PROT_READ | PROT_WRITE, MAP_SHARED, vmsg->fds[0],
+ 0);
+ }
+
+ if (mmap_addr == MAP_FAILED) {
+ vu_panic(dev, "region mmap error: %s", strerror(errno));
+ } else {
+ dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
+ DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
+ dev_region->mmap_addr);
+ }
+
+ close(vmsg->fds[0]);
+
+ if (track_ramblocks) {
+ /*
+ * Return the address to QEMU so that it can translate the ufd
+ * fault addresses back.
+ */
+ msg_region->userspace_addr = (uintptr_t)(mmap_addr +
+ dev_region->mmap_offset);
+
+ /* Send the message back to qemu with the addresses filled in. */
+ vmsg->fd_num = 0;
+ if (!vu_send_reply(dev, dev->sock, vmsg)) {
+ vu_panic(dev, "failed to respond to add-mem-region for postcopy");
+ return false;
+ }
+
+ DPRINT("Successfully added new region in postcopy\n");
+ dev->nregions++;
+ return false;
+
+ } else {
+ for (i = 0; i < dev->max_queues; i++) {
+ if (dev->vq[i].vring.desc) {
+ if (map_ring(dev, &dev->vq[i])) {
+ vu_panic(dev, "remapping queue %d for new memory region",
+ i);
+ }
+ }
+ }
+
+ DPRINT("Successfully added new region\n");
+ dev->nregions++;
+ vmsg_set_reply_u64(vmsg, 0);
+ return true;
+ }
+}
+
+static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
int i;
@@ -1668,6 +1769,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
return vu_handle_vring_kick(dev, vmsg);
case VHOST_USER_GET_MAX_MEM_SLOTS:
return vu_handle_get_max_memslots(dev, vmsg);
+ case VHOST_USER_ADD_MEM_REG:
+ return vu_add_mem_reg(dev, vmsg);
default:
vmsg_close_fds(vmsg);
vu_panic(dev, "Unhandled request: %d", vmsg->request);
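
Note (not part of the patch): the per-region fields the new handler records (gpa, size, mmap_addr, mmap_offset) are what libvhost-user later uses to translate guest physical addresses into host pointers, e.g. in vu_gpa_to_va(). Below is a minimal standalone sketch of that translation under the same assumptions; DemoRegion and demo_gpa_to_va are illustrative names, not identifiers from the library.

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for libvhost-user's VuDevRegion, holding the
 * fields vu_add_mem_reg() fills in for each newly added region. */
typedef struct {
    uint64_t gpa;          /* guest physical address of the region */
    uint64_t size;         /* region size in bytes                 */
    uint64_t mmap_addr;    /* host address returned by mmap()      */
    uint64_t mmap_offset;  /* offset of the data within the mmap   */
} DemoRegion;

/* Translate a guest physical address to a host pointer, mirroring the
 * arithmetic vu_gpa_to_va() performs over dev->regions: the usable data
 * starts at mmap_addr + mmap_offset, so a GPA inside [gpa, gpa + size)
 * maps to that base plus (guest_addr - gpa). */
static void *
demo_gpa_to_va(DemoRegion *regions, unsigned nregions, uint64_t guest_addr)
{
    for (unsigned i = 0; i < nregions; i++) {
        DemoRegion *r = &regions[i];

        if (guest_addr >= r->gpa && guest_addr < r->gpa + r->size) {
            return (void *)(uintptr_t)(guest_addr - r->gpa +
                                       r->mmap_addr + r->mmap_offset);
        }
    }
    return NULL; /* address not covered by any registered region */
}

The mmap_offset term is needed because, as the comment in the handler explains, the file is always mapped from offset 0 (to keep the mapping page aligned for huge pages) and the region's own offset is applied only when addresses are computed.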