aboutsummaryrefslogtreecommitdiff
path: root/include/hw/virtio/vhost-vdpa.h
blob: 11ac14085a6edc5c51f1598992bad2892df37957 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
/*
 * vhost-vdpa.h
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef HW_VIRTIO_VHOST_VDPA_H
#define HW_VIRTIO_VHOST_VDPA_H

#include <gmodule.h>

#include "hw/virtio/vhost-iova-tree.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"

/*
 * ASID dedicated to map guest's addresses.  If SVQ is disabled it maps GPA to
 * qemu's IOVA.  If SVQ is enabled it maps also the SVQ vring here
 */
#define VHOST_VDPA_GUEST_PA_ASID 0

/*
 * Per-virtqueue host notifier: the MemoryRegion exposed to the guest and
 * the host address backing it (presumably mmap()ed from the vdpa device
 * fd — confirm against hw/virtio/vhost-vdpa.c).
 */
typedef struct VhostVDPAHostNotifier {
    MemoryRegion mr;   /* region mapped into the guest for doorbell writes */
    void *addr;        /* host virtual address backing @mr */
} VhostVDPAHostNotifier;

/*
 * Info shared by all vhost_vdpa device models belonging to the same
 * backend device (e.g. the queue pairs of one vhost-vdpa NIC).
 */
typedef struct vhost_vdpa_shared {
    int device_fd;                           /* fd of the vhost-vdpa char device */
    struct vhost_vdpa_iova_range iova_range; /* usable IOVA range reported by the device */

    /* IOVA mapping used by the Shadow Virtqueue */
    VhostIOVATree *iova_tree;

    /* Copy of backend features */
    uint64_t backend_cap;

    /* True while an IOTLB batch has been started but not yet committed */
    bool iotlb_batch_begin_sent;

    /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
    bool shadow_data;
} VhostVDPAShared;

/* Per-device-model vhost-vdpa state (one instance per vhost_dev). */
typedef struct vhost_vdpa {
    int index;                    /* index of this model within the backend device */
    uint32_t msg_type;            /* IOTLB message version sent to the kernel
                                   * (presumably VHOST_IOTLB_MSG_V2 — confirm) */
    uint32_t address_space_id;    /* ASID this model's mappings are sent under;
                                   * see VHOST_VDPA_GUEST_PA_ASID above */
    MemoryListener listener;      /* tracks guest memory to mirror into the device IOTLB */
    uint64_t acked_features;      /* features acked by the guest driver */
    bool shadow_vqs_enabled;      /* whether SVQ is in use for this device */
    /* Device suspended successfully */
    bool suspended;
    VhostVDPAShared *shared;      /* state shared with sibling models (not owned) */
    GPtrArray *shadow_vqs;        /* array of VhostShadowVirtqueue, one per vq */
    const VhostShadowVirtqueueOps *shadow_vq_ops; /* optional SVQ callbacks */
    void *shadow_vq_ops_opaque;   /* opaque argument for @shadow_vq_ops */
    struct vhost_dev *dev;        /* back-pointer to the generic vhost device */
    Error *migration_blocker;     /* set when the device cannot be migrated */
    VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; /* per-vq host notifiers */
    QLIST_HEAD(, vdpa_iommu) iommu_list; /* vIOMMU regions this device listens on */
    IOMMUNotifier n;              /* notifier registered with the vIOMMU */
} VhostVDPA;

/*
 * Query the usable IOVA range of the vdpa device behind @fd, storing it in
 * *@iova_range.  Returns 0 on success, negative on error (NOTE(review):
 * exact error convention lives in hw/virtio/vhost-vdpa.c — confirm there).
 */
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
/* Enable the vring at index @idx on device @v. */
int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);

/*
 * Map [@iova, @iova + @size) to host address @vaddr in address space @asid
 * of the device's IOTLB; @readonly requests a read-only mapping.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly);
/* Remove the IOTLB mapping covering [@iova, @iova + @size) in @asid. */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size);

/*
 * One vIOMMU memory region a vhost-vdpa device is registered with; linked
 * into vhost_vdpa.iommu_list.
 */
typedef struct vdpa_iommu {
    struct vhost_vdpa *dev;       /* owning device (back-pointer, not owned) */
    IOMMUMemoryRegion *iommu_mr;  /* the vIOMMU region being listened on */
    hwaddr iommu_offset;          /* offset of @iommu_mr within the address space */
    IOMMUNotifier n;              /* notifier registered on @iommu_mr */
    QLIST_ENTRY(vdpa_iommu) iommu_next; /* linkage for vhost_vdpa.iommu_list */
} VDPAIOMMUState;


#endif