/*
* Virtio GPU Device
*
* Copyright Red Hat, Inc. 2013-2014
*
* Authors:
* Dave Airlie <airlied@redhat.com>
* Gerd Hoffmann <kraxel@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2.
* See the COPYING file in the top-level directory.
*/
#ifndef HW_VIRTIO_GPU_H
#define HW_VIRTIO_GPU_H

#include "qemu/queue.h"
#include "ui/qemu-pixman.h"
#include "ui/console.h"
#include "hw/virtio/virtio.h"
#include "qemu/log.h"
#include "sysemu/vhost-user-backend.h"

#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_ids.h"
#include "qom/object.h"

#define TYPE_VIRTIO_GPU_BASE "virtio-gpu-base"
OBJECT_DECLARE_TYPE(VirtIOGPUBase, VirtIOGPUBaseClass,
                    VIRTIO_GPU_BASE)

#define TYPE_VIRTIO_GPU "virtio-gpu-device"
OBJECT_DECLARE_TYPE(VirtIOGPU, VirtIOGPUClass, VIRTIO_GPU)

#define TYPE_VIRTIO_GPU_GL "virtio-gpu-gl-device"
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUGL, VIRTIO_GPU_GL)

#define TYPE_VHOST_USER_GPU "vhost-user-gpu"
OBJECT_DECLARE_SIMPLE_TYPE(VhostUserGPU, VHOST_USER_GPU)

#define TYPE_VIRTIO_GPU_RUTABAGA "virtio-gpu-rutabaga-device"
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPURutabaga, VIRTIO_GPU_RUTABAGA)
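
/*
 * A guest-created resource: its id, geometry and format, the guest
 * memory backing it (addrs/iov), and the host-side state derived from
 * it, such as the pixman image used for 2D scanouts or the mapping of
 * a blob resource (blob/blob_size, dmabuf_fd).
 */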
struct virtio_gpu_simple_resource {
uint32_t resource_id;
uint32_t width;
uint32_t height;
uint32_t format;
uint64_t *addrs;
struct iovec *iov;
unsigned int iov_cnt;
uint32_t scanout_bitmask;
pixman_image_t *image;
qemu_pixman_shareable share_handle;
uint64_t hostmem;
uint64_t blob_size;
void *blob;
int dmabuf_fd;
uint8_t *remapped;
QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};
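
/*
 * Host view of a scanout framebuffer: pixel format, geometry, stride
 * and byte offset into the backing resource.
 */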
struct virtio_gpu_framebuffer {
pixman_format_code_t format;
uint32_t bytes_pp;
uint32_t width, height;
uint32_t stride;
uint32_t offset;
};
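
/*
 * Per-output (scanout) state: the QEMU console it drives, the current
 * display surface, the resource being scanned out and the cursor shown
 * on this output.
 */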
struct virtio_gpu_scanout {
QemuConsole *con;
DisplaySurface *ds;
uint32_t width, height;
int x, y;
int invalidate;
uint32_t resource_id;
struct virtio_gpu_update_cursor cursor;
QEMUCursor *current_cursor;
struct virtio_gpu_framebuffer fb;
};
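
/*
 * Output geometry requested by the UI for a scanout; used when filling
 * the display-info and EDID responses sent to the guest.
 */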
struct virtio_gpu_requested_state {
uint16_t width_mm, height_mm;
uint32_t width, height;
uint32_t refresh_rate;
int x, y;
};
enum virtio_gpu_base_conf_flags {
VIRTIO_GPU_FLAG_VIRGL_ENABLED = 1,
VIRTIO_GPU_FLAG_STATS_ENABLED,
VIRTIO_GPU_FLAG_EDID_ENABLED,
VIRTIO_GPU_FLAG_DMABUF_ENABLED,
VIRTIO_GPU_FLAG_BLOB_ENABLED,
VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED,
VIRTIO_GPU_FLAG_RUTABAGA_ENABLED,
VIRTIO_GPU_FLAG_VENUS_ENABLED,
};
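
/* Helpers to test individual feature bits in virtio_gpu_base_conf.flags. */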
#define virtio_gpu_virgl_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED))
#define virtio_gpu_stats_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED))
#define virtio_gpu_edid_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_EDID_ENABLED))
#define virtio_gpu_dmabuf_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED))
#define virtio_gpu_blob_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED))
#define virtio_gpu_context_init_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED))
#define virtio_gpu_rutabaga_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED))
#define virtio_gpu_hostmem_enabled(_cfg) \
(_cfg.hostmem > 0)
#define virtio_gpu_venus_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_VENUS_ENABLED))
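
/* Configuration shared by all virtio-gpu device variants. */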
struct virtio_gpu_base_conf {
uint32_t max_outputs;
uint32_t flags;
uint32_t xres;
uint32_t yres;
uint64_t hostmem;
};
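
/*
 * One in-flight control-queue request: the virtqueue element it arrived
 * in, the parsed request header, the error to report (if any) and
 * whether a response has already been sent.
 */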
struct virtio_gpu_ctrl_command {
VirtQueueElement elem;
VirtQueue *vq;
struct virtio_gpu_ctrl_hdr cmd_hdr;
uint32_t error;
bool finished;
QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
};
struct VirtIOGPUBase {
VirtIODevice parent_obj;
Error *migration_blocker;
struct virtio_gpu_base_conf conf;
struct virtio_gpu_config virtio_config;
const GraphicHwOps *hw_ops;
int renderer_blocked;
int enable;
MemoryRegion hostmem;
struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
int enabled_output_bitmask;
struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
};
struct VirtIOGPUBaseClass {
VirtioDeviceClass parent;
void (*gl_flushed)(VirtIOGPUBase *g);
};
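
/* qdev properties shared by the virtio-gpu-base device variants. */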
#define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf) \
DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \
DEFINE_PROP_BIT("edid", _state, _conf.flags, \
VIRTIO_GPU_FLAG_EDID_ENABLED, true), \
DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1280), \
DEFINE_PROP_UINT32("yres", _state, _conf.yres, 800)
typedef struct VGPUDMABuf {
QemuDmaBuf *buf;
uint32_t scanout_id;
QTAILQ_ENTRY(VGPUDMABuf) next;
} VGPUDMABuf;
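
/*
 * Core (2D) virtio-gpu device state: the control and cursor virtqueues
 * and their bottom halves, the lists of live resources, queued commands
 * and commands waiting on fences, statistics, and the dma-bufs
 * currently used for scanouts.
 */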
struct VirtIOGPU {
VirtIOGPUBase parent_obj;
uint8_t scanout_vmstate_version;
uint64_t conf_max_hostmem;
VirtQueue *ctrl_vq;
VirtQueue *cursor_vq;
QEMUBH *ctrl_bh;
QEMUBH *cursor_bh;
QEMUBH *reset_bh;
QemuCond reset_cond;
bool reset_finished;
QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) cmdq;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;
uint64_t hostmem;
bool processing_cmdq;
uint32_t inflight;
struct {
uint32_t max_inflight;
uint32_t requests;
uint32_t req_3d;
uint32_t bytes_3d;
} stats;
struct {
QTAILQ_HEAD(, VGPUDMABuf) bufs;
VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS];
} dmabuf;
GArray *capset_ids;
};
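
/*
 * Hooks overridden by the virtio-gpu variants (e.g. the GL and rutabaga
 * devices) to plug in their own command processing and resource
 * handling.
 */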
struct VirtIOGPUClass {
VirtIOGPUBaseClass parent;
void (*handle_ctrl)(VirtIODevice *vdev, VirtQueue *vq);
void (*process_cmd)(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
void (*update_cursor_data)(VirtIOGPU *g,
struct virtio_gpu_scanout *s,
uint32_t resource_id);
void (*resource_destroy)(VirtIOGPU *g,
struct virtio_gpu_simple_resource *res,
Error **errp);
};
/* VirtIOGPUGL renderer states */
typedef enum {
RS_START, /* starting state */
RS_INIT_FAILED, /* failed initialisation */
RS_INITED, /* initialised and working */
RS_RESET, /* inited and reset pending, moves to start after reset */
} RenderState;
struct VirtIOGPUGL {
struct VirtIOGPU parent_obj;
RenderState renderer_state;
QEMUTimer *fence_poll;
QEMUTimer *print_stats;
QEMUBH *cmdq_resume_bh;
};
struct VhostUserGPU {
VirtIOGPUBase parent_obj;
VhostUserBackend *vhost;
int vhost_gpu_fd; /* closed by the chardev */
CharBackend vhost_chr;
QemuDmaBuf *dmabuf[VIRTIO_GPU_MAX_SCANOUTS];
bool backend_blocked;
};
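
/*
 * Mapping slots used by the rutabaga device: each used slot tracks a
 * MemoryRegion for a blob resource mapped into the device's hostmem
 * region.
 */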
#define MAX_SLOTS 4096
struct MemoryRegionInfo {
int used;
MemoryRegion mr;
uint32_t resource_id;
};
struct rutabaga;
struct VirtIOGPURutabaga {
VirtIOGPU parent_obj;
struct MemoryRegionInfo memory_regions[MAX_SLOTS];
uint64_t capset_mask;
char *wayland_socket_path;
char *wsi;
bool headless;
uint32_t num_capsets;
struct rutabaga *rutabaga;
};
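
/*
 * Copy the fixed-size request struct for the current command from
 * cmd->elem.out_sg into 'out'.  On a short read this logs a guest
 * error and returns from the *calling* function, so it can only be
 * used in void command handlers that have 'cmd' in scope.  Typical
 * use (sketch):
 *
 *     struct virtio_gpu_resource_create_2d c2d;
 *     VIRTIO_GPU_FILL_CMD(c2d);
 */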
#define VIRTIO_GPU_FILL_CMD(out) do {                                  \
        size_t virtiogpufillcmd_s_ =                                   \
            iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0,         \
                       &out, sizeof(out));                             \
        if (virtiogpufillcmd_s_ != sizeof(out)) {                      \
            qemu_log_mask(LOG_GUEST_ERROR,                             \
                          "%s: command size incorrect %zu vs %zu\n",   \
                          __func__, virtiogpufillcmd_s_, sizeof(out)); \
            return;                                                    \
        }                                                              \
    } while (0)
/* virtio-gpu-base.c */
bool virtio_gpu_base_device_realize(DeviceState *qdev,
VirtIOHandleOutput ctrl_cb,
VirtIOHandleOutput cursor_cb,
Error **errp);
void virtio_gpu_base_device_unrealize(DeviceState *qdev);
void virtio_gpu_base_reset(VirtIOGPUBase *g);
void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
struct virtio_gpu_resp_display_info *dpy_info);
void virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
struct virtio_gpu_resp_edid *edid);
/* virtio-gpu.c */
struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
void virtio_gpu_ctrl_response(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd,
struct virtio_gpu_ctrl_hdr *resp,
size_t resp_len);
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd,
enum virtio_gpu_ctrl_type type);
void virtio_gpu_get_display_info(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_get_edid(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
uint32_t nr_entries, uint32_t offset,
struct virtio_gpu_ctrl_command *cmd,
uint64_t **addr, struct iovec **iov,
uint32_t *niov);
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
struct iovec *iov, uint32_t count);
void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
struct virtio_gpu_simple_resource *res);
void virtio_gpu_process_cmdq(VirtIOGPU *g);
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp);
void virtio_gpu_reset(VirtIODevice *vdev);
void virtio_gpu_simple_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
struct virtio_gpu_scanout *s,
uint32_t resource_id);
/**
 * virtio_gpu_scanout_blob_to_fb() - fill out fb based on scanout data
 * @fb: the frame-buffer descriptor to fill out
 * @ss: the scanout blob data
 * @blob_size: size of the scanout blob data
 *
 * Checks that the blob is large enough for the described frame,
 * taking the stride into account.
 *
 * Returns true on success; otherwise logs a guest error and returns false.
 */
bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb,
struct virtio_gpu_set_scanout_blob *ss,
uint64_t blob_size);
/* virtio-gpu-udmabuf.c */
bool virtio_gpu_have_udmabuf(void);
void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res);
void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res);
int virtio_gpu_update_dmabuf(VirtIOGPU *g,
uint32_t scanout_id,
struct virtio_gpu_simple_resource *res,
struct virtio_gpu_framebuffer *fb,
struct virtio_gpu_rect *r);
void virtio_gpu_update_scanout(VirtIOGPU *g,
uint32_t scanout_id,
struct virtio_gpu_simple_resource *res,
struct virtio_gpu_framebuffer *fb,
struct virtio_gpu_rect *r);
void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id);
/* virtio-gpu-3d.c */
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g);
#endif