/*
 * Image streaming
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block_int.h"

enum {
    /*
     * Size of data buffer for populating the image file.  This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

typedef struct StreamBlockJob {
    BlockJob common;
    BlockDriverState *base;
} StreamBlockJob;
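
/* Populate a region of the image file by reading it through the backing
 * chain with copy-on-read semantics, so the data ends up written into bs.
 */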
static int coroutine_fn stream_populate(BlockDriverState *bs,
                                        int64_t sector_num, int nb_sectors,
                                        void *buf)
{
    struct iovec iov = {
        .iov_base = buf,
        .iov_len  = nb_sectors * BDRV_SECTOR_SIZE,
    };
    QEMUIOVector qiov;

    qemu_iovec_init_external(&qiov, &iov, 1);

    /* Copy-on-read the unallocated clusters */
    return bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, &qiov);
}
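
/* Coroutine that performs the actual streaming: scan the image from start
 * to end, populate regions that are still unallocated, and finally drop
 * the backing file reference so the image stands alone.
 */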
static void coroutine_fn stream_run(void *opaque)
{
    StreamBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    BlockDriverState *base = s->base;
    int64_t sector_num, end;
    int ret = 0;
    int n;
    void *buf;

    s->common.len = bdrv_getlength(bs);
    if (s->common.len < 0) {
        block_job_complete(&s->common, s->common.len);
        return;
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);

    /* Turn on copy-on-read for the whole block device so that guest read
     * requests help us make progress.  Only do this when copying the entire
     * backing chain since the copy-on-read operation does not take base into
     * account.
     */
    if (!base) {
        bdrv_enable_copy_on_read(bs);
    }

    for (sector_num = 0; sector_num < end; sector_num += n) {
        if (block_job_is_cancelled(&s->common)) {
            break;
        }

        /* TODO rate-limit */
        /* Note that even when no rate limit is applied we need to yield with
         * no pending I/O here so that qemu_aio_flush() is able to return.
         */
        co_sleep_ns(rt_clock, 0);

        /* Stream only regions that are not yet allocated in the top image */
        ret = bdrv_co_is_allocated(bs, sector_num,
                                   STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
        trace_stream_one_iteration(s, sector_num, n, ret);
        if (ret == 0) {
            ret = stream_populate(bs, sector_num, n, buf);
        }
        if (ret < 0) {
            break;
        }

        /* Publish progress */
        s->common.offset += n * BDRV_SECTOR_SIZE;
    }

    if (!base) {
        bdrv_disable_copy_on_read(bs);
    }

    /* The image is now self-contained, so drop the backing file reference
     * from the image metadata.
     */
    if (sector_num == end && ret == 0) {
        ret = bdrv_change_backing_file(bs, NULL, NULL);
    }

    qemu_vfree(buf);
    block_job_complete(&s->common, ret);
}

static BlockJobType stream_job_type = {
    .instance_size = sizeof(StreamBlockJob),
    .job_type      = "stream",
};
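
/* Start an image streaming job on bs.  base, if non-NULL, is the image in
 * the backing chain that should remain as the backing file after streaming.
 * Returns 0 on success or -EBUSY if a block job is already active on bs.
 */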
int stream_start(BlockDriverState *bs, BlockDriverState *base,
                 BlockDriverCompletionFunc *cb, void *opaque)
{
    StreamBlockJob *s;
    Coroutine *co;

    s = block_job_create(&stream_job_type, bs, cb, opaque);
    if (!s) {
        return -EBUSY; /* bs must already be in use */
    }

    s->base = base;

    co = qemu_coroutine_create(stream_run);
    trace_stream_start(bs, base, s, co, opaque);
    qemu_coroutine_enter(co, s);
    return 0;
}
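
/* Example caller (sketch; stream_done is a hypothetical completion callback,
 * not part of this file):
 *
 *   static void stream_done(void *opaque, int ret)
 *   {
 *       // ret is the job's final return code
 *   }
 *
 *   stream_start(bs, NULL, stream_done, NULL);  // flatten the whole chain
 */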