| | | |
|---|---|---|
| author | lirans@il.ibm.com <lirans@il.ibm.com> | 2009-11-02 15:40:41 +0200 |
| committer | Anthony Liguori <aliguori@us.ibm.com> | 2009-11-17 08:03:31 -0600 |
| commit | 7cd1e32a860895ccca89eb90a0226efbcd969b55 (patch) | |
| tree | 3b2c8712ea67b77d1a7a4b86a532744f9daa2aac /block.c | |
| parent | 127338e6107c0763a56451cc8da489349a1e82f7 (diff) | |
Expose a mechanism to trace block writes
To support live migration without shared storage we need to be able to trace
writes to disk while migrating. This patch exposes dirty block tracking per
device, to be polled by the upper layer; a usage sketch follows below.
Changes from v4:
- Register dirty tracking for each block device.
- Minor coding style issues.
- block.c will now manage a dirty bitmap per device once
bdrv_set_dirty_tracking() is called. The bitmap is polled by the upper
layer (block-migration.c).
Signed-off-by: Liran Schour <lirans@il.ibm.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
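The block-migration.c consumer itself is not part of this diff. As a rough, illustrative sketch only (it relies on nothing beyond the interface added by this patch plus the existing bdrv_getlength(), assumes 512-byte sectors, and the function name sync_dirty_chunks is made up), an upper layer could drain the dirty bitmap on each migration iteration roughly like this:

```c
#include "block.h"   /* QEMU block layer declarations (bdrv_*) */

/* Hypothetical polling loop -- not the actual block-migration.c code. */
static void sync_dirty_chunks(BlockDriverState *bs)
{
    const int chunk_sectors = bdrv_get_sectors_per_chunk();
    int64_t total_sectors = bdrv_getlength(bs) >> 9;  /* bytes -> 512-byte sectors */
    int64_t sector;

    for (sector = 0; sector < total_sectors; sector += chunk_sectors) {
        if (bdrv_get_dirty(bs, sector)) {
            /* ...read this chunk and send it over the migration channel... */
            bdrv_reset_dirty(bs, sector, chunk_sectors);
        }
    }
}
```

Tracking would be switched on with bdrv_set_dirty_tracking(bs, 1) when migration of the device starts and switched off with bdrv_set_dirty_tracking(bs, 0) once the device has been fully transferred.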
Diffstat (limited to 'block.c')
| | | |
|---|---|---|
| -rw-r--r-- | block.c | 78 |

1 file changed, 77 insertions(+), 1 deletion(-)
```diff
@@ -43,6 +43,7 @@
 
 #define SECTOR_BITS 9
 #define SECTOR_SIZE (1 << SECTOR_BITS)
+#define SECTORS_PER_DIRTY_CHUNK 8
 
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
@@ -641,6 +642,18 @@ int bdrv_read(BlockDriverState *bs, int64_t sector_num,
     return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
 }
 
+static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
+                             int nb_sectors, int dirty)
+{
+    int64_t start, end;
+    start = sector_num / SECTORS_PER_DIRTY_CHUNK;
+    end = (sector_num + nb_sectors) / SECTORS_PER_DIRTY_CHUNK;
+
+    for(; start <= end; start++) {
+        bs->dirty_bitmap[start] = dirty;
+    }
+}
+
 /* Return < 0 if error. Important errors are:
   -EIO         generic I/O error (may happen for all errors)
   -ENOMEDIUM   No media inserted.
@@ -657,7 +670,11 @@ int bdrv_write(BlockDriverState *bs, int64_t sector_num,
         return -EACCES;
     if (bdrv_check_request(bs, sector_num, nb_sectors))
         return -EIO;
-
+
+    if(bs->dirty_tracking) {
+        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
+    }
+
     return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
 }
 
@@ -1203,6 +1220,11 @@ int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
         return -ENOTSUP;
     if (bdrv_check_request(bs, sector_num, nb_sectors))
         return -EIO;
+
+    if(bs->dirty_tracking) {
+        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
+    }
+
     return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
 }
 
@@ -1400,6 +1422,10 @@ BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
     if (bdrv_check_request(bs, sector_num, nb_sectors))
         return NULL;
 
+    if(bs->dirty_tracking) {
+        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
+    }
+
     ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                cb, opaque);
 
@@ -1930,7 +1956,57 @@ BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
     return NULL;
 }
 
+
+
 void *qemu_blockalign(BlockDriverState *bs, size_t size)
 {
     return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
 }
+
+void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
+{
+    int64_t bitmap_size;
+    if(enable) {
+        if(bs->dirty_tracking == 0) {
+            int64_t i;
+            uint8_t test;
+            bitmap_size = (bdrv_getlength(bs) >> SECTOR_BITS);
+            bitmap_size /= SECTORS_PER_DIRTY_CHUNK;
+            bitmap_size++;
+
+            bs->dirty_bitmap = qemu_mallocz(bitmap_size);
+
+            bs->dirty_tracking = enable;
+            for(i = 0; i < bitmap_size; i++) test = bs->dirty_bitmap[i];
+        }
+    } else {
+        if(bs->dirty_tracking != 0) {
+            qemu_free(bs->dirty_bitmap);
+            bs->dirty_tracking = enable;
+        }
+    }
+}
+
+int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
+{
+    int64_t chunk = sector / (int64_t)SECTORS_PER_DIRTY_CHUNK;
+
+    if(bs->dirty_bitmap != NULL &&
+       (sector << SECTOR_BITS) <= bdrv_getlength(bs)) {
+        return bs->dirty_bitmap[chunk];
+    } else {
+        return 0;
+    }
+}
+
+void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
+                      int nr_sectors)
+{
+    set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
+}
+
+int bdrv_get_sectors_per_chunk(void)
+{
+    /* size must be 2^x */
+    return SECTORS_PER_DIRTY_CHUNK;
+}
```
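For a rough sense of the memory overhead (a back-of-the-envelope figure, not something stated in the patch): the bitmap holds one byte per chunk of SECTORS_PER_DIRTY_CHUNK = 8 sectors, i.e. per 4 KiB of guest disk, so bdrv_set_dirty_tracking() allocates roughly one byte for every 4 KiB of device size. The standalone snippet below just mirrors that sizing arithmetic; the 10 GiB device length is an arbitrary example.

```c
/* Mirrors the bitmap sizing in bdrv_set_dirty_tracking(); the 10 GiB
 * device length is an example value, not taken from the patch. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_BITS             9   /* 512-byte sectors, as in block.c */
#define SECTORS_PER_DIRTY_CHUNK 8   /* one bitmap byte covers 4 KiB    */

int main(void)
{
    int64_t device_len = 10LL << 30;   /* example: 10 GiB disk */
    int64_t bitmap_size =
        (device_len >> SECTOR_BITS) / SECTORS_PER_DIRTY_CHUNK + 1;

    /* Prints 2621441 -- about 2.5 MiB of bitmap for a 10 GiB device. */
    printf("bitmap size: %lld bytes\n", (long long)bitmap_size);
    return 0;
}
```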