69 files changed, 2932 insertions(+), 1140 deletions(-)
@@ -4396,55 +4396,65 @@ void bdrv_img_create(const char *filename, const char *fmt, backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); - // The size for the image must always be specified, with one exception: - // If we are using a backing file, we can obtain the size from there + /* The size for the image must always be specified, unless we have a backing + * file and we have not been forbidden from opening it. */ size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); - if (size == -1) { - if (backing_file) { - BlockDriverState *bs; - char *full_backing = g_new0(char, PATH_MAX); - int64_t size; - int back_flags; - QDict *backing_options = NULL; - - bdrv_get_full_backing_filename_from_filename(filename, backing_file, - full_backing, PATH_MAX, - &local_err); - if (local_err) { - g_free(full_backing); - goto out; - } + if (backing_file && !(flags & BDRV_O_NO_BACKING)) { + BlockDriverState *bs; + char *full_backing = g_new0(char, PATH_MAX); + int back_flags; + QDict *backing_options = NULL; + + bdrv_get_full_backing_filename_from_filename(filename, backing_file, + full_backing, PATH_MAX, + &local_err); + if (local_err) { + g_free(full_backing); + goto out; + } - /* backing files always opened read-only */ - back_flags = flags; - back_flags &= ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); + /* backing files always opened read-only */ + back_flags = flags; + back_flags &= ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); - if (backing_fmt) { - backing_options = qdict_new(); - qdict_put_str(backing_options, "driver", backing_fmt); - } + if (backing_fmt) { + backing_options = qdict_new(); + qdict_put_str(backing_options, "driver", backing_fmt); + } - bs = bdrv_open(full_backing, NULL, backing_options, back_flags, - &local_err); - g_free(full_backing); - if (!bs) { - goto out; - } - size = bdrv_getlength(bs); - if (size < 0) { - error_setg_errno(errp, -size, "Could not get size of '%s'", - backing_file); - bdrv_unref(bs); - goto out; + bs = bdrv_open(full_backing, NULL, backing_options, back_flags, + &local_err); + g_free(full_backing); + if (!bs && size != -1) { + /* Couldn't open BS, but we have a size, so it's nonfatal */ + warn_reportf_err(local_err, + "Could not verify backing image. 
" + "This may become an error in future versions.\n"); + local_err = NULL; + } else if (!bs) { + /* Couldn't open bs, do not have size */ + error_append_hint(&local_err, + "Could not open backing image to determine size.\n"); + goto out; + } else { + if (size == -1) { + /* Opened BS, have no size */ + size = bdrv_getlength(bs); + if (size < 0) { + error_setg_errno(errp, -size, "Could not get size of '%s'", + backing_file); + bdrv_unref(bs); + goto out; + } + qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size, &error_abort); } - - qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size, &error_abort); - bdrv_unref(bs); - } else { - error_setg(errp, "Image creation needs a size parameter"); - goto out; } + } /* (backing_file && !(flags & BDRV_O_NO_BACKING)) */ + + if (size == -1) { + error_setg(errp, "Image creation needs a size parameter"); + goto out; } if (!quiet) { diff --git a/block/block-backend.c b/block/block-backend.c index fe3542b3f8..968438c149 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -83,7 +83,6 @@ static const AIOCBInfo block_backend_aiocb_info = { static void drive_info_del(DriveInfo *dinfo); static BlockBackend *bdrv_first_blk(BlockDriverState *bs); -static char *blk_get_attached_dev_id(BlockBackend *blk); /* All BlockBackends */ static QTAILQ_HEAD(, BlockBackend) block_backends = @@ -343,7 +342,7 @@ void blk_unref(BlockBackend *blk) * Behaves similarly to blk_next() but iterates over all BlockBackends, even the * ones which are hidden (i.e. are not referenced by the monitor). */ -static BlockBackend *blk_all_next(BlockBackend *blk) +BlockBackend *blk_all_next(BlockBackend *blk) { return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&block_backends); @@ -726,7 +725,7 @@ void *blk_get_attached_dev(BlockBackend *blk) /* Return the qdev ID, or if no ID is assigned the QOM path, of the block * device attached to the BlockBackend. */ -static char *blk_get_attached_dev_id(BlockBackend *blk) +char *blk_get_attached_dev_id(BlockBackend *blk) { DeviceState *dev; diff --git a/block/commit.c b/block/commit.c index 13143608f8..5cc910f567 100644 --- a/block/commit.c +++ b/block/commit.c @@ -90,7 +90,9 @@ static void commit_complete(BlockJob *job, void *opaque) /* Make sure overlay_bs and top stay around until bdrv_set_backing_hd() */ bdrv_ref(top); - bdrv_ref(overlay_bs); + if (overlay_bs) { + bdrv_ref(overlay_bs); + } /* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before * the normal backing chain can be restored. 
*/ diff --git a/block/qapi.c b/block/qapi.c index 080eb8f115..95b2e2daa5 100644 --- a/block/qapi.c +++ b/block/qapi.c @@ -322,11 +322,21 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, { BlockInfo *info = g_malloc0(sizeof(*info)); BlockDriverState *bs = blk_bs(blk); + char *qdev; + info->device = g_strdup(blk_name(blk)); info->type = g_strdup("unknown"); info->locked = blk_dev_is_medium_locked(blk); info->removable = blk_dev_has_removable_media(blk); + qdev = blk_get_attached_dev_id(blk); + if (qdev && *qdev) { + info->has_qdev = true; + info->qdev = qdev; + } else { + g_free(qdev); + } + if (blk_dev_has_tray(blk)) { info->has_tray_open = true; info->tray_open = blk_dev_is_tray_open(blk); @@ -462,8 +472,14 @@ BlockInfoList *qmp_query_block(Error **errp) BlockBackend *blk; Error *local_err = NULL; - for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { - BlockInfoList *info = g_malloc0(sizeof(*info)); + for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) { + BlockInfoList *info; + + if (!*blk_name(blk) && !blk_get_attached_dev(blk)) { + continue; + } + + info = g_malloc0(sizeof(*info)); bdrv_query_info(blk, &info->value, &local_err); if (local_err) { error_propagate(errp, local_err); diff --git a/block/throttle-groups.c b/block/throttle-groups.c index da2b490c38..890bfded3f 100644 --- a/block/throttle-groups.c +++ b/block/throttle-groups.c @@ -61,6 +61,7 @@ typedef struct ThrottleGroup { QLIST_HEAD(, BlockBackendPublic) head; BlockBackend *tokens[2]; bool any_timer_armed[2]; + QEMUClockType clock_type; /* These two are protected by the global throttle_groups_lock */ unsigned refcount; @@ -98,6 +99,12 @@ ThrottleState *throttle_group_incref(const char *name) if (!tg) { tg = g_new0(ThrottleGroup, 1); tg->name = g_strdup(name); + tg->clock_type = QEMU_CLOCK_REALTIME; + + if (qtest_enabled()) { + /* For testing block IO throttling only */ + tg->clock_type = QEMU_CLOCK_VIRTUAL; + } qemu_mutex_init(&tg->lock); throttle_init(&tg->ts); QLIST_INIT(&tg->head); @@ -310,7 +317,7 @@ static void schedule_next_request(BlockBackend *blk, bool is_write) token = blk; } else { ThrottleTimers *tt = &blk_get_public(token)->throttle_timers; - int64_t now = qemu_clock_get_ns(tt->clock_type); + int64_t now = qemu_clock_get_ns(tg->clock_type); timer_mod(tt->timers[is_write], now); tg->any_timer_armed[is_write] = true; } @@ -419,18 +426,10 @@ void throttle_group_restart_blk(BlockBackend *blk) void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg) { BlockBackendPublic *blkp = blk_get_public(blk); - ThrottleTimers *tt = &blkp->throttle_timers; ThrottleState *ts = blkp->throttle_state; ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts); qemu_mutex_lock(&tg->lock); - /* throttle_config() cancels the timers */ - if (timer_pending(tt->timers[0])) { - tg->any_timer_armed[0] = false; - } - if (timer_pending(tt->timers[1])) { - tg->any_timer_armed[1] = false; - } - throttle_config(ts, tt, cfg); + throttle_config(ts, tg->clock_type, cfg); qemu_mutex_unlock(&tg->lock); throttle_group_restart_blk(blk); @@ -497,13 +496,6 @@ void throttle_group_register_blk(BlockBackend *blk, const char *groupname) BlockBackendPublic *blkp = blk_get_public(blk); ThrottleState *ts = throttle_group_incref(groupname); ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts); - int clock_type = QEMU_CLOCK_REALTIME; - - if (qtest_enabled()) { - /* For testing block IO throttling only */ - clock_type = QEMU_CLOCK_VIRTUAL; - } - blkp->throttle_state = ts; qemu_mutex_lock(&tg->lock); @@ -518,7 +510,7 @@ 
void throttle_group_register_blk(BlockBackend *blk, const char *groupname) throttle_timers_init(&blkp->throttle_timers, blk_get_aio_context(blk), - clock_type, + tg->clock_type, read_timer_cb, write_timer_cb, blk); diff --git a/block/vmdk.c b/block/vmdk.c index 24d71b5982..0fc97391a6 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -242,10 +242,11 @@ static void vmdk_free_last_extent(BlockDriverState *bs) s->extents = g_renew(VmdkExtent, s->extents, s->num_extents); } -static uint32_t vmdk_read_cid(BlockDriverState *bs, int parent) +/* Return -ve errno, or 0 on success and write CID into *pcid. */ +static int vmdk_read_cid(BlockDriverState *bs, int parent, uint32_t *pcid) { char *desc; - uint32_t cid = 0xffffffff; + uint32_t cid; const char *p_name, *cid_str; size_t cid_str_size; BDRVVmdkState *s = bs->opaque; @@ -254,8 +255,7 @@ static uint32_t vmdk_read_cid(BlockDriverState *bs, int parent) desc = g_malloc0(DESC_SIZE); ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE); if (ret < 0) { - g_free(desc); - return 0; + goto out; } if (parent) { @@ -268,13 +268,21 @@ static uint32_t vmdk_read_cid(BlockDriverState *bs, int parent) desc[DESC_SIZE - 1] = '\0'; p_name = strstr(desc, cid_str); - if (p_name != NULL) { - p_name += cid_str_size; - sscanf(p_name, "%" SCNx32, &cid); + if (p_name == NULL) { + ret = -EINVAL; + goto out; } + p_name += cid_str_size; + if (sscanf(p_name, "%" SCNx32, &cid) != 1) { + ret = -EINVAL; + goto out; + } + *pcid = cid; + ret = 0; +out: g_free(desc); - return cid; + return ret; } static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid) @@ -322,7 +330,10 @@ static int vmdk_is_cid_valid(BlockDriverState *bs) if (!s->cid_checked && bs->backing) { BlockDriverState *p_bs = bs->backing->bs; - cur_pcid = vmdk_read_cid(p_bs, 0); + if (vmdk_read_cid(p_bs, 0, &cur_pcid) != 0) { + /* read failure: report as not valid */ + return 0; + } if (s->parent_cid != cur_pcid) { /* CID not valid */ return 0; @@ -975,8 +986,14 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags, if (ret) { goto fail; } - s->cid = vmdk_read_cid(bs, 0); - s->parent_cid = vmdk_read_cid(bs, 1); + ret = vmdk_read_cid(bs, 0, &s->cid); + if (ret) { + goto fail; + } + ret = vmdk_read_cid(bs, 1, &s->parent_cid); + if (ret) { + goto fail; + } qemu_co_mutex_init(&s->lock); /* Disable migration when VMDK images are used */ @@ -2008,8 +2025,11 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp) ret = -EINVAL; goto exit; } - parent_cid = vmdk_read_cid(blk_bs(blk), 0); + ret = vmdk_read_cid(blk_bs(blk), 0, &parent_cid); blk_unref(blk); + if (ret) { + goto exit; + } snprintf(parent_desc_line, BUF_SIZE, "parentFileNameHint=\"%s\"", backing_file); } diff --git a/block/vpc.c b/block/vpc.c index 8057d42a23..10e6519d78 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -460,17 +460,23 @@ static int vpc_reopen_prepare(BDRVReopenState *state, /* * Returns the absolute byte offset of the given sector in the image file. * If the sector is not allocated, -1 is returned instead. + * If an error occurred trying to write an updated block bitmap back to + * the file, -2 is returned, and the error value is written to *err. + * This can only happen for a write operation. * * The parameter write must be 1 if the offset will be used for a write * operation (the block bitmaps is updated then), 0 otherwise. + * If write is true then err must not be NULL. 
*/ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset, - bool write) + bool write, int *err) { BDRVVPCState *s = bs->opaque; uint64_t bitmap_offset, block_offset; uint32_t pagetable_index, offset_in_block; + assert(!(write && err == NULL)); + pagetable_index = offset / s->block_size; offset_in_block = offset % s->block_size; @@ -487,10 +493,15 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset, correctness. */ if (write && (s->last_bitmap_offset != bitmap_offset)) { uint8_t bitmap[s->bitmap_size]; + int r; s->last_bitmap_offset = bitmap_offset; memset(bitmap, 0xff, s->bitmap_size); - bdrv_pwrite_sync(bs->file, bitmap_offset, bitmap, s->bitmap_size); + r = bdrv_pwrite_sync(bs->file, bitmap_offset, bitmap, s->bitmap_size); + if (r < 0) { + *err = r; + return -2; + } } return block_offset; @@ -561,7 +572,7 @@ static int64_t alloc_block(BlockDriverState* bs, int64_t offset) if (ret < 0) goto fail; - return get_image_offset(bs, offset, false); + return get_image_offset(bs, offset, false, NULL); fail: s->free_data_block_offset -= (s->block_size + s->bitmap_size); @@ -601,7 +612,7 @@ vpc_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, qemu_iovec_init(&local_qiov, qiov->niov); while (bytes > 0) { - image_offset = get_image_offset(bs, offset, false); + image_offset = get_image_offset(bs, offset, false, NULL); n_bytes = MIN(bytes, s->block_size - (offset % s->block_size)); if (image_offset == -1) { @@ -650,7 +661,11 @@ vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, qemu_iovec_init(&local_qiov, qiov->niov); while (bytes > 0) { - image_offset = get_image_offset(bs, offset, true); + image_offset = get_image_offset(bs, offset, true, &ret); + if (image_offset == -2) { + /* Failed to write block bitmap: can't proceed with write */ + goto fail; + } n_bytes = MIN(bytes, s->block_size - (offset % s->block_size)); if (image_offset == -1) { @@ -702,7 +717,7 @@ static int64_t coroutine_fn vpc_co_get_block_status(BlockDriverState *bs, qemu_co_mutex_lock(&s->lock); - offset = get_image_offset(bs, sector_num << BDRV_SECTOR_BITS, false); + offset = get_image_offset(bs, sector_num << BDRV_SECTOR_BITS, false, NULL); start = offset; allocated = (offset != -1); *pnum = 0; @@ -727,7 +742,8 @@ static int64_t coroutine_fn vpc_co_get_block_status(BlockDriverState *bs, if (nb_sectors == 0) { break; } - offset = get_image_offset(bs, sector_num << BDRV_SECTOR_BITS, false); + offset = get_image_offset(bs, sector_num << BDRV_SECTOR_BITS, false, + NULL); } while (offset == -1); qemu_co_mutex_unlock(&s->lock); diff --git a/block/vvfat.c b/block/vvfat.c index 4dae790203..a9e207f7f0 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -71,6 +71,17 @@ void nonono(const char* file, int line, const char* msg) { #endif +/* bootsector OEM name. 
see related compatibility problems at: + * https://jdebp.eu/FGA/volume-boot-block-oem-name-field.html + * http://seasip.info/Misc/oemid.html + */ +#define BOOTSECTOR_OEM_NAME "MSWIN4.1" + +#define DIR_DELETED 0xe5 +#define DIR_KANJI DIR_DELETED +#define DIR_KANJI_FAKE 0x05 +#define DIR_FREE 0x00 + /* dynamic array functions */ typedef struct array_t { char* pointer; @@ -104,6 +115,7 @@ static inline int array_ensure_allocated(array_t* array, int index) array->pointer = g_realloc(array->pointer, new_size); if (!array->pointer) return -1; + memset(array->pointer + array->size, 0, new_size - array->size); array->size = new_size; array->next = index + 1; } @@ -466,7 +478,7 @@ static direntry_t *create_long_filename(BDRVVVFATState *s, const char *filename) static char is_free(const direntry_t* direntry) { - return direntry->name[0]==0xe5 || direntry->name[0]==0x00; + return direntry->name[0] == DIR_DELETED || direntry->name[0] == DIR_FREE; } static char is_volume_label(const direntry_t* direntry) @@ -487,7 +499,7 @@ static char is_short_name(const direntry_t* direntry) static char is_directory(const direntry_t* direntry) { - return direntry->attributes & 0x10 && direntry->name[0] != 0xe5; + return direntry->attributes & 0x10 && direntry->name[0] != DIR_DELETED; } static inline char is_dot(const direntry_t* direntry) @@ -537,7 +549,7 @@ static direntry_t *create_short_filename(BDRVVVFATState *s, const gchar *p, *last_dot = NULL; gunichar c; bool lossy_conversion = false; - char tail[11]; + char tail[8]; if (!entry) { return NULL; @@ -589,8 +601,8 @@ static direntry_t *create_short_filename(BDRVVVFATState *s, } } - if (entry->name[0] == 0xe5) { - entry->name[0] = 0x05; + if (entry->name[0] == DIR_KANJI) { + entry->name[0] = DIR_KANJI_FAKE; } /* numeric-tail generation */ @@ -602,7 +614,8 @@ static direntry_t *create_short_filename(BDRVVVFATState *s, for (i = lossy_conversion ? 1 : 0; i < 999999; i++) { direntry_t *entry1; if (i > 0) { - int len = sprintf(tail, "~%d", i); + int len = snprintf(tail, sizeof(tail), "~%u", (unsigned)i); + assert(len <= 7); memcpy(entry->name + MIN(j, 8 - len), tail, len); } for (entry1 = array_get(&(s->directory), directory_start); @@ -1023,7 +1036,7 @@ static int init_directories(BDRVVVFATState* s, bootsector->jump[0]=0xeb; bootsector->jump[1]=0x3e; bootsector->jump[2]=0x90; - memcpy(bootsector->name, "MSWIN4.1", 8); + memcpy(bootsector->name, BOOTSECTOR_OEM_NAME, 8); bootsector->sector_size=cpu_to_le16(0x200); bootsector->sectors_per_cluster=s->sectors_per_cluster; bootsector->reserved_sectors=cpu_to_le16(1); @@ -1658,6 +1671,7 @@ typedef struct { * filename length is 0x3f * 13 bytes. 
*/ unsigned char name[0x3f * 13 + 1]; + gunichar2 name2[0x3f * 13 + 1]; int checksum, len; int sequence_number; } long_file_name; @@ -1679,16 +1693,21 @@ static int parse_long_name(long_file_name* lfn, return 1; if (pointer[0] & 0x40) { + /* first entry; do some initialization */ lfn->sequence_number = pointer[0] & 0x3f; lfn->checksum = pointer[13]; lfn->name[0] = 0; lfn->name[lfn->sequence_number * 13] = 0; - } else if ((pointer[0] & 0x3f) != --lfn->sequence_number) + } else if ((pointer[0] & 0x3f) != --lfn->sequence_number) { + /* not the expected sequence number */ return -1; - else if (pointer[13] != lfn->checksum) + } else if (pointer[13] != lfn->checksum) { + /* not the expected checksum */ return -2; - else if (pointer[12] || pointer[26] || pointer[27]) + } else if (pointer[12] || pointer[26] || pointer[27]) { + /* invalid zero fields */ return -3; + } offset = 13 * (lfn->sequence_number - 1); for (i = 0, j = 1; i < 13; i++, j+=2) { @@ -1697,16 +1716,29 @@ static int parse_long_name(long_file_name* lfn, else if (j == 26) j = 28; - if (pointer[j+1] == 0) - lfn->name[offset + i] = pointer[j]; - else if (pointer[j+1] != 0xff || (pointer[0] & 0x40) == 0) - return -4; - else - lfn->name[offset + i] = 0; + if (pointer[j] == 0 && pointer[j + 1] == 0) { + /* end of long file name */ + break; + } + gunichar2 c = (pointer[j + 1] << 8) + pointer[j]; + lfn->name2[offset + i] = c; } - if (pointer[0] & 0x40) - lfn->len = offset + strlen((char*)lfn->name + offset); + if (pointer[0] & 0x40) { + /* first entry; set len */ + lfn->len = offset + i; + } + if ((pointer[0] & 0x3f) == 0x01) { + /* last entry; finalize entry */ + glong olen; + gchar *utf8 = g_utf16_to_utf8(lfn->name2, lfn->len, NULL, &olen, NULL); + if (!utf8) { + return -4; + } + lfn->len = olen; + memcpy(lfn->name, utf8, olen + 1); + g_free(utf8); + } return 0; } @@ -1722,12 +1754,14 @@ static int parse_short_name(BDRVVVFATState* s, for (j = 7; j >= 0 && direntry->name[j] == ' '; j--); for (i = 0; i <= j; i++) { - if (direntry->name[i] <= ' ' || direntry->name[i] > 0x7f) + uint8_t c = direntry->name[i]; + if (c != to_valid_short_char(c)) { return -1; - else if (s->downcase_short_names) + } else if (s->downcase_short_names) { lfn->name[i] = qemu_tolower(direntry->name[i]); - else + } else { lfn->name[i] = direntry->name[i]; + } } for (j = 2; j >= 0 && direntry->name[8 + j] == ' '; j--) { @@ -1737,7 +1771,7 @@ static int parse_short_name(BDRVVVFATState* s, lfn->name[i + j + 1] = '\0'; for (;j >= 0; j--) { uint8_t c = direntry->name[8 + j]; - if (c <= ' ' || c > 0x7f) { + if (c != to_valid_short_char(c)) { return -2; } else if (s->downcase_short_names) { lfn->name[i + j] = qemu_tolower(c); @@ -1748,8 +1782,8 @@ static int parse_short_name(BDRVVVFATState* s, } else lfn->name[i + j + 1] = '\0'; - if (lfn->name[0] == 0x05) { - lfn->name[0] = 0xe5; + if (lfn->name[0] == DIR_KANJI_FAKE) { + lfn->name[0] = DIR_KANJI; } lfn->len = strlen((char*)lfn->name); @@ -2955,7 +2989,6 @@ DLOG(checkpoint()); /* * Some sanity checks: * - do not allow writing to the boot sector - * - do not allow to write non-ASCII filenames */ if (sector_num < s->offset_to_fat) @@ -2989,13 +3022,8 @@ DLOG(checkpoint()); direntries = (direntry_t*)(buf + 0x200 * (begin - sector_num)); for (k = 0; k < (end - begin) * 0x10; k++) { - /* do not allow non-ASCII filenames */ - if (parse_long_name(&lfn, direntries + k) < 0) { - fprintf(stderr, "Warning: non-ASCII filename\n"); - return -1; - } /* no access to the direntry of a read-only file */ - else if (is_short_name(direntries+k) 
&& + if (is_short_name(direntries + k) && (direntries[k].attributes & 1)) { if (memcmp(direntries + k, array_get(&(s->directory), dir_index + k), diff --git a/blockdev.c b/blockdev.c index 7f53cc8bb3..6469f161df 100644 --- a/blockdev.c +++ b/blockdev.c @@ -1710,7 +1710,8 @@ static void external_snapshot_prepare(BlkActionState *common, } flags = state->old_bs->open_flags; - flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ); + flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_COPY_ON_READ); + flags |= BDRV_O_NO_BACKING; /* create new image w/backing file */ mode = s->has_mode ? s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS; @@ -1735,8 +1736,6 @@ static void external_snapshot_prepare(BlkActionState *common, qdict_put_str(options, "node-name", snapshot_node_name); } qdict_put_str(options, "driver", format); - - flags |= BDRV_O_NO_BACKING; } state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags, @@ -3548,6 +3547,9 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) backing_mode = MIRROR_OPEN_BACKING_CHAIN; } + /* Don't open backing image in create() */ + flags |= BDRV_O_NO_BACKING; + if ((arg->sync == MIRROR_SYNC_MODE_FULL || !source) && arg->mode != NEW_IMAGE_MODE_EXISTING) { @@ -3587,8 +3589,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) /* Mirroring takes care of copy-on-write using the source's backing * file. */ - target_bs = bdrv_open(arg->target, NULL, options, - flags | BDRV_O_NO_BACKING, errp); + target_bs = bdrv_open(arg->target, NULL, options, flags, errp); if (!target_bs) { goto out; } @@ -2113,6 +2113,24 @@ EOF # Xen unstable elif cat > $TMPC <<EOF && +#undef XC_WANT_COMPAT_MAP_FOREIGN_API +#include <xenforeignmemory.h> +int main(void) { + xenforeignmemory_handle *xfmem; + + xfmem = xenforeignmemory_open(0, 0); + xenforeignmemory_map2(xfmem, 0, 0, 0, 0, 0, 0, 0); + + return 0; +} +EOF + compile_prog "" "$xen_libs -lxendevicemodel $xen_stable_libs" + then + xen_stable_libs="-lxendevicemodel $xen_stable_libs" + xen_ctrl_version=41000 + xen=yes + elif + cat > $TMPC <<EOF && #undef XC_WANT_COMPAT_DEVICEMODEL_API #define __XEN_TOOLS__ #include <xendevicemodel.h> diff --git a/fsdev/qemu-fsdev-throttle.c b/fsdev/qemu-fsdev-throttle.c index 7ae4e86646..49eebb5412 100644 --- a/fsdev/qemu-fsdev-throttle.c +++ b/fsdev/qemu-fsdev-throttle.c @@ -86,7 +86,7 @@ void fsdev_throttle_init(FsThrottle *fst) fsdev_throttle_read_timer_cb, fsdev_throttle_write_timer_cb, fst); - throttle_config(&fst->ts, &fst->tt, &fst->cfg); + throttle_config(&fst->ts, QEMU_CLOCK_REALTIME, &fst->cfg); qemu_co_queue_init(&fst->throttled_reqs[0]); qemu_co_queue_init(&fst->throttled_reqs[1]); } @@ -401,16 +401,16 @@ static void print_block_info(Monitor *mon, BlockInfo *info, assert(!info || !info->has_inserted || info->inserted == inserted); - if (info) { + if (info && *info->device) { monitor_printf(mon, "%s", info->device); if (inserted && inserted->has_node_name) { monitor_printf(mon, " (%s)", inserted->node_name); } } else { - assert(inserted); + assert(info || inserted); monitor_printf(mon, "%s", - inserted->has_node_name - ? inserted->node_name + inserted && inserted->has_node_name ? inserted->node_name + : info && info->has_qdev ? 
info->qdev : "<anonymous>"); } @@ -425,6 +425,9 @@ static void print_block_info(Monitor *mon, BlockInfo *info, } if (info) { + if (info->has_qdev) { + monitor_printf(mon, " Attached to: %s\n", info->qdev); + } if (info->has_io_status && info->io_status != BLOCK_DEVICE_IO_STATUS_OK) { monitor_printf(mon, " I/O status: %s\n", BlockDeviceIoStatus_lookup[info->io_status]); diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c index 85405da3df..3b307ad873 100644 --- a/hw/alpha/dp264.c +++ b/hw/alpha/dp264.c @@ -123,7 +123,6 @@ static void clipper_init(MachineState *machine) /* Start all cpus at the PALcode RESET entry point. */ for (i = 0; i < smp_cpus; ++i) { - cpus[i]->env.pal_mode = 1; cpus[i]->env.pc = palcode_entry; cpus[i]->env.palbr = palcode_entry; } diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index 58a8f92d92..078fc5d239 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -404,6 +404,31 @@ static void set_uint64(Object *obj, Visitor *v, const char *name, visit_type_uint64(v, name, ptr, errp); } +static void get_int64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + int64_t *ptr = qdev_get_prop_ptr(dev, prop); + + visit_type_int64(v, name, ptr, errp); +} + +static void set_int64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + int64_t *ptr = qdev_get_prop_ptr(dev, prop); + + if (dev->realized) { + qdev_prop_set_after_realize(dev, name, errp); + return; + } + + visit_type_int64(v, name, ptr, errp); +} + const PropertyInfo qdev_prop_uint64 = { .name = "uint64", .get = get_uint64, @@ -411,6 +436,13 @@ const PropertyInfo qdev_prop_uint64 = { .set_default_value = set_default_value_uint, }; +const PropertyInfo qdev_prop_int64 = { + .name = "int64", + .get = get_int64, + .set = set_int64, + .set_default_value = set_default_value_int, +}; + /* --- string --- */ static void release_string(Object *obj, const char *name, void *opaque) diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 1653a47f0a..169a214d50 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -242,8 +242,8 @@ static void pc_q35_init(MachineState *machine) true, "ich9-ahci"); idebus[0] = qdev_get_child_bus(&ahci->qdev, "ide.0"); idebus[1] = qdev_get_child_bus(&ahci->qdev, "ide.1"); - g_assert(MAX_SATA_PORTS == ICH_AHCI(ahci)->ahci.ports); - ide_drive_get(hd, ICH_AHCI(ahci)->ahci.ports); + g_assert(MAX_SATA_PORTS == ahci_get_num_ports(ahci)); + ide_drive_get(hd, ahci_get_num_ports(ahci)); ahci_ide_create_devs(ahci, hd); } else { idebus[0] = idebus[1] = NULL; diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 3d951a3794..d9ccd5d0d6 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -288,6 +288,7 @@ static XenPhysmap *get_physmapping(XenIOState *state, return NULL; } +#ifdef XEN_COMPAT_PHYSMAP static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr, ram_addr_t size, void *opaque) { @@ -304,6 +305,42 @@ static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr, return start_addr; } +static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap) +{ + char path[80], value[17]; + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr", + xen_domid, (uint64_t)physmap->phys_offset); + snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr); + if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { + return -1; + } + 
snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size", + xen_domid, (uint64_t)physmap->phys_offset); + snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size); + if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { + return -1; + } + if (physmap->name) { + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name", + xen_domid, (uint64_t)physmap->phys_offset); + if (!xs_write(state->xenstore, 0, path, + physmap->name, strlen(physmap->name))) { + return -1; + } + } + return 0; +} +#else +static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap) +{ + return 0; +} +#endif + static int xen_add_to_physmap(XenIOState *state, hwaddr start_addr, ram_addr_t size, @@ -315,7 +352,6 @@ static int xen_add_to_physmap(XenIOState *state, XenPhysmap *physmap = NULL; hwaddr pfn, start_gpfn; hwaddr phys_offset = memory_region_get_ram_addr(mr); - char path[80], value[17]; const char *mr_name; if (get_physmapping(state, start_addr, size)) { @@ -338,6 +374,26 @@ go_physmap: DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", start_addr, start_addr + size); + mr_name = memory_region_name(mr); + + physmap = g_malloc(sizeof(XenPhysmap)); + + physmap->start_addr = start_addr; + physmap->size = size; + physmap->name = mr_name; + physmap->phys_offset = phys_offset; + + QLIST_INSERT_HEAD(&state->physmap, physmap, list); + + if (runstate_check(RUN_STATE_INMIGRATE)) { + /* Now when we have a physmap entry we can replace a dummy mapping with + * a real one of guest foreign memory. */ + uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size); + assert(p && p == memory_region_get_ram_ptr(mr)); + + return 0; + } + pfn = phys_offset >> TARGET_PAGE_BITS; start_gpfn = start_addr >> TARGET_PAGE_BITS; for (i = 0; i < size >> TARGET_PAGE_BITS; i++) { @@ -352,46 +408,11 @@ go_physmap: } } - mr_name = memory_region_name(mr); - - physmap = g_malloc(sizeof (XenPhysmap)); - - physmap->start_addr = start_addr; - physmap->size = size; - physmap->name = mr_name; - physmap->phys_offset = phys_offset; - - QLIST_INSERT_HEAD(&state->physmap, physmap, list); - xc_domain_pin_memory_cacheattr(xen_xc, xen_domid, start_addr >> TARGET_PAGE_BITS, (start_addr + size - 1) >> TARGET_PAGE_BITS, XEN_DOMCTL_MEM_CACHEATTR_WB); - - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr", - xen_domid, (uint64_t)phys_offset); - snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr); - if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { - return -1; - } - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size", - xen_domid, (uint64_t)phys_offset); - snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size); - if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { - return -1; - } - if (mr_name) { - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name", - xen_domid, (uint64_t)phys_offset); - if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) { - return -1; - } - } - - return 0; + return xen_save_physmap(state, physmap); } static int xen_remove_from_physmap(XenIOState *state, @@ -1152,6 +1173,7 @@ static void xen_exit_notifier(Notifier *n, void *data) xs_daemon_close(state->xenstore); } +#ifdef XEN_COMPAT_PHYSMAP static void xen_read_physmap(XenIOState *state) { XenPhysmap *physmap = NULL; @@ -1199,6 +1221,11 @@ static void xen_read_physmap(XenIOState *state) } free(entries); } +#else +static void 
xen_read_physmap(XenIOState *state) +{ +} +#endif static void xen_wakeup_notifier(Notifier *notifier, void *data) { @@ -1325,7 +1352,11 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) state->bufioreq_local_port = rc; /* Init RAM management */ +#ifdef XEN_COMPAT_PHYSMAP xen_map_cache_init(xen_phys_offset_to_gaddr, state); +#else + xen_map_cache_init(NULL, state); +#endif xen_ram_init(pcms, ram_size, ram_memory); qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c index e60156c04f..2a1fbd13cc 100644 --- a/hw/i386/xen/xen-mapcache.c +++ b/hw/i386/xen/xen-mapcache.c @@ -53,6 +53,8 @@ typedef struct MapCacheEntry { uint8_t *vaddr_base; unsigned long *valid_mapping; uint8_t lock; +#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0) + uint8_t flags; hwaddr size; struct MapCacheEntry *next; } MapCacheEntry; @@ -149,8 +151,10 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque) } static void xen_remap_bucket(MapCacheEntry *entry, + void *vaddr, hwaddr size, - hwaddr address_index) + hwaddr address_index, + bool dummy) { uint8_t *vaddr_base; xen_pfn_t *pfns; @@ -164,7 +168,9 @@ static void xen_remap_bucket(MapCacheEntry *entry, err = g_malloc0(nb_pfn * sizeof (int)); if (entry->vaddr_base != NULL) { - ram_block_notify_remove(entry->vaddr_base, entry->size); + if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { + ram_block_notify_remove(entry->vaddr_base, entry->size); + } if (munmap(entry->vaddr_base, entry->size) != 0) { perror("unmap fails"); exit(-1); @@ -177,11 +183,29 @@ static void xen_remap_bucket(MapCacheEntry *entry, pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i; } - vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, - nb_pfn, pfns, err); - if (vaddr_base == NULL) { - perror("xenforeignmemory_map"); - exit(-1); + if (!dummy) { + vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr, + PROT_READ | PROT_WRITE, 0, + nb_pfn, pfns, err); + if (vaddr_base == NULL) { + perror("xenforeignmemory_map2"); + exit(-1); + } + } else { + /* + * We create dummy mappings where we are unable to create a foreign + * mapping immediately due to certain circumstances (i.e. 
on resume now) + */ + vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_SHARED, -1, 0); + if (vaddr_base == NULL) { + perror("mmap"); + exit(-1); + } + } + + if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { + ram_block_notify_add(vaddr_base, size); } entry->vaddr_base = vaddr_base; @@ -190,7 +214,12 @@ static void xen_remap_bucket(MapCacheEntry *entry, entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) * BITS_TO_LONGS(size >> XC_PAGE_SHIFT)); - ram_block_notify_add(entry->vaddr_base, entry->size); + if (dummy) { + entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY; + } else { + entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY); + } + bitmap_zero(entry->valid_mapping, nb_pfn); for (i = 0; i < nb_pfn; i++) { if (!err[i]) { @@ -210,7 +239,8 @@ static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size, hwaddr address_offset; hwaddr cache_size = size; hwaddr test_bit_size; - bool translated = false; + bool translated G_GNUC_UNUSED = false; + bool dummy = false; tryagain: address_index = phys_addr >> MCACHE_BUCKET_SHIFT; @@ -262,14 +292,14 @@ tryagain: if (!entry) { entry = g_malloc0(sizeof (MapCacheEntry)); pentry->next = entry; - xen_remap_bucket(entry, cache_size, address_index); + xen_remap_bucket(entry, NULL, cache_size, address_index, dummy); } else if (!entry->lock) { if (!entry->vaddr_base || entry->paddr_index != address_index || entry->size != cache_size || !test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping)) { - xen_remap_bucket(entry, cache_size, address_index); + xen_remap_bucket(entry, NULL, cache_size, address_index, dummy); } } @@ -277,11 +307,17 @@ tryagain: test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping)) { mapcache->last_entry = NULL; +#ifdef XEN_COMPAT_PHYSMAP if (!translated && mapcache->phys_offset_to_gaddr) { phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque); translated = true; goto tryagain; } +#endif + if (!dummy && runstate_check(RUN_STATE_INMIGRATE)) { + dummy = true; + goto tryagain; + } trace_xen_map_cache_return(NULL); return NULL; } @@ -462,3 +498,66 @@ void xen_invalidate_map_cache(void) mapcache_unlock(); } + +static uint8_t *xen_replace_cache_entry_unlocked(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size) +{ + MapCacheEntry *entry; + hwaddr address_index, address_offset; + hwaddr test_bit_size, cache_size = size; + + address_index = old_phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = old_phys_addr & (MCACHE_BUCKET_SIZE - 1); + + assert(size); + /* test_bit_size is always a multiple of XC_PAGE_SIZE */ + test_bit_size = size + (old_phys_addr & (XC_PAGE_SIZE - 1)); + if (test_bit_size % XC_PAGE_SIZE) { + test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); + } + cache_size = size + address_offset; + if (cache_size % MCACHE_BUCKET_SIZE) { + cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); + } + + entry = &mapcache->entry[address_index % mapcache->nr_buckets]; + while (entry && !(entry->paddr_index == address_index && + entry->size == cache_size)) { + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to update an entry for %lx " \ + "that is not in the mapcache!\n", old_phys_addr); + return NULL; + } + + address_index = new_phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1); + + fprintf(stderr, "Replacing a dummy mapcache entry for %lx with %lx\n", + old_phys_addr, new_phys_addr); + + xen_remap_bucket(entry, entry->vaddr_base, + cache_size, 
address_index, false); + if (!test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + DPRINTF("Unable to update a mapcache entry for %lx!\n", old_phys_addr); + return NULL; + } + + return entry->vaddr_base + address_offset; +} + +uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size) +{ + uint8_t *p; + + mapcache_lock(); + p = xen_replace_cache_entry_unlocked(old_phys_addr, new_phys_addr, size); + mapcache_unlock(); + return p; +} diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index f23155832b..9ba7474566 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -87,10 +87,30 @@ static void log_writeb(PCIXenPlatformState *s, char val) } } -/* Xen Platform, Fixed IOPort */ -#define UNPLUG_ALL_DISKS 1 -#define UNPLUG_ALL_NICS 2 -#define UNPLUG_AUX_IDE_DISKS 4 +/* + * Unplug device flags. + * + * The logic got a little confused at some point in the past but this is + * what they do now. + * + * bit 0: Unplug all IDE and SCSI disks. + * bit 1: Unplug all NICs. + * bit 2: Unplug IDE disks except primary master. This is overridden if + * bit 0 is also present in the mask. + * bit 3: Unplug all NVMe disks. + * + */ +#define _UNPLUG_IDE_SCSI_DISKS 0 +#define UNPLUG_IDE_SCSI_DISKS (1u << _UNPLUG_IDE_SCSI_DISKS) + +#define _UNPLUG_ALL_NICS 1 +#define UNPLUG_ALL_NICS (1u << _UNPLUG_ALL_NICS) + +#define _UNPLUG_AUX_IDE_DISKS 2 +#define UNPLUG_AUX_IDE_DISKS (1u << _UNPLUG_AUX_IDE_DISKS) + +#define _UNPLUG_NVME_DISKS 3 +#define UNPLUG_NVME_DISKS (1u << _UNPLUG_NVME_DISKS) static void unplug_nic(PCIBus *b, PCIDevice *d, void *o) { @@ -122,7 +142,7 @@ static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) { uint32_t flags = *(uint32_t *)opaque; bool aux = (flags & UNPLUG_AUX_IDE_DISKS) && - !(flags & UNPLUG_ALL_DISKS); + !(flags & UNPLUG_IDE_SCSI_DISKS); /* We have to ignore passthrough devices */ if (!strcmp(d->name, "xen-pci-passthrough")) { @@ -135,12 +155,16 @@ static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) break; case PCI_CLASS_STORAGE_SCSI: - case PCI_CLASS_STORAGE_EXPRESS: if (!aux) { object_unparent(OBJECT(d)); } break; + case PCI_CLASS_STORAGE_EXPRESS: + if (flags & UNPLUG_NVME_DISKS) { + object_unparent(OBJECT(d)); + } + default: break; } @@ -158,10 +182,9 @@ static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t v switch (addr) { case 0: { PCIDevice *pci_dev = PCI_DEVICE(s); - /* Unplug devices. Value is a bitmask of which devices to - unplug, with bit 0 the disk devices, bit 1 the network - devices, and bit 2 the non-primary-master IDE devices. */ - if (val & (UNPLUG_ALL_DISKS | UNPLUG_AUX_IDE_DISKS)) { + /* Unplug devices. See comment above flag definitions */ + if (val & (UNPLUG_IDE_SCSI_DISKS | UNPLUG_AUX_IDE_DISKS | + UNPLUG_NVME_DISKS)) { DPRINTF("unplug disks\n"); pci_unplug_disks(pci_dev->bus, val); } @@ -349,14 +372,14 @@ static void xen_platform_ioport_writeb(void *opaque, hwaddr addr, * If VMDP was to control both disk and LAN it would use 4. * If it controlled just disk or just LAN, it would use 8 below. 
*/ - pci_unplug_disks(pci_dev->bus, UNPLUG_ALL_DISKS); + pci_unplug_disks(pci_dev->bus, UNPLUG_IDE_SCSI_DISKS); pci_unplug_nics(pci_dev->bus); } break; case 8: switch (val) { case 1: - pci_unplug_disks(pci_dev->bus, UNPLUG_ALL_DISKS); + pci_unplug_disks(pci_dev->bus, UNPLUG_IDE_SCSI_DISKS); break; case 2: pci_unplug_nics(pci_dev->bus); diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index 874d3fe280..406a1b5579 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -32,7 +32,7 @@ #include "sysemu/dma.h" #include "hw/ide/internal.h" #include "hw/ide/pci.h" -#include "hw/ide/ahci.h" +#include "hw/ide/ahci_internal.h" #define DEBUG_AHCI 0 @@ -1833,6 +1833,14 @@ static void sysbus_ahci_register_types(void) type_init(sysbus_ahci_register_types) +int32_t ahci_get_num_ports(PCIDevice *dev) +{ + AHCIPCIState *d = ICH_AHCI(dev); + AHCIState *ahci = &d->ahci; + + return ahci->ports; +} + void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd) { AHCIPCIState *d = ICH_AHCI(dev); diff --git a/hw/ide/ahci_internal.h b/hw/ide/ahci_internal.h new file mode 100644 index 0000000000..1e21169e79 --- /dev/null +++ b/hw/ide/ahci_internal.h @@ -0,0 +1,359 @@ +/* + * QEMU AHCI Emulation + * + * Copyright (c) 2010 qiaochong@loongson.cn + * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com> + * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de> + * Copyright (c) 2010 Alexander Graf <agraf@suse.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#ifndef HW_IDE_AHCI_INTERNAL_H +#define HW_IDE_AHCI_INTERNAL_H + +#include "hw/ide/ahci.h" +#include "hw/sysbus.h" + +#define AHCI_MEM_BAR_SIZE 0x1000 +#define AHCI_MAX_PORTS 32 +#define AHCI_MAX_SG 168 /* hardware max is 64K */ +#define AHCI_DMA_BOUNDARY 0xffffffff +#define AHCI_USE_CLUSTERING 0 +#define AHCI_MAX_CMDS 32 +#define AHCI_CMD_SZ 32 +#define AHCI_CMD_SLOT_SZ (AHCI_MAX_CMDS * AHCI_CMD_SZ) +#define AHCI_RX_FIS_SZ 256 +#define AHCI_CMD_TBL_CDB 0x40 +#define AHCI_CMD_TBL_HDR_SZ 0x80 +#define AHCI_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16)) +#define AHCI_CMD_TBL_AR_SZ (AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS) +#define AHCI_PORT_PRIV_DMA_SZ (AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + \ + AHCI_RX_FIS_SZ) + +#define AHCI_IRQ_ON_SG (1U << 31) +#define AHCI_CMD_ATAPI (1 << 5) +#define AHCI_CMD_WRITE (1 << 6) +#define AHCI_CMD_PREFETCH (1 << 7) +#define AHCI_CMD_RESET (1 << 8) +#define AHCI_CMD_CLR_BUSY (1 << 10) + +#define RX_FIS_D2H_REG 0x40 /* offset of D2H Register FIS data */ +#define RX_FIS_SDB 0x58 /* offset of SDB FIS data */ +#define RX_FIS_UNK 0x60 /* offset of Unknown FIS data */ + +/* global controller registers */ +#define HOST_CAP 0x00 /* host capabilities */ +#define HOST_CTL 0x04 /* global host control */ +#define HOST_IRQ_STAT 0x08 /* interrupt status */ +#define HOST_PORTS_IMPL 0x0c /* bitmap of implemented ports */ +#define HOST_VERSION 0x10 /* AHCI spec. 
version compliancy */ + +/* HOST_CTL bits */ +#define HOST_CTL_RESET (1 << 0) /* reset controller; self-clear */ +#define HOST_CTL_IRQ_EN (1 << 1) /* global IRQ enable */ +#define HOST_CTL_AHCI_EN (1U << 31) /* AHCI enabled */ + +/* HOST_CAP bits */ +#define HOST_CAP_SSC (1 << 14) /* Slumber capable */ +#define HOST_CAP_AHCI (1 << 18) /* AHCI only */ +#define HOST_CAP_CLO (1 << 24) /* Command List Override support */ +#define HOST_CAP_SSS (1 << 27) /* Staggered Spin-up */ +#define HOST_CAP_NCQ (1 << 30) /* Native Command Queueing */ +#define HOST_CAP_64 (1U << 31) /* PCI DAC (64-bit DMA) support */ + +/* registers for each SATA port */ +#define PORT_LST_ADDR 0x00 /* command list DMA addr */ +#define PORT_LST_ADDR_HI 0x04 /* command list DMA addr hi */ +#define PORT_FIS_ADDR 0x08 /* FIS rx buf addr */ +#define PORT_FIS_ADDR_HI 0x0c /* FIS rx buf addr hi */ +#define PORT_IRQ_STAT 0x10 /* interrupt status */ +#define PORT_IRQ_MASK 0x14 /* interrupt enable/disable mask */ +#define PORT_CMD 0x18 /* port command */ +#define PORT_TFDATA 0x20 /* taskfile data */ +#define PORT_SIG 0x24 /* device TF signature */ +#define PORT_SCR_STAT 0x28 /* SATA phy register: SStatus */ +#define PORT_SCR_CTL 0x2c /* SATA phy register: SControl */ +#define PORT_SCR_ERR 0x30 /* SATA phy register: SError */ +#define PORT_SCR_ACT 0x34 /* SATA phy register: SActive */ +#define PORT_CMD_ISSUE 0x38 /* command issue */ +#define PORT_RESERVED 0x3c /* reserved */ + +/* PORT_IRQ_{STAT,MASK} bits */ +#define PORT_IRQ_COLD_PRES (1U << 31) /* cold presence detect */ +#define PORT_IRQ_TF_ERR (1 << 30) /* task file error */ +#define PORT_IRQ_HBUS_ERR (1 << 29) /* host bus fatal error */ +#define PORT_IRQ_HBUS_DATA_ERR (1 << 28) /* host bus data error */ +#define PORT_IRQ_IF_ERR (1 << 27) /* interface fatal error */ +#define PORT_IRQ_IF_NONFATAL (1 << 26) /* interface non-fatal error */ +#define PORT_IRQ_OVERFLOW (1 << 24) /* xfer exhausted available S/G */ +#define PORT_IRQ_BAD_PMP (1 << 23) /* incorrect port multiplier */ + +#define PORT_IRQ_PHYRDY (1 << 22) /* PhyRdy changed */ +#define PORT_IRQ_DEV_ILCK (1 << 7) /* device interlock */ +#define PORT_IRQ_CONNECT (1 << 6) /* port connect change status */ +#define PORT_IRQ_SG_DONE (1 << 5) /* descriptor processed */ +#define PORT_IRQ_UNK_FIS (1 << 4) /* unknown FIS rx'd */ +#define PORT_IRQ_SDB_FIS (1 << 3) /* Set Device Bits FIS rx'd */ +#define PORT_IRQ_DMAS_FIS (1 << 2) /* DMA Setup FIS rx'd */ +#define PORT_IRQ_PIOS_FIS (1 << 1) /* PIO Setup FIS rx'd */ +#define PORT_IRQ_D2H_REG_FIS (1 << 0) /* D2H Register FIS rx'd */ + +#define PORT_IRQ_FREEZE (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | \ + PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY | \ + PORT_IRQ_UNK_FIS) +#define PORT_IRQ_ERROR (PORT_IRQ_FREEZE | PORT_IRQ_TF_ERR | \ + PORT_IRQ_HBUS_DATA_ERR) +#define DEF_PORT_IRQ (PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | \ + PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | \ + PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS) + +/* PORT_CMD bits */ +#define PORT_CMD_ATAPI (1 << 24) /* Device is ATAPI */ +#define PORT_CMD_LIST_ON (1 << 15) /* cmd list DMA engine running */ +#define PORT_CMD_FIS_ON (1 << 14) /* FIS DMA engine running */ +#define PORT_CMD_FIS_RX (1 << 4) /* Enable FIS receive DMA engine */ +#define PORT_CMD_CLO (1 << 3) /* Command list override */ +#define PORT_CMD_POWER_ON (1 << 2) /* Power up device */ +#define PORT_CMD_SPIN_UP (1 << 1) /* Spin up device */ +#define PORT_CMD_START (1 << 0) /* Enable port DMA engine */ + +#define PORT_CMD_ICC_MASK (0xfU << 28) /* i/f ICC state mask */ +#define 
PORT_CMD_ICC_ACTIVE (0x1 << 28) /* Put i/f in active state */ +#define PORT_CMD_ICC_PARTIAL (0x2 << 28) /* Put i/f in partial state */ +#define PORT_CMD_ICC_SLUMBER (0x6 << 28) /* Put i/f in slumber state */ + +#define PORT_CMD_RO_MASK 0x007dffe0 /* Which CMD bits are read only? */ + +/* ap->flags bits */ +#define AHCI_FLAG_NO_NCQ (1 << 24) +#define AHCI_FLAG_IGN_IRQ_IF_ERR (1 << 25) /* ignore IRQ_IF_ERR */ +#define AHCI_FLAG_HONOR_PI (1 << 26) /* honor PORTS_IMPL */ +#define AHCI_FLAG_IGN_SERR_INTERNAL (1 << 27) /* ignore SERR_INTERNAL */ +#define AHCI_FLAG_32BIT_ONLY (1 << 28) /* force 32bit */ + +#define ATA_SRST (1 << 2) /* software reset */ + +#define STATE_RUN 0 +#define STATE_RESET 1 + +#define SATA_SCR_SSTATUS_DET_NODEV 0x0 +#define SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP 0x3 + +#define SATA_SCR_SSTATUS_SPD_NODEV 0x00 +#define SATA_SCR_SSTATUS_SPD_GEN1 0x10 + +#define SATA_SCR_SSTATUS_IPM_NODEV 0x000 +#define SATA_SCR_SSTATUS_IPM_ACTIVE 0X100 + +#define AHCI_SCR_SCTL_DET 0xf + +#define SATA_FIS_TYPE_REGISTER_H2D 0x27 +#define SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER 0x80 +#define SATA_FIS_TYPE_REGISTER_D2H 0x34 +#define SATA_FIS_TYPE_PIO_SETUP 0x5f +#define SATA_FIS_TYPE_SDB 0xA1 + +#define AHCI_CMD_HDR_CMD_FIS_LEN 0x1f +#define AHCI_CMD_HDR_PRDT_LEN 16 + +#define SATA_SIGNATURE_CDROM 0xeb140101 +#define SATA_SIGNATURE_DISK 0x00000101 + +#define AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR 0x20 + /* Shouldn't this be 0x2c? */ + +#define AHCI_PORT_REGS_START_ADDR 0x100 +#define AHCI_PORT_ADDR_OFFSET_MASK 0x7f +#define AHCI_PORT_ADDR_OFFSET_LEN 0x80 + +#define AHCI_NUM_COMMAND_SLOTS 31 +#define AHCI_SUPPORTED_SPEED 20 +#define AHCI_SUPPORTED_SPEED_GEN1 1 +#define AHCI_VERSION_1_0 0x10000 + +#define AHCI_PROGMODE_MAJOR_REV_1 1 + +#define AHCI_COMMAND_TABLE_ACMD 0x40 + +#define AHCI_PRDT_SIZE_MASK 0x3fffff + +#define IDE_FEATURE_DMA 1 + +#define READ_FPDMA_QUEUED 0x60 +#define WRITE_FPDMA_QUEUED 0x61 +#define NCQ_NON_DATA 0x63 +#define RECEIVE_FPDMA_QUEUED 0x65 +#define SEND_FPDMA_QUEUED 0x64 + +#define NCQ_FIS_FUA_MASK 0x80 +#define NCQ_FIS_RARC_MASK 0x01 + +#define RES_FIS_DSFIS 0x00 +#define RES_FIS_PSFIS 0x20 +#define RES_FIS_RFIS 0x40 +#define RES_FIS_SDBFIS 0x58 +#define RES_FIS_UFIS 0x60 + +#define SATA_CAP_SIZE 0x8 +#define SATA_CAP_REV 0x2 +#define SATA_CAP_BAR 0x4 + +typedef struct AHCIPortRegs { + uint32_t lst_addr; + uint32_t lst_addr_hi; + uint32_t fis_addr; + uint32_t fis_addr_hi; + uint32_t irq_stat; + uint32_t irq_mask; + uint32_t cmd; + uint32_t unused0; + uint32_t tfdata; + uint32_t sig; + uint32_t scr_stat; + uint32_t scr_ctl; + uint32_t scr_err; + uint32_t scr_act; + uint32_t cmd_issue; + uint32_t reserved; +} AHCIPortRegs; + +typedef struct AHCICmdHdr { + uint16_t opts; + uint16_t prdtl; + uint32_t status; + uint64_t tbl_addr; + uint32_t reserved[4]; +} QEMU_PACKED AHCICmdHdr; + +typedef struct AHCI_SG { + uint64_t addr; + uint32_t reserved; + uint32_t flags_size; +} QEMU_PACKED AHCI_SG; + +typedef struct NCQTransferState { + AHCIDevice *drive; + BlockAIOCB *aiocb; + AHCICmdHdr *cmdh; + QEMUSGList sglist; + BlockAcctCookie acct; + uint32_t sector_count; + uint64_t lba; + uint8_t tag; + uint8_t cmd; + uint8_t slot; + bool used; + bool halt; +} NCQTransferState; + +struct AHCIDevice { + IDEDMA dma; + IDEBus port; + int port_no; + uint32_t port_state; + uint32_t finished; + AHCIPortRegs port_regs; + struct AHCIState *hba; + QEMUBH *check_bh; + uint8_t *lst; + uint8_t *res_fis; + bool done_atapi_packet; + int32_t busy_slot; + bool init_d2h_sent; + AHCICmdHdr *cur_cmd; + 
NCQTransferState ncq_tfs[AHCI_MAX_CMDS]; +}; + +struct AHCIPCIState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + AHCIState ahci; +}; + +#define TYPE_ICH9_AHCI "ich9-ahci" + +#define ICH_AHCI(obj) \ + OBJECT_CHECK(AHCIPCIState, (obj), TYPE_ICH9_AHCI) + +extern const VMStateDescription vmstate_ahci; + +#define VMSTATE_AHCI(_field, _state) { \ + .name = (stringify(_field)), \ + .size = sizeof(AHCIState), \ + .vmsd = &vmstate_ahci, \ + .flags = VMS_STRUCT, \ + .offset = vmstate_offset_value(_state, _field, AHCIState), \ +} + +/** + * NCQFrame is the same as a Register H2D FIS (described in SATA 3.2), + * but some fields have been re-mapped and re-purposed, as seen in + * SATA 3.2 section 13.6.4.1 ("READ FPDMA QUEUED") + * + * cmd_fis[3], feature 7:0, becomes sector count 7:0. + * cmd_fis[7], device 7:0, uses bit 7 as the Force Unit Access bit. + * cmd_fis[11], feature 15:8, becomes sector count 15:8. + * cmd_fis[12], count 7:0, becomes the NCQ TAG (7:3) and RARC bit (0) + * cmd_fis[13], count 15:8, becomes the priority value (7:6) + * bytes 16-19 become an le32 "auxiliary" field. + */ +typedef struct NCQFrame { + uint8_t fis_type; + uint8_t c; + uint8_t command; + uint8_t sector_count_low; /* (feature 7:0) */ + uint8_t lba0; + uint8_t lba1; + uint8_t lba2; + uint8_t fua; /* (device 7:0) */ + uint8_t lba3; + uint8_t lba4; + uint8_t lba5; + uint8_t sector_count_high; /* (feature 15:8) */ + uint8_t tag; /* (count 0:7) */ + uint8_t prio; /* (count 15:8) */ + uint8_t icc; + uint8_t control; + uint8_t aux0; + uint8_t aux1; + uint8_t aux2; + uint8_t aux3; +} QEMU_PACKED NCQFrame; + +typedef struct SDBFIS { + uint8_t type; + uint8_t flags; + uint8_t status; + uint8_t error; + uint32_t payload; +} QEMU_PACKED SDBFIS; + +void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports); +void ahci_init(AHCIState *s, DeviceState *qdev); +void ahci_uninit(AHCIState *s); + +void ahci_reset(AHCIState *s); + +#define TYPE_SYSBUS_AHCI "sysbus-ahci" +#define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI) + +#define TYPE_ALLWINNER_AHCI "allwinner-ahci" +#define ALLWINNER_AHCI(obj) OBJECT_CHECK(AllwinnerAHCIState, (obj), \ + TYPE_ALLWINNER_AHCI) + +#endif /* HW_IDE_AHCI_H */ diff --git a/hw/ide/ich.c b/hw/ide/ich.c index 989fca5e9f..9472a60cab 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -69,7 +69,7 @@ #include "sysemu/block-backend.h" #include "sysemu/dma.h" #include "hw/ide/pci.h" -#include "hw/ide/ahci.h" +#include "hw/ide/ahci_internal.h" #define ICH9_MSI_CAP_OFFSET 0x80 #define ICH9_SATA_CAP_OFFSET 0xA8 diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index 299e592fa2..cc2f5bd280 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -164,6 +164,7 @@ static int ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind) IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus); IDEState *s = bus->ifs + dev->unit; Error *err = NULL; + int ret; if (!dev->conf.blk) { if (kind != IDE_CD) { @@ -172,6 +173,8 @@ static int ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind) } else { /* Anonymous BlockBackend for an empty drive */ dev->conf.blk = blk_new(0, BLK_PERM_ALL); + ret = blk_attach_dev(dev->conf.blk, &dev->qdev); + assert(ret == 0); } } diff --git a/hw/mips/boston.c b/hw/mips/boston.c index 7985c60dde..776ee283e1 100644 --- a/hw/mips/boston.c +++ b/hw/mips/boston.c @@ -538,8 +538,8 @@ static void boston_mach_init(MachineState *machine) ahci = pci_create_simple_multifunction(&PCI_BRIDGE(&pcie2->root)->sec_bus, PCI_DEVFN(0, 0), true, TYPE_ICH9_AHCI); - 
g_assert(ARRAY_SIZE(hd) == ICH_AHCI(ahci)->ahci.ports); - ide_drive_get(hd, ICH_AHCI(ahci)->ahci.ports); + g_assert(ARRAY_SIZE(hd) == ahci_get_num_ports(ahci)); + ide_drive_get(hd, ahci_get_num_ports(ahci)); ahci_ide_create_devs(ahci, hd); if (machine->firmware) { diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index a53f058621..5f1e5e8070 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -2384,9 +2384,14 @@ static void scsi_hd_realize(SCSIDevice *dev, Error **errp) static void scsi_cd_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); + int ret; if (!dev->conf.blk) { + /* Anonymous BlockBackend for an empty drive. As we put it into + * dev->conf, qdev takes care of detaching on unplug. */ dev->conf.blk = blk_new(0, BLK_PERM_ALL); + ret = blk_attach_dev(dev->conf.blk, &dev->qdev); + assert(ret == 0); } s->qdev.blocksize = 2048; diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c index 6f18366f67..1f04ec5eec 100644 --- a/hw/xen/xen_pt_config_init.c +++ b/hw/xen/xen_pt_config_init.c @@ -1535,6 +1535,7 @@ static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = { .offset = 0x0, .size = 4, .init_val = 0, + .emu_mask = 0xFFFFFFFF, .u.dw.read = xen_pt_intel_opregion_read, .u.dw.write = xen_pt_intel_opregion_write, }, diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c index 62add0639f..ff9a79f5d2 100644 --- a/hw/xen/xen_pt_msi.c +++ b/hw/xen/xen_pt_msi.c @@ -535,7 +535,11 @@ int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base) return -1; } - xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control); + rc = xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control); + if (rc) { + XEN_PT_ERR(d, "Failed to read PCI_MSIX_FLAGS field\n"); + return rc; + } total_entries = control & PCI_MSIX_FLAGS_QSIZE; total_entries += 1; @@ -554,7 +558,11 @@ int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base) + XC_PAGE_SIZE - 1) & XC_PAGE_MASK); - xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off); + rc = xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off); + if (rc) { + XEN_PT_ERR(d, "Failed to read PCI_MSIX_TABLE field\n"); + goto error_out; + } bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK; table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK; msix->table_base = s->real_device.io_regions[bar_index].base_addr; diff --git a/include/hw/ide/ahci.h b/include/hw/ide/ahci.h index 0ca7c65820..5a06537e6b 100644 --- a/include/hw/ide/ahci.h +++ b/include/hw/ide/ahci.h @@ -26,189 +26,7 @@ #include "hw/sysbus.h" -#define AHCI_MEM_BAR_SIZE 0x1000 -#define AHCI_MAX_PORTS 32 -#define AHCI_MAX_SG 168 /* hardware max is 64K */ -#define AHCI_DMA_BOUNDARY 0xffffffff -#define AHCI_USE_CLUSTERING 0 -#define AHCI_MAX_CMDS 32 -#define AHCI_CMD_SZ 32 -#define AHCI_CMD_SLOT_SZ (AHCI_MAX_CMDS * AHCI_CMD_SZ) -#define AHCI_RX_FIS_SZ 256 -#define AHCI_CMD_TBL_CDB 0x40 -#define AHCI_CMD_TBL_HDR_SZ 0x80 -#define AHCI_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16)) -#define AHCI_CMD_TBL_AR_SZ (AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS) -#define AHCI_PORT_PRIV_DMA_SZ (AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + \ - AHCI_RX_FIS_SZ) - -#define AHCI_IRQ_ON_SG (1U << 31) -#define AHCI_CMD_ATAPI (1 << 5) -#define AHCI_CMD_WRITE (1 << 6) -#define AHCI_CMD_PREFETCH (1 << 7) -#define AHCI_CMD_RESET (1 << 8) -#define AHCI_CMD_CLR_BUSY (1 << 10) - -#define RX_FIS_D2H_REG 0x40 /* offset of D2H Register FIS data */ -#define RX_FIS_SDB 0x58 /* offset of SDB FIS data */ -#define RX_FIS_UNK 0x60 /* offset of Unknown FIS data */ - -/* 
global controller registers */ -#define HOST_CAP 0x00 /* host capabilities */ -#define HOST_CTL 0x04 /* global host control */ -#define HOST_IRQ_STAT 0x08 /* interrupt status */ -#define HOST_PORTS_IMPL 0x0c /* bitmap of implemented ports */ -#define HOST_VERSION 0x10 /* AHCI spec. version compliancy */ - -/* HOST_CTL bits */ -#define HOST_CTL_RESET (1 << 0) /* reset controller; self-clear */ -#define HOST_CTL_IRQ_EN (1 << 1) /* global IRQ enable */ -#define HOST_CTL_AHCI_EN (1U << 31) /* AHCI enabled */ - -/* HOST_CAP bits */ -#define HOST_CAP_SSC (1 << 14) /* Slumber capable */ -#define HOST_CAP_AHCI (1 << 18) /* AHCI only */ -#define HOST_CAP_CLO (1 << 24) /* Command List Override support */ -#define HOST_CAP_SSS (1 << 27) /* Staggered Spin-up */ -#define HOST_CAP_NCQ (1 << 30) /* Native Command Queueing */ -#define HOST_CAP_64 (1U << 31) /* PCI DAC (64-bit DMA) support */ - -/* registers for each SATA port */ -#define PORT_LST_ADDR 0x00 /* command list DMA addr */ -#define PORT_LST_ADDR_HI 0x04 /* command list DMA addr hi */ -#define PORT_FIS_ADDR 0x08 /* FIS rx buf addr */ -#define PORT_FIS_ADDR_HI 0x0c /* FIS rx buf addr hi */ -#define PORT_IRQ_STAT 0x10 /* interrupt status */ -#define PORT_IRQ_MASK 0x14 /* interrupt enable/disable mask */ -#define PORT_CMD 0x18 /* port command */ -#define PORT_TFDATA 0x20 /* taskfile data */ -#define PORT_SIG 0x24 /* device TF signature */ -#define PORT_SCR_STAT 0x28 /* SATA phy register: SStatus */ -#define PORT_SCR_CTL 0x2c /* SATA phy register: SControl */ -#define PORT_SCR_ERR 0x30 /* SATA phy register: SError */ -#define PORT_SCR_ACT 0x34 /* SATA phy register: SActive */ -#define PORT_CMD_ISSUE 0x38 /* command issue */ -#define PORT_RESERVED 0x3c /* reserved */ - -/* PORT_IRQ_{STAT,MASK} bits */ -#define PORT_IRQ_COLD_PRES (1U << 31) /* cold presence detect */ -#define PORT_IRQ_TF_ERR (1 << 30) /* task file error */ -#define PORT_IRQ_HBUS_ERR (1 << 29) /* host bus fatal error */ -#define PORT_IRQ_HBUS_DATA_ERR (1 << 28) /* host bus data error */ -#define PORT_IRQ_IF_ERR (1 << 27) /* interface fatal error */ -#define PORT_IRQ_IF_NONFATAL (1 << 26) /* interface non-fatal error */ -#define PORT_IRQ_OVERFLOW (1 << 24) /* xfer exhausted available S/G */ -#define PORT_IRQ_BAD_PMP (1 << 23) /* incorrect port multiplier */ - -#define PORT_IRQ_PHYRDY (1 << 22) /* PhyRdy changed */ -#define PORT_IRQ_DEV_ILCK (1 << 7) /* device interlock */ -#define PORT_IRQ_CONNECT (1 << 6) /* port connect change status */ -#define PORT_IRQ_SG_DONE (1 << 5) /* descriptor processed */ -#define PORT_IRQ_UNK_FIS (1 << 4) /* unknown FIS rx'd */ -#define PORT_IRQ_SDB_FIS (1 << 3) /* Set Device Bits FIS rx'd */ -#define PORT_IRQ_DMAS_FIS (1 << 2) /* DMA Setup FIS rx'd */ -#define PORT_IRQ_PIOS_FIS (1 << 1) /* PIO Setup FIS rx'd */ -#define PORT_IRQ_D2H_REG_FIS (1 << 0) /* D2H Register FIS rx'd */ - -#define PORT_IRQ_FREEZE (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | \ - PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY | \ - PORT_IRQ_UNK_FIS) -#define PORT_IRQ_ERROR (PORT_IRQ_FREEZE | PORT_IRQ_TF_ERR | \ - PORT_IRQ_HBUS_DATA_ERR) -#define DEF_PORT_IRQ (PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | \ - PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | \ - PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS) - -/* PORT_CMD bits */ -#define PORT_CMD_ATAPI (1 << 24) /* Device is ATAPI */ -#define PORT_CMD_LIST_ON (1 << 15) /* cmd list DMA engine running */ -#define PORT_CMD_FIS_ON (1 << 14) /* FIS DMA engine running */ -#define PORT_CMD_FIS_RX (1 << 4) /* Enable FIS receive DMA engine */ -#define PORT_CMD_CLO (1 << 3) /* Command 
list override */ -#define PORT_CMD_POWER_ON (1 << 2) /* Power up device */ -#define PORT_CMD_SPIN_UP (1 << 1) /* Spin up device */ -#define PORT_CMD_START (1 << 0) /* Enable port DMA engine */ - -#define PORT_CMD_ICC_MASK (0xfU << 28) /* i/f ICC state mask */ -#define PORT_CMD_ICC_ACTIVE (0x1 << 28) /* Put i/f in active state */ -#define PORT_CMD_ICC_PARTIAL (0x2 << 28) /* Put i/f in partial state */ -#define PORT_CMD_ICC_SLUMBER (0x6 << 28) /* Put i/f in slumber state */ - -#define PORT_CMD_RO_MASK 0x007dffe0 /* Which CMD bits are read only? */ - -/* ap->flags bits */ -#define AHCI_FLAG_NO_NCQ (1 << 24) -#define AHCI_FLAG_IGN_IRQ_IF_ERR (1 << 25) /* ignore IRQ_IF_ERR */ -#define AHCI_FLAG_HONOR_PI (1 << 26) /* honor PORTS_IMPL */ -#define AHCI_FLAG_IGN_SERR_INTERNAL (1 << 27) /* ignore SERR_INTERNAL */ -#define AHCI_FLAG_32BIT_ONLY (1 << 28) /* force 32bit */ - -#define ATA_SRST (1 << 2) /* software reset */ - -#define STATE_RUN 0 -#define STATE_RESET 1 - -#define SATA_SCR_SSTATUS_DET_NODEV 0x0 -#define SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP 0x3 - -#define SATA_SCR_SSTATUS_SPD_NODEV 0x00 -#define SATA_SCR_SSTATUS_SPD_GEN1 0x10 - -#define SATA_SCR_SSTATUS_IPM_NODEV 0x000 -#define SATA_SCR_SSTATUS_IPM_ACTIVE 0X100 - -#define AHCI_SCR_SCTL_DET 0xf - -#define SATA_FIS_TYPE_REGISTER_H2D 0x27 -#define SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER 0x80 -#define SATA_FIS_TYPE_REGISTER_D2H 0x34 -#define SATA_FIS_TYPE_PIO_SETUP 0x5f -#define SATA_FIS_TYPE_SDB 0xA1 - -#define AHCI_CMD_HDR_CMD_FIS_LEN 0x1f -#define AHCI_CMD_HDR_PRDT_LEN 16 - -#define SATA_SIGNATURE_CDROM 0xeb140101 -#define SATA_SIGNATURE_DISK 0x00000101 - -#define AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR 0x20 - /* Shouldn't this be 0x2c? */ - -#define AHCI_PORT_REGS_START_ADDR 0x100 -#define AHCI_PORT_ADDR_OFFSET_MASK 0x7f -#define AHCI_PORT_ADDR_OFFSET_LEN 0x80 - -#define AHCI_NUM_COMMAND_SLOTS 31 -#define AHCI_SUPPORTED_SPEED 20 -#define AHCI_SUPPORTED_SPEED_GEN1 1 -#define AHCI_VERSION_1_0 0x10000 - -#define AHCI_PROGMODE_MAJOR_REV_1 1 - -#define AHCI_COMMAND_TABLE_ACMD 0x40 - -#define AHCI_PRDT_SIZE_MASK 0x3fffff - -#define IDE_FEATURE_DMA 1 - -#define READ_FPDMA_QUEUED 0x60 -#define WRITE_FPDMA_QUEUED 0x61 -#define NCQ_NON_DATA 0x63 -#define RECEIVE_FPDMA_QUEUED 0x65 -#define SEND_FPDMA_QUEUED 0x64 - -#define NCQ_FIS_FUA_MASK 0x80 -#define NCQ_FIS_RARC_MASK 0x01 - -#define RES_FIS_DSFIS 0x00 -#define RES_FIS_PSFIS 0x20 -#define RES_FIS_RFIS 0x40 -#define RES_FIS_SDBFIS 0x58 -#define RES_FIS_UFIS 0x60 - -#define SATA_CAP_SIZE 0x8 -#define SATA_CAP_REV 0x2 -#define SATA_CAP_BAR 0x4 +typedef struct AHCIDevice AHCIDevice; typedef struct AHCIControlRegs { uint32_t cap; @@ -218,74 +36,6 @@ typedef struct AHCIControlRegs { uint32_t version; } AHCIControlRegs; -typedef struct AHCIPortRegs { - uint32_t lst_addr; - uint32_t lst_addr_hi; - uint32_t fis_addr; - uint32_t fis_addr_hi; - uint32_t irq_stat; - uint32_t irq_mask; - uint32_t cmd; - uint32_t unused0; - uint32_t tfdata; - uint32_t sig; - uint32_t scr_stat; - uint32_t scr_ctl; - uint32_t scr_err; - uint32_t scr_act; - uint32_t cmd_issue; - uint32_t reserved; -} AHCIPortRegs; - -typedef struct AHCICmdHdr { - uint16_t opts; - uint16_t prdtl; - uint32_t status; - uint64_t tbl_addr; - uint32_t reserved[4]; -} QEMU_PACKED AHCICmdHdr; - -typedef struct AHCI_SG { - uint64_t addr; - uint32_t reserved; - uint32_t flags_size; -} QEMU_PACKED AHCI_SG; - -typedef struct AHCIDevice AHCIDevice; - -typedef struct NCQTransferState { - AHCIDevice *drive; - BlockAIOCB *aiocb; - AHCICmdHdr *cmdh; - 
QEMUSGList sglist; - BlockAcctCookie acct; - uint32_t sector_count; - uint64_t lba; - uint8_t tag; - uint8_t cmd; - uint8_t slot; - bool used; - bool halt; -} NCQTransferState; - -struct AHCIDevice { - IDEDMA dma; - IDEBus port; - int port_no; - uint32_t port_state; - uint32_t finished; - AHCIPortRegs port_regs; - struct AHCIState *hba; - QEMUBH *check_bh; - uint8_t *lst; - uint8_t *res_fis; - bool done_atapi_packet; - int32_t busy_slot; - bool init_d2h_sent; - AHCICmdHdr *cur_cmd; - NCQTransferState ncq_tfs[AHCI_MAX_CMDS]; -}; - typedef struct AHCIState { DeviceState *container; @@ -300,78 +50,14 @@ typedef struct AHCIState { AddressSpace *as; } AHCIState; -typedef struct AHCIPCIState { - /*< private >*/ - PCIDevice parent_obj; - /*< public >*/ - - AHCIState ahci; -} AHCIPCIState; +typedef struct AHCIPCIState AHCIPCIState; #define TYPE_ICH9_AHCI "ich9-ahci" #define ICH_AHCI(obj) \ OBJECT_CHECK(AHCIPCIState, (obj), TYPE_ICH9_AHCI) -extern const VMStateDescription vmstate_ahci; - -#define VMSTATE_AHCI(_field, _state) { \ - .name = (stringify(_field)), \ - .size = sizeof(AHCIState), \ - .vmsd = &vmstate_ahci, \ - .flags = VMS_STRUCT, \ - .offset = vmstate_offset_value(_state, _field, AHCIState), \ -} - -/** - * NCQFrame is the same as a Register H2D FIS (described in SATA 3.2), - * but some fields have been re-mapped and re-purposed, as seen in - * SATA 3.2 section 13.6.4.1 ("READ FPDMA QUEUED") - * - * cmd_fis[3], feature 7:0, becomes sector count 7:0. - * cmd_fis[7], device 7:0, uses bit 7 as the Force Unit Access bit. - * cmd_fis[11], feature 15:8, becomes sector count 15:8. - * cmd_fis[12], count 7:0, becomes the NCQ TAG (7:3) and RARC bit (0) - * cmd_fis[13], count 15:8, becomes the priority value (7:6) - * bytes 16-19 become an le32 "auxiliary" field. 
- */ -typedef struct NCQFrame { - uint8_t fis_type; - uint8_t c; - uint8_t command; - uint8_t sector_count_low; /* (feature 7:0) */ - uint8_t lba0; - uint8_t lba1; - uint8_t lba2; - uint8_t fua; /* (device 7:0) */ - uint8_t lba3; - uint8_t lba4; - uint8_t lba5; - uint8_t sector_count_high; /* (feature 15:8) */ - uint8_t tag; /* (count 0:7) */ - uint8_t prio; /* (count 15:8) */ - uint8_t icc; - uint8_t control; - uint8_t aux0; - uint8_t aux1; - uint8_t aux2; - uint8_t aux3; -} QEMU_PACKED NCQFrame; - -typedef struct SDBFIS { - uint8_t type; - uint8_t flags; - uint8_t status; - uint8_t error; - uint32_t payload; -} QEMU_PACKED SDBFIS; - -void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports); -void ahci_init(AHCIState *s, DeviceState *qdev); -void ahci_uninit(AHCIState *s); - -void ahci_reset(AHCIState *s); - +int32_t ahci_get_num_ports(PCIDevice *dev); void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd); #define TYPE_SYSBUS_AHCI "sysbus-ahci" diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h index 39297961f3..e2321f1cc1 100644 --- a/include/hw/qdev-properties.h +++ b/include/hw/qdev-properties.h @@ -13,6 +13,7 @@ extern const PropertyInfo qdev_prop_uint16; extern const PropertyInfo qdev_prop_uint32; extern const PropertyInfo qdev_prop_int32; extern const PropertyInfo qdev_prop_uint64; +extern const PropertyInfo qdev_prop_int64; extern const PropertyInfo qdev_prop_size; extern const PropertyInfo qdev_prop_string; extern const PropertyInfo qdev_prop_chr; @@ -157,6 +158,8 @@ extern const PropertyInfo qdev_prop_link; DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_int32, int32_t) #define DEFINE_PROP_UINT64(_n, _s, _f, _d) \ DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_uint64, uint64_t) +#define DEFINE_PROP_INT64(_n, _s, _f, _d) \ + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_int64, int64_t) #define DEFINE_PROP_SIZE(_n, _s, _f, _d) \ DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size, uint64_t) #define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \ diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h index e00ddd7b5b..86c7f26106 100644 --- a/include/hw/xen/xen_common.h +++ b/include/hw/xen/xen_common.h @@ -78,6 +78,21 @@ static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom, extern xenforeignmemory_handle *xen_fmem; +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000 + +#define XEN_COMPAT_PHYSMAP +static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h, + uint32_t dom, void *addr, + int prot, int flags, size_t pages, + const xen_pfn_t arr[/*pages*/], + int err[/*pages*/]) +{ + assert(addr == NULL && flags == 0); + return xenforeignmemory_map(h, dom, prot, pages, arr, err); +} + +#endif + #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900 typedef xc_interface xendevicemodel_handle; diff --git a/include/migration/colo.h b/include/migration/colo.h index be6beba301..ff9874ea16 100644 --- a/include/migration/colo.h +++ b/include/migration/colo.h @@ -15,7 +15,6 @@ #include "qemu-common.h" -bool colo_supported(void); void colo_info_init(void); void migrate_start_colo_process(MigrationState *s); diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h index 9109657609..d056008c18 100644 --- a/include/qemu/throttle.h +++ b/include/qemu/throttle.h @@ -139,7 +139,7 @@ bool throttle_enabled(ThrottleConfig *cfg); bool throttle_is_valid(ThrottleConfig *cfg, Error **errp); void throttle_config(ThrottleState *ts, - ThrottleTimers *tt, + QEMUClockType clock_type, ThrottleConfig *cfg); void 
throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg); diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h index d9ea0cdb0f..4a3730596b 100644 --- a/include/sysemu/block-backend.h +++ b/include/sysemu/block-backend.h @@ -100,6 +100,7 @@ void blk_remove_all_bs(void); const char *blk_name(const BlockBackend *blk); BlockBackend *blk_by_name(const char *name); BlockBackend *blk_next(BlockBackend *blk); +BlockBackend *blk_all_next(BlockBackend *blk); bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp); void monitor_remove_blk(BlockBackend *blk); @@ -126,6 +127,7 @@ int blk_attach_dev(BlockBackend *blk, DeviceState *dev); void blk_attach_dev_legacy(BlockBackend *blk, void *dev); void blk_detach_dev(BlockBackend *blk, void *dev); void *blk_get_attached_dev(BlockBackend *blk); +char *blk_get_attached_dev_id(BlockBackend *blk); BlockBackend *blk_by_dev(void *dev); BlockBackend *blk_by_qdev_id(const char *id, Error **errp); void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque); diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h index 01daaad00c..bd4d49e0a4 100644 --- a/include/sysemu/xen-mapcache.h +++ b/include/sysemu/xen-mapcache.h @@ -21,7 +21,9 @@ uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size, ram_addr_t xen_ram_addr_from_mapcache(void *ptr); void xen_invalidate_map_cache_entry(uint8_t *buffer); void xen_invalidate_map_cache(void); - +uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size); #else static inline void xen_map_cache_init(phys_offset_to_gaddr_t f, @@ -50,6 +52,13 @@ static inline void xen_invalidate_map_cache(void) { } +static inline uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size) +{ + abort(); +} + #endif #endif /* XEN_MAPCACHE_H */ diff --git a/io/channel.c b/io/channel.c index cdf74540c1..1cfb8b33a2 100644 --- a/io/channel.c +++ b/io/channel.c @@ -279,15 +279,9 @@ static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc) void qio_channel_attach_aio_context(QIOChannel *ioc, AioContext *ctx) { - AioContext *old_ctx; - if (ioc->ctx == ctx) { - return; - } - - old_ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context(); - qio_channel_set_aio_fd_handler(ioc, old_ctx, NULL, NULL, NULL); + assert(!ioc->read_coroutine); + assert(!ioc->write_coroutine); ioc->ctx = ctx; - qio_channel_set_aio_fd_handlers(ioc); } void qio_channel_detach_aio_context(QIOChannel *ioc) diff --git a/linux-user/main.c b/linux-user/main.c index ad03c9e8b2..2b38d39d87 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -3037,16 +3037,13 @@ void cpu_loop(CPUAlphaState *env) abi_long sysret; while (1) { + bool arch_interrupt = true; + cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); process_queued_cpu_work(cs); - /* All of the traps imply a transition through PALcode, which - implies an REI instruction has been executed. Which means - that the intr_flag should be cleared. */ - env->intr_flag = 0; - switch (trapnr) { case EXCP_RESET: fprintf(stderr, "Reset requested. 
Exit\n"); @@ -3063,7 +3060,6 @@ void cpu_loop(CPUAlphaState *env) exit(EXIT_FAILURE); break; case EXCP_MMFAULT: - env->lock_addr = -1; info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID @@ -3072,7 +3068,6 @@ void cpu_loop(CPUAlphaState *env) queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; case EXCP_UNALIGN: - env->lock_addr = -1; info.si_signo = TARGET_SIGBUS; info.si_errno = 0; info.si_code = TARGET_BUS_ADRALN; @@ -3081,7 +3076,6 @@ void cpu_loop(CPUAlphaState *env) break; case EXCP_OPCDEC: do_sigill: - env->lock_addr = -1; info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLOPC; @@ -3089,7 +3083,6 @@ void cpu_loop(CPUAlphaState *env) queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; case EXCP_ARITH: - env->lock_addr = -1; info.si_signo = TARGET_SIGFPE; info.si_errno = 0; info.si_code = TARGET_FPE_FLTINV; @@ -3100,7 +3093,6 @@ void cpu_loop(CPUAlphaState *env) /* No-op. Linux simply re-enables the FPU. */ break; case EXCP_CALL_PAL: - env->lock_addr = -1; switch (env->error_code) { case 0x80: /* BPT */ @@ -3197,10 +3189,11 @@ void cpu_loop(CPUAlphaState *env) case EXCP_DEBUG: info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP); if (info.si_signo) { - env->lock_addr = -1; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + } else { + arch_interrupt = false; } break; case EXCP_INTERRUPT: @@ -3208,6 +3201,7 @@ void cpu_loop(CPUAlphaState *env) break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); + arch_interrupt = false; break; default: printf ("Unhandled trap: 0x%x\n", trapnr); @@ -3215,6 +3209,15 @@ void cpu_loop(CPUAlphaState *env) exit(EXIT_FAILURE); } process_pending_signals (env); + + /* Most of the traps imply a transition through PALcode, which + implies an REI instruction has been executed. Which means + that RX and LOCK_ADDR should be cleared. But there are a + few exceptions for traps internal to QEMU. */ + if (arch_interrupt) { + env->flags &= ~ENV_FLAG_RX_FLAG; + env->lock_addr = -1; + } } } #endif /* TARGET_ALPHA */ diff --git a/linux-user/signal.c b/linux-user/signal.c index 3d18d1b3ee..d68bd26013 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -3471,6 +3471,30 @@ static abi_ulong get_sigframe(struct target_sigaction *ka, return (sp - frame_size) & -8ul; } +/* Notice when we're in the middle of a gUSA region and reset. + Note that this will only occur for !parallel_cpus, as we will + translate such sequences differently in a parallel context. */ +static void unwind_gusa(CPUSH4State *regs) +{ + /* If the stack pointer is sufficiently negative, and we haven't + completed the sequence, then reset to the entry to the region. */ + /* ??? The SH4 kernel checks for and address above 0xC0000000. + However, the page mappings in qemu linux-user aren't as restricted + and we wind up with the normal stack mapped above 0xF0000000. + That said, there is no reason why the kernel should be allowing + a gUSA region that spans 1GB. Use a tighter check here, for what + can actually be enabled by the immediate move. */ + if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) { + /* Reset the PC to before the gUSA region, as computed from + R0 = region end, SP = -(region size), plus one more for the + insn that actually initializes SP to the region size. */ + regs->pc = regs->gregs[0] + regs->gregs[15] - 2; + + /* Reset the SP to the saved version in R1. 
*/ + regs->gregs[15] = regs->gregs[1]; + } +} + static void setup_sigcontext(struct target_sigcontext *sc, CPUSH4State *regs, unsigned long mask) { @@ -3525,6 +3549,7 @@ static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) __get_user(regs->fpul, &sc->sc_fpul); regs->tra = -1; /* disable syscall checks */ + regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); } static void setup_frame(int sig, struct target_sigaction *ka, @@ -3534,6 +3559,8 @@ static void setup_frame(int sig, struct target_sigaction *ka, abi_ulong frame_addr; int i; + unwind_gusa(regs); + frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); trace_user_setup_frame(regs, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { @@ -3566,6 +3593,7 @@ static void setup_frame(int sig, struct target_sigaction *ka, regs->gregs[5] = 0; regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); regs->pc = (unsigned long) ka->_sa_handler; + regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); unlock_user_struct(frame, frame_addr, 1); return; @@ -3583,6 +3611,8 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka, abi_ulong frame_addr; int i; + unwind_gusa(regs); + frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); trace_user_setup_rt_frame(regs, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { @@ -3626,6 +3656,7 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka, regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); regs->pc = (unsigned long) ka->_sa_handler; + regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); unlock_user_struct(frame, frame_addr, 1); return; diff --git a/migration/colo.c b/migration/colo.c index ef35f00c9a..a4255432ac 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -29,11 +29,6 @@ static bool vmstate_loading; #define COLO_BUFFER_BASE_SIZE (4 * 1024 * 1024) -bool colo_supported(void) -{ - return true; -} - bool migration_in_colo_state(void) { MigrationState *s = migrate_get_current(); diff --git a/migration/migration.c b/migration/migration.c index a0db40d364..76153914d1 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -102,14 +102,22 @@ enum mig_rp_message_type { static MigrationState *current_migration; +static bool migration_object_check(MigrationState *ms, Error **errp); + void migration_object_init(void) { MachineState *ms = MACHINE(qdev_get_machine()); + Error *err = NULL; /* This can only be called once. 
*/ assert(!current_migration); current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION)); + if (!migration_object_check(current_migration, &err)) { + error_report_err(err); + exit(1); + } + /* * We cannot really do this in migration_instance_init() since at * that time global properties are not yet applied, then this @@ -348,6 +356,7 @@ static void process_incoming_migration_co(void *opaque) migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED); error_report("load of migration failed: %s", strerror(-ret)); + qemu_fclose(mis->from_src_file); exit(EXIT_FAILURE); } mis->bh = qemu_bh_new(process_incoming_migration_bh, mis); @@ -403,9 +412,6 @@ MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp) continue; } #endif - if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) { - continue; - } if (head == NULL) { head = g_malloc0(sizeof(*caps)); caps = head; @@ -582,51 +588,49 @@ MigrationInfo *qmp_query_migrate(Error **errp) return info; } -void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, - Error **errp) +/** + * @migration_caps_check - check capability validity + * + * @cap_list: old capability list, array of bool + * @params: new capabilities to be applied soon + * @errp: set *errp if the check failed, with reason + * + * Returns true if check passed, otherwise false. + */ +static bool migrate_caps_check(bool *cap_list, + MigrationCapabilityStatusList *params, + Error **errp) { - MigrationState *s = migrate_get_current(); MigrationCapabilityStatusList *cap; - bool old_postcopy_cap = migrate_postcopy_ram(); + bool old_postcopy_cap; - if (migration_is_setup_or_active(s->state)) { - error_setg(errp, QERR_MIGRATION_ACTIVE); - return; - } + old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]; for (cap = params; cap; cap = cap->next) { + cap_list[cap->value->capability] = cap->value->state; + } + #ifndef CONFIG_LIVE_BLOCK_MIGRATION - if (cap->value->capability == MIGRATION_CAPABILITY_BLOCK - && cap->value->state) { - error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) " - "block migration"); - error_append_hint(errp, "Use drive_mirror+NBD instead.\n"); - continue; - } -#endif - if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) { - if (!colo_supported()) { - error_setg(errp, "COLO is not currently supported, please" - " configure with --enable-colo option in order to" - " support COLO feature"); - continue; - } - } - s->enabled_capabilities[cap->value->capability] = cap->value->state; + if (cap_list[MIGRATION_CAPABILITY_BLOCK]) { + error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) " + "block migration"); + error_append_hint(errp, "Use drive_mirror+NBD instead.\n"); + return false; } +#endif - if (migrate_postcopy_ram()) { - if (migrate_use_compression()) { + if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { + if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) { /* The decompression threads asynchronously write into RAM * rather than use the atomic copies needed to avoid * userfaulting. It should be possible to fix the decompression * threads for compatibility in future. */ - error_report("Postcopy is not currently compatible with " - "compression"); - s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] = - false; + error_setg(errp, "Postcopy is not currently compatible " + "with compression"); + return false; } + /* This check is reasonably expensive, so only when it's being * set the first time, also it's only the destination that needs * special support. 
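Taken together, the migration.c hunks above split validation out of the QMP setter: migrate_caps_check() operates on a plain bool array and reports conflicts through errp instead of printing a warning and quietly dropping a capability. A minimal, illustrative sketch of what the postcopy/compression conflict now looks like from a caller's side (all identifiers are the ones introduced by the patch; it assumes no migration is in progress):

    /* Illustrative only: with the check helper in place, requesting
     * postcopy-ram together with compress makes the command fail via
     * errp, where the old code merely called error_report() and reset
     * the postcopy-ram capability behind the caller's back. */
    MigrationCapabilityStatusList *caps = NULL;
    Error *err = NULL;

    caps = migrate_cap_add(caps, MIGRATION_CAPABILITY_POSTCOPY_RAM, true);
    caps = migrate_cap_add(caps, MIGRATION_CAPABILITY_COMPRESS, true);

    qmp_migrate_set_capabilities(caps, &err);
    /* err now describes the incompatibility */

    qapi_free_MigrationCapabilityStatusList(caps);
    error_free(err);

The same check helper is reused at startup by migration_object_check(), further below, to validate whatever capabilities and parameters were configured as object properties.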
@@ -636,96 +640,141 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, /* postcopy_ram_supported_by_host will have emitted a more * detailed message */ - error_report("Postcopy is not supported"); - s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] = - false; + error_setg(errp, "Postcopy is not supported"); + return false; } } + + return true; } -void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp) +void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, + Error **errp) { MigrationState *s = migrate_get_current(); + MigrationCapabilityStatusList *cap; + if (migration_is_setup_or_active(s->state)) { + error_setg(errp, QERR_MIGRATION_ACTIVE); + return; + } + + if (!migrate_caps_check(s->enabled_capabilities, params, errp)) { + return; + } + + for (cap = params; cap; cap = cap->next) { + s->enabled_capabilities[cap->value->capability] = cap->value->state; + } +} + +/* + * Check whether the parameters are valid. Error will be put into errp + * (if provided). Return true if valid, otherwise false. + */ +static bool migrate_params_check(MigrationParameters *params, Error **errp) +{ if (params->has_compress_level && (params->compress_level < 0 || params->compress_level > 9)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", "is invalid, it should be in the range of 0 to 9"); - return; + return false; } + if (params->has_compress_threads && (params->compress_threads < 1 || params->compress_threads > 255)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_threads", "is invalid, it should be in the range of 1 to 255"); - return; + return false; } + if (params->has_decompress_threads && (params->decompress_threads < 1 || params->decompress_threads > 255)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "decompress_threads", "is invalid, it should be in the range of 1 to 255"); - return; + return false; } + if (params->has_cpu_throttle_initial && (params->cpu_throttle_initial < 1 || params->cpu_throttle_initial > 99)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu_throttle_initial", "an integer in the range of 1 to 99"); - return; + return false; } + if (params->has_cpu_throttle_increment && (params->cpu_throttle_increment < 1 || params->cpu_throttle_increment > 99)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu_throttle_increment", "an integer in the range of 1 to 99"); - return; + return false; } + if (params->has_max_bandwidth && (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) { error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the" " range of 0 to %zu bytes/second", SIZE_MAX); - return; + return false; } + if (params->has_downtime_limit && (params->downtime_limit < 0 || params->downtime_limit > MAX_MIGRATE_DOWNTIME)) { error_setg(errp, "Parameter 'downtime_limit' expects an integer in " "the range of 0 to %d milliseconds", MAX_MIGRATE_DOWNTIME); - return; + return false; } + if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "x_checkpoint_delay", "is invalid, it should be positive"); + return false; } + return true; +} + +static void migrate_params_apply(MigrationParameters *params) +{ + MigrationState *s = migrate_get_current(); + if (params->has_compress_level) { s->parameters.compress_level = params->compress_level; } + if (params->has_compress_threads) { s->parameters.compress_threads = params->compress_threads; } + if (params->has_decompress_threads) { 
s->parameters.decompress_threads = params->decompress_threads; } + if (params->has_cpu_throttle_initial) { s->parameters.cpu_throttle_initial = params->cpu_throttle_initial; } + if (params->has_cpu_throttle_increment) { s->parameters.cpu_throttle_increment = params->cpu_throttle_increment; } + if (params->has_tls_creds) { g_free(s->parameters.tls_creds); s->parameters.tls_creds = g_strdup(params->tls_creds); } + if (params->has_tls_hostname) { g_free(s->parameters.tls_hostname); s->parameters.tls_hostname = g_strdup(params->tls_hostname); } + if (params->has_max_bandwidth) { s->parameters.max_bandwidth = params->max_bandwidth; if (s->to_dst_file) { @@ -733,6 +782,7 @@ void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp) s->parameters.max_bandwidth / XFER_LIMIT_RATIO); } } + if (params->has_downtime_limit) { s->parameters.downtime_limit = params->downtime_limit; } @@ -743,11 +793,22 @@ void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp) colo_checkpoint_notify(s); } } + if (params->has_block_incremental) { s->parameters.block_incremental = params->block_incremental; } } +void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp) +{ + if (!migrate_params_check(params, errp)) { + /* Invalid parameter */ + return; + } + + migrate_params_apply(params); +} + void qmp_migrate_start_postcopy(Error **errp) { @@ -781,14 +842,27 @@ void migrate_set_state(int *state, int old_state, int new_state) } } -void migrate_set_block_enabled(bool value, Error **errp) +static MigrationCapabilityStatusList *migrate_cap_add( + MigrationCapabilityStatusList *list, + MigrationCapability index, + bool state) { MigrationCapabilityStatusList *cap; cap = g_new0(MigrationCapabilityStatusList, 1); cap->value = g_new0(MigrationCapabilityStatus, 1); - cap->value->capability = MIGRATION_CAPABILITY_BLOCK; - cap->value->state = value; + cap->value->capability = index; + cap->value->state = state; + cap->next = list; + + return cap; +} + +void migrate_set_block_enabled(bool value, Error **errp) +{ + MigrationCapabilityStatusList *cap; + + cap = migrate_cap_add(NULL, MIGRATION_CAPABILITY_BLOCK, value); qmp_migrate_set_capabilities(cap, errp); qapi_free_MigrationCapabilityStatusList(cap); } @@ -2001,6 +2075,9 @@ void migration_global_dump(Monitor *mon) ms->send_configuration, ms->send_section_footer); } +#define DEFINE_PROP_MIG_CAP(name, x) \ + DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false) + static Property migration_properties[] = { DEFINE_PROP_BOOL("store-global-state", MigrationState, store_global_state, true), @@ -2009,6 +2086,45 @@ static Property migration_properties[] = { send_configuration, true), DEFINE_PROP_BOOL("send-section-footer", MigrationState, send_section_footer, true), + + /* Migration parameters */ + DEFINE_PROP_INT64("x-compress-level", MigrationState, + parameters.compress_level, + DEFAULT_MIGRATE_COMPRESS_LEVEL), + DEFINE_PROP_INT64("x-compress-threads", MigrationState, + parameters.compress_threads, + DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT), + DEFINE_PROP_INT64("x-decompress-threads", MigrationState, + parameters.decompress_threads, + DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT), + DEFINE_PROP_INT64("x-cpu-throttle-initial", MigrationState, + parameters.cpu_throttle_initial, + DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL), + DEFINE_PROP_INT64("x-cpu-throttle-increment", MigrationState, + parameters.cpu_throttle_increment, + DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT), + DEFINE_PROP_INT64("x-max-bandwidth", MigrationState, + 
parameters.max_bandwidth, MAX_THROTTLE), + DEFINE_PROP_INT64("x-downtime-limit", MigrationState, + parameters.downtime_limit, + DEFAULT_MIGRATE_SET_DOWNTIME), + DEFINE_PROP_INT64("x-checkpoint-delay", MigrationState, + parameters.x_checkpoint_delay, + DEFAULT_MIGRATE_X_CHECKPOINT_DELAY), + + /* Migration capabilities */ + DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), + DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL), + DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE), + DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS), + DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS), + DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS), + DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM), + DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO), + DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM), + DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK), + DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH), + DEFINE_PROP_END_OF_LIST(), }; @@ -2023,22 +2139,54 @@ static void migration_class_init(ObjectClass *klass, void *data) static void migration_instance_init(Object *obj) { MigrationState *ms = MIGRATION_OBJ(obj); + MigrationParameters *params = &ms->parameters; ms->state = MIGRATION_STATUS_NONE; ms->xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE; ms->mbps = -1; - ms->parameters = (MigrationParameters) { - .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL, - .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT, - .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT, - .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL, - .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT, - .max_bandwidth = MAX_THROTTLE, - .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME, - .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY, - }; - ms->parameters.tls_creds = g_strdup(""); - ms->parameters.tls_hostname = g_strdup(""); + + params->tls_hostname = g_strdup(""); + params->tls_creds = g_strdup(""); + + /* Set has_* up only for parameter checks */ + params->has_compress_level = true; + params->has_compress_threads = true; + params->has_decompress_threads = true; + params->has_cpu_throttle_initial = true; + params->has_cpu_throttle_increment = true; + params->has_max_bandwidth = true; + params->has_downtime_limit = true; + params->has_x_checkpoint_delay = true; + params->has_block_incremental = true; +} + +/* + * Return true if check pass, false otherwise. Error will be put + * inside errp if provided. 
+ */ +static bool migration_object_check(MigrationState *ms, Error **errp) +{ + MigrationCapabilityStatusList *head = NULL; + /* Assuming all off */ + bool cap_list[MIGRATION_CAPABILITY__MAX] = { 0 }, ret; + int i; + + if (!migrate_params_check(&ms->parameters, errp)) { + return false; + } + + for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { + if (ms->enabled_capabilities[i]) { + head = migrate_cap_add(head, i, true); + } + } + + ret = migrate_caps_check(cap_list, head, errp); + + /* It works with head == NULL */ + qapi_free_MigrationCapabilityStatusList(head); + + return ret; } static const TypeInfo migration_type = { diff --git a/migration/rdma.c b/migration/rdma.c index c6bc607a03..ca56594328 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -165,20 +165,6 @@ enum { RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */ }; -static const char *control_desc[] = { - [RDMA_CONTROL_NONE] = "NONE", - [RDMA_CONTROL_ERROR] = "ERROR", - [RDMA_CONTROL_READY] = "READY", - [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE", - [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST", - [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT", - [RDMA_CONTROL_COMPRESS] = "COMPRESS", - [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST", - [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT", - [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED", - [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST", - [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED", -}; /* * Memory and MR structures used to represent an IB Send/Recv work request. @@ -251,6 +237,30 @@ typedef struct QEMU_PACKED RDMADestBlock { uint32_t padding; } RDMADestBlock; +static const char *control_desc(unsigned int rdma_control) +{ + static const char *strs[] = { + [RDMA_CONTROL_NONE] = "NONE", + [RDMA_CONTROL_ERROR] = "ERROR", + [RDMA_CONTROL_READY] = "READY", + [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE", + [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST", + [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT", + [RDMA_CONTROL_COMPRESS] = "COMPRESS", + [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST", + [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT", + [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED", + [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST", + [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED", + }; + + if (rdma_control > RDMA_CONTROL_UNREGISTER_FINISHED) { + return "??BAD CONTROL VALUE??"; + } + + return strs[rdma_control]; +} + static uint64_t htonll(uint64_t v) { union { uint32_t lv[2]; uint64_t llv; } u; @@ -1466,6 +1476,56 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out, return 0; } +/* Wait for activity on the completion channel. + * Returns 0 on success, none-0 on error. + */ +static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) +{ + /* + * Coroutine doesn't start until migration_fd_process_incoming() + * so don't yield unless we know we're running inside of a coroutine. + */ + if (rdma->migration_started_on_destination) { + yield_until_fd_readable(rdma->comp_channel->fd); + } else { + /* This is the source side, we're in a separate thread + * or destination prior to migration_fd_process_incoming() + * we can't yield; so we have to poll the fd. + * But we need to be able to handle 'cancel' or an error + * without hanging forever. 
+ */ + while (!rdma->error_state && !rdma->received_error) { + GPollFD pfds[1]; + pfds[0].fd = rdma->comp_channel->fd; + pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR; + /* 0.1s timeout, should be fine for a 'cancel' */ + switch (qemu_poll_ns(pfds, 1, 100 * 1000 * 1000)) { + case 1: /* fd active */ + return 0; + + case 0: /* Timeout, go around again */ + break; + + default: /* Error of some type - + * I don't trust errno from qemu_poll_ns + */ + error_report("%s: poll failed", __func__); + return -EPIPE; + } + + if (migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) { + /* Bail out and let the cancellation happen */ + return -EPIPE; + } + } + } + + if (rdma->received_error) { + return -EPIPE; + } + return rdma->error_state; +} + /* * Block until the next work request has completed. * @@ -1513,22 +1573,21 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, } while (1) { - /* - * Coroutine doesn't start until migration_fd_process_incoming() - * so don't yield unless we know we're running inside of a coroutine. - */ - if (rdma->migration_started_on_destination) { - yield_until_fd_readable(rdma->comp_channel->fd); + ret = qemu_rdma_wait_comp_channel(rdma); + if (ret) { + goto err_block_for_wrid; } - if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) { + ret = ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx); + if (ret) { perror("ibv_get_cq_event"); goto err_block_for_wrid; } num_cq_events++; - if (ibv_req_notify_cq(cq, 0)) { + ret = -ibv_req_notify_cq(cq, 0); + if (ret) { goto err_block_for_wrid; } @@ -1564,6 +1623,8 @@ err_block_for_wrid: if (num_cq_events) { ibv_ack_cq_events(cq, num_cq_events); } + + rdma->error_state = ret; return ret; } @@ -1590,7 +1651,7 @@ static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf, .num_sge = 1, }; - trace_qemu_rdma_post_send_control(control_desc[head->type]); + trace_qemu_rdma_post_send_control(control_desc(head->type)); /* * We don't actually need to do a memcpy() in here if we used @@ -1669,16 +1730,16 @@ static int qemu_rdma_exchange_get_response(RDMAContext *rdma, network_to_control((void *) rdma->wr_data[idx].control); memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader)); - trace_qemu_rdma_exchange_get_response_start(control_desc[expecting]); + trace_qemu_rdma_exchange_get_response_start(control_desc(expecting)); if (expecting == RDMA_CONTROL_NONE) { - trace_qemu_rdma_exchange_get_response_none(control_desc[head->type], + trace_qemu_rdma_exchange_get_response_none(control_desc(head->type), head->type); } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) { error_report("Was expecting a %s (%d) control message" ", but got: %s (%d), length: %d", - control_desc[expecting], expecting, - control_desc[head->type], head->type, head->len); + control_desc(expecting), expecting, + control_desc(head->type), head->type, head->len); if (head->type == RDMA_CONTROL_ERROR) { rdma->received_error = true; } @@ -1788,7 +1849,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, } } - trace_qemu_rdma_exchange_send_waiting(control_desc[resp->type]); + trace_qemu_rdma_exchange_send_waiting(control_desc(resp->type)); ret = qemu_rdma_exchange_get_response(rdma, resp, resp->type, RDMA_WRID_DATA); @@ -1800,7 +1861,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, if (resp_idx) { *resp_idx = RDMA_WRID_DATA; } - trace_qemu_rdma_exchange_send_received(control_desc[resp->type]); + 
trace_qemu_rdma_exchange_send_received(control_desc(resp->type)); } rdma->control_ready_expected = 1; @@ -2208,7 +2269,9 @@ static void qemu_rdma_cleanup(RDMAContext *rdma) int ret, idx; if (rdma->cm_id && rdma->connected) { - if (rdma->error_state && !rdma->received_error) { + if ((rdma->error_state || + migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) && + !rdma->received_error) { RDMAControlHeader head = { .len = 0, .type = RDMA_CONTROL_ERROR, .repeat = 1, @@ -2365,6 +2428,12 @@ static int qemu_rdma_connect(RDMAContext *rdma, Error **errp) caps_to_network(&cap); + ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); + if (ret) { + ERROR(errp, "posting second control recv"); + goto err_rdma_source_connect; + } + ret = rdma_connect(rdma->cm_id, &conn_param); if (ret) { perror("rdma_connect"); @@ -2405,12 +2474,6 @@ static int qemu_rdma_connect(RDMAContext *rdma, Error **errp) rdma_ack_cm_event(cm_event); - ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); - if (ret) { - ERROR(errp, "posting second control recv!"); - goto err_rdma_source_connect; - } - rdma->control_ready_expected = 1; rdma->nb_sent = 0; return 0; @@ -3350,7 +3413,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque) ret = -EIO; goto out; default: - error_report("Unknown control message %s", control_desc[head.type]); + error_report("Unknown control message %s", control_desc(head.type)); ret = -EIO; goto out; } diff --git a/qapi/block-core.json b/qapi/block-core.json index c437aa50ef..ff8e2ba0cb 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -457,6 +457,9 @@ # # @device: The device name associated with the virtual device. # +# @qdev: The qdev ID, or if no ID is assigned, the QOM path of the block +# device. (since 2.10) +# # @type: This field is returned only for compatibility reasons, it should # not be used (always returns 'unknown') # @@ -482,7 +485,7 @@ # Since: 0.14.0 ## { 'struct': 'BlockInfo', - 'data': {'device': 'str', 'type': 'str', 'removable': 'bool', + 'data': {'device': 'str', '*qdev': 'str', 'type': 'str', 'removable': 'bool', 'locked': 'bool', '*inserted': 'BlockDeviceInfo', '*tray_open': 'bool', '*io-status': 'BlockDeviceIoStatus', '*dirty-bitmaps': ['BlockDirtyInfo'] } } @@ -577,6 +580,7 @@ # } # } # }, +# "qdev": "ide_disk", # "type":"unknown" # }, # { @@ -584,12 +588,15 @@ # "device":"ide1-cd0", # "locked":false, # "removable":true, +# "qdev": "/machine/unattached/device[23]", +# "tray_open": false, # "type":"unknown" # }, # { # "device":"floppy0", # "locked":false, # "removable":true, +# "qdev": "/machine/unattached/device[20]", # "type":"unknown" # }, # { diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx index ac5946bc4f..3763f13625 100644 --- a/qemu-img-cmds.hx +++ b/qemu-img-cmds.hx @@ -22,9 +22,9 @@ STEXI ETEXI DEF("create", img_create, - "create [-q] [--object objectdef] [-f fmt] [-b backing_file] [-F backing_fmt] [-o options] filename [size]") + "create [-q] [--object objectdef] [-f fmt] [-b backing_file] [-F backing_fmt] [-u] [-o options] filename [size]") STEXI -@item create [--object @var{objectdef}] [-q] [-f @var{fmt}] [-b @var{backing_file}] [-F @var{backing_fmt}] [-o @var{options}] @var{filename} [@var{size}] +@item create [--object @var{objectdef}] [-q] [-f @var{fmt}] [-b @var{backing_file}] [-F @var{backing_fmt}] [-u] [-o @var{options}] @var{filename} [@var{size}] ETEXI DEF("commit", img_commit, diff --git a/qemu-img.c b/qemu-img.c index 182e697f81..eb32b93e90 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -150,9 +150,11 @@ static 
void QEMU_NORETURN help(void) " 'snapshot_id_or_name' is deprecated, use 'snapshot_param'\n" " instead\n" " '-c' indicates that target image must be compressed (qcow format only)\n" - " '-u' enables unsafe rebasing. It is assumed that old and new backing file\n" - " match exactly. The image doesn't need a working backing file before\n" - " rebasing in this case (useful for renaming the backing file)\n" + " '-u' allows unsafe backing chains. For rebasing, it is assumed that old and\n" + " new backing file match exactly. The image doesn't need a working\n" + " backing file before rebasing in this case (useful for renaming the\n" + " backing file). For image creation, allow creating without attempting\n" + " to open the backing file.\n" " '-h' with or without a command shows this help and lists the supported formats\n" " '-p' show progress of command (only certain commands)\n" " '-q' use Quiet mode - do not print any output (except errors)\n" @@ -429,6 +431,7 @@ static int img_create(int argc, char **argv) char *options = NULL; Error *local_err = NULL; bool quiet = false; + int flags = 0; for(;;) { static const struct option long_options[] = { @@ -436,7 +439,7 @@ static int img_create(int argc, char **argv) {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":F:b:f:ho:q", + c = getopt_long(argc, argv, ":F:b:f:ho:qu", long_options, NULL); if (c == -1) { break; @@ -476,6 +479,9 @@ static int img_create(int argc, char **argv) case 'q': quiet = true; break; + case 'u': + flags |= BDRV_O_NO_BACKING; + break; case OPTION_OBJECT: { QemuOpts *opts; opts = qemu_opts_parse_noisily(&qemu_object_opts, @@ -528,7 +534,7 @@ static int img_create(int argc, char **argv) } bdrv_img_create(filename, fmt, base_filename, base_fmt, - options, img_size, 0, quiet, &local_err); + options, img_size, flags, quiet, &local_err); if (local_err) { error_reportf_err(local_err, "%s: ", filename); goto fail; diff --git a/qemu-img.texi b/qemu-img.texi index f11f6036ad..72dabd6b3e 100644 --- a/qemu-img.texi +++ b/qemu-img.texi @@ -233,7 +233,7 @@ If @code{-r} is specified, exit codes representing the image state refer to the state after (the attempt at) repairing it. That is, a successful @code{-r all} will yield the exit code 0, independently of the image state before. -@item create [-f @var{fmt}] [-b @var{backing_file}] [-F @var{backing_fmt}] [-o @var{options}] @var{filename} [@var{size}] +@item create [-f @var{fmt}] [-b @var{backing_file}] [-F @var{backing_fmt}] [-u] [-o @var{options}] @var{filename} [@var{size}] Create the new disk image @var{filename} of size @var{size} and format @var{fmt}. Depending on the file format, you can add one or more @var{options} @@ -244,6 +244,13 @@ only the differences from @var{backing_file}. No size needs to be specified in this case. @var{backing_file} will never be modified unless you use the @code{commit} monitor command (or qemu-img commit). +Note that a given backing file will be opened to check that it is valid. Use +the @code{-u} option to enable unsafe backing file mode, which means that the +image will be created even if the associated backing file cannot be opened. A +matching backing file must be created or additional options be used to make the +backing file specification valid when you want to use an image created this +way. + The size can also be specified using the @var{size} option with @code{-o}, it doesn't need to be specified separately in this case. 
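The qemu-img hunks above route the new -u flag into BDRV_O_NO_BACKING, so image creation no longer insists on opening the backing file it records. A small sketch of the corresponding block-layer call, mirroring what img_create() now passes down (file names and the size are illustrative; the size is given explicitly because the backing image is not opened to provide one):

    /* Sketch of "qemu-img create -u -f qcow2 -b base.qcow2 -F qcow2
     * overlay.qcow2 1G" expressed as the underlying call. */
    Error *local_err = NULL;

    bdrv_img_create("overlay.qcow2", "qcow2",
                    "base.qcow2", "qcow2",   /* backing file and its format */
                    NULL,                    /* no extra -o options */
                    (int64_t)1 << 30,        /* 1 GiB, stated explicitly */
                    BDRV_O_NO_BACKING,       /* the effect of -u */
                    false,                   /* not quiet */
                    &local_err);

Without -u, qemu-img keeps opening the backing file to validate it, as the updated qemu-img.texi text above notes.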
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 8186c9d379..76150f48d3 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -276,14 +276,15 @@ static void alpha_cpu_initfn(Object *obj) alpha_translate_init(); + env->lock_addr = -1; #if defined(CONFIG_USER_ONLY) - env->ps = PS_USER_MODE; + env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN; cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD | FPCR_UNFD | FPCR_INED | FPCR_DNOD | FPCR_DYN_NORMAL)); +#else + env->flags = ENV_FLAG_PAL_MODE | ENV_FLAG_FEN; #endif - env->lock_addr = -1; - env->fen = 1; } static void alpha_cpu_class_init(ObjectClass *oc, void *data) diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index 691ac00c0b..e95be2b34b 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -242,13 +242,11 @@ struct CPUAlphaState { uint8_t fpcr_dyn_round; uint8_t fpcr_flush_to_zero; - /* The Internal Processor Registers. Some of these we assume always - exist for use in user-mode. */ - uint8_t ps; - uint8_t intr_flag; - uint8_t pal_mode; - uint8_t fen; + /* Mask of PALmode, Processor State et al. Most of this gets copied + into the TranslatorBlock flags and controls code generation. */ + uint32_t flags; + /* The high 32-bits of the processor cycle counter. */ uint32_t pcc_ofs; /* These pass data from the exception logic in the translator and @@ -398,24 +396,37 @@ enum { }; /* Processor status constants. */ -enum { - /* Low 3 bits are interrupt mask level. */ - PS_INT_MASK = 7, +/* Low 3 bits are interrupt mask level. */ +#define PS_INT_MASK 7u - /* Bits 4 and 5 are the mmu mode. The VMS PALcode uses all 4 modes; - The Unix PALcode only uses bit 4. */ - PS_USER_MODE = 8 -}; +/* Bits 4 and 5 are the mmu mode. The VMS PALcode uses all 4 modes; + The Unix PALcode only uses bit 4. */ +#define PS_USER_MODE 8u + +/* CPUAlphaState->flags constants. These are layed out so that we + can set or reset the pieces individually by assigning to the byte, + or manipulated as a whole. */ + +#define ENV_FLAG_PAL_SHIFT 0 +#define ENV_FLAG_PS_SHIFT 8 +#define ENV_FLAG_RX_SHIFT 16 +#define ENV_FLAG_FEN_SHIFT 24 + +#define ENV_FLAG_PAL_MODE (1u << ENV_FLAG_PAL_SHIFT) +#define ENV_FLAG_PS_USER (PS_USER_MODE << ENV_FLAG_PS_SHIFT) +#define ENV_FLAG_RX_FLAG (1u << ENV_FLAG_RX_SHIFT) +#define ENV_FLAG_FEN (1u << ENV_FLAG_FEN_SHIFT) + +#define ENV_FLAG_TB_MASK \ + (ENV_FLAG_PAL_MODE | ENV_FLAG_PS_USER | ENV_FLAG_FEN) static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch) { - if (env->pal_mode) { - return MMU_KERNEL_IDX; - } else if (env->ps & PS_USER_MODE) { - return MMU_USER_IDX; - } else { - return MMU_KERNEL_IDX; + int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX; + if (env->flags & ENV_FLAG_PAL_MODE) { + ret = MMU_KERNEL_IDX; } + return ret; } enum { @@ -482,40 +493,12 @@ QEMU_NORETURN void alpha_cpu_unassigned_access(CPUState *cpu, hwaddr addr, int unused, unsigned size); #endif -/* Bits in TB->FLAGS that control how translation is processed. 
*/ -enum { - TB_FLAGS_PAL_MODE = 1, - TB_FLAGS_FEN = 2, - TB_FLAGS_USER_MODE = 8, - - TB_FLAGS_AMASK_SHIFT = 4, - TB_FLAGS_AMASK_BWX = AMASK_BWX << TB_FLAGS_AMASK_SHIFT, - TB_FLAGS_AMASK_FIX = AMASK_FIX << TB_FLAGS_AMASK_SHIFT, - TB_FLAGS_AMASK_CIX = AMASK_CIX << TB_FLAGS_AMASK_SHIFT, - TB_FLAGS_AMASK_MVI = AMASK_MVI << TB_FLAGS_AMASK_SHIFT, - TB_FLAGS_AMASK_TRAP = AMASK_TRAP << TB_FLAGS_AMASK_SHIFT, - TB_FLAGS_AMASK_PREFETCH = AMASK_PREFETCH << TB_FLAGS_AMASK_SHIFT, -}; - static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *pflags) { - int flags = 0; - *pc = env->pc; *cs_base = 0; - - if (env->pal_mode) { - flags = TB_FLAGS_PAL_MODE; - } else { - flags = env->ps & PS_USER_MODE; - } - if (env->fen) { - flags |= TB_FLAGS_FEN; - } - flags |= env->amask << TB_FLAGS_AMASK_SHIFT; - - *pflags = flags; + *pflags = env->flags & ENV_FLAG_TB_MASK; } #endif /* ALPHA_CPU_H */ diff --git a/target/alpha/helper.c b/target/alpha/helper.c index a5c308859b..34121f4cad 100644 --- a/target/alpha/helper.c +++ b/target/alpha/helper.c @@ -81,7 +81,7 @@ void helper_store_fpcr(CPUAlphaState *env, uint64_t val) static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg) { #ifndef CONFIG_USER_ONLY - if (env->pal_mode) { + if (env->flags & ENV_FLAG_PAL_MODE) { if (reg >= 8 && reg <= 14) { return &env->shadow[reg - 8]; } else if (reg == 25) { @@ -364,13 +364,13 @@ void alpha_cpu_do_interrupt(CPUState *cs) /* Remember where the exception happened. Emulate real hardware in that the low bit of the PC indicates PALmode. */ - env->exc_addr = env->pc | env->pal_mode; + env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE); /* Continue execution at the PALcode entry point. */ env->pc = env->palbr + i; /* Switch to PALmode. */ - env->pal_mode = 1; + env->flags |= ENV_FLAG_PAL_MODE; #endif /* !USER_ONLY */ } @@ -381,14 +381,14 @@ bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request) int idx = -1; /* We never take interrupts while in PALmode. */ - if (env->pal_mode) { + if (env->flags & ENV_FLAG_PAL_MODE) { return false; } /* Fall through the switch, collecting the highest priority interrupt that isn't masked by the processor status IPL. */ /* ??? This hard-codes the OSF/1 interrupt levels. */ - switch (env->ps & PS_INT_MASK) { + switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) { case 0 ... 
3: if (interrupt_request & CPU_INTERRUPT_HARD) { idx = EXCP_DEV_INTERRUPT; @@ -432,7 +432,7 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, int i; cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n", - env->pc, env->ps); + env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8)); for (i = 0; i < 31; i++) { cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i, linux_reg_names[i], cpu_alpha_load_gr(env, i)); diff --git a/target/alpha/machine.c b/target/alpha/machine.c index a102645315..0914ba5fc1 100644 --- a/target/alpha/machine.c +++ b/target/alpha/machine.c @@ -48,11 +48,7 @@ static VMStateField vmstate_env_fields[] = { VMSTATE_UINTTL(lock_addr, CPUAlphaState), VMSTATE_UINTTL(lock_value, CPUAlphaState), - VMSTATE_UINT8(ps, CPUAlphaState), - VMSTATE_UINT8(intr_flag, CPUAlphaState), - VMSTATE_UINT8(pal_mode, CPUAlphaState), - VMSTATE_UINT8(fen, CPUAlphaState), - + VMSTATE_UINT32(flags, CPUAlphaState), VMSTATE_UINT32(pcc_ofs, CPUAlphaState), VMSTATE_UINTTL(trap_arg0, CPUAlphaState), @@ -74,8 +70,8 @@ static VMStateField vmstate_env_fields[] = { static const VMStateDescription vmstate_env = { .name = "env", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, .fields = vmstate_env_fields, }; diff --git a/target/alpha/translate.c b/target/alpha/translate.c index 232af9e177..90e6d5285f 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -49,16 +49,18 @@ struct DisasContext { #ifndef CONFIG_USER_ONLY uint64_t palbr; #endif + uint32_t tbflags; int mem_idx; + /* implver and amask values for this CPU. */ + int implver; + int amask; + /* Current rounding mode for this TB. */ int tb_rm; /* Current flush-to-zero setting for this TB. */ int tb_ftz; - /* implver value for this CPU. */ - int implver; - /* The set of registers active in the current context. */ TCGv *ir; @@ -267,6 +269,27 @@ static TCGv dest_fpr(DisasContext *ctx, unsigned reg) } } +static int get_flag_ofs(unsigned shift) +{ + int ofs = offsetof(CPUAlphaState, flags); +#ifdef HOST_WORDS_BIGENDIAN + ofs += 3 - (shift / 8); +#else + ofs += shift / 8; +#endif + return ofs; +} + +static void ld_flag_byte(TCGv val, unsigned shift) +{ + tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift)); +} + +static void st_flag_byte(TCGv val, unsigned shift) +{ + tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift)); +} + static void gen_excp_1(int exception, int error_code) { TCGv_i32 tmp1, tmp2; @@ -451,7 +474,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, static bool in_superpage(DisasContext *ctx, int64_t addr) { #ifndef CONFIG_USER_ONLY - return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0 + return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1 && ((addr >> 41) & 3) == 2); #else @@ -542,16 +565,16 @@ static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond, static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp, int mask) { - TCGv cmp_tmp; - if (mask) { - cmp_tmp = tcg_temp_new(); - tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1); - } else { - cmp_tmp = load_gpr(ctx, ra); - } + TCGv tmp = tcg_temp_new(); + ExitStatus ret; - return gen_bcond_internal(ctx, cond, cmp_tmp, disp); + tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1); + ret = gen_bcond_internal(ctx, cond, tmp, disp); + tcg_temp_free(tmp); + return ret; + } + return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp); } /* Fold -0.0 for comparison with COND. 
*/ @@ -590,8 +613,12 @@ static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp) { TCGv cmp_tmp = tcg_temp_new(); + ExitStatus ret; + gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); - return gen_bcond_internal(ctx, cond, cmp_tmp, disp); + ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); + tcg_temp_free(cmp_tmp); + return ret; } static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) @@ -1123,16 +1150,15 @@ static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, static void gen_rx(DisasContext *ctx, int ra, int set) { - TCGv_i32 tmp; + TCGv tmp; if (ra != 31) { - tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env, - offsetof(CPUAlphaState, intr_flag)); + ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); } - tmp = tcg_const_i32(set); - tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag)); - tcg_temp_free_i32(tmp); + tmp = tcg_const_i64(set); + st_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); + tcg_temp_free(tmp); } static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) @@ -1166,8 +1192,7 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) #ifndef CONFIG_USER_ONLY /* Privileged PAL code */ - if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) { - TCGv tmp; + if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) { switch (palcode) { case 0x01: /* CFLUSH */ @@ -1197,14 +1222,15 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) /* SWPIPL */ /* Note that we already know we're in kernel mode, so we know that PS only contains the 3 IPL bits. */ - tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env, - offsetof(CPUAlphaState, ps)); + ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); /* But make sure and store only the 3 IPL bits from the user. */ - tmp = tcg_temp_new(); - tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); - tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps)); - tcg_temp_free(tmp); + { + TCGv tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); + st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); + tcg_temp_free(tmp); + } /* Allow interrupts to be recognized right away. 
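For readers following the SWPIPL hunk above, its effect on the packed flags word can be restated in plain C as below (swpipl() is an illustrative helper, not part of the patch): the old PS byte is handed back in v0 and only the three IPL bits supplied by the caller are written back.

static uint64_t swpipl(CPUAlphaState *env, uint64_t new_ps)
{
    uint64_t old = extract32(env->flags, ENV_FLAG_PS_SHIFT, 8);

    env->flags = deposit32(env->flags, ENV_FLAG_PS_SHIFT, 8,
                           new_ps & PS_INT_MASK);
    return old;
}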
*/ tcg_gen_movi_i64(cpu_pc, ctx->pc); @@ -1212,9 +1238,9 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) case 0x36: /* RDPS */ - tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env, - offsetof(CPUAlphaState, ps)); + ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); break; + case 0x38: /* WRUSP */ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, @@ -1233,9 +1259,12 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) case 0x3E: /* WTINT */ - tmp = tcg_const_i64(1); - tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) + - offsetof(CPUState, halted)); + { + TCGv_i32 tmp = tcg_const_i32(1); + tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) + + offsetof(CPUState, halted)); + tcg_temp_free_i32(tmp); + } tcg_gen_movi_i64(ctx->ir[IR_V0], 0); return gen_excp(ctx, EXCP_HALTED, 0); @@ -1257,11 +1286,11 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) uint64_t exc_addr = ctx->pc; uint64_t entry = ctx->palbr; - if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { + if (ctx->tbflags & ENV_FLAG_PAL_MODE) { exc_addr |= 1; } else { tcg_gen_movi_i64(tmp, 1); - tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode)); + st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); } tcg_gen_movi_i64(tmp, exc_addr); @@ -1291,14 +1320,11 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) #ifndef CONFIG_USER_ONLY -#define PR_BYTE 0x100000 #define PR_LONG 0x200000 static int cpu_pr_data(int pr) { switch (pr) { - case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE; - case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE; case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG; case 3: return offsetof(CPUAlphaState, trap_arg0); case 4: return offsetof(CPUAlphaState, trap_arg1); @@ -1348,14 +1374,19 @@ static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno) } break; + case 0: /* PS */ + ld_flag_byte(va, ENV_FLAG_PS_SHIFT); + break; + case 1: /* FEN */ + ld_flag_byte(va, ENV_FLAG_FEN_SHIFT); + break; + default: /* The basic registers are data only, and unknown registers are read-zero, write-ignore. */ data = cpu_pr_data(regno); if (data == 0) { tcg_gen_movi_i64(va, 0); - } else if (data & PR_BYTE) { - tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE); } else if (data & PR_LONG) { tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG); } else { @@ -1369,7 +1400,6 @@ static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno) static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno) { - TCGv tmp; int data; switch (regno) { @@ -1385,9 +1415,12 @@ static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno) case 253: /* WAIT */ - tmp = tcg_const_i64(1); - tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) + - offsetof(CPUState, halted)); + { + TCGv_i32 tmp = tcg_const_i32(1); + tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) + + offsetof(CPUState, halted)); + tcg_temp_free_i32(tmp); + } return gen_excp(ctx, EXCP_HALTED, 0); case 252: @@ -1415,14 +1448,19 @@ static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno) tcg_gen_mov_i64(cpu_std_ir[regno], vb); break; + case 0: /* PS */ + st_flag_byte(vb, ENV_FLAG_PS_SHIFT); + break; + case 1: /* FEN */ + st_flag_byte(vb, ENV_FLAG_FEN_SHIFT); + break; + default: /* The basic registers are data only, and unknown registers are read-zero, write-ignore. 
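The WTINT and WAIT hunks above also correct the width of the store into cs->halted (a 32-bit CPUState field) by using an i32 temporary. The negative-offset expression they keep is precomputed pointer arithmetic; at the C level it is equivalent to the sketch below (set_halted is an illustrative name -- generated TCG code cannot call container_of(), which is why the offsets are folded into a single tcg_gen_st_i32).

static void set_halted(CPUAlphaState *env)
{
    AlphaCPU *cpu = container_of(env, AlphaCPU, env);
    CPUState *cs = CPU(cpu);

    /* The field addressed by
       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted). */
    cs->halted = 1;
}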
*/ data = cpu_pr_data(regno); if (data != 0) { - if (data & PR_BYTE) { - tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE); - } else if (data & PR_LONG) { + if (data & PR_LONG) { tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG); } else { tcg_gen_st_i64(vb, cpu_env, data); @@ -1442,9 +1480,16 @@ static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno) } \ } while (0) +#define REQUIRE_AMASK(FLAG) \ + do { \ + if ((ctx->amask & AMASK_##FLAG) == 0) { \ + goto invalid_opc; \ + } \ + } while (0) + #define REQUIRE_TB_FLAG(FLAG) \ do { \ - if ((ctx->tb->flags & (FLAG)) == 0) { \ + if ((ctx->tbflags & (FLAG)) == 0) { \ goto invalid_opc; \ } \ } while (0) @@ -1532,7 +1577,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x0A: /* LDBU */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_AMASK(BWX); gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); break; case 0x0B: @@ -1541,17 +1586,17 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x0C: /* LDWU */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_AMASK(BWX); gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); break; case 0x0D: /* STW */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_AMASK(BWX); gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); break; case 0x0E: /* STB */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_AMASK(BWX); gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); break; case 0x0F: @@ -1832,10 +1877,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x61: /* AMASK */ REQUIRE_REG_31(ra); - { - uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT; - tcg_gen_andi_i64(vc, vb, ~amask); - } + tcg_gen_andi_i64(vc, vb, ~ctx->amask); break; case 0x64: /* CMOVLE */ @@ -2048,7 +2090,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x14: - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX); + REQUIRE_AMASK(FIX); vc = dest_fpr(ctx, rc); switch (fpfn) { /* fn11 & 0x3F */ case 0x04: @@ -2424,7 +2466,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x19: /* HW_MFPR (PALcode) */ #ifndef CONFIG_USER_ONLY - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); va = dest_gpr(ctx, ra); ret = gen_mfpr(ctx, va, insn & 0xffff); break; @@ -2446,7 +2488,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x1B: /* HW_LD (PALcode) */ #ifndef CONFIG_USER_ONLY - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); { TCGv addr = tcg_temp_new(); vb = load_gpr(ctx, rb); @@ -2525,14 +2567,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) vc = dest_gpr(ctx, rc); if (fn7 == 0x70) { /* FTOIT */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX); + REQUIRE_AMASK(FIX); REQUIRE_REG_31(rb); va = load_fpr(ctx, ra); tcg_gen_mov_i64(vc, va); break; } else if (fn7 == 0x78) { /* FTOIS */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX); + REQUIRE_AMASK(FIX); REQUIRE_REG_31(rb); t32 = tcg_temp_new_i32(); va = load_fpr(ctx, ra); @@ -2546,117 +2588,117 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) switch (fn7) { case 0x00: /* SEXTB */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_AMASK(BWX); REQUIRE_REG_31(ra); tcg_gen_ext8s_i64(vc, vb); break; case 0x01: /* SEXTW */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_AMASK(BWX); REQUIRE_REG_31(ra); tcg_gen_ext16s_i64(vc, vb); break; case 0x30: /* CTPOP */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_AMASK(CIX); REQUIRE_REG_31(ra); REQUIRE_NO_LIT; 
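Since both REQUIRE_AMASK and the AMASK opcode now key off the cached ctx->amask, it may help to restate the architectural contract in plain C (amask_result is an illustrative name): a set bit in env->amask means the feature is implemented, and AMASK clears exactly those bits from the guest-supplied mask, so a zero result means every requested feature is present.

/* AMASK semantics; matches tcg_gen_andi_i64(vc, vb, ~ctx->amask)
   in the 0x61 case above. */
static uint64_t amask_result(uint64_t requested, uint64_t cpu_amask)
{
    return requested & ~cpu_amask;
}

/* e.g. on a BWX-capable CPU: amask_result(AMASK_BWX, env->amask) == 0 */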
tcg_gen_ctpop_i64(vc, vb); break; case 0x31: /* PERR */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); REQUIRE_NO_LIT; va = load_gpr(ctx, ra); gen_helper_perr(vc, va, vb); break; case 0x32: /* CTLZ */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_AMASK(CIX); REQUIRE_REG_31(ra); REQUIRE_NO_LIT; tcg_gen_clzi_i64(vc, vb, 64); break; case 0x33: /* CTTZ */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_AMASK(CIX); REQUIRE_REG_31(ra); REQUIRE_NO_LIT; tcg_gen_ctzi_i64(vc, vb, 64); break; case 0x34: /* UNPKBW */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); REQUIRE_REG_31(ra); REQUIRE_NO_LIT; gen_helper_unpkbw(vc, vb); break; case 0x35: /* UNPKBL */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); REQUIRE_REG_31(ra); REQUIRE_NO_LIT; gen_helper_unpkbl(vc, vb); break; case 0x36: /* PKWB */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); REQUIRE_REG_31(ra); REQUIRE_NO_LIT; gen_helper_pkwb(vc, vb); break; case 0x37: /* PKLB */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); REQUIRE_REG_31(ra); REQUIRE_NO_LIT; gen_helper_pklb(vc, vb); break; case 0x38: /* MINSB8 */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); va = load_gpr(ctx, ra); gen_helper_minsb8(vc, va, vb); break; case 0x39: /* MINSW4 */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); va = load_gpr(ctx, ra); gen_helper_minsw4(vc, va, vb); break; case 0x3A: /* MINUB8 */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); va = load_gpr(ctx, ra); gen_helper_minub8(vc, va, vb); break; case 0x3B: /* MINUW4 */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); va = load_gpr(ctx, ra); gen_helper_minuw4(vc, va, vb); break; case 0x3C: /* MAXUB8 */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); va = load_gpr(ctx, ra); gen_helper_maxub8(vc, va, vb); break; case 0x3D: /* MAXUW4 */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); va = load_gpr(ctx, ra); gen_helper_maxuw4(vc, va, vb); break; case 0x3E: /* MAXSB8 */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); va = load_gpr(ctx, ra); gen_helper_maxsb8(vc, va, vb); break; case 0x3F: /* MAXSW4 */ - REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_AMASK(MVI); va = load_gpr(ctx, ra); gen_helper_maxsw4(vc, va, vb); break; @@ -2668,7 +2710,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x1D: /* HW_MTPR (PALcode) */ #ifndef CONFIG_USER_ONLY - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); vb = load_gpr(ctx, rb); ret = gen_mtpr(ctx, vb, insn & 0xffff); break; @@ -2679,7 +2721,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x1E: /* HW_RET (PALcode) */ #ifndef CONFIG_USER_ONLY - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); if (rb == 31) { /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return address from EXC_ADDR. This turns out to be useful for our @@ -2689,12 +2731,13 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) } else { vb = load_gpr(ctx, rb); } + tcg_gen_movi_i64(cpu_lock_addr, -1); tmp = tcg_temp_new(); tcg_gen_movi_i64(tmp, 0); - tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag)); - tcg_gen_movi_i64(cpu_lock_addr, -1); + st_flag_byte(tmp, ENV_FLAG_RX_SHIFT); tcg_gen_andi_i64(tmp, vb, 1); - tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode)); + st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); + tcg_temp_free(tmp); tcg_gen_andi_i64(cpu_pc, vb, ~3); /* Allow interrupts to be recognized right away. 
*/ ret = EXIT_PC_UPDATED_NOCHAIN; @@ -2706,7 +2749,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x1F: /* HW_ST (PALcode) */ #ifndef CONFIG_USER_ONLY - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); { switch ((insn >> 12) & 0xF) { case 0x0: @@ -2927,15 +2970,17 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb) ctx.tb = tb; ctx.pc = pc_start; + ctx.tbflags = tb->flags; ctx.mem_idx = cpu_mmu_index(env, false); ctx.implver = env->implver; + ctx.amask = env->amask; ctx.singlestep_enabled = cs->singlestep_enabled; #ifdef CONFIG_USER_ONLY ctx.ir = cpu_std_ir; #else ctx.palbr = env->palbr; - ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir); + ctx.ir = (ctx.tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir); #endif /* ??? Every TB begins with unset rounding mode, to be initialized on @@ -2968,6 +3013,8 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb) } gen_tb_start(tb); + tcg_clear_temp_count(); + do { tcg_gen_insn_start(ctx.pc); num_insns++; @@ -2990,6 +3037,10 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb) ret = translate_one(ctxp, insn); free_context_temps(ctxp); + if (tcg_check_temp_count()) { + qemu_log("TCG temporary leak before "TARGET_FMT_lx"\n", ctx.pc); + } + /* If we reach a page boundary, are single stepping, or exhaust instruction count, stop generation. */ if (ret == NO_EXIT diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 9da7e1ed38..8536f6d002 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -39,7 +39,7 @@ static void superh_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) SuperHCPU *cpu = SUPERH_CPU(cs); cpu->env.pc = tb->pc; - cpu->env.flags = tb->flags; + cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK; } static bool superh_cpu_has_work(CPUState *cs) diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index ffb91687b8..3c47f0de89 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -96,6 +96,21 @@ #define DELAY_SLOT_CONDITIONAL (1 << 1) #define DELAY_SLOT_RTE (1 << 2) +#define TB_FLAG_PENDING_MOVCA (1 << 3) + +#define GUSA_SHIFT 4 +#ifdef CONFIG_USER_ONLY +#define GUSA_EXCLUSIVE (1 << 12) +#define GUSA_MASK ((0xff << GUSA_SHIFT) | GUSA_EXCLUSIVE) +#else +/* Provide dummy versions of the above to allow tests against tbflags + to be elided while avoiding ifdefs. */ +#define GUSA_EXCLUSIVE 0 +#define GUSA_MASK 0 +#endif + +#define TB_FLAG_ENVFLAGS_MASK (DELAY_SLOT_MASK | GUSA_MASK) + typedef struct tlb_t { uint32_t vpn; /* virtual page number */ uint32_t ppn; /* physical page number */ @@ -366,8 +381,6 @@ static inline int cpu_ptel_pr (uint32_t ptel) #define PTEA_TC (1 << 3) #define cpu_ptea_tc(ptea) (((ptea) & PTEA_TC) >> 3) -#define TB_FLAG_PENDING_MOVCA (1 << 4) - static inline target_ulong cpu_read_sr(CPUSH4State *env) { return env->sr | (env->sr_m << SR_M) | @@ -387,12 +400,13 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { *pc = env->pc; - *cs_base = 0; - *flags = (env->flags & DELAY_SLOT_MASK) /* Bits 0- 2 */ + /* For a gUSA region, notice the end of the region. */ + *cs_base = env->flags & GUSA_MASK ? env->gregs[0] : 0; + *flags = env->flags /* TB_FLAG_ENVFLAGS_MASK: bits 0-2, 4-12 */ | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */ | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */ | (env->sr & (1u << SR_FD)) /* Bit 15 */ - | (env->movcal_backup ? 
TB_FLAG_PENDING_MOVCA : 0); /* Bit 4 */ + | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */ } #endif /* SH4_CPU_H */ diff --git a/target/sh4/helper.h b/target/sh4/helper.h index dce859caea..1e768fcbc7 100644 --- a/target/sh4/helper.h +++ b/target/sh4/helper.h @@ -6,6 +6,7 @@ DEF_HELPER_1(raise_slot_fpu_disable, noreturn, env) DEF_HELPER_1(debug, noreturn, env) DEF_HELPER_1(sleep, noreturn, env) DEF_HELPER_2(trapa, noreturn, env, i32) +DEF_HELPER_1(exclusive, noreturn, env) DEF_HELPER_3(movcal, void, env, i32, i32) DEF_HELPER_1(discard_movcal_backup, void, env) @@ -16,17 +17,15 @@ DEF_HELPER_3(macw, void, env, i32, i32) DEF_HELPER_2(ld_fpscr, void, env, i32) -DEF_HELPER_FLAGS_1(fabs_FT, TCG_CALL_NO_RWG_SE, f32, f32) -DEF_HELPER_FLAGS_1(fabs_DT, TCG_CALL_NO_RWG_SE, f64, f64) DEF_HELPER_FLAGS_3(fadd_FT, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fadd_DT, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_2(fcnvsd_FT_DT, TCG_CALL_NO_WG, f64, env, f32) DEF_HELPER_FLAGS_2(fcnvds_DT_FT, TCG_CALL_NO_WG, f32, env, f64) -DEF_HELPER_3(fcmp_eq_FT, void, env, f32, f32) -DEF_HELPER_3(fcmp_eq_DT, void, env, f64, f64) -DEF_HELPER_3(fcmp_gt_FT, void, env, f32, f32) -DEF_HELPER_3(fcmp_gt_DT, void, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmp_eq_FT, TCG_CALL_NO_WG, i32, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmp_eq_DT, TCG_CALL_NO_WG, i32, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmp_gt_FT, TCG_CALL_NO_WG, i32, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmp_gt_DT, TCG_CALL_NO_WG, i32, env, f64, f64) DEF_HELPER_FLAGS_3(fdiv_FT, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fdiv_DT, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_2(float_FT, TCG_CALL_NO_WG, f32, env, i32) @@ -34,11 +33,11 @@ DEF_HELPER_FLAGS_2(float_DT, TCG_CALL_NO_WG, f64, env, i32) DEF_HELPER_FLAGS_4(fmac_FT, TCG_CALL_NO_WG, f32, env, f32, f32, f32) DEF_HELPER_FLAGS_3(fmul_FT, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fmul_DT, TCG_CALL_NO_WG, f64, env, f64, f64) -DEF_HELPER_FLAGS_1(fneg_T, TCG_CALL_NO_RWG_SE, f32, f32) DEF_HELPER_FLAGS_3(fsub_FT, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fsub_DT, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_2(fsqrt_FT, TCG_CALL_NO_WG, f32, env, f32) DEF_HELPER_FLAGS_2(fsqrt_DT, TCG_CALL_NO_WG, f64, env, f64) +DEF_HELPER_FLAGS_2(fsrra_FT, TCG_CALL_NO_WG, f32, env, f32) DEF_HELPER_FLAGS_2(ftrc_FT, TCG_CALL_NO_WG, i32, env, f32) DEF_HELPER_FLAGS_2(ftrc_DT, TCG_CALL_NO_WG, i32, env, f64) DEF_HELPER_3(fipr, void, env, i32, i32) diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c index 528a40ac1d..d798f239cf 100644 --- a/target/sh4/op_helper.c +++ b/target/sh4/op_helper.c @@ -115,6 +115,12 @@ void helper_trapa(CPUSH4State *env, uint32_t tra) raise_exception(env, 0x160, 0); } +void helper_exclusive(CPUSH4State *env) +{ + /* We do not want cpu_restore_state to run. 
*/ + cpu_loop_exit_atomic(ENV_GET_CPU(env), 0); +} + void helper_movcal(CPUSH4State *env, uint32_t address, uint32_t value) { if (cpu_sh4_is_cached (env, address)) @@ -219,29 +225,29 @@ static void update_fpscr(CPUSH4State *env, uintptr_t retaddr) xcpt = get_float_exception_flags(&env->fp_status); - /* Clear the flag entries */ - env->fpscr &= ~FPSCR_FLAG_MASK; + /* Clear the cause entries */ + env->fpscr &= ~FPSCR_CAUSE_MASK; if (unlikely(xcpt)) { if (xcpt & float_flag_invalid) { - env->fpscr |= FPSCR_FLAG_V; + env->fpscr |= FPSCR_CAUSE_V; } if (xcpt & float_flag_divbyzero) { - env->fpscr |= FPSCR_FLAG_Z; + env->fpscr |= FPSCR_CAUSE_Z; } if (xcpt & float_flag_overflow) { - env->fpscr |= FPSCR_FLAG_O; + env->fpscr |= FPSCR_CAUSE_O; } if (xcpt & float_flag_underflow) { - env->fpscr |= FPSCR_FLAG_U; + env->fpscr |= FPSCR_CAUSE_U; } if (xcpt & float_flag_inexact) { - env->fpscr |= FPSCR_FLAG_I; + env->fpscr |= FPSCR_CAUSE_I; } - /* Accumulate in cause entries */ - env->fpscr |= (env->fpscr & FPSCR_FLAG_MASK) - << (FPSCR_CAUSE_SHIFT - FPSCR_FLAG_SHIFT); + /* Accumulate in flag entries */ + env->fpscr |= (env->fpscr & FPSCR_CAUSE_MASK) + >> (FPSCR_CAUSE_SHIFT - FPSCR_FLAG_SHIFT); /* Generate an exception if enabled */ cause = (env->fpscr & FPSCR_CAUSE_MASK) >> FPSCR_CAUSE_SHIFT; @@ -252,16 +258,6 @@ static void update_fpscr(CPUSH4State *env, uintptr_t retaddr) } } -float32 helper_fabs_FT(float32 t0) -{ - return float32_abs(t0); -} - -float64 helper_fabs_DT(float64 t0) -{ - return float64_abs(t0); -} - float32 helper_fadd_FT(CPUSH4State *env, float32 t0, float32 t1) { set_float_exception_flags(0, &env->fp_status); @@ -278,56 +274,44 @@ float64 helper_fadd_DT(CPUSH4State *env, float64 t0, float64 t1) return t0; } -void helper_fcmp_eq_FT(CPUSH4State *env, float32 t0, float32 t1) +uint32_t helper_fcmp_eq_FT(CPUSH4State *env, float32 t0, float32 t1) { int relation; set_float_exception_flags(0, &env->fp_status); relation = float32_compare(t0, t1, &env->fp_status); - if (unlikely(relation == float_relation_unordered)) { - update_fpscr(env, GETPC()); - } else { - env->sr_t = (relation == float_relation_equal); - } + update_fpscr(env, GETPC()); + return relation == float_relation_equal; } -void helper_fcmp_eq_DT(CPUSH4State *env, float64 t0, float64 t1) +uint32_t helper_fcmp_eq_DT(CPUSH4State *env, float64 t0, float64 t1) { int relation; set_float_exception_flags(0, &env->fp_status); relation = float64_compare(t0, t1, &env->fp_status); - if (unlikely(relation == float_relation_unordered)) { - update_fpscr(env, GETPC()); - } else { - env->sr_t = (relation == float_relation_equal); - } + update_fpscr(env, GETPC()); + return relation == float_relation_equal; } -void helper_fcmp_gt_FT(CPUSH4State *env, float32 t0, float32 t1) +uint32_t helper_fcmp_gt_FT(CPUSH4State *env, float32 t0, float32 t1) { int relation; set_float_exception_flags(0, &env->fp_status); relation = float32_compare(t0, t1, &env->fp_status); - if (unlikely(relation == float_relation_unordered)) { - update_fpscr(env, GETPC()); - } else { - env->sr_t = (relation == float_relation_greater); - } + update_fpscr(env, GETPC()); + return relation == float_relation_greater; } -void helper_fcmp_gt_DT(CPUSH4State *env, float64 t0, float64 t1) +uint32_t helper_fcmp_gt_DT(CPUSH4State *env, float64 t0, float64 t1) { int relation; set_float_exception_flags(0, &env->fp_status); relation = float64_compare(t0, t1, &env->fp_status); - if (unlikely(relation == float_relation_unordered)) { - update_fpscr(env, GETPC()); - } else { - env->sr_t = (relation == 
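The update_fpscr() rewrite above reverses the accumulation direction, which is easy to misread in diff form: the cause bits are rebuilt from scratch for every FP operation and only then folded down into the sticky flag field. A minimal restatement (fold_cause_into_flags is an illustrative name; only the invalid case is spelled out):

static void fold_cause_into_flags(CPUSH4State *env, int softfloat_flags)
{
    env->fpscr &= ~FPSCR_CAUSE_MASK;            /* cause is per-op */
    if (softfloat_flags & float_flag_invalid) {
        env->fpscr |= FPSCR_CAUSE_V;
    }
    /* ...likewise for Z, O, U and I... */
    env->fpscr |= (env->fpscr & FPSCR_CAUSE_MASK)
                  >> (FPSCR_CAUSE_SHIFT - FPSCR_FLAG_SHIFT);  /* sticky */
}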
float_relation_greater); - } + update_fpscr(env, GETPC()); + return relation == float_relation_greater; } float64 helper_fcnvsd_FT_DT(CPUSH4State *env, float32 t0) @@ -406,11 +390,6 @@ float64 helper_fmul_DT(CPUSH4State *env, float64 t0, float64 t1) return t0; } -float32 helper_fneg_T(float32 t0) -{ - return float32_chs(t0); -} - float32 helper_fsqrt_FT(CPUSH4State *env, float32 t0) { set_float_exception_flags(0, &env->fp_status); @@ -427,6 +406,22 @@ float64 helper_fsqrt_DT(CPUSH4State *env, float64 t0) return t0; } +float32 helper_fsrra_FT(CPUSH4State *env, float32 t0) +{ + set_float_exception_flags(0, &env->fp_status); + /* "Approximate" 1/sqrt(x) via actual computation. */ + t0 = float32_sqrt(t0, &env->fp_status); + t0 = float32_div(float32_one, t0, &env->fp_status); + /* Since this is supposed to be an approximation, an imprecision + exception is required. One supposes this also follows the usual + IEEE rule that other exceptions take precidence. */ + if (get_float_exception_flags(&env->fp_status) == 0) { + set_float_exception_flags(float_flag_inexact, &env->fp_status); + } + update_fpscr(env, GETPC()); + return t0; +} + float32 helper_fsub_FT(CPUSH4State *env, float32 t0, float32 t1) { set_float_exception_flags(0, &env->fp_status); diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 8bc132b27b..498bb99dc1 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -41,6 +41,8 @@ typedef struct DisasContext { uint32_t envflags; /* should stay in sync with env->flags using TCG ops */ int bstate; int memidx; + int gbank; + int fbank; uint32_t delayed_pc; int singlestep_enabled; uint32_t features; @@ -64,7 +66,7 @@ enum { /* global register indexes */ static TCGv_env cpu_env; -static TCGv cpu_gregs[24]; +static TCGv cpu_gregs[32]; static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t; static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr; static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl; @@ -98,16 +100,19 @@ void sh4_translate_init(void) "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1", }; - if (done_init) + if (done_init) { return; + } cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); tcg_ctx.tcg_env = cpu_env; - for (i = 0; i < 24; i++) + for (i = 0; i < 24; i++) { cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, gregs[i]), gregnames[i]); + } + memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv)); cpu_pc = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, pc), "PC"); @@ -220,17 +225,22 @@ static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc) if (ctx->delayed_pc != (uint32_t) -1) { tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); } - if ((ctx->tbflags & DELAY_SLOT_MASK) != ctx->envflags) { + if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) { tcg_gen_movi_i32(cpu_flags, ctx->envflags); } } +static inline bool use_exit_tb(DisasContext *ctx) +{ + return (ctx->tbflags & GUSA_EXCLUSIVE) != 0; +} + static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) { - if (unlikely(ctx->singlestep_enabled)) { + /* Use a direct jump if in same page and singlestep not enabled */ + if (unlikely(ctx->singlestep_enabled || use_exit_tb(ctx))) { return false; } - #ifndef CONFIG_USER_ONLY return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); #else @@ -241,69 +251,110 @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { if (use_goto_tb(ctx, dest)) { - /* Use a direct jump if in same page and singlestep not 
enabled */ tcg_gen_goto_tb(n); tcg_gen_movi_i32(cpu_pc, dest); tcg_gen_exit_tb((uintptr_t)ctx->tb + n); } else { tcg_gen_movi_i32(cpu_pc, dest); - if (ctx->singlestep_enabled) + if (ctx->singlestep_enabled) { gen_helper_debug(cpu_env); - tcg_gen_exit_tb(0); + } else if (use_exit_tb(ctx)) { + tcg_gen_exit_tb(0); + } else { + tcg_gen_lookup_and_goto_ptr(cpu_pc); + } } } static void gen_jump(DisasContext * ctx) { - if (ctx->delayed_pc == (uint32_t) - 1) { + if (ctx->delayed_pc == -1) { /* Target is not statically known, it comes necessarily from a delayed jump as immediate jump are conditinal jumps */ tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc); tcg_gen_discard_i32(cpu_delayed_pc); - if (ctx->singlestep_enabled) + if (ctx->singlestep_enabled) { gen_helper_debug(cpu_env); - tcg_gen_exit_tb(0); + } else if (use_exit_tb(ctx)) { + tcg_gen_exit_tb(0); + } else { + tcg_gen_lookup_and_goto_ptr(cpu_pc); + } } else { gen_goto_tb(ctx, 0, ctx->delayed_pc); } } /* Immediate conditional jump (bt or bf) */ -static void gen_conditional_jump(DisasContext * ctx, - target_ulong ift, target_ulong ifnott) +static void gen_conditional_jump(DisasContext *ctx, target_ulong dest, + bool jump_if_true) { TCGLabel *l1 = gen_new_label(); + TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE; + + if (ctx->tbflags & GUSA_EXCLUSIVE) { + /* When in an exclusive region, we must continue to the end. + Therefore, exit the region on a taken branch, but otherwise + fall through to the next instruction. */ + tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1); + tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK); + /* Note that this won't actually use a goto_tb opcode because we + disallow it in use_goto_tb, but it handles exit + singlestep. */ + gen_goto_tb(ctx, 0, dest); + gen_set_label(l1); + return; + } + gen_save_cpu_state(ctx, false); - tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1); - gen_goto_tb(ctx, 0, ifnott); + tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1); + gen_goto_tb(ctx, 0, dest); gen_set_label(l1); - gen_goto_tb(ctx, 1, ift); + gen_goto_tb(ctx, 1, ctx->pc + 2); ctx->bstate = BS_BRANCH; } /* Delayed conditional jump (bt or bf) */ static void gen_delayed_conditional_jump(DisasContext * ctx) { - TCGLabel *l1; - TCGv ds; + TCGLabel *l1 = gen_new_label(); + TCGv ds = tcg_temp_new(); - l1 = gen_new_label(); - ds = tcg_temp_new(); tcg_gen_mov_i32(ds, cpu_delayed_cond); tcg_gen_discard_i32(cpu_delayed_cond); + + if (ctx->tbflags & GUSA_EXCLUSIVE) { + /* When in an exclusive region, we must continue to the end. + Therefore, exit the region on a taken branch, but otherwise + fall through to the next instruction. */ + tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1); + + /* Leave the gUSA region. */ + tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK); + gen_jump(ctx); + + gen_set_label(l1); + return; + } + tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1); gen_goto_tb(ctx, 1, ctx->pc + 2); gen_set_label(l1); gen_jump(ctx); } -static inline void gen_load_fpr64(TCGv_i64 t, int reg) +static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) { + /* We have already signaled illegal instruction for odd Dr. */ + tcg_debug_assert((reg & 1) == 0); + reg ^= ctx->fbank; tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]); } -static inline void gen_store_fpr64 (TCGv_i64 t, int reg) +static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) { + /* We have already signaled illegal instruction for odd Dr. 
*/ + tcg_debug_assert((reg & 1) == 0); + reg ^= ctx->fbank; tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t); } @@ -317,49 +368,40 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg) #define B11_8 ((ctx->opcode >> 8) & 0xf) #define B15_12 ((ctx->opcode >> 12) & 0xf) -#define REG(x) ((x) < 8 && (ctx->tbflags & (1u << SR_MD))\ - && (ctx->tbflags & (1u << SR_RB))\ - ? (cpu_gregs[x + 16]) : (cpu_gregs[x])) +#define REG(x) cpu_gregs[(x) ^ ctx->gbank] +#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10] +#define FREG(x) cpu_fregs[(x) ^ ctx->fbank] -#define ALTREG(x) ((x) < 8 && (!(ctx->tbflags & (1u << SR_MD))\ - || !(ctx->tbflags & (1u << SR_RB)))\ - ? (cpu_gregs[x + 16]) : (cpu_gregs[x])) - -#define FREG(x) (ctx->tbflags & FPSCR_FR ? (x) ^ 0x10 : (x)) #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe)) -#define XREG(x) (ctx->tbflags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x)) -#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */ #define CHECK_NOT_DELAY_SLOT \ - if (ctx->envflags & DELAY_SLOT_MASK) { \ - gen_save_cpu_state(ctx, true); \ - gen_helper_raise_slot_illegal_instruction(cpu_env); \ - ctx->bstate = BS_EXCP; \ - return; \ + if (ctx->envflags & DELAY_SLOT_MASK) { \ + goto do_illegal_slot; \ + } + +#define CHECK_PRIVILEGED \ + if (IS_USER(ctx)) { \ + goto do_illegal; \ + } + +#define CHECK_FPU_ENABLED \ + if (ctx->tbflags & (1u << SR_FD)) { \ + goto do_fpu_disabled; \ } -#define CHECK_PRIVILEGED \ - if (IS_USER(ctx)) { \ - gen_save_cpu_state(ctx, true); \ - if (ctx->envflags & DELAY_SLOT_MASK) { \ - gen_helper_raise_slot_illegal_instruction(cpu_env); \ - } else { \ - gen_helper_raise_illegal_instruction(cpu_env); \ - } \ - ctx->bstate = BS_EXCP; \ - return; \ +#define CHECK_FPSCR_PR_0 \ + if (ctx->tbflags & FPSCR_PR) { \ + goto do_illegal; \ } -#define CHECK_FPU_ENABLED \ - if (ctx->tbflags & (1u << SR_FD)) { \ - gen_save_cpu_state(ctx, true); \ - if (ctx->envflags & DELAY_SLOT_MASK) { \ - gen_helper_raise_slot_fpu_disable(cpu_env); \ - } else { \ - gen_helper_raise_fpu_disable(cpu_env); \ - } \ - ctx->bstate = BS_EXCP; \ - return; \ +#define CHECK_FPSCR_PR_1 \ + if (!(ctx->tbflags & FPSCR_PR)) { \ + goto do_illegal; \ + } + +#define CHECK_SH4A \ + if (!(ctx->features & SH_FEATURE_SH4A)) { \ + goto do_illegal; \ } static void _decode_opc(DisasContext * ctx) @@ -441,13 +483,20 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_movi_i32(cpu_sr_t, 1); return; case 0xfbfd: /* frchg */ + CHECK_FPSCR_PR_0 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR); ctx->bstate = BS_STOP; return; case 0xf3fd: /* fschg */ + CHECK_FPSCR_PR_0 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ); ctx->bstate = BS_STOP; return; + case 0xf7fd: /* fpchg */ + CHECK_SH4A + tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR); + ctx->bstate = BS_STOP; + return; case 0x0009: /* nop */ return; case 0x001b: /* sleep */ @@ -475,6 +524,15 @@ static void _decode_opc(DisasContext * ctx) } return; case 0xe000: /* mov #imm,Rn */ +#ifdef CONFIG_USER_ONLY + /* Detect the start of a gUSA region. If so, update envflags + and end the TB. This will allow us to see the end of the + region (stored in R0) in the next TB. 
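A note on the new REG()/ALTREG()/FREG() macros above: the per-operand SR.MD/SR.RB and FPSCR.FR tests are replaced by one XOR with a per-TB constant. The sketch below shows how those constants are derived (the same expressions appear in gen_intermediate_code() later in this patch; compute_banks is an illustrative wrapper) and why the widened cpu_gregs[32] array above, whose entries 24..31 alias 8..15, keeps the XOR correct for high register numbers.

static void compute_banks(DisasContext *ctx, uint32_t tbflags)
{
    ctx->gbank = ((tbflags & (1u << SR_MD)) &&
                  (tbflags & (1u << SR_RB))) ? 0x10 : 0;
    ctx->fbank = (tbflags & FPSCR_FR) ? 0x10 : 0;
}

/* With gbank == 0x10:
 *   REG(3)    -> cpu_gregs[0x13]  banked R3
 *   REG(12)   -> cpu_gregs[0x1c]  aliases cpu_gregs[12], i.e. plain R12
 *   ALTREG(3) -> cpu_gregs[3]     the other bank
 * With fbank == 0x10:
 *   FREG(2)   -> cpu_fregs[0x12]  FR2 of the other FP bank */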
*/ + if (B11_8 == 15 && B7_0s < 0 && parallel_cpus) { + ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s); + ctx->bstate = BS_STOP; + } +#endif tcg_gen_movi_i32(REG(B11_8), B7_0s); return; case 0x9000: /* mov.w @(disp,PC),Rn */ @@ -938,75 +996,66 @@ static void _decode_opc(DisasContext * ctx) case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_SZ) { - TCGv_i64 fp = tcg_temp_new_i64(); - gen_load_fpr64(fp, XREG(B7_4)); - gen_store_fpr64(fp, XREG(B11_8)); - tcg_temp_free_i64(fp); + int xsrc = XHACK(B7_4); + int xdst = XHACK(B11_8); + tcg_gen_mov_i32(FREG(xdst), FREG(xsrc)); + tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1)); } else { - tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); + tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4)); } return; case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_SZ) { - TCGv addr_hi = tcg_temp_new(); - int fr = XREG(B7_4); - tcg_gen_addi_i32(addr_hi, REG(B11_8), 4); - tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8), - ctx->memidx, MO_TEUL); - tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi, - ctx->memidx, MO_TEUL); - tcg_temp_free(addr_hi); + TCGv_i64 fp = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp, XHACK(B7_4)); + tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ); + tcg_temp_free_i64(fp); } else { - tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8), - ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL); } return; case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_SZ) { - TCGv addr_hi = tcg_temp_new(); - int fr = XREG(B11_8); - tcg_gen_addi_i32(addr_hi, REG(B7_4), 4); - tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL); - tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL); - tcg_temp_free(addr_hi); + TCGv_i64 fp = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ); + gen_store_fpr64(ctx, fp, XHACK(B11_8)); + tcg_temp_free_i64(fp); } else { - tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4), - ctx->memidx, MO_TEUL); + tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL); } return; case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_SZ) { - TCGv addr_hi = tcg_temp_new(); - int fr = XREG(B11_8); - tcg_gen_addi_i32(addr_hi, REG(B7_4), 4); - tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL); - tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL); - tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8); - tcg_temp_free(addr_hi); + TCGv_i64 fp = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ); + gen_store_fpr64(ctx, fp, XHACK(B11_8)); + tcg_temp_free_i64(fp); + tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8); } else { - tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4), - ctx->memidx, MO_TEUL); + tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); } return; case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED - TCGv addr = tcg_temp_new_i32(); - tcg_gen_subi_i32(addr, REG(B11_8), 4); - if (ctx->tbflags & FPSCR_SZ) { - int fr = XREG(B7_4); - tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL); - tcg_gen_subi_i32(addr, addr, 4); - tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL); - } else { - tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr, - ctx->memidx, MO_TEUL); - } - 
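To make the mov #imm,Rn hook above a little more concrete: the signed 8-bit displacement from "mov #-n,r15" is parked in the flags word with deposit32 and recovered later by decode_gusa() (further down in this patch) with sextract32. A self-contained round trip, assuming the GUSA_SHIFT value from the cpu.h hunk (gusa_flag_round_trip is an illustrative name):

static void gusa_flag_round_trip(void)
{
    uint32_t flags = 0;
    int disp = -6;                                   /* "mov #-6,r15" */

    flags = deposit32(flags, GUSA_SHIFT, 8, disp);   /* entry hook    */
    assert(sextract32(flags, GUSA_SHIFT, 8) == -6);  /* decode_gusa() */
}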
tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); + { + TCGv addr = tcg_temp_new_i32(); + if (ctx->tbflags & FPSCR_SZ) { + TCGv_i64 fp = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp, XHACK(B7_4)); + tcg_gen_subi_i32(addr, REG(B11_8), 8); + tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ); + tcg_temp_free_i64(fp); + } else { + tcg_gen_subi_i32(addr, REG(B11_8), 4); + tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL); + } + tcg_gen_mov_i32(REG(B11_8), addr); + tcg_temp_free(addr); + } return; case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */ CHECK_FPU_ENABLED @@ -1014,15 +1063,12 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new_i32(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); if (ctx->tbflags & FPSCR_SZ) { - int fr = XREG(B11_8); - tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr, - ctx->memidx, MO_TEUL); - tcg_gen_addi_i32(addr, addr, 4); - tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr, - ctx->memidx, MO_TEUL); + TCGv_i64 fp = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ); + gen_store_fpr64(ctx, fp, XHACK(B11_8)); + tcg_temp_free_i64(fp); } else { - tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr, - ctx->memidx, MO_TEUL); + tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL); } tcg_temp_free(addr); } @@ -1033,15 +1079,12 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); if (ctx->tbflags & FPSCR_SZ) { - int fr = XREG(B7_4); - tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr, - ctx->memidx, MO_TEUL); - tcg_gen_addi_i32(addr, addr, 4); - tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr, - ctx->memidx, MO_TEUL); + TCGv_i64 fp = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp, XHACK(B7_4)); + tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ); + tcg_temp_free_i64(fp); } else { - tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr, - ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL); } tcg_temp_free(addr); } @@ -1057,12 +1100,13 @@ static void _decode_opc(DisasContext * ctx) if (ctx->tbflags & FPSCR_PR) { TCGv_i64 fp0, fp1; - if (ctx->opcode & 0x0110) - break; /* illegal instruction */ + if (ctx->opcode & 0x0110) { + goto do_illegal; + } fp0 = tcg_temp_new_i64(); fp1 = tcg_temp_new_i64(); - gen_load_fpr64(fp0, DREG(B11_8)); - gen_load_fpr64(fp1, DREG(B7_4)); + gen_load_fpr64(ctx, fp0, B11_8); + gen_load_fpr64(ctx, fp1, B7_4); switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1); @@ -1077,61 +1121,51 @@ static void _decode_opc(DisasContext * ctx) gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1); break; case 0xf004: /* fcmp/eq Rm,Rn */ - gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1); + gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1); return; case 0xf005: /* fcmp/gt Rm,Rn */ - gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1); + gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1); return; } - gen_store_fpr64(fp0, DREG(B11_8)); + gen_store_fpr64(ctx, fp0, B11_8); tcg_temp_free_i64(fp0); tcg_temp_free_i64(fp1); } else { switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ - gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env, - cpu_fregs[FREG(B11_8)], - cpu_fregs[FREG(B7_4)]); + gen_helper_fadd_FT(FREG(B11_8), cpu_env, + FREG(B11_8), FREG(B7_4)); break; case 0xf001: /* fsub Rm,Rn */ - gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env, - cpu_fregs[FREG(B11_8)], - cpu_fregs[FREG(B7_4)]); + gen_helper_fsub_FT(FREG(B11_8), cpu_env, + FREG(B11_8), FREG(B7_4)); break; case 0xf002: /* fmul Rm,Rn */ - 
gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env, - cpu_fregs[FREG(B11_8)], - cpu_fregs[FREG(B7_4)]); + gen_helper_fmul_FT(FREG(B11_8), cpu_env, + FREG(B11_8), FREG(B7_4)); break; case 0xf003: /* fdiv Rm,Rn */ - gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env, - cpu_fregs[FREG(B11_8)], - cpu_fregs[FREG(B7_4)]); + gen_helper_fdiv_FT(FREG(B11_8), cpu_env, + FREG(B11_8), FREG(B7_4)); break; case 0xf004: /* fcmp/eq Rm,Rn */ - gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)], - cpu_fregs[FREG(B7_4)]); + gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env, + FREG(B11_8), FREG(B7_4)); return; case 0xf005: /* fcmp/gt Rm,Rn */ - gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)], - cpu_fregs[FREG(B7_4)]); + gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env, + FREG(B11_8), FREG(B7_4)); return; } } } return; case 0xf00e: /* fmac FR0,RM,Rn */ - { - CHECK_FPU_ENABLED - if (ctx->tbflags & FPSCR_PR) { - break; /* illegal instruction */ - } else { - gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env, - cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], - cpu_fregs[FREG(B11_8)]); - return; - } - } + CHECK_FPU_ENABLED + CHECK_FPSCR_PR_0 + gen_helper_fmac_FT(FREG(B11_8), cpu_env, + FREG(0), FREG(B7_4), FREG(B11_8)); + return; } switch (ctx->opcode & 0xff00) { @@ -1153,7 +1187,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0x8b00: /* bf label */ CHECK_NOT_DELAY_SLOT - gen_conditional_jump(ctx, ctx->pc + 2, ctx->pc + 4 + B7_0s * 2); + gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, false); return; case 0x8f00: /* bf/s label */ CHECK_NOT_DELAY_SLOT @@ -1163,7 +1197,7 @@ static void _decode_opc(DisasContext * ctx) return; case 0x8900: /* bt label */ CHECK_NOT_DELAY_SLOT - gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, ctx->pc + 2); + gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, true); return; case 0x8d00: /* bt/s label */ CHECK_NOT_DELAY_SLOT @@ -1455,7 +1489,7 @@ static void _decode_opc(DisasContext * ctx) LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED) LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED) ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED) - LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;) + LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A) LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED) LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {}) LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {}) @@ -1505,21 +1539,19 @@ static void _decode_opc(DisasContext * ctx) ctx->has_movcal = 1; return; case 0x40a9: /* movua.l @Rm,R0 */ + CHECK_SH4A /* Load non-boundary-aligned data */ - if (ctx->features & SH_FEATURE_SH4A) { - tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, - MO_TEUL | MO_UNALN); - return; - } + tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, + MO_TEUL | MO_UNALN); + return; break; case 0x40e9: /* movua.l @Rm+,R0 */ + CHECK_SH4A /* Load non-boundary-aligned data */ - if (ctx->features & SH_FEATURE_SH4A) { - tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, - MO_TEUL | MO_UNALN); - tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); - return; - } + tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, + MO_TEUL | MO_UNALN); + tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); + return; break; case 0x0029: /* movt Rn */ tcg_gen_mov_i32(REG(B11_8), cpu_sr_t); @@ -1530,7 +1562,8 @@ static void _decode_opc(DisasContext * ctx) If (T == 1) R0 -> (Rn) 0 -> LDST */ - if (ctx->features & SH_FEATURE_SH4A) { + CHECK_SH4A + { TCGLabel *label = gen_new_label(); tcg_gen_mov_i32(cpu_sr_t, cpu_ldst); tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, 
label); @@ -1538,8 +1571,7 @@ static void _decode_opc(DisasContext * ctx) gen_set_label(label); tcg_gen_movi_i32(cpu_ldst, 0); return; - } else - break; + } case 0x0063: /* MOVLI.L @Rm,R0 1 -> LDST @@ -1547,13 +1579,11 @@ static void _decode_opc(DisasContext * ctx) When interrupt/exception occurred 0 -> LDST */ - if (ctx->features & SH_FEATURE_SH4A) { - tcg_gen_movi_i32(cpu_ldst, 0); - tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL); - tcg_gen_movi_i32(cpu_ldst, 1); - return; - } else - break; + CHECK_SH4A + tcg_gen_movi_i32(cpu_ldst, 0); + tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL); + tcg_gen_movi_i32(cpu_ldst, 1); + return; case 0x0093: /* ocbi @Rn */ { gen_helper_ocbi(cpu_env, REG(B11_8)); @@ -1568,20 +1598,15 @@ static void _decode_opc(DisasContext * ctx) case 0x0083: /* pref @Rn */ return; case 0x00d3: /* prefi @Rn */ - if (ctx->features & SH_FEATURE_SH4A) - return; - else - break; + CHECK_SH4A + return; case 0x00e3: /* icbi @Rn */ - if (ctx->features & SH_FEATURE_SH4A) - return; - else - break; + CHECK_SH4A + return; case 0x00ab: /* synco */ - if (ctx->features & SH_FEATURE_SH4A) { - tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); - return; - } + CHECK_SH4A + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); + return; break; case 0x4024: /* rotcl Rn */ { @@ -1653,98 +1678,88 @@ static void _decode_opc(DisasContext * ctx) return; case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */ CHECK_FPU_ENABLED - tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul); + tcg_gen_mov_i32(FREG(B11_8), cpu_fpul); return; case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */ CHECK_FPU_ENABLED - tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]); + tcg_gen_mov_i32(cpu_fpul, FREG(B11_8)); return; case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_PR) { TCGv_i64 fp; - if (ctx->opcode & 0x0100) - break; /* illegal instruction */ + if (ctx->opcode & 0x0100) { + goto do_illegal; + } fp = tcg_temp_new_i64(); gen_helper_float_DT(fp, cpu_env, cpu_fpul); - gen_store_fpr64(fp, DREG(B11_8)); + gen_store_fpr64(ctx, fp, B11_8); tcg_temp_free_i64(fp); } else { - gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul); + gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul); } return; case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_PR) { TCGv_i64 fp; - if (ctx->opcode & 0x0100) - break; /* illegal instruction */ + if (ctx->opcode & 0x0100) { + goto do_illegal; + } fp = tcg_temp_new_i64(); - gen_load_fpr64(fp, DREG(B11_8)); + gen_load_fpr64(ctx, fp, B11_8); gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp); tcg_temp_free_i64(fp); } else { - gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]); + gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8)); } return; case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */ CHECK_FPU_ENABLED - { - gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]); - } + tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000); return; - case 0xf05d: /* fabs FRn/DRn */ + case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */ CHECK_FPU_ENABLED - if (ctx->tbflags & FPSCR_PR) { - if (ctx->opcode & 0x0100) - break; /* illegal instruction */ - TCGv_i64 fp = tcg_temp_new_i64(); - gen_load_fpr64(fp, DREG(B11_8)); - gen_helper_fabs_DT(fp, fp); - gen_store_fpr64(fp, DREG(B11_8)); - tcg_temp_free_i64(fp); - } else { - gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]); - } + tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff); return; case 0xf06d: /* 
fsqrt FRn */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_PR) { - if (ctx->opcode & 0x0100) - break; /* illegal instruction */ + if (ctx->opcode & 0x0100) { + goto do_illegal; + } TCGv_i64 fp = tcg_temp_new_i64(); - gen_load_fpr64(fp, DREG(B11_8)); + gen_load_fpr64(ctx, fp, B11_8); gen_helper_fsqrt_DT(fp, cpu_env, fp); - gen_store_fpr64(fp, DREG(B11_8)); + gen_store_fpr64(ctx, fp, B11_8); tcg_temp_free_i64(fp); } else { - gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env, - cpu_fregs[FREG(B11_8)]); + gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8)); } return; case 0xf07d: /* fsrra FRn */ CHECK_FPU_ENABLED + CHECK_FPSCR_PR_0 + gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8)); break; case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */ CHECK_FPU_ENABLED - if (!(ctx->tbflags & FPSCR_PR)) { - tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0); - } - return; + CHECK_FPSCR_PR_0 + tcg_gen_movi_i32(FREG(B11_8), 0); + return; case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */ CHECK_FPU_ENABLED - if (!(ctx->tbflags & FPSCR_PR)) { - tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000); - } - return; + CHECK_FPSCR_PR_0 + tcg_gen_movi_i32(FREG(B11_8), 0x3f800000); + return; case 0xf0ad: /* fcnvsd FPUL,DRn */ CHECK_FPU_ENABLED { TCGv_i64 fp = tcg_temp_new_i64(); gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul); - gen_store_fpr64(fp, DREG(B11_8)); + gen_store_fpr64(ctx, fp, B11_8); tcg_temp_free_i64(fp); } return; @@ -1752,17 +1767,17 @@ static void _decode_opc(DisasContext * ctx) CHECK_FPU_ENABLED { TCGv_i64 fp = tcg_temp_new_i64(); - gen_load_fpr64(fp, DREG(B11_8)); + gen_load_fpr64(ctx, fp, B11_8); gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp); tcg_temp_free_i64(fp); } return; case 0xf0ed: /* fipr FVm,FVn */ CHECK_FPU_ENABLED - if ((ctx->tbflags & FPSCR_PR) == 0) { - TCGv m, n; - m = tcg_const_i32((ctx->opcode >> 8) & 3); - n = tcg_const_i32((ctx->opcode >> 10) & 3); + CHECK_FPSCR_PR_1 + { + TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3); + TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3); gen_helper_fipr(cpu_env, m, n); tcg_temp_free(m); tcg_temp_free(n); @@ -1771,10 +1786,12 @@ static void _decode_opc(DisasContext * ctx) break; case 0xf0fd: /* ftrv XMTRX,FVn */ CHECK_FPU_ENABLED - if ((ctx->opcode & 0x0300) == 0x0100 && - (ctx->tbflags & FPSCR_PR) == 0) { - TCGv n; - n = tcg_const_i32((ctx->opcode >> 10) & 3); + CHECK_FPSCR_PR_1 + { + if ((ctx->opcode & 0x0300) != 0x0100) { + goto do_illegal; + } + TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3); gen_helper_ftrv(cpu_env, n); tcg_temp_free(n); return; @@ -1786,13 +1803,27 @@ static void _decode_opc(DisasContext * ctx) ctx->opcode, ctx->pc); fflush(stderr); #endif - gen_save_cpu_state(ctx, true); + do_illegal: if (ctx->envflags & DELAY_SLOT_MASK) { + do_illegal_slot: + gen_save_cpu_state(ctx, true); gen_helper_raise_slot_illegal_instruction(cpu_env); } else { + gen_save_cpu_state(ctx, true); gen_helper_raise_illegal_instruction(cpu_env); } ctx->bstate = BS_EXCP; + return; + + do_fpu_disabled: + gen_save_cpu_state(ctx, true); + if (ctx->envflags & DELAY_SLOT_MASK) { + gen_helper_raise_slot_fpu_disable(cpu_env); + } else { + gen_helper_raise_fpu_disable(cpu_env); + } + ctx->bstate = BS_EXCP; + return; } static void decode_opc(DisasContext * ctx) @@ -1804,6 +1835,18 @@ static void decode_opc(DisasContext * ctx) if (old_flags & DELAY_SLOT_MASK) { /* go out of the delay slot */ ctx->envflags &= ~DELAY_SLOT_MASK; + + /* When in an exclusive region, we must continue to the end + for conditional branches. 
*/ + if (ctx->tbflags & GUSA_EXCLUSIVE + && old_flags & DELAY_SLOT_CONDITIONAL) { + gen_delayed_conditional_jump(ctx); + return; + } + /* Otherwise this is probably an invalid gUSA region. + Drop the GUSA bits so the next TB doesn't see them. */ + ctx->envflags &= ~GUSA_MASK; + tcg_gen_movi_i32(cpu_flags, ctx->envflags); ctx->bstate = BS_BRANCH; if (old_flags & DELAY_SLOT_CONDITIONAL) { @@ -1811,9 +1854,381 @@ static void decode_opc(DisasContext * ctx) } else { gen_jump(ctx); } + } +} + +#ifdef CONFIG_USER_ONLY +/* For uniprocessors, SH4 uses optimistic restartable atomic sequences. + Upon an interrupt, a real kernel would simply notice magic values in + the registers and reset the PC to the start of the sequence. + + For QEMU, we cannot do this in quite the same way. Instead, we notice + the normal start of such a sequence (mov #-x,r15). While we can handle + any sequence via cpu_exec_step_atomic, we can recognize the "normal" + sequences and transform them into atomic operations as seen by the host. +*/ +static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns) +{ + uint16_t insns[5]; + int ld_adr, ld_dst, ld_mop; + int op_dst, op_src, op_opc; + int mv_src, mt_dst, st_src, st_mop; + TCGv op_arg; + + uint32_t pc = ctx->pc; + uint32_t pc_end = ctx->tb->cs_base; + int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8); + int max_insns = (pc_end - pc) / 2; + int i; + + if (pc != pc_end + backup || max_insns < 2) { + /* This is a malformed gUSA region. Don't do anything special, + since the interpreter is likely to get confused. */ + ctx->envflags &= ~GUSA_MASK; + return 0; + } + if (ctx->tbflags & GUSA_EXCLUSIVE) { + /* Regardless of single-stepping or the end of the page, + we must complete execution of the gUSA region while + holding the exclusive lock. */ + *pmax_insns = max_insns; + return 0; } + + /* The state machine below will consume only a few insns. + If there are more than that in a region, fail now. */ + if (max_insns > ARRAY_SIZE(insns)) { + goto fail; + } + + /* Read all of the insns for the region. */ + for (i = 0; i < max_insns; ++i) { + insns[i] = cpu_lduw_code(env, pc + i * 2); + } + + ld_adr = ld_dst = ld_mop = -1; + mv_src = -1; + op_dst = op_src = op_opc = -1; + mt_dst = -1; + st_src = st_mop = -1; + TCGV_UNUSED(op_arg); + i = 0; + +#define NEXT_INSN \ + do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0) + + /* + * Expect a load to begin the region. + */ + NEXT_INSN; + switch (ctx->opcode & 0xf00f) { + case 0x6000: /* mov.b @Rm,Rn */ + ld_mop = MO_SB; + break; + case 0x6001: /* mov.w @Rm,Rn */ + ld_mop = MO_TESW; + break; + case 0x6002: /* mov.l @Rm,Rn */ + ld_mop = MO_TESL; + break; + default: + goto fail; + } + ld_adr = B7_4; + ld_dst = B11_8; + if (ld_adr == ld_dst) { + goto fail; + } + /* Unless we see a mov, any two-operand operation must use ld_dst. */ + op_dst = ld_dst; + + /* + * Expect an optional register move. + */ + NEXT_INSN; + switch (ctx->opcode & 0xf00f) { + case 0x6003: /* mov Rm,Rn */ + /* Here we want to recognize ld_dst being saved for later consumtion, + or for another input register being copied so that ld_dst need not + be clobbered during the operation. */ + op_dst = B11_8; + mv_src = B7_4; + if (op_dst == ld_dst) { + /* Overwriting the load output. */ + goto fail; + } + if (mv_src != ld_dst) { + /* Copying a new input; constrain op_src to match the load. */ + op_src = ld_dst; + } + break; + + default: + /* Put back and re-examine as operation. */ + --i; + } + + /* + * Expect the operation. 
+ */ + NEXT_INSN; + switch (ctx->opcode & 0xf00f) { + case 0x300c: /* add Rm,Rn */ + op_opc = INDEX_op_add_i32; + goto do_reg_op; + case 0x2009: /* and Rm,Rn */ + op_opc = INDEX_op_and_i32; + goto do_reg_op; + case 0x200a: /* xor Rm,Rn */ + op_opc = INDEX_op_xor_i32; + goto do_reg_op; + case 0x200b: /* or Rm,Rn */ + op_opc = INDEX_op_or_i32; + do_reg_op: + /* The operation register should be as expected, and the + other input cannot depend on the load. */ + if (op_dst != B11_8) { + goto fail; + } + if (op_src < 0) { + /* Unconstrainted input. */ + op_src = B7_4; + } else if (op_src == B7_4) { + /* Constrained input matched load. All operations are + commutative; "swap" them by "moving" the load output + to the (implicit) first argument and the move source + to the (explicit) second argument. */ + op_src = mv_src; + } else { + goto fail; + } + op_arg = REG(op_src); + break; + + case 0x6007: /* not Rm,Rn */ + if (ld_dst != B7_4 || mv_src >= 0) { + goto fail; + } + op_dst = B11_8; + op_opc = INDEX_op_xor_i32; + op_arg = tcg_const_i32(-1); + break; + + case 0x7000 ... 0x700f: /* add #imm,Rn */ + if (op_dst != B11_8 || mv_src >= 0) { + goto fail; + } + op_opc = INDEX_op_add_i32; + op_arg = tcg_const_i32(B7_0s); + break; + + case 0x3000: /* cmp/eq Rm,Rn */ + /* Looking for the middle of a compare-and-swap sequence, + beginning with the compare. Operands can be either order, + but with only one overlapping the load. */ + if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) { + goto fail; + } + op_opc = INDEX_op_setcond_i32; /* placeholder */ + op_src = (ld_dst == B11_8 ? B7_4 : B11_8); + op_arg = REG(op_src); + + NEXT_INSN; + switch (ctx->opcode & 0xff00) { + case 0x8b00: /* bf label */ + case 0x8f00: /* bf/s label */ + if (pc + (i + 1 + B7_0s) * 2 != pc_end) { + goto fail; + } + if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */ + break; + } + /* We're looking to unconditionally modify Rn with the + result of the comparison, within the delay slot of + the branch. This is used by older gcc. */ + NEXT_INSN; + if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */ + mt_dst = B11_8; + } else { + goto fail; + } + break; + + default: + goto fail; + } + break; + + case 0x2008: /* tst Rm,Rn */ + /* Looking for a compare-and-swap against zero. */ + if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) { + goto fail; + } + op_opc = INDEX_op_setcond_i32; + op_arg = tcg_const_i32(0); + + NEXT_INSN; + if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */ + || pc + (i + 1 + B7_0s) * 2 != pc_end) { + goto fail; + } + break; + + default: + /* Put back and re-examine as store. */ + --i; + } + + /* + * Expect the store. + */ + /* The store must be the last insn. */ + if (i != max_insns - 1) { + goto fail; + } + NEXT_INSN; + switch (ctx->opcode & 0xf00f) { + case 0x2000: /* mov.b Rm,@Rn */ + st_mop = MO_UB; + break; + case 0x2001: /* mov.w Rm,@Rn */ + st_mop = MO_UW; + break; + case 0x2002: /* mov.l Rm,@Rn */ + st_mop = MO_UL; + break; + default: + goto fail; + } + /* The store must match the load. */ + if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) { + goto fail; + } + st_src = B7_4; + +#undef NEXT_INSN + + /* + * Emit the operation. + */ + tcg_gen_insn_start(pc, ctx->envflags); + switch (op_opc) { + case -1: + /* No operation found. Look for exchange pattern. 
*/ + if (st_src == ld_dst || mv_src >= 0) { + goto fail; + } + tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src), + ctx->memidx, ld_mop); + break; + + case INDEX_op_add_i32: + if (op_dst != st_src) { + goto fail; + } + if (op_dst == ld_dst && st_mop == MO_UL) { + tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr), + op_arg, ctx->memidx, ld_mop); + } else { + tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr), + op_arg, ctx->memidx, ld_mop); + if (op_dst != ld_dst) { + /* Note that mop sizes < 4 cannot use add_fetch + because it won't carry into the higher bits. */ + tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg); + } + } + break; + + case INDEX_op_and_i32: + if (op_dst != st_src) { + goto fail; + } + if (op_dst == ld_dst) { + tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr), + op_arg, ctx->memidx, ld_mop); + } else { + tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr), + op_arg, ctx->memidx, ld_mop); + tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg); + } + break; + + case INDEX_op_or_i32: + if (op_dst != st_src) { + goto fail; + } + if (op_dst == ld_dst) { + tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr), + op_arg, ctx->memidx, ld_mop); + } else { + tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr), + op_arg, ctx->memidx, ld_mop); + tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg); + } + break; + + case INDEX_op_xor_i32: + if (op_dst != st_src) { + goto fail; + } + if (op_dst == ld_dst) { + tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr), + op_arg, ctx->memidx, ld_mop); + } else { + tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr), + op_arg, ctx->memidx, ld_mop); + tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg); + } + break; + + case INDEX_op_setcond_i32: + if (st_src == ld_dst) { + goto fail; + } + tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg, + REG(st_src), ctx->memidx, ld_mop); + tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg); + if (mt_dst >= 0) { + tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t); + } + break; + + default: + g_assert_not_reached(); + } + + /* If op_src is not a valid register, then op_arg was a constant. */ + if (op_src < 0) { + tcg_temp_free_i32(op_arg); + } + + /* The entire region has been translated. */ + ctx->envflags &= ~GUSA_MASK; + ctx->pc = pc_end; + return max_insns; + + fail: + qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n", + pc, pc_end); + + /* Restart with the EXCLUSIVE bit set, within a TB run via + cpu_exec_step_atomic holding the exclusive lock. */ + tcg_gen_insn_start(pc, ctx->envflags); + ctx->envflags |= GUSA_EXCLUSIVE; + gen_save_cpu_state(ctx, false); + gen_helper_exclusive(cpu_env); + ctx->bstate = BS_EXCP; + + /* We're not executing an instruction, but we must report one for the + purposes of accounting within the TB. We might as well report the + entire region consumed via ctx->pc so that it's immediately available + in the disassembly dump. */ + ctx->pc = pc_end; + return 1; } +#endif void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb) { @@ -1827,7 +2242,7 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb) pc_start = tb->pc; ctx.pc = pc_start; ctx.tbflags = (uint32_t)tb->flags; - ctx.envflags = tb->flags & DELAY_SLOT_MASK; + ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK; ctx.bstate = BS_NONE; ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 
1 : 0; /* We don't know if the delayed pc came from a dynamic or static branch, @@ -1837,18 +2252,38 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb) ctx.singlestep_enabled = cs->singlestep_enabled; ctx.features = env->features; ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA); + ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) && + (ctx.tbflags & (1 << SR_RB))) * 0x10; + ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0; - num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } - if (max_insns > TCG_MAX_INSNS) { - max_insns = TCG_MAX_INSNS; + max_insns = MIN(max_insns, TCG_MAX_INSNS); + + /* Since the ISA is fixed-width, we can bound by the number + of instructions remaining on the page. */ + num_insns = -(ctx.pc | TARGET_PAGE_MASK) / 2; + max_insns = MIN(max_insns, num_insns); + + /* Single stepping means just that. */ + if (ctx.singlestep_enabled || singlestep) { + max_insns = 1; } gen_tb_start(tb); - while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) { + num_insns = 0; + +#ifdef CONFIG_USER_ONLY + if (ctx.tbflags & GUSA_MASK) { + num_insns = decode_gusa(&ctx, env, &max_insns); + } +#endif + + while (ctx.bstate == BS_NONE + && num_insns < max_insns + && !tcg_op_buf_full()) { tcg_gen_insn_start(ctx.pc, ctx.envflags); num_insns++; @@ -1872,18 +2307,16 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb) ctx.opcode = cpu_lduw_code(env, ctx.pc); decode_opc(&ctx); ctx.pc += 2; - if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) - break; - if (cs->singlestep_enabled) { - break; - } - if (num_insns >= max_insns) - break; - if (singlestep) - break; } - if (tb->cflags & CF_LAST_IO) + if (tb->cflags & CF_LAST_IO) { gen_io_end(); + } + + if (ctx.tbflags & GUSA_EXCLUSIVE) { + /* Ending the region of exclusivity. Clear the bits. 
*/ + ctx.envflags &= ~GUSA_MASK; + } + if (cs->singlestep_enabled) { gen_save_cpu_state(&ctx, true); gen_helper_debug(cpu_env); diff --git a/tests/ahci-test.c b/tests/ahci-test.c index ef17629345..999121bb7c 100644 --- a/tests/ahci-test.c +++ b/tests/ahci-test.c @@ -1132,9 +1132,9 @@ static void test_migrate_sanity(void) AHCIQState *src, *dst; char *uri = g_strdup_printf("unix:%s", mig_socket); - src = ahci_boot("-m 1024 -M q35 " + src = ahci_boot("-m 384 -M q35 " "-drive if=ide,file=%s,format=%s ", tmp_path, imgfmt); - dst = ahci_boot("-m 1024 -M q35 " + dst = ahci_boot("-m 384 -M q35 " "-drive if=ide,file=%s,format=%s " "-incoming %s", tmp_path, imgfmt, uri); @@ -1157,10 +1157,10 @@ static void ahci_migrate_simple(uint8_t cmd_read, uint8_t cmd_write) unsigned char *rx = g_malloc0(bufsize); char *uri = g_strdup_printf("unix:%s", mig_socket); - src = ahci_boot_and_enable("-m 1024 -M q35 " + src = ahci_boot_and_enable("-m 384 -M q35 " "-drive if=ide,format=%s,file=%s ", imgfmt, tmp_path); - dst = ahci_boot("-m 1024 -M q35 " + dst = ahci_boot("-m 384 -M q35 " "-drive if=ide,format=%s,file=%s " "-incoming %s", imgfmt, tmp_path, uri); diff --git a/tests/qemu-iotests/067 b/tests/qemu-iotests/067 index 38d23fce6b..5d4ca4bc61 100755 --- a/tests/qemu-iotests/067 +++ b/tests/qemu-iotests/067 @@ -137,6 +137,19 @@ run_qemu <<EOF { "execute": "quit" } EOF +echo +echo === Empty drive with -device and device_del === +echo + +run_qemu -device virtio-scsi-pci -device scsi-cd,id=cd0 <<EOF +{ "execute": "qmp_capabilities" } +{ "execute": "query-block" } +{ "execute": "device_del", "arguments": { "id": "cd0" } } +{ "execute": "system_reset" } +{ "execute": "query-block" } +{ "execute": "quit" } +EOF + # success, all done echo "*** done" rm -f $seq.full diff --git a/tests/qemu-iotests/067.out b/tests/qemu-iotests/067.out index 782eae27a0..bd70557ddc 100644 --- a/tests/qemu-iotests/067.out +++ b/tests/qemu-iotests/067.out @@ -57,6 +57,7 @@ Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,if=none,id=disk -device virti "file": "TEST_DIR/t.qcow2", "encryption_key_missing": false }, + "qdev": "/machine/peripheral/virtio0/virtio-backend", "type": "unknown" } ] @@ -415,4 +416,43 @@ Testing: "return": { } } + +=== Empty drive with -device and device_del === + +Testing: -device virtio-scsi-pci -device scsi-cd,id=cd0 +{ + QMP_VERSION +} +{ + "return": { + } +} +{ + "return": [ + { + "device": "", + "locked": false, + "removable": true, + "qdev": "cd0", + "tray_open": false, + "type": "unknown" + } + ] +} +{ + "return": { + } +} +{ + "return": { + } +} +{ + "return": [ + ] +} +{ + "return": { + } +} *** done diff --git a/tests/qemu-iotests/082 b/tests/qemu-iotests/082 index ad1d9fadc1..d5c83d45ed 100755 --- a/tests/qemu-iotests/082 +++ b/tests/qemu-iotests/082 @@ -85,8 +85,8 @@ run_qemu_img create -f $IMGFMT -o cluster_size=4k -o help "$TEST_IMG" $size run_qemu_img create -f $IMGFMT -o cluster_size=4k -o \? "$TEST_IMG" $size # Looks like a help option, but is part of the backing file name -run_qemu_img create -f $IMGFMT -o backing_file="$TEST_IMG",,help "$TEST_IMG" $size -run_qemu_img create -f $IMGFMT -o backing_file="$TEST_IMG",,\? "$TEST_IMG" $size +run_qemu_img create -f $IMGFMT -u -o backing_file="$TEST_IMG",,help "$TEST_IMG" $size +run_qemu_img create -f $IMGFMT -u -o backing_file="$TEST_IMG",,\? 
"$TEST_IMG" $size # Try to trick qemu-img into creating escaped commas run_qemu_img create -f $IMGFMT -o backing_file="$TEST_IMG", -o help "$TEST_IMG" $size diff --git a/tests/qemu-iotests/082.out b/tests/qemu-iotests/082.out index dbed67f2ba..1527fbe1b7 100644 --- a/tests/qemu-iotests/082.out +++ b/tests/qemu-iotests/082.out @@ -210,10 +210,10 @@ lazy_refcounts Postpone refcount updates refcount_bits Width of a reference count entry in bits nocow Turn off copy-on-write (valid only on btrfs) -Testing: create -f qcow2 -o backing_file=TEST_DIR/t.qcow2,,help TEST_DIR/t.qcow2 128M +Testing: create -f qcow2 -u -o backing_file=TEST_DIR/t.qcow2,,help TEST_DIR/t.qcow2 128M Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=134217728 backing_file=TEST_DIR/t.qcow2,,help cluster_size=65536 lazy_refcounts=off refcount_bits=16 -Testing: create -f qcow2 -o backing_file=TEST_DIR/t.qcow2,,? TEST_DIR/t.qcow2 128M +Testing: create -f qcow2 -u -o backing_file=TEST_DIR/t.qcow2,,? TEST_DIR/t.qcow2 128M Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=134217728 backing_file=TEST_DIR/t.qcow2,,? cluster_size=65536 lazy_refcounts=off refcount_bits=16 Testing: create -f qcow2 -o backing_file=TEST_DIR/t.qcow2, -o help TEST_DIR/t.qcow2 128M diff --git a/tests/qemu-iotests/085 b/tests/qemu-iotests/085 index b97adcd8db..71efe50d34 100755 --- a/tests/qemu-iotests/085 +++ b/tests/qemu-iotests/085 @@ -104,7 +104,7 @@ function add_snapshot_image() { base_image="${TEST_DIR}/$((${1}-1))-${snapshot_virt0}" snapshot_file="${TEST_DIR}/${1}-${snapshot_virt0}" - _make_test_img -b "${base_image}" "$size" + _make_test_img -u -b "${base_image}" "$size" mv "${TEST_IMG}" "${snapshot_file}" do_blockdev_add "$1" "'backing': '', " "${snapshot_file}" } diff --git a/tests/qemu-iotests/111.out b/tests/qemu-iotests/111.out index 683c01a679..5279c462fc 100644 --- a/tests/qemu-iotests/111.out +++ b/tests/qemu-iotests/111.out @@ -1,3 +1,4 @@ QA output created by 111 qemu-img: TEST_DIR/t.IMGFMT: Could not open 'TEST_DIR/t.IMGFMT.inexistent': No such file or directory +Could not open backing image to determine size. 
*** done diff --git a/tests/qemu-iotests/139 b/tests/qemu-iotests/139 index 175d8f0008..9ff51d9647 100644 --- a/tests/qemu-iotests/139 +++ b/tests/qemu-iotests/139 @@ -65,7 +65,7 @@ class TestBlockdevDel(iotests.QMPTestCase): # Add a BlockDriverState that will be used as overlay for the base_img BDS def addBlockDriverStateOverlay(self, node): self.checkBlockDriverState(node, False) - iotests.qemu_img('create', '-f', iotests.imgfmt, + iotests.qemu_img('create', '-u', '-f', iotests.imgfmt, '-b', base_img, new_img, '1M') opts = {'driver': iotests.imgfmt, 'node-name': node, diff --git a/tests/qemu-iotests/156 b/tests/qemu-iotests/156 index d799b73e1e..2c4a06e2d8 100755 --- a/tests/qemu-iotests/156 +++ b/tests/qemu-iotests/156 @@ -66,7 +66,7 @@ _send_qemu_cmd $QEMU_HANDLE \ 'return' # Create snapshot -TEST_IMG="$TEST_IMG.overlay" _make_test_img -b "$TEST_IMG" 1M +TEST_IMG="$TEST_IMG.overlay" _make_test_img -u -b "$TEST_IMG" 1M _send_qemu_cmd $QEMU_HANDLE \ "{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'source', diff --git a/tests/qemu-iotests/158 b/tests/qemu-iotests/158 index 823c12002e..24ac600a4a 100755 --- a/tests/qemu-iotests/158 +++ b/tests/qemu-iotests/158 @@ -66,7 +66,7 @@ echo "== verify pattern ==" $QEMU_IO --object $SECRET -c "read -P 0xa 0 $size" --image-opts $IMGSPECBASE | _filter_qemu_io | _filter_testdir echo "== create overlay ==" -_make_test_img --object $SECRET -o "encryption=on,encrypt.key-secret=sec0" -b "$TEST_IMG_BASE" $size +_make_test_img -u --object $SECRET -o "encryption=on,encrypt.key-secret=sec0" -b "$TEST_IMG_BASE" $size echo echo "== writing part of a cluster ==" diff --git a/tests/qemu-iotests/186 b/tests/qemu-iotests/186 new file mode 100755 index 0000000000..ab83ee402a --- /dev/null +++ b/tests/qemu-iotests/186 @@ -0,0 +1,147 @@ +#!/bin/bash +# +# Test 'info block' with all kinds of configurations +# +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +# creator +owner=kwolf@redhat.com + +seq=`basename $0` +echo "QA output created by $seq" + +here=`pwd` +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. ./common.filter + +_supported_fmt qcow2 +_supported_proto file +_supported_os Linux + +if [ "$QEMU_DEFAULT_MACHINE" != "pc" ]; then + _notrun "Requires a PC machine" +fi + +function do_run_qemu() +{ + echo Testing: "$@" + + ( + if ! 
test -t 0; then + while read cmd; do + echo $cmd + done + fi + echo quit + ) | $QEMU -S -nodefaults -display none -device virtio-scsi-pci -monitor stdio "$@" + echo +} + +function check_info_block() +{ + echo "info block" | + QEMU_OPTIONS="" do_run_qemu "$@" | _filter_win32 | _filter_hmp | + _filter_qemu | _filter_generated_node_ids +} + + +size=64M +_make_test_img $size + +removable="floppy ide-cd scsi-cd" +fixed="ide-hd scsi-hd virtio-blk-pci" + +echo +echo "=== Empty drives ===" +echo + +for dev in $removable; do + check_info_block -device $dev + check_info_block -device $dev,id=qdev_id +done + +echo +echo "=== -blockdev/-device=<node-name> ===" +echo + +for dev in $fixed $removable; do + check_info_block -blockdev driver=null-co,node-name=null -device $dev,drive=null + check_info_block -blockdev driver=null-co,node-name=null -device $dev,drive=null,id=qdev_id +done + +echo +echo "=== -drive if=none/-device=<node-name> ===" +echo + +# This creates two BlockBackends that will show up in 'info block'! +# A monitor-owned one from -drive, and anonymous one from -device +for dev in $fixed $removable; do + check_info_block -drive if=none,driver=null-co,node-name=null -device $dev,drive=null,id=qdev_id +done + +echo +echo "=== -drive if=none/-device=<bb-name> (with medium) ===" +echo + +for dev in $fixed $removable; do + check_info_block -drive if=none,driver=null-co,node-name=null -device $dev,drive=none0 + check_info_block -drive if=none,driver=null-co,node-name=null -device $dev,drive=none0,id=qdev_id +done + +echo +echo "=== -drive if=none/-device=<bb-name> (without medium) ===" +echo + +check_info_block -drive if=none + +for dev in $removable; do + check_info_block -drive if=none -device $dev,drive=none0 + check_info_block -drive if=none -device $dev,drive=none0,id=qdev_id +done + +echo +echo "=== -drive if=... 
===" +echo + +check_info_block -drive if=floppy +check_info_block -drive if=floppy,driver=null-co + +check_info_block -drive if=ide,driver=null-co +check_info_block -drive if=ide,media=cdrom +check_info_block -drive if=ide,driver=null-co,media=cdrom + +check_info_block -drive if=scsi,driver=null-co +check_info_block -drive if=scsi,media=cdrom +check_info_block -drive if=scsi,driver=null-co,media=cdrom + +check_info_block -drive if=virtio,driver=null-co + +check_info_block -drive if=pflash,driver=null-co,size=1M + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/186.out b/tests/qemu-iotests/186.out new file mode 100644 index 0000000000..b963b12d64 --- /dev/null +++ b/tests/qemu-iotests/186.out @@ -0,0 +1,489 @@ +QA output created by 186 +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 + +=== Empty drives === + +Testing: -device floppy +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +/machine/peripheral-anon/device[1]: [not inserted] + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -device floppy,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +qdev_id: [not inserted] + Attached to: qdev_id + Removable device: not locked, tray closed +(qemu) quit + +Testing: -device ide-cd +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +/machine/peripheral-anon/device[1]: [not inserted] + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -device ide-cd,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +qdev_id: [not inserted] + Attached to: qdev_id + Removable device: not locked, tray closed +(qemu) quit + +Testing: -device scsi-cd +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +/machine/peripheral-anon/device[1]: [not inserted] + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -device scsi-cd,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +qdev_id: [not inserted] + Attached to: qdev_id + Removable device: not locked, tray closed +(qemu) quit + + +=== -blockdev/-device=<node-name> === + +Testing: -blockdev driver=null-co,node-name=null -device ide-hd,drive=null +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device ide-hd,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: qdev_id + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device scsi-hd,drive=null +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device scsi-hd,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: qdev_id + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device virtio-blk-pci,drive=null +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block 
+null: null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1]/virtio-backend + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device virtio-blk-pci,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: /machine/peripheral/qdev_id/virtio-backend + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device floppy,drive=null +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device floppy,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device ide-cd,drive=null +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device ide-cd,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device scsi-cd,drive=null +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -blockdev driver=null-co,node-name=null -device scsi-cd,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +null: null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + + +=== -drive if=none/-device=<node-name> === + +Testing: -drive if=none,driver=null-co,node-name=null -device ide-hd,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Removable device: not locked, tray closed + Cache mode: writeback + +null: null-co:// (null-co) + Attached to: qdev_id + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device scsi-hd,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Removable device: not locked, tray closed + Cache mode: writeback + +null: null-co:// (null-co) + Attached to: qdev_id + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device virtio-blk-pci,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Removable device: not locked, tray closed + Cache mode: writeback + +null: null-co:// (null-co) + Attached to: /machine/peripheral/qdev_id/virtio-backend + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device floppy,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for 
more information +(qemu) info block +none0 (null): null-co:// (null-co) + Removable device: not locked, tray closed + Cache mode: writeback + +null: null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device ide-cd,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Removable device: not locked, tray closed + Cache mode: writeback + +null: null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device scsi-cd,drive=null,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Removable device: not locked, tray closed + Cache mode: writeback + +null: null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + + +=== -drive if=none/-device=<bb-name> (with medium) === + +Testing: -drive if=none,driver=null-co,node-name=null -device ide-hd,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device ide-hd,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: qdev_id + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device scsi-hd,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device scsi-hd,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: qdev_id + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device virtio-blk-pci,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1]/virtio-backend + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device virtio-blk-pci,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: /machine/peripheral/qdev_id/virtio-backend + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device floppy,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device floppy,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device 
ide-cd,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device ide-cd,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device scsi-cd,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=none,driver=null-co,node-name=null -device scsi-cd,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0 (null): null-co:// (null-co) + Attached to: qdev_id + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + + +=== -drive if=none/-device=<bb-name> (without medium) === + +Testing: -drive if=none +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0: [not inserted] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -drive if=none -device floppy,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0: [not inserted] + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -drive if=none -device floppy,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0: [not inserted] + Attached to: qdev_id + Removable device: not locked, tray closed +(qemu) quit + +Testing: -drive if=none -device ide-cd,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0: [not inserted] + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -drive if=none -device ide-cd,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0: [not inserted] + Attached to: qdev_id + Removable device: not locked, tray closed +(qemu) quit + +Testing: -drive if=none -device scsi-cd,drive=none0 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0: [not inserted] + Attached to: /machine/peripheral-anon/device[1] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -drive if=none -device scsi-cd,drive=none0,id=qdev_id +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +none0: [not inserted] + Attached to: qdev_id + Removable device: not locked, tray closed +(qemu) quit + + +=== -drive if=... 
=== + +Testing: -drive if=floppy +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +floppy0: [not inserted] + Attached to: /machine/unattached/device[17] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -drive if=floppy,driver=null-co +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +floppy0 (NODE_NAME): null-co:// (null-co) + Attached to: /machine/unattached/device[17] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=ide,driver=null-co +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +ide0-hd0 (NODE_NAME): null-co:// (null-co) + Attached to: /machine/unattached/device[18] + Cache mode: writeback +(qemu) quit + +Testing: -drive if=ide,media=cdrom +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +ide0-cd0: [not inserted] + Attached to: /machine/unattached/device[18] + Removable device: not locked, tray closed +(qemu) quit + +Testing: -drive if=ide,driver=null-co,media=cdrom +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +ide0-cd0 (NODE_NAME): null-co:// (null-co, read-only) + Attached to: /machine/unattached/device[18] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +warning: qemu-system-x86_64: -drive if=scsi,driver=null-co: bus=0,unit=0 is deprecated with this machine type +Testing: -drive if=scsi,driver=null-co +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +scsi0-hd0 (NODE_NAME): null-co:// (null-co) + Attached to: /machine/unattached/device[27]/scsi.0/legacy[0] + Cache mode: writeback +(qemu) quit + +warning: qemu-system-x86_64: -drive if=scsi,media=cdrom: bus=0,unit=0 is deprecated with this machine type +Testing: -drive if=scsi,media=cdrom +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +scsi0-cd0: [not inserted] + Attached to: /machine/unattached/device[27]/scsi.0/legacy[0] + Removable device: not locked, tray closed +(qemu) quit + +warning: qemu-system-x86_64: -drive if=scsi,driver=null-co,media=cdrom: bus=0,unit=0 is deprecated with this machine type +Testing: -drive if=scsi,driver=null-co,media=cdrom +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +scsi0-cd0 (NODE_NAME): null-co:// (null-co, read-only) + Attached to: /machine/unattached/device[27]/scsi.0/legacy[0] + Removable device: not locked, tray closed + Cache mode: writeback +(qemu) quit + +Testing: -drive if=virtio,driver=null-co +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +virtio0 (NODE_NAME): null-co:// (null-co) + Attached to: /machine/peripheral-anon/device[1]/virtio-backend + Cache mode: writeback +(qemu) quit + +Testing: -drive if=pflash,driver=null-co,size=1M +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) info block +pflash0 (NODE_NAME): json:{"driver": "null-co", "size": "1M"} (null-co) + Attached to: /machine/unattached/device[2] + Cache mode: writeback +(qemu) quit + +*** done diff --git a/tests/qemu-iotests/189 b/tests/qemu-iotests/189 index 54ad980a4e..e695475722 100755 --- a/tests/qemu-iotests/189 +++ b/tests/qemu-iotests/189 @@ -66,7 +66,7 @@ echo "== verify pattern ==" $QEMU_IO --object $SECRET0 -c "read -P 0xa 0 $size" --image-opts $IMGSPECBASE | _filter_qemu_io | _filter_testdir echo "== create overlay ==" -_make_test_img --object $SECRET1 -o "encrypt.format=luks,encrypt.key-secret=sec1,encrypt.iter-time=10" -b "$TEST_IMG_BASE" $size +_make_test_img 
--object $SECRET1 -o "encrypt.format=luks,encrypt.key-secret=sec1,encrypt.iter-time=10" -u -b "$TEST_IMG_BASE" $size echo echo "== writing part of a cluster ==" diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index 2aba585287..0961f8cc4e 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -181,5 +181,6 @@ 182 rw auto quick 183 rw auto migration 185 rw auto +186 rw auto 188 rw auto quick 189 rw auto quick diff --git a/tests/test-throttle.c b/tests/test-throttle.c index a9201b1fea..768f11dfed 100644 --- a/tests/test-throttle.c +++ b/tests/test-throttle.c @@ -228,7 +228,7 @@ static void test_config_functions(void) read_timer_cb, write_timer_cb, &ts); /* structure reset by throttle_init previous_leak should be null */ g_assert(!ts.previous_leak); - throttle_config(&ts, &tt, &orig_cfg); + throttle_config(&ts, QEMU_CLOCK_VIRTUAL, &orig_cfg); /* has previous leak been initialized by throttle_config ? */ g_assert(ts.previous_leak); @@ -486,7 +486,7 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */ throttle_init(&ts); throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL, read_timer_cb, write_timer_cb, &ts); - throttle_config(&ts, &tt, &cfg); + throttle_config(&ts, QEMU_CLOCK_VIRTUAL, &cfg); /* account a read */ throttle_account(&ts, false, size); diff --git a/util/throttle.c b/util/throttle.c index 3570ed25fc..b2a52b8b34 100644 --- a/util/throttle.c +++ b/util/throttle.c @@ -388,22 +388,14 @@ static void throttle_unfix_bucket(LeakyBucket *bkt) } } -/* take care of canceling a timer */ -static void throttle_cancel_timer(QEMUTimer *timer) -{ - assert(timer != NULL); - - timer_del(timer); -} - /* Used to configure the throttle * * @ts: the throttle state we are working on - * @tt: the throttle timers we use in this aio context + * @clock_type: the group's clock_type * @cfg: the config to set */ void throttle_config(ThrottleState *ts, - ThrottleTimers *tt, + QEMUClockType clock_type, ThrottleConfig *cfg) { int i; @@ -414,11 +406,7 @@ void throttle_config(ThrottleState *ts, throttle_fix_bucket(&ts->cfg.buckets[i]); } - ts->previous_leak = qemu_clock_get_ns(tt->clock_type); - - for (i = 0; i < 2; i++) { - throttle_cancel_timer(tt->timers[i]); - } + ts->previous_leak = qemu_clock_get_ns(clock_type); } /* used to get config |
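The util/throttle.c and tests/test-throttle.c hunks above change the throttle_config() interface: it now receives the group's QEMUClockType instead of a ThrottleTimers pointer, and it no longer deletes the timers itself (the throttle_cancel_timer() helper is removed). A minimal sketch of the resulting call order, modelled on the updated unit test; the header path, the caller-provided AioContext and the 1 MB/s limit are illustrative assumptions rather than part of the patch:

    #include "qemu/osdep.h"
    #include "qemu/throttle.h"   /* assumed QEMU-internal header for ThrottleState et al. */

    /* QEMUTimerCB-compatible callbacks; real users would restart queued I/O here. */
    static void read_timer_cb(void *opaque)  { }
    static void write_timer_cb(void *opaque) { }

    /* Configure throttling on @ts, with timers running in the caller-provided @ctx. */
    static void setup_throttling(ThrottleState *ts, ThrottleTimers *tt, AioContext *ctx)
    {
        ThrottleConfig cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.buckets[THROTTLE_BPS_TOTAL].avg = 1 * 1024 * 1024;  /* illustrative 1 MB/s cap */

        throttle_init(ts);
        throttle_timers_init(tt, ctx, QEMU_CLOCK_VIRTUAL,
                             read_timer_cb, write_timer_cb, ts);

        /* New signature: pass the clock type; the ThrottleTimers are no longer
         * touched (or cancelled) by throttle_config() itself. */
        throttle_config(ts, QEMU_CLOCK_VIRTUAL, &cfg);

        /* Account a 512-byte read against the bucket, as the unit test does. */
        throttle_account(ts, false, 512);
    }
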