120 files changed, 4799 insertions, 1748 deletions
@@ -564,8 +564,9 @@ qemu.1: qemu-doc.texi qemu-options.texi qemu-monitor.texi qemu-monitor-info.texi perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu.pod && \ $(POD2MAN) --section=1 --center=" " --release=" " qemu.pod > $@, \ " GEN $@") +qemu.1: qemu-option-trace.texi -qemu-img.1: qemu-img.texi qemu-img-cmds.texi +qemu-img.1: qemu-img.texi qemu-option-trace.texi qemu-img-cmds.texi $(call quiet-command, \ perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu-img.pod && \ $(POD2MAN) --section=1 --center=" " --release=" " qemu-img.pod > $@, \ @@ -577,7 +578,7 @@ fsdev/virtfs-proxy-helper.1: fsdev/virtfs-proxy-helper.texi $(POD2MAN) --section=1 --center=" " --release=" " fsdev/virtfs-proxy-helper.pod > $@, \ " GEN $@") -qemu-nbd.8: qemu-nbd.texi +qemu-nbd.8: qemu-nbd.texi qemu-option-trace.texi $(call quiet-command, \ perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu-nbd.pod && \ $(POD2MAN) --section=8 --center=" " --release=" " qemu-nbd.pod > $@, \ @@ -595,7 +596,7 @@ info: qemu-doc.info qemu-tech.info pdf: qemu-doc.pdf qemu-tech.pdf qemu-doc.dvi qemu-doc.html qemu-doc.info qemu-doc.pdf: \ - qemu-img.texi qemu-nbd.texi qemu-options.texi \ + qemu-img.texi qemu-nbd.texi qemu-options.texi qemu-option-trace.texi \ qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \ qemu-monitor-info.texi diff --git a/Makefile.target b/Makefile.target index d720b3e733..a440bcb5b8 100644 --- a/Makefile.target +++ b/Makefile.target @@ -108,11 +108,8 @@ obj-$(CONFIG_LIBDECNUMBER) += libdecnumber/dpd/decimal128.o ifdef CONFIG_LINUX_USER -# Note that we only add linux-user/host/$ARCH if it exists, and -# that it must come before linux-user/host/generic in the search path. QEMU_CFLAGS+=-I$(SRC_PATH)/linux-user/$(TARGET_ABI_DIR) \ - $(patsubst %,-I%,$(wildcard $(SRC_PATH)/linux-user/host/$(ARCH))) \ - -I$(SRC_PATH)/linux-user/host/generic \ + -I$(SRC_PATH)/linux-user/host/$(ARCH) \ -I$(SRC_PATH)/linux-user obj-y += linux-user/ diff --git a/block/backup.c b/block/backup.c index 581269b29a..f87f8d539b 100644 --- a/block/backup.c +++ b/block/backup.c @@ -489,7 +489,6 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target, assert(bs); assert(target); - assert(cb); if (bs == target) { error_setg(errp, "Source and target cannot be the same"); diff --git a/block/gluster.c b/block/gluster.c index d361d8e847..16f7778a50 100644 --- a/block/gluster.c +++ b/block/gluster.c @@ -24,6 +24,8 @@ typedef struct GlusterAIOCB { typedef struct BDRVGlusterState { struct glfs *glfs; struct glfs_fd *fd; + bool supports_seek_data; + int debug_level; } BDRVGlusterState; typedef struct GlusterConf { @@ -32,6 +34,7 @@ typedef struct GlusterConf { char *volname; char *image; char *transport; + int debug_level; } GlusterConf; static void qemu_gluster_gconf_free(GlusterConf *gconf) @@ -194,11 +197,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename, goto out; } - /* - * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when - * GlusterFS makes GF_LOG_* macros available to libgfapi users. 
- */ - ret = glfs_set_logging(glfs, "-", 4); + ret = glfs_set_logging(glfs, "-", gconf->debug_level); if (ret < 0) { goto out; } @@ -256,16 +255,26 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) qemu_bh_schedule(acb->bh); } +#define GLUSTER_OPT_FILENAME "filename" +#define GLUSTER_OPT_DEBUG "debug" +#define GLUSTER_DEBUG_DEFAULT 4 +#define GLUSTER_DEBUG_MAX 9 + /* TODO Convert to fine grained options */ static QemuOptsList runtime_opts = { .name = "gluster", .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head), .desc = { { - .name = "filename", + .name = GLUSTER_OPT_FILENAME, .type = QEMU_OPT_STRING, .help = "URL to the gluster image", }, + { + .name = GLUSTER_OPT_DEBUG, + .type = QEMU_OPT_NUMBER, + .help = "Gluster log level, valid range is 0-9", + }, { /* end of list */ } }, }; @@ -287,6 +296,28 @@ static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags) } } +/* + * Do SEEK_DATA/HOLE to detect if it is functional. Older broken versions of + * gfapi incorrectly return the current offset when SEEK_DATA/HOLE is used. + * - Corrected versions return -1 and set errno to EINVAL. + * - Versions that support SEEK_DATA/HOLE correctly, will return -1 and set + * errno to ENXIO when SEEK_DATA is called with a position of EOF. + */ +static bool qemu_gluster_test_seek(struct glfs_fd *fd) +{ + off_t ret, eof; + + eof = glfs_lseek(fd, 0, SEEK_END); + if (eof < 0) { + /* this should never occur */ + return false; + } + + /* this should always fail with ENXIO if SEEK_DATA is supported */ + ret = glfs_lseek(fd, eof, SEEK_DATA); + return (ret < 0) && (errno == ENXIO); +} + static int qemu_gluster_open(BlockDriverState *bs, QDict *options, int bdrv_flags, Error **errp) { @@ -306,8 +337,17 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, goto out; } - filename = qemu_opt_get(opts, "filename"); + filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME); + s->debug_level = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG, + GLUSTER_DEBUG_DEFAULT); + if (s->debug_level < 0) { + s->debug_level = 0; + } else if (s->debug_level > GLUSTER_DEBUG_MAX) { + s->debug_level = GLUSTER_DEBUG_MAX; + } + + gconf->debug_level = s->debug_level; s->glfs = qemu_gluster_init(gconf, filename, errp); if (!s->glfs) { ret = -errno; @@ -338,6 +378,8 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, ret = -errno; } + s->supports_seek_data = qemu_gluster_test_seek(s->fd); + out: qemu_opts_del(opts); qemu_gluster_gconf_free(gconf); @@ -363,6 +405,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue, Error **errp) { int ret = 0; + BDRVGlusterState *s; BDRVGlusterReopenState *reop_s; GlusterConf *gconf = NULL; int open_flags = 0; @@ -370,6 +413,8 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, assert(state != NULL); assert(state->bs != NULL); + s = state->bs->opaque; + state->opaque = g_new0(BDRVGlusterReopenState, 1); reop_s = state->opaque; @@ -377,6 +422,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, gconf = g_new0(GlusterConf, 1); + gconf->debug_level = s->debug_level; reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp); if (reop_s->glfs == NULL) { ret = -errno; @@ -510,6 +556,14 @@ static int qemu_gluster_create(const char *filename, char *tmp = NULL; GlusterConf *gconf = g_new0(GlusterConf, 1); + gconf->debug_level = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG, + GLUSTER_DEBUG_DEFAULT); + if (gconf->debug_level < 0) { + gconf->debug_level = 0; + } else if 
(gconf->debug_level > GLUSTER_DEBUG_MAX) { + gconf->debug_level = GLUSTER_DEBUG_MAX; + } + glfs = qemu_gluster_init(gconf, filename, errp); if (!glfs) { ret = -errno; @@ -727,6 +781,159 @@ static int qemu_gluster_has_zero_init(BlockDriverState *bs) return 0; } +/* + * Find allocation range in @bs around offset @start. + * May change underlying file descriptor's file offset. + * If @start is not in a hole, store @start in @data, and the + * beginning of the next hole in @hole, and return 0. + * If @start is in a non-trailing hole, store @start in @hole and the + * beginning of the next non-hole in @data, and return 0. + * If @start is in a trailing hole or beyond EOF, return -ENXIO. + * If we can't find out, return a negative errno other than -ENXIO. + * + * (Shamefully copied from raw-posix.c, only miniscule adaptions.) + */ +static int find_allocation(BlockDriverState *bs, off_t start, + off_t *data, off_t *hole) +{ + BDRVGlusterState *s = bs->opaque; + off_t offs; + + if (!s->supports_seek_data) { + return -ENOTSUP; + } + + /* + * SEEK_DATA cases: + * D1. offs == start: start is in data + * D2. offs > start: start is in a hole, next data at offs + * D3. offs < 0, errno = ENXIO: either start is in a trailing hole + * or start is beyond EOF + * If the latter happens, the file has been truncated behind + * our back since we opened it. All bets are off then. + * Treating like a trailing hole is simplest. + * D4. offs < 0, errno != ENXIO: we learned nothing + */ + offs = glfs_lseek(s->fd, start, SEEK_DATA); + if (offs < 0) { + return -errno; /* D3 or D4 */ + } + assert(offs >= start); + + if (offs > start) { + /* D2: in hole, next data at offs */ + *hole = start; + *data = offs; + return 0; + } + + /* D1: in data, end not yet known */ + + /* + * SEEK_HOLE cases: + * H1. offs == start: start is in a hole + * If this happens here, a hole has been dug behind our back + * since the previous lseek(). + * H2. offs > start: either start is in data, next hole at offs, + * or start is in trailing hole, EOF at offs + * Linux treats trailing holes like any other hole: offs == + * start. Solaris seeks to EOF instead: offs > start (blech). + * If that happens here, a hole has been dug behind our back + * since the previous lseek(). + * H3. offs < 0, errno = ENXIO: start is beyond EOF + * If this happens, the file has been truncated behind our + * back since we opened it. Treat it like a trailing hole. + * H4. offs < 0, errno != ENXIO: we learned nothing + * Pretend we know nothing at all, i.e. "forget" about D1. + */ + offs = glfs_lseek(s->fd, start, SEEK_HOLE); + if (offs < 0) { + return -errno; /* D1 and (H3 or H4) */ + } + assert(offs >= start); + + if (offs > start) { + /* + * D1 and H2: either in data, next hole at offs, or it was in + * data but is now in a trailing hole. In the latter case, + * all bets are off. Treating it as if it there was data all + * the way to EOF is safe, so simply do that. + */ + *data = start; + *hole = offs; + return 0; + } + + /* D1 and H1 */ + return -EBUSY; +} + +/* + * Returns the allocation status of the specified sectors. + * + * If 'sector_num' is beyond the end of the disk image the return value is 0 + * and 'pnum' is set to 0. + * + * 'pnum' is set to the number of sectors (including and immediately following + * the specified sector) that are known to be in the same + * allocated/unallocated state. + * + * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes + * beyond the end of the disk image it will be clamped. 
+ * + * (Based on raw_co_get_block_status() from raw-posix.c.) + */ +static int64_t coroutine_fn qemu_gluster_co_get_block_status( + BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum, + BlockDriverState **file) +{ + BDRVGlusterState *s = bs->opaque; + off_t start, data = 0, hole = 0; + int64_t total_size; + int ret = -EINVAL; + + if (!s->fd) { + return ret; + } + + start = sector_num * BDRV_SECTOR_SIZE; + total_size = bdrv_getlength(bs); + if (total_size < 0) { + return total_size; + } else if (start >= total_size) { + *pnum = 0; + return 0; + } else if (start + nb_sectors * BDRV_SECTOR_SIZE > total_size) { + nb_sectors = DIV_ROUND_UP(total_size - start, BDRV_SECTOR_SIZE); + } + + ret = find_allocation(bs, start, &data, &hole); + if (ret == -ENXIO) { + /* Trailing hole */ + *pnum = nb_sectors; + ret = BDRV_BLOCK_ZERO; + } else if (ret < 0) { + /* No info available, so pretend there are no holes */ + *pnum = nb_sectors; + ret = BDRV_BLOCK_DATA; + } else if (data == start) { + /* On a data extent, compute sectors to the end of the extent, + * possibly including a partial sector at EOF. */ + *pnum = MIN(nb_sectors, DIV_ROUND_UP(hole - start, BDRV_SECTOR_SIZE)); + ret = BDRV_BLOCK_DATA; + } else { + /* On a hole, compute sectors to the beginning of the next extent. */ + assert(hole == start); + *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE); + ret = BDRV_BLOCK_ZERO; + } + + *file = bs; + + return ret | BDRV_BLOCK_OFFSET_VALID | start; +} + + static QemuOptsList qemu_gluster_create_opts = { .name = "qemu-gluster-create-opts", .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head), @@ -741,6 +948,11 @@ static QemuOptsList qemu_gluster_create_opts = { .type = QEMU_OPT_STRING, .help = "Preallocation mode (allowed values: off, full)" }, + { + .name = GLUSTER_OPT_DEBUG, + .type = QEMU_OPT_NUMBER, + .help = "Gluster log level, valid range is 0-9", + }, { /* end of list */ } } }; @@ -769,6 +981,7 @@ static BlockDriver bdrv_gluster = { #ifdef CONFIG_GLUSTERFS_ZEROFILL .bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes, #endif + .bdrv_co_get_block_status = qemu_gluster_co_get_block_status, .create_opts = &qemu_gluster_create_opts, }; @@ -796,6 +1009,7 @@ static BlockDriver bdrv_gluster_tcp = { #ifdef CONFIG_GLUSTERFS_ZEROFILL .bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes, #endif + .bdrv_co_get_block_status = qemu_gluster_co_get_block_status, .create_opts = &qemu_gluster_create_opts, }; @@ -823,6 +1037,7 @@ static BlockDriver bdrv_gluster_unix = { #ifdef CONFIG_GLUSTERFS_ZEROFILL .bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes, #endif + .bdrv_co_get_block_status = qemu_gluster_co_get_block_status, .create_opts = &qemu_gluster_create_opts, }; @@ -850,6 +1065,7 @@ static BlockDriver bdrv_gluster_rdma = { #ifdef CONFIG_GLUSTERFS_ZEROFILL .bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes, #endif + .bdrv_co_get_block_status = qemu_gluster_co_get_block_status, .create_opts = &qemu_gluster_create_opts, }; diff --git a/block/mirror.c b/block/mirror.c index a04ed9c7a4..8d96049555 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -218,7 +218,9 @@ static inline void mirror_wait_for_io(MirrorBlockJob *s) } /* Submit async read while handling COW. - * Returns: nb_sectors if no alignment is necessary, or + * Returns: The number of sectors copied after and including sector_num, + * excluding any sectors copied prior to sector_num due to alignment. 
+ * This will be nb_sectors if no alignment is necessary, or * (new_end - sector_num) if tail is rounded up or down due to * alignment or buffer limit. */ @@ -227,14 +229,18 @@ static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num, { BlockBackend *source = s->common.blk; int sectors_per_chunk, nb_chunks; - int ret = nb_sectors; + int ret; MirrorOp *op; + int max_sectors; sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; + max_sectors = sectors_per_chunk * s->max_iov; /* We can only handle as much as buf_size at a time. */ nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors); + nb_sectors = MIN(max_sectors, nb_sectors); assert(nb_sectors); + ret = nb_sectors; if (s->cow_bitmap) { ret += mirror_cow_align(s, §or_num, &nb_sectors); @@ -327,7 +333,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) first_chunk = sector_num / sectors_per_chunk; while (test_bit(first_chunk, s->in_flight_bitmap)) { - trace_mirror_yield_in_flight(s, first_chunk, s->in_flight); + trace_mirror_yield_in_flight(s, sector_num, s->in_flight); mirror_wait_for_io(s); } @@ -769,7 +775,7 @@ static void mirror_complete(BlockJob *job, Error **errp) } } - /* check the target bs is not blocked and block all operations on it */ + /* block all operations on to_replace bs */ if (s->replaces) { AioContext *replace_aio_context; diff --git a/block/nfs.c b/block/nfs.c index 9f51cc3f10..15d6832c4c 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -1,7 +1,7 @@ /* * QEMU Block driver for native access to files on NFS shares * - * Copyright (c) 2014 Peter Lieven <pl@kamp.de> + * Copyright (c) 2014-2016 Peter Lieven <pl@kamp.de> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -38,6 +38,7 @@ #include <nfsc/libnfs.h> #define QEMU_NFS_MAX_READAHEAD_SIZE 1048576 +#define QEMU_NFS_MAX_PAGECACHE_SIZE (8388608 / NFS_BLKSIZE) #define QEMU_NFS_MAX_DEBUG_LEVEL 2 typedef struct NFSClient { @@ -47,6 +48,7 @@ typedef struct NFSClient { bool has_zero_init; AioContext *aio_context; blkcnt_t st_blocks; + bool cache_used; } NFSClient; typedef struct NFSRPC { @@ -278,7 +280,7 @@ static void nfs_file_close(BlockDriverState *bs) } static int64_t nfs_client_open(NFSClient *client, const char *filename, - int flags, Error **errp) + int flags, Error **errp, int open_flags) { int ret = -EINVAL, i; struct stat st; @@ -330,12 +332,38 @@ static int64_t nfs_client_open(NFSClient *client, const char *filename, nfs_set_tcp_syncnt(client->context, val); #ifdef LIBNFS_FEATURE_READAHEAD } else if (!strcmp(qp->p[i].name, "readahead")) { + if (open_flags & BDRV_O_NOCACHE) { + error_setg(errp, "Cannot enable NFS readahead " + "if cache.direct = on"); + goto fail; + } if (val > QEMU_NFS_MAX_READAHEAD_SIZE) { error_report("NFS Warning: Truncating NFS readahead" " size to %d", QEMU_NFS_MAX_READAHEAD_SIZE); val = QEMU_NFS_MAX_READAHEAD_SIZE; } nfs_set_readahead(client->context, val); +#ifdef LIBNFS_FEATURE_PAGECACHE + nfs_set_pagecache_ttl(client->context, 0); +#endif + client->cache_used = true; +#endif +#ifdef LIBNFS_FEATURE_PAGECACHE + nfs_set_pagecache_ttl(client->context, 0); + } else if (!strcmp(qp->p[i].name, "pagecache")) { + if (open_flags & BDRV_O_NOCACHE) { + error_setg(errp, "Cannot enable NFS pagecache " + "if cache.direct = on"); + goto fail; + } + if (val > QEMU_NFS_MAX_PAGECACHE_SIZE) { + error_report("NFS Warning: Truncating NFS pagecache" + " size to %d pages", QEMU_NFS_MAX_PAGECACHE_SIZE); + val = 
QEMU_NFS_MAX_PAGECACHE_SIZE; + } + nfs_set_pagecache(client->context, val); + nfs_set_pagecache_ttl(client->context, 0); + client->cache_used = true; #endif #ifdef LIBNFS_FEATURE_DEBUG } else if (!strcmp(qp->p[i].name, "debug")) { @@ -418,7 +446,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags, } ret = nfs_client_open(client, qemu_opt_get(opts, "filename"), (flags & BDRV_O_RDWR) ? O_RDWR : O_RDONLY, - errp); + errp, bs->open_flags); if (ret < 0) { goto out; } @@ -454,7 +482,7 @@ static int nfs_file_create(const char *url, QemuOpts *opts, Error **errp) total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), BDRV_SECTOR_SIZE); - ret = nfs_client_open(client, url, O_CREAT, errp); + ret = nfs_client_open(client, url, O_CREAT, errp, 0); if (ret < 0) { goto out; } @@ -516,6 +544,12 @@ static int nfs_reopen_prepare(BDRVReopenState *state, return -EACCES; } + if ((state->flags & BDRV_O_NOCACHE) && client->cache_used) { + error_setg(errp, "Cannot disable cache if libnfs readahead or" + " pagecache is enabled"); + return -EINVAL; + } + /* Update cache for read-only reopens */ if (!(state->flags & BDRV_O_RDWR)) { ret = nfs_fstat(client->context, client->fh, &st); @@ -530,6 +564,15 @@ static int nfs_reopen_prepare(BDRVReopenState *state, return 0; } +#ifdef LIBNFS_FEATURE_PAGECACHE +static void nfs_invalidate_cache(BlockDriverState *bs, + Error **errp) +{ + NFSClient *client = bs->opaque; + nfs_pagecache_invalidate(client->context, client->fh); +} +#endif + static BlockDriver bdrv_nfs = { .format_name = "nfs", .protocol_name = "nfs", @@ -553,6 +596,10 @@ static BlockDriver bdrv_nfs = { .bdrv_detach_aio_context = nfs_detach_aio_context, .bdrv_attach_aio_context = nfs_attach_aio_context, + +#ifdef LIBNFS_FEATURE_PAGECACHE + .bdrv_invalidate_cache = nfs_invalidate_cache, +#endif }; static void nfs_block_init(void) diff --git a/blockjob.c b/blockjob.c index 90c4e262b0..205da9df4e 100644 --- a/blockjob.c +++ b/blockjob.c @@ -110,6 +110,7 @@ void *block_job_create(const BlockJobDriver *driver, BlockDriverState *bs, BlockBackend *blk; BlockJob *job; + assert(cb); if (bs->job) { error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs)); return NULL; diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c index a9fe8693c1..66492aaf5d 100644 --- a/bsd-user/syscall.c +++ b/bsd-user/syscall.c @@ -315,12 +315,14 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8) { + CPUState *cpu = ENV_GET_CPU(cpu_env); abi_long ret; void *p; #ifdef DEBUG gemu_log("freebsd syscall %d\n", num); #endif + trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); if(do_strace) print_freebsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); @@ -400,6 +402,7 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1, #endif if (do_strace) print_freebsd_syscall_ret(num, ret); + trace_guest_user_syscall_ret(cpu, num, ret); return ret; efault: ret = -TARGET_EFAULT; @@ -410,12 +413,14 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { + CPUState *cpu = ENV_GET_CPU(cpu_env); abi_long ret; void *p; #ifdef DEBUG gemu_log("netbsd syscall %d\n", num); #endif + trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); if(do_strace) print_netbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); @@ -472,6 +477,7 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long 
arg1, #endif if (do_strace) print_netbsd_syscall_ret(num, ret); + trace_guest_user_syscall_ret(cpu, num, ret); return ret; efault: ret = -TARGET_EFAULT; @@ -482,12 +488,14 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { + CPUState *cpu = ENV_GET_CPU(cpu_env); abi_long ret; void *p; #ifdef DEBUG gemu_log("openbsd syscall %d\n", num); #endif + trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); if(do_strace) print_openbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); @@ -544,6 +552,7 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1, #endif if (do_strace) print_openbsd_syscall_ret(num, ret); + trace_guest_user_syscall_ret(cpu, num, ret); return ret; efault: ret = -TARGET_EFAULT; @@ -1216,6 +1216,13 @@ esac QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS" EXTRA_CFLAGS="$CPU_CFLAGS $EXTRA_CFLAGS" +# For user-mode emulation the host arch has to be one we explicitly +# support, even if we're using TCI. +if [ "$ARCH" = "unknown" ]; then + bsd_user="no" + linux_user="no" +fi + default_target_list="" mak_wilds="" @@ -1380,7 +1387,6 @@ fi if test "$ARCH" = "unknown"; then if test "$tcg_interpreter" = "yes" ; then echo "Unsupported CPU = $cpu, will use TCG with TCI (experimental)" - ARCH=tci else error_exit "Unsupported CPU = $cpu, try --enable-tcg-interpreter" fi @@ -1792,8 +1798,10 @@ int foo(void *a) __attribute__((ifunc("bar_ifunc"))); int main(int argc, char *argv[]) { return foo(argv[0]);} EOF if compile_object "" ; then - if readelf --syms $TMPO |grep "IFUNC.*foo" >/dev/null 2>&1; then - avx2_opt="yes" + if has readelf; then + if readelf --syms $TMPO 2>/dev/null |grep -q "IFUNC.*foo"; then + avx2_opt="yes" + fi fi fi @@ -4703,7 +4711,7 @@ if test "$cpu" = "s390x" ; then fi # Probe for the need for relocating the user-only binary. -if test "$pie" = "no" ; then +if ( [ "$linux_user" = yes ] || [ "$bsd_user" = yes ] ) && [ "$pie" = no ]; then textseg_addr= case "$cpu" in arm | i386 | ppc* | s390* | sparc* | x86_64 | x32) @@ -4725,6 +4733,16 @@ EOF # In case ld does not support -Ttext-segment, edit the default linker # script via sed to set the .text start addr. This is needed on FreeBSD # at least. + if ! $ld --verbose >/dev/null 2>&1; then + error_exit \ + "We need to link the QEMU user mode binaries at a" \ + "specific text address. Unfortunately your linker" \ + "doesn't support either the -Ttext-segment option or" \ + "printing the default linker script with --verbose." \ + "If you don't want the user mode binaries, pass the" \ + "--disable-user option to configure." + fi + $ld --verbose | sed \ -e '1,/==================================================/d' \ -e '/==================================================/,$d' \ @@ -30,6 +30,8 @@ #include "exec/ram_addr.h" #include "exec/exec-all.h" #include "tcg/tcg.h" +#include "qemu/error-report.h" +#include "exec/log.h" /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ @@ -427,6 +429,39 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, prot, mmu_idx, size); } +static void report_bad_exec(CPUState *cpu, target_ulong addr) +{ + /* Accidentally executing outside RAM or ROM is quite common for + * several user-error situations, so report it in a way that + * makes it clear that this isn't a QEMU bug and provide suggestions + * about what a user could do to fix things. 
+ */ + error_report("Trying to execute code outside RAM or ROM at 0x" + TARGET_FMT_lx, addr); + error_printf("This usually means one of the following happened:\n\n" + "(1) You told QEMU to execute a kernel for the wrong machine " + "type, and it crashed on startup (eg trying to run a " + "raspberry pi kernel on a versatilepb QEMU machine)\n" + "(2) You didn't give QEMU a kernel or BIOS filename at all, " + "and QEMU executed a ROM full of no-op instructions until " + "it fell off the end\n" + "(3) Your guest kernel has a bug and crashed by jumping " + "off into nowhere\n\n" + "This is almost always one of the first two, so check your " + "command line and that you are using the right type of kernel " + "for this machine.\n" + "If you think option (3) is likely then you can try debugging " + "your guest with the -d debug options; in particular " + "-d guest_errors will cause the log to include a dump of the " + "guest register state at this point.\n\n" + "Execution cannot continue; stopping here.\n\n"); + + /* Report also to the logs, with more detail including register dump */ + qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code " + "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); + log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP); +} + /* NOTE: this function can trigger an exception */ /* NOTE2: the returned address is not exactly the physical address: it * is actually a ram_addr_t (in system mode; the user mode emulation @@ -455,8 +490,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) if (cc->do_unassigned_access) { cc->do_unassigned_access(cpu, addr, false, true, 0, 4); } else { - cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x" - TARGET_FMT_lx "\n", addr); + report_bad_exec(cpu, addr); + exit(1); } } p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend); diff --git a/dma-helpers.c b/dma-helpers.c index b521d84ebd..9defc101b7 100644 --- a/dma-helpers.c +++ b/dma-helpers.c @@ -185,10 +185,17 @@ static void dma_aio_cancel(BlockAIOCB *acb) } } +static AioContext *dma_get_aio_context(BlockAIOCB *acb) +{ + DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common); + + return dbs->ctx; +} static const AIOCBInfo dma_aiocb_info = { .aiocb_size = sizeof(DMAAIOCB), .cancel_async = dma_aio_cancel, + .get_aio_context = dma_get_aio_context, }; BlockAIOCB *dma_blk_io(AioContext *ctx, diff --git a/fpu/softfloat-specialize.h b/fpu/softfloat-specialize.h index a4cbdad452..43d08903a5 100644 --- a/fpu/softfloat-specialize.h +++ b/fpu/softfloat-specialize.h @@ -79,16 +79,6 @@ this code that are retained. * version 2 or later. See the COPYING file in the top-level directory. */ -/* Does the target distinguish signaling NaNs from non-signaling NaNs - * by setting the most significant bit of the mantissa for a signaling NaN? - * (The more common choice is to have it be zero for SNaN and one for QNaN.) - */ -#if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) -#define SNAN_BIT_IS_ONE 1 -#else -#define SNAN_BIT_IS_ONE 0 -#endif - #if defined(TARGET_XTENSA) /* Define for architectures which deviate from IEEE in not supporting * signaling NaNs (so all NaNs are treated as quiet). @@ -99,73 +89,106 @@ this code that are retained. /*---------------------------------------------------------------------------- | The pattern for a default generated half-precision NaN. 
*----------------------------------------------------------------------------*/ +float16 float16_default_nan(float_status *status) +{ #if defined(TARGET_ARM) -const float16 float16_default_nan = const_float16(0x7E00); -#elif SNAN_BIT_IS_ONE -const float16 float16_default_nan = const_float16(0x7DFF); + return const_float16(0x7E00); +#else + if (status->snan_bit_is_one) { + return const_float16(0x7DFF); + } else { +#if defined(TARGET_MIPS) + return const_float16(0x7E00); #else -const float16 float16_default_nan = const_float16(0xFE00); + return const_float16(0xFE00); #endif + } +#endif +} /*---------------------------------------------------------------------------- | The pattern for a default generated single-precision NaN. *----------------------------------------------------------------------------*/ +float32 float32_default_nan(float_status *status) +{ #if defined(TARGET_SPARC) -const float32 float32_default_nan = const_float32(0x7FFFFFFF); + return const_float32(0x7FFFFFFF); #elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) || \ defined(TARGET_XTENSA) || defined(TARGET_S390X) || defined(TARGET_TRICORE) -const float32 float32_default_nan = const_float32(0x7FC00000); -#elif SNAN_BIT_IS_ONE -const float32 float32_default_nan = const_float32(0x7FBFFFFF); + return const_float32(0x7FC00000); #else -const float32 float32_default_nan = const_float32(0xFFC00000); + if (status->snan_bit_is_one) { + return const_float32(0x7FBFFFFF); + } else { +#if defined(TARGET_MIPS) + return const_float32(0x7FC00000); +#else + return const_float32(0xFFC00000); +#endif + } #endif +} /*---------------------------------------------------------------------------- | The pattern for a default generated double-precision NaN. *----------------------------------------------------------------------------*/ +float64 float64_default_nan(float_status *status) +{ #if defined(TARGET_SPARC) -const float64 float64_default_nan = const_float64(LIT64( 0x7FFFFFFFFFFFFFFF )); + return const_float64(LIT64(0x7FFFFFFFFFFFFFFF)); #elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) || \ defined(TARGET_S390X) -const float64 float64_default_nan = const_float64(LIT64( 0x7FF8000000000000 )); -#elif SNAN_BIT_IS_ONE -const float64 float64_default_nan = const_float64(LIT64(0x7FF7FFFFFFFFFFFF)); + return const_float64(LIT64(0x7FF8000000000000)); +#else + if (status->snan_bit_is_one) { + return const_float64(LIT64(0x7FF7FFFFFFFFFFFF)); + } else { +#if defined(TARGET_MIPS) + return const_float64(LIT64(0x7FF8000000000000)); #else -const float64 float64_default_nan = const_float64(LIT64( 0xFFF8000000000000 )); + return const_float64(LIT64(0xFFF8000000000000)); #endif + } +#endif +} /*---------------------------------------------------------------------------- | The pattern for a default generated extended double-precision NaN. 
*----------------------------------------------------------------------------*/ -#if SNAN_BIT_IS_ONE -#define floatx80_default_nan_high 0x7FFF -#define floatx80_default_nan_low LIT64(0xBFFFFFFFFFFFFFFF) -#else -#define floatx80_default_nan_high 0xFFFF -#define floatx80_default_nan_low LIT64( 0xC000000000000000 ) -#endif +floatx80 floatx80_default_nan(float_status *status) +{ + floatx80 r; -const floatx80 floatx80_default_nan - = make_floatx80_init(floatx80_default_nan_high, floatx80_default_nan_low); + if (status->snan_bit_is_one) { + r.low = LIT64(0xBFFFFFFFFFFFFFFF); + r.high = 0x7FFF; + } else { + r.low = LIT64(0xC000000000000000); + r.high = 0xFFFF; + } + return r; +} /*---------------------------------------------------------------------------- -| The pattern for a default generated quadruple-precision NaN. The `high' and -| `low' values hold the most- and least-significant bits, respectively. +| The pattern for a default generated quadruple-precision NaN. *----------------------------------------------------------------------------*/ -#if SNAN_BIT_IS_ONE -#define float128_default_nan_high LIT64(0x7FFF7FFFFFFFFFFF) -#define float128_default_nan_low LIT64(0xFFFFFFFFFFFFFFFF) -#elif defined(TARGET_S390X) -#define float128_default_nan_high LIT64( 0x7FFF800000000000 ) -#define float128_default_nan_low LIT64( 0x0000000000000000 ) +float128 float128_default_nan(float_status *status) +{ + float128 r; + + if (status->snan_bit_is_one) { + r.low = LIT64(0xFFFFFFFFFFFFFFFF); + r.high = LIT64(0x7FFF7FFFFFFFFFFF); + } else { + r.low = LIT64(0x0000000000000000); +#if defined(TARGET_S390X) + r.high = LIT64(0x7FFF800000000000); #else -#define float128_default_nan_high LIT64( 0xFFFF800000000000 ) -#define float128_default_nan_low LIT64( 0x0000000000000000 ) + r.high = LIT64(0xFFFF800000000000); #endif - -const float128 float128_default_nan - = make_float128_init(float128_default_nan_high, float128_default_nan_low); + } + return r; +} /*---------------------------------------------------------------------------- | Raises the exceptions specified by `flags'. Floating-point traps can be @@ -188,12 +211,12 @@ typedef struct { } commonNaNT; #ifdef NO_SIGNALING_NANS -int float16_is_quiet_nan(float16 a_) +int float16_is_quiet_nan(float16 a_, float_status *status) { return float16_is_any_nan(a_); } -int float16_is_signaling_nan(float16 a_) +int float16_is_signaling_nan(float16 a_, float_status *status) { return 0; } @@ -203,14 +226,14 @@ int float16_is_signaling_nan(float16 a_) | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ -int float16_is_quiet_nan(float16 a_) +int float16_is_quiet_nan(float16 a_, float_status *status) { uint16_t a = float16_val(a_); -#if SNAN_BIT_IS_ONE - return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); -#else - return ((a & ~0x8000) >= 0x7c80); -#endif + if (status->snan_bit_is_one) { + return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); + } else { + return ((a & ~0x8000) >= 0x7C80); + } } /*---------------------------------------------------------------------------- @@ -218,14 +241,14 @@ int float16_is_quiet_nan(float16 a_) | NaN; otherwise returns 0. 
*----------------------------------------------------------------------------*/ -int float16_is_signaling_nan(float16 a_) +int float16_is_signaling_nan(float16 a_, float_status *status) { uint16_t a = float16_val(a_); -#if SNAN_BIT_IS_ONE - return ((a & ~0x8000) >= 0x7c80); -#else - return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); -#endif + if (status->snan_bit_is_one) { + return ((a & ~0x8000) >= 0x7C80); + } else { + return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); + } } #endif @@ -233,20 +256,16 @@ int float16_is_signaling_nan(float16 a_) | Returns a quiet NaN if the half-precision floating point value `a' is a | signaling NaN; otherwise returns `a'. *----------------------------------------------------------------------------*/ -float16 float16_maybe_silence_nan(float16 a_) +float16 float16_maybe_silence_nan(float16 a_, float_status *status) { - if (float16_is_signaling_nan(a_)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - return float16_default_nan; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - uint16_t a = float16_val(a_); - a |= (1 << 9); - return make_float16(a); -#endif + if (float16_is_signaling_nan(a_, status)) { + if (status->snan_bit_is_one) { + return float16_default_nan(status); + } else { + uint16_t a = float16_val(a_); + a |= (1 << 9); + return make_float16(a); + } } return a_; } @@ -261,12 +280,12 @@ static commonNaNT float16ToCommonNaN(float16 a, float_status *status) { commonNaNT z; - if (float16_is_signaling_nan(a)) { + if (float16_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } z.sign = float16_val(a) >> 15; z.low = 0; - z.high = ((uint64_t) float16_val(a))<<54; + z.high = ((uint64_t) float16_val(a)) << 54; return z; } @@ -277,27 +296,27 @@ static commonNaNT float16ToCommonNaN(float16 a, float_status *status) static float16 commonNaNToFloat16(commonNaNT a, float_status *status) { - uint16_t mantissa = a.high>>54; + uint16_t mantissa = a.high >> 54; if (status->default_nan_mode) { - return float16_default_nan; + return float16_default_nan(status); } if (mantissa) { return make_float16(((((uint16_t) a.sign) << 15) | (0x1F << 10) | mantissa)); } else { - return float16_default_nan; + return float16_default_nan(status); } } #ifdef NO_SIGNALING_NANS -int float32_is_quiet_nan(float32 a_) +int float32_is_quiet_nan(float32 a_, float_status *status) { return float32_is_any_nan(a_); } -int float32_is_signaling_nan(float32 a_) +int float32_is_signaling_nan(float32 a_, float_status *status) { return 0; } @@ -307,14 +326,14 @@ int float32_is_signaling_nan(float32 a_) | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ -int float32_is_quiet_nan( float32 a_ ) +int float32_is_quiet_nan(float32 a_, float_status *status) { uint32_t a = float32_val(a_); -#if SNAN_BIT_IS_ONE - return (((a >> 22) & 0x1ff) == 0x1fe) && (a & 0x003fffff); -#else - return ((uint32_t)(a << 1) >= 0xff800000); -#endif + if (status->snan_bit_is_one) { + return (((a >> 22) & 0x1FF) == 0x1FE) && (a & 0x003FFFFF); + } else { + return ((uint32_t)(a << 1) >= 0xFF800000); + } } /*---------------------------------------------------------------------------- @@ -322,14 +341,14 @@ int float32_is_quiet_nan( float32 a_ ) | NaN; otherwise returns 0. 
*----------------------------------------------------------------------------*/ -int float32_is_signaling_nan( float32 a_ ) +int float32_is_signaling_nan(float32 a_, float_status *status) { uint32_t a = float32_val(a_); -#if SNAN_BIT_IS_ONE - return ((uint32_t)(a << 1) >= 0xff800000); -#else - return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); -#endif + if (status->snan_bit_is_one) { + return ((uint32_t)(a << 1) >= 0xFF800000); + } else { + return (((a >> 22) & 0x1FF) == 0x1FE) && (a & 0x003FFFFF); + } } #endif @@ -338,20 +357,16 @@ int float32_is_signaling_nan( float32 a_ ) | signaling NaN; otherwise returns `a'. *----------------------------------------------------------------------------*/ -float32 float32_maybe_silence_nan( float32 a_ ) +float32 float32_maybe_silence_nan(float32 a_, float_status *status) { - if (float32_is_signaling_nan(a_)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - return float32_default_nan; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - uint32_t a = float32_val(a_); - a |= (1 << 22); - return make_float32(a); -#endif + if (float32_is_signaling_nan(a_, status)) { + if (status->snan_bit_is_one) { + return float32_default_nan(status); + } else { + uint32_t a = float32_val(a_); + a |= (1 << 22); + return make_float32(a); + } } return a_; } @@ -366,12 +381,12 @@ static commonNaNT float32ToCommonNaN(float32 a, float_status *status) { commonNaNT z; - if (float32_is_signaling_nan(a)) { + if (float32_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } - z.sign = float32_val(a)>>31; + z.sign = float32_val(a) >> 31; z.low = 0; - z.high = ( (uint64_t) float32_val(a) )<<41; + z.high = ((uint64_t)float32_val(a)) << 41; return z; } @@ -382,17 +397,18 @@ static commonNaNT float32ToCommonNaN(float32 a, float_status *status) static float32 commonNaNToFloat32(commonNaNT a, float_status *status) { - uint32_t mantissa = a.high>>41; + uint32_t mantissa = a.high >> 41; if (status->default_nan_mode) { - return float32_default_nan; + return float32_default_nan(status); } - if ( mantissa ) + if (mantissa) { return make_float32( - ( ( (uint32_t) a.sign )<<31 ) | 0x7F800000 | ( a.high>>41 ) ); - else - return float32_default_nan; + (((uint32_t)a.sign) << 31) | 0x7F800000 | (a.high >> 41)); + } else { + return float32_default_nan(status); + } } /*---------------------------------------------------------------------------- @@ -494,11 +510,10 @@ static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, return aIsLargerSignificand ? 0 : 1; } return bIsQNaN ? 1 : 0; - } - else if (aIsQNaN) { - if (bIsSNaN || !bIsQNaN) + } else if (aIsQNaN) { + if (bIsSNaN || !bIsQNaN) { return 0; - else { + } else { return aIsLargerSignificand ? 0 : 1; } } else { @@ -556,19 +571,36 @@ static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, return 3; } - /* Prefer sNaN over qNaN, in the a, b, c order. */ - if (aIsSNaN) { - return 0; - } else if (bIsSNaN) { - return 1; - } else if (cIsSNaN) { - return 2; - } else if (aIsQNaN) { - return 0; - } else if (bIsQNaN) { - return 1; + if (status->snan_bit_is_one) { + /* Prefer sNaN over qNaN, in the a, b, c order. */ + if (aIsSNaN) { + return 0; + } else if (bIsSNaN) { + return 1; + } else if (cIsSNaN) { + return 2; + } else if (aIsQNaN) { + return 0; + } else if (bIsQNaN) { + return 1; + } else { + return 2; + } } else { - return 2; + /* Prefer sNaN over qNaN, in the c, a, b order. 
*/ + if (cIsSNaN) { + return 2; + } else if (aIsSNaN) { + return 0; + } else if (bIsSNaN) { + return 1; + } else if (cIsQNaN) { + return 2; + } else if (aIsQNaN) { + return 0; + } else { + return 1; + } } } #elif defined(TARGET_PPC) @@ -626,10 +658,10 @@ static float32 propagateFloat32NaN(float32 a, float32 b, float_status *status) flag aIsLargerSignificand; uint32_t av, bv; - aIsQuietNaN = float32_is_quiet_nan( a ); - aIsSignalingNaN = float32_is_signaling_nan( a ); - bIsQuietNaN = float32_is_quiet_nan( b ); - bIsSignalingNaN = float32_is_signaling_nan( b ); + aIsQuietNaN = float32_is_quiet_nan(a, status); + aIsSignalingNaN = float32_is_signaling_nan(a, status); + bIsQuietNaN = float32_is_quiet_nan(b, status); + bIsSignalingNaN = float32_is_signaling_nan(b, status); av = float32_val(a); bv = float32_val(b); @@ -637,12 +669,13 @@ static float32 propagateFloat32NaN(float32 a, float32 b, float_status *status) float_raise(float_flag_invalid, status); } - if (status->default_nan_mode) - return float32_default_nan; + if (status->default_nan_mode) { + return float32_default_nan(status); + } - if ((uint32_t)(av<<1) < (uint32_t)(bv<<1)) { + if ((uint32_t)(av << 1) < (uint32_t)(bv << 1)) { aIsLargerSignificand = 0; - } else if ((uint32_t)(bv<<1) < (uint32_t)(av<<1)) { + } else if ((uint32_t)(bv << 1) < (uint32_t)(av << 1)) { aIsLargerSignificand = 1; } else { aIsLargerSignificand = (av < bv) ? 1 : 0; @@ -650,9 +683,9 @@ static float32 propagateFloat32NaN(float32 a, float32 b, float_status *status) if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, aIsLargerSignificand)) { - return float32_maybe_silence_nan(b); + return float32_maybe_silence_nan(b, status); } else { - return float32_maybe_silence_nan(a); + return float32_maybe_silence_nan(a, status); } } @@ -673,12 +706,12 @@ static float32 propagateFloat32MulAddNaN(float32 a, float32 b, cIsQuietNaN, cIsSignalingNaN; int which; - aIsQuietNaN = float32_is_quiet_nan(a); - aIsSignalingNaN = float32_is_signaling_nan(a); - bIsQuietNaN = float32_is_quiet_nan(b); - bIsSignalingNaN = float32_is_signaling_nan(b); - cIsQuietNaN = float32_is_quiet_nan(c); - cIsSignalingNaN = float32_is_signaling_nan(c); + aIsQuietNaN = float32_is_quiet_nan(a, status); + aIsSignalingNaN = float32_is_signaling_nan(a, status); + bIsQuietNaN = float32_is_quiet_nan(b, status); + bIsSignalingNaN = float32_is_signaling_nan(b, status); + cIsQuietNaN = float32_is_quiet_nan(c, status); + cIsSignalingNaN = float32_is_signaling_nan(c, status); if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) { float_raise(float_flag_invalid, status); @@ -692,29 +725,29 @@ static float32 propagateFloat32MulAddNaN(float32 a, float32 b, /* Note that this check is after pickNaNMulAdd so that function * has an opportunity to set the Invalid flag. 
*/ - return float32_default_nan; + return float32_default_nan(status); } switch (which) { case 0: - return float32_maybe_silence_nan(a); + return float32_maybe_silence_nan(a, status); case 1: - return float32_maybe_silence_nan(b); + return float32_maybe_silence_nan(b, status); case 2: - return float32_maybe_silence_nan(c); + return float32_maybe_silence_nan(c, status); case 3: default: - return float32_default_nan; + return float32_default_nan(status); } } #ifdef NO_SIGNALING_NANS -int float64_is_quiet_nan(float64 a_) +int float64_is_quiet_nan(float64 a_, float_status *status) { return float64_is_any_nan(a_); } -int float64_is_signaling_nan(float64 a_) +int float64_is_signaling_nan(float64 a_, float_status *status) { return 0; } @@ -724,15 +757,15 @@ int float64_is_signaling_nan(float64 a_) | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ -int float64_is_quiet_nan( float64 a_ ) +int float64_is_quiet_nan(float64 a_, float_status *status) { uint64_t a = float64_val(a_); -#if SNAN_BIT_IS_ONE - return (((a >> 51) & 0xfff) == 0xffe) - && (a & 0x0007ffffffffffffULL); -#else - return ((a << 1) >= 0xfff0000000000000ULL); -#endif + if (status->snan_bit_is_one) { + return (((a >> 51) & 0xFFF) == 0xFFE) + && (a & 0x0007FFFFFFFFFFFFULL); + } else { + return ((a << 1) >= 0xFFF0000000000000ULL); + } } /*---------------------------------------------------------------------------- @@ -740,16 +773,15 @@ int float64_is_quiet_nan( float64 a_ ) | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ -int float64_is_signaling_nan( float64 a_ ) +int float64_is_signaling_nan(float64 a_, float_status *status) { uint64_t a = float64_val(a_); -#if SNAN_BIT_IS_ONE - return ((a << 1) >= 0xfff0000000000000ULL); -#else - return - ( ( ( a>>51 ) & 0xFFF ) == 0xFFE ) - && ( a & LIT64( 0x0007FFFFFFFFFFFF ) ); -#endif + if (status->snan_bit_is_one) { + return ((a << 1) >= 0xFFF0000000000000ULL); + } else { + return (((a >> 51) & 0xFFF) == 0xFFE) + && (a & LIT64(0x0007FFFFFFFFFFFF)); + } } #endif @@ -758,20 +790,16 @@ int float64_is_signaling_nan( float64 a_ ) | signaling NaN; otherwise returns `a'. 
*----------------------------------------------------------------------------*/ -float64 float64_maybe_silence_nan( float64 a_ ) +float64 float64_maybe_silence_nan(float64 a_, float_status *status) { - if (float64_is_signaling_nan(a_)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - return float64_default_nan; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - uint64_t a = float64_val(a_); - a |= LIT64( 0x0008000000000000 ); - return make_float64(a); -#endif + if (float64_is_signaling_nan(a_, status)) { + if (status->snan_bit_is_one) { + return float64_default_nan(status); + } else { + uint64_t a = float64_val(a_); + a |= LIT64(0x0008000000000000); + return make_float64(a); + } } return a_; } @@ -786,12 +814,12 @@ static commonNaNT float64ToCommonNaN(float64 a, float_status *status) { commonNaNT z; - if (float64_is_signaling_nan(a)) { + if (float64_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } - z.sign = float64_val(a)>>63; + z.sign = float64_val(a) >> 63; z.low = 0; - z.high = float64_val(a)<<12; + z.high = float64_val(a) << 12; return z; } @@ -802,19 +830,20 @@ static commonNaNT float64ToCommonNaN(float64 a, float_status *status) static float64 commonNaNToFloat64(commonNaNT a, float_status *status) { - uint64_t mantissa = a.high>>12; + uint64_t mantissa = a.high >> 12; if (status->default_nan_mode) { - return float64_default_nan; + return float64_default_nan(status); } - if ( mantissa ) + if (mantissa) { return make_float64( - ( ( (uint64_t) a.sign )<<63 ) - | LIT64( 0x7FF0000000000000 ) - | ( a.high>>12 )); - else - return float64_default_nan; + (((uint64_t) a.sign) << 63) + | LIT64(0x7FF0000000000000) + | (a.high >> 12)); + } else { + return float64_default_nan(status); + } } /*---------------------------------------------------------------------------- @@ -829,10 +858,10 @@ static float64 propagateFloat64NaN(float64 a, float64 b, float_status *status) flag aIsLargerSignificand; uint64_t av, bv; - aIsQuietNaN = float64_is_quiet_nan( a ); - aIsSignalingNaN = float64_is_signaling_nan( a ); - bIsQuietNaN = float64_is_quiet_nan( b ); - bIsSignalingNaN = float64_is_signaling_nan( b ); + aIsQuietNaN = float64_is_quiet_nan(a, status); + aIsSignalingNaN = float64_is_signaling_nan(a, status); + bIsQuietNaN = float64_is_quiet_nan(b, status); + bIsSignalingNaN = float64_is_signaling_nan(b, status); av = float64_val(a); bv = float64_val(b); @@ -840,12 +869,13 @@ static float64 propagateFloat64NaN(float64 a, float64 b, float_status *status) float_raise(float_flag_invalid, status); } - if (status->default_nan_mode) - return float64_default_nan; + if (status->default_nan_mode) { + return float64_default_nan(status); + } - if ((uint64_t)(av<<1) < (uint64_t)(bv<<1)) { + if ((uint64_t)(av << 1) < (uint64_t)(bv << 1)) { aIsLargerSignificand = 0; - } else if ((uint64_t)(bv<<1) < (uint64_t)(av<<1)) { + } else if ((uint64_t)(bv << 1) < (uint64_t)(av << 1)) { aIsLargerSignificand = 1; } else { aIsLargerSignificand = (av < bv) ? 
1 : 0; @@ -853,9 +883,9 @@ static float64 propagateFloat64NaN(float64 a, float64 b, float_status *status) if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, aIsLargerSignificand)) { - return float64_maybe_silence_nan(b); + return float64_maybe_silence_nan(b, status); } else { - return float64_maybe_silence_nan(a); + return float64_maybe_silence_nan(a, status); } } @@ -876,12 +906,12 @@ static float64 propagateFloat64MulAddNaN(float64 a, float64 b, cIsQuietNaN, cIsSignalingNaN; int which; - aIsQuietNaN = float64_is_quiet_nan(a); - aIsSignalingNaN = float64_is_signaling_nan(a); - bIsQuietNaN = float64_is_quiet_nan(b); - bIsSignalingNaN = float64_is_signaling_nan(b); - cIsQuietNaN = float64_is_quiet_nan(c); - cIsSignalingNaN = float64_is_signaling_nan(c); + aIsQuietNaN = float64_is_quiet_nan(a, status); + aIsSignalingNaN = float64_is_signaling_nan(a, status); + bIsQuietNaN = float64_is_quiet_nan(b, status); + bIsSignalingNaN = float64_is_signaling_nan(b, status); + cIsQuietNaN = float64_is_quiet_nan(c, status); + cIsSignalingNaN = float64_is_signaling_nan(c, status); if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) { float_raise(float_flag_invalid, status); @@ -895,29 +925,29 @@ static float64 propagateFloat64MulAddNaN(float64 a, float64 b, /* Note that this check is after pickNaNMulAdd so that function * has an opportunity to set the Invalid flag. */ - return float64_default_nan; + return float64_default_nan(status); } switch (which) { case 0: - return float64_maybe_silence_nan(a); + return float64_maybe_silence_nan(a, status); case 1: - return float64_maybe_silence_nan(b); + return float64_maybe_silence_nan(b, status); case 2: - return float64_maybe_silence_nan(c); + return float64_maybe_silence_nan(c, status); case 3: default: - return float64_default_nan; + return float64_default_nan(status); } } #ifdef NO_SIGNALING_NANS -int floatx80_is_quiet_nan(floatx80 a_) +int floatx80_is_quiet_nan(floatx80 a_, float_status *status) { return floatx80_is_any_nan(a_); } -int floatx80_is_signaling_nan(floatx80 a_) +int floatx80_is_signaling_nan(floatx80 a_, float_status *status) { return 0; } @@ -928,19 +958,19 @@ int floatx80_is_signaling_nan(floatx80 a_) | function for other types as floatx80 has an explicit bit. *----------------------------------------------------------------------------*/ -int floatx80_is_quiet_nan( floatx80 a ) +int floatx80_is_quiet_nan(floatx80 a, float_status *status) { -#if SNAN_BIT_IS_ONE - uint64_t aLow; + if (status->snan_bit_is_one) { + uint64_t aLow; - aLow = a.low & ~0x4000000000000000ULL; - return ((a.high & 0x7fff) == 0x7fff) - && (aLow << 1) - && (a.low == aLow); -#else - return ( ( a.high & 0x7FFF ) == 0x7FFF ) - && (LIT64( 0x8000000000000000 ) <= ((uint64_t) ( a.low<<1 ))); -#endif + aLow = a.low & ~0x4000000000000000ULL; + return ((a.high & 0x7FFF) == 0x7FFF) + && (aLow << 1) + && (a.low == aLow); + } else { + return ((a.high & 0x7FFF) == 0x7FFF) + && (LIT64(0x8000000000000000) <= ((uint64_t)(a.low << 1))); + } } /*---------------------------------------------------------------------------- @@ -949,20 +979,19 @@ int floatx80_is_quiet_nan( floatx80 a ) | function for other types as floatx80 has an explicit bit. 
*----------------------------------------------------------------------------*/ -int floatx80_is_signaling_nan( floatx80 a ) +int floatx80_is_signaling_nan(floatx80 a, float_status *status) { -#if SNAN_BIT_IS_ONE - return ((a.high & 0x7fff) == 0x7fff) - && ((a.low << 1) >= 0x8000000000000000ULL); -#else - uint64_t aLow; + if (status->snan_bit_is_one) { + return ((a.high & 0x7FFF) == 0x7FFF) + && ((a.low << 1) >= 0x8000000000000000ULL); + } else { + uint64_t aLow; - aLow = a.low & ~ LIT64( 0x4000000000000000 ); - return - ( ( a.high & 0x7FFF ) == 0x7FFF ) - && (uint64_t) ( aLow<<1 ) - && ( a.low == aLow ); -#endif + aLow = a.low & ~LIT64(0x4000000000000000); + return ((a.high & 0x7FFF) == 0x7FFF) + && (uint64_t)(aLow << 1) + && (a.low == aLow); + } } #endif @@ -971,20 +1000,15 @@ int floatx80_is_signaling_nan( floatx80 a ) | `a' is a signaling NaN; otherwise returns `a'. *----------------------------------------------------------------------------*/ -floatx80 floatx80_maybe_silence_nan( floatx80 a ) +floatx80 floatx80_maybe_silence_nan(floatx80 a, float_status *status) { - if (floatx80_is_signaling_nan(a)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - a.low = floatx80_default_nan_low; - a.high = floatx80_default_nan_high; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - a.low |= LIT64( 0xC000000000000000 ); - return a; -#endif + if (floatx80_is_signaling_nan(a, status)) { + if (status->snan_bit_is_one) { + a = floatx80_default_nan(status); + } else { + a.low |= LIT64(0xC000000000000000); + return a; + } } return a; } @@ -997,19 +1021,21 @@ floatx80 floatx80_maybe_silence_nan( floatx80 a ) static commonNaNT floatx80ToCommonNaN(floatx80 a, float_status *status) { + floatx80 dflt; commonNaNT z; - if (floatx80_is_signaling_nan(a)) { + if (floatx80_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } - if ( a.low >> 63 ) { + if (a.low >> 63) { z.sign = a.high >> 15; z.low = 0; z.high = a.low << 1; } else { - z.sign = floatx80_default_nan_high >> 15; + dflt = floatx80_default_nan(status); + z.sign = dflt.high >> 15; z.low = 0; - z.high = floatx80_default_nan_low << 1; + z.high = dflt.low << 1; } return z; } @@ -1024,19 +1050,15 @@ static floatx80 commonNaNToFloatx80(commonNaNT a, float_status *status) floatx80 z; if (status->default_nan_mode) { - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + return floatx80_default_nan(status); } if (a.high >> 1) { - z.low = LIT64( 0x8000000000000000 ) | a.high >> 1; - z.high = ( ( (uint16_t) a.sign )<<15 ) | 0x7FFF; + z.low = LIT64(0x8000000000000000) | a.high >> 1; + z.high = (((uint16_t)a.sign) << 15) | 0x7FFF; } else { - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; + z = floatx80_default_nan(status); } - return z; } @@ -1052,19 +1074,17 @@ static floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; flag aIsLargerSignificand; - aIsQuietNaN = floatx80_is_quiet_nan( a ); - aIsSignalingNaN = floatx80_is_signaling_nan( a ); - bIsQuietNaN = floatx80_is_quiet_nan( b ); - bIsSignalingNaN = floatx80_is_signaling_nan( b ); + aIsQuietNaN = floatx80_is_quiet_nan(a, status); + aIsSignalingNaN = floatx80_is_signaling_nan(a, status); + bIsQuietNaN = floatx80_is_quiet_nan(b, status); + bIsSignalingNaN = floatx80_is_signaling_nan(b, status); if (aIsSignalingNaN | bIsSignalingNaN) { float_raise(float_flag_invalid, status); } if 
(status->default_nan_mode) { - a.low = floatx80_default_nan_low; - a.high = floatx80_default_nan_high; - return a; + return floatx80_default_nan(status); } if (a.low < b.low) { @@ -1077,19 +1097,19 @@ static floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, aIsLargerSignificand)) { - return floatx80_maybe_silence_nan(b); + return floatx80_maybe_silence_nan(b, status); } else { - return floatx80_maybe_silence_nan(a); + return floatx80_maybe_silence_nan(a, status); } } #ifdef NO_SIGNALING_NANS -int float128_is_quiet_nan(float128 a_) +int float128_is_quiet_nan(float128 a_, float_status *status) { return float128_is_any_nan(a_); } -int float128_is_signaling_nan(float128 a_) +int float128_is_signaling_nan(float128 a_, float_status *status) { return 0; } @@ -1099,16 +1119,15 @@ int float128_is_signaling_nan(float128 a_) | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ -int float128_is_quiet_nan( float128 a ) +int float128_is_quiet_nan(float128 a, float_status *status) { -#if SNAN_BIT_IS_ONE - return (((a.high >> 47) & 0xffff) == 0xfffe) - && (a.low || (a.high & 0x00007fffffffffffULL)); -#else - return - ((a.high << 1) >= 0xffff000000000000ULL) - && (a.low || (a.high & 0x0000ffffffffffffULL)); -#endif + if (status->snan_bit_is_one) { + return (((a.high >> 47) & 0xFFFF) == 0xFFFE) + && (a.low || (a.high & 0x00007FFFFFFFFFFFULL)); + } else { + return ((a.high << 1) >= 0xFFFF000000000000ULL) + && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL)); + } } /*---------------------------------------------------------------------------- @@ -1116,17 +1135,15 @@ int float128_is_quiet_nan( float128 a ) | signaling NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ -int float128_is_signaling_nan( float128 a ) +int float128_is_signaling_nan(float128 a, float_status *status) { -#if SNAN_BIT_IS_ONE - return - ((a.high << 1) >= 0xffff000000000000ULL) - && (a.low || (a.high & 0x0000ffffffffffffULL)); -#else - return - ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE ) - && ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) ); -#endif + if (status->snan_bit_is_one) { + return ((a.high << 1) >= 0xFFFF000000000000ULL) + && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL)); + } else { + return (((a.high >> 47) & 0xFFFF) == 0xFFFE) + && (a.low || (a.high & LIT64(0x00007FFFFFFFFFFF))); + } } #endif @@ -1135,20 +1152,15 @@ int float128_is_signaling_nan( float128 a ) | a signaling NaN; otherwise returns `a'. 
*----------------------------------------------------------------------------*/ -float128 float128_maybe_silence_nan( float128 a ) +float128 float128_maybe_silence_nan(float128 a, float_status *status) { - if (float128_is_signaling_nan(a)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - a.low = float128_default_nan_low; - a.high = float128_default_nan_high; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - a.high |= LIT64( 0x0000800000000000 ); - return a; -#endif + if (float128_is_signaling_nan(a, status)) { + if (status->snan_bit_is_one) { + a = float128_default_nan(status); + } else { + a.high |= LIT64(0x0000800000000000); + return a; + } } return a; } @@ -1163,11 +1175,11 @@ static commonNaNT float128ToCommonNaN(float128 a, float_status *status) { commonNaNT z; - if (float128_is_signaling_nan(a)) { + if (float128_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } - z.sign = a.high>>63; - shortShift128Left( a.high, a.low, 16, &z.high, &z.low ); + z.sign = a.high >> 63; + shortShift128Left(a.high, a.low, 16, &z.high, &z.low); return z; } @@ -1181,13 +1193,11 @@ static float128 commonNaNToFloat128(commonNaNT a, float_status *status) float128 z; if (status->default_nan_mode) { - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + return float128_default_nan(status); } - shift128Right( a.high, a.low, 16, &z.high, &z.low ); - z.high |= ( ( (uint64_t) a.sign )<<63 ) | LIT64( 0x7FFF000000000000 ); + shift128Right(a.high, a.low, 16, &z.high, &z.low); + z.high |= (((uint64_t)a.sign) << 63) | LIT64(0x7FFF000000000000); return z; } @@ -1203,24 +1213,22 @@ static float128 propagateFloat128NaN(float128 a, float128 b, flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; flag aIsLargerSignificand; - aIsQuietNaN = float128_is_quiet_nan( a ); - aIsSignalingNaN = float128_is_signaling_nan( a ); - bIsQuietNaN = float128_is_quiet_nan( b ); - bIsSignalingNaN = float128_is_signaling_nan( b ); + aIsQuietNaN = float128_is_quiet_nan(a, status); + aIsSignalingNaN = float128_is_signaling_nan(a, status); + bIsQuietNaN = float128_is_quiet_nan(b, status); + bIsSignalingNaN = float128_is_signaling_nan(b, status); if (aIsSignalingNaN | bIsSignalingNaN) { float_raise(float_flag_invalid, status); } if (status->default_nan_mode) { - a.low = float128_default_nan_low; - a.high = float128_default_nan_high; - return a; + return float128_default_nan(status); } - if (lt128(a.high<<1, a.low, b.high<<1, b.low)) { + if (lt128(a.high << 1, a.low, b.high << 1, b.low)) { aIsLargerSignificand = 0; - } else if (lt128(b.high<<1, b.low, a.high<<1, a.low)) { + } else if (lt128(b.high << 1, b.low, a.high << 1, a.low)) { aIsLargerSignificand = 1; } else { aIsLargerSignificand = (a.high < b.high) ? 
1 : 0; @@ -1228,9 +1236,8 @@ static float128 propagateFloat128NaN(float128 a, float128 b, if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, aIsLargerSignificand)) { - return float128_maybe_silence_nan(b); + return float128_maybe_silence_nan(b, status); } else { - return float128_maybe_silence_nan(a); + return float128_maybe_silence_nan(a, status); } } - diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 166c48e434..9b1eccff24 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -2105,7 +2105,7 @@ static float32 subFloat32Sigs(float32 a, float32 b, flag zSign, return propagateFloat32NaN(a, b, status); } float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } if ( aExp == 0 ) { aExp = 1; @@ -2234,7 +2234,7 @@ float32 float32_mul(float32 a, float32 b, float_status *status) } if ( ( bExp | bSig ) == 0 ) { float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } return packFloat32( zSign, 0xFF, 0 ); } @@ -2244,7 +2244,7 @@ float32 float32_mul(float32 a, float32 b, float_status *status) } if ( ( aExp | aSig ) == 0 ) { float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } return packFloat32( zSign, 0xFF, 0 ); } @@ -2299,7 +2299,7 @@ float32 float32_div(float32 a, float32 b, float_status *status) return propagateFloat32NaN(a, b, status); } float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } return packFloat32( zSign, 0xFF, 0 ); } @@ -2313,7 +2313,7 @@ float32 float32_div(float32 a, float32 b, float_status *status) if ( bSig == 0 ) { if ( ( aExp | aSig ) == 0 ) { float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } float_raise(float_flag_divbyzero, status); return packFloat32( zSign, 0xFF, 0 ); @@ -2367,7 +2367,7 @@ float32 float32_rem(float32 a, float32 b, float_status *status) return propagateFloat32NaN(a, b, status); } float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } if ( bExp == 0xFF ) { if (bSig) { @@ -2378,7 +2378,7 @@ float32 float32_rem(float32 a, float32 b, float_status *status) if ( bExp == 0 ) { if ( bSig == 0 ) { float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } normalizeFloat32Subnormal( bSig, &bExp, &bSig ); } @@ -2493,7 +2493,7 @@ float32 float32_muladd(float32 a, float32 b, float32 c, int flags, if (infzero) { float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } if (flags & float_muladd_negate_c) { @@ -2514,7 +2514,7 @@ float32 float32_muladd(float32 a, float32 b, float32 c, int flags, if (pInf && (pSign ^ cSign)) { /* addition of opposite-signed infinities => InvalidOperation */ float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } /* Otherwise generate an infinity of the same sign */ return packFloat32(cSign ^ signflip, 0xff, 0); @@ -2690,12 +2690,12 @@ float32 float32_sqrt(float32 a, float_status *status) } if ( ! 
aSign ) return a; float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } if ( aSign ) { if ( ( aExp | aSig ) == 0 ) return a; float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } if ( aExp == 0 ) { if ( aSig == 0 ) return float32_zero; @@ -2828,7 +2828,7 @@ float32 float32_log2(float32 a, float_status *status) } if ( aSign ) { float_raise(float_flag_invalid, status); - return float32_default_nan; + return float32_default_nan(status); } if ( aExp == 0xFF ) { if (aSig) { @@ -2974,7 +2974,8 @@ int float32_eq_quiet(float32 a, float32 b, float_status *status) if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + if (float32_is_signaling_nan(a, status) + || float32_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -3000,7 +3001,8 @@ int float32_le_quiet(float32 a, float32 b, float_status *status) if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + if (float32_is_signaling_nan(a, status) + || float32_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -3031,7 +3033,8 @@ int float32_lt_quiet(float32 a, float32 b, float_status *status) if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + if (float32_is_signaling_nan(a, status) + || float32_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -3060,7 +3063,8 @@ int float32_unordered_quiet(float32 a, float32 b, float_status *status) if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + if (float32_is_signaling_nan(a, status) + || float32_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 1; @@ -3896,7 +3900,7 @@ static float64 subFloat64Sigs(float64 a, float64 b, flag zSign, return propagateFloat64NaN(a, b, status); } float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } if ( aExp == 0 ) { aExp = 1; @@ -4023,7 +4027,7 @@ float64 float64_mul(float64 a, float64 b, float_status *status) } if ( ( bExp | bSig ) == 0 ) { float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } return packFloat64( zSign, 0x7FF, 0 ); } @@ -4033,7 +4037,7 @@ float64 float64_mul(float64 a, float64 b, float_status *status) } if ( ( aExp | aSig ) == 0 ) { float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } return packFloat64( zSign, 0x7FF, 0 ); } @@ -4090,7 +4094,7 @@ float64 float64_div(float64 a, float64 b, float_status *status) return propagateFloat64NaN(a, b, status); } float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } return packFloat64( zSign, 0x7FF, 0 ); } @@ -4104,7 +4108,7 @@ float64 float64_div(float64 a, float64 b, float_status *status) if ( 
bSig == 0 ) { if ( ( aExp | aSig ) == 0 ) { float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } float_raise(float_flag_divbyzero, status); return packFloat64( zSign, 0x7FF, 0 ); @@ -4162,7 +4166,7 @@ float64 float64_rem(float64 a, float64 b, float_status *status) return propagateFloat64NaN(a, b, status); } float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } if ( bExp == 0x7FF ) { if (bSig) { @@ -4173,7 +4177,7 @@ float64 float64_rem(float64 a, float64 b, float_status *status) if ( bExp == 0 ) { if ( bSig == 0 ) { float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } normalizeFloat64Subnormal( bSig, &bExp, &bSig ); } @@ -4275,7 +4279,7 @@ float64 float64_muladd(float64 a, float64 b, float64 c, int flags, if (infzero) { float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } if (flags & float_muladd_negate_c) { @@ -4296,7 +4300,7 @@ float64 float64_muladd(float64 a, float64 b, float64 c, int flags, if (pInf && (pSign ^ cSign)) { /* addition of opposite-signed infinities => InvalidOperation */ float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } /* Otherwise generate an infinity of the same sign */ return packFloat64(cSign ^ signflip, 0x7ff, 0); @@ -4494,12 +4498,12 @@ float64 float64_sqrt(float64 a, float_status *status) } if ( ! aSign ) return a; float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } if ( aSign ) { if ( ( aExp | aSig ) == 0 ) return a; float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } if ( aExp == 0 ) { if ( aSig == 0 ) return float64_zero; @@ -4547,7 +4551,7 @@ float64 float64_log2(float64 a, float_status *status) } if ( aSign ) { float_raise(float_flag_invalid, status); - return float64_default_nan; + return float64_default_nan(status); } if ( aExp == 0x7FF ) { if (aSig) { @@ -4694,7 +4698,8 @@ int float64_eq_quiet(float64 a, float64 b, float_status *status) if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + if (float64_is_signaling_nan(a, status) + || float64_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -4722,7 +4727,8 @@ int float64_le_quiet(float64 a, float64 b, float_status *status) if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + if (float64_is_signaling_nan(a, status) + || float64_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -4753,7 +4759,8 @@ int float64_lt_quiet(float64 a, float64 b, float_status *status) if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + if (float64_is_signaling_nan(a, status) + || float64_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -4782,7 +4789,8 @@ int float64_unordered_quiet(float64 a, float64 b, float_status 
*status) if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + if (float64_is_signaling_nan(a, status) + || float64_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 1; @@ -5207,7 +5215,6 @@ static floatx80 subFloatx80Sigs(floatx80 a, floatx80 b, flag zSign, int32_t aExp, bExp, zExp; uint64_t aSig, bSig, zSig0, zSig1; int32_t expDiff; - floatx80 z; aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5221,9 +5228,7 @@ static floatx80 subFloatx80Sigs(floatx80 a, floatx80 b, flag zSign, return propagateFloatx80NaN(a, b, status); } float_raise(float_flag_invalid, status); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + return floatx80_default_nan(status); } if ( aExp == 0 ) { aExp = 1; @@ -5317,7 +5322,6 @@ floatx80 floatx80_mul(floatx80 a, floatx80 b, float_status *status) flag aSign, bSign, zSign; int32_t aExp, bExp, zExp; uint64_t aSig, bSig, zSig0, zSig1; - floatx80 z; aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5341,9 +5345,7 @@ floatx80 floatx80_mul(floatx80 a, floatx80 b, float_status *status) if ( ( aExp | aSig ) == 0 ) { invalid: float_raise(float_flag_invalid, status); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + return floatx80_default_nan(status); } return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); } @@ -5377,7 +5379,6 @@ floatx80 floatx80_div(floatx80 a, floatx80 b, float_status *status) int32_t aExp, bExp, zExp; uint64_t aSig, bSig, zSig0, zSig1; uint64_t rem0, rem1, rem2, term0, term1, term2; - floatx80 z; aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5409,9 +5410,7 @@ floatx80 floatx80_div(floatx80 a, floatx80 b, float_status *status) if ( ( aExp | aSig ) == 0 ) { invalid: float_raise(float_flag_invalid, status); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + return floatx80_default_nan(status); } float_raise(float_flag_divbyzero, status); return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); @@ -5461,7 +5460,6 @@ floatx80 floatx80_rem(floatx80 a, floatx80 b, float_status *status) int32_t aExp, bExp, expDiff; uint64_t aSig0, aSig1, bSig; uint64_t q, term0, term1, alternateASig0, alternateASig1; - floatx80 z; aSig0 = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5485,9 +5483,7 @@ floatx80 floatx80_rem(floatx80 a, floatx80 b, float_status *status) if ( bSig == 0 ) { invalid: float_raise(float_flag_invalid, status); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + return floatx80_default_nan(status); } normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); } @@ -5559,7 +5555,6 @@ floatx80 floatx80_sqrt(floatx80 a, float_status *status) int32_t aExp, zExp; uint64_t aSig0, aSig1, zSig0, zSig1, doubleZSig0; uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; - floatx80 z; aSig0 = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5575,9 +5570,7 @@ floatx80 floatx80_sqrt(floatx80 a, float_status *status) if ( ( aExp | aSig0 ) == 0 ) return a; invalid: float_raise(float_flag_invalid, status); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + return floatx80_default_nan(status); } if ( aExp == 0 ) { if ( aSig0 == 0 ) return packFloatx80( 0, 0, 0 ); @@ -5745,8 +5738,8 @@ 
int floatx80_eq_quiet(floatx80 a, floatx80 b, float_status *status) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { - if ( floatx80_is_signaling_nan( a ) - || floatx80_is_signaling_nan( b ) ) { + if (floatx80_is_signaling_nan(a, status) + || floatx80_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -5776,8 +5769,8 @@ int floatx80_le_quiet(floatx80 a, floatx80 b, float_status *status) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { - if ( floatx80_is_signaling_nan( a ) - || floatx80_is_signaling_nan( b ) ) { + if (floatx80_is_signaling_nan(a, status) + || floatx80_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -5812,8 +5805,8 @@ int floatx80_lt_quiet(floatx80 a, floatx80 b, float_status *status) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { - if ( floatx80_is_signaling_nan( a ) - || floatx80_is_signaling_nan( b ) ) { + if (floatx80_is_signaling_nan(a, status) + || floatx80_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -5845,8 +5838,8 @@ int floatx80_unordered_quiet(floatx80 a, floatx80 b, float_status *status) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { - if ( floatx80_is_signaling_nan( a ) - || floatx80_is_signaling_nan( b ) ) { + if (floatx80_is_signaling_nan(a, status) + || floatx80_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 1; @@ -6385,7 +6378,6 @@ static float128 subFloat128Sigs(float128 a, float128 b, flag zSign, int32_t aExp, bExp, zExp; uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1; int32_t expDiff; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6403,9 +6395,7 @@ static float128 subFloat128Sigs(float128 a, float128 b, flag zSign, return propagateFloat128NaN(a, b, status); } float_raise(float_flag_invalid, status); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + return float128_default_nan(status); } if ( aExp == 0 ) { aExp = 1; @@ -6515,7 +6505,6 @@ float128 float128_mul(float128 a, float128 b, float_status *status) flag aSign, bSign, zSign; int32_t aExp, bExp, zExp; uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2, zSig3; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6541,9 +6530,7 @@ float128 float128_mul(float128 a, float128 b, float_status *status) if ( ( aExp | aSig0 | aSig1 ) == 0 ) { invalid: float_raise(float_flag_invalid, status); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + return float128_default_nan(status); } return packFloat128( zSign, 0x7FFF, 0, 0 ); } @@ -6582,7 +6569,6 @@ float128 float128_div(float128 a, float128 b, float_status *status) int32_t aExp, bExp, zExp; uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2; uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6616,9 +6602,7 @@ float128 float128_div(float128 a, float128 b, float_status *status) if ( ( aExp | aSig0 | aSig1 ) == 0 ) { invalid: float_raise(float_flag_invalid, status); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + return float128_default_nan(status); } float_raise(float_flag_divbyzero, status); return packFloat128( zSign, 0x7FFF, 0, 0 
); @@ -6673,7 +6657,6 @@ float128 float128_rem(float128 a, float128 b, float_status *status) uint64_t aSig0, aSig1, bSig0, bSig1, q, term0, term1, term2; uint64_t allZero, alternateASig0, alternateASig1, sigMean1; int64_t sigMean0; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6699,9 +6682,7 @@ float128 float128_rem(float128 a, float128 b, float_status *status) if ( ( bSig0 | bSig1 ) == 0 ) { invalid: float_raise(float_flag_invalid, status); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + return float128_default_nan(status); } normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 ); } @@ -6782,7 +6763,6 @@ float128 float128_sqrt(float128 a, float_status *status) int32_t aExp, zExp; uint64_t aSig0, aSig1, zSig0, zSig1, zSig2, doubleZSig0; uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6799,9 +6779,7 @@ float128 float128_sqrt(float128 a, float_status *status) if ( ( aExp | aSig0 | aSig1 ) == 0 ) return a; invalid: float_raise(float_flag_invalid, status); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + return float128_default_nan(status); } if ( aExp == 0 ) { if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( 0, 0, 0, 0 ); @@ -6969,8 +6947,8 @@ int float128_eq_quiet(float128 a, float128 b, float_status *status) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - if ( float128_is_signaling_nan( a ) - || float128_is_signaling_nan( b ) ) { + if (float128_is_signaling_nan(a, status) + || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -7000,8 +6978,8 @@ int float128_le_quiet(float128 a, float128 b, float_status *status) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - if ( float128_is_signaling_nan( a ) - || float128_is_signaling_nan( b ) ) { + if (float128_is_signaling_nan(a, status) + || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -7036,8 +7014,8 @@ int float128_lt_quiet(float128 a, float128 b, float_status *status) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - if ( float128_is_signaling_nan( a ) - || float128_is_signaling_nan( b ) ) { + if (float128_is_signaling_nan(a, status) + || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; @@ -7070,8 +7048,8 @@ int float128_unordered_quiet(float128 a, float128 b, float_status *status) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - if ( float128_is_signaling_nan( a ) - || float128_is_signaling_nan( b ) ) { + if (float128_is_signaling_nan(a, status) + || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 1; @@ -7351,8 +7329,8 @@ static inline int float ## s ## _compare_internal(float ## s a, float ## s b,\ ( ( extractFloat ## s ## Exp( b ) == nan_exp ) && \ extractFloat ## s ## Frac( b ) )) { \ if (!is_quiet || \ - float ## s ## _is_signaling_nan( a ) || \ - float ## s ## _is_signaling_nan( b ) ) { \ + float ## s ## _is_signaling_nan(a, status) || \ + float ## s ## _is_signaling_nan(b, status)) { \ float_raise(float_flag_invalid, status); \ } \ return float_relation_unordered; \ @@ -7401,8 +7379,8 
@@ static inline int floatx80_compare_internal(floatx80 a, floatx80 b, ( ( extractFloatx80Exp( b ) == 0x7fff ) && ( extractFloatx80Frac( b )<<1 ) )) { if (!is_quiet || - floatx80_is_signaling_nan( a ) || - floatx80_is_signaling_nan( b ) ) { + floatx80_is_signaling_nan(a, status) || + floatx80_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return float_relation_unordered; @@ -7447,8 +7425,8 @@ static inline int float128_compare_internal(float128 a, float128 b, ( ( extractFloat128Exp( b ) == 0x7fff ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )) { if (!is_quiet || - float128_is_signaling_nan( a ) || - float128_is_signaling_nan( b ) ) { + float128_is_signaling_nan(a, status) || + float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return float_relation_unordered; @@ -7508,11 +7486,11 @@ static inline float ## s float ## s ## _minmax(float ## s a, float ## s b, \ if (float ## s ## _is_any_nan(a) || \ float ## s ## _is_any_nan(b)) { \ if (isieee) { \ - if (float ## s ## _is_quiet_nan(a) && \ + if (float ## s ## _is_quiet_nan(a, status) && \ !float ## s ##_is_any_nan(b)) { \ return b; \ - } else if (float ## s ## _is_quiet_nan(b) && \ - !float ## s ## _is_any_nan(a)) { \ + } else if (float ## s ## _is_quiet_nan(b, status) && \ + !float ## s ## _is_any_nan(a)) { \ return a; \ } \ } \ @@ -2457,17 +2457,17 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict) c = l->value->props; monitor_printf(mon, " CPUInstance Properties:\n"); - if (c->has_node) { - monitor_printf(mon, " node: \"%" PRIu64 "\"\n", c->node); + if (c->has_node_id) { + monitor_printf(mon, " node-id: \"%" PRIu64 "\"\n", c->node_id); } - if (c->has_socket) { - monitor_printf(mon, " socket: \"%" PRIu64 "\"\n", c->socket); + if (c->has_socket_id) { + monitor_printf(mon, " socket-id: \"%" PRIu64 "\"\n", c->socket_id); } - if (c->has_core) { - monitor_printf(mon, " core: \"%" PRIu64 "\"\n", c->core); + if (c->has_core_id) { + monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id); } - if (c->has_thread) { - monitor_printf(mon, " thread: \"%" PRIu64 "\"\n", c->thread); + if (c->has_thread_id) { + monitor_printf(mon, " thread-id: \"%" PRIu64 "\"\n", c->thread_id); } l = l->next; diff --git a/hw/arm/ast2400.c b/hw/arm/ast2400.c index 4a9de0e10c..b14a82fcde 100644 --- a/hw/arm/ast2400.c +++ b/hw/arm/ast2400.c @@ -24,9 +24,12 @@ #define AST2400_IOMEM_SIZE 0x00200000 #define AST2400_IOMEM_BASE 0x1E600000 #define AST2400_VIC_BASE 0x1E6C0000 +#define AST2400_SCU_BASE 0x1E6E2000 #define AST2400_TIMER_BASE 0x1E782000 #define AST2400_I2C_BASE 0x1E78A000 +#define AST2400_A0_SILICON_REV 0x02000303 + static const int uart_irqs[] = { 9, 32, 33, 34, 10 }; static const int timer_irqs[] = { 16, 17, 18, 35, 36, 37, 38, 39, }; @@ -72,6 +75,16 @@ static void ast2400_init(Object *obj) object_initialize(&s->i2c, sizeof(s->i2c), TYPE_ASPEED_I2C); object_property_add_child(obj, "i2c", OBJECT(&s->i2c), NULL); qdev_set_parent_bus(DEVICE(&s->i2c), sysbus_get_default()); + + object_initialize(&s->scu, sizeof(s->scu), TYPE_ASPEED_SCU); + object_property_add_child(obj, "scu", OBJECT(&s->scu), NULL); + qdev_set_parent_bus(DEVICE(&s->scu), sysbus_get_default()); + qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", + AST2400_A0_SILICON_REV); + object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu), + "hw-strap1", &error_abort); + object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu), + "hw-strap2", &error_abort); } static void ast2400_realize(DeviceState *dev, Error 
**errp) @@ -110,6 +123,14 @@ static void ast2400_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } + /* SCU */ + object_property_set_bool(OBJECT(&s->scu), true, "realized", &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, AST2400_SCU_BASE); + /* UART - attach an 8250 to the IO space as our UART5 */ if (serial_hds[0]) { qemu_irq uart5 = qdev_get_gpio_in(DEVICE(&s->vic), uart_irqs[4]); diff --git a/hw/arm/palmetto-bmc.c b/hw/arm/palmetto-bmc.c index a51d960510..b8eed21348 100644 --- a/hw/arm/palmetto-bmc.c +++ b/hw/arm/palmetto-bmc.c @@ -44,6 +44,8 @@ static void palmetto_bmc_init(MachineState *machine) &bmc->ram); object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram), &error_abort); + object_property_set_int(OBJECT(&bmc->soc), 0x120CE416, "hw-strap1", + &error_abort); object_property_set_bool(OBJECT(&bmc->soc), true, "realized", &error_abort); diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index 2073f9a270..54b9ac1da6 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -31,11 +31,9 @@ struct VirtIOBlockDataPlane { bool stopping; VirtIOBlkConf *conf; - VirtIODevice *vdev; - VirtQueue *vq; /* virtqueue vring */ - EventNotifier *guest_notifier; /* irq */ QEMUBH *bh; /* bh for guest notification */ + unsigned long *batch_notify_vqs; /* Note that these EventNotifiers are assigned by value. This is * fine as long as you do not call event_notifier_cleanup on them @@ -47,20 +45,36 @@ struct VirtIOBlockDataPlane { }; /* Raise an interrupt to signal guest, if necessary */ -void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s) +void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq) { + set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs); qemu_bh_schedule(s->bh); } static void notify_guest_bh(void *opaque) { VirtIOBlockDataPlane *s = opaque; + unsigned nvqs = s->conf->num_queues; + unsigned long bitmap[BITS_TO_LONGS(nvqs)]; + unsigned j; - if (!virtio_should_notify(s->vdev, s->vq)) { - return; - } + memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap)); + memset(s->batch_notify_vqs, 0, sizeof(bitmap)); + + for (j = 0; j < nvqs; j += BITS_PER_LONG) { + unsigned long bits = bitmap[j]; - event_notifier_set(s->guest_notifier); + while (bits != 0) { + unsigned i = j + ctzl(bits); + VirtQueue *vq = virtio_get_queue(s->vdev, i); + + if (virtio_should_notify(s->vdev, vq)) { + event_notifier_set(virtio_queue_get_guest_notifier(vq)); + } + + bits &= bits - 1; /* clear right-most bit */ + } + } } /* Context: QEMU global mutex held */ @@ -79,7 +93,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, } /* Don't try if transport does not support notifiers. 
*/ - if (!k->set_guest_notifiers || !k->set_host_notifier) { + if (!k->set_guest_notifiers || !k->ioeventfd_started) { error_setg(errp, "device is incompatible with dataplane " "(transport does not support notifiers)"); @@ -104,6 +118,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, } s->ctx = iothread_get_aio_context(s->iothread); s->bh = aio_bh_new(s->ctx, notify_guest_bh, s); + s->batch_notify_vqs = bitmap_new(conf->num_queues); *dataplane = s; } @@ -116,6 +131,7 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) } virtio_blk_data_plane_stop(s); + g_free(s->batch_notify_vqs); qemu_bh_delete(s->bh); object_unref(OBJECT(s->iothread)); g_free(s); @@ -138,6 +154,8 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); VirtIOBlock *vblk = VIRTIO_BLK(s->vdev); + unsigned i; + unsigned nvqs = s->conf->num_queues; int r; if (vblk->dataplane_started || s->starting) { @@ -145,22 +163,25 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) } s->starting = true; - s->vq = virtio_get_queue(s->vdev, 0); /* Set up guest notifier (irq) */ - r = k->set_guest_notifiers(qbus->parent, 1, true); + r = k->set_guest_notifiers(qbus->parent, nvqs, true); if (r != 0) { fprintf(stderr, "virtio-blk failed to set guest notifier (%d), " "ensure -enable-kvm is set\n", r); goto fail_guest_notifiers; } - s->guest_notifier = virtio_queue_get_guest_notifier(s->vq); /* Set up virtqueue notify */ - r = k->set_host_notifier(qbus->parent, 0, true); - if (r != 0) { - fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r); - goto fail_host_notifier; + for (i = 0; i < nvqs; i++) { + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true); + if (r != 0) { + fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r); + while (i--) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } + goto fail_guest_notifiers; + } } s->starting = false; @@ -170,17 +191,23 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) blk_set_aio_context(s->conf->conf.blk, s->ctx); /* Kick right away to begin processing requests already in vring */ - event_notifier_set(virtio_queue_get_host_notifier(s->vq)); + for (i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); + + event_notifier_set(virtio_queue_get_host_notifier(vq)); + } /* Get this show started by hooking up our callbacks */ aio_context_acquire(s->ctx); - virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, - virtio_blk_data_plane_handle_output); + for (i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); + + virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, + virtio_blk_data_plane_handle_output); + } aio_context_release(s->ctx); return; - fail_host_notifier: - k->set_guest_notifiers(qbus->parent, 1, false); fail_guest_notifiers: vblk->dataplane_disabled = true; s->starting = false; @@ -193,6 +220,8 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s) BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); VirtIOBlock *vblk = VIRTIO_BLK(s->vdev); + unsigned i; + unsigned nvqs = s->conf->num_queues; if (!vblk->dataplane_started || s->stopping) { return; @@ -210,17 +239,23 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s) aio_context_acquire(s->ctx); /* Stop notifications for new requests from guest */ - virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, NULL); + for 
(i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); + + virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL); + } /* Drain and switch bs back to the QEMU main loop */ blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context()); aio_context_release(s->ctx); - k->set_host_notifier(qbus->parent, 0, false); + for (i = 0; i < nvqs; i++) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } /* Clean up guest notifier (irq) */ - k->set_guest_notifiers(qbus->parent, 1, false); + k->set_guest_notifiers(qbus->parent, nvqs, false); vblk->dataplane_started = false; s->stopping = false; diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h index 0714c11a2b..b1f0b95b32 100644 --- a/hw/block/dataplane/virtio-blk.h +++ b/hw/block/dataplane/virtio-blk.h @@ -26,6 +26,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s); void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s); void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s); void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s); -void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s); +void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq); #endif /* HW_DATAPLANE_VIRTIO_BLK_H */ diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index 51d8596056..326b688e83 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -53,12 +53,17 @@ /* 16 MiB max in 3 byte address mode */ #define MAX_3BYTES_SIZE 0x1000000 +#define SPI_NOR_MAX_ID_LEN 6 + typedef struct FlashPartInfo { const char *part_name; - /* jedec code. (jedec >> 16) & 0xff is the 1st byte, >> 8 the 2nd etc */ - uint32_t jedec; - /* extended jedec code */ - uint16_t ext_jedec; + /* + * This array stores the ID bytes. + * The first three bytes are the JEDEC ID. + * JEDEC ID zero means "no ID" (mostly older chips). + */ + uint8_t id[SPI_NOR_MAX_ID_LEN]; + uint8_t id_len; /* there is confusion between manufacturers as to what a sector is. In this * device model, a "sector" is the size that is erased by the ERASE_SECTOR * command (opcode 0xd8). @@ -70,11 +75,33 @@ typedef struct FlashPartInfo { } FlashPartInfo; /* adapted from linux */ - -#define INFO(_part_name, _jedec, _ext_jedec, _sector_size, _n_sectors, _flags)\ - .part_name = (_part_name),\ - .jedec = (_jedec),\ - .ext_jedec = (_ext_jedec),\ +/* Used when the "_ext_id" is two bytes at most */ +#define INFO(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\ + .part_name = _part_name,\ + .id = {\ + ((_jedec_id) >> 16) & 0xff,\ + ((_jedec_id) >> 8) & 0xff,\ + (_jedec_id) & 0xff,\ + ((_ext_id) >> 8) & 0xff,\ + (_ext_id) & 0xff,\ + },\ + .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 
2 : 0))),\ + .sector_size = (_sector_size),\ + .n_sectors = (_n_sectors),\ + .page_size = 256,\ + .flags = (_flags), + +#define INFO6(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\ + .part_name = _part_name,\ + .id = {\ + ((_jedec_id) >> 16) & 0xff,\ + ((_jedec_id) >> 8) & 0xff,\ + (_jedec_id) & 0xff,\ + ((_ext_id) >> 16) & 0xff,\ + ((_ext_id) >> 8) & 0xff,\ + (_ext_id) & 0xff,\ + },\ + .id_len = 6,\ .sector_size = (_sector_size),\ .n_sectors = (_n_sectors),\ .page_size = 256,\ @@ -102,12 +129,26 @@ typedef struct FlashPartInfo { #define EVCFG_QUAD_IO_ENABLED (1 << 7) #define NVCFG_4BYTE_ADDR_MASK (1 << 0) #define NVCFG_LOWER_SEGMENT_MASK (1 << 1) -#define CFG_UPPER_128MB_SEG_ENABLED 0x3 /* Numonyx (Micron) Flag Status Register macros */ #define FSR_4BYTE_ADDR_MODE_ENABLED 0x1 #define FSR_FLASH_READY (1 << 7) +/* Spansion configuration registers macros. */ +#define SPANSION_QUAD_CFG_POS 0 +#define SPANSION_QUAD_CFG_LEN 1 +#define SPANSION_DUMMY_CLK_POS 0 +#define SPANSION_DUMMY_CLK_LEN 4 +#define SPANSION_ADDR_LEN_POS 7 +#define SPANSION_ADDR_LEN_LEN 1 + +/* + * Spansion read mode command length in bytes, + * the mode is currently not supported. +*/ + +#define SPANSION_CONTINUOUS_READ_MODE_CMD_LEN 1 + static const FlashPartInfo known_devices[] = { /* Atmel -- some are (confusingly) marketed as "DataFlash" */ { INFO("at25fs010", 0x1f6601, 0, 32 << 10, 4, ER_4K) }, @@ -158,6 +199,8 @@ static const FlashPartInfo known_devices[] = { { INFO("mx25l12855e", 0xc22618, 0, 64 << 10, 256, 0) }, { INFO("mx25l25635e", 0xc22019, 0, 64 << 10, 512, 0) }, { INFO("mx25l25655e", 0xc22619, 0, 64 << 10, 512, 0) }, + { INFO("mx66u51235f", 0xc2253a, 0, 64 << 10, 1024, ER_4K | ER_32K) }, + { INFO("mx66u1g45g", 0xc2253b, 0, 64 << 10, 2048, ER_4K | ER_32K) }, /* Micron */ { INFO("n25q032a11", 0x20bb16, 0, 64 << 10, 64, ER_4K) }, @@ -168,6 +211,11 @@ static const FlashPartInfo known_devices[] = { { INFO("n25q128a13", 0x20ba18, 0, 64 << 10, 256, ER_4K) }, { INFO("n25q256a11", 0x20bb19, 0, 64 << 10, 512, ER_4K) }, { INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, + { INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) }, + { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, + { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, + { INFO("mt25ql01g", 0x20ba21, 0, 64 << 10, 2048, ER_4K) }, + { INFO("mt25qu01g", 0x20bb21, 0, 64 << 10, 2048, ER_4K) }, /* Spansion -- single (large) sector size only, at least * for the chips listed here (without boot sectors). 
@@ -176,8 +224,8 @@ static const FlashPartInfo known_devices[] = { { INFO("s25sl064p", 0x010216, 0x4d00, 64 << 10, 128, ER_4K) }, { INFO("s25fl256s0", 0x010219, 0x4d00, 256 << 10, 128, 0) }, { INFO("s25fl256s1", 0x010219, 0x4d01, 64 << 10, 512, 0) }, - { INFO("s25fl512s", 0x010220, 0x4d00, 256 << 10, 256, 0) }, - { INFO("s70fl01gs", 0x010221, 0x4d00, 256 << 10, 256, 0) }, + { INFO6("s25fl512s", 0x010220, 0x4d0080, 256 << 10, 256, 0) }, + { INFO6("s70fl01gs", 0x010221, 0x4d0080, 256 << 10, 512, 0) }, { INFO("s25sl12800", 0x012018, 0x0300, 256 << 10, 64, 0) }, { INFO("s25sl12801", 0x012018, 0x0301, 64 << 10, 256, 0) }, { INFO("s25fl129p0", 0x012018, 0x4d00, 256 << 10, 64, 0) }, @@ -190,6 +238,10 @@ static const FlashPartInfo known_devices[] = { { INFO("s25fl016k", 0xef4015, 0, 64 << 10, 32, ER_4K | ER_32K) }, { INFO("s25fl064k", 0xef4017, 0, 64 << 10, 128, ER_4K | ER_32K) }, + /* Spansion -- boot sectors support */ + { INFO6("s25fs512s", 0x010220, 0x4d0081, 256 << 10, 256, 0) }, + { INFO6("s70fs01gs", 0x010221, 0x4d0081, 256 << 10, 512, 0) }, + /* SST -- large erase sizes are "overlays", "sectors" are 4<< 10 */ { INFO("sst25vf040b", 0xbf258d, 0, 64 << 10, 8, ER_4K) }, { INFO("sst25vf080b", 0xbf258e, 0, 64 << 10, 16, ER_4K) }, @@ -240,10 +292,6 @@ static const FlashPartInfo known_devices[] = { { INFO("w25q80", 0xef5014, 0, 64 << 10, 16, ER_4K) }, { INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K) }, { INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K) }, - - { INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) }, - { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, - { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, }; typedef enum { @@ -255,6 +303,7 @@ typedef enum { JEDEC_READ = 0x9f, BULK_ERASE = 0xc7, READ_FSR = 0x70, + RDCR = 0x15, READ = 0x03, READ4 = 0x13, @@ -271,12 +320,14 @@ typedef enum { PP = 0x02, PP4 = 0x12, + PP4_4 = 0x3e, DPP = 0xa2, QPP = 0x32, ERASE_4K = 0x20, ERASE4_4K = 0x21, ERASE_32K = 0x52, + ERASE4_32K = 0x5c, ERASE_SECTOR = 0xd8, ERASE4_SECTOR = 0xdc, @@ -289,6 +340,13 @@ typedef enum { RESET_ENABLE = 0x66, RESET_MEMORY = 0x99, + /* + * Micron: 0x35 - enable QPI + * Spansion: 0x35 - read control register + */ + RDCR_EQIO = 0x35, + RSTQIO = 0xf5, + RNVCR = 0xB5, WNVCR = 0xB1, @@ -304,9 +362,18 @@ typedef enum { STATE_PAGE_PROGRAM, STATE_READ, STATE_COLLECTING_DATA, + STATE_COLLECTING_VAR_LEN_DATA, STATE_READING_DATA, } CMDState; +typedef enum { + MAN_SPANSION, + MAN_MACRONIX, + MAN_NUMONYX, + MAN_WINBOND, + MAN_GENERIC, +} Manufacturer; + typedef struct Flash { SSISlave parent_obj; @@ -324,11 +391,22 @@ typedef struct Flash { uint8_t cmd_in_progress; uint64_t cur_addr; uint32_t nonvolatile_cfg; + /* Configuration register for Macronix */ uint32_t volatile_cfg; uint32_t enh_volatile_cfg; + /* Spansion cfg registers. 
*/ + uint8_t spansion_cr1nv; + uint8_t spansion_cr2nv; + uint8_t spansion_cr3nv; + uint8_t spansion_cr4nv; + uint8_t spansion_cr1v; + uint8_t spansion_cr2v; + uint8_t spansion_cr3v; + uint8_t spansion_cr4v; bool write_enable; bool four_bytes_address_mode; bool reset_enable; + bool quad_enable; uint8_t ear; int64_t dirty_page; @@ -350,6 +428,22 @@ typedef struct M25P80Class { #define M25P80_GET_CLASS(obj) \ OBJECT_GET_CLASS(M25P80Class, (obj), TYPE_M25P80) +static inline Manufacturer get_man(Flash *s) +{ + switch (s->pi->id[0]) { + case 0x20: + return MAN_NUMONYX; + case 0xEF: + return MAN_WINBOND; + case 0x01: + return MAN_SPANSION; + case 0xC2: + return MAN_MACRONIX; + default: + return MAN_GENERIC; + } +} + static void blk_sync_complete(void *opaque, int ret) { /* do nothing. Masters do not directly interact with the backing store, @@ -398,6 +492,7 @@ static void flash_erase(Flash *s, int offset, FlashCMD cmd) capa_to_assert = ER_4K; break; case ERASE_32K: + case ERASE4_32K: len = 32 << 10; capa_to_assert = ER_32K; break; @@ -468,9 +563,11 @@ static inline int get_addr_length(Flash *s) switch (s->cmd_in_progress) { case PP4: + case PP4_4: case READ4: case QIOR4: case ERASE4_4K: + case ERASE4_32K: case ERASE4_SECTOR: case FAST_READ4: case DOR4: @@ -494,7 +591,7 @@ static void complete_collecting_data(Flash *s) } if (get_addr_length(s) == 3) { - s->cur_addr += (s->ear & 0x3) * MAX_3BYTES_SIZE; + s->cur_addr += s->ear * MAX_3BYTES_SIZE; } s->state = STATE_IDLE; @@ -504,6 +601,7 @@ static void complete_collecting_data(Flash *s) case QPP: case PP: case PP4: + case PP4_4: s->state = STATE_PAGE_PROGRAM; break; case READ: @@ -523,11 +621,25 @@ static void complete_collecting_data(Flash *s) case ERASE_4K: case ERASE4_4K: case ERASE_32K: + case ERASE4_32K: case ERASE_SECTOR: case ERASE4_SECTOR: flash_erase(s, s->cur_addr, s->cmd_in_progress); break; case WRSR: + switch (get_man(s)) { + case MAN_SPANSION: + s->quad_enable = !!(s->data[1] & 0x02); + break; + case MAN_MACRONIX: + s->quad_enable = extract32(s->data[0], 6, 1); + if (s->len > 1) { + s->four_bytes_address_mode = extract32(s->data[1], 5, 1); + } + break; + default: + break; + } if (s->write_enable) { s->write_enable = false; } @@ -561,8 +673,10 @@ static void reset_memory(Flash *s) s->state = STATE_IDLE; s->write_enable = false; s->reset_enable = false; + s->quad_enable = false; - if (((s->pi->jedec >> 16) & 0xFF) == JEDEC_NUMONYX) { + switch (get_man(s)) { + case MAN_NUMONYX: s->volatile_cfg = 0; s->volatile_cfg |= VCFG_DUMMY; s->volatile_cfg |= VCFG_WRAP_SEQUENTIAL; @@ -592,16 +706,147 @@ static void reset_memory(Flash *s) s->four_bytes_address_mode = true; } if (!(s->nonvolatile_cfg & NVCFG_LOWER_SEGMENT_MASK)) { - s->ear = CFG_UPPER_128MB_SEG_ENABLED; + s->ear = s->size / MAX_3BYTES_SIZE - 1; } + break; + case MAN_MACRONIX: + s->volatile_cfg = 0x7; + break; + case MAN_SPANSION: + s->spansion_cr1v = s->spansion_cr1nv; + s->spansion_cr2v = s->spansion_cr2nv; + s->spansion_cr3v = s->spansion_cr3nv; + s->spansion_cr4v = s->spansion_cr4nv; + s->quad_enable = extract32(s->spansion_cr1v, + SPANSION_QUAD_CFG_POS, + SPANSION_QUAD_CFG_LEN + ); + s->four_bytes_address_mode = extract32(s->spansion_cr2v, + SPANSION_ADDR_LEN_POS, + SPANSION_ADDR_LEN_LEN + ); + break; + default: + break; } DB_PRINT_L(0, "Reset done.\n"); } +static void decode_fast_read_cmd(Flash *s) +{ + s->needed_bytes = get_addr_length(s); + switch (get_man(s)) { + /* Dummy cycles - modeled with bytes writes instead of bits */ + case MAN_WINBOND: + s->needed_bytes += 8; + break; + 
case MAN_NUMONYX: + s->needed_bytes += extract32(s->volatile_cfg, 4, 4); + break; + case MAN_MACRONIX: + if (extract32(s->volatile_cfg, 6, 2) == 1) { + s->needed_bytes += 6; + } else { + s->needed_bytes += 8; + } + break; + case MAN_SPANSION: + s->needed_bytes += extract32(s->spansion_cr2v, + SPANSION_DUMMY_CLK_POS, + SPANSION_DUMMY_CLK_LEN + ); + break; + default: + break; + } + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; +} + +static void decode_dio_read_cmd(Flash *s) +{ + s->needed_bytes = get_addr_length(s); + /* Dummy cycles modeled with bytes writes instead of bits */ + switch (get_man(s)) { + case MAN_WINBOND: + s->needed_bytes += 8; + break; + case MAN_SPANSION: + s->needed_bytes += SPANSION_CONTINUOUS_READ_MODE_CMD_LEN; + s->needed_bytes += extract32(s->spansion_cr2v, + SPANSION_DUMMY_CLK_POS, + SPANSION_DUMMY_CLK_LEN + ); + break; + case MAN_NUMONYX: + s->needed_bytes += extract32(s->volatile_cfg, 4, 4); + break; + case MAN_MACRONIX: + switch (extract32(s->volatile_cfg, 6, 2)) { + case 1: + s->needed_bytes += 6; + break; + case 2: + s->needed_bytes += 8; + break; + default: + s->needed_bytes += 4; + break; + } + break; + default: + break; + } + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; +} + +static void decode_qio_read_cmd(Flash *s) +{ + s->needed_bytes = get_addr_length(s); + /* Dummy cycles modeled with bytes writes instead of bits */ + switch (get_man(s)) { + case MAN_WINBOND: + s->needed_bytes += 8; + break; + case MAN_SPANSION: + s->needed_bytes += SPANSION_CONTINUOUS_READ_MODE_CMD_LEN; + s->needed_bytes += extract32(s->spansion_cr2v, + SPANSION_DUMMY_CLK_POS, + SPANSION_DUMMY_CLK_LEN + ); + break; + case MAN_NUMONYX: + s->needed_bytes += extract32(s->volatile_cfg, 4, 4); + break; + case MAN_MACRONIX: + switch (extract32(s->volatile_cfg, 6, 2)) { + case 1: + s->needed_bytes += 4; + break; + case 2: + s->needed_bytes += 8; + break; + default: + s->needed_bytes += 6; + break; + } + break; + default: + break; + } + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; +} + static void decode_new_cmd(Flash *s, uint32_t value) { s->cmd_in_progress = value; + int i; DB_PRINT_L(0, "decoded new command:%x\n", value); if (value != RESET_MEMORY) { @@ -613,6 +858,7 @@ static void decode_new_cmd(Flash *s, uint32_t value) case ERASE_4K: case ERASE4_4K: case ERASE_32K: + case ERASE4_32K: case ERASE_SECTOR: case ERASE4_SECTOR: case READ: @@ -621,6 +867,7 @@ static void decode_new_cmd(Flash *s, uint32_t value) case QPP: case PP: case PP4: + case PP4_4: s->needed_bytes = get_addr_length(s); s->pos = 0; s->len = 0; @@ -633,56 +880,35 @@ static void decode_new_cmd(Flash *s, uint32_t value) case DOR4: case QOR: case QOR4: - s->needed_bytes = get_addr_length(s); - if (((s->pi->jedec >> 16) & 0xFF) == JEDEC_NUMONYX) { - /* Dummy cycles modeled with bytes writes instead of bits */ - s->needed_bytes += extract32(s->volatile_cfg, 4, 4); - } - s->pos = 0; - s->len = 0; - s->state = STATE_COLLECTING_DATA; + decode_fast_read_cmd(s); break; case DIOR: case DIOR4: - switch ((s->pi->jedec >> 16) & 0xFF) { - case JEDEC_WINBOND: - case JEDEC_SPANSION: - s->needed_bytes = 4; - break; - default: - s->needed_bytes = get_addr_length(s); - /* Dummy cycles modeled with bytes writes instead of bits */ - s->needed_bytes += extract32(s->volatile_cfg, 4, 4); - } - s->pos = 0; - s->len = 0; - s->state = STATE_COLLECTING_DATA; + decode_dio_read_cmd(s); break; case QIOR: case QIOR4: - switch ((s->pi->jedec >> 16) & 0xFF) { - case JEDEC_WINBOND: - case JEDEC_SPANSION: - 
s->needed_bytes = 6; - break; - default: - s->needed_bytes = get_addr_length(s); - /* Dummy cycles modeled with bytes writes instead of bits */ - s->needed_bytes += extract32(s->volatile_cfg, 4, 4); - } - s->pos = 0; - s->len = 0; - s->state = STATE_COLLECTING_DATA; + decode_qio_read_cmd(s); break; case WRSR: if (s->write_enable) { - s->needed_bytes = 1; + switch (get_man(s)) { + case MAN_SPANSION: + s->needed_bytes = 2; + s->state = STATE_COLLECTING_DATA; + break; + case MAN_MACRONIX: + s->needed_bytes = 2; + s->state = STATE_COLLECTING_VAR_LEN_DATA; + break; + default: + s->needed_bytes = 1; + s->state = STATE_COLLECTING_DATA; + } s->pos = 0; - s->len = 0; - s->state = STATE_COLLECTING_DATA; } break; @@ -695,6 +921,9 @@ static void decode_new_cmd(Flash *s, uint32_t value) case RDSR: s->data[0] = (!!s->write_enable) << 1; + if (get_man(s) == MAN_MACRONIX) { + s->data[0] |= (!!s->quad_enable) << 6; + } s->pos = 0; s->len = 1; s->state = STATE_READING_DATA; @@ -712,17 +941,20 @@ static void decode_new_cmd(Flash *s, uint32_t value) case JEDEC_READ: DB_PRINT_L(0, "populated jedec code\n"); - s->data[0] = (s->pi->jedec >> 16) & 0xff; - s->data[1] = (s->pi->jedec >> 8) & 0xff; - s->data[2] = s->pi->jedec & 0xff; - if (s->pi->ext_jedec) { - s->data[3] = (s->pi->ext_jedec >> 8) & 0xff; - s->data[4] = s->pi->ext_jedec & 0xff; - s->len = 5; - } else { - s->len = 3; + for (i = 0; i < s->pi->id_len; i++) { + s->data[i] = s->pi->id[i]; } + + s->len = s->pi->id_len; + s->pos = 0; + s->state = STATE_READING_DATA; + break; + + case RDCR: + s->data[0] = s->volatile_cfg & 0xFF; + s->data[0] |= (!!s->four_bytes_address_mode) << 5; s->pos = 0; + s->len = 1; s->state = STATE_READING_DATA; break; @@ -765,7 +997,7 @@ static void decode_new_cmd(Flash *s, uint32_t value) s->state = STATE_READING_DATA; break; case WNVCR: - if (s->write_enable) { + if (s->write_enable && get_man(s) == MAN_NUMONYX) { s->needed_bytes = 2; s->pos = 0; s->len = 0; @@ -808,6 +1040,24 @@ static void decode_new_cmd(Flash *s, uint32_t value) reset_memory(s); } break; + case RDCR_EQIO: + switch (get_man(s)) { + case MAN_SPANSION: + s->data[0] = (!!s->quad_enable) << 1; + s->pos = 0; + s->len = 1; + s->state = STATE_READING_DATA; + break; + case MAN_MACRONIX: + s->quad_enable = true; + break; + default: + break; + } + break; + case RSTQIO: + s->quad_enable = false; + break; default: qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Unknown cmd %x\n", value); break; @@ -819,6 +1069,9 @@ static int m25p80_cs(SSISlave *ss, bool select) Flash *s = M25P80(ss); if (select) { + if (s->state == STATE_COLLECTING_VAR_LEN_DATA) { + complete_collecting_data(s); + } s->len = 0; s->pos = 0; s->state = STATE_IDLE; @@ -852,6 +1105,7 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx) break; case STATE_COLLECTING_DATA: + case STATE_COLLECTING_VAR_LEN_DATA: s->data[s->len] = (uint8_t)tx; s->len++; @@ -926,13 +1180,18 @@ static void m25p80_pre_save(void *opaque) } static Property m25p80_properties[] = { + /* This is default value for Micron flash */ DEFINE_PROP_UINT32("nonvolatile-cfg", Flash, nonvolatile_cfg, 0x8FFF), + DEFINE_PROP_UINT8("spansion-cr1nv", Flash, spansion_cr1nv, 0x0), + DEFINE_PROP_UINT8("spansion-cr2nv", Flash, spansion_cr2nv, 0x8), + DEFINE_PROP_UINT8("spansion-cr3nv", Flash, spansion_cr3nv, 0x2), + DEFINE_PROP_UINT8("spansion-cr4nv", Flash, spansion_cr4nv, 0x10), DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_m25p80 = { .name = "xilinx_spi", - .version_id = 2, + .version_id = 3, .minimum_version_id = 1, .pre_save = 
m25p80_pre_save, .fields = (VMStateField[]) { @@ -950,6 +1209,11 @@ static const VMStateDescription vmstate_m25p80 = { VMSTATE_UINT32_V(nonvolatile_cfg, Flash, 2), VMSTATE_UINT32_V(volatile_cfg, Flash, 2), VMSTATE_UINT32_V(enh_volatile_cfg, Flash, 2), + VMSTATE_BOOL_V(quad_enable, Flash, 3), + VMSTATE_UINT8_V(spansion_cr1nv, Flash, 3), + VMSTATE_UINT8_V(spansion_cr2nv, Flash, 3), + VMSTATE_UINT8_V(spansion_cr3nv, Flash, 3), + VMSTATE_UINT8_V(spansion_cr4nv, Flash, 3), VMSTATE_END_OF_LIST() } }; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index 284e64667c..fb43bbaa46 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -29,9 +29,11 @@ #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" -void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req) +void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, + VirtIOBlockReq *req) { req->dev = s; + req->vq = vq; req->qiov.size = 0; req->in_len = 0; req->next = NULL; @@ -53,11 +55,11 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) trace_virtio_blk_req_complete(req, status); stb_p(&req->in->status, status); - virtqueue_push(s->vq, &req->elem, req->in_len); + virtqueue_push(req->vq, &req->elem, req->in_len); if (s->dataplane_started && !s->dataplane_disabled) { - virtio_blk_data_plane_notify(s->dataplane); + virtio_blk_data_plane_notify(s->dataplane, req->vq); } else { - virtio_notify(vdev, s->vq); + virtio_notify(vdev, req->vq); } } @@ -187,12 +189,12 @@ out: #endif -static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s) +static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq) { - VirtIOBlockReq *req = virtqueue_pop(s->vq, sizeof(VirtIOBlockReq)); + VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq)); if (req) { - virtio_blk_init_request(s, req); + virtio_blk_init_request(s, vq, req); } return req; } @@ -583,7 +585,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) blk_io_plug(s->blk); - while ((req = virtio_blk_get_request(s))) { + while ((req = virtio_blk_get_request(s, vq))) { virtio_blk_handle_request(req, &mrb); } @@ -708,6 +710,7 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) blkcfg.physical_block_exp = get_physical_block_exp(conf); blkcfg.alignment_offset = 0; blkcfg.wce = blk_enable_write_cache(s->blk); + virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues); memcpy(config, &blkcfg, sizeof(struct virtio_blk_config)); } @@ -751,6 +754,9 @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features, if (blk_is_read_only(s->blk)) { virtio_add_feature(&features, VIRTIO_BLK_F_RO); } + if (s->conf.num_queues > 1) { + virtio_add_feature(&features, VIRTIO_BLK_F_MQ); + } return features; } @@ -795,11 +801,6 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) static void virtio_blk_save(QEMUFile *f, void *opaque) { VirtIODevice *vdev = VIRTIO_DEVICE(opaque); - VirtIOBlock *s = VIRTIO_BLK(vdev); - - if (s->dataplane) { - virtio_blk_data_plane_stop(s->dataplane); - } virtio_save(vdev, f); } @@ -811,6 +812,11 @@ static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f) while (req) { qemu_put_sbyte(f, 1); + + if (s->conf.num_queues > 1) { + qemu_put_be32(f, virtio_get_queue_index(req->vq)); + } + qemu_put_virtqueue_element(f, &req->elem); req = req->next; } @@ -834,9 +840,22 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, VirtIOBlock *s = VIRTIO_BLK(vdev); while (qemu_get_sbyte(f)) { + unsigned nvqs = 
s->conf.num_queues; + unsigned vq_idx = 0; VirtIOBlockReq *req; + + if (nvqs > 1) { + vq_idx = qemu_get_be32(f); + + if (vq_idx >= nvqs) { + error_report("Invalid virtqueue index in request list: %#x", + vq_idx); + return -EINVAL; + } + } + req = qemu_get_virtqueue_element(f, sizeof(VirtIOBlockReq)); - virtio_blk_init_request(s, req); + virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req); req->next = s->rq; s->rq = req; } @@ -862,6 +881,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) VirtIOBlkConf *conf = &s->conf; Error *err = NULL; static int virtio_blk_id; + unsigned i; if (!conf->conf.blk) { error_setg(errp, "drive property not set"); @@ -871,6 +891,10 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) error_setg(errp, "Device needs media, but drive is empty"); return; } + if (!conf->num_queues) { + error_setg(errp, "num-queues property must be larger than 0"); + return; + } blkconf_serial(&conf->conf, &conf->serial); s->original_wce = blk_enable_write_cache(conf->conf.blk); @@ -888,7 +912,9 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) s->rq = NULL; s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1; - s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output); + for (i = 0; i < conf->num_queues; i++) { + virtio_add_queue(vdev, 128, virtio_blk_handle_output); + } virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err); if (err != NULL) { error_propagate(errp, err); @@ -941,6 +967,7 @@ static Property virtio_blk_properties[] = { #endif DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0, true), + DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c index 65179fa500..e3bc52f7df 100644 --- a/hw/char/cadence_uart.c +++ b/hw/char/cadence_uart.c @@ -288,8 +288,11 @@ static gboolean cadence_uart_xmit(GIOChannel *chan, GIOCondition cond, } ret = qemu_chr_fe_write(s->chr, s->tx_fifo, s->tx_count); - s->tx_count -= ret; - memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_count); + + if (ret >= 0) { + s->tx_count -= ret; + memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_count); + } if (s->tx_count) { guint r = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP, diff --git a/hw/ide/macio.c b/hw/ide/macio.c index fa57352fc8..56cc50661f 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -406,7 +406,7 @@ static void pmac_ide_flush(DBDMA_io *io) IDEState *s = idebus_active_if(&m->bus); if (s->bus->dma->aiocb) { - blk_drain_all(); + blk_drain(s->blk); } } diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 5b2972ea9c..4633172bec 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -975,6 +975,7 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env, if (!is_a64(env) && !arm_is_el3_or_mon(env)) { r = CP_ACCESS_TRAP_EL3; } + break; default: g_assert_not_reached(); } @@ -1006,6 +1007,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env, if (!is_a64(env) && !arm_is_el3_or_mon(env)) { r = CP_ACCESS_TRAP_EL3; } + break; default: g_assert_not_reached(); } diff --git a/hw/intc/xics.c b/hw/intc/xics.c index cce7f3d112..2e83d41b14 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -694,17 +694,6 @@ static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi) lsi ? 
XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI; } -void xics_set_irq_type(XICSState *icp, int irq, bool lsi) -{ - int src = xics_find_source(icp, irq); - ICSState *ics; - - assert(src >= 0); - - ics = &icp->ics[src]; - ics_set_irq_type(ics, irq - ics->offset, lsi); -} - #define ICS_IRQ_FREE(ics, srcno) \ (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK))) diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs index ffb49c11ac..54020aa06c 100644 --- a/hw/misc/Makefile.objs +++ b/hw/misc/Makefile.objs @@ -52,3 +52,4 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o obj-$(CONFIG_EDU) += edu.o obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o obj-$(CONFIG_AUX) += aux.o +obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c new file mode 100644 index 0000000000..23f51752b0 --- /dev/null +++ b/hw/misc/aspeed_scu.c @@ -0,0 +1,284 @@ +/* + * ASPEED System Control Unit + * + * Andrew Jeffery <andrew@aj.id.au> + * + * Copyright 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/misc/aspeed_scu.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "trace.h" + +#define TO_REG(offset) ((offset) >> 2) + +#define PROT_KEY TO_REG(0x00) +#define SYS_RST_CTRL TO_REG(0x04) +#define CLK_SEL TO_REG(0x08) +#define CLK_STOP_CTRL TO_REG(0x0C) +#define FREQ_CNTR_CTRL TO_REG(0x10) +#define FREQ_CNTR_EVAL TO_REG(0x14) +#define IRQ_CTRL TO_REG(0x18) +#define D2PLL_PARAM TO_REG(0x1C) +#define MPLL_PARAM TO_REG(0x20) +#define HPLL_PARAM TO_REG(0x24) +#define FREQ_CNTR_RANGE TO_REG(0x28) +#define MISC_CTRL1 TO_REG(0x2C) +#define PCI_CTRL1 TO_REG(0x30) +#define PCI_CTRL2 TO_REG(0x34) +#define PCI_CTRL3 TO_REG(0x38) +#define SYS_RST_STATUS TO_REG(0x3C) +#define SOC_SCRATCH1 TO_REG(0x40) +#define SOC_SCRATCH2 TO_REG(0x44) +#define MAC_CLK_DELAY TO_REG(0x48) +#define MISC_CTRL2 TO_REG(0x4C) +#define VGA_SCRATCH1 TO_REG(0x50) +#define VGA_SCRATCH2 TO_REG(0x54) +#define VGA_SCRATCH3 TO_REG(0x58) +#define VGA_SCRATCH4 TO_REG(0x5C) +#define VGA_SCRATCH5 TO_REG(0x60) +#define VGA_SCRATCH6 TO_REG(0x64) +#define VGA_SCRATCH7 TO_REG(0x68) +#define VGA_SCRATCH8 TO_REG(0x6C) +#define HW_STRAP1 TO_REG(0x70) +#define RNG_CTRL TO_REG(0x74) +#define RNG_DATA TO_REG(0x78) +#define SILICON_REV TO_REG(0x7C) +#define PINMUX_CTRL1 TO_REG(0x80) +#define PINMUX_CTRL2 TO_REG(0x84) +#define PINMUX_CTRL3 TO_REG(0x88) +#define PINMUX_CTRL4 TO_REG(0x8C) +#define PINMUX_CTRL5 TO_REG(0x90) +#define PINMUX_CTRL6 TO_REG(0x94) +#define WDT_RST_CTRL TO_REG(0x9C) +#define PINMUX_CTRL7 TO_REG(0xA0) +#define PINMUX_CTRL8 TO_REG(0xA4) +#define PINMUX_CTRL9 TO_REG(0xA8) +#define WAKEUP_EN TO_REG(0xC0) +#define WAKEUP_CTRL TO_REG(0xC4) +#define HW_STRAP2 TO_REG(0xD0) +#define FREE_CNTR4 TO_REG(0xE0) +#define FREE_CNTR4_EXT TO_REG(0xE4) +#define CPU2_CTRL TO_REG(0x100) +#define CPU2_BASE_SEG1 TO_REG(0x104) +#define CPU2_BASE_SEG2 TO_REG(0x108) +#define CPU2_BASE_SEG3 TO_REG(0x10C) +#define CPU2_BASE_SEG4 TO_REG(0x110) +#define CPU2_BASE_SEG5 TO_REG(0x114) +#define CPU2_CACHE_CTRL TO_REG(0x118) +#define UART_HPLL_CLK TO_REG(0x160) +#define PCIE_CTRL TO_REG(0x180) +#define BMC_MMIO_CTRL TO_REG(0x184) +#define RELOC_DECODE_BASE1 TO_REG(0x188) +#define RELOC_DECODE_BASE2 TO_REG(0x18C) +#define MAILBOX_DECODE_BASE TO_REG(0x190) +#define SRAM_DECODE_BASE1 TO_REG(0x194) +#define SRAM_DECODE_BASE2 TO_REG(0x198) +#define 
BMC_REV TO_REG(0x19C) +#define BMC_DEV_ID TO_REG(0x1A4) + +#define PROT_KEY_UNLOCK 0x1688A8A8 +#define SCU_IO_REGION_SIZE 0x20000 + +#define AST2400_A0_SILICON_REV 0x02000303U + +static const uint32_t ast2400_a0_resets[ASPEED_SCU_NR_REGS] = { + [SYS_RST_CTRL] = 0xFFCFFEDCU, + [CLK_SEL] = 0xF3F40000U, + [CLK_STOP_CTRL] = 0x19FC3E8BU, + [D2PLL_PARAM] = 0x00026108U, + [MPLL_PARAM] = 0x00030291U, + [HPLL_PARAM] = 0x00000291U, + [MISC_CTRL1] = 0x00000010U, + [PCI_CTRL1] = 0x20001A03U, + [PCI_CTRL2] = 0x20001A03U, + [PCI_CTRL3] = 0x04000030U, + [SYS_RST_STATUS] = 0x00000001U, + [SOC_SCRATCH1] = 0x000000C0U, /* SoC completed DRAM init */ + [MISC_CTRL2] = 0x00000023U, + [RNG_CTRL] = 0x0000000EU, + [PINMUX_CTRL2] = 0x0000F000U, + [PINMUX_CTRL3] = 0x01000000U, + [PINMUX_CTRL4] = 0x000000FFU, + [PINMUX_CTRL5] = 0x0000A000U, + [WDT_RST_CTRL] = 0x003FFFF3U, + [PINMUX_CTRL8] = 0xFFFF0000U, + [PINMUX_CTRL9] = 0x000FFFFFU, + [FREE_CNTR4] = 0x000000FFU, + [FREE_CNTR4_EXT] = 0x000000FFU, + [CPU2_BASE_SEG1] = 0x80000000U, + [CPU2_BASE_SEG4] = 0x1E600000U, + [CPU2_BASE_SEG5] = 0xC0000000U, + [UART_HPLL_CLK] = 0x00001903U, + [PCIE_CTRL] = 0x0000007BU, + [BMC_DEV_ID] = 0x00002402U +}; + +static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedSCUState *s = ASPEED_SCU(opaque); + int reg = TO_REG(offset); + + if (reg >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; + } + + switch (reg) { + case WAKEUP_EN: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Read of write-only offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } + + return s->regs[reg]; +} + +static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedSCUState *s = ASPEED_SCU(opaque); + int reg = TO_REG(offset); + + if (reg >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + if (reg > PROT_KEY && reg < CPU2_BASE_SEG1 && + s->regs[PROT_KEY] != PROT_KEY_UNLOCK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__); + return; + } + + trace_aspeed_scu_write(offset, size, data); + + switch (reg) { + case FREQ_CNTR_EVAL: + case VGA_SCRATCH1 ... 
VGA_SCRATCH8: + case RNG_DATA: + case SILICON_REV: + case FREE_CNTR4: + case FREE_CNTR4_EXT: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + s->regs[reg] = data; +} + +static const MemoryRegionOps aspeed_scu_ops = { + .read = aspeed_scu_read, + .write = aspeed_scu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static void aspeed_scu_reset(DeviceState *dev) +{ + AspeedSCUState *s = ASPEED_SCU(dev); + const uint32_t *reset; + + switch (s->silicon_rev) { + case AST2400_A0_SILICON_REV: + reset = ast2400_a0_resets; + break; + default: + g_assert_not_reached(); + } + + memcpy(s->regs, reset, sizeof(s->regs)); + s->regs[SILICON_REV] = s->silicon_rev; + s->regs[HW_STRAP1] = s->hw_strap1; + s->regs[HW_STRAP2] = s->hw_strap2; +} + +static uint32_t aspeed_silicon_revs[] = { AST2400_A0_SILICON_REV, }; + +static bool is_supported_silicon_rev(uint32_t silicon_rev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(aspeed_silicon_revs); i++) { + if (silicon_rev == aspeed_silicon_revs[i]) { + return true; + } + } + + return false; +} + +static void aspeed_scu_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedSCUState *s = ASPEED_SCU(dev); + + if (!is_supported_silicon_rev(s->silicon_rev)) { + error_setg(errp, "Unknown silicon revision: 0x%" PRIx32, + s->silicon_rev); + return; + } + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_scu_ops, s, + TYPE_ASPEED_SCU, SCU_IO_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription vmstate_aspeed_scu = { + .name = "aspeed.scu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedSCUState, ASPEED_SCU_NR_REGS), + VMSTATE_END_OF_LIST() + } +}; + +static Property aspeed_scu_properties[] = { + DEFINE_PROP_UINT32("silicon-rev", AspeedSCUState, silicon_rev, 0), + DEFINE_PROP_UINT32("hw-strap1", AspeedSCUState, hw_strap1, 0), + DEFINE_PROP_UINT32("hw-strap2", AspeedSCUState, hw_strap1, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_scu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = aspeed_scu_realize; + dc->reset = aspeed_scu_reset; + dc->desc = "ASPEED System Control Unit"; + dc->vmsd = &vmstate_aspeed_scu; + dc->props = aspeed_scu_properties; +} + +static const TypeInfo aspeed_scu_info = { + .name = TYPE_ASPEED_SCU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSCUState), + .class_init = aspeed_scu_class_init, +}; + +static void aspeed_scu_register_types(void) +{ + type_register_static(&aspeed_scu_info); +} + +type_init(aspeed_scu_register_types); diff --git a/hw/misc/trace-events b/hw/misc/trace-events index 16b6701cbf..ea52a14d78 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -50,3 +50,6 @@ milkymist_pfpu_memory_read(uint32_t addr, uint32_t value) "addr %08x value %08x" milkymist_pfpu_memory_write(uint32_t addr, uint32_t value) "addr %08x value %08x" milkymist_pfpu_vectout(uint32_t a, uint32_t b, uint32_t dma_ptr) "a %08x b %08x dma_ptr %08x" milkymist_pfpu_pulse_irq(void) "Pulse IRQ" + +# hw/misc/aspeed_scu.c +aspeed_scu_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 0346f3e335..8a4be1e667 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ 
-274,6 +274,11 @@ static inline unsigned tx_desc_get_last(unsigned *desc) return (desc[1] & DESC_1_TX_LAST) ? 1 : 0; } +static inline void tx_desc_set_last(unsigned *desc) +{ + desc[1] |= DESC_1_TX_LAST; +} + static inline unsigned tx_desc_get_length(unsigned *desc) { return desc[1] & DESC_1_LENGTH; @@ -664,6 +669,13 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL; bytes_to_copy = size; + /* Hardware allows a zero value here but warns against it. To avoid QEMU + * indefinite loops we enforce a minimum value here + */ + if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) { + rxbufsize = GEM_DMACFG_RBUFSZ_MUL; + } + /* Pad to minimum length. Assume FCS field is stripped, logic * below will increment it to the real minimum of 64 when * not FCS stripping @@ -932,6 +944,7 @@ static void gem_transmit(CadenceGEMState *s) /* read next descriptor */ if (tx_desc_get_wrap(desc)) { + tx_desc_set_last(desc); packet_desc_addr = s->regs[GEM_TXQBASE]; } else { packet_desc_addr += 8; diff --git a/hw/net/e1000.c b/hw/net/e1000.c index 1202371271..06ca7b2638 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -536,7 +536,7 @@ e1000_send_packet(E1000State *s, const uint8_t *buf, int size) static void xmit_seg(E1000State *s) { - uint16_t len, *sp; + uint16_t len; unsigned int frames = s->tx.tso_frames, css, sofar; struct e1000_tx *tp = &s->tx; @@ -547,7 +547,7 @@ xmit_seg(E1000State *s) if (tp->props.ip) { /* IPv4 */ stw_be_p(tp->data+css+2, tp->size - css); stw_be_p(tp->data+css+4, - be16_to_cpup((uint16_t *)(tp->data+css+4))+frames); + lduw_be_p(tp->data + css + 4) + frames); } else { /* IPv6 */ stw_be_p(tp->data+css+4, tp->size - css); } @@ -567,8 +567,9 @@ xmit_seg(E1000State *s) if (tp->props.sum_needed & E1000_TXD_POPTS_TXSM) { unsigned int phsum; // add pseudo-header length before checksum calculation - sp = (uint16_t *)(tp->data + tp->props.tucso); - phsum = be16_to_cpup(sp) + len; + void *sp = tp->data + tp->props.tucso; + + phsum = lduw_be_p(sp) + len; phsum = (phsum >> 16) + (phsum & 0xffff); stw_be_p(sp, phsum); } @@ -759,9 +760,9 @@ receive_filter(E1000State *s, const uint8_t *buf, int size) if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) && e1000x_vlan_rx_filter_enabled(s->mac_reg)) { - uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14)); - uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) + - ((vid >> 5) & 0x7f)); + uint16_t vid = lduw_be_p(buf + 14); + uint32_t vfta = ldl_le_p((uint32_t*)(s->mac_reg + VFTA) + + ((vid >> 5) & 0x7f)); if ((vfta & (1 << (vid & 0x1f))) == 0) return 0; } @@ -889,8 +890,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) if (e1000x_vlan_enabled(s->mac_reg) && e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) { - vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf - + 14))); + vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14)); iov_ofs = 4; if (filter_buf == iov->iov_base) { memmove(filter_buf + 4, filter_buf, 12); diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index 4549acb120..6050d8b7f8 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -1019,9 +1019,9 @@ e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size) if (e1000x_is_vlan_packet(buf, core->vet) && e1000x_vlan_rx_filter_enabled(core->mac)) { - uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14)); - uint32_t vfta = le32_to_cpup((uint32_t *)(core->mac + VFTA) + - ((vid >> 5) & 0x7f)); + uint16_t vid = lduw_be_p(buf + 14); + 
uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) + + ((vid >> 5) & 0x7f)); if ((vfta & (1 << (vid & 0x1f))) == 0) { trace_e1000e_rx_flt_vlan_mismatch(vid); return false; diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c index 94f85c98c8..eb0e097137 100644 --- a/hw/net/e1000x_common.c +++ b/hw/net/e1000x_common.c @@ -47,7 +47,7 @@ bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac) bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet) { - uint16_t eth_proto = be16_to_cpup((uint16_t *)(buf + 12)); + uint16_t eth_proto = lduw_be_p(buf + 12); bool res = (eth_proto == vet); trace_e1000x_vlan_is_vlan_pkt(res, eth_proto, vet); diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c index 9b4b9b59d2..b10c419838 100644 --- a/hw/net/eepro100.c +++ b/hw/net/eepro100.c @@ -352,14 +352,14 @@ static unsigned e100_compute_mcast_idx(const uint8_t *ep) static uint16_t e100_read_reg2(EEPRO100State *s, E100RegisterOffset addr) { assert(!((uintptr_t)&s->mem[addr] & 1)); - return le16_to_cpup((uint16_t *)&s->mem[addr]); + return lduw_le_p(&s->mem[addr]); } /* Read a 32 bit control/status (CSR) register. */ static uint32_t e100_read_reg4(EEPRO100State *s, E100RegisterOffset addr) { assert(!((uintptr_t)&s->mem[addr] & 3)); - return le32_to_cpup((uint32_t *)&s->mem[addr]); + return ldl_le_p(&s->mem[addr]); } /* Write a 16 bit control/status (CSR) register. */ @@ -367,7 +367,7 @@ static void e100_write_reg2(EEPRO100State *s, E100RegisterOffset addr, uint16_t val) { assert(!((uintptr_t)&s->mem[addr] & 1)); - cpu_to_le16w((uint16_t *)&s->mem[addr], val); + stw_le_p(&s->mem[addr], val); } /* Read a 32 bit control/status (CSR) register. */ @@ -375,7 +375,7 @@ static void e100_write_reg4(EEPRO100State *s, E100RegisterOffset addr, uint32_t val) { assert(!((uintptr_t)&s->mem[addr] & 3)); - cpu_to_le32w((uint32_t *)&s->mem[addr], val); + stl_le_p(&s->mem[addr], val); } #if defined(DEBUG_EEPRO100) diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c index cf8b8236df..5115adcaea 100644 --- a/hw/net/mipsnet.c +++ b/hw/net/mipsnet.c @@ -183,10 +183,12 @@ static void mipsnet_ioport_write(void *opaque, hwaddr addr, break; case MIPSNET_TX_DATA_BUFFER: s->tx_buffer[s->tx_written++] = val; - if (s->tx_written == s->tx_count) { + if ((s->tx_written >= MAX_ETH_FRAME_SIZE) + || (s->tx_written == s->tx_count)) { /* Send buffer. 
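The added MAX_ETH_FRAME_SIZE bound presumably also protects the fixed-size
tx_buffer from being overrun by a guest that keeps writing without ever
reaching tx_count: once a full frame's worth of bytes is buffered, it is
sent regardless.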
*/ - trace_mipsnet_send(s->tx_count); - qemu_send_packet(qemu_get_queue(s->nic), s->tx_buffer, s->tx_count); + trace_mipsnet_send(s->tx_written); + qemu_send_packet(qemu_get_queue(s->nic), + s->tx_buffer, s->tx_written); s->tx_count = s->tx_written = 0; s->intctl |= MIPSNET_INTCTL_TXDONE; s->busy = 1; diff --git a/hw/net/rocker/rocker_tlv.h b/hw/net/rocker/rocker_tlv.h index e3c4ab6793..88561648f0 100644 --- a/hw/net/rocker/rocker_tlv.h +++ b/hw/net/rocker/rocker_tlv.h @@ -106,17 +106,17 @@ static inline uint64_t rocker_tlv_get_u64(const RockerTlv *tlv) static inline uint16_t rocker_tlv_get_le16(const RockerTlv *tlv) { - return le16_to_cpup((uint16_t *) rocker_tlv_data(tlv)); + return lduw_le_p(rocker_tlv_data(tlv)); } static inline uint32_t rocker_tlv_get_le32(const RockerTlv *tlv) { - return le32_to_cpup((uint32_t *) rocker_tlv_data(tlv)); + return ldl_le_p(rocker_tlv_data(tlv)); } static inline uint64_t rocker_tlv_get_le64(const RockerTlv *tlv) { - return le64_to_cpup((uint64_t *) rocker_tlv_data(tlv)); + return ldq_le_p(rocker_tlv_data(tlv)); } static inline void rocker_tlv_parse(RockerTlv **tb, int maxtype, diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 562c1fded2..07297cb78f 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -1013,8 +1013,8 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t uint32_t rx_space = rxdw0 & CP_RX_BUFFER_SIZE_MASK; /* write VLAN info to descriptor variables. */ - if (s->CpCmd & CPlusRxVLAN && be16_to_cpup((uint16_t *) - &buf[ETH_ALEN * 2]) == ETH_P_VLAN) { + if (s->CpCmd & CPlusRxVLAN && + lduw_be_p(&buf[ETH_ALEN * 2]) == ETH_P_VLAN) { dot1q_buf = &buf[ETH_ALEN * 2]; size -= VLAN_HLEN; /* if too small buffer, use the tailroom added duing expansion */ @@ -1024,11 +1024,10 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t rxdw1 &= ~CP_RX_VLAN_TAG_MASK; /* BE + ~le_to_cpu()~ + cpu_to_le() = BE */ - rxdw1 |= CP_RX_TAVA | le16_to_cpup((uint16_t *) - &dot1q_buf[ETHER_TYPE_LEN]); + rxdw1 |= CP_RX_TAVA | lduw_le_p(&dot1q_buf[ETHER_TYPE_LEN]); DPRINTF("C+ Rx mode : extracted vlan tag with tci: ""%u\n", - be16_to_cpup((uint16_t *)&dot1q_buf[ETHER_TYPE_LEN])); + lduw_be_p(&dot1q_buf[ETHER_TYPE_LEN])); } else { /* reset VLAN tag flag */ rxdw1 &= ~CP_RX_TAVA; @@ -1352,29 +1351,6 @@ static void RTL8139TallyCounters_dma_write(RTL8139State *s, dma_addr_t tc_addr) pci_dma_write(d, tc_addr + 62, (uint8_t *)&val16, 2); } -/* Loads values of tally counters from VM state file */ - -static const VMStateDescription vmstate_tally_counters = { - .name = "tally_counters", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_UINT64(TxOk, RTL8139TallyCounters), - VMSTATE_UINT64(RxOk, RTL8139TallyCounters), - VMSTATE_UINT64(TxERR, RTL8139TallyCounters), - VMSTATE_UINT32(RxERR, RTL8139TallyCounters), - VMSTATE_UINT16(MissPkt, RTL8139TallyCounters), - VMSTATE_UINT16(FAE, RTL8139TallyCounters), - VMSTATE_UINT32(Tx1Col, RTL8139TallyCounters), - VMSTATE_UINT32(TxMCol, RTL8139TallyCounters), - VMSTATE_UINT64(RxOkPhy, RTL8139TallyCounters), - VMSTATE_UINT64(RxOkBrd, RTL8139TallyCounters), - VMSTATE_UINT16(TxAbt, RTL8139TallyCounters), - VMSTATE_UINT16(TxUndrn, RTL8139TallyCounters), - VMSTATE_END_OF_LIST() - } -}; - static void rtl8139_ChipCmd_write(RTL8139State *s, uint32_t val) { DeviceState *d = DEVICE(s); @@ -3222,7 +3198,7 @@ static void rtl8139_pre_save(void *opaque) static const VMStateDescription vmstate_rtl8139 = { .name = "rtl8139", - .version_id = 4, + .version_id = 5, 
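/*
 * Version bumped from 4 to 5: the tally counters are now migrated as the
 * flattened fields below instead of through the removed
 * vmstate_tally_counters sub-description. The field order matches the old
 * nested layout, so pre-5 streams should still load (minimum_version_id
 * stays at 3); the one genuinely new field, RxOkMul, is gated on version 5
 * via VMSTATE_UINT32_V.
 */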
.minimum_version_id = 3, .post_load = rtl8139_post_load, .pre_save = rtl8139_pre_save, @@ -3293,8 +3269,19 @@ static const VMStateDescription vmstate_rtl8139 = { VMSTATE_UINT32(TimerInt, RTL8139State), VMSTATE_INT64(TCTR_base, RTL8139State), - VMSTATE_STRUCT(tally_counters, RTL8139State, 0, - vmstate_tally_counters, RTL8139TallyCounters), + VMSTATE_UINT64(tally_counters.TxOk, RTL8139State), + VMSTATE_UINT64(tally_counters.RxOk, RTL8139State), + VMSTATE_UINT64(tally_counters.TxERR, RTL8139State), + VMSTATE_UINT32(tally_counters.RxERR, RTL8139State), + VMSTATE_UINT16(tally_counters.MissPkt, RTL8139State), + VMSTATE_UINT16(tally_counters.FAE, RTL8139State), + VMSTATE_UINT32(tally_counters.Tx1Col, RTL8139State), + VMSTATE_UINT32(tally_counters.TxMCol, RTL8139State), + VMSTATE_UINT64(tally_counters.RxOkPhy, RTL8139State), + VMSTATE_UINT64(tally_counters.RxOkBrd, RTL8139State), + VMSTATE_UINT32_V(tally_counters.RxOkMul, RTL8139State, 5), + VMSTATE_UINT16(tally_counters.TxAbt, RTL8139State), + VMSTATE_UINT16(tally_counters.TxUndrn, RTL8139State), VMSTATE_UINT32_V(cplus_enabled, RTL8139State, 4), VMSTATE_END_OF_LIST() diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 5798f87d8e..7e6a60aa12 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -1051,7 +1051,7 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) ptr += n->host_hdr_len; if (!memcmp(&ptr[12], vlan, sizeof(vlan))) { - int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff; + int vid = lduw_be_p(ptr + 14) & 0xfff; if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) return 0; } diff --git a/hw/net/vmware_utils.h b/hw/net/vmware_utils.h index c0dbb2ff41..550060170e 100644 --- a/hw/net/vmware_utils.h +++ b/hw/net/vmware_utils.h @@ -26,97 +26,104 @@ * */ static inline void -vmw_shmem_read(hwaddr addr, void *buf, int len) +vmw_shmem_read(PCIDevice *d, hwaddr addr, void *buf, int len) { VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf); - cpu_physical_memory_read(addr, buf, len); + pci_dma_read(d, addr, buf, len); } static inline void -vmw_shmem_write(hwaddr addr, void *buf, int len) +vmw_shmem_write(PCIDevice *d, hwaddr addr, void *buf, int len) { VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf); - cpu_physical_memory_write(addr, buf, len); + pci_dma_write(d, addr, buf, len); } static inline void -vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write) +vmw_shmem_rw(PCIDevice *d, hwaddr addr, void *buf, int len, int is_write) { VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d", addr, len, buf, is_write); - cpu_physical_memory_rw(addr, buf, len, is_write); + if (is_write) + pci_dma_write(d, addr, buf, len); + else + pci_dma_read(d, addr, buf, len); } static inline void -vmw_shmem_set(hwaddr addr, uint8_t val, int len) +vmw_shmem_set(PCIDevice *d, hwaddr addr, uint8_t val, int len) { int i; VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val); for (i = 0; i < len; i++) { - cpu_physical_memory_write(addr + i, &val, 1); + pci_dma_write(d, addr + i, &val, 1); } } static inline uint32_t -vmw_shmem_ld8(hwaddr addr) +vmw_shmem_ld8(PCIDevice *d, hwaddr addr) { - uint8_t res = ldub_phys(&address_space_memory, addr); + uint8_t res; + pci_dma_read(d, addr, &res, 1); VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res); return res; } static inline void -vmw_shmem_st8(hwaddr addr, uint8_t value) +vmw_shmem_st8(PCIDevice *d, hwaddr addr, uint8_t value) { VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value); - 
stb_phys(&address_space_memory, addr, value); + pci_dma_write(d, addr, &value, 1); } static inline uint32_t -vmw_shmem_ld16(hwaddr addr) +vmw_shmem_ld16(PCIDevice *d, hwaddr addr) { - uint16_t res = lduw_le_phys(&address_space_memory, addr); + uint16_t res; + pci_dma_read(d, addr, &res, 2); VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res); return res; } static inline void -vmw_shmem_st16(hwaddr addr, uint16_t value) +vmw_shmem_st16(PCIDevice *d, hwaddr addr, uint16_t value) { VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value); - stw_le_phys(&address_space_memory, addr, value); + pci_dma_write(d, addr, &value, 2); } static inline uint32_t -vmw_shmem_ld32(hwaddr addr) +vmw_shmem_ld32(PCIDevice *d, hwaddr addr) { - uint32_t res = ldl_le_phys(&address_space_memory, addr); + uint32_t res; + pci_dma_read(d, addr, &res, 4); VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res); return res; } static inline void -vmw_shmem_st32(hwaddr addr, uint32_t value) +vmw_shmem_st32(PCIDevice *d, hwaddr addr, uint32_t value) { VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value); - stl_le_phys(&address_space_memory, addr, value); + pci_dma_write(d, addr, &value, 4); } static inline uint64_t -vmw_shmem_ld64(hwaddr addr) +vmw_shmem_ld64(PCIDevice *d, hwaddr addr) { - uint64_t res = ldq_le_phys(&address_space_memory, addr); + uint64_t res; + pci_dma_read(d, addr, &res, 8); VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res); return res; } static inline void -vmw_shmem_st64(hwaddr addr, uint64_t value) +vmw_shmem_st64(PCIDevice *d, hwaddr addr, uint64_t value) { VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value); - stq_le_phys(&address_space_memory, addr, value); + pci_dma_write(d, addr, &value, 8); } /* Macros for simplification of operations on array-style registers */ diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index d97897670d..92236d3919 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -74,54 +74,54 @@ #define VMXNET3_MAX_NMSIX_INTRS (1) /* Macros for rings descriptors access */ -#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \ - (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) +#define VMXNET3_READ_TX_QUEUE_DESCR8(_d, dpa, field) \ + (vmw_shmem_ld8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) -#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \ - (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value))) +#define VMXNET3_WRITE_TX_QUEUE_DESCR8(_d, dpa, field, value) \ + (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value))) -#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \ - (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) +#define VMXNET3_READ_TX_QUEUE_DESCR32(_d, dpa, field) \ + (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) -#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \ - (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value)) +#define VMXNET3_WRITE_TX_QUEUE_DESCR32(_d, dpa, field, value) \ + (vmw_shmem_st32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value)) -#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \ - (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) +#define VMXNET3_READ_TX_QUEUE_DESCR64(_d, dpa, field) \ + (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) -#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \ - (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), 
value)) +#define VMXNET3_WRITE_TX_QUEUE_DESCR64(_d, dpa, field, value) \ + (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value)) -#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \ - (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field))) +#define VMXNET3_READ_RX_QUEUE_DESCR64(_d, dpa, field) \ + (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field))) -#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \ - (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field))) +#define VMXNET3_READ_RX_QUEUE_DESCR32(_d, dpa, field) \ + (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field))) -#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \ - (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value)) +#define VMXNET3_WRITE_RX_QUEUE_DESCR64(_d, dpa, field, value) \ + (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value)) -#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \ - (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value)) +#define VMXNET3_WRITE_RX_QUEUE_DESCR8(_d, dpa, field, value) \ + (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value)) /* Macros for guest driver shared area access */ -#define VMXNET3_READ_DRV_SHARED64(shpa, field) \ - (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field))) +#define VMXNET3_READ_DRV_SHARED64(_d, shpa, field) \ + (vmw_shmem_ld64(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) -#define VMXNET3_READ_DRV_SHARED32(shpa, field) \ - (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field))) +#define VMXNET3_READ_DRV_SHARED32(_d, shpa, field) \ + (vmw_shmem_ld32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) -#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \ - (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val)) +#define VMXNET3_WRITE_DRV_SHARED32(_d, shpa, field, val) \ + (vmw_shmem_st32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), val)) -#define VMXNET3_READ_DRV_SHARED16(shpa, field) \ - (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field))) +#define VMXNET3_READ_DRV_SHARED16(_d, shpa, field) \ + (vmw_shmem_ld16(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) -#define VMXNET3_READ_DRV_SHARED8(shpa, field) \ - (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field))) +#define VMXNET3_READ_DRV_SHARED8(_d, shpa, field) \ + (vmw_shmem_ld8(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) -#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \ - (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l)) +#define VMXNET3_READ_DRV_SHARED(_d, shpa, field, b, l) \ + (vmw_shmem_read(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l)) #define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag)) @@ -147,7 +147,8 @@ typedef struct { uint8_t gen; } Vmxnet3Ring; -static inline void vmxnet3_ring_init(Vmxnet3Ring *ring, +static inline void vmxnet3_ring_init(PCIDevice *d, + Vmxnet3Ring *ring, hwaddr pa, size_t size, size_t cell_size, @@ -160,7 +161,7 @@ static inline void vmxnet3_ring_init(Vmxnet3Ring *ring, ring->next = 0; if (zero_region) { - vmw_shmem_set(pa, 0, size * cell_size); + vmw_shmem_set(d, pa, 0, size * cell_size); } } @@ -190,14 +191,16 @@ static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring) return ring->pa + ring->next * ring->cell_size; } -static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff) 
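/*
 * Context for the change below: every vmw_shmem_* helper and the queue and
 * shared-area macros now take the PCIDevice, so guest structures are read
 * and written with pci_dma_read()/pci_dma_write() through the device's DMA
 * address space rather than with cpu_physical_memory_*() on
 * address_space_memory. A minimal sketch of the resulting accessor shape,
 * illustrative only (the names below are made up, and "hw/pci/pci.h" is
 * assumed to be included for pci_dma_read/pci_dma_write):
 */
static inline uint32_t example_shmem_ld32(PCIDevice *d, hwaddr addr)
{
    uint32_t val;

    /* byte-for-byte copy from guest memory via the device's DMA path */
    pci_dma_read(d, addr, &val, sizeof(val));
    return val;
}

static inline void example_shmem_st32(PCIDevice *d, hwaddr addr, uint32_t val)
{
    pci_dma_write(d, addr, &val, sizeof(val));
}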
+static inline void vmxnet3_ring_read_curr_cell(PCIDevice *d, Vmxnet3Ring *ring, + void *buff) { - vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); + vmw_shmem_read(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); } -static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff) +static inline void vmxnet3_ring_write_curr_cell(PCIDevice *d, Vmxnet3Ring *ring, + void *buff) { - vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); + vmw_shmem_write(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); } static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring) @@ -456,9 +459,9 @@ vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked) vmxnet3_update_interrupt_line_state(s, lidx); } -static bool vmxnet3_verify_driver_magic(hwaddr dshmem) +static bool vmxnet3_verify_driver_magic(PCIDevice *d, hwaddr dshmem) { - return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC); + return (VMXNET3_READ_DRV_SHARED32(d, dshmem, magic) == VMXNET3_REV1_MAGIC); } #define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF) @@ -526,13 +529,14 @@ vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx) static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx) { struct Vmxnet3_TxCompDesc txcq_descr; + PCIDevice *d = PCI_DEVICE(s); VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring); txcq_descr.txdIdx = tx_ridx; txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring); - vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr); + vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr); /* Flush changes in TX descriptor before changing the counter value */ smp_wmb(); @@ -688,13 +692,14 @@ vmxnet3_pop_next_tx_descr(VMXNET3State *s, uint32_t *descr_idx) { Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring; + PCIDevice *d = PCI_DEVICE(s); - vmxnet3_ring_read_curr_cell(ring, txd); + vmxnet3_ring_read_curr_cell(d, ring, txd); if (txd->gen == vmxnet3_ring_curr_gen(ring)) { /* Only read after generation field verification */ smp_rmb(); /* Re-read to be sure we got the latest version */ - vmxnet3_ring_read_curr_cell(ring, txd); + vmxnet3_ring_read_curr_cell(d, ring, txd); VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring); *descr_idx = vmxnet3_ring_curr_cell_idx(ring); vmxnet3_inc_tx_consumption_counter(s, qidx); @@ -782,9 +787,11 @@ static inline void vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx, struct Vmxnet3_RxDesc *dbuf, uint32_t *didx) { + PCIDevice *d = PCI_DEVICE(s); + Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx]; *didx = vmxnet3_ring_curr_cell_idx(ring); - vmxnet3_ring_read_curr_cell(ring, dbuf); + vmxnet3_ring_read_curr_cell(d, ring, dbuf); } static inline uint8_t @@ -802,9 +809,8 @@ vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen) hwaddr daddr = vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring); - pci_dma_read(PCI_DEVICE(s), daddr, - &rxcd, sizeof(struct Vmxnet3_RxCompDesc)); - + pci_dma_read(PCI_DEVICE(s), + daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc)); ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring); if (rxcd.gen != ring_gen) { @@ -1058,6 +1064,7 @@ static bool vmxnet3_indicate_packet(VMXNET3State *s) { struct Vmxnet3_RxDesc rxd; + PCIDevice *d = PCI_DEVICE(s); bool is_head = true; uint32_t rxd_idx; uint32_t rx_ridx = 0; @@ -1091,7 +1098,7 @@ vmxnet3_indicate_packet(VMXNET3State *s) } chunk_size = MIN(bytes_left, 
rxd.len); - vmxnet3_pci_dma_writev(PCI_DEVICE(s), data, bytes_copied, + vmxnet3_pci_dma_writev(d, data, bytes_copied, le64_to_cpu(rxd.addr), chunk_size); bytes_copied += chunk_size; bytes_left -= chunk_size; @@ -1099,7 +1106,7 @@ vmxnet3_indicate_packet(VMXNET3State *s) vmxnet3_dump_rx_descr(&rxd); if (ready_rxcd_pa != 0) { - pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd)); + pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd)); } memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc)); @@ -1131,7 +1138,7 @@ vmxnet3_indicate_packet(VMXNET3State *s) rxcd.eop = 1; rxcd.err = (bytes_left != 0); - pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd)); + pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd)); /* Flush RX descriptor changes */ smp_wmb(); @@ -1250,7 +1257,9 @@ static void vmxnet3_reset(VMXNET3State *s) static void vmxnet3_update_rx_mode(VMXNET3State *s) { - s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, + PCIDevice *d = PCI_DEVICE(s); + + s->rx_mode = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.rxFilterConf.rxMode); VMW_CFPRN("RX mode: 0x%08X", s->rx_mode); } @@ -1258,9 +1267,10 @@ static void vmxnet3_update_rx_mode(VMXNET3State *s) static void vmxnet3_update_vlan_filters(VMXNET3State *s) { int i; + PCIDevice *d = PCI_DEVICE(s); /* Copy configuration from shared memory */ - VMXNET3_READ_DRV_SHARED(s->drv_shmem, + VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.rxFilterConf.vfTable, s->vlan_table, sizeof(s->vlan_table)); @@ -1281,8 +1291,10 @@ static void vmxnet3_update_vlan_filters(VMXNET3State *s) static void vmxnet3_update_mcast_filters(VMXNET3State *s) { + PCIDevice *d = PCI_DEVICE(s); + uint16_t list_bytes = - VMXNET3_READ_DRV_SHARED16(s->drv_shmem, + VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.rxFilterConf.mfTableLen); s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]); @@ -1299,10 +1311,10 @@ static void vmxnet3_update_mcast_filters(VMXNET3State *s) } else { int i; hwaddr mcast_list_pa = - VMXNET3_READ_DRV_SHARED64(s->drv_shmem, + VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.rxFilterConf.mfTablePA); - pci_dma_read(PCI_DEVICE(s), mcast_list_pa, s->mcast_list, list_bytes); + pci_dma_read(d, mcast_list_pa, s->mcast_list, list_bytes); VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len); for (i = 0; i < s->mcast_list_len; i++) { @@ -1328,19 +1340,20 @@ static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s) static void vmxnet3_fill_stats(VMXNET3State *s) { int i; + PCIDevice *d = PCI_DEVICE(s); if (!s->device_active) return; for (i = 0; i < s->txq_num; i++) { - pci_dma_write(PCI_DEVICE(s), + pci_dma_write(d, s->txq_descr[i].tx_stats_pa, &s->txq_descr[i].txq_stats, sizeof(s->txq_descr[i].txq_stats)); } for (i = 0; i < s->rxq_num; i++) { - pci_dma_write(PCI_DEVICE(s), + pci_dma_write(d, s->rxq_descr[i].rx_stats_pa, &s->rxq_descr[i].rxq_stats, sizeof(s->rxq_descr[i].rxq_stats)); @@ -1350,8 +1363,9 @@ static void vmxnet3_fill_stats(VMXNET3State *s) static void vmxnet3_adjust_by_guest_type(VMXNET3State *s) { struct Vmxnet3_GOSInfo gos; + PCIDevice *d = PCI_DEVICE(s); - VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos, + VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.misc.driverInfo.gos, &gos, sizeof(gos)); s->rx_packets_compound = (gos.gosType == VMXNET3_GOS_TYPE_WIN) ? 
false : true; @@ -1371,13 +1385,14 @@ vmxnet3_dump_conf_descr(const char *name, static void vmxnet3_update_pm_state(VMXNET3State *s) { struct Vmxnet3_VariableLenConfDesc pm_descr; + PCIDevice *d = PCI_DEVICE(s); pm_descr.confLen = - VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen); + VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confLen); pm_descr.confVer = - VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer); + VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confVer); pm_descr.confPA = - VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA); + VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.pmConfDesc.confPA); vmxnet3_dump_conf_descr("PM State", &pm_descr); } @@ -1386,8 +1401,9 @@ static void vmxnet3_update_features(VMXNET3State *s) { uint32_t guest_features; int rxcso_supported; + PCIDevice *d = PCI_DEVICE(s); - guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, + guest_features = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.uptFeatures); rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM); @@ -1462,12 +1478,13 @@ static void vmxnet3_activate_device(VMXNET3State *s) { int i; static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1; + PCIDevice *d = PCI_DEVICE(s); hwaddr qdescr_table_pa; uint64_t pa; uint32_t size; /* Verify configuration consistency */ - if (!vmxnet3_verify_driver_magic(s->drv_shmem)) { + if (!vmxnet3_verify_driver_magic(d, s->drv_shmem)) { VMW_ERPRN("Device configuration received from driver is invalid"); return; } @@ -1483,11 +1500,11 @@ static void vmxnet3_activate_device(VMXNET3State *s) vmxnet3_update_pm_state(s); vmxnet3_setup_rx_filtering(s); /* Cache fields from shared memory */ - s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu); + s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu); VMW_CFPRN("MTU is %u", s->mtu); s->max_rx_frags = - VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG); + VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.misc.maxNumRxSG); if (s->max_rx_frags == 0) { s->max_rx_frags = 1; @@ -1496,24 +1513,24 @@ static void vmxnet3_activate_device(VMXNET3State *s) VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags); s->event_int_idx = - VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx); + VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.eventIntrIdx); assert(vmxnet3_verify_intx(s, s->event_int_idx)); VMW_CFPRN("Events interrupt line is %u", s->event_int_idx); s->auto_int_masking = - VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask); + VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.autoMask); VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking); s->txq_num = - VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues); + VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numTxQueues); s->rxq_num = - VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues); + VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numRxQueues); VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num); vmxnet3_validate_queues(s); qdescr_table_pa = - VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA); + VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.misc.queueDescPA); VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa); /* @@ -1529,25 +1546,25 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* Read interrupt number for this TX queue */ s->txq_descr[i].intr_idx 
= - VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx); + VMXNET3_READ_TX_QUEUE_DESCR8(d, qdescr_pa, conf.intrIdx); assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx)); VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx); /* Read rings memory locations for TX queues */ - pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA); - size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize); + pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA); + size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize); - vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size, + vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size, sizeof(struct Vmxnet3_TxDesc), false); VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring); s->max_tx_frags += size; /* TXC ring */ - pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA); - size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize); - vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size, + pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA); + size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize); + vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size, sizeof(struct Vmxnet3_TxCompDesc), true); VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring); @@ -1558,7 +1575,7 @@ static void vmxnet3_activate_device(VMXNET3State *s) sizeof(s->txq_descr[i].txq_stats)); /* Fill device-managed parameters for queues */ - VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa, + VMXNET3_WRITE_TX_QUEUE_DESCR32(d, qdescr_pa, ctrl.txThreshold, VMXNET3_DEF_TX_THRESHOLD); } @@ -1578,7 +1595,7 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* Read interrupt number for this RX queue */ s->rxq_descr[i].intr_idx = - VMXNET3_READ_TX_QUEUE_DESCR8(qd_pa, conf.intrIdx); + VMXNET3_READ_TX_QUEUE_DESCR8(d, qd_pa, conf.intrIdx); assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx)); VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx); @@ -1586,18 +1603,18 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* Read rings memory locations */ for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) { /* RX rings */ - pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]); - size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]); - vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size, + pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]); + size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]); + vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size, sizeof(struct Vmxnet3_RxDesc), false); VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d", i, j, pa, size); } /* RXC ring */ - pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA); - size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize); - vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size, + pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA); + size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize); + vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size, sizeof(struct Vmxnet3_RxCompDesc), true); VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size); @@ -1764,19 +1781,21 @@ static uint64_t vmxnet3_get_command_status(VMXNET3State *s) static void vmxnet3_set_events(VMXNET3State *s, uint32_t val) { uint32_t events; + PCIDevice *d = PCI_DEVICE(s); VMW_CBPRN("Setting events: 0x%x", val); - events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val; - 
VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events); + events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) | val; + VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events); } static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val) { + PCIDevice *d = PCI_DEVICE(s); uint32_t events; VMW_CBPRN("Clearing events: 0x%x", val); - events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val; - VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events); + events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val; + VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events); } static void diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c index 76bd78bfd7..225177b5af 100644 --- a/hw/ppc/ppce500_spin.c +++ b/hw/ppc/ppce500_spin.c @@ -104,7 +104,7 @@ static void spin_kick(void *data) hwaddr map_start; cpu_synchronize_state(cpu); - stl_p(&curspin->pir, env->spr[SPR_PIR]); + stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]); env->nip = ldq_p(&curspin->addr) & (map_size - 1); env->gpr[3] = ldq_p(&curspin->r3); env->gpr[4] = 0; diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 778fa255a9..0b6bb9ce1a 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -2367,8 +2367,8 @@ static HotpluggableCPUList *spapr_query_hotpluggable_cpus(MachineState *machine) cpu_item->type = spapr_get_cpu_core_type(machine->cpu_model); cpu_item->vcpus_count = smp_threads; - cpu_props->has_core = true; - cpu_props->core = i * smt; + cpu_props->has_core_id = true; + cpu_props->core_id = i * smt; /* TODO: add 'has_node/node' here to describe to which node core belongs */ diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 1625e6b38b..8b709e362e 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -69,92 +69,58 @@ VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch) return vdev; } -static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n, - bool assign, bool set_handler) +static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev) { - VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); - VirtQueue *vq = virtio_get_queue(vdev, n); - EventNotifier *notifier = virtio_queue_get_host_notifier(vq); - int r = 0; - SubchDev *sch = dev->sch; - uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid; + virtio_bus_start_ioeventfd(&dev->bus); +} - if (assign) { - r = event_notifier_init(notifier, 1); - if (r < 0) { - error_report("%s: unable to init event notifier: %d", __func__, r); - return r; - } - virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler); - r = s390_assign_subch_ioeventfd(notifier, sch_id, n, assign); - if (r < 0) { - error_report("%s: unable to assign ioeventfd: %d", __func__, r); - virtio_queue_set_host_notifier_fd_handler(vq, false, false); - event_notifier_cleanup(notifier); - return r; - } - } else { - virtio_queue_set_host_notifier_fd_handler(vq, false, false); - s390_assign_subch_ioeventfd(notifier, sch_id, n, assign); - event_notifier_cleanup(notifier); - } - return r; +static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev) +{ + virtio_bus_stop_ioeventfd(&dev->bus); } -static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev) +static bool virtio_ccw_ioeventfd_started(DeviceState *d) { - VirtIODevice *vdev; - int n, r; + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - if (!(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) || - dev->ioeventfd_disabled || - dev->ioeventfd_started) { - return; - } - vdev = virtio_bus_get_device(&dev->bus); - for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } - 
r = virtio_ccw_set_guest2host_notifier(dev, n, true, true); - if (r < 0) { - goto assign_error; - } - } - dev->ioeventfd_started = true; - return; + return dev->ioeventfd_started; +} - assign_error: - while (--n >= 0) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } - r = virtio_ccw_set_guest2host_notifier(dev, n, false, false); - assert(r >= 0); +static void virtio_ccw_ioeventfd_set_started(DeviceState *d, bool started, + bool err) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + + dev->ioeventfd_started = started; + if (err) { + /* Disable ioeventfd for this device. */ + dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD; } - dev->ioeventfd_started = false; - /* Disable ioeventfd for this device. */ - dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD; - error_report("%s: failed. Fallback to userspace (slower).", __func__); } -static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev) +static bool virtio_ccw_ioeventfd_disabled(DeviceState *d) { - VirtIODevice *vdev; - int n, r; + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - if (!dev->ioeventfd_started) { - return; - } - vdev = virtio_bus_get_device(&dev->bus); - for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } - r = virtio_ccw_set_guest2host_notifier(dev, n, false, false); - assert(r >= 0); - } - dev->ioeventfd_started = false; + return dev->ioeventfd_disabled || + !(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD); +} + +static void virtio_ccw_ioeventfd_set_disabled(DeviceState *d, bool disabled) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + + dev->ioeventfd_disabled = disabled; +} + +static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, + int n, bool assign) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + SubchDev *sch = dev->sch; + uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid; + + return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign); } VirtualCssBus *virtual_css_bus_init(void) @@ -1157,19 +1123,6 @@ static bool virtio_ccw_query_guest_notifiers(DeviceState *d) return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA); } -static int virtio_ccw_set_host_notifier(DeviceState *d, int n, bool assign) -{ - VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - - /* Stop using the generic ioeventfd, we are doing eventfd handling - * ourselves below */ - dev->ioeventfd_disabled = assign; - if (assign) { - virtio_ccw_stop_ioeventfd(dev); - } - return virtio_ccw_set_guest2host_notifier(dev, n, assign, false); -} - static int virtio_ccw_get_mappings(VirtioCcwDevice *dev) { int r; @@ -1798,7 +1751,6 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) k->notify = virtio_ccw_notify; k->vmstate_change = virtio_ccw_vmstate_change; k->query_guest_notifiers = virtio_ccw_query_guest_notifiers; - k->set_host_notifier = virtio_ccw_set_host_notifier; k->set_guest_notifiers = virtio_ccw_set_guest_notifiers; k->save_queue = virtio_ccw_save_queue; k->load_queue = virtio_ccw_load_queue; @@ -1807,6 +1759,11 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) k->device_plugged = virtio_ccw_device_plugged; k->post_plugged = virtio_ccw_post_plugged; k->device_unplugged = virtio_ccw_device_unplugged; + k->ioeventfd_started = virtio_ccw_ioeventfd_started; + k->ioeventfd_set_started = virtio_ccw_ioeventfd_set_started; + k->ioeventfd_disabled = virtio_ccw_ioeventfd_disabled; + k->ioeventfd_set_disabled = virtio_ccw_ioeventfd_set_disabled; + k->ioeventfd_assign = virtio_ccw_ioeventfd_assign; } static const 
TypeInfo virtio_ccw_bus_info = { diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c index 1a49f1e4b7..18ced31493 100644 --- a/hw/scsi/virtio-scsi-dataplane.c +++ b/hw/scsi/virtio-scsi-dataplane.c @@ -31,7 +31,7 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread) s->ctx = iothread_get_aio_context(vs->conf.iothread); /* Don't try if transport does not support notifiers. */ - if (!k->set_guest_notifiers || !k->set_host_notifier) { + if (!k->set_guest_notifiers || !k->ioeventfd_started) { fprintf(stderr, "virtio-scsi: Failed to set iothread " "(transport does not support notifiers)"); exit(1); @@ -69,11 +69,10 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n, void (*fn)(VirtIODevice *vdev, VirtQueue *vq)) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); int rc; /* Set up virtqueue notify */ - rc = k->set_host_notifier(qbus->parent, n, true); + rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, true); if (rc != 0) { fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n", rc); @@ -159,7 +158,7 @@ fail_vrings: virtio_scsi_clear_aio(s); aio_context_release(s->ctx); for (i = 0; i < vs->conf.num_queues + 2; i++) { - k->set_host_notifier(qbus->parent, i, false); + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); } k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false); fail_guest_notifiers: @@ -198,7 +197,7 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s) aio_context_release(s->ctx); for (i = 0; i < vs->conf.num_queues + 2; i++) { - k->set_host_notifier(qbus->parent, i, false); + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); } /* Clean up guest notifier (irq) */ diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 71d09d3ef3..e8179d6616 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -666,11 +666,6 @@ static void virtio_scsi_reset(VirtIODevice *vdev) static void virtio_scsi_save(QEMUFile *f, void *opaque) { VirtIODevice *vdev = VIRTIO_DEVICE(opaque); - VirtIOSCSI *s = VIRTIO_SCSI(vdev); - - if (s->dataplane_started) { - virtio_scsi_dataplane_stop(s); - } virtio_save(vdev, f); } diff --git a/hw/sh4/sh_pci.c b/hw/sh4/sh_pci.c index e820a32307..1747628f3d 100644 --- a/hw/sh4/sh_pci.c +++ b/hw/sh4/sh_pci.c @@ -55,7 +55,7 @@ static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val, switch(addr) { case 0 ... 0xfc: - cpu_to_le32w((uint32_t*)(pcic->dev->config + addr), val); + stl_le_p(pcic->dev->config + addr, val); break; case 0x1c0: pcic->par = val; @@ -85,7 +85,7 @@ static uint64_t sh_pci_reg_read (void *p, hwaddr addr, switch(addr) { case 0 ... 
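/* Presumably part of the same cleanup applied to the network devices above:
 * the cast-plus-le32_to_cpup() access is replaced by ldl_le_p(), which takes
 * a void pointer, is safe for unaligned buffers, and makes the byte order
 * explicit at the call site without the intermediate cast. */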
0xfc: - return le32_to_cpup((uint32_t*)(pcic->dev->config + addr)); + return ldl_le_p(pcic->dev->config + addr); case 0x1c0: return pcic->par; case 0x1c4: diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 81cc5b0ae3..a01394d5ac 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1110,14 +1110,15 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) VirtioBusState *vbus = VIRTIO_BUS(qbus); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); int i, r, e; - if (!k->set_host_notifier) { + if (!k->ioeventfd_started) { fprintf(stderr, "binding does not support host notifiers\n"); r = -ENOSYS; goto fail; } for (i = 0; i < hdev->nvqs; ++i) { - r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true); + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, + true); if (r < 0) { fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r); goto fail_vq; @@ -1127,7 +1128,8 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) return 0; fail_vq: while (--i >= 0) { - e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false); + e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, + false); if (e < 0) { fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r); fflush(stderr); @@ -1146,12 +1148,11 @@ fail: void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusState *vbus = VIRTIO_BUS(qbus); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); int i, r; for (i = 0; i < hdev->nvqs; ++i) { - r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false); + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, + false); if (r < 0) { fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r); fflush(stderr); diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c index 574f0e23f8..131376027b 100644 --- a/hw/virtio/virtio-bus.c +++ b/hw/virtio/virtio-bus.c @@ -146,6 +146,138 @@ void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config) } } +/* + * This function handles both assigning the ioeventfd handler and + * registering it with the kernel. 
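 * (With this patch the per-transport copies of this logic are removed: the
 *  virtio-ccw and virtio-mmio hunks in this same series now only provide the
 *  ioeventfd_started/set_started/disabled/set_disabled/assign callbacks,
 *  while the shared start/stop/switch code lives here in virtio-bus.c.)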
+ * assign: register/deregister ioeventfd with the kernel + * set_handler: use the generic ioeventfd handler + */ +static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus, + int n, bool assign, bool set_handler) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); + VirtQueue *vq = virtio_get_queue(vdev, n); + EventNotifier *notifier = virtio_queue_get_host_notifier(vq); + int r = 0; + + if (assign) { + r = event_notifier_init(notifier, 1); + if (r < 0) { + error_report("%s: unable to init event notifier: %d", __func__, r); + return r; + } + virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler); + r = k->ioeventfd_assign(proxy, notifier, n, assign); + if (r < 0) { + error_report("%s: unable to assign ioeventfd: %d", __func__, r); + virtio_queue_set_host_notifier_fd_handler(vq, false, false); + event_notifier_cleanup(notifier); + return r; + } + } else { + virtio_queue_set_host_notifier_fd_handler(vq, false, false); + k->ioeventfd_assign(proxy, notifier, n, assign); + event_notifier_cleanup(notifier); + } + return r; +} + +void virtio_bus_start_ioeventfd(VirtioBusState *bus) +{ + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); + DeviceState *proxy = DEVICE(BUS(bus)->parent); + VirtIODevice *vdev; + int n, r; + + if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) { + return; + } + if (k->ioeventfd_disabled(proxy)) { + return; + } + vdev = virtio_bus_get_device(bus); + for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { + if (!virtio_queue_get_num(vdev, n)) { + continue; + } + r = set_host_notifier_internal(proxy, bus, n, true, true); + if (r < 0) { + goto assign_error; + } + } + k->ioeventfd_set_started(proxy, true, false); + return; + +assign_error: + while (--n >= 0) { + if (!virtio_queue_get_num(vdev, n)) { + continue; + } + + r = set_host_notifier_internal(proxy, bus, n, false, false); + assert(r >= 0); + } + k->ioeventfd_set_started(proxy, false, true); + error_report("%s: failed. Fallback to userspace (slower).", __func__); +} + +void virtio_bus_stop_ioeventfd(VirtioBusState *bus) +{ + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); + DeviceState *proxy = DEVICE(BUS(bus)->parent); + VirtIODevice *vdev; + int n, r; + + if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) { + return; + } + vdev = virtio_bus_get_device(bus); + for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { + if (!virtio_queue_get_num(vdev, n)) { + continue; + } + r = set_host_notifier_internal(proxy, bus, n, false, false); + assert(r >= 0); + } + k->ioeventfd_set_started(proxy, false, false); +} + +/* + * This function switches from/to the generic ioeventfd handler. + * assign==false means 'use generic ioeventfd handler'. + */ +int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign) +{ + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); + DeviceState *proxy = DEVICE(BUS(bus)->parent); + VirtIODevice *vdev = virtio_bus_get_device(bus); + VirtQueue *vq = virtio_get_queue(vdev, n); + + if (!k->ioeventfd_started) { + return -ENOSYS; + } + if (assign) { + /* + * Stop using the generic ioeventfd, we are doing eventfd handling + * ourselves below + */ + k->ioeventfd_set_disabled(proxy, true); + } + /* + * Just switch the handler, don't deassign the ioeventfd. + * Otherwise, there's a window where we don't have an + * ioeventfd and we may end up with a notification where + * we don't expect one. + */ + virtio_queue_set_host_notifier_fd_handler(vq, assign, !assign); + if (!assign) { + /* Use generic ioeventfd handler again. 
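Clearing ioeventfd_disabled just below lets a later
virtio_bus_start_ioeventfd() call proceed again (it returns early while the
flag is set, see above).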
*/ + k->ioeventfd_set_disabled(proxy, false); + } + return 0; +} + static char *virtio_bus_get_dev_path(DeviceState *dev) { BusState *bus = qdev_get_parent_bus(dev); diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index d4cd91f8c4..eb84b74532 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -93,90 +93,59 @@ typedef struct { bool ioeventfd_started; } VirtIOMMIOProxy; -static int virtio_mmio_set_host_notifier_internal(VirtIOMMIOProxy *proxy, - int n, bool assign, - bool set_handler) +static bool virtio_mmio_ioeventfd_started(DeviceState *d) { - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - VirtQueue *vq = virtio_get_queue(vdev, n); - EventNotifier *notifier = virtio_queue_get_host_notifier(vq); - int r = 0; + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); - if (assign) { - r = event_notifier_init(notifier, 1); - if (r < 0) { - error_report("%s: unable to init event notifier: %d", - __func__, r); - return r; - } - virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler); - memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4, - true, n, notifier); - } else { - memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4, - true, n, notifier); - virtio_queue_set_host_notifier_fd_handler(vq, false, false); - event_notifier_cleanup(notifier); - } - return r; + return proxy->ioeventfd_started; } -static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy) +static void virtio_mmio_ioeventfd_set_started(DeviceState *d, bool started, + bool err) { - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - int n, r; + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); - if (!kvm_eventfds_enabled() || - proxy->ioeventfd_disabled || - proxy->ioeventfd_started) { - return; - } + proxy->ioeventfd_started = started; +} - for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } +static bool virtio_mmio_ioeventfd_disabled(DeviceState *d) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); - r = virtio_mmio_set_host_notifier_internal(proxy, n, true, true); - if (r < 0) { - goto assign_error; - } - } - proxy->ioeventfd_started = true; - return; + return !kvm_eventfds_enabled() || proxy->ioeventfd_disabled; +} -assign_error: - while (--n >= 0) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } +static void virtio_mmio_ioeventfd_set_disabled(DeviceState *d, bool disabled) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); - r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false); - assert(r >= 0); - } - proxy->ioeventfd_started = false; - error_report("%s: failed. 
Fallback to a userspace (slower).", __func__); + proxy->ioeventfd_disabled = disabled; } -static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy) +static int virtio_mmio_ioeventfd_assign(DeviceState *d, + EventNotifier *notifier, + int n, bool assign) { - int r; - int n; - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); - if (!proxy->ioeventfd_started) { - return; + if (assign) { + memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4, + true, n, notifier); + } else { + memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4, + true, n, notifier); } + return 0; +} - for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } +static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy) +{ + virtio_bus_start_ioeventfd(&proxy->bus); +} - r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false); - assert(r >= 0); - } - proxy->ioeventfd_started = false; +static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy) +{ + virtio_bus_stop_ioeventfd(&proxy->bus); } static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size) @@ -498,25 +467,6 @@ assign_error: return r; } -static int virtio_mmio_set_host_notifier(DeviceState *opaque, int n, - bool assign) -{ - VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); - - /* Stop using ioeventfd for virtqueue kick if the device starts using host - * notifiers. This makes it easy to avoid stepping on each others' toes. - */ - proxy->ioeventfd_disabled = assign; - if (assign) { - virtio_mmio_stop_ioeventfd(proxy); - } - /* We don't need to start here: it's not needed because backend - * currently only stops on status change away from ok, - * reset, vmstop and such. If we do add code to start here, - * need to check vmstate, device state etc. 
*/ - return virtio_mmio_set_host_notifier_internal(proxy, n, assign, false); -} - /* virtio-mmio device */ static void virtio_mmio_realizefn(DeviceState *d, Error **errp) @@ -558,8 +508,12 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data) k->notify = virtio_mmio_update_irq; k->save_config = virtio_mmio_save_config; k->load_config = virtio_mmio_load_config; - k->set_host_notifier = virtio_mmio_set_host_notifier; k->set_guest_notifiers = virtio_mmio_set_guest_notifiers; + k->ioeventfd_started = virtio_mmio_ioeventfd_started; + k->ioeventfd_set_started = virtio_mmio_ioeventfd_set_started; + k->ioeventfd_disabled = virtio_mmio_ioeventfd_disabled; + k->ioeventfd_set_disabled = virtio_mmio_ioeventfd_set_disabled; + k->ioeventfd_assign = virtio_mmio_ioeventfd_assign; k->has_variable_vring_alignment = true; bus_class->max_dev = 1; } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 1a0278304b..2b34b43060 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -262,14 +262,44 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) return 0; } +static bool virtio_pci_ioeventfd_started(DeviceState *d) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + + return proxy->ioeventfd_started; +} + +static void virtio_pci_ioeventfd_set_started(DeviceState *d, bool started, + bool err) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + + proxy->ioeventfd_started = started; +} + +static bool virtio_pci_ioeventfd_disabled(DeviceState *d) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + + return proxy->ioeventfd_disabled || + !(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD); +} + +static void virtio_pci_ioeventfd_set_disabled(DeviceState *d, bool disabled) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + + proxy->ioeventfd_disabled = disabled; +} + #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000 -static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy, - int n, bool assign, bool set_handler) +static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, + int n, bool assign) { + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtQueue *vq = virtio_get_queue(vdev, n); - EventNotifier *notifier = virtio_queue_get_host_notifier(vq); bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY); bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN); bool fast_mmio = kvm_ioeventfd_any_length_enabled(); @@ -280,16 +310,8 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy, hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * virtio_get_queue_index(vq); hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY; - int r = 0; if (assign) { - r = event_notifier_init(notifier, 1); - if (r < 0) { - error_report("%s: unable to init event notifier: %d", - __func__, r); - return r; - } - virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler); if (modern) { if (fast_mmio) { memory_region_add_eventfd(modern_mr, modern_addr, 0, @@ -325,68 +347,18 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy, memory_region_del_eventfd(legacy_mr, legacy_addr, 2, true, n, notifier); } - virtio_queue_set_host_notifier_fd_handler(vq, false, false); - event_notifier_cleanup(notifier); } - return r; + return 0; } static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy) { - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - int n, r; - - if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) || - 
proxy->ioeventfd_disabled || - proxy->ioeventfd_started) { - return; - } - - for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } - - r = virtio_pci_set_host_notifier_internal(proxy, n, true, true); - if (r < 0) { - goto assign_error; - } - } - proxy->ioeventfd_started = true; - return; - -assign_error: - while (--n >= 0) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } - - r = virtio_pci_set_host_notifier_internal(proxy, n, false, false); - assert(r >= 0); - } - proxy->ioeventfd_started = false; - error_report("%s: failed. Fallback to a userspace (slower).", __func__); + virtio_bus_start_ioeventfd(&proxy->bus); } static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy) { - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - int r; - int n; - - if (!proxy->ioeventfd_started) { - return; - } - - for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { - if (!virtio_queue_get_num(vdev, n)) { - continue; - } - - r = virtio_pci_set_host_notifier_internal(proxy, n, false, false); - assert(r >= 0); - } - proxy->ioeventfd_started = false; + virtio_bus_stop_ioeventfd(&proxy->bus); } static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) @@ -1110,24 +1082,6 @@ assign_error: return r; } -static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign) -{ - VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); - - /* Stop using ioeventfd for virtqueue kick if the device starts using host - * notifiers. This makes it easy to avoid stepping on each others' toes. - */ - proxy->ioeventfd_disabled = assign; - if (assign) { - virtio_pci_stop_ioeventfd(proxy); - } - /* We don't need to start here: it's not needed because backend - * currently only stops on status change away from ok, - * reset, vmstop and such. If we do add code to start here, - * need to check vmstate, device state etc. 
*/ - return virtio_pci_set_host_notifier_internal(proxy, n, assign, false); -} - static void virtio_pci_vmstate_change(DeviceState *d, bool running) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); @@ -2488,12 +2442,16 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) k->load_extra_state = virtio_pci_load_extra_state; k->has_extra_state = virtio_pci_has_extra_state; k->query_guest_notifiers = virtio_pci_query_guest_notifiers; - k->set_host_notifier = virtio_pci_set_host_notifier; k->set_guest_notifiers = virtio_pci_set_guest_notifiers; k->vmstate_change = virtio_pci_vmstate_change; k->device_plugged = virtio_pci_device_plugged; k->device_unplugged = virtio_pci_device_unplugged; k->query_nvectors = virtio_pci_query_nvectors; + k->ioeventfd_started = virtio_pci_ioeventfd_started; + k->ioeventfd_set_started = virtio_pci_ioeventfd_set_started; + k->ioeventfd_disabled = virtio_pci_ioeventfd_disabled; + k->ioeventfd_set_disabled = virtio_pci_ioeventfd_set_disabled; + k->ioeventfd_assign = virtio_pci_ioeventfd_assign; } static const TypeInfo virtio_pci_bus_info = { diff --git a/include/elf.h b/include/elf.h index 8533b2a8b0..745739ab8c 100644 --- a/include/elf.h +++ b/include/elf.h @@ -53,6 +53,8 @@ typedef int64_t Elf64_Sxword; #define EF_MIPS_OPTIONS_FIRST 0x00000080 #define EF_MIPS_32BITMODE 0x00000100 #define EF_MIPS_ABI 0x0000f000 +#define EF_MIPS_FP64 0x00000200 +#define EF_MIPS_NAN2008 0x00000400 #define EF_MIPS_ARCH 0xf0000000 /* These constants define the different elf file types */ diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h index c937062530..95a11032d1 100644 --- a/include/fpu/softfloat.h +++ b/include/fpu/softfloat.h @@ -205,6 +205,7 @@ typedef struct float_status { /* should denormalised inputs go to zero and set the input_denormal flag? */ flag flush_inputs_to_zero; flag default_nan_mode; + flag snan_bit_is_one; } float_status; static inline void set_float_detect_tininess(int val, float_status *status) @@ -236,6 +237,10 @@ static inline void set_default_nan_mode(flag val, float_status *status) { status->default_nan_mode = val; } +static inline void set_snan_bit_is_one(flag val, float_status *status) +{ + status->snan_bit_is_one = val; +} static inline int get_float_detect_tininess(float_status *status) { return status->float_detect_tininess; @@ -342,9 +347,9 @@ float64 float16_to_float64(float16 a, flag ieee, float_status *status); /*---------------------------------------------------------------------------- | Software half-precision operations. *----------------------------------------------------------------------------*/ -int float16_is_quiet_nan( float16 ); -int float16_is_signaling_nan( float16 ); -float16 float16_maybe_silence_nan( float16 ); +int float16_is_quiet_nan(float16, float_status *status); +int float16_is_signaling_nan(float16, float_status *status); +float16 float16_maybe_silence_nan(float16, float_status *status); static inline int float16_is_any_nan(float16 a) { @@ -354,7 +359,7 @@ static inline int float16_is_any_nan(float16 a) /*---------------------------------------------------------------------------- | The pattern for a default generated half-precision NaN. *----------------------------------------------------------------------------*/ -extern const float16 float16_default_nan; +float16 float16_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE single-precision conversion routines. 
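The softfloat hunks above replace the global default-NaN constants and the argument-less NaN predicates with functions that take a float_status, so per-target NaN behaviour (including the new snan_bit_is_one flag) travels with the status word instead of being fixed at build time; the hunks that follow apply the same pattern to the wider formats. A minimal sketch of driving the new interface is below; cpu_fpu_set_nan_mode is a hypothetical helper, only the softfloat calls mirror the prototypes declared in this diff.

/* Sketch only: select the sNaN encoding on a float_status and query the
 * matching default NaN. cpu_fpu_set_nan_mode is illustrative and not
 * part of the patch. */
#include <assert.h>
#include <stdbool.h>
#include "fpu/softfloat.h"

static void cpu_fpu_set_nan_mode(float_status *status, bool nan2008)
{
    /* Legacy MIPS marks signalling NaNs with the most-significant
     * fraction bit set; the IEEE 754-2008 encoding is the opposite. */
    set_snan_bit_is_one(!nan2008, status);

    /* The default NaN pattern is now derived from the status rather
     * than read from a global constant. */
    float16 dnan = float16_default_nan(status);

    /* The predicates likewise need the status to interpret the sNaN
     * bit; a default NaN is quiet under either convention. */
    assert(!float16_is_signaling_nan(dnan, status));
}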
@@ -404,9 +409,9 @@ float32 float32_minnum(float32, float32, float_status *status); float32 float32_maxnum(float32, float32, float_status *status); float32 float32_minnummag(float32, float32, float_status *status); float32 float32_maxnummag(float32, float32, float_status *status); -int float32_is_quiet_nan( float32 ); -int float32_is_signaling_nan( float32 ); -float32 float32_maybe_silence_nan( float32 ); +int float32_is_quiet_nan(float32, float_status *status); +int float32_is_signaling_nan(float32, float_status *status); +float32 float32_maybe_silence_nan(float32, float_status *status); float32 float32_scalbn(float32, int, float_status *status); static inline float32 float32_abs(float32 a) @@ -466,7 +471,7 @@ static inline float32 float32_set_sign(float32 a, int sign) /*---------------------------------------------------------------------------- | The pattern for a default generated single-precision NaN. *----------------------------------------------------------------------------*/ -extern const float32 float32_default_nan; +float32 float32_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE double-precision conversion routines. @@ -516,9 +521,9 @@ float64 float64_minnum(float64, float64, float_status *status); float64 float64_maxnum(float64, float64, float_status *status); float64 float64_minnummag(float64, float64, float_status *status); float64 float64_maxnummag(float64, float64, float_status *status); -int float64_is_quiet_nan( float64 a ); -int float64_is_signaling_nan( float64 ); -float64 float64_maybe_silence_nan( float64 ); +int float64_is_quiet_nan(float64 a, float_status *status); +int float64_is_signaling_nan(float64, float_status *status); +float64 float64_maybe_silence_nan(float64, float_status *status); float64 float64_scalbn(float64, int, float_status *status); static inline float64 float64_abs(float64 a) @@ -578,7 +583,7 @@ static inline float64 float64_set_sign(float64 a, int sign) /*---------------------------------------------------------------------------- | The pattern for a default generated double-precision NaN. *----------------------------------------------------------------------------*/ -extern const float64 float64_default_nan; +float64 float64_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision conversion routines. @@ -611,9 +616,9 @@ int floatx80_lt_quiet(floatx80, floatx80, float_status *status); int floatx80_unordered_quiet(floatx80, floatx80, float_status *status); int floatx80_compare(floatx80, floatx80, float_status *status); int floatx80_compare_quiet(floatx80, floatx80, float_status *status); -int floatx80_is_quiet_nan( floatx80 ); -int floatx80_is_signaling_nan( floatx80 ); -floatx80 floatx80_maybe_silence_nan( floatx80 ); +int floatx80_is_quiet_nan(floatx80, float_status *status); +int floatx80_is_signaling_nan(floatx80, float_status *status); +floatx80 floatx80_maybe_silence_nan(floatx80, float_status *status); floatx80 floatx80_scalbn(floatx80, int, float_status *status); static inline floatx80 floatx80_abs(floatx80 a) @@ -663,7 +668,7 @@ static inline int floatx80_is_any_nan(floatx80 a) /*---------------------------------------------------------------------------- | The pattern for a default generated extended double-precision NaN. 
*----------------------------------------------------------------------------*/ -extern const floatx80 floatx80_default_nan; +floatx80 floatx80_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE quadruple-precision conversion routines. @@ -696,9 +701,9 @@ int float128_lt_quiet(float128, float128, float_status *status); int float128_unordered_quiet(float128, float128, float_status *status); int float128_compare(float128, float128, float_status *status); int float128_compare_quiet(float128, float128, float_status *status); -int float128_is_quiet_nan( float128 ); -int float128_is_signaling_nan( float128 ); -float128 float128_maybe_silence_nan( float128 ); +int float128_is_quiet_nan(float128, float_status *status); +int float128_is_signaling_nan(float128, float_status *status); +float128 float128_maybe_silence_nan(float128, float_status *status); float128 float128_scalbn(float128, int, float_status *status); static inline float128 float128_abs(float128 a) @@ -744,6 +749,6 @@ static inline int float128_is_any_nan(float128 a) /*---------------------------------------------------------------------------- | The pattern for a default generated quadruple-precision NaN. *----------------------------------------------------------------------------*/ -extern const float128 float128_default_nan; +float128 float128_default_nan(float_status *status); #endif /* !SOFTFLOAT_H */ diff --git a/include/hw/arm/ast2400.h b/include/hw/arm/ast2400.h index c05ed53767..f1a64fd389 100644 --- a/include/hw/arm/ast2400.h +++ b/include/hw/arm/ast2400.h @@ -14,6 +14,7 @@ #include "hw/arm/arm.h" #include "hw/intc/aspeed_vic.h" +#include "hw/misc/aspeed_scu.h" #include "hw/timer/aspeed_timer.h" #include "hw/i2c/aspeed_i2c.h" @@ -27,6 +28,7 @@ typedef struct AST2400State { AspeedVICState vic; AspeedTimerCtrlState timerctrl; AspeedI2CState i2c; + AspeedSCUState scu; } AST2400State; #define TYPE_AST2400 "ast2400" diff --git a/include/hw/cpu/core.h b/include/hw/cpu/core.h index 4540a7d34f..79ac79c29c 100644 --- a/include/hw/cpu/core.h +++ b/include/hw/cpu/core.h @@ -26,6 +26,9 @@ typedef struct CPUCore { int nr_threads; } CPUCore; +/* Note: topology field names need to be kept in sync with + * 'CpuInstanceProperties' */ + #define CPU_CORE_PROP_CORE_ID "core-id" #endif diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h new file mode 100644 index 0000000000..6b8e46f85f --- /dev/null +++ b/include/hw/misc/aspeed_scu.h @@ -0,0 +1,34 @@ +/* + * ASPEED System Control Unit + * + * Andrew Jeffery <andrew@aj.id.au> + * + * Copyright 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. 
+ */ +#ifndef ASPEED_SCU_H +#define ASPEED_SCU_H + +#include "hw/sysbus.h" + +#define TYPE_ASPEED_SCU "aspeed.scu" +#define ASPEED_SCU(obj) OBJECT_CHECK(AspeedSCUState, (obj), TYPE_ASPEED_SCU) + +#define ASPEED_SCU_NR_REGS (0x1A8 >> 2) + +typedef struct AspeedSCUState { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + MemoryRegion iomem; + + uint32_t regs[ASPEED_SCU_NR_REGS]; + uint32_t silicon_rev; + uint32_t hw_strap1; + uint32_t hw_strap2; +} AspeedSCUState; + +#endif /* ASPEED_SCU_H */ diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h index 9091054003..6925677197 100644 --- a/include/hw/ppc/xics.h +++ b/include/hw/ppc/xics.h @@ -160,7 +160,6 @@ struct ICSIRQState { #define XICS_IRQS 1024 qemu_irq xics_get_qirq(XICSState *icp, int irq); -void xics_set_irq_type(XICSState *icp, int irq, bool lsi); int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp); int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align, Error **errp); diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h index 8f2b056515..e9bf463f53 100644 --- a/include/hw/virtio/virtio-blk.h +++ b/include/hw/virtio/virtio-blk.h @@ -38,6 +38,7 @@ struct VirtIOBlkConf uint32_t scsi; uint32_t config_wce; uint32_t request_merging; + uint16_t num_queues; }; struct VirtIOBlockDataPlane; @@ -46,7 +47,6 @@ struct VirtIOBlockReq; typedef struct VirtIOBlock { VirtIODevice parent_obj; BlockBackend *blk; - VirtQueue *vq; void *rq; QEMUBH *bh; VirtIOBlkConf conf; @@ -62,6 +62,7 @@ typedef struct VirtIOBlockReq { VirtQueueElement elem; int64_t sector_num; VirtIOBlock *dev; + VirtQueue *vq; struct virtio_blk_inhdr *in; struct virtio_blk_outhdr out; QEMUIOVector qiov; @@ -79,7 +80,8 @@ typedef struct MultiReqBuffer { bool is_write; } MultiReqBuffer; -void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req); +void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, + VirtIOBlockReq *req); void virtio_blk_free_request(VirtIOBlockReq *req); void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb); diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h index 3f2c1363d0..f3e5ef3f5b 100644 --- a/include/hw/virtio/virtio-bus.h +++ b/include/hw/virtio/virtio-bus.h @@ -52,7 +52,6 @@ typedef struct VirtioBusClass { bool (*has_extra_state)(DeviceState *d); bool (*query_guest_notifiers)(DeviceState *d); int (*set_guest_notifiers)(DeviceState *d, int nvqs, bool assign); - int (*set_host_notifier)(DeviceState *d, int n, bool assigned); void (*vmstate_change)(DeviceState *d, bool running); /* * transport independent init function. @@ -71,6 +70,29 @@ typedef struct VirtioBusClass { void (*device_unplugged)(DeviceState *d); int (*query_nvectors)(DeviceState *d); /* + * ioeventfd handling: if the transport implements ioeventfd_started, + * it must implement the other ioeventfd callbacks as well + */ + /* Returns true if the ioeventfd has been started for the device. */ + bool (*ioeventfd_started)(DeviceState *d); + /* + * Sets the 'ioeventfd started' state after the ioeventfd has been + * started/stopped for the device. err signifies whether an error + * had occurred. + */ + void (*ioeventfd_set_started)(DeviceState *d, bool started, bool err); + /* Returns true if the ioeventfd has been disabled for the device. */ + bool (*ioeventfd_disabled)(DeviceState *d); + /* Sets the 'ioeventfd disabled' state for the device. 
*/ + void (*ioeventfd_set_disabled)(DeviceState *d, bool disabled); + /* + * Assigns/deassigns the ioeventfd backing for the transport on + * the device for queue number n. Returns an error value on + * failure. + */ + int (*ioeventfd_assign)(DeviceState *d, EventNotifier *notifier, + int n, bool assign); + /* * Does the transport have variable vring alignment? * (ie can it ever call virtio_queue_set_align()?) * Note that changing this will break migration for this transport. @@ -111,4 +133,11 @@ static inline VirtIODevice *virtio_bus_get_device(VirtioBusState *bus) return (VirtIODevice *)qdev; } +/* Start the ioeventfd. */ +void virtio_bus_start_ioeventfd(VirtioBusState *bus); +/* Stop the ioeventfd. */ +void virtio_bus_stop_ioeventfd(VirtioBusState *bus); +/* Switch from/to the generic ioeventfd handler */ +int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign); + #endif /* VIRTIO_BUS_H */ diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h index 5dd2648169..462033a4de 100644 --- a/include/qemu/sockets.h +++ b/include/qemu/sockets.h @@ -111,4 +111,18 @@ SocketAddress *socket_remote_address(int fd, Error **errp); void qapi_copy_SocketAddress(SocketAddress **p_dest, SocketAddress *src); +/** + * socket_address_to_string: + * @addr: the socket address struct + * @errp: pointer to uninitialized error object + * + * Get the string representation of the socket + * address. A pointer to the char array containing + * string format will be returned, the caller is + * required to release the returned value when no + * longer required with g_free. + * + * Returns: the socket address in string format, or NULL on error + */ +char *socket_address_to_string(struct SocketAddress *addr, Error **errp); #endif /* QEMU_SOCKET_H */ diff --git a/linux-user/host/aarch64/hostdep.h b/linux-user/host/aarch64/hostdep.h new file mode 100644 index 0000000000..b79eaf1811 --- /dev/null +++ b/linux-user/host/aarch64/hostdep.h @@ -0,0 +1,38 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +/* We have a safe-syscall.inc.S */ +#define HAVE_SAFE_SYSCALL + +#ifndef __ASSEMBLER__ + +/* These are defined by the safe-syscall.inc.S file */ +extern char safe_syscall_start[]; +extern char safe_syscall_end[]; + +/* Adjust the signal context to rewind out of safe-syscall if we're in it */ +static inline void rewind_if_in_safe_syscall(void *puc) +{ + struct ucontext *uc = puc; + __u64 *pcreg = &uc->uc_mcontext.pc; + + if (*pcreg > (uintptr_t)safe_syscall_start + && *pcreg < (uintptr_t)safe_syscall_end) { + *pcreg = (uintptr_t)safe_syscall_start; + } +} + +#endif /* __ASSEMBLER__ */ + +#endif diff --git a/linux-user/host/aarch64/safe-syscall.inc.S b/linux-user/host/aarch64/safe-syscall.inc.S new file mode 100644 index 0000000000..58a2329b37 --- /dev/null +++ b/linux-user/host/aarch64/safe-syscall.inc.S @@ -0,0 +1,75 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by linux-user/safe-syscall.S + * + * Written by Richard Henderson <rth@twiddle.net> + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, #function + .type safe_syscall_start, #function + .type safe_syscall_end, #function + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + * We return a long which is the syscall's return value, which + * may be negative-errno on failure. Conversion to the + * -1-and-errno-set convention is done by the calling wrapper. + */ +safe_syscall_base: + .cfi_startproc + /* The syscall calling convention isn't the same as the + * C one: + * we enter with x0 == *signal_pending + * x1 == syscall number + * x2 ... x7, (stack) == syscall arguments + * and return the result in x0 + * and the syscall instruction needs + * x8 == syscall number + * x0 ... x7 == syscall arguments + * and returns the result in x0 + * Shuffle everything around appropriately. + */ + mov x9, x0 /* signal_pending pointer */ + mov x8, x1 /* syscall number */ + mov x0, x2 /* syscall arguments */ + mov x1, x3 + mov x2, x4 + mov x3, x5 + mov x4, x6 + mov x6, x7 + ldr x7, [sp] + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + ldr w10, [x9] + cbnz w10, 0f + svc 0x0 +safe_syscall_end: + /* code path for having successfully executed the syscall */ + ret + +0: + /* code path when we didn't execute the syscall */ + mov x0, #-TARGET_ERESTARTSYS + ret + .cfi_endproc + + .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/arm/hostdep.h b/linux-user/host/arm/hostdep.h new file mode 100644 index 0000000000..8e1ff2ffc5 --- /dev/null +++ b/linux-user/host/arm/hostdep.h @@ -0,0 +1,38 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +/* We have a safe-syscall.inc.S */ +#define HAVE_SAFE_SYSCALL + +#ifndef __ASSEMBLER__ + +/* These are defined by the safe-syscall.inc.S file */ +extern char safe_syscall_start[]; +extern char safe_syscall_end[]; + +/* Adjust the signal context to rewind out of safe-syscall if we're in it */ +static inline void rewind_if_in_safe_syscall(void *puc) +{ + struct ucontext *uc = puc; + unsigned long *pcreg = &uc->uc_mcontext.arm_pc; + + if (*pcreg > (uintptr_t)safe_syscall_start + && *pcreg < (uintptr_t)safe_syscall_end) { + *pcreg = (uintptr_t)safe_syscall_start; + } +} + +#endif /* __ASSEMBLER__ */ + +#endif diff --git a/linux-user/host/arm/safe-syscall.inc.S b/linux-user/host/arm/safe-syscall.inc.S new file mode 100644 index 0000000000..88c4958504 --- /dev/null +++ b/linux-user/host/arm/safe-syscall.inc.S @@ -0,0 +1,90 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by linux-user/safe-syscall.S + * + * Written by Richard Henderson <rth@twiddle.net> + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, %function + + .cfi_sections .debug_frame + + .text + .syntax unified + .arm + .align 2 + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + * We return a long which is the syscall's return value, which + * may be negative-errno on failure. Conversion to the + * -1-and-errno-set convention is done by the calling wrapper. + */ +safe_syscall_base: + .fnstart + .cfi_startproc + mov r12, sp /* save entry stack */ + push { r4, r5, r6, r7, r8, lr } + .save { r4, r5, r6, r7, r8, lr } + .cfi_adjust_cfa_offset 24 + .cfi_rel_offset r4, 0 + .cfi_rel_offset r5, 4 + .cfi_rel_offset r6, 8 + .cfi_rel_offset r7, 12 + .cfi_rel_offset r8, 16 + .cfi_rel_offset lr, 20 + + /* The syscall calling convention isn't the same as the C one: + * we enter with r0 == *signal_pending + * r1 == syscall number + * r2, r3, [sp+0] ... [sp+12] == syscall arguments + * and return the result in r0 + * and the syscall instruction needs + * r7 == syscall number + * r0 ... r6 == syscall arguments + * and returns the result in r0 + * Shuffle everything around appropriately. + * Note the 16 bytes that we pushed to save registers. + */ + mov r8, r0 /* copy signal_pending */ + mov r7, r1 /* syscall number */ + mov r0, r2 /* syscall args */ + mov r1, r3 + ldm r12, { r2, r3, r4, r5, r6 } + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. 
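For orientation, the comment above (and its siblings in the other safe-syscall.inc.S fragments) fixes the convention shared by every host: safe_syscall_base is called like a C varargs function whose first argument points at the per-thread signal_pending flag and whose second is the syscall number, and a signal that is already pending makes it return -TARGET_ERESTARTSYS without entering the kernel. A rough sketch of how C code could sit on top of it follows; the wrapper macro and the flag variable are assumptions for illustration, not taken from this patch.

/* Hypothetical C-side wrapper over the assembly entry point described
 * above; QEMU's real wrapper lives elsewhere and may differ. */
#include <stddef.h>
#include <sys/syscall.h>

extern long safe_syscall_base(int *pending, long number, ...);

extern __thread int signal_pending;    /* assumed per-thread flag */

#define safe_syscall(nr, ...) \
    safe_syscall_base(&signal_pending, (long)(nr), ##__VA_ARGS__)

/* Example: a guest read() emulation. The call returns either the raw
 * syscall result (negative errno on failure) or -TARGET_ERESTARTSYS if
 * a signal was already flagged when the pending check ran. */
static long do_guest_read(int fd, void *buf, size_t len)
{
    return safe_syscall(SYS_read, fd, buf, len);
}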
+ */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + ldr r12, [r8] /* signal_pending */ + tst r12, r12 + bne 1f + swi 0 +safe_syscall_end: + /* code path for having successfully executed the syscall */ + pop { r4, r5, r6, r7, r8, pc } + +1: + /* code path when we didn't execute the syscall */ + ldr r0, =-TARGET_ERESTARTSYS + pop { r4, r5, r6, r7, r8, pc } + .fnend + .cfi_endproc + + .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/i386/hostdep.h b/linux-user/host/i386/hostdep.h new file mode 100644 index 0000000000..5a12f4adce --- /dev/null +++ b/linux-user/host/i386/hostdep.h @@ -0,0 +1,38 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +/* We have a safe-syscall.inc.S */ +#define HAVE_SAFE_SYSCALL + +#ifndef __ASSEMBLER__ + +/* These are defined by the safe-syscall.inc.S file */ +extern char safe_syscall_start[]; +extern char safe_syscall_end[]; + +/* Adjust the signal context to rewind out of safe-syscall if we're in it */ +static inline void rewind_if_in_safe_syscall(void *puc) +{ + struct ucontext *uc = puc; + greg_t *pcreg = &uc->uc_mcontext.gregs[REG_EIP]; + + if (*pcreg > (uintptr_t)safe_syscall_start + && *pcreg < (uintptr_t)safe_syscall_end) { + *pcreg = (uintptr_t)safe_syscall_start; + } +} + +#endif /* __ASSEMBLER__ */ + +#endif diff --git a/linux-user/host/i386/safe-syscall.inc.S b/linux-user/host/i386/safe-syscall.inc.S new file mode 100644 index 0000000000..766d0ded98 --- /dev/null +++ b/linux-user/host/i386/safe-syscall.inc.S @@ -0,0 +1,112 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by linux-user/safe-syscall.S + * + * Written by Richard Henderson <rth@twiddle.net> + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + * We return a long which is the syscall's return value, which + * may be negative-errno on failure. Conversion to the + * -1-and-errno-set convention is done by the calling wrapper. + */ +safe_syscall_base: + .cfi_startproc + push %ebp + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset ebp, 0 + push %esi + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset esi, 0 + push %edi + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset edi, 0 + push %ebx + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset ebx, 0 + + /* The syscall calling convention isn't the same as the C one: + * we enter with 0(%esp) == return address + * 4(%esp) == *signal_pending + * 8(%esp) == syscall number + * 12(%esp) ... 
32(%esp) == syscall arguments + * and return the result in eax + * and the syscall instruction needs + * eax == syscall number + * ebx, ecx, edx, esi, edi, ebp == syscall arguments + * and returns the result in eax + * Shuffle everything around appropriately. + * Note the 16 bytes that we pushed to save registers. + */ + mov 12+16(%esp), %ebx /* the syscall arguments */ + mov 16+16(%esp), %ecx + mov 20+16(%esp), %edx + mov 24+16(%esp), %esi + mov 28+16(%esp), %edi + mov 32+16(%esp), %ebp + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + mov 4+16(%esp), %eax /* signal_pending */ + cmp $0, (%eax) + jnz 1f + mov 8+16(%esp), %eax /* syscall number */ + int $0x80 +safe_syscall_end: + /* code path for having successfully executed the syscall */ + pop %ebx + .cfi_remember_state + .cfi_def_cfa_offset -4 + .cfi_restore ebx + pop %edi + .cfi_def_cfa_offset -4 + .cfi_restore edi + pop %esi + .cfi_def_cfa_offset -4 + .cfi_restore esi + pop %ebp + .cfi_def_cfa_offset -4 + .cfi_restore ebp + ret + +1: + /* code path when we didn't execute the syscall */ + .cfi_restore_state + mov $-TARGET_ERESTARTSYS, %eax + pop %ebx + .cfi_def_cfa_offset -4 + .cfi_restore ebx + pop %edi + .cfi_def_cfa_offset -4 + .cfi_restore edi + pop %esi + .cfi_def_cfa_offset -4 + .cfi_restore esi + pop %ebp + .cfi_def_cfa_offset -4 + .cfi_restore ebp + ret + .cfi_endproc + + .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/generic/hostdep.h b/linux-user/host/ia64/hostdep.h index cfabc3590b..7609bf5cd7 100644 --- a/linux-user/host/generic/hostdep.h +++ b/linux-user/host/ia64/hostdep.h @@ -1,6 +1,5 @@ /* - * hostdep.h : fallback generic version of header for things - * which are dependent on the host architecture + * hostdep.h : things which are dependent on the host architecture * * * Written by Peter Maydell <peter.maydell@linaro.org> * @@ -13,8 +12,4 @@ #ifndef QEMU_HOSTDEP_H #define QEMU_HOSTDEP_H -/* This is the fallback header which is only used if the host - * architecture doesn't provide one in linux-user/host/$ARCH. - */ - #endif diff --git a/linux-user/host/mips/hostdep.h b/linux-user/host/mips/hostdep.h new file mode 100644 index 0000000000..7609bf5cd7 --- /dev/null +++ b/linux-user/host/mips/hostdep.h @@ -0,0 +1,15 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +#endif diff --git a/linux-user/host/ppc/hostdep.h b/linux-user/host/ppc/hostdep.h new file mode 100644 index 0000000000..7609bf5cd7 --- /dev/null +++ b/linux-user/host/ppc/hostdep.h @@ -0,0 +1,15 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +#endif diff --git a/linux-user/host/ppc64/hostdep.h b/linux-user/host/ppc64/hostdep.h new file mode 100644 index 0000000000..310e7d1b73 --- /dev/null +++ b/linux-user/host/ppc64/hostdep.h @@ -0,0 +1,38 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +/* We have a safe-syscall.inc.S */ +#define HAVE_SAFE_SYSCALL + +#ifndef __ASSEMBLER__ + +/* These are defined by the safe-syscall.inc.S file */ +extern char safe_syscall_start[]; +extern char safe_syscall_end[]; + +/* Adjust the signal context to rewind out of safe-syscall if we're in it */ +static inline void rewind_if_in_safe_syscall(void *puc) +{ + struct ucontext *uc = puc; + unsigned long *pcreg = &uc->uc_mcontext.gp_regs[PT_NIP]; + + if (*pcreg > (uintptr_t)safe_syscall_start + && *pcreg < (uintptr_t)safe_syscall_end) { + *pcreg = (uintptr_t)safe_syscall_start; + } +} + +#endif /* __ASSEMBLER__ */ + +#endif diff --git a/linux-user/host/ppc64/safe-syscall.inc.S b/linux-user/host/ppc64/safe-syscall.inc.S new file mode 100644 index 0000000000..d30050a67c --- /dev/null +++ b/linux-user/host/ppc64/safe-syscall.inc.S @@ -0,0 +1,92 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by linux-user/safe-syscall.S + * + * Written by Richard Henderson <rth@twiddle.net> + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + + .text + + /* This is the entry point for making a system call. The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + * We return a long which is the syscall's return value, which + * may be negative-errno on failure. Conversion to the + * -1-and-errno-set convention is done by the calling wrapper. + */ +#if _CALL_ELF == 2 +safe_syscall_base: + .cfi_startproc + .localentry safe_syscall_base,0 +#else + .section ".opd","aw" + .align 3 +safe_syscall_base: + .quad .L.safe_syscall_base,.TOC.@tocbase,0 + .previous +.L.safe_syscall_base: + .cfi_startproc +#endif + /* We enter with r3 == *signal_pending + * r4 == syscall number + * r5 ... r10 == syscall arguments + * and return the result in r3 + * and the syscall instruction needs + * r0 == syscall number + * r3 ... r8 == syscall arguments + * and returns the result in r3 + * Shuffle everything around appropriately. + */ + mr 11, 3 /* signal_pending */ + mr 0, 4 /* syscall number */ + mr 3, 5 /* syscall arguments */ + mr 4, 6 + mr 5, 7 + mr 6, 8 + mr 7, 9 + mr 8, 10 + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). 
If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + lwz 12, 0(11) + cmpwi 0, 12, 0 + bne- 0f + sc +safe_syscall_end: + /* code path when we did execute the syscall */ + bnslr+ + + /* syscall failed; return negative errno */ + neg 3, 3 + blr + + /* code path when we didn't execute the syscall */ +0: addi 3, 0, -TARGET_ERESTARTSYS + blr + .cfi_endproc + +#if _CALL_ELF == 2 + .size safe_syscall_base, .-safe_syscall_base +#else + .size safe_syscall_base, .-.L.safe_syscall_base + .size .L.safe_syscall_base, .-.L.safe_syscall_base +#endif diff --git a/linux-user/host/s390/hostdep.h b/linux-user/host/s390/hostdep.h new file mode 100644 index 0000000000..7609bf5cd7 --- /dev/null +++ b/linux-user/host/s390/hostdep.h @@ -0,0 +1,15 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +#endif diff --git a/linux-user/host/s390x/hostdep.h b/linux-user/host/s390x/hostdep.h new file mode 100644 index 0000000000..e95871c46a --- /dev/null +++ b/linux-user/host/s390x/hostdep.h @@ -0,0 +1,38 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +/* We have a safe-syscall.inc.S */ +#define HAVE_SAFE_SYSCALL + +#ifndef __ASSEMBLER__ + +/* These are defined by the safe-syscall.inc.S file */ +extern char safe_syscall_start[]; +extern char safe_syscall_end[]; + +/* Adjust the signal context to rewind out of safe-syscall if we're in it */ +static inline void rewind_if_in_safe_syscall(void *puc) +{ + struct ucontext *uc = puc; + unsigned long *pcreg = &uc->uc_mcontext.psw.addr; + + if (*pcreg > (uintptr_t)safe_syscall_start + && *pcreg < (uintptr_t)safe_syscall_end) { + *pcreg = (uintptr_t)safe_syscall_start; + } +} + +#endif /* __ASSEMBLER__ */ + +#endif diff --git a/linux-user/host/s390x/safe-syscall.inc.S b/linux-user/host/s390x/safe-syscall.inc.S new file mode 100644 index 0000000000..f1b446abf6 --- /dev/null +++ b/linux-user/host/s390x/safe-syscall.inc.S @@ -0,0 +1,90 @@ +/* + * safe-syscall.inc.S : host-specific assembly fragment + * to handle signals occurring at the same time as system calls. + * This is intended to be included by linux-user/safe-syscall.S + * + * Written by Richard Henderson <rth@twiddle.net> + * Copyright (C) 2016 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + + .global safe_syscall_base + .global safe_syscall_start + .global safe_syscall_end + .type safe_syscall_base, @function + + /* This is the entry point for making a system call. 
The calling + * convention here is that of a C varargs function with the + * first argument an 'int *' to the signal_pending flag, the + * second one the system call number (as a 'long'), and all further + * arguments being syscall arguments (also 'long'). + * We return a long which is the syscall's return value, which + * may be negative-errno on failure. Conversion to the + * -1-and-errno-set convention is done by the calling wrapper. + */ +safe_syscall_base: + .cfi_startproc + stmg %r6,%r15,48(%r15) /* save all call-saved registers */ + .cfi_offset %r15,-40 + .cfi_offset %r14,-48 + .cfi_offset %r13,-56 + .cfi_offset %r12,-64 + .cfi_offset %r11,-72 + .cfi_offset %r10,-80 + .cfi_offset %r9,-88 + .cfi_offset %r8,-96 + .cfi_offset %r7,-104 + .cfi_offset %r6,-112 + lgr %r1,%r15 + lg %r0,8(%r15) /* load eos */ + aghi %r15,-160 + .cfi_adjust_cfa_offset 160 + stg %r1,0(%r15) /* store back chain */ + stg %r0,8(%r15) /* store eos */ + + /* The syscall calling convention isn't the same as the + * C one: + * we enter with r2 == *signal_pending + * r3 == syscall number + * r4, r5, r6, (stack) == syscall arguments + * and return the result in r2 + * and the syscall instruction needs + * r1 == syscall number + * r2 ... r7 == syscall arguments + * and returns the result in r2 + * Shuffle everything around appropriately. + */ + lgr %r8,%r2 /* signal_pending pointer */ + lgr %r1,%r3 /* syscall number */ + lgr %r2,%r4 /* syscall args */ + lgr %r3,%r5 + lgr %r4,%r6 + lmg %r5,%r7,320(%r15) + + /* This next sequence of code works in conjunction with the + * rewind_if_safe_syscall_function(). If a signal is taken + * and the interrupted PC is anywhere between 'safe_syscall_start' + * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'. + * The code sequence must therefore be able to cope with this, and + * the syscall instruction must be the final one in the sequence. + */ +safe_syscall_start: + /* if signal_pending is non-zero, don't do the call */ + lt %r0,0(%r8) + jne 2f + svc 0 +safe_syscall_end: + +1: lg %r15,0(%r15) /* load back chain */ + .cfi_remember_state + .cfi_adjust_cfa_offset -160 + lmg %r6,%r15,48(%r15) /* load saved registers */ + br %r14 + .cfi_restore_state +2: lghi %r2, -TARGET_ERESTARTSYS + j 1b + .cfi_endproc + + .size safe_syscall_base, .-safe_syscall_base diff --git a/linux-user/host/sparc/hostdep.h b/linux-user/host/sparc/hostdep.h new file mode 100644 index 0000000000..7609bf5cd7 --- /dev/null +++ b/linux-user/host/sparc/hostdep.h @@ -0,0 +1,15 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +#endif diff --git a/linux-user/host/sparc64/hostdep.h b/linux-user/host/sparc64/hostdep.h new file mode 100644 index 0000000000..7609bf5cd7 --- /dev/null +++ b/linux-user/host/sparc64/hostdep.h @@ -0,0 +1,15 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +#endif diff --git a/linux-user/host/x32/hostdep.h b/linux-user/host/x32/hostdep.h new file mode 100644 index 0000000000..7609bf5cd7 --- /dev/null +++ b/linux-user/host/x32/hostdep.h @@ -0,0 +1,15 @@ +/* + * hostdep.h : things which are dependent on the host architecture + * + * * Written by Peter Maydell <peter.maydell@linaro.org> + * + * Copyright (C) 2016 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_HOSTDEP_H +#define QEMU_HOSTDEP_H + +#endif diff --git a/linux-user/host/x86_64/safe-syscall.inc.S b/linux-user/host/x86_64/safe-syscall.inc.S index e09368d450..f36992daa3 100644 --- a/linux-user/host/x86_64/safe-syscall.inc.S +++ b/linux-user/host/x86_64/safe-syscall.inc.S @@ -67,8 +67,8 @@ safe_syscall_base: */ safe_syscall_start: /* if signal_pending is non-zero, don't do the call */ - testl $1, (%rbp) - jnz return_ERESTARTSYS + cmpl $0, (%rbp) + jnz 1f syscall safe_syscall_end: /* code path for having successfully executed the syscall */ @@ -78,7 +78,7 @@ safe_syscall_end: .cfi_restore rbp ret -return_ERESTARTSYS: +1: /* code path when we didn't execute the syscall */ .cfi_restore_state mov $-TARGET_ERESTARTSYS, %rax diff --git a/linux-user/main.c b/linux-user/main.c index d9420cf7b5..617a179f14 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -4687,6 +4687,20 @@ int main(int argc, char **argv, char **envp) if (regs->cp0_epc & 1) { env->hflags |= MIPS_HFLAG_M16; } + if (((info->elf_flags & EF_MIPS_NAN2008) != 0) != + ((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) != 0)) { + if ((env->active_fpu.fcr31_rw_bitmask & + (1 << FCR31_NAN2008)) == 0) { + fprintf(stderr, "ELF binary's NaN mode not supported by CPU\n"); + exit(1); + } + if ((info->elf_flags & EF_MIPS_NAN2008) != 0) { + env->active_fpu.fcr31 |= (1 << FCR31_NAN2008); + } else { + env->active_fpu.fcr31 &= ~(1 << FCR31_NAN2008); + } + restore_snan_bit_mode(env); + } } #elif defined(TARGET_OPENRISC) { diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 56f29c35b5..e8a5aede95 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -20,6 +20,11 @@ #define THREAD __thread +/* This is the size of the host kernel's sigset_t, needed where we make + * direct system calls that take a sigset_t pointer and a size. + */ +#define SIGSET_T_SIZE (_NSIG / 8) + /* This struct is used to hold certain information about the image. * Basically, it replicates in user space what would be certain * task_struct fields in the kernel diff --git a/linux-user/signal.c b/linux-user/signal.c index 1dadddf2dd..9d980456ec 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -278,6 +278,14 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, tinfo->si_errno = 0; tinfo->si_code = info->si_code; + /* This memset serves two purposes: + * (1) ensure we don't leak random junk to the guest later + * (2) placate false positives from gcc about fields + * being used uninitialized if it chooses to inline both this + * function and tswap_siginfo() into host_to_target_siginfo(). + */ + memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad)); + /* This is awkward, because we have to use a combination of * the si_code and si_signo to figure out which of the union's * members are valid. 
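A note on the SIGSET_T_SIZE constant added to linux-user/qemu.h above: the kernel's sigset_t is only _NSIG/8 bytes, noticeably smaller than glibc's sigset_t, so any raw syscall that takes a sigset pointer plus a size must pass the kernel's size explicitly. A standalone sketch of such a direct call, not part of the patch:

/* Illustrative only: block every signal via a raw rt_sigprocmask call.
 * block_all_signals_direct is a hypothetical helper; the point is the
 * final size argument, which must be the kernel's _NSIG / 8 bytes
 * rather than sizeof(sigset_t) from libc. */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SIGSET_T_SIZE (_NSIG / 8)

static int block_all_signals_direct(sigset_t *oldset)
{
    sigset_t all;

    sigfillset(&all);
    /* glibc's sigprocmask() supplies its own size; a direct syscall
     * has to pass the kernel's expectation itself. */
    return syscall(SYS_rt_sigprocmask, SIG_BLOCK, &all, oldset,
                   SIGSET_T_SIZE);
}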
(Within the host kernel it is always possible @@ -397,8 +405,9 @@ static void tswap_siginfo(target_siginfo_t *tinfo, void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) { - host_to_target_siginfo_noswap(tinfo, info); - tswap_siginfo(tinfo, tinfo); + target_siginfo_t tgt_tmp; + host_to_target_siginfo_noswap(&tgt_tmp, info); + tswap_siginfo(tinfo, &tgt_tmp); } /* XXX: we support only POSIX RT signals are used. */ @@ -627,8 +636,16 @@ static void host_signal_handler(int host_signum, siginfo_t *info, * code in case the guest code provokes one in the window between * now and it getting out to the main loop. Signals will be * unblocked again in process_pending_signals(). + * + * WARNING: we cannot use sigfillset() here because the uc_sigmask + * field is a kernel sigset_t, which is much smaller than the + * libc sigset_t which sigfillset() operates on. Using sigfillset() + * would write 0xff bytes off the end of the structure and trash + * data on the struct. + * We can't use sizeof(uc->uc_sigmask) either, because the libc + * headers define the struct field with the wrong (too large) type. */ - sigfillset(&uc->uc_sigmask); + memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE); sigdelset(&uc->uc_sigmask, SIGSEGV); sigdelset(&uc->uc_sigmask, SIGBUS); diff --git a/linux-user/strace.c b/linux-user/strace.c index 4046b81705..cc10dc4703 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -5,6 +5,9 @@ #include <sys/shm.h> #include <sys/select.h> #include <sys/mount.h> +#include <arpa/inet.h> +#include <netinet/tcp.h> +#include <linux/if_packet.h> #include <sched.h> #include "qemu.h" @@ -57,10 +60,15 @@ UNUSED static void print_open_flags(abi_long, int); UNUSED static void print_syscall_prologue(const struct syscallname *); UNUSED static void print_syscall_epilogue(const struct syscallname *); UNUSED static void print_string(abi_long, int); +UNUSED static void print_buf(abi_long addr, abi_long len, int last); UNUSED static void print_raw_param(const char *, abi_long, int); UNUSED static void print_timeval(abi_ulong, int); UNUSED static void print_number(abi_long, int); UNUSED static void print_signal(abi_ulong, int); +UNUSED static void print_sockaddr(abi_ulong addr, abi_long addrlen); +UNUSED static void print_socket_domain(int domain); +UNUSED static void print_socket_type(int type); +UNUSED static void print_socket_protocol(int domain, int type, int protocol); /* * Utility functions @@ -146,6 +154,165 @@ print_signal(abi_ulong arg, int last) gemu_log("%s%s", signal_name, get_comma(last)); } +static void +print_sockaddr(abi_ulong addr, abi_long addrlen) +{ + struct target_sockaddr *sa; + int i; + int sa_family; + + sa = lock_user(VERIFY_READ, addr, addrlen, 1); + if (sa) { + sa_family = tswap16(sa->sa_family); + switch (sa_family) { + case AF_UNIX: { + struct target_sockaddr_un *un = (struct target_sockaddr_un *)sa; + int i; + gemu_log("{sun_family=AF_UNIX,sun_path=\""); + for (i = 0; i < addrlen - + offsetof(struct target_sockaddr_un, sun_path) && + un->sun_path[i]; i++) { + gemu_log("%c", un->sun_path[i]); + } + gemu_log("\"}"); + break; + } + case AF_INET: { + struct target_sockaddr_in *in = (struct target_sockaddr_in *)sa; + uint8_t *c = (uint8_t *)&in->sin_addr.s_addr; + gemu_log("{sin_family=AF_INET,sin_port=htons(%d),", + ntohs(in->sin_port)); + gemu_log("sin_addr=inet_addr(\"%d.%d.%d.%d\")", + c[0], c[1], c[2], c[3]); + gemu_log("}"); + break; + } + case AF_PACKET: { + struct target_sockaddr_ll *ll = (struct target_sockaddr_ll *)sa; + uint8_t *c = (uint8_t 
*)&ll->sll_addr; + gemu_log("{sll_family=AF_PACKET," + "sll_protocol=htons(0x%04x),if%d,pkttype=", + ntohs(ll->sll_protocol), ll->sll_ifindex); + switch (ll->sll_pkttype) { + case PACKET_HOST: + gemu_log("PACKET_HOST"); + break; + case PACKET_BROADCAST: + gemu_log("PACKET_BROADCAST"); + break; + case PACKET_MULTICAST: + gemu_log("PACKET_MULTICAST"); + break; + case PACKET_OTHERHOST: + gemu_log("PACKET_OTHERHOST"); + break; + case PACKET_OUTGOING: + gemu_log("PACKET_OUTGOING"); + break; + default: + gemu_log("%d", ll->sll_pkttype); + break; + } + gemu_log(",sll_addr=%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", + c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); + gemu_log("}"); + break; + } + default: + gemu_log("{sa_family=%d, sa_data={", sa->sa_family); + for (i = 0; i < 13; i++) { + gemu_log("%02x, ", sa->sa_data[i]); + } + gemu_log("%02x}", sa->sa_data[i]); + gemu_log("}"); + break; + } + unlock_user(sa, addr, 0); + } else { + print_raw_param("0x"TARGET_ABI_FMT_lx, addr, 0); + } + gemu_log(", "TARGET_ABI_FMT_ld, addrlen); +} + +static void +print_socket_domain(int domain) +{ + switch (domain) { + case PF_UNIX: + gemu_log("PF_UNIX"); + break; + case PF_INET: + gemu_log("PF_INET"); + break; + case PF_PACKET: + gemu_log("PF_PACKET"); + break; + default: + gemu_log("%d", domain); + break; + } +} + +static void +print_socket_type(int type) +{ + switch (type) { + case TARGET_SOCK_DGRAM: + gemu_log("SOCK_DGRAM"); + break; + case TARGET_SOCK_STREAM: + gemu_log("SOCK_STREAM"); + break; + case TARGET_SOCK_RAW: + gemu_log("SOCK_RAW"); + break; + case TARGET_SOCK_RDM: + gemu_log("SOCK_RDM"); + break; + case TARGET_SOCK_SEQPACKET: + gemu_log("SOCK_SEQPACKET"); + break; + case TARGET_SOCK_PACKET: + gemu_log("SOCK_PACKET"); + break; + } +} + +static void +print_socket_protocol(int domain, int type, int protocol) +{ + if (domain == AF_PACKET || + (domain == AF_INET && type == TARGET_SOCK_PACKET)) { + switch (protocol) { + case 0x0003: + gemu_log("ETH_P_ALL"); + break; + default: + gemu_log("%d", protocol); + } + return; + } + + switch (protocol) { + case IPPROTO_IP: + gemu_log("IPPROTO_IP"); + break; + case IPPROTO_TCP: + gemu_log("IPPROTO_TCP"); + break; + case IPPROTO_UDP: + gemu_log("IPPROTO_UDP"); + break; + case IPPROTO_RAW: + gemu_log("IPPROTO_RAW"); + break; + default: + gemu_log("%d", protocol); + break; + } +} + + #ifdef TARGET_NR__newselect static void print_fdset(int n, abi_ulong target_fds_addr) @@ -497,6 +664,26 @@ UNUSED static struct flags clone_flags[] = { FLAG_END, }; +UNUSED static struct flags msg_flags[] = { + /* send */ + FLAG_GENERIC(MSG_CONFIRM), + FLAG_GENERIC(MSG_DONTROUTE), + FLAG_GENERIC(MSG_DONTWAIT), + FLAG_GENERIC(MSG_EOR), + FLAG_GENERIC(MSG_MORE), + FLAG_GENERIC(MSG_NOSIGNAL), + FLAG_GENERIC(MSG_OOB), + /* recv */ + FLAG_GENERIC(MSG_CMSG_CLOEXEC), + FLAG_GENERIC(MSG_ERRQUEUE), + FLAG_GENERIC(MSG_PEEK), + FLAG_GENERIC(MSG_TRUNC), + FLAG_GENERIC(MSG_WAITALL), + /* recvmsg */ + FLAG_GENERIC(MSG_CTRUNC), + FLAG_END, +}; + /* * print_xxx utility functions. These are used to print syscall * parameters in certain format. 
All of these have parameter @@ -618,6 +805,36 @@ print_string(abi_long addr, int last) } } +#define MAX_PRINT_BUF 40 +static void +print_buf(abi_long addr, abi_long len, int last) +{ + uint8_t *s; + int i; + + s = lock_user(VERIFY_READ, addr, len, 1); + if (s) { + gemu_log("\""); + for (i = 0; i < MAX_PRINT_BUF && i < len; i++) { + if (isprint(s[i])) { + gemu_log("%c", s[i]); + } else { + gemu_log("\\%o", s[i]); + } + } + gemu_log("\""); + if (i != len) { + gemu_log("..."); + } + if (!last) { + gemu_log(","); + } + unlock_user(s, addr, 0); + } else { + print_pointer(addr, last); + } +} + /* * Prints out raw parameter using given format. Caller needs * to do byte swapping if needed. @@ -740,33 +957,31 @@ print_chmod(const struct syscallname *name, #endif #ifdef TARGET_NR_clone +static void do_print_clone(unsigned int flags, abi_ulong newsp, + abi_ulong parent_tidptr, target_ulong newtls, + abi_ulong child_tidptr) +{ + print_flags(clone_flags, flags, 0); + print_raw_param("child_stack=0x" TARGET_ABI_FMT_lx, newsp, 0); + print_raw_param("parent_tidptr=0x" TARGET_ABI_FMT_lx, parent_tidptr, 0); + print_raw_param("tls=0x" TARGET_ABI_FMT_lx, newtls, 0); + print_raw_param("child_tidptr=0x" TARGET_ABI_FMT_lx, child_tidptr, 1); +} + static void print_clone(const struct syscallname *name, - abi_long arg0, abi_long arg1, abi_long arg2, - abi_long arg3, abi_long arg4, abi_long arg5) + abi_long arg1, abi_long arg2, abi_long arg3, + abi_long arg4, abi_long arg5, abi_long arg6) { print_syscall_prologue(name); -#if defined(TARGET_M68K) - print_flags(clone_flags, arg0, 0); - print_raw_param("newsp=0x" TARGET_ABI_FMT_lx, arg1, 1); -#elif defined(TARGET_SH4) || defined(TARGET_ALPHA) - print_flags(clone_flags, arg0, 0); - print_raw_param("child_stack=0x" TARGET_ABI_FMT_lx, arg1, 0); - print_raw_param("parent_tidptr=0x" TARGET_ABI_FMT_lx, arg2, 0); - print_raw_param("child_tidptr=0x" TARGET_ABI_FMT_lx, arg3, 0); - print_raw_param("tls=0x" TARGET_ABI_FMT_lx, arg4, 1); -#elif defined(TARGET_CRIS) - print_raw_param("child_stack=0x" TARGET_ABI_FMT_lx, arg0, 0); - print_flags(clone_flags, arg1, 0); - print_raw_param("parent_tidptr=0x" TARGET_ABI_FMT_lx, arg2, 0); - print_raw_param("tls=0x" TARGET_ABI_FMT_lx, arg3, 0); - print_raw_param("child_tidptr=0x" TARGET_ABI_FMT_lx, arg4, 1); +#if defined(TARGET_MICROBLAZE) + do_print_clone(arg1, arg2, arg4, arg6, arg5); +#elif defined(TARGET_CLONE_BACKWARDS) + do_print_clone(arg1, arg2, arg3, arg4, arg5); +#elif defined(TARGET_CLONE_BACKWARDS2) + do_print_clone(arg2, arg1, arg3, arg5, arg4); #else - print_flags(clone_flags, arg0, 0); - print_raw_param("child_stack=0x" TARGET_ABI_FMT_lx, arg1, 0); - print_raw_param("parent_tidptr=0x" TARGET_ABI_FMT_lx, arg2, 0); - print_raw_param("tls=0x" TARGET_ABI_FMT_lx, arg3, 0); - print_raw_param("child_tidptr=0x" TARGET_ABI_FMT_lx, arg4, 1); + do_print_clone(arg1, arg2, arg3, arg5, arg4); #endif print_syscall_epilogue(name); } @@ -918,6 +1133,13 @@ print_fcntl(const struct syscallname *name, case TARGET_F_GETLEASE: gemu_log("F_GETLEASE"); break; + case TARGET_F_SETPIPE_SZ: + gemu_log("F_SETPIPE_SZ,"); + print_raw_param(TARGET_ABI_FMT_ld, arg2, 1); + break; + case TARGET_F_GETPIPE_SZ: + gemu_log("F_GETPIPE_SZ"); + break; case TARGET_F_DUPFD_CLOEXEC: gemu_log("F_DUPFD_CLOEXEC,"); print_raw_param(TARGET_ABI_FMT_ld, arg2, 1); @@ -1003,6 +1225,361 @@ print__llseek(const struct syscallname *name, } #endif +#if defined(TARGET_NR_socket) +static void +print_socket(const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + 
abi_long arg3, abi_long arg4, abi_long arg5) +{ + abi_ulong domain = arg0, type = arg1, protocol = arg2; + + print_syscall_prologue(name); + print_socket_domain(domain); + gemu_log(","); + print_socket_type(type); + gemu_log(","); + if (domain == AF_PACKET || + (domain == AF_INET && type == TARGET_SOCK_PACKET)) { + protocol = tswap16(protocol); + } + print_socket_protocol(domain, type, protocol); + print_syscall_epilogue(name); +} + +#endif + +#if defined(TARGET_NR_socketcall) + +#define get_user_ualx(x, gaddr, idx) \ + get_user_ual(x, (gaddr) + (idx) * sizeof(abi_long)) + +static void do_print_socket(const char *name, abi_long arg1) +{ + abi_ulong domain, type, protocol; + + get_user_ualx(domain, arg1, 0); + get_user_ualx(type, arg1, 1); + get_user_ualx(protocol, arg1, 2); + gemu_log("%s(", name); + print_socket_domain(domain); + gemu_log(","); + print_socket_type(type); + gemu_log(","); + if (domain == AF_PACKET || + (domain == AF_INET && type == TARGET_SOCK_PACKET)) { + protocol = tswap16(protocol); + } + print_socket_protocol(domain, type, protocol); + gemu_log(")"); +} + +static void do_print_sockaddr(const char *name, abi_long arg1) +{ + abi_ulong sockfd, addr, addrlen; + + get_user_ualx(sockfd, arg1, 0); + get_user_ualx(addr, arg1, 1); + get_user_ualx(addrlen, arg1, 2); + + gemu_log("%s(", name); + print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0); + print_sockaddr(addr, addrlen); + gemu_log(")"); +} + +static void do_print_listen(const char *name, abi_long arg1) +{ + abi_ulong sockfd, backlog; + + get_user_ualx(sockfd, arg1, 0); + get_user_ualx(backlog, arg1, 1); + + gemu_log("%s(", name); + print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0); + print_raw_param(TARGET_ABI_FMT_ld, backlog, 1); + gemu_log(")"); +} + +static void do_print_socketpair(const char *name, abi_long arg1) +{ + abi_ulong domain, type, protocol, tab; + + get_user_ualx(domain, arg1, 0); + get_user_ualx(type, arg1, 1); + get_user_ualx(protocol, arg1, 2); + get_user_ualx(tab, arg1, 3); + + gemu_log("%s(", name); + print_socket_domain(domain); + gemu_log(","); + print_socket_type(type); + gemu_log(","); + print_socket_protocol(domain, type, protocol); + gemu_log(","); + print_raw_param(TARGET_ABI_FMT_lx, tab, 1); + gemu_log(")"); +} + +static void do_print_sendrecv(const char *name, abi_long arg1) +{ + abi_ulong sockfd, msg, len, flags; + + get_user_ualx(sockfd, arg1, 0); + get_user_ualx(msg, arg1, 1); + get_user_ualx(len, arg1, 2); + get_user_ualx(flags, arg1, 3); + + gemu_log("%s(", name); + print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0); + print_buf(msg, len, 0); + print_raw_param(TARGET_ABI_FMT_ld, len, 0); + print_flags(msg_flags, flags, 1); + gemu_log(")"); +} + +static void do_print_msgaddr(const char *name, abi_long arg1) +{ + abi_ulong sockfd, msg, len, flags, addr, addrlen; + + get_user_ualx(sockfd, arg1, 0); + get_user_ualx(msg, arg1, 1); + get_user_ualx(len, arg1, 2); + get_user_ualx(flags, arg1, 3); + get_user_ualx(addr, arg1, 4); + get_user_ualx(addrlen, arg1, 5); + + gemu_log("%s(", name); + print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0); + print_buf(msg, len, 0); + print_raw_param(TARGET_ABI_FMT_ld, len, 0); + print_flags(msg_flags, flags, 0); + print_sockaddr(addr, addrlen); + gemu_log(")"); +} + +static void do_print_shutdown(const char *name, abi_long arg1) +{ + abi_ulong sockfd, how; + + get_user_ualx(sockfd, arg1, 0); + get_user_ualx(how, arg1, 1); + + gemu_log("shutdown("); + print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0); + switch (how) { + case SHUT_RD: + gemu_log("SHUT_RD"); + break; + case SHUT_WR: + 
gemu_log("SHUT_WR"); + break; + case SHUT_RDWR: + gemu_log("SHUT_RDWR"); + break; + default: + print_raw_param(TARGET_ABI_FMT_ld, how, 1); + break; + } + gemu_log(")"); +} + +static void do_print_msg(const char *name, abi_long arg1) +{ + abi_ulong sockfd, msg, flags; + + get_user_ualx(sockfd, arg1, 0); + get_user_ualx(msg, arg1, 1); + get_user_ualx(flags, arg1, 2); + + gemu_log("%s(", name); + print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0); + print_pointer(msg, 0); + print_flags(msg_flags, flags, 1); + gemu_log(")"); +} + +static void do_print_sockopt(const char *name, abi_long arg1) +{ + abi_ulong sockfd, level, optname, optval, optlen; + + get_user_ualx(sockfd, arg1, 0); + get_user_ualx(level, arg1, 1); + get_user_ualx(optname, arg1, 2); + get_user_ualx(optval, arg1, 3); + get_user_ualx(optlen, arg1, 4); + + gemu_log("%s(", name); + print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0); + switch (level) { + case SOL_TCP: + gemu_log("SOL_TCP,"); + print_raw_param(TARGET_ABI_FMT_ld, optname, 0); + print_pointer(optval, 0); + break; + case SOL_IP: + gemu_log("SOL_IP,"); + print_raw_param(TARGET_ABI_FMT_ld, optname, 0); + print_pointer(optval, 0); + break; + case SOL_RAW: + gemu_log("SOL_RAW,"); + print_raw_param(TARGET_ABI_FMT_ld, optname, 0); + print_pointer(optval, 0); + break; + case TARGET_SOL_SOCKET: + gemu_log("SOL_SOCKET,"); + switch (optname) { + case TARGET_SO_DEBUG: + gemu_log("SO_DEBUG,"); +print_optint: + print_number(optval, 0); + break; + case TARGET_SO_REUSEADDR: + gemu_log("SO_REUSEADDR,"); + goto print_optint; + case TARGET_SO_TYPE: + gemu_log("SO_TYPE,"); + goto print_optint; + case TARGET_SO_ERROR: + gemu_log("SO_ERROR,"); + goto print_optint; + case TARGET_SO_DONTROUTE: + gemu_log("SO_DONTROUTE,"); + goto print_optint; + case TARGET_SO_BROADCAST: + gemu_log("SO_BROADCAST,"); + goto print_optint; + case TARGET_SO_SNDBUF: + gemu_log("SO_SNDBUF,"); + goto print_optint; + case TARGET_SO_RCVBUF: + gemu_log("SO_RCVBUF,"); + goto print_optint; + case TARGET_SO_KEEPALIVE: + gemu_log("SO_KEEPALIVE,"); + goto print_optint; + case TARGET_SO_OOBINLINE: + gemu_log("SO_OOBINLINE,"); + goto print_optint; + case TARGET_SO_NO_CHECK: + gemu_log("SO_NO_CHECK,"); + goto print_optint; + case TARGET_SO_PRIORITY: + gemu_log("SO_PRIORITY,"); + goto print_optint; + case TARGET_SO_BSDCOMPAT: + gemu_log("SO_BSDCOMPAT,"); + goto print_optint; + case TARGET_SO_PASSCRED: + gemu_log("SO_PASSCRED,"); + goto print_optint; + case TARGET_SO_TIMESTAMP: + gemu_log("SO_TIMESTAMP,"); + goto print_optint; + case TARGET_SO_RCVLOWAT: + gemu_log("SO_RCVLOWAT,"); + goto print_optint; + case TARGET_SO_RCVTIMEO: + gemu_log("SO_RCVTIMEO,"); + print_timeval(optval, 0); + break; + case TARGET_SO_SNDTIMEO: + gemu_log("SO_SNDTIMEO,"); + print_timeval(optval, 0); + break; + case TARGET_SO_ATTACH_FILTER: { + struct target_sock_fprog *fprog; + + gemu_log("SO_ATTACH_FILTER,"); + + if (lock_user_struct(VERIFY_READ, fprog, optval, 0)) { + struct target_sock_filter *filter; + gemu_log("{"); + if (lock_user_struct(VERIFY_READ, filter, + tswapal(fprog->filter), 0)) { + int i; + for (i = 0; i < tswap16(fprog->len) - 1; i++) { + gemu_log("[%d]{0x%x,%d,%d,0x%x},", + i, tswap16(filter[i].code), + filter[i].jt, filter[i].jf, + tswap32(filter[i].k)); + } + gemu_log("[%d]{0x%x,%d,%d,0x%x}", + i, tswap16(filter[i].code), + filter[i].jt, filter[i].jf, + tswap32(filter[i].k)); + } else { + gemu_log(TARGET_ABI_FMT_lx, tswapal(fprog->filter)); + } + gemu_log(",%d},", tswap16(fprog->len)); + unlock_user(fprog, optval, 0); + } else { + 
print_pointer(optval, 0); + } + break; + } + default: + print_raw_param(TARGET_ABI_FMT_ld, optname, 0); + print_pointer(optval, 0); + break; + } + break; + default: + print_raw_param(TARGET_ABI_FMT_ld, level, 0); + print_raw_param(TARGET_ABI_FMT_ld, optname, 0); + print_pointer(optval, 0); + break; + } + print_raw_param(TARGET_ABI_FMT_ld, optlen, 1); + gemu_log(")"); +} + +#define PRINT_SOCKOP(name, func) \ + [SOCKOP_##name] = { #name, func } + +static struct { + const char *name; + void (*print)(const char *, abi_long); +} scall[] = { + PRINT_SOCKOP(socket, do_print_socket), + PRINT_SOCKOP(bind, do_print_sockaddr), + PRINT_SOCKOP(connect, do_print_sockaddr), + PRINT_SOCKOP(listen, do_print_listen), + PRINT_SOCKOP(accept, do_print_sockaddr), + PRINT_SOCKOP(getsockname, do_print_sockaddr), + PRINT_SOCKOP(getpeername, do_print_sockaddr), + PRINT_SOCKOP(socketpair, do_print_socketpair), + PRINT_SOCKOP(send, do_print_sendrecv), + PRINT_SOCKOP(recv, do_print_sendrecv), + PRINT_SOCKOP(sendto, do_print_msgaddr), + PRINT_SOCKOP(recvfrom, do_print_msgaddr), + PRINT_SOCKOP(shutdown, do_print_shutdown), + PRINT_SOCKOP(sendmsg, do_print_msg), + PRINT_SOCKOP(recvmsg, do_print_msg), + PRINT_SOCKOP(setsockopt, do_print_sockopt), + PRINT_SOCKOP(getsockopt, do_print_sockopt), +}; + +static void +print_socketcall(const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + if (arg0 >= 0 && arg0 < ARRAY_SIZE(scall) && scall[arg0].print) { + scall[arg0].print(scall[arg0].name, arg1); + return; + } + print_syscall_prologue(name); + print_raw_param(TARGET_ABI_FMT_ld, arg0, 0); + print_raw_param(TARGET_ABI_FMT_ld, arg1, 0); + print_raw_param(TARGET_ABI_FMT_ld, arg2, 0); + print_raw_param(TARGET_ABI_FMT_ld, arg3, 0); + print_raw_param(TARGET_ABI_FMT_ld, arg4, 0); + print_raw_param(TARGET_ABI_FMT_ld, arg5, 0); + print_syscall_epilogue(name); +} +#endif + #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) || \ defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) static void diff --git a/linux-user/strace.list b/linux-user/strace.list index aa0cd735cc..aa967a2475 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -337,7 +337,8 @@ { TARGET_NR_getsockopt, "getsockopt" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_get_thread_area -{ TARGET_NR_get_thread_area, "get_thread_area" , NULL, NULL, NULL }, +{ TARGET_NR_get_thread_area, "get_thread_area", "%s(0x"TARGET_ABI_FMT_lx")", + NULL, NULL }, #endif #ifdef TARGET_NR_gettid { TARGET_NR_gettid, "gettid" , NULL, NULL, NULL }, @@ -1234,7 +1235,8 @@ { TARGET_NR_setsockopt, "setsockopt" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_set_thread_area -{ TARGET_NR_set_thread_area, "set_thread_area" , NULL, NULL, NULL }, +{ TARGET_NR_set_thread_area, "set_thread_area", "%s(0x"TARGET_ABI_FMT_lx")", + NULL, NULL }, #endif #ifdef TARGET_NR_set_tid_address { TARGET_NR_set_tid_address, "set_tid_address" , NULL, NULL, NULL }, @@ -1291,10 +1293,10 @@ { TARGET_NR_sigsuspend, "sigsuspend" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_socket -{ TARGET_NR_socket, "socket" , NULL, NULL, NULL }, +{ TARGET_NR_socket, "socket" , NULL, print_socket, NULL }, #endif #ifdef TARGET_NR_socketcall -{ TARGET_NR_socketcall, "socketcall" , NULL, NULL, NULL }, +{ TARGET_NR_socketcall, "socketcall" , NULL, print_socketcall, NULL }, #endif #ifdef TARGET_NR_socketpair { TARGET_NR_socketpair, "socketpair" , NULL, NULL, NULL }, diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 1c17b741c2..28ee45a937 100644 
--- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -123,11 +123,6 @@ int __clone2(int (*fn)(void *), void *child_stack_base, #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) -/* This is the size of the host kernel's sigset_t, needed where we make - * direct system calls that take a sigset_t pointer and a size. - */ -#define SIGSET_T_SIZE (_NSIG / 8) - #undef _syscall0 #undef _syscall1 #undef _syscall2 @@ -783,6 +778,16 @@ safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr, * the libc function. */ #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__) +/* Similarly for fcntl. Note that callers must always: + * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK + * use the flock64 struct rather than unsuffixed flock + * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts. + */ +#ifdef __NR_fcntl64 +#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__) +#else +#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__) +#endif static inline int host_to_target_sock_type(int host_type) { @@ -1687,6 +1692,7 @@ static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh, struct nlmsgerr *e = NLMSG_DATA(nlh); e->error = tswap32(e->error); tswap_nlmsghdr(&e->msg); + return 0; } default: ret = target_to_host_nlmsg(nlh); @@ -1942,29 +1948,35 @@ static abi_long host_to_target_data_route(struct nlmsghdr *nlh) case RTM_NEWLINK: case RTM_DELLINK: case RTM_GETLINK: - ifi = NLMSG_DATA(nlh); - ifi->ifi_type = tswap16(ifi->ifi_type); - ifi->ifi_index = tswap32(ifi->ifi_index); - ifi->ifi_flags = tswap32(ifi->ifi_flags); - ifi->ifi_change = tswap32(ifi->ifi_change); - host_to_target_link_rtattr(IFLA_RTA(ifi), - nlmsg_len - NLMSG_LENGTH(sizeof(*ifi))); + if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) { + ifi = NLMSG_DATA(nlh); + ifi->ifi_type = tswap16(ifi->ifi_type); + ifi->ifi_index = tswap32(ifi->ifi_index); + ifi->ifi_flags = tswap32(ifi->ifi_flags); + ifi->ifi_change = tswap32(ifi->ifi_change); + host_to_target_link_rtattr(IFLA_RTA(ifi), + nlmsg_len - NLMSG_LENGTH(sizeof(*ifi))); + } break; case RTM_NEWADDR: case RTM_DELADDR: case RTM_GETADDR: - ifa = NLMSG_DATA(nlh); - ifa->ifa_index = tswap32(ifa->ifa_index); - host_to_target_addr_rtattr(IFA_RTA(ifa), - nlmsg_len - NLMSG_LENGTH(sizeof(*ifa))); + if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) { + ifa = NLMSG_DATA(nlh); + ifa->ifa_index = tswap32(ifa->ifa_index); + host_to_target_addr_rtattr(IFA_RTA(ifa), + nlmsg_len - NLMSG_LENGTH(sizeof(*ifa))); + } break; case RTM_NEWROUTE: case RTM_DELROUTE: case RTM_GETROUTE: - rtm = NLMSG_DATA(nlh); - rtm->rtm_flags = tswap32(rtm->rtm_flags); - host_to_target_route_rtattr(RTM_RTA(rtm), - nlmsg_len - NLMSG_LENGTH(sizeof(*rtm))); + if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) { + rtm = NLMSG_DATA(nlh); + rtm->rtm_flags = tswap32(rtm->rtm_flags); + host_to_target_route_rtattr(RTM_RTA(rtm), + nlmsg_len - NLMSG_LENGTH(sizeof(*rtm))); + } break; default: return -TARGET_EINVAL; @@ -2080,30 +2092,36 @@ static abi_long target_to_host_data_route(struct nlmsghdr *nlh) break; case RTM_NEWLINK: case RTM_DELLINK: - ifi = NLMSG_DATA(nlh); - ifi->ifi_type = tswap16(ifi->ifi_type); - ifi->ifi_index = tswap32(ifi->ifi_index); - ifi->ifi_flags = tswap32(ifi->ifi_flags); - ifi->ifi_change = tswap32(ifi->ifi_change); - target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len - - NLMSG_LENGTH(sizeof(*ifi))); + if (nlh->nlmsg_len >= 
NLMSG_LENGTH(sizeof(*ifi))) { + ifi = NLMSG_DATA(nlh); + ifi->ifi_type = tswap16(ifi->ifi_type); + ifi->ifi_index = tswap32(ifi->ifi_index); + ifi->ifi_flags = tswap32(ifi->ifi_flags); + ifi->ifi_change = tswap32(ifi->ifi_change); + target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len - + NLMSG_LENGTH(sizeof(*ifi))); + } break; case RTM_GETADDR: case RTM_NEWADDR: case RTM_DELADDR: - ifa = NLMSG_DATA(nlh); - ifa->ifa_index = tswap32(ifa->ifa_index); - target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len - - NLMSG_LENGTH(sizeof(*ifa))); + if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) { + ifa = NLMSG_DATA(nlh); + ifa->ifa_index = tswap32(ifa->ifa_index); + target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len - + NLMSG_LENGTH(sizeof(*ifa))); + } break; case RTM_GETROUTE: break; case RTM_NEWROUTE: case RTM_DELROUTE: - rtm = NLMSG_DATA(nlh); - rtm->rtm_flags = tswap32(rtm->rtm_flags); - target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len - - NLMSG_LENGTH(sizeof(*rtm))); + if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) { + rtm = NLMSG_DATA(nlh); + rtm->rtm_flags = tswap32(rtm->rtm_flags); + target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len - + NLMSG_LENGTH(sizeof(*rtm))); + } break; default: return -TARGET_EOPNOTSUPP; @@ -2985,7 +3003,7 @@ static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, len = ret; if (fd_trans_host_to_target_data(fd)) { ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base, - msg.msg_iov->iov_len); + len); } else { ret = host_to_target_cmsg(msgp, &msg); } @@ -5541,11 +5559,11 @@ static int target_to_host_fcntl_cmd(int cmd) case TARGET_F_SETFL: return cmd; case TARGET_F_GETLK: - return F_GETLK; - case TARGET_F_SETLK: - return F_SETLK; - case TARGET_F_SETLKW: - return F_SETLKW; + return F_GETLK64; + case TARGET_F_SETLK: + return F_SETLK64; + case TARGET_F_SETLKW: + return F_SETLKW64; case TARGET_F_GETOWN: return F_GETOWN; case TARGET_F_SETOWN: @@ -5580,6 +5598,10 @@ static int target_to_host_fcntl_cmd(int cmd) case TARGET_F_SETOWN_EX: return F_SETOWN_EX; #endif + case TARGET_F_SETPIPE_SZ: + return F_SETPIPE_SZ; + case TARGET_F_GETPIPE_SZ: + return F_GETPIPE_SZ; default: return -TARGET_EINVAL; } @@ -5596,12 +5618,134 @@ static const bitmask_transtbl flock_tbl[] = { { 0, 0, 0, 0 } }; -static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) +static inline abi_long copy_from_user_flock(struct flock64 *fl, + abi_ulong target_flock_addr) +{ + struct target_flock *target_fl; + short l_type; + + if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { + return -TARGET_EFAULT; + } + + __get_user(l_type, &target_fl->l_type); + fl->l_type = target_to_host_bitmask(l_type, flock_tbl); + __get_user(fl->l_whence, &target_fl->l_whence); + __get_user(fl->l_start, &target_fl->l_start); + __get_user(fl->l_len, &target_fl->l_len); + __get_user(fl->l_pid, &target_fl->l_pid); + unlock_user_struct(target_fl, target_flock_addr, 0); + return 0; +} + +static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, + const struct flock64 *fl) { - struct flock fl; struct target_flock *target_fl; + short l_type; + + if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { + return -TARGET_EFAULT; + } + + l_type = host_to_target_bitmask(fl->l_type, flock_tbl); + __put_user(l_type, &target_fl->l_type); + __put_user(fl->l_whence, &target_fl->l_whence); + __put_user(fl->l_start, &target_fl->l_start); + __put_user(fl->l_len, &target_fl->l_len); + __put_user(fl->l_pid, &target_fl->l_pid); + 
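/* descriptive note (editorial): the final argument of 1 asks unlock_user_struct() to copy the updated lock description back into guest memory */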
unlock_user_struct(target_fl, target_flock_addr, 1); + return 0; +} + +typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); +typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); + +#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 +static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl, + abi_ulong target_flock_addr) +{ + struct target_eabi_flock64 *target_fl; + short l_type; + + if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { + return -TARGET_EFAULT; + } + + __get_user(l_type, &target_fl->l_type); + fl->l_type = target_to_host_bitmask(l_type, flock_tbl); + __get_user(fl->l_whence, &target_fl->l_whence); + __get_user(fl->l_start, &target_fl->l_start); + __get_user(fl->l_len, &target_fl->l_len); + __get_user(fl->l_pid, &target_fl->l_pid); + unlock_user_struct(target_fl, target_flock_addr, 0); + return 0; +} + +static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr, + const struct flock64 *fl) +{ + struct target_eabi_flock64 *target_fl; + short l_type; + + if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { + return -TARGET_EFAULT; + } + + l_type = host_to_target_bitmask(fl->l_type, flock_tbl); + __put_user(l_type, &target_fl->l_type); + __put_user(fl->l_whence, &target_fl->l_whence); + __put_user(fl->l_start, &target_fl->l_start); + __put_user(fl->l_len, &target_fl->l_len); + __put_user(fl->l_pid, &target_fl->l_pid); + unlock_user_struct(target_fl, target_flock_addr, 1); + return 0; +} +#endif + +static inline abi_long copy_from_user_flock64(struct flock64 *fl, + abi_ulong target_flock_addr) +{ + struct target_flock64 *target_fl; + short l_type; + + if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { + return -TARGET_EFAULT; + } + + __get_user(l_type, &target_fl->l_type); + fl->l_type = target_to_host_bitmask(l_type, flock_tbl); + __get_user(fl->l_whence, &target_fl->l_whence); + __get_user(fl->l_start, &target_fl->l_start); + __get_user(fl->l_len, &target_fl->l_len); + __get_user(fl->l_pid, &target_fl->l_pid); + unlock_user_struct(target_fl, target_flock_addr, 0); + return 0; +} + +static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, + const struct flock64 *fl) +{ + struct target_flock64 *target_fl; + short l_type; + + if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { + return -TARGET_EFAULT; + } + + l_type = host_to_target_bitmask(fl->l_type, flock_tbl); + __put_user(l_type, &target_fl->l_type); + __put_user(fl->l_whence, &target_fl->l_whence); + __put_user(fl->l_start, &target_fl->l_start); + __put_user(fl->l_len, &target_fl->l_len); + __put_user(fl->l_pid, &target_fl->l_pid); + unlock_user_struct(target_fl, target_flock_addr, 1); + return 0; +} + +static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) +{ struct flock64 fl64; - struct target_flock64 *target_fl64; #ifdef F_GETOWN_EX struct f_owner_ex fox; struct target_f_owner_ex *target_fox; @@ -5614,94 +5758,60 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) switch(cmd) { case TARGET_F_GETLK: - if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) - return -TARGET_EFAULT; - fl.l_type = - target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); - fl.l_whence = tswap16(target_fl->l_whence); - fl.l_start = tswapal(target_fl->l_start); - fl.l_len = tswapal(target_fl->l_len); - fl.l_pid = tswap32(target_fl->l_pid); - unlock_user_struct(target_fl, arg, 0); - ret = get_errno(fcntl(fd, host_cmd, &fl)); + ret = 
copy_from_user_flock(&fl64, arg); + if (ret) { + return ret; + } + ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); if (ret == 0) { - if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) - return -TARGET_EFAULT; - target_fl->l_type = - host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); - target_fl->l_whence = tswap16(fl.l_whence); - target_fl->l_start = tswapal(fl.l_start); - target_fl->l_len = tswapal(fl.l_len); - target_fl->l_pid = tswap32(fl.l_pid); - unlock_user_struct(target_fl, arg, 1); + ret = copy_to_user_flock(arg, &fl64); } break; case TARGET_F_SETLK: case TARGET_F_SETLKW: - if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) - return -TARGET_EFAULT; - fl.l_type = - target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); - fl.l_whence = tswap16(target_fl->l_whence); - fl.l_start = tswapal(target_fl->l_start); - fl.l_len = tswapal(target_fl->l_len); - fl.l_pid = tswap32(target_fl->l_pid); - unlock_user_struct(target_fl, arg, 0); - ret = get_errno(fcntl(fd, host_cmd, &fl)); + ret = copy_from_user_flock(&fl64, arg); + if (ret) { + return ret; + } + ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); break; case TARGET_F_GETLK64: - if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) - return -TARGET_EFAULT; - fl64.l_type = - target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; - fl64.l_whence = tswap16(target_fl64->l_whence); - fl64.l_start = tswap64(target_fl64->l_start); - fl64.l_len = tswap64(target_fl64->l_len); - fl64.l_pid = tswap32(target_fl64->l_pid); - unlock_user_struct(target_fl64, arg, 0); - ret = get_errno(fcntl(fd, host_cmd, &fl64)); + ret = copy_from_user_flock64(&fl64, arg); + if (ret) { + return ret; + } + ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); if (ret == 0) { - if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) - return -TARGET_EFAULT; - target_fl64->l_type = - host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; - target_fl64->l_whence = tswap16(fl64.l_whence); - target_fl64->l_start = tswap64(fl64.l_start); - target_fl64->l_len = tswap64(fl64.l_len); - target_fl64->l_pid = tswap32(fl64.l_pid); - unlock_user_struct(target_fl64, arg, 1); + ret = copy_to_user_flock64(arg, &fl64); } break; case TARGET_F_SETLK64: case TARGET_F_SETLKW64: - if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) - return -TARGET_EFAULT; - fl64.l_type = - target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; - fl64.l_whence = tswap16(target_fl64->l_whence); - fl64.l_start = tswap64(target_fl64->l_start); - fl64.l_len = tswap64(target_fl64->l_len); - fl64.l_pid = tswap32(target_fl64->l_pid); - unlock_user_struct(target_fl64, arg, 0); - ret = get_errno(fcntl(fd, host_cmd, &fl64)); + ret = copy_from_user_flock64(&fl64, arg); + if (ret) { + return ret; + } + ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); break; case TARGET_F_GETFL: - ret = get_errno(fcntl(fd, host_cmd, arg)); + ret = get_errno(safe_fcntl(fd, host_cmd, arg)); if (ret >= 0) { ret = host_to_target_bitmask(ret, fcntl_flags_tbl); } break; case TARGET_F_SETFL: - ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); + ret = get_errno(safe_fcntl(fd, host_cmd, + target_to_host_bitmask(arg, + fcntl_flags_tbl))); break; #ifdef F_GETOWN_EX case TARGET_F_GETOWN_EX: - ret = get_errno(fcntl(fd, host_cmd, &fox)); + ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); if (ret >= 0) { if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) return -TARGET_EFAULT; @@ -5719,7 +5829,7 @@ static abi_long do_fcntl(int fd, int cmd, 
abi_ulong arg) fox.type = tswap32(target_fox->type); fox.pid = tswap32(target_fox->pid); unlock_user_struct(target_fox, arg, 0); - ret = get_errno(fcntl(fd, host_cmd, &fox)); + ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); break; #endif @@ -5729,11 +5839,13 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) case TARGET_F_GETSIG: case TARGET_F_SETLEASE: case TARGET_F_GETLEASE: - ret = get_errno(fcntl(fd, host_cmd, arg)); + case TARGET_F_SETPIPE_SZ: + case TARGET_F_GETPIPE_SZ: + ret = get_errno(safe_fcntl(fd, host_cmd, arg)); break; default: - ret = get_errno(fcntl(fd, cmd, arg)); + ret = get_errno(safe_fcntl(fd, cmd, arg)); break; } return ret; @@ -6690,6 +6802,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, #ifdef DEBUG gemu_log("syscall %d", num); #endif + trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); if(do_strace) print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); @@ -7783,8 +7896,11 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, case TARGET_NR_rt_sigqueueinfo: { siginfo_t uinfo; - if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) + + p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); + if (!p) { goto efault; + } target_to_host_siginfo(&uinfo, p); unlock_user(p, arg1, 0); ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); @@ -10132,9 +10248,14 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, { int cmd; struct flock64 fl; - struct target_flock64 *target_fl; + from_flock64_fn *copyfrom = copy_from_user_flock64; + to_flock64_fn *copyto = copy_to_user_flock64; + #ifdef TARGET_ARM - struct target_eabi_flock64 *target_efl; + if (((CPUARMState *)cpu_env)->eabi) { + copyfrom = copy_from_user_eabi_flock64; + copyto = copy_to_user_eabi_flock64; + } #endif cmd = target_to_host_fcntl_cmd(arg2); @@ -10145,80 +10266,23 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, switch(arg2) { case TARGET_F_GETLK64: -#ifdef TARGET_ARM - if (((CPUARMState *)cpu_env)->eabi) { - if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) - goto efault; - fl.l_type = tswap16(target_efl->l_type); - fl.l_whence = tswap16(target_efl->l_whence); - fl.l_start = tswap64(target_efl->l_start); - fl.l_len = tswap64(target_efl->l_len); - fl.l_pid = tswap32(target_efl->l_pid); - unlock_user_struct(target_efl, arg3, 0); - } else -#endif - { - if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) - goto efault; - fl.l_type = tswap16(target_fl->l_type); - fl.l_whence = tswap16(target_fl->l_whence); - fl.l_start = tswap64(target_fl->l_start); - fl.l_len = tswap64(target_fl->l_len); - fl.l_pid = tswap32(target_fl->l_pid); - unlock_user_struct(target_fl, arg3, 0); + ret = copyfrom(&fl, arg3); + if (ret) { + break; } ret = get_errno(fcntl(arg1, cmd, &fl)); - if (ret == 0) { -#ifdef TARGET_ARM - if (((CPUARMState *)cpu_env)->eabi) { - if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) - goto efault; - target_efl->l_type = tswap16(fl.l_type); - target_efl->l_whence = tswap16(fl.l_whence); - target_efl->l_start = tswap64(fl.l_start); - target_efl->l_len = tswap64(fl.l_len); - target_efl->l_pid = tswap32(fl.l_pid); - unlock_user_struct(target_efl, arg3, 1); - } else -#endif - { - if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) - goto efault; - target_fl->l_type = tswap16(fl.l_type); - target_fl->l_whence = tswap16(fl.l_whence); - target_fl->l_start = tswap64(fl.l_start); - target_fl->l_len = tswap64(fl.l_len); - target_fl->l_pid = tswap32(fl.l_pid); - 
unlock_user_struct(target_fl, arg3, 1); - } - } + if (ret == 0) { + ret = copyto(arg3, &fl); + } break; case TARGET_F_SETLK64: case TARGET_F_SETLKW64: -#ifdef TARGET_ARM - if (((CPUARMState *)cpu_env)->eabi) { - if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) - goto efault; - fl.l_type = tswap16(target_efl->l_type); - fl.l_whence = tswap16(target_efl->l_whence); - fl.l_start = tswap64(target_efl->l_start); - fl.l_len = tswap64(target_efl->l_len); - fl.l_pid = tswap32(target_efl->l_pid); - unlock_user_struct(target_efl, arg3, 0); - } else -#endif - { - if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) - goto efault; - fl.l_type = tswap16(target_fl->l_type); - fl.l_whence = tswap16(target_fl->l_whence); - fl.l_start = tswap64(target_fl->l_start); - fl.l_len = tswap64(target_fl->l_len); - fl.l_pid = tswap32(target_fl->l_pid); - unlock_user_struct(target_fl, arg3, 0); + ret = copyfrom(&fl, arg3); + if (ret) { + break; } - ret = get_errno(fcntl(arg1, cmd, &fl)); + ret = get_errno(safe_fcntl(arg1, cmd, &fl)); break; default: ret = do_fcntl(arg1, arg2, arg3); @@ -11182,6 +11246,7 @@ fail: #endif if(do_strace) print_syscall_ret(num, ret); + trace_guest_user_syscall_ret(cpu, num, ret); return ret; efault: ret = -TARGET_EFAULT; diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index 6ee9251c50..dce1bcc91d 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -135,6 +135,24 @@ struct target_sockaddr_ll { uint8_t sll_addr[8]; /* Physical layer address */ }; +struct target_sockaddr_un { + uint16_t su_family; + uint8_t sun_path[108]; +}; + +struct target_in_addr { + uint32_t s_addr; /* big endian */ +}; + +struct target_sockaddr_in { + uint16_t sin_family; + int16_t sin_port; /* big endian */ + struct target_in_addr sin_addr; + uint8_t __pad[sizeof(struct target_sockaddr) - + sizeof(uint16_t) - sizeof(int16_t) - + sizeof(struct target_in_addr)]; +}; + struct target_sock_filter { abi_ushort code; uint8_t jt; @@ -147,10 +165,6 @@ struct target_sock_fprog { abi_ulong filter; }; -struct target_in_addr { - uint32_t s_addr; /* big endian */ -}; - struct target_ip_mreq { struct target_in_addr imr_multiaddr; struct target_in_addr imr_address; @@ -2166,6 +2180,8 @@ struct target_statfs64 { #define TARGET_F_SETLEASE (TARGET_F_LINUX_SPECIFIC_BASE + 0) #define TARGET_F_GETLEASE (TARGET_F_LINUX_SPECIFIC_BASE + 1) #define TARGET_F_DUPFD_CLOEXEC (TARGET_F_LINUX_SPECIFIC_BASE + 6) +#define TARGET_F_SETPIPE_SZ (TARGET_F_LINUX_SPECIFIC_BASE + 7) +#define TARGET_F_GETPIPE_SZ (TARGET_F_LINUX_SPECIFIC_BASE + 8) #define TARGET_F_NOTIFY (TARGET_F_LINUX_SPECIFIC_BASE+2) #if defined(TARGET_ALPHA) @@ -722,7 +722,7 @@ ssize_t qemu_deliver_packet_iov(NetClientState *sender, return 0; } - if (nc->info->receive_iov) { + if (nc->info->receive_iov && !(flags & QEMU_NET_PACKET_FLAG_RAW)) { ret = nc->info->receive_iov(nc, iov, iovcnt); } else { ret = nc_sendv_compat(nc, iov, iovcnt, flags); diff --git a/net/socket.c b/net/socket.c index 333fb9ecfa..ae6f92101d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -489,41 +489,30 @@ static int net_socket_listen_init(NetClientState *peer, { NetClientState *nc; NetSocketState *s; - struct sockaddr_in saddr; - int fd, ret; + SocketAddress *saddr; + int ret; + Error *local_error = NULL; - if (parse_host_port(&saddr, host_str) < 0) - return -1; - - fd = qemu_socket(PF_INET, SOCK_STREAM, 0); - if (fd < 0) { - perror("socket"); + saddr = socket_parse(host_str, &local_error); + if (saddr == NULL) { + error_report_err(local_error); return -1; } - 
qemu_set_nonblock(fd); - socket_set_fast_reuse(fd); - - ret = bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)); + ret = socket_listen(saddr, &local_error); if (ret < 0) { - perror("bind"); - closesocket(fd); - return -1; - } - ret = listen(fd, 0); - if (ret < 0) { - perror("listen"); - closesocket(fd); + error_report_err(local_error); return -1; } nc = qemu_new_net_client(&net_socket_info, peer, model, name); s = DO_UPCAST(NetSocketState, nc, nc); s->fd = -1; - s->listen_fd = fd; + s->listen_fd = ret; s->nc.link_down = true; qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s); + qapi_free_SocketAddress(saddr); return 0; } @@ -534,10 +523,15 @@ static int net_socket_connect_init(NetClientState *peer, { NetSocketState *s; int fd, connected, ret; - struct sockaddr_in saddr; + char *addr_str; + SocketAddress *saddr; + Error *local_error = NULL; - if (parse_host_port(&saddr, host_str) < 0) + saddr = socket_parse(host_str, &local_error); + if (saddr == NULL) { + error_report_err(local_error); return -1; + } fd = qemu_socket(PF_INET, SOCK_STREAM, 0); if (fd < 0) { @@ -545,10 +539,9 @@ static int net_socket_connect_init(NetClientState *peer, return -1; } qemu_set_nonblock(fd); - connected = 0; for(;;) { - ret = connect(fd, (struct sockaddr *)&saddr, sizeof(saddr)); + ret = socket_connect(saddr, &local_error, NULL, NULL); if (ret < 0) { if (errno == EINTR || errno == EWOULDBLOCK) { /* continue */ @@ -557,7 +550,7 @@ static int net_socket_connect_init(NetClientState *peer, errno == EINVAL) { break; } else { - perror("connect"); + error_report_err(local_error); closesocket(fd); return -1; } @@ -569,9 +562,15 @@ static int net_socket_connect_init(NetClientState *peer, s = net_socket_fd_init(peer, model, name, fd, connected); if (!s) return -1; + + addr_str = socket_address_to_string(saddr, &local_error); + if (addr_str == NULL) + return -1; + snprintf(s->nc.info_str, sizeof(s->nc.info_str), - "socket: connect to %s:%d", - inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); + "socket: connect to %s", addr_str); + qapi_free_SocketAddress(saddr); + g_free(addr_str); return 0; } diff --git a/qapi-schema.json b/qapi-schema.json index 84b6708125..ba3bf14749 100644 --- a/qapi-schema.json +++ b/qapi-schema.json @@ -2986,11 +2986,14 @@ # @cpu-max: maximum number of CPUs supported by the machine type # (since 1.5.0) # +# @hotpluggable-cpus: cpu hotplug via -device is supported (since 2.7.0) +# # Since: 1.2.0 ## { 'struct': 'MachineInfo', 'data': { 'name': 'str', '*alias': 'str', - '*is-default': 'bool', 'cpu-max': 'int' } } + '*is-default': 'bool', 'cpu-max': 'int', + 'hotpluggable-cpus': 'bool'} } ## # @query-machines: @@ -4265,20 +4268,21 @@ # Note: currently there are 4 properties that could be present # but management should be prepared to pass through other # properties with device_add command to allow for future -# interface extension. +# interface extension. This also requires the field names to be kept in +# sync with the properties passed to -device/device_add.
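# As an illustrative sketch only (the concrete CPU device type and the
# topology values depend on the target and machine type), a slot advertised
# with these properties might later be filled with:
#   (qemu) device_add qemu64-x86_64-cpu,socket-id=1,core-id=0,thread-id=0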
# -# @node: #optional NUMA node ID the CPU belongs to -# @socket: #optional socket number within node/board the CPU belongs to -# @core: #optional core number within socket the CPU belongs to -# @thread: #optional thread number within core the CPU belongs to +# @node-id: #optional NUMA node ID the CPU belongs to +# @socket-id: #optional socket number within node/board the CPU belongs to +# @core-id: #optional core number within socket the CPU belongs to +# @thread-id: #optional thread number within core the CPU belongs to # # Since: 2.7 ## { 'struct': 'CpuInstanceProperties', - 'data': { '*node': 'int', - '*socket': 'int', - '*core': 'int', - '*thread': 'int' + 'data': { '*node-id': 'int', + '*socket-id': 'int', + '*core-id': 'int', + '*thread-id': 'int' } } diff --git a/qemu-img.c b/qemu-img.c index 14e2661a5c..3322a1e5fc 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -32,6 +32,7 @@ #include "qemu/config-file.h" #include "qemu/option.h" #include "qemu/error-report.h" +#include "qemu/log.h" #include "qom/object_interfaces.h" #include "sysemu/sysemu.h" #include "sysemu/block-backend.h" @@ -39,6 +40,7 @@ #include "block/blockjob.h" #include "block/qapi.h" #include "crypto/init.h" +#include "trace/control.h" #include <getopt.h> #define QEMU_IMG_VERSION "qemu-img version " QEMU_VERSION QEMU_PKGVERSION \ @@ -91,9 +93,14 @@ static void QEMU_NORETURN help(void) { const char *help_msg = QEMU_IMG_VERSION - "usage: qemu-img command [command options]\n" + "usage: qemu-img [standard options] command [command options]\n" "QEMU disk image utility\n" "\n" + " '-h', '--help' display this help and exit\n" + " '-V', '--version' output version information and exit\n" + " '-T', '--trace' [[enable=]<pattern>][,events=<file>][,file=<file>]\n" + " specify tracing options\n" + "\n" "Command syntax:\n" #define DEF(option, callback, arg_string) \ " " arg_string "\n" @@ -3803,10 +3810,12 @@ int main(int argc, char **argv) const img_cmd_t *cmd; const char *cmdname; Error *local_error = NULL; + char *trace_file = NULL; int c; static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"version", no_argument, 0, 'v'}, + {"version", no_argument, 0, 'V'}, + {"trace", required_argument, NULL, 'T'}, {0, 0, 0, 0} }; @@ -3829,27 +3838,48 @@ int main(int argc, char **argv) if (argc < 2) { error_exit("Not enough arguments"); } - cmdname = argv[1]; qemu_add_opts(&qemu_object_opts); qemu_add_opts(&qemu_source_opts); + qemu_add_opts(&qemu_trace_opts); - /* find the command */ - for (cmd = img_cmds; cmd->name != NULL; cmd++) { - if (!strcmp(cmdname, cmd->name)) { - return cmd->handler(argc - 1, argv + 1); + while ((c = getopt_long(argc, argv, "+hVT:", long_options, NULL)) != -1) { + switch (c) { + case 'h': + help(); + return 0; + case 'V': + printf(QEMU_IMG_VERSION); + return 0; + case 'T': + g_free(trace_file); + trace_file = trace_opt_parse(optarg); + break; } } - c = getopt_long(argc, argv, "h", long_options, NULL); + cmdname = argv[optind]; - if (c == 'h') { - help(); - } - if (c == 'v') { - printf(QEMU_IMG_VERSION); + /* reset getopt_long scanning */ + argc -= optind; + if (argc < 1) { return 0; } + argv += optind; + optind = 1; + + if (!trace_init_backends()) { + exit(1); + } + trace_init_file(trace_file); + qemu_set_log(LOG_TRACE); + + /* find the command */ + for (cmd = img_cmds; cmd->name != NULL; cmd++) { + if (!strcmp(cmdname, cmd->name)) { + return cmd->handler(argc, argv); + } + } /* not found */ error_exit("Command not found: %s", cmdname); diff --git a/qemu-img.texi b/qemu-img.texi index 
cbe50e9b88..449a19c710 100644 --- a/qemu-img.texi +++ b/qemu-img.texi @@ -1,6 +1,6 @@ @example @c man begin SYNOPSIS -@command{qemu-img} @var{command} [@var{command} @var{options}] +@command{qemu-img} [@var{standard} @var{options}] @var{command} [@var{command} @var{options}] @c man end @end example @@ -16,6 +16,17 @@ inconsistent state. @c man begin OPTIONS +Standard options: +@table @option +@item -h, --help +Display this help and exit +@item -V, --version +Display version information and exit +@item -T, --trace [[enable=]@var{pattern}][,events=@var{file}][,file=@var{file}] +@findex --trace +@include qemu-option-trace.texi +@end table + The following commands are supported: @include qemu-img-cmds.texi @@ -18,6 +18,7 @@ #include "qemu/option.h" #include "qemu/config-file.h" #include "qemu/readline.h" +#include "qemu/log.h" #include "qapi/qmp/qstring.h" #include "qom/object_interfaces.h" #include "sysemu/block-backend.h" @@ -253,7 +254,9 @@ static void usage(const char *name) " -k, --native-aio use kernel AIO implementation (on Linux only)\n" " -t, --cache=MODE use the given cache mode for the image\n" " -d, --discard=MODE use the given discard mode for the image\n" -" -T, --trace FILE enable trace events listed in the given file\n" +" -T, --trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n" +" specify tracing options\n" +" see qemu-img(1) man page for full description\n" " -h, --help display this help and exit\n" " -V, --version output version information and exit\n" "\n" @@ -458,6 +461,7 @@ int main(int argc, char **argv) Error *local_error = NULL; QDict *opts = NULL; const char *format = NULL; + char *trace_file = NULL; #ifdef CONFIG_POSIX signal(SIGPIPE, SIG_IGN); @@ -470,6 +474,7 @@ int main(int argc, char **argv) module_call_init(MODULE_INIT_QOM); qemu_add_opts(&qemu_object_opts); + qemu_add_opts(&qemu_trace_opts); bdrv_init(); while ((c = getopt_long(argc, argv, sopt, lopt, &opt_index)) != -1) { @@ -509,9 +514,8 @@ int main(int argc, char **argv) } break; case 'T': - if (!trace_init_backends()) { - exit(1); /* error message will have been printed */ - } + g_free(trace_file); + trace_file = trace_opt_parse(optarg); break; case 'V': printf("%s version %s\n", progname, QEMU_VERSION); @@ -557,6 +561,12 @@ int main(int argc, char **argv) exit(1); } + if (!trace_init_backends()) { + exit(1); + } + trace_init_file(trace_file); + qemu_set_log(LOG_TRACE); + /* initialize commands */ qemuio_add_command(&quit_cmd); qemuio_add_command(&open_cmd); diff --git a/qemu-nbd.c b/qemu-nbd.c index 9519db324b..321f02bd15 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -27,12 +27,14 @@ #include "qemu/error-report.h" #include "qemu/config-file.h" #include "qemu/bswap.h" +#include "qemu/log.h" #include "block/snapshot.h" #include "qapi/util.h" #include "qapi/qmp/qstring.h" #include "qom/object_interfaces.h" #include "io/channel-socket.h" #include "crypto/init.h" +#include "trace/control.h" #include <getopt.h> #include <libgen.h> @@ -88,6 +90,8 @@ static void usage(const char *name) "General purpose options:\n" " --object type,id=ID,... 
define an object such as 'secret' for providing\n" " passwords and/or encryption keys\n" +" -T, --trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n" +" specify tracing options\n" #ifdef __linux__ "Kernel NBD client support:\n" " -c, --connect=DEV connect FILE to the local NBD device DEV\n" @@ -470,7 +474,7 @@ int main(int argc, char **argv) off_t fd_size; QemuOpts *sn_opts = NULL; const char *sn_id_or_name = NULL; - const char *sopt = "hVb:o:p:rsnP:c:dvk:e:f:tl:x:"; + const char *sopt = "hVb:o:p:rsnP:c:dvk:e:f:tl:x:T:"; struct option lopt[] = { { "help", no_argument, NULL, 'h' }, { "version", no_argument, NULL, 'V' }, @@ -498,6 +502,7 @@ int main(int argc, char **argv) { "export-name", required_argument, NULL, 'x' }, { "tls-creds", required_argument, NULL, QEMU_NBD_OPT_TLSCREDS }, { "image-opts", no_argument, NULL, QEMU_NBD_OPT_IMAGE_OPTS }, + { "trace", required_argument, NULL, 'T' }, { NULL, 0, NULL, 0 } }; int ch; @@ -518,6 +523,7 @@ int main(int argc, char **argv) const char *tlscredsid = NULL; bool imageOpts = false; bool writethrough = true; + char *trace_file = NULL; /* The client thread uses SIGTERM to interrupt the server. A signal * handler ensures that "qemu-nbd -v -c" exits with a nice status code. @@ -531,6 +537,7 @@ int main(int argc, char **argv) module_call_init(MODULE_INIT_QOM); qemu_add_opts(&qemu_object_opts); + qemu_add_opts(&qemu_trace_opts); qemu_init_exec_dir(argv[0]); while ((ch = getopt_long(argc, argv, sopt, lopt, &opt_ind)) != -1) { @@ -703,6 +710,10 @@ int main(int argc, char **argv) case QEMU_NBD_OPT_IMAGE_OPTS: imageOpts = true; break; + case 'T': + g_free(trace_file); + trace_file = trace_opt_parse(optarg); + break; } } @@ -718,6 +729,12 @@ int main(int argc, char **argv) exit(EXIT_FAILURE); } + if (!trace_init_backends()) { + exit(1); + } + trace_init_file(trace_file); + qemu_set_log(LOG_TRACE); + if (tlscredsid) { if (sockpath) { error_report("TLS is only supported with IPv4/IPv6"); diff --git a/qemu-nbd.texi b/qemu-nbd.texi index 9f23343450..91ebf04b5b 100644 --- a/qemu-nbd.texi +++ b/qemu-nbd.texi @@ -92,6 +92,9 @@ Display extra debugging information Display this help and exit @item -V, --version Display version information and exit +@item -T, --trace [[enable=]@var{pattern}][,events=@var{file}][,file=@var{file}] +@findex --trace +@include qemu-option-trace.texi @end table @c man end diff --git a/qemu-option-trace.texi b/qemu-option-trace.texi new file mode 100644 index 0000000000..693ab5a3e1 --- /dev/null +++ b/qemu-option-trace.texi @@ -0,0 +1,25 @@ +Specify tracing options. + +@table @option +@item [enable=]@var{pattern} +Immediately enable events matching @var{pattern}. +The file must contain one event name (as listed in the @file{trace-events-all} +file) per line; globbing patterns are accepted too. This option is only +available if QEMU has been compiled with the @var{simple}, @var{stderr} +or @var{ftrace} tracing backend. To specify multiple events or patterns, +specify the @option{-trace} option multiple times. + +Use @code{-trace help} to print a list of names of trace points. + +@item events=@var{file} +Immediately enable events listed in @var{file}. +The file must contain one event name (as listed in the @file{trace-events-all} +file) per line; globbing patterns are accepted too. This option is only +available if QEMU has been compiled with the @var{simple}, @var{stderr} or +@var{ftrace} tracing backend. + +@item file=@var{file} +Log output traces to @var{file}. 
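For example (an illustrative command line only; pick real event names from @file{trace-events-all}), @code{--trace enable=paio_*,file=trace.log} sends the matching trace records to @file{trace.log}.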
+This option is only available if QEMU has been compiled with +the @var{simple} tracing backend. +@end table diff --git a/qemu-options.hx b/qemu-options.hx index 44c658fd4e..a95a936e55 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -3669,34 +3669,9 @@ DEF("trace", HAS_ARG, QEMU_OPTION_trace, STEXI HXCOMM This line is not accurate, as some sub-options are backend-specific but HXCOMM HX does not support conditional compilation of text. -@item -trace [events=@var{file}][,file=@var{file}] +@item -trace [[enable=]@var{pattern}][,events=@var{file}][,file=@var{file}] @findex -trace - -Specify tracing options. - -@table @option -@item [enable=]@var{pattern} -Immediately enable events matching @var{pattern}. -The file must contain one event name (as listed in the @file{trace-events-all} -file) per line; globbing patterns are accepted too. This option is only -available if QEMU has been compiled with the @var{simple}, @var{stderr} -or @var{ftrace} tracing backend. To specify multiple events or patterns, -specify the @option{-trace} option multiple times. - -Use @code{-trace help} to print a list of names of trace points. - -@item events=@var{file} -Immediately enable events listed in @var{file}. -The file must contain one event name (as listed in the @file{trace-events-all} -file) per line; globbing patterns are accepted too. This option is only -available if QEMU has been compiled with the @var{simple}, @var{stderr} or -@var{ftrace} tracing backend. - -@item file=@var{file} -Log output traces to @var{file}. -This option is only available if QEMU has been compiled with -the @var{simple} tracing backend. -@end table +@include qemu-option-trace.texi ETEXI HXCOMM Internal use diff --git a/target-alpha/translate.c b/target-alpha/translate.c index 243567b8fc..0ea0e6e146 100644 --- a/target-alpha/translate.c +++ b/target-alpha/translate.c @@ -449,10 +449,13 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, static bool in_superpage(DisasContext *ctx, int64_t addr) { +#ifndef CONFIG_USER_ONLY return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0 - && addr < 0 - && ((addr >> 41) & 3) == 2 - && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63); + && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1 + && ((addr >> 41) & 3) == 2); +#else + return false; +#endif } static bool use_goto_tb(DisasContext *ctx, uint64_t dest) diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c index 7c63556697..41e48a41b4 100644 --- a/target-arm/helper-a64.c +++ b/target-arm/helper-a64.c @@ -344,12 +344,12 @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp) if (float32_is_any_nan(a)) { float32 nan = a; - if (float32_is_signaling_nan(a)) { + if (float32_is_signaling_nan(a, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float32_maybe_silence_nan(a); + nan = float32_maybe_silence_nan(a, fpst); } if (fpst->default_nan_mode) { - nan = float32_default_nan; + nan = float32_default_nan(fpst); } return nan; } @@ -373,12 +373,12 @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp) if (float64_is_any_nan(a)) { float64 nan = a; - if (float64_is_signaling_nan(a)) { + if (float64_is_signaling_nan(a, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float64_maybe_silence_nan(a); + nan = float64_maybe_silence_nan(a, fpst); } if (fpst->default_nan_mode) { - nan = float64_default_nan; + nan = float64_default_nan(fpst); } return nan; } @@ -407,7 +407,7 @@ float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env) set_float_rounding_mode(float_round_to_zero, &tstat); set_float_exception_flags(0, 
&tstat); r = float64_to_float32(a, &tstat); - r = float32_maybe_silence_nan(r); + r = float32_maybe_silence_nan(r, &tstat); exflags = get_float_exception_flags(&tstat); if (exflags & float_flag_inexact) { r = make_float32(float32_val(r) | 1); diff --git a/target-arm/helper.c b/target-arm/helper.c index 35ff7722cb..1f9cdacc59 100644 --- a/target-arm/helper.c +++ b/target-arm/helper.c @@ -8678,7 +8678,7 @@ float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) /* ARM requires that S<->D conversion of any kind of NaN generates * a quiet NaN by forcing the most significant frac bit to 1. */ - return float64_maybe_silence_nan(r); + return float64_maybe_silence_nan(r, &env->vfp.fp_status); } float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) @@ -8687,7 +8687,7 @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) /* ARM requires that S<->D conversion of any kind of NaN generates * a quiet NaN by forcing the most significant frac bit to 1. */ - return float32_maybe_silence_nan(r); + return float32_maybe_silence_nan(r, &env->vfp.fp_status); } /* VFP3 fixed point conversion. */ @@ -8786,7 +8786,7 @@ static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s) int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; float32 r = float16_to_float32(make_float16(a), ieee, s); if (ieee) { - return float32_maybe_silence_nan(r); + return float32_maybe_silence_nan(r, s); } return r; } @@ -8796,7 +8796,7 @@ static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s) int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; float16 r = float32_to_float16(a, ieee, s); if (ieee) { - r = float16_maybe_silence_nan(r); + r = float16_maybe_silence_nan(r, s); } return float16_val(r); } @@ -8826,7 +8826,7 @@ float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env) int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status); if (ieee) { - return float64_maybe_silence_nan(r); + return float64_maybe_silence_nan(r, &env->vfp.fp_status); } return r; } @@ -8836,7 +8836,7 @@ uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env) int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status); if (ieee) { - r = float16_maybe_silence_nan(r); + r = float16_maybe_silence_nan(r, &env->vfp.fp_status); } return float16_val(r); } @@ -8986,12 +8986,12 @@ float32 HELPER(recpe_f32)(float32 input, void *fpstp) if (float32_is_any_nan(f32)) { float32 nan = f32; - if (float32_is_signaling_nan(f32)) { + if (float32_is_signaling_nan(f32, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float32_maybe_silence_nan(f32); + nan = float32_maybe_silence_nan(f32, fpst); } if (fpst->default_nan_mode) { - nan = float32_default_nan; + nan = float32_default_nan(fpst); } return nan; } else if (float32_is_infinity(f32)) { @@ -9040,12 +9040,12 @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp) /* Deal with any special cases */ if (float64_is_any_nan(f64)) { float64 nan = f64; - if (float64_is_signaling_nan(f64)) { + if (float64_is_signaling_nan(f64, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float64_maybe_silence_nan(f64); + nan = float64_maybe_silence_nan(f64, fpst); } if (fpst->default_nan_mode) { - nan = float64_default_nan; + nan = float64_default_nan(fpst); } return nan; } else if (float64_is_infinity(f64)) { @@ -9147,12 +9147,12 @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) if 
(float32_is_any_nan(f32)) { float32 nan = f32; - if (float32_is_signaling_nan(f32)) { + if (float32_is_signaling_nan(f32, s)) { float_raise(float_flag_invalid, s); - nan = float32_maybe_silence_nan(f32); + nan = float32_maybe_silence_nan(f32, s); } if (s->default_nan_mode) { - nan = float32_default_nan; + nan = float32_default_nan(s); } return nan; } else if (float32_is_zero(f32)) { @@ -9160,7 +9160,7 @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) return float32_set_sign(float32_infinity, float32_is_neg(f32)); } else if (float32_is_neg(f32)) { float_raise(float_flag_invalid, s); - return float32_default_nan; + return float32_default_nan(s); } else if (float32_is_infinity(f32)) { return float32_zero; } @@ -9211,12 +9211,12 @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) if (float64_is_any_nan(f64)) { float64 nan = f64; - if (float64_is_signaling_nan(f64)) { + if (float64_is_signaling_nan(f64, s)) { float_raise(float_flag_invalid, s); - nan = float64_maybe_silence_nan(f64); + nan = float64_maybe_silence_nan(f64, s); } if (s->default_nan_mode) { - nan = float64_default_nan; + nan = float64_default_nan(s); } return nan; } else if (float64_is_zero(f64)) { @@ -9224,7 +9224,7 @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) return float64_set_sign(float64_infinity, float64_is_neg(f64)); } else if (float64_is_neg(f64)) { float_raise(float_flag_invalid, s); - return float64_default_nan; + return float64_default_nan(s); } else if (float64_is_infinity(f64)) { return float64_zero; } diff --git a/target-m68k/helper.c b/target-m68k/helper.c index 427cbedfd5..f52d0e3036 100644 --- a/target-m68k/helper.c +++ b/target-m68k/helper.c @@ -558,10 +558,10 @@ float64 HELPER(sub_cmp_f64)(CPUM68KState *env, float64 a, float64 b) /* ??? Should flush denormals to zero. */ float64 res; res = float64_sub(a, b, &env->fp_status); - if (float64_is_quiet_nan(res)) { + if (float64_is_quiet_nan(res, &env->fp_status)) { /* +/-inf compares equal against itself, but sub returns nan. */ - if (!float64_is_quiet_nan(a) - && !float64_is_quiet_nan(b)) { + if (!float64_is_quiet_nan(a, &env->fp_status) + && !float64_is_quiet_nan(b, &env->fp_status)) { res = float64_zero; if (float64_lt_quiet(a, res, &env->fp_status)) res = float64_chs(res); diff --git a/target-microblaze/op_helper.c b/target-microblaze/op_helper.c index 0533939389..74a043c2ac 100644 --- a/target-microblaze/op_helper.c +++ b/target-microblaze/op_helper.c @@ -288,12 +288,14 @@ uint32_t helper_fcmp_un(CPUMBState *env, uint32_t a, uint32_t b) fa.l = a; fb.l = b; - if (float32_is_signaling_nan(fa.f) || float32_is_signaling_nan(fb.f)) { + if (float32_is_signaling_nan(fa.f, &env->fp_status) || + float32_is_signaling_nan(fb.f, &env->fp_status)) { update_fpu_flags(env, float_flag_invalid); r = 1; } - if (float32_is_quiet_nan(fa.f) || float32_is_quiet_nan(fb.f)) { + if (float32_is_quiet_nan(fa.f, &env->fp_status) || + float32_is_quiet_nan(fb.f, &env->fp_status)) { r = 1; } diff --git a/target-mips/cpu.h b/target-mips/cpu.h index 30b4712c71..1037f9b7eb 100644 --- a/target-mips/cpu.h +++ b/target-mips/cpu.h @@ -111,7 +111,9 @@ struct CPUMIPSFPUContext { #define FCR0_PRID 8 #define FCR0_REV 0 /* fcsr */ + uint32_t fcr31_rw_bitmask; uint32_t fcr31; +#define FCR31_FS 24 #define FCR31_ABS2008 19 #define FCR31_NAN2008 18 #define SET_FP_COND(num,env) do { ((env).fcr31) |= ((num) ? 
(1 << ((num) + 24)) : (1 << 23)); } while(0) @@ -823,6 +825,11 @@ void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level); /* helper.c */ int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, int mmu_idx); + +/* op_helper.c */ +uint32_t float_class_s(uint32_t arg, float_status *fst); +uint64_t float_class_d(uint64_t arg, float_status *fst); + #if !defined(CONFIG_USER_ONLY) void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra); hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address, @@ -842,14 +849,21 @@ static inline void restore_rounding_mode(CPUMIPSState *env) static inline void restore_flush_mode(CPUMIPSState *env) { - set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, + set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0, &env->active_fpu.fp_status); } +static inline void restore_snan_bit_mode(CPUMIPSState *env) +{ + set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0, + &env->active_fpu.fp_status); +} + static inline void restore_fp_status(CPUMIPSState *env) { restore_rounding_mode(env); restore_flush_mode(env); + restore_snan_bit_mode(env); } static inline void restore_msa_fp_status(CPUMIPSState *env) diff --git a/target-mips/gdbstub.c b/target-mips/gdbstub.c index 2707ff5c2b..7c682289c2 100644 --- a/target-mips/gdbstub.c +++ b/target-mips/gdbstub.c @@ -90,11 +90,9 @@ int mips_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) if (env->CP0_Config1 & (1 << CP0C1_FP) && n >= 38 && n < 72) { switch (n) { case 70: - env->active_fpu.fcr31 = tmp & 0xFF83FFFF; - /* set rounding mode */ - restore_rounding_mode(env); - /* set flush-to-zero mode */ - restore_flush_mode(env); + env->active_fpu.fcr31 = (tmp & env->active_fpu.fcr31_rw_bitmask) | + (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask)); + restore_fp_status(env); break; case 71: /* FIR is read-only. Ignore writes. 
*/ diff --git a/target-mips/helper.h b/target-mips/helper.h index 594341d258..666936c81b 100644 --- a/target-mips/helper.h +++ b/target-mips/helper.h @@ -207,8 +207,6 @@ DEF_HELPER_4(ctc1, void, env, tl, i32, i32) DEF_HELPER_2(float_cvtd_s, i64, env, i32) DEF_HELPER_2(float_cvtd_w, i64, env, i32) DEF_HELPER_2(float_cvtd_l, i64, env, i64) -DEF_HELPER_2(float_cvtl_d, i64, env, i64) -DEF_HELPER_2(float_cvtl_s, i64, env, i32) DEF_HELPER_2(float_cvtps_pw, i64, env, i64) DEF_HELPER_2(float_cvtpw_ps, i64, env, i64) DEF_HELPER_2(float_cvts_d, i32, env, i64) @@ -216,14 +214,12 @@ DEF_HELPER_2(float_cvts_w, i32, env, i32) DEF_HELPER_2(float_cvts_l, i32, env, i64) DEF_HELPER_2(float_cvts_pl, i32, env, i32) DEF_HELPER_2(float_cvts_pu, i32, env, i32) -DEF_HELPER_2(float_cvtw_s, i32, env, i32) -DEF_HELPER_2(float_cvtw_d, i32, env, i64) DEF_HELPER_3(float_addr_ps, i64, env, i64, i64) DEF_HELPER_3(float_mulr_ps, i64, env, i64, i64) -DEF_HELPER_FLAGS_1(float_class_s, TCG_CALL_NO_RWG_SE, i32, i32) -DEF_HELPER_FLAGS_1(float_class_d, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32) +DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64) #define FOP_PROTO(op) \ DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \ @@ -242,14 +238,20 @@ FOP_PROTO(mina) #undef FOP_PROTO #define FOP_PROTO(op) \ -DEF_HELPER_2(float_ ## op ## l_s, i64, env, i32) \ -DEF_HELPER_2(float_ ## op ## l_d, i64, env, i64) \ -DEF_HELPER_2(float_ ## op ## w_s, i32, env, i32) \ -DEF_HELPER_2(float_ ## op ## w_d, i32, env, i64) +DEF_HELPER_2(float_ ## op ## _l_s, i64, env, i32) \ +DEF_HELPER_2(float_ ## op ## _l_d, i64, env, i64) \ +DEF_HELPER_2(float_ ## op ## _w_s, i32, env, i32) \ +DEF_HELPER_2(float_ ## op ## _w_d, i32, env, i64) +FOP_PROTO(cvt) FOP_PROTO(round) FOP_PROTO(trunc) FOP_PROTO(ceil) FOP_PROTO(floor) +FOP_PROTO(cvt_2008) +FOP_PROTO(round_2008) +FOP_PROTO(trunc_2008) +FOP_PROTO(ceil_2008) +FOP_PROTO(floor_2008) #undef FOP_PROTO #define FOP_PROTO(op) \ diff --git a/target-mips/msa_helper.c b/target-mips/msa_helper.c index ae92fcbe28..1fdb0d9792 100644 --- a/target-mips/msa_helper.c +++ b/target-mips/msa_helper.c @@ -1495,11 +1495,11 @@ MSA_UNOP_DF(pcnt) #define FLOAT_ONE32 make_float32(0x3f8 << 20) #define FLOAT_ONE64 make_float64(0x3ffULL << 52) -#define FLOAT_SNAN16 (float16_default_nan ^ 0x0220) +#define FLOAT_SNAN16(s) (float16_default_nan(s) ^ 0x0220) /* 0x7c20 */ -#define FLOAT_SNAN32 (float32_default_nan ^ 0x00400020) +#define FLOAT_SNAN32(s) (float32_default_nan(s) ^ 0x00400020) /* 0x7f800020 */ -#define FLOAT_SNAN64 (float64_default_nan ^ 0x0008000000000020ULL) +#define FLOAT_SNAN64(s) (float64_default_nan(s) ^ 0x0008000000000020ULL) /* 0x7ff0000000000020 */ static inline void clear_msacsr_cause(CPUMIPSState *env) @@ -1612,7 +1612,7 @@ static inline float16 float16_from_float32(int32_t a, flag ieee, float16 f_val; f_val = float32_to_float16((float32)a, ieee, status); - f_val = float16_maybe_silence_nan(f_val); + f_val = float16_maybe_silence_nan(f_val, status); return a < 0 ? (f_val | (1 << 15)) : f_val; } @@ -1622,7 +1622,7 @@ static inline float32 float32_from_float64(int64_t a, float_status *status) float32 f_val; f_val = float64_to_float32((float64)a, status); - f_val = float32_maybe_silence_nan(f_val); + f_val = float32_maybe_silence_nan(f_val, status); return a < 0 ? 
(f_val | (1 << 31)) : f_val; } @@ -1633,7 +1633,7 @@ static inline float32 float32_from_float16(int16_t a, flag ieee, float32 f_val; f_val = float16_to_float32((float16)a, ieee, status); - f_val = float32_maybe_silence_nan(f_val); + f_val = float32_maybe_silence_nan(f_val, status); return a < 0 ? (f_val | (1 << 31)) : f_val; } @@ -1643,7 +1643,7 @@ static inline float64 float64_from_float32(int32_t a, float_status *status) float64 f_val; f_val = float32_to_float64((float64)a, status); - f_val = float64_maybe_silence_nan(f_val); + f_val = float64_maybe_silence_nan(f_val, status); return a < 0 ? (f_val | (1ULL << 63)) : f_val; } @@ -1789,7 +1789,7 @@ static inline int32_t float64_to_q32(float64 a, float_status *status) c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) @@ -2388,7 +2388,7 @@ void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) @@ -2524,7 +2524,7 @@ void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd, c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) @@ -2643,7 +2643,7 @@ void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) @@ -2694,7 +2694,7 @@ void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd, c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## XBITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## XBITS(status) >> 6) << 6) | c; \ } \ } while (0) @@ -2731,9 +2731,9 @@ void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, msa_move_v(pwd, pwx); } -#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS) \ - !float ## BITS ## _is_any_nan(ARG1) \ - && float ## BITS ## _is_quiet_nan(ARG2) +#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS, STATUS) \ + !float ## BITS ## _is_any_nan(ARG1) \ + && float ## BITS ## _is_quiet_nan(ARG2, STATUS) #define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \ do { \ @@ -2745,18 +2745,18 @@ void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, c = update_msacsr(env, 0, 0); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) -#define FMAXMIN_A(F, G, X, _S, _T, BITS) \ +#define FMAXMIN_A(F, G, X, _S, _T, BITS, STATUS) \ do { \ uint## BITS ##_t S = _S, T = _T; \ uint## BITS ##_t as, at, xs, xt, xd; \ - if (NUMBER_QNAN_PAIR(S, T, BITS)) { \ + if (NUMBER_QNAN_PAIR(S, T, BITS, STATUS)) { \ T = S; \ } \ - else if (NUMBER_QNAN_PAIR(T, S, BITS)) { \ + else if (NUMBER_QNAN_PAIR(T, S, BITS, STATUS)) { \ S = T; \ } \ as = float## BITS ##_abs(S); \ @@ -2770,6 +2770,7 @@ void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { + float_status *status = 
&env->active_tc.msa_fp_status; wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); @@ -2781,9 +2782,9 @@ void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) { + if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32); - } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) { + } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32); } else { MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32); @@ -2792,9 +2793,9 @@ void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) { + if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64); - } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) { + } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64); } else { MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64); @@ -2813,6 +2814,7 @@ void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { + float_status *status = &env->active_tc.msa_fp_status; wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); @@ -2824,12 +2826,12 @@ void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32); + FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32, status); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64); + FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64, status); } break; default: @@ -2844,6 +2846,7 @@ void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { + float_status *status = &env->active_tc.msa_fp_status; wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); @@ -2855,9 +2858,9 @@ void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) { + if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32); - } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) { + } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32); } else { MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32); @@ -2866,9 +2869,9 @@ void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) { + if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64); - } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) { + } else if (NUMBER_QNAN_PAIR(pwt->d[i], 
pws->d[i], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64); } else { MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64); @@ -2887,6 +2890,7 @@ void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { + float_status *status = &env->active_tc.msa_fp_status; wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); @@ -2898,12 +2902,12 @@ void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32); + FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32, status); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64); + FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64, status); } break; default: @@ -2918,16 +2922,18 @@ void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { + float_status* status = &env->active_tc.msa_fp_status; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); if (df == DF_WORD) { - pwd->w[0] = helper_float_class_s(pws->w[0]); - pwd->w[1] = helper_float_class_s(pws->w[1]); - pwd->w[2] = helper_float_class_s(pws->w[2]); - pwd->w[3] = helper_float_class_s(pws->w[3]); + pwd->w[0] = float_class_s(pws->w[0], status); + pwd->w[1] = float_class_s(pws->w[1], status); + pwd->w[2] = float_class_s(pws->w[2], status); + pwd->w[3] = float_class_s(pws->w[3], status); } else { - pwd->d[0] = helper_float_class_d(pws->d[0]); - pwd->d[1] = helper_float_class_d(pws->d[1]); + pwd->d[0] = float_class_d(pws->d[0], status); + pwd->d[1] = float_class_d(pws->d[1], status); } } @@ -2941,7 +2947,7 @@ void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df, c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } else if (float ## BITS ## _is_any_nan(ARG)) { \ DEST = 0; \ } \ @@ -3045,12 +3051,12 @@ void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, set_float_exception_flags(0, status); \ DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status); \ c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \ - float ## BITS ## _is_quiet_nan(DEST) ? \ + float ## BITS ## _is_quiet_nan(DEST, status) ? 
\ 0 : RECIPROCAL_INEXACT, \ IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) @@ -3166,7 +3172,7 @@ void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd, c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c index 1ae1dda0af..69daade24e 100644 --- a/target-mips/op_helper.c +++ b/target-mips/op_helper.c @@ -2447,6 +2447,7 @@ void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr, #define FLOAT_TWO32 make_float32(1 << 30) #define FLOAT_TWO64 make_float64(1ULL << 62) + #define FP_TO_INT32_OVERFLOW 0x7fffffff #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL @@ -2574,21 +2575,13 @@ void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt) ((arg1 & 0x4) << 22); break; case 31: - if (env->insn_flags & ISA_MIPS32R6) { - uint32_t mask = 0xfefc0000; - env->active_fpu.fcr31 = (arg1 & ~mask) | - (env->active_fpu.fcr31 & mask); - } else if (!(arg1 & 0x007c0000)) { - env->active_fpu.fcr31 = arg1; - } + env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) | + (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask)); break; default: return; } - /* set rounding mode */ - restore_rounding_mode(env); - /* set flush-to-zero mode */ - restore_flush_mode(env); + restore_fp_status(env); set_float_exception_flags(0, &env->active_fpu.fp_status); if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31)) do_raise_exception(env, EXCP_FPE, GETPC()); @@ -2659,7 +2652,7 @@ uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0) uint64_t fdt2; fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status); - fdt2 = float64_maybe_silence_nan(fdt2); + fdt2 = float64_maybe_silence_nan(fdt2, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fdt2; } @@ -2682,7 +2675,7 @@ uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0) return fdt2; } -uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0) +uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; @@ -2695,7 +2688,7 @@ uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0) return dt2; } -uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0) +uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; @@ -2749,7 +2742,7 @@ uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0) uint32_t fst2; fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status); - fst2 = float32_maybe_silence_nan(fst2); + fst2 = float32_maybe_silence_nan(fst2, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fst2; } @@ -2790,7 +2783,7 @@ uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0) return wt2; } -uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0) +uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; @@ -2803,7 +2796,7 @@ uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0) return wt2; } -uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0) +uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; @@ -2816,7 +2809,7 @@ uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0) return wt2; } -uint64_t 
helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0) +uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; @@ -2831,7 +2824,7 @@ uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0) return dt2; } -uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0) +uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; @@ -2846,7 +2839,7 @@ uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0) return dt2; } -uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0) +uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; @@ -2861,7 +2854,7 @@ uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0) return wt2; } -uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0) +uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; @@ -2876,7 +2869,7 @@ uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0) return wt2; } -uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0) +uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; @@ -2889,7 +2882,7 @@ uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0) return dt2; } -uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0) +uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; @@ -2902,7 +2895,7 @@ uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0) return dt2; } -uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0) +uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; @@ -2915,7 +2908,7 @@ uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0) return wt2; } -uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0) +uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; @@ -2928,7 +2921,7 @@ uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0) return wt2; } -uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0) +uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; @@ -2943,7 +2936,7 @@ uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0) return dt2; } -uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0) +uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; @@ -2958,7 +2951,7 @@ uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0) return dt2; } -uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0) +uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; @@ -2973,7 +2966,7 @@ uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0) return wt2; } -uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0) +uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; @@ -2988,7 +2981,7 @@ uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0) return wt2; } -uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0) +uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; @@ -3003,7 +2996,7 @@ uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0) return dt2; } -uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0) +uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; @@ -3018,7 +3011,7 @@ uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0) return dt2; } -uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0) +uint32_t 
helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; @@ -3033,7 +3026,7 @@ uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0) return wt2; } -uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0) +uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; @@ -3048,6 +3041,334 @@ uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0) return wt2; } +uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + 
update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & 
float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + /* unary operations, not modifying fp status */ #define FLOAT_UNOP(name) \ uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \ @@ -3199,11 +3520,12 @@ FLOAT_RINT(rint_d, 64) #define FLOAT_CLASS_POSITIVE_ZERO 0x200 #define FLOAT_CLASS(name, bits) \ -uint ## bits ## _t helper_float_ ## name (uint ## bits ## _t arg) \ +uint ## bits ## _t float_ ## name (uint ## bits ## _t arg, \ + float_status *status) \ { \ - if (float ## bits ## _is_signaling_nan(arg)) { \ + if (float ## bits ## _is_signaling_nan(arg, status)) { \ return FLOAT_CLASS_SIGNALING_NAN; \ - } else if (float ## bits ## _is_quiet_nan(arg)) { \ + } else if (float ## bits ## _is_quiet_nan(arg, status)) { \ return FLOAT_CLASS_QUIET_NAN; \ } else if (float ## bits ## _is_neg(arg)) { \ if (float ## bits ## _is_infinity(arg)) { \ @@ -3226,6 +3548,12 @@ uint ## bits ## _t helper_float_ ## name (uint ## bits ## _t arg) \ return FLOAT_CLASS_POSITIVE_NORMAL; \ } \ } \ +} \ + \ +uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \ + uint ## bits ## _t arg) \ +{ \ + return float_ ## name(arg, &env->active_fpu.fp_status); \ } FLOAT_CLASS(class_s, 32) diff --git a/target-mips/translate.c b/target-mips/translate.c index aaa1d02683..cc321e9cce 100644 --- a/target-mips/translate.c +++ b/target-mips/translate.c @@ -1435,6 +1435,8 @@ typedef struct DisasContext { bool vp; bool cmgcr; bool mrp; + bool nan2008; + bool abs2008; } DisasContext; enum { @@ -8890,7 +8892,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_abs_s(fp0, fp0); + if (ctx->abs2008) { + tcg_gen_andi_i32(fp0, fp0, 0x7fffffffUL); + } else { + gen_helper_float_abs_s(fp0, fp0); + } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(fp0); } @@ -8909,7 +8915,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_chs_s(fp0, fp0); + if (ctx->abs2008) { + tcg_gen_xori_i32(fp0, fp0, 1UL << 31); + } else { + gen_helper_float_chs_s(fp0, fp0); + } 
gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(fp0); } @@ -8921,7 +8931,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_roundl_s(fp64, cpu_env, fp32); + if (ctx->nan2008) { + gen_helper_float_round_2008_l_s(fp64, cpu_env, fp32); + } else { + gen_helper_float_round_l_s(fp64, cpu_env, fp32); + } tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(fp64); @@ -8934,7 +8948,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_truncl_s(fp64, cpu_env, fp32); + if (ctx->nan2008) { + gen_helper_float_trunc_2008_l_s(fp64, cpu_env, fp32); + } else { + gen_helper_float_trunc_l_s(fp64, cpu_env, fp32); + } tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(fp64); @@ -8947,7 +8965,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_ceill_s(fp64, cpu_env, fp32); + if (ctx->nan2008) { + gen_helper_float_ceil_2008_l_s(fp64, cpu_env, fp32); + } else { + gen_helper_float_ceil_l_s(fp64, cpu_env, fp32); + } tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(fp64); @@ -8960,7 +8982,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_floorl_s(fp64, cpu_env, fp32); + if (ctx->nan2008) { + gen_helper_float_floor_2008_l_s(fp64, cpu_env, fp32); + } else { + gen_helper_float_floor_l_s(fp64, cpu_env, fp32); + } tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(fp64); @@ -8971,7 +8997,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_roundw_s(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_round_2008_w_s(fp0, cpu_env, fp0); + } else { + gen_helper_float_round_w_s(fp0, cpu_env, fp0); + } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(fp0); } @@ -8981,7 +9011,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_truncw_s(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_trunc_2008_w_s(fp0, cpu_env, fp0); + } else { + gen_helper_float_trunc_w_s(fp0, cpu_env, fp0); + } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(fp0); } @@ -8991,7 +9025,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_ceilw_s(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_ceil_2008_w_s(fp0, cpu_env, fp0); + } else { + gen_helper_float_ceil_w_s(fp0, cpu_env, fp0); + } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(fp0); } @@ -9001,7 +9039,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_floorw_s(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_floor_2008_w_s(fp0, cpu_env, fp0); + } else { + gen_helper_float_floor_w_s(fp0, cpu_env, fp0); + } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(fp0); } @@ -9121,7 +9163,7 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, { TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_class_s(fp0, fp0); + gen_helper_float_class_s(fp0, 
cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(fp0); } @@ -9250,7 +9292,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_cvtw_s(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_cvt_2008_w_s(fp0, cpu_env, fp0); + } else { + gen_helper_float_cvt_w_s(fp0, cpu_env, fp0); + } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(fp0); } @@ -9262,7 +9308,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_cvtl_s(fp64, cpu_env, fp32); + if (ctx->nan2008) { + gen_helper_float_cvt_2008_l_s(fp64, cpu_env, fp32); + } else { + gen_helper_float_cvt_l_s(fp64, cpu_env, fp32); + } tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(fp64); @@ -9380,7 +9430,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_abs_d(fp0, fp0); + if (ctx->abs2008) { + tcg_gen_andi_i64(fp0, fp0, 0x7fffffffffffffffULL); + } else { + gen_helper_float_abs_d(fp0, fp0); + } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -9401,7 +9455,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_chs_d(fp0, fp0); + if (ctx->abs2008) { + tcg_gen_xori_i64(fp0, fp0, 1ULL << 63); + } else { + gen_helper_float_chs_d(fp0, fp0); + } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -9412,7 +9470,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_roundl_d(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_round_2008_l_d(fp0, cpu_env, fp0); + } else { + gen_helper_float_round_l_d(fp0, cpu_env, fp0); + } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -9423,7 +9485,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_truncl_d(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_trunc_2008_l_d(fp0, cpu_env, fp0); + } else { + gen_helper_float_trunc_l_d(fp0, cpu_env, fp0); + } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -9434,7 +9500,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_ceill_d(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_ceil_2008_l_d(fp0, cpu_env, fp0); + } else { + gen_helper_float_ceil_l_d(fp0, cpu_env, fp0); + } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -9445,7 +9515,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_floorl_d(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_floor_2008_l_d(fp0, cpu_env, fp0); + } else { + gen_helper_float_floor_l_d(fp0, cpu_env, fp0); + } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -9457,7 +9531,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_roundw_d(fp32, cpu_env, fp64); + if (ctx->nan2008) { + gen_helper_float_round_2008_w_d(fp32, cpu_env, fp64); + } else { + gen_helper_float_round_w_d(fp32, cpu_env, fp64); + } tcg_temp_free_i64(fp64); 
gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(fp32); @@ -9470,7 +9548,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_truncw_d(fp32, cpu_env, fp64); + if (ctx->nan2008) { + gen_helper_float_trunc_2008_w_d(fp32, cpu_env, fp64); + } else { + gen_helper_float_trunc_w_d(fp32, cpu_env, fp64); + } tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(fp32); @@ -9483,7 +9565,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_ceilw_d(fp32, cpu_env, fp64); + if (ctx->nan2008) { + gen_helper_float_ceil_2008_w_d(fp32, cpu_env, fp64); + } else { + gen_helper_float_ceil_w_d(fp32, cpu_env, fp64); + } tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(fp32); @@ -9496,7 +9582,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_floorw_d(fp32, cpu_env, fp64); + if (ctx->nan2008) { + gen_helper_float_floor_2008_w_d(fp32, cpu_env, fp64); + } else { + gen_helper_float_floor_w_d(fp32, cpu_env, fp64); + } tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(fp32); @@ -9619,7 +9709,7 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, { TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_class_d(fp0, fp0); + gen_helper_float_class_d(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -9769,7 +9859,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp64 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_cvtw_d(fp32, cpu_env, fp64); + if (ctx->nan2008) { + gen_helper_float_cvt_2008_w_d(fp32, cpu_env, fp64); + } else { + gen_helper_float_cvt_w_d(fp32, cpu_env, fp64); + } tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(fp32); @@ -9781,7 +9875,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_cvtl_d(fp0, cpu_env, fp0); + if (ctx->nan2008) { + gen_helper_float_cvt_2008_l_d(fp0, cpu_env, fp0); + } else { + gen_helper_float_cvt_l_d(fp0, cpu_env, fp0); + } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); } @@ -19786,6 +19884,8 @@ void gen_intermediate_code(CPUMIPSState *env, struct TranslationBlock *tb) (env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)); ctx.vp = (env->CP0_Config5 >> CP0C5_VP) & 1; ctx.mrp = (env->CP0_Config5 >> CP0C5_MRP) & 1; + ctx.nan2008 = (env->active_fpu.fcr31 >> FCR31_NAN2008) & 1; + ctx.abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1; restore_cpu_state(env, &ctx); #ifdef CONFIG_USER_ONLY ctx.mem_idx = MIPS_HFLAG_UM; @@ -20141,6 +20241,7 @@ void cpu_state_reset(CPUMIPSState *env) env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask; env->CP0_PageGrain = env->cpu_model->CP0_PageGrain; env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0; + env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask; env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31; env->msair = env->cpu_model->MSAIR; env->insn_flags = env->cpu_model->insn_flags; @@ -20251,8 +20352,7 @@ void cpu_state_reset(CPUMIPSState *env) } compute_hflags(env); - restore_rounding_mode(env); - restore_flush_mode(env); + restore_fp_status(env); restore_pamask(env); cs->exception_index = 
EXCP_NONE; diff --git a/target-mips/translate_init.c b/target-mips/translate_init.c index 5af077d0de..b10284cc5d 100644 --- a/target-mips/translate_init.c +++ b/target-mips/translate_init.c @@ -84,6 +84,7 @@ struct mips_def_t { int32_t CP0_TCStatus_rw_bitmask; int32_t CP0_SRSCtl; int32_t CP1_fcr0; + int32_t CP1_fcr31_rw_bitmask; int32_t CP1_fcr31; int32_t MSAIR; int32_t SEGBITS; @@ -273,6 +274,8 @@ static const mips_def_t mips_defs[] = .CP0_Status_rw_bitmask = 0x3678FF1F, .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MIPS16, @@ -303,6 +306,8 @@ static const mips_def_t mips_defs[] = (0xff << CP0TCSt_TASID), .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .CP0_SRSCtl = (0xf << CP0SRSCtl_HSS), .CP0_SRSConf0_rw_bitmask = 0x3fffffff, .CP0_SRSConf0 = (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) | @@ -343,6 +348,8 @@ static const mips_def_t mips_defs[] = .CP0_Status_rw_bitmask = 0x3778FF1F, .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSPR2, @@ -427,6 +434,7 @@ static const mips_def_t mips_defs[] = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 32, .PABITS = 40, .insn_flags = CPU_MIPS32R5 | ASE_MSA, @@ -465,6 +473,7 @@ static const mips_def_t mips_defs[] = (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), + .CP1_fcr31_rw_bitmask = 0x0103FFFF, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS, @@ -485,6 +494,8 @@ static const mips_def_t mips_defs[] = .CP0_Status_rw_bitmask = 0x3678FFFF, /* The R4000 has a full 64bit FPU but doesn't use the fcr0 bits. */ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0x0183FFFF, .SEGBITS = 40, .PABITS = 36, .insn_flags = CPU_MIPS3, @@ -503,6 +514,8 @@ static const mips_def_t mips_defs[] = .CP0_Status_rw_bitmask = 0x3678FFFF, /* The VR5432 has a full 64bit FPU but doesn't use the fcr0 bits. */ .CP1_fcr0 = (0x54 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 32, .insn_flags = CPU_VR54XX, @@ -548,6 +561,8 @@ static const mips_def_t mips_defs[] = /* The 5Kf has F64 / L / W but doesn't use the fcr0 bits. 
*/ .CP1_fcr0 = (1 << FCR0_D) | (1 << FCR0_S) | (0x81 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64, @@ -575,6 +590,8 @@ static const mips_def_t mips_defs[] = .CP1_fcr0 = (1 << FCR0_3D) | (1 << FCR0_PS) | (1 << FCR0_D) | (1 << FCR0_S) | (0x82 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 36, .insn_flags = CPU_MIPS64 | ASE_MIPS3D, @@ -601,6 +618,8 @@ static const mips_def_t mips_defs[] = .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64R2 | ASE_MIPS3D, @@ -686,6 +705,7 @@ static const mips_def_t mips_defs[] = (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), + .CP1_fcr31_rw_bitmask = 0x0103FFFF, .SEGBITS = 48, .PABITS = 48, .insn_flags = CPU_MIPS64R6 | ASE_MSA, @@ -704,6 +724,8 @@ static const mips_def_t mips_defs[] = .CCRes = 2, .CP0_Status_rw_bitmask = 0x35D0FFFF, .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 40, .insn_flags = CPU_LOONGSON2E, @@ -722,6 +744,8 @@ static const mips_def_t mips_defs[] = .CCRes = 2, .CP0_Status_rw_bitmask = 0xF5D0FF1F, /* Bits 7:5 not writable. */ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 40, .insn_flags = CPU_LOONGSON2F, @@ -749,6 +773,8 @@ static const mips_def_t mips_defs[] = .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSPR2, @@ -892,4 +918,7 @@ static void msa_reset(CPUMIPSState *env) /* clear float_status nan mode */ set_default_nan_mode(0, &env->active_tc.msa_fp_status); + + /* set proper signanling bit meaning ("1" means "quiet") */ + set_snan_bit_is_one(0, &env->active_tc.msa_fp_status); } diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c index 6fd56a868d..4ef893be2c 100644 --- a/target-ppc/fpu_helper.c +++ b/target-ppc/fpu_helper.c @@ -73,7 +73,7 @@ void helper_compute_fprf(CPUPPCState *env, uint64_t arg) farg.ll = arg; isneg = float64_is_neg(farg.d); if (unlikely(float64_is_any_nan(farg.d))) { - if (float64_is_signaling_nan(farg.d)) { + if (float64_is_signaling_nan(farg.d, &env->fp_status)) { /* Signaling NaN: flags are undefined */ fprf = 0x00; } else { @@ -534,8 +534,8 @@ uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2) /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d))) { + if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status))) { /* sNaN addition */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -558,8 +558,8 @@ uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2) /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(env, 
POWERPC_EXCP_FP_VXISI, 1); } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d))) { + if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status))) { /* sNaN subtraction */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -582,8 +582,8 @@ uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2) /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d))) { + if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status))) { /* sNaN multiplication */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -609,8 +609,8 @@ uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2) /* Division of zero by zero */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1); } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d))) { + if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status))) { /* sNaN division */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -632,7 +632,7 @@ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \ if (unlikely(env->fp_status.float_exception_flags)) { \ if (float64_is_any_nan(arg)) { \ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \ - if (float64_is_signaling_nan(arg)) { \ + if (float64_is_signaling_nan(arg, &env->fp_status)) { \ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \ } \ farg.ll = nanval; \ @@ -681,7 +681,7 @@ static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg, farg.ll = arg; - if (unlikely(float64_is_signaling_nan(farg.d))) { + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { /* sNaN round */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); farg.ll = arg | 0x0008000000000000ULL; @@ -737,9 +737,9 @@ uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d) || - float64_is_signaling_nan(farg3.d))) { + if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status) || + float64_is_signaling_nan(farg3.d, &env->fp_status))) { /* sNaN operation */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -780,9 +780,9 @@ uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d) || - float64_is_signaling_nan(farg3.d))) { + if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status) || + float64_is_signaling_nan(farg3.d, &env->fp_status))) { /* sNaN operation */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -821,9 +821,9 @@ uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d) || - 
float64_is_signaling_nan(farg3.d))) { + if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status) || + float64_is_signaling_nan(farg3.d, &env->fp_status))) { /* sNaN operation */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -866,9 +866,9 @@ uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d) || - float64_is_signaling_nan(farg3.d))) { + if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status) || + float64_is_signaling_nan(farg3.d, &env->fp_status))) { /* sNaN operation */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -903,7 +903,7 @@ uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) farg.ll = arg; - if (unlikely(float64_is_signaling_nan(farg.d))) { + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { /* sNaN square root */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -921,7 +921,7 @@ uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg) farg.ll = arg; if (unlikely(float64_is_any_nan(farg.d))) { - if (unlikely(float64_is_signaling_nan(farg.d))) { + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { /* sNaN reciprocal square root */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); farg.ll = float64_snan_to_qnan(farg.ll); @@ -942,7 +942,7 @@ uint64_t helper_fre(CPUPPCState *env, uint64_t arg) farg.ll = arg; - if (unlikely(float64_is_signaling_nan(farg.d))) { + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { /* sNaN reciprocal */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -958,7 +958,7 @@ uint64_t helper_fres(CPUPPCState *env, uint64_t arg) farg.ll = arg; - if (unlikely(float64_is_signaling_nan(farg.d))) { + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { /* sNaN reciprocal */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -977,7 +977,7 @@ uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg) farg.ll = arg; if (unlikely(float64_is_any_nan(farg.d))) { - if (unlikely(float64_is_signaling_nan(farg.d))) { + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { /* sNaN reciprocal square root */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); farg.ll = float64_snan_to_qnan(farg.ll); @@ -1100,8 +1100,8 @@ void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2, env->fpscr |= ret << FPSCR_FPRF; env->crf[crfD] = ret; if (unlikely(ret == 0x01UL - && (float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d)))) { + && (float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status)))) { /* sNaN comparison */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } @@ -1131,8 +1131,8 @@ void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2, env->fpscr |= ret << FPSCR_FPRF; env->crf[crfD] = ret; if (unlikely(ret == 0x01UL)) { - if (float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d)) { + if (float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status)) { /* sNaN comparison */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXVC, 1); @@ -1168,7 +1168,7 @@ static inline int32_t efsctsi(CPUPPCState *env, uint32_t val) u.l = val; /* NaN 
-    if (unlikely(float32_is_quiet_nan(u.f))) {
+    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
         return 0;
     }
@@ -1181,7 +1181,7 @@ static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
     u.l = val;
     /* NaN are not treated the same way IEEE 754 does */
-    if (unlikely(float32_is_quiet_nan(u.f))) {
+    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
         return 0;
     }
@@ -1194,7 +1194,7 @@ static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
     u.l = val;
     /* NaN are not treated the same way IEEE 754 does */
-    if (unlikely(float32_is_quiet_nan(u.f))) {
+    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
         return 0;
     }
@@ -1207,7 +1207,7 @@ static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
     u.l = val;
     /* NaN are not treated the same way IEEE 754 does */
-    if (unlikely(float32_is_quiet_nan(u.f))) {
+    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
         return 0;
     }
@@ -1245,7 +1245,7 @@ static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
     u.l = val;
     /* NaN are not treated the same way IEEE 754 does */
-    if (unlikely(float32_is_quiet_nan(u.f))) {
+    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
         return 0;
     }
     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
@@ -1261,7 +1261,7 @@ static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
     u.l = val;
     /* NaN are not treated the same way IEEE 754 does */
-    if (unlikely(float32_is_quiet_nan(u.f))) {
+    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
         return 0;
     }
     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
@@ -1839,8 +1839,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
             if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
-            } else if (tp##_is_signaling_nan(xa.fld) || \
-                       tp##_is_signaling_nan(xb.fld)) { \
+            } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+                       tp##_is_signaling_nan(xb.fld, &tstat)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
             } \
         } \
@@ -1894,8 +1894,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
             if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) || \
                 (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
-            } else if (tp##_is_signaling_nan(xa.fld) || \
-                       tp##_is_signaling_nan(xb.fld)) { \
+            } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+                       tp##_is_signaling_nan(xb.fld, &tstat)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
             } \
         } \
@@ -1948,8 +1948,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
             } else if (tp##_is_zero(xa.fld) && \
                 tp##_is_zero(xb.fld)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
-            } else if (tp##_is_signaling_nan(xa.fld) || \
-                       tp##_is_signaling_nan(xb.fld)) { \
+            } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+                       tp##_is_signaling_nan(xb.fld, &tstat)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
             } \
         } \
@@ -1990,7 +1990,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
     helper_reset_fpstatus(env); \
 \
     for (i = 0; i < nels; i++) { \
-        if (unlikely(tp##_is_signaling_nan(xb.fld))) { \
+        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
         } \
         xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
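The VSX arithmetic macros in the hunks above run each softfloat operation against a scratch status (tstat) and merge its exception flags back into env->fp_status afterwards, so the sNaN checks made inside that window consult &tstat, while checks made directly against the CPU state (as in the reciprocal-estimate hunk just above) pass &env->fp_status. A rough sketch of that scratch-status idiom, with illustrative names and a plain float64_add standing in for the macro-generated operation (assumes QEMU's target-ppc cpu.h and softfloat headers are in scope; this is not the literal macro expansion):

    /* Sketch only: the scratch-status pattern used by the VSX helpers. */
    static float64 vsx_binop_sketch(CPUPPCState *env, float64 a, float64 b)
    {
        float_status tstat = env->fp_status;   /* inherit rounding/NaN configuration */
        float64 r;

        set_float_exception_flags(0, &tstat);  /* collect only this operation's flags */
        r = float64_add(a, b, &tstat);         /* stand-in for the real operation */
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;

        if (tstat.float_exception_flags & float_flag_invalid) {
            /* the sNaN test must use the same status the operation used */
            if (float64_is_signaling_nan(a, &tstat) ||
                float64_is_signaling_nan(b, &tstat)) {
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            }
        }
        return r;
    }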
@@ -2039,7 +2039,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
             if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
-            } else if (tp##_is_signaling_nan(xb.fld)) { \
+            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
             } \
         } \
@@ -2089,7 +2089,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
             if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
-            } else if (tp##_is_signaling_nan(xb.fld)) { \
+            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
             } \
         } \
@@ -2274,9 +2274,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
 \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
-            if (tp##_is_signaling_nan(xa.fld) || \
-                tp##_is_signaling_nan(b->fld) || \
-                tp##_is_signaling_nan(c->fld)) { \
+            if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+                tp##_is_signaling_nan(b->fld, &tstat) || \
+                tp##_is_signaling_nan(c->fld, &tstat)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
                 tstat.float_exception_flags &= ~float_flag_invalid; \
             } \
@@ -2358,8 +2358,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
 \
     if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
                  float64_is_any_nan(xb.VsrD(0)))) { \
-        if (float64_is_signaling_nan(xa.VsrD(0)) || \
-            float64_is_signaling_nan(xb.VsrD(0))) { \
+        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
+            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
         } \
         if (ordered) { \
@@ -2406,8 +2406,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
 \
     for (i = 0; i < nels; i++) { \
         xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
-        if (unlikely(tp##_is_signaling_nan(xa.fld) || \
-                     tp##_is_signaling_nan(xb.fld))) { \
+        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
+                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
         } \
     } \
@@ -2446,8 +2446,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
     for (i = 0; i < nels; i++) { \
         if (unlikely(tp##_is_any_nan(xa.fld) || \
                      tp##_is_any_nan(xb.fld))) { \
-            if (tp##_is_signaling_nan(xa.fld) || \
-                tp##_is_signaling_nan(xb.fld)) { \
+            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
+                tp##_is_signaling_nan(xb.fld, &env->fp_status)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
             } \
             if (svxvc) { \
@@ -2500,7 +2500,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
 \
     for (i = 0; i < nels; i++) { \
         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
-        if (unlikely(stp##_is_signaling_nan(xb.sfld))) { \
+        if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+                                            &env->fp_status))) { \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
             xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
         } \
@@ -2555,7 +2556,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
 \
     for (i = 0; i < nels; i++) { \
         if (unlikely(stp##_is_any_nan(xb.sfld))) { \
-            if (stp##_is_signaling_nan(xb.sfld)) { \
+            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
             } \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
@@ -2664,7 +2665,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
     } \
 \
     for (i = 0; i < nels; i++) { \
-        if (unlikely(tp##_is_signaling_nan(xb.fld))) { \
+        if (unlikely(tp##_is_signaling_nan(xb.fld, \
+                                           &env->fp_status))) { \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
             xt.fld = tp##_snan_to_qnan(xb.fld); \
         } else { \
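All of the target-ppc hunks above are the same mechanical change: the softfloat NaN predicates now take a float_status pointer, because the signaling/quiet encoding is no longer fixed at build time but follows the status's snan_bit_is_one setting (the target-sh4 and target-unicore32 hunks below are the targets that turn it on). A simplified, self-contained illustration of such a status-dependent test; this is only the idea, not the actual softfloat code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustration only: a float32 sNaN test driven by a runtime flag
     * instead of a compile-time SNAN_BIT_IS_ONE define. */
    struct status_sketch {
        bool snan_bit_is_one;   /* true on SH-4/UniCore32-style FPUs */
    };

    static bool f32_is_snan_sketch(uint32_t bits, const struct status_sketch *s)
    {
        bool is_nan = ((bits >> 23) & 0xff) == 0xff && (bits & 0x7fffff) != 0;
        bool frac_msb = (bits >> 22) & 1;   /* top bit of the fraction field */

        if (!is_nan) {
            return false;
        }
        /* IEEE 754-2008 convention: an sNaN has the top fraction bit clear;
         * with snan_bit_is_one the meaning is inverted. */
        return s->snan_bit_is_one ? frac_msb : !frac_msb;
    }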
diff --git a/target-s390x/fpu_helper.c b/target-s390x/fpu_helper.c
index 4ddb388392..e604e9f7be 100644
--- a/target-s390x/fpu_helper.c
+++ b/target-s390x/fpu_helper.c
@@ -267,7 +267,7 @@ uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2)
 {
     float64 ret = float32_to_float64(f2, &env->fpu_status);
     handle_exceptions(env, GETPC());
-    return float64_maybe_silence_nan(ret);
+    return float64_maybe_silence_nan(ret, &env->fpu_status);
 }

 /* convert 128-bit float to 64-bit float */
@@ -275,7 +275,7 @@ uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al)
 {
     float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status);
     handle_exceptions(env, GETPC());
-    return float64_maybe_silence_nan(ret);
+    return float64_maybe_silence_nan(ret, &env->fpu_status);
 }

 /* convert 64-bit float to 128-bit float */
@@ -283,7 +283,7 @@ uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2)
 {
     float128 ret = float64_to_float128(f2, &env->fpu_status);
     handle_exceptions(env, GETPC());
-    return RET128(float128_maybe_silence_nan(ret));
+    return RET128(float128_maybe_silence_nan(ret, &env->fpu_status));
 }

 /* convert 32-bit float to 128-bit float */
@@ -291,7 +291,7 @@ uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2)
 {
     float128 ret = float32_to_float128(f2, &env->fpu_status);
     handle_exceptions(env, GETPC());
-    return RET128(float128_maybe_silence_nan(ret));
+    return RET128(float128_maybe_silence_nan(ret, &env->fpu_status));
 }

 /* convert 64-bit float to 32-bit float */
@@ -299,7 +299,7 @@ uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2)
 {
     float32 ret = float64_to_float32(f2, &env->fpu_status);
     handle_exceptions(env, GETPC());
-    return float32_maybe_silence_nan(ret);
+    return float32_maybe_silence_nan(ret, &env->fpu_status);
 }

 /* convert 128-bit float to 32-bit float */
@@ -307,7 +307,7 @@ uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t al)
 {
     float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status);
     handle_exceptions(env, GETPC());
-    return float32_maybe_silence_nan(ret);
+    return float32_maybe_silence_nan(ret, &env->fpu_status);
 }

 /* 32-bit FP compare */
@@ -624,7 +624,7 @@ uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1,
 }

 /* test data class 32-bit */
-uint32_t HELPER(tceb)(uint64_t f1, uint64_t m2)
+uint32_t HELPER(tceb)(CPUS390XState *env, uint64_t f1, uint64_t m2)
 {
     float32 v1 = f1;
     int neg = float32_is_neg(v1);
@@ -633,7 +633,8 @@ uint32_t HELPER(tceb)(uint64_t f1, uint64_t m2)
     if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
         (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
         (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
-        (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+        (float32_is_signaling_nan(v1, &env->fpu_status) &&
+         (m2 & (1 << (1-neg))))) {
         cc = 1;
     } else if (m2 & (1 << (9-neg))) {
         /* assume normalized number */
@@ -644,7 +645,7 @@ uint32_t HELPER(tceb)(uint64_t f1, uint64_t m2)
 }

 /* test data class 64-bit */
-uint32_t HELPER(tcdb)(uint64_t v1, uint64_t m2)
+uint32_t HELPER(tcdb)(CPUS390XState *env, uint64_t v1, uint64_t m2)
 {
     int neg = float64_is_neg(v1);
     uint32_t cc = 0;
@@ -652,7 +653,8 @@ uint32_t HELPER(tcdb)(uint64_t v1, uint64_t m2)
     if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
         (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
         (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
-        (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+        (float64_is_signaling_nan(v1, &env->fpu_status) &&
+         (m2 & (1 << (1-neg))))) {
         cc = 1;
     } else if (m2 & (1 << (9-neg))) {
         /* assume normalized number */
@@ -663,7 +665,8 @@ uint32_t HELPER(tcdb)(uint64_t v1, uint64_t m2)
 }

 /* test data class 128-bit */
-uint32_t HELPER(tcxb)(uint64_t ah, uint64_t al, uint64_t m2)
+uint32_t HELPER(tcxb)(CPUS390XState *env, uint64_t ah,
+                      uint64_t al, uint64_t m2)
 {
     float128 v1 = make_float128(ah, al);
     int neg = float128_is_neg(v1);
@@ -672,7 +675,8 @@ uint32_t HELPER(tcxb)(uint64_t ah, uint64_t al, uint64_t m2)
     if ((float128_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
         (float128_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
         (float128_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
-        (float128_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+        (float128_is_signaling_nan(v1, &env->fpu_status) &&
+         (m2 & (1 << (1-neg))))) {
         cc = 1;
     } else if (m2 & (1 << (9-neg))) {
         /* assume normalized number */
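The only reason tceb/tcdb/tcxb now receive the CPU state is the float*_is_signaling_nan(..., &env->fpu_status) call; the classification logic itself is unchanged. Growing a TCG helper by an env argument is a three-part edit, and the helper.h and translate.c hunks that follow are the other two parts. A compressed sketch of the pattern, using an invented helper name rather than the real s390x code:

    /* 1. fpu_helper.c: the helper takes the CPU state as its first argument. */
    uint32_t HELPER(tc_sketch)(CPUS390XState *env, uint64_t v1, uint64_t m2)
    {
        /* env is needed only so the sNaN test can consult env->fpu_status */
        return (float64_is_signaling_nan(v1, &env->fpu_status) && (m2 & 1)) ? 1 : 0;
    }

    /* 2. helper.h: the declaration gains an "env" slot and its arity bumps,
     *    mirroring the hunk below:
     *        DEF_HELPER_FLAGS_3(tc_sketch, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
     *
     * 3. translate.c: the translator passes cpu_env explicitly:
     *        gen_helper_tc_sketch(cc_op, cpu_env, o->in1, o->in2);
     */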
diff --git a/target-s390x/helper.h b/target-s390x/helper.h
index 7e06119e99..207a6e7d1c 100644
--- a/target-s390x/helper.h
+++ b/target-s390x/helper.h
@@ -67,9 +67,9 @@ DEF_HELPER_FLAGS_4(maeb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
 DEF_HELPER_FLAGS_4(madb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
 DEF_HELPER_FLAGS_4(mseb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
 DEF_HELPER_FLAGS_4(msdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
-DEF_HELPER_FLAGS_2(tceb, TCG_CALL_NO_RWG_SE, i32, i64, i64)
-DEF_HELPER_FLAGS_2(tcdb, TCG_CALL_NO_RWG_SE, i32, i64, i64)
-DEF_HELPER_FLAGS_3(tcxb, TCG_CALL_NO_RWG_SE, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_3(tceb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(tcdb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_4(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64, i64)
 DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i64, i64)
 DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64)
 DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64)
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index 3c3487a5a9..1a07d70b21 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -3986,21 +3986,21 @@ static ExitStatus op_svc(DisasContext *s, DisasOps *o)

 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
 {
-    gen_helper_tceb(cc_op, o->in1, o->in2);
+    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
     set_cc_static(s);
     return NO_EXIT;
 }

 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
 {
-    gen_helper_tcdb(cc_op, o->in1, o->in2);
+    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
     set_cc_static(s);
     return NO_EXIT;
 }

 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
 {
-    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
+    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
     set_cc_static(s);
     return NO_EXIT;
 }
diff --git a/target-sh4/cpu.c b/target-sh4/cpu.c
index 794b625d8e..f589532e18 100644
--- a/target-sh4/cpu.c
+++ b/target-sh4/cpu.c
@@ -71,6 +71,7 @@ static void superh_cpu_reset(CPUState *s)
     set_flush_to_zero(1, &env->fp_status);
 #endif
     set_default_nan_mode(1, &env->fp_status);
+    set_snan_bit_is_one(1, &env->fp_status);
 }

 static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
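The target-sparc hunk below reworks ldstub so that the guest destination register is only written after both memory accesses have been emitted, presumably so a fault on the store cannot leave the guest register already modified. For reference, the architectural effect of ldstub is simply a read of one byte followed by setting it to 0xff; a rough C model of that semantics (illustration only, not QEMU code):

    #include <stdint.h>

    /* Rough model of SPARC ldstub: return the old byte and replace it with 0xff.
     * The real instruction does this atomically; the TCG version below still
     * carries the "should be atomically" XXX for exactly that reason. */
    static uint8_t ldstub_model(uint8_t *addr)
    {
        uint8_t old = *addr;
        *addr = 0xff;
        return old;
    }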
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index afd46b878f..0f4faf7062 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -4679,12 +4679,15 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0xd:   /* ldstub -- XXX: should be atomically */
                 {
                     TCGv r_const;
+                    TCGv tmp = tcg_temp_new();

                     gen_address_mask(dc, cpu_addr);
-                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+                    tcg_gen_qemu_ld8u(tmp, cpu_addr, dc->mem_idx);
                     r_const = tcg_const_tl(0xff);
                     tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
+                    tcg_gen_mov_tl(cpu_val, tmp);
                     tcg_temp_free(r_const);
+                    tcg_temp_free(tmp);
                 }
                 break;
             case 0x0f:
diff --git a/target-unicore32/cpu.c b/target-unicore32/cpu.c
index 3990433eb8..e7a4984260 100644
--- a/target-unicore32/cpu.c
+++ b/target-unicore32/cpu.c
@@ -78,6 +78,7 @@ static void unicore_ii_cpu_initfn(Object *obj)
     set_feature(env, UC32_HWCAP_CMOV);
     set_feature(env, UC32_HWCAP_UCF64);
+    set_snan_bit_is_one(1, &env->ucf64.fp_status);
 }

 static void uc32_any_cpu_initfn(Object *obj)
@@ -90,6 +91,7 @@ static void uc32_any_cpu_initfn(Object *obj)
     set_feature(env, UC32_HWCAP_CMOV);
     set_feature(env, UC32_HWCAP_UCF64);
+    set_snan_bit_is_one(1, &env->ucf64.fp_status);
 }

 static const UniCore32CPUInfo uc32_cpus[] = {
diff --git a/tests/Makefile.include b/tests/Makefile.include
index fd2dba49a7..6c09962f75 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -251,7 +251,7 @@ check-qtest-sparc64-y = tests/endianness-test$(EXESUF)
 gcov-files-sparc-y += hw/timer/m48t59.c
 gcov-files-sparc64-y += hw/timer/m48t59.c
 check-qtest-arm-y = tests/tmp105-test$(EXESUF)
-check-qtest-arm-y = tests/ds1338-test$(EXESUF)
+check-qtest-arm-y += tests/ds1338-test$(EXESUF)
 gcov-files-arm-y += hw/misc/tmp105.c
 check-qtest-arm-y += tests/virtio-blk-test$(EXESUF)
 gcov-files-arm-y += arm-softmmu/hw/block/virtio-blk.c
diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041
index ed1d9d464c..cbf5e0ba5c 100755
--- a/tests/qemu-iotests/041
+++ b/tests/qemu-iotests/041
@@ -727,6 +727,36 @@ class TestUnbackedSource(iotests.QMPTestCase):
         self.complete_and_wait()
         self.assert_no_active_block_jobs()

+class TestGranularity(iotests.QMPTestCase):
+    image_len = 10 * 1024 * 1024 # MB
+
+    def setUp(self):
+        qemu_img('create', '-f', iotests.imgfmt, test_img,
+                 str(TestGranularity.image_len))
+        qemu_io('-c', 'write 0 %d' % (self.image_len),
+                test_img)
+        self.vm = iotests.VM().add_drive(test_img)
+        self.vm.launch()
+
+    def tearDown(self):
+        self.vm.shutdown()
+        self.assertTrue(iotests.compare_images(test_img, target_img),
+                        'target image does not match source after mirroring')
+        os.remove(test_img)
+        os.remove(target_img)
+
+    def test_granularity(self):
+        self.assert_no_active_block_jobs()
+        result = self.vm.qmp('drive-mirror', device='drive0',
+                             sync='full', target=target_img,
+                             mode='absolute-paths', granularity=8192)
+        self.assert_qmp(result, 'return', {})
+        event = self.vm.get_qmp_event(wait=60.0)
+        # Failures will manifest as COMPLETED/ERROR.
+        self.assert_qmp(event, 'event', 'BLOCK_JOB_READY')
+        self.complete_and_wait(drive='drive0', wait_ready=False)
+        self.assert_no_active_block_jobs()
+
 class TestRepairQuorum(iotests.QMPTestCase):
     """ This class test quorum file repair using drive-mirror.
     It's mostly a fork of TestSingleDrive """
diff --git a/tests/qemu-iotests/041.out b/tests/qemu-iotests/041.out
index b0cadc8245..b67d0504a6 100644
--- a/tests/qemu-iotests/041.out
+++ b/tests/qemu-iotests/041.out
@@ -1,5 +1,5 @@
-...........................................................................
+............................................................................
 ----------------------------------------------------------------------
-Ran 75 tests
+Ran 76 tests

 OK
diff --git a/trace-events b/trace-events
index 9d76de8574..476705996b 100644
--- a/trace-events
+++ b/trace-events
@@ -156,3 +156,19 @@ memory_region_tb_write(int cpu_index, uint64_t addr, uint64_t value, unsigned si
 #
 # Targets: TCG(all)
 disable vcpu tcg guest_mem_before(TCGv vaddr, uint8_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d"
+
+# @num: System call number.
+# @arg*: System call argument value.
+#
+# Start executing a guest system call in syscall emulation mode.
+#
+# Targets: TCG(all)
+disable vcpu guest_user_syscall(uint64_t num, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, uint64_t arg6, uint64_t arg7, uint64_t arg8) "num=0x%016"PRIx64" arg1=0x%016"PRIx64" arg2=0x%016"PRIx64" arg3=0x%016"PRIx64" arg4=0x%016"PRIx64" arg5=0x%016"PRIx64" arg6=0x%016"PRIx64" arg7=0x%016"PRIx64" arg8=0x%016"PRIx64
+
+# @num: System call number.
+# @ret: System call result value.
+#
+# Finish executing a guest system call in syscall emulation mode.
+#
+# Targets: TCG(all)
+disable vcpu guest_user_syscall_ret(uint64_t num, uint64_t ret) "num=0x%016"PRIx64" ret=0x%016"PRIx64
diff --git a/trace/control.c b/trace/control.c
index e1556a3570..86de8b9983 100644
--- a/trace/control.c
+++ b/trace/control.c
@@ -21,11 +21,33 @@
 #endif
 #include "qapi/error.h"
 #include "qemu/error-report.h"
+#include "qemu/config-file.h"
 #include "monitor/monitor.h"

 int trace_events_enabled_count;
 bool trace_events_dstate[TRACE_EVENT_COUNT];

+QemuOptsList qemu_trace_opts = {
+    .name = "trace",
+    .implied_opt_name = "enable",
+    .head = QTAILQ_HEAD_INITIALIZER(qemu_trace_opts.head),
+    .desc = {
+        {
+            .name = "enable",
+            .type = QEMU_OPT_STRING,
+        },
+        {
+            .name = "events",
+            .type = QEMU_OPT_STRING,
+        },{
+            .name = "file",
+            .type = QEMU_OPT_STRING,
+        },
+        { /* end of list */ }
+    },
+};
+
 TraceEvent *trace_event_name(const char *name)
 {
     assert(name != NULL);
@@ -142,7 +164,7 @@ void trace_enable_events(const char *line_buf)
     }
 }

-void trace_init_events(const char *fname)
+static void trace_init_events(const char *fname)
 {
     Location loc;
     FILE *fp;
@@ -217,3 +239,21 @@ bool trace_init_backends(void)

     return true;
 }
+
+char *trace_opt_parse(const char *optarg)
+{
+    char *trace_file;
+    QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("trace"),
+                                             optarg, true);
+    if (!opts) {
+        exit(1);
+    }
+    if (qemu_opt_get(opts, "enable")) {
+        trace_enable_events(qemu_opt_get(opts, "enable"));
+    }
+    trace_init_events(qemu_opt_get(opts, "events"));
+    trace_file = g_strdup(qemu_opt_get(opts, "file"));
+    qemu_opts_del(opts);
+
+    return trace_file;
+}
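With qemu_trace_opts and trace_opt_parse() now living in trace/control.c, every binary that links the tracing core can handle a -trace argument the same way; the vl.c hunk at the end of this section shrinks to exactly this call. A sketch of how a frontend's option loop can use it (the option list still has to be registered, for example via qemu_add_opts(&qemu_trace_opts), which is assumed here and not shown in these hunks):

    #include "qemu/osdep.h"
    #include "trace/control.h"

    static char *trace_file;   /* NULL until a -trace file=... is seen */

    /* Inside the frontend's command-line switch, for the "-trace" case: */
    static void handle_trace_option_sketch(const char *optarg)
    {
        g_free(trace_file);                 /* the last -trace file=... wins */
        trace_file = trace_opt_parse(optarg);
    }

    /* Later, once the backends can be started (names as used in this series):
     *     trace_init_file(trace_file);
     *     trace_init_backends();
     */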
diff --git a/trace/control.h b/trace/control.h
index e2ba6d4de1..a2dd3eaedf 100644
--- a/trace/control.h
+++ b/trace/control.h
@@ -160,17 +160,6 @@ static void trace_event_set_state_dynamic(TraceEvent *ev, bool state);
 bool trace_init_backends(void);

 /**
- * trace_init_events:
- * @events: Name of file with events to be enabled at startup; may be NULL.
- *          Corresponds to commandline option "-trace events=...".
- *
- * Read the list of enabled tracing events.
- *
- * Returns: Whether the backends could be successfully initialized.
- */
-void trace_init_events(const char *file);
-
-/**
  * trace_init_file:
  * @file: Name of trace output file; may be NULL.
  *        Corresponds to commandline option "-trace file=...".
@@ -197,6 +186,20 @@ void trace_list_events(void);
  */
 void trace_enable_events(const char *line_buf);

+/**
+ * Definition of QEMU options describing trace subsystem configuration
+ */
+extern QemuOptsList qemu_trace_opts;
+
+/**
+ * trace_opt_parse:
+ * @optarg: A string argument of --trace command line argument
+ *
+ * Initialize tracing subsystem.
+ *
+ * Returns the filename to save trace to. It must be freed with g_free().
+ */
+char *trace_opt_parse(const char *optarg);

 #include "trace/control-internal.h"
diff --git a/user-exec.c b/user-exec.c
index 50e95a68de..95f9f97c5c 100644
--- a/user-exec.c
+++ b/user-exec.c
@@ -117,14 +117,7 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,

 #if defined(__i386__)

-#if defined(__APPLE__)
-#include <sys/ucontext.h>
-
-#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
-#define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
-#define ERROR_sig(context) ((context)->uc_mcontext->es.err)
-#define MASK_sig(context) ((context)->uc_sigmask)
-#elif defined(__NetBSD__)
+#if defined(__NetBSD__)
 #include <ucontext.h>

 #define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
@@ -274,44 +267,6 @@ int cpu_signal_handler(int host_signum, void *pinfo,
 #define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
 #endif /* __FreeBSD__|| __FreeBSD_kernel__ */

-#ifdef __APPLE__
-#include <sys/ucontext.h>
-typedef struct ucontext SIGCONTEXT;
-/* All Registers access - only for local access */
-#define REG_sig(reg_name, context) \
-    ((context)->uc_mcontext->ss.reg_name)
-#define FLOATREG_sig(reg_name, context) \
-    ((context)->uc_mcontext->fs.reg_name)
-#define EXCEPREG_sig(reg_name, context) \
-    ((context)->uc_mcontext->es.reg_name)
-#define VECREG_sig(reg_name, context) \
-    ((context)->uc_mcontext->vs.reg_name)
-/* Gpr Registers access */
-#define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
-/* Program counter */
-#define IAR_sig(context) REG_sig(srr0, context)
-/* Machine State Register (Supervisor) */
-#define MSR_sig(context) REG_sig(srr1, context)
-#define CTR_sig(context) REG_sig(ctr, context)
-/* Link register */
-#define XER_sig(context) REG_sig(xer, context)
-/* User's integer exception register */
-#define LR_sig(context) REG_sig(lr, context)
-/* Condition register */
-#define CR_sig(context) REG_sig(cr, context)
-/* Float Registers access */
-#define FLOAT_sig(reg_num, context) \
-    FLOATREG_sig(fpregs[reg_num], context)
-#define FPSCR_sig(context) \
-    ((double)FLOATREG_sig(fpscr, context))
-/* Exception Registers access */
-/* Fault registers for coredump */
-#define DAR_sig(context) EXCEPREG_sig(dar, context)
-#define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
-/* number of powerpc exception taken */
-#define TRAP_sig(context) EXCEPREG_sig(exception, context)
-#endif /* __APPLE__ */
-
 int cpu_signal_handler(int host_signum, void *pinfo,
                        void *puc)
 {
@@ -494,24 +449,6 @@ int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
                              is_write, &uc->uc_sigmask);
 }

-#elif defined(__mc68000)
-
-int cpu_signal_handler(int host_signum, void *pinfo,
-                       void *puc)
-{
-    siginfo_t *info = pinfo;
-    struct ucontext *uc = puc;
-    unsigned long pc;
-    int is_write;
-
-    pc = uc->uc_mcontext.gregs[16];
-    /* XXX: compute is_write */
-    is_write = 0;
-    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
-                             is_write,
-                             &uc->uc_sigmask);
-}
-
 #elif defined(__ia64)

 #ifndef __ISR_VALID
@@ -616,48 +553,6 @@ int cpu_signal_handler(int host_signum, void *pinfo,
                              is_write, &uc->uc_sigmask);
 }

-#elif defined(__hppa__)
-
-int cpu_signal_handler(int host_signum, void *pinfo,
-                       void *puc)
-{
-    siginfo_t *info = pinfo;
-    struct ucontext *uc = puc;
-    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
-    uint32_t insn = *(uint32_t *)pc;
-    int is_write = 0;
-
-    /* XXX: need kernel patch to get write flag faster. */
-    switch (insn >> 26) {
-    case 0x1a: /* STW */
-    case 0x19: /* STH */
-    case 0x18: /* STB */
-    case 0x1b: /* STWM */
-        is_write = 1;
-        break;
-
-    case 0x09: /* CSTWX, FSTWX, FSTWS */
-    case 0x0b: /* CSTDX, FSTDX, FSTDS */
-        /* Distinguish from coprocessor load ... */
-        is_write = (insn >> 9) & 1;
-        break;
-
-    case 0x03:
-        switch ((insn >> 6) & 15) {
-        case 0xa: /* STWS */
-        case 0x9: /* STHS */
-        case 0x8: /* STBS */
-        case 0xe: /* STWAS */
-        case 0xc: /* STBYS */
-            is_write = 1;
-        }
-        break;
-    }
-
-    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
-                             is_write, &uc->uc_sigmask);
-}
-
 #else

 #error host CPU specific signal handler needed
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index 5d03695d10..cc2b043907 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -1169,3 +1169,39 @@ void qapi_copy_SocketAddress(SocketAddress **p_dest,
     qmp_input_visitor_cleanup(qiv);
     qobject_decref(obj);
 }
+
+char *socket_address_to_string(struct SocketAddress *addr, Error **errp)
+{
+    char *buf;
+    InetSocketAddress *inet;
+    char host_port[INET6_ADDRSTRLEN + 5 + 4];
+
+    switch (addr->type) {
+    case SOCKET_ADDRESS_KIND_INET:
+        inet = addr->u.inet.data;
+        if (strchr(inet->host, ':') == NULL) {
+            snprintf(host_port, sizeof(host_port), "%s:%s", inet->host,
+                     inet->port);
+            buf = g_strdup(host_port);
+        } else {
+            snprintf(host_port, sizeof(host_port), "[%s]:%s", inet->host,
+                     inet->port);
+            buf = g_strdup(host_port);
+        }
+        break;
+
+    case SOCKET_ADDRESS_KIND_UNIX:
+        buf = g_strdup(addr->u.q_unix.data->path);
+        break;
+
+    case SOCKET_ADDRESS_KIND_FD:
+        buf = g_strdup(addr->u.fd.data->str);
+        break;
+
+    default:
+        error_setg(errp, "socket family %d unsupported",
+                   addr->type);
+        return NULL;
+    }
+    return buf;
+}
@@ -262,26 +262,6 @@ static QemuOptsList qemu_sandbox_opts = {
     },
 };

-static QemuOptsList qemu_trace_opts = {
-    .name = "trace",
-    .implied_opt_name = "enable",
-    .head = QTAILQ_HEAD_INITIALIZER(qemu_trace_opts.head),
-    .desc = {
-        {
-            .name = "enable",
-            .type = QEMU_OPT_STRING,
-        },
-        {
-            .name = "events",
-            .type = QEMU_OPT_STRING,
-        },{
-            .name = "file",
-            .type = QEMU_OPT_STRING,
-        },
-        { /* end of list */ }
-    },
-};
-
 static QemuOptsList qemu_option_rom_opts = {
     .name = "option-rom",
     .implied_opt_name = "romfile",
@@ -1526,6 +1506,7 @@ MachineInfoList *qmp_query_machines(Error **errp)

         info->name = g_strdup(mc->name);
         info->cpu_max = !mc->max_cpus ? 1 : mc->max_cpus;
+        info->hotpluggable_cpus = !!mc->query_hotpluggable_cpus;

         entry = g_malloc0(sizeof(*entry));
         entry->value = info;
@@ -3864,23 +3845,9 @@ int main(int argc, char **argv, char **envp)
                 xen_mode = XEN_ATTACH;
                 break;
             case QEMU_OPTION_trace:
-            {
-                opts = qemu_opts_parse_noisily(qemu_find_opts("trace"),
-                                               optarg, true);
-                if (!opts) {
-                    exit(1);
-                }
-                if (qemu_opt_get(opts, "enable")) {
-                    trace_enable_events(qemu_opt_get(opts, "enable"));
-                }
-                trace_init_events(qemu_opt_get(opts, "events"));
-                if (trace_file) {
-                    g_free(trace_file);
-                }
-                trace_file = g_strdup(qemu_opt_get(opts, "file"));
-                qemu_opts_del(opts);
+                g_free(trace_file);
+                trace_file = trace_opt_parse(optarg);
                 break;
-            }
             case QEMU_OPTION_readconfig:
                 {
                     int ret = qemu_read_config_file(optarg);