diff options
70 files changed, 5839 insertions, 478 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index a74c04c2a9..142f68a689 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -161,6 +161,12 @@ S: Maintained F: target-xtensa/ F: hw/xtensa/ +TriCore +M: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> +S: Maintained +F: target-tricore/ +F: hw/tricore/ + Guest CPU Cores (KVM): ---------------------- diff --git a/aio-posix.c b/aio-posix.c index 2eada2e049..d3ac06e238 100644 --- a/aio-posix.c +++ b/aio-posix.c @@ -100,6 +100,11 @@ void aio_set_event_notifier(AioContext *ctx, (IOHandler *)io_read, NULL, notifier); } +bool aio_prepare(AioContext *ctx) +{ + return false; +} + bool aio_pending(AioContext *ctx) { AioHandler *node; @@ -119,12 +124,21 @@ bool aio_pending(AioContext *ctx) return false; } -static bool aio_dispatch(AioContext *ctx) +bool aio_dispatch(AioContext *ctx) { AioHandler *node; bool progress = false; /* + * If there are callbacks left that have been queued, we need to call them. + * Do not call select in this case, because it is possible that the caller + * does not need a complete flush (as is the case for aio_poll loops). + */ + if (aio_bh_poll(ctx)) { + progress = true; + } + + /* * We have to walk very carefully in case aio_set_fd_handler is * called while we're walking. */ @@ -184,22 +198,9 @@ bool aio_poll(AioContext *ctx, bool blocking) /* aio_notify can avoid the expensive event_notifier_set if * everything (file descriptors, bottom halves, timers) will - * be re-evaluated before the next blocking poll(). This happens - * in two cases: - * - * 1) when aio_poll is called with blocking == false - * - * 2) when we are called after poll(). If we are called before - * poll(), bottom halves will not be re-evaluated and we need - * aio_notify() if blocking == true. - * - * The first aio_dispatch() only does something when AioContext is - * running as a GSource, and in that case aio_poll is used only - * with blocking == false, so this optimization is already quite - * effective. 
However, the code is ugly and should be restructured - * to have a single aio_dispatch() call. To do this, we need to - * reorganize aio_poll into a prepare/poll/dispatch model like - * glib's. + * be re-evaluated before the next blocking poll(). This is + * already true when aio_poll is called with blocking == false; + * if blocking == true, it is only true after poll() returns. * * If we're in a nested event loop, ctx->dispatching might be true. * In that case we can restore it just before returning, but we @@ -207,26 +208,6 @@ bool aio_poll(AioContext *ctx, bool blocking) */ aio_set_dispatching(ctx, !blocking); - /* - * If there are callbacks left that have been queued, we need to call them. - * Do not call select in this case, because it is possible that the caller - * does not need a complete flush (as is the case for aio_poll loops). - */ - if (aio_bh_poll(ctx)) { - blocking = false; - progress = true; - } - - /* Re-evaluate condition (1) above. */ - aio_set_dispatching(ctx, !blocking); - if (aio_dispatch(ctx)) { - progress = true; - } - - if (progress && !blocking) { - goto out; - } - ctx->walking_handlers++; g_array_set_size(ctx->pollfds, 0); @@ -249,7 +230,7 @@ bool aio_poll(AioContext *ctx, bool blocking) /* wait until next event */ ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data, ctx->pollfds->len, - blocking ? timerlistgroup_deadline_ns(&ctx->tlg) : 0); + blocking ? 
aio_compute_timeout(ctx) : 0); /* if we have any readable fds, dispatch event */ if (ret > 0) { @@ -268,7 +249,6 @@ bool aio_poll(AioContext *ctx, bool blocking) progress = true; } -out: aio_set_dispatching(ctx, was_dispatching); return progress; } diff --git a/aio-win32.c b/aio-win32.c index c12f61e97d..61e3d2ddfe 100644 --- a/aio-win32.c +++ b/aio-win32.c @@ -22,12 +22,80 @@ struct AioHandler { EventNotifier *e; + IOHandler *io_read; + IOHandler *io_write; EventNotifierHandler *io_notify; GPollFD pfd; int deleted; + void *opaque; QLIST_ENTRY(AioHandler) node; }; +void aio_set_fd_handler(AioContext *ctx, + int fd, + IOHandler *io_read, + IOHandler *io_write, + void *opaque) +{ + /* fd is a SOCKET in our case */ + AioHandler *node; + + QLIST_FOREACH(node, &ctx->aio_handlers, node) { + if (node->pfd.fd == fd && !node->deleted) { + break; + } + } + + /* Are we deleting the fd handler? */ + if (!io_read && !io_write) { + if (node) { + /* If the lock is held, just mark the node as deleted */ + if (ctx->walking_handlers) { + node->deleted = 1; + node->pfd.revents = 0; + } else { + /* Otherwise, delete it for real. We can't just mark it as + * deleted because deleted nodes are only cleaned up after + * releasing the walking_handlers lock. 
+ */ + QLIST_REMOVE(node, node); + g_free(node); + } + } + } else { + HANDLE event; + + if (node == NULL) { + /* Alloc and insert if it's not already there */ + node = g_malloc0(sizeof(AioHandler)); + node->pfd.fd = fd; + QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node); + } + + node->pfd.events = 0; + if (node->io_read) { + node->pfd.events |= G_IO_IN; + } + if (node->io_write) { + node->pfd.events |= G_IO_OUT; + } + + node->e = &ctx->notifier; + + /* Update handler with latest information */ + node->opaque = opaque; + node->io_read = io_read; + node->io_write = io_write; + + event = event_notifier_get_handle(&ctx->notifier); + WSAEventSelect(node->pfd.fd, event, + FD_READ | FD_ACCEPT | FD_CLOSE | + FD_CONNECT | FD_WRITE | FD_OOB); + } + + aio_notify(ctx); +} + void aio_set_event_notifier(AioContext *ctx, EventNotifier *e, EventNotifierHandler *io_notify) @@ -76,55 +144,82 @@ void aio_set_event_notifier(AioContext *ctx, aio_notify(ctx); } -bool aio_pending(AioContext *ctx) +bool aio_prepare(AioContext *ctx) { + static struct timeval tv0; AioHandler *node; + bool have_select_revents = false; + fd_set rfds, wfds; + /* fill fd sets */ + FD_ZERO(&rfds); + FD_ZERO(&wfds); QLIST_FOREACH(node, &ctx->aio_handlers, node) { - if (node->pfd.revents && node->io_notify) { - return true; + if (node->io_read) { + FD_SET ((SOCKET)node->pfd.fd, &rfds); + } + if (node->io_write) { + FD_SET ((SOCKET)node->pfd.fd, &wfds); } } - return false; + if (select(0, &rfds, &wfds, NULL, &tv0) > 0) { + QLIST_FOREACH(node, &ctx->aio_handlers, node) { + node->pfd.revents = 0; + if (FD_ISSET(node->pfd.fd, &rfds)) { + node->pfd.revents |= G_IO_IN; + have_select_revents = true; + } + + if (FD_ISSET(node->pfd.fd, &wfds)) { + node->pfd.revents |= G_IO_OUT; + have_select_revents = true; + } + } + } + + return have_select_revents; } -bool aio_poll(AioContext *ctx, bool blocking) +bool aio_pending(AioContext *ctx) { AioHandler *node; - HANDLE events[MAXIMUM_WAIT_OBJECTS + 1]; - bool progress; - int 
count; - int timeout; - progress = false; + QLIST_FOREACH(node, &ctx->aio_handlers, node) { + if (node->pfd.revents && node->io_notify) { + return true; + } - /* - * If there are callbacks left that have been queued, we need to call then. - * Do not call select in this case, because it is possible that the caller - * does not need a complete flush (as is the case for aio_poll loops). - */ - if (aio_bh_poll(ctx)) { - blocking = false; - progress = true; + if ((node->pfd.revents & G_IO_IN) && node->io_read) { + return true; + } + if ((node->pfd.revents & G_IO_OUT) && node->io_write) { + return true; + } } - /* Run timers */ - progress |= timerlistgroup_run_timers(&ctx->tlg); + return false; +} + +static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event) +{ + AioHandler *node; + bool progress = false; /* - * Then dispatch any pending callbacks from the GSource. - * * We have to walk very carefully in case aio_set_fd_handler is * called while we're walking. */ node = QLIST_FIRST(&ctx->aio_handlers); while (node) { AioHandler *tmp; + int revents = node->pfd.revents; ctx->walking_handlers++; - if (node->pfd.revents && node->io_notify) { + if (!node->deleted && + (revents || event_notifier_get_handle(node->e) == event) && + node->io_notify) { node->pfd.revents = 0; node->io_notify(node->e); @@ -134,6 +229,28 @@ bool aio_poll(AioContext *ctx, bool blocking) } } + if (!node->deleted && + (node->io_read || node->io_write)) { + node->pfd.revents = 0; + if ((revents & G_IO_IN) && node->io_read) { + node->io_read(node->opaque); + progress = true; + } + if ((revents & G_IO_OUT) && node->io_write) { + node->io_write(node->opaque); + progress = true; + } + + /* if the next select() will return an event, we have progressed */ + if (event == event_notifier_get_handle(&ctx->notifier)) { + WSANETWORKEVENTS ev; + WSAEnumNetworkEvents(node->pfd.fd, event, &ev); + if (ev.lNetworkEvents) { + progress = true; + } + } + } + tmp = node; node = QLIST_NEXT(node, node); @@ -145,10 
+262,47 @@ bool aio_poll(AioContext *ctx, bool blocking) } } - if (progress && !blocking) { - return true; + return progress; +} + +bool aio_dispatch(AioContext *ctx) +{ + bool progress; + + progress = aio_bh_poll(ctx); + progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE); + progress |= timerlistgroup_run_timers(&ctx->tlg); + return progress; +} + +bool aio_poll(AioContext *ctx, bool blocking) +{ + AioHandler *node; + HANDLE events[MAXIMUM_WAIT_OBJECTS + 1]; + bool was_dispatching, progress, have_select_revents, first; + int count; + int timeout; + + if (aio_prepare(ctx)) { + blocking = false; + have_select_revents = true; } + was_dispatching = ctx->dispatching; + progress = false; + + /* aio_notify can avoid the expensive event_notifier_set if + * everything (file descriptors, bottom halves, timers) will + * be re-evaluated before the next blocking poll(). This is + * already true when aio_poll is called with blocking == false; + * if blocking == true, it is only true after poll() returns. + * + * If we're in a nested event loop, ctx->dispatching might be true. + * In that case we can restore it just before returning, but we + * have to clear it now. + */ + aio_set_dispatching(ctx, !blocking); + ctx->walking_handlers++; /* fill fd sets */ @@ -160,64 +314,42 @@ bool aio_poll(AioContext *ctx, bool blocking) } ctx->walking_handlers--; + first = true; /* wait until next event */ while (count > 0) { + HANDLE event; int ret; - timeout = blocking ? - qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)) : 0; + timeout = blocking + ? 
qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0; ret = WaitForMultipleObjects(count, events, FALSE, timeout); + aio_set_dispatching(ctx, true); + + if (first && aio_bh_poll(ctx)) { + progress = true; + } + first = false; /* if we have any signaled events, dispatch event */ - if ((DWORD) (ret - WAIT_OBJECT_0) >= count) { + event = NULL; + if ((DWORD) (ret - WAIT_OBJECT_0) < count) { + event = events[ret - WAIT_OBJECT_0]; + } else if (!have_select_revents) { break; } + have_select_revents = false; blocking = false; - /* we have to walk very carefully in case - * aio_set_fd_handler is called while we're walking */ - node = QLIST_FIRST(&ctx->aio_handlers); - while (node) { - AioHandler *tmp; - - ctx->walking_handlers++; - - if (!node->deleted && - event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] && - node->io_notify) { - node->io_notify(node->e); - - /* aio_notify() does not count as progress */ - if (node->e != &ctx->notifier) { - progress = true; - } - } - - tmp = node; - node = QLIST_NEXT(node, node); - - ctx->walking_handlers--; - - if (!ctx->walking_handlers && tmp->deleted) { - QLIST_REMOVE(tmp, node); - g_free(tmp); - } - } + progress |= aio_dispatch_handlers(ctx, event); /* Try again, but only call each handler once. */ events[ret - WAIT_OBJECT_0] = events[--count]; } - if (blocking) { - /* Run the timers a second time. We do this because otherwise aio_wait - * will not note progress - and will stop a drain early - if we have - * a timer that was not ready to run entering g_poll but is ready - * after g_poll. This will only do anything if a timer has expired. 
- */ - progress |= timerlistgroup_run_timers(&ctx->tlg); - } + progress |= timerlistgroup_run_timers(&ctx->tlg); + aio_set_dispatching(ctx, was_dispatching); return progress; } diff --git a/arch_init.c b/arch_init.c index 28ece769d8..c974f3fea9 100644 --- a/arch_init.c +++ b/arch_init.c @@ -104,6 +104,8 @@ int graphic_depth = 32; #define QEMU_ARCH QEMU_ARCH_XTENSA #elif defined(TARGET_UNICORE32) #define QEMU_ARCH QEMU_ARCH_UNICORE32 +#elif defined(TARGET_TRICORE) +#define QEMU_ARCH QEMU_ARCH_TRICORE #endif const uint32_t arch_type = QEMU_ARCH; @@ -152,39 +152,48 @@ void qemu_bh_delete(QEMUBH *bh) bh->deleted = 1; } -static gboolean -aio_ctx_prepare(GSource *source, gint *timeout) +int64_t +aio_compute_timeout(AioContext *ctx) { - AioContext *ctx = (AioContext *) source; + int64_t deadline; + int timeout = -1; QEMUBH *bh; - int deadline; - /* We assume there is no timeout already supplied */ - *timeout = -1; for (bh = ctx->first_bh; bh; bh = bh->next) { if (!bh->deleted && bh->scheduled) { if (bh->idle) { /* idle bottom halves will be polled at least * every 10ms */ - *timeout = 10; + timeout = 10000000; } else { /* non-idle bottom halves will be executed * immediately */ - *timeout = 0; - return true; + return 0; } } } - deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)); + deadline = timerlistgroup_deadline_ns(&ctx->tlg); if (deadline == 0) { - *timeout = 0; - return true; + return 0; } else { - *timeout = qemu_soonest_timeout(*timeout, deadline); + return qemu_soonest_timeout(timeout, deadline); + } +} + +static gboolean +aio_ctx_prepare(GSource *source, gint *timeout) +{ + AioContext *ctx = (AioContext *) source; + + /* We assume there is no timeout already supplied */ + *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)); + + if (aio_prepare(ctx)) { + *timeout = 0; } - return false; + return *timeout == 0; } static gboolean @@ -209,7 +218,7 @@ aio_ctx_dispatch(GSource *source, AioContext *ctx = (AioContext *) source; assert(callback 
== NULL); - aio_poll(ctx, false); + aio_dispatch(ctx); return true; } @@ -1819,6 +1819,8 @@ void bdrv_reopen_abort(BDRVReopenState *reopen_state) void bdrv_close(BlockDriverState *bs) { + BdrvAioNotifier *ban, *ban_next; + if (bs->job) { block_job_cancel_sync(bs->job); } @@ -1863,6 +1865,11 @@ void bdrv_close(BlockDriverState *bs) if (bs->io_limits_enabled) { bdrv_io_limits_disable(bs); } + + QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) { + g_free(ban); + } + QLIST_INIT(&bs->aio_notifiers); } void bdrv_close_all(void) @@ -4546,6 +4553,12 @@ static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, // Add the second request qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size); + // Add tail of first request, if necessary + if (qiov->size < reqs[outidx].qiov->size) { + qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size, + reqs[outidx].qiov->size - qiov->size); + } + reqs[outidx].nb_sectors = qiov->size >> 9; reqs[outidx].qiov = qiov; @@ -5729,10 +5742,16 @@ AioContext *bdrv_get_aio_context(BlockDriverState *bs) void bdrv_detach_aio_context(BlockDriverState *bs) { + BdrvAioNotifier *baf; + if (!bs->drv) { return; } + QLIST_FOREACH(baf, &bs->aio_notifiers, list) { + baf->detach_aio_context(baf->opaque); + } + if (bs->io_limits_enabled) { throttle_detach_aio_context(&bs->throttle_state); } @@ -5752,6 +5771,8 @@ void bdrv_detach_aio_context(BlockDriverState *bs) void bdrv_attach_aio_context(BlockDriverState *bs, AioContext *new_context) { + BdrvAioNotifier *ban; + if (!bs->drv) { return; } @@ -5770,6 +5791,10 @@ void bdrv_attach_aio_context(BlockDriverState *bs, if (bs->io_limits_enabled) { throttle_attach_aio_context(&bs->throttle_state, new_context); } + + QLIST_FOREACH(ban, &bs->aio_notifiers, list) { + ban->attached_aio_context(new_context, ban->opaque); + } } void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context) @@ -5786,6 +5811,43 @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context) 
aio_context_release(new_context); } +void bdrv_add_aio_context_notifier(BlockDriverState *bs, + void (*attached_aio_context)(AioContext *new_context, void *opaque), + void (*detach_aio_context)(void *opaque), void *opaque) +{ + BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1); + *ban = (BdrvAioNotifier){ + .attached_aio_context = attached_aio_context, + .detach_aio_context = detach_aio_context, + .opaque = opaque + }; + + QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list); +} + +void bdrv_remove_aio_context_notifier(BlockDriverState *bs, + void (*attached_aio_context)(AioContext *, + void *), + void (*detach_aio_context)(void *), + void *opaque) +{ + BdrvAioNotifier *ban, *ban_next; + + QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) { + if (ban->attached_aio_context == attached_aio_context && + ban->detach_aio_context == detach_aio_context && + ban->opaque == opaque) + { + QLIST_REMOVE(ban, list); + g_free(ban); + + return; + } + } + + abort(); +} + void bdrv_add_before_write_notifier(BlockDriverState *bs, NotifierWithReturn *notifier) { diff --git a/block/Makefile.objs b/block/Makefile.objs index 858d2b387b..f45f9399aa 100644 --- a/block/Makefile.objs +++ b/block/Makefile.objs @@ -10,7 +10,6 @@ block-obj-$(CONFIG_WIN32) += raw-win32.o win32-aio.o block-obj-$(CONFIG_POSIX) += raw-posix.o block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o -ifeq ($(CONFIG_POSIX),y) block-obj-y += nbd.o nbd-client.o sheepdog.o block-obj-$(CONFIG_LIBISCSI) += iscsi.o block-obj-$(CONFIG_LIBNFS) += nfs.o @@ -19,7 +18,6 @@ block-obj-$(CONFIG_RBD) += rbd.o block-obj-$(CONFIG_GLUSTERFS) += gluster.o block-obj-$(CONFIG_ARCHIPELAGO) += archipelago.o block-obj-$(CONFIG_LIBSSH2) += ssh.o -endif common-obj-y += stream.o common-obj-y += commit.o diff --git a/block/blkverify.c b/block/blkverify.c index 7c78ca41a5..163064cf6b 100644 --- a/block/blkverify.c +++ b/block/blkverify.c @@ -158,6 +158,7 @@ static int blkverify_open(BlockDriverState *bs, QDict *options, int flags, ret = 0; fail: + 
qemu_opts_del(opts); return ret; } diff --git a/block/curl.c b/block/curl.c index d4b85d20a5..025833994c 100644 --- a/block/curl.c +++ b/block/curl.c @@ -63,6 +63,7 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle, #define CURL_NUM_ACB 8 #define SECTOR_SIZE 512 #define READ_AHEAD_DEFAULT (256 * 1024) +#define CURL_TIMEOUT_DEFAULT 5 #define FIND_RET_NONE 0 #define FIND_RET_OK 1 @@ -71,6 +72,8 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle, #define CURL_BLOCK_OPT_URL "url" #define CURL_BLOCK_OPT_READAHEAD "readahead" #define CURL_BLOCK_OPT_SSLVERIFY "sslverify" +#define CURL_BLOCK_OPT_TIMEOUT "timeout" +#define CURL_BLOCK_OPT_COOKIE "cookie" struct BDRVCURLState; @@ -109,6 +112,8 @@ typedef struct BDRVCURLState { char *url; size_t readahead_size; bool sslverify; + int timeout; + char *cookie; bool accept_range; AioContext *aio_context; } BDRVCURLState; @@ -352,7 +357,7 @@ static void curl_multi_timeout_do(void *arg) #endif } -static CURLState *curl_init_state(BDRVCURLState *s) +static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s) { CURLState *state = NULL; int i, j; @@ -370,7 +375,7 @@ static CURLState *curl_init_state(BDRVCURLState *s) break; } if (!state) { - aio_poll(state->s->aio_context, true); + aio_poll(bdrv_get_aio_context(bs), true); } } while(!state); @@ -382,7 +387,10 @@ static CURLState *curl_init_state(BDRVCURLState *s) curl_easy_setopt(state->curl, CURLOPT_URL, s->url); curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYPEER, (long) s->sslverify); - curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, 5); + if (s->cookie) { + curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie); + } + curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, s->timeout); curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, (void *)curl_read_cb); curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state); @@ -489,6 +497,16 @@ static QemuOptsList runtime_opts = { .type = QEMU_OPT_BOOL, .help = "Verify SSL certificate" }, + { 
+ .name = CURL_BLOCK_OPT_TIMEOUT, + .type = QEMU_OPT_NUMBER, + .help = "Curl timeout" + }, + { + .name = CURL_BLOCK_OPT_COOKIE, + .type = QEMU_OPT_STRING, + .help = "Pass the cookie or list of cookies with each request" + }, { /* end of list */ } }, }; @@ -501,6 +519,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, QemuOpts *opts; Error *local_err = NULL; const char *file; + const char *cookie; double d; static int inited = 0; @@ -525,8 +544,14 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, goto out_noclean; } + s->timeout = qemu_opt_get_number(opts, CURL_BLOCK_OPT_TIMEOUT, + CURL_TIMEOUT_DEFAULT); + s->sslverify = qemu_opt_get_bool(opts, CURL_BLOCK_OPT_SSLVERIFY, true); + cookie = qemu_opt_get(opts, CURL_BLOCK_OPT_COOKIE); + s->cookie = g_strdup(cookie); + file = qemu_opt_get(opts, CURL_BLOCK_OPT_URL); if (file == NULL) { error_setg(errp, "curl block driver requires an 'url' option"); @@ -541,7 +566,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, DPRINTF("CURL: Opening %s\n", file); s->aio_context = bdrv_get_aio_context(bs); s->url = g_strdup(file); - state = curl_init_state(s); + state = curl_init_state(bs, s); if (!state) goto out_noclean; @@ -582,6 +607,7 @@ out: curl_easy_cleanup(state->curl); state->curl = NULL; out_noclean: + g_free(s->cookie); g_free(s->url); qemu_opts_del(opts); return -EINVAL; @@ -625,7 +651,7 @@ static void curl_readv_bh_cb(void *p) } // No cache found, so let's start a new request - state = curl_init_state(s); + state = curl_init_state(acb->common.bs, s); if (!state) { acb->common.cb(acb->common.opaque, -EIO); qemu_aio_release(acb); @@ -684,6 +710,7 @@ static void curl_close(BlockDriverState *bs) DPRINTF("CURL: Close\n"); curl_detach_aio_context(bs); + g_free(s->cookie); g_free(s->url); } diff --git a/block/linux-aio.c b/block/linux-aio.c index 7ac7e8c99c..9aca758b10 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -51,6 +51,12 @@ struct 
qemu_laio_state { /* io queue for submit at batch */ LaioQueue io_q; + + /* I/O completion processing */ + QEMUBH *completion_bh; + struct io_event events[MAX_EVENTS]; + int event_idx; + int event_max; }; static inline ssize_t io_event_ret(struct io_event *ev) @@ -86,27 +92,58 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s, qemu_aio_release(laiocb); } -static void qemu_laio_completion_cb(EventNotifier *e) +/* The completion BH fetches completed I/O requests and invokes their + * callbacks. + * + * The function is somewhat tricky because it supports nested event loops, for + * example when a request callback invokes aio_poll(). In order to do this, + * the completion events array and index are kept in qemu_laio_state. The BH + * reschedules itself as long as there are completions pending so it will + * either be called again in a nested event loop or will be called after all + * events have been completed. When there are no events left to complete, the + * BH returns without rescheduling. 
+ */ +static void qemu_laio_completion_bh(void *opaque) { - struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e); - - while (event_notifier_test_and_clear(&s->e)) { - struct io_event events[MAX_EVENTS]; - struct timespec ts = { 0 }; - int nevents, i; + struct qemu_laio_state *s = opaque; + /* Fetch more completion events when empty */ + if (s->event_idx == s->event_max) { do { - nevents = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS, events, &ts); - } while (nevents == -EINTR); + struct timespec ts = { 0 }; + s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS, + s->events, &ts); + } while (s->event_max == -EINTR); + + s->event_idx = 0; + if (s->event_max <= 0) { + s->event_max = 0; + return; /* no more events */ + } + } - for (i = 0; i < nevents; i++) { - struct iocb *iocb = events[i].obj; - struct qemu_laiocb *laiocb = - container_of(iocb, struct qemu_laiocb, iocb); + /* Reschedule so nested event loops see currently pending completions */ + qemu_bh_schedule(s->completion_bh); - laiocb->ret = io_event_ret(&events[i]); - qemu_laio_process_completion(s, laiocb); - } + /* Process completion events */ + while (s->event_idx < s->event_max) { + struct iocb *iocb = s->events[s->event_idx].obj; + struct qemu_laiocb *laiocb = + container_of(iocb, struct qemu_laiocb, iocb); + + laiocb->ret = io_event_ret(&s->events[s->event_idx]); + s->event_idx++; + + qemu_laio_process_completion(s, laiocb); + } +} + +static void qemu_laio_completion_cb(EventNotifier *e) +{ + struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e); + + if (event_notifier_test_and_clear(&s->e)) { + qemu_bh_schedule(s->completion_bh); } } @@ -272,12 +309,14 @@ void laio_detach_aio_context(void *s_, AioContext *old_context) struct qemu_laio_state *s = s_; aio_set_event_notifier(old_context, &s->e, NULL); + qemu_bh_delete(s->completion_bh); } void laio_attach_aio_context(void *s_, AioContext *new_context) { struct qemu_laio_state *s = s_; + s->completion_bh = 
aio_bh_new(new_context, qemu_laio_completion_bh, s); aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb); } diff --git a/block/nfs.c b/block/nfs.c index 93d87f3256..194f301501 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -393,16 +393,20 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags, qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); - return -EINVAL; + ret = -EINVAL; + goto out; } ret = nfs_client_open(client, qemu_opt_get(opts, "filename"), (flags & BDRV_O_RDWR) ? O_RDWR : O_RDONLY, errp); if (ret < 0) { - return ret; + goto out; } bs->total_sectors = ret; - return 0; + ret = 0; +out: + qemu_opts_del(opts); + return ret; } static int nfs_file_create(const char *url, QemuOpts *opts, Error **errp) diff --git a/block/quorum.c b/block/quorum.c index 0de07bb036..093382e8f5 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -29,6 +29,7 @@ #define QUORUM_OPT_VOTE_THRESHOLD "vote-threshold" #define QUORUM_OPT_BLKVERIFY "blkverify" #define QUORUM_OPT_REWRITE "rewrite-corrupted" +#define QUORUM_OPT_READ_PATTERN "read-pattern" /* This union holds a vote hash value */ typedef union QuorumVoteValue { @@ -79,6 +80,8 @@ typedef struct BDRVQuorumState { bool rewrite_corrupted;/* true if the driver must rewrite-on-read corrupted * block if Quorum is reached. 
*/ + + QuorumReadPattern read_pattern; } BDRVQuorumState; typedef struct QuorumAIOCB QuorumAIOCB; @@ -122,6 +125,7 @@ struct QuorumAIOCB { bool is_read; int vote_ret; + int child_iter; /* which child to read in fifo pattern */ }; static bool quorum_vote(QuorumAIOCB *acb); @@ -148,7 +152,6 @@ static AIOCBInfo quorum_aiocb_info = { static void quorum_aio_finalize(QuorumAIOCB *acb) { - BDRVQuorumState *s = acb->common.bs->opaque; int i, ret = 0; if (acb->vote_ret) { @@ -158,7 +161,8 @@ static void quorum_aio_finalize(QuorumAIOCB *acb) acb->common.cb(acb->common.opaque, ret); if (acb->is_read) { - for (i = 0; i < s->num_children; i++) { + /* on the quorum case acb->child_iter == s->num_children - 1 */ + for (i = 0; i <= acb->child_iter; i++) { qemu_vfree(acb->qcrs[i].buf); qemu_iovec_destroy(&acb->qcrs[i].qiov); } @@ -261,6 +265,21 @@ static void quorum_rewrite_aio_cb(void *opaque, int ret) quorum_aio_finalize(acb); } +static BlockDriverAIOCB *read_fifo_child(QuorumAIOCB *acb); + +static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source) +{ + int i; + assert(dest->niov == source->niov); + assert(dest->size == source->size); + for (i = 0; i < source->niov; i++) { + assert(dest->iov[i].iov_len == source->iov[i].iov_len); + memcpy(dest->iov[i].iov_base, + source->iov[i].iov_base, + source->iov[i].iov_len); + } +} + static void quorum_aio_cb(void *opaque, int ret) { QuorumChildRequest *sacb = opaque; @@ -268,6 +287,21 @@ static void quorum_aio_cb(void *opaque, int ret) BDRVQuorumState *s = acb->common.bs->opaque; bool rewrite = false; + if (acb->is_read && s->read_pattern == QUORUM_READ_PATTERN_FIFO) { + /* We try to read next child in FIFO order if we fail to read */ + if (ret < 0 && ++acb->child_iter < s->num_children) { + read_fifo_child(acb); + return; + } + + if (ret == 0) { + quorum_copy_qiov(acb->qiov, &acb->qcrs[acb->child_iter].qiov); + } + acb->vote_ret = ret; + quorum_aio_finalize(acb); + return; + } + sacb->ret = ret; acb->count++; if (ret == 0) { 
@@ -348,19 +382,6 @@ static bool quorum_rewrite_bad_versions(BDRVQuorumState *s, QuorumAIOCB *acb, return count; } -static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source) -{ - int i; - assert(dest->niov == source->niov); - assert(dest->size == source->size); - for (i = 0; i < source->niov; i++) { - assert(dest->iov[i].iov_len == source->iov[i].iov_len); - memcpy(dest->iov[i].iov_base, - source->iov[i].iov_base, - source->iov[i].iov_len); - } -} - static void quorum_count_vote(QuorumVotes *votes, QuorumVoteValue *value, int index) @@ -620,34 +641,62 @@ free_exit: return rewrite; } -static BlockDriverAIOCB *quorum_aio_readv(BlockDriverState *bs, - int64_t sector_num, - QEMUIOVector *qiov, - int nb_sectors, - BlockDriverCompletionFunc *cb, - void *opaque) +static BlockDriverAIOCB *read_quorum_children(QuorumAIOCB *acb) { - BDRVQuorumState *s = bs->opaque; - QuorumAIOCB *acb = quorum_aio_get(s, bs, qiov, sector_num, - nb_sectors, cb, opaque); + BDRVQuorumState *s = acb->common.bs->opaque; int i; - acb->is_read = true; - for (i = 0; i < s->num_children; i++) { - acb->qcrs[i].buf = qemu_blockalign(s->bs[i], qiov->size); - qemu_iovec_init(&acb->qcrs[i].qiov, qiov->niov); - qemu_iovec_clone(&acb->qcrs[i].qiov, qiov, acb->qcrs[i].buf); + acb->qcrs[i].buf = qemu_blockalign(s->bs[i], acb->qiov->size); + qemu_iovec_init(&acb->qcrs[i].qiov, acb->qiov->niov); + qemu_iovec_clone(&acb->qcrs[i].qiov, acb->qiov, acb->qcrs[i].buf); } for (i = 0; i < s->num_children; i++) { - bdrv_aio_readv(s->bs[i], sector_num, &acb->qcrs[i].qiov, nb_sectors, - quorum_aio_cb, &acb->qcrs[i]); + bdrv_aio_readv(s->bs[i], acb->sector_num, &acb->qcrs[i].qiov, + acb->nb_sectors, quorum_aio_cb, &acb->qcrs[i]); } return &acb->common; } +static BlockDriverAIOCB *read_fifo_child(QuorumAIOCB *acb) +{ + BDRVQuorumState *s = acb->common.bs->opaque; + + acb->qcrs[acb->child_iter].buf = qemu_blockalign(s->bs[acb->child_iter], + acb->qiov->size); + qemu_iovec_init(&acb->qcrs[acb->child_iter].qiov, 
acb->qiov->niov); + qemu_iovec_clone(&acb->qcrs[acb->child_iter].qiov, acb->qiov, + acb->qcrs[acb->child_iter].buf); + bdrv_aio_readv(s->bs[acb->child_iter], acb->sector_num, + &acb->qcrs[acb->child_iter].qiov, acb->nb_sectors, + quorum_aio_cb, &acb->qcrs[acb->child_iter]); + + return &acb->common; +} + +static BlockDriverAIOCB *quorum_aio_readv(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BlockDriverCompletionFunc *cb, + void *opaque) +{ + BDRVQuorumState *s = bs->opaque; + QuorumAIOCB *acb = quorum_aio_get(s, bs, qiov, sector_num, + nb_sectors, cb, opaque); + acb->is_read = true; + + if (s->read_pattern == QUORUM_READ_PATTERN_QUORUM) { + acb->child_iter = s->num_children - 1; + return read_quorum_children(acb); + } + + acb->child_iter = 0; + return read_fifo_child(acb); +} + static BlockDriverAIOCB *quorum_aio_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, @@ -787,16 +836,39 @@ static QemuOptsList quorum_runtime_opts = { .type = QEMU_OPT_BOOL, .help = "Rewrite corrupted block on read quorum", }, + { + .name = QUORUM_OPT_READ_PATTERN, + .type = QEMU_OPT_STRING, + .help = "Allowed pattern: quorum, fifo. 
Quorum is default", + }, { /* end of list */ } }, }; +static int parse_read_pattern(const char *opt) +{ + int i; + + if (!opt) { + /* Set quorum as default */ + return QUORUM_READ_PATTERN_QUORUM; + } + + for (i = 0; i < QUORUM_READ_PATTERN_MAX; i++) { + if (!strcmp(opt, QuorumReadPattern_lookup[i])) { + return i; + } + } + + return -EINVAL; +} + static int quorum_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVQuorumState *s = bs->opaque; Error *local_err = NULL; - QemuOpts *opts; + QemuOpts *opts = NULL; bool *opened; QDict *sub = NULL; QList *list = NULL; @@ -832,28 +904,37 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags, } s->threshold = qemu_opt_get_number(opts, QUORUM_OPT_VOTE_THRESHOLD, 0); - - /* and validate it against s->num_children */ - ret = quorum_valid_threshold(s->threshold, s->num_children, &local_err); + ret = parse_read_pattern(qemu_opt_get(opts, QUORUM_OPT_READ_PATTERN)); if (ret < 0) { + error_setg(&local_err, "Please set read-pattern as fifo or quorum"); goto exit; } + s->read_pattern = ret; - /* is the driver in blkverify mode */ - if (qemu_opt_get_bool(opts, QUORUM_OPT_BLKVERIFY, false) && - s->num_children == 2 && s->threshold == 2) { - s->is_blkverify = true; - } else if (qemu_opt_get_bool(opts, QUORUM_OPT_BLKVERIFY, false)) { - fprintf(stderr, "blkverify mode is set by setting blkverify=on " - "and using two files with vote_threshold=2\n"); - } + if (s->read_pattern == QUORUM_READ_PATTERN_QUORUM) { + /* and validate it against s->num_children */ + ret = quorum_valid_threshold(s->threshold, s->num_children, &local_err); + if (ret < 0) { + goto exit; + } - s->rewrite_corrupted = qemu_opt_get_bool(opts, QUORUM_OPT_REWRITE, false); - if (s->rewrite_corrupted && s->is_blkverify) { - error_setg(&local_err, - "rewrite-corrupted=on cannot be used with blkverify=on"); - ret = -EINVAL; - goto exit; + /* is the driver in blkverify mode */ + if (qemu_opt_get_bool(opts, QUORUM_OPT_BLKVERIFY, false) && 
+ s->num_children == 2 && s->threshold == 2) { + s->is_blkverify = true; + } else if (qemu_opt_get_bool(opts, QUORUM_OPT_BLKVERIFY, false)) { + fprintf(stderr, "blkverify mode is set by setting blkverify=on " + "and using two files with vote_threshold=2\n"); + } + + s->rewrite_corrupted = qemu_opt_get_bool(opts, QUORUM_OPT_REWRITE, + false); + if (s->rewrite_corrupted && s->is_blkverify) { + error_setg(&local_err, + "rewrite-corrupted=on cannot be used with blkverify=on"); + ret = -EINVAL; + goto exit; + } } /* allocate the children BlockDriverState array */ @@ -908,6 +989,7 @@ close_exit: g_free(s->bs); g_free(opened); exit: + qemu_opts_del(opts); /* propagate error */ if (local_err) { error_propagate(errp, local_err); diff --git a/block/sheepdog.c b/block/sheepdog.c index 12cbd9dcb4..f91afc3a5b 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -103,6 +103,9 @@ #define SD_INODE_SIZE (sizeof(SheepdogInode)) #define CURRENT_VDI_ID 0 +#define LOCK_TYPE_NORMAL 0 +#define LOCK_TYPE_SHARED 1 /* for iSCSI multipath */ + typedef struct SheepdogReq { uint8_t proto_ver; uint8_t opcode; @@ -166,7 +169,8 @@ typedef struct SheepdogVdiReq { uint8_t copy_policy; uint8_t reserved[2]; uint32_t snapid; - uint32_t pad[3]; + uint32_t type; + uint32_t pad[2]; } SheepdogVdiReq; typedef struct SheepdogVdiRsp { @@ -712,7 +716,6 @@ static void coroutine_fn send_pending_req(BDRVSheepdogState *s, uint64_t oid) static coroutine_fn void reconnect_to_sdog(void *opaque) { - Error *local_err = NULL; BDRVSheepdogState *s = opaque; AIOReq *aio_req, *next; @@ -727,6 +730,7 @@ static coroutine_fn void reconnect_to_sdog(void *opaque) /* Try to reconnect the sheepdog server every one second. 
*/ while (s->fd < 0) { + Error *local_err = NULL; s->fd = get_sheep_fd(s, &local_err); if (s->fd < 0) { DPRINTF("Wait for connection to be established\n"); @@ -1090,6 +1094,7 @@ static int find_vdi_name(BDRVSheepdogState *s, const char *filename, memset(&hdr, 0, sizeof(hdr)); if (lock) { hdr.opcode = SD_OP_LOCK_VDI; + hdr.type = LOCK_TYPE_NORMAL; } else { hdr.opcode = SD_OP_GET_VDI_INFO; } @@ -1110,6 +1115,8 @@ static int find_vdi_name(BDRVSheepdogState *s, const char *filename, sd_strerror(rsp->result), filename, snapid, tag); if (rsp->result == SD_RES_NO_VDI) { ret = -ENOENT; + } else if (rsp->result == SD_RES_VDI_LOCKED) { + ret = -EBUSY; } else { ret = -EIO; } @@ -1793,6 +1800,7 @@ static void sd_close(BlockDriverState *bs) memset(&hdr, 0, sizeof(hdr)); hdr.opcode = SD_OP_RELEASE_VDI; + hdr.type = LOCK_TYPE_NORMAL; hdr.base_vdi_id = s->inode.vdi_id; wlen = strlen(s->name) + 1; hdr.data_length = wlen; diff --git a/blockdev.c b/blockdev.c index 6a204c662d..e37b068e9e 100644 --- a/blockdev.c +++ b/blockdev.c @@ -1757,6 +1757,7 @@ int do_drive_del(Monitor *mon, const QDict *qdict, QObject **ret_data) { const char *id = qdict_get_str(qdict, "id"); BlockDriverState *bs; + AioContext *aio_context; Error *local_err = NULL; bs = bdrv_find(id); @@ -1764,9 +1765,14 @@ int do_drive_del(Monitor *mon, const QDict *qdict, QObject **ret_data) error_report("Device '%s' not found", id); return -1; } + + aio_context = bdrv_get_aio_context(bs); + aio_context_acquire(aio_context); + if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) { error_report("%s", error_get_pretty(local_err)); error_free(local_err); + aio_context_release(aio_context); return -1; } @@ -1790,6 +1796,7 @@ int do_drive_del(Monitor *mon, const QDict *qdict, QObject **ret_data) drive_del(drive_get_by_blockdev(bs)); } + aio_context_release(aio_context); return 0; } @@ -2179,11 +2186,12 @@ void qmp_drive_mirror(const char *device, const char *target, } if (granularity != 0 && (granularity < 512 || 
granularity > 1048576 * 64)) { - error_set(errp, QERR_INVALID_PARAMETER, device); + error_set(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", + "a value in range [512B, 64MB]"); return; } if (granularity & (granularity - 1)) { - error_set(errp, QERR_INVALID_PARAMETER, device); + error_set(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", "power of 2"); return; } diff --git a/blockjob.c b/blockjob.c index ca0b4e25d0..0689fdd2b5 100644 --- a/blockjob.c +++ b/blockjob.c @@ -205,7 +205,7 @@ void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns) if (block_job_is_paused(job)) { qemu_coroutine_yield(); } else { - co_sleep_ns(type, ns); + co_aio_sleep_ns(bdrv_get_aio_context(job->bs), type, ns); } job->busy = true; } @@ -5045,6 +5045,8 @@ case "$target_name" in TARGET_BASE_ARCH=mips echo "TARGET_ABI_MIPSN64=y" >> $config_target_mak ;; + tricore) + ;; moxie) ;; or32) @@ -5093,6 +5095,7 @@ case "$target_name" in echo "TARGET_ABI32=y" >> $config_target_mak ;; s390x) + gdb_xml_files="s390x-core64.xml s390-acr.xml s390-fpr.xml" ;; unicore32) ;; diff --git a/cpu-exec.c b/cpu-exec.c index c6aad742e1..7b5d2e21d0 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -387,6 +387,7 @@ int cpu_exec(CPUArchState *env) #elif defined(TARGET_CRIS) #elif defined(TARGET_S390X) #elif defined(TARGET_XTENSA) +#elif defined(TARGET_TRICORE) /* XXXXX */ #else #error unsupported target CPU @@ -444,7 +445,8 @@ int cpu_exec(CPUArchState *env) } #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \ defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \ - defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32) + defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \ + defined(TARGET_UNICORE32) || defined(TARGET_TRICORE) if (interrupt_request & CPU_INTERRUPT_HALT) { cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; cpu->halted = 1; @@ -560,6 +562,12 @@ int cpu_exec(CPUArchState *env) cc->do_interrupt(cpu); next_tb = 0; } +#elif 
defined(TARGET_TRICORE) + if ((interrupt_request & CPU_INTERRUPT_HARD)) { + cc->do_interrupt(cpu); + next_tb = 0; + } + #elif defined(TARGET_OPENRISC) { int idx = -1; @@ -846,6 +854,7 @@ int cpu_exec(CPUArchState *env) | env->cc_dest | (env->cc_x << 4); #elif defined(TARGET_MICROBLAZE) #elif defined(TARGET_MIPS) +#elif defined(TARGET_TRICORE) #elif defined(TARGET_MOXIE) #elif defined(TARGET_OPENRISC) #elif defined(TARGET_SH4) @@ -1410,6 +1410,9 @@ CpuInfoList *qmp_query_cpus(Error **errp) #elif defined(TARGET_MIPS) MIPSCPU *mips_cpu = MIPS_CPU(cpu); CPUMIPSState *env = &mips_cpu->env; +#elif defined(TARGET_TRICORE) + TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu); + CPUTriCoreState *env = &tricore_cpu->env; #endif cpu_synchronize_state(cpu); @@ -1434,6 +1437,9 @@ CpuInfoList *qmp_query_cpus(Error **errp) #elif defined(TARGET_MIPS) info->value->has_PC = true; info->value->PC = env->active_tc.PC; +#elif defined(TARGET_TRICORE) + info->value->has_PC = true; + info->value->PC = env->PC; #endif /* XXX: waiting for the qapi to support GSList */ @@ -60,8 +60,10 @@ void tlb_flush(CPUState *cpu, int flush_global) cpu->current_tb = NULL; memset(env->tlb_table, -1, sizeof(env->tlb_table)); + memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table)); memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); + env->vtlb_index = 0; env->tlb_flush_addr = -1; env->tlb_flush_mask = 0; tlb_flush_count++; @@ -108,6 +110,14 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr) tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); } + /* check whether there are entries that need to be flushed in the vtlb */ + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr); + } + } + tb_flush_jmp_cache(cpu, addr); } @@ -172,6 +182,11 @@ void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length) tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], start1, length); } + + for (i = 0; i < 
CPU_VTLB_SIZE; i++) { + tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i], + start1, length); + } } } } @@ -195,6 +210,13 @@ void tlb_set_dirty(CPUArchState *env, target_ulong vaddr) for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); } + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr); + } + } } /* Our TLB does not support large pages, so remember the area covered by @@ -235,6 +257,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, uintptr_t addend; CPUTLBEntry *te; hwaddr iotlb, xlat, sz; + unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; assert(size >= TARGET_PAGE_SIZE); if (size != TARGET_PAGE_SIZE) { @@ -267,8 +290,14 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, prot, &address); index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - env->iotlb[mmu_idx][index] = iotlb - vaddr; te = &env->tlb_table[mmu_idx][index]; + + /* do not discard the translation in te, evict it into a victim tlb */ + env->tlb_v_table[mmu_idx][vidx] = *te; + env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; + + /* refill the tlb */ + env->iotlb[mmu_idx][index] = iotlb - vaddr; te->addend = addend - vaddr; if (prot & PAGE_READ) { te->addr_read = address; diff --git a/default-configs/tricore-softmmu.mak b/default-configs/tricore-softmmu.mak new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/default-configs/tricore-softmmu.mak diff --git a/gdb-xml/s390-acr.xml b/gdb-xml/s390-acr.xml new file mode 100644 index 0000000000..71dfb20528 --- /dev/null +++ b/gdb-xml/s390-acr.xml @@ -0,0 +1,26 @@ +<?xml version="1.0"?> +<!-- Copyright (C) 2010-2014 Free Software Foundation, Inc. + + Copying and distribution of this file, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. 
--> + +<!DOCTYPE feature SYSTEM "gdb-target.dtd"> +<feature name="org.gnu.gdb.s390.acr"> + <reg name="acr0" bitsize="32" type="uint32" group="access"/> + <reg name="acr1" bitsize="32" type="uint32" group="access"/> + <reg name="acr2" bitsize="32" type="uint32" group="access"/> + <reg name="acr3" bitsize="32" type="uint32" group="access"/> + <reg name="acr4" bitsize="32" type="uint32" group="access"/> + <reg name="acr5" bitsize="32" type="uint32" group="access"/> + <reg name="acr6" bitsize="32" type="uint32" group="access"/> + <reg name="acr7" bitsize="32" type="uint32" group="access"/> + <reg name="acr8" bitsize="32" type="uint32" group="access"/> + <reg name="acr9" bitsize="32" type="uint32" group="access"/> + <reg name="acr10" bitsize="32" type="uint32" group="access"/> + <reg name="acr11" bitsize="32" type="uint32" group="access"/> + <reg name="acr12" bitsize="32" type="uint32" group="access"/> + <reg name="acr13" bitsize="32" type="uint32" group="access"/> + <reg name="acr14" bitsize="32" type="uint32" group="access"/> + <reg name="acr15" bitsize="32" type="uint32" group="access"/> +</feature> diff --git a/gdb-xml/s390-fpr.xml b/gdb-xml/s390-fpr.xml new file mode 100644 index 0000000000..7de0c136ad --- /dev/null +++ b/gdb-xml/s390-fpr.xml @@ -0,0 +1,27 @@ +<?xml version="1.0"?> +<!-- Copyright (C) 2010-2014 Free Software Foundation, Inc. + + Copying and distribution of this file, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. 
--> + +<!DOCTYPE feature SYSTEM "gdb-target.dtd"> +<feature name="org.gnu.gdb.s390.fpr"> + <reg name="fpc" bitsize="32" type="uint32" group="float"/> + <reg name="f0" bitsize="64" type="ieee_double" group="float"/> + <reg name="f1" bitsize="64" type="ieee_double" group="float"/> + <reg name="f2" bitsize="64" type="ieee_double" group="float"/> + <reg name="f3" bitsize="64" type="ieee_double" group="float"/> + <reg name="f4" bitsize="64" type="ieee_double" group="float"/> + <reg name="f5" bitsize="64" type="ieee_double" group="float"/> + <reg name="f6" bitsize="64" type="ieee_double" group="float"/> + <reg name="f7" bitsize="64" type="ieee_double" group="float"/> + <reg name="f8" bitsize="64" type="ieee_double" group="float"/> + <reg name="f9" bitsize="64" type="ieee_double" group="float"/> + <reg name="f10" bitsize="64" type="ieee_double" group="float"/> + <reg name="f11" bitsize="64" type="ieee_double" group="float"/> + <reg name="f12" bitsize="64" type="ieee_double" group="float"/> + <reg name="f13" bitsize="64" type="ieee_double" group="float"/> + <reg name="f14" bitsize="64" type="ieee_double" group="float"/> + <reg name="f15" bitsize="64" type="ieee_double" group="float"/> +</feature> diff --git a/gdb-xml/s390x-core64.xml b/gdb-xml/s390x-core64.xml new file mode 100644 index 0000000000..15234378ee --- /dev/null +++ b/gdb-xml/s390x-core64.xml @@ -0,0 +1,28 @@ +<?xml version="1.0"?> +<!-- Copyright (C) 2010-2014 Free Software Foundation, Inc. + + Copying and distribution of this file, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. 
--> + +<!DOCTYPE feature SYSTEM "gdb-target.dtd"> +<feature name="org.gnu.gdb.s390.core"> + <reg name="pswm" bitsize="64" type="uint64" group="psw"/> + <reg name="pswa" bitsize="64" type="uint64" group="psw"/> + <reg name="r0" bitsize="64" type="uint64" group="general"/> + <reg name="r1" bitsize="64" type="uint64" group="general"/> + <reg name="r2" bitsize="64" type="uint64" group="general"/> + <reg name="r3" bitsize="64" type="uint64" group="general"/> + <reg name="r4" bitsize="64" type="uint64" group="general"/> + <reg name="r5" bitsize="64" type="uint64" group="general"/> + <reg name="r6" bitsize="64" type="uint64" group="general"/> + <reg name="r7" bitsize="64" type="uint64" group="general"/> + <reg name="r8" bitsize="64" type="uint64" group="general"/> + <reg name="r9" bitsize="64" type="uint64" group="general"/> + <reg name="r10" bitsize="64" type="uint64" group="general"/> + <reg name="r11" bitsize="64" type="uint64" group="general"/> + <reg name="r12" bitsize="64" type="uint64" group="general"/> + <reg name="r13" bitsize="64" type="uint64" group="general"/> + <reg name="r14" bitsize="64" type="uint64" group="general"/> + <reg name="r15" bitsize="64" type="uint64" group="general"/> +</feature> diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index c07adc6e4f..b55188cb82 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -194,6 +194,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk, error_setg(&s->blocker, "block device is in use by data plane"); bdrv_op_block_all(blk->conf.bs, s->blocker); bdrv_op_unblock(blk->conf.bs, BLOCK_OP_TYPE_RESIZE, s->blocker); + bdrv_op_unblock(blk->conf.bs, BLOCK_OP_TYPE_DRIVE_DEL, s->blocker); *dataplane = s; } diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index b4a467116e..efab95b320 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -59,7 +59,7 @@ static char *idebus_get_fw_dev_path(DeviceState *dev) { char path[30]; - snprintf(path, 
sizeof(path), "%s@%d", qdev_fw_name(dev), + snprintf(path, sizeof(path), "%s@%x", qdev_fw_name(dev), ((IDEBus*)dev->parent_bus)->bus_id); return g_strdup(path); diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 004b2c20c5..e538b1f686 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -17,6 +17,7 @@ #include "ioinst.h" #include "css.h" #include "virtio-ccw.h" +#include "qemu/config-file.h" #define TYPE_S390_CCW_MACHINE "s390-ccw-machine" @@ -86,17 +87,35 @@ static void ccw_init(MachineState *machine) ram_addr_t my_ram_size = machine->ram_size; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); - int shift = 0; + sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev(); uint8_t *storage_keys; int ret; VirtualCssBus *css_bus; - - /* s390x ram size detection needs a 16bit multiplier + an increment. So - guests > 64GB can be specified in 2MB steps etc. */ - while ((my_ram_size >> (20 + shift)) > 65535) { - shift++; + QemuOpts *opts = qemu_opts_find(qemu_find_opts("memory"), NULL); + ram_addr_t pad_size = 0; + ram_addr_t maxmem = qemu_opt_get_size(opts, "maxmem", my_ram_size); + ram_addr_t standby_mem_size = maxmem - my_ram_size; + + /* The storage increment size is a multiple of 1M and is a power of 2. + * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer. + * The variable 'mhd->increment_size' is an exponent of 2 that can be + * used to calculate the size (in bytes) of an increment. */ + mhd->increment_size = 20; + while ((my_ram_size >> mhd->increment_size) > MAX_STORAGE_INCREMENTS) { + mhd->increment_size++; + } + while ((standby_mem_size >> mhd->increment_size) > MAX_STORAGE_INCREMENTS) { + mhd->increment_size++; } - my_ram_size = my_ram_size >> (20 + shift) << (20 + shift); + + /* The core and standby memory areas need to be aligned with + * the increment size. 
In effect, this can cause the + * user-specified memory size to be rounded down to align + * with the nearest increment boundary. */ + standby_mem_size = standby_mem_size >> mhd->increment_size + << mhd->increment_size; + my_ram_size = my_ram_size >> mhd->increment_size + << mhd->increment_size; /* let's propagate the changed ram size into the global variable. */ ram_size = my_ram_size; @@ -111,11 +130,22 @@ static void ccw_init(MachineState *machine) /* register hypercalls */ virtio_ccw_register_hcalls(); - /* allocate RAM */ + /* allocate RAM for core */ memory_region_init_ram(ram, NULL, "s390.ram", my_ram_size); vmstate_register_ram_global(ram); memory_region_add_subregion(sysmem, 0, ram); + /* If the size of ram is not on a MEM_SECTION_SIZE boundary, + calculate the pad size necessary to force this boundary. */ + if (standby_mem_size) { + if (my_ram_size % MEM_SECTION_SIZE) { + pad_size = MEM_SECTION_SIZE - my_ram_size % MEM_SECTION_SIZE; + } + my_ram_size += standby_mem_size + pad_size; + mhd->pad_size = pad_size; + mhd->standby_mem_size = standby_mem_size; + } + /* allocate storage keys */ storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE); diff --git a/hw/s390x/s390-virtio.c b/hw/s390x/s390-virtio.c index 1a75a1cf81..4ca52b7190 100644 --- a/hw/s390x/s390-virtio.c +++ b/hw/s390x/s390-virtio.c @@ -230,18 +230,21 @@ static void s390_init(MachineState *machine) ram_addr_t my_ram_size = machine->ram_size; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); - int shift = 0; + int increment_size = 20; uint8_t *storage_keys; void *virtio_region; hwaddr virtio_region_len; hwaddr virtio_region_start; - /* s390x ram size detection needs a 16bit multiplier + an increment. So - guests > 64GB can be specified in 2MB steps etc. */ - while ((my_ram_size >> (20 + shift)) > 65535) { - shift++; + /* + * The storage increment size is a multiple of 1M and is a power of 2. 
+ * The number of storage increments must be MAX_STORAGE_INCREMENTS or + * fewer. + */ + while ((my_ram_size >> increment_size) > MAX_STORAGE_INCREMENTS) { + increment_size++; } - my_ram_size = my_ram_size >> (20 + shift) << (20 + shift); + my_ram_size = my_ram_size >> increment_size << increment_size; /* let's propagate the changed ram size into the global variable. */ ram_size = my_ram_size; diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index d8ddf35e58..02b3275132 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -16,7 +16,8 @@ #include "sysemu/kvm.h" #include "exec/memory.h" #include "sysemu/sysemu.h" - +#include "exec/address-spaces.h" +#include "qemu/config-file.h" #include "hw/s390x/sclp.h" #include "hw/s390x/event-facility.h" @@ -33,10 +34,19 @@ static inline SCLPEventFacility *get_event_facility(void) static void read_SCP_info(SCCB *sccb) { ReadInfo *read_info = (ReadInfo *) sccb; + sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); CPUState *cpu; - int shift = 0; int cpu_count = 0; int i = 0; + int increment_size = 20; + int rnsize, rnmax; + QemuOpts *opts = qemu_opts_find(qemu_find_opts("memory"), NULL); + int slots = qemu_opt_get_number(opts, "slots", 0); + int max_avail_slots = s390_get_memslot_count(kvm_state); + + if (slots > max_avail_slots) { + slots = max_avail_slots; + } CPU_FOREACH(cpu) { cpu_count++; @@ -54,14 +64,235 @@ static void read_SCP_info(SCCB *sccb) read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO); - while ((ram_size >> (20 + shift)) > 65535) { - shift++; + /* + * The storage increment size is a multiple of 1M and is a power of 2. + * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer. 
+ */ + while ((ram_size >> increment_size) > MAX_STORAGE_INCREMENTS) { + increment_size++; + } + rnmax = ram_size >> increment_size; + + /* Memory Hotplug is only supported for the ccw machine type */ + if (mhd) { + while ((mhd->standby_mem_size >> increment_size) > + MAX_STORAGE_INCREMENTS) { + increment_size++; + } + assert(increment_size == mhd->increment_size); + + mhd->standby_subregion_size = MEM_SECTION_SIZE; + /* Deduct the memory slot already used for core */ + if (slots > 0) { + while ((mhd->standby_subregion_size * (slots - 1) + < mhd->standby_mem_size)) { + mhd->standby_subregion_size = mhd->standby_subregion_size << 1; + } + } + /* + * Initialize mapping of guest standby memory sections indicating which + * are and are not online. Assume all standby memory begins offline. + */ + if (mhd->standby_state_map == 0) { + if (mhd->standby_mem_size % mhd->standby_subregion_size) { + mhd->standby_state_map = g_malloc0((mhd->standby_mem_size / + mhd->standby_subregion_size + 1) * + (mhd->standby_subregion_size / + MEM_SECTION_SIZE)); + } else { + mhd->standby_state_map = g_malloc0(mhd->standby_mem_size / + MEM_SECTION_SIZE); + } + } + mhd->padded_ram_size = ram_size + mhd->pad_size; + mhd->rzm = 1 << mhd->increment_size; + rnmax = ((ram_size + mhd->standby_mem_size + mhd->pad_size) + >> mhd->increment_size); + + read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR); + } + + rnsize = 1 << (increment_size - 20); + if (rnsize <= 128) { + read_info->rnsize = rnsize; + } else { + read_info->rnsize = 0; + read_info->rnsize2 = cpu_to_be32(rnsize); + } + + if (rnmax < 0x10000) { + read_info->rnmax = cpu_to_be16(rnmax); + } else { + read_info->rnmax = cpu_to_be16(0); + read_info->rnmax2 = cpu_to_be64(rnmax); } - read_info->rnmax = cpu_to_be16(ram_size >> (20 + shift)); - read_info->rnsize = 1 << shift; + sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION); } +static void read_storage_element0_info(SCCB *sccb) +{ + int i, assigned; + int 
subincrement_id = SCLP_STARTING_SUBINCREMENT_ID; + ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb; + sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); + + assert(mhd); + + if ((ram_size >> mhd->increment_size) >= 0x10000) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION); + return; + } + + /* Return information regarding core memory */ + storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0); + assigned = ram_size >> mhd->increment_size; + storage_info->assigned = cpu_to_be16(assigned); + + for (i = 0; i < assigned; i++) { + storage_info->entries[i] = cpu_to_be32(subincrement_id); + subincrement_id += SCLP_INCREMENT_UNIT; + } + sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION); +} + +static void read_storage_element1_info(SCCB *sccb) +{ + ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb; + sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); + + assert(mhd); + + if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION); + return; + } + + /* Return information regarding standby memory */ + storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 
1 : 0); + storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >> + mhd->increment_size); + storage_info->standby = cpu_to_be16(mhd->standby_mem_size >> + mhd->increment_size); + sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION); +} + +static void attach_storage_element(SCCB *sccb, uint16_t element) +{ + int i, assigned, subincrement_id; + AttachStorageElement *attach_info = (AttachStorageElement *) sccb; + sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); + + assert(mhd); + + if (element != 1) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); + return; + } + + assigned = mhd->standby_mem_size >> mhd->increment_size; + attach_info->assigned = cpu_to_be16(assigned); + subincrement_id = ((ram_size >> mhd->increment_size) << 16) + + SCLP_STARTING_SUBINCREMENT_ID; + for (i = 0; i < assigned; i++) { + attach_info->entries[i] = cpu_to_be32(subincrement_id); + subincrement_id += SCLP_INCREMENT_UNIT; + } + sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); +} + +static void assign_storage(SCCB *sccb) +{ + MemoryRegion *mr = NULL; + uint64_t this_subregion_size; + AssignStorage *assign_info = (AssignStorage *) sccb; + sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); + assert(mhd); + ram_addr_t assign_addr = (assign_info->rn - 1) * mhd->rzm; + MemoryRegion *sysmem = get_system_memory(); + + if ((assign_addr % MEM_SECTION_SIZE == 0) && + (assign_addr >= mhd->padded_ram_size)) { + /* Re-use existing memory region if found */ + mr = memory_region_find(sysmem, assign_addr, 1).mr; + if (!mr) { + + MemoryRegion *standby_ram = g_new(MemoryRegion, 1); + + /* offset to align to standby_subregion_size for allocation */ + ram_addr_t offset = assign_addr - + (assign_addr - mhd->padded_ram_size) + % mhd->standby_subregion_size; + + /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */ + char id[16]; + snprintf(id, 16, "standby.ram%d", + (int)((offset - mhd->padded_ram_size) / + 
mhd->standby_subregion_size) + 1); + + /* Allocate a subregion of the calculated standby_subregion_size */ + if (offset + mhd->standby_subregion_size > + mhd->padded_ram_size + mhd->standby_mem_size) { + this_subregion_size = mhd->padded_ram_size + + mhd->standby_mem_size - offset; + } else { + this_subregion_size = mhd->standby_subregion_size; + } + + memory_region_init_ram(standby_ram, NULL, id, this_subregion_size); + vmstate_register_ram_global(standby_ram); + memory_region_add_subregion(sysmem, offset, standby_ram); + } + /* The specified subregion is no longer in standby */ + mhd->standby_state_map[(assign_addr - mhd->padded_ram_size) + / MEM_SECTION_SIZE] = 1; + } + sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); +} + +static void unassign_storage(SCCB *sccb) +{ + MemoryRegion *mr = NULL; + AssignStorage *assign_info = (AssignStorage *) sccb; + sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); + assert(mhd); + ram_addr_t unassign_addr = (assign_info->rn - 1) * mhd->rzm; + MemoryRegion *sysmem = get_system_memory(); + + /* if the addr is a multiple of 256 MB */ + if ((unassign_addr % MEM_SECTION_SIZE == 0) && + (unassign_addr >= mhd->padded_ram_size)) { + mhd->standby_state_map[(unassign_addr - + mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0; + + /* find the specified memory region and destroy it */ + mr = memory_region_find(sysmem, unassign_addr, 1).mr; + if (mr) { + int i; + int is_removable = 1; + ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size - + (unassign_addr - mhd->padded_ram_size) + % mhd->standby_subregion_size); + /* Mark all affected subregions as 'standby' once again */ + for (i = 0; + i < (mhd->standby_subregion_size / MEM_SECTION_SIZE); + i++) { + + if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) { + is_removable = 0; + break; + } + } + if (is_removable) { + memory_region_del_subregion(sysmem, mr); + object_unparent(OBJECT(mr)); + g_free(mr); + } + } + } + sccb->h.response_code = 
cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); +} + /* Provide information about the CPU */ static void sclp_read_cpu_info(SCCB *sccb) { @@ -103,6 +334,22 @@ static void sclp_execute(SCCB *sccb, uint32_t code) case SCLP_CMDW_READ_CPU_INFO: sclp_read_cpu_info(sccb); break; + case SCLP_READ_STORAGE_ELEMENT_INFO: + if (code & 0xff00) { + read_storage_element1_info(sccb); + } else { + read_storage_element0_info(sccb); + } + break; + case SCLP_ATTACH_STORAGE_ELEMENT: + attach_storage_element(sccb, (code & 0xff00) >> 8); + break; + case SCLP_ASSIGN_STORAGE: + assign_storage(sccb); + break; + case SCLP_UNASSIGN_STORAGE: + unassign_storage(sccb); + break; default: efc->command_handler(ef, sccb, code); break; @@ -183,3 +430,33 @@ void s390_sclp_init(void) OBJECT(dev), NULL); qdev_init_nofail(dev); } + +sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void) +{ + DeviceState *dev; + dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV); + object_property_add_child(qdev_get_machine(), + TYPE_SCLP_MEMORY_HOTPLUG_DEV, + OBJECT(dev), NULL); + qdev_init_nofail(dev); + return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path( + TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL)); +} + +sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void) +{ + return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path( + TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL)); +} + +static TypeInfo sclp_memory_hotplug_dev_info = { + .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(sclpMemoryHotplugDev), +}; + +static void register_types(void) +{ + type_register_static(&sclp_memory_hotplug_dev_info); +} +type_init(register_types); diff --git a/hw/tricore/Makefile.objs b/hw/tricore/Makefile.objs new file mode 100644 index 0000000000..435e095cff --- /dev/null +++ b/hw/tricore/Makefile.objs @@ -0,0 +1 @@ +obj-y += tricore_testboard.o diff --git a/hw/tricore/tricore_testboard.c b/hw/tricore/tricore_testboard.c new file mode 100644 index 0000000000..f412e27f1c --- /dev/null +++ 
b/hw/tricore/tricore_testboard.c @@ -0,0 +1,124 @@ +/* + * TriCore Baseboard System emulation. + * + * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + + +#include "hw/hw.h" +#include "hw/devices.h" +#include "net/net.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "sysemu/blockdev.h" +#include "exec/address-spaces.h" +#include "hw/block/flash.h" +#include "elf.h" +#include "hw/tricore/tricore.h" +#include "qemu/error-report.h" + + +/* Board init. 
*/ + +static struct tricore_boot_info tricoretb_binfo; + +static void tricore_load_kernel(CPUTriCoreState *env) +{ + uint64_t entry; + long kernel_size; + + kernel_size = load_elf(tricoretb_binfo.kernel_filename, NULL, + NULL, (uint64_t *)&entry, NULL, + NULL, 0, + ELF_MACHINE, 1); + if (kernel_size <= 0) { + error_report("qemu: no kernel file '%s'", + tricoretb_binfo.kernel_filename); + exit(1); + } + env->PC = entry; + +} + +static void tricore_testboard_init(MachineState *machine, int board_id) +{ + TriCoreCPU *cpu; + CPUTriCoreState *env; + + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *ext_cram = g_new(MemoryRegion, 1); + MemoryRegion *ext_dram = g_new(MemoryRegion, 1); + MemoryRegion *int_cram = g_new(MemoryRegion, 1); + MemoryRegion *int_dram = g_new(MemoryRegion, 1); + MemoryRegion *pcp_data = g_new(MemoryRegion, 1); + MemoryRegion *pcp_text = g_new(MemoryRegion, 1); + + if (!machine->cpu_model) { + machine->cpu_model = "tc1796"; + } + cpu = cpu_tricore_init(machine->cpu_model); + env = &cpu->env; + if (!cpu) { + error_report("Unable to find CPU definition"); + exit(1); + } + memory_region_init_ram(ext_cram, NULL, "powerlink_ext_c.ram", 2*1024*1024); + vmstate_register_ram_global(ext_cram); + memory_region_init_ram(ext_dram, NULL, "powerlink_ext_d.ram", 4*1024*1024); + vmstate_register_ram_global(ext_dram); + memory_region_init_ram(int_cram, NULL, "powerlink_int_c.ram", 48*1024); + vmstate_register_ram_global(int_cram); + memory_region_init_ram(int_dram, NULL, "powerlink_int_d.ram", 48*1024); + vmstate_register_ram_global(int_dram); + memory_region_init_ram(pcp_data, NULL, "powerlink_pcp_data.ram", 16*1024); + vmstate_register_ram_global(pcp_data); + memory_region_init_ram(pcp_text, NULL, "powerlink_pcp_text.ram", 32*1024); + vmstate_register_ram_global(pcp_text); + + memory_region_add_subregion(sysmem, 0x80000000, ext_cram); + memory_region_add_subregion(sysmem, 0xa1000000, ext_dram); + memory_region_add_subregion(sysmem, 0xd4000000, 
int_cram); + memory_region_add_subregion(sysmem, 0xd0000000, int_dram); + memory_region_add_subregion(sysmem, 0xf0050000, pcp_data); + memory_region_add_subregion(sysmem, 0xf0060000, pcp_text); + + tricoretb_binfo.ram_size = machine->ram_size; + tricoretb_binfo.kernel_filename = machine->kernel_filename; + + if (machine->kernel_filename) { + tricore_load_kernel(env); + } +} + +static void tricoreboard_init(MachineState *machine) +{ + tricore_testboard_init(machine, 0x183); +} + +static QEMUMachine ttb_machine = { + .name = "tricore_testboard", + .desc = "a minimal TriCore board", + .init = tricoreboard_init, + .is_default = 0, +}; + +static void tricore_testboard_machine_init(void) +{ + qemu_register_machine(&ttb_machine); +} + +machine_init(tricore_testboard_machine_init); diff --git a/include/block/aio.h b/include/block/aio.h index c23de3cd1f..4603c0f066 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -205,12 +205,25 @@ void qemu_bh_cancel(QEMUBH *bh); void qemu_bh_delete(QEMUBH *bh); /* Return whether there are any pending callbacks from the GSource - * attached to the AioContext. + * attached to the AioContext, before g_poll is invoked. + * + * This is used internally in the implementation of the GSource. + */ +bool aio_prepare(AioContext *ctx); + +/* Return whether there are any pending callbacks from the GSource + * attached to the AioContext, after g_poll is invoked. * * This is used internally in the implementation of the GSource. */ bool aio_pending(AioContext *ctx); +/* Dispatch any pending callbacks from the GSource attached to the AioContext. + * + * This is used internally in the implementation of the GSource. + */ +bool aio_dispatch(AioContext *ctx); + /* Progress in completing AIO work to occur. This can issue new pending * aio as a result of executing I/O completion or bh callbacks. 
* @@ -226,7 +239,6 @@ bool aio_pending(AioContext *ctx); */ bool aio_poll(AioContext *ctx, bool blocking); -#ifdef CONFIG_POSIX /* Register a file descriptor and associated callbacks. Behaves very similarly * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will * be invoked when using aio_poll(). @@ -239,7 +251,6 @@ void aio_set_fd_handler(AioContext *ctx, IOHandler *io_read, IOHandler *io_write, void *opaque); -#endif /* Register an event notifier and associated callbacks. Behaves very similarly * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks @@ -303,4 +314,12 @@ static inline void aio_timer_init(AioContext *ctx, timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque); } +/** + * aio_compute_timeout: + * @ctx: the aio context + * + * Compute the timeout that a blocking aio_poll should use. + */ +int64_t aio_compute_timeout(AioContext *ctx); + #endif diff --git a/include/block/block_int.h b/include/block/block_int.h index 233489547e..8a61215ac0 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -294,6 +294,15 @@ typedef struct BlockLimits { typedef struct BdrvOpBlocker BdrvOpBlocker; +typedef struct BdrvAioNotifier { + void (*attached_aio_context)(AioContext *new_context, void *opaque); + void (*detach_aio_context)(void *opaque); + + void *opaque; + + QLIST_ENTRY(BdrvAioNotifier) list; +} BdrvAioNotifier; + /* * Note: the function bdrv_append() copies and swaps contents of * BlockDriverStates, so if you add new fields to this struct, please @@ -320,6 +329,10 @@ struct BlockDriverState { void *dev_opaque; AioContext *aio_context; /* event loop used for fd handlers, timers, etc */ + /* long-running tasks intended to always use the same AioContext as this + * BDS may register themselves in this list to be notified of changes + * regarding this BDS's context */ + QLIST_HEAD(, BdrvAioNotifier) aio_notifiers; char filename[1024]; char backing_file[1024]; /* if non zero, the image is a diff of 
@@ -437,6 +450,34 @@ void bdrv_detach_aio_context(BlockDriverState *bs); void bdrv_attach_aio_context(BlockDriverState *bs, AioContext *new_context); +/** + * bdrv_add_aio_context_notifier: + * + * If a long-running job intends to be always run in the same AioContext as a + * certain BDS, it may use this function to be notified of changes regarding the + * association of the BDS to an AioContext. + * + * attached_aio_context() is called after the target BDS has been attached to a + * new AioContext; detach_aio_context() is called before the target BDS is being + * detached from its old AioContext. + */ +void bdrv_add_aio_context_notifier(BlockDriverState *bs, + void (*attached_aio_context)(AioContext *new_context, void *opaque), + void (*detach_aio_context)(void *opaque), void *opaque); + +/** + * bdrv_remove_aio_context_notifier: + * + * Unsubscribe of change notifications regarding the BDS's AioContext. The + * parameters given here have to be the same as those given to + * bdrv_add_aio_context_notifier(). + */ +void bdrv_remove_aio_context_notifier(BlockDriverState *bs, + void (*aio_context_attached)(AioContext *, + void *), + void (*aio_context_detached)(void *), + void *opaque); + #ifdef _WIN32 int is_windows_drive(const char *filename); #endif diff --git a/include/block/coroutine.h b/include/block/coroutine.h index b9b7f488c9..793df0ef8b 100644 --- a/include/block/coroutine.h +++ b/include/block/coroutine.h @@ -203,14 +203,6 @@ void qemu_co_rwlock_unlock(CoRwlock *lock); /** * Yield the coroutine for a given duration * - * Note this function uses timers and hence only works when a main loop is in - * use. See main-loop.h and do not use from qemu-tool programs. - */ -void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns); - -/** - * Yield the coroutine for a given duration - * * Behaves similarly to co_sleep_ns(), but the sleeping coroutine will be * resumed when using aio_poll(). 
*/ diff --git a/include/elf.h b/include/elf.h index e88d52fd76..70107f0c3f 100644 --- a/include/elf.h +++ b/include/elf.h @@ -92,6 +92,8 @@ typedef int64_t Elf64_Sxword; #define EM_SPARCV9 43 /* SPARC v9 64-bit */ +#define EM_TRICORE 44 /* Infineon TriCore */ + #define EM_IA_64 50 /* HP/Intel IA-64 */ #define EM_X86_64 62 /* AMD x86-64 */ diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index 2dd6206d4a..0ca6f0b953 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -71,6 +71,8 @@ typedef uint64_t target_ulong; #if !defined(CONFIG_USER_ONLY) #define CPU_TLB_BITS 8 #define CPU_TLB_SIZE (1 << CPU_TLB_BITS) +/* use a fully associative victim tlb of 8 entries */ +#define CPU_VTLB_SIZE 8 #if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32 #define CPU_TLB_ENTRY_BITS 4 @@ -103,9 +105,12 @@ QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); #define CPU_COMMON_TLB \ /* The meaning of the MMU modes is defined in the target code. */ \ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ - hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ + CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ + hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ + hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ target_ulong tlb_flush_addr; \ - target_ulong tlb_flush_mask; + target_ulong tlb_flush_mask; \ + target_ulong vtlb_index; \ #else diff --git a/include/hw/s390x/sclp.h b/include/hw/s390x/sclp.h index 7ef16226df..5c435749e1 100644 --- a/include/hw/s390x/sclp.h +++ b/include/hw/s390x/sclp.h @@ -37,6 +37,7 @@ #define SCLP_STARTING_SUBINCREMENT_ID 0x10001 #define SCLP_INCREMENT_UNIT 0x10000 #define MAX_AVAIL_SLOTS 32 +#define MAX_STORAGE_INCREMENTS 1020 /* CPU hotplug SCLP codes */ #define SCLP_HAS_CPU_INFO 0x0C00000000000000ULL @@ -156,6 +157,23 @@ typedef struct SCCB { char data[SCCB_DATA_LEN]; } QEMU_PACKED SCCB; +typedef struct sclpMemoryHotplugDev sclpMemoryHotplugDev; + +#define TYPE_SCLP_MEMORY_HOTPLUG_DEV "sclp-memory-hotplug-dev" +#define 
SCLP_MEMORY_HOTPLUG_DEV(obj) \ + OBJECT_CHECK(sclpMemoryHotplugDev, (obj), TYPE_SCLP_MEMORY_HOTPLUG_DEV) + +struct sclpMemoryHotplugDev { + SysBusDevice parent; + ram_addr_t standby_mem_size; + ram_addr_t padded_ram_size; + ram_addr_t pad_size; + ram_addr_t standby_subregion_size; + ram_addr_t rzm; + int increment_size; + char *standby_state_map; +}; + static inline int sccb_data_len(SCCB *sccb) { return be16_to_cpu(sccb->h.length) - sizeof(sccb->h); @@ -163,6 +181,8 @@ static inline int sccb_data_len(SCCB *sccb) void s390_sclp_init(void); +sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void); +sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void); void sclp_service_interrupt(uint32_t sccb); void raise_irq_cpu_hotplug(void); diff --git a/include/hw/tricore/tricore.h b/include/hw/tricore/tricore.h new file mode 100644 index 0000000000..5f13252788 --- /dev/null +++ b/include/hw/tricore/tricore.h @@ -0,0 +1,11 @@ +#ifndef TRICORE_MISC_H +#define TRICORE_MISC_H 1 + +#include "exec/memory.h" +#include "hw/irq.h" + +struct tricore_boot_info { + uint64_t ram_size; + const char *kernel_filename; +}; +#endif diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h index 182d48d8c3..8939233f37 100644 --- a/include/sysemu/arch_init.h +++ b/include/sysemu/arch_init.h @@ -22,6 +22,7 @@ enum { QEMU_ARCH_OPENRISC = 8192, QEMU_ARCH_UNICORE32 = 0x4000, QEMU_ARCH_MOXIE = 0x8000, + QEMU_ARCH_TRICORE = 0x10000, }; extern const uint32_t arch_type; @@ -18,6 +18,7 @@ #include "block/nbd.h" #include "block/block.h" +#include "block/block_int.h" #include "block/coroutine.h" @@ -107,6 +108,8 @@ struct NBDExport { uint32_t nbdflags; QTAILQ_HEAD(, NBDClient) clients; QTAILQ_ENTRY(NBDExport) next; + + AioContext *ctx; }; static QTAILQ_HEAD(, NBDExport) exports = QTAILQ_HEAD_INITIALIZER(exports); @@ -123,6 +126,8 @@ struct NBDClient { CoMutex send_lock; Coroutine *send_coroutine; + bool can_read; + QTAILQ_ENTRY(NBDClient) next; int nb_requests; bool closing; @@ -130,6 +135,10 
@@ struct NBDClient { /* That's all folks */ +static void nbd_set_handlers(NBDClient *client); +static void nbd_unset_handlers(NBDClient *client); +static void nbd_update_can_read(NBDClient *client); + ssize_t nbd_wr_sync(int fd, void *buffer, size_t size, bool do_read) { size_t offset = 0; @@ -156,7 +165,7 @@ ssize_t nbd_wr_sync(int fd, void *buffer, size_t size, bool do_read) err = socket_error(); /* recoverable error */ - if (err == EINTR || (offset > 0 && err == EAGAIN)) { + if (err == EINTR || (offset > 0 && (err == EAGAIN || err == EWOULDBLOCK))) { continue; } @@ -862,7 +871,7 @@ void nbd_client_put(NBDClient *client) */ assert(client->closing); - qemu_set_fd_handler2(client->sock, NULL, NULL, NULL, NULL); + nbd_unset_handlers(client); close(client->sock); client->sock = -1; if (client->exp) { @@ -898,6 +907,7 @@ static NBDRequest *nbd_request_get(NBDClient *client) assert(client->nb_requests <= MAX_NBD_REQUESTS - 1); client->nb_requests++; + nbd_update_can_read(client); req = g_slice_new0(NBDRequest); nbd_client_get(client); @@ -914,12 +924,39 @@ static void nbd_request_put(NBDRequest *req) } g_slice_free(NBDRequest, req); - if (client->nb_requests-- == MAX_NBD_REQUESTS) { - qemu_notify_event(); - } + client->nb_requests--; + nbd_update_can_read(client); nbd_client_put(client); } +static void bs_aio_attached(AioContext *ctx, void *opaque) +{ + NBDExport *exp = opaque; + NBDClient *client; + + TRACE("Export %s: Attaching clients to AIO context %p\n", exp->name, ctx); + + exp->ctx = ctx; + + QTAILQ_FOREACH(client, &exp->clients, next) { + nbd_set_handlers(client); + } +} + +static void bs_aio_detach(void *opaque) +{ + NBDExport *exp = opaque; + NBDClient *client; + + TRACE("Export %s: Detaching clients from AIO context %p\n", exp->name, exp->ctx); + + QTAILQ_FOREACH(client, &exp->clients, next) { + nbd_unset_handlers(client); + } + + exp->ctx = NULL; +} + NBDExport *nbd_export_new(BlockDriverState *bs, off_t dev_offset, off_t size, uint32_t nbdflags, void 
(*close)(NBDExport *)) @@ -932,7 +969,9 @@ NBDExport *nbd_export_new(BlockDriverState *bs, off_t dev_offset, exp->nbdflags = nbdflags; exp->size = size == -1 ? bdrv_getlength(bs) : size; exp->close = close; + exp->ctx = bdrv_get_aio_context(bs); bdrv_ref(bs); + bdrv_add_aio_context_notifier(bs, bs_aio_attached, bs_aio_detach, exp); return exp; } @@ -980,6 +1019,8 @@ void nbd_export_close(NBDExport *exp) nbd_export_set_name(exp, NULL); nbd_export_put(exp); if (exp->bs) { + bdrv_remove_aio_context_notifier(exp->bs, bs_aio_attached, + bs_aio_detach, exp); bdrv_unref(exp->bs); exp->bs = NULL; } @@ -1023,10 +1064,6 @@ void nbd_export_close_all(void) } } -static int nbd_can_read(void *opaque); -static void nbd_read(void *opaque); -static void nbd_restart_write(void *opaque); - static ssize_t nbd_co_send_reply(NBDRequest *req, struct nbd_reply *reply, int len) { @@ -1035,9 +1072,8 @@ static ssize_t nbd_co_send_reply(NBDRequest *req, struct nbd_reply *reply, ssize_t rc, ret; qemu_co_mutex_lock(&client->send_lock); - qemu_set_fd_handler2(csock, nbd_can_read, nbd_read, - nbd_restart_write, client); client->send_coroutine = qemu_coroutine_self(); + nbd_set_handlers(client); if (!len) { rc = nbd_send_reply(csock, reply); @@ -1054,7 +1090,7 @@ static ssize_t nbd_co_send_reply(NBDRequest *req, struct nbd_reply *reply, } client->send_coroutine = NULL; - qemu_set_fd_handler2(csock, nbd_can_read, nbd_read, NULL, client); + nbd_set_handlers(client); qemu_co_mutex_unlock(&client->send_lock); return rc; } @@ -1067,6 +1103,8 @@ static ssize_t nbd_co_receive_request(NBDRequest *req, struct nbd_request *reque ssize_t rc; client->recv_coroutine = qemu_coroutine_self(); + nbd_update_can_read(client); + rc = nbd_receive_request(csock, request); if (rc < 0) { if (rc != -EAGAIN) { @@ -1108,6 +1146,8 @@ static ssize_t nbd_co_receive_request(NBDRequest *req, struct nbd_request *reque out: client->recv_coroutine = NULL; + nbd_update_can_read(client); + return rc; } @@ -1259,13 +1299,6 @@ out: 
nbd_client_close(client); } -static int nbd_can_read(void *opaque) -{ - NBDClient *client = opaque; - - return client->recv_coroutine || client->nb_requests < MAX_NBD_REQUESTS; -} - static void nbd_read(void *opaque) { NBDClient *client = opaque; @@ -1284,6 +1317,37 @@ static void nbd_restart_write(void *opaque) qemu_coroutine_enter(client->send_coroutine, NULL); } +static void nbd_set_handlers(NBDClient *client) +{ + if (client->exp && client->exp->ctx) { + aio_set_fd_handler(client->exp->ctx, client->sock, + client->can_read ? nbd_read : NULL, + client->send_coroutine ? nbd_restart_write : NULL, + client); + } +} + +static void nbd_unset_handlers(NBDClient *client) +{ + if (client->exp && client->exp->ctx) { + aio_set_fd_handler(client->exp->ctx, client->sock, NULL, NULL, NULL); + } +} + +static void nbd_update_can_read(NBDClient *client) +{ + bool can_read = client->recv_coroutine || + client->nb_requests < MAX_NBD_REQUESTS; + + if (can_read != client->can_read) { + client->can_read = can_read; + nbd_set_handlers(client); + + /* There is no need to invoke aio_notify(), since aio_set_fd_handler() + * in nbd_set_handlers() will have taken care of that */ + } +} + NBDClient *nbd_client_new(NBDExport *exp, int csock, void (*close)(NBDClient *)) { @@ -1292,13 +1356,14 @@ NBDClient *nbd_client_new(NBDExport *exp, int csock, client->refcount = 1; client->exp = exp; client->sock = csock; + client->can_read = true; if (nbd_send_negotiate(client)) { g_free(client); return NULL; } client->close = close; qemu_co_mutex_init(&client->send_lock); - qemu_set_fd_handler2(csock, nbd_can_read, nbd_read, NULL, client); + nbd_set_handlers(client); if (exp) { QTAILQ_INSERT_TAIL(&exp->clients, client, next); diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img Binary files differindex e3ea0d5664..44873ad181 100644 --- a/pc-bios/s390-ccw.img +++ b/pc-bios/s390-ccw.img diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c index f1756796df..115d8bbac6 100644 --- 
a/pc-bios/s390-ccw/bootmap.c +++ b/pc-bios/s390-ccw/bootmap.c @@ -40,11 +40,6 @@ static void jump_to_IPL_2(void) ResetInfo *current = 0; void (*ipl)(void) = (void *) (uint64_t) current->ipl_continue; - debug_print_addr("set IPL addr to", ipl); - - /* Ensure the guest output starts fresh */ - sclp_print("\n"); - *current = save; ipl(); /* should not return */ } @@ -64,6 +59,11 @@ static void jump_to_IPL_code(uint64_t address) current->ipl_addr = (uint32_t) (uint64_t) &jump_to_IPL_2; current->ipl_continue = address & 0x7fffffff; + debug_print_int("set IPL addr to", current->ipl_continue); + + /* Ensure the guest output starts fresh */ + sclp_print("\n"); + /* * HACK ALERT. * We use the load normal reset to keep r15 unchanged. jump_to_IPL_2 @@ -93,11 +93,23 @@ static inline void verify_boot_info(BootInfo *bip) "Bad block size in zIPL section of the 1st record."); } -static bool eckd_valid_address(BootMapPointer *p) +static block_number_t eckd_block_num(BootMapPointer *p) { + const uint64_t sectors = virtio_get_sectors(); + const uint64_t heads = virtio_get_heads(); const uint64_t cylinder = p->eckd.cylinder + ((p->eckd.head & 0xfff0) << 12); const uint64_t head = p->eckd.head & 0x000f; + const block_number_t block = sectors * heads * cylinder + + sectors * head + + p->eckd.sector + - 1; /* block nr starts with zero */ + return block; +} + +static bool eckd_valid_address(BootMapPointer *p) +{ + const uint64_t head = p->eckd.head & 0x000f; if (head >= virtio_get_heads() || p->eckd.sector > virtio_get_sectors() @@ -105,27 +117,14 @@ static bool eckd_valid_address(BootMapPointer *p) return false; } - if (!virtio_guessed_disk_nature() && cylinder >= virtio_get_cylinders()) { + if (!virtio_guessed_disk_nature() && + eckd_block_num(p) >= virtio_get_blocks()) { return false; } return true; } -static block_number_t eckd_block_num(BootMapPointer *p) -{ - const uint64_t sectors = virtio_get_sectors(); - const uint64_t heads = virtio_get_heads(); - const uint64_t cylinder = 
p->eckd.cylinder - + ((p->eckd.head & 0xfff0) << 12); - const uint64_t head = p->eckd.head & 0x000f; - const block_number_t block = sectors * heads * cylinder - + sectors * head - + p->eckd.sector - - 1; /* block nr starts with zero */ - return block; -} - static block_number_t load_eckd_segments(block_number_t blk, uint64_t *address) { block_number_t block_nr; @@ -223,7 +222,6 @@ static void ipl_eckd_cdl(void) memset(sec, FREE_SPACE_FILLER, sizeof(sec)); read_block(1, ipl2, "Cannot read IPL2 record at block 1"); - IPL_assert(magic_match(ipl2, IPL2_MAGIC), "No IPL2 record"); mbr = &ipl2->u.x.mbr; IPL_assert(magic_match(mbr, ZIPL_MAGIC), "No zIPL section in IPL2 record."); @@ -247,12 +245,10 @@ static void ipl_eckd_cdl(void) /* no return */ } -static void ipl_eckd_ldl(ECKD_IPL_mode_t mode) +static void print_eckd_ldl_msg(ECKD_IPL_mode_t mode) { LDL_VTOC *vlbl = (void *)sec; /* already read, 3rd block */ char msg[4] = { '?', '.', '\n', '\0' }; - block_number_t block_nr; - BootInfo *bip; sclp_print((mode == ECKD_CMS) ? 
"CMS" : "LDL"); sclp_print(" version "); @@ -272,12 +268,27 @@ static void ipl_eckd_ldl(ECKD_IPL_mode_t mode) } sclp_print(msg); print_volser(vlbl->volser); +} + +static void ipl_eckd_ldl(ECKD_IPL_mode_t mode) +{ + block_number_t block_nr; + BootInfo *bip = (void *)(sec + 0x70); /* BootInfo is MBR for LDL */ + + if (mode != ECKD_LDL_UNLABELED) { + print_eckd_ldl_msg(mode); + } /* DO NOT read BootMap pointer (only one, xECKD) at block #2 */ memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(0, sec, "Cannot read block 0"); - bip = (void *)(sec + 0x70); /* "boot info" is "eckd mbr" for LDL */ + read_block(0, sec, "Cannot read block 0 to grab boot info."); + if (mode == ECKD_LDL_UNLABELED) { + if (!magic_match(bip->magic, ZIPL_MAGIC)) { + return; /* not applicable layout */ + } + sclp_print("unlabeled LDL.\n"); + } verify_boot_info(bip); block_nr = eckd_block_num((void *)&(bip->bp.ipl.bm_ptr.eckd.bptr)); @@ -285,17 +296,23 @@ static void ipl_eckd_ldl(ECKD_IPL_mode_t mode) /* no return */ } -static void ipl_eckd(ECKD_IPL_mode_t mode) +static void print_eckd_msg(void) { - switch (mode) { - case ECKD_CDL: - ipl_eckd_cdl(); /* no return */ - case ECKD_CMS: - case ECKD_LDL: - ipl_eckd_ldl(mode); /* no return */ - default: - virtio_panic("\n! 
Unknown ECKD IPL mode !\n"); + char msg[] = "Using ECKD scheme (block size *****), "; + char *p = &msg[34], *q = &msg[30]; + int n = virtio_get_block_size(); + + /* Fill in the block size and show up the message */ + if (n > 0 && n <= 99999) { + while (n) { + *p-- = '0' + (n % 10); + n /= 10; + } + while (p >= q) { + *p-- = ' '; + } } + sclp_print(msg); } /*********************************************************************** @@ -447,14 +464,13 @@ void zipl_load(void) } /* We have failed to follow the SCSI scheme, so */ - sclp_print("Using ECKD scheme.\n"); if (virtio_guessed_disk_nature()) { sclp_print("Using guessed DASD geometry.\n"); virtio_assume_eckd(); } - + print_eckd_msg(); if (magic_match(mbr->magic, IPL1_MAGIC)) { - ipl_eckd(ECKD_CDL); /* no return */ + ipl_eckd_cdl(); /* no return */ } /* LDL/CMS? */ @@ -462,11 +478,18 @@ void zipl_load(void) read_block(2, vlbl, "Cannot read block 2"); if (magic_match(vlbl->magic, CMS1_MAGIC)) { - ipl_eckd(ECKD_CMS); /* no return */ + ipl_eckd_ldl(ECKD_CMS); /* no return */ } if (magic_match(vlbl->magic, LNX1_MAGIC)) { - ipl_eckd(ECKD_LDL); /* no return */ + ipl_eckd_ldl(ECKD_LDL); /* no return */ } - virtio_panic("\n* invalid MBR magic *\n"); + ipl_eckd_ldl(ECKD_LDL_UNLABELED); /* it still may return */ + /* + * Ok, it is not a LDL by any means. 
+ * It still might be a CDL with zero record keys for IPL1 and IPL2 + */ + ipl_eckd_cdl(); + + virtio_panic("\n* this can never happen *\n"); } diff --git a/pc-bios/s390-ccw/bootmap.h b/pc-bios/s390-ccw/bootmap.h index 30ef22fe61..6a4823d544 100644 --- a/pc-bios/s390-ccw/bootmap.h +++ b/pc-bios/s390-ccw/bootmap.h @@ -257,9 +257,9 @@ typedef struct IplVolumeLabel { typedef enum { ECKD_NO_IPL, - ECKD_CDL, ECKD_CMS, ECKD_LDL, + ECKD_LDL_UNLABELED, } ECKD_IPL_mode_t; /* utility code below */ diff --git a/pc-bios/s390-ccw/virtio.c b/pc-bios/s390-ccw/virtio.c index 31b23b086c..c0540d1cd4 100644 --- a/pc-bios/s390-ccw/virtio.c +++ b/pc-bios/s390-ccw/virtio.c @@ -275,12 +275,14 @@ void virtio_assume_scsi(void) { guessed_disk_nature = true; blk_cfg.blk_size = 512; + blk_cfg.physical_block_exp = 0; } void virtio_assume_eckd(void) { guessed_disk_nature = true; blk_cfg.blk_size = 4096; + blk_cfg.physical_block_exp = 0; /* this must be here to calculate code segment position */ blk_cfg.geometry.heads = 15; @@ -290,36 +292,52 @@ void virtio_assume_eckd(void) bool virtio_disk_is_scsi(void) { if (guessed_disk_nature) { - return (blk_cfg.blk_size == 512); + return (virtio_get_block_size() == 512); } return (blk_cfg.geometry.heads == 255) && (blk_cfg.geometry.sectors == 63) - && (blk_cfg.blk_size == 512); + && (virtio_get_block_size() == 512); +} + +/* + * Other supported value pairs, if any, would need to be added here. + * Note: head count is always 15. 
+ */ +static inline u8 virtio_eckd_sectors_for_block_size(int size) +{ + switch (size) { + case 512: + return 49; + case 1024: + return 33; + case 2048: + return 21; + case 4096: + return 12; + } + return 0; } bool virtio_disk_is_eckd(void) { + const int block_size = virtio_get_block_size(); + if (guessed_disk_nature) { - return (blk_cfg.blk_size == 4096); + return (block_size == 4096); } return (blk_cfg.geometry.heads == 15) - && (blk_cfg.geometry.sectors == 12) - && (blk_cfg.blk_size == 4096); + && (blk_cfg.geometry.sectors == + virtio_eckd_sectors_for_block_size(block_size)); } bool virtio_ipl_disk_is_valid(void) { - return blk_cfg.blk_size && (virtio_disk_is_scsi() || virtio_disk_is_eckd()); + return virtio_disk_is_scsi() || virtio_disk_is_eckd(); } int virtio_get_block_size(void) { - return blk_cfg.blk_size; -} - -uint16_t virtio_get_cylinders(void) -{ - return blk_cfg.geometry.cylinders; + return blk_cfg.blk_size << blk_cfg.physical_block_exp; } uint8_t virtio_get_heads(void) @@ -332,6 +350,12 @@ uint8_t virtio_get_sectors(void) return blk_cfg.geometry.sectors; } +uint64_t virtio_get_blocks(void) +{ + return blk_cfg.capacity / + (virtio_get_block_size() / VIRTIO_SECTOR_SIZE); +} + void virtio_setup_block(struct subchannel_id schid) { struct vq_info_block info; diff --git a/pc-bios/s390-ccw/virtio.h b/pc-bios/s390-ccw/virtio.h index f1fb1b08fa..c23466b8db 100644 --- a/pc-bios/s390-ccw/virtio.h +++ b/pc-bios/s390-ccw/virtio.h @@ -192,9 +192,9 @@ extern bool virtio_disk_is_scsi(void); extern bool virtio_disk_is_eckd(void); extern bool virtio_ipl_disk_is_valid(void); extern int virtio_get_block_size(void); -extern uint16_t virtio_get_cylinders(void); extern uint8_t virtio_get_heads(void); extern uint8_t virtio_get_sectors(void); +extern uint64_t virtio_get_blocks(void); extern int virtio_read_many(ulong sector, void *load_addr, int sec_num); #define VIRTIO_SECTOR_SIZE 512 diff --git a/qapi/block-core.json b/qapi/block-core.json index fb74c56e32..a685d02728 100644 
--- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -1419,6 +1419,19 @@ 'raw': 'BlockdevRef' } } ## +# @QuorumReadPattern +# +# An enumeration of quorum read patterns. +# +# @quorum: read all the children and do a quorum vote on reads +# +# @fifo: read only from the first child that has not failed +# +# Since: 2.2 +## +{ 'enum': 'QuorumReadPattern', 'data': [ 'quorum', 'fifo' ] } + +## # @BlockdevOptionsQuorum # # Driver specific block device options for Quorum @@ -1433,12 +1446,17 @@ # @rewrite-corrupted: #optional rewrite corrupted data when quorum is reached # (Since 2.1) # +# @read-pattern: #optional choose read pattern and set to quorum by default +# (Since 2.2) +# # Since: 2.0 ## { 'type': 'BlockdevOptionsQuorum', 'data': { '*blkverify': 'bool', 'children': [ 'BlockdevRef' ], - 'vote-threshold': 'int', '*rewrite-corrupted': 'bool' } } + 'vote-threshold': 'int', + '*rewrite-corrupted': 'bool', + '*read-pattern': 'QuorumReadPattern' } } ## # @BlockdevOptions diff --git a/qemu-coroutine-io.c b/qemu-coroutine-io.c index 054ca70627..d4049260da 100644 --- a/qemu-coroutine-io.c +++ b/qemu-coroutine-io.c @@ -34,13 +34,15 @@ qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt, { size_t done = 0; ssize_t ret; + int err; while (done < bytes) { ret = iov_send_recv(sockfd, iov, iov_cnt, offset + done, bytes - done, do_send); if (ret > 0) { done += ret; } else if (ret < 0) { - if (errno == EAGAIN) { + err = socket_error(); + if (err == EAGAIN || err == EWOULDBLOCK) { qemu_coroutine_yield(); } else if (done == 0) { return -1; diff --git a/qemu-coroutine-sleep.c b/qemu-coroutine-sleep.c index ad78fbaa2a..9abb7fdf31 100644 --- a/qemu-coroutine-sleep.c +++ b/qemu-coroutine-sleep.c @@ -27,18 +27,6 @@ static void co_sleep_cb(void *opaque) qemu_coroutine_enter(sleep_cb->co, NULL); } -void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns) -{ - CoSleepCB sleep_cb = { - .co = qemu_coroutine_self(), - }; - sleep_cb.ts = timer_new(type, SCALE_NS, 
co_sleep_cb, &sleep_cb); - timer_mod(sleep_cb.ts, qemu_clock_get_ns(type) + ns); - qemu_coroutine_yield(); - timer_del(sleep_cb.ts); - timer_free(sleep_cb.ts); -} - void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type, int64_t ns) { diff --git a/qemu-img.c b/qemu-img.c index 2052b14b84..ff29ed1c67 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -752,7 +752,7 @@ static int img_commit(int argc, char **argv) ret = bdrv_parse_cache_flags(cache, &flags); if (ret < 0) { error_report("Invalid cache option: %s", cache); - return -1; + return 1; } bs = bdrv_new_open("image", filename, fmt, flags, true, quiet); @@ -999,6 +999,9 @@ static int img_compare(int argc, char **argv) filename1 = argv[optind++]; filename2 = argv[optind++]; + /* Initialize before goto out */ + qemu_progress_init(progress, 2.0); + flags = BDRV_O_FLAGS; ret = bdrv_parse_cache_flags(cache, &flags); if (ret < 0) { @@ -1007,9 +1010,6 @@ static int img_compare(int argc, char **argv) goto out3; } - /* Initialize before goto out */ - qemu_progress_init(progress, 2.0); - bs1 = bdrv_new_open("image 1", filename1, fmt1, flags, true, quiet); if (!bs1) { error_report("Can't open file %s", filename1); @@ -2304,7 +2304,7 @@ static int img_snapshot(int argc, char **argv) static int img_rebase(int argc, char **argv) { - BlockDriverState *bs, *bs_old_backing = NULL, *bs_new_backing = NULL; + BlockDriverState *bs = NULL, *bs_old_backing = NULL, *bs_new_backing = NULL; BlockDriver *old_backing_drv, *new_backing_drv; char *filename; const char *fmt, *cache, *src_cache, *out_basefmt, *out_baseimg; @@ -2376,14 +2376,14 @@ static int img_rebase(int argc, char **argv) ret = bdrv_parse_cache_flags(cache, &flags); if (ret < 0) { error_report("Invalid cache option: %s", cache); - return -1; + goto out; } src_flags = BDRV_O_FLAGS; ret = bdrv_parse_cache_flags(src_cache, &src_flags); if (ret < 0) { error_report("Invalid source cache option: %s", src_cache); - return -1; + goto out; } /* @@ -2394,7 +2394,8 @@ static 
int img_rebase(int argc, char **argv) */ bs = bdrv_new_open("image", filename, fmt, flags, true, quiet); if (!bs) { - return 1; + ret = -1; + goto out; } /* Find the right drivers for the backing files */ @@ -2420,11 +2421,7 @@ static int img_rebase(int argc, char **argv) } /* For safe rebasing we need to compare old and new backing file */ - if (unsafe) { - /* Make the compiler happy */ - bs_old_backing = NULL; - bs_new_backing = NULL; - } else { + if (!unsafe) { char backing_name[1024]; bs_old_backing = bdrv_new("old_backing", &error_abort); diff --git a/qemu-options.hx b/qemu-options.hx index c573dd8893..ecd0e34269 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -225,7 +225,8 @@ DEF("m", HAS_ARG, QEMU_OPTION_m, " size: initial amount of guest memory (default: " stringify(DEFAULT_RAM_SIZE) "MiB)\n" " slots: number of hotplug slots (default: none)\n" - " maxmem: maximum amount of guest memory (default: none)\n", + " maxmem: maximum amount of guest memory (default: none)\n" + "NOTE: Some architectures might enforce a specific granularity\n", QEMU_ARCH_ALL) STEXI @item -m [size=]@var{megs} @@ -2351,6 +2352,16 @@ multiple of 512 bytes. It defaults to 256k. @item sslverify Whether to verify the remote server's certificate when connecting over SSL. It can have the value 'on' or 'off'. It defaults to 'on'. + +@item cookie +Send this cookie (it can also be a list of cookies separated by ';') with +each outgoing request. Only supported when using protocols such as HTTP +which support cookies, otherwise ignored. + +@item timeout +Set the timeout in seconds of the CURL connection. This timeout is the time +that CURL waits for a response from the remote server to get the size of the +image to be downloaded. If not set, the default timeout of 5 seconds is used. 
@end table Note that when passing options to qemu explicitly, @option{driver} is the value @@ -2372,9 +2383,10 @@ qemu-system-x86_64 -drive file=/tmp/Fedora-x86_64-20-20131211.1-sda.qcow2,copy-o @end example Example: boot from an image stored on a VMware vSphere server with a self-signed -certificate using a local overlay for writes and a readahead of 64k +certificate using a local overlay for writes, a readahead of 64k and a timeout +of 10 seconds. @example -qemu-img create -f qcow2 -o backing_file='json:@{"file.driver":"https",, "file.url":"https://user:password@@vsphere.example.com/folder/test/test-flat.vmdk?dcPath=Datacenter&dsName=datastore1",, "file.sslverify":"off",, "file.readahead":"64k"@}' /tmp/test.qcow2 +qemu-img create -f qcow2 -o backing_file='json:@{"file.driver":"https",, "file.url":"https://user:password@@vsphere.example.com/folder/test/test-flat.vmdk?dcPath=Datacenter&dsName=datastore1",, "file.sslverify":"off",, "file.readahead":"64k",, "file.timeout":10@}' /tmp/test.qcow2 qemu-system-x86_64 -drive file=/tmp/test.qcow2 @end example diff --git a/softmmu_template.h b/softmmu_template.h index 5a07f991a1..88e33900b6 100644 --- a/softmmu_template.h +++ b/softmmu_template.h @@ -116,6 +116,31 @@ # define helper_te_st_name helper_le_st_name #endif +/* macro to check the victim tlb */ +#define VICTIM_TLB_HIT(ty) \ +({ \ + /* we are about to do a page table walk. our last hope is the \ + * victim tlb. try to refill from the victim tlb before walking the \ + * page table. 
*/ \ + int vidx; \ + hwaddr tmpiotlb; \ + CPUTLBEntry tmptlb; \ + for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \ + if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\ + /* found entry in victim tlb, swap tlb and iotlb */ \ + tmptlb = env->tlb_table[mmu_idx][index]; \ + env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \ + env->tlb_v_table[mmu_idx][vidx] = tmptlb; \ + tmpiotlb = env->iotlb[mmu_idx][index]; \ + env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \ + env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \ + break; \ + } \ + } \ + /* return true when there is a vtlb hit, i.e. vidx >=0 */ \ + vidx >= 0; \ +}) + #ifndef SOFTMMU_CODE_ACCESS static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, hwaddr physaddr, @@ -161,7 +186,10 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, mmu_idx, retaddr); } #endif - tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + if (!VICTIM_TLB_HIT(ADDR_READ)) { + tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); + } tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; } @@ -246,7 +274,10 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, mmu_idx, retaddr); } #endif - tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + if (!VICTIM_TLB_HIT(ADDR_READ)) { + tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); + } tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; } @@ -368,7 +399,9 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); } #endif - tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); + if (!VICTIM_TLB_HIT(addr_write)) { + tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); + } tlb_addr = env->tlb_table[mmu_idx][index].addr_write; } @@ -444,7 +477,9 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, 
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); } #endif - tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); + if (!VICTIM_TLB_HIT(addr_write)) { + tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); + } tlb_addr = env->tlb_table[mmu_idx][index].addr_write; } diff --git a/target-s390x/cpu-qom.h b/target-s390x/cpu-qom.h index f9c96d13a9..80dd74142c 100644 --- a/target-s390x/cpu-qom.h +++ b/target-s390x/cpu-qom.h @@ -89,5 +89,6 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr); int s390_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +void s390_cpu_gdb_init(CPUState *cs); #endif diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c index c3082b73c5..97a92168a8 100644 --- a/target-s390x/cpu.c +++ b/target-s390x/cpu.c @@ -165,7 +165,7 @@ static void s390_cpu_machine_reset_cb(void *opaque) { S390CPU *cpu = opaque; - cpu_reset(CPU(cpu)); + run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, CPU(cpu)); } #endif @@ -174,8 +174,13 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp) CPUState *cs = CPU(dev); S390CPUClass *scc = S390_CPU_GET_CLASS(dev); + s390_cpu_gdb_init(cs); qemu_init_vcpu(cs); +#if !defined(CONFIG_USER_ONLY) + run_on_cpu(cs, s390_do_cpu_full_reset, cs); +#else cpu_reset(cs); +#endif scc->parent_realize(dev, errp); } @@ -259,7 +264,8 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) cc->write_elf64_qemunote = s390_cpu_write_elf64_qemunote; #endif dc->vmsd = &vmstate_s390_cpu; - cc->gdb_num_core_regs = S390_NUM_REGS; + cc->gdb_num_core_regs = S390_NUM_CORE_REGS; + cc->gdb_core_xml_file = "s390x-core64.xml"; } static const TypeInfo s390_cpu_type_info = { diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h index b13761d925..62940c398a 100644 --- a/target-s390x/cpu.h +++ b/target-s390x/cpu.h @@ -353,6 +353,21 @@ static inline hwaddr 
decode_basedisp_s(CPUS390XState *env, uint32_t ipb) /* Base/displacement are at the same locations. */ #define decode_basedisp_rs decode_basedisp_s +/* helper functions for run_on_cpu() */ +static inline void s390_do_cpu_reset(void *arg) +{ + CPUState *cs = arg; + S390CPUClass *scc = S390_CPU_GET_CLASS(cs); + + scc->cpu_reset(cs); +} +static inline void s390_do_cpu_full_reset(void *arg) +{ + CPUState *cs = arg; + + cpu_reset(cs); +} + void s390x_tod_timer(void *opaque); void s390x_cpu_timer(void *opaque); @@ -551,44 +566,8 @@ void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf); #define S390_R13_REGNUM 15 #define S390_R14_REGNUM 16 #define S390_R15_REGNUM 17 -/* Access Registers. */ -#define S390_A0_REGNUM 18 -#define S390_A1_REGNUM 19 -#define S390_A2_REGNUM 20 -#define S390_A3_REGNUM 21 -#define S390_A4_REGNUM 22 -#define S390_A5_REGNUM 23 -#define S390_A6_REGNUM 24 -#define S390_A7_REGNUM 25 -#define S390_A8_REGNUM 26 -#define S390_A9_REGNUM 27 -#define S390_A10_REGNUM 28 -#define S390_A11_REGNUM 29 -#define S390_A12_REGNUM 30 -#define S390_A13_REGNUM 31 -#define S390_A14_REGNUM 32 -#define S390_A15_REGNUM 33 -/* Floating Point Control Word. */ -#define S390_FPC_REGNUM 34 -/* Floating Point Registers. */ -#define S390_F0_REGNUM 35 -#define S390_F1_REGNUM 36 -#define S390_F2_REGNUM 37 -#define S390_F3_REGNUM 38 -#define S390_F4_REGNUM 39 -#define S390_F5_REGNUM 40 -#define S390_F6_REGNUM 41 -#define S390_F7_REGNUM 42 -#define S390_F8_REGNUM 43 -#define S390_F9_REGNUM 44 -#define S390_F10_REGNUM 45 -#define S390_F11_REGNUM 46 -#define S390_F12_REGNUM 47 -#define S390_F13_REGNUM 48 -#define S390_F14_REGNUM 49 -#define S390_F15_REGNUM 50 -/* Total. */ -#define S390_NUM_REGS 51 +/* Total Core Registers. 
*/ +#define S390_NUM_CORE_REGS 18 /* CC optimization */ @@ -1045,6 +1024,10 @@ static inline void cpu_inject_crw_mchk(S390CPU *cpu) cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); } +/* from s390-virtio-ccw */ +#define MEM_SECTION_SIZE 0x10000000UL +#define MAX_AVAIL_SLOTS 32 + /* fpu_helper.c */ uint32_t set_cc_nz_f32(float32 v); uint32_t set_cc_nz_f64(float64 v); @@ -1067,6 +1050,7 @@ void kvm_s390_enable_css_support(S390CPU *cpu); int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, int vq, bool assign); int kvm_s390_cpu_restart(S390CPU *cpu); +int kvm_s390_get_memslot_count(KVMState *s); void kvm_s390_clear_cmma_callback(void *opaque); #else static inline void kvm_s390_io_interrupt(uint16_t subchannel_id, @@ -1094,6 +1078,10 @@ static inline int kvm_s390_cpu_restart(S390CPU *cpu) static inline void kvm_s390_clear_cmma_callback(void *opaque) { } +static inline int kvm_s390_get_memslot_count(KVMState *s) +{ + return MAX_AVAIL_SLOTS; +} #endif static inline void cmma_reset(S390CPU *cpu) @@ -1112,6 +1100,15 @@ static inline int s390_cpu_restart(S390CPU *cpu) return -ENOSYS; } +static inline int s390_get_memslot_count(KVMState *s) +{ + if (kvm_enabled()) { + return kvm_s390_get_memslot_count(s); + } else { + return MAX_AVAIL_SLOTS; + } +} + void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr, uint32_t io_int_parm, uint32_t io_int_word); void s390_crw_mchk(void); diff --git a/target-s390x/gdbstub.c b/target-s390x/gdbstub.c index a129742e2f..8945f0271d 100644 --- a/target-s390x/gdbstub.c +++ b/target-s390x/gdbstub.c @@ -31,21 +31,18 @@ int s390_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n) switch (n) { case S390_PSWM_REGNUM: - cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr); - val = deposit64(env->psw.mask, 44, 2, cc_op); - return gdb_get_regl(mem_buf, val); + if (tcg_enabled()) { + cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, + env->cc_vr); + val = deposit64(env->psw.mask, 44, 2, 
cc_op); + return gdb_get_regl(mem_buf, val); + } + return gdb_get_regl(mem_buf, env->psw.mask); case S390_PSWA_REGNUM: return gdb_get_regl(mem_buf, env->psw.addr); case S390_R0_REGNUM ... S390_R15_REGNUM: - return gdb_get_regl(mem_buf, env->regs[n-S390_R0_REGNUM]); - case S390_A0_REGNUM ... S390_A15_REGNUM: - return gdb_get_reg32(mem_buf, env->aregs[n-S390_A0_REGNUM]); - case S390_FPC_REGNUM: - return gdb_get_reg32(mem_buf, env->fpc); - case S390_F0_REGNUM ... S390_F15_REGNUM: - return gdb_get_reg64(mem_buf, env->fregs[n-S390_F0_REGNUM].ll); + return gdb_get_regl(mem_buf, env->regs[n - S390_R0_REGNUM]); } - return 0; } @@ -53,36 +50,94 @@ int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; - target_ulong tmpl; - uint32_t tmp32; - int r = 8; - tmpl = ldtul_p(mem_buf); - tmp32 = ldl_p(mem_buf); + target_ulong tmpl = ldtul_p(mem_buf); switch (n) { case S390_PSWM_REGNUM: env->psw.mask = tmpl; - env->cc_op = extract64(tmpl, 44, 2); + if (tcg_enabled()) { + env->cc_op = extract64(tmpl, 44, 2); + } break; case S390_PSWA_REGNUM: env->psw.addr = tmpl; break; case S390_R0_REGNUM ... S390_R15_REGNUM: - env->regs[n-S390_R0_REGNUM] = tmpl; + env->regs[n - S390_R0_REGNUM] = tmpl; break; + default: + return 0; + } + return 8; +} + +/* the values represent the positions in s390-acr.xml */ +#define S390_A0_REGNUM 0 +#define S390_A15_REGNUM 15 +/* total number of registers in s390-acr.xml */ +#define S390_NUM_AC_REGS 16 + +static int cpu_read_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +{ + switch (n) { case S390_A0_REGNUM ... S390_A15_REGNUM: - env->aregs[n-S390_A0_REGNUM] = tmp32; - r = 4; - break; + return gdb_get_reg32(mem_buf, env->aregs[n]); + default: + return 0; + } +} + +static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +{ + switch (n) { + case S390_A0_REGNUM ... 
S390_A15_REGNUM: + env->aregs[n] = ldl_p(mem_buf); + return 4; + default: + return 0; + } +} + +/* the values represent the positions in s390-fpr.xml */ +#define S390_FPC_REGNUM 0 +#define S390_F0_REGNUM 1 +#define S390_F15_REGNUM 16 +/* total number of registers in s390-fpr.xml */ +#define S390_NUM_FP_REGS 17 + +static int cpu_read_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +{ + switch (n) { case S390_FPC_REGNUM: - env->fpc = tmp32; - r = 4; - break; + return gdb_get_reg32(mem_buf, env->fpc); case S390_F0_REGNUM ... S390_F15_REGNUM: - env->fregs[n-S390_F0_REGNUM].ll = tmpl; - break; + return gdb_get_reg64(mem_buf, env->fregs[n - S390_F0_REGNUM].ll); default: return 0; } - return r; +} + +static int cpu_write_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +{ + switch (n) { + case S390_FPC_REGNUM: + env->fpc = ldl_p(mem_buf); + return 4; + case S390_F0_REGNUM ... S390_F15_REGNUM: + env->fregs[n - S390_F0_REGNUM].ll = ldtul_p(mem_buf); + return 8; + default: + return 0; + } +} + +void s390_cpu_gdb_init(CPUState *cs) +{ + gdb_register_coprocessor(cs, cpu_read_ac_reg, + cpu_write_ac_reg, + S390_NUM_AC_REGS, "s390-acr.xml", 0); + + gdb_register_coprocessor(cs, cpu_read_fp_reg, + cpu_write_fp_reg, + S390_NUM_FP_REGS, "s390-fpr.xml", 0); } diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c index a32d91aa01..a85a480c6a 100644 --- a/target-s390x/kvm.c +++ b/target-s390x/kvm.c @@ -916,23 +916,30 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb) return r; } -static int kvm_s390_cpu_start(S390CPU *cpu) +static void sigp_cpu_start(void *arg) { + CPUState *cs = arg; + S390CPU *cpu = S390_CPU(cs); + s390_add_running_cpu(cpu); - qemu_cpu_kick(CPU(cpu)); DPRINTF("DONE: KVM cpu start: %p\n", &cpu->env); - return 0; } -int kvm_s390_cpu_restart(S390CPU *cpu) +static void sigp_cpu_restart(void *arg) { + CPUState *cs = arg; + S390CPU *cpu = S390_CPU(cs); struct kvm_s390_irq irq = { .type = KVM_S390_RESTART, }; kvm_s390_vcpu_interrupt(cpu, &irq); 
s390_add_running_cpu(cpu); - qemu_cpu_kick(CPU(cpu)); +} + +int kvm_s390_cpu_restart(S390CPU *cpu) +{ + run_on_cpu(CPU(cpu), sigp_cpu_restart, CPU(cpu)); DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env); return 0; } @@ -980,10 +987,12 @@ static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) switch (order_code) { case SIGP_START: - cc = kvm_s390_cpu_start(target_cpu); + run_on_cpu(CPU(target_cpu), sigp_cpu_start, CPU(target_cpu)); + cc = 0; break; case SIGP_RESTART: - cc = kvm_s390_cpu_restart(target_cpu); + run_on_cpu(CPU(target_cpu), sigp_cpu_restart, CPU(target_cpu)); + cc = 0; break; case SIGP_SET_ARCH: *statusreg &= 0xffffffff00000000UL; @@ -1306,3 +1315,8 @@ int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, } return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); } + +int kvm_s390_get_memslot_count(KVMState *s) +{ + return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); +} diff --git a/target-s390x/misc_helper.c b/target-s390x/misc_helper.c index 0b625826ef..ef9758a96a 100644 --- a/target-s390x/misc_helper.c +++ b/target-s390x/misc_helper.c @@ -114,33 +114,16 @@ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2) } #ifndef CONFIG_USER_ONLY -static void cpu_reset_all(void) -{ - CPUState *cs; - S390CPUClass *scc; - - CPU_FOREACH(cs) { - scc = S390_CPU_GET_CLASS(cs); - scc->cpu_reset(cs); - } -} - -static void cpu_full_reset_all(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - cpu_reset(cpu); - } -} - static int modified_clear_reset(S390CPU *cpu) { S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); + CPUState *t; pause_all_vcpus(); cpu_synchronize_all_states(); - cpu_full_reset_all(); + CPU_FOREACH(t) { + run_on_cpu(t, s390_do_cpu_full_reset, t); + } cmma_reset(cpu); io_subsystem_reset(); scc->load_normal(CPU(cpu)); @@ -152,10 +135,13 @@ static int modified_clear_reset(S390CPU *cpu) static int load_normal_reset(S390CPU *cpu) { S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); + CPUState *t; pause_all_vcpus(); 
cpu_synchronize_all_states(); - cpu_reset_all(); + CPU_FOREACH(t) { + run_on_cpu(t, s390_do_cpu_reset, t); + } cmma_reset(cpu); io_subsystem_reset(); scc->initial_cpu_reset(CPU(cpu)); diff --git a/target-tricore/Makefile.objs b/target-tricore/Makefile.objs new file mode 100644 index 0000000000..21e820d8f9 --- /dev/null +++ b/target-tricore/Makefile.objs @@ -0,0 +1 @@ +obj-y += translate.o helper.o cpu.o op_helper.o diff --git a/target-tricore/cpu-qom.h b/target-tricore/cpu-qom.h new file mode 100644 index 0000000000..470215ac9e --- /dev/null +++ b/target-tricore/cpu-qom.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef QEMU_TRICORE_CPU_QOM_H +#define QEMU_TRICORE_CPU_QOM_H + +#include "qom/cpu.h" + + +#define TYPE_TRICORE_CPU "tricore-cpu" + +#define TRICORE_CPU_CLASS(klass) \ + OBJECT_CLASS_CHECK(TriCoreCPUClass, (klass), TYPE_TRICORE_CPU) +#define TRICORE_CPU(obj) \ + OBJECT_CHECK(TriCoreCPU, (obj), TYPE_TRICORE_CPU) +#define TRICORE_CPU_GET_CLASS(obj) \ + OBJECT_GET_CLASS(TriCoreCPUClass, (obj), TYPE_TRICORE_CPU) + +typedef struct TriCoreCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} TriCoreCPUClass; + +/** + * TriCoreCPU: + * @env: #CPUTriCoreState + * + * A TriCore CPU. + */ +typedef struct TriCoreCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUTriCoreState env; +} TriCoreCPU; + +static inline TriCoreCPU *tricore_env_get_cpu(CPUTriCoreState *env) +{ + return TRICORE_CPU(container_of(env, TriCoreCPU, env)); +} + +#define ENV_GET_CPU(e) CPU(tricore_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(TriCoreCPU, env) + +hwaddr tricore_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +void tricore_cpu_do_interrupt(CPUState *cpu); +void tricore_cpu_dump_state(CPUState *cpu, FILE *f, + fprintf_function cpu_fprintf, int flags); + + +#endif /*QEMU_TRICORE_CPU_QOM_H */ diff --git a/target-tricore/cpu.c b/target-tricore/cpu.c new file mode 100644 index 0000000000..db9f404b44 --- /dev/null +++ b/target-tricore/cpu.c @@ -0,0 +1,192 @@ +/* + * TriCore emulation for qemu: main translation routines. + * + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "qemu-common.h" + +static inline void set_feature(CPUTriCoreState *env, int feature) +{ + env->features |= 1ULL << feature; +} + +static void tricore_cpu_set_pc(CPUState *cs, vaddr value) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + env->PC = value & ~(target_ulong)1; +} + +static void tricore_cpu_synchronize_from_tb(CPUState *cs, + TranslationBlock *tb) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + env->PC = tb->pc; +} + +static void tricore_cpu_reset(CPUState *s) +{ + TriCoreCPU *cpu = TRICORE_CPU(s); + TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(cpu); + CPUTriCoreState *env = &cpu->env; + + tcc->parent_reset(s); + + tlb_flush(s, 1); + + cpu_state_reset(env); +} + +static bool tricore_cpu_has_work(CPUState *cs) +{ + return true; +} + +static void tricore_cpu_realizefn(DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(dev); + + cpu_reset(cs); + qemu_init_vcpu(cs); + + tcc->parent_realize(dev, errp); +} + + +static void tricore_cpu_initfn(Object *obj) +{ + CPUState *cs = CPU(obj); + TriCoreCPU *cpu = TRICORE_CPU(obj); + CPUTriCoreState *env = &cpu->env; + + cs->env_ptr = env; + cpu_exec_init(env); + + if (tcg_enabled()) { + tricore_tcg_init(); + } +} + +static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model) +{ + ObjectClass *oc; + char *typename; + + if (!cpu_model) { + return NULL; + } + + typename = g_strdup_printf("%s-" TYPE_TRICORE_CPU, cpu_model); + oc = 
object_class_by_name(typename); + g_free(typename); + if (!oc || !object_class_dynamic_cast(oc, TYPE_TRICORE_CPU) || + object_class_is_abstract(oc)) { + return NULL; + } + return oc; +} + +static void tc1796_initfn(Object *obj) +{ + TriCoreCPU *cpu = TRICORE_CPU(obj); + + set_feature(&cpu->env, TRICORE_FEATURE_13); +} + +static void aurix_initfn(Object *obj) +{ + TriCoreCPU *cpu = TRICORE_CPU(obj); + + set_feature(&cpu->env, TRICORE_FEATURE_16); +} + +typedef struct TriCoreCPUInfo { + const char *name; + void (*initfn)(Object *obj); + void (*class_init)(ObjectClass *oc, void *data); +} TriCoreCPUInfo; + +static const TriCoreCPUInfo tricore_cpus[] = { + { .name = "tc1796", .initfn = tc1796_initfn }, + { .name = "aurix", .initfn = aurix_initfn }, + { .name = NULL } +}; + +static void tricore_cpu_class_init(ObjectClass *c, void *data) +{ + TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c); + CPUClass *cc = CPU_CLASS(c); + DeviceClass *dc = DEVICE_CLASS(c); + + mcc->parent_realize = dc->realize; + dc->realize = tricore_cpu_realizefn; + + mcc->parent_reset = cc->reset; + cc->reset = tricore_cpu_reset; + cc->class_by_name = tricore_cpu_class_by_name; + cc->has_work = tricore_cpu_has_work; + + cc->do_interrupt = tricore_cpu_do_interrupt; + cc->dump_state = tricore_cpu_dump_state; + cc->set_pc = tricore_cpu_set_pc; + cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb; + +} + +static void cpu_register(const TriCoreCPUInfo *info) +{ + TypeInfo type_info = { + .parent = TYPE_TRICORE_CPU, + .instance_size = sizeof(TriCoreCPU), + .instance_init = info->initfn, + .class_size = sizeof(TriCoreCPUClass), + .class_init = info->class_init, + }; + + type_info.name = g_strdup_printf("%s-" TYPE_TRICORE_CPU, info->name); + type_register(&type_info); + g_free((void *)type_info.name); +} + +static const TypeInfo tricore_cpu_type_info = { + .name = TYPE_TRICORE_CPU, + .parent = TYPE_CPU, + .instance_size = sizeof(TriCoreCPU), + .instance_init = tricore_cpu_initfn, + .abstract = true, + 
.class_size = sizeof(TriCoreCPUClass), + .class_init = tricore_cpu_class_init, +}; + +static void tricore_cpu_register_types(void) +{ + const TriCoreCPUInfo *info = tricore_cpus; + + type_register_static(&tricore_cpu_type_info); + + while (info->name) { + cpu_register(info); + info++; + } +} + +type_init(tricore_cpu_register_types) diff --git a/target-tricore/cpu.h b/target-tricore/cpu.h new file mode 100644 index 0000000000..b036ff1159 --- /dev/null +++ b/target-tricore/cpu.h @@ -0,0 +1,405 @@ +/* + * TriCore emulation for qemu: main CPU struct. + * + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#if !defined(__TRICORE_CPU_H__) +#define __TRICORE_CPU_H__ + +#include "tricore-defs.h" +#include "config.h" +#include "qemu-common.h" +#include "exec/cpu-defs.h" +#include "fpu/softfloat.h" + +#define ELF_MACHINE EM_TRICORE + +#define CPUArchState struct CPUTriCoreState + +struct CPUTriCoreState; + +struct tricore_boot_info; + +#define NB_MMU_MODES 3 + +typedef struct tricore_def_t tricore_def_t; + +typedef struct CPUTriCoreState CPUTriCoreState; +struct CPUTriCoreState { + /* GPR Register */ + uint32_t gpr_a[16]; + uint32_t gpr_d[16]; + /* CSFR Register */ + uint32_t PCXI; +/* Frequently accessed PSW_USB bits are stored separately for efficiency. 
+ This contains all the other bits. Use psw_{read,write} to access + the whole PSW. */ + uint32_t PSW; + + /* PSW flag cache for faster execution + */ + uint32_t PSW_USB_C; + uint32_t PSW_USB_V; /* Only if bit 31 set, then flag is set */ + uint32_t PSW_USB_SV; /* Only if bit 31 set, then flag is set */ + uint32_t PSW_USB_AV; /* Only if bit 31 set, then flag is set. */ + uint32_t PSW_USB_SAV; /* Only if bit 31 set, then flag is set. */ + + uint32_t PC; + uint32_t SYSCON; + uint32_t CPU_ID; + uint32_t BIV; + uint32_t BTV; + uint32_t ISP; + uint32_t ICR; + uint32_t FCX; + uint32_t LCX; + uint32_t COMPAT; + + /* Mem Protection Register */ + uint32_t DPR0_0L; + uint32_t DPR0_0U; + uint32_t DPR0_1L; + uint32_t DPR0_1U; + uint32_t DPR0_2L; + uint32_t DPR0_2U; + uint32_t DPR0_3L; + uint32_t DPR0_3U; + + uint32_t DPR1_0L; + uint32_t DPR1_0U; + uint32_t DPR1_1L; + uint32_t DPR1_1U; + uint32_t DPR1_2L; + uint32_t DPR1_2U; + uint32_t DPR1_3L; + uint32_t DPR1_3U; + + uint32_t DPR2_0L; + uint32_t DPR2_0U; + uint32_t DPR2_1L; + uint32_t DPR2_1U; + uint32_t DPR2_2L; + uint32_t DPR2_2U; + uint32_t DPR2_3L; + uint32_t DPR2_3U; + + uint32_t DPR3_0L; + uint32_t DPR3_0U; + uint32_t DPR3_1L; + uint32_t DPR3_1U; + uint32_t DPR3_2L; + uint32_t DPR3_2U; + uint32_t DPR3_3L; + uint32_t DPR3_3U; + + uint32_t CPR0_0L; + uint32_t CPR0_0U; + uint32_t CPR0_1L; + uint32_t CPR0_1U; + uint32_t CPR0_2L; + uint32_t CPR0_2U; + uint32_t CPR0_3L; + uint32_t CPR0_3U; + + uint32_t CPR1_0L; + uint32_t CPR1_0U; + uint32_t CPR1_1L; + uint32_t CPR1_1U; + uint32_t CPR1_2L; + uint32_t CPR1_2U; + uint32_t CPR1_3L; + uint32_t CPR1_3U; + + uint32_t CPR2_0L; + uint32_t CPR2_0U; + uint32_t CPR2_1L; + uint32_t CPR2_1U; + uint32_t CPR2_2L; + uint32_t CPR2_2U; + uint32_t CPR2_3L; + uint32_t CPR2_3U; + + uint32_t CPR3_0L; + uint32_t CPR3_0U; + uint32_t CPR3_1L; + uint32_t CPR3_1U; + uint32_t CPR3_2L; + uint32_t CPR3_2U; + uint32_t CPR3_3L; + uint32_t CPR3_3U; + + uint32_t DPM0; + uint32_t DPM1; + uint32_t DPM2; + 
uint32_t DPM3; + + uint32_t CPM0; + uint32_t CPM1; + uint32_t CPM2; + uint32_t CPM3; + + /* Memory Management Registers */ + uint32_t MMU_CON; + uint32_t MMU_ASI; + uint32_t MMU_TVA; + uint32_t MMU_TPA; + uint32_t MMU_TPX; + uint32_t MMU_TFA; + /* {1.3.1 only */ + uint32_t BMACON; + uint32_t SMACON; + uint32_t DIEAR; + uint32_t DIETR; + uint32_t CCDIER; + uint32_t MIECON; + uint32_t PIEAR; + uint32_t PIETR; + uint32_t CCPIER; + /*} */ + /* Debug Registers */ + uint32_t DBGSR; + uint32_t EXEVT; + uint32_t CREVT; + uint32_t SWEVT; + uint32_t TR0EVT; + uint32_t TR1EVT; + uint32_t DMS; + uint32_t DCX; + uint32_t DBGTCR; + uint32_t CCTRL; + uint32_t CCNT; + uint32_t ICNT; + uint32_t M1CNT; + uint32_t M2CNT; + uint32_t M3CNT; + /* Floating Point Registers */ + /* XXX: */ + + /* QEMU */ + int error_code; + uint32_t hflags; /* CPU State */ + + CPU_COMMON + + /* Internal CPU feature flags. */ + uint64_t features; + + const tricore_def_t *cpu_model; + void *irq[8]; + struct QEMUTimer *timer; /* Internal timer */ +}; + +#define MASK_PCXI_PCPN 0xff000000 +#define MASK_PCXI_PIE 0x00800000 +#define MASK_PCXI_UL 0x00400000 +#define MASK_PCXI_PCXS 0x000f0000 +#define MASK_PCXI_PCXO 0x0000ffff + +#define MASK_PSW_USB 0xff000000 +#define MASK_USB_C 0x80000000 +#define MASK_USB_V 0x40000000 +#define MASK_USB_SV 0x20000000 +#define MASK_USB_AV 0x10000000 +#define MASK_USB_SAV 0x08000000 +#define MASK_PSW_PRS 0x00003000 +#define MASK_PSW_IO 0x00000c00 +#define MASK_PSW_IS 0x00000200 +#define MASK_PSW_GW 0x00000100 +#define MASK_PSW_CDE 0x00000080 +#define MASK_PSW_CDC 0x0000007f + +#define MASK_SYSCON_PRO_TEN 0x2 +#define MASK_SYSCON_FCD_SF 0x1 + +#define MASK_CPUID_MOD 0xffff0000 +#define MASK_CPUID_MOD_32B 0x0000ff00 +#define MASK_CPUID_REV 0x000000ff + +#define MASK_ICR_PIPN 0x00ff0000 +#define MASK_ICR_IE 0x00000100 +#define MASK_ICR_CCPN 0x000000ff + +#define MASK_FCX_FCXS 0x000f0000 +#define MASK_FCX_FCXO 0x0000ffff + +#define MASK_LCX_LCXS 0x000f0000 +#define MASK_LCX_LCX0 
0x0000ffff + +#define TRICORE_HFLAG_UM0 0x00002 /* user mode-0 flag */ +#define TRICORE_HFLAG_UM1 0x00001 /* user mode-1 flag */ +#define TRICORE_HFLAG_SM 0x00000 /* kernel mode flag */ + +enum tricore_features { + TRICORE_FEATURE_13, + TRICORE_FEATURE_131, + TRICORE_FEATURE_16, +}; + +static inline int tricore_feature(CPUTriCoreState *env, int feature) +{ + return (env->features & (1ULL << feature)) != 0; +} + +/* TriCore Traps Classes*/ +enum { + TRAPC_NONE = -1, + TRAPC_MMU = 0, + TRAPC_PROT = 1, + TRAPC_INSN_ERR = 2, + TRAPC_CTX_MNG = 3, + TRAPC_SYSBUS = 4, + TRAPC_ASSERT = 5, + TRAPC_SYSCALL = 6, + TRAPC_NMI = 7, +}; + +/* Class 0 TIN */ +enum { + TIN0_VAF = 0, + TIN0_VAP = 1, +}; + +/* Class 1 TIN */ +enum { + TIN1_PRIV = 1, + TIN1_MPR = 2, + TIN1_MPW = 3, + TIN1_MPX = 4, + TIN1_MPP = 5, + TIN1_MPN = 6, + TIN1_GRWP = 7, +}; + +/* Class 2 TIN */ +enum { + TIN2_IOPC = 1, + TIN2_UOPC = 2, + TIN2_OPD = 3, + TIN2_ALN = 4, + TIN2_MEM = 5, +}; + +/* Class 3 TIN */ +enum { + TIN3_FCD = 1, + TIN3_CDO = 2, + TIN3_CDU = 3, + TIN3_FCU = 4, + TIN3_CSU = 5, + TIN3_CTYP = 6, + TIN3_NEST = 7, +}; + +/* Class 4 TIN */ +enum { + TIN4_PSE = 1, + TIN4_DSE = 2, + TIN4_DAE = 3, + TIN4_CAE = 4, + TIN4_PIE = 5, + TIN4_DIE = 6, +}; + +/* Class 5 TIN */ +enum { + TIN5_OVF = 1, + TIN5_SOVF = 1, +}; + +/* Class 6 TIN + * + * Is always TIN6_SYS + */ + +/* Class 7 TIN */ +enum { + TIN7_NMI = 0, +}; + +uint32_t psw_read(CPUTriCoreState *env); +void psw_write(CPUTriCoreState *env, uint32_t val); + +#include "cpu-qom.h" +#define MMU_USER_IDX 2 + +void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf); + +#define cpu_exec cpu_tricore_exec +#define cpu_signal_handler cpu_tricore_signal_handler +#define cpu_list tricore_cpu_list + +static inline int cpu_mmu_index(CPUTriCoreState *env) +{ + return 0; +} + + + +#include "exec/cpu-all.h" + +enum { + /* 1 bit to define user level / supervisor access */ + ACCESS_USER = 0x00, + ACCESS_SUPER = 0x01, + /* 1 bit to indicate direction */ + 
ACCESS_STORE = 0x02, + /* Type of instruction that generated the access */ + ACCESS_CODE = 0x10, /* Code fetch access */ + ACCESS_INT = 0x20, /* Integer load/store access */ + ACCESS_FLOAT = 0x30, /* floating point load/store access */ +}; + +void cpu_state_reset(CPUTriCoreState *s); +int cpu_tricore_exec(CPUTriCoreState *s); +void tricore_tcg_init(void); +int cpu_tricore_signal_handler(int host_signum, void *pinfo, void *puc); + +static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc, + target_ulong *cs_base, int *flags) +{ + *pc = env->PC; + *cs_base = 0; + *flags = 0; +} + +TriCoreCPU *cpu_tricore_init(const char *cpu_model); + +static inline CPUTriCoreState *cpu_init(const char *cpu_model) +{ + TriCoreCPU *cpu = cpu_tricore_init(cpu_model); + if (cpu == NULL) { + return NULL; + } + return &cpu->env; + +} + + +/* helpers.c */ +int cpu_tricore_handle_mmu_fault(CPUState *cpu, target_ulong address, + int rw, int mmu_idx); +#define cpu_handle_mmu_fault cpu_tricore_handle_mmu_fault + +#include "exec/exec-all.h" + +static inline void cpu_pc_from_tb(CPUTriCoreState *env, TranslationBlock *tb) +{ + env->PC = tb->pc; +} + +void do_interrupt(CPUTriCoreState *env); + +#endif /*__TRICORE_CPU_H__ */ diff --git a/target-tricore/helper.c b/target-tricore/helper.c new file mode 100644 index 0000000000..e4af6f1ac2 --- /dev/null +++ b/target-tricore/helper.c @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <stdarg.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <inttypes.h> +#include <signal.h> + +#include "cpu.h" + +enum { + TLBRET_DIRTY = -4, + TLBRET_INVALID = -3, + TLBRET_NOMATCH = -2, + TLBRET_BADADDR = -1, + TLBRET_MATCH = 0 +}; + +#if defined(CONFIG_SOFTMMU) +static int get_physical_address(CPUTriCoreState *env, hwaddr *physical, + int *prot, target_ulong address, + int rw, int access_type) +{ + int ret = TLBRET_MATCH; + + *physical = address & 0xFFFFFFFF; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + + return ret; +} +#endif + +/* TODO: Add exeption support*/ +static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address, + int rw, int tlb_error) +{ +} + +int cpu_tricore_handle_mmu_fault(CPUState *cs, target_ulong address, + int rw, int mmu_idx) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + hwaddr physical; + int prot; + int access_type; + int ret = 0; + + rw &= 1; + access_type = ACCESS_INT; + ret = get_physical_address(env, &physical, &prot, + address, rw, access_type); + qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx + " prot %d\n", __func__, address, ret, physical, prot); + + if (ret == TLBRET_MATCH) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, + mmu_idx, TARGET_PAGE_SIZE); + ret = 0; + } else if (ret < 0) { + raise_mmu_exception(env, address, rw, ret); + ret = 1; + } + + return ret; +} + +void tricore_cpu_do_interrupt(CPUState *cs) +{ +} + +TriCoreCPU *cpu_tricore_init(const char *cpu_model) +{ + return TRICORE_CPU(cpu_generic_init(TYPE_TRICORE_CPU, cpu_model)); +} + +static void tricore_cpu_list_entry(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + 
CPUListState *s = user_data; + const char *typename; + char *name; + + typename = object_class_get_name(oc); + name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_TRICORE_CPU)); + (*s->cpu_fprintf)(s->file, " %s\n", + name); + g_free(name); +} + +void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf) +{ + CPUListState s = { + .file = f, + .cpu_fprintf = cpu_fprintf, + }; + GSList *list; + + list = object_class_get_list(TYPE_TRICORE_CPU, false); + (*cpu_fprintf)(f, "Available CPUs:\n"); + g_slist_foreach(list, tricore_cpu_list_entry, &s); + g_slist_free(list); +} + +uint32_t psw_read(CPUTriCoreState *env) +{ + /* clear all USB bits */ + env->PSW &= 0xffffff; + /* now set them from the cache */ + env->PSW |= ((env->PSW_USB_C != 0) << 31); + env->PSW |= ((env->PSW_USB_V & (1 << 31)) >> 1); + env->PSW |= ((env->PSW_USB_SV & (1 << 31)) >> 2); + env->PSW |= ((env->PSW_USB_AV & (1 << 31)) >> 3); + env->PSW |= ((env->PSW_USB_SAV & (1 << 31)) >> 4); + + return env->PSW; +} + +void psw_write(CPUTriCoreState *env, uint32_t val) +{ + env->PSW_USB_C = (val & MASK_USB_C); + env->PSW_USB_V = (val & MASK_USB_V << 1); + env->PSW_USB_SV = (val & MASK_USB_SV << 2); + env->PSW_USB_AV = ((val & MASK_USB_AV) << 3); + env->PSW_USB_SAV = ((val & MASK_USB_SAV) << 4); + env->PSW = val; +} diff --git a/target-tricore/helper.h b/target-tricore/helper.h new file mode 100644 index 0000000000..7b7d74b32c --- /dev/null +++ b/target-tricore/helper.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* Arithmetic */ +DEF_HELPER_3(add_ssov, i32, env, i32, i32) +DEF_HELPER_3(sub_ssov, i32, env, i32, i32) +/* CSA */ +DEF_HELPER_2(call, void, env, i32) +DEF_HELPER_1(ret, void, env) +DEF_HELPER_2(bisr, void, env, i32) +DEF_HELPER_1(rfe, void, env) diff --git a/target-tricore/op_helper.c b/target-tricore/op_helper.c new file mode 100644 index 0000000000..6376f074e5 --- /dev/null +++ b/target-tricore/op_helper.c @@ -0,0 +1,392 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <stdlib.h> +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +#define SSOV(env, ret, arg, len) do { \ + int64_t max_pos = INT##len ##_MAX; \ + int64_t max_neg = INT##len ##_MIN; \ + if (arg > max_pos) { \ + env->PSW_USB_V = (1 << 31); \ + env->PSW_USB_SV = (1 << 31); \ + ret = (target_ulong)max_pos; \ + } else { \ + if (arg < max_neg) { \ + env->PSW_USB_V = (1 << 31); \ + env->PSW_USB_SV = (1 << 31); \ + ret = (target_ulong)max_neg; \ + } else { \ + env->PSW_USB_V = 0; \ + ret = (target_ulong)arg; \ + } \ + } \ + env->PSW_USB_AV = arg ^ arg * 2u; \ + env->PSW_USB_SAV |= env->PSW_USB_AV; \ +} while (0) + +target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + target_ulong ret; + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result = t1 + t2; + SSOV(env, ret, result, 32); + return ret; +} + +target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + target_ulong ret; + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result = t1 - t2; + SSOV(env, ret, result, 32); + return ret; +} + +/* context save area (CSA) related helpers */ + +static int cdc_increment(target_ulong *psw) +{ + if ((*psw & MASK_PSW_CDC) == 0x7f) { + return 0; + } + + (*psw)++; + /* check for overflow */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + if (count == 0) { + (*psw)--; + return 1; + } + return 0; +} + +static int cdc_decrement(target_ulong *psw) +{ + if ((*psw & MASK_PSW_CDC) == 0x7f) { + return 0; + } + /* check for underflow */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + if (count == 0) { + return 1; + } + (*psw)--; + return 0; +} + +static bool cdc_zero(target_ulong *psw) +{ + int cdc = *psw & MASK_PSW_CDC; + /* Returns TRUE if 
PSW.CDC.COUNT == 0 or if PSW.CDC == + 7'b1111111, otherwise returns FALSE. */ + if (cdc == 0x7f) { + return true; + } + /* find CDC.COUNT */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + return count == 0; +} + +static void save_context_upper(CPUTriCoreState *env, int ea, + target_ulong *new_FCX) +{ + *new_FCX = cpu_ldl_data(env, ea); + cpu_stl_data(env, ea, env->PCXI); + cpu_stl_data(env, ea+4, env->PSW); + cpu_stl_data(env, ea+8, env->gpr_a[10]); + cpu_stl_data(env, ea+12, env->gpr_a[11]); + cpu_stl_data(env, ea+16, env->gpr_d[8]); + cpu_stl_data(env, ea+20, env->gpr_d[9]); + cpu_stl_data(env, ea+24, env->gpr_d[10]); + cpu_stl_data(env, ea+28, env->gpr_d[11]); + cpu_stl_data(env, ea+32, env->gpr_a[12]); + cpu_stl_data(env, ea+36, env->gpr_a[13]); + cpu_stl_data(env, ea+40, env->gpr_a[14]); + cpu_stl_data(env, ea+44, env->gpr_a[15]); + cpu_stl_data(env, ea+48, env->gpr_d[12]); + cpu_stl_data(env, ea+52, env->gpr_d[13]); + cpu_stl_data(env, ea+56, env->gpr_d[14]); + cpu_stl_data(env, ea+60, env->gpr_d[15]); + +} + +static void save_context_lower(CPUTriCoreState *env, int ea, + target_ulong *new_FCX) +{ + *new_FCX = cpu_ldl_data(env, ea); + cpu_stl_data(env, ea, env->PCXI); + cpu_stl_data(env, ea+4, env->PSW); + cpu_stl_data(env, ea+8, env->gpr_a[2]); + cpu_stl_data(env, ea+12, env->gpr_a[3]); + cpu_stl_data(env, ea+16, env->gpr_d[0]); + cpu_stl_data(env, ea+20, env->gpr_d[1]); + cpu_stl_data(env, ea+24, env->gpr_d[2]); + cpu_stl_data(env, ea+28, env->gpr_d[3]); + cpu_stl_data(env, ea+32, env->gpr_a[4]); + cpu_stl_data(env, ea+36, env->gpr_a[5]); + cpu_stl_data(env, ea+40, env->gpr_a[6]); + cpu_stl_data(env, ea+44, env->gpr_a[7]); + cpu_stl_data(env, ea+48, env->gpr_d[4]); + cpu_stl_data(env, ea+52, env->gpr_d[5]); + cpu_stl_data(env, ea+56, env->gpr_d[6]); + cpu_stl_data(env, ea+60, env->gpr_d[7]); +} + +static void restore_context_upper(CPUTriCoreState *env, int ea, + target_ulong *new_PCXI, 
target_ulong *new_PSW) +{ + *new_PCXI = cpu_ldl_data(env, ea); + *new_PSW = cpu_ldl_data(env, ea+4); + env->gpr_a[10] = cpu_ldl_data(env, ea+8); + env->gpr_a[11] = cpu_ldl_data(env, ea+12); + env->gpr_d[8] = cpu_ldl_data(env, ea+16); + env->gpr_d[9] = cpu_ldl_data(env, ea+20); + env->gpr_d[10] = cpu_ldl_data(env, ea+24); + env->gpr_d[11] = cpu_ldl_data(env, ea+28); + env->gpr_a[12] = cpu_ldl_data(env, ea+32); + env->gpr_a[13] = cpu_ldl_data(env, ea+36); + env->gpr_a[14] = cpu_ldl_data(env, ea+40); + env->gpr_a[15] = cpu_ldl_data(env, ea+44); + env->gpr_d[12] = cpu_ldl_data(env, ea+48); + env->gpr_d[13] = cpu_ldl_data(env, ea+52); + env->gpr_d[14] = cpu_ldl_data(env, ea+56); + env->gpr_d[15] = cpu_ldl_data(env, ea+60); + cpu_stl_data(env, ea, env->FCX); +} + +void helper_call(CPUTriCoreState *env, uint32_t next_pc) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + target_ulong psw; + + psw = psw_read(env); + /* if (FCX == 0) trap(FCU); */ + if (env->FCX == 0) { + /* FCU trap */ + } + /* if (PSW.CDE) then if (cdc_increment()) then trap(CDO); */ + if (psw & MASK_PSW_CDE) { + if (cdc_increment(&psw)) { + /* CDO trap */ + } + } + /* PSW.CDE = 1;*/ + psw |= MASK_PSW_CDE; + /* tmp_FCX = FCX; */ + tmp_FCX = env->FCX; + /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */ + ea = ((env->FCX & MASK_FCX_FCXS) << 12) + + ((env->FCX & MASK_FCX_FCXO) << 6); + /* new_FCX = M(EA, word); + M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], + A[12], A[13], A[14], A[15], D[12], D[13], D[14], + D[15]}; */ + save_context_upper(env, ea, &new_FCX); + + /* PCXI.PCPN = ICR.CCPN; */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = ICR.IE; */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) + + ((env->ICR & MASK_ICR_IE) << 15)); + /* PCXI.UL = 1; */ + env->PCXI |= MASK_PCXI_UL; + + /* PCXI[19: 0] = FCX[19: 0]; */ + env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); + /* FCX[19: 0] = new_FCX[19: 0]; */ + 
env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); + /* A[11] = next_pc[31: 0]; */ + env->gpr_a[11] = next_pc; + + /* if (tmp_FCX == LCX) trap(FCD);*/ + if (tmp_FCX == env->LCX) { + /* FCD trap */ + } + psw_write(env, psw); +} + +void helper_ret(CPUTriCoreState *env) +{ + target_ulong ea; + target_ulong new_PCXI; + target_ulong new_PSW, psw; + + psw = psw_read(env); + /* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/ + if (env->PSW & MASK_PSW_CDE) { + if (cdc_decrement(&(env->PSW))) { + /* CDU trap */ + } + } + /* if (PCXI[19: 0] == 0) then trap(CSU); */ + if ((env->PCXI & 0xfffff) == 0) { + /* CSU trap */ + } + /* if (PCXI.UL == 0) then trap(CTYP); */ + if ((env->PCXI & MASK_PCXI_UL) == 0) { + /* CTYP trap */ + } + /* PC = {A11 [31: 1], 1’b0}; */ + env->PC = env->gpr_a[11] & 0xfffffffe; + + /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */ + ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + + ((env->PCXI & MASK_PCXI_PCXO) << 6); + /* {new_PCXI, new_PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], + A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); + M(EA, word) = FCX; */ + restore_context_upper(env, ea, &new_PCXI, &new_PSW); + /* FCX[19: 0] = PCXI[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); + /* PCXI = new_PCXI; */ + env->PCXI = new_PCXI; + + if (tricore_feature(env, TRICORE_FEATURE_13)) { + /* PSW = new_PSW */ + psw_write(env, new_PSW); + } else { + /* PSW = {new_PSW[31:26], PSW[25:24], new_PSW[23:0]}; */ + psw_write(env, (new_PSW & ~(0x3000000)) + (psw & (0x3000000))); + } +} + +void helper_bisr(CPUTriCoreState *env, uint32_t const9) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + + if (env->FCX == 0) { + /* FCU trap */ + } + + tmp_FCX = env->FCX; + ea = ((env->FCX & 0xf0000) << 12) + ((env->FCX & 0xffff) << 6); + + save_context_lower(env, ea, &new_FCX); + + /* PCXI.PCPN = ICR.CCPN */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = 
ICR.IE */
+    env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) +
+                ((env->ICR & MASK_ICR_IE) << 15));
+    /* PCXI.UL = 0 */
+    env->PCXI &= ~(MASK_PCXI_UL);
+    /* PCXI[19: 0] = FCX[19: 0] */
+    env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+    /* FXC[19: 0] = new_FCX[19: 0] */
+    env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+    /* ICR.IE = 1 */
+    env->ICR |= MASK_ICR_IE;
+
+    /* ICR.CCPN = const9[7: 0]; assign, don't OR over the stale priority */
+    env->ICR = (env->ICR & ~MASK_ICR_CCPN) + (const9 & MASK_ICR_CCPN);
+
+    if (tmp_FCX == env->LCX) {
+        /* FCD trap */
+    }
+}
+
+void helper_rfe(CPUTriCoreState *env)
+{
+    target_ulong ea;
+    target_ulong new_PCXI;
+    target_ulong new_PSW;
+    /* if (PCXI[19: 0] == 0) then trap(CSU); */
+    if ((env->PCXI & 0xfffff) == 0) {
+        /* raise csu trap */
+    }
+    /* if (PCXI.UL == 0) then trap(CTYP); */
+    if ((env->PCXI & MASK_PCXI_UL) == 0) {
+        /* raise CTYP trap */
+    }
+    /* if (!cdc_zero() AND PSW.CDE) then trap(NEST); */
+    if (!cdc_zero(&(env->PSW)) && (env->PSW & MASK_PSW_CDE)) {
+        /* raise NEST trap */
+    }
+    /* ICR.IE = PCXI.PIE; */
+    env->ICR = (env->ICR & ~MASK_ICR_IE) + ((env->PCXI & MASK_PCXI_PIE) >> 15);
+    /* ICR.CCPN = PCXI.PCPN; */
+    env->ICR = (env->ICR & ~MASK_ICR_CCPN) +
+               ((env->PCXI & MASK_PCXI_PCPN) >> 24);
+    /*EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0};*/
+    ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) +
+         ((env->PCXI & MASK_PCXI_PCXO) << 6);
+    /*{new_PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12],
+      A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word);
+      M(EA, word) = FCX;*/
+    restore_context_upper(env, ea, &new_PCXI, &new_PSW);
+    /* FCX[19: 0] = PCXI[19: 0]; */
+    env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff);
+    /* PCXI = new_PCXI; */
+    env->PCXI = new_PCXI;
+    /* write psw */
+    psw_write(env, new_PSW);
+}
+
+static inline void QEMU_NORETURN do_raise_exception_err(CPUTriCoreState *env,
+                                                        uint32_t exception,
+                                                        int error_code,
+                                                        uintptr_t pc)
+{
+    CPUState *cs = CPU(tricore_env_get_cpu(env));
+    cs->exception_index = exception;
+    env->error_code = error_code;
+
+    if (pc) {
+
/* now we have a real cpu fault */ + cpu_restore_state(cs, pc); + } + + cpu_loop_exit(cs); +} + +static inline void QEMU_NORETURN do_raise_exception(CPUTriCoreState *env, + uint32_t exception, + uintptr_t pc) +{ + do_raise_exception_err(env, exception, 0, pc); +} + +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + int ret; + ret = cpu_tricore_handle_mmu_fault(cs, addr, is_write, mmu_idx); + if (ret) { + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + do_raise_exception_err(env, cs->exception_index, + env->error_code, retaddr); + } +} diff --git a/target-tricore/translate.c b/target-tricore/translate.c new file mode 100644 index 0000000000..4f654deb65 --- /dev/null +++ b/target-tricore/translate.c @@ -0,0 +1,1263 @@ +/* + * TriCore emulation for qemu: main translation routines. + * + * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + + +#include "cpu.h" +#include "disas/disas.h" +#include "tcg-op.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "tricore-opcodes.h" + +/* + * TCG registers + */ +static TCGv cpu_PC; +static TCGv cpu_PCXI; +static TCGv cpu_PSW; +static TCGv cpu_ICR; +/* GPR registers */ +static TCGv cpu_gpr_a[16]; +static TCGv cpu_gpr_d[16]; +/* PSW Flag cache */ +static TCGv cpu_PSW_C; +static TCGv cpu_PSW_V; +static TCGv cpu_PSW_SV; +static TCGv cpu_PSW_AV; +static TCGv cpu_PSW_SAV; +/* CPU env */ +static TCGv_ptr cpu_env; + +#include "exec/gen-icount.h" + +static const char *regnames_a[] = { + "a0" , "a1" , "a2" , "a3" , "a4" , "a5" , + "a6" , "a7" , "a8" , "a9" , "sp" , "a11" , + "a12" , "a13" , "a14" , "a15", + }; + +static const char *regnames_d[] = { + "d0" , "d1" , "d2" , "d3" , "d4" , "d5" , + "d6" , "d7" , "d8" , "d9" , "d10" , "d11" , + "d12" , "d13" , "d14" , "d15", + }; + +typedef struct DisasContext { + struct TranslationBlock *tb; + target_ulong pc, saved_pc, next_pc; + uint32_t opcode; + int singlestep_enabled; + /* Routine used to access memory */ + int mem_idx; + uint32_t hflags, saved_hflags; + int bstate; +} DisasContext; + +enum { + + BS_NONE = 0, + BS_STOP = 1, + BS_BRANCH = 2, + BS_EXCP = 3, +}; + +void tricore_cpu_dump_state(CPUState *cs, FILE *f, + fprintf_function cpu_fprintf, int flags) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + int i; + + cpu_fprintf(f, "PC=%08x\n", env->PC); + for (i = 0; i < 16; ++i) { + if ((i & 3) == 0) { + cpu_fprintf(f, "GPR A%02d:", i); + } + cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_a[i], env->gpr_a[i]); + } + for (i = 0; i < 16; ++i) { + if ((i & 3) == 0) { + cpu_fprintf(f, "GPR D%02d:", i); + } + cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_d[i], env->gpr_d[i]); + } + +} + +/* + * Functions to generate micro-ops + */ + +/* Makros for generating helpers */ + +#define gen_helper_1arg(name, arg) do { \ + TCGv_i32 helper_tmp = 
tcg_const_i32(arg); \ + gen_helper_##name(cpu_env, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +/* Functions for load/save to/from memory */ + +static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, + int16_t con, TCGMemOp mop) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, r2, con); + tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop); + tcg_temp_free(temp); +} + +static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, + int16_t con, TCGMemOp mop) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, r2, con); + tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop); + tcg_temp_free(temp); +} + +/* Functions for arithmetic instructions */ + +static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv t0 = tcg_temp_new_i32(); + TCGv result = tcg_temp_new_i32(); + /* Addition and set V/SV bits */ + tcg_gen_add_tl(result, r1, r2); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, result, r1); + tcg_gen_xor_tl(t0, r1, r2); + tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, result, result); + tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, result); + + tcg_temp_free(result); + tcg_temp_free(t0); +} + +static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2) +{ + TCGv temp = tcg_const_i32(r2); + gen_add_d(ret, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, + TCGv r4) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv result = tcg_temp_new(); + TCGv mask = tcg_temp_new(); + TCGv t0 = tcg_const_i32(0); + + /* create mask for sticky bits */ + tcg_gen_setcond_tl(cond, mask, r4, t0); + tcg_gen_shli_tl(mask, mask, 31); + + tcg_gen_add_tl(result, r1, r2); + /* Calc PSW_V */ + 
tcg_gen_xor_tl(temp, result, r1); + tcg_gen_xor_tl(temp2, r1, r2); + tcg_gen_andc_tl(temp, temp, temp2); + tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V); + /* Set PSW_SV */ + tcg_gen_and_tl(temp, temp, mask); + tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV); + /* calc AV bit */ + tcg_gen_add_tl(temp, result, result); + tcg_gen_xor_tl(temp, temp, result); + tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_and_tl(temp, temp, mask); + tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); + /* write back result */ + tcg_gen_movcond_tl(cond, r3, r4, t0, result, r3); + + tcg_temp_free(t0); + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(result); + tcg_temp_free(mask); +} + +static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, + TCGv r3, TCGv r4) +{ + TCGv temp = tcg_const_i32(r2); + gen_cond_add(cond, r1, temp, r3, r4); + tcg_temp_free(temp); +} + +static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv temp = tcg_temp_new_i32(); + TCGv result = tcg_temp_new_i32(); + + tcg_gen_sub_tl(result, r1, r2); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, result, r1); + tcg_gen_xor_tl(temp, r1, r2); + tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, result, result); + tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, result); + + tcg_temp_free(temp); + tcg_temp_free(result); +} + +static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv high = tcg_temp_new(); + TCGv low = tcg_temp_new(); + + tcg_gen_muls2_tl(low, high, r1, r2); + tcg_gen_mov_tl(ret, low); + /* calc V bit */ + tcg_gen_sari_tl(low, low, 31); + tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* calc SV bit */ + 
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free(high); + tcg_temp_free(low); +} + +static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low) +{ + TCGv sat_neg = tcg_const_i32(low); + TCGv temp = tcg_const_i32(up); + + /* sat_neg = (arg < low ) ? low : arg; */ + tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg); + + /* ret = (sat_neg > up ) ? up : sat_neg; */ + tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg); + + tcg_temp_free(sat_neg); + tcg_temp_free(temp); +} + +static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up) +{ + TCGv temp = tcg_const_i32(up); + /* sat_neg = (arg > up ) ? up : arg; */ + tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg); + tcg_temp_free(temp); +} + +static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count) +{ + if (shift_count == -32) { + tcg_gen_movi_tl(ret, 0); + } else if (shift_count >= 0) { + tcg_gen_shli_tl(ret, r1, shift_count); + } else { + tcg_gen_shri_tl(ret, r1, -shift_count); + } +} + +static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) +{ + uint32_t msk, msk_start; + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv t_0 = tcg_const_i32(0); + + if (shift_count == 0) { + /* Clear PSW.C and PSW.V */ + tcg_gen_movi_tl(cpu_PSW_C, 0); + tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C); + tcg_gen_mov_tl(ret, r1); + } else if (shift_count == -32) { + /* set PSW.C */ + tcg_gen_mov_tl(cpu_PSW_C, r1); + /* fill ret completly with sign bit */ + tcg_gen_sari_tl(ret, r1, 31); + /* clear PSW.V */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + } else if (shift_count > 0) { + TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count); + TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count); + + /* calc carry */ + msk_start = 32 - shift_count; + msk = ((1 << shift_count) 
- 1) << msk_start; + tcg_gen_andi_tl(cpu_PSW_C, r1, msk); + /* calc v/sv bits */ + tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max); + tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min); + tcg_gen_or_tl(cpu_PSW_V, temp, temp2); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* calc sv */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV); + /* do shift */ + tcg_gen_shli_tl(ret, r1, shift_count); + + tcg_temp_free(t_max); + tcg_temp_free(t_min); + } else { + /* clear PSW.V */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* calc carry */ + msk = (1 << -shift_count) - 1; + tcg_gen_andi_tl(cpu_PSW_C, r1, msk); + /* do shift */ + tcg_gen_sari_tl(ret, r1, -shift_count); + } + /* calc av overflow bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc sav overflow bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(t_0); +} + +static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2) +{ + gen_helper_add_ssov(ret, cpu_env, r1, r2); +} + +static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2) +{ + gen_helper_sub_ssov(ret, cpu_env, r1, r2); +} + +/* helpers for generating program flow micro-ops */ + +static inline void gen_save_pc(target_ulong pc) +{ + tcg_gen_movi_tl(cpu_PC, pc); +} + +static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +{ + TranslationBlock *tb; + tb = ctx->tb; + if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) && + likely(!ctx->singlestep_enabled)) { + tcg_gen_goto_tb(n); + gen_save_pc(dest); + tcg_gen_exit_tb((uintptr_t)tb + n); + } else { + gen_save_pc(dest); + if (ctx->singlestep_enabled) { + /* raise exception debug */ + } + tcg_gen_exit_tb(0); + } +} + +static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, + TCGv r2, int16_t address) +{ + int jumpLabel; + jumpLabel = gen_new_label(); + tcg_gen_brcond_tl(cond, r1, r2, jumpLabel); + + gen_goto_tb(ctx, 1, ctx->next_pc); + + 
gen_set_label(jumpLabel); + gen_goto_tb(ctx, 0, ctx->pc + address * 2); +} + +static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1, + int r2, int16_t address) +{ + TCGv temp = tcg_const_i32(r2); + gen_branch_cond(ctx, cond, r1, temp, address); + tcg_temp_free(temp); +} + +static void gen_loop(DisasContext *ctx, int r1, int32_t offset) +{ + int l1; + l1 = gen_new_label(); + + tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1); + gen_goto_tb(ctx, 1, ctx->pc + offset); + gen_set_label(l1); + gen_goto_tb(ctx, 0, ctx->next_pc); +} + +static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, + int r2 , int32_t constant , int32_t offset) +{ + TCGv temp; + + switch (opc) { +/* SB-format jumps */ + case OPC1_16_SB_J: + case OPC1_32_B_J: + gen_goto_tb(ctx, 0, ctx->pc + offset * 2); + break; + case OPC1_16_SB_CALL: + gen_helper_1arg(call, ctx->next_pc); + gen_goto_tb(ctx, 0, ctx->pc + offset * 2); + break; + case OPC1_16_SB_JZ: + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], 0, offset); + break; + case OPC1_16_SB_JNZ: + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset); + break; +/* SBC-format jumps */ + case OPC1_16_SBC_JEQ: + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset); + break; + case OPC1_16_SBC_JNE: + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset); + break; +/* SBRN-format jumps */ + case OPC1_16_SBRN_JZ_T: + temp = tcg_temp_new(); + tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); + gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); + tcg_temp_free(temp); + break; + case OPC1_16_SBRN_JNZ_T: + temp = tcg_temp_new(); + tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); + gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); + tcg_temp_free(temp); + break; +/* SBR-format jumps */ + case OPC1_16_SBR_JEQ: + gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], + offset); + break; + case 
OPC1_16_SBR_JNE: + gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], + offset); + break; + case OPC1_16_SBR_JNZ: + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JNZ_A: + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset); + break; + case OPC1_16_SBR_JGEZ: + gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JGTZ: + gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JLEZ: + gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JLTZ: + gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JZ: + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JZ_A: + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset); + break; + case OPC1_16_SBR_LOOP: + gen_loop(ctx, r1, offset * 2 - 32); + break; +/* SR-format jumps */ + case OPC1_16_SR_JI: + tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe); + tcg_gen_exit_tb(0); + break; + case OPC2_16_SR_RET: + gen_helper_ret(cpu_env); + tcg_gen_exit_tb(0); + break; + default: + printf("Branch Error at %x\n", ctx->pc); + } + ctx->bstate = BS_BRANCH; +} + + +/* + * Functions for decoding instructions + */ + +static void decode_src_opc(DisasContext *ctx, int op1) +{ + int r1; + int32_t const4; + TCGv temp, temp2; + + r1 = MASK_OP_SRC_S1D(ctx->opcode); + const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode); + + switch (op1) { + case OPC1_16_SRC_ADD: + gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_ADD_A15: + gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4); + break; + case OPC1_16_SRC_ADD_15A: + gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_ADD_A: + tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4); + break; + case OPC1_16_SRC_CADD: + gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1], + cpu_gpr_d[15]); + break; 
+ case OPC1_16_SRC_CADDN: + gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1], + cpu_gpr_d[15]); + break; + case OPC1_16_SRC_CMOV: + temp = tcg_const_tl(0); + temp2 = tcg_const_tl(const4); + tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + temp2, cpu_gpr_d[r1]); + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; + case OPC1_16_SRC_CMOVN: + temp = tcg_const_tl(0); + temp2 = tcg_const_tl(const4); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + temp2, cpu_gpr_d[r1]); + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; + case OPC1_16_SRC_EQ: + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], + const4); + break; + case OPC1_16_SRC_LT: + tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1], + const4); + break; + case OPC1_16_SRC_MOV: + tcg_gen_movi_tl(cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_MOV_A: + const4 = MASK_OP_SRC_CONST4(ctx->opcode); + tcg_gen_movi_tl(cpu_gpr_a[r1], const4); + break; + case OPC1_16_SRC_SH: + gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_SHA: + gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4); + break; + } +} + +static void decode_srr_opc(DisasContext *ctx, int op1) +{ + int r1, r2; + TCGv temp; + + r1 = MASK_OP_SRR_S1D(ctx->opcode); + r2 = MASK_OP_SRR_S2(ctx->opcode); + + switch (op1) { + case OPC1_16_SRR_ADD: + gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_A15: + gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_15A: + gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_A: + tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_ADDS: + gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_AND: + tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_CMOV: + temp = tcg_const_tl(0); + 
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + cpu_gpr_d[r2], cpu_gpr_d[r1]); + tcg_temp_free(temp); + break; + case OPC1_16_SRR_CMOVN: + temp = tcg_const_tl(0); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + cpu_gpr_d[r2], cpu_gpr_d[r1]); + tcg_temp_free(temp); + break; + case OPC1_16_SRR_EQ: + tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_LT: + tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV: + tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV_A: + tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV_AA: + tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_MOV_D: + tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_MUL: + gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_OR: + tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB: + gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB_A15B: + gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB_15AB: + gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUBS: + gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_XOR: + tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + } +} + +static void decode_ssr_opc(DisasContext *ctx, int op1) +{ + int r1, r2; + + r1 = MASK_OP_SSR_S1(ctx->opcode); + r2 = MASK_OP_SSR_S2(ctx->opcode); + + switch (op1) { + case OPC1_16_SSR_ST_A: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + break; + case OPC1_16_SSR_ST_A_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + break; + case 
OPC1_16_SSR_ST_B: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + break; + case OPC1_16_SSR_ST_B_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1); + break; + case OPC1_16_SSR_ST_H: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); + break; + case OPC1_16_SSR_ST_H_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2); + break; + case OPC1_16_SSR_ST_W: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + break; + case OPC1_16_SSR_ST_W_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + break; + } +} + +static void decode_sc_opc(DisasContext *ctx, int op1) +{ + int32_t const16; + + const16 = MASK_OP_SC_CONST8(ctx->opcode); + + switch (op1) { + case OPC1_16_SC_AND: + tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_BISR: + gen_helper_1arg(bisr, const16 & 0xff); + break; + case OPC1_16_SC_LD_A: + gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_LD_W: + gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_MOV: + tcg_gen_movi_tl(cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_OR: + tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_ST_A: + gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_ST_W: + gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_SUB_A: + tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16); + break; + } +} + +static void decode_slr_opc(DisasContext *ctx, int op1) +{ + int r1, r2; + + r1 = MASK_OP_SLR_D(ctx->opcode); + r2 = MASK_OP_SLR_S2(ctx->opcode); + + switch (op1) { +/* 
SLR-format */
+    case OPC1_16_SLR_LD_A:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+        break;
+    case OPC1_16_SLR_LD_A_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+        break;
+    case OPC1_16_SLR_LD_BU:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+        break;
+    case OPC1_16_SLR_LD_BU_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+        break;
+    case OPC1_16_SLR_LD_H:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+        break;
+    case OPC1_16_SLR_LD_H_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+        break;
+    case OPC1_16_SLR_LD_W:
+        /* LD.W loads a full 32-bit word; MO_LESW would load and
+           sign-extend only a halfword (the post-increment variant below
+           advancing by 4 bytes confirms the word-sized access). */
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+        break;
+    case OPC1_16_SLR_LD_W_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+        break;
+    }
+}
+
+static void decode_sro_opc(DisasContext *ctx, int op1)
+{
+    int r2;
+    int32_t address;
+
+    r2 = MASK_OP_SRO_S2(ctx->opcode);
+    address = MASK_OP_SRO_OFF4(ctx->opcode);
+
+/* SRO-format: off4 is scaled by the access size (1/2/4 bytes) */
+    switch (op1) {
+    case OPC1_16_SRO_LD_A:
+        gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+        break;
+    case OPC1_16_SRO_LD_BU:
+        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
+        break;
+    case OPC1_16_SRO_LD_H:
+        /* halfword access: scale off4 by 2 (matches SRO ST.H below) */
+        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
+        break;
+    case OPC1_16_SRO_LD_W:
+        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+        break;
+    case OPC1_16_SRO_ST_A:
+        gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+        break;
+    case OPC1_16_SRO_ST_B:
+        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
+        break;
+    case OPC1_16_SRO_ST_H:
+
gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW); + break; + case OPC1_16_SRO_ST_W: + gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL); + break; + } +} + +static void decode_sr_system(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + op2 = MASK_OP_SR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_16_SR_NOP: + break; + case OPC2_16_SR_RET: + gen_compute_branch(ctx, op2, 0, 0, 0, 0); + break; + case OPC2_16_SR_RFE: + gen_helper_rfe(cpu_env); + tcg_gen_exit_tb(0); + ctx->bstate = BS_BRANCH; + break; + case OPC2_16_SR_DEBUG: + /* raise EXCP_DEBUG */ + break; + } +} + +static void decode_sr_accu(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1; + TCGv temp; + + r1 = MASK_OP_SR_S1D(ctx->opcode); + op2 = MASK_OP_SR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_16_SR_RSUB: + /* overflow only if r1 = -0x80000000 */ + temp = tcg_const_i32(-0x80000000); + /* calc V bit */ + tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* sub */ + tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]); + /* calc av */ + tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]); + tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV); + /* calc sav */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_temp_free(temp); + break; + case OPC2_16_SR_SAT_B: + gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80); + break; + case OPC2_16_SR_SAT_BU: + gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff); + break; + case OPC2_16_SR_SAT_H: + gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000); + break; + case OPC2_16_SR_SAT_HU: + gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff); + break; + } +} + +static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx) +{ + int op1; + int r1, r2; + int32_t const16; + int32_t address; + TCGv temp; + + op1 = 
MASK_OP_MAJOR(ctx->opcode); + + /* handle ADDSC.A opcode only being 6 bit long */ + if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) { + op1 = OPC1_16_SRRS_ADDSC_A; + } + + switch (op1) { + case OPC1_16_SRC_ADD: + case OPC1_16_SRC_ADD_A15: + case OPC1_16_SRC_ADD_15A: + case OPC1_16_SRC_ADD_A: + case OPC1_16_SRC_CADD: + case OPC1_16_SRC_CADDN: + case OPC1_16_SRC_CMOV: + case OPC1_16_SRC_CMOVN: + case OPC1_16_SRC_EQ: + case OPC1_16_SRC_LT: + case OPC1_16_SRC_MOV: + case OPC1_16_SRC_MOV_A: + case OPC1_16_SRC_SH: + case OPC1_16_SRC_SHA: + decode_src_opc(ctx, op1); + break; +/* SRR-format */ + case OPC1_16_SRR_ADD: + case OPC1_16_SRR_ADD_A15: + case OPC1_16_SRR_ADD_15A: + case OPC1_16_SRR_ADD_A: + case OPC1_16_SRR_ADDS: + case OPC1_16_SRR_AND: + case OPC1_16_SRR_CMOV: + case OPC1_16_SRR_CMOVN: + case OPC1_16_SRR_EQ: + case OPC1_16_SRR_LT: + case OPC1_16_SRR_MOV: + case OPC1_16_SRR_MOV_A: + case OPC1_16_SRR_MOV_AA: + case OPC1_16_SRR_MOV_D: + case OPC1_16_SRR_MUL: + case OPC1_16_SRR_OR: + case OPC1_16_SRR_SUB: + case OPC1_16_SRR_SUB_A15B: + case OPC1_16_SRR_SUB_15AB: + case OPC1_16_SRR_SUBS: + case OPC1_16_SRR_XOR: + decode_srr_opc(ctx, op1); + break; +/* SSR-format */ + case OPC1_16_SSR_ST_A: + case OPC1_16_SSR_ST_A_POSTINC: + case OPC1_16_SSR_ST_B: + case OPC1_16_SSR_ST_B_POSTINC: + case OPC1_16_SSR_ST_H: + case OPC1_16_SSR_ST_H_POSTINC: + case OPC1_16_SSR_ST_W: + case OPC1_16_SSR_ST_W_POSTINC: + decode_ssr_opc(ctx, op1); + break; +/* SRRS-format */ + case OPC1_16_SRRS_ADDSC_A: + r2 = MASK_OP_SRRS_S2(ctx->opcode); + r1 = MASK_OP_SRRS_S1D(ctx->opcode); + const16 = MASK_OP_SRRS_N(ctx->opcode); + temp = tcg_temp_new(); + tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16); + tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp); + tcg_temp_free(temp); + break; +/* SLRO-format */ + case OPC1_16_SLRO_LD_A: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL); + break; + case 
OPC1_16_SLRO_LD_BU: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB); + break; + case OPC1_16_SLRO_LD_H: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW); + break; + case OPC1_16_SLRO_LD_W: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL); + break; +/* SB-format */ + case OPC1_16_SB_CALL: + case OPC1_16_SB_J: + case OPC1_16_SB_JNZ: + case OPC1_16_SB_JZ: + address = MASK_OP_SB_DISP8_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, 0, address); + break; +/* SBC-format */ + case OPC1_16_SBC_JEQ: + case OPC1_16_SBC_JNE: + address = MASK_OP_SBC_DISP4(ctx->opcode); + const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, const16, address); + break; +/* SBRN-format */ + case OPC1_16_SBRN_JNZ_T: + case OPC1_16_SBRN_JZ_T: + address = MASK_OP_SBRN_DISP4(ctx->opcode); + const16 = MASK_OP_SBRN_N(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, const16, address); + break; +/* SBR-format */ + case OPC1_16_SBR_JEQ: + case OPC1_16_SBR_JGEZ: + case OPC1_16_SBR_JGTZ: + case OPC1_16_SBR_JLEZ: + case OPC1_16_SBR_JLTZ: + case OPC1_16_SBR_JNE: + case OPC1_16_SBR_JNZ: + case OPC1_16_SBR_JNZ_A: + case OPC1_16_SBR_JZ: + case OPC1_16_SBR_JZ_A: + case OPC1_16_SBR_LOOP: + r1 = MASK_OP_SBR_S2(ctx->opcode); + address = MASK_OP_SBR_DISP4(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, address); + break; +/* SC-format */ + case OPC1_16_SC_AND: + case OPC1_16_SC_BISR: + case OPC1_16_SC_LD_A: + case OPC1_16_SC_LD_W: + case OPC1_16_SC_MOV: + case OPC1_16_SC_OR: + case OPC1_16_SC_ST_A: + case OPC1_16_SC_ST_W: + case OPC1_16_SC_SUB_A: + decode_sc_opc(ctx, op1); + break; +/* SLR-format */ + case OPC1_16_SLR_LD_A: + case OPC1_16_SLR_LD_A_POSTINC: + case 
OPC1_16_SLR_LD_BU: + case OPC1_16_SLR_LD_BU_POSTINC: + case OPC1_16_SLR_LD_H: + case OPC1_16_SLR_LD_H_POSTINC: + case OPC1_16_SLR_LD_W: + case OPC1_16_SLR_LD_W_POSTINC: + decode_slr_opc(ctx, op1); + break; +/* SRO-format */ + case OPC1_16_SRO_LD_A: + case OPC1_16_SRO_LD_BU: + case OPC1_16_SRO_LD_H: + case OPC1_16_SRO_LD_W: + case OPC1_16_SRO_ST_A: + case OPC1_16_SRO_ST_B: + case OPC1_16_SRO_ST_H: + case OPC1_16_SRO_ST_W: + decode_sro_opc(ctx, op1); + break; +/* SSRO-format */ + case OPC1_16_SSRO_ST_A: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL); + break; + case OPC1_16_SSRO_ST_B: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB); + break; + case OPC1_16_SSRO_ST_H: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW); + break; + case OPC1_16_SSRO_ST_W: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL); + break; +/* SR-format */ + case OPCM_16_SR_SYSTEM: + decode_sr_system(env, ctx); + break; + case OPCM_16_SR_ACCU: + decode_sr_accu(env, ctx); + break; + case OPC1_16_SR_JI: + r1 = MASK_OP_SR_S1D(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, 0); + break; + case OPC1_16_SR_NOT: + r1 = MASK_OP_SR_S1D(ctx->opcode); + tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]); + break; + } +} + +static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx) +{ +} + +static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch) +{ + /* 16-Bit Instruction */ + if ((ctx->opcode & 0x1) == 0) { + ctx->next_pc = ctx->pc + 2; + decode_16Bit_opc(env, ctx); + /* 32-Bit Instruction */ + } else { + ctx->next_pc = ctx->pc + 4; + decode_32Bit_opc(env, ctx); + 
} +} + +static inline void +gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb, + int search_pc) +{ + CPUState *cs = CPU(cpu); + CPUTriCoreState *env = &cpu->env; + DisasContext ctx; + target_ulong pc_start; + int num_insns; + uint16_t *gen_opc_end; + + if (search_pc) { + qemu_log("search pc %d\n", search_pc); + } + + num_insns = 0; + pc_start = tb->pc; + gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; + ctx.pc = pc_start; + ctx.saved_pc = -1; + ctx.tb = tb; + ctx.singlestep_enabled = cs->singlestep_enabled; + ctx.bstate = BS_NONE; + ctx.mem_idx = cpu_mmu_index(env); + + tcg_clear_temp_count(); + gen_tb_start(); + while (ctx.bstate == BS_NONE) { + ctx.opcode = cpu_ldl_code(env, ctx.pc); + decode_opc(env, &ctx, 0); + + num_insns++; + + if (tcg_ctx.gen_opc_ptr >= gen_opc_end) { + gen_save_pc(ctx.next_pc); + tcg_gen_exit_tb(0); + break; + } + if (singlestep) { + gen_save_pc(ctx.next_pc); + tcg_gen_exit_tb(0); + break; + } + ctx.pc = ctx.next_pc; + } + + gen_tb_end(tb, num_insns); + *tcg_ctx.gen_opc_ptr = INDEX_op_end; + if (search_pc) { + printf("done_generating search pc\n"); + } else { + tb->size = ctx.pc - pc_start; + tb->icount = num_insns; + } + if (tcg_check_temp_count()) { + printf("LEAK at %08x\n", env->PC); + } + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("IN: %s\n", lookup_symbol(pc_start)); + log_target_disas(env, pc_start, ctx.pc - pc_start, 0); + qemu_log("\n"); + } +#endif +} + +void +gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb) +{ + gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false); +} + +void +gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb) +{ + gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true); +} + +void +restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos) +{ + env->PC = tcg_ctx.gen_opc_pc[pc_pos]; +} +/* + * + * Initialization + * + */ + +void cpu_state_reset(CPUTriCoreState 
*env) +{ + /* Reset Regs to Default Value */ + env->PSW = 0xb80; +} + +static void tricore_tcg_init_csfr(void) +{ + cpu_PCXI = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PCXI), "PCXI"); + cpu_PSW = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW), "PSW"); + cpu_PC = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PC), "PC"); + cpu_ICR = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, ICR), "ICR"); +} + +void tricore_tcg_init(void) +{ + int i; + static int inited; + if (inited) { + return; + } + cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); + /* reg init */ + for (i = 0 ; i < 16 ; i++) { + cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, gpr_a[i]), + regnames_a[i]); + } + for (i = 0 ; i < 16 ; i++) { + cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, gpr_d[i]), + regnames_d[i]); + } + tricore_tcg_init_csfr(); + /* init PSW flag cache */ + cpu_PSW_C = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_C), + "PSW_C"); + cpu_PSW_V = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_V), + "PSW_V"); + cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_SV), + "PSW_SV"); + cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_AV), + "PSW_AV"); + cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_SAV), + "PSW_SAV"); +} diff --git a/target-tricore/tricore-defs.h b/target-tricore/tricore-defs.h new file mode 100644 index 0000000000..4350b03044 --- /dev/null +++ b/target-tricore/tricore-defs.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#if !defined(__QEMU_TRICORE_DEFS_H__) +#define __QEMU_TRICORE_DEFS_H__ + +#define TARGET_PAGE_BITS 14 +#define TARGET_LONG_BITS 32 +#define TARGET_PHYS_ADDR_SPACE_BITS 32 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 + +#define TRICORE_TLB_MAX 128 + +#endif /* __QEMU_TRICORE_DEFS_H__ */ diff --git a/target-tricore/tricore-opcodes.h b/target-tricore/tricore-opcodes.h new file mode 100644 index 0000000000..9c6ec013ac --- /dev/null +++ b/target-tricore/tricore-opcodes.h @@ -0,0 +1,1406 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Opcode Masks for Tricore + * Format MASK_OP_InstrFormatName_Field + */ + +/* This creates a mask with bits start .. 
end set to 1 and applies it to op */ +#define MASK_BITS_SHIFT(op, start, end) (extract32(op, (start), \ + (end) - (start) + 1)) +#define MASK_BITS_SHIFT_SEXT(op, start, end) (sextract32(op, (start),\ + (end) - (start) + 1)) + +/* new opcode masks */ + +#define MASK_OP_MAJOR(op) MASK_BITS_SHIFT(op, 0, 7) + +/* 16-Bit Formats */ +#define MASK_OP_SB_DISP8(op) MASK_BITS_SHIFT(op, 8, 15) +#define MASK_OP_SB_DISP8_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 8, 15) + +#define MASK_OP_SBC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) +#define MASK_OP_SBC_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SBR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBR_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SBRN_N(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBRN_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SC_CONST8(op) MASK_BITS_SHIFT(op, 8, 15) + +#define MASK_OP_SLR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SLR_D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SLRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SLRO_D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SR_OP2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SR_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) +#define MASK_OP_SRC_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRO_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRO_OFF4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRR_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRRS_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRRS_S1D(op) MASK_BITS_SHIFT(op, 8, 11) +#define MASK_OP_SRRS_N(op) MASK_BITS_SHIFT(op, 6, 7) + +#define MASK_OP_SSR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SSR_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +#define 
MASK_OP_SSRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SSRO_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* 32-Bit Formats */ + +/* ABS Format */ +#define MASK_OP_ABS_OFF18(op) (MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6) + \ + (MASK_BITS_SHIFT(op, 22, 25) << 10) +\ + (MASK_BITS_SHIFT(op, 12, 15) << 14)) +#define MASK_OP_ABS_OP2(op) MASK_BITS_SHIFT(op, 26, 27) +#define MASK_OP_ABS_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* ABSB Format */ +#define MASK_OP_ABSB_OFF18(op) MASK_OP_ABS_OFF18(op) +#define MASK_OP_ABSB_OP2(op) MASK_BITS_SHIFT(op, 26, 27) +#define MASK_OP_ABSB_B(op) MASK_BITS_SHIFT(op, 11, 11) +#define MASK_OP_ABSB_BPOS(op) MASK_BITS_SHIFT(op, 7, 10) + +/* B Format */ +#define MASK_OP_B_DISP24(op) (MASK_BITS_SHIFT(op, 16, 31) + \ + (MASK_BITS_SHIFT(op, 8, 15) << 16)) +/* BIT Format */ +#define MASK_OP_BIT_D(op) MASK_BITS_SHIFT(op, 28, 31) +#define MASK_OP_BIT_POS2(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_BIT_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_BIT_POS1(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_BIT_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BIT_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BO Format */ +#define MASK_OP_BO_OFF10(op) (MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6)) +#define MASK_OP_BO_OP2(op) MASK_BITS_SHIFT(op, 22, 27) +#define MASK_OP_BO_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BO_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BOL Format */ +#define MASK_OP_BOL_OFF16(op) ((MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \ + (MASK_BITS_SHIFT(op, 22, 27) >> 10)) + +#define MASK_OP_BOL_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BOL_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BRC Format */ +#define MASK_OP_BRC_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRC_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BRC_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BRN 
Format */ +#define MASK_OP_BRN_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRN_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRN_N(op) (MASK_BITS_SHIFT(op, 12, 15) + \ + (MASK_BITS_SHIFT(op, 7, 7) << 4)) +#define MASK_OP_BRN_S1(op) MASK_BITS_SHIFT(op, 8, 11) +/* BRR Format */ +#define MASK_OP_BRR_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRR_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BRR_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* META MASK for similar instr Formats */ +#define MASK_OP_META_D(op) MASK_BITS_SHIFT(op, 28, 31) +#define MASK_OP_META_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* RC Format */ +#define MASK_OP_RC_D(op) MASK_OP_META_D(op) +#define MASK_OP_RC_OP2(op) MASK_BITS_SHIFT(op, 21, 27) +#define MASK_OP_RC_CONST9(op) MASK_BITS_SHIFT(op, 12, 20) +#define MASK_OP_RC_S1(op) MASK_OP_META_S1(op) + +/* RCPW Format */ + +#define MASK_OP_RCPW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCPW_POS(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_RCPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_RCPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RCPW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCPW_S1(op) MASK_OP_META_S1(op) + +/* RCR Format */ + +#define MASK_OP_RCR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RCR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCR_CONST9(op) MASK_BITS_SHIFT(op, 12, 20) +#define MASK_OP_RCR_S1(op) MASK_OP_META_S1(op) + +/* RCRR Format */ + +#define MASK_OP_RCRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RCRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCRR_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCRR_S1(op) MASK_OP_META_S1(op) + +/* RCRW Format */ + +#define MASK_OP_RCRW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCRW_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define 
MASK_OP_RCRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RCRW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCRW_S1(op) MASK_OP_META_S1(op) + +/* RLC Format */ + +#define MASK_OP_RLC_D(op) MASK_OP_META_D(op) +#define MASK_OP_RLC_CONST16(op) MASK_BITS_SHIFT(op, 12, 27) +#define MASK_OP_RLC_S1(op) MASK_OP_META_S1(op) + +/* RR Format */ +#define MASK_OP_RR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR_OP2(op) MASK_BITS_SHIFT(op, 20, 27) +#define MASK_OP_RR_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RR_S1(op) MASK_OP_META_S1(op) + +/* RR1 Format */ +#define MASK_OP_RR1_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR1_OP2(op) MASK_BITS_SHIFT(op, 18, 27) +#define MASK_OP_RR1_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RR1_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RR1_S1(op) MASK_OP_META_S1(op) + +/* RR2 Format */ +#define MASK_OP_RR2_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR2_OP2(op) MASK_BITS_SHIFT(op, 16, 27) +#define MASK_OP_RR2_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RR2_S1(op) MASK_OP_META_S1(op) + +/* RRPW Format */ +#define MASK_OP_RRPW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRPW_POS(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_RRPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_RRPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RRPW_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRPW_S1(op) MASK_OP_META_S1(op) + +/* RRR Format */ +#define MASK_OP_RRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRR_OP2(op) MASK_BITS_SHIFT(op, 20, 23) +#define MASK_OP_RRR_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR_S1(op) MASK_OP_META_S1(op) + +/* RRR1 Format */ +#define MASK_OP_RRR1_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR1_S3(op) MASK_BITS_SHIFT(op, 24, 27) 
+#define MASK_OP_RRR1_OP2(op) MASK_BITS_SHIFT(op, 18, 23) +#define MASK_OP_RRR1_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RRR1_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR1_S1(op) MASK_OP_META_S1(op) + +/* RRR2 Format */ +#define MASK_OP_RRR2_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR2_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRR2_OP2(op) MASK_BITS_SHIFT(op, 16, 23) +#define MASK_OP_RRR2_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR2_S1(op) MASK_OP_META_S1(op) + +/* RRRR Format */ +#define MASK_OP_RRRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RRRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRRR_S1(op) MASK_OP_META_S1(op) + +/* RRRW Format */ +#define MASK_OP_RRRW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRRW_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RRRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RRRW_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRRW_S1(op) MASK_OP_META_S1(op) + +/* SYS Format */ +#define MASK_OP_SYS_OP2(op) MASK_BITS_SHIFT(op, 22, 27) +#define MASK_OP_SYS_S1D(op) MASK_OP_META_S1(op) + + + +/* + * Tricore Opcodes Enums + * + * Format: OPC(1|2|M)_InstrLen_Name + * OPC1 = only op1 field is used + * OPC2 = op1 and op2 field used part of OPCM + * OPCM = op1 field used to group Instr + * InstrLen = 16|32 + * Name = Name of Instr + */ + +/* 16-Bit */ +enum { + + OPCM_16_SR_SYSTEM = 0x00, + OPCM_16_SR_ACCU = 0x32, + + OPC1_16_SRC_ADD = 0xc2, + OPC1_16_SRC_ADD_A15 = 0x92, + OPC1_16_SRC_ADD_15A = 0x9a, + OPC1_16_SRR_ADD = 0x42, + OPC1_16_SRR_ADD_A15 = 0x12, + OPC1_16_SRR_ADD_15A = 0x1a, + OPC1_16_SRC_ADD_A = 0xb0, + OPC1_16_SRR_ADD_A = 0x30, + OPC1_16_SRR_ADDS = 0x22, + OPC1_16_SRRS_ADDSC_A = 0x10, + OPC1_16_SC_AND = 0x16, + OPC1_16_SRR_AND = 0x26, + OPC1_16_SC_BISR = 0xe0, + OPC1_16_SRC_CADD = 0x8a, + 
OPC1_16_SRC_CADDN = 0xca, + OPC1_16_SB_CALL = 0x5c, + OPC1_16_SRC_CMOV = 0xaa, + OPC1_16_SRR_CMOV = 0x2a, + OPC1_16_SRC_CMOVN = 0xea, + OPC1_16_SRR_CMOVN = 0x6a, + OPC1_16_SRC_EQ = 0xba, + OPC1_16_SRR_EQ = 0x3a, + OPC1_16_SB_J = 0x3c, + OPC1_16_SBC_JEQ = 0x1e, + OPC1_16_SBR_JEQ = 0x3e, + OPC1_16_SBR_JGEZ = 0xce, + OPC1_16_SBR_JGTZ = 0x4e, + OPC1_16_SR_JI = 0xdc, + OPC1_16_SBR_JLEZ = 0x8e, + OPC1_16_SBR_JLTZ = 0x0e, + OPC1_16_SBC_JNE = 0x5e, + OPC1_16_SBR_JNE = 0x7e, + OPC1_16_SB_JNZ = 0xee, + OPC1_16_SBR_JNZ = 0xf6, + OPC1_16_SBR_JNZ_A = 0x7c, + OPC1_16_SBRN_JNZ_T = 0xae, + OPC1_16_SB_JZ = 0x6e, + OPC1_16_SBR_JZ = 0x76, + OPC1_16_SBR_JZ_A = 0xbc, + OPC1_16_SBRN_JZ_T = 0x2e, + OPC1_16_SC_LD_A = 0xd8, + OPC1_16_SLR_LD_A = 0xd4, + OPC1_16_SLR_LD_A_POSTINC = 0xc4, + OPC1_16_SLRO_LD_A = 0xc8, + OPC1_16_SRO_LD_A = 0xcc, + OPC1_16_SLR_LD_BU = 0x14, + OPC1_16_SLR_LD_BU_POSTINC = 0x04, + OPC1_16_SLRO_LD_BU = 0x08, + OPC1_16_SRO_LD_BU = 0x0c, + OPC1_16_SLR_LD_H = 0x94, + OPC1_16_SLR_LD_H_POSTINC = 0x84, + OPC1_16_SLRO_LD_H = 0x88, + OPC1_16_SRO_LD_H = 0x8c, + OPC1_16_SC_LD_W = 0x58, + OPC1_16_SLR_LD_W = 0x54, + OPC1_16_SLR_LD_W_POSTINC = 0x44, + OPC1_16_SLRO_LD_W = 0x48, + OPC1_16_SRO_LD_W = 0x4c, + OPC1_16_SBR_LOOP = 0xfc, + OPC1_16_SRC_LT = 0xfa, + OPC1_16_SRR_LT = 0x7a, + OPC1_16_SC_MOV = 0xda, + OPC1_16_SRC_MOV = 0x82, + OPC1_16_SRR_MOV = 0x02, + OPC1_16_SRC_MOV_E = 0xd2,/* 1.6 only */ + OPC1_16_SRC_MOV_A = 0xa0, + OPC1_16_SRR_MOV_A = 0x60, + OPC1_16_SRR_MOV_AA = 0x40, + OPC1_16_SRR_MOV_D = 0x80, + OPC1_16_SRR_MUL = 0xe2, + OPC1_16_SR_NOT = 0x46, + OPC1_16_SC_OR = 0x96, + OPC1_16_SRR_OR = 0xa6, + OPC1_16_SRC_SH = 0x06, + OPC1_16_SRC_SHA = 0x86, + OPC1_16_SC_ST_A = 0xf8, + OPC1_16_SRO_ST_A = 0xec, + OPC1_16_SSR_ST_A = 0xf4, + OPC1_16_SSR_ST_A_POSTINC = 0xe4, + OPC1_16_SSRO_ST_A = 0xe8, + OPC1_16_SRO_ST_B = 0x2c, + OPC1_16_SSR_ST_B = 0x34, + OPC1_16_SSR_ST_B_POSTINC = 0x24, + OPC1_16_SSRO_ST_B = 0x28, + OPC1_16_SRO_ST_H = 0xac, + OPC1_16_SSR_ST_H = 0xb4, + 
OPC1_16_SSR_ST_H_POSTINC = 0xa4, + OPC1_16_SSRO_ST_H = 0xa8, + OPC1_16_SC_ST_W = 0x78, + OPC1_16_SRO_ST_W = 0x6c, + OPC1_16_SSR_ST_W = 0x74, + OPC1_16_SSR_ST_W_POSTINC = 0x64, + OPC1_16_SSRO_ST_W = 0x68, + OPC1_16_SRR_SUB = 0xa2, + OPC1_16_SRR_SUB_A15B = 0x52, + OPC1_16_SRR_SUB_15AB = 0x5a, + OPC1_16_SC_SUB_A = 0x20, + OPC1_16_SRR_SUBS = 0x62, + OPC1_16_SRR_XOR = 0xc6, + +}; + +/* + * SR Format + */ +/* OPCM_16_SR_SYSTEM */ +enum { + + OPC2_16_SR_NOP = 0x00, + OPC2_16_SR_RET = 0x09, + OPC2_16_SR_RFE = 0x08, + OPC2_16_SR_DEBUG = 0x0a, +}; +/* OPCM_16_SR_ACCU */ +enum { + OPC2_16_SR_RSUB = 0x05, + OPC2_16_SR_SAT_B = 0x00, + OPC2_16_SR_SAT_BU = 0x01, + OPC2_16_SR_SAT_H = 0x02, + OPC2_16_SR_SAT_HU = 0x03, + +}; + +/* 32-Bit */ + +enum { +/* ABS Format 1, M */ + OPCM_32_ABS_LDW = 0x85, + OPCM_32_ABS_LDB = 0x05, + OPCM_32_ABS_LDMST_SWAP = 0xe5, + OPCM_32_ABS_LDST_CONTEXT = 0x15, + OPCM_32_ABS_STORE = 0xa5, + OPCM_32_ABS_STOREB_H = 0x25, + OPC1_32_ABS_STOREQ = 0x65, + OPC1_32_ABS_LD_Q = 0x45, + OPC1_32_ABS_LEA = 0xc5, +/* ABSB Format */ + OPC1_32_ABSB_ST_T = 0xd5, +/* B Format */ + OPC1_32_B_CALL = 0x6d, + OPC1_32_B_CALLA = 0xed, + OPC1_32_B_J = 0x1d, + OPC1_32_B_JA = 0x9d, + OPC1_32_B_JL = 0x5d, + OPC1_32_B_JLA = 0xdd, +/* Bit Format */ + OPCM_32_BIT_ANDACC = 0x47, + OPCM_32_BIT_LOGICAL_T1 = 0x87, + OPCM_32_BIT_INSERT = 0x67, + OPCM_32_BIT_LOGICAL_T2 = 0x07, + OPCM_32_BIT_ORAND = 0xc7, + OPCM_32_BIT_SH_LOGIC1 = 0x27, + OPCM_32_BIT_SH_LOGIC2 = 0xa7, +/* BO Format */ + OPCM_32_BO_ADDRMODE_POST_PRE_BASE = 0x89, + OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR = 0xa9, + OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE = 0x09, + OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR = 0x29, + OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE = 0x49, + OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR = 0x69, +/* BOL Format */ + OPC1_32_BOL_LD_A_LONGOFF = 0x99, + OPC1_32_BOL_LD_W_LONFOFF = 0x19, + OPC1_32_BOL_LEA_LONGOFF = 0xd9, + OPC1_32_BOL_ST_W_LONGOFF = 0x59, + OPC1_32_BOL_ST_A_LONGOFF = 0xb5, /* 1.6 only */ +/* BRC 
Format */ + OPCM_32_BRC_EQ_NEQ = 0xdf, + OPCM_32_BRC_GE = 0xff, + OPCM_32_BRC_JLT = 0xbf, + OPCM_32_BRC_JNE = 0x9f, +/* BRN Format */ + OPCM_32_BRN_JTT = 0x6f, +/* BRR Format */ + OPCM_32_BRR_EQ_NEQ = 0x5f, + OPCM_32_BRR_ADDR_EQ_NEQ = 0x7d, + OPCM_32_BRR_GE = 0x7f, + OPCM_32_BRR_JLT = 0x3f, + OPCM_32_BRR_JNE = 0x1f, + OPCM_32_BRR_JNZ = 0xbd, + OPCM_32_BRR_LOOP = 0xfd, +/* RC Format */ + OPCM_32_RC_LOGICAL_SHIFT = 0x8f, + OPCM_32_RC_ACCUMULATOR = 0x8b, + OPCM_32_RC_SERVICEROUTINE = 0xad, + OPCM_32_RC_MUL = 0x53, +/* RCPW Format */ + OPCM_32_RCPW_MASK_INSERT = 0xb7, +/* RCR Format */ + OPCM_32_RCR_COND_SELECT = 0xab, + OPCM_32_RCR_MADD = 0x13, + OPCM_32_RCR_MSUB = 0x33, +/* RCRR Format */ + OPC1_32_RCRR_INSERT = 0x97, +/* RCRW Format */ + OPCM_32_RCRW_MASK_INSERT = 0xd7, +/* RLC Format */ + OPC1_32_RLC_ADDI = 0x1b, + OPC1_32_RLC_ADDIH = 0x9b, + OPC1_32_RLC_ADDIH_A = 0x11, + OPC1_32_RLC_MFCR = 0x4d, + OPC1_32_RLC_MOV = 0x3b, + OPC1_32_RLC_MOV_U = 0xbb, + OPC1_32_RLC_MOV_H = 0x7b, + OPC1_32_RLC_MOVH_A = 0x91, + OPC1_32_RLC_MTCR = 0xcd, +/* RR Format */ + OPCM_32_RR_LOGICAL_SHIFT = 0x0f, + OPCM_32_RR_ACCUMULATOR = 0x0b, + OPCM_32_RR_ADRESS = 0x01, + OPCM_32_RR_FLOAT = 0x4b, + OPCM_32_RR_IDIRECT = 0x2d, +/* RR1 Format */ + OPCM_32_RR1_MUL = 0xb3, + OPCM_32_RR1_MULQ = 0x93, +/* RR2 Format */ + OPCM_32_RR2_MUL = 0x73, +/* RRPW Format */ + OPCM_32_RRPW_EXTRACT_INSERT = 0x37, + OPC1_32_RRPW_DEXTR = 0x77, +/* RRR Format */ + OPCM_32_RRR_COND_SELECT = 0x2b, + OPCM_32_RRR_FLOAT = 0x6b, +/* RRR1 Format */ + OPCM_32_RRR1_MADD = 0x83, + OPCM_32_RRR1_MADDQ_H = 0x43, + OPCM_32_RRR1_MADDSU_H = 0xc3, + OPCM_32_RRR1_MSUB_H = 0xa3, + OPCM_32_RRR1_MSUB_Q = 0x63, + OPCM_32_RRR1_MSUBADS_H = 0xe3, +/* RRR2 Format */ + OPCM_32_RRR2_MADD = 0x03, + OPCM_32_RRR2_MSUB = 0x23, +/* RRRR Format */ + OPCM_32_RRRR_EXTRACT_INSERT = 0x17, +/* RRRW Format */ + OPCM_32_RRRW_EXTRACT_INSERT = 0x57, +/* SYS Format */ + OPCM_32_SYS_INTERRUPTS = 0x0d, + OPC1_32_SYS_RSTV = 0x2f, +}; + + + +/* + * ABS Format + 
*/ + +/* OPCM_32_ABS_LDW */ +enum { + + OPC2_32_ABS_LD_A = 0x02, + OPC2_32_ABS_LD_D = 0x01, + OPC2_32_ABS_LD_DA = 0x03, + OPC2_32_ABS_LD_W = 0x00, +}; + +/* OPCM_32_ABS_LDB */ +enum { + OPC2_32_ABS_LD_B = 0x00, + OPC2_32_ABS_LD_BU = 0x01, + OPC2_32_ABS_LD_H = 0x02, + OPC2_32_ABS_LD_HU = 0x03, +}; +/* OPCM_32_ABS_LDMST_SWAP */ +enum { + OPC2_32_ABS_LDMST = 0x01, + OPC2_32_ABS_SWAP_W = 0x00, +}; +/* OPCM_32_ABS_LDST_CONTEXT */ +enum { + OPC2_32_ABS_LDLCX = 0x02, + OPC2_32_ABS_LDUCX = 0x03, + OPC2_32_ABS_STLCX = 0x00, + OPC2_32_ABS_STUCX = 0x01, +}; +/* OPCM_32_ABS_STORE */ +enum { + OPC2_32_ABS_ST_A = 0x02, + OPC2_32_ABS_ST_D = 0x01, + OPC2_32_ABS_ST_DA = 0x03, + OPC2_32_ABS_ST_W = 0x00, +}; +/* OPCM_32_ABS_STOREB_H */ +enum { + OPC2_32_ABS_ST_B = 0x00, + OPC2_32_ABS_ST_H = 0x02, +}; +/* + * Bit Format + */ +/* OPCM_32_BIT_ANDACC */ +enum { + OPC2_32_BIT_AND_AND_T = 0x00, + OPC2_32_BIT_AND_ANDN_T = 0x03, + OPC2_32_BIT_AND_NOR_T = 0x02, + OPC2_32_BIT_AND_OR_T = 0x01, +}; +/* OPCM_32_BIT_LOGICAL_T */ +enum { + OPC2_32_BIT_AND_T = 0x00, + OPC2_32_BIT_ANDN_T = 0x03, + OPC2_32_BIT_NOR_T = 0x02, + OPC2_32_BIT_OR_T = 0x01, +}; +/* OPCM_32_BIT_INSERT */ +enum { + OPC2_32_BIT_INS_T = 0x00, + OPC2_32_BIT_INSN_T = 0x01, +}; +/* OPCM_32_BIT_LOGICAL_T2 */ +enum { + OPC2_32_BIT_NAND_T = 0x00, + OPC2_32_BIT_ORN_T = 0x01, + OPC2_32_BIT_XNOR_T = 0x02, + OPC2_32_BIT_XOR_T = 0x03, +}; +/* OPCM_32_BIT_ORAND */ +enum { + OPC2_32_BIT_OR_AND_T = 0x00, + OPC2_32_BIT_OR_ANDN_T = 0x03, + OPC2_32_BIT_OR_NOR_T = 0x02, + OPC2_32_BIT_OR_OR_T = 0x01, +}; +/*OPCM_32_BIT_SH_LOGIC1 */ +enum { + OPC2_32_BIT_SH_AND_T = 0x00, + OPC2_32_BIT_SH_ANDN_T = 0x03, + OPC2_32_BIT_SH_NOR_T = 0x02, + OPC2_32_BIT_SH_OR_T = 0x01, +}; +/* OPCM_32_BIT_SH_LOGIC2 */ +enum { + OPC2_32_BIT_SH_NAND_T = 0x00, + OPC2_32_BIT_SH_ORN_T = 0x01, + OPC2_32_BIT_SH_XNOR_T = 0x02, + OPC2_32_BIT_SH_XOR_T = 0x03, +}; +/* + * BO Format + */ +/* OPCM_32_BO_ADDRMODE_POST_PRE_BASE */ +enum { + OPC2_32_BO_CACHEA_I_SHORTOFF = 0x2e, + 
OPC2_32_BO_CACHEA_I_POSTINC = 0x0e, + OPC2_32_BO_CACHEA_I_PREINC = 0x1e, + OPC2_32_BO_CACHEA_W_SHORTOFF = 0x2c, + OPC2_32_BO_CACHEA_W_POSTINC = 0x0c, + OPC2_32_BO_CACHEA_W_PREINC = 0x1c, + OPC2_32_BO_CACHEA_WI_SHORTOFF = 0x2d, + OPC2_32_BO_CACHEA_WI_POSTINC = 0x0d, + OPC2_32_BO_CACHEA_WI_PREINC = 0x1d, + /* 1.3.1 only */ + OPC2_32_BO_CACHEI_W_SHORTOFF = 0x2b, + OPC2_32_BO_CACHEI_W_POSTINC = 0x0b, + OPC2_32_BO_CACHEI_W_PREINC = 0x1b, + OPC2_32_BO_CACHEI_WI_SHORTOFF = 0x2f, + OPC2_32_BO_CACHEI_WI_POSTINC = 0x0f, + OPC2_32_BO_CACHEI_WI_PREINC = 0x1f, + /* end 1.3.1 only */ + OPC2_32_BO_ST_A_SHORTOFF = 0x26, + OPC2_32_BO_ST_A_POSTINC = 0x06, + OPC2_32_BO_ST_A_PREINC = 0x16, + OPC2_32_BO_ST_B_SHORTOFF = 0x20, + OPC2_32_BO_ST_B_POSTINC = 0x00, + OPC2_32_BO_ST_B_PREINC = 0x10, + OPC2_32_BO_ST_D_SHORTOFF = 0x25, + OPC2_32_BO_ST_D_POSTINC = 0x05, + OPC2_32_BO_ST_D_PREINC = 0x15, + OPC2_32_BO_ST_DA_SHORTOFF = 0x27, + OPC2_32_BO_ST_DA_POSTINC = 0x07, + OPC2_32_BO_ST_DA_PREINC = 0x17, + OPC2_32_BO_ST_H_SHORTOFF = 0x22, + OPC2_32_BO_ST_H_POSTINC = 0x02, + OPC2_32_BO_ST_H_PREINC = 0x12, + OPC2_32_BO_ST_Q_SHORTOFF = 0x28, + OPC2_32_BO_ST_Q_POSTINC = 0x08, + OPC2_32_BO_ST_Q_PREINC = 0x18, + OPC2_32_BO_ST_W_SHORTOFF = 0x24, + OPC2_32_BO_ST_W_POSTINC = 0x04, + OPC2_32_BO_ST_W_PREINC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_CACHEA_I_BR = 0x0e, + OPC2_32_BO_CACHEA_I_CIRC = 0x1e, + OPC2_32_BO_CACHEA_W_BR = 0x0c, + OPC2_32_BO_CACHEA_W_CIRC = 0x1c, + OPC2_32_BO_CACHEA_WI_BR = 0x0d, + OPC2_32_BO_CACHEA_WI_CIRC = 0x1d, + OPC2_32_BO_ST_A_BR = 0x06, + OPC2_32_BO_ST_A_CIRC = 0x16, + OPC2_32_BO_ST_B_BR = 0x00, + OPC2_32_BO_ST_B_CIRC = 0x10, + OPC2_32_BO_ST_D_BR = 0x05, + OPC2_32_BO_ST_D_CIRC = 0x15, + OPC2_32_BO_ST_DA_BR = 0x07, + OPC2_32_BO_ST_DA_CIRC = 0x17, + OPC2_32_BO_ST_H_BR = 0x02, + OPC2_32_BO_ST_H_CIRC = 0x12, + OPC2_32_BO_ST_Q_BR = 0x08, + OPC2_32_BO_ST_Q_CIRC = 0x18, + OPC2_32_BO_ST_W_BR = 0x04, + OPC2_32_BO_ST_W_CIRC = 0x14, +}; +/* 
OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE */ +enum { + OPC2_32_BO_LD_A_SHORTOFF = 0x26, + OPC2_32_BO_LD_A_POSTINC = 0x06, + OPC2_32_BO_LD_A_PREINC = 0x16, + OPC2_32_BO_LD_B_SHORTOFF = 0x20, + OPC2_32_BO_LD_B_POSTINC = 0x00, + OPC2_32_BO_LD_B_PREINC = 0x10, + OPC2_32_BO_LD_BU_SHORTOFF = 0x21, + OPC2_32_BO_LD_BU_POSTINC = 0x01, + OPC2_32_BO_LD_BU_PREINC = 0x11, + OPC2_32_BO_LD_D_SHORTOFF = 0x25, + OPC2_32_BO_LD_D_POSTINC = 0x05, + OPC2_32_BO_LD_D_PREINC = 0x15, + OPC2_32_BO_LD_DA_SHORTOFF = 0x27, + OPC2_32_BO_LD_DA_POSTINC = 0x07, + OPC2_32_BO_LD_DA_PREINC = 0x17, + OPC2_32_BO_LD_H_SHORTOFF = 0x22, + OPC2_32_BO_LD_H_POSTINC = 0x02, + OPC2_32_BO_LD_H_PREINC = 0x12, + OPC2_32_BO_LD_HU_SHORTOFF = 0x23, + OPC2_32_BO_LD_HU_POSTINC = 0x03, + OPC2_32_BO_LD_HU_PREINC = 0x13, + OPC2_32_BO_LD_Q_SHORTOFF = 0x28, + OPC2_32_BO_LD_Q_POSTINC = 0x08, + OPC2_32_BO_LD_Q_PREINC = 0x18, + OPC2_32_BO_LD_W_SHORTOFF = 0x24, + OPC2_32_BO_LD_W_POSTINC = 0x04, + OPC2_32_BO_LD_W_PREINC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_LD_A_BR = 0x06, + OPC2_32_BO_LD_A_CIRC = 0x16, + OPC2_32_BO_LD_B_BR = 0x00, + OPC2_32_BO_LD_B_CIRC = 0x10, + OPC2_32_BO_LD_BU_BR = 0x01, + OPC2_32_BO_LD_BU_CIRC = 0x11, + OPC2_32_BO_LD_D_BR = 0x05, + OPC2_32_BO_LD_D_CIRC = 0x15, + OPC2_32_BO_LD_DA_BR = 0x07, + OPC2_32_BO_LD_DA_CIRC = 0x17, + OPC2_32_BO_LD_H_BR = 0x02, + OPC2_32_BO_LD_H_CIRC = 0x12, + OPC2_32_BO_LD_HU_BR = 0x03, + OPC2_32_BO_LD_HU_CIRC = 0x13, + OPC2_32_BO_LD_Q_BR = 0x08, + OPC2_32_BO_LD_Q_CIRC = 0x18, + OPC2_32_BO_LD_W_BR = 0x04, + OPC2_32_BO_LD_W_CIRC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE */ +enum { + OPC2_32_BO_LDLCX_SHORTOFF = 0x24, + OPC2_32_BO_LDMST_SHORTOFF = 0x21, + OPC2_32_BO_LDMST_POSTINC = 0x01, + OPC2_32_BO_LDMST_PREINC = 0x11, + OPC2_32_BO_LDUCX_SHORTOFF = 0x25, + OPC2_32_BO_LEA_SHORTOFF = 0x28, + OPC2_32_BO_STLCX_SHORTOFF = 0x26, + OPC2_32_BO_STUCX_SHORTOFF = 0x27, + OPC2_32_BO_SWAP_W_SHORTOFF = 0x20, + OPC2_32_BO_SWAP_W_POSTINC = 0x00, + 
OPC2_32_BO_SWAP_W_PREINC = 0x10, +}; +/*OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_LDMST_BR = 0x01, + OPC2_32_BO_LDMST_CIRC = 0x11, + OPC2_32_BO_SWAP_W_BR = 0x00, + OPC2_32_BO_SWAP_W_CIRC = 0x10, +}; +/* + * BRC Format + */ +/*OPCM_32_BRC_EQ_NEQ */ +enum { + OPC2_32_BRC_JEQ = 0x00, + OPC2_32_BRC_JNE = 0x01, +}; +/* OPCM_32_BRC_GE */ +enum { + OP2_BRC_JGE = 0x00, + OPC_BRC_JGE_U = 0x01, +}; +/* OPCM_32_BRC_JLT */ +enum { + OPC2_32_BRC_JLT = 0x00, + OPC2_32_BRC_JLT_U = 0x01, +}; +/* OPCM_32_BRC_JNE */ +enum { + OPC2_32_BRC_JNED = 0x01, + OPC2_32_BRC_JNEI = 0x00, +}; +/* + * BRN Format + */ +/* OPCM_32_BRN_JTT */ +enum { + OPC2_32_BRN_JNZ_T = 0x01, + OPC2_32_BRN_JZ_T = 0x00, +}; +/* + * BRR Format + */ +/* OPCM_32_BRR_EQ_NEQ */ +enum { + OPC2_32_BRR_JEQ = 0x00, + OPC2_32_BRR_JNE = 0x01, +}; +/* OPCM_32_BRR_ADDR_EQ_NEQ */ +enum { + OPC2_32_BRR_JEQ_A = 0x00, + OPC2_32_BRR_JNE_A = 0x01, +}; +/*OPCM_32_BRR_GE */ +enum { + OPC2_32_BRR_JGE = 0x00, + OPC2_32_BRR_JGE_U = 0x01, +}; +/* OPCM_32_BRR_JLT */ +enum { + OPC2_32_BRR_JLT = 0x00, + OPC2_32_BRR_JLT_U = 0x01, +}; +/* OPCM_32_BRR_JNE */ +enum { + OPC2_32_BRR_JNED = 0x01, + OPC2_32_BRR_JNEI = 0x00, +}; +/* OPCM_32_BRR_JNZ */ +enum { + OPC2_32_BRR_JNZ_A = 0x01, + OPC2_32_BRR_JZ_A = 0x00, +}; +/* OPCM_32_BRR_LOOP */ +enum { + OPC2_32_BRR_LOOP = 0x00, + OPC2_32_BRR_LOOPU = 0x01, +}; +/* + * RC Format + */ +/* OPCM_32_RC_LOGICAL_SHIFT */ +enum { + OPC2_32_RC_AND = 0x08, + OPC2_32_RC_ANDN = 0x0e, + OPC2_32_RC_NAND = 0x09, + OPC2_32_RC_NOR = 0x0b, + OPC2_32_RC_OR = 0x0a, + OPC2_32_RC_ORN = 0x0f, + OPC2_32_RC_SH = 0x00, + OPC2_32_RC_SH_H = 0x40, + OPC2_32_RC_SHA = 0x01, + OPC2_32_RC_SHA_H = 0x41, + OPC2_32_RC_SHAS = 0x02, + OPC2_32_RC_XNOR = 0x0d, + OPC2_32_RC_XOR = 0x0c, +}; +/* OPCM_32_RC_ACCUMULATOR */ +enum { + OPC2_32_RC_ABSDIF = 0x0e, + OPC2_32_RC_ABSDIFS = 0x0f, + OPC2_32_RC_ADD = 0x00, + OPC2_32_RC_ADDC = 0x05, + OPC2_32_RC_ADDS = 0x02, + OPC2_32_RC_ADDS_U = 0x03, + OPC2_32_RC_ADDX = 0x04, + 
OPC2_32_RC_AND_EQ = 0x20, + OPC2_32_RC_AND_GE = 0x24, + OPC2_32_RC_AND_GE_U = 0x25, + OPC2_32_RC_AND_LT = 0x22, + OPC2_32_RC_AND_LT_U = 0x23, + OPC2_32_RC_AND_NE = 0x21, + OPC2_32_RC_EQ = 0x10, + OPC2_32_RC_EQANY_B = 0x56, + OPC2_32_RC_EQANY_H = 0x76, + OPC2_32_RC_GE = 0x14, + OPC2_32_RC_GE_U = 0x15, + OPC2_32_RC_LT = 0x12, + OPC2_32_RC_LT_U = 0x13, + OPC2_32_RC_MAX = 0x1a, + OPC2_32_RC_MAX_U = 0x1b, + OPC2_32_RC_MIN = 0x18, + OPC2_32_RC_MIN_U = 0x19, + OPC2_32_RC_NE = 0x11, + OPC2_32_RC_OR_EQ = 0x27, + OPC2_32_RC_OR_GE = 0x2b, + OPC2_32_RC_OR_GE_U = 0x2c, + OPC2_32_RC_OR_LT = 0x29, + OPC2_32_RC_OR_LT_U = 0x2a, + OPC2_32_RC_OR_NE = 0x28, + OPC2_32_RC_RSUB = 0x08, + OPC2_32_RC_RSUBS = 0x0a, + OPC2_32_RC_RSUBS_U = 0x0b, + OPC2_32_RC_SH_EQ = 0x37, + OPC2_32_RC_SH_GE = 0x3b, + OPC2_32_RC_SH_GE_U = 0x3c, + OPC2_32_RC_SH_LT = 0x39, + OPC2_32_RC_SH_LT_U = 0x3a, + OPC2_32_RC_SH_NE = 0x38, + OPC2_32_RC_XOR_EQ = 0x2f, + OPC2_32_RC_XOR_GE = 0x33, + OPC2_32_RC_XOR_GE_U = 0x34, + OPC2_32_RC_XOR_LT = 0x31, + OPC2_32_RC_XOR_LT_U = 0x32, + OPC2_32_RC_XOR_NE = 0x30, +}; +/* OPCM_32_RC_SERVICEROUTINE */ +enum { + OPC2_32_RC_BISR = 0x00, + OPC2_32_RC_SYSCALL = 0x04, +}; +/* OPCM_32_RC_MUL */ +enum { + OPC2_32_RC_MUL_32 = 0x01, + OPC2_32_RC_MUL_64 = 0x03, + OPC2_32_RC_MULS_32 = 0x05, + OPC2_32_RC_MUL_U_64 = 0x02, + OPC2_32_RC_MULS_U_32 = 0x04, +}; +/* + * RCPW Format + */ +/* OPCM_32_RCPW_MASK_INSERT */ +enum { + OPC2_32_RCPW_IMASK = 0x01, + OPC2_32_RCPW_INSERT = 0x00, +}; +/* + * RCR Format + */ +/* OPCM_32_RCR_COND_SELECT */ +enum { + OPC2_32_RCR_CADD = 0x00, + OPC2_32_RCR_CADDN = 0x01, + OPC2_32_RCR_SEL = 0x04, + OPC2_32_RCR_SELN = 0x05, +}; +/* OPCM_32_RCR_MADD */ +enum { + OPC2_32_RCR_MADD_32 = 0x01, + OPC2_32_RCR_MADD_64 = 0x03, + OPC2_32_RCR_MADDS_32 = 0x05, + OPC2_32_RCR_MADDS_64 = 0x07, + OPC2_32_RCR_MADD_U_64 = 0x02, + OPC2_32_RCR_MADDS_U_32 = 0x04, + OPC2_32_RCR_MADDS_U_64 = 0x06, +}; +/* OPCM_32_RCR_MSUB */ +enum { + OPC2_32_RCR_MSUB_32 = 0x01, + OPC2_32_RCR_MSUB_64 = 
0x03, + OPC2_32_RCR_MSUBS_32 = 0x05, + OPC2_32_RCR_MSUBS_64 = 0x07, + OPC2_32_RCR_MSUB_U_32 = 0x02, + OPC2_32_RCR_MSUBS_U_32 = 0x04, + OPC2_32_RCR_MSUBS_U_64 = 0x06, +}; +/* + * RCRW Format + */ +/* OPCM_32_RCRW_MASK_INSERT */ +enum { + OPC2_32_RCRW_IMASK = 0x01, + OPC2_32_RCRW_INSERT = 0x00, +}; + +/* + * RR Format + */ +/* OPCM_32_RR_LOGICAL_SHIFT */ +enum { + OPC2_32_RR_AND = 0x08, + OPC2_32_RR_ANDN = 0x0e, + OPC2_32_RR_CLO = 0x1c, + OPC2_32_RR_CLO_H = 0x7d, + OPC2_32_RR_CLS = 0x1d, + OPC2_32_RR_CLS_H = 0x7e, + OPC2_32_RR_CLZ = 0x1b, + OPC2_32_RR_CLZ_H = 0x7c, + OPC2_32_RR_NAND = 0x09, + OPC2_32_RR_NOR = 0x0b, + OPC2_32_RR_OR = 0x0a, + OPC2_32_RR_ORN = 0x0f, + OPC2_32_RR_SH = 0x00, + OPC2_32_RR_SH_H = 0x40, + OPC2_32_RR_SHA = 0x01, + OPC2_32_RR_SHA_H = 0x41, + OPC2_32_RR_SHAS = 0x02, + OPC2_32_RR_XNOR = 0x0d, + OPC2_32_RR_XOR = 0x0c, +}; +/* OPCM_32_RR_ACCUMULATOR */ +enum { + OPC2_32_RR_ABS = 0x1c, + OPC2_32_RR_ABS_B = 0x5c, + OPC2_32_RR_ABS_H = 0x7c, + OPC2_32_RR_ABSDIF = 0x0e, + OPC2_32_RR_ABSDIF_B = 0x4e, + OPC2_32_RR_ABSDIF_H = 0x6e, + OPC2_32_RR_ABSDIFS = 0x0f, + OPC2_32_RR_ABSDIFS_H = 0x6f, + OPC2_32_RR_ABSS = 0x1d, + OPC2_32_RR_ABSS_H = 0x7d, + OPC2_32_RR_ADD = 0x00, + OPC2_32_RR_ADD_B = 0x40, + OPC2_32_RR_ADD_H = 0x60, + OPC2_32_RR_ADDC = 0x05, + OPC2_32_RR_ADDS = 0x02, + OPC2_32_RR_ADDS_H = 0x62, + OPC2_32_RR_ADDS_HU = 0x63, + OPC2_32_RR_ADDS_U = 0x03, + OPC2_32_RR_ADDX = 0x04, + OPC2_32_RR_AND_EQ = 0x20, + OPC2_32_RR_AND_GE = 0x24, + OPC2_32_RR_AND_GE_U = 0x25, + OPC2_32_RR_AND_LT = 0x22, + OPC2_32_RR_AND_LT_U = 0x23, + OPC2_32_RR_AND_NE = 0x21, + OPC2_32_RR_EQ = 0x10, + OPC2_32_RR_EQ_B = 0x50, + OPC2_32_RR_EQ_H = 0x70, + OPC2_32_RR_EQ_W = 0x90, + OPC2_32_RR_EQANY_B = 0x56, + OPC2_32_RR_EQANY_H = 0x76, + OPC2_32_RR_GE = 0x14, + OPC2_32_RR_GE_U = 0x15, + OPC2_32_RR_LT = 0x12, + OPC2_32_RR_LT_U = 0x13, + OPC2_32_RR_LT_B = 0x52, + OPC2_32_RR_LT_BU = 0x53, + OPC2_32_RR_LT_H = 0x72, + OPC2_32_RR_LT_HU = 0x73, + OPC2_32_RR_LT_W = 0x92, + OPC2_32_RR_LT_WU = 
0x93, + OPC2_32_RR_MAX = 0x1a, + OPC2_32_RR_MAX_U = 0x1b, + OPC2_32_RR_MAX_B = 0x5a, + OPC2_32_RR_MAX_BU = 0x5b, + OPC2_32_RR_MAX_H = 0x7a, + OPC2_32_RR_MAX_HU = 0x7b, + OPC2_32_RR_MIN = 0x19, + OPC2_32_RR_MIN_U = 0x18, + OPC2_32_RR_MIN_B = 0x58, + OPC2_32_RR_MIN_BU = 0x59, + OPC2_32_RR_MIN_H = 0x78, + OPC2_32_RR_MIN_HU = 0x79, + OPC2_32_RR_MOV = 0x1f, + OPC2_32_RR_NE = 0x11, + OPC2_32_RR_OR_EQ = 0x27, + OPC2_32_RR_OR_GE = 0x2b, + OPC2_32_RR_OR_GE_U = 0x2c, + OPC2_32_RR_OR_LT = 0x29, + OPC2_32_RR_OR_LT_U = 0x2a, + OPC2_32_RR_OR_NE = 0x28, + OPC2_32_RR_SAT_B = 0x5e, + OPC2_32_RR_SAT_BU = 0x5f, + OPC2_32_RR_SAT_H = 0x7e, + OPC2_32_RR_SAT_HU = 0x7f, + OPC2_32_RR_SH_EQ = 0x37, + OPC2_32_RR_SH_GE = 0x3b, + OPC2_32_RR_SH_GE_U = 0x3c, + OPC2_32_RR_SH_LT = 0x39, + OPC2_32_RR_SH_LT_U = 0x3a, + OPC2_32_RR_SH_NE = 0x38, + OPC2_32_RR_SUB = 0x08, + OPC2_32_RR_SUB_B = 0x48, + OPC2_32_RR_SUB_H = 0x68, + OPC2_32_RR_SUBC = 0x0d, + OPC2_32_RR_SUBS = 0x0a, + OPC2_32_RR_SUBS_U = 0x0b, + OPC2_32_RR_SUBS_H = 0x6a, + OPC2_32_RR_SUBS_HU = 0x6b, + OPC2_32_RR_SUBX = 0x0c, + OPC2_32_RR_XOR_EQ = 0x2f, + OPC2_32_RR_XOR_GE = 0x33, + OPC2_32_RR_XOR_GE_U = 0x34, + OPC2_32_RR_XOR_LT = 0x31, + OPC2_32_RR_XOR_LT_U = 0x32, + OPC2_32_RR_XOR_NE = 0x30, +}; +/* OPCM_32_RR_ADRESS */ +enum { + OPC2_32_RR_ADD_A = 0x01, + OPC2_32_RR_ADDSC_A = 0x60, + OPC2_32_RR_ADDSC_AT = 0x62, + OPC2_32_RR_EQ_A = 0x40, + OPC2_32_RR_EQZ = 0x48, + OPC2_32_RR_GE_A = 0x43, + OPC2_32_RR_LT_A = 0x42, + OPC2_32_RR_MOV_A = 0x63, + OPC2_32_RR_MOV_AA = 0x00, + OPC2_32_RR_MOV_D = 0x4c, + OPC2_32_RR_NE_A = 0x41, + OPC2_32_RR_NEZ_A = 0x49, + OPC2_32_RR_SUB_A = 0x02, +}; +/* OPCM_32_RR_FLOAT */ +enum { + OPC2_32_RR_BMERGE = 0x01, + OPC2_32_RR_BSPLIT = 0x09, + OPC2_32_RR_DVINIT_B = 0x5a, + OPC2_32_RR_DVINIT_BU = 0x4a, + OPC2_32_RR_DVINIT_H = 0x3a, + OPC2_32_RR_DVINIT_HU = 0x2a, + OPC2_32_RR_DVINIT = 0x1a, + OPC2_32_RR_DVINIT_U = 0x0a, + OPC2_32_RR_PARITY = 0x02, + OPC2_32_RR_UNPACK = 0x08, +}; +/* OPCM_32_RR_IDIRECT */ +enum { + 
OPC2_32_RR_JI = 0x03, + OPC2_32_RR_JLI = 0x02, + OPC2_32_RR_CALLI = 0x00, +}; +/* + * RR1 Format + */ +/* OPCM_32_RR1_MUL */ +enum { + OPC2_32_RR1_MUL_H_32_LL = 0x1a, + OPC2_32_RR1_MUL_H_32_LU = 0x19, + OPC2_32_RR1_MUL_H_32_UL = 0x18, + OPC2_32_RR1_MUL_H_32_UU = 0x1b, + OPC2_32_RR1_MULM_H_64_LL = 0x1e, + OPC2_32_RR1_MULM_H_64_LU = 0x1d, + OPC2_32_RR1_MULM_H_64_UL = 0x1c, + OPC2_32_RR1_MULM_H_64_UU = 0x1f, + OPC2_32_RR1_MULR_H_16_LL = 0x0e, + OPC2_32_RR1_MULR_H_16_LU = 0x0d, + OPC2_32_RR1_MULR_H_16_UL = 0x0c, + OPC2_32_RR1_MULR_H_16_UU = 0x0f, +}; +/* OPCM_32_RR1_MULQ */ +enum { + OPC2_32_RR1_MUL_Q_32 = 0x02, + OPC2_32_RR1_MUL_Q_64 = 0x1b, + OPC2_32_RR1_MUL_Q_32_L = 0x01, + OPC2_32_RR1_MUL_Q_64_L = 0x19, + OPC2_32_RR1_MUL_Q_32_U = 0x00, + OPC2_32_RR1_MUL_Q_64_U = 0x18, + OPC2_32_RR1_MUL_Q_32_LL = 0x05, + OPC2_32_RR1_MUL_Q_32_UU = 0x04, + OPC2_32_RR1_MULR_Q_32_L = 0x07, + OPC2_32_RR1_MULR_Q_32_U = 0x06, +}; +/* + * RR2 Format + */ +/* OPCM_32_RR2_MUL */ +enum { + OPC2_32_RR2_MUL_32 = 0x0a, + OPC2_32_RR2_MUL_64 = 0x6a, + OPC2_32_RR2_MULS_32 = 0x8a, + OPC2_32_RR2_MUL_U_64 = 0x68, + OPC2_32_RR2_MULS_U_32 = 0x88, +}; +/* + * RRPW Format + */ +/* OPCM_32_RRPW_EXTRACT_INSERT */ +enum { + + OPC2_32_RRPW_EXTR = 0x02, + OPC2_32_RRPW_EXTR_U = 0x03, + OPC2_32_RRPW_IMASK = 0x01, + OPC2_32_RRPW_INSERT = 0x00, +}; +/* + * RRR Format + */ +/* OPCM_32_RRR_COND_SELECT */ +enum { + OPC2_32_RRR_CADD = 0x00, + OPC2_32_RRR_CADDN = 0x01, + OPC2_32_RRR_CSUB = 0x02, + OPC2_32_RRR_CSUBN = 0x03, + OPC2_32_RRR_SEL = 0x04, + OPC2_32_RRR_SELN = 0x05, +}; +/* OPCM_32_RRR_FLOAT */ +enum { + OPC2_32_RRR_DVADJ = 0x0d, + OPC2_32_RRR_DVSTEP = 0x0f, + OPC2_32_RRR_DVSTEP_U = 0x0e, + OPC2_32_RRR_IXMAX = 0x0a, + OPC2_32_RRR_IXMAX_U = 0x0b, + OPC2_32_RRR_IXMIN = 0x08, + OPC2_32_RRR_IXMIN_U = 0x09, + OPC2_32_RRR_PACK = 0x00, +}; +/* + * RRR1 Format + */ +/* OPCM_32_RRR1_MADD */ +enum { + OPC2_32_RRR1_MADD_H_LL = 0x1a, + OPC2_32_RRR1_MADD_H_LU = 0x19, + OPC2_32_RRR1_MADD_H_UL = 0x18, + OPC2_32_RRR1_MADD_H_UU 
= 0x1b, + OPC2_32_RRR1_MADDS_H_LL = 0x3a, + OPC2_32_RRR1_MADDS_H_LU = 0x39, + OPC2_32_RRR1_MADDS_H_UL = 0x38, + OPC2_32_RRR1_MADDS_H_UU = 0x3b, + OPC2_32_RRR1_MADDM_H_LL = 0x1e, + OPC2_32_RRR1_MADDM_H_LU = 0x1d, + OPC2_32_RRR1_MADDM_H_UL = 0x1c, + OPC2_32_RRR1_MADDM_H_UU = 0x1f, + OPC2_32_RRR1_MADDMS_H_LL = 0x3e, + OPC2_32_RRR1_MADDMS_H_LU = 0x3d, + OPC2_32_RRR1_MADDMS_H_UL = 0x3c, + OPC2_32_RRR1_MADDMS_H_UU = 0x3f, + OPC2_32_RRR1_MADDR_H_LL = 0x0e, + OPC2_32_RRR1_MADDR_H_LU = 0x0d, + OPC2_32_RRR1_MADDR_H_UL = 0x0c, + OPC2_32_RRR1_MADDR_H_UU = 0x0f, + OPC2_32_RRR1_MADDRS_H_LL = 0x2e, + OPC2_32_RRR1_MADDRS_H_LU = 0x2d, + OPC2_32_RRR1_MADDRS_H_UL = 0x2c, + OPC2_32_RRR1_MADDRS_H_UU = 0x2f, +}; +/* OPCM_32_RRR1_MADDQ_H */ +enum { + OPC2_32_RRR1_MADD_Q_32 = 0x02, + OPC2_32_RRR1_MADD_Q_64 = 0x1b, + OPC2_32_RRR1_MADD_Q_32_L = 0x01, + OPC2_32_RRR1_MADD_Q_64_L = 0x19, + OPC2_32_RRR1_MADD_Q_32_U = 0x00, + OPC2_32_RRR1_MADD_Q_64_U = 0x18, + OPC2_32_RRR1_MADD_Q_32_LL = 0x05, + OPC2_32_RRR1_MADD_Q_64_LL = 0x1d, + OPC2_32_RRR1_MADD_Q_32_UU = 0x04, + OPC2_32_RRR1_MADD_Q_64_UU = 0x1c, + OPC2_32_RRR1_MADDS_Q_32 = 0x22, + OPC2_32_RRR1_MADDS_Q_64 = 0x3b, + OPC2_32_RRR1_MADDS_Q_32_L = 0x21, + OPC2_32_RRR1_MADDS_Q_64_L = 0x39, + OPC2_32_RRR1_MADDS_Q_32_U = 0x20, + OPC2_32_RRR1_MADDS_Q_64_U = 0x38, + OPC2_32_RRR1_MADDS_Q_32_LL = 0x25, + OPC2_32_RRR1_MADDS_Q_64_LL = 0x3d, + OPC2_32_RRR1_MADDS_Q_32_UU = 0x24, + OPC2_32_RRR1_MADDS_Q_64_UU = 0x3c, + OPC2_32_RRR1_MADDR_H_16_UL = 0x1e, + OPC2_32_RRR1_MADDRS_H_16_UL = 0x3e, + OPC2_32_RRR1_MADDR_Q_32_L = 0x07, + OPC2_32_RRR1_MADDR_Q_32_U = 0x06, + OPC2_32_RRR1_MADDRS_Q_32_LL = 0x27, + OPC2_32_RRR1_MADDRS_Q_32_UU = 0x26, +}; +/* OPCM_32_RRR1_MADDSU_H */ +enum { + OPC2_32_RRR1_MADDSU_H_32_LL = 0x1a, + OPC2_32_RRR1_MADDSU_H_32_LU = 0x19, + OPC2_32_RRR1_MADDSU_H_32_UL = 0x18, + OPC2_32_RRR1_MADDSU_H_32_UU = 0x1b, + OPC2_32_RRR1_MADDSUS_H_32_LL = 0x3a, + OPC2_32_RRR1_MADDSUS_H_32_LU = 0x39, + OPC2_32_RRR1_MADDSUS_H_32_UL = 0x38, + 
OPC2_32_RRR1_MADDSUS_H_32_UU = 0x3b, + OPC2_32_RRR1_MADDSUM_H_64_LL = 0x1e, + OPC2_32_RRR1_MADDSUM_H_64_LU = 0x1d, + OPC2_32_RRR1_MADDSUM_H_64_UL = 0x1c, + OPC2_32_RRR1_MADDSUM_H_64_UU = 0x1f, + OPC2_32_RRR1_MADDSUMS_H_64_LL = 0x3e, + OPC2_32_RRR1_MADDSUMS_H_64_LU = 0x3d, + OPC2_32_RRR1_MADDSUMS_H_64_UL = 0x3c, + OPC2_32_RRR1_MADDSUMS_H_64_UU = 0x3f, + OPC2_32_RRR1_MADDSUR_H_16_LL = 0x0e, + OPC2_32_RRR1_MADDSUR_H_16_LU = 0x0d, + OPC2_32_RRR1_MADDSUR_H_16_UL = 0x0c, + OPC2_32_RRR1_MADDSUR_H_16_UU = 0x0f, + OPC2_32_RRR1_MADDSURS_H_16_LL = 0x2e, + OPC2_32_RRR1_MADDSURS_H_16_LU = 0x2d, + OPC2_32_RRR1_MADDSURS_H_16_UL = 0x2c, + OPC2_32_RRR1_MADDSURS_H_16_UU = 0x2f, +}; +/* OPCM_32_RRR1_MSUB_H */ +enum { + OPC2_32_RRR1_MSUB_H_32_LL = 0x1a, + OPC2_32_RRR1_MSUB_H_32_LU = 0x19, + OPC2_32_RRR1_MSUB_H_32_UL = 0x18, + OPC2_32_RRR1_MSUB_H_32_UU = 0x1b, + OPC2_32_RRR1_MSUBS_H_32_LL = 0x3a, + OPC2_32_RRR1_MSUBS_H_32_LU = 0x39, + OPC2_32_RRR1_MSUBS_H_32_UL = 0x38, + OPC2_32_RRR1_MSUBS_H_32_UU = 0x3b, + OPC2_32_RRR1_MSUBM_H_64_LL = 0x1e, + OPC2_32_RRR1_MSUBM_H_64_LU = 0x1d, + OPC2_32_RRR1_MSUBM_H_64_UL = 0x1c, + OPC2_32_RRR1_MSUBM_H_64_UU = 0x1f, + OPC2_32_RRR1_MSUBMS_H_64_LL = 0x3e, + OPC2_32_RRR1_MSUBMS_H_64_LU = 0x3d, + OPC2_32_RRR1_MSUBMS_H_64_UL = 0x3c, + OPC2_32_RRR1_MSUBMS_H_64_UU = 0x3f, + OPC2_32_RRR1_MSUBR_H_16_LL = 0x0e, + OPC2_32_RRR1_MSUBR_H_16_LU = 0x0d, + OPC2_32_RRR1_MSUBR_H_16_UL = 0x0c, + OPC2_32_RRR1_MSUBR_H_16_UU = 0x0f, + OPC2_32_RRR1_MSUBRS_H_16_LL = 0x2e, + OPC2_32_RRR1_MSUBRS_H_16_LU = 0x2d, + OPC2_32_RRR1_MSUBRS_H_16_UL = 0x2c, + OPC2_32_RRR1_MSUBRS_H_16_UU = 0x2f, +}; +/* OPCM_32_RRR1_MSUB_Q */ +enum { + OPC2_32_RRR1_MSUB_Q_32 = 0x02, + OPC2_32_RRR1_MSUB_Q_64 = 0x1b, + OPC2_32_RRR1_MSUB_Q_32_L = 0x01, + OPC2_32_RRR1_MSUB_Q_64_L = 0x19, + OPC2_32_RRR1_MSUB_Q_32_U = 0x00, + OPC2_32_RRR1_MSUB_Q_64_U = 0x18, + OPC2_32_RRR1_MSUB_Q_32_LL = 0x05, + OPC2_32_RRR1_MSUB_Q_64_LL = 0x1d, + OPC2_32_RRR1_MSUB_Q_32_UU = 0x04, + OPC2_32_RRR1_MSUB_Q_64_UU = 0x1c, + 
OPC2_32_RRR1_MSUBS_Q_32 = 0x22, + OPC2_32_RRR1_MSUBS_Q_64 = 0x3b, + OPC2_32_RRR1_MSUBS_Q_32_L = 0x21, + OPC2_32_RRR1_MSUBS_Q_64_L = 0x39, + OPC2_32_RRR1_MSUBS_Q_32_U = 0x20, + OPC2_32_RRR1_MSUBS_Q_64_U = 0x38, + OPC2_32_RRR1_MSUBS_Q_32_LL = 0x25, + OPC2_32_RRR1_MSUBS_Q_64_LL = 0x3d, + OPC2_32_RRR1_MSUBS_Q_32_UU = 0x24, + OPC2_32_RRR1_MSUBS_Q_64_UU = 0x3c, + OPC2_32_RRR1_MSUBR_H_32_UL = 0x1e, + OPC2_32_RRR1_MSUBRS_H_32_UL = 0x3e, + OPC2_32_RRR1_MSUBR_Q_32_LL = 0x07, + OPC2_32_RRR1_MSUBR_Q_32_UU = 0x06, + OPC2_32_RRR1_MSUBRS_Q_32_LL = 0x27, + OPC2_32_RRR1_MSUBRS_Q_32_UU = 0x26, +}; +/* OPCM_32_RRR1_MSUBADS_H */ +enum { + OPC2_32_RRR1_MSUBAD_H_32_LL = 0x1a, + OPC2_32_RRR1_MSUBAD_H_32_LU = 0x19, + OPC2_32_RRR1_MSUBAD_H_32_UL = 0x18, + OPC2_32_RRR1_MSUBAD_H_32_UU = 0x1b, + OPC2_32_RRR1_MSUBADS_H_32_LL = 0x3a, + OPC2_32_RRR1_MSUBADS_H_32_LU = 0x39, + OPC2_32_RRR1_MSUBADS_H_32_UL = 0x38, + OPC2_32_RRR1_MSUBADS_H_32_UU = 0x3b, + OPC2_32_RRR1_MSUBADM_H_64_LL = 0x1e, + OPC2_32_RRR1_MSUBADM_H_64_LU = 0x1d, + OPC2_32_RRR1_MSUBADM_H_64_UL = 0x1c, + OPC2_32_RRR1_MSUBADM_H_64_UU = 0x1f, + OPC2_32_RRR1_MSUBADMS_H_64_LL = 0x3e, + OPC2_32_RRR1_MSUBADMS_H_64_LU = 0x3d, + OPC2_32_RRR1_MSUBADMS_H_64_UL = 0x3c, + OPC2_32_RRR1_MSUBADMS_H_16_UU = 0x3f, + OPC2_32_RRR1_MSUBADR_H_16_LL = 0x0e, + OPC2_32_RRR1_MSUBADR_H_16_LU = 0x0d, + OPC2_32_RRR1_MSUBADR_H_16_UL = 0x0c, + OPC2_32_RRR1_MSUBADR_H_16_UU = 0x0f, + OPC2_32_RRR1_MSUBADRS_H_16_LL = 0x2e, + OPC2_32_RRR1_MSUBADRS_H_16_LU = 0x2d, + OPC2_32_RRR1_MSUBADRS_H_16_UL = 0x2c, + OPC2_32_RRR1_MSUBADRS_H_16_UU = 0x2f, +}; +/* + * RRR2 Format + */ +/* OPCM_32_RRR2_MADD */ +enum { + OPC2_32_RRR2_MADD_32 = 0x0a, + OPC2_32_RRR2_MADD_64 = 0x6a, + OPC2_32_RRR2_MADDS_32 = 0x8a, + OPC2_32_RRR2_MADDS_64 = 0xea, + OPC2_32_RRR2_MADD_U_32 = 0x68, + OPC2_32_RRR2_MADDS_U_32 = 0x88, + OPC2_32_RRR2_MADDS_U_64 = 0xe8, +}; +/* OPCM_32_RRR2_MSUB */ +enum { + OPC2_32_RRR2_MSUB_32 = 0x0a, + OPC2_32_RRR2_MSUB_64 = 0x6a, + OPC2_32_RRR2_MSUBS_32 = 0x8a, + 
OPC2_32_RRR2_MSUBS_64 = 0xea, + OPC2_32_RRR2_MSUB_U_64 = 0x68, + OPC2_32_RRR2_MSUBS_U_32 = 0x88, + OPC2_32_RRR2_MSUBS_U_64 = 0xe8, +}; +/* + * RRRR Format + */ +/* OPCM_32_RRRR_EXTRACT_INSERT */ +enum { + OPC2_32_RRRR_DEXTR = 0x04, + OPC2_32_RRRR_EXTR = 0x02, + OPC2_32_RRRR_EXTR_U = 0x03, + OPC2_32_RRRR_INSERT = 0x00, +}; +/* + * RRRW Format + */ +/* OPCM_32_RRRW_EXTRACT_INSERT */ +enum { + OPC2_32_RRRW_EXTR = 0x02, + OPC2_32_RRRW_EXTR_U = 0x03, + OPC2_32_RRRW_IMASK = 0x01, + OPC2_32_RRRW_INSERT = 0x00, +}; +/* + * SYS Format + */ +/* OPCM_32_SYS_INTERRUPTS */ +enum { + OPC2_32_SYS_DEBUG = 0x04, + OPC2_32_SYS_DISABLE = 0x0d, + OPC2_32_SYS_DSYNC = 0x12, + OPC2_32_SYS_ENABLE = 0x0c, + OPC2_32_SYS_ISYNC = 0x13, + OPC2_32_SYS_NOP = 0x00, + OPC2_32_SYS_RET = 0x06, + OPC2_32_SYS_RFE = 0x07, + OPC2_32_SYS_RFM = 0x05, + OPC2_32_SYS_RSLCX = 0x09, + OPC2_32_SYS_SVLCX = 0x08, + OPC2_32_SYS_TRAPSV = 0x15, + OPC2_32_SYS_TRAPV = 0x14, +}; diff --git a/tests/qemu-iotests/100 b/tests/qemu-iotests/100 new file mode 100755 index 0000000000..9124aba760 --- /dev/null +++ b/tests/qemu-iotests/100 @@ -0,0 +1,134 @@ +#!/bin/bash +# +# Test simple read/write using plain bdrv_read/bdrv_write +# +# Copyright (C) 2014 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +# creator +owner=stefanha@redhat.com + +seq=`basename $0` +echo "QA output created by $seq" + +here=`pwd` +tmp=/tmp/$$ +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. ./common.filter + +_supported_fmt generic +_supported_proto generic +_supported_os Linux + + +size=128M + +echo +echo "== Single request ==" +_make_test_img $size +$QEMU_IO -c "multiwrite 0 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== verify pattern ==" +$QEMU_IO -c "read -P 0xcd 0 4k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== Sequential requests ==" +_make_test_img $size +$QEMU_IO -c "multiwrite 0 4k ; 4k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== verify pattern ==" +$QEMU_IO -c "read -P 0xcd 0 4k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0xce 4k 4k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0 8k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== Superset overlapping requests ==" +_make_test_img $size +$QEMU_IO -c "multiwrite 0 4k ; 1k 2k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== verify pattern ==" +# Order of overlapping in-flight requests is not guaranteed so we cannot verify +# [1k, 3k) since it could have either pattern 0xcd or 0xce. +$QEMU_IO -c "read -P 0xcd 0 1k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0xcd 3k 1k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== Subset overlapping requests ==" +_make_test_img $size +$QEMU_IO -c "multiwrite 1k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== verify pattern ==" +# Order of overlapping in-flight requests is not guaranteed so we cannot verify +# [1k, 3k) since it could have either pattern 0xcd or 0xce. 
+$QEMU_IO -c "read -P 0xce 0 1k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0xce 3k 1k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== Head overlapping requests ==" +_make_test_img $size +$QEMU_IO -c "multiwrite 0k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== verify pattern ==" +# Order of overlapping in-flight requests is not guaranteed so we cannot verify +# [0k, 2k) since it could have either pattern 0xcd or 0xce. +$QEMU_IO -c "read -P 0xce 2k 2k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== Tail overlapping requests ==" +_make_test_img $size +$QEMU_IO -c "multiwrite 2k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== verify pattern ==" +# Order of overlapping in-flight requests is not guaranteed so we cannot verify +# [2k, 4k) since it could have either pattern 0xcd or 0xce. +$QEMU_IO -c "read -P 0xce 0k 2k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== Disjoint requests ==" +_make_test_img $size +$QEMU_IO -c "multiwrite 0 4k ; 64k 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "== verify pattern ==" +$QEMU_IO -c "read -P 0xcd 0 4k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0 4k 60k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0xce 64k 4k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 0 68k 4k" "$TEST_IMG" | _filter_qemu_io + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/100.out b/tests/qemu-iotests/100.out new file mode 100644 index 0000000000..2d6e9f0a7d --- /dev/null +++ b/tests/qemu-iotests/100.out @@ -0,0 +1,89 @@ +QA output created by 100 + +== Single request == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 +wrote 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== verify pattern == +read 4096/4096 bytes at 
offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 4096 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== Sequential requests == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 +wrote 8192/8192 bytes at offset 0 +8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== verify pattern == +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 4096 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 8192 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== Superset overlapping requests == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 +wrote 6144/6144 bytes at offset 0 +6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== verify pattern == +read 1024/1024 bytes at offset 0 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 1024/1024 bytes at offset 3072 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 4096 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== Subset overlapping requests == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 +wrote 6144/6144 bytes at offset 1024 +6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== verify pattern == +read 1024/1024 bytes at offset 0 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 1024/1024 bytes at offset 3072 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 4096 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== Head overlapping requests == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 +wrote 6144/6144 bytes at offset 0 +6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== verify pattern == +read 2048/2048 bytes at offset 2048 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 4096 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and 
XXX ops/sec) + +== Tail overlapping requests == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 +wrote 6144/6144 bytes at offset 2048 +6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== verify pattern == +read 2048/2048 bytes at offset 0 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 4096 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== Disjoint requests == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 +wrote 8192/8192 bytes at offset 0 +8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== verify pattern == +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 61440/61440 bytes at offset 4096 +60 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 65536 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 69632 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +*** done diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index 2803d68d62..0920b28db4 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -101,5 +101,6 @@ 092 rw auto quick 095 rw auto quick 099 rw auto quick +100 rw auto quick 101 rw auto quick 103 rw auto quick diff --git a/tests/test-aio.c b/tests/test-aio.c index f12b6e0ae8..c6a8713e83 100644 --- a/tests/test-aio.c +++ b/tests/test-aio.c @@ -57,8 +57,6 @@ static void bh_test_cb(void *opaque) } } -#if !defined(_WIN32) - static void timer_test_cb(void *opaque) { TimerTestData *data = opaque; @@ -68,12 +66,10 @@ static void timer_test_cb(void *opaque) } } -static void dummy_io_handler_read(void *opaque) +static void dummy_io_handler_read(EventNotifier *e) { } -#endif /* !_WIN32 */ - static void bh_delete_cb(void *opaque) { BHTestData *data = opaque; @@ -428,24 +424,18 @@ static void test_wait_event_notifier_noflush(void) event_notifier_cleanup(&data.e); } -#if !defined(_WIN32) - static void 
test_timer_schedule(void) { TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL, .max = 2, .clock_type = QEMU_CLOCK_VIRTUAL }; - int pipefd[2]; + EventNotifier e; /* aio_poll will not block to wait for timers to complete unless it has * an fd to wait on. Fixing this breaks other tests. So create a dummy one. */ - g_assert(!qemu_pipe(pipefd)); - qemu_set_nonblock(pipefd[0]); - qemu_set_nonblock(pipefd[1]); - - aio_set_fd_handler(ctx, pipefd[0], - dummy_io_handler_read, NULL, NULL); + event_notifier_init(&e, false); + aio_set_event_notifier(ctx, &e, dummy_io_handler_read); aio_poll(ctx, false); aio_timer_init(ctx, &data.timer, data.clock_type, @@ -484,15 +474,12 @@ static void test_timer_schedule(void) g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 2); - aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL); - close(pipefd[0]); - close(pipefd[1]); + aio_set_event_notifier(ctx, &e, NULL); + event_notifier_cleanup(&e); timer_del(&data.timer); } -#endif /* !_WIN32 */ - /* Now the same tests, using the context as a GSource. They are * very similar to the ones above, with g_main_context_iteration * replacing aio_poll. However: @@ -775,25 +762,19 @@ static void test_source_wait_event_notifier_noflush(void) event_notifier_cleanup(&data.e); } -#if !defined(_WIN32) - static void test_source_timer_schedule(void) { TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL, .max = 2, .clock_type = QEMU_CLOCK_VIRTUAL }; - int pipefd[2]; + EventNotifier e; int64_t expiry; /* aio_poll will not block to wait for timers to complete unless it has * an fd to wait on. Fixing this breaks other tests. So create a dummy one. 
*/ - g_assert(!qemu_pipe(pipefd)); - qemu_set_nonblock(pipefd[0]); - qemu_set_nonblock(pipefd[1]); - - aio_set_fd_handler(ctx, pipefd[0], - dummy_io_handler_read, NULL, NULL); + event_notifier_init(&e, false); + aio_set_event_notifier(ctx, &e, dummy_io_handler_read); do {} while (g_main_context_iteration(NULL, false)); aio_timer_init(ctx, &data.timer, data.clock_type, @@ -818,15 +799,12 @@ static void test_source_timer_schedule(void) g_assert_cmpint(data.n, ==, 2); g_assert(qemu_clock_get_ns(data.clock_type) > expiry); - aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL); - close(pipefd[0]); - close(pipefd[1]); + aio_set_event_notifier(ctx, &e, NULL); + event_notifier_cleanup(&e); timer_del(&data.timer); } -#endif /* !_WIN32 */ - /* End of tests. */ @@ -857,9 +835,7 @@ int main(int argc, char **argv) g_test_add_func("/aio/event/wait", test_wait_event_notifier); g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush); g_test_add_func("/aio/event/flush", test_flush_event_notifier); -#if !defined(_WIN32) g_test_add_func("/aio/timer/schedule", test_timer_schedule); -#endif g_test_add_func("/aio-gsource/notify", test_source_notify); g_test_add_func("/aio-gsource/flush", test_source_flush); @@ -874,8 +850,6 @@ int main(int argc, char **argv) g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier); g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush); g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier); -#if !defined(_WIN32) g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule); -#endif return g_test_run(); } |