88 files changed, 7768 insertions(+), 5147 deletions(-)
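The block-layer part of this series converts flush and discard to coroutines: bdrv_flush()/bdrv_discard() become thin wrappers that enter a coroutine, and the new bdrv_co_flush() tries, in order, a driver's native .bdrv_co_flush, an emulation over the legacy .bdrv_aio_flush callback, and finally a silent no-op for drivers that cannot flush at all. The toy program below only models that dispatch order; it is not QEMU code, and every name in it (toy_driver, toy_state, fake_*) is invented for illustration. The actual implementation appears in the block.c hunks further down.

```c
/* Toy model of the flush dispatch order introduced by this series:
 * prefer a native coroutine flush, fall back to the legacy AIO flush,
 * and treat a driver with neither callback as "nothing to flush".
 * Illustrative only -- the names here are not QEMU identifiers.
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_state;

struct toy_driver {
    const char *name;
    int (*co_flush)(struct toy_state *s);   /* stands in for .bdrv_co_flush  */
    int (*aio_flush)(struct toy_state *s);  /* stands in for .bdrv_aio_flush */
};

struct toy_state {
    const struct toy_driver *drv;
    bool no_flush;                          /* models BDRV_O_NO_FLUSH        */
};

static int toy_flush(struct toy_state *s)
{
    if (s->no_flush || !s->drv) {
        return 0;
    } else if (s->drv->co_flush) {
        /* First choice: the driver flushes natively in coroutine context. */
        return s->drv->co_flush(s);
    } else if (s->drv->aio_flush) {
        /* In QEMU this path submits the AIO request and yields the coroutine
         * until the completion callback resumes it; here we just call it. */
        return s->drv->aio_flush(s);
    }
    /* Writethrough/unsafe drivers with no flush callback: report success. */
    return 0;
}

static int fake_co_flush(struct toy_state *s)  { puts("co_flush");  return 0; }
static int fake_aio_flush(struct toy_state *s) { puts("aio_flush"); return 0; }

int main(void)
{
    struct toy_driver newstyle = { "new",  fake_co_flush, NULL };
    struct toy_driver oldstyle = { "old",  NULL, fake_aio_flush };
    struct toy_driver none     = { "none", NULL, NULL };

    struct toy_state a = { &newstyle, false };
    struct toy_state b = { &oldstyle, false };
    struct toy_state c = { &none,     false };

    printf("%d %d %d\n", toy_flush(&a), toy_flush(&b), toy_flush(&c));
    return 0;
}
```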
diff --git a/.gitignore b/.gitignore index 59c343c414..6d2acab09a 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,7 @@ libdis* libhw32 libhw64 libuser +linux-headers/asm qapi-generated qemu-doc.html qemu-tech.html diff --git a/MAINTAINERS b/MAINTAINERS index 2b4c5d727e..4535eeb61f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -118,7 +118,7 @@ F: target-i386/ Xtensa M: Max Filippov <jcmvbkbc@gmail.com> -W: http://kkv.spb.su/doku.php?id=etc:users:jcmvbkbc:qemu-target-xtensa +W: http://wiki.osll.spb.ru/doku.php?id=etc:users:jcmvbkbc:qemu-target-xtensa S: Maintained F: target-xtensa/ @@ -348,10 +348,15 @@ F: hw/pc.[ch] hw/pc_piix.c Xtensa Machines --------------- -DC232B +sim M: Max Filippov <jcmvbkbc@gmail.com> S: Maintained -F: hw/xtensa_dc232b.c +F: hw/xtensa_sim.c + +Avnet LX60 +M: Max Filippov <jcmvbkbc@gmail.com> +S: Maintained +F: hw/xtensa_lx60.c Devices ------- diff --git a/Makefile.target b/Makefile.target index 1e90df7e70..530c1d1e63 100644 --- a/Makefile.target +++ b/Makefile.target @@ -78,7 +78,8 @@ libobj-$(TARGET_SPARC64) += vis_helper.o libobj-$(CONFIG_NEED_MMU) += mmu.o libobj-$(TARGET_ARM) += neon_helper.o iwmmxt_helper.o ifeq ($(TARGET_BASE_ARCH), sparc) -libobj-y += fop_helper.o cc_helper.o win_helper.o cpu_init.o +libobj-y += fop_helper.o cc_helper.o win_helper.o mmu_helper.o ldst_helper.o +libobj-y += cpu_init.o endif libobj-$(TARGET_SPARC) += int32_helper.o libobj-$(TARGET_SPARC64) += int64_helper.o @@ -97,7 +98,7 @@ tcg/tcg.o: cpu.h # HELPER_CFLAGS is used for all the code compiled with static register # variables -op_helper.o win_helper.o user-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +op_helper.o ldst_helper.o user-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS) # Note: this is a workaround. The real fix is to avoid compiling # cpu_signal_handler() in user-exec.c. 
@@ -361,6 +362,7 @@ obj-arm-y += syborg_virtio.o obj-arm-y += vexpress.o obj-arm-y += strongarm.o obj-arm-y += collie.o +obj-arm-y += pl041.o lm4549.o obj-sh4-y = shix.o r2d.o sh7750.o sh7750_regnames.o tc58128.o obj-sh4-y += sh_timer.o sh_serial.o sh_intc.o sh_pci.o sm501.o @@ -53,17 +53,12 @@ static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque); -static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs, - BlockDriverCompletionFunc *cb, void *opaque); -static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs, - BlockDriverCompletionFunc *cb, void *opaque); static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov); static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov); -static int coroutine_fn bdrv_co_flush_em(BlockDriverState *bs); static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, @@ -203,9 +198,6 @@ void bdrv_register(BlockDriver *bdrv) } } - if (!bdrv->bdrv_aio_flush) - bdrv->bdrv_aio_flush = bdrv_aio_flush_em; - QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list); } @@ -1027,11 +1019,6 @@ static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, nb_sectors * BDRV_SECTOR_SIZE); } -static inline bool bdrv_has_async_flush(BlockDriver *drv) -{ - return drv->bdrv_aio_flush != bdrv_aio_flush_em; -} - typedef struct RwCo { BlockDriverState *bs; int64_t sector_num; @@ -1759,33 +1746,6 @@ const char *bdrv_get_device_name(BlockDriverState *bs) return bs->device_name; } -int bdrv_flush(BlockDriverState *bs) -{ - if (bs->open_flags & BDRV_O_NO_FLUSH) { - return 0; - } - - if (bs->drv && bdrv_has_async_flush(bs->drv) && qemu_in_coroutine()) { - return bdrv_co_flush_em(bs); - } - - if (bs->drv && bs->drv->bdrv_flush) { - return bs->drv->bdrv_flush(bs); - } - - /* - * Some block drivers always operate in either writethrough or unsafe mode - * and don't support bdrv_flush therefore. Usually qemu doesn't know how - * the server works (because the behaviour is hardcoded or depends on - * server-side configuration), so we can't ensure that everything is safe - * on disk. Returning an error doesn't work because that would break guests - * even if the server operates in writethrough mode. - * - * Let's hope the user knows what he's doing. - */ - return 0; -} - void bdrv_flush_all(void) { BlockDriverState *bs; @@ -1808,17 +1768,6 @@ int bdrv_has_zero_init(BlockDriverState *bs) return 1; } -int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) -{ - if (!bs->drv) { - return -ENOMEDIUM; - } - if (!bs->drv->bdrv_discard) { - return 0; - } - return bs->drv->bdrv_discard(bs, sector_num, nb_sectors); -} - /* * Returns true iff the specified sector is present in the disk image. 
Drivers * not implementing the functionality are assumed to not support backing files, @@ -2610,22 +2559,6 @@ fail: return -1; } -BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, - BlockDriverCompletionFunc *cb, void *opaque) -{ - BlockDriver *drv = bs->drv; - - trace_bdrv_aio_flush(bs, opaque); - - if (bs->open_flags & BDRV_O_NO_FLUSH) { - return bdrv_aio_noop_em(bs, cb, opaque); - } - - if (!drv) - return NULL; - return drv->bdrv_aio_flush(bs, cb, opaque); -} - void bdrv_aio_cancel(BlockDriverAIOCB *acb) { acb->pool->cancel(acb); @@ -2735,7 +2668,7 @@ static AIOPool bdrv_em_co_aio_pool = { .cancel = bdrv_aio_co_cancel_em, }; -static void bdrv_co_rw_bh(void *opaque) +static void bdrv_co_em_bh(void *opaque) { BlockDriverAIOCBCoroutine *acb = opaque; @@ -2758,7 +2691,7 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque) acb->req.nb_sectors, acb->req.qiov); } - acb->bh = qemu_bh_new(bdrv_co_rw_bh, acb); + acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); qemu_bh_schedule(acb->bh); } @@ -2785,41 +2718,56 @@ static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, return &acb->common; } -static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs, +static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) +{ + BlockDriverAIOCBCoroutine *acb = opaque; + BlockDriverState *bs = acb->common.bs; + + acb->req.error = bdrv_co_flush(bs); + acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); + qemu_bh_schedule(acb->bh); +} + +BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, BlockDriverCompletionFunc *cb, void *opaque) { - BlockDriverAIOCBSync *acb; + trace_bdrv_aio_flush(bs, opaque); - acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque); - acb->is_write = 1; /* don't bounce in the completion hadler */ - acb->qiov = NULL; - acb->bounce = NULL; - acb->ret = 0; + Coroutine *co; + BlockDriverAIOCBCoroutine *acb; - if (!acb->bh) - acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb); + acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque); + co = qemu_coroutine_create(bdrv_aio_flush_co_entry); + qemu_coroutine_enter(co, acb); - bdrv_flush(bs); - qemu_bh_schedule(acb->bh); return &acb->common; } -static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs, +static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) +{ + BlockDriverAIOCBCoroutine *acb = opaque; + BlockDriverState *bs = acb->common.bs; + + acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); + acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); + qemu_bh_schedule(acb->bh); +} + +BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { - BlockDriverAIOCBSync *acb; + Coroutine *co; + BlockDriverAIOCBCoroutine *acb; - acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque); - acb->is_write = 1; /* don't bounce in the completion handler */ - acb->qiov = NULL; - acb->bounce = NULL; - acb->ret = 0; + trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); - if (!acb->bh) { - acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb); - } + acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque); + acb->req.sector = sector_num; + acb->req.nb_sectors = nb_sectors; + co = qemu_coroutine_create(bdrv_aio_discard_co_entry); + qemu_coroutine_enter(co, acb); - qemu_bh_schedule(acb->bh); return &acb->common; } @@ -2916,19 +2864,131 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); } -static int coroutine_fn bdrv_co_flush_em(BlockDriverState *bs) +static void coroutine_fn 
bdrv_flush_co_entry(void *opaque) { - CoroutineIOCompletion co = { - .coroutine = qemu_coroutine_self(), + RwCo *rwco = opaque; + + rwco->ret = bdrv_co_flush(rwco->bs); +} + +int coroutine_fn bdrv_co_flush(BlockDriverState *bs) +{ + if (bs->open_flags & BDRV_O_NO_FLUSH) { + return 0; + } else if (!bs->drv) { + return 0; + } else if (bs->drv->bdrv_co_flush) { + return bs->drv->bdrv_co_flush(bs); + } else if (bs->drv->bdrv_aio_flush) { + BlockDriverAIOCB *acb; + CoroutineIOCompletion co = { + .coroutine = qemu_coroutine_self(), + }; + + acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); + if (acb == NULL) { + return -EIO; + } else { + qemu_coroutine_yield(); + return co.ret; + } + } else { + /* + * Some block drivers always operate in either writethrough or unsafe + * mode and don't support bdrv_flush therefore. Usually qemu doesn't + * know how the server works (because the behaviour is hardcoded or + * depends on server-side configuration), so we can't ensure that + * everything is safe on disk. Returning an error doesn't work because + * that would break guests even if the server operates in writethrough + * mode. + * + * Let's hope the user knows what he's doing. + */ + return 0; + } +} + +int bdrv_flush(BlockDriverState *bs) +{ + Coroutine *co; + RwCo rwco = { + .bs = bs, + .ret = NOT_DONE, }; - BlockDriverAIOCB *acb; - acb = bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); - if (!acb) { + if (qemu_in_coroutine()) { + /* Fast-path if already in coroutine context */ + bdrv_flush_co_entry(&rwco); + } else { + co = qemu_coroutine_create(bdrv_flush_co_entry); + qemu_coroutine_enter(co, &rwco); + while (rwco.ret == NOT_DONE) { + qemu_aio_wait(); + } + } + + return rwco.ret; +} + +static void coroutine_fn bdrv_discard_co_entry(void *opaque) +{ + RwCo *rwco = opaque; + + rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); +} + +int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, + int nb_sectors) +{ + if (!bs->drv) { + return -ENOMEDIUM; + } else if (bdrv_check_request(bs, sector_num, nb_sectors)) { return -EIO; + } else if (bs->read_only) { + return -EROFS; + } else if (bs->drv->bdrv_co_discard) { + return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors); + } else if (bs->drv->bdrv_aio_discard) { + BlockDriverAIOCB *acb; + CoroutineIOCompletion co = { + .coroutine = qemu_coroutine_self(), + }; + + acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, + bdrv_co_io_em_complete, &co); + if (acb == NULL) { + return -EIO; + } else { + qemu_coroutine_yield(); + return co.ret; + } + } else { + return 0; } - qemu_coroutine_yield(); - return co.ret; +} + +int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) +{ + Coroutine *co; + RwCo rwco = { + .bs = bs, + .sector_num = sector_num, + .nb_sectors = nb_sectors, + .ret = NOT_DONE, + }; + + if (qemu_in_coroutine()) { + /* Fast-path if already in coroutine context */ + bdrv_discard_co_entry(&rwco); + } else { + co = qemu_coroutine_create(bdrv_discard_co_entry); + qemu_coroutine_enter(co, &rwco); + while (rwco.ret == NOT_DONE) { + qemu_aio_wait(); + } + } + + return rwco.ret; } /**************************************************************/ @@ -166,6 +166,9 @@ BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, BlockDriverCompletionFunc *cb, void *opaque); BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, BlockDriverCompletionFunc *cb, void *opaque); +BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs, + int64_t sector_num, int 
nb_sectors, + BlockDriverCompletionFunc *cb, void *opaque); void bdrv_aio_cancel(BlockDriverAIOCB *acb); typedef struct BlockRequest { @@ -191,10 +194,12 @@ BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, /* Ensure contents are flushed to disk. */ int bdrv_flush(BlockDriverState *bs); +int coroutine_fn bdrv_co_flush(BlockDriverState *bs); void bdrv_flush_all(void); void bdrv_close_all(void); int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors); +int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors); int bdrv_has_zero_init(BlockDriverState *bs); int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum); diff --git a/block/blkdebug.c b/block/blkdebug.c index b3c5d42cef..9b885359e4 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -397,11 +397,6 @@ static void blkdebug_close(BlockDriverState *bs) } } -static int blkdebug_flush(BlockDriverState *bs) -{ - return bdrv_flush(bs->file); -} - static BlockDriverAIOCB *blkdebug_aio_flush(BlockDriverState *bs, BlockDriverCompletionFunc *cb, void *opaque) { @@ -454,7 +449,6 @@ static BlockDriver bdrv_blkdebug = { .bdrv_file_open = blkdebug_open, .bdrv_close = blkdebug_close, - .bdrv_flush = blkdebug_flush, .bdrv_aio_readv = blkdebug_aio_readv, .bdrv_aio_writev = blkdebug_aio_writev, diff --git a/block/blkverify.c b/block/blkverify.c index c7522b4093..483f3b3cfe 100644 --- a/block/blkverify.c +++ b/block/blkverify.c @@ -116,14 +116,6 @@ static void blkverify_close(BlockDriverState *bs) s->test_file = NULL; } -static int blkverify_flush(BlockDriverState *bs) -{ - BDRVBlkverifyState *s = bs->opaque; - - /* Only flush test file, the raw file is not important */ - return bdrv_flush(s->test_file); -} - static int64_t blkverify_getlength(BlockDriverState *bs) { BDRVBlkverifyState *s = bs->opaque; @@ -368,7 +360,6 @@ static BlockDriver bdrv_blkverify = { .bdrv_file_open = blkverify_open, .bdrv_close = blkverify_close, - .bdrv_flush = blkverify_flush, .bdrv_aio_readv = blkverify_aio_readv, .bdrv_aio_writev = blkverify_aio_writev, diff --git a/block/bochs.c b/block/bochs.c index 3c2f8d1b12..ab7944dc43 100644 --- a/block/bochs.c +++ b/block/bochs.c @@ -80,6 +80,7 @@ struct bochs_header { }; typedef struct BDRVBochsState { + CoMutex lock; uint32_t *catalog_bitmap; int catalog_size; @@ -150,6 +151,7 @@ static int bochs_open(BlockDriverState *bs, int flags) s->extent_size = le32_to_cpu(bochs.extra.redolog.extent); + qemu_co_mutex_init(&s->lock); return 0; fail: return -1; @@ -207,6 +209,17 @@ static int bochs_read(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int bochs_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVBochsState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = bochs_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static void bochs_close(BlockDriverState *bs) { BDRVBochsState *s = bs->opaque; @@ -218,7 +231,7 @@ static BlockDriver bdrv_bochs = { .instance_size = sizeof(BDRVBochsState), .bdrv_probe = bochs_probe, .bdrv_open = bochs_open, - .bdrv_read = bochs_read, + .bdrv_read = bochs_co_read, .bdrv_close = bochs_close, }; diff --git a/block/cloop.c b/block/cloop.c index 8cff9f2cac..775f8a98e1 100644 --- a/block/cloop.c +++ b/block/cloop.c @@ -27,6 +27,7 @@ #include <zlib.h> typedef struct BDRVCloopState { + CoMutex lock; uint32_t block_size; uint32_t n_blocks; uint64_t* offsets; @@ -93,6 +94,7 @@ static int 
cloop_open(BlockDriverState *bs, int flags) s->sectors_per_block = s->block_size/512; bs->total_sectors = s->n_blocks*s->sectors_per_block; + qemu_co_mutex_init(&s->lock); return 0; cloop_close: @@ -144,6 +146,17 @@ static int cloop_read(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int cloop_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVCloopState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = cloop_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static void cloop_close(BlockDriverState *bs) { BDRVCloopState *s = bs->opaque; @@ -159,7 +172,7 @@ static BlockDriver bdrv_cloop = { .instance_size = sizeof(BDRVCloopState), .bdrv_probe = cloop_probe, .bdrv_open = cloop_open, - .bdrv_read = cloop_read, + .bdrv_read = cloop_co_read, .bdrv_close = cloop_close, }; diff --git a/block/cow.c b/block/cow.c index 4cf543c832..707c0aad88 100644 --- a/block/cow.c +++ b/block/cow.c @@ -42,6 +42,7 @@ struct cow_header_v2 { }; typedef struct BDRVCowState { + CoMutex lock; int64_t cow_sectors_offset; } BDRVCowState; @@ -84,6 +85,7 @@ static int cow_open(BlockDriverState *bs, int flags) bitmap_size = ((bs->total_sectors + 7) >> 3) + sizeof(cow_header); s->cow_sectors_offset = (bitmap_size + 511) & ~511; + qemu_co_mutex_init(&s->lock); return 0; fail: return -1; @@ -199,6 +201,17 @@ static int cow_read(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int cow_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVCowState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = cow_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static int cow_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { @@ -213,6 +226,17 @@ static int cow_write(BlockDriverState *bs, int64_t sector_num, return cow_update_bitmap(bs, sector_num, nb_sectors); } +static coroutine_fn int cow_co_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVCowState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = cow_write(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static void cow_close(BlockDriverState *bs) { } @@ -282,9 +306,9 @@ exit: return ret; } -static int cow_flush(BlockDriverState *bs) +static coroutine_fn int cow_co_flush(BlockDriverState *bs) { - return bdrv_flush(bs->file); + return bdrv_co_flush(bs->file); } static QEMUOptionParameter cow_create_options[] = { @@ -306,11 +330,11 @@ static BlockDriver bdrv_cow = { .instance_size = sizeof(BDRVCowState), .bdrv_probe = cow_probe, .bdrv_open = cow_open, - .bdrv_read = cow_read, - .bdrv_write = cow_write, + .bdrv_read = cow_co_read, + .bdrv_write = cow_co_write, .bdrv_close = cow_close, .bdrv_create = cow_create, - .bdrv_flush = cow_flush, + .bdrv_co_flush = cow_co_flush, .bdrv_is_allocated = cow_is_allocated, .create_options = cow_create_options, diff --git a/block/dmg.c b/block/dmg.c index 64c3cce46a..37902a4347 100644 --- a/block/dmg.c +++ b/block/dmg.c @@ -28,6 +28,7 @@ #include <zlib.h> typedef struct BDRVDMGState { + CoMutex lock; /* each chunk contains a certain number of sectors, * offsets[i] is the offset in the .dmg file, * lengths[i] is the length of the compressed chunk, @@ -177,6 +178,7 @@ static int dmg_open(BlockDriverState *bs, int flags) s->current_chunk = s->n_chunks; + 
qemu_co_mutex_init(&s->lock); return 0; fail: return -1; @@ -280,6 +282,17 @@ static int dmg_read(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int dmg_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVDMGState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = dmg_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static void dmg_close(BlockDriverState *bs) { BDRVDMGState *s = bs->opaque; @@ -300,7 +313,7 @@ static BlockDriver bdrv_dmg = { .instance_size = sizeof(BDRVDMGState), .bdrv_probe = dmg_probe, .bdrv_open = dmg_open, - .bdrv_read = dmg_read, + .bdrv_read = dmg_co_read, .bdrv_close = dmg_close, }; diff --git a/block/nbd.c b/block/nbd.c index 76f04d863c..882b2dc84a 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -47,6 +47,7 @@ #endif typedef struct BDRVNBDState { + CoMutex lock; int sock; uint32_t nbdflags; off_t size; @@ -175,6 +176,7 @@ static int nbd_open(BlockDriverState *bs, const char* filename, int flags) */ result = nbd_establish_connection(bs); + qemu_co_mutex_init(&s->lock); return result; } @@ -238,6 +240,28 @@ static int nbd_write(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int nbd_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVNBDState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = nbd_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + +static coroutine_fn int nbd_co_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVNBDState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = nbd_write(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static void nbd_close(BlockDriverState *bs) { BDRVNBDState *s = bs->opaque; @@ -258,8 +282,8 @@ static BlockDriver bdrv_nbd = { .format_name = "nbd", .instance_size = sizeof(BDRVNBDState), .bdrv_file_open = nbd_open, - .bdrv_read = nbd_read, - .bdrv_write = nbd_write, + .bdrv_read = nbd_co_read, + .bdrv_write = nbd_co_write, .bdrv_close = nbd_close, .bdrv_getlength = nbd_getlength, .protocol_name = "nbd", diff --git a/block/parallels.c b/block/parallels.c index c64103ddbb..d30f0ecf77 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -46,6 +46,7 @@ struct parallels_header { } QEMU_PACKED; typedef struct BDRVParallelsState { + CoMutex lock; uint32_t *catalog_bitmap; int catalog_size; @@ -95,6 +96,7 @@ static int parallels_open(BlockDriverState *bs, int flags) for (i = 0; i < s->catalog_size; i++) le32_to_cpus(&s->catalog_bitmap[i]); + qemu_co_mutex_init(&s->lock); return 0; fail: if (s->catalog_bitmap) @@ -134,6 +136,17 @@ static int parallels_read(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int parallels_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVParallelsState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = parallels_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static void parallels_close(BlockDriverState *bs) { BDRVParallelsState *s = bs->opaque; @@ -145,7 +158,7 @@ static BlockDriver bdrv_parallels = { .instance_size = sizeof(BDRVParallelsState), .bdrv_probe = parallels_probe, .bdrv_open = parallels_open, - .bdrv_read = parallels_read, + .bdrv_read = parallels_co_read, .bdrv_close = parallels_close, }; diff --git a/block/qcow.c 
b/block/qcow.c index eba5a04c44..ab36b2995c 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -781,15 +781,9 @@ static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num, return 0; } -static int qcow_flush(BlockDriverState *bs) +static coroutine_fn int qcow_co_flush(BlockDriverState *bs) { - return bdrv_flush(bs->file); -} - -static BlockDriverAIOCB *qcow_aio_flush(BlockDriverState *bs, - BlockDriverCompletionFunc *cb, void *opaque) -{ - return bdrv_aio_flush(bs->file, cb, opaque); + return bdrv_co_flush(bs->file); } static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) @@ -826,13 +820,12 @@ static BlockDriver bdrv_qcow = { .bdrv_open = qcow_open, .bdrv_close = qcow_close, .bdrv_create = qcow_create, - .bdrv_flush = qcow_flush, .bdrv_is_allocated = qcow_is_allocated, .bdrv_set_key = qcow_set_key, .bdrv_make_empty = qcow_make_empty, - .bdrv_co_readv = qcow_co_readv, - .bdrv_co_writev = qcow_co_writev, - .bdrv_aio_flush = qcow_aio_flush, + .bdrv_co_readv = qcow_co_readv, + .bdrv_co_writev = qcow_co_writev, + .bdrv_co_flush = qcow_co_flush, .bdrv_write_compressed = qcow_write_compressed, .bdrv_get_info = qcow_get_info, diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c index 2f76311354..f4e049fa90 100644 --- a/block/qcow2-cluster.c +++ b/block/qcow2-cluster.c @@ -568,8 +568,10 @@ uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, } cluster_offset = be64_to_cpu(l2_table[l2_index]); - if (cluster_offset & QCOW_OFLAG_COPIED) - return cluster_offset & ~QCOW_OFLAG_COPIED; + if (cluster_offset & QCOW_OFLAG_COPIED) { + qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); + return 0; + } if (cluster_offset) qcow2_free_any_clusters(bs, cluster_offset, 1); diff --git a/block/qcow2.c b/block/qcow2.c index 510ff6897f..a181932b67 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -978,11 +978,17 @@ static int qcow2_make_empty(BlockDriverState *bs) return 0; } -static int qcow2_discard(BlockDriverState *bs, int64_t sector_num, - int nb_sectors) +static coroutine_fn int qcow2_co_discard(BlockDriverState *bs, + int64_t sector_num, int nb_sectors) { - return qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS, + int ret; + BDRVQcowState *s = bs->opaque; + + qemu_co_mutex_lock(&s->lock); + ret = qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; } static int qcow2_truncate(BlockDriverState *bs, int64_t offset) @@ -1053,8 +1059,8 @@ static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num, Z_DEFLATED, -12, 9, Z_DEFAULT_STRATEGY); if (ret != 0) { - g_free(out_buf); - return -1; + ret = -EINVAL; + goto fail; } strm.avail_in = s->cluster_size; @@ -1064,9 +1070,9 @@ static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num, ret = deflate(&strm, Z_FINISH); if (ret != Z_STREAM_END && ret != Z_OK) { - g_free(out_buf); deflateEnd(&strm); - return -1; + ret = -EINVAL; + goto fail; } out_len = strm.next_out - out_buf; @@ -1074,29 +1080,37 @@ static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num, if (ret != Z_STREAM_END || out_len >= s->cluster_size) { /* could not compress: write normal cluster */ - bdrv_write(bs, sector_num, buf, s->cluster_sectors); + ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors); + if (ret < 0) { + goto fail; + } } else { cluster_offset = qcow2_alloc_compressed_cluster_offset(bs, sector_num << 9, out_len); - if (!cluster_offset) - return -1; + if (!cluster_offset) { + ret = -EIO; + goto fail; + } 
cluster_offset &= s->cluster_offset_mask; BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); - if (bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len) != out_len) { - g_free(out_buf); - return -1; + ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len); + if (ret < 0) { + goto fail; } } + ret = 0; +fail: g_free(out_buf); - return 0; + return ret; } -static int qcow2_flush(BlockDriverState *bs) +static int qcow2_co_flush(BlockDriverState *bs) { BDRVQcowState *s = bs->opaque; int ret; + qemu_co_mutex_lock(&s->lock); ret = qcow2_cache_flush(bs, s->l2_table_cache); if (ret < 0) { return ret; @@ -1106,28 +1120,9 @@ static int qcow2_flush(BlockDriverState *bs) if (ret < 0) { return ret; } + qemu_co_mutex_unlock(&s->lock); - return bdrv_flush(bs->file); -} - -static BlockDriverAIOCB *qcow2_aio_flush(BlockDriverState *bs, - BlockDriverCompletionFunc *cb, - void *opaque) -{ - BDRVQcowState *s = bs->opaque; - int ret; - - ret = qcow2_cache_flush(bs, s->l2_table_cache); - if (ret < 0) { - return NULL; - } - - ret = qcow2_cache_flush(bs, s->refcount_block_cache); - if (ret < 0) { - return NULL; - } - - return bdrv_aio_flush(bs->file, cb, opaque); + return bdrv_co_flush(bs->file); } static int64_t qcow2_vm_state_offset(BDRVQcowState *s) @@ -1242,16 +1237,15 @@ static BlockDriver bdrv_qcow2 = { .bdrv_open = qcow2_open, .bdrv_close = qcow2_close, .bdrv_create = qcow2_create, - .bdrv_flush = qcow2_flush, .bdrv_is_allocated = qcow2_is_allocated, .bdrv_set_key = qcow2_set_key, .bdrv_make_empty = qcow2_make_empty, .bdrv_co_readv = qcow2_co_readv, .bdrv_co_writev = qcow2_co_writev, - .bdrv_aio_flush = qcow2_aio_flush, + .bdrv_co_flush = qcow2_co_flush, - .bdrv_discard = qcow2_discard, + .bdrv_co_discard = qcow2_co_discard, .bdrv_truncate = qcow2_truncate, .bdrv_write_compressed = qcow2_write_compressed, diff --git a/block/qed.c b/block/qed.c index e6720dbd6f..d032a4574c 100644 --- a/block/qed.c +++ b/block/qed.c @@ -532,11 +532,6 @@ static void bdrv_qed_close(BlockDriverState *bs) qemu_vfree(s->l1_table); } -static int bdrv_qed_flush(BlockDriverState *bs) -{ - return bdrv_flush(bs->file); -} - static int qed_create(const char *filename, uint32_t cluster_size, uint64_t image_size, uint32_t table_size, const char *backing_file, const char *backing_fmt) @@ -1480,7 +1475,6 @@ static BlockDriver bdrv_qed = { .bdrv_open = bdrv_qed_open, .bdrv_close = bdrv_qed_close, .bdrv_create = bdrv_qed_create, - .bdrv_flush = bdrv_qed_flush, .bdrv_is_allocated = bdrv_qed_is_allocated, .bdrv_make_empty = bdrv_qed_make_empty, .bdrv_aio_readv = bdrv_qed_aio_readv, diff --git a/block/raw-posix.c b/block/raw-posix.c index c7f5544edd..a3de373586 100644 --- a/block/raw-posix.c +++ b/block/raw-posix.c @@ -583,19 +583,6 @@ static int raw_create(const char *filename, QEMUOptionParameter *options) return result; } -static int raw_flush(BlockDriverState *bs) -{ - BDRVRawState *s = bs->opaque; - int ret; - - ret = qemu_fdatasync(s->fd); - if (ret < 0) { - return -errno; - } - - return 0; -} - #ifdef CONFIG_XFS static int xfs_discard(BDRVRawState *s, int64_t sector_num, int nb_sectors) { @@ -615,7 +602,8 @@ static int xfs_discard(BDRVRawState *s, int64_t sector_num, int nb_sectors) } #endif -static int raw_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) +static coroutine_fn int raw_co_discard(BlockDriverState *bs, + int64_t sector_num, int nb_sectors) { #ifdef CONFIG_XFS BDRVRawState *s = bs->opaque; @@ -645,8 +633,7 @@ static BlockDriver bdrv_file = { .bdrv_file_open = raw_open, .bdrv_close = raw_close, 
.bdrv_create = raw_create, - .bdrv_flush = raw_flush, - .bdrv_discard = raw_discard, + .bdrv_co_discard = raw_co_discard, .bdrv_aio_readv = raw_aio_readv, .bdrv_aio_writev = raw_aio_writev, @@ -915,7 +902,6 @@ static BlockDriver bdrv_host_device = { .bdrv_create = hdev_create, .create_options = raw_create_options, .bdrv_has_zero_init = hdev_has_zero_init, - .bdrv_flush = raw_flush, .bdrv_aio_readv = raw_aio_readv, .bdrv_aio_writev = raw_aio_writev, @@ -1035,7 +1021,6 @@ static BlockDriver bdrv_host_floppy = { .bdrv_create = hdev_create, .create_options = raw_create_options, .bdrv_has_zero_init = hdev_has_zero_init, - .bdrv_flush = raw_flush, .bdrv_aio_readv = raw_aio_readv, .bdrv_aio_writev = raw_aio_writev, @@ -1135,7 +1120,6 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_create = hdev_create, .create_options = raw_create_options, .bdrv_has_zero_init = hdev_has_zero_init, - .bdrv_flush = raw_flush, .bdrv_aio_readv = raw_aio_readv, .bdrv_aio_writev = raw_aio_writev, @@ -1255,7 +1239,6 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_create = hdev_create, .create_options = raw_create_options, .bdrv_has_zero_init = hdev_has_zero_init, - .bdrv_flush = raw_flush, .bdrv_aio_readv = raw_aio_readv, .bdrv_aio_writev = raw_aio_writev, diff --git a/block/raw-win32.c b/block/raw-win32.c index b7dd357c6d..f5f73bcd64 100644 --- a/block/raw-win32.c +++ b/block/raw-win32.c @@ -281,7 +281,7 @@ static BlockDriver bdrv_file = { .bdrv_file_open = raw_open, .bdrv_close = raw_close, .bdrv_create = raw_create, - .bdrv_flush = raw_flush, + .bdrv_co_flush = raw_flush, .bdrv_read = raw_read, .bdrv_write = raw_write, .bdrv_truncate = raw_truncate, @@ -409,7 +409,7 @@ static BlockDriver bdrv_host_device = { .bdrv_probe_device = hdev_probe_device, .bdrv_file_open = hdev_open, .bdrv_close = raw_close, - .bdrv_flush = raw_flush, + .bdrv_co_flush = raw_flush, .bdrv_has_zero_init = hdev_has_zero_init, .bdrv_read = raw_read, diff --git a/block/raw.c b/block/raw.c index 5ca606b68a..33cc4716d3 100644 --- a/block/raw.c +++ b/block/raw.c @@ -25,15 +25,9 @@ static void raw_close(BlockDriverState *bs) { } -static int raw_flush(BlockDriverState *bs) +static int coroutine_fn raw_co_flush(BlockDriverState *bs) { - return bdrv_flush(bs->file); -} - -static BlockDriverAIOCB *raw_aio_flush(BlockDriverState *bs, - BlockDriverCompletionFunc *cb, void *opaque) -{ - return bdrv_aio_flush(bs->file, cb, opaque); + return bdrv_co_flush(bs->file); } static int64_t raw_getlength(BlockDriverState *bs) @@ -51,9 +45,10 @@ static int raw_probe(const uint8_t *buf, int buf_size, const char *filename) return 1; /* everything can be opened as raw image */ } -static int raw_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) +static int coroutine_fn raw_co_discard(BlockDriverState *bs, + int64_t sector_num, int nb_sectors) { - return bdrv_discard(bs->file, sector_num, nb_sectors); + return bdrv_co_discard(bs->file, sector_num, nb_sectors); } static int raw_is_inserted(BlockDriverState *bs) @@ -115,16 +110,16 @@ static BlockDriver bdrv_raw = { .bdrv_open = raw_open, .bdrv_close = raw_close, + .bdrv_co_readv = raw_co_readv, .bdrv_co_writev = raw_co_writev, - .bdrv_flush = raw_flush, + .bdrv_co_flush = raw_co_flush, + .bdrv_co_discard = raw_co_discard, + .bdrv_probe = raw_probe, .bdrv_getlength = raw_getlength, .bdrv_truncate = raw_truncate, - .bdrv_aio_flush = raw_aio_flush, - .bdrv_discard = raw_discard, - .bdrv_is_inserted = raw_is_inserted, .bdrv_media_changed = raw_media_changed, .bdrv_eject = raw_eject, diff --git a/block/rbd.c 
b/block/rbd.c index 3068c829fe..c684e0cb0b 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -705,7 +705,7 @@ static BlockDriverAIOCB *qemu_rbd_aio_writev(BlockDriverState *bs, return rbd_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); } -static int qemu_rbd_flush(BlockDriverState *bs) +static int qemu_rbd_co_flush(BlockDriverState *bs) { #if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 1) /* rbd_flush added in 0.1.1 */ @@ -851,7 +851,7 @@ static BlockDriver bdrv_rbd = { .bdrv_file_open = qemu_rbd_open, .bdrv_close = qemu_rbd_close, .bdrv_create = qemu_rbd_create, - .bdrv_flush = qemu_rbd_flush, + .bdrv_co_flush = qemu_rbd_co_flush, .bdrv_get_info = qemu_rbd_getinfo, .create_options = qemu_rbd_create_options, .bdrv_getlength = qemu_rbd_getlength, diff --git a/block/sheepdog.c b/block/sheepdog.c index ae857e294c..9f8060960f 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -396,7 +396,7 @@ static inline int free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req) return !QLIST_EMPTY(&acb->aioreq_head); } -static void sd_finish_aiocb(SheepdogAIOCB *acb) +static void coroutine_fn sd_finish_aiocb(SheepdogAIOCB *acb) { if (!acb->canceled) { qemu_coroutine_enter(acb->coroutine, NULL); @@ -735,7 +735,7 @@ out: return ret; } -static int add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, +static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, struct iovec *iov, int niov, int create, enum AIOCBState aiocb_type); @@ -743,7 +743,7 @@ static int add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, * This function searchs pending requests to the object `oid', and * sends them. */ -static void send_pending_req(BDRVSheepdogState *s, uint64_t oid, uint32_t id) +static void coroutine_fn send_pending_req(BDRVSheepdogState *s, uint64_t oid, uint32_t id) { AIOReq *aio_req, *next; SheepdogAIOCB *acb; @@ -777,7 +777,7 @@ static void send_pending_req(BDRVSheepdogState *s, uint64_t oid, uint32_t id) * This function is registered as a fd handler, and called from the * main loop when s->fd is ready for reading responses. */ -static void aio_read_response(void *opaque) +static void coroutine_fn aio_read_response(void *opaque) { SheepdogObjRsp rsp; BDRVSheepdogState *s = opaque; @@ -1064,7 +1064,7 @@ out: return ret; } -static int add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, +static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, struct iovec *iov, int niov, int create, enum AIOCBState aiocb_type) { @@ -1517,7 +1517,7 @@ static int sd_truncate(BlockDriverState *bs, int64_t offset) * update metadata, this sends a write request to the vdi object. * Otherwise, this switches back to sd_co_readv/writev. */ -static void sd_write_done(SheepdogAIOCB *acb) +static void coroutine_fn sd_write_done(SheepdogAIOCB *acb) { int ret; BDRVSheepdogState *s = acb->common.bs->opaque; @@ -1615,7 +1615,7 @@ out: * Returns 1 when we need to wait a response, 0 when there is no sent * request and -errno in error cases. 
*/ -static int sd_co_rw_vector(void *p) +static int coroutine_fn sd_co_rw_vector(void *p) { SheepdogAIOCB *acb = p; int ret = 0; diff --git a/block/vdi.c b/block/vdi.c index 1d5ad2bf49..883046d5a2 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -936,10 +936,10 @@ static void vdi_close(BlockDriverState *bs) { } -static int vdi_flush(BlockDriverState *bs) +static coroutine_fn int vdi_co_flush(BlockDriverState *bs) { logout("\n"); - return bdrv_flush(bs->file); + return bdrv_co_flush(bs->file); } @@ -975,7 +975,7 @@ static BlockDriver bdrv_vdi = { .bdrv_open = vdi_open, .bdrv_close = vdi_close, .bdrv_create = vdi_create, - .bdrv_flush = vdi_flush, + .bdrv_co_flush = vdi_co_flush, .bdrv_is_allocated = vdi_is_allocated, .bdrv_make_empty = vdi_make_empty, diff --git a/block/vmdk.c b/block/vmdk.c index a75dcc2b88..e93828eeae 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -90,6 +90,7 @@ typedef struct VmdkExtent { } VmdkExtent; typedef struct BDRVVmdkState { + CoMutex lock; int desc_offset; bool cid_updated; uint32_t parent_cid; @@ -283,10 +284,12 @@ static int vmdk_parent_open(BlockDriverState *bs) char *p_name; char desc[DESC_SIZE + 1]; BDRVVmdkState *s = bs->opaque; + int ret; desc[DESC_SIZE] = '\0'; - if (bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE) != DESC_SIZE) { - return -1; + ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE); + if (ret < 0) { + return ret; } p_name = strstr(desc, "parentFileNameHint"); @@ -296,10 +299,10 @@ static int vmdk_parent_open(BlockDriverState *bs) p_name += sizeof("parentFileNameHint") + 1; end_name = strchr(p_name, '\"'); if (end_name == NULL) { - return -1; + return -EINVAL; } if ((end_name - p_name) > sizeof(bs->backing_file) - 1) { - return -1; + return -EINVAL; } pstrcpy(bs->backing_file, end_name - p_name + 1, p_name); @@ -622,19 +625,7 @@ static int vmdk_open_desc_file(BlockDriverState *bs, int flags, return -ENOTSUP; } s->desc_offset = 0; - ret = vmdk_parse_extents(buf, bs, bs->file->filename); - if (ret) { - vmdk_free_extents(bs); - return ret; - } - - /* try to open parent images, if exist */ - if (vmdk_parent_open(bs)) { - vmdk_free_extents(bs); - return -EINVAL; - } - s->parent_cid = vmdk_read_cid(bs, 1); - return 0; + return vmdk_parse_extents(buf, bs, bs->file->filename); } static int vmdk_open(BlockDriverState *bs, int flags) @@ -644,17 +635,24 @@ static int vmdk_open(BlockDriverState *bs, int flags) if (vmdk_open_sparse(bs, bs->file, flags) == 0) { s->desc_offset = 0x200; - /* try to open parent images, if exist */ - ret = vmdk_parent_open(bs); + } else { + ret = vmdk_open_desc_file(bs, flags, 0); if (ret) { - vmdk_free_extents(bs); - return ret; + goto fail; } - s->parent_cid = vmdk_read_cid(bs, 1); - return 0; - } else { - return vmdk_open_desc_file(bs, flags, 0); } + /* try to open parent images, if exist */ + ret = vmdk_parent_open(bs); + if (ret) { + goto fail; + } + s->parent_cid = vmdk_read_cid(bs, 1); + qemu_co_mutex_init(&s->lock); + return ret; + +fail: + vmdk_free_extents(bs); + return ret; } static int get_whole_cluster(BlockDriverState *bs, @@ -1026,6 +1024,17 @@ static int vmdk_read(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int vmdk_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVVmdkState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = vmdk_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static int vmdk_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int 
nb_sectors) { @@ -1107,6 +1116,17 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int vmdk_co_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVVmdkState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = vmdk_write(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static int vmdk_create_extent(const char *filename, int64_t filesize, bool flat, bool compress) @@ -1473,14 +1493,14 @@ static void vmdk_close(BlockDriverState *bs) vmdk_free_extents(bs); } -static int vmdk_flush(BlockDriverState *bs) +static coroutine_fn int vmdk_co_flush(BlockDriverState *bs) { int i, ret, err; BDRVVmdkState *s = bs->opaque; - ret = bdrv_flush(bs->file); + ret = bdrv_co_flush(bs->file); for (i = 0; i < s->num_extents; i++) { - err = bdrv_flush(s->extents[i].file); + err = bdrv_co_flush(s->extents[i].file); if (err < 0) { ret = err; } @@ -1543,11 +1563,11 @@ static BlockDriver bdrv_vmdk = { .instance_size = sizeof(BDRVVmdkState), .bdrv_probe = vmdk_probe, .bdrv_open = vmdk_open, - .bdrv_read = vmdk_read, - .bdrv_write = vmdk_write, + .bdrv_read = vmdk_co_read, + .bdrv_write = vmdk_co_write, .bdrv_close = vmdk_close, .bdrv_create = vmdk_create, - .bdrv_flush = vmdk_flush, + .bdrv_co_flush = vmdk_co_flush, .bdrv_is_allocated = vmdk_is_allocated, .bdrv_get_allocated_file_size = vmdk_get_allocated_file_size, diff --git a/block/vpc.c b/block/vpc.c index cb6c570f44..79be7d051b 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -110,6 +110,7 @@ struct vhd_dyndisk_header { }; typedef struct BDRVVPCState { + CoMutex lock; uint8_t footer_buf[HEADER_SIZE]; uint64_t free_data_block_offset; int max_table_entries; @@ -226,6 +227,7 @@ static int vpc_open(BlockDriverState *bs, int flags) s->last_pagetable = -1; #endif + qemu_co_mutex_init(&s->lock); return 0; fail: return err; @@ -407,6 +409,17 @@ static int vpc_read(BlockDriverState *bs, int64_t sector_num, return 0; } +static coroutine_fn int vpc_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVVPCState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = vpc_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static int vpc_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { @@ -443,9 +456,20 @@ static int vpc_write(BlockDriverState *bs, int64_t sector_num, return 0; } -static int vpc_flush(BlockDriverState *bs) +static coroutine_fn int vpc_co_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVVPCState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = vpc_write(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + +static coroutine_fn int vpc_co_flush(BlockDriverState *bs) { - return bdrv_flush(bs->file); + return bdrv_co_flush(bs->file); } /* @@ -639,9 +663,9 @@ static BlockDriver bdrv_vpc = { .instance_size = sizeof(BDRVVPCState), .bdrv_probe = vpc_probe, .bdrv_open = vpc_open, - .bdrv_read = vpc_read, - .bdrv_write = vpc_write, - .bdrv_flush = vpc_flush, + .bdrv_read = vpc_co_read, + .bdrv_write = vpc_co_write, + .bdrv_co_flush = vpc_co_flush, .bdrv_close = vpc_close, .bdrv_create = vpc_create, diff --git a/block/vvfat.c b/block/vvfat.c index 7e9e35a3a3..e1fcdbc45b 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -317,6 +317,7 @@ static void print_mapping(const struct mapping_t* mapping); /* here begins 
the real VVFAT driver */ typedef struct BDRVVVFATState { + CoMutex lock; BlockDriverState* bs; /* pointer to parent */ unsigned int first_sectors_number; /* 1 for a single partition, 0x40 for a disk with partition table */ unsigned char first_sectors[0x40*0x200]; @@ -1065,6 +1066,7 @@ DLOG(if (stderr == NULL) { bs->heads = bs->cyls = bs->secs = 0; // assert(is_consistent(s)); + qemu_co_mutex_init(&s->lock); return 0; } @@ -1279,6 +1281,17 @@ DLOG(fprintf(stderr, "sector %d not allocated\n", (int)sector_num)); return 0; } +static coroutine_fn int vvfat_co_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVVVFATState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = vvfat_read(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + /* LATER TODO: statify all functions */ /* @@ -2714,6 +2727,17 @@ DLOG(checkpoint()); return 0; } +static coroutine_fn int vvfat_co_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors) +{ + int ret; + BDRVVVFATState *s = bs->opaque; + qemu_co_mutex_lock(&s->lock); + ret = vvfat_write(bs, sector_num, buf, nb_sectors); + qemu_co_mutex_unlock(&s->lock); + return ret; +} + static int vvfat_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int* n) { @@ -2803,8 +2827,8 @@ static BlockDriver bdrv_vvfat = { .format_name = "vvfat", .instance_size = sizeof(BDRVVVFATState), .bdrv_file_open = vvfat_open, - .bdrv_read = vvfat_read, - .bdrv_write = vvfat_write, + .bdrv_read = vvfat_co_read, + .bdrv_write = vvfat_co_write, .bdrv_close = vvfat_close, .bdrv_is_allocated = vvfat_is_allocated, .protocol_name = "fat", diff --git a/block_int.h b/block_int.h index f2f4f2db38..dac00f504f 100644 --- a/block_int.h +++ b/block_int.h @@ -62,7 +62,6 @@ struct BlockDriver { const uint8_t *buf, int nb_sectors); void (*bdrv_close)(BlockDriverState *bs); int (*bdrv_create)(const char *filename, QEMUOptionParameter *options); - int (*bdrv_flush)(BlockDriverState *bs); int (*bdrv_is_allocated)(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum); int (*bdrv_set_key)(BlockDriverState *bs, const char *key); @@ -76,13 +75,17 @@ struct BlockDriver { BlockDriverCompletionFunc *cb, void *opaque); BlockDriverAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs, BlockDriverCompletionFunc *cb, void *opaque); - int (*bdrv_discard)(BlockDriverState *bs, int64_t sector_num, - int nb_sectors); + BlockDriverAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + BlockDriverCompletionFunc *cb, void *opaque); int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); + int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs); + int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors); int (*bdrv_aio_multiwrite)(BlockDriverState *bs, BlockRequest *reqs, int num_reqs); diff --git a/exec-all.h b/exec-all.h index 72ef246793..85a37bf1ed 100644 --- a/exec-all.h +++ b/exec-all.h @@ -340,8 +340,7 @@ static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong add cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); #endif } - p = (void *)(unsigned long)addr - + env1->tlb_table[mmu_idx][page_index].addend; + p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend); 
return qemu_ram_addr_from_host_nofail(p); } #endif diff --git a/fpu/softfloat-specialize.h b/fpu/softfloat-specialize.h index c165205a49..c5e2dab9f6 100644 --- a/fpu/softfloat-specialize.h +++ b/fpu/softfloat-specialize.h @@ -420,6 +420,82 @@ static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, #endif /*---------------------------------------------------------------------------- +| Select which NaN to propagate for a three-input operation. +| For the moment we assume that no CPU needs the 'larger significand' +| information. +| Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN +*----------------------------------------------------------------------------*/ +#if defined(TARGET_ARM) +static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) +{ + /* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns + * the default NaN + */ + if (infzero && cIsQNaN) { + float_raise(float_flag_invalid STATUS_VAR); + return 3; + } + + /* This looks different from the ARM ARM pseudocode, because the ARM ARM + * puts the operands to a fused mac operation (a*b)+c in the order c,a,b. + */ + if (cIsSNaN) { + return 2; + } else if (aIsSNaN) { + return 0; + } else if (bIsSNaN) { + return 1; + } else if (cIsQNaN) { + return 2; + } else if (aIsQNaN) { + return 0; + } else { + return 1; + } +} +#elif defined(TARGET_PPC) +static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) +{ + /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer + * to return an input NaN if we have one (ie c) rather than generating + * a default NaN + */ + if (infzero) { + float_raise(float_flag_invalid STATUS_VAR); + return 2; + } + + /* If fRA is a NaN return it; otherwise if fRB is a NaN return it; + * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB + */ + if (aIsSNaN || aIsQNaN) { + return 0; + } else if (cIsSNaN || cIsQNaN) { + return 2; + } else { + return 1; + } +} +#else +/* A default implementation: prefer a to b to c. + * This is unlikely to actually match any real implementation. + */ +static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) +{ + if (aIsSNaN || aIsQNaN) { + return 0; + } else if (bIsSNaN || bIsQNaN) { + return 1; + } else { + return 2; + } +} +#endif + +/*---------------------------------------------------------------------------- | Takes two single-precision floating-point values `a' and `b', one of which | is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a | signaling NaN, the invalid exception is raised. @@ -460,6 +536,57 @@ static float32 propagateFloat32NaN( float32 a, float32 b STATUS_PARAM) } /*---------------------------------------------------------------------------- +| Takes three single-precision floating-point values `a', `b' and `c', one of +| which is a NaN, and returns the appropriate NaN result. If any of `a', +| `b' or `c' is a signaling NaN, the invalid exception is raised. +| The input infzero indicates whether a*b was 0*inf or inf*0 (in which case +| obviously c is a NaN, and whether to propagate c or some other NaN is +| implementation defined). 
+*----------------------------------------------------------------------------*/ + +static float32 propagateFloat32MulAddNaN(float32 a, float32 b, + float32 c, flag infzero STATUS_PARAM) +{ + flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, + cIsQuietNaN, cIsSignalingNaN; + int which; + + aIsQuietNaN = float32_is_quiet_nan(a); + aIsSignalingNaN = float32_is_signaling_nan(a); + bIsQuietNaN = float32_is_quiet_nan(b); + bIsSignalingNaN = float32_is_signaling_nan(b); + cIsQuietNaN = float32_is_quiet_nan(c); + cIsSignalingNaN = float32_is_signaling_nan(c); + + if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) { + float_raise(float_flag_invalid STATUS_VAR); + } + + which = pickNaNMulAdd(aIsQuietNaN, aIsSignalingNaN, + bIsQuietNaN, bIsSignalingNaN, + cIsQuietNaN, cIsSignalingNaN, infzero STATUS_VAR); + + if (STATUS(default_nan_mode)) { + /* Note that this check is after pickNaNMulAdd so that function + * has an opportunity to set the Invalid flag. + */ + return float32_default_nan; + } + + switch (which) { + case 0: + return float32_maybe_silence_nan(a); + case 1: + return float32_maybe_silence_nan(b); + case 2: + return float32_maybe_silence_nan(c); + case 3: + default: + return float32_default_nan; + } +} + +/*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is a quiet | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ @@ -596,6 +723,57 @@ static float64 propagateFloat64NaN( float64 a, float64 b STATUS_PARAM) } /*---------------------------------------------------------------------------- +| Takes three double-precision floating-point values `a', `b' and `c', one of +| which is a NaN, and returns the appropriate NaN result. If any of `a', +| `b' or `c' is a signaling NaN, the invalid exception is raised. +| The input infzero indicates whether a*b was 0*inf or inf*0 (in which case +| obviously c is a NaN, and whether to propagate c or some other NaN is +| implementation defined). +*----------------------------------------------------------------------------*/ + +static float64 propagateFloat64MulAddNaN(float64 a, float64 b, + float64 c, flag infzero STATUS_PARAM) +{ + flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, + cIsQuietNaN, cIsSignalingNaN; + int which; + + aIsQuietNaN = float64_is_quiet_nan(a); + aIsSignalingNaN = float64_is_signaling_nan(a); + bIsQuietNaN = float64_is_quiet_nan(b); + bIsSignalingNaN = float64_is_signaling_nan(b); + cIsQuietNaN = float64_is_quiet_nan(c); + cIsSignalingNaN = float64_is_signaling_nan(c); + + if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) { + float_raise(float_flag_invalid STATUS_VAR); + } + + which = pickNaNMulAdd(aIsQuietNaN, aIsSignalingNaN, + bIsQuietNaN, bIsSignalingNaN, + cIsQuietNaN, cIsSignalingNaN, infzero STATUS_VAR); + + if (STATUS(default_nan_mode)) { + /* Note that this check is after pickNaNMulAdd so that function + * has an opportunity to set the Invalid flag. + */ + return float64_default_nan; + } + + switch (which) { + case 0: + return float64_maybe_silence_nan(a); + case 1: + return float64_maybe_silence_nan(b); + case 2: + return float64_maybe_silence_nan(c); + case 3: + default: + return float64_default_nan; + } +} + +/*---------------------------------------------------------------------------- | Returns 1 if the extended double-precision floating-point value `a' is a | quiet NaN; otherwise returns 0. 
This slightly differs from the same | function for other types as floatx80 has an explicit bit. diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 3aafa81d58..81a7d1ae09 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -2118,6 +2118,213 @@ float32 float32_rem( float32 a, float32 b STATUS_PARAM ) } /*---------------------------------------------------------------------------- +| Returns the result of multiplying the single-precision floating-point values +| `a' and `b' then adding 'c', with no intermediate rounding step after the +| multiplication. The operation is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic 754-2008. +| The flags argument allows the caller to select negation of the +| addend, the intermediate product, or the final result. (The difference +| between this and having the caller do a separate negation is that negating +| externally will flip the sign bit on NaNs.) +*----------------------------------------------------------------------------*/ + +float32 float32_muladd(float32 a, float32 b, float32 c, int flags STATUS_PARAM) +{ + flag aSign, bSign, cSign, zSign; + int aExp, bExp, cExp, pExp, zExp, expDiff; + uint32_t aSig, bSig, cSig; + flag pInf, pZero, pSign; + uint64_t pSig64, cSig64, zSig64; + uint32_t pSig; + int shiftcount; + flag signflip, infzero; + + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + c = float32_squash_input_denormal(c STATUS_VAR); + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + aSign = extractFloat32Sign(a); + bSig = extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + bSign = extractFloat32Sign(b); + cSig = extractFloat32Frac(c); + cExp = extractFloat32Exp(c); + cSign = extractFloat32Sign(c); + + infzero = ((aExp == 0 && aSig == 0 && bExp == 0xff && bSig == 0) || + (aExp == 0xff && aSig == 0 && bExp == 0 && bSig == 0)); + + /* It is implementation-defined whether the cases of (0,inf,qnan) + * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN + * they return if they do), so we have to hand this information + * off to the target-specific pick-a-NaN routine. + */ + if (((aExp == 0xff) && aSig) || + ((bExp == 0xff) && bSig) || + ((cExp == 0xff) && cSig)) { + return propagateFloat32MulAddNaN(a, b, c, infzero STATUS_VAR); + } + + if (infzero) { + float_raise(float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + + if (flags & float_muladd_negate_c) { + cSign ^= 1; + } + + signflip = (flags & float_muladd_negate_result) ? 
1 : 0; + + /* Work out the sign and type of the product */ + pSign = aSign ^ bSign; + if (flags & float_muladd_negate_product) { + pSign ^= 1; + } + pInf = (aExp == 0xff) || (bExp == 0xff); + pZero = ((aExp | aSig) == 0) || ((bExp | bSig) == 0); + + if (cExp == 0xff) { + if (pInf && (pSign ^ cSign)) { + /* addition of opposite-signed infinities => InvalidOperation */ + float_raise(float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + /* Otherwise generate an infinity of the same sign */ + return packFloat32(cSign ^ signflip, 0xff, 0); + } + + if (pInf) { + return packFloat32(pSign ^ signflip, 0xff, 0); + } + + if (pZero) { + if (cExp == 0) { + if (cSig == 0) { + /* Adding two exact zeroes */ + if (pSign == cSign) { + zSign = pSign; + } else if (STATUS(float_rounding_mode) == float_round_down) { + zSign = 1; + } else { + zSign = 0; + } + return packFloat32(zSign ^ signflip, 0, 0); + } + /* Exact zero plus a denorm */ + if (STATUS(flush_to_zero)) { + float_raise(float_flag_output_denormal STATUS_VAR); + return packFloat32(cSign ^ signflip, 0, 0); + } + } + /* Zero plus something non-zero : just return the something */ + return c ^ (signflip << 31); + } + + if (aExp == 0) { + normalizeFloat32Subnormal(aSig, &aExp, &aSig); + } + if (bExp == 0) { + normalizeFloat32Subnormal(bSig, &bExp, &bSig); + } + + /* Calculate the actual result a * b + c */ + + /* Multiply first; this is easy. */ + /* NB: we subtract 0x7e where float32_mul() subtracts 0x7f + * because we want the true exponent, not the "one-less-than" + * flavour that roundAndPackFloat32() takes. + */ + pExp = aExp + bExp - 0x7e; + aSig = (aSig | 0x00800000) << 7; + bSig = (bSig | 0x00800000) << 8; + pSig64 = (uint64_t)aSig * bSig; + if ((int64_t)(pSig64 << 1) >= 0) { + pSig64 <<= 1; + pExp--; + } + + zSign = pSign ^ signflip; + + /* Now pSig64 is the significand of the multiply, with the explicit bit in + * position 62. + */ + if (cExp == 0) { + if (!cSig) { + /* Throw out the special case of c being an exact zero now */ + shift64RightJamming(pSig64, 32, &pSig64); + pSig = pSig64; + return roundAndPackFloat32(zSign, pExp - 1, + pSig STATUS_VAR); + } + normalizeFloat32Subnormal(cSig, &cExp, &cSig); + } + + cSig64 = (uint64_t)cSig << (62 - 23); + cSig64 |= LIT64(0x4000000000000000); + expDiff = pExp - cExp; + + if (pSign == cSign) { + /* Addition */ + if (expDiff > 0) { + /* scale c to match p */ + shift64RightJamming(cSig64, expDiff, &cSig64); + zExp = pExp; + } else if (expDiff < 0) { + /* scale p to match c */ + shift64RightJamming(pSig64, -expDiff, &pSig64); + zExp = cExp; + } else { + /* no scaling needed */ + zExp = cExp; + } + /* Add significands and make sure explicit bit ends up in posn 62 */ + zSig64 = pSig64 + cSig64; + if ((int64_t)zSig64 < 0) { + shift64RightJamming(zSig64, 1, &zSig64); + } else { + zExp--; + } + } else { + /* Subtraction */ + if (expDiff > 0) { + shift64RightJamming(cSig64, expDiff, &cSig64); + zSig64 = pSig64 - cSig64; + zExp = pExp; + } else if (expDiff < 0) { + shift64RightJamming(pSig64, -expDiff, &pSig64); + zSig64 = cSig64 - pSig64; + zExp = cExp; + zSign ^= 1; + } else { + zExp = pExp; + if (cSig64 < pSig64) { + zSig64 = pSig64 - cSig64; + } else if (pSig64 < cSig64) { + zSig64 = cSig64 - pSig64; + zSign ^= 1; + } else { + /* Exact zero */ + zSign = signflip; + if (STATUS(float_rounding_mode) == float_round_down) { + zSign ^= 1; + } + return packFloat32(zSign, 0, 0); + } + } + --zExp; + /* Normalize to put the explicit bit back into bit 62. 
*/ + shiftcount = countLeadingZeros64(zSig64) - 1; + zSig64 <<= shiftcount; + zExp -= shiftcount; + } + shift64RightJamming(zSig64, 32, &zSig64); + return roundAndPackFloat32(zSign, zExp, zSig64 STATUS_VAR); +} + + +/*---------------------------------------------------------------------------- | Returns the square root of the single-precision floating-point value `a'. | The operation is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. @@ -3465,6 +3672,226 @@ float64 float64_rem( float64 a, float64 b STATUS_PARAM ) } /*---------------------------------------------------------------------------- +| Returns the result of multiplying the double-precision floating-point values +| `a' and `b' then adding 'c', with no intermediate rounding step after the +| multiplication. The operation is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic 754-2008. +| The flags argument allows the caller to select negation of the +| addend, the intermediate product, or the final result. (The difference +| between this and having the caller do a separate negation is that negating +| externally will flip the sign bit on NaNs.) +*----------------------------------------------------------------------------*/ + +float64 float64_muladd(float64 a, float64 b, float64 c, int flags STATUS_PARAM) +{ + flag aSign, bSign, cSign, zSign; + int aExp, bExp, cExp, pExp, zExp, expDiff; + uint64_t aSig, bSig, cSig; + flag pInf, pZero, pSign; + uint64_t pSig0, pSig1, cSig0, cSig1, zSig0, zSig1; + int shiftcount; + flag signflip, infzero; + + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + c = float64_squash_input_denormal(c STATUS_VAR); + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + aSign = extractFloat64Sign(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + bSign = extractFloat64Sign(b); + cSig = extractFloat64Frac(c); + cExp = extractFloat64Exp(c); + cSign = extractFloat64Sign(c); + + infzero = ((aExp == 0 && aSig == 0 && bExp == 0x7ff && bSig == 0) || + (aExp == 0x7ff && aSig == 0 && bExp == 0 && bSig == 0)); + + /* It is implementation-defined whether the cases of (0,inf,qnan) + * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN + * they return if they do), so we have to hand this information + * off to the target-specific pick-a-NaN routine. + */ + if (((aExp == 0x7ff) && aSig) || + ((bExp == 0x7ff) && bSig) || + ((cExp == 0x7ff) && cSig)) { + return propagateFloat64MulAddNaN(a, b, c, infzero STATUS_VAR); + } + + if (infzero) { + float_raise(float_flag_invalid STATUS_VAR); + return float64_default_nan; + } + + if (flags & float_muladd_negate_c) { + cSign ^= 1; + } + + signflip = (flags & float_muladd_negate_result) ? 
1 : 0; + + /* Work out the sign and type of the product */ + pSign = aSign ^ bSign; + if (flags & float_muladd_negate_product) { + pSign ^= 1; + } + pInf = (aExp == 0x7ff) || (bExp == 0x7ff); + pZero = ((aExp | aSig) == 0) || ((bExp | bSig) == 0); + + if (cExp == 0x7ff) { + if (pInf && (pSign ^ cSign)) { + /* addition of opposite-signed infinities => InvalidOperation */ + float_raise(float_flag_invalid STATUS_VAR); + return float64_default_nan; + } + /* Otherwise generate an infinity of the same sign */ + return packFloat64(cSign ^ signflip, 0x7ff, 0); + } + + if (pInf) { + return packFloat64(pSign ^ signflip, 0x7ff, 0); + } + + if (pZero) { + if (cExp == 0) { + if (cSig == 0) { + /* Adding two exact zeroes */ + if (pSign == cSign) { + zSign = pSign; + } else if (STATUS(float_rounding_mode) == float_round_down) { + zSign = 1; + } else { + zSign = 0; + } + return packFloat64(zSign ^ signflip, 0, 0); + } + /* Exact zero plus a denorm */ + if (STATUS(flush_to_zero)) { + float_raise(float_flag_output_denormal STATUS_VAR); + return packFloat64(cSign ^ signflip, 0, 0); + } + } + /* Zero plus something non-zero : just return the something */ + return c ^ ((uint64_t)signflip << 63); + } + + if (aExp == 0) { + normalizeFloat64Subnormal(aSig, &aExp, &aSig); + } + if (bExp == 0) { + normalizeFloat64Subnormal(bSig, &bExp, &bSig); + } + + /* Calculate the actual result a * b + c */ + + /* Multiply first; this is easy. */ + /* NB: we subtract 0x3fe where float64_mul() subtracts 0x3ff + * because we want the true exponent, not the "one-less-than" + * flavour that roundAndPackFloat64() takes. + */ + pExp = aExp + bExp - 0x3fe; + aSig = (aSig | LIT64(0x0010000000000000))<<10; + bSig = (bSig | LIT64(0x0010000000000000))<<11; + mul64To128(aSig, bSig, &pSig0, &pSig1); + if ((int64_t)(pSig0 << 1) >= 0) { + shortShift128Left(pSig0, pSig1, 1, &pSig0, &pSig1); + pExp--; + } + + zSign = pSign ^ signflip; + + /* Now [pSig0:pSig1] is the significand of the multiply, with the explicit + * bit in position 126. + */ + if (cExp == 0) { + if (!cSig) { + /* Throw out the special case of c being an exact zero now */ + shift128RightJamming(pSig0, pSig1, 64, &pSig0, &pSig1); + return roundAndPackFloat64(zSign, pExp - 1, + pSig1 STATUS_VAR); + } + normalizeFloat64Subnormal(cSig, &cExp, &cSig); + } + + /* Shift cSig and add the explicit bit so [cSig0:cSig1] is the + * significand of the addend, with the explicit bit in position 126. 
+ */ + cSig0 = cSig << (126 - 64 - 52); + cSig1 = 0; + cSig0 |= LIT64(0x4000000000000000); + expDiff = pExp - cExp; + + if (pSign == cSign) { + /* Addition */ + if (expDiff > 0) { + /* scale c to match p */ + shift128RightJamming(cSig0, cSig1, expDiff, &cSig0, &cSig1); + zExp = pExp; + } else if (expDiff < 0) { + /* scale p to match c */ + shift128RightJamming(pSig0, pSig1, -expDiff, &pSig0, &pSig1); + zExp = cExp; + } else { + /* no scaling needed */ + zExp = cExp; + } + /* Add significands and make sure explicit bit ends up in posn 126 */ + add128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1); + if ((int64_t)zSig0 < 0) { + shift128RightJamming(zSig0, zSig1, 1, &zSig0, &zSig1); + } else { + zExp--; + } + shift128RightJamming(zSig0, zSig1, 64, &zSig0, &zSig1); + return roundAndPackFloat64(zSign, zExp, zSig1 STATUS_VAR); + } else { + /* Subtraction */ + if (expDiff > 0) { + shift128RightJamming(cSig0, cSig1, expDiff, &cSig0, &cSig1); + sub128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1); + zExp = pExp; + } else if (expDiff < 0) { + shift128RightJamming(pSig0, pSig1, -expDiff, &pSig0, &pSig1); + sub128(cSig0, cSig1, pSig0, pSig1, &zSig0, &zSig1); + zExp = cExp; + zSign ^= 1; + } else { + zExp = pExp; + if (lt128(cSig0, cSig1, pSig0, pSig1)) { + sub128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1); + } else if (lt128(pSig0, pSig1, cSig0, cSig1)) { + sub128(cSig0, cSig1, pSig0, pSig1, &zSig0, &zSig1); + zSign ^= 1; + } else { + /* Exact zero */ + zSign = signflip; + if (STATUS(float_rounding_mode) == float_round_down) { + zSign ^= 1; + } + return packFloat64(zSign, 0, 0); + } + } + --zExp; + /* Do the equivalent of normalizeRoundAndPackFloat64() but + * starting with the significand in a pair of uint64_t. + */ + if (zSig0) { + shiftcount = countLeadingZeros64(zSig0) - 1; + shortShift128Left(zSig0, zSig1, shiftcount, &zSig0, &zSig1); + if (zSig1) { + zSig0 |= 1; + } + zExp -= shiftcount; + } else { + shiftcount = countLeadingZeros64(zSig1) - 1; + zSig0 = zSig1 << shiftcount; + zExp -= (shiftcount + 64); + } + return roundAndPackFloat64(zSign, zExp, zSig0 STATUS_VAR); + } +} + +/*---------------------------------------------------------------------------- | Returns the square root of the double-precision floating-point value `a'. | The operation is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. diff --git a/fpu/softfloat.h b/fpu/softfloat.h index 618ddee569..07c2929613 100644 --- a/fpu/softfloat.h +++ b/fpu/softfloat.h @@ -212,6 +212,18 @@ void set_floatx80_rounding_precision(int val STATUS_PARAM); void float_raise( int8 flags STATUS_PARAM); /*---------------------------------------------------------------------------- +| Options to indicate which negations to perform in float*_muladd() +| Using these differs from negating an input or output before calling +| the muladd function in that this means that a NaN doesn't have its +| sign bit inverted before it is propagated. +*----------------------------------------------------------------------------*/ +enum { + float_muladd_negate_c = 1, + float_muladd_negate_product = 2, + float_muladd_negate_result = 4, +}; + +/*---------------------------------------------------------------------------- | Software IEC/IEEE integer-to-floating-point conversion routines.
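A minimal usage sketch for the float32_muladd()/float64_muladd() helpers added above; this is illustrative only and not part of the patch, the wrapper names are hypothetical, and it assumes the caller already owns a float_status (for example a target's fp_status):

#include "softfloat.h"  /* fpu/softfloat.h; the include path is an assumption */

/* a*b + c with a single rounding step */
static float64 fused_madd64(float64 a, float64 b, float64 c, float_status *st)
{
    return float64_muladd(a, b, c, 0, st);
}

/* -(a*b) + c: negate the intermediate product rather than pre-negating 'a',
 * so a NaN coming in through 'a' keeps its sign bit when it is propagated.
 */
static float64 fused_neg_madd64(float64 a, float64 b, float64 c,
                                float_status *st)
{
    return float64_muladd(a, b, c, float_muladd_negate_product, st);
}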
*----------------------------------------------------------------------------*/ float32 int32_to_float32( int32 STATUS_PARAM ); @@ -269,6 +281,7 @@ float32 float32_sub( float32, float32 STATUS_PARAM ); float32 float32_mul( float32, float32 STATUS_PARAM ); float32 float32_div( float32, float32 STATUS_PARAM ); float32 float32_rem( float32, float32 STATUS_PARAM ); +float32 float32_muladd(float32, float32, float32, int STATUS_PARAM); float32 float32_sqrt( float32 STATUS_PARAM ); float32 float32_exp2( float32 STATUS_PARAM ); float32 float32_log2( float32 STATUS_PARAM ); @@ -375,6 +388,7 @@ float64 float64_sub( float64, float64 STATUS_PARAM ); float64 float64_mul( float64, float64 STATUS_PARAM ); float64 float64_div( float64, float64 STATUS_PARAM ); float64 float64_rem( float64, float64 STATUS_PARAM ); +float64 float64_muladd(float64, float64, float64, int STATUS_PARAM); float64 float64_sqrt( float64 STATUS_PARAM ); float64 float64_log2( float64 STATUS_PARAM ); int float64_eq( float64, float64 STATUS_PARAM ); @@ -814,7 +814,11 @@ static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n) #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64) if (n < 64) { /* fprs */ - GET_REG32(*((uint32_t *)&env->fpr[n - 32])); + if (n & 1) { + GET_REG32(env->fpr[(n - 32) / 2].l.lower); + } else { + GET_REG32(env->fpr[(n - 32) / 2].l.upper); + } } /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */ switch (n) { @@ -831,15 +835,15 @@ static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n) #else if (n < 64) { /* f0-f31 */ - GET_REG32(*((uint32_t *)&env->fpr[n - 32])); + if (n & 1) { + GET_REG32(env->fpr[(n - 32) / 2].l.lower); + } else { + GET_REG32(env->fpr[(n - 32) / 2].l.upper); + } } if (n < 80) { /* f32-f62 (double width, even numbers only) */ - uint64_t val; - - val = (uint64_t)*((uint32_t *)&env->fpr[(n - 64) * 2 + 32]) << 32; - val |= *((uint32_t *)&env->fpr[(n - 64) * 2 + 33]); - GET_REG64(val); + GET_REG64(env->fpr[(n - 32) / 2].ll); } switch (n) { case 80: GET_REGL(env->pc); @@ -878,7 +882,12 @@ static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n) #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64) else if (n < 64) { /* fprs */ - *((uint32_t *)&env->fpr[n - 32]) = tmp; + /* f0-f31 */ + if (n & 1) { + env->fpr[(n - 32) / 2].l.lower = tmp; + } else { + env->fpr[(n - 32) / 2].l.upper = tmp; + } } else { /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */ switch (n) { @@ -896,12 +905,16 @@ static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n) #else else if (n < 64) { /* f0-f31 */ - env->fpr[n] = ldfl_p(mem_buf); + tmp = ldl_p(mem_buf); + if (n & 1) { + env->fpr[(n - 32) / 2].l.lower = tmp; + } else { + env->fpr[(n - 32) / 2].l.upper = tmp; + } return 4; } else if (n < 80) { /* f32-f62 (double width, even numbers only) */ - *((uint32_t *)&env->fpr[(n - 64) * 2 + 32]) = tmp >> 32; - *((uint32_t *)&env->fpr[(n - 64) * 2 + 33]) = tmp; + env->fpr[(n - 32) / 2].ll = tmp; } else { switch (n) { case 80: env->pc = tmp; break; diff --git a/hmp-commands.hx b/hmp-commands.hx index ab08d583df..e1812676e3 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -824,7 +824,8 @@ ETEXI .params = "protocol hostname port tls-port cert-subject", .help = "send migration info to spice/vnc client", .user_print = monitor_user_noop, - .mhandler.cmd_new = client_migrate_info, + .mhandler.cmd_async = client_migrate_info, + .flags = MONITOR_CMD_ASYNC, }, STEXI diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c index aab3bebcc7..8b6813f8de 100644 --- a/hw/9pfs/virtio-9p.c +++ 
b/hw/9pfs/virtio-9p.c @@ -969,7 +969,7 @@ static void complete_pdu(V9fsState *s, V9fsPDU *pdu, ssize_t len) if (s->proto_version == V9FS_PROTO_2000L) { id = P9_RLERROR; } - trace_complete_pdu(pdu->tag, pdu->id, err); /* Trace ERROR */ + trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */ } /* fill out the header */ @@ -1332,11 +1332,11 @@ static void v9fs_attach(void *opaque) } offset += pdu_marshal(pdu, offset, "Q", &qid); err = offset; + trace_v9fs_attach_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_attach_return(pdu->tag, pdu->id, - qid.type, qid.version, qid.path); complete_pdu(s, pdu, err); v9fs_string_free(&uname); v9fs_string_free(&aname); @@ -1371,13 +1371,12 @@ static void v9fs_stat(void *opaque) } offset += pdu_marshal(pdu, offset, "wS", 0, &v9stat); err = offset; + trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode, + v9stat.atime, v9stat.mtime, v9stat.length); v9fs_stat_free(&v9stat); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode, - v9stat.atime, v9stat.mtime, v9stat.length); - complete_pdu(s, pdu, err); } @@ -1421,13 +1420,12 @@ static void v9fs_getattr(void *opaque) } retval = offset; retval += pdu_marshal(pdu, offset, "A", &v9stat_dotl); -out: - put_fid(pdu, fidp); -out_nofid: trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask, v9stat_dotl.st_mode, v9stat_dotl.st_uid, v9stat_dotl.st_gid); - +out: + put_fid(pdu, fidp); +out_nofid: complete_pdu(s, pdu, retval); } @@ -1605,6 +1603,7 @@ static void v9fs_walk(void *opaque) v9fs_path_copy(&newfidp->path, &path); } err = v9fs_walk_marshal(pdu, nwnames, qids); + trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids); out: put_fid(pdu, fidp); if (newfidp) { @@ -1613,7 +1612,6 @@ out: v9fs_path_free(&dpath); v9fs_path_free(&path); out_nofid: - trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids); complete_pdu(s, pdu, err); if (nwnames && nwnames <= P9_MAXWELEM) { for (name_idx = 0; name_idx < nwnames; name_idx++) { @@ -1648,10 +1646,10 @@ static int32_t get_iounit(V9fsPDU *pdu, V9fsPath *path) static void v9fs_open(void *opaque) { int flags; - int iounit; int32_t fid; int32_t mode; V9fsQID qid; + int iounit = 0; ssize_t err = 0; size_t offset = 7; struct stat stbuf; @@ -1709,11 +1707,11 @@ static void v9fs_open(void *opaque) offset += pdu_marshal(pdu, offset, "Qd", &qid, iounit); err = offset; } + trace_v9fs_open_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_open_return(pdu->tag, pdu->id, - qid.type, qid.version, qid.path, iounit); complete_pdu(s, pdu, err); } @@ -1759,11 +1757,11 @@ static void v9fs_lcreate(void *opaque) stat_to_qid(&stbuf, &qid); offset += pdu_marshal(pdu, offset, "Qd", &qid, iounit); err = offset; + trace_v9fs_lcreate_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_lcreate_return(pdu->tag, pdu->id, - qid.type, qid.version, qid.path, iounit); complete_pdu(pdu->s, pdu, err); v9fs_string_free(&name); } @@ -1978,10 +1976,10 @@ static void v9fs_read(void *opaque) } else { err = -EINVAL; } + trace_v9fs_read_return(pdu->tag, pdu->id, count, err); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_read_return(pdu->tag, pdu->id, count, err); complete_pdu(s, pdu, err); } @@ -2090,10 +2088,10 @@ static void v9fs_readdir(void *opaque) retval = offset; retval += pdu_marshal(pdu, offset, "d", count); retval += count; + 
trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval); complete_pdu(s, pdu, retval); } @@ -2202,10 +2200,10 @@ static void v9fs_write(void *opaque) } while (total < count && len > 0); offset += pdu_marshal(pdu, offset, "d", total); err = offset; + trace_v9fs_write_return(pdu->tag, pdu->id, total, err); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_write_return(pdu->tag, pdu->id, total, err); complete_pdu(s, pdu, err); } @@ -2362,11 +2360,11 @@ static void v9fs_create(void *opaque) stat_to_qid(&stbuf, &qid); offset += pdu_marshal(pdu, offset, "Qd", &qid, iounit); err = offset; + trace_v9fs_create_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_create_return(pdu->tag, pdu->id, - qid.type, qid.version, qid.path, iounit); complete_pdu(pdu->s, pdu, err); v9fs_string_free(&name); v9fs_string_free(&extension); @@ -2401,11 +2399,11 @@ static void v9fs_symlink(void *opaque) stat_to_qid(&stbuf, &qid); offset += pdu_marshal(pdu, offset, "Q", &qid); err = offset; + trace_v9fs_symlink_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); out: put_fid(pdu, dfidp); out_nofid: - trace_v9fs_symlink_return(pdu->tag, pdu->id, - qid.type, qid.version, qid.path); complete_pdu(pdu->s, pdu, err); v9fs_string_free(&name); v9fs_string_free(&symname); @@ -2950,10 +2948,11 @@ static void v9fs_mknod(void *opaque) stat_to_qid(&stbuf, &qid); err = offset; err += pdu_marshal(pdu, offset, "Q", &qid); + trace_v9fs_mknod_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_mknod_return(pdu->tag, pdu->id, qid.type, qid.version, qid.path); complete_pdu(s, pdu, err); v9fs_string_free(&name); } @@ -3049,12 +3048,11 @@ static void v9fs_getlock(void *opaque) glock->start, glock->length, glock->proc_id, &glock->client_id); err = offset; + trace_v9fs_getlock_return(pdu->tag, pdu->id, glock->type, glock->start, + glock->length, glock->proc_id); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_getlock_return(pdu->tag, pdu->id, glock->type, glock->start, - glock->length, glock->proc_id); - complete_pdu(s, pdu, err); v9fs_string_free(&glock->client_id); g_free(glock); @@ -3089,11 +3087,11 @@ static void v9fs_mkdir(void *opaque) stat_to_qid(&stbuf, &qid); offset += pdu_marshal(pdu, offset, "Q", &qid); err = offset; + trace_v9fs_mkdir_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, err); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_mkdir_return(pdu->tag, pdu->id, - qid.type, qid.version, qid.path, err); complete_pdu(pdu->s, pdu, err); v9fs_string_free(&name); } @@ -3183,13 +3181,13 @@ static void v9fs_xattrwalk(void *opaque) offset += pdu_marshal(pdu, offset, "q", size); err = offset; } + trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size); out: put_fid(pdu, file_fidp); if (xattr_fidp) { put_fid(pdu, xattr_fidp); } out_nofid: - trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size); complete_pdu(s, pdu, err); v9fs_string_free(&name); } @@ -3260,11 +3258,11 @@ static void v9fs_readlink(void *opaque) } offset += pdu_marshal(pdu, offset, "s", &target); err = offset; + trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data); v9fs_string_free(&target); out: put_fid(pdu, fidp); out_nofid: - trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data); complete_pdu(pdu->s, pdu, err); } @@ -434,6 +434,7 @@ static uint32_t fdctrl_read (void *opaque, uint32_t reg) FDCtrl *fdctrl = opaque; 
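Note on the virtio-9p hunks above: each trace_v9fs_*_return() call is moved from after the out:/out_nofid: labels onto the success path, so the traced arguments (qid fields, stat fields, byte counts) are only read once they have actually been filled in; on the error paths the handlers now go straight to put_fid()/complete_pdu() without tracing.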
uint32_t retval; + reg &= 7; switch (reg) { case FD_REG_SRA: retval = fdctrl_read_statusA(fdctrl); @@ -471,6 +472,7 @@ static void fdctrl_write (void *opaque, uint32_t reg, uint32_t value) FLOPPY_DPRINTF("write reg%d: 0x%02x\n", reg & 7, value); + reg &= 7; switch (reg) { case FD_REG_DOR: fdctrl_write_dor(fdctrl, value); @@ -1945,6 +1947,18 @@ static int sun4m_fdc_init1(SysBusDevice *dev) return fdctrl_init_common(fdctrl); } +void fdc_get_bs(BlockDriverState *bs[], ISADevice *dev) +{ + FDCtrlISABus *isa = DO_UPCAST(FDCtrlISABus, busdev, dev); + FDCtrl *fdctrl = &isa->state; + int i; + + for (i = 0; i < MAX_FD; i++) { + bs[i] = fdctrl->drives[i].bs; + } +} + + static const VMStateDescription vmstate_isa_fdc ={ .name = "fdc", .version_id = 2, @@ -7,14 +7,15 @@ /* fdc.c */ #define MAX_FD 2 -static inline void fdctrl_init_isa(DriveInfo **fds) +static inline ISADevice *fdctrl_init_isa(DriveInfo **fds) { ISADevice *dev; dev = isa_try_create("isa-fdc"); if (!dev) { - return; + return NULL; } + if (fds[0]) { qdev_prop_set_drive_nofail(&dev->qdev, "driveA", fds[0]->bdrv); } @@ -22,10 +23,14 @@ static inline void fdctrl_init_isa(DriveInfo **fds) qdev_prop_set_drive_nofail(&dev->qdev, "driveB", fds[1]->bdrv); } qdev_init_nofail(&dev->qdev); + + return dev; } void fdctrl_init_sysbus(qemu_irq irq, int dma_chann, target_phys_addr_t mmio_base, DriveInfo **fds); void sun4m_fdctrl_init(qemu_irq irq, target_phys_addr_t io_base, DriveInfo **fds, qemu_irq *fdc_tc); +void fdc_get_bs(BlockDriverState *bs[], ISADevice *dev); + #endif diff --git a/hw/lm4549.c b/hw/lm4549.c new file mode 100644 index 0000000000..4d5b83125f --- /dev/null +++ b/hw/lm4549.c @@ -0,0 +1,336 @@ +/* + * LM4549 Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licenced under the GPL. + * + * ***************************************************************** + * + * This driver emulates the LM4549 codec. + * + * It supports only one playback voice and no record voice. + */ + +#include "hw.h" +#include "audio/audio.h" +#include "lm4549.h" + +#if 0 +#define LM4549_DEBUG 1 +#endif + +#if 0 +#define LM4549_DUMP_DAC_INPUT 1 +#endif + +#ifdef LM4549_DEBUG +#define DPRINTF(fmt, ...) \ +do { printf("lm4549: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) 
do {} while (0) +#endif + +#if defined(LM4549_DUMP_DAC_INPUT) +#include <stdio.h> +static FILE *fp_dac_input; +#endif + +/* LM4549 register list */ +enum { + LM4549_Reset = 0x00, + LM4549_Master_Volume = 0x02, + LM4549_Line_Out_Volume = 0x04, + LM4549_Master_Volume_Mono = 0x06, + LM4549_PC_Beep_Volume = 0x0A, + LM4549_Phone_Volume = 0x0C, + LM4549_Mic_Volume = 0x0E, + LM4549_Line_In_Volume = 0x10, + LM4549_CD_Volume = 0x12, + LM4549_Video_Volume = 0x14, + LM4549_Aux_Volume = 0x16, + LM4549_PCM_Out_Volume = 0x18, + LM4549_Record_Select = 0x1A, + LM4549_Record_Gain = 0x1C, + LM4549_General_Purpose = 0x20, + LM4549_3D_Control = 0x22, + LM4549_Powerdown_Ctrl_Stat = 0x26, + LM4549_Ext_Audio_ID = 0x28, + LM4549_Ext_Audio_Stat_Ctrl = 0x2A, + LM4549_PCM_Front_DAC_Rate = 0x2C, + LM4549_PCM_ADC_Rate = 0x32, + LM4549_Vendor_ID1 = 0x7C, + LM4549_Vendor_ID2 = 0x7E +}; + +static void lm4549_reset(lm4549_state *s) +{ + uint16_t *regfile = s->regfile; + + regfile[LM4549_Reset] = 0x0d50; + regfile[LM4549_Master_Volume] = 0x8008; + regfile[LM4549_Line_Out_Volume] = 0x8000; + regfile[LM4549_Master_Volume_Mono] = 0x8000; + regfile[LM4549_PC_Beep_Volume] = 0x0000; + regfile[LM4549_Phone_Volume] = 0x8008; + regfile[LM4549_Mic_Volume] = 0x8008; + regfile[LM4549_Line_In_Volume] = 0x8808; + regfile[LM4549_CD_Volume] = 0x8808; + regfile[LM4549_Video_Volume] = 0x8808; + regfile[LM4549_Aux_Volume] = 0x8808; + regfile[LM4549_PCM_Out_Volume] = 0x8808; + regfile[LM4549_Record_Select] = 0x0000; + regfile[LM4549_Record_Gain] = 0x8000; + regfile[LM4549_General_Purpose] = 0x0000; + regfile[LM4549_3D_Control] = 0x0101; + regfile[LM4549_Powerdown_Ctrl_Stat] = 0x000f; + regfile[LM4549_Ext_Audio_ID] = 0x0001; + regfile[LM4549_Ext_Audio_Stat_Ctrl] = 0x0000; + regfile[LM4549_PCM_Front_DAC_Rate] = 0xbb80; + regfile[LM4549_PCM_ADC_Rate] = 0xbb80; + regfile[LM4549_Vendor_ID1] = 0x4e53; + regfile[LM4549_Vendor_ID2] = 0x4331; +} + +static void lm4549_audio_transfer(lm4549_state *s) +{ + uint32_t written_bytes, written_samples; + uint32_t i; + + /* Activate the voice */ + AUD_set_active_out(s->voice, 1); + s->voice_is_active = 1; + + /* Try to write the buffer content */ + written_bytes = AUD_write(s->voice, s->buffer, + s->buffer_level * sizeof(uint16_t)); + written_samples = written_bytes >> 1; + +#if defined(LM4549_DUMP_DAC_INPUT) + fwrite(s->buffer, sizeof(uint8_t), written_bytes, fp_dac_input); +#endif + + s->buffer_level -= written_samples; + + if (s->buffer_level > 0) { + /* Move the data back to the start of the buffer */ + for (i = 0; i < s->buffer_level; i++) { + s->buffer[i] = s->buffer[i + written_samples]; + } + } +} + +static void lm4549_audio_out_callback(void *opaque, int free) +{ + lm4549_state *s = (lm4549_state *)opaque; + static uint32_t prev_buffer_level; + +#ifdef LM4549_DEBUG + int size = AUD_get_buffer_size_out(s->voice); + DPRINTF("audio_out_callback size = %i free = %i\n", size, free); +#endif + + /* Detect that no data are consumed + => disable the voice */ + if (s->buffer_level == prev_buffer_level) { + AUD_set_active_out(s->voice, 0); + s->voice_is_active = 0; + } + prev_buffer_level = s->buffer_level; + + /* Check if a buffer transfer is pending */ + if (s->buffer_level == LM4549_BUFFER_SIZE) { + lm4549_audio_transfer(s); + + /* Request more data */ + if (s->data_req_cb != NULL) { + (s->data_req_cb)(s->opaque); + } + } +} + +uint32_t lm4549_read(lm4549_state *s, target_phys_addr_t offset) +{ + uint16_t *regfile = s->regfile; + uint32_t value = 0; + + /* Read the stored value */ + assert(offset < 128); + 
value = regfile[offset]; + + DPRINTF("read [0x%02x] = 0x%04x\n", offset, value); + + return value; +} + +void lm4549_write(lm4549_state *s, + target_phys_addr_t offset, uint32_t value) +{ + uint16_t *regfile = s->regfile; + + assert(offset < 128); + DPRINTF("write [0x%02x] = 0x%04x\n", offset, value); + + switch (offset) { + case LM4549_Reset: + lm4549_reset(s); + break; + + case LM4549_PCM_Front_DAC_Rate: + regfile[LM4549_PCM_Front_DAC_Rate] = value; + DPRINTF("DAC rate change = %i\n", value); + + /* Re-open a voice with the new sample rate */ + struct audsettings as; + as.freq = value; + as.nchannels = 2; + as.fmt = AUD_FMT_S16; + as.endianness = 0; + + s->voice = AUD_open_out( + &s->card, + s->voice, + "lm4549.out", + s, + lm4549_audio_out_callback, + &as + ); + break; + + case LM4549_Powerdown_Ctrl_Stat: + value &= ~0xf; + value |= regfile[LM4549_Powerdown_Ctrl_Stat] & 0xf; + regfile[LM4549_Powerdown_Ctrl_Stat] = value; + break; + + case LM4549_Ext_Audio_ID: + case LM4549_Vendor_ID1: + case LM4549_Vendor_ID2: + DPRINTF("Write to read-only register 0x%x\n", (int)offset); + break; + + default: + /* Store the new value */ + regfile[offset] = value; + break; + } +} + +uint32_t lm4549_write_samples(lm4549_state *s, uint32_t left, uint32_t right) +{ + /* The left and right samples are in 20-bit resolution. + The LM4549 has 18-bit resolution and only uses the bits [19:2]. + This model supports 16-bit playback. + */ + + if (s->buffer_level >= LM4549_BUFFER_SIZE) { + DPRINTF("write_sample Buffer full\n"); + return 0; + } + + /* Store 16-bit samples in the buffer */ + s->buffer[s->buffer_level++] = (left >> 4); + s->buffer[s->buffer_level++] = (right >> 4); + + if (s->buffer_level == LM4549_BUFFER_SIZE) { + /* Trigger the transfer of the buffer to the audio host */ + lm4549_audio_transfer(s); + } + + return 1; +} + +static int lm4549_post_load(void *opaque, int version_id) +{ + lm4549_state *s = (lm4549_state *)opaque; + uint16_t *regfile = s->regfile; + + /* Re-open a voice with the current sample rate */ + uint32_t freq = regfile[LM4549_PCM_Front_DAC_Rate]; + + DPRINTF("post_load freq = %i\n", freq); + DPRINTF("post_load voice_is_active = %i\n", s->voice_is_active); + + struct audsettings as; + as.freq = freq; + as.nchannels = 2; + as.fmt = AUD_FMT_S16; + as.endianness = 0; + + s->voice = AUD_open_out( + &s->card, + s->voice, + "lm4549.out", + s, + lm4549_audio_out_callback, + &as + ); + + /* Request data */ + if (s->voice_is_active == 1) { + lm4549_audio_out_callback(s, AUD_get_buffer_size_out(s->voice)); + } + + return 0; +} + +void lm4549_init(lm4549_state *s, lm4549_callback data_req_cb, void* opaque) +{ + struct audsettings as; + + /* Store the callback and opaque pointer */ + s->data_req_cb = data_req_cb; + s->opaque = opaque; + + /* Init the registers */ + lm4549_reset(s); + + /* Register an audio card */ + AUD_register_card("lm4549", &s->card); + + /* Open a default voice */ + as.freq = 48000; + as.nchannels = 2; + as.fmt = AUD_FMT_S16; + as.endianness = 0; + + s->voice = AUD_open_out( + &s->card, + s->voice, + "lm4549.out", + s, + lm4549_audio_out_callback, + &as + ); + + AUD_set_volume_out(s->voice, 0, 255, 255); + + s->voice_is_active = 0; + + /* Reset the input buffer */ + memset(s->buffer, 0x00, sizeof(s->buffer)); + s->buffer_level = 0; + +#if defined(LM4549_DUMP_DAC_INPUT) + fp_dac_input = fopen("lm4549_dac_input.pcm", "wb"); + if (!fp_dac_input) { + hw_error("Unable to open lm4549_dac_input.pcm for writing\n"); + } +#endif +} + +const VMStateDescription vmstate_lm4549_state = { 
+ .name = "lm4549_state", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .post_load = &lm4549_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(voice_is_active, lm4549_state), + VMSTATE_UINT16_ARRAY(regfile, lm4549_state, 128), + VMSTATE_UINT16_ARRAY(buffer, lm4549_state, LM4549_BUFFER_SIZE), + VMSTATE_UINT32(buffer_level, lm4549_state), + VMSTATE_END_OF_LIST() + } +}; diff --git a/hw/lm4549.h b/hw/lm4549.h new file mode 100644 index 0000000000..70d0ac1750 --- /dev/null +++ b/hw/lm4549.h @@ -0,0 +1,43 @@ +/* + * LM4549 Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licenced under the GPL. + * + * ***************************************************************** + */ + +#ifndef HW_LM4549_H +#define HW_LM4549_H + +#include "audio/audio.h" + +typedef void (*lm4549_callback)(void *opaque); + +#define LM4549_BUFFER_SIZE (512 * 2) /* 512 16-bit stereo samples */ + + +typedef struct { + QEMUSoundCard card; + SWVoiceOut *voice; + uint32_t voice_is_active; + + uint16_t regfile[128]; + lm4549_callback data_req_cb; + void *opaque; + + uint16_t buffer[LM4549_BUFFER_SIZE]; + uint32_t buffer_level; +} lm4549_state; + +extern const VMStateDescription vmstate_lm4549_state; + + +void lm4549_init(lm4549_state *s, lm4549_callback data_req, void *opaque); +uint32_t lm4549_read(lm4549_state *s, target_phys_addr_t offset); +void lm4549_write(lm4549_state *s, target_phys_addr_t offset, uint32_t value); +uint32_t lm4549_write_samples(lm4549_state *s, uint32_t left, uint32_t right); + +#endif /* #ifndef HW_LM4549_H */ @@ -331,12 +331,12 @@ static void pc_cmos_init_late(void *opaque) void pc_cmos_init(ram_addr_t ram_size, ram_addr_t above_4g_mem_size, const char *boot_device, - BusState *idebus0, BusState *idebus1, + ISADevice *floppy, BusState *idebus0, BusState *idebus1, ISADevice *s) { int val, nb, nb_heads, max_track, last_sect, i; FDriveType fd_type[2]; - DriveInfo *fd[2]; + BlockDriverState *fd[MAX_FD]; static pc_cmos_init_late_arg arg; /* various important CMOS locations needed by PC/Bochs bios */ @@ -378,14 +378,16 @@ void pc_cmos_init(ram_addr_t ram_size, ram_addr_t above_4g_mem_size, } /* floppy type */ - for (i = 0; i < 2; i++) { - fd[i] = drive_get(IF_FLOPPY, 0, i); - if (fd[i] && bdrv_is_inserted(fd[i]->bdrv)) { - bdrv_get_floppy_geometry_hint(fd[i]->bdrv, &nb_heads, &max_track, - &last_sect, FDRIVE_DRV_NONE, - &fd_type[i]); - } else { - fd_type[i] = FDRIVE_DRV_NONE; + if (floppy) { + fdc_get_bs(fd, floppy); + for (i = 0; i < 2; i++) { + if (fd[i] && bdrv_is_inserted(fd[i])) { + bdrv_get_floppy_geometry_hint(fd[i], &nb_heads, &max_track, + &last_sect, FDRIVE_DRV_NONE, + &fd_type[i]); + } else { + fd_type[i] = FDRIVE_DRV_NONE; + } } } val = (cmos_get_fd_drive_type(fd_type[0]) << 4) | @@ -1124,6 +1126,7 @@ static void cpu_request_exit(void *opaque, int irq, int level) void pc_basic_device_init(qemu_irq *gsi, ISADevice **rtc_state, + ISADevice **floppy, bool no_vmport) { int i; @@ -1188,7 +1191,7 @@ void pc_basic_device_init(qemu_irq *gsi, for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } - fdctrl_init_isa(fd); + *floppy = fdctrl_init_isa(fd); } void pc_pci_device_init(PCIBus *pci_bus) @@ -142,11 +142,12 @@ qemu_irq *pc_allocate_cpu_irq(void); void pc_vga_init(PCIBus *pci_bus); void pc_basic_device_init(qemu_irq *gsi, ISADevice **rtc_state, + ISADevice **floppy, bool no_vmport); void pc_init_ne2k_isa(NICInfo *nd); void pc_cmos_init(ram_addr_t ram_size, ram_addr_t 
above_4g_mem_size, const char *boot_device, - BusState *ide0, BusState *ide1, + ISADevice *floppy, BusState *ide0, BusState *ide1, ISADevice *s); void pc_pci_device_init(PCIBus *pci_bus); diff --git a/hw/pc_piix.c b/hw/pc_piix.c index c89042f1ce..8c7f2b7337 100644 --- a/hw/pc_piix.c +++ b/hw/pc_piix.c @@ -95,6 +95,7 @@ static void pc_init1(MemoryRegion *system_memory, DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; BusState *idebus[MAX_IDE_BUS]; ISADevice *rtc_state; + ISADevice *floppy; MemoryRegion *ram_memory; MemoryRegion *pci_memory; MemoryRegion *rom_memory; @@ -174,7 +175,7 @@ static void pc_init1(MemoryRegion *system_memory, } /* init basic PC hardware */ - pc_basic_device_init(gsi, &rtc_state, xen_enabled()); + pc_basic_device_init(gsi, &rtc_state, &floppy, xen_enabled()); for(i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; @@ -207,7 +208,7 @@ static void pc_init1(MemoryRegion *system_memory, audio_init(gsi, pci_enabled ? pci_bus : NULL); pc_cmos_init(below_4g_mem_size, above_4g_mem_size, boot_device, - idebus[0], idebus[1], rtc_state); + floppy, idebus[0], idebus[1], rtc_state); if (pci_enabled && usb_enabled) { usb_uhci_piix3_init(pci_bus, piix3_devfn + 2); diff --git a/hw/pl041.c b/hw/pl041.c new file mode 100644 index 0000000000..efd52ac42f --- /dev/null +++ b/hw/pl041.c @@ -0,0 +1,636 @@ +/* + * Arm PrimeCell PL041 Advanced Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licenced under the GPL. + * + * ***************************************************************** + * + * This driver emulates the ARM AACI interface + * connected to a LM4549 codec. + * + * Limitations: + * - Supports only a playback on one channel (Versatile/Vexpress) + * - Supports only one TX FIFO in compact-mode or non-compact mode. + * - Supports playback of 12, 16, 18 and 20 bits samples. + * - Record is not supported. + * - The PL041 is hardwired to a LM4549 codec. + * + */ + +#include "sysbus.h" + +#include "pl041.h" +#include "lm4549.h" + +#if 0 +#define PL041_DEBUG_LEVEL 1 +#endif + +#if defined(PL041_DEBUG_LEVEL) && (PL041_DEBUG_LEVEL >= 1) +#define DBG_L1(fmt, ...) \ +do { printf("pl041: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DBG_L1(fmt, ...) \ +do { } while (0) +#endif + +#if defined(PL041_DEBUG_LEVEL) && (PL041_DEBUG_LEVEL >= 2) +#define DBG_L2(fmt, ...) \ +do { printf("pl041: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DBG_L2(fmt, ...) \ +do { } while (0) +#endif + + +#define MAX_FIFO_DEPTH (1024) +#define DEFAULT_FIFO_DEPTH (8) + +#define SLOT1_RW (1 << 19) + +/* This FIFO only stores 20-bit samples on 32-bit words. 
+ So its level is independent of the selected mode */ +typedef struct { + uint32_t level; + uint32_t data[MAX_FIFO_DEPTH]; +} pl041_fifo; + +typedef struct { + pl041_fifo tx_fifo; + uint8_t tx_enabled; + uint8_t tx_compact_mode; + uint8_t tx_sample_size; + + pl041_fifo rx_fifo; + uint8_t rx_enabled; + uint8_t rx_compact_mode; + uint8_t rx_sample_size; +} pl041_channel; + +typedef struct { + SysBusDevice busdev; + MemoryRegion iomem; + qemu_irq irq; + + uint32_t fifo_depth; /* FIFO depth in non-compact mode */ + + pl041_regfile regs; + pl041_channel fifo1; + lm4549_state codec; +} pl041_state; + + +static const unsigned char pl041_default_id[8] = { + 0x41, 0x10, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; + +#if defined(PL041_DEBUG_LEVEL) +#define REGISTER(name, offset) #name, +static const char *pl041_regs_name[] = { + #include "pl041.hx" +}; +#undef REGISTER +#endif + + +#if defined(PL041_DEBUG_LEVEL) +static const char *get_reg_name(target_phys_addr_t offset) +{ + if (offset <= PL041_dr1_7) { + return pl041_regs_name[offset >> 2]; + } + + return "unknown"; +} +#endif + +static uint8_t pl041_compute_periphid3(pl041_state *s) +{ + uint8_t id3 = 1; /* One channel */ + + /* Add the fifo depth information */ + switch (s->fifo_depth) { + case 8: + id3 |= 0 << 3; + break; + case 32: + id3 |= 1 << 3; + break; + case 64: + id3 |= 2 << 3; + break; + case 128: + id3 |= 3 << 3; + break; + case 256: + id3 |= 4 << 3; + break; + case 512: + id3 |= 5 << 3; + break; + case 1024: + id3 |= 6 << 3; + break; + case 2048: + id3 |= 7 << 3; + break; + } + + return id3; +} + +static void pl041_reset(pl041_state *s) +{ + DBG_L1("pl041_reset\n"); + + memset(&s->regs, 0x00, sizeof(pl041_regfile)); + + s->regs.slfr = SL1TXEMPTY | SL2TXEMPTY | SL12TXEMPTY; + s->regs.sr1 = TXFE | RXFE | TXHE; + s->regs.isr1 = 0; + + memset(&s->fifo1, 0x00, sizeof(s->fifo1)); +} + + +static void pl041_fifo1_write(pl041_state *s, uint32_t value) +{ + pl041_channel *channel = &s->fifo1; + pl041_fifo *fifo = &s->fifo1.tx_fifo; + + /* Push the value in the FIFO */ + if (channel->tx_compact_mode == 0) { + /* Non-compact mode */ + + if (fifo->level < s->fifo_depth) { + /* Pad the value with 0 to obtain a 20-bit sample */ + switch (channel->tx_sample_size) { + case 12: + value = (value << 8) & 0xFFFFF; + break; + case 16: + value = (value << 4) & 0xFFFFF; + break; + case 18: + value = (value << 2) & 0xFFFFF; + break; + case 20: + default: + break; + } + + /* Store the sample in the FIFO */ + fifo->data[fifo->level++] = value; + } +#if defined(PL041_DEBUG_LEVEL) + else { + DBG_L1("fifo1 write: overrun\n"); + } +#endif + } else { + /* Compact mode */ + + if ((fifo->level + 2) < s->fifo_depth) { + uint32_t i = 0; + uint32_t sample = 0; + + for (i = 0; i < 2; i++) { + sample = value & 0xFFFF; + value = value >> 16; + + /* Pad each sample with 0 to obtain a 20-bit sample */ + switch (channel->tx_sample_size) { + case 12: + sample = sample << 8; + break; + case 16: + default: + sample = sample << 4; + break; + } + + /* Store the sample in the FIFO */ + fifo->data[fifo->level++] = sample; + } + } +#if defined(PL041_DEBUG_LEVEL) + else { + DBG_L1("fifo1 write: overrun\n"); + } +#endif + } + + /* Update the status register */ + if (fifo->level > 0) { + s->regs.sr1 &= ~(TXUNDERRUN | TXFE); + } + + if (fifo->level >= (s->fifo_depth / 2)) { + s->regs.sr1 &= ~TXHE; + } + + if (fifo->level >= s->fifo_depth) { + s->regs.sr1 |= TXFF; + } + + DBG_L2("fifo1_push sr1 = 0x%08x\n", s->regs.sr1); +} + +static void pl041_fifo1_transmit(pl041_state *s) +{ + pl041_channel 
*channel = &s->fifo1; + pl041_fifo *fifo = &s->fifo1.tx_fifo; + uint32_t slots = s->regs.txcr1 & TXSLOT_MASK; + uint32_t written_samples; + + /* Check if FIFO1 transmit is enabled */ + if ((channel->tx_enabled) && (slots & (TXSLOT3 | TXSLOT4))) { + if (fifo->level >= (s->fifo_depth / 2)) { + int i; + + DBG_L1("Transfer FIFO level = %i\n", fifo->level); + + /* Try to transfer the whole FIFO */ + for (i = 0; i < (fifo->level / 2); i++) { + uint32_t left = fifo->data[i * 2]; + uint32_t right = fifo->data[i * 2 + 1]; + + /* Transmit two 20-bit samples to the codec */ + if (lm4549_write_samples(&s->codec, left, right) == 0) { + DBG_L1("Codec buffer full\n"); + break; + } + } + + written_samples = i * 2; + if (written_samples > 0) { + /* Update the FIFO level */ + fifo->level -= written_samples; + + /* Move back the pending samples to the start of the FIFO */ + for (i = 0; i < fifo->level; i++) { + fifo->data[i] = fifo->data[written_samples + i]; + } + + /* Update the status register */ + s->regs.sr1 &= ~TXFF; + + if (fifo->level <= (s->fifo_depth / 2)) { + s->regs.sr1 |= TXHE; + } + + if (fifo->level == 0) { + s->regs.sr1 |= TXFE | TXUNDERRUN; + DBG_L1("Empty FIFO\n"); + } + } + } + } +} + +static void pl041_isr1_update(pl041_state *s) +{ + /* Update ISR1 */ + if (s->regs.sr1 & TXUNDERRUN) { + s->regs.isr1 |= URINTR; + } else { + s->regs.isr1 &= ~URINTR; + } + + if (s->regs.sr1 & TXHE) { + s->regs.isr1 |= TXINTR; + } else { + s->regs.isr1 &= ~TXINTR; + } + + if (!(s->regs.sr1 & TXBUSY) && (s->regs.sr1 & TXFE)) { + s->regs.isr1 |= TXCINTR; + } else { + s->regs.isr1 &= ~TXCINTR; + } + + /* Update the irq state */ + qemu_set_irq(s->irq, ((s->regs.isr1 & s->regs.ie1) > 0) ? 1 : 0); + DBG_L2("Set interrupt sr1 = 0x%08x isr1 = 0x%08x masked = 0x%08x\n", + s->regs.sr1, s->regs.isr1, s->regs.isr1 & s->regs.ie1); +} + +static void pl041_request_data(void *opaque) +{ + pl041_state *s = (pl041_state *)opaque; + + /* Trigger pending transfers */ + pl041_fifo1_transmit(s); + pl041_isr1_update(s); +} + +static uint64_t pl041_read(void *opaque, target_phys_addr_t offset, + unsigned size) +{ + pl041_state *s = (pl041_state *)opaque; + int value; + + if ((offset >= PL041_periphid0) && (offset <= PL041_pcellid3)) { + if (offset == PL041_periphid3) { + value = pl041_compute_periphid3(s); + } else { + value = pl041_default_id[(offset - PL041_periphid0) >> 2]; + } + + DBG_L1("pl041_read [0x%08x] => 0x%08x\n", offset, value); + return value; + } else if (offset <= PL041_dr4_7) { + value = *((uint32_t *)&s->regs + (offset >> 2)); + } else { + DBG_L1("pl041_read: Reserved offset %x\n", (int)offset); + return 0; + } + + switch (offset) { + case PL041_allints: + value = s->regs.isr1 & 0x7F; + break; + } + + DBG_L1("pl041_read [0x%08x] %s => 0x%08x\n", offset, + get_reg_name(offset), value); + + return value; +} + +static void pl041_write(void *opaque, target_phys_addr_t offset, + uint64_t value, unsigned size) +{ + pl041_state *s = (pl041_state *)opaque; + uint16_t control, data; + uint32_t result; + + DBG_L1("pl041_write [0x%08x] %s <= 0x%08x\n", offset, + get_reg_name(offset), (unsigned int)value); + + /* Write the register */ + if (offset <= PL041_dr4_7) { + *((uint32_t *)&s->regs + (offset >> 2)) = value; + } else { + DBG_L1("pl041_write: Reserved offset %x\n", (int)offset); + return; + } + + /* Execute the actions */ + switch (offset) { + case PL041_txcr1: + { + pl041_channel *channel = &s->fifo1; + + uint32_t txen = s->regs.txcr1 & TXEN; + uint32_t tsize = (s->regs.txcr1 & TSIZE_MASK) >> TSIZE_MASK_BIT; + uint32_t 
compact_mode = (s->regs.txcr1 & TXCOMPACT) ? 1 : 0; +#if defined(PL041_DEBUG_LEVEL) + uint32_t slots = (s->regs.txcr1 & TXSLOT_MASK) >> TXSLOT_MASK_BIT; + uint32_t txfen = (s->regs.txcr1 & TXFEN) > 0 ? 1 : 0; +#endif + + DBG_L1("=> txen = %i slots = 0x%01x tsize = %i compact = %i " + "txfen = %i\n", txen, slots, tsize, compact_mode, txfen); + + channel->tx_enabled = txen; + channel->tx_compact_mode = compact_mode; + + switch (tsize) { + case 0: + channel->tx_sample_size = 16; + break; + case 1: + channel->tx_sample_size = 18; + break; + case 2: + channel->tx_sample_size = 20; + break; + case 3: + channel->tx_sample_size = 12; + break; + } + + DBG_L1("TX enabled = %i\n", channel->tx_enabled); + DBG_L1("TX compact mode = %i\n", channel->tx_compact_mode); + DBG_L1("TX sample width = %i\n", channel->tx_sample_size); + + /* Check if compact mode is allowed with selected tsize */ + if (channel->tx_compact_mode == 1) { + if ((channel->tx_sample_size == 18) || + (channel->tx_sample_size == 20)) { + channel->tx_compact_mode = 0; + DBG_L1("Compact mode not allowed with 18/20-bit sample size\n"); + } + } + + break; + } + case PL041_sl1tx: + s->regs.slfr &= ~SL1TXEMPTY; + + control = (s->regs.sl1tx >> 12) & 0x7F; + data = (s->regs.sl2tx >> 4) & 0xFFFF; + + if ((s->regs.sl1tx & SLOT1_RW) == 0) { + /* Write operation */ + lm4549_write(&s->codec, control, data); + } else { + /* Read operation */ + result = lm4549_read(&s->codec, control); + + /* Store the returned value */ + s->regs.sl1rx = s->regs.sl1tx & ~SLOT1_RW; + s->regs.sl2rx = result << 4; + + s->regs.slfr &= ~(SL1RXBUSY | SL2RXBUSY); + s->regs.slfr |= SL1RXVALID | SL2RXVALID; + } + break; + + case PL041_sl2tx: + s->regs.sl2tx = value; + s->regs.slfr &= ~SL2TXEMPTY; + break; + + case PL041_intclr: + DBG_L1("=> Clear interrupt intclr = 0x%08x isr1 = 0x%08x\n", + s->regs.intclr, s->regs.isr1); + + if (s->regs.intclr & TXUEC1) { + s->regs.sr1 &= ~TXUNDERRUN; + } + break; + + case PL041_maincr: + { +#if defined(PL041_DEBUG_LEVEL) + char debug[] = " AACIFE SL1RXEN SL1TXEN"; + if (!(value & AACIFE)) { + debug[0] = '!'; + } + if (!(value & SL1RXEN)) { + debug[8] = '!'; + } + if (!(value & SL1TXEN)) { + debug[17] = '!'; + } + DBG_L1("%s\n", debug); +#endif + + if ((s->regs.maincr & AACIFE) == 0) { + pl041_reset(s); + } + break; + } + + case PL041_dr1_0: + case PL041_dr1_1: + case PL041_dr1_2: + case PL041_dr1_3: + pl041_fifo1_write(s, value); + break; + } + + /* Transmit the FIFO content */ + pl041_fifo1_transmit(s); + + /* Update the ISR1 register */ + pl041_isr1_update(s); +} + +static void pl041_device_reset(DeviceState *d) +{ + pl041_state *s = DO_UPCAST(pl041_state, busdev.qdev, d); + + pl041_reset(s); +} + +static const MemoryRegionOps pl041_ops = { + .read = pl041_read, + .write = pl041_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int pl041_init(SysBusDevice *dev) +{ + pl041_state *s = FROM_SYSBUS(pl041_state, dev); + + DBG_L1("pl041_init 0x%08x\n", (uint32_t)s); + + /* Check the device properties */ + switch (s->fifo_depth) { + case 8: + case 32: + case 64: + case 128: + case 256: + case 512: + case 1024: + case 2048: + break; + case 16: + default: + /* NC FIFO depth of 16 is not allowed because its id bits in + AACIPERIPHID3 overlap with the id for the default NC FIFO depth */ + fprintf(stderr, "pl041: unsupported non-compact fifo depth [%i]\n", + s->fifo_depth); + return -1; + } + + /* Connect the device to the sysbus */ + memory_region_init_io(&s->iomem, &pl041_ops, s, "pl041", 0x1000); + sysbus_init_mmio_region(dev, &s->iomem); 
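For context on the wiring just below (descriptive note, not part of the patch): pl041_init() hands pl041_request_data() to lm4549_init() as the codec's data-request callback, so when the LM4549 model has flushed its internal sample buffer to the audio backend it calls back into the PL041, which refills the codec from FIFO1 via pl041_fifo1_transmit() and then recomputes the interrupt state with pl041_isr1_update().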
+ sysbus_init_irq(dev, &s->irq); + + /* Init the codec */ + lm4549_init(&s->codec, &pl041_request_data, (void *)s); + + return 0; +} + +static const VMStateDescription vmstate_pl041_regfile = { + .name = "pl041_regfile", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { +#define REGISTER(name, offset) VMSTATE_UINT32(name, pl041_regfile), + #include "pl041.hx" +#undef REGISTER + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl041_fifo = { + .name = "pl041_fifo", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(level, pl041_fifo), + VMSTATE_UINT32_ARRAY(data, pl041_fifo, MAX_FIFO_DEPTH), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl041_channel = { + .name = "pl041_channel", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(tx_fifo, pl041_channel, 0, + vmstate_pl041_fifo, pl041_fifo), + VMSTATE_UINT8(tx_enabled, pl041_channel), + VMSTATE_UINT8(tx_compact_mode, pl041_channel), + VMSTATE_UINT8(tx_sample_size, pl041_channel), + VMSTATE_STRUCT(rx_fifo, pl041_channel, 0, + vmstate_pl041_fifo, pl041_fifo), + VMSTATE_UINT8(rx_enabled, pl041_channel), + VMSTATE_UINT8(rx_compact_mode, pl041_channel), + VMSTATE_UINT8(rx_sample_size, pl041_channel), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl041 = { + .name = "pl041", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(fifo_depth, pl041_state), + VMSTATE_STRUCT(regs, pl041_state, 0, + vmstate_pl041_regfile, pl041_regfile), + VMSTATE_STRUCT(fifo1, pl041_state, 0, + vmstate_pl041_channel, pl041_channel), + VMSTATE_STRUCT(codec, pl041_state, 0, + vmstate_lm4549_state, lm4549_state), + VMSTATE_END_OF_LIST() + } +}; + +static SysBusDeviceInfo pl041_device_info = { + .init = pl041_init, + .qdev.name = "pl041", + .qdev.size = sizeof(pl041_state), + .qdev.vmsd = &vmstate_pl041, + .qdev.reset = pl041_device_reset, + .qdev.no_user = 1, + .qdev.props = (Property[]) { + /* Non-compact FIFO depth property */ + DEFINE_PROP_UINT32("nc_fifo_depth", pl041_state, + fifo_depth, DEFAULT_FIFO_DEPTH), + DEFINE_PROP_END_OF_LIST(), + }, +}; + +static void pl041_register_device(void) +{ + sysbus_register_withprop(&pl041_device_info); +} + +device_init(pl041_register_device) diff --git a/hw/pl041.h b/hw/pl041.h new file mode 100644 index 0000000000..1f224326e5 --- /dev/null +++ b/hw/pl041.h @@ -0,0 +1,135 @@ +/* + * Arm PrimeCell PL041 Advanced Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licenced under the GPL. 
+ * + * ***************************************************************** + */ + +#ifndef HW_PL041_H +#define HW_PL041_H + +/* Register file */ +#define REGISTER(name, offset) uint32_t name; +typedef struct { + #include "pl041.hx" +} pl041_regfile; +#undef REGISTER + +/* Register addresses */ +#define REGISTER(name, offset) PL041_##name = offset, +enum { + #include "pl041.hx" + + PL041_periphid0 = 0xFE0, + PL041_periphid1 = 0xFE4, + PL041_periphid2 = 0xFE8, + PL041_periphid3 = 0xFEC, + PL041_pcellid0 = 0xFF0, + PL041_pcellid1 = 0xFF4, + PL041_pcellid2 = 0xFF8, + PL041_pcellid3 = 0xFFC, +}; +#undef REGISTER + +/* Register bits */ + +/* IEx */ +#define TXCIE (1 << 0) +#define RXTIE (1 << 1) +#define TXIE (1 << 2) +#define RXIE (1 << 3) +#define RXOIE (1 << 4) +#define TXUIE (1 << 5) +#define RXTOIE (1 << 6) + +/* TXCRx */ +#define TXEN (1 << 0) +#define TXSLOT1 (1 << 1) +#define TXSLOT2 (1 << 2) +#define TXSLOT3 (1 << 3) +#define TXSLOT4 (1 << 4) +#define TXCOMPACT (1 << 15) +#define TXFEN (1 << 16) + +#define TXSLOT_MASK_BIT (1) +#define TXSLOT_MASK (0xFFF << TXSLOT_MASK_BIT) + +#define TSIZE_MASK_BIT (13) +#define TSIZE_MASK (0x3 << TSIZE_MASK_BIT) + +#define TSIZE_16BITS (0x0 << TSIZE_MASK_BIT) +#define TSIZE_18BITS (0x1 << TSIZE_MASK_BIT) +#define TSIZE_20BITS (0x2 << TSIZE_MASK_BIT) +#define TSIZE_12BITS (0x3 << TSIZE_MASK_BIT) + +/* SRx */ +#define RXFE (1 << 0) +#define TXFE (1 << 1) +#define RXHF (1 << 2) +#define TXHE (1 << 3) +#define RXFF (1 << 4) +#define TXFF (1 << 5) +#define RXBUSY (1 << 6) +#define TXBUSY (1 << 7) +#define RXOVERRUN (1 << 8) +#define TXUNDERRUN (1 << 9) +#define RXTIMEOUT (1 << 10) +#define RXTOFE (1 << 11) + +/* ISRx */ +#define TXCINTR (1 << 0) +#define RXTOINTR (1 << 1) +#define TXINTR (1 << 2) +#define RXINTR (1 << 3) +#define ORINTR (1 << 4) +#define URINTR (1 << 5) +#define RXTOFEINTR (1 << 6) + +/* SLFR */ +#define SL1RXBUSY (1 << 0) +#define SL1TXBUSY (1 << 1) +#define SL2RXBUSY (1 << 2) +#define SL2TXBUSY (1 << 3) +#define SL12RXBUSY (1 << 4) +#define SL12TXBUSY (1 << 5) +#define SL1RXVALID (1 << 6) +#define SL1TXEMPTY (1 << 7) +#define SL2RXVALID (1 << 8) +#define SL2TXEMPTY (1 << 9) +#define SL12RXVALID (1 << 10) +#define SL12TXEMPTY (1 << 11) +#define RAWGPIOINT (1 << 12) +#define RWIS (1 << 13) + +/* MAINCR */ +#define AACIFE (1 << 0) +#define LOOPBACK (1 << 1) +#define LOWPOWER (1 << 2) +#define SL1RXEN (1 << 3) +#define SL1TXEN (1 << 4) +#define SL2RXEN (1 << 5) +#define SL2TXEN (1 << 6) +#define SL12RXEN (1 << 7) +#define SL12TXEN (1 << 8) +#define DMAENABLE (1 << 9) + +/* INTCLR */ +#define WISC (1 << 0) +#define RXOEC1 (1 << 1) +#define RXOEC2 (1 << 2) +#define RXOEC3 (1 << 3) +#define RXOEC4 (1 << 4) +#define TXUEC1 (1 << 5) +#define TXUEC2 (1 << 6) +#define TXUEC3 (1 << 7) +#define TXUEC4 (1 << 8) +#define RXTOFEC1 (1 << 9) +#define RXTOFEC2 (1 << 10) +#define RXTOFEC3 (1 << 11) +#define RXTOFEC4 (1 << 12) + +#endif /* #ifndef HW_PL041_H */ diff --git a/hw/pl041.hx b/hw/pl041.hx new file mode 100644 index 0000000000..e972996725 --- /dev/null +++ b/hw/pl041.hx @@ -0,0 +1,81 @@ +/* + * Arm PrimeCell PL041 Advanced Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licenced under the GPL. 
+ * + * ***************************************************************** + */ + +/* PL041 register file description */ + +REGISTER( rxcr1, 0x00 ) +REGISTER( txcr1, 0x04 ) +REGISTER( sr1, 0x08 ) +REGISTER( isr1, 0x0C ) +REGISTER( ie1, 0x10 ) +REGISTER( rxcr2, 0x14 ) +REGISTER( txcr2, 0x18 ) +REGISTER( sr2, 0x1C ) +REGISTER( isr2, 0x20 ) +REGISTER( ie2, 0x24 ) +REGISTER( rxcr3, 0x28 ) +REGISTER( txcr3, 0x2C ) +REGISTER( sr3, 0x30 ) +REGISTER( isr3, 0x34 ) +REGISTER( ie3, 0x38 ) +REGISTER( rxcr4, 0x3C ) +REGISTER( txcr4, 0x40 ) +REGISTER( sr4, 0x44 ) +REGISTER( isr4, 0x48 ) +REGISTER( ie4, 0x4C ) +REGISTER( sl1rx, 0x50 ) +REGISTER( sl1tx, 0x54 ) +REGISTER( sl2rx, 0x58 ) +REGISTER( sl2tx, 0x5C ) +REGISTER( sl12rx, 0x60 ) +REGISTER( sl12tx, 0x64 ) +REGISTER( slfr, 0x68 ) +REGISTER( slistat, 0x6C ) +REGISTER( slien, 0x70 ) +REGISTER( intclr, 0x74 ) +REGISTER( maincr, 0x78 ) +REGISTER( reset, 0x7C ) +REGISTER( sync, 0x80 ) +REGISTER( allints, 0x84 ) +REGISTER( mainfr, 0x88 ) +REGISTER( unused, 0x8C ) +REGISTER( dr1_0, 0x90 ) +REGISTER( dr1_1, 0x94 ) +REGISTER( dr1_2, 0x98 ) +REGISTER( dr1_3, 0x9C ) +REGISTER( dr1_4, 0xA0 ) +REGISTER( dr1_5, 0xA4 ) +REGISTER( dr1_6, 0xA8 ) +REGISTER( dr1_7, 0xAC ) +REGISTER( dr2_0, 0xB0 ) +REGISTER( dr2_1, 0xB4 ) +REGISTER( dr2_2, 0xB8 ) +REGISTER( dr2_3, 0xBC ) +REGISTER( dr2_4, 0xC0 ) +REGISTER( dr2_5, 0xC4 ) +REGISTER( dr2_6, 0xC8 ) +REGISTER( dr2_7, 0xCC ) +REGISTER( dr3_0, 0xD0 ) +REGISTER( dr3_1, 0xD4 ) +REGISTER( dr3_2, 0xD8 ) +REGISTER( dr3_3, 0xDC ) +REGISTER( dr3_4, 0xE0 ) +REGISTER( dr3_5, 0xE4 ) +REGISTER( dr3_6, 0xE8 ) +REGISTER( dr3_7, 0xEC ) +REGISTER( dr4_0, 0xF0 ) +REGISTER( dr4_1, 0xF4 ) +REGISTER( dr4_2, 0xF8 ) +REGISTER( dr4_3, 0xFC ) +REGISTER( dr4_4, 0x100 ) +REGISTER( dr4_5, 0x104 ) +REGISTER( dr4_6, 0x108 ) +REGISTER( dr4_7, 0x10C ) @@ -18,8 +18,6 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ -#include <pthread.h> - #include "qemu-common.h" #include "qemu-timer.h" #include "qemu-queue.h" @@ -238,6 +236,9 @@ void qxl_spice_reset_image_cache(PCIQXLDevice *qxl) void qxl_spice_reset_cursor(PCIQXLDevice *qxl) { qxl->ssd.worker->reset_cursor(qxl->ssd.worker); + qemu_mutex_lock(&qxl->track_lock); + qxl->guest_cursor = 0; + qemu_mutex_unlock(&qxl->track_lock); } @@ -330,6 +331,7 @@ static void init_qxl_ram(PCIQXLDevice *d) d->ram->magic = cpu_to_le32(QXL_RAM_MAGIC); d->ram->int_pending = cpu_to_le32(0); d->ram->int_mask = cpu_to_le32(0); + d->ram->update_surface = 0; SPICE_RING_INIT(&d->ram->cmd_ring); SPICE_RING_INIT(&d->ram->cursor_ring); SPICE_RING_INIT(&d->ram->release_ring); @@ -402,7 +404,9 @@ static void qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext) { QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); if (cmd->type == QXL_CURSOR_SET) { + qemu_mutex_lock(&qxl->track_lock); qxl->guest_cursor = ext->cmd.data; + qemu_mutex_unlock(&qxl->track_lock); } break; } @@ -1067,6 +1071,7 @@ static int qxl_destroy_primary(PCIQXLDevice *d, qxl_async_io async) d->mode = QXL_MODE_UNDEFINED; qemu_spice_destroy_primary_surface(&d->ssd, 0, async); + qxl_spice_reset_cursor(d); return 1; } @@ -1215,10 +1220,6 @@ async_common: if (!SPICE_RING_IS_EMPTY(&d->ram->release_ring)) { break; } - pthread_yield(); - if (!SPICE_RING_IS_EMPTY(&d->ram->release_ring)) { - break; - } d->oom_running = 1; qxl_spice_oom(d); d->oom_running = 0; @@ -1372,7 +1373,7 @@ static void qxl_send_events(PCIQXLDevice *d, uint32_t events) if ((old_pending & le_events) == le_events) { return; } - if (pthread_self() == d->main) { + if (qemu_thread_is_self(&d->main)) { qxl_update_irq(d); } else { if (write(d->pipe[1], d, 1) != 1) { @@ -1391,7 +1392,7 @@ static void init_pipe_signaling(PCIQXLDevice *d) fcntl(d->pipe[1], F_SETFL, O_NONBLOCK); fcntl(d->pipe[0], F_SETOWN, getpid()); - d->main = pthread_self(); + qemu_thread_get_self(&d->main); qemu_set_fd_handler(d->pipe[0], pipe_read, NULL, d); } @@ -1710,10 +1711,12 @@ static int qxl_post_load(void *opaque, int version) cmds[out].group_id = MEMSLOT_GROUP_GUEST; out++; } - cmds[out].cmd.data = d->guest_cursor; - cmds[out].cmd.type = QXL_CMD_CURSOR; - cmds[out].group_id = MEMSLOT_GROUP_GUEST; - out++; + if (d->guest_cursor) { + cmds[out].cmd.data = d->guest_cursor; + cmds[out].cmd.type = QXL_CMD_CURSOR; + cmds[out].group_id = MEMSLOT_GROUP_GUEST; + out++; + } qxl_spice_loadvm_commands(d, cmds, out); g_free(cmds); @@ -1787,6 +1790,19 @@ static VMStateDescription qxl_vmstate = { }, }; +static Property qxl_properties[] = { + DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, + 64 * 1024 * 1024), + DEFINE_PROP_UINT32("vram_size", PCIQXLDevice, vram_size, + 64 * 1024 * 1024), + DEFINE_PROP_UINT32("revision", PCIQXLDevice, revision, + QXL_DEFAULT_REVISION), + DEFINE_PROP_UINT32("debug", PCIQXLDevice, debug, 0), + DEFINE_PROP_UINT32("guestdebug", PCIQXLDevice, guestdebug, 0), + DEFINE_PROP_UINT32("cmdlog", PCIQXLDevice, cmdlog, 0), + DEFINE_PROP_END_OF_LIST(), +}; + static PCIDeviceInfo qxl_info_primary = { .qdev.name = "qxl-vga", .qdev.desc = "Spice QXL GPU (primary, vga compatible)", @@ -1799,18 +1815,7 @@ static PCIDeviceInfo qxl_info_primary = { .vendor_id = REDHAT_PCI_VENDOR_ID, .device_id = QXL_DEVICE_ID_STABLE, .class_id = PCI_CLASS_DISPLAY_VGA, - .qdev.props = (Property[]) { - DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, - 64 * 1024 * 1024), - DEFINE_PROP_UINT32("vram_size", PCIQXLDevice, vram_size, - 64 * 1024 * 1024), - 
DEFINE_PROP_UINT32("revision", PCIQXLDevice, revision, - QXL_DEFAULT_REVISION), - DEFINE_PROP_UINT32("debug", PCIQXLDevice, debug, 0), - DEFINE_PROP_UINT32("guestdebug", PCIQXLDevice, guestdebug, 0), - DEFINE_PROP_UINT32("cmdlog", PCIQXLDevice, cmdlog, 0), - DEFINE_PROP_END_OF_LIST(), - } + .qdev.props = qxl_properties, }; static PCIDeviceInfo qxl_info_secondary = { @@ -1823,18 +1828,7 @@ static PCIDeviceInfo qxl_info_secondary = { .vendor_id = REDHAT_PCI_VENDOR_ID, .device_id = QXL_DEVICE_ID_STABLE, .class_id = PCI_CLASS_DISPLAY_OTHER, - .qdev.props = (Property[]) { - DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, - 64 * 1024 * 1024), - DEFINE_PROP_UINT32("vram_size", PCIQXLDevice, vram_size, - 64 * 1024 * 1024), - DEFINE_PROP_UINT32("revision", PCIQXLDevice, revision, - QXL_DEFAULT_REVISION), - DEFINE_PROP_UINT32("debug", PCIQXLDevice, debug, 0), - DEFINE_PROP_UINT32("guestdebug", PCIQXLDevice, guestdebug, 0), - DEFINE_PROP_UINT32("cmdlog", PCIQXLDevice, cmdlog, 0), - DEFINE_PROP_END_OF_LIST(), - } + .qdev.props = qxl_properties, }; static void qxl_register(void) @@ -4,6 +4,7 @@ #include "hw.h" #include "pci.h" #include "vga_int.h" +#include "qemu-thread.h" #include "ui/qemu-spice.h" #include "ui/spice-display.h" @@ -63,7 +64,7 @@ typedef struct PCIQXLDevice { QemuMutex track_lock; /* thread signaling */ - pthread_t main; + QemuThread main; int pipe[2]; /* ram pci bar */ diff --git a/hw/realview.c b/hw/realview.c index 14281b0f06..9a8e63c8f5 100644 --- a/hw/realview.c +++ b/hw/realview.c @@ -125,7 +125,7 @@ static void realview_init(ram_addr_t ram_size, MemoryRegion *ram_hi = g_new(MemoryRegion, 1); MemoryRegion *ram_alias = g_new(MemoryRegion, 1); MemoryRegion *ram_hack = g_new(MemoryRegion, 1); - DeviceState *dev, *sysctl, *gpio2; + DeviceState *dev, *sysctl, *gpio2, *pl041; SysBusDevice *busdev; qemu_irq *irqp; qemu_irq pic[64]; @@ -232,6 +232,12 @@ static void realview_init(ram_addr_t ram_size, pic[n] = qdev_get_gpio_in(dev, n); } + pl041 = qdev_create(NULL, "pl041"); + qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512); + qdev_init_nofail(pl041); + sysbus_mmio_map(sysbus_from_qdev(pl041), 0, 0x10004000); + sysbus_connect_irq(sysbus_from_qdev(pl041), 0, pic[19]); + sysbus_create_simple("pl050_keyboard", 0x10006000, pic[20]); sysbus_create_simple("pl050_mouse", 0x10007000, pic[21]); diff --git a/hw/versatilepb.c b/hw/versatilepb.c index 68402cc479..6370600bb3 100644 --- a/hw/versatilepb.c +++ b/hw/versatilepb.c @@ -182,6 +182,7 @@ static void versatile_init(ram_addr_t ram_size, qemu_irq sic[32]; DeviceState *dev, *sysctl; SysBusDevice *busdev; + DeviceState *pl041; PCIBus *pci_bus; NICInfo *nd; int n; @@ -273,6 +274,13 @@ static void versatile_init(ram_addr_t ram_size, /* Add PL031 Real Time Clock. */ sysbus_create_simple("pl031", 0x101e8000, pic[10]); + /* Add PL041 AACI Interface to the LM4549 codec */ + pl041 = qdev_create(NULL, "pl041"); + qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512); + qdev_init_nofail(pl041); + sysbus_mmio_map(sysbus_from_qdev(pl041), 0, 0x10004000); + sysbus_connect_irq(sysbus_from_qdev(pl041), 0, sic[24]); + /* Memory map for Versatile/PB: */ /* 0x10000000 System registers. */ /* 0x10001000 PCI controller config registers. 
*/ diff --git a/hw/vexpress.c b/hw/vexpress.c index c9766dd0c4..0940a26d73 100644 --- a/hw/vexpress.c +++ b/hw/vexpress.c @@ -41,7 +41,7 @@ static void vexpress_a9_init(ram_addr_t ram_size, { CPUState *env = NULL; ram_addr_t ram_offset, vram_offset, sram_offset; - DeviceState *dev, *sysctl; + DeviceState *dev, *sysctl, *pl041; SysBusDevice *busdev; qemu_irq *irqp; qemu_irq pic[64]; @@ -118,6 +118,11 @@ static void vexpress_a9_init(ram_addr_t ram_size, /* 0x10001000 SP810 system control */ /* 0x10002000 serial bus PCI */ /* 0x10004000 PL041 audio */ + pl041 = qdev_create(NULL, "pl041"); + qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512); + qdev_init_nofail(pl041); + sysbus_mmio_map(sysbus_from_qdev(pl041), 0, 0x10004000); + sysbus_connect_irq(sysbus_from_qdev(pl041), 0, pic[11]); dev = sysbus_create_varargs("pl181", 0x10005000, pic[9], pic[10], NULL); /* Wire up MMC card detect and read-only signals */ diff --git a/hw/xen_disk.c b/hw/xen_disk.c index 8a9fac499b..286bbac54a 100644 --- a/hw/xen_disk.c +++ b/hw/xen_disk.c @@ -620,7 +620,7 @@ static void blk_alloc(struct XenDevice *xendev) static int blk_init(struct XenDevice *xendev) { struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); - int index, qflags, have_barriers, info = 0; + int index, qflags, info = 0; /* read xenstore entries */ if (blkdev->params == NULL) { @@ -706,7 +706,6 @@ static int blk_init(struct XenDevice *xendev) blkdev->bs->drv ? blkdev->bs->drv->format_name : "-"); blkdev->file_size = 0; } - have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 1 : 0; xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\"," " size %" PRId64 " (%" PRId64 " MB)\n", @@ -714,7 +713,7 @@ static int blk_init(struct XenDevice *xendev) blkdev->file_size, blkdev->file_size >> 20); /* fill info */ - xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers); + xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1); xenstore_write_be_int(&blkdev->xendev, "info", info); xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk); xenstore_write_be_int(&blkdev->xendev, "sectors", diff --git a/libcacard/cac.c b/libcacard/cac.c index f4b0b1b057..927a4ca296 100644 --- a/libcacard/cac.c +++ b/libcacard/cac.c @@ -266,7 +266,8 @@ static void cac_delete_pki_applet_private(VCardAppletPrivate *applet_private) { CACPKIAppletData *pki_applet_data = NULL; - if (pki_applet_data == NULL) { + + if (applet_private == NULL) { return; } pki_applet_data = &(applet_private->u.pki_data); diff --git a/libcacard/card_7816.c b/libcacard/card_7816.c index 9fd59d4a5f..6fe27d5631 100644 --- a/libcacard/card_7816.c +++ b/libcacard/card_7816.c @@ -754,7 +754,7 @@ vcard_process_apdu(VCard *card, VCardAPDU *apdu, VCardResponse **response) return vcard7816_vm_process_apdu(card, apdu, response); case VCARD_DIRECT: /* if we are type direct, then the applet should handle everything */ - assert("VCARD_DIRECT: applet failure"); + assert(!"VCARD_DIRECT: applet failure"); break; } *response = diff --git a/libcacard/vscclient.c b/libcacard/vscclient.c index 2191f6038c..e317a25faf 100644 --- a/libcacard/vscclient.c +++ b/libcacard/vscclient.c @@ -357,6 +357,7 @@ connect_to_qemu( if (sock < 0) { /* Error */ fprintf(stderr, "Error opening socket!\n"); + return -1; } memset(&hints, 0, sizeof(struct addrinfo)); @@ -370,13 +371,13 @@ connect_to_qemu( if (ret != 0) { /* Error */ fprintf(stderr, "getaddrinfo failed\n"); - return 5; + return -1; } if (connect(sock, server->ai_addr, server->ai_addrlen) < 0) { /* 
Error */ fprintf(stderr, "Could not connect\n"); - return 5; + return -1; } if (verbose) { printf("Connected (sizeof Header=%zd)!\n", sizeof(VSCMsgHeader)); @@ -505,6 +506,10 @@ main( qemu_host = strdup(argv[argc - 2]); qemu_port = strdup(argv[argc - 1]); sock = connect_to_qemu(qemu_host, qemu_port); + if (sock == -1) { + fprintf(stderr, "error opening socket, exiting.\n"); + exit(5); + } qemu_mutex_init(&write_lock); qemu_mutex_init(&pending_reader_lock); diff --git a/linux-user/signal.c b/linux-user/signal.c index 40c5eb1846..f3b767ea7e 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -2296,12 +2296,14 @@ void sparc64_set_context(CPUSPARCState *env) */ err |= __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); { - uint32_t *src, *dst; - src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; - dst = env->fpr; - /* XXX: check that the CPU storage is the same as user context */ - for (i = 0; i < 64; i++, dst++, src++) - err |= __get_user(*dst, src); + uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; + for (i = 0; i < 64; i++, src++) { + if (i & 1) { + err |= __get_user(env->fpr[i/2].l.lower, src); + } else { + err |= __get_user(env->fpr[i/2].l.upper, src); + } + } } err |= __get_user(env->fsr, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); @@ -2390,12 +2392,14 @@ void sparc64_get_context(CPUSPARCState *env) err |= __put_user(i7, &(mcp->mc_i7)); { - uint32_t *src, *dst; - src = env->fpr; - dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; - /* XXX: check that the CPU storage is the same as user context */ - for (i = 0; i < 64; i++, dst++, src++) - err |= __put_user(*src, dst); + uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; + for (i = 0; i < 64; i++, dst++) { + if (i & 1) { + err |= __put_user(env->fpr[i/2].l.lower, dst); + } else { + err |= __put_user(env->fpr[i/2].l.upper, dst); + } + } } err |= __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); err |= __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); diff --git a/main-loop.c b/main-loop.c index bfecdb7769..60e9748324 100644 --- a/main-loop.c +++ b/main-loop.c @@ -21,36 +21,16 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ -#include "config-host.h" -#include <unistd.h> -#include <signal.h> -#include <time.h> -#include <errno.h> -#include <sys/time.h> -#include <stdbool.h> -#ifdef _WIN32 -#include <windows.h> -#include <winsock2.h> -#include <ws2tcpip.h> -#else -#include <sys/socket.h> -#include <netinet/in.h> -#include <net/if.h> -#include <arpa/inet.h> -#include <sys/select.h> -#include <sys/stat.h> -#include "compatfd.h" -#endif - -#include <glib.h> - -#include "main-loop.h" +#include "qemu-common.h" #include "qemu-timer.h" -#include "slirp/libslirp.h" +#include "slirp/slirp.h" +#include "main-loop.h" #ifndef _WIN32 +#include "compatfd.h" + static int io_thread_fd = -1; void qemu_notify_event(void) diff --git a/migration.c b/migration.c index bdca72e008..d6935678b9 100644 --- a/migration.c +++ b/migration.c @@ -372,11 +372,22 @@ void remove_migration_state_change_notifier(Notifier *notify) notifier_list_remove(&migration_state_notifiers, notify); } +bool migration_is_active(MigrationState *s) +{ + return s->state == MIG_STATE_ACTIVE; +} + bool migration_has_finished(MigrationState *s) { return s->state == MIG_STATE_COMPLETED; } +bool migration_has_failed(MigrationState *s) +{ + return (s->state == MIG_STATE_CANCELLED || + s->state == MIG_STATE_ERROR); +} + void migrate_fd_connect(MigrationState *s) { int ret; diff --git a/migration.h b/migration.h index a1f80d0728..1b8ee58530 100644 --- a/migration.h +++ b/migration.h @@ -76,7 +76,9 @@ void migrate_fd_connect(MigrationState *s); void add_migration_state_change_notifier(Notifier *notify); void remove_migration_state_change_notifier(Notifier *notify); +bool migration_is_active(MigrationState *); bool migration_has_finished(MigrationState *); +bool migration_has_failed(MigrationState *); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_transferred(void); @@ -1153,7 +1153,8 @@ static int add_graphics_client(Monitor *mon, const QDict *qdict, QObject **ret_d return -1; } -static int client_migrate_info(Monitor *mon, const QDict *qdict, QObject **ret_data) +static int client_migrate_info(Monitor *mon, const QDict *qdict, + MonitorCompletion cb, void *opaque) { const char *protocol = qdict_get_str(qdict, "protocol"); const char *hostname = qdict_get_str(qdict, "hostname"); @@ -1168,7 +1169,8 @@ static int client_migrate_info(Monitor *mon, const QDict *qdict, QObject **ret_d return -1; } - ret = qemu_spice_migrate_info(hostname, port, tls_port, subject); + ret = qemu_spice_migrate_info(hostname, port, tls_port, subject, + cb, opaque); if (ret != 0) { qerror_report(QERR_UNDEFINED_ERROR); return -1; @@ -3471,55 +3473,55 @@ static const MonitorDef monitor_defs[] = { #endif { "tbr", offsetof(CPUState, tbr) }, { "fsr", offsetof(CPUState, fsr) }, - { "f0", offsetof(CPUState, fpr[0]) }, - { "f1", offsetof(CPUState, fpr[1]) }, - { "f2", offsetof(CPUState, fpr[2]) }, - { "f3", offsetof(CPUState, fpr[3]) }, - { "f4", offsetof(CPUState, fpr[4]) }, - { "f5", offsetof(CPUState, fpr[5]) }, - { "f6", offsetof(CPUState, fpr[6]) }, - { "f7", offsetof(CPUState, fpr[7]) }, - { "f8", offsetof(CPUState, fpr[8]) }, - { "f9", offsetof(CPUState, fpr[9]) }, - { "f10", offsetof(CPUState, fpr[10]) }, - { "f11", offsetof(CPUState, fpr[11]) }, - { "f12", offsetof(CPUState, fpr[12]) }, - { "f13", offsetof(CPUState, fpr[13]) }, - { "f14", offsetof(CPUState, fpr[14]) }, - { "f15", offsetof(CPUState, fpr[15]) }, - { "f16", offsetof(CPUState, fpr[16]) }, - { "f17", offsetof(CPUState, fpr[17]) }, - { "f18", offsetof(CPUState, fpr[18]) }, - { "f19", offsetof(CPUState, fpr[19]) }, - { "f20", 
offsetof(CPUState, fpr[20]) }, - { "f21", offsetof(CPUState, fpr[21]) }, - { "f22", offsetof(CPUState, fpr[22]) }, - { "f23", offsetof(CPUState, fpr[23]) }, - { "f24", offsetof(CPUState, fpr[24]) }, - { "f25", offsetof(CPUState, fpr[25]) }, - { "f26", offsetof(CPUState, fpr[26]) }, - { "f27", offsetof(CPUState, fpr[27]) }, - { "f28", offsetof(CPUState, fpr[28]) }, - { "f29", offsetof(CPUState, fpr[29]) }, - { "f30", offsetof(CPUState, fpr[30]) }, - { "f31", offsetof(CPUState, fpr[31]) }, + { "f0", offsetof(CPUState, fpr[0].l.upper) }, + { "f1", offsetof(CPUState, fpr[0].l.lower) }, + { "f2", offsetof(CPUState, fpr[1].l.upper) }, + { "f3", offsetof(CPUState, fpr[1].l.lower) }, + { "f4", offsetof(CPUState, fpr[2].l.upper) }, + { "f5", offsetof(CPUState, fpr[2].l.lower) }, + { "f6", offsetof(CPUState, fpr[3].l.upper) }, + { "f7", offsetof(CPUState, fpr[3].l.lower) }, + { "f8", offsetof(CPUState, fpr[4].l.upper) }, + { "f9", offsetof(CPUState, fpr[4].l.lower) }, + { "f10", offsetof(CPUState, fpr[5].l.upper) }, + { "f11", offsetof(CPUState, fpr[5].l.lower) }, + { "f12", offsetof(CPUState, fpr[6].l.upper) }, + { "f13", offsetof(CPUState, fpr[6].l.lower) }, + { "f14", offsetof(CPUState, fpr[7].l.upper) }, + { "f15", offsetof(CPUState, fpr[7].l.lower) }, + { "f16", offsetof(CPUState, fpr[8].l.upper) }, + { "f17", offsetof(CPUState, fpr[8].l.lower) }, + { "f18", offsetof(CPUState, fpr[9].l.upper) }, + { "f19", offsetof(CPUState, fpr[9].l.lower) }, + { "f20", offsetof(CPUState, fpr[10].l.upper) }, + { "f21", offsetof(CPUState, fpr[10].l.lower) }, + { "f22", offsetof(CPUState, fpr[11].l.upper) }, + { "f23", offsetof(CPUState, fpr[11].l.lower) }, + { "f24", offsetof(CPUState, fpr[12].l.upper) }, + { "f25", offsetof(CPUState, fpr[12].l.lower) }, + { "f26", offsetof(CPUState, fpr[13].l.upper) }, + { "f27", offsetof(CPUState, fpr[13].l.lower) }, + { "f28", offsetof(CPUState, fpr[14].l.upper) }, + { "f29", offsetof(CPUState, fpr[14].l.lower) }, + { "f30", offsetof(CPUState, fpr[15].l.upper) }, + { "f31", offsetof(CPUState, fpr[15].l.lower) }, #ifdef TARGET_SPARC64 - { "f32", offsetof(CPUState, fpr[32]) }, - { "f34", offsetof(CPUState, fpr[34]) }, - { "f36", offsetof(CPUState, fpr[36]) }, - { "f38", offsetof(CPUState, fpr[38]) }, - { "f40", offsetof(CPUState, fpr[40]) }, - { "f42", offsetof(CPUState, fpr[42]) }, - { "f44", offsetof(CPUState, fpr[44]) }, - { "f46", offsetof(CPUState, fpr[46]) }, - { "f48", offsetof(CPUState, fpr[48]) }, - { "f50", offsetof(CPUState, fpr[50]) }, - { "f52", offsetof(CPUState, fpr[52]) }, - { "f54", offsetof(CPUState, fpr[54]) }, - { "f56", offsetof(CPUState, fpr[56]) }, - { "f58", offsetof(CPUState, fpr[58]) }, - { "f60", offsetof(CPUState, fpr[60]) }, - { "f62", offsetof(CPUState, fpr[62]) }, + { "f32", offsetof(CPUState, fpr[16]) }, + { "f34", offsetof(CPUState, fpr[17]) }, + { "f36", offsetof(CPUState, fpr[18]) }, + { "f38", offsetof(CPUState, fpr[19]) }, + { "f40", offsetof(CPUState, fpr[20]) }, + { "f42", offsetof(CPUState, fpr[21]) }, + { "f44", offsetof(CPUState, fpr[22]) }, + { "f46", offsetof(CPUState, fpr[23]) }, + { "f48", offsetof(CPUState, fpr[24]) }, + { "f50", offsetof(CPUState, fpr[25]) }, + { "f52", offsetof(CPUState, fpr[26]) }, + { "f54", offsetof(CPUState, fpr[27]) }, + { "f56", offsetof(CPUState, fpr[28]) }, + { "f58", offsetof(CPUState, fpr[29]) }, + { "f60", offsetof(CPUState, fpr[30]) }, + { "f62", offsetof(CPUState, fpr[31]) }, { "asi", offsetof(CPUState, asi) }, { "pstate", offsetof(CPUState, pstate) }, { "cansave", offsetof(CPUState, cansave) }, 
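The monitor and signal-frame hunks above both encode the packing convention introduced by the switch from TARGET_FPREGS float32 slots to TARGET_DPREGS CPU_DoubleU entries: single-precision %f0..%f31 live in the 32-bit halves of the first sixteen doubles (%f(2n) in .l.upper, %f(2n+1) in .l.lower), while the sparc64-only double registers %f32..%f62 occupy entries 16..31 whole. The standalone C sketch below restates that mapping for reference; the union layout, the TARGET_DPREGS value and the helper names (dpreg_t, read_freg32, read_freg64) are illustrative stand-ins, not code taken from the patch.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative stand-in for QEMU's CPU_DoubleU: one 64-bit double register
 * addressable as two 32-bit single-precision halves. (In QEMU the in-memory
 * order of the halves depends on host endianness; only the register
 * numbering shown here is what the hunks above rely on.) */
typedef union {
    uint64_t ll;
    struct {
        uint32_t upper;   /* holds %f(2n)   */
        uint32_t lower;   /* holds %f(2n+1) */
    } l;
} dpreg_t;

#define TARGET_DPREGS 32                /* 32 on sparc64, 16 on 32-bit sparc */
static dpreg_t fpr[TARGET_DPREGS];

/* Single-precision register %fN, N = 0..31 (hypothetical helper). */
static uint32_t read_freg32(int n)
{
    return (n & 1) ? fpr[n / 2].l.lower : fpr[n / 2].l.upper;
}

/* Double-precision register %fN, N even, 0..62 (hypothetical helper). */
static uint64_t read_freg64(int n)
{
    return fpr[n / 2].ll;
}

int main(void)
{
    fpr[0].l.upper = 0x11111111;                   /* %f0  */
    fpr[0].l.lower = 0x22222222;                   /* %f1  */
    fpr[16].ll     = UINT64_C(0x3333333344444444); /* %f32 */
    printf("%%f1  = %08" PRIx32 "\n", read_freg32(1));
    printf("%%f32 = %016" PRIx64 "\n", read_freg64(32));
    return 0;
}

This is the same arithmetic the sparc64_set_context/sparc64_get_context loops perform with their (i & 1) test, and the reason the sparc64 monitor entries for %f32..%f62 now index fpr[16]..fpr[31] directly.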
diff --git a/oslib-posix.c b/oslib-posix.c index a304fb0f53..dbc8ee8960 100644 --- a/oslib-posix.c +++ b/oslib-posix.c @@ -103,6 +103,13 @@ void qemu_vfree(void *ptr) free(ptr); } +void socket_set_block(int fd) +{ + int f; + f = fcntl(fd, F_GETFL); + fcntl(fd, F_SETFL, f & ~O_NONBLOCK); +} + void socket_set_nonblock(int fd) { int f; diff --git a/oslib-win32.c b/oslib-win32.c index 5f0759ffc4..5e3de7dc8a 100644 --- a/oslib-win32.c +++ b/oslib-win32.c @@ -73,6 +73,12 @@ void qemu_vfree(void *ptr) VirtualFree(ptr, 0, MEM_RELEASE); } +void socket_set_block(int fd) +{ + unsigned long opt = 0; + ioctlsocket(fd, FIONBIO, &opt); +} + void socket_set_nonblock(int fd) { unsigned long opt = 1; diff --git a/qemu-img.c b/qemu-img.c index 6a3973163f..86127f0b11 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -824,6 +824,8 @@ static int img_convert(int argc, char **argv) if (compress) { QEMUOptionParameter *encryption = get_option_parameter(param, BLOCK_OPT_ENCRYPT); + QEMUOptionParameter *preallocation = + get_option_parameter(param, BLOCK_OPT_PREALLOC); if (!drv->bdrv_write_compressed) { error_report("Compression not supported for this file format"); @@ -837,6 +839,15 @@ static int img_convert(int argc, char **argv) ret = -1; goto out; } + + if (preallocation && preallocation->value.s + && strcmp(preallocation->value.s, "off")) + { + error_report("Compression and preallocation not supported at " + "the same time"); + ret = -1; + goto out; + } } /* Create the new image */ @@ -1248,6 +1248,7 @@ static int aio_write_f(int argc, char **argv) case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { + free(ctx); return 0; } break; diff --git a/qemu_socket.h b/qemu_socket.h index 180e4dbd9b..9e32fac651 100644 --- a/qemu_socket.h +++ b/qemu_socket.h @@ -35,6 +35,7 @@ int inet_aton(const char *cp, struct in_addr *ia); /* misc helpers */ int qemu_socket(int domain, int type, int protocol); int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen); +void socket_set_block(int fd); void socket_set_nonblock(int fd); int send_all(int fd, const void *buf, int len1); diff --git a/qmp-commands.hx b/qmp-commands.hx index 4328e8b86c..cb60d0cdf1 100644 --- a/qmp-commands.hx +++ b/qmp-commands.hx @@ -569,7 +569,8 @@ EQMP .params = "protocol hostname port tls-port cert-subject", .help = "send migration info to spice/vnc client", .user_print = monitor_user_noop, - .mhandler.cmd_new = client_migrate_info, + .mhandler.cmd_async = client_migrate_info, + .flags = MONITOR_CMD_ASYNC, }, SQMP diff --git a/scripts/analyse-9p-simpletrace.py b/scripts/analyse-9p-simpletrace.py index 4358d6b594..b6d58fde96 100755 --- a/scripts/analyse-9p-simpletrace.py +++ b/scripts/analyse-9p-simpletrace.py @@ -7,11 +7,11 @@ import simpletrace class VirtFSRequestTracker(simpletrace.Analyzer): - def begin(self): - print "Pretty printing 9p simpletrace log ..." + def begin(self): + print "Pretty printing 9p simpletrace log ..." 
- def complete_pdu(self, tag, id, err): - print "ERROR (tag =", tag, ", id =", id, ",err =", err, ")" + def v9fs_rerror(self, tag, id, err): + print "RERROR (tag =", tag, ", id =", id, ",err =", err, ")" def v9fs_version(self, tag, id, msize, version): print "TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")" @@ -22,121 +22,121 @@ class VirtFSRequestTracker(simpletrace.Analyzer): def v9fs_attach(self, tag, id, fid, afid, uname, aname): print "TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")" - def v9fs_attach_return(self, tag, id, type, verison, path): - print "RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})" + def v9fs_attach_return(self, tag, id, type, version, path): + print "RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})" - def v9fs_stat(self, tag, id, fid): - print "TSTAT (tag =", tag, ", fid =", fid, ")" + def v9fs_stat(self, tag, id, fid): + print "TSTAT (tag =", tag, ", fid =", fid, ")" - def v9fs_stat_return(self, tag, id, mode, atime, mtime, length): - print "RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")" + def v9fs_stat_return(self, tag, id, mode, atime, mtime, length): + print "RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")" - def v9fs_getattr(self, tag, id, fid, request_mask): - print "TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")" + def v9fs_getattr(self, tag, id, fid, request_mask): + print "TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")" - def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid): - print "RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")" + def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid): + print "RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")" - def v9fs_walk(self, tag, id, fid, newfid, nwnames): - print "TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")" + def v9fs_walk(self, tag, id, fid, newfid, nwnames): + print "TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")" - def v9fs_walk_return(self, tag, id, nwnames, qids): - print "RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")" + def v9fs_walk_return(self, tag, id, nwnames, qids): + print "RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")" - def v9fs_open(self, tag, id, fid, mode): - print "TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")" + def v9fs_open(self, tag, id, fid, mode): + print "TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")" - def v9fs_open_return(self, tag, id, type, version, path, iounit): - print "ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")" + def v9fs_open_return(self, tag, id, type, version, path, iounit): + print "ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")" - def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid): - print "TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")" + def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid): + print "TLCREATE (tag =", tag, 
", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")" - def v9fs_lcreate_return(self, id, type, version, path, iounit): - print "RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")" + def v9fs_lcreate_return(self, tag, id, type, version, path, iounit): + print "RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")" - def v9fs_fsync(self, tag, id, fid, datasync): - print "TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")" + def v9fs_fsync(self, tag, id, fid, datasync): + print "TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")" - def v9fs_clunk(self, tag, id, fid): - print "TCLUNK (tag =", tag, ", fid =", fid, ")" + def v9fs_clunk(self, tag, id, fid): + print "TCLUNK (tag =", tag, ", fid =", fid, ")" - def v9fs_read(self, tag, id, fid, off, max_count): - print "TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")" + def v9fs_read(self, tag, id, fid, off, max_count): + print "TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")" - def v9fs_read_return(self, tag, id, count, err): - print "RREAD (tag =", tag, ", count =", count, ", err =", err, ")" + def v9fs_read_return(self, tag, id, count, err): + print "RREAD (tag =", tag, ", count =", count, ", err =", err, ")" - def v9fs_readdir(self, tag, id, fid, offset, max_count): - print "TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")" + def v9fs_readdir(self, tag, id, fid, offset, max_count): + print "TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")" - def v9fs_readdir_return(self, tag, id, count, retval): - print "RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")" + def v9fs_readdir_return(self, tag, id, count, retval): + print "RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")" - def v9fs_write(self, tag, id, fid, off, count, cnt): - print "TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")" + def v9fs_write(self, tag, id, fid, off, count, cnt): + print "TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")" - def v9fs_write_return(self, tag, id, total, err): - print "RWRITE (tag =", tag, ", total =", total, ", err =", err, ")" + def v9fs_write_return(self, tag, id, total, err): + print "RWRITE (tag =", tag, ", total =", total, ", err =", err, ")" - def v9fs_create(self, tag, id, fid, perm, name, mode): - print "TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")" + def v9fs_create(self, tag, id, fid, name, perm, mode): + print "TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")" - def v9fs_create_return(self, tag, id, type, verison, path, iounit): - print "RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")" + def v9fs_create_return(self, tag, id, type, version, path, iounit): + print "RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")" - def v9fs_symlink(self, tag, id, fid, name, symname, gid): - print "TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")" + def v9fs_symlink(self, tag, id, fid, name, symname, gid): + print "TSYMLINK 
(tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")" - def v9fs_symlink_return(self, tag, id, type, version, path): - print "RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})" + def v9fs_symlink_return(self, tag, id, type, version, path): + print "RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})" - def v9fs_flush(self, tag, id, flush_tag): - print "TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")" + def v9fs_flush(self, tag, id, flush_tag): + print "TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")" - def v9fs_link(self, tag, id, dfid, oldfid, name): - print "TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")" + def v9fs_link(self, tag, id, dfid, oldfid, name): + print "TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")" - def v9fs_remove(self, tag, id, fid): - print "TREMOVE (tag =", tag, ", fid =", fid, ")" + def v9fs_remove(self, tag, id, fid): + print "TREMOVE (tag =", tag, ", fid =", fid, ")" - def v9fs_wstat(self, tag, id, fid, mode, atime, mtime): - print "TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")" + def v9fs_wstat(self, tag, id, fid, mode, atime, mtime): + print "TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")" - def v9fs_mknod(self, tag, id, fid, mode, major, minor): - print "TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")" + def v9fs_mknod(self, tag, id, fid, mode, major, minor): + print "TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")" - def v9fs_lock(self, tag, id, fid, type, start, length): - print "TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")" + def v9fs_lock(self, tag, id, fid, type, start, length): + print "TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")" - def v9fs_lock_return(self, tag, id, status): - print "RLOCK (tag =", tag, ", status =", status, ")" + def v9fs_lock_return(self, tag, id, status): + print "RLOCK (tag =", tag, ", status =", status, ")" - def v9fs_getlock(self, tag, id, fid, type, start, length): - print "TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")" + def v9fs_getlock(self, tag, id, fid, type, start, length): + print "TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")" - def v9fs_getlock_return(self, tag, id, type, start, length, proc_id): - print "RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")" + def v9fs_getlock_return(self, tag, id, type, start, length, proc_id): + print "RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")" - def v9fs_mkdir(self, tag, id, fid, name, mode, gid): - print "TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")" + def v9fs_mkdir(self, tag, id, fid, name, mode, gid): + print "TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")" - def v9fs_mkdir_return(self, tag, id, type, version, path, err): - print "RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")" + def 
v9fs_mkdir_return(self, tag, id, type, version, path, err): + print "RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")" - def v9fs_xattrwalk(self, tag, id, fid, newfid, name): - print "TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")" + def v9fs_xattrwalk(self, tag, id, fid, newfid, name): + print "TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")" - def v9fs_xattrwalk_return(self, tag, id, size): - print "RXATTRWALK (tag =", tag, ", xattrsize =", size, ")" + def v9fs_xattrwalk_return(self, tag, id, size): + print "RXATTRWALK (tag =", tag, ", xattrsize =", size, ")" - def v9fs_xattrcreate(self, tag, id, fid, name, size, flags): - print "TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")" + def v9fs_xattrcreate(self, tag, id, fid, name, size, flags): + print "TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")" - def v9fs_readlink(self, tag, id, fid): - print "TREADLINK (tag =", tag, ", fid =", fid, ")" + def v9fs_readlink(self, tag, id, fid): + print "TREADLINK (tag =", tag, ", fid =", fid, ")" - def v9fs_readlink_return(self, tag, id, target): - print "RREADLINK (tag =", tag, ", target =", target, ")" + def v9fs_readlink_return(self, tag, id, target): + print "RREADLINK (tag =", tag, ", target =", target, ")" simpletrace.run(VirtFSRequestTracker()) diff --git a/target-arm/cpu.h b/target-arm/cpu.h index 6ab780d7ef..c4d742f084 100644 --- a/target-arm/cpu.h +++ b/target-arm/cpu.h @@ -366,7 +366,7 @@ enum arm_features { ARM_FEATURE_VFP3, ARM_FEATURE_VFP_FP16, ARM_FEATURE_NEON, - ARM_FEATURE_DIV, + ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */ ARM_FEATURE_M, /* Microcontroller profile. */ ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */ ARM_FEATURE_THUMB2EE, @@ -375,6 +375,8 @@ enum arm_features { ARM_FEATURE_V5, ARM_FEATURE_STRONGARM, ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ + ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */ + ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */ }; static inline int arm_feature(CPUARMState *env, int feature) diff --git a/target-arm/helper.c b/target-arm/helper.c index e2428eb7b2..97af4d0bba 100644 --- a/target-arm/helper.c +++ b/target-arm/helper.c @@ -193,7 +193,7 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id) set_feature(env, ARM_FEATURE_THUMB2); set_feature(env, ARM_FEATURE_V7); set_feature(env, ARM_FEATURE_M); - set_feature(env, ARM_FEATURE_DIV); + set_feature(env, ARM_FEATURE_THUMB_DIV); break; case ARM_CPUID_ANY: /* For userspace emulation. 
*/ set_feature(env, ARM_FEATURE_V4T); @@ -204,10 +204,11 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id) set_feature(env, ARM_FEATURE_THUMB2); set_feature(env, ARM_FEATURE_VFP); set_feature(env, ARM_FEATURE_VFP3); + set_feature(env, ARM_FEATURE_VFP4); set_feature(env, ARM_FEATURE_VFP_FP16); set_feature(env, ARM_FEATURE_NEON); set_feature(env, ARM_FEATURE_THUMB2EE); - set_feature(env, ARM_FEATURE_DIV); + set_feature(env, ARM_FEATURE_ARM_DIV); set_feature(env, ARM_FEATURE_V7MP); break; case ARM_CPUID_TI915T: @@ -261,6 +262,9 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id) if (arm_feature(env, ARM_FEATURE_V7)) { set_feature(env, ARM_FEATURE_VAPA); } + if (arm_feature(env, ARM_FEATURE_ARM_DIV)) { + set_feature(env, ARM_FEATURE_THUMB_DIV); + } } void cpu_reset(CPUARMState *env) @@ -471,7 +475,7 @@ static uint32_t cpu_arm_find_by_name(const char *name) void cpu_arm_close(CPUARMState *env) { - free(env); + g_free(env); } uint32_t cpsr_read(CPUARMState *env) @@ -3039,8 +3043,7 @@ float32 HELPER(rsqrte_f32)(float32 a, CPUState *env) val64 = float64_val(f64); - val = ((val64 >> 63) & 0x80000000) - | ((result_exp & 0xff) << 23) + val = ((result_exp & 0xff) << 23) | ((val64 >> 29) & 0x7fffff); return make_float32(val); } @@ -3082,6 +3085,19 @@ uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env) return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff); } +/* VFPv4 fused multiply-accumulate */ +float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) +{ + float_status *fpst = fpstp; + return float32_muladd(a, b, c, 0, fpst); +} + +float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) +{ + float_status *fpst = fpstp; + return float64_muladd(a, b, c, 0, fpst); +} + void HELPER(set_teecr)(CPUState *env, uint32_t val) { val &= 1; diff --git a/target-arm/helper.h b/target-arm/helper.h index 3ad1cb0881..16dd5fcc89 100644 --- a/target-arm/helper.h +++ b/target-arm/helper.h @@ -132,6 +132,9 @@ DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env) DEF_HELPER_2(neon_fcvt_f16_to_f32, f32, i32, env) DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env) +DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr) +DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr) + DEF_HELPER_3(recps_f32, f32, f32, f32, env) DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env) DEF_HELPER_2(recpe_f32, f32, f32, env) diff --git a/target-arm/machine.c b/target-arm/machine.c index 7d4fc545a6..aaee9b9c11 100644 --- a/target-arm/machine.c +++ b/target-arm/machine.c @@ -189,7 +189,7 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id) env->vfp.vec_stride = qemu_get_be32(f); if (arm_feature(env, ARM_FEATURE_VFP3)) { - for (i = 0; i < 16; i++) { + for (i = 16; i < 32; i++) { CPU_DoubleU u; u.l.upper = qemu_get_be32(f); u.l.lower = qemu_get_be32(f); diff --git a/target-arm/translate.c b/target-arm/translate.c index 75c0ad413a..0f35b60946 100644 --- a/target-arm/translate.c +++ b/target-arm/translate.c @@ -3141,6 +3141,57 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn) case 8: /* div: fn / fm */ gen_vfp_div(dp); break; + case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */ + case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */ + case 12: /* VFMA : fd = muladd( fd, fn, fm) */ + case 13: /* VFMS : fd = muladd( fd, -fn, fm) */ + /* These are fused multiply-add, and must be done as one + * floating point operation with no rounding between the + * multiplication and addition steps. 
+ * NB that doing the negations here as separate steps is + * correct : an input NaN should come out with its sign bit + * flipped if it is a negated-input. + */ + if (!arm_feature(env, ARM_FEATURE_VFP4)) { + return 1; + } + if (dp) { + TCGv_ptr fpst; + TCGv_i64 frd; + if (op & 1) { + /* VFNMS, VFMS */ + gen_helper_vfp_negd(cpu_F0d, cpu_F0d); + } + frd = tcg_temp_new_i64(); + tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd)); + if (op & 2) { + /* VFNMA, VFNMS */ + gen_helper_vfp_negd(frd, frd); + } + fpst = get_fpstatus_ptr(0); + gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d, + cpu_F1d, frd, fpst); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i64(frd); + } else { + TCGv_ptr fpst; + TCGv_i32 frd; + if (op & 1) { + /* VFNMS, VFMS */ + gen_helper_vfp_negs(cpu_F0s, cpu_F0s); + } + frd = tcg_temp_new_i32(); + tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd)); + if (op & 2) { + gen_helper_vfp_negs(frd, frd); + } + fpst = get_fpstatus_ptr(0); + gen_helper_vfp_muladds(cpu_F0s, cpu_F0s, + cpu_F1s, frd, fpst); + tcg_temp_free_ptr(fpst); + tcg_temp_free_i32(frd); + } + break; case 14: /* fconst */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; @@ -4417,6 +4468,7 @@ static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src) #define NEON_3R_VPMIN 21 #define NEON_3R_VQDMULH_VQRDMULH 22 #define NEON_3R_VPADD 23 +#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */ #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */ #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */ #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */ @@ -4449,6 +4501,7 @@ static const uint8_t neon_3r_sizes[] = { [NEON_3R_VPMIN] = 0x7, [NEON_3R_VQDMULH_VQRDMULH] = 0x6, [NEON_3R_VPADD] = 0x7, + [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */ [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */ [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */ [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */ @@ -4726,6 +4779,11 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) return 1; } break; + case NEON_3R_VFM: + if (!arm_feature(env, ARM_FEATURE_VFP4) || u) { + return 1; + } + break; default: break; } @@ -5006,6 +5064,20 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) else gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env); break; + case NEON_3R_VFM: + { + /* VFMA, VFMS: fused multiply-add */ + TCGv_ptr fpstatus = get_fpstatus_ptr(1); + TCGv_i32 tmp3 = neon_load_reg(rd, pass); + if (size) { + /* VFMS */ + gen_helper_vfp_negs(tmp, tmp); + } + gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus); + tcg_temp_free_i32(tmp3); + tcg_temp_free_ptr(fpstatus); + break; + } default: abort(); } @@ -7569,11 +7641,16 @@ static void disas_arm_insn(CPUState * env, DisasContext *s) } break; case 2: /* Multiplies (Type 3). */ - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - if (insn & (1 << 20)) { + switch ((insn >> 20) & 0x7) { + case 5: + if (((insn >> 6) ^ (insn >> 7)) & 1) { + /* op2 not 00x or 11x : UNDEF */ + goto illegal_op; + } /* Signed multiply most significant [accumulate]. 
(SMMUL, SMMLA, SMMLS) */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); tmp64 = gen_muls_i64_i32(tmp, tmp2); if (rd != 15) { @@ -7592,7 +7669,15 @@ static void disas_arm_insn(CPUState * env, DisasContext *s) tcg_gen_trunc_i64_i32(tmp, tmp64); tcg_temp_free_i64(tmp64); store_reg(s, rn, tmp); - } else { + break; + case 0: + case 4: + /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */ + if (insn & (1 << 7)) { + goto illegal_op; + } + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); if (insn & (1 << 5)) gen_swap_half(tmp2); gen_smul_dual(tmp, tmp2); @@ -7625,6 +7710,28 @@ static void disas_arm_insn(CPUState * env, DisasContext *s) } store_reg(s, rn, tmp); } + break; + case 1: + case 3: + /* SDIV, UDIV */ + if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) { + goto illegal_op; + } + if (((insn >> 5) & 7) || (rd != 15)) { + goto illegal_op; + } + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (insn & (1 << 21)) { + gen_helper_udiv(tmp, tmp, tmp2); + } else { + gen_helper_sdiv(tmp, tmp, tmp2); + } + tcg_temp_free_i32(tmp2); + store_reg(s, rn, tmp); + break; + default: + goto illegal_op; } break; case 3: @@ -8497,8 +8604,9 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1) tmp2 = load_reg(s, rm); if ((op & 0x50) == 0x10) { /* sdiv, udiv */ - if (!arm_feature(env, ARM_FEATURE_DIV)) + if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) { goto illegal_op; + } if (op & 0x20) gen_helper_udiv(tmp, tmp, tmp2); else diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h index 6bf9275a74..38a707466c 100644 --- a/target-sparc/cpu.h +++ b/target-sparc/cpu.h @@ -3,16 +3,17 @@ #include "config.h" #include "qemu-common.h" +#include "bswap.h" #if !defined(TARGET_SPARC64) #define TARGET_LONG_BITS 32 -#define TARGET_FPREGS 32 +#define TARGET_DPREGS 16 #define TARGET_PAGE_BITS 12 /* 4k */ #define TARGET_PHYS_ADDR_SPACE_BITS 36 #define TARGET_VIRT_ADDR_SPACE_BITS 32 #else #define TARGET_LONG_BITS 64 -#define TARGET_FPREGS 64 +#define TARGET_DPREGS 32 #define TARGET_PAGE_BITS 13 /* 8k */ #define TARGET_PHYS_ADDR_SPACE_BITS 41 # ifdef TARGET_ABI32 @@ -335,6 +336,27 @@ enum { #define SFSR_CT_NOTRANS (3ULL << 4) #define SFSR_CT_MASK (3ULL << 4) +/* Leon3 cache control */ + +/* Cache control: emulate the behavior of cache control registers but without + any effect on the emulated */ + +#define CACHE_STATE_MASK 0x3 +#define CACHE_DISABLED 0x0 +#define CACHE_FROZEN 0x1 +#define CACHE_ENABLED 0x3 + +/* Cache Control register fields */ + +#define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */ +#define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */ +#define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */ +#define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */ +#define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */ +#define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */ +#define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */ +#define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */ + typedef struct SparcTLBEntry { uint64_t tag; uint64_t tte; @@ -374,7 +396,7 @@ typedef struct CPUSPARCState { uint32_t psr; /* processor state register */ target_ulong fsr; /* FPU state register */ - float32 fpr[TARGET_FPREGS]; /* floating point registers */ + CPU_DoubleU fpr[TARGET_DPREGS]; /* floating point registers */ uint32_t cwp; /* index of current register window (extracted from PSR) */ #if !defined(TARGET_SPARC64) || defined(TARGET_ABI32) @@ -442,7 +464,6 @@ typedef struct CPUSPARCState { uint64_t prom_addr; 
#endif /* temporary float registers */ - float64 dt0, dt1; float128 qt0, qt1; float_status fp_status; #if defined(TARGET_SPARC64) @@ -478,7 +499,7 @@ typedef struct CPUSPARCState { sparc_def_t *def; void *irq_manager; - void (*qemu_irq_ack) (void *irq_manager, int intno); + void (*qemu_irq_ack)(CPUState *env, void *irq_manager, int intno); /* Leon3 cache control */ uint32_t cache_control; @@ -489,7 +510,7 @@ typedef struct CPUSPARCState { CPUSPARCState *cpu_sparc_init(const char *cpu_model); void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu); void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf); -/* helper.c */ +/* mmu_helper.c */ int cpu_sparc_handle_mmu_fault(CPUSPARCState *env1, target_ulong address, int rw, int mmu_idx); #define cpu_handle_mmu_fault cpu_sparc_handle_mmu_fault @@ -523,8 +544,9 @@ int cpu_cwp_inc(CPUState *env1, int cwp); int cpu_cwp_dec(CPUState *env1, int cwp); void cpu_set_cwp(CPUState *env1, int new_cwp); -/* op_helper.c */ -void leon3_irq_manager(void *irq_manager, int intno); +/* int_helper.c */ +void do_interrupt(CPUState *env); +void leon3_irq_manager(CPUState *env, void *irq_manager, int intno); /* sun4m.c, sun4u.c */ void cpu_check_irqs(CPUSPARCState *env); @@ -721,9 +743,6 @@ static inline bool tb_am_enabled(int tb_flags) #endif } -/* helper.c */ -void do_interrupt(CPUState *env); - static inline bool cpu_has_work(CPUState *env1) { return (env1->interrupt_request & CPU_INTERRUPT_HARD) && diff --git a/target-sparc/cpu_init.c b/target-sparc/cpu_init.c index 6954800af0..c7269b54a8 100644 --- a/target-sparc/cpu_init.c +++ b/target-sparc/cpu_init.c @@ -813,11 +813,11 @@ void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf, } } cpu_fprintf(f, "\nFloating Point Registers:\n"); - for (i = 0; i < TARGET_FPREGS; i++) { + for (i = 0; i < TARGET_DPREGS; i++) { if ((i & 3) == 0) { - cpu_fprintf(f, "%%f%02d:", i); + cpu_fprintf(f, "%%f%02d:", i * 2); } - cpu_fprintf(f, " %016f", *(float *)&env->fpr[i]); + cpu_fprintf(f, " %016" PRIx64, env->fpr[i].ll); if ((i & 3) == 3) { cpu_fprintf(f, "\n"); } diff --git a/target-sparc/fop_helper.c b/target-sparc/fop_helper.c index 23502f3020..c7a2512117 100644 --- a/target-sparc/fop_helper.c +++ b/target-sparc/fop_helper.c @@ -20,26 +20,74 @@ #include "cpu.h" #include "helper.h" -#define DT0 (env->dt0) -#define DT1 (env->dt1) #define QT0 (env->qt0) #define QT1 (env->qt1) +static void check_ieee_exceptions(CPUState *env) +{ + target_ulong status; + + status = get_float_exception_flags(&env->fp_status); + if (status) { + /* Copy IEEE 754 flags into FSR */ + if (status & float_flag_invalid) { + env->fsr |= FSR_NVC; + } + if (status & float_flag_overflow) { + env->fsr |= FSR_OFC; + } + if (status & float_flag_underflow) { + env->fsr |= FSR_UFC; + } + if (status & float_flag_divbyzero) { + env->fsr |= FSR_DZC; + } + if (status & float_flag_inexact) { + env->fsr |= FSR_NXC; + } + + if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) { + /* Unmasked exception, generate a trap */ + env->fsr |= FSR_FTT_IEEE_EXCP; + helper_raise_exception(env, TT_FP_EXCP); + } else { + /* Accumulate exceptions */ + env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5; + } + } +} + +static inline void clear_float_exceptions(CPUState *env) +{ + set_float_exception_flags(0, &env->fp_status); +} + #define F_HELPER(name, p) void helper_f##name##p(CPUState *env) #define F_BINOP(name) \ - float32 helper_f ## name ## s (CPUState * env, float32 src1,\ + float32 helper_f ## name ## s (CPUState *env, float32 src1, \ float32 src2) \ { 
\ - return float32_ ## name (src1, src2, &env->fp_status); \ + float32 ret; \ + clear_float_exceptions(env); \ + ret = float32_ ## name (src1, src2, &env->fp_status); \ + check_ieee_exceptions(env); \ + return ret; \ } \ - F_HELPER(name, d) \ + float64 helper_f ## name ## d (CPUState * env, float64 src1,\ + float64 src2) \ { \ - DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \ + float64 ret; \ + clear_float_exceptions(env); \ + ret = float64_ ## name (src1, src2, &env->fp_status); \ + check_ieee_exceptions(env); \ + return ret; \ } \ F_HELPER(name, q) \ { \ + clear_float_exceptions(env); \ QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \ + check_ieee_exceptions(env); \ } F_BINOP(add); @@ -48,18 +96,24 @@ F_BINOP(mul); F_BINOP(div); #undef F_BINOP -void helper_fsmuld(CPUState *env, float32 src1, float32 src2) +float64 helper_fsmuld(CPUState *env, float32 src1, float32 src2) { - DT0 = float64_mul(float32_to_float64(src1, &env->fp_status), + float64 ret; + clear_float_exceptions(env); + ret = float64_mul(float32_to_float64(src1, &env->fp_status), float32_to_float64(src2, &env->fp_status), &env->fp_status); + check_ieee_exceptions(env); + return ret; } -void helper_fdmulq(CPUState *env) +void helper_fdmulq(CPUState *env, float64 src1, float64 src2) { - QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status), - float64_to_float128(DT1, &env->fp_status), + clear_float_exceptions(env); + QT0 = float128_mul(float64_to_float128(src1, &env->fp_status), + float64_to_float128(src2, &env->fp_status), &env->fp_status); + check_ieee_exceptions(env); } float32 helper_fnegs(float32 src) @@ -68,9 +122,9 @@ float32 helper_fnegs(float32 src) } #ifdef TARGET_SPARC64 -F_HELPER(neg, d) +float64 helper_fnegd(float64 src) { - DT0 = float64_chs(DT1); + return float64_chs(src); } F_HELPER(neg, q) @@ -82,98 +136,158 @@ F_HELPER(neg, q) /* Integer to float conversion. */ float32 helper_fitos(CPUState *env, int32_t src) { - return int32_to_float32(src, &env->fp_status); + /* Inexact error possible converting int to float. */ + float32 ret; + clear_float_exceptions(env); + ret = int32_to_float32(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -void helper_fitod(CPUState *env, int32_t src) +float64 helper_fitod(CPUState *env, int32_t src) { - DT0 = int32_to_float64(src, &env->fp_status); + /* No possible exceptions converting int to double. */ + return int32_to_float64(src, &env->fp_status); } void helper_fitoq(CPUState *env, int32_t src) { + /* No possible exceptions converting int to long double. */ QT0 = int32_to_float128(src, &env->fp_status); } #ifdef TARGET_SPARC64 -float32 helper_fxtos(CPUState *env) +float32 helper_fxtos(CPUState *env, int64_t src) { - return int64_to_float32(*((int64_t *)&DT1), &env->fp_status); + float32 ret; + clear_float_exceptions(env); + ret = int64_to_float32(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -F_HELPER(xto, d) +float64 helper_fxtod(CPUState *env, int64_t src) { - DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status); + float64 ret; + clear_float_exceptions(env); + ret = int64_to_float64(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -F_HELPER(xto, q) +void helper_fxtoq(CPUState *env, int64_t src) { - QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status); + /* No possible exceptions converting long long to long double. 
*/ + QT0 = int64_to_float128(src, &env->fp_status); } #endif #undef F_HELPER /* floating point conversion */ -float32 helper_fdtos(CPUState *env) +float32 helper_fdtos(CPUState *env, float64 src) { - return float64_to_float32(DT1, &env->fp_status); + float32 ret; + clear_float_exceptions(env); + ret = float64_to_float32(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -void helper_fstod(CPUState *env, float32 src) +float64 helper_fstod(CPUState *env, float32 src) { - DT0 = float32_to_float64(src, &env->fp_status); + float64 ret; + clear_float_exceptions(env); + ret = float32_to_float64(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } float32 helper_fqtos(CPUState *env) { - return float128_to_float32(QT1, &env->fp_status); + float32 ret; + clear_float_exceptions(env); + ret = float128_to_float32(QT1, &env->fp_status); + check_ieee_exceptions(env); + return ret; } void helper_fstoq(CPUState *env, float32 src) { + clear_float_exceptions(env); QT0 = float32_to_float128(src, &env->fp_status); + check_ieee_exceptions(env); } -void helper_fqtod(CPUState *env) +float64 helper_fqtod(CPUState *env) { - DT0 = float128_to_float64(QT1, &env->fp_status); + float64 ret; + clear_float_exceptions(env); + ret = float128_to_float64(QT1, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -void helper_fdtoq(CPUState *env) +void helper_fdtoq(CPUState *env, float64 src) { - QT0 = float64_to_float128(DT1, &env->fp_status); + clear_float_exceptions(env); + QT0 = float64_to_float128(src, &env->fp_status); + check_ieee_exceptions(env); } /* Float to integer conversion. */ int32_t helper_fstoi(CPUState *env, float32 src) { - return float32_to_int32_round_to_zero(src, &env->fp_status); + int32_t ret; + clear_float_exceptions(env); + ret = float32_to_int32_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -int32_t helper_fdtoi(CPUState *env) +int32_t helper_fdtoi(CPUState *env, float64 src) { - return float64_to_int32_round_to_zero(DT1, &env->fp_status); + int32_t ret; + clear_float_exceptions(env); + ret = float64_to_int32_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } int32_t helper_fqtoi(CPUState *env) { - return float128_to_int32_round_to_zero(QT1, &env->fp_status); + int32_t ret; + clear_float_exceptions(env); + ret = float128_to_int32_round_to_zero(QT1, &env->fp_status); + check_ieee_exceptions(env); + return ret; } #ifdef TARGET_SPARC64 -void helper_fstox(CPUState *env, float32 src) +int64_t helper_fstox(CPUState *env, float32 src) { - *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status); + int64_t ret; + clear_float_exceptions(env); + ret = float32_to_int64_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -void helper_fdtox(CPUState *env) +int64_t helper_fdtox(CPUState *env, float64 src) { - *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status); + int64_t ret; + clear_float_exceptions(env); + ret = float64_to_int64_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -void helper_fqtox(CPUState *env) +int64_t helper_fqtox(CPUState *env) { - *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status); + int64_t ret; + clear_float_exceptions(env); + ret = float128_to_int64_round_to_zero(QT1, &env->fp_status); + check_ieee_exceptions(env); + return ret; } #endif @@ -183,9 +297,9 @@ float32 helper_fabss(float32 src) } #ifdef TARGET_SPARC64 -void helper_fabsd(CPUState *env) 
+float64 helper_fabsd(float64 src) { - DT0 = float64_abs(DT1); + return float64_abs(src); } void helper_fabsq(CPUState *env) @@ -196,17 +310,27 @@ void helper_fabsq(CPUState *env) float32 helper_fsqrts(CPUState *env, float32 src) { - return float32_sqrt(src, &env->fp_status); + float32 ret; + clear_float_exceptions(env); + ret = float32_sqrt(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } -void helper_fsqrtd(CPUState *env) +float64 helper_fsqrtd(CPUState *env, float64 src) { - DT0 = float64_sqrt(DT1, &env->fp_status); + float64 ret; + clear_float_exceptions(env); + ret = float64_sqrt(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; } void helper_fsqrtq(CPUState *env) { + clear_float_exceptions(env); QT0 = float128_sqrt(QT1, &env->fp_status); + check_ieee_exceptions(env); } #define GEN_FCMP(name, size, reg1, reg2, FS, E) \ @@ -245,8 +369,8 @@ void helper_fsqrtq(CPUState *env) break; \ } \ } -#define GEN_FCMPS(name, size, FS, E) \ - void glue(helper_, name)(CPUState *env, float32 src1, float32 src2) \ +#define GEN_FCMP_T(name, size, FS, E) \ + void glue(helper_, name)(CPUState *env, size src1, size src2) \ { \ env->fsr &= FSR_FTT_NMASK; \ if (E && (glue(size, _is_any_nan)(src1) || \ @@ -282,80 +406,42 @@ void helper_fsqrtq(CPUState *env) } \ } -GEN_FCMPS(fcmps, float32, 0, 0); -GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0); +GEN_FCMP_T(fcmps, float32, 0, 0); +GEN_FCMP_T(fcmpd, float64, 0, 0); -GEN_FCMPS(fcmpes, float32, 0, 1); -GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1); +GEN_FCMP_T(fcmpes, float32, 0, 1); +GEN_FCMP_T(fcmped, float64, 0, 1); GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0); GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1); #ifdef TARGET_SPARC64 -GEN_FCMPS(fcmps_fcc1, float32, 22, 0); -GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0); +GEN_FCMP_T(fcmps_fcc1, float32, 22, 0); +GEN_FCMP_T(fcmpd_fcc1, float64, 22, 0); GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0); -GEN_FCMPS(fcmps_fcc2, float32, 24, 0); -GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0); +GEN_FCMP_T(fcmps_fcc2, float32, 24, 0); +GEN_FCMP_T(fcmpd_fcc2, float64, 24, 0); GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0); -GEN_FCMPS(fcmps_fcc3, float32, 26, 0); -GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0); +GEN_FCMP_T(fcmps_fcc3, float32, 26, 0); +GEN_FCMP_T(fcmpd_fcc3, float64, 26, 0); GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0); -GEN_FCMPS(fcmpes_fcc1, float32, 22, 1); -GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1); +GEN_FCMP_T(fcmpes_fcc1, float32, 22, 1); +GEN_FCMP_T(fcmped_fcc1, float64, 22, 1); GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1); -GEN_FCMPS(fcmpes_fcc2, float32, 24, 1); -GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1); +GEN_FCMP_T(fcmpes_fcc2, float32, 24, 1); +GEN_FCMP_T(fcmped_fcc2, float64, 24, 1); GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1); -GEN_FCMPS(fcmpes_fcc3, float32, 26, 1); -GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1); +GEN_FCMP_T(fcmpes_fcc3, float32, 26, 1); +GEN_FCMP_T(fcmped_fcc3, float64, 26, 1); GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1); #endif -#undef GEN_FCMPS - -void helper_check_ieee_exceptions(CPUState *env) -{ - target_ulong status; - - status = get_float_exception_flags(&env->fp_status); - if (status) { - /* Copy IEEE 754 flags into FSR */ - if (status & float_flag_invalid) { - env->fsr |= FSR_NVC; - } - if (status & float_flag_overflow) { - env->fsr |= FSR_OFC; - } - if (status & float_flag_underflow) { - env->fsr |= FSR_UFC; - } - if (status & float_flag_divbyzero) { - env->fsr |= FSR_DZC; - } - if (status & float_flag_inexact) { 
- env->fsr |= FSR_NXC; - } - - if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) { - /* Unmasked exception, generate a trap */ - env->fsr |= FSR_FTT_IEEE_EXCP; - helper_raise_exception(env, TT_FP_EXCP); - } else { - /* Accumulate exceptions */ - env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5; - } - } -} - -void helper_clear_float_exceptions(CPUState *env) -{ - set_float_exception_flags(0, &env->fp_status); -} +#undef GEN_FCMP_T +#undef GEN_FCMP static inline void set_fsr(CPUState *env) { diff --git a/target-sparc/helper.c b/target-sparc/helper.c index 7a25605fa7..18609c449c 100644 --- a/target-sparc/helper.c +++ b/target-sparc/helper.c @@ -1,5 +1,5 @@ /* - * sparc helpers + * Misc Sparc helpers * * Copyright (c) 2003-2005 Fabrice Bellard * @@ -22,900 +22,127 @@ #include "helper.h" #include "sysemu.h" -//#define DEBUG_MMU - -#ifdef DEBUG_MMU -#define DPRINTF_MMU(fmt, ...) \ - do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_MMU(fmt, ...) do {} while (0) -#endif - -/* Sparc MMU emulation */ - -#if defined(CONFIG_USER_ONLY) - -int cpu_sparc_handle_mmu_fault(CPUState *env1, target_ulong address, int rw, - int mmu_idx) +void helper_raise_exception(CPUState *env, int tt) { - if (rw & 2) - env1->exception_index = TT_TFAULT; - else - env1->exception_index = TT_DFAULT; - return 1; + env->exception_index = tt; + cpu_loop_exit(env); } -#else - -#ifndef TARGET_SPARC64 -/* - * Sparc V8 Reference MMU (SRMMU) - */ -static const int access_table[8][8] = { - { 0, 0, 0, 0, 8, 0, 12, 12 }, - { 0, 0, 0, 0, 8, 0, 0, 0 }, - { 8, 8, 0, 0, 0, 8, 12, 12 }, - { 8, 8, 0, 0, 0, 8, 0, 0 }, - { 8, 0, 8, 0, 8, 8, 12, 12 }, - { 8, 0, 8, 0, 8, 0, 8, 0 }, - { 8, 8, 8, 0, 8, 8, 12, 12 }, - { 8, 8, 8, 0, 8, 8, 8, 0 } -}; - -static const int perm_table[2][8] = { - { - PAGE_READ, - PAGE_READ | PAGE_WRITE, - PAGE_READ | PAGE_EXEC, - PAGE_READ | PAGE_WRITE | PAGE_EXEC, - PAGE_EXEC, - PAGE_READ | PAGE_WRITE, - PAGE_READ | PAGE_EXEC, - PAGE_READ | PAGE_WRITE | PAGE_EXEC - }, - { - PAGE_READ, - PAGE_READ | PAGE_WRITE, - PAGE_READ | PAGE_EXEC, - PAGE_READ | PAGE_WRITE | PAGE_EXEC, - PAGE_EXEC, - PAGE_READ, - 0, - 0, - } -}; - -static int get_physical_address(CPUState *env, target_phys_addr_t *physical, - int *prot, int *access_index, - target_ulong address, int rw, int mmu_idx, - target_ulong *page_size) +void helper_debug(CPUState *env) { - int access_perms = 0; - target_phys_addr_t pde_ptr; - uint32_t pde; - int error_code = 0, is_dirty, is_user; - unsigned long page_offset; - - is_user = mmu_idx == MMU_USER_IDX; - - if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */ - *page_size = TARGET_PAGE_SIZE; - // Boot mode: instruction fetches are taken from PROM - if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) { - *physical = env->prom_addr | (address & 0x7ffffULL); - *prot = PAGE_READ | PAGE_EXEC; - return 0; - } - *physical = address; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return 0; - } - - *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user? 0 : 1); - *physical = 0xffffffffffff0000ULL; - - /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ - /* Context base + context number */ - pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); - pde = ldl_phys(pde_ptr); - - /* Ctx pde */ - switch (pde & PTE_ENTRYTYPE_MASK) { - default: - case 0: /* Invalid */ - return 1 << 2; - case 2: /* L0 PTE, maybe should not happen? 
*/ - case 3: /* Reserved */ - return 4 << 2; - case 1: /* L0 PDE */ - pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); - pde = ldl_phys(pde_ptr); - - switch (pde & PTE_ENTRYTYPE_MASK) { - default: - case 0: /* Invalid */ - return (1 << 8) | (1 << 2); - case 3: /* Reserved */ - return (1 << 8) | (4 << 2); - case 1: /* L1 PDE */ - pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); - pde = ldl_phys(pde_ptr); - - switch (pde & PTE_ENTRYTYPE_MASK) { - default: - case 0: /* Invalid */ - return (2 << 8) | (1 << 2); - case 3: /* Reserved */ - return (2 << 8) | (4 << 2); - case 1: /* L2 PDE */ - pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); - pde = ldl_phys(pde_ptr); - - switch (pde & PTE_ENTRYTYPE_MASK) { - default: - case 0: /* Invalid */ - return (3 << 8) | (1 << 2); - case 1: /* PDE, should not happen */ - case 3: /* Reserved */ - return (3 << 8) | (4 << 2); - case 2: /* L3 PTE */ - page_offset = (address & TARGET_PAGE_MASK) & - (TARGET_PAGE_SIZE - 1); - } - *page_size = TARGET_PAGE_SIZE; - break; - case 2: /* L2 PTE */ - page_offset = address & 0x3ffff; - *page_size = 0x40000; - } - break; - case 2: /* L1 PTE */ - page_offset = address & 0xffffff; - *page_size = 0x1000000; - } - } - - /* check access */ - access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; - error_code = access_table[*access_index][access_perms]; - if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) - return error_code; - - /* update page modified and dirty bits */ - is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); - if (!(pde & PG_ACCESSED_MASK) || is_dirty) { - pde |= PG_ACCESSED_MASK; - if (is_dirty) - pde |= PG_MODIFIED_MASK; - stl_phys_notdirty(pde_ptr, pde); - } - - /* the page can be put in the TLB */ - *prot = perm_table[is_user][access_perms]; - if (!(pde & PG_MODIFIED_MASK)) { - /* only set write access if already dirty... otherwise wait - for dirty access */ - *prot &= ~PAGE_WRITE; - } - - /* Even if large ptes, we map only one 4KB page in the cache to - avoid filling it too fast */ - *physical = ((target_phys_addr_t)(pde & PTE_ADDR_MASK) << 4) + page_offset; - return error_code; + env->exception_index = EXCP_DEBUG; + cpu_loop_exit(env); } -/* Perform address translation */ -int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, - int mmu_idx) +void helper_shutdown(void) { - target_phys_addr_t paddr; - target_ulong vaddr; - target_ulong page_size; - int error_code = 0, prot, access_index; - - error_code = get_physical_address(env, &paddr, &prot, &access_index, - address, rw, mmu_idx, &page_size); - if (error_code == 0) { - vaddr = address & TARGET_PAGE_MASK; - paddr &= TARGET_PAGE_MASK; -#ifdef DEBUG_MMU - printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr " - TARGET_FMT_lx "\n", address, paddr, vaddr); +#if !defined(CONFIG_USER_ONLY) + qemu_system_shutdown_request(); #endif - tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size); - return 0; - } - - if (env->mmuregs[3]) /* Fault status register */ - env->mmuregs[3] = 1; /* overflow (not read before another fault) */ - env->mmuregs[3] |= (access_index << 5) | error_code | 2; - env->mmuregs[4] = address; /* Fault address register */ - - if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { - // No fault mode: if a mapping is available, just override - // permissions. If no mapping is available, redirect accesses to - // neverland. Fake/overridden mappings will be flushed when - // switching to normal mode. 
- vaddr = address & TARGET_PAGE_MASK; - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); - return 0; - } else { - if (rw & 2) - env->exception_index = TT_TFAULT; - else - env->exception_index = TT_DFAULT; - return 1; - } } -target_ulong mmu_probe(CPUState *env, target_ulong address, int mmulev) -{ - target_phys_addr_t pde_ptr; - uint32_t pde; - - /* Context base + context number */ - pde_ptr = (target_phys_addr_t)(env->mmuregs[1] << 4) + - (env->mmuregs[2] << 2); - pde = ldl_phys(pde_ptr); - - switch (pde & PTE_ENTRYTYPE_MASK) { - default: - case 0: /* Invalid */ - case 2: /* PTE, maybe should not happen? */ - case 3: /* Reserved */ - return 0; - case 1: /* L1 PDE */ - if (mmulev == 3) - return pde; - pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); - pde = ldl_phys(pde_ptr); - - switch (pde & PTE_ENTRYTYPE_MASK) { - default: - case 0: /* Invalid */ - case 3: /* Reserved */ - return 0; - case 2: /* L1 PTE */ - return pde; - case 1: /* L2 PDE */ - if (mmulev == 2) - return pde; - pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); - pde = ldl_phys(pde_ptr); - - switch (pde & PTE_ENTRYTYPE_MASK) { - default: - case 0: /* Invalid */ - case 3: /* Reserved */ - return 0; - case 2: /* L2 PTE */ - return pde; - case 1: /* L3 PDE */ - if (mmulev == 1) - return pde; - pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); - pde = ldl_phys(pde_ptr); - - switch (pde & PTE_ENTRYTYPE_MASK) { - default: - case 0: /* Invalid */ - case 1: /* PDE, should not happen */ - case 3: /* Reserved */ - return 0; - case 2: /* L3 PTE */ - return pde; - } - } - } - } - return 0; -} - -void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env) -{ - target_ulong va, va1, va2; - unsigned int n, m, o; - target_phys_addr_t pde_ptr, pa; - uint32_t pde; - - pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); - pde = ldl_phys(pde_ptr); - (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n", - (target_phys_addr_t)env->mmuregs[1] << 4, env->mmuregs[2]); - for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { - pde = mmu_probe(env, va, 2); - if (pde) { - pa = cpu_get_phys_page_debug(env, va); - (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx - " PDE: " TARGET_FMT_lx "\n", va, pa, pde); - for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { - pde = mmu_probe(env, va1, 1); - if (pde) { - pa = cpu_get_phys_page_debug(env, va1); - (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: " - TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n", - va1, pa, pde); - for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { - pde = mmu_probe(env, va2, 0); - if (pde) { - pa = cpu_get_phys_page_debug(env, va2); - (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: " - TARGET_FMT_plx " PTE: " - TARGET_FMT_lx "\n", - va2, pa, pde); - } - } - } - } - } - } -} - -/* Gdb expects all registers windows to be flushed in ram. This function handles - * reads (and only reads) in stack frames as if windows were flushed. We assume - * that the sparc ABI is followed. - */ -int target_memory_rw_debug(CPUState *env, target_ulong addr, - uint8_t *buf, int len, int is_write) +#ifdef TARGET_SPARC64 +target_ulong helper_popc(target_ulong val) { - int i; - int len1; - int cwp = env->cwp; - - if (!is_write) { - for (i = 0; i < env->nwindows; i++) { - int off; - target_ulong fp = env->regbase[cwp * 16 + 22]; - - /* Assume fp == 0 means end of frame. */ - if (fp == 0) { - break; - } - - cwp = cpu_cwp_inc(env, cwp + 1); - - /* Invalid window ? 
*/ - if (env->wim & (1 << cwp)) { - break; - } - - /* According to the ABI, the stack is growing downward. */ - if (addr + len < fp) { - break; - } - - /* Not in this frame. */ - if (addr > fp + 64) { - continue; - } - - /* Handle access before this window. */ - if (addr < fp) { - len1 = fp - addr; - if (cpu_memory_rw_debug(env, addr, buf, len1, is_write) != 0) { - return -1; - } - addr += len1; - len -= len1; - buf += len1; - } - - /* Access byte per byte to registers. Not very efficient but speed - * is not critical. - */ - off = addr - fp; - len1 = 64 - off; - - if (len1 > len) { - len1 = len; - } - - for (; len1; len1--) { - int reg = cwp * 16 + 8 + (off >> 2); - union { - uint32_t v; - uint8_t c[4]; - } u; - u.v = cpu_to_be32(env->regbase[reg]); - *buf++ = u.c[off & 3]; - addr++; - len--; - off++; - } - - if (len == 0) { - return 0; - } - } - } - return cpu_memory_rw_debug(env, addr, buf, len, is_write); + return ctpop64(val); } -#else /* !TARGET_SPARC64 */ - -// 41 bit physical address space -static inline target_phys_addr_t ultrasparc_truncate_physical(uint64_t x) +void helper_tick_set_count(void *opaque, uint64_t count) { - return x & 0x1ffffffffffULL; +#if !defined(CONFIG_USER_ONLY) + cpu_tick_set_count(opaque, count); +#endif } -/* - * UltraSparc IIi I/DMMUs - */ - -// Returns true if TTE tag is valid and matches virtual address value in context -// requires virtual address mask value calculated from TTE entry size -static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, - uint64_t address, uint64_t context, - target_phys_addr_t *physical) +uint64_t helper_tick_get_count(void *opaque) { - uint64_t mask; - - switch (TTE_PGSIZE(tlb->tte)) { - default: - case 0x0: // 8k - mask = 0xffffffffffffe000ULL; - break; - case 0x1: // 64k - mask = 0xffffffffffff0000ULL; - break; - case 0x2: // 512k - mask = 0xfffffffffff80000ULL; - break; - case 0x3: // 4M - mask = 0xffffffffffc00000ULL; - break; - } - - // valid, context match, virtual address match? - if (TTE_IS_VALID(tlb->tte) && - (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) - && compare_masked(address, tlb->tag, mask)) - { - // decode physical address - *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; - return 1; - } - +#if !defined(CONFIG_USER_ONLY) + return cpu_tick_get_count(opaque); +#else return 0; +#endif } -static int get_physical_address_data(CPUState *env, - target_phys_addr_t *physical, int *prot, - target_ulong address, int rw, int mmu_idx) +void helper_tick_set_limit(void *opaque, uint64_t limit) { - unsigned int i; - uint64_t context; - uint64_t sfsr = 0; - - int is_user = (mmu_idx == MMU_USER_IDX || - mmu_idx == MMU_USER_SECONDARY_IDX); - - if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */ - *physical = ultrasparc_truncate_physical(address); - *prot = PAGE_READ | PAGE_WRITE; - return 0; - } - - switch(mmu_idx) { - case MMU_USER_IDX: - case MMU_KERNEL_IDX: - context = env->dmmu.mmu_primary_context & 0x1fff; - sfsr |= SFSR_CT_PRIMARY; - break; - case MMU_USER_SECONDARY_IDX: - case MMU_KERNEL_SECONDARY_IDX: - context = env->dmmu.mmu_secondary_context & 0x1fff; - sfsr |= SFSR_CT_SECONDARY; - break; - case MMU_NUCLEUS_IDX: - sfsr |= SFSR_CT_NUCLEUS; - /* FALLTHRU */ - default: - context = 0; - break; - } - - if (rw == 1) { - sfsr |= SFSR_WRITE_BIT; - } else if (rw == 4) { - sfsr |= SFSR_NF_BIT; - } - - for (i = 0; i < 64; i++) { - // ctx match, vaddr match, valid? - if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { - int do_fault = 0; - - // access ok? 
- /* multiple bits in SFSR.FT may be set on TT_DFAULT */ - if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { - do_fault = 1; - sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ - - DPRINTF_MMU("DFAULT at %" PRIx64 " context %" PRIx64 - " mmu_idx=%d tl=%d\n", - address, context, mmu_idx, env->tl); - } - if (rw == 4) { - if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { - do_fault = 1; - sfsr |= SFSR_FT_NF_E_BIT; - } - } else { - if (TTE_IS_NFO(env->dtlb[i].tte)) { - do_fault = 1; - sfsr |= SFSR_FT_NFO_BIT; - } - } - - if (do_fault) { - /* faults above are reported with TT_DFAULT. */ - env->exception_index = TT_DFAULT; - } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { - do_fault = 1; - env->exception_index = TT_DPROT; - - DPRINTF_MMU("DPROT at %" PRIx64 " context %" PRIx64 - " mmu_idx=%d tl=%d\n", - address, context, mmu_idx, env->tl); - } - - if (!do_fault) { - *prot = PAGE_READ; - if (TTE_IS_W_OK(env->dtlb[i].tte)) { - *prot |= PAGE_WRITE; - } - - TTE_SET_USED(env->dtlb[i].tte); - - return 0; - } - - if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ - sfsr |= SFSR_OW_BIT; /* overflow (not read before - another fault) */ - } - - if (env->pstate & PS_PRIV) { - sfsr |= SFSR_PR_BIT; - } - - /* FIXME: ASI field in SFSR must be set */ - env->dmmu.sfsr = sfsr | SFSR_VALID_BIT; - - env->dmmu.sfar = address; /* Fault address register */ - - env->dmmu.tag_access = (address & ~0x1fffULL) | context; - - return 1; - } - } - - DPRINTF_MMU("DMISS at %" PRIx64 " context %" PRIx64 "\n", - address, context); - - /* - * On MMU misses: - * - UltraSPARC IIi: SFSR and SFAR unmodified - * - JPS1: SFAR updated and some fields of SFSR updated - */ - env->dmmu.tag_access = (address & ~0x1fffULL) | context; - env->exception_index = TT_DMISS; - return 1; +#if !defined(CONFIG_USER_ONLY) + cpu_tick_set_limit(opaque, limit); +#endif } +#endif -static int get_physical_address_code(CPUState *env, - target_phys_addr_t *physical, int *prot, - target_ulong address, int mmu_idx) +static target_ulong helper_udiv_common(CPUState *env, target_ulong a, + target_ulong b, int cc) { - unsigned int i; - uint64_t context; + int overflow = 0; + uint64_t x0; + uint32_t x1; - int is_user = (mmu_idx == MMU_USER_IDX || - mmu_idx == MMU_USER_SECONDARY_IDX); + x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); + x1 = (b & 0xffffffff); - if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) { - /* IMMU disabled */ - *physical = ultrasparc_truncate_physical(address); - *prot = PAGE_EXEC; - return 0; + if (x1 == 0) { + helper_raise_exception(env, TT_DIV_ZERO); } - if (env->tl == 0) { - /* PRIMARY context */ - context = env->dmmu.mmu_primary_context & 0x1fff; - } else { - /* NUCLEUS context */ - context = 0; + x0 = x0 / x1; + if (x0 > 0xffffffff) { + x0 = 0xffffffff; + overflow = 1; } - for (i = 0; i < 64; i++) { - // ctx match, vaddr match, valid? - if (ultrasparc_tag_match(&env->itlb[i], - address, context, physical)) { - // access ok? 
- if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { - /* Fault status register */ - if (env->immu.sfsr & SFSR_VALID_BIT) { - env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before - another fault) */ - } else { - env->immu.sfsr = 0; - } - if (env->pstate & PS_PRIV) { - env->immu.sfsr |= SFSR_PR_BIT; - } - if (env->tl > 0) { - env->immu.sfsr |= SFSR_CT_NUCLEUS; - } - - /* FIXME: ASI field in SFSR must be set */ - env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; - env->exception_index = TT_TFAULT; - - env->immu.tag_access = (address & ~0x1fffULL) | context; - - DPRINTF_MMU("TFAULT at %" PRIx64 " context %" PRIx64 "\n", - address, context); - - return 1; - } - *prot = PAGE_EXEC; - TTE_SET_USED(env->itlb[i].tte); - return 0; - } - } - - DPRINTF_MMU("TMISS at %" PRIx64 " context %" PRIx64 "\n", - address, context); - - /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ - env->immu.tag_access = (address & ~0x1fffULL) | context; - env->exception_index = TT_TMISS; - return 1; -} - -static int get_physical_address(CPUState *env, target_phys_addr_t *physical, - int *prot, int *access_index, - target_ulong address, int rw, int mmu_idx, - target_ulong *page_size) -{ - /* ??? We treat everything as a small page, then explicitly flush - everything when an entry is evicted. */ - *page_size = TARGET_PAGE_SIZE; - -#if defined (DEBUG_MMU) - /* safety net to catch wrong softmmu index use from dynamic code */ - if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { - DPRINTF_MMU("get_physical_address %s tl=%d mmu_idx=%d" - " primary context=%" PRIx64 - " secondary context=%" PRIx64 - " address=%" PRIx64 - "\n", - (rw == 2 ? "CODE" : "DATA"), - env->tl, mmu_idx, - env->dmmu.mmu_primary_context, - env->dmmu.mmu_secondary_context, - address); + if (cc) { + env->cc_dst = x0; + env->cc_src2 = overflow; + env->cc_op = CC_OP_DIV; } -#endif - - if (rw == 2) - return get_physical_address_code(env, physical, prot, address, - mmu_idx); - else - return get_physical_address_data(env, physical, prot, address, rw, - mmu_idx); + return x0; } -/* Perform address translation */ -int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, - int mmu_idx) +target_ulong helper_udiv(CPUState *env, target_ulong a, target_ulong b) { - target_ulong virt_addr, vaddr; - target_phys_addr_t paddr; - target_ulong page_size; - int error_code = 0, prot, access_index; - - error_code = get_physical_address(env, &paddr, &prot, &access_index, - address, rw, mmu_idx, &page_size); - if (error_code == 0) { - virt_addr = address & TARGET_PAGE_MASK; - vaddr = virt_addr + ((address & TARGET_PAGE_MASK) & - (TARGET_PAGE_SIZE - 1)); - - DPRINTF_MMU("Translate at %" PRIx64 " -> %" PRIx64 "," - " vaddr %" PRIx64 - " mmu_idx=%d" - " tl=%d" - " primary context=%" PRIx64 - " secondary context=%" PRIx64 - "\n", - address, paddr, vaddr, mmu_idx, env->tl, - env->dmmu.mmu_primary_context, - env->dmmu.mmu_secondary_context); - - tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size); - return 0; - } - // XXX - return 1; + return helper_udiv_common(env, a, b, 0); } -void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env) +target_ulong helper_udiv_cc(CPUState *env, target_ulong a, target_ulong b) { - unsigned int i; - const char *mask; - - (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %" - PRId64 "\n", - env->dmmu.mmu_primary_context, - env->dmmu.mmu_secondary_context); - if ((env->lsu & DMMU_E) == 0) { - (*cpu_fprintf)(f, "DMMU disabled\n"); - } else { - (*cpu_fprintf)(f, "DMMU dump\n"); - for (i = 0; i < 
64; i++) { - switch (TTE_PGSIZE(env->dtlb[i].tte)) { - default: - case 0x0: - mask = " 8k"; - break; - case 0x1: - mask = " 64k"; - break; - case 0x2: - mask = "512k"; - break; - case 0x3: - mask = " 4M"; - break; - } - if (TTE_IS_VALID(env->dtlb[i].tte)) { - (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx" - ", %s, %s, %s, %s, ctx %" PRId64 " %s\n", - i, - env->dtlb[i].tag & (uint64_t)~0x1fffULL, - TTE_PA(env->dtlb[i].tte), - mask, - TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", - TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", - TTE_IS_LOCKED(env->dtlb[i].tte) ? - "locked" : "unlocked", - env->dtlb[i].tag & (uint64_t)0x1fffULL, - TTE_IS_GLOBAL(env->dtlb[i].tte)? - "global" : "local"); - } - } - } - if ((env->lsu & IMMU_E) == 0) { - (*cpu_fprintf)(f, "IMMU disabled\n"); - } else { - (*cpu_fprintf)(f, "IMMU dump\n"); - for (i = 0; i < 64; i++) { - switch (TTE_PGSIZE(env->itlb[i].tte)) { - default: - case 0x0: - mask = " 8k"; - break; - case 0x1: - mask = " 64k"; - break; - case 0x2: - mask = "512k"; - break; - case 0x3: - mask = " 4M"; - break; - } - if (TTE_IS_VALID(env->itlb[i].tte)) { - (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx" - ", %s, %s, %s, ctx %" PRId64 " %s\n", - i, - env->itlb[i].tag & (uint64_t)~0x1fffULL, - TTE_PA(env->itlb[i].tte), - mask, - TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", - TTE_IS_LOCKED(env->itlb[i].tte) ? - "locked" : "unlocked", - env->itlb[i].tag & (uint64_t)0x1fffULL, - TTE_IS_GLOBAL(env->itlb[i].tte)? - "global" : "local"); - } - } - } + return helper_udiv_common(env, a, b, 1); } -#endif /* TARGET_SPARC64 */ - -static int cpu_sparc_get_phys_page(CPUState *env, target_phys_addr_t *phys, - target_ulong addr, int rw, int mmu_idx) +static target_ulong helper_sdiv_common(CPUState *env, target_ulong a, + target_ulong b, int cc) { - target_ulong page_size; - int prot, access_index; - - return get_physical_address(env, phys, &prot, &access_index, addr, rw, - mmu_idx, &page_size); -} + int overflow = 0; + int64_t x0; + int32_t x1; -#if defined(TARGET_SPARC64) -target_phys_addr_t cpu_get_phys_page_nofault(CPUState *env, target_ulong addr, - int mmu_idx) -{ - target_phys_addr_t phys_addr; + x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); + x1 = (b & 0xffffffff); - if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { - return -1; + if (x1 == 0) { + helper_raise_exception(env, TT_DIV_ZERO); } - return phys_addr; -} -#endif - -target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) -{ - target_phys_addr_t phys_addr; - int mmu_idx = cpu_mmu_index(env); - if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { - if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { - return -1; - } + x0 = x0 / x1; + if ((int32_t) x0 != x0) { + x0 = x0 < 0 ? 
0x80000000 : 0x7fffffff; + overflow = 1; } - if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED) { - return -1; - } - return phys_addr; -} -#endif - -/* misc op helpers */ -void helper_raise_exception(CPUState *env, int tt) -{ - env->exception_index = tt; - cpu_loop_exit(env); -} - -void helper_debug(CPUState *env) -{ - env->exception_index = EXCP_DEBUG; - cpu_loop_exit(env); -} - -void helper_shutdown(void) -{ -#if !defined(CONFIG_USER_ONLY) - qemu_system_shutdown_request(); -#endif -} - -#ifdef TARGET_SPARC64 -target_ulong helper_popc(target_ulong val) -{ - return ctpop64(val); -} -void helper_tick_set_count(void *opaque, uint64_t count) -{ -#if !defined(CONFIG_USER_ONLY) - cpu_tick_set_count(opaque, count); -#endif + if (cc) { + env->cc_dst = x0; + env->cc_src2 = overflow; + env->cc_op = CC_OP_DIV; + } + return x0; } -uint64_t helper_tick_get_count(void *opaque) +target_ulong helper_sdiv(CPUState *env, target_ulong a, target_ulong b) { -#if !defined(CONFIG_USER_ONLY) - return cpu_tick_get_count(opaque); -#else - return 0; -#endif + return helper_sdiv_common(env, a, b, 0); } -void helper_tick_set_limit(void *opaque, uint64_t limit) +target_ulong helper_sdiv_cc(CPUState *env, target_ulong a, target_ulong b) { -#if !defined(CONFIG_USER_ONLY) - cpu_tick_set_limit(opaque, limit); -#endif + return helper_sdiv_common(env, a, b, 1); } -#endif diff --git a/target-sparc/helper.h b/target-sparc/helper.h index 57d0073f2b..faaf8dc7ad 100644 --- a/target-sparc/helper.h +++ b/target-sparc/helper.h @@ -1,46 +1,43 @@ #include "def-helper.h" #ifndef TARGET_SPARC64 -DEF_HELPER_0(rett, void) -DEF_HELPER_1(wrpsr, void, tl) -DEF_HELPER_0(rdpsr, tl) +DEF_HELPER_1(rett, void, env) +DEF_HELPER_2(wrpsr, void, env, tl) +DEF_HELPER_1(rdpsr, tl, env) #else -DEF_HELPER_1(wrpil, void, tl) -DEF_HELPER_1(wrpstate, void, tl) -DEF_HELPER_0(done, void) -DEF_HELPER_0(retry, void) -DEF_HELPER_0(flushw, void) -DEF_HELPER_0(saved, void) -DEF_HELPER_0(restored, void) -DEF_HELPER_0(rdccr, tl) -DEF_HELPER_1(wrccr, void, tl) -DEF_HELPER_0(rdcwp, tl) -DEF_HELPER_1(wrcwp, void, tl) -DEF_HELPER_3(array8, tl, env, tl, tl) -DEF_HELPER_3(alignaddr, tl, env, tl, tl) +DEF_HELPER_2(wrpil, void, env, tl) +DEF_HELPER_2(wrpstate, void, env, tl) +DEF_HELPER_1(done, void, env) +DEF_HELPER_1(retry, void, env) +DEF_HELPER_1(flushw, void, env) +DEF_HELPER_1(saved, void, env) +DEF_HELPER_1(restored, void, env) +DEF_HELPER_1(rdccr, tl, env) +DEF_HELPER_2(wrccr, void, env, tl) +DEF_HELPER_1(rdcwp, tl, env) +DEF_HELPER_2(wrcwp, void, env, tl) +DEF_HELPER_FLAGS_2(array8, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl, tl) DEF_HELPER_1(popc, tl, tl) DEF_HELPER_3(ldda_asi, void, tl, int, int) DEF_HELPER_4(ldf_asi, void, tl, int, int, int) DEF_HELPER_4(stf_asi, void, tl, int, int, int) DEF_HELPER_4(cas_asi, tl, tl, tl, tl, i32) DEF_HELPER_4(casx_asi, tl, tl, tl, tl, i32) -DEF_HELPER_1(set_softint, void, i64) -DEF_HELPER_1(clear_softint, void, i64) -DEF_HELPER_1(write_softint, void, i64) +DEF_HELPER_2(set_softint, void, env, i64) +DEF_HELPER_2(clear_softint, void, env, i64) +DEF_HELPER_2(write_softint, void, env, i64) DEF_HELPER_2(tick_set_count, void, ptr, i64) DEF_HELPER_1(tick_get_count, i64, ptr) DEF_HELPER_2(tick_set_limit, void, ptr, i64) #endif DEF_HELPER_2(check_align, void, tl, i32) DEF_HELPER_1(debug, void, env) -DEF_HELPER_0(save, void) -DEF_HELPER_0(restore, void) -DEF_HELPER_2(udiv, tl, tl, tl) -DEF_HELPER_2(udiv_cc, tl, tl, tl) -DEF_HELPER_2(sdiv, tl, tl, tl) -DEF_HELPER_2(sdiv_cc, tl, tl, tl) -DEF_HELPER_2(stdf, void, tl, int) 
-DEF_HELPER_2(lddf, void, tl, int) +DEF_HELPER_1(save, void, env) +DEF_HELPER_1(restore, void, env) +DEF_HELPER_3(udiv, tl, env, tl, tl) +DEF_HELPER_3(udiv_cc, tl, env, tl, tl) +DEF_HELPER_3(sdiv, tl, env, tl, tl) +DEF_HELPER_3(sdiv_cc, tl, env, tl, tl) DEF_HELPER_2(ldqf, void, tl, int) DEF_HELPER_2(stqf, void, tl, int) #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) @@ -48,33 +45,31 @@ DEF_HELPER_4(ld_asi, i64, tl, int, int, int) DEF_HELPER_4(st_asi, void, tl, i64, int, int) #endif DEF_HELPER_2(ldfsr, void, env, i32) -DEF_HELPER_1(check_ieee_exceptions, void, env) -DEF_HELPER_1(clear_float_exceptions, void, env) -DEF_HELPER_1(fabss, f32, f32) +DEF_HELPER_FLAGS_1(fabss, TCG_CALL_CONST | TCG_CALL_PURE, f32, f32) DEF_HELPER_2(fsqrts, f32, env, f32) -DEF_HELPER_1(fsqrtd, void, env) +DEF_HELPER_2(fsqrtd, f64, env, f64) DEF_HELPER_3(fcmps, void, env, f32, f32) -DEF_HELPER_1(fcmpd, void, env) +DEF_HELPER_3(fcmpd, void, env, f64, f64) DEF_HELPER_3(fcmpes, void, env, f32, f32) -DEF_HELPER_1(fcmped, void, env) +DEF_HELPER_3(fcmped, void, env, f64, f64) DEF_HELPER_1(fsqrtq, void, env) DEF_HELPER_1(fcmpq, void, env) DEF_HELPER_1(fcmpeq, void, env) #ifdef TARGET_SPARC64 DEF_HELPER_2(ldxfsr, void, env, i64) -DEF_HELPER_1(fabsd, void, env) +DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_CONST | TCG_CALL_PURE, f64, f64) DEF_HELPER_3(fcmps_fcc1, void, env, f32, f32) DEF_HELPER_3(fcmps_fcc2, void, env, f32, f32) DEF_HELPER_3(fcmps_fcc3, void, env, f32, f32) -DEF_HELPER_1(fcmpd_fcc1, void, env) -DEF_HELPER_1(fcmpd_fcc2, void, env) -DEF_HELPER_1(fcmpd_fcc3, void, env) +DEF_HELPER_3(fcmpd_fcc1, void, env, f64, f64) +DEF_HELPER_3(fcmpd_fcc2, void, env, f64, f64) +DEF_HELPER_3(fcmpd_fcc3, void, env, f64, f64) DEF_HELPER_3(fcmpes_fcc1, void, env, f32, f32) DEF_HELPER_3(fcmpes_fcc2, void, env, f32, f32) DEF_HELPER_3(fcmpes_fcc3, void, env, f32, f32) -DEF_HELPER_1(fcmped_fcc1, void, env) -DEF_HELPER_1(fcmped_fcc2, void, env) -DEF_HELPER_1(fcmped_fcc3, void, env) +DEF_HELPER_3(fcmped_fcc1, void, env, f64, f64) +DEF_HELPER_3(fcmped_fcc2, void, env, f64, f64) +DEF_HELPER_3(fcmped_fcc3, void, env, f64, f64) DEF_HELPER_1(fabsq, void, env) DEF_HELPER_1(fcmpq_fcc1, void, env) DEF_HELPER_1(fcmpq_fcc2, void, env) @@ -86,77 +81,88 @@ DEF_HELPER_1(fcmpeq_fcc3, void, env) DEF_HELPER_2(raise_exception, void, env, int) DEF_HELPER_0(shutdown, void) #define F_HELPER_0_1(name) DEF_HELPER_1(f ## name, void, env) -#define F_HELPER_DQ_0_1(name) \ - F_HELPER_0_1(name ## d); \ - F_HELPER_0_1(name ## q) -F_HELPER_DQ_0_1(add); -F_HELPER_DQ_0_1(sub); -F_HELPER_DQ_0_1(mul); -F_HELPER_DQ_0_1(div); +DEF_HELPER_3(faddd, f64, env, f64, f64) +DEF_HELPER_3(fsubd, f64, env, f64, f64) +DEF_HELPER_3(fmuld, f64, env, f64, f64) +DEF_HELPER_3(fdivd, f64, env, f64, f64) +F_HELPER_0_1(addq) +F_HELPER_0_1(subq) +F_HELPER_0_1(mulq) +F_HELPER_0_1(divq) DEF_HELPER_3(fadds, f32, env, f32, f32) DEF_HELPER_3(fsubs, f32, env, f32, f32) DEF_HELPER_3(fmuls, f32, env, f32, f32) DEF_HELPER_3(fdivs, f32, env, f32, f32) -DEF_HELPER_3(fsmuld, void, env, f32, f32) -F_HELPER_0_1(dmulq); +DEF_HELPER_3(fsmuld, f64, env, f32, f32) +DEF_HELPER_3(fdmulq, void, env, f64, f64); -DEF_HELPER_1(fnegs, f32, f32) -DEF_HELPER_2(fitod, void, env, s32) +DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_CONST | TCG_CALL_PURE, f32, f32) +DEF_HELPER_2(fitod, f64, env, s32) DEF_HELPER_2(fitoq, void, env, s32) DEF_HELPER_2(fitos, f32, env, s32) #ifdef TARGET_SPARC64 -DEF_HELPER_1(fnegd, void, env) +DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_CONST | TCG_CALL_PURE, f64, f64) DEF_HELPER_1(fnegq, void, env) 
-DEF_HELPER_1(fxtos, i32, env) -F_HELPER_DQ_0_1(xto); +DEF_HELPER_2(fxtos, f32, env, s64) +DEF_HELPER_2(fxtod, f64, env, s64) +DEF_HELPER_2(fxtoq, void, env, s64) #endif -DEF_HELPER_1(fdtos, f32, env) -DEF_HELPER_2(fstod, void, env, f32) +DEF_HELPER_2(fdtos, f32, env, f64) +DEF_HELPER_2(fstod, f64, env, f32) DEF_HELPER_1(fqtos, f32, env) DEF_HELPER_2(fstoq, void, env, f32) -F_HELPER_0_1(qtod); -F_HELPER_0_1(dtoq); +DEF_HELPER_1(fqtod, f64, env) +DEF_HELPER_2(fdtoq, void, env, f64) DEF_HELPER_2(fstoi, s32, env, f32) -DEF_HELPER_1(fdtoi, s32, env) +DEF_HELPER_2(fdtoi, s32, env, f64) DEF_HELPER_1(fqtoi, s32, env) #ifdef TARGET_SPARC64 -DEF_HELPER_2(fstox, void, env, i32) -F_HELPER_0_1(dtox); -F_HELPER_0_1(qtox); -F_HELPER_0_1(aligndata); +DEF_HELPER_2(fstox, s64, env, f32) +DEF_HELPER_2(fdtox, s64, env, f64) +DEF_HELPER_1(fqtox, s64, env) -F_HELPER_0_1(pmerge); -F_HELPER_0_1(mul8x16); -F_HELPER_0_1(mul8x16al); -F_HELPER_0_1(mul8x16au); -F_HELPER_0_1(mul8sux16); -F_HELPER_0_1(mul8ulx16); -F_HELPER_0_1(muld8sux16); -F_HELPER_0_1(muld8ulx16); -F_HELPER_0_1(expand); -#define VIS_HELPER(name) \ - F_HELPER_0_1(name##16); \ - DEF_HELPER_3(f ## name ## 16s, i32, env, i32, i32) \ - F_HELPER_0_1(name##32); \ - DEF_HELPER_3(f ## name ## 32s, i32, env, i32, i32) +DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16al, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16au, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8sux16, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8ulx16, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmuld8sux16, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmuld8ulx16, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fexpand, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) +DEF_HELPER_FLAGS_3(pdist, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_CONST | TCG_CALL_PURE, i32, i64, i64) +DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_CONST | TCG_CALL_PURE, i32, i64, i64) +DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64, i64) +#define VIS_HELPER(name) \ + DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_CONST | TCG_CALL_PURE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_CONST | TCG_CALL_PURE, \ + i32, i32, i32) \ + DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_CONST | TCG_CALL_PURE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_CONST | TCG_CALL_PURE, \ + i32, i32, i32) VIS_HELPER(padd); VIS_HELPER(psub); -#define VIS_CMPHELPER(name) \ - DEF_HELPER_1(f##name##16, i64, env); \ - DEF_HELPER_1(f##name##32, i64, env) +#define VIS_CMPHELPER(name) \ + DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_CONST | TCG_CALL_PURE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_CONST | TCG_CALL_PURE, \ + i64, i64, i64) VIS_CMPHELPER(cmpgt); VIS_CMPHELPER(cmpeq); VIS_CMPHELPER(cmple); VIS_CMPHELPER(cmpne); #endif #undef F_HELPER_0_1 -#undef F_HELPER_DQ_0_1 #undef VIS_HELPER #undef VIS_CMPHELPER DEF_HELPER_1(compute_psr, void, env); diff --git a/target-sparc/int32_helper.c b/target-sparc/int32_helper.c index 219a6c64cd..3a749bf5df 100644 --- a/target-sparc/int32_helper.c +++ b/target-sparc/int32_helper.c @@ -18,6 +18,7 @@ */ #include "cpu.h" +#include 
"trace.h" //#define DEBUG_PCALL @@ -119,7 +120,44 @@ void do_interrupt(CPUState *env) #if !defined(CONFIG_USER_ONLY) /* IRQ acknowledgment */ if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) { - env->qemu_irq_ack(env->irq_manager, intno); + env->qemu_irq_ack(env, env->irq_manager, intno); } #endif } + +#if !defined(CONFIG_USER_ONLY) +static void leon3_cache_control_int(CPUState *env) +{ + uint32_t state = 0; + + if (env->cache_control & CACHE_CTRL_IF) { + /* Instruction cache state */ + state = env->cache_control & CACHE_STATE_MASK; + if (state == CACHE_ENABLED) { + state = CACHE_FROZEN; + trace_int_helper_icache_freeze(); + } + + env->cache_control &= ~CACHE_STATE_MASK; + env->cache_control |= state; + } + + if (env->cache_control & CACHE_CTRL_DF) { + /* Data cache state */ + state = (env->cache_control >> 2) & CACHE_STATE_MASK; + if (state == CACHE_ENABLED) { + state = CACHE_FROZEN; + trace_int_helper_dcache_freeze(); + } + + env->cache_control &= ~(CACHE_STATE_MASK << 2); + env->cache_control |= (state << 2); + } +} + +void leon3_irq_manager(CPUState *env, void *irq_manager, int intno) +{ + leon3_irq_ack(irq_manager, intno); + leon3_cache_control_int(env); +} +#endif diff --git a/target-sparc/int64_helper.c b/target-sparc/int64_helper.c index 2bb1910ed9..1d471db999 100644 --- a/target-sparc/int64_helper.c +++ b/target-sparc/int64_helper.c @@ -18,6 +18,8 @@ */ #include "cpu.h" +#include "helper.h" +#include "trace.h" //#define DEBUG_PCALL @@ -162,3 +164,38 @@ trap_state *cpu_tsptr(CPUState* env) { return &env->ts[env->tl & MAXTL_MASK]; } + +static bool do_modify_softint(CPUState *env, uint32_t value) +{ + if (env->softint != value) { + env->softint = value; +#if !defined(CONFIG_USER_ONLY) + if (cpu_interrupts_enabled(env)) { + cpu_check_irqs(env); + } +#endif + return true; + } + return false; +} + +void helper_set_softint(CPUState *env, uint64_t value) +{ + if (do_modify_softint(env, env->softint | (uint32_t)value)) { + trace_int_helper_set_softint(env->softint); + } +} + +void helper_clear_softint(CPUState *env, uint64_t value) +{ + if (do_modify_softint(env, env->softint & (uint32_t)~value)) { + trace_int_helper_clear_softint(env->softint); + } +} + +void helper_write_softint(CPUState *env, uint64_t value) +{ + if (do_modify_softint(env, (uint32_t)value)) { + trace_int_helper_write_softint(env->softint); + } +} diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c new file mode 100644 index 0000000000..b59707ecd2 --- /dev/null +++ b/target-sparc/ldst_helper.c @@ -0,0 +1,2371 @@ +/* + * Helpers for loads and stores + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "cpu.h" +#include "dyngen-exec.h" +#include "helper.h" + +#if !defined(CONFIG_USER_ONLY) +#include "softmmu_exec.h" +#endif + +//#define DEBUG_MMU +//#define DEBUG_MXCC +//#define DEBUG_UNALIGNED +//#define DEBUG_UNASSIGNED +//#define DEBUG_ASI +//#define DEBUG_CACHE_CONTROL + +#ifdef DEBUG_MMU +#define DPRINTF_MMU(fmt, ...) \ + do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_MMU(fmt, ...) do {} while (0) +#endif + +#ifdef DEBUG_MXCC +#define DPRINTF_MXCC(fmt, ...) \ + do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_MXCC(fmt, ...) do {} while (0) +#endif + +#ifdef DEBUG_ASI +#define DPRINTF_ASI(fmt, ...) \ + do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0) +#endif + +#ifdef DEBUG_CACHE_CONTROL +#define DPRINTF_CACHE_CONTROL(fmt, ...) \ + do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0) +#endif + +#ifdef TARGET_SPARC64 +#ifndef TARGET_ABI32 +#define AM_CHECK(env1) ((env1)->pstate & PS_AM) +#else +#define AM_CHECK(env1) (1) +#endif +#endif + +#define QT0 (env->qt0) +#define QT1 (env->qt1) + +#if !defined(CONFIG_USER_ONLY) +static void do_unassigned_access(target_phys_addr_t addr, int is_write, + int is_exec, int is_asi, int size); +#else +#ifdef TARGET_SPARC64 +static void do_unassigned_access(target_ulong addr, int is_write, int is_exec, + int is_asi, int size); +#endif +#endif + +#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) +/* Calculates TSB pointer value for fault page size 8k or 64k */ +static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register, + uint64_t tag_access_register, + int page_size) +{ + uint64_t tsb_base = tsb_register & ~0x1fffULL; + int tsb_split = (tsb_register & 0x1000ULL) ? 
1 : 0; + int tsb_size = tsb_register & 0xf; + + /* discard lower 13 bits which hold tag access context */ + uint64_t tag_access_va = tag_access_register & ~0x1fffULL; + + /* now reorder bits */ + uint64_t tsb_base_mask = ~0x1fffULL; + uint64_t va = tag_access_va; + + /* move va bits to correct position */ + if (page_size == 8*1024) { + va >>= 9; + } else if (page_size == 64*1024) { + va >>= 12; + } + + if (tsb_size) { + tsb_base_mask <<= tsb_size; + } + + /* calculate tsb_base mask and adjust va if split is in use */ + if (tsb_split) { + if (page_size == 8*1024) { + va &= ~(1ULL << (13 + tsb_size)); + } else if (page_size == 64*1024) { + va |= (1ULL << (13 + tsb_size)); + } + tsb_base_mask <<= 1; + } + + return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL; +} + +/* Calculates tag target register value by reordering bits + in tag access register */ +static uint64_t ultrasparc_tag_target(uint64_t tag_access_register) +{ + return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22); +} + +static void replace_tlb_entry(SparcTLBEntry *tlb, + uint64_t tlb_tag, uint64_t tlb_tte, + CPUState *env1) +{ + target_ulong mask, size, va, offset; + + /* flush page range if translation is valid */ + if (TTE_IS_VALID(tlb->tte)) { + + mask = 0xffffffffffffe000ULL; + mask <<= 3 * ((tlb->tte >> 61) & 3); + size = ~mask + 1; + + va = tlb->tag & mask; + + for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) { + tlb_flush_page(env1, va + offset); + } + } + + tlb->tag = tlb_tag; + tlb->tte = tlb_tte; +} + +static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr, + const char *strmmu, CPUState *env1) +{ + unsigned int i; + target_ulong mask; + uint64_t context; + + int is_demap_context = (demap_addr >> 6) & 1; + + /* demap context */ + switch ((demap_addr >> 4) & 3) { + case 0: /* primary */ + context = env1->dmmu.mmu_primary_context; + break; + case 1: /* secondary */ + context = env1->dmmu.mmu_secondary_context; + break; + case 2: /* nucleus */ + context = 0; + break; + case 3: /* reserved */ + default: + return; + } + + for (i = 0; i < 64; i++) { + if (TTE_IS_VALID(tlb[i].tte)) { + + if (is_demap_context) { + /* will remove non-global entries matching context value */ + if (TTE_IS_GLOBAL(tlb[i].tte) || + !tlb_compare_context(&tlb[i], context)) { + continue; + } + } else { + /* demap page + will remove any entry matching VA */ + mask = 0xffffffffffffe000ULL; + mask <<= 3 * ((tlb[i].tte >> 61) & 3); + + if (!compare_masked(demap_addr, tlb[i].tag, mask)) { + continue; + } + + /* entry should be global or matching context value */ + if (!TTE_IS_GLOBAL(tlb[i].tte) && + !tlb_compare_context(&tlb[i], context)) { + continue; + } + } + + replace_tlb_entry(&tlb[i], 0, 0, env1); +#ifdef DEBUG_MMU + DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i); + dump_mmu(stdout, fprintf, env1); +#endif + } + } +} + +static void replace_tlb_1bit_lru(SparcTLBEntry *tlb, + uint64_t tlb_tag, uint64_t tlb_tte, + const char *strmmu, CPUState *env1) +{ + unsigned int i, replace_used; + + /* Try replacing invalid entry */ + for (i = 0; i < 64; i++) { + if (!TTE_IS_VALID(tlb[i].tte)) { + replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i); + dump_mmu(stdout, fprintf, env1); +#endif + return; + } + } + + /* All entries are valid, try replacing unlocked entry */ + + for (replace_used = 0; replace_used < 2; ++replace_used) { + + /* Used entries are not replaced on first pass */ + + for (i = 0; i < 64; 
i++) { + if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) { + + replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n", + strmmu, (replace_used ? "used" : "unused"), i); + dump_mmu(stdout, fprintf, env1); +#endif + return; + } + } + + /* Now reset used bit and search for unused entries again */ + + for (i = 0; i < 64; i++) { + TTE_SET_UNUSED(tlb[i].tte); + } + } + +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu); +#endif + /* error state? */ +} + +#endif + +static inline target_ulong address_mask(CPUState *env1, target_ulong addr) +{ +#ifdef TARGET_SPARC64 + if (AM_CHECK(env1)) { + addr &= 0xffffffffULL; + } +#endif + return addr; +} + +/* returns true if access using this ASI is to have address translated by MMU + otherwise access is to raw physical address */ +static inline int is_translating_asi(int asi) +{ +#ifdef TARGET_SPARC64 + /* Ultrasparc IIi translating asi + - note this list is defined by cpu implementation + */ + switch (asi) { + case 0x04 ... 0x11: + case 0x16 ... 0x19: + case 0x1E ... 0x1F: + case 0x24 ... 0x2C: + case 0x70 ... 0x73: + case 0x78 ... 0x79: + case 0x80 ... 0xFF: + return 1; + + default: + return 0; + } +#else + /* TODO: check sparc32 bits */ + return 0; +#endif +} + +static inline target_ulong asi_address_mask(CPUState *env1, + int asi, target_ulong addr) +{ + if (is_translating_asi(asi)) { + return address_mask(env, addr); + } else { + return addr; + } +} + +void helper_check_align(target_ulong addr, uint32_t align) +{ + if (addr & align) { +#ifdef DEBUG_UNALIGNED + printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx + "\n", addr, env->pc); +#endif + helper_raise_exception(env, TT_UNALIGNED); + } +} + +#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \ + defined(DEBUG_MXCC) +static void dump_mxcc(CPUState *env) +{ + printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n", + env->mxccdata[0], env->mxccdata[1], + env->mxccdata[2], env->mxccdata[3]); + printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n" + " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n", + env->mxccregs[0], env->mxccregs[1], + env->mxccregs[2], env->mxccregs[3], + env->mxccregs[4], env->mxccregs[5], + env->mxccregs[6], env->mxccregs[7]); +} +#endif + +#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \ + && defined(DEBUG_ASI) +static void dump_asi(const char *txt, target_ulong addr, int asi, int size, + uint64_t r1) +{ + switch (size) { + case 1: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt, + addr, asi, r1 & 0xff); + break; + case 2: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt, + addr, asi, r1 & 0xffff); + break; + case 4: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt, + addr, asi, r1 & 0xffffffff); + break; + case 8: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt, + addr, asi, r1); + break; + } +} +#endif + +#ifndef TARGET_SPARC64 +#ifndef CONFIG_USER_ONLY + + +/* Leon3 cache control */ + +static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size) +{ + DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n", + addr, val, size); + + if (size != 4) { + DPRINTF_CACHE_CONTROL("32bits only\n"); + return; + } + + switch (addr) { + case 0x00: /* Cache control */ + + /* These values must always be read as zeros */ + val &= 
~CACHE_CTRL_FD; + val &= ~CACHE_CTRL_FI; + val &= ~CACHE_CTRL_IB; + val &= ~CACHE_CTRL_IP; + val &= ~CACHE_CTRL_DP; + + env->cache_control = val; + break; + case 0x04: /* Instruction cache configuration */ + case 0x08: /* Data cache configuration */ + /* Read Only */ + break; + default: + DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr); + break; + }; +} + +static uint64_t leon3_cache_control_ld(target_ulong addr, int size) +{ + uint64_t ret = 0; + + if (size != 4) { + DPRINTF_CACHE_CONTROL("32bits only\n"); + return 0; + } + + switch (addr) { + case 0x00: /* Cache control */ + ret = env->cache_control; + break; + + /* Configuration registers are read and only always keep those + predefined values */ + + case 0x04: /* Instruction cache configuration */ + ret = 0x10220000; + break; + case 0x08: /* Data cache configuration */ + ret = 0x18220000; + break; + default: + DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr); + break; + }; + DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n", + addr, ret, size); + return ret; +} + +uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign) +{ + uint64_t ret = 0; +#if defined(DEBUG_MXCC) || defined(DEBUG_ASI) + uint32_t last_addr = addr; +#endif + + helper_check_align(addr, size - 1); + switch (asi) { + case 2: /* SuperSparc MXCC registers and Leon3 cache control */ + switch (addr) { + case 0x00: /* Leon3 Cache Control */ + case 0x08: /* Leon3 Instruction Cache config */ + case 0x0C: /* Leon3 Date Cache config */ + if (env->def->features & CPU_FEATURE_CACHE_CTRL) { + ret = leon3_cache_control_ld(addr, size); + } + break; + case 0x01c00a00: /* MXCC control register */ + if (size == 8) { + ret = env->mxccregs[3]; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00a04: /* MXCC control register */ + if (size == 4) { + ret = env->mxccregs[3]; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00c00: /* Module reset register */ + if (size == 8) { + ret = env->mxccregs[5]; + /* should we do something here? 
*/ + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00f00: /* MBus port address register */ + if (size == 8) { + ret = env->mxccregs[7]; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + default: + DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr, + size); + break; + } + DPRINTF_MXCC("asi = %d, size = %d, sign = %d, " + "addr = %08x -> ret = %" PRIx64 "," + "addr = %08x\n", asi, size, sign, last_addr, ret, addr); +#ifdef DEBUG_MXCC + dump_mxcc(env); +#endif + break; + case 3: /* MMU probe */ + { + int mmulev; + + mmulev = (addr >> 8) & 15; + if (mmulev > 4) { + ret = 0; + } else { + ret = mmu_probe(env, addr, mmulev); + } + DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n", + addr, mmulev, ret); + } + break; + case 4: /* read MMU regs */ + { + int reg = (addr >> 8) & 0x1f; + + ret = env->mmuregs[reg]; + if (reg == 3) { /* Fault status cleared on read */ + env->mmuregs[3] = 0; + } else if (reg == 0x13) { /* Fault status read */ + ret = env->mmuregs[3]; + } else if (reg == 0x14) { /* Fault address read */ + ret = env->mmuregs[4]; + } + DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret); + } + break; + case 5: /* Turbosparc ITLB Diagnostic */ + case 6: /* Turbosparc DTLB Diagnostic */ + case 7: /* Turbosparc IOTLB Diagnostic */ + break; + case 9: /* Supervisor code access */ + switch (size) { + case 1: + ret = ldub_code(addr); + break; + case 2: + ret = lduw_code(addr); + break; + default: + case 4: + ret = ldl_code(addr); + break; + case 8: + ret = ldq_code(addr); + break; + } + break; + case 0xa: /* User data access */ + switch (size) { + case 1: + ret = ldub_user(addr); + break; + case 2: + ret = lduw_user(addr); + break; + default: + case 4: + ret = ldl_user(addr); + break; + case 8: + ret = ldq_user(addr); + break; + } + break; + case 0xb: /* Supervisor data access */ + switch (size) { + case 1: + ret = ldub_kernel(addr); + break; + case 2: + ret = lduw_kernel(addr); + break; + default: + case 4: + ret = ldl_kernel(addr); + break; + case 8: + ret = ldq_kernel(addr); + break; + } + break; + case 0xc: /* I-cache tag */ + case 0xd: /* I-cache data */ + case 0xe: /* D-cache tag */ + case 0xf: /* D-cache data */ + break; + case 0x20: /* MMU passthrough */ + switch (size) { + case 1: + ret = ldub_phys(addr); + break; + case 2: + ret = lduw_phys(addr); + break; + default: + case 4: + ret = ldl_phys(addr); + break; + case 8: + ret = ldq_phys(addr); + break; + } + break; + case 0x21 ... 
0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + switch (size) { + case 1: + ret = ldub_phys((target_phys_addr_t)addr + | ((target_phys_addr_t)(asi & 0xf) << 32)); + break; + case 2: + ret = lduw_phys((target_phys_addr_t)addr + | ((target_phys_addr_t)(asi & 0xf) << 32)); + break; + default: + case 4: + ret = ldl_phys((target_phys_addr_t)addr + | ((target_phys_addr_t)(asi & 0xf) << 32)); + break; + case 8: + ret = ldq_phys((target_phys_addr_t)addr + | ((target_phys_addr_t)(asi & 0xf) << 32)); + break; + } + break; + case 0x30: /* Turbosparc secondary cache diagnostic */ + case 0x31: /* Turbosparc RAM snoop */ + case 0x32: /* Turbosparc page table descriptor diagnostic */ + case 0x39: /* data cache diagnostic register */ + ret = 0; + break; + case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */ + { + int reg = (addr >> 8) & 3; + + switch (reg) { + case 0: /* Breakpoint Value (Addr) */ + ret = env->mmubpregs[reg]; + break; + case 1: /* Breakpoint Mask */ + ret = env->mmubpregs[reg]; + break; + case 2: /* Breakpoint Control */ + ret = env->mmubpregs[reg]; + break; + case 3: /* Breakpoint Status */ + ret = env->mmubpregs[reg]; + env->mmubpregs[reg] = 0ULL; + break; + } + DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg, + ret); + } + break; + case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ + ret = env->mmubpctrv; + break; + case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ + ret = env->mmubpctrc; + break; + case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ + ret = env->mmubpctrs; + break; + case 0x4c: /* SuperSPARC MMU Breakpoint Action */ + ret = env->mmubpaction; + break; + case 8: /* User code access, XXX */ + default: + do_unassigned_access(addr, 0, 0, asi, size); + ret = 0; + break; + } + if (sign) { + switch (size) { + case 1: + ret = (int8_t) ret; + break; + case 2: + ret = (int16_t) ret; + break; + case 4: + ret = (int32_t) ret; + break; + default: + break; + } + } +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return ret; +} + +void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size) +{ + helper_check_align(addr, size - 1); + switch (asi) { + case 2: /* SuperSparc MXCC registers and Leon3 cache control */ + switch (addr) { + case 0x00: /* Leon3 Cache Control */ + case 0x08: /* Leon3 Instruction Cache config */ + case 0x0C: /* Leon3 Date Cache config */ + if (env->def->features & CPU_FEATURE_CACHE_CTRL) { + leon3_cache_control_st(addr, val, size); + } + break; + + case 0x01c00000: /* MXCC stream data register 0 */ + if (size == 8) { + env->mxccdata[0] = val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00008: /* MXCC stream data register 1 */ + if (size == 8) { + env->mxccdata[1] = val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00010: /* MXCC stream data register 2 */ + if (size == 8) { + env->mxccdata[2] = val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00018: /* MXCC stream data register 3 */ + if (size == 8) { + env->mxccdata[3] = val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00100: /* MXCC stream source */ + if (size == 8) { + env->mxccregs[0] = val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + + 0); + env->mxccdata[1] = 
ldq_phys((env->mxccregs[0] & 0xffffffffULL) + + 8); + env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + + 16); + env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + + 24); + break; + case 0x01c00200: /* MXCC stream destination */ + if (size == 8) { + env->mxccregs[1] = val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0, + env->mxccdata[0]); + stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8, + env->mxccdata[1]); + stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16, + env->mxccdata[2]); + stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24, + env->mxccdata[3]); + break; + case 0x01c00a00: /* MXCC control register */ + if (size == 8) { + env->mxccregs[3] = val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00a04: /* MXCC control register */ + if (size == 4) { + env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL) + | val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00e00: /* MXCC error register */ + /* writing a 1 bit clears the error */ + if (size == 8) { + env->mxccregs[6] &= ~val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00f00: /* MBus port address register */ + if (size == 8) { + env->mxccregs[7] = val; + } else { + DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, + size); + } + break; + default: + DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr, + size); + break; + } + DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n", + asi, size, addr, val); +#ifdef DEBUG_MXCC + dump_mxcc(env); +#endif + break; + case 3: /* MMU flush */ + { + int mmulev; + + mmulev = (addr >> 8) & 15; + DPRINTF_MMU("mmu flush level %d\n", mmulev); + switch (mmulev) { + case 0: /* flush page */ + tlb_flush_page(env, addr & 0xfffff000); + break; + case 1: /* flush segment (256k) */ + case 2: /* flush region (16M) */ + case 3: /* flush context (4G) */ + case 4: /* flush entire */ + tlb_flush(env, 1); + break; + default: + break; + } +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + } + break; + case 4: /* write MMU regs */ + { + int reg = (addr >> 8) & 0x1f; + uint32_t oldreg; + + oldreg = env->mmuregs[reg]; + switch (reg) { + case 0: /* Control Register */ + env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) | + (val & 0x00ffffff); + /* Mappings generated during no-fault mode or MMU + disabled mode are invalid in normal mode */ + if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) != + (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm))) { + tlb_flush(env, 1); + } + break; + case 1: /* Context Table Pointer Register */ + env->mmuregs[reg] = val & env->def->mmu_ctpr_mask; + break; + case 2: /* Context Register */ + env->mmuregs[reg] = val & env->def->mmu_cxr_mask; + if (oldreg != env->mmuregs[reg]) { + /* we flush when the MMU context changes because + QEMU has no MMU context support */ + tlb_flush(env, 1); + } + break; + case 3: /* Synchronous Fault Status Register with Clear */ + case 4: /* Synchronous Fault Address Register */ + break; + case 0x10: /* TLB Replacement Control Register */ + env->mmuregs[reg] = val & env->def->mmu_trcr_mask; + break; + case 0x13: /* Synchronous Fault Status Register with Read + and Clear */ + env->mmuregs[3] = val & env->def->mmu_sfsr_mask; + break; + case 0x14: /* Synchronous Fault Address Register */ + env->mmuregs[4] = 
val; + break; + default: + env->mmuregs[reg] = val; + break; + } + if (oldreg != env->mmuregs[reg]) { + DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n", + reg, oldreg, env->mmuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + } + break; + case 5: /* Turbosparc ITLB Diagnostic */ + case 6: /* Turbosparc DTLB Diagnostic */ + case 7: /* Turbosparc IOTLB Diagnostic */ + break; + case 0xa: /* User data access */ + switch (size) { + case 1: + stb_user(addr, val); + break; + case 2: + stw_user(addr, val); + break; + default: + case 4: + stl_user(addr, val); + break; + case 8: + stq_user(addr, val); + break; + } + break; + case 0xb: /* Supervisor data access */ + switch (size) { + case 1: + stb_kernel(addr, val); + break; + case 2: + stw_kernel(addr, val); + break; + default: + case 4: + stl_kernel(addr, val); + break; + case 8: + stq_kernel(addr, val); + break; + } + break; + case 0xc: /* I-cache tag */ + case 0xd: /* I-cache data */ + case 0xe: /* D-cache tag */ + case 0xf: /* D-cache data */ + case 0x10: /* I/D-cache flush page */ + case 0x11: /* I/D-cache flush segment */ + case 0x12: /* I/D-cache flush region */ + case 0x13: /* I/D-cache flush context */ + case 0x14: /* I/D-cache flush user */ + break; + case 0x17: /* Block copy, sta access */ + { + /* val = src + addr = dst + copy 32 bytes */ + unsigned int i; + uint32_t src = val & ~3, dst = addr & ~3, temp; + + for (i = 0; i < 32; i += 4, src += 4, dst += 4) { + temp = ldl_kernel(src); + stl_kernel(dst, temp); + } + } + break; + case 0x1f: /* Block fill, stda access */ + { + /* addr = dst + fill 32 bytes with val */ + unsigned int i; + uint32_t dst = addr & 7; + + for (i = 0; i < 32; i += 8, dst += 8) { + stq_kernel(dst, val); + } + } + break; + case 0x20: /* MMU passthrough */ + { + switch (size) { + case 1: + stb_phys(addr, val); + break; + case 2: + stw_phys(addr, val); + break; + case 4: + default: + stl_phys(addr, val); + break; + case 8: + stq_phys(addr, val); + break; + } + } + break; + case 0x21 ... 
0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + { + switch (size) { + case 1: + stb_phys((target_phys_addr_t)addr + | ((target_phys_addr_t)(asi & 0xf) << 32), val); + break; + case 2: + stw_phys((target_phys_addr_t)addr + | ((target_phys_addr_t)(asi & 0xf) << 32), val); + break; + case 4: + default: + stl_phys((target_phys_addr_t)addr + | ((target_phys_addr_t)(asi & 0xf) << 32), val); + break; + case 8: + stq_phys((target_phys_addr_t)addr + | ((target_phys_addr_t)(asi & 0xf) << 32), val); + break; + } + } + break; + case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */ + case 0x31: /* store buffer data, Ross RT620 I-cache flush or + Turbosparc snoop RAM */ + case 0x32: /* store buffer control or Turbosparc page table + descriptor diagnostic */ + case 0x36: /* I-cache flash clear */ + case 0x37: /* D-cache flash clear */ + break; + case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/ + { + int reg = (addr >> 8) & 3; + + switch (reg) { + case 0: /* Breakpoint Value (Addr) */ + env->mmubpregs[reg] = (val & 0xfffffffffULL); + break; + case 1: /* Breakpoint Mask */ + env->mmubpregs[reg] = (val & 0xfffffffffULL); + break; + case 2: /* Breakpoint Control */ + env->mmubpregs[reg] = (val & 0x7fULL); + break; + case 3: /* Breakpoint Status */ + env->mmubpregs[reg] = (val & 0xfULL); + break; + } + DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg, + env->mmuregs[reg]); + } + break; + case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ + env->mmubpctrv = val & 0xffffffff; + break; + case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ + env->mmubpctrc = val & 0x3; + break; + case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ + env->mmubpctrs = val & 0x3; + break; + case 0x4c: /* SuperSPARC MMU Breakpoint Action */ + env->mmubpaction = val & 0x1fff; + break; + case 8: /* User code access, XXX */ + case 9: /* Supervisor code access, XXX */ + default: + do_unassigned_access(addr, 1, 0, asi, size); + break; + } +#ifdef DEBUG_ASI + dump_asi("write", addr, asi, size, val); +#endif +} + +#endif /* CONFIG_USER_ONLY */ +#else /* TARGET_SPARC64 */ + +#ifdef CONFIG_USER_ONLY +uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign) +{ + uint64_t ret = 0; +#if defined(DEBUG_ASI) + target_ulong last_addr = addr; +#endif + + if (asi < 0x80) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + helper_check_align(addr, size - 1); + addr = asi_address_mask(env, asi, addr); + + switch (asi) { + case 0x82: /* Primary no-fault */ + case 0x8a: /* Primary no-fault LE */ + if (page_check_range(addr, size, PAGE_READ) == -1) { +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return 0; + } + /* Fall through */ + case 0x80: /* Primary */ + case 0x88: /* Primary LE */ + { + switch (size) { + case 1: + ret = ldub_raw(addr); + break; + case 2: + ret = lduw_raw(addr); + break; + case 4: + ret = ldl_raw(addr); + break; + default: + case 8: + ret = ldq_raw(addr); + break; + } + } + break; + case 0x83: /* Secondary no-fault */ + case 0x8b: /* Secondary no-fault LE */ + if (page_check_range(addr, size, PAGE_READ) == -1) { +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return 0; + } + /* Fall through */ + case 0x81: /* Secondary */ + case 0x89: /* Secondary LE */ + /* XXX */ + break; + default: + break; + } + + /* Convert from little endian */ + switch (asi) { + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + case 0x8a: /* Primary no-fault LE */ + case 0x8b: /* Secondary no-fault LE */ + 
switch (size) { + case 2: + ret = bswap16(ret); + break; + case 4: + ret = bswap32(ret); + break; + case 8: + ret = bswap64(ret); + break; + default: + break; + } + default: + break; + } + + /* Convert to signed number */ + if (sign) { + switch (size) { + case 1: + ret = (int8_t) ret; + break; + case 2: + ret = (int16_t) ret; + break; + case 4: + ret = (int32_t) ret; + break; + default: + break; + } + } +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return ret; +} + +void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size) +{ +#ifdef DEBUG_ASI + dump_asi("write", addr, asi, size, val); +#endif + if (asi < 0x80) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + helper_check_align(addr, size - 1); + addr = asi_address_mask(env, asi, addr); + + /* Convert to little endian */ + switch (asi) { + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + switch (size) { + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + case 8: + val = bswap64(val); + break; + default: + break; + } + default: + break; + } + + switch (asi) { + case 0x80: /* Primary */ + case 0x88: /* Primary LE */ + { + switch (size) { + case 1: + stb_raw(addr, val); + break; + case 2: + stw_raw(addr, val); + break; + case 4: + stl_raw(addr, val); + break; + case 8: + default: + stq_raw(addr, val); + break; + } + } + break; + case 0x81: /* Secondary */ + case 0x89: /* Secondary LE */ + /* XXX */ + return; + + case 0x82: /* Primary no-fault, RO */ + case 0x83: /* Secondary no-fault, RO */ + case 0x8a: /* Primary no-fault LE, RO */ + case 0x8b: /* Secondary no-fault LE, RO */ + default: + do_unassigned_access(addr, 1, 0, 1, size); + return; + } +} + +#else /* CONFIG_USER_ONLY */ + +uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign) +{ + uint64_t ret = 0; +#if defined(DEBUG_ASI) + target_ulong last_addr = addr; +#endif + + asi &= 0xff; + + if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) + || (cpu_has_hypervisor(env) + && asi >= 0x30 && asi < 0x80 + && !(env->hpstate & HS_PRIV))) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + helper_check_align(addr, size - 1); + addr = asi_address_mask(env, asi, addr); + + /* process nonfaulting loads first */ + if ((asi & 0xf6) == 0x82) { + int mmu_idx; + + /* secondary space access has lowest asi bit equal to 1 */ + if (env->pstate & PS_PRIV) { + mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX; + } else { + mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX; + } + + if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == -1ULL) { +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + /* env->exception_index is set in get_physical_address_data(). 
*/ + helper_raise_exception(env, env->exception_index); + } + + /* convert nonfaulting load ASIs to normal load ASIs */ + asi &= ~0x02; + } + + switch (asi) { + case 0x10: /* As if user primary */ + case 0x11: /* As if user secondary */ + case 0x18: /* As if user primary LE */ + case 0x19: /* As if user secondary LE */ + case 0x80: /* Primary */ + case 0x81: /* Secondary */ + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + case 0xe2: /* UA2007 Primary block init */ + case 0xe3: /* UA2007 Secondary block init */ + if ((asi & 0x80) && (env->pstate & PS_PRIV)) { + if (cpu_hypervisor_mode(env)) { + switch (size) { + case 1: + ret = ldub_hypv(addr); + break; + case 2: + ret = lduw_hypv(addr); + break; + case 4: + ret = ldl_hypv(addr); + break; + default: + case 8: + ret = ldq_hypv(addr); + break; + } + } else { + /* secondary space access has lowest asi bit equal to 1 */ + if (asi & 1) { + switch (size) { + case 1: + ret = ldub_kernel_secondary(addr); + break; + case 2: + ret = lduw_kernel_secondary(addr); + break; + case 4: + ret = ldl_kernel_secondary(addr); + break; + default: + case 8: + ret = ldq_kernel_secondary(addr); + break; + } + } else { + switch (size) { + case 1: + ret = ldub_kernel(addr); + break; + case 2: + ret = lduw_kernel(addr); + break; + case 4: + ret = ldl_kernel(addr); + break; + default: + case 8: + ret = ldq_kernel(addr); + break; + } + } + } + } else { + /* secondary space access has lowest asi bit equal to 1 */ + if (asi & 1) { + switch (size) { + case 1: + ret = ldub_user_secondary(addr); + break; + case 2: + ret = lduw_user_secondary(addr); + break; + case 4: + ret = ldl_user_secondary(addr); + break; + default: + case 8: + ret = ldq_user_secondary(addr); + break; + } + } else { + switch (size) { + case 1: + ret = ldub_user(addr); + break; + case 2: + ret = lduw_user(addr); + break; + case 4: + ret = ldl_user(addr); + break; + default: + case 8: + ret = ldq_user(addr); + break; + } + } + } + break; + case 0x14: /* Bypass */ + case 0x15: /* Bypass, non-cacheable */ + case 0x1c: /* Bypass LE */ + case 0x1d: /* Bypass, non-cacheable LE */ + { + switch (size) { + case 1: + ret = ldub_phys(addr); + break; + case 2: + ret = lduw_phys(addr); + break; + case 4: + ret = ldl_phys(addr); + break; + default: + case 8: + ret = ldq_phys(addr); + break; + } + break; + } + case 0x24: /* Nucleus quad LDD 128 bit atomic */ + case 0x2c: /* Nucleus quad LDD 128 bit atomic LE + Only ldda allowed */ + helper_raise_exception(env, TT_ILL_INSN); + return 0; + case 0x04: /* Nucleus */ + case 0x0c: /* Nucleus Little Endian (LE) */ + { + switch (size) { + case 1: + ret = ldub_nucleus(addr); + break; + case 2: + ret = lduw_nucleus(addr); + break; + case 4: + ret = ldl_nucleus(addr); + break; + default: + case 8: + ret = ldq_nucleus(addr); + break; + } + break; + } + case 0x4a: /* UPA config */ + /* XXX */ + break; + case 0x45: /* LSU */ + ret = env->lsu; + break; + case 0x50: /* I-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + + if (reg == 0) { + /* I-TSB Tag Target register */ + ret = ultrasparc_tag_target(env->immu.tag_access); + } else { + ret = env->immuregs[reg]; + } + + break; + } + case 0x51: /* I-MMU 8k TSB pointer */ + { + /* env->immuregs[5] holds I-MMU TSB register value + env->immuregs[6] holds I-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access, + 8*1024); + break; + } + case 0x52: /* I-MMU 64k TSB pointer */ + { + /* env->immuregs[5] holds I-MMU TSB register value + env->immuregs[6] holds I-MMU Tag Access register 
value */ + ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access, + 64*1024); + break; + } + case 0x55: /* I-MMU data access */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->itlb[reg].tte; + break; + } + case 0x56: /* I-MMU tag read */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->itlb[reg].tag; + break; + } + case 0x58: /* D-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + + if (reg == 0) { + /* D-TSB Tag Target register */ + ret = ultrasparc_tag_target(env->dmmu.tag_access); + } else { + ret = env->dmmuregs[reg]; + } + break; + } + case 0x59: /* D-MMU 8k TSB pointer */ + { + /* env->dmmuregs[5] holds D-MMU TSB register value + env->dmmuregs[6] holds D-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access, + 8*1024); + break; + } + case 0x5a: /* D-MMU 64k TSB pointer */ + { + /* env->dmmuregs[5] holds D-MMU TSB register value + env->dmmuregs[6] holds D-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access, + 64*1024); + break; + } + case 0x5d: /* D-MMU data access */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->dtlb[reg].tte; + break; + } + case 0x5e: /* D-MMU tag read */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->dtlb[reg].tag; + break; + } + case 0x46: /* D-cache data */ + case 0x47: /* D-cache tag access */ + case 0x4b: /* E-cache error enable */ + case 0x4c: /* E-cache asynchronous fault status */ + case 0x4d: /* E-cache asynchronous fault address */ + case 0x4e: /* E-cache tag data */ + case 0x66: /* I-cache instruction access */ + case 0x67: /* I-cache tag access */ + case 0x6e: /* I-cache predecode */ + case 0x6f: /* I-cache LRU etc. */ + case 0x76: /* E-cache tag */ + case 0x7e: /* E-cache tag */ + break; + case 0x5b: /* D-MMU data pointer */ + case 0x48: /* Interrupt dispatch, RO */ + case 0x49: /* Interrupt data receive */ + case 0x7f: /* Incoming interrupt vector, RO */ + /* XXX */ + break; + case 0x54: /* I-MMU data in, WO */ + case 0x57: /* I-MMU demap, WO */ + case 0x5c: /* D-MMU data in, WO */ + case 0x5f: /* D-MMU demap, WO */ + case 0x77: /* Interrupt vector, WO */ + default: + do_unassigned_access(addr, 0, 0, 1, size); + ret = 0; + break; + } + + /* Convert from little endian */ + switch (asi) { + case 0x0c: /* Nucleus Little Endian (LE) */ + case 0x18: /* As if user primary LE */ + case 0x19: /* As if user secondary LE */ + case 0x1c: /* Bypass LE */ + case 0x1d: /* Bypass, non-cacheable LE */ + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + switch(size) { + case 2: + ret = bswap16(ret); + break; + case 4: + ret = bswap32(ret); + break; + case 8: + ret = bswap64(ret); + break; + default: + break; + } + default: + break; + } + + /* Convert to signed number */ + if (sign) { + switch (size) { + case 1: + ret = (int8_t) ret; + break; + case 2: + ret = (int16_t) ret; + break; + case 4: + ret = (int32_t) ret; + break; + default: + break; + } + } +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return ret; +} + +void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size) +{ +#ifdef DEBUG_ASI + dump_asi("write", addr, asi, size, val); +#endif + + asi &= 0xff; + + if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) + || (cpu_has_hypervisor(env) + && asi >= 0x30 && asi < 0x80 + && !(env->hpstate & HS_PRIV))) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + helper_check_align(addr, size - 1); + addr = asi_address_mask(env, asi, addr); + + /* Convert to little endian */ + switch (asi) { + case 0x0c: 
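(Editorial aside, not part of the patch.) The nonfaulting-load test used a little earlier in this helper, (asi & 0xf6) == 0x82, matches exactly the four no-fault ASIs, and clearing bit 1 afterwards maps each of them onto its normal, faulting counterpart. Spelled out as a tiny predicate; the function name is illustrative only:

/* Which ASIs take the nonfaulting path above, and what asi &= ~0x02 turns
 * them into:
 *   0x82 primary NF      -> 0x80 primary
 *   0x83 secondary NF    -> 0x81 secondary
 *   0x8a primary NF LE   -> 0x88 primary LE
 *   0x8b secondary NF LE -> 0x89 secondary LE
 */
static int is_nofault_asi(int asi)
{
    return (asi & 0xf6) == 0x82;
}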
/* Nucleus Little Endian (LE) */ + case 0x18: /* As if user primary LE */ + case 0x19: /* As if user secondary LE */ + case 0x1c: /* Bypass LE */ + case 0x1d: /* Bypass, non-cacheable LE */ + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + switch (size) { + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + case 8: + val = bswap64(val); + break; + default: + break; + } + default: + break; + } + + switch (asi) { + case 0x10: /* As if user primary */ + case 0x11: /* As if user secondary */ + case 0x18: /* As if user primary LE */ + case 0x19: /* As if user secondary LE */ + case 0x80: /* Primary */ + case 0x81: /* Secondary */ + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + case 0xe2: /* UA2007 Primary block init */ + case 0xe3: /* UA2007 Secondary block init */ + if ((asi & 0x80) && (env->pstate & PS_PRIV)) { + if (cpu_hypervisor_mode(env)) { + switch (size) { + case 1: + stb_hypv(addr, val); + break; + case 2: + stw_hypv(addr, val); + break; + case 4: + stl_hypv(addr, val); + break; + case 8: + default: + stq_hypv(addr, val); + break; + } + } else { + /* secondary space access has lowest asi bit equal to 1 */ + if (asi & 1) { + switch (size) { + case 1: + stb_kernel_secondary(addr, val); + break; + case 2: + stw_kernel_secondary(addr, val); + break; + case 4: + stl_kernel_secondary(addr, val); + break; + case 8: + default: + stq_kernel_secondary(addr, val); + break; + } + } else { + switch (size) { + case 1: + stb_kernel(addr, val); + break; + case 2: + stw_kernel(addr, val); + break; + case 4: + stl_kernel(addr, val); + break; + case 8: + default: + stq_kernel(addr, val); + break; + } + } + } + } else { + /* secondary space access has lowest asi bit equal to 1 */ + if (asi & 1) { + switch (size) { + case 1: + stb_user_secondary(addr, val); + break; + case 2: + stw_user_secondary(addr, val); + break; + case 4: + stl_user_secondary(addr, val); + break; + case 8: + default: + stq_user_secondary(addr, val); + break; + } + } else { + switch (size) { + case 1: + stb_user(addr, val); + break; + case 2: + stw_user(addr, val); + break; + case 4: + stl_user(addr, val); + break; + case 8: + default: + stq_user(addr, val); + break; + } + } + } + break; + case 0x14: /* Bypass */ + case 0x15: /* Bypass, non-cacheable */ + case 0x1c: /* Bypass LE */ + case 0x1d: /* Bypass, non-cacheable LE */ + { + switch (size) { + case 1: + stb_phys(addr, val); + break; + case 2: + stw_phys(addr, val); + break; + case 4: + stl_phys(addr, val); + break; + case 8: + default: + stq_phys(addr, val); + break; + } + } + return; + case 0x24: /* Nucleus quad LDD 128 bit atomic */ + case 0x2c: /* Nucleus quad LDD 128 bit atomic LE + Only ldda allowed */ + helper_raise_exception(env, TT_ILL_INSN); + return; + case 0x04: /* Nucleus */ + case 0x0c: /* Nucleus Little Endian (LE) */ + { + switch (size) { + case 1: + stb_nucleus(addr, val); + break; + case 2: + stw_nucleus(addr, val); + break; + case 4: + stl_nucleus(addr, val); + break; + default: + case 8: + stq_nucleus(addr, val); + break; + } + break; + } + + case 0x4a: /* UPA config */ + /* XXX */ + return; + case 0x45: /* LSU */ + { + uint64_t oldreg; + + oldreg = env->lsu; + env->lsu = val & (DMMU_E | IMMU_E); + /* Mappings generated during D/I MMU disabled mode are + invalid in normal mode */ + if (oldreg != env->lsu) { + DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n", + oldreg, env->lsu); +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env1); +#endif + tlb_flush(env, 1); + } + return; + } + case 0x50: /* 
I-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + uint64_t oldreg; + + oldreg = env->immuregs[reg]; + switch (reg) { + case 0: /* RO */ + return; + case 1: /* Not in I-MMU */ + case 2: + return; + case 3: /* SFSR */ + if ((val & 1) == 0) { + val = 0; /* Clear SFSR */ + } + env->immu.sfsr = val; + break; + case 4: /* RO */ + return; + case 5: /* TSB access */ + DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", env->immu.tsb, val); + env->immu.tsb = val; + break; + case 6: /* Tag access */ + env->immu.tag_access = val; + break; + case 7: + case 8: + return; + default: + break; + } + + if (oldreg != env->immuregs[reg]) { + DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", reg, oldreg, env->immuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + return; + } + case 0x54: /* I-MMU data in */ + replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env); + return; + case 0x55: /* I-MMU data access */ + { + /* TODO: auto demap */ + + unsigned int i = (addr >> 3) & 0x3f; + + replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env); + +#ifdef DEBUG_MMU + DPRINTF_MMU("immu data access replaced entry [%i]\n", i); + dump_mmu(stdout, fprintf, env); +#endif + return; + } + case 0x57: /* I-MMU demap */ + demap_tlb(env->itlb, addr, "immu", env); + return; + case 0x58: /* D-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + uint64_t oldreg; + + oldreg = env->dmmuregs[reg]; + switch (reg) { + case 0: /* RO */ + case 4: + return; + case 3: /* SFSR */ + if ((val & 1) == 0) { + val = 0; /* Clear SFSR, Fault address */ + env->dmmu.sfar = 0; + } + env->dmmu.sfsr = val; + break; + case 1: /* Primary context */ + env->dmmu.mmu_primary_context = val; + /* can be optimized to only flush MMU_USER_IDX + and MMU_KERNEL_IDX entries */ + tlb_flush(env, 1); + break; + case 2: /* Secondary context */ + env->dmmu.mmu_secondary_context = val; + /* can be optimized to only flush MMU_USER_SECONDARY_IDX + and MMU_KERNEL_SECONDARY_IDX entries */ + tlb_flush(env, 1); + break; + case 5: /* TSB access */ + DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", env->dmmu.tsb, val); + env->dmmu.tsb = val; + break; + case 6: /* Tag access */ + env->dmmu.tag_access = val; + break; + case 7: /* Virtual Watchpoint */ + case 8: /* Physical Watchpoint */ + default: + env->dmmuregs[reg] = val; + break; + } + + if (oldreg != env->dmmuregs[reg]) { + DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + return; + } + case 0x5c: /* D-MMU data in */ + replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env); + return; + case 0x5d: /* D-MMU data access */ + { + unsigned int i = (addr >> 3) & 0x3f; + + replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env); + +#ifdef DEBUG_MMU + DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i); + dump_mmu(stdout, fprintf, env); +#endif + return; + } + case 0x5f: /* D-MMU demap */ + demap_tlb(env->dtlb, addr, "dmmu", env); + return; + case 0x49: /* Interrupt data receive */ + /* XXX */ + return; + case 0x46: /* D-cache data */ + case 0x47: /* D-cache tag access */ + case 0x4b: /* E-cache error enable */ + case 0x4c: /* E-cache asynchronous fault status */ + case 0x4d: /* E-cache asynchronous fault address */ + case 0x4e: /* E-cache tag data */ + case 0x66: /* I-cache instruction access */ + case 0x67: /* I-cache tag access */ + case 0x6e: /* I-cache 
predecode */ + case 0x6f: /* I-cache LRU etc. */ + case 0x76: /* E-cache tag */ + case 0x7e: /* E-cache tag */ + return; + case 0x51: /* I-MMU 8k TSB pointer, RO */ + case 0x52: /* I-MMU 64k TSB pointer, RO */ + case 0x56: /* I-MMU tag read, RO */ + case 0x59: /* D-MMU 8k TSB pointer, RO */ + case 0x5a: /* D-MMU 64k TSB pointer, RO */ + case 0x5b: /* D-MMU data pointer, RO */ + case 0x5e: /* D-MMU tag read, RO */ + case 0x48: /* Interrupt dispatch, RO */ + case 0x7f: /* Incoming interrupt vector, RO */ + case 0x82: /* Primary no-fault, RO */ + case 0x83: /* Secondary no-fault, RO */ + case 0x8a: /* Primary no-fault LE, RO */ + case 0x8b: /* Secondary no-fault LE, RO */ + default: + do_unassigned_access(addr, 1, 0, 1, size); + return; + } +} +#endif /* CONFIG_USER_ONLY */ + +void helper_ldda_asi(target_ulong addr, int asi, int rd) +{ + if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) + || (cpu_has_hypervisor(env) + && asi >= 0x30 && asi < 0x80 + && !(env->hpstate & HS_PRIV))) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + addr = asi_address_mask(env, asi, addr); + + switch (asi) { +#if !defined(CONFIG_USER_ONLY) + case 0x24: /* Nucleus quad LDD 128 bit atomic */ + case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */ + helper_check_align(addr, 0xf); + if (rd == 0) { + env->gregs[1] = ldq_nucleus(addr + 8); + if (asi == 0x2c) { + bswap64s(&env->gregs[1]); + } + } else if (rd < 8) { + env->gregs[rd] = ldq_nucleus(addr); + env->gregs[rd + 1] = ldq_nucleus(addr + 8); + if (asi == 0x2c) { + bswap64s(&env->gregs[rd]); + bswap64s(&env->gregs[rd + 1]); + } + } else { + env->regwptr[rd] = ldq_nucleus(addr); + env->regwptr[rd + 1] = ldq_nucleus(addr + 8); + if (asi == 0x2c) { + bswap64s(&env->regwptr[rd]); + bswap64s(&env->regwptr[rd + 1]); + } + } + break; +#endif + default: + helper_check_align(addr, 0x3); + if (rd == 0) { + env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0); + } else if (rd < 8) { + env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0); + env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0); + } else { + env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0); + env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0); + } + break; + } +} + +void helper_ldf_asi(target_ulong addr, int asi, int size, int rd) +{ + unsigned int i; + target_ulong val; + + helper_check_align(addr, 3); + addr = asi_address_mask(env, asi, addr); + + switch (asi) { + case 0xf0: /* UA2007/JPS1 Block load primary */ + case 0xf1: /* UA2007/JPS1 Block load secondary */ + case 0xf8: /* UA2007/JPS1 Block load primary LE */ + case 0xf9: /* UA2007/JPS1 Block load secondary LE */ + if (rd & 7) { + helper_raise_exception(env, TT_ILL_INSN); + return; + } + helper_check_align(addr, 0x3f); + for (i = 0; i < 8; i++, rd += 2, addr += 8) { + env->fpr[rd/2].ll = helper_ld_asi(addr, asi & 0x8f, 8, 0); + } + return; + + case 0x16: /* UA2007 Block load primary, user privilege */ + case 0x17: /* UA2007 Block load secondary, user privilege */ + case 0x1e: /* UA2007 Block load primary LE, user privilege */ + case 0x1f: /* UA2007 Block load secondary LE, user privilege */ + case 0x70: /* JPS1 Block load primary, user privilege */ + case 0x71: /* JPS1 Block load secondary, user privilege */ + case 0x78: /* JPS1 Block load primary LE, user privilege */ + case 0x79: /* JPS1 Block load secondary LE, user privilege */ + if (rd & 7) { + helper_raise_exception(env, TT_ILL_INSN); + return; + } + helper_check_align(addr, 0x3f); + for (i = 0; i < 8; i++, rd += 2, addr += 4) { + env->fpr[rd/2].ll = helper_ld_asi(addr, asi & 0x19, 8, 0); + } + 
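(Editorial aside, not part of the patch.) For the FP block transfers above and the 32-bit cases that follow, the patch consistently treats the register file as an array of doubles: a 64-bit value occupies fpr[rd/2] as a whole (.ll), while a single-precision register sits in the upper half when rd is even and the lower half when rd is odd — the same order in which machine.c below saves them. A hypothetical accessor capturing that convention, assuming the union layout the patch itself uses:

/* Editorial sketch of the fpr[] indexing convention used in this patch. */
static inline uint32_t fpr32_read(CPUState *env, unsigned int rd)
{
    /* even rd -> upper 32 bits of the double, odd rd -> lower 32 bits */
    return (rd & 1) ? env->fpr[rd / 2].l.lower : env->fpr[rd / 2].l.upper;
}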
return; + + default: + break; + } + + switch (size) { + default: + case 4: + val = helper_ld_asi(addr, asi, size, 0); + if (rd & 1) { + env->fpr[rd/2].l.lower = val; + } else { + env->fpr[rd/2].l.upper = val; + } + break; + case 8: + env->fpr[rd/2].ll = helper_ld_asi(addr, asi, size, 0); + break; + case 16: + env->fpr[rd/2].ll = helper_ld_asi(addr, asi, 8, 0); + env->fpr[rd/2 + 1].ll = helper_ld_asi(addr + 8, asi, 8, 0); + break; + } +} + +void helper_stf_asi(target_ulong addr, int asi, int size, int rd) +{ + unsigned int i; + target_ulong val; + + helper_check_align(addr, 3); + addr = asi_address_mask(env, asi, addr); + + switch (asi) { + case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */ + case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */ + case 0xf0: /* UA2007/JPS1 Block store primary */ + case 0xf1: /* UA2007/JPS1 Block store secondary */ + case 0xf8: /* UA2007/JPS1 Block store primary LE */ + case 0xf9: /* UA2007/JPS1 Block store secondary LE */ + if (rd & 7) { + helper_raise_exception(env, TT_ILL_INSN); + return; + } + helper_check_align(addr, 0x3f); + for (i = 0; i < 8; i++, rd += 2, addr += 8) { + helper_st_asi(addr, env->fpr[rd/2].ll, asi & 0x8f, 8); + } + + return; + case 0x16: /* UA2007 Block load primary, user privilege */ + case 0x17: /* UA2007 Block load secondary, user privilege */ + case 0x1e: /* UA2007 Block load primary LE, user privilege */ + case 0x1f: /* UA2007 Block load secondary LE, user privilege */ + case 0x70: /* JPS1 Block store primary, user privilege */ + case 0x71: /* JPS1 Block store secondary, user privilege */ + case 0x78: /* JPS1 Block load primary LE, user privilege */ + case 0x79: /* JPS1 Block load secondary LE, user privilege */ + if (rd & 7) { + helper_raise_exception(env, TT_ILL_INSN); + return; + } + helper_check_align(addr, 0x3f); + for (i = 0; i < 8; i++, rd += 2, addr += 8) { + helper_st_asi(addr, env->fpr[rd/2].ll, asi & 0x19, 8); + } + + return; + default: + break; + } + + switch (size) { + default: + case 4: + if (rd & 1) { + val = env->fpr[rd/2].l.lower; + } else { + val = env->fpr[rd/2].l.upper; + } + helper_st_asi(addr, val, asi, size); + break; + case 8: + helper_st_asi(addr, env->fpr[rd/2].ll, asi, size); + break; + case 16: + helper_st_asi(addr, env->fpr[rd/2].ll, asi, 8); + helper_st_asi(addr + 8, env->fpr[rd/2 + 1].ll, asi, 8); + break; + } +} + +target_ulong helper_cas_asi(target_ulong addr, target_ulong val1, + target_ulong val2, uint32_t asi) +{ + target_ulong ret; + + val2 &= 0xffffffffUL; + ret = helper_ld_asi(addr, asi, 4, 0); + ret &= 0xffffffffUL; + if (val2 == ret) { + helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4); + } + return ret; +} + +target_ulong helper_casx_asi(target_ulong addr, target_ulong val1, + target_ulong val2, uint32_t asi) +{ + target_ulong ret; + + ret = helper_ld_asi(addr, asi, 8, 0); + if (val2 == ret) { + helper_st_asi(addr, val1, asi, 8); + } + return ret; +} +#endif /* TARGET_SPARC64 */ + +void helper_ldqf(target_ulong addr, int mem_idx) +{ + /* XXX add 128 bit load */ + CPU_QuadU u; + + helper_check_align(addr, 7); +#if !defined(CONFIG_USER_ONLY) + switch (mem_idx) { + case MMU_USER_IDX: + u.ll.upper = ldq_user(addr); + u.ll.lower = ldq_user(addr + 8); + QT0 = u.q; + break; + case MMU_KERNEL_IDX: + u.ll.upper = ldq_kernel(addr); + u.ll.lower = ldq_kernel(addr + 8); + QT0 = u.q; + break; +#ifdef TARGET_SPARC64 + case MMU_HYPV_IDX: + u.ll.upper = ldq_hypv(addr); + u.ll.lower = ldq_hypv(addr + 8); + QT0 = u.q; + break; +#endif + default: + 
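(Editorial aside, not part of the patch.) helper_cas_asi and helper_casx_asi above build SPARC's compare-and-swap purely out of the ld/st ASI helpers: load the old value, store the new one only if the old value equals the expected one, and hand the old value back so the guest can detect success. Stripped of the ASI plumbing and truncation, the semantics are the familiar pattern below; this is an illustration of the behaviour, not code from the patch:

/* Editorial sketch: what helper_cas_asi provides for a 32-bit location
 * (the casx variant is the same without the 32-bit masking).  The caller
 * compares the returned old value with its expected value. */
static uint32_t cas32_semantics(uint32_t *mem, uint32_t expected, uint32_t newval)
{
    uint32_t old = *mem;
    if (old == expected) {
        *mem = newval;
    }
    return old;
}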
DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx); + break; + } +#else + u.ll.upper = ldq_raw(address_mask(env, addr)); + u.ll.lower = ldq_raw(address_mask(env, addr + 8)); + QT0 = u.q; +#endif +} + +void helper_stqf(target_ulong addr, int mem_idx) +{ + /* XXX add 128 bit store */ + CPU_QuadU u; + + helper_check_align(addr, 7); +#if !defined(CONFIG_USER_ONLY) + switch (mem_idx) { + case MMU_USER_IDX: + u.q = QT0; + stq_user(addr, u.ll.upper); + stq_user(addr + 8, u.ll.lower); + break; + case MMU_KERNEL_IDX: + u.q = QT0; + stq_kernel(addr, u.ll.upper); + stq_kernel(addr + 8, u.ll.lower); + break; +#ifdef TARGET_SPARC64 + case MMU_HYPV_IDX: + u.q = QT0; + stq_hypv(addr, u.ll.upper); + stq_hypv(addr + 8, u.ll.lower); + break; +#endif + default: + DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx); + break; + } +#else + u.q = QT0; + stq_raw(address_mask(env, addr), u.ll.upper); + stq_raw(address_mask(env, addr + 8), u.ll.lower); +#endif +} + +#ifndef TARGET_SPARC64 +#if !defined(CONFIG_USER_ONLY) +static void do_unassigned_access(target_phys_addr_t addr, int is_write, + int is_exec, int is_asi, int size) +{ + int fault_type; + +#ifdef DEBUG_UNASSIGNED + if (is_asi) { + printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx + " asi 0x%02x from " TARGET_FMT_lx "\n", + is_exec ? "exec" : is_write ? "write" : "read", size, + size == 1 ? "" : "s", addr, is_asi, env->pc); + } else { + printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx + " from " TARGET_FMT_lx "\n", + is_exec ? "exec" : is_write ? "write" : "read", size, + size == 1 ? "" : "s", addr, env->pc); + } +#endif + /* Don't overwrite translation and access faults */ + fault_type = (env->mmuregs[3] & 0x1c) >> 2; + if ((fault_type > 4) || (fault_type == 0)) { + env->mmuregs[3] = 0; /* Fault status register */ + if (is_asi) { + env->mmuregs[3] |= 1 << 16; + } + if (env->psrs) { + env->mmuregs[3] |= 1 << 5; + } + if (is_exec) { + env->mmuregs[3] |= 1 << 6; + } + if (is_write) { + env->mmuregs[3] |= 1 << 7; + } + env->mmuregs[3] |= (5 << 2) | 2; + /* SuperSPARC will never place instruction fault addresses in the FAR */ + if (!is_exec) { + env->mmuregs[4] = addr; /* Fault address register */ + } + } + /* overflow (same type fault was not read before another fault) */ + if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) { + env->mmuregs[3] |= 1; + } + + if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) { + if (is_exec) { + helper_raise_exception(env, TT_CODE_ACCESS); + } else { + helper_raise_exception(env, TT_DATA_ACCESS); + } + } + + /* flush neverland mappings created during no-fault mode, + so the sequential MMU faults report proper fault types */ + if (env->mmuregs[0] & MMU_NF) { + tlb_flush(env, 1); + } +} +#endif +#else +#if defined(CONFIG_USER_ONLY) +static void do_unassigned_access(target_ulong addr, int is_write, int is_exec, + int is_asi, int size) +#else +static void do_unassigned_access(target_phys_addr_t addr, int is_write, + int is_exec, int is_asi, int size) +#endif +{ +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx + "\n", addr, env->pc); +#endif + + if (is_exec) { + helper_raise_exception(env, TT_CODE_ACCESS); + } else { + helper_raise_exception(env, TT_DATA_ACCESS); + } +} +#endif + +#if !defined(CONFIG_USER_ONLY) +void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr, + int is_write, int is_exec, int is_asi, int size) +{ + CPUState *saved_env; + + saved_env = env; + env = env1; + 
do_unassigned_access(addr, is_write, is_exec, is_asi, size); + env = saved_env; +} +#endif diff --git a/target-sparc/machine.c b/target-sparc/machine.c index 56ae0412cd..235b088a45 100644 --- a/target-sparc/machine.c +++ b/target-sparc/machine.c @@ -21,13 +21,9 @@ void cpu_save(QEMUFile *f, void *opaque) qemu_put_betls(f, &env->regbase[i]); /* FPU */ - for(i = 0; i < TARGET_FPREGS; i++) { - union { - float32 f; - uint32_t i; - } u; - u.f = env->fpr[i]; - qemu_put_be32(f, u.i); + for (i = 0; i < TARGET_DPREGS; i++) { + qemu_put_be32(f, env->fpr[i].l.upper); + qemu_put_be32(f, env->fpr[i].l.lower); } qemu_put_betls(f, &env->pc); @@ -128,13 +124,9 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id) qemu_get_betls(f, &env->regbase[i]); /* FPU */ - for(i = 0; i < TARGET_FPREGS; i++) { - union { - float32 f; - uint32_t i; - } u; - u.i = qemu_get_be32(f); - env->fpr[i] = u.f; + for (i = 0; i < TARGET_DPREGS; i++) { + env->fpr[i].l.upper = qemu_get_be32(f); + env->fpr[i].l.lower = qemu_get_be32(f); } qemu_get_betls(f, &env->pc); diff --git a/target-sparc/mmu_helper.c b/target-sparc/mmu_helper.c new file mode 100644 index 0000000000..8cdc224ae3 --- /dev/null +++ b/target-sparc/mmu_helper.c @@ -0,0 +1,853 @@ +/* + * Sparc MMU helpers + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "cpu.h" +#include "trace.h" + +/* Sparc MMU emulation */ + +#if defined(CONFIG_USER_ONLY) + +int cpu_sparc_handle_mmu_fault(CPUState *env1, target_ulong address, int rw, + int mmu_idx) +{ + if (rw & 2) { + env1->exception_index = TT_TFAULT; + } else { + env1->exception_index = TT_DFAULT; + } + return 1; +} + +#else + +#ifndef TARGET_SPARC64 +/* + * Sparc V8 Reference MMU (SRMMU) + */ +static const int access_table[8][8] = { + { 0, 0, 0, 0, 8, 0, 12, 12 }, + { 0, 0, 0, 0, 8, 0, 0, 0 }, + { 8, 8, 0, 0, 0, 8, 12, 12 }, + { 8, 8, 0, 0, 0, 8, 0, 0 }, + { 8, 0, 8, 0, 8, 8, 12, 12 }, + { 8, 0, 8, 0, 8, 0, 8, 0 }, + { 8, 8, 8, 0, 8, 8, 12, 12 }, + { 8, 8, 8, 0, 8, 8, 8, 0 } +}; + +static const int perm_table[2][8] = { + { + PAGE_READ, + PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_EXEC, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, + PAGE_EXEC, + PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_EXEC, + PAGE_READ | PAGE_WRITE | PAGE_EXEC + }, + { + PAGE_READ, + PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_EXEC, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, + PAGE_EXEC, + PAGE_READ, + 0, + 0, + } +}; + +static int get_physical_address(CPUState *env, target_phys_addr_t *physical, + int *prot, int *access_index, + target_ulong address, int rw, int mmu_idx, + target_ulong *page_size) +{ + int access_perms = 0; + target_phys_addr_t pde_ptr; + uint32_t pde; + int error_code = 0, is_dirty, is_user; + unsigned long page_offset; + + is_user = mmu_idx == MMU_USER_IDX; + + if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */ + *page_size = TARGET_PAGE_SIZE; + /* Boot mode: instruction fetches are taken from PROM */ + if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) { + *physical = env->prom_addr | (address & 0x7ffffULL); + *prot = PAGE_READ | PAGE_EXEC; + return 0; + } + *physical = address; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return 0; + } + + *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); + *physical = 0xffffffffffff0000ULL; + + /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ + /* Context base + context number */ + pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); + pde = ldl_phys(pde_ptr); + + /* Ctx pde */ + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + return 1 << 2; + case 2: /* L0 PTE, maybe should not happen? 
*/ + case 3: /* Reserved */ + return 4 << 2; + case 1: /* L0 PDE */ + pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); + pde = ldl_phys(pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + return (1 << 8) | (1 << 2); + case 3: /* Reserved */ + return (1 << 8) | (4 << 2); + case 1: /* L1 PDE */ + pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); + pde = ldl_phys(pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + return (2 << 8) | (1 << 2); + case 3: /* Reserved */ + return (2 << 8) | (4 << 2); + case 1: /* L2 PDE */ + pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); + pde = ldl_phys(pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + return (3 << 8) | (1 << 2); + case 1: /* PDE, should not happen */ + case 3: /* Reserved */ + return (3 << 8) | (4 << 2); + case 2: /* L3 PTE */ + page_offset = (address & TARGET_PAGE_MASK) & + (TARGET_PAGE_SIZE - 1); + } + *page_size = TARGET_PAGE_SIZE; + break; + case 2: /* L2 PTE */ + page_offset = address & 0x3ffff; + *page_size = 0x40000; + } + break; + case 2: /* L1 PTE */ + page_offset = address & 0xffffff; + *page_size = 0x1000000; + } + } + + /* check access */ + access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; + error_code = access_table[*access_index][access_perms]; + if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) { + return error_code; + } + + /* update page modified and dirty bits */ + is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); + if (!(pde & PG_ACCESSED_MASK) || is_dirty) { + pde |= PG_ACCESSED_MASK; + if (is_dirty) { + pde |= PG_MODIFIED_MASK; + } + stl_phys_notdirty(pde_ptr, pde); + } + + /* the page can be put in the TLB */ + *prot = perm_table[is_user][access_perms]; + if (!(pde & PG_MODIFIED_MASK)) { + /* only set write access if already dirty... otherwise wait + for dirty access */ + *prot &= ~PAGE_WRITE; + } + + /* Even if large ptes, we map only one 4KB page in the cache to + avoid filling it too fast */ + *physical = ((target_phys_addr_t)(pde & PTE_ADDR_MASK) << 4) + page_offset; + return error_code; +} + +/* Perform address translation */ +int cpu_sparc_handle_mmu_fault(CPUState *env, target_ulong address, int rw, + int mmu_idx) +{ + target_phys_addr_t paddr; + target_ulong vaddr; + target_ulong page_size; + int error_code = 0, prot, access_index; + + error_code = get_physical_address(env, &paddr, &prot, &access_index, + address, rw, mmu_idx, &page_size); + if (error_code == 0) { + vaddr = address & TARGET_PAGE_MASK; + paddr &= TARGET_PAGE_MASK; +#ifdef DEBUG_MMU + printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr " + TARGET_FMT_lx "\n", address, paddr, vaddr); +#endif + tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size); + return 0; + } + + if (env->mmuregs[3]) { /* Fault status register */ + env->mmuregs[3] = 1; /* overflow (not read before another fault) */ + } + env->mmuregs[3] |= (access_index << 5) | error_code | 2; + env->mmuregs[4] = address; /* Fault address register */ + + if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { + /* No fault mode: if a mapping is available, just override + permissions. If no mapping is available, redirect accesses to + neverland. Fake/overridden mappings will be flushed when + switching to normal mode. 
*/ + vaddr = address & TARGET_PAGE_MASK; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); + return 0; + } else { + if (rw & 2) { + env->exception_index = TT_TFAULT; + } else { + env->exception_index = TT_DFAULT; + } + return 1; + } +} + +target_ulong mmu_probe(CPUState *env, target_ulong address, int mmulev) +{ + target_phys_addr_t pde_ptr; + uint32_t pde; + + /* Context base + context number */ + pde_ptr = (target_phys_addr_t)(env->mmuregs[1] << 4) + + (env->mmuregs[2] << 2); + pde = ldl_phys(pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + case 2: /* PTE, maybe should not happen? */ + case 3: /* Reserved */ + return 0; + case 1: /* L1 PDE */ + if (mmulev == 3) { + return pde; + } + pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); + pde = ldl_phys(pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + case 3: /* Reserved */ + return 0; + case 2: /* L1 PTE */ + return pde; + case 1: /* L2 PDE */ + if (mmulev == 2) { + return pde; + } + pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); + pde = ldl_phys(pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + case 3: /* Reserved */ + return 0; + case 2: /* L2 PTE */ + return pde; + case 1: /* L3 PDE */ + if (mmulev == 1) { + return pde; + } + pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); + pde = ldl_phys(pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + case 1: /* PDE, should not happen */ + case 3: /* Reserved */ + return 0; + case 2: /* L3 PTE */ + return pde; + } + } + } + } + return 0; +} + +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env) +{ + target_ulong va, va1, va2; + unsigned int n, m, o; + target_phys_addr_t pde_ptr, pa; + uint32_t pde; + + pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); + pde = ldl_phys(pde_ptr); + (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n", + (target_phys_addr_t)env->mmuregs[1] << 4, env->mmuregs[2]); + for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { + pde = mmu_probe(env, va, 2); + if (pde) { + pa = cpu_get_phys_page_debug(env, va); + (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx + " PDE: " TARGET_FMT_lx "\n", va, pa, pde); + for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { + pde = mmu_probe(env, va1, 1); + if (pde) { + pa = cpu_get_phys_page_debug(env, va1); + (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: " + TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n", + va1, pa, pde); + for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { + pde = mmu_probe(env, va2, 0); + if (pde) { + pa = cpu_get_phys_page_debug(env, va2); + (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: " + TARGET_FMT_plx " PTE: " + TARGET_FMT_lx "\n", + va2, pa, pde); + } + } + } + } + } + } +} + +/* Gdb expects all registers windows to be flushed in ram. This function handles + * reads (and only reads) in stack frames as if windows were flushed. We assume + * that the sparc ABI is followed. + */ +int target_memory_rw_debug(CPUState *env, target_ulong addr, + uint8_t *buf, int len, int is_write) +{ + int i; + int len1; + int cwp = env->cwp; + + if (!is_write) { + for (i = 0; i < env->nwindows; i++) { + int off; + target_ulong fp = env->regbase[cwp * 16 + 22]; + + /* Assume fp == 0 means end of frame. */ + if (fp == 0) { + break; + } + + cwp = cpu_cwp_inc(env, cwp + 1); + + /* Invalid window ? 
*/ + if (env->wim & (1 << cwp)) { + break; + } + + /* According to the ABI, the stack is growing downward. */ + if (addr + len < fp) { + break; + } + + /* Not in this frame. */ + if (addr > fp + 64) { + continue; + } + + /* Handle access before this window. */ + if (addr < fp) { + len1 = fp - addr; + if (cpu_memory_rw_debug(env, addr, buf, len1, is_write) != 0) { + return -1; + } + addr += len1; + len -= len1; + buf += len1; + } + + /* Access byte per byte to registers. Not very efficient but speed + * is not critical. + */ + off = addr - fp; + len1 = 64 - off; + + if (len1 > len) { + len1 = len; + } + + for (; len1; len1--) { + int reg = cwp * 16 + 8 + (off >> 2); + union { + uint32_t v; + uint8_t c[4]; + } u; + u.v = cpu_to_be32(env->regbase[reg]); + *buf++ = u.c[off & 3]; + addr++; + len--; + off++; + } + + if (len == 0) { + return 0; + } + } + } + return cpu_memory_rw_debug(env, addr, buf, len, is_write); +} + +#else /* !TARGET_SPARC64 */ + +/* 41 bit physical address space */ +static inline target_phys_addr_t ultrasparc_truncate_physical(uint64_t x) +{ + return x & 0x1ffffffffffULL; +} + +/* + * UltraSparc IIi I/DMMUs + */ + +/* Returns true if TTE tag is valid and matches virtual address value + in context requires virtual address mask value calculated from TTE + entry size */ +static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, + uint64_t address, uint64_t context, + target_phys_addr_t *physical) +{ + uint64_t mask; + + switch (TTE_PGSIZE(tlb->tte)) { + default: + case 0x0: /* 8k */ + mask = 0xffffffffffffe000ULL; + break; + case 0x1: /* 64k */ + mask = 0xffffffffffff0000ULL; + break; + case 0x2: /* 512k */ + mask = 0xfffffffffff80000ULL; + break; + case 0x3: /* 4M */ + mask = 0xffffffffffc00000ULL; + break; + } + + /* valid, context match, virtual address match? */ + if (TTE_IS_VALID(tlb->tte) && + (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) + && compare_masked(address, tlb->tag, mask)) { + /* decode physical address */ + *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; + return 1; + } + + return 0; +} + +static int get_physical_address_data(CPUState *env, + target_phys_addr_t *physical, int *prot, + target_ulong address, int rw, int mmu_idx) +{ + unsigned int i; + uint64_t context; + uint64_t sfsr = 0; + + int is_user = (mmu_idx == MMU_USER_IDX || + mmu_idx == MMU_USER_SECONDARY_IDX); + + if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */ + *physical = ultrasparc_truncate_physical(address); + *prot = PAGE_READ | PAGE_WRITE; + return 0; + } + + switch (mmu_idx) { + case MMU_USER_IDX: + case MMU_KERNEL_IDX: + context = env->dmmu.mmu_primary_context & 0x1fff; + sfsr |= SFSR_CT_PRIMARY; + break; + case MMU_USER_SECONDARY_IDX: + case MMU_KERNEL_SECONDARY_IDX: + context = env->dmmu.mmu_secondary_context & 0x1fff; + sfsr |= SFSR_CT_SECONDARY; + break; + case MMU_NUCLEUS_IDX: + sfsr |= SFSR_CT_NUCLEUS; + /* FALLTHRU */ + default: + context = 0; + break; + } + + if (rw == 1) { + sfsr |= SFSR_WRITE_BIT; + } else if (rw == 4) { + sfsr |= SFSR_NF_BIT; + } + + for (i = 0; i < 64; i++) { + /* ctx match, vaddr match, valid? */ + if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { + int do_fault = 0; + + /* access ok? 
*/ + /* multiple bits in SFSR.FT may be set on TT_DFAULT */ + if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { + do_fault = 1; + sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ + trace_mmu_helper_dfault(address, context, mmu_idx, env->tl); + } + if (rw == 4) { + if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { + do_fault = 1; + sfsr |= SFSR_FT_NF_E_BIT; + } + } else { + if (TTE_IS_NFO(env->dtlb[i].tte)) { + do_fault = 1; + sfsr |= SFSR_FT_NFO_BIT; + } + } + + if (do_fault) { + /* faults above are reported with TT_DFAULT. */ + env->exception_index = TT_DFAULT; + } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { + do_fault = 1; + env->exception_index = TT_DPROT; + + trace_mmu_helper_dprot(address, context, mmu_idx, env->tl); + } + + if (!do_fault) { + *prot = PAGE_READ; + if (TTE_IS_W_OK(env->dtlb[i].tte)) { + *prot |= PAGE_WRITE; + } + + TTE_SET_USED(env->dtlb[i].tte); + + return 0; + } + + if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ + sfsr |= SFSR_OW_BIT; /* overflow (not read before + another fault) */ + } + + if (env->pstate & PS_PRIV) { + sfsr |= SFSR_PR_BIT; + } + + /* FIXME: ASI field in SFSR must be set */ + env->dmmu.sfsr = sfsr | SFSR_VALID_BIT; + + env->dmmu.sfar = address; /* Fault address register */ + + env->dmmu.tag_access = (address & ~0x1fffULL) | context; + + return 1; + } + } + + trace_mmu_helper_dmiss(address, context); + + /* + * On MMU misses: + * - UltraSPARC IIi: SFSR and SFAR unmodified + * - JPS1: SFAR updated and some fields of SFSR updated + */ + env->dmmu.tag_access = (address & ~0x1fffULL) | context; + env->exception_index = TT_DMISS; + return 1; +} + +static int get_physical_address_code(CPUState *env, + target_phys_addr_t *physical, int *prot, + target_ulong address, int mmu_idx) +{ + unsigned int i; + uint64_t context; + + int is_user = (mmu_idx == MMU_USER_IDX || + mmu_idx == MMU_USER_SECONDARY_IDX); + + if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) { + /* IMMU disabled */ + *physical = ultrasparc_truncate_physical(address); + *prot = PAGE_EXEC; + return 0; + } + + if (env->tl == 0) { + /* PRIMARY context */ + context = env->dmmu.mmu_primary_context & 0x1fff; + } else { + /* NUCLEUS context */ + context = 0; + } + + for (i = 0; i < 64; i++) { + /* ctx match, vaddr match, valid? */ + if (ultrasparc_tag_match(&env->itlb[i], + address, context, physical)) { + /* access ok? */ + if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { + /* Fault status register */ + if (env->immu.sfsr & SFSR_VALID_BIT) { + env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before + another fault) */ + } else { + env->immu.sfsr = 0; + } + if (env->pstate & PS_PRIV) { + env->immu.sfsr |= SFSR_PR_BIT; + } + if (env->tl > 0) { + env->immu.sfsr |= SFSR_CT_NUCLEUS; + } + + /* FIXME: ASI field in SFSR must be set */ + env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; + env->exception_index = TT_TFAULT; + + env->immu.tag_access = (address & ~0x1fffULL) | context; + + trace_mmu_helper_tfault(address, context); + + return 1; + } + *prot = PAGE_EXEC; + TTE_SET_USED(env->itlb[i].tte); + return 0; + } + } + + trace_mmu_helper_tmiss(address, context); + + /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ + env->immu.tag_access = (address & ~0x1fffULL) | context; + env->exception_index = TT_TMISS; + return 1; +} + +static int get_physical_address(CPUState *env, target_phys_addr_t *physical, + int *prot, int *access_index, + target_ulong address, int rw, int mmu_idx, + target_ulong *page_size) +{ + /* ??? 
We treat everything as a small page, then explicitly flush + everything when an entry is evicted. */ + *page_size = TARGET_PAGE_SIZE; + + /* safety net to catch wrong softmmu index use from dynamic code */ + if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { + if (rw == 2) { + trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, + env->dmmu.mmu_primary_context, + env->dmmu.mmu_secondary_context, + address); + } else { + trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, + env->dmmu.mmu_primary_context, + env->dmmu.mmu_secondary_context, + address); + } + } + + if (rw == 2) { + return get_physical_address_code(env, physical, prot, address, + mmu_idx); + } else { + return get_physical_address_data(env, physical, prot, address, rw, + mmu_idx); + } +} + +/* Perform address translation */ +int cpu_sparc_handle_mmu_fault(CPUState *env, target_ulong address, int rw, + int mmu_idx) +{ + target_ulong virt_addr, vaddr; + target_phys_addr_t paddr; + target_ulong page_size; + int error_code = 0, prot, access_index; + + error_code = get_physical_address(env, &paddr, &prot, &access_index, + address, rw, mmu_idx, &page_size); + if (error_code == 0) { + virt_addr = address & TARGET_PAGE_MASK; + vaddr = virt_addr + ((address & TARGET_PAGE_MASK) & + (TARGET_PAGE_SIZE - 1)); + + trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl, + env->dmmu.mmu_primary_context, + env->dmmu.mmu_secondary_context); + + tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size); + return 0; + } + /* XXX */ + return 1; +} + +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env) +{ + unsigned int i; + const char *mask; + + (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %" + PRId64 "\n", + env->dmmu.mmu_primary_context, + env->dmmu.mmu_secondary_context); + if ((env->lsu & DMMU_E) == 0) { + (*cpu_fprintf)(f, "DMMU disabled\n"); + } else { + (*cpu_fprintf)(f, "DMMU dump\n"); + for (i = 0; i < 64; i++) { + switch (TTE_PGSIZE(env->dtlb[i].tte)) { + default: + case 0x0: + mask = " 8k"; + break; + case 0x1: + mask = " 64k"; + break; + case 0x2: + mask = "512k"; + break; + case 0x3: + mask = " 4M"; + break; + } + if (TTE_IS_VALID(env->dtlb[i].tte)) { + (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx" + ", %s, %s, %s, %s, ctx %" PRId64 " %s\n", + i, + env->dtlb[i].tag & (uint64_t)~0x1fffULL, + TTE_PA(env->dtlb[i].tte), + mask, + TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", + TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", + TTE_IS_LOCKED(env->dtlb[i].tte) ? + "locked" : "unlocked", + env->dtlb[i].tag & (uint64_t)0x1fffULL, + TTE_IS_GLOBAL(env->dtlb[i].tte) ? + "global" : "local"); + } + } + } + if ((env->lsu & IMMU_E) == 0) { + (*cpu_fprintf)(f, "IMMU disabled\n"); + } else { + (*cpu_fprintf)(f, "IMMU dump\n"); + for (i = 0; i < 64; i++) { + switch (TTE_PGSIZE(env->itlb[i].tte)) { + default: + case 0x0: + mask = " 8k"; + break; + case 0x1: + mask = " 64k"; + break; + case 0x2: + mask = "512k"; + break; + case 0x3: + mask = " 4M"; + break; + } + if (TTE_IS_VALID(env->itlb[i].tte)) { + (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx" + ", %s, %s, %s, ctx %" PRId64 " %s\n", + i, + env->itlb[i].tag & (uint64_t)~0x1fffULL, + TTE_PA(env->itlb[i].tte), + mask, + TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", + TTE_IS_LOCKED(env->itlb[i].tte) ? + "locked" : "unlocked", + env->itlb[i].tag & (uint64_t)0x1fffULL, + TTE_IS_GLOBAL(env->itlb[i].tte) ? 
+ "global" : "local"); + } + } + } +} + +#endif /* TARGET_SPARC64 */ + +static int cpu_sparc_get_phys_page(CPUState *env, target_phys_addr_t *phys, + target_ulong addr, int rw, int mmu_idx) +{ + target_ulong page_size; + int prot, access_index; + + return get_physical_address(env, phys, &prot, &access_index, addr, rw, + mmu_idx, &page_size); +} + +#if defined(TARGET_SPARC64) +target_phys_addr_t cpu_get_phys_page_nofault(CPUState *env, target_ulong addr, + int mmu_idx) +{ + target_phys_addr_t phys_addr; + + if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { + return -1; + } + return phys_addr; +} +#endif + +target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) +{ + target_phys_addr_t phys_addr; + int mmu_idx = cpu_mmu_index(env); + + if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { + if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { + return -1; + } + } + if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED) { + return -1; + } + return phys_addr; +} +#endif diff --git a/target-sparc/op_helper.c b/target-sparc/op_helper.c index cb0bf2e2a6..02b660ddf9 100644 --- a/target-sparc/op_helper.c +++ b/target-sparc/op_helper.c @@ -3,2490 +3,6 @@ #include "helper.h" #if !defined(CONFIG_USER_ONLY) -#include "softmmu_exec.h" -#endif - -//#define DEBUG_MMU -//#define DEBUG_MXCC -//#define DEBUG_UNALIGNED -//#define DEBUG_UNASSIGNED -//#define DEBUG_ASI -//#define DEBUG_PSTATE -//#define DEBUG_CACHE_CONTROL - -#ifdef DEBUG_MMU -#define DPRINTF_MMU(fmt, ...) \ - do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_MMU(fmt, ...) do {} while (0) -#endif - -#ifdef DEBUG_MXCC -#define DPRINTF_MXCC(fmt, ...) \ - do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_MXCC(fmt, ...) do {} while (0) -#endif - -#ifdef DEBUG_ASI -#define DPRINTF_ASI(fmt, ...) \ - do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0) -#endif - -#ifdef DEBUG_PSTATE -#define DPRINTF_PSTATE(fmt, ...) \ - do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_PSTATE(fmt, ...) do {} while (0) -#endif - -#ifdef DEBUG_CACHE_CONTROL -#define DPRINTF_CACHE_CONTROL(fmt, ...) \ - do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_CACHE_CONTROL(fmt, ...) 
do {} while (0) -#endif - -#ifdef TARGET_SPARC64 -#ifndef TARGET_ABI32 -#define AM_CHECK(env1) ((env1)->pstate & PS_AM) -#else -#define AM_CHECK(env1) (1) -#endif -#endif - -#define DT0 (env->dt0) -#define DT1 (env->dt1) -#define QT0 (env->qt0) -#define QT1 (env->qt1) - -/* Leon3 cache control */ - -/* Cache control: emulate the behavior of cache control registers but without - any effect on the emulated */ - -#define CACHE_STATE_MASK 0x3 -#define CACHE_DISABLED 0x0 -#define CACHE_FROZEN 0x1 -#define CACHE_ENABLED 0x3 - -/* Cache Control register fields */ - -#define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */ -#define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */ -#define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */ -#define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */ -#define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */ -#define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */ -#define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */ -#define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */ - -#if !defined(CONFIG_USER_ONLY) -static void do_unassigned_access(target_phys_addr_t addr, int is_write, - int is_exec, int is_asi, int size); -#else -#ifdef TARGET_SPARC64 -static void do_unassigned_access(target_ulong addr, int is_write, int is_exec, - int is_asi, int size); -#endif -#endif - -#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) -/* Calculates TSB pointer value for fault page size 8k or 64k */ -static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register, - uint64_t tag_access_register, - int page_size) -{ - uint64_t tsb_base = tsb_register & ~0x1fffULL; - int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0; - int tsb_size = tsb_register & 0xf; - - /* discard lower 13 bits which hold tag access context */ - uint64_t tag_access_va = tag_access_register & ~0x1fffULL; - - /* now reorder bits */ - uint64_t tsb_base_mask = ~0x1fffULL; - uint64_t va = tag_access_va; - - /* move va bits to correct position */ - if (page_size == 8*1024) { - va >>= 9; - } else if (page_size == 64*1024) { - va >>= 12; - } - - if (tsb_size) { - tsb_base_mask <<= tsb_size; - } - - /* calculate tsb_base mask and adjust va if split is in use */ - if (tsb_split) { - if (page_size == 8*1024) { - va &= ~(1ULL << (13 + tsb_size)); - } else if (page_size == 64*1024) { - va |= (1ULL << (13 + tsb_size)); - } - tsb_base_mask <<= 1; - } - - return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL; -} - -/* Calculates tag target register value by reordering bits - in tag access register */ -static uint64_t ultrasparc_tag_target(uint64_t tag_access_register) -{ - return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22); -} - -static void replace_tlb_entry(SparcTLBEntry *tlb, - uint64_t tlb_tag, uint64_t tlb_tte, - CPUState *env1) -{ - target_ulong mask, size, va, offset; - - /* flush page range if translation is valid */ - if (TTE_IS_VALID(tlb->tte)) { - - mask = 0xffffffffffffe000ULL; - mask <<= 3 * ((tlb->tte >> 61) & 3); - size = ~mask + 1; - - va = tlb->tag & mask; - - for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) { - tlb_flush_page(env1, va + offset); - } - } - - tlb->tag = tlb_tag; - tlb->tte = tlb_tte; -} - -static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr, - const char *strmmu, CPUState *env1) -{ - unsigned int i; - target_ulong mask; - uint64_t context; - - int is_demap_context = (demap_addr >> 6) & 1; - - /* demap context */ - switch 
((demap_addr >> 4) & 3) { - case 0: /* primary */ - context = env1->dmmu.mmu_primary_context; - break; - case 1: /* secondary */ - context = env1->dmmu.mmu_secondary_context; - break; - case 2: /* nucleus */ - context = 0; - break; - case 3: /* reserved */ - default: - return; - } - - for (i = 0; i < 64; i++) { - if (TTE_IS_VALID(tlb[i].tte)) { - - if (is_demap_context) { - /* will remove non-global entries matching context value */ - if (TTE_IS_GLOBAL(tlb[i].tte) || - !tlb_compare_context(&tlb[i], context)) { - continue; - } - } else { - /* demap page - will remove any entry matching VA */ - mask = 0xffffffffffffe000ULL; - mask <<= 3 * ((tlb[i].tte >> 61) & 3); - - if (!compare_masked(demap_addr, tlb[i].tag, mask)) { - continue; - } - - /* entry should be global or matching context value */ - if (!TTE_IS_GLOBAL(tlb[i].tte) && - !tlb_compare_context(&tlb[i], context)) { - continue; - } - } - - replace_tlb_entry(&tlb[i], 0, 0, env1); -#ifdef DEBUG_MMU - DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i); - dump_mmu(stdout, fprintf, env1); -#endif - } - } -} - -static void replace_tlb_1bit_lru(SparcTLBEntry *tlb, - uint64_t tlb_tag, uint64_t tlb_tte, - const char *strmmu, CPUState *env1) -{ - unsigned int i, replace_used; - - /* Try replacing invalid entry */ - for (i = 0; i < 64; i++) { - if (!TTE_IS_VALID(tlb[i].tte)) { - replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); -#ifdef DEBUG_MMU - DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i); - dump_mmu(stdout, fprintf, env1); -#endif - return; - } - } - - /* All entries are valid, try replacing unlocked entry */ - - for (replace_used = 0; replace_used < 2; ++replace_used) { - - /* Used entries are not replaced on first pass */ - - for (i = 0; i < 64; i++) { - if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) { - - replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); -#ifdef DEBUG_MMU - DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n", - strmmu, (replace_used ? "used" : "unused"), i); - dump_mmu(stdout, fprintf, env1); -#endif - return; - } - } - - /* Now reset used bit and search for unused entries again */ - - for (i = 0; i < 64; i++) { - TTE_SET_UNUSED(tlb[i].tte); - } - } - -#ifdef DEBUG_MMU - DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu); -#endif - /* error state? */ -} - -#endif - -static inline target_ulong address_mask(CPUState *env1, target_ulong addr) -{ -#ifdef TARGET_SPARC64 - if (AM_CHECK(env1)) { - addr &= 0xffffffffULL; - } -#endif - return addr; -} - -/* returns true if access using this ASI is to have address translated by MMU - otherwise access is to raw physical address */ -static inline int is_translating_asi(int asi) -{ -#ifdef TARGET_SPARC64 - /* Ultrasparc IIi translating asi - - note this list is defined by cpu implementation - */ - switch (asi) { - case 0x04 ... 0x11: - case 0x16 ... 0x19: - case 0x1E ... 0x1F: - case 0x24 ... 0x2C: - case 0x70 ... 0x73: - case 0x78 ... 0x79: - case 0x80 ... 
0xFF: - return 1; - - default: - return 0; - } -#else - /* TODO: check sparc32 bits */ - return 0; -#endif -} - -static inline target_ulong asi_address_mask(CPUState *env1, - int asi, target_ulong addr) -{ - if (is_translating_asi(asi)) { - return address_mask(env, addr); - } else { - return addr; - } -} - -void helper_check_align(target_ulong addr, uint32_t align) -{ - if (addr & align) { -#ifdef DEBUG_UNALIGNED - printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx - "\n", addr, env->pc); -#endif - helper_raise_exception(env, TT_UNALIGNED); - } -} - -#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \ - defined(DEBUG_MXCC) -static void dump_mxcc(CPUState *env) -{ - printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 - "\n", - env->mxccdata[0], env->mxccdata[1], - env->mxccdata[2], env->mxccdata[3]); - printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 - "\n" - " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 - "\n", - env->mxccregs[0], env->mxccregs[1], - env->mxccregs[2], env->mxccregs[3], - env->mxccregs[4], env->mxccregs[5], - env->mxccregs[6], env->mxccregs[7]); -} -#endif - -#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \ - && defined(DEBUG_ASI) -static void dump_asi(const char *txt, target_ulong addr, int asi, int size, - uint64_t r1) -{ - switch (size) { - case 1: - DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt, - addr, asi, r1 & 0xff); - break; - case 2: - DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt, - addr, asi, r1 & 0xffff); - break; - case 4: - DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt, - addr, asi, r1 & 0xffffffff); - break; - case 8: - DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt, - addr, asi, r1); - break; - } -} -#endif - -#ifndef TARGET_SPARC64 -#ifndef CONFIG_USER_ONLY - - -/* Leon3 cache control */ - -static void leon3_cache_control_int(void) -{ - uint32_t state = 0; - - if (env->cache_control & CACHE_CTRL_IF) { - /* Instruction cache state */ - state = env->cache_control & CACHE_STATE_MASK; - if (state == CACHE_ENABLED) { - state = CACHE_FROZEN; - DPRINTF_CACHE_CONTROL("Instruction cache: freeze\n"); - } - - env->cache_control &= ~CACHE_STATE_MASK; - env->cache_control |= state; - } - - if (env->cache_control & CACHE_CTRL_DF) { - /* Data cache state */ - state = (env->cache_control >> 2) & CACHE_STATE_MASK; - if (state == CACHE_ENABLED) { - state = CACHE_FROZEN; - DPRINTF_CACHE_CONTROL("Data cache: freeze\n"); - } - - env->cache_control &= ~(CACHE_STATE_MASK << 2); - env->cache_control |= (state << 2); - } -} - -static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size) -{ - DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n", - addr, val, size); - - if (size != 4) { - DPRINTF_CACHE_CONTROL("32bits only\n"); - return; - } - - switch (addr) { - case 0x00: /* Cache control */ - - /* These values must always be read as zeros */ - val &= ~CACHE_CTRL_FD; - val &= ~CACHE_CTRL_FI; - val &= ~CACHE_CTRL_IB; - val &= ~CACHE_CTRL_IP; - val &= ~CACHE_CTRL_DP; - - env->cache_control = val; - break; - case 0x04: /* Instruction cache configuration */ - case 0x08: /* Data cache configuration */ - /* Read Only */ - break; - default: - DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr); - break; - }; -} - -static uint64_t leon3_cache_control_ld(target_ulong addr, int size) -{ - uint64_t ret = 0; - - if (size != 4) { - DPRINTF_CACHE_CONTROL("32bits 
only\n"); - return 0; - } - - switch (addr) { - case 0x00: /* Cache control */ - ret = env->cache_control; - break; - - /* Configuration registers are read and only always keep those - predefined values */ - - case 0x04: /* Instruction cache configuration */ - ret = 0x10220000; - break; - case 0x08: /* Data cache configuration */ - ret = 0x18220000; - break; - default: - DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr); - break; - }; - DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n", - addr, ret, size); - return ret; -} - -void leon3_irq_manager(void *irq_manager, int intno) -{ - leon3_irq_ack(irq_manager, intno); - leon3_cache_control_int(); -} - -uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign) -{ - uint64_t ret = 0; -#if defined(DEBUG_MXCC) || defined(DEBUG_ASI) - uint32_t last_addr = addr; -#endif - - helper_check_align(addr, size - 1); - switch (asi) { - case 2: /* SuperSparc MXCC registers and Leon3 cache control */ - switch (addr) { - case 0x00: /* Leon3 Cache Control */ - case 0x08: /* Leon3 Instruction Cache config */ - case 0x0C: /* Leon3 Date Cache config */ - if (env->def->features & CPU_FEATURE_CACHE_CTRL) { - ret = leon3_cache_control_ld(addr, size); - } - break; - case 0x01c00a00: /* MXCC control register */ - if (size == 8) { - ret = env->mxccregs[3]; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00a04: /* MXCC control register */ - if (size == 4) { - ret = env->mxccregs[3]; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00c00: /* Module reset register */ - if (size == 8) { - ret = env->mxccregs[5]; - /* should we do something here? */ - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00f00: /* MBus port address register */ - if (size == 8) { - ret = env->mxccregs[7]; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - default: - DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr, - size); - break; - } - DPRINTF_MXCC("asi = %d, size = %d, sign = %d, " - "addr = %08x -> ret = %" PRIx64 "," - "addr = %08x\n", asi, size, sign, last_addr, ret, addr); -#ifdef DEBUG_MXCC - dump_mxcc(env); -#endif - break; - case 3: /* MMU probe */ - { - int mmulev; - - mmulev = (addr >> 8) & 15; - if (mmulev > 4) { - ret = 0; - } else { - ret = mmu_probe(env, addr, mmulev); - } - DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n", - addr, mmulev, ret); - } - break; - case 4: /* read MMU regs */ - { - int reg = (addr >> 8) & 0x1f; - - ret = env->mmuregs[reg]; - if (reg == 3) { /* Fault status cleared on read */ - env->mmuregs[3] = 0; - } else if (reg == 0x13) { /* Fault status read */ - ret = env->mmuregs[3]; - } else if (reg == 0x14) { /* Fault address read */ - ret = env->mmuregs[4]; - } - DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret); - } - break; - case 5: /* Turbosparc ITLB Diagnostic */ - case 6: /* Turbosparc DTLB Diagnostic */ - case 7: /* Turbosparc IOTLB Diagnostic */ - break; - case 9: /* Supervisor code access */ - switch (size) { - case 1: - ret = ldub_code(addr); - break; - case 2: - ret = lduw_code(addr); - break; - default: - case 4: - ret = ldl_code(addr); - break; - case 8: - ret = ldq_code(addr); - break; - } - break; - case 0xa: /* User data access */ - switch (size) { - case 1: - ret = ldub_user(addr); - break; - case 2: - ret = lduw_user(addr); - break; - default: - 
case 4: - ret = ldl_user(addr); - break; - case 8: - ret = ldq_user(addr); - break; - } - break; - case 0xb: /* Supervisor data access */ - switch (size) { - case 1: - ret = ldub_kernel(addr); - break; - case 2: - ret = lduw_kernel(addr); - break; - default: - case 4: - ret = ldl_kernel(addr); - break; - case 8: - ret = ldq_kernel(addr); - break; - } - break; - case 0xc: /* I-cache tag */ - case 0xd: /* I-cache data */ - case 0xe: /* D-cache tag */ - case 0xf: /* D-cache data */ - break; - case 0x20: /* MMU passthrough */ - switch (size) { - case 1: - ret = ldub_phys(addr); - break; - case 2: - ret = lduw_phys(addr); - break; - default: - case 4: - ret = ldl_phys(addr); - break; - case 8: - ret = ldq_phys(addr); - break; - } - break; - case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */ - switch (size) { - case 1: - ret = ldub_phys((target_phys_addr_t)addr - | ((target_phys_addr_t)(asi & 0xf) << 32)); - break; - case 2: - ret = lduw_phys((target_phys_addr_t)addr - | ((target_phys_addr_t)(asi & 0xf) << 32)); - break; - default: - case 4: - ret = ldl_phys((target_phys_addr_t)addr - | ((target_phys_addr_t)(asi & 0xf) << 32)); - break; - case 8: - ret = ldq_phys((target_phys_addr_t)addr - | ((target_phys_addr_t)(asi & 0xf) << 32)); - break; - } - break; - case 0x30: /* Turbosparc secondary cache diagnostic */ - case 0x31: /* Turbosparc RAM snoop */ - case 0x32: /* Turbosparc page table descriptor diagnostic */ - case 0x39: /* data cache diagnostic register */ - ret = 0; - break; - case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */ - { - int reg = (addr >> 8) & 3; - - switch (reg) { - case 0: /* Breakpoint Value (Addr) */ - ret = env->mmubpregs[reg]; - break; - case 1: /* Breakpoint Mask */ - ret = env->mmubpregs[reg]; - break; - case 2: /* Breakpoint Control */ - ret = env->mmubpregs[reg]; - break; - case 3: /* Breakpoint Status */ - ret = env->mmubpregs[reg]; - env->mmubpregs[reg] = 0ULL; - break; - } - DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg, - ret); - } - break; - case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ - ret = env->mmubpctrv; - break; - case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ - ret = env->mmubpctrc; - break; - case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ - ret = env->mmubpctrs; - break; - case 0x4c: /* SuperSPARC MMU Breakpoint Action */ - ret = env->mmubpaction; - break; - case 8: /* User code access, XXX */ - default: - do_unassigned_access(addr, 0, 0, asi, size); - ret = 0; - break; - } - if (sign) { - switch (size) { - case 1: - ret = (int8_t) ret; - break; - case 2: - ret = (int16_t) ret; - break; - case 4: - ret = (int32_t) ret; - break; - default: - break; - } - } -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return ret; -} - -void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size) -{ - helper_check_align(addr, size - 1); - switch (asi) { - case 2: /* SuperSparc MXCC registers and Leon3 cache control */ - switch (addr) { - case 0x00: /* Leon3 Cache Control */ - case 0x08: /* Leon3 Instruction Cache config */ - case 0x0C: /* Leon3 Date Cache config */ - if (env->def->features & CPU_FEATURE_CACHE_CTRL) { - leon3_cache_control_st(addr, val, size); - } - break; - - case 0x01c00000: /* MXCC stream data register 0 */ - if (size == 8) { - env->mxccdata[0] = val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00008: /* MXCC stream data register 1 */ - if (size == 8) { - env->mxccdata[1] = 
val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00010: /* MXCC stream data register 2 */ - if (size == 8) { - env->mxccdata[2] = val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00018: /* MXCC stream data register 3 */ - if (size == 8) { - env->mxccdata[3] = val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00100: /* MXCC stream source */ - if (size == 8) { - env->mxccregs[0] = val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + - 0); - env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + - 8); - env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + - 16); - env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + - 24); - break; - case 0x01c00200: /* MXCC stream destination */ - if (size == 8) { - env->mxccregs[1] = val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0, - env->mxccdata[0]); - stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8, - env->mxccdata[1]); - stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16, - env->mxccdata[2]); - stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24, - env->mxccdata[3]); - break; - case 0x01c00a00: /* MXCC control register */ - if (size == 8) { - env->mxccregs[3] = val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00a04: /* MXCC control register */ - if (size == 4) { - env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL) - | val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00e00: /* MXCC error register */ - /* writing a 1 bit clears the error */ - if (size == 8) { - env->mxccregs[6] &= ~val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00f00: /* MBus port address register */ - if (size == 8) { - env->mxccregs[7] = val; - } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); - } - break; - default: - DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr, - size); - break; - } - DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n", - asi, size, addr, val); -#ifdef DEBUG_MXCC - dump_mxcc(env); -#endif - break; - case 3: /* MMU flush */ - { - int mmulev; - - mmulev = (addr >> 8) & 15; - DPRINTF_MMU("mmu flush level %d\n", mmulev); - switch (mmulev) { - case 0: /* flush page */ - tlb_flush_page(env, addr & 0xfffff000); - break; - case 1: /* flush segment (256k) */ - case 2: /* flush region (16M) */ - case 3: /* flush context (4G) */ - case 4: /* flush entire */ - tlb_flush(env, 1); - break; - default: - break; - } -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - } - break; - case 4: /* write MMU regs */ - { - int reg = (addr >> 8) & 0x1f; - uint32_t oldreg; - - oldreg = env->mmuregs[reg]; - switch (reg) { - case 0: /* Control Register */ - env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) | - (val & 0x00ffffff); - /* Mappings generated during no-fault mode or MMU - disabled mode are invalid in normal mode */ - if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) != - (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm))) { - tlb_flush(env, 1); - } - break; - case 1: /* Context Table Pointer 
Register */ - env->mmuregs[reg] = val & env->def->mmu_ctpr_mask; - break; - case 2: /* Context Register */ - env->mmuregs[reg] = val & env->def->mmu_cxr_mask; - if (oldreg != env->mmuregs[reg]) { - /* we flush when the MMU context changes because - QEMU has no MMU context support */ - tlb_flush(env, 1); - } - break; - case 3: /* Synchronous Fault Status Register with Clear */ - case 4: /* Synchronous Fault Address Register */ - break; - case 0x10: /* TLB Replacement Control Register */ - env->mmuregs[reg] = val & env->def->mmu_trcr_mask; - break; - case 0x13: /* Synchronous Fault Status Register with Read - and Clear */ - env->mmuregs[3] = val & env->def->mmu_sfsr_mask; - break; - case 0x14: /* Synchronous Fault Address Register */ - env->mmuregs[4] = val; - break; - default: - env->mmuregs[reg] = val; - break; - } - if (oldreg != env->mmuregs[reg]) { - DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n", - reg, oldreg, env->mmuregs[reg]); - } -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - } - break; - case 5: /* Turbosparc ITLB Diagnostic */ - case 6: /* Turbosparc DTLB Diagnostic */ - case 7: /* Turbosparc IOTLB Diagnostic */ - break; - case 0xa: /* User data access */ - switch (size) { - case 1: - stb_user(addr, val); - break; - case 2: - stw_user(addr, val); - break; - default: - case 4: - stl_user(addr, val); - break; - case 8: - stq_user(addr, val); - break; - } - break; - case 0xb: /* Supervisor data access */ - switch (size) { - case 1: - stb_kernel(addr, val); - break; - case 2: - stw_kernel(addr, val); - break; - default: - case 4: - stl_kernel(addr, val); - break; - case 8: - stq_kernel(addr, val); - break; - } - break; - case 0xc: /* I-cache tag */ - case 0xd: /* I-cache data */ - case 0xe: /* D-cache tag */ - case 0xf: /* D-cache data */ - case 0x10: /* I/D-cache flush page */ - case 0x11: /* I/D-cache flush segment */ - case 0x12: /* I/D-cache flush region */ - case 0x13: /* I/D-cache flush context */ - case 0x14: /* I/D-cache flush user */ - break; - case 0x17: /* Block copy, sta access */ - { - /* val = src - addr = dst - copy 32 bytes */ - unsigned int i; - uint32_t src = val & ~3, dst = addr & ~3, temp; - - for (i = 0; i < 32; i += 4, src += 4, dst += 4) { - temp = ldl_kernel(src); - stl_kernel(dst, temp); - } - } - break; - case 0x1f: /* Block fill, stda access */ - { - /* addr = dst - fill 32 bytes with val */ - unsigned int i; - uint32_t dst = addr & 7; - - for (i = 0; i < 32; i += 8, dst += 8) { - stq_kernel(dst, val); - } - } - break; - case 0x20: /* MMU passthrough */ - { - switch (size) { - case 1: - stb_phys(addr, val); - break; - case 2: - stw_phys(addr, val); - break; - case 4: - default: - stl_phys(addr, val); - break; - case 8: - stq_phys(addr, val); - break; - } - } - break; - case 0x21 ... 
0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */ - { - switch (size) { - case 1: - stb_phys((target_phys_addr_t)addr - | ((target_phys_addr_t)(asi & 0xf) << 32), val); - break; - case 2: - stw_phys((target_phys_addr_t)addr - | ((target_phys_addr_t)(asi & 0xf) << 32), val); - break; - case 4: - default: - stl_phys((target_phys_addr_t)addr - | ((target_phys_addr_t)(asi & 0xf) << 32), val); - break; - case 8: - stq_phys((target_phys_addr_t)addr - | ((target_phys_addr_t)(asi & 0xf) << 32), val); - break; - } - } - break; - case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */ - case 0x31: /* store buffer data, Ross RT620 I-cache flush or - Turbosparc snoop RAM */ - case 0x32: /* store buffer control or Turbosparc page table - descriptor diagnostic */ - case 0x36: /* I-cache flash clear */ - case 0x37: /* D-cache flash clear */ - break; - case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/ - { - int reg = (addr >> 8) & 3; - - switch (reg) { - case 0: /* Breakpoint Value (Addr) */ - env->mmubpregs[reg] = (val & 0xfffffffffULL); - break; - case 1: /* Breakpoint Mask */ - env->mmubpregs[reg] = (val & 0xfffffffffULL); - break; - case 2: /* Breakpoint Control */ - env->mmubpregs[reg] = (val & 0x7fULL); - break; - case 3: /* Breakpoint Status */ - env->mmubpregs[reg] = (val & 0xfULL); - break; - } - DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg, - env->mmuregs[reg]); - } - break; - case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ - env->mmubpctrv = val & 0xffffffff; - break; - case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ - env->mmubpctrc = val & 0x3; - break; - case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ - env->mmubpctrs = val & 0x3; - break; - case 0x4c: /* SuperSPARC MMU Breakpoint Action */ - env->mmubpaction = val & 0x1fff; - break; - case 8: /* User code access, XXX */ - case 9: /* Supervisor code access, XXX */ - default: - do_unassigned_access(addr, 1, 0, asi, size); - break; - } -#ifdef DEBUG_ASI - dump_asi("write", addr, asi, size, val); -#endif -} - -#endif /* CONFIG_USER_ONLY */ -#else /* TARGET_SPARC64 */ - -#ifdef CONFIG_USER_ONLY -uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign) -{ - uint64_t ret = 0; -#if defined(DEBUG_ASI) - target_ulong last_addr = addr; -#endif - - if (asi < 0x80) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - helper_check_align(addr, size - 1); - addr = asi_address_mask(env, asi, addr); - - switch (asi) { - case 0x82: /* Primary no-fault */ - case 0x8a: /* Primary no-fault LE */ - if (page_check_range(addr, size, PAGE_READ) == -1) { -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return 0; - } - /* Fall through */ - case 0x80: /* Primary */ - case 0x88: /* Primary LE */ - { - switch (size) { - case 1: - ret = ldub_raw(addr); - break; - case 2: - ret = lduw_raw(addr); - break; - case 4: - ret = ldl_raw(addr); - break; - default: - case 8: - ret = ldq_raw(addr); - break; - } - } - break; - case 0x83: /* Secondary no-fault */ - case 0x8b: /* Secondary no-fault LE */ - if (page_check_range(addr, size, PAGE_READ) == -1) { -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return 0; - } - /* Fall through */ - case 0x81: /* Secondary */ - case 0x89: /* Secondary LE */ - /* XXX */ - break; - default: - break; - } - - /* Convert from little endian */ - switch (asi) { - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - case 0x8a: /* Primary no-fault LE */ - case 0x8b: /* Secondary no-fault LE */ - 
switch (size) { - case 2: - ret = bswap16(ret); - break; - case 4: - ret = bswap32(ret); - break; - case 8: - ret = bswap64(ret); - break; - default: - break; - } - default: - break; - } - - /* Convert to signed number */ - if (sign) { - switch (size) { - case 1: - ret = (int8_t) ret; - break; - case 2: - ret = (int16_t) ret; - break; - case 4: - ret = (int32_t) ret; - break; - default: - break; - } - } -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return ret; -} - -void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size) -{ -#ifdef DEBUG_ASI - dump_asi("write", addr, asi, size, val); -#endif - if (asi < 0x80) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - helper_check_align(addr, size - 1); - addr = asi_address_mask(env, asi, addr); - - /* Convert to little endian */ - switch (asi) { - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - switch (size) { - case 2: - val = bswap16(val); - break; - case 4: - val = bswap32(val); - break; - case 8: - val = bswap64(val); - break; - default: - break; - } - default: - break; - } - - switch (asi) { - case 0x80: /* Primary */ - case 0x88: /* Primary LE */ - { - switch (size) { - case 1: - stb_raw(addr, val); - break; - case 2: - stw_raw(addr, val); - break; - case 4: - stl_raw(addr, val); - break; - case 8: - default: - stq_raw(addr, val); - break; - } - } - break; - case 0x81: /* Secondary */ - case 0x89: /* Secondary LE */ - /* XXX */ - return; - - case 0x82: /* Primary no-fault, RO */ - case 0x83: /* Secondary no-fault, RO */ - case 0x8a: /* Primary no-fault LE, RO */ - case 0x8b: /* Secondary no-fault LE, RO */ - default: - do_unassigned_access(addr, 1, 0, 1, size); - return; - } -} - -#else /* CONFIG_USER_ONLY */ - -uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign) -{ - uint64_t ret = 0; -#if defined(DEBUG_ASI) - target_ulong last_addr = addr; -#endif - - asi &= 0xff; - - if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) - || (cpu_has_hypervisor(env) - && asi >= 0x30 && asi < 0x80 - && !(env->hpstate & HS_PRIV))) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - helper_check_align(addr, size - 1); - addr = asi_address_mask(env, asi, addr); - - /* process nonfaulting loads first */ - if ((asi & 0xf6) == 0x82) { - int mmu_idx; - - /* secondary space access has lowest asi bit equal to 1 */ - if (env->pstate & PS_PRIV) { - mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX; - } else { - mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX; - } - - if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == -1ULL) { -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - /* env->exception_index is set in get_physical_address_data(). 
*/ - helper_raise_exception(env, env->exception_index); - } - - /* convert nonfaulting load ASIs to normal load ASIs */ - asi &= ~0x02; - } - - switch (asi) { - case 0x10: /* As if user primary */ - case 0x11: /* As if user secondary */ - case 0x18: /* As if user primary LE */ - case 0x19: /* As if user secondary LE */ - case 0x80: /* Primary */ - case 0x81: /* Secondary */ - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - case 0xe2: /* UA2007 Primary block init */ - case 0xe3: /* UA2007 Secondary block init */ - if ((asi & 0x80) && (env->pstate & PS_PRIV)) { - if (cpu_hypervisor_mode(env)) { - switch (size) { - case 1: - ret = ldub_hypv(addr); - break; - case 2: - ret = lduw_hypv(addr); - break; - case 4: - ret = ldl_hypv(addr); - break; - default: - case 8: - ret = ldq_hypv(addr); - break; - } - } else { - /* secondary space access has lowest asi bit equal to 1 */ - if (asi & 1) { - switch (size) { - case 1: - ret = ldub_kernel_secondary(addr); - break; - case 2: - ret = lduw_kernel_secondary(addr); - break; - case 4: - ret = ldl_kernel_secondary(addr); - break; - default: - case 8: - ret = ldq_kernel_secondary(addr); - break; - } - } else { - switch (size) { - case 1: - ret = ldub_kernel(addr); - break; - case 2: - ret = lduw_kernel(addr); - break; - case 4: - ret = ldl_kernel(addr); - break; - default: - case 8: - ret = ldq_kernel(addr); - break; - } - } - } - } else { - /* secondary space access has lowest asi bit equal to 1 */ - if (asi & 1) { - switch (size) { - case 1: - ret = ldub_user_secondary(addr); - break; - case 2: - ret = lduw_user_secondary(addr); - break; - case 4: - ret = ldl_user_secondary(addr); - break; - default: - case 8: - ret = ldq_user_secondary(addr); - break; - } - } else { - switch (size) { - case 1: - ret = ldub_user(addr); - break; - case 2: - ret = lduw_user(addr); - break; - case 4: - ret = ldl_user(addr); - break; - default: - case 8: - ret = ldq_user(addr); - break; - } - } - } - break; - case 0x14: /* Bypass */ - case 0x15: /* Bypass, non-cacheable */ - case 0x1c: /* Bypass LE */ - case 0x1d: /* Bypass, non-cacheable LE */ - { - switch (size) { - case 1: - ret = ldub_phys(addr); - break; - case 2: - ret = lduw_phys(addr); - break; - case 4: - ret = ldl_phys(addr); - break; - default: - case 8: - ret = ldq_phys(addr); - break; - } - break; - } - case 0x24: /* Nucleus quad LDD 128 bit atomic */ - case 0x2c: /* Nucleus quad LDD 128 bit atomic LE - Only ldda allowed */ - helper_raise_exception(env, TT_ILL_INSN); - return 0; - case 0x04: /* Nucleus */ - case 0x0c: /* Nucleus Little Endian (LE) */ - { - switch (size) { - case 1: - ret = ldub_nucleus(addr); - break; - case 2: - ret = lduw_nucleus(addr); - break; - case 4: - ret = ldl_nucleus(addr); - break; - default: - case 8: - ret = ldq_nucleus(addr); - break; - } - break; - } - case 0x4a: /* UPA config */ - /* XXX */ - break; - case 0x45: /* LSU */ - ret = env->lsu; - break; - case 0x50: /* I-MMU regs */ - { - int reg = (addr >> 3) & 0xf; - - if (reg == 0) { - /* I-TSB Tag Target register */ - ret = ultrasparc_tag_target(env->immu.tag_access); - } else { - ret = env->immuregs[reg]; - } - - break; - } - case 0x51: /* I-MMU 8k TSB pointer */ - { - /* env->immuregs[5] holds I-MMU TSB register value - env->immuregs[6] holds I-MMU Tag Access register value */ - ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access, - 8*1024); - break; - } - case 0x52: /* I-MMU 64k TSB pointer */ - { - /* env->immuregs[5] holds I-MMU TSB register value - env->immuregs[6] holds I-MMU Tag Access register 
value */ - ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access, - 64*1024); - break; - } - case 0x55: /* I-MMU data access */ - { - int reg = (addr >> 3) & 0x3f; - - ret = env->itlb[reg].tte; - break; - } - case 0x56: /* I-MMU tag read */ - { - int reg = (addr >> 3) & 0x3f; - - ret = env->itlb[reg].tag; - break; - } - case 0x58: /* D-MMU regs */ - { - int reg = (addr >> 3) & 0xf; - - if (reg == 0) { - /* D-TSB Tag Target register */ - ret = ultrasparc_tag_target(env->dmmu.tag_access); - } else { - ret = env->dmmuregs[reg]; - } - break; - } - case 0x59: /* D-MMU 8k TSB pointer */ - { - /* env->dmmuregs[5] holds D-MMU TSB register value - env->dmmuregs[6] holds D-MMU Tag Access register value */ - ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access, - 8*1024); - break; - } - case 0x5a: /* D-MMU 64k TSB pointer */ - { - /* env->dmmuregs[5] holds D-MMU TSB register value - env->dmmuregs[6] holds D-MMU Tag Access register value */ - ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access, - 64*1024); - break; - } - case 0x5d: /* D-MMU data access */ - { - int reg = (addr >> 3) & 0x3f; - - ret = env->dtlb[reg].tte; - break; - } - case 0x5e: /* D-MMU tag read */ - { - int reg = (addr >> 3) & 0x3f; - - ret = env->dtlb[reg].tag; - break; - } - case 0x46: /* D-cache data */ - case 0x47: /* D-cache tag access */ - case 0x4b: /* E-cache error enable */ - case 0x4c: /* E-cache asynchronous fault status */ - case 0x4d: /* E-cache asynchronous fault address */ - case 0x4e: /* E-cache tag data */ - case 0x66: /* I-cache instruction access */ - case 0x67: /* I-cache tag access */ - case 0x6e: /* I-cache predecode */ - case 0x6f: /* I-cache LRU etc. */ - case 0x76: /* E-cache tag */ - case 0x7e: /* E-cache tag */ - break; - case 0x5b: /* D-MMU data pointer */ - case 0x48: /* Interrupt dispatch, RO */ - case 0x49: /* Interrupt data receive */ - case 0x7f: /* Incoming interrupt vector, RO */ - /* XXX */ - break; - case 0x54: /* I-MMU data in, WO */ - case 0x57: /* I-MMU demap, WO */ - case 0x5c: /* D-MMU data in, WO */ - case 0x5f: /* D-MMU demap, WO */ - case 0x77: /* Interrupt vector, WO */ - default: - do_unassigned_access(addr, 0, 0, 1, size); - ret = 0; - break; - } - - /* Convert from little endian */ - switch (asi) { - case 0x0c: /* Nucleus Little Endian (LE) */ - case 0x18: /* As if user primary LE */ - case 0x19: /* As if user secondary LE */ - case 0x1c: /* Bypass LE */ - case 0x1d: /* Bypass, non-cacheable LE */ - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - switch(size) { - case 2: - ret = bswap16(ret); - break; - case 4: - ret = bswap32(ret); - break; - case 8: - ret = bswap64(ret); - break; - default: - break; - } - default: - break; - } - - /* Convert to signed number */ - if (sign) { - switch (size) { - case 1: - ret = (int8_t) ret; - break; - case 2: - ret = (int16_t) ret; - break; - case 4: - ret = (int32_t) ret; - break; - default: - break; - } - } -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return ret; -} - -void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size) -{ -#ifdef DEBUG_ASI - dump_asi("write", addr, asi, size, val); -#endif - - asi &= 0xff; - - if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) - || (cpu_has_hypervisor(env) - && asi >= 0x30 && asi < 0x80 - && !(env->hpstate & HS_PRIV))) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - helper_check_align(addr, size - 1); - addr = asi_address_mask(env, asi, addr); - - /* Convert to little endian */ - switch (asi) { - case 0x0c: 
/* Nucleus Little Endian (LE) */ - case 0x18: /* As if user primary LE */ - case 0x19: /* As if user secondary LE */ - case 0x1c: /* Bypass LE */ - case 0x1d: /* Bypass, non-cacheable LE */ - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - switch (size) { - case 2: - val = bswap16(val); - break; - case 4: - val = bswap32(val); - break; - case 8: - val = bswap64(val); - break; - default: - break; - } - default: - break; - } - - switch (asi) { - case 0x10: /* As if user primary */ - case 0x11: /* As if user secondary */ - case 0x18: /* As if user primary LE */ - case 0x19: /* As if user secondary LE */ - case 0x80: /* Primary */ - case 0x81: /* Secondary */ - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - case 0xe2: /* UA2007 Primary block init */ - case 0xe3: /* UA2007 Secondary block init */ - if ((asi & 0x80) && (env->pstate & PS_PRIV)) { - if (cpu_hypervisor_mode(env)) { - switch (size) { - case 1: - stb_hypv(addr, val); - break; - case 2: - stw_hypv(addr, val); - break; - case 4: - stl_hypv(addr, val); - break; - case 8: - default: - stq_hypv(addr, val); - break; - } - } else { - /* secondary space access has lowest asi bit equal to 1 */ - if (asi & 1) { - switch (size) { - case 1: - stb_kernel_secondary(addr, val); - break; - case 2: - stw_kernel_secondary(addr, val); - break; - case 4: - stl_kernel_secondary(addr, val); - break; - case 8: - default: - stq_kernel_secondary(addr, val); - break; - } - } else { - switch (size) { - case 1: - stb_kernel(addr, val); - break; - case 2: - stw_kernel(addr, val); - break; - case 4: - stl_kernel(addr, val); - break; - case 8: - default: - stq_kernel(addr, val); - break; - } - } - } - } else { - /* secondary space access has lowest asi bit equal to 1 */ - if (asi & 1) { - switch (size) { - case 1: - stb_user_secondary(addr, val); - break; - case 2: - stw_user_secondary(addr, val); - break; - case 4: - stl_user_secondary(addr, val); - break; - case 8: - default: - stq_user_secondary(addr, val); - break; - } - } else { - switch (size) { - case 1: - stb_user(addr, val); - break; - case 2: - stw_user(addr, val); - break; - case 4: - stl_user(addr, val); - break; - case 8: - default: - stq_user(addr, val); - break; - } - } - } - break; - case 0x14: /* Bypass */ - case 0x15: /* Bypass, non-cacheable */ - case 0x1c: /* Bypass LE */ - case 0x1d: /* Bypass, non-cacheable LE */ - { - switch (size) { - case 1: - stb_phys(addr, val); - break; - case 2: - stw_phys(addr, val); - break; - case 4: - stl_phys(addr, val); - break; - case 8: - default: - stq_phys(addr, val); - break; - } - } - return; - case 0x24: /* Nucleus quad LDD 128 bit atomic */ - case 0x2c: /* Nucleus quad LDD 128 bit atomic LE - Only ldda allowed */ - helper_raise_exception(env, TT_ILL_INSN); - return; - case 0x04: /* Nucleus */ - case 0x0c: /* Nucleus Little Endian (LE) */ - { - switch (size) { - case 1: - stb_nucleus(addr, val); - break; - case 2: - stw_nucleus(addr, val); - break; - case 4: - stl_nucleus(addr, val); - break; - default: - case 8: - stq_nucleus(addr, val); - break; - } - break; - } - - case 0x4a: /* UPA config */ - /* XXX */ - return; - case 0x45: /* LSU */ - { - uint64_t oldreg; - - oldreg = env->lsu; - env->lsu = val & (DMMU_E | IMMU_E); - /* Mappings generated during D/I MMU disabled mode are - invalid in normal mode */ - if (oldreg != env->lsu) { - DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n", - oldreg, env->lsu); -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env1); -#endif - tlb_flush(env, 1); - } - return; - } - case 0x50: /* 
I-MMU regs */ - { - int reg = (addr >> 3) & 0xf; - uint64_t oldreg; - - oldreg = env->immuregs[reg]; - switch (reg) { - case 0: /* RO */ - return; - case 1: /* Not in I-MMU */ - case 2: - return; - case 3: /* SFSR */ - if ((val & 1) == 0) { - val = 0; /* Clear SFSR */ - } - env->immu.sfsr = val; - break; - case 4: /* RO */ - return; - case 5: /* TSB access */ - DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016" - PRIx64 "\n", env->immu.tsb, val); - env->immu.tsb = val; - break; - case 6: /* Tag access */ - env->immu.tag_access = val; - break; - case 7: - case 8: - return; - default: - break; - } - - if (oldreg != env->immuregs[reg]) { - DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" - PRIx64 "\n", reg, oldreg, env->immuregs[reg]); - } -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - return; - } - case 0x54: /* I-MMU data in */ - replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env); - return; - case 0x55: /* I-MMU data access */ - { - /* TODO: auto demap */ - - unsigned int i = (addr >> 3) & 0x3f; - - replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env); - -#ifdef DEBUG_MMU - DPRINTF_MMU("immu data access replaced entry [%i]\n", i); - dump_mmu(stdout, fprintf, env); -#endif - return; - } - case 0x57: /* I-MMU demap */ - demap_tlb(env->itlb, addr, "immu", env); - return; - case 0x58: /* D-MMU regs */ - { - int reg = (addr >> 3) & 0xf; - uint64_t oldreg; - - oldreg = env->dmmuregs[reg]; - switch (reg) { - case 0: /* RO */ - case 4: - return; - case 3: /* SFSR */ - if ((val & 1) == 0) { - val = 0; /* Clear SFSR, Fault address */ - env->dmmu.sfar = 0; - } - env->dmmu.sfsr = val; - break; - case 1: /* Primary context */ - env->dmmu.mmu_primary_context = val; - /* can be optimized to only flush MMU_USER_IDX - and MMU_KERNEL_IDX entries */ - tlb_flush(env, 1); - break; - case 2: /* Secondary context */ - env->dmmu.mmu_secondary_context = val; - /* can be optimized to only flush MMU_USER_SECONDARY_IDX - and MMU_KERNEL_SECONDARY_IDX entries */ - tlb_flush(env, 1); - break; - case 5: /* TSB access */ - DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016" - PRIx64 "\n", env->dmmu.tsb, val); - env->dmmu.tsb = val; - break; - case 6: /* Tag access */ - env->dmmu.tag_access = val; - break; - case 7: /* Virtual Watchpoint */ - case 8: /* Physical Watchpoint */ - default: - env->dmmuregs[reg] = val; - break; - } - - if (oldreg != env->dmmuregs[reg]) { - DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" - PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]); - } -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - return; - } - case 0x5c: /* D-MMU data in */ - replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env); - return; - case 0x5d: /* D-MMU data access */ - { - unsigned int i = (addr >> 3) & 0x3f; - - replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env); - -#ifdef DEBUG_MMU - DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i); - dump_mmu(stdout, fprintf, env); -#endif - return; - } - case 0x5f: /* D-MMU demap */ - demap_tlb(env->dtlb, addr, "dmmu", env); - return; - case 0x49: /* Interrupt data receive */ - /* XXX */ - return; - case 0x46: /* D-cache data */ - case 0x47: /* D-cache tag access */ - case 0x4b: /* E-cache error enable */ - case 0x4c: /* E-cache asynchronous fault status */ - case 0x4d: /* E-cache asynchronous fault address */ - case 0x4e: /* E-cache tag data */ - case 0x66: /* I-cache instruction access */ - case 0x67: /* I-cache tag access */ - case 0x6e: /* I-cache 
predecode */ - case 0x6f: /* I-cache LRU etc. */ - case 0x76: /* E-cache tag */ - case 0x7e: /* E-cache tag */ - return; - case 0x51: /* I-MMU 8k TSB pointer, RO */ - case 0x52: /* I-MMU 64k TSB pointer, RO */ - case 0x56: /* I-MMU tag read, RO */ - case 0x59: /* D-MMU 8k TSB pointer, RO */ - case 0x5a: /* D-MMU 64k TSB pointer, RO */ - case 0x5b: /* D-MMU data pointer, RO */ - case 0x5e: /* D-MMU tag read, RO */ - case 0x48: /* Interrupt dispatch, RO */ - case 0x7f: /* Incoming interrupt vector, RO */ - case 0x82: /* Primary no-fault, RO */ - case 0x83: /* Secondary no-fault, RO */ - case 0x8a: /* Primary no-fault LE, RO */ - case 0x8b: /* Secondary no-fault LE, RO */ - default: - do_unassigned_access(addr, 1, 0, 1, size); - return; - } -} -#endif /* CONFIG_USER_ONLY */ - -void helper_ldda_asi(target_ulong addr, int asi, int rd) -{ - if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) - || (cpu_has_hypervisor(env) - && asi >= 0x30 && asi < 0x80 - && !(env->hpstate & HS_PRIV))) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - addr = asi_address_mask(env, asi, addr); - - switch (asi) { -#if !defined(CONFIG_USER_ONLY) - case 0x24: /* Nucleus quad LDD 128 bit atomic */ - case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */ - helper_check_align(addr, 0xf); - if (rd == 0) { - env->gregs[1] = ldq_nucleus(addr + 8); - if (asi == 0x2c) { - bswap64s(&env->gregs[1]); - } - } else if (rd < 8) { - env->gregs[rd] = ldq_nucleus(addr); - env->gregs[rd + 1] = ldq_nucleus(addr + 8); - if (asi == 0x2c) { - bswap64s(&env->gregs[rd]); - bswap64s(&env->gregs[rd + 1]); - } - } else { - env->regwptr[rd] = ldq_nucleus(addr); - env->regwptr[rd + 1] = ldq_nucleus(addr + 8); - if (asi == 0x2c) { - bswap64s(&env->regwptr[rd]); - bswap64s(&env->regwptr[rd + 1]); - } - } - break; -#endif - default: - helper_check_align(addr, 0x3); - if (rd == 0) { - env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0); - } else if (rd < 8) { - env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0); - env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0); - } else { - env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0); - env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0); - } - break; - } -} - -void helper_ldf_asi(target_ulong addr, int asi, int size, int rd) -{ - unsigned int i; - CPU_DoubleU u; - - helper_check_align(addr, 3); - addr = asi_address_mask(env, asi, addr); - - switch (asi) { - case 0xf0: /* UA2007/JPS1 Block load primary */ - case 0xf1: /* UA2007/JPS1 Block load secondary */ - case 0xf8: /* UA2007/JPS1 Block load primary LE */ - case 0xf9: /* UA2007/JPS1 Block load secondary LE */ - if (rd & 7) { - helper_raise_exception(env, TT_ILL_INSN); - return; - } - helper_check_align(addr, 0x3f); - for (i = 0; i < 16; i++) { - *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4, - 0); - addr += 4; - } - - return; - case 0x16: /* UA2007 Block load primary, user privilege */ - case 0x17: /* UA2007 Block load secondary, user privilege */ - case 0x1e: /* UA2007 Block load primary LE, user privilege */ - case 0x1f: /* UA2007 Block load secondary LE, user privilege */ - case 0x70: /* JPS1 Block load primary, user privilege */ - case 0x71: /* JPS1 Block load secondary, user privilege */ - case 0x78: /* JPS1 Block load primary LE, user privilege */ - case 0x79: /* JPS1 Block load secondary LE, user privilege */ - if (rd & 7) { - helper_raise_exception(env, TT_ILL_INSN); - return; - } - helper_check_align(addr, 0x3f); - for (i = 0; i < 16; i++) { - *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x19, 4, - 0); - addr 
+= 4; - } - - return; - default: - break; - } - - switch (size) { - default: - case 4: - *((uint32_t *)&env->fpr[rd]) = helper_ld_asi(addr, asi, size, 0); - break; - case 8: - u.ll = helper_ld_asi(addr, asi, size, 0); - *((uint32_t *)&env->fpr[rd++]) = u.l.upper; - *((uint32_t *)&env->fpr[rd++]) = u.l.lower; - break; - case 16: - u.ll = helper_ld_asi(addr, asi, 8, 0); - *((uint32_t *)&env->fpr[rd++]) = u.l.upper; - *((uint32_t *)&env->fpr[rd++]) = u.l.lower; - u.ll = helper_ld_asi(addr + 8, asi, 8, 0); - *((uint32_t *)&env->fpr[rd++]) = u.l.upper; - *((uint32_t *)&env->fpr[rd++]) = u.l.lower; - break; - } -} - -void helper_stf_asi(target_ulong addr, int asi, int size, int rd) -{ - unsigned int i; - target_ulong val = 0; - CPU_DoubleU u; - - helper_check_align(addr, 3); - addr = asi_address_mask(env, asi, addr); - - switch (asi) { - case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */ - case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */ - case 0xf0: /* UA2007/JPS1 Block store primary */ - case 0xf1: /* UA2007/JPS1 Block store secondary */ - case 0xf8: /* UA2007/JPS1 Block store primary LE */ - case 0xf9: /* UA2007/JPS1 Block store secondary LE */ - if (rd & 7) { - helper_raise_exception(env, TT_ILL_INSN); - return; - } - helper_check_align(addr, 0x3f); - for (i = 0; i < 16; i++) { - val = *(uint32_t *)&env->fpr[rd++]; - helper_st_asi(addr, val, asi & 0x8f, 4); - addr += 4; - } - - return; - case 0x16: /* UA2007 Block load primary, user privilege */ - case 0x17: /* UA2007 Block load secondary, user privilege */ - case 0x1e: /* UA2007 Block load primary LE, user privilege */ - case 0x1f: /* UA2007 Block load secondary LE, user privilege */ - case 0x70: /* JPS1 Block store primary, user privilege */ - case 0x71: /* JPS1 Block store secondary, user privilege */ - case 0x78: /* JPS1 Block load primary LE, user privilege */ - case 0x79: /* JPS1 Block load secondary LE, user privilege */ - if (rd & 7) { - helper_raise_exception(env, TT_ILL_INSN); - return; - } - helper_check_align(addr, 0x3f); - for (i = 0; i < 16; i++) { - val = *(uint32_t *)&env->fpr[rd++]; - helper_st_asi(addr, val, asi & 0x19, 4); - addr += 4; - } - - return; - default: - break; - } - - switch (size) { - default: - case 4: - helper_st_asi(addr, *(uint32_t *)&env->fpr[rd], asi, size); - break; - case 8: - u.l.upper = *(uint32_t *)&env->fpr[rd++]; - u.l.lower = *(uint32_t *)&env->fpr[rd++]; - helper_st_asi(addr, u.ll, asi, size); - break; - case 16: - u.l.upper = *(uint32_t *)&env->fpr[rd++]; - u.l.lower = *(uint32_t *)&env->fpr[rd++]; - helper_st_asi(addr, u.ll, asi, 8); - u.l.upper = *(uint32_t *)&env->fpr[rd++]; - u.l.lower = *(uint32_t *)&env->fpr[rd++]; - helper_st_asi(addr + 8, u.ll, asi, 8); - break; - } -} - -target_ulong helper_cas_asi(target_ulong addr, target_ulong val1, - target_ulong val2, uint32_t asi) -{ - target_ulong ret; - - val2 &= 0xffffffffUL; - ret = helper_ld_asi(addr, asi, 4, 0); - ret &= 0xffffffffUL; - if (val2 == ret) { - helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4); - } - return ret; -} - -target_ulong helper_casx_asi(target_ulong addr, target_ulong val1, - target_ulong val2, uint32_t asi) -{ - target_ulong ret; - - ret = helper_ld_asi(addr, asi, 8, 0); - if (val2 == ret) { - helper_st_asi(addr, val1, asi, 8); - } - return ret; -} -#endif /* TARGET_SPARC64 */ - -static target_ulong helper_udiv_common(target_ulong a, target_ulong b, int cc) -{ - int overflow = 0; - uint64_t x0; - uint32_t x1; - - x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); - x1 = (b & 
0xffffffff); - - if (x1 == 0) { - helper_raise_exception(env, TT_DIV_ZERO); - } - - x0 = x0 / x1; - if (x0 > 0xffffffff) { - x0 = 0xffffffff; - overflow = 1; - } - - if (cc) { - env->cc_dst = x0; - env->cc_src2 = overflow; - env->cc_op = CC_OP_DIV; - } - return x0; -} - -target_ulong helper_udiv(target_ulong a, target_ulong b) -{ - return helper_udiv_common(a, b, 0); -} - -target_ulong helper_udiv_cc(target_ulong a, target_ulong b) -{ - return helper_udiv_common(a, b, 1); -} - -static target_ulong helper_sdiv_common(target_ulong a, target_ulong b, int cc) -{ - int overflow = 0; - int64_t x0; - int32_t x1; - - x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); - x1 = (b & 0xffffffff); - - if (x1 == 0) { - helper_raise_exception(env, TT_DIV_ZERO); - } - - x0 = x0 / x1; - if ((int32_t) x0 != x0) { - x0 = x0 < 0 ? 0x80000000 : 0x7fffffff; - overflow = 1; - } - - if (cc) { - env->cc_dst = x0; - env->cc_src2 = overflow; - env->cc_op = CC_OP_DIV; - } - return x0; -} - -target_ulong helper_sdiv(target_ulong a, target_ulong b) -{ - return helper_sdiv_common(a, b, 0); -} - -target_ulong helper_sdiv_cc(target_ulong a, target_ulong b) -{ - return helper_sdiv_common(a, b, 1); -} - -void helper_stdf(target_ulong addr, int mem_idx) -{ - helper_check_align(addr, 7); -#if !defined(CONFIG_USER_ONLY) - switch (mem_idx) { - case MMU_USER_IDX: - stfq_user(addr, DT0); - break; - case MMU_KERNEL_IDX: - stfq_kernel(addr, DT0); - break; -#ifdef TARGET_SPARC64 - case MMU_HYPV_IDX: - stfq_hypv(addr, DT0); - break; -#endif - default: - DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx); - break; - } -#else - stfq_raw(address_mask(env, addr), DT0); -#endif -} - -void helper_lddf(target_ulong addr, int mem_idx) -{ - helper_check_align(addr, 7); -#if !defined(CONFIG_USER_ONLY) - switch (mem_idx) { - case MMU_USER_IDX: - DT0 = ldfq_user(addr); - break; - case MMU_KERNEL_IDX: - DT0 = ldfq_kernel(addr); - break; -#ifdef TARGET_SPARC64 - case MMU_HYPV_IDX: - DT0 = ldfq_hypv(addr); - break; -#endif - default: - DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx); - break; - } -#else - DT0 = ldfq_raw(address_mask(env, addr)); -#endif -} - -void helper_ldqf(target_ulong addr, int mem_idx) -{ - /* XXX add 128 bit load */ - CPU_QuadU u; - - helper_check_align(addr, 7); -#if !defined(CONFIG_USER_ONLY) - switch (mem_idx) { - case MMU_USER_IDX: - u.ll.upper = ldq_user(addr); - u.ll.lower = ldq_user(addr + 8); - QT0 = u.q; - break; - case MMU_KERNEL_IDX: - u.ll.upper = ldq_kernel(addr); - u.ll.lower = ldq_kernel(addr + 8); - QT0 = u.q; - break; -#ifdef TARGET_SPARC64 - case MMU_HYPV_IDX: - u.ll.upper = ldq_hypv(addr); - u.ll.lower = ldq_hypv(addr + 8); - QT0 = u.q; - break; -#endif - default: - DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx); - break; - } -#else - u.ll.upper = ldq_raw(address_mask(env, addr)); - u.ll.lower = ldq_raw(address_mask(env, addr + 8)); - QT0 = u.q; -#endif -} - -void helper_stqf(target_ulong addr, int mem_idx) -{ - /* XXX add 128 bit store */ - CPU_QuadU u; - - helper_check_align(addr, 7); -#if !defined(CONFIG_USER_ONLY) - switch (mem_idx) { - case MMU_USER_IDX: - u.q = QT0; - stq_user(addr, u.ll.upper); - stq_user(addr + 8, u.ll.lower); - break; - case MMU_KERNEL_IDX: - u.q = QT0; - stq_kernel(addr, u.ll.upper); - stq_kernel(addr + 8, u.ll.lower); - break; -#ifdef TARGET_SPARC64 - case MMU_HYPV_IDX: - u.q = QT0; - stq_hypv(addr, u.ll.upper); - stq_hypv(addr + 8, u.ll.lower); - break; -#endif - default: - DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", 
mem_idx); - break; - } -#else - u.q = QT0; - stq_raw(address_mask(env, addr), u.ll.upper); - stq_raw(address_mask(env, addr + 8), u.ll.lower); -#endif -} - -#ifdef TARGET_SPARC64 -static void do_modify_softint(const char *operation, uint32_t value) -{ - if (env->softint != value) { - env->softint = value; - DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint); -#if !defined(CONFIG_USER_ONLY) - if (cpu_interrupts_enabled(env)) { - cpu_check_irqs(env); - } -#endif - } -} - -void helper_set_softint(uint64_t value) -{ - do_modify_softint("helper_set_softint", env->softint | (uint32_t)value); -} - -void helper_clear_softint(uint64_t value) -{ - do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value); -} - -void helper_write_softint(uint64_t value) -{ - do_modify_softint("helper_write_softint", (uint32_t)value); -} -#endif - -#if !defined(CONFIG_USER_ONLY) - static void do_unaligned_access(target_ulong addr, int is_write, int is_user, void *retaddr); @@ -2556,103 +72,3 @@ void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx, } #endif /* !CONFIG_USER_ONLY */ - -#ifndef TARGET_SPARC64 -#if !defined(CONFIG_USER_ONLY) -static void do_unassigned_access(target_phys_addr_t addr, int is_write, - int is_exec, int is_asi, int size) -{ - int fault_type; - -#ifdef DEBUG_UNASSIGNED - if (is_asi) { - printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx - " asi 0x%02x from " TARGET_FMT_lx "\n", - is_exec ? "exec" : is_write ? "write" : "read", size, - size == 1 ? "" : "s", addr, is_asi, env->pc); - } else { - printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx - " from " TARGET_FMT_lx "\n", - is_exec ? "exec" : is_write ? "write" : "read", size, - size == 1 ? "" : "s", addr, env->pc); - } -#endif - /* Don't overwrite translation and access faults */ - fault_type = (env->mmuregs[3] & 0x1c) >> 2; - if ((fault_type > 4) || (fault_type == 0)) { - env->mmuregs[3] = 0; /* Fault status register */ - if (is_asi) { - env->mmuregs[3] |= 1 << 16; - } - if (env->psrs) { - env->mmuregs[3] |= 1 << 5; - } - if (is_exec) { - env->mmuregs[3] |= 1 << 6; - } - if (is_write) { - env->mmuregs[3] |= 1 << 7; - } - env->mmuregs[3] |= (5 << 2) | 2; - /* SuperSPARC will never place instruction fault addresses in the FAR */ - if (!is_exec) { - env->mmuregs[4] = addr; /* Fault address register */ - } - } - /* overflow (same type fault was not read before another fault) */ - if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) { - env->mmuregs[3] |= 1; - } - - if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) { - if (is_exec) { - helper_raise_exception(env, TT_CODE_ACCESS); - } else { - helper_raise_exception(env, TT_DATA_ACCESS); - } - } - - /* flush neverland mappings created during no-fault mode, - so the sequential MMU faults report proper fault types */ - if (env->mmuregs[0] & MMU_NF) { - tlb_flush(env, 1); - } -} -#endif -#else -#if defined(CONFIG_USER_ONLY) -static void do_unassigned_access(target_ulong addr, int is_write, int is_exec, - int is_asi, int size) -#else -static void do_unassigned_access(target_phys_addr_t addr, int is_write, - int is_exec, int is_asi, int size) -#endif -{ -#ifdef DEBUG_UNASSIGNED - printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx - "\n", addr, env->pc); -#endif - - if (is_exec) { - helper_raise_exception(env, TT_CODE_ACCESS); - } else { - helper_raise_exception(env, TT_DATA_ACCESS); - } -} -#endif - -#if !defined(CONFIG_USER_ONLY) -void cpu_unassigned_access(CPUState *env1, target_phys_addr_t 
addr, - int is_write, int is_exec, int is_asi, int size) -{ - CPUState *saved_env; - - saved_env = env; - env = env1; - /* Ignore unassigned accesses outside of CPU context */ - if (env1) { - do_unassigned_access(addr, is_write, is_exec, is_asi, size); - } - env = saved_env; -} -#endif diff --git a/target-sparc/translate.c b/target-sparc/translate.c index 528ca920de..93185402fd 100644 --- a/target-sparc/translate.c +++ b/target-sparc/translate.c @@ -63,7 +63,7 @@ static TCGv cpu_tmp0; static TCGv_i32 cpu_tmp32; static TCGv_i64 cpu_tmp64; /* Floating point registers */ -static TCGv_i32 cpu_fpr[TARGET_FPREGS]; +static TCGv_i64 cpu_fpr[TARGET_DPREGS]; static target_ulong gen_opc_npc[OPC_BUF_SIZE]; static target_ulong gen_opc_jump_pc[2]; @@ -82,6 +82,8 @@ typedef struct DisasContext { uint32_t cc_op; /* current CC operation */ struct TranslationBlock *tb; sparc_def_t *def; + TCGv_i32 t32[3]; + int n_t32; } DisasContext; // This function uses non-native bit order @@ -114,67 +116,116 @@ static int sign_extend(int x, int len) #define IS_IMM (insn & (1<<13)) +static inline void gen_update_fprs_dirty(int rd) +{ +#if defined(TARGET_SPARC64) + tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2); +#endif +} + /* floating point registers moves */ -static void gen_op_load_fpr_DT0(unsigned int src) +static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) +{ +#if TCG_TARGET_REG_BITS == 32 + if (src & 1) { + return TCGV_LOW(cpu_fpr[src / 2]); + } else { + return TCGV_HIGH(cpu_fpr[src / 2]); + } +#else + if (src & 1) { + return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2])); + } else { + TCGv_i32 ret = tcg_temp_local_new_i32(); + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32); + tcg_gen_trunc_i64_i32(ret, t); + tcg_temp_free_i64(t); + + dc->t32[dc->n_t32++] = ret; + assert(dc->n_t32 <= ARRAY_SIZE(dc->t32)); + + return ret; + } +#endif +} + +static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v) +{ +#if TCG_TARGET_REG_BITS == 32 + if (dst & 1) { + tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v); + } else { + tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v); + } +#else + TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v)); + tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t, + (dst & 1 ? 
0 : 32), 32); +#endif + gen_update_fprs_dirty(dst); +} + +static TCGv_i32 gen_dest_fpr_F(void) +{ + return cpu_tmp32; +} + +static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src) { - tcg_gen_st_i32(cpu_fpr[src], cpu_env, offsetof(CPUSPARCState, dt0) + - offsetof(CPU_DoubleU, l.upper)); - tcg_gen_st_i32(cpu_fpr[src + 1], cpu_env, offsetof(CPUSPARCState, dt0) + - offsetof(CPU_DoubleU, l.lower)); + src = DFPREG(src); + return cpu_fpr[src / 2]; } -static void gen_op_load_fpr_DT1(unsigned int src) +static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v) { - tcg_gen_st_i32(cpu_fpr[src], cpu_env, offsetof(CPUSPARCState, dt1) + - offsetof(CPU_DoubleU, l.upper)); - tcg_gen_st_i32(cpu_fpr[src + 1], cpu_env, offsetof(CPUSPARCState, dt1) + - offsetof(CPU_DoubleU, l.lower)); + dst = DFPREG(dst); + tcg_gen_mov_i64(cpu_fpr[dst / 2], v); + gen_update_fprs_dirty(dst); } -static void gen_op_store_DT0_fpr(unsigned int dst) +static TCGv_i64 gen_dest_fpr_D(void) { - tcg_gen_ld_i32(cpu_fpr[dst], cpu_env, offsetof(CPUSPARCState, dt0) + - offsetof(CPU_DoubleU, l.upper)); - tcg_gen_ld_i32(cpu_fpr[dst + 1], cpu_env, offsetof(CPUSPARCState, dt0) + - offsetof(CPU_DoubleU, l.lower)); + return cpu_tmp64; } static void gen_op_load_fpr_QT0(unsigned int src) { - tcg_gen_st_i32(cpu_fpr[src], cpu_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, l.upmost)); - tcg_gen_st_i32(cpu_fpr[src + 1], cpu_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, l.upper)); - tcg_gen_st_i32(cpu_fpr[src + 2], cpu_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, l.lower)); - tcg_gen_st_i32(cpu_fpr[src + 3], cpu_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, l.lowest)); + tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) + + offsetof(CPU_QuadU, ll.upper)); + tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) + + offsetof(CPU_QuadU, ll.lower)); } static void gen_op_load_fpr_QT1(unsigned int src) { - tcg_gen_st_i32(cpu_fpr[src], cpu_env, offsetof(CPUSPARCState, qt1) + - offsetof(CPU_QuadU, l.upmost)); - tcg_gen_st_i32(cpu_fpr[src + 1], cpu_env, offsetof(CPUSPARCState, qt1) + - offsetof(CPU_QuadU, l.upper)); - tcg_gen_st_i32(cpu_fpr[src + 2], cpu_env, offsetof(CPUSPARCState, qt1) + - offsetof(CPU_QuadU, l.lower)); - tcg_gen_st_i32(cpu_fpr[src + 3], cpu_env, offsetof(CPUSPARCState, qt1) + - offsetof(CPU_QuadU, l.lowest)); + tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) + + offsetof(CPU_QuadU, ll.upper)); + tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) + + offsetof(CPU_QuadU, ll.lower)); } static void gen_op_store_QT0_fpr(unsigned int dst) { - tcg_gen_ld_i32(cpu_fpr[dst], cpu_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, l.upmost)); - tcg_gen_ld_i32(cpu_fpr[dst + 1], cpu_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, l.upper)); - tcg_gen_ld_i32(cpu_fpr[dst + 2], cpu_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, l.lower)); - tcg_gen_ld_i32(cpu_fpr[dst + 3], cpu_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, l.lowest)); + tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) + + offsetof(CPU_QuadU, ll.upper)); + tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) + + offsetof(CPU_QuadU, ll.lower)); } +#ifdef TARGET_SPARC64 +static void gen_move_Q(unsigned int rd, unsigned int rs) +{ + rd = QFPREG(rd); + rs = QFPREG(rs); + + tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]); + tcg_gen_mov_i64(cpu_fpr[rd / 2 + 
1], cpu_fpr[rs / 2 + 1]); + gen_update_fprs_dirty(rd); +} +#endif + /* moves */ #ifdef CONFIG_USER_ONLY #define supervisor(dc) 0 @@ -1419,20 +1470,20 @@ static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) } } -static inline void gen_op_fcmpd(int fccno) +static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { switch (fccno) { case 0: - gen_helper_fcmpd(cpu_env); + gen_helper_fcmpd(cpu_env, r_rs1, r_rs2); break; case 1: - gen_helper_fcmpd_fcc1(cpu_env); + gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2); break; case 2: - gen_helper_fcmpd_fcc2(cpu_env); + gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2); break; case 3: - gen_helper_fcmpd_fcc3(cpu_env); + gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2); break; } } @@ -1473,20 +1524,20 @@ static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) } } -static inline void gen_op_fcmped(int fccno) +static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { switch (fccno) { case 0: - gen_helper_fcmped(cpu_env); + gen_helper_fcmped(cpu_env, r_rs1, r_rs2); break; case 1: - gen_helper_fcmped_fcc1(cpu_env); + gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2); break; case 2: - gen_helper_fcmped_fcc2(cpu_env); + gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2); break; case 3: - gen_helper_fcmped_fcc3(cpu_env); + gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2); break; } } @@ -1516,9 +1567,9 @@ static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2) gen_helper_fcmps(cpu_env, r_rs1, r_rs2); } -static inline void gen_op_fcmpd(int fccno) +static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { - gen_helper_fcmpd(cpu_env); + gen_helper_fcmpd(cpu_env, r_rs1, r_rs2); } static inline void gen_op_fcmpq(int fccno) @@ -1531,9 +1582,9 @@ static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2) gen_helper_fcmpes(cpu_env, r_rs1, r_rs2); } -static inline void gen_op_fcmped(int fccno) +static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { - gen_helper_fcmped(cpu_env); + gen_helper_fcmped(cpu_env, r_rs1, r_rs2); } static inline void gen_op_fcmpeq(int fccno) @@ -1570,21 +1621,313 @@ static int gen_trap_ifnofpu(DisasContext *dc, TCGv r_cond) return 0; } -static inline void gen_update_fprs_dirty(int rd) +static inline void gen_op_clear_ieee_excp_and_FTT(void) { -#if defined(TARGET_SPARC64) - tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 
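The register-file change above is the core of this patch: cpu_fpr shrinks from 64 TCGv_i32 globals to 32 TCGv_i64 globals (TARGET_DPREGS), so each architectural single-precision register is now one half of a double. gen_load_fpr_F and gen_store_fpr_F hide the packing from the rest of the translator (on 64-bit hosts the even-register load needs a real 32-bit temporary, which is remembered in dc->t32[] and released at the egress label), gen_load_fpr_D/gen_store_fpr_D become plain whole-register moves, and gen_update_fprs_dirty sets FPRS.DL (bit 0) or FPRS.DU (bit 1) depending on which half of the file was written. A minimal scalar model of the layout, with illustrative names rather than the TCG API:

#include <stdint.h>

/* fpr[i] models the double-precision pair f(2i):f(2i+1); the even-numbered
 * single lives in the high 32 bits and the odd one in the low 32 bits,
 * which is exactly what gen_store_fpr_F's deposit at (dst & 1 ? 0 : 32)
 * encodes. */
static uint32_t load_fpr_F(const uint64_t *fpr, unsigned src)
{
    if (src & 1) {
        return (uint32_t)fpr[src / 2];            /* odd reg: low half   */
    }
    return (uint32_t)(fpr[src / 2] >> 32);        /* even reg: high half */
}

static void store_fpr_F(uint64_t *fpr, unsigned dst, uint32_t v)
{
    unsigned shift = (dst & 1) ? 0 : 32;
    uint64_t mask = 0xffffffffULL << shift;

    fpr[dst / 2] = (fpr[dst / 2] & ~mask) | ((uint64_t)v << shift);
}

This is also why the fregnames table at the end of the file drops to the 32 even-numbered names f0, f2, ..., f62: those are the 64-bit globals that actually exist now.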
1 : 2); + tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK); +} + +static inline void gen_fop_FF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32)) +{ + TCGv_i32 dst, src; + + src = gen_load_fpr_F(dc, rs); + dst = gen_dest_fpr_F(); + + gen(dst, cpu_env, src); + + gen_store_fpr_F(dc, rd, dst); +} + +static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i32, TCGv_i32)) +{ + TCGv_i32 dst, src; + + src = gen_load_fpr_F(dc, rs); + dst = gen_dest_fpr_F(); + + gen(dst, src); + + gen_store_fpr_F(dc, rd, dst); +} + +static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 dst, src1, src2; + + src1 = gen_load_fpr_F(dc, rs1); + src2 = gen_load_fpr_F(dc, rs2); + dst = gen_dest_fpr_F(); + + gen(dst, cpu_env, src1, src2); + + gen_store_fpr_F(dc, rd, dst); +} + +#ifdef TARGET_SPARC64 +static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 dst, src1, src2; + + src1 = gen_load_fpr_F(dc, rs1); + src2 = gen_load_fpr_F(dc, rs2); + dst = gen_dest_fpr_F(); + + gen(dst, src1, src2); + + gen_store_fpr_F(dc, rd, dst); +} #endif + +static inline void gen_fop_DD(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64)) +{ + TCGv_i64 dst, src; + + src = gen_load_fpr_D(dc, rs); + dst = gen_dest_fpr_D(); + + gen(dst, cpu_env, src); + + gen_store_fpr_D(dc, rd, dst); } -static inline void gen_op_clear_ieee_excp_and_FTT(void) +#ifdef TARGET_SPARC64 +static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i64, TCGv_i64)) { - tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK); + TCGv_i64 dst, src; + + src = gen_load_fpr_D(dc, rs); + dst = gen_dest_fpr_D(); + + gen(dst, src); + + gen_store_fpr_D(dc, rd, dst); } +#endif -static inline void gen_clear_float_exceptions(void) +static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64)) { - gen_helper_clear_float_exceptions(cpu_env); + TCGv_i64 dst, src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + dst = gen_dest_fpr_D(); + + gen(dst, cpu_env, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} + +#ifdef TARGET_SPARC64 +static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 dst, src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + dst = gen_dest_fpr_D(); + + gen(dst, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 dst, src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + dst = gen_dest_fpr_D(); + + gen(dst, cpu_gsr, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 dst, src0, src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + src0 = gen_load_fpr_D(dc, rd); + dst = gen_dest_fpr_D(); + + gen(dst, src0, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} +#endif + +static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_ptr)) +{ + gen_op_load_fpr_QT1(QFPREG(rs)); + + gen(cpu_env); + + 
gen_op_store_QT0_fpr(QFPREG(rd)); + gen_update_fprs_dirty(QFPREG(rd)); +} + +#ifdef TARGET_SPARC64 +static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_ptr)) +{ + gen_op_load_fpr_QT1(QFPREG(rs)); + + gen(cpu_env); + + gen_op_store_QT0_fpr(QFPREG(rd)); + gen_update_fprs_dirty(QFPREG(rd)); +} +#endif + +static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_ptr)) +{ + gen_op_load_fpr_QT0(QFPREG(rs1)); + gen_op_load_fpr_QT1(QFPREG(rs2)); + + gen(cpu_env); + + gen_op_store_QT0_fpr(QFPREG(rd)); + gen_update_fprs_dirty(QFPREG(rd)); +} + +static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32)) +{ + TCGv_i64 dst; + TCGv_i32 src1, src2; + + src1 = gen_load_fpr_F(dc, rs1); + src2 = gen_load_fpr_F(dc, rs2); + dst = gen_dest_fpr_D(); + + gen(dst, cpu_env, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + + gen(cpu_env, src1, src2); + + gen_op_store_QT0_fpr(QFPREG(rd)); + gen_update_fprs_dirty(QFPREG(rd)); +} + +#ifdef TARGET_SPARC64 +static inline void gen_fop_DF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32)) +{ + TCGv_i64 dst; + TCGv_i32 src; + + src = gen_load_fpr_F(dc, rs); + dst = gen_dest_fpr_D(); + + gen(dst, cpu_env, src); + + gen_store_fpr_D(dc, rd, dst); +} +#endif + +static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32)) +{ + TCGv_i64 dst; + TCGv_i32 src; + + src = gen_load_fpr_F(dc, rs); + dst = gen_dest_fpr_D(); + + gen(dst, cpu_env, src); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_fop_FD(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64)) +{ + TCGv_i32 dst; + TCGv_i64 src; + + src = gen_load_fpr_D(dc, rs); + dst = gen_dest_fpr_F(); + + gen(dst, cpu_env, src); + + gen_store_fpr_F(dc, rd, dst); +} + +static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i32, TCGv_ptr)) +{ + TCGv_i32 dst; + + gen_op_load_fpr_QT1(QFPREG(rs)); + dst = gen_dest_fpr_F(); + + gen(dst, cpu_env); + + gen_store_fpr_F(dc, rd, dst); +} + +static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_i64, TCGv_ptr)) +{ + TCGv_i64 dst; + + gen_op_load_fpr_QT1(QFPREG(rs)); + dst = gen_dest_fpr_D(); + + gen(dst, cpu_env); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_ptr, TCGv_i32)) +{ + TCGv_i32 src; + + src = gen_load_fpr_F(dc, rs); + + gen(cpu_env, src); + + gen_op_store_QT0_fpr(QFPREG(rd)); + gen_update_fprs_dirty(QFPREG(rd)); +} + +static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, + void (*gen)(TCGv_ptr, TCGv_i64)) +{ + TCGv_i64 src; + + src = gen_load_fpr_D(dc, rs); + + gen(cpu_env, src); + + gen_op_store_QT0_fpr(QFPREG(rd)); + gen_update_fprs_dirty(QFPREG(rd)); } /* asi moves */ @@ -1878,6 +2221,148 @@ static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env) tcg_temp_free_i32(r_tl); } + +static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, + int width, bool cc, bool left) +{ + TCGv lo1, lo2, t1, t2; + uint64_t amask, tabl, tabr; + int shift, imask, omask; + + if (cc) { + tcg_gen_mov_tl(cpu_cc_src, s1); + tcg_gen_mov_tl(cpu_cc_src2, s2); + 
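The gen_fop_* / gen_ne_fop_* wrappers above encode the operand widths in their suffix (F = 32-bit, D = 64-bit, Q = 128-bit, the latter still staged through QT0/QT1), and the ne_ variants cover the operations that previously skipped the clear-exceptions/check-IEEE-exceptions pair (moves, negation, absolute value and the exact widening conversions). All of them assume helpers that take and return raw register values instead of reading DT0/DT1 from the CPU state. The hunk does not show those helpers, but a plausible shape for one of them, assuming the same conversion the VIS helpers get at the end of this diff, would be:

#include "cpu.h"   /* CPUState and fp_status, as in vis_helper.c */

/* Hypothetical value-passing FP helper; the signature is inferred from
 * gen_fop_DDD's call gen(dst, cpu_env, src1, src2) and is an assumption,
 * not code from this patch.  IEEE exception bookkeeping is left out.
 * With the default softfloat configuration float64 is a plain 64-bit
 * value, so the register image can be handed to softfloat directly. */
uint64_t helper_faddd(CPUState *env, uint64_t src1, uint64_t src2)
{
    return float64_add(src1, src2, &env->fp_status);
}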
tcg_gen_sub_tl(cpu_cc_dst, s1, s2); + tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB); + dc->cc_op = CC_OP_SUB; + } + + /* Theory of operation: there are two tables, left and right (not to + be confused with the left and right versions of the opcode). These + are indexed by the low 3 bits of the inputs. To make things "easy", + these tables are loaded into two constants, TABL and TABR below. + The operation index = (input & imask) << shift calculates the index + into the constant, while val = (table >> index) & omask calculates + the value we're looking for. */ + switch (width) { + case 8: + imask = 0x7; + shift = 3; + omask = 0xff; + if (left) { + tabl = 0x80c0e0f0f8fcfeffULL; + tabr = 0xff7f3f1f0f070301ULL; + } else { + tabl = 0x0103070f1f3f7fffULL; + tabr = 0xfffefcf8f0e0c080ULL; + } + break; + case 16: + imask = 0x6; + shift = 1; + omask = 0xf; + if (left) { + tabl = 0x8cef; + tabr = 0xf731; + } else { + tabl = 0x137f; + tabr = 0xfec8; + } + break; + case 32: + imask = 0x4; + shift = 0; + omask = 0x3; + if (left) { + tabl = (2 << 2) | 3; + tabr = (3 << 2) | 1; + } else { + tabl = (1 << 2) | 3; + tabr = (3 << 2) | 2; + } + break; + default: + abort(); + } + + lo1 = tcg_temp_new(); + lo2 = tcg_temp_new(); + tcg_gen_andi_tl(lo1, s1, imask); + tcg_gen_andi_tl(lo2, s2, imask); + tcg_gen_shli_tl(lo1, lo1, shift); + tcg_gen_shli_tl(lo2, lo2, shift); + + t1 = tcg_const_tl(tabl); + t2 = tcg_const_tl(tabr); + tcg_gen_shr_tl(lo1, t1, lo1); + tcg_gen_shr_tl(lo2, t2, lo2); + tcg_gen_andi_tl(dst, lo1, omask); + tcg_gen_andi_tl(lo2, lo2, omask); + + amask = -8; + if (AM_CHECK(dc)) { + amask &= 0xffffffffULL; + } + tcg_gen_andi_tl(s1, s1, amask); + tcg_gen_andi_tl(s2, s2, amask); + + /* We want to compute + dst = (s1 == s2 ? lo1 : lo1 & lo2). + We've already done dst = lo1, so this reduces to + dst &= (s1 == s2 ? -1 : lo2) + Which we perform by + lo2 |= -(s1 == s2) + dst &= lo2 + */ + tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2); + tcg_gen_neg_tl(t1, t1); + tcg_gen_or_tl(lo2, lo2, t1); + tcg_gen_and_tl(dst, dst, lo2); + + tcg_temp_free(lo1); + tcg_temp_free(lo2); + tcg_temp_free(t1); + tcg_temp_free(t2); +} + +static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left) +{ + TCGv tmp = tcg_temp_new(); + + tcg_gen_add_tl(tmp, s1, s2); + tcg_gen_andi_tl(dst, tmp, -8); + if (left) { + tcg_gen_neg_tl(tmp, tmp); + } + tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); + + tcg_temp_free(tmp); +} + +static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2) +{ + TCGv t1, t2, shift; + + t1 = tcg_temp_new(); + t2 = tcg_temp_new(); + shift = tcg_temp_new(); + + tcg_gen_andi_tl(shift, gsr, 7); + tcg_gen_shli_tl(shift, shift, 3); + tcg_gen_shl_tl(t1, s1, shift); + + /* A shift of 64 does not produce 0 in TCG. Divide this into a + shift of (up to 63) followed by a constant shift of 1. 
*/ + tcg_gen_xori_tl(shift, shift, 63); + tcg_gen_shr_tl(t2, s2, shift); + tcg_gen_shri_tl(t2, t2, 1); + + tcg_gen_or_tl(dst, t1, t2); + + tcg_temp_free(t1); + tcg_temp_free(t2); + tcg_temp_free(shift); +} #endif #define CHECK_IU_FEATURE(dc, FEATURE) \ @@ -1892,6 +2377,8 @@ static void disas_sparc_insn(DisasContext * dc) { unsigned int insn, opc, rs1, rs2, rd; TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2; + TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32; + TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64; target_long simm; if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) @@ -2107,7 +2594,7 @@ static void disas_sparc_insn(DisasContext * dc) #ifdef TARGET_SPARC64 case 0x2: /* V9 rdccr */ gen_helper_compute_psr(cpu_env); - gen_helper_rdccr(cpu_dst); + gen_helper_rdccr(cpu_dst, cpu_env); gen_movl_TN_reg(rd, cpu_dst); break; case 0x3: /* V9 rdasi */ @@ -2184,7 +2671,7 @@ static void disas_sparc_insn(DisasContext * dc) goto priv_insn; gen_helper_compute_psr(cpu_env); dc->cc_op = CC_OP_FLAGS; - gen_helper_rdpsr(cpu_dst); + gen_helper_rdpsr(cpu_dst, cpu_env); #else CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) @@ -2297,7 +2784,7 @@ static void disas_sparc_insn(DisasContext * dc) tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32); break; case 9: // cwp - gen_helper_rdcwp(cpu_tmp0); + gen_helper_rdcwp(cpu_tmp0, cpu_env); break; case 10: // cansave tcg_gen_ld_i32(cpu_tmp32, cpu_env, @@ -2351,7 +2838,7 @@ static void disas_sparc_insn(DisasContext * dc) } else if (xop == 0x2b) { /* rdtbr / V9 flushw */ #ifdef TARGET_SPARC64 save_state(dc, cpu_cond); - gen_helper_flushw(); + gen_helper_flushw(cpu_env); #else if (!supervisor(dc)) goto priv_insn; @@ -2369,350 +2856,162 @@ static void disas_sparc_insn(DisasContext * dc) save_state(dc, cpu_cond); switch (xop) { case 0x1: /* fmovs */ - tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + cpu_src1_32 = gen_load_fpr_F(dc, rs2); + gen_store_fpr_F(dc, rd, cpu_src1_32); break; case 0x5: /* fnegs */ - gen_helper_fnegs(cpu_fpr[rd], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs); break; case 0x9: /* fabss */ - gen_helper_fabss(cpu_fpr[rd], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss); break; case 0x29: /* fsqrts */ CHECK_FPU_FEATURE(dc, FSQRT); - gen_clear_float_exceptions(); - gen_helper_fsqrts(cpu_tmp32, cpu_env, cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts); break; case 0x2a: /* fsqrtd */ CHECK_FPU_FEATURE(dc, FSQRT); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fsqrtd(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd); break; case 0x2b: /* fsqrtq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fsqrtq(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq); break; case 0x41: /* fadds */ - gen_clear_float_exceptions(); - gen_helper_fadds(cpu_tmp32, cpu_env, cpu_fpr[rs1], - cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds); break; case 0x42: /* faddd */ - 
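gen_edge() above inlines the edge-mask computation that used to go through helpers: the two 64-bit constants are lookup tables for the left and right masks, indexed by the low address bits, and the masks are combined exactly as the embedded comment says (the left mask alone when the aligned addresses match, left AND right otherwise). A scalar model of the width-8, non-"l" case in 64-bit address mode, with illustrative names:

#include <stdint.h>

/* TABL/TABR are the same constants the generated code loads; each byte
 * holds the mask selected by the low three bits of the address. */
static uint8_t edge8_mask(uint64_t s1, uint64_t s2)
{
    static const uint64_t tabl = 0x0103070f1f3f7fffULL;  /* left masks  */
    static const uint64_t tabr = 0xfffefcf8f0e0c080ULL;  /* right masks */

    uint8_t left  = (tabl >> ((s1 & 7) * 8)) & 0xff;
    uint8_t right = (tabr >> ((s2 & 7) * 8)) & 0xff;

    /* Combine as gen_edge() does: equal aligned addresses keep only the
     * left mask, otherwise both edges apply. */
    if ((s1 & ~7ULL) == (s2 & ~7ULL)) {
        return left;
    }
    return left & right;
}

For example, edge8_mask(0x1002, 0x1005) picks left mask 0x3f and right mask 0xfc; the two addresses share an aligned doubleword, so the model returns 0x3f. The cc variants additionally set the integer condition codes from s1 - s2, which is what the CC_OP_SUB setup at the top of gen_edge() arranges.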
gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_faddd(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd); break; case 0x43: /* faddq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_faddq(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq); break; case 0x45: /* fsubs */ - gen_clear_float_exceptions(); - gen_helper_fsubs(cpu_tmp32, cpu_env, cpu_fpr[rs1], - cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs); break; case 0x46: /* fsubd */ - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fsubd(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd); break; case 0x47: /* fsubq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fsubq(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq); break; case 0x49: /* fmuls */ CHECK_FPU_FEATURE(dc, FMUL); - gen_clear_float_exceptions(); - gen_helper_fmuls(cpu_tmp32, cpu_env, cpu_fpr[rs1], - cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls); break; case 0x4a: /* fmuld */ CHECK_FPU_FEATURE(dc, FMUL); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fmuld(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld); break; case 0x4b: /* fmulq */ CHECK_FPU_FEATURE(dc, FLOAT128); CHECK_FPU_FEATURE(dc, FMUL); - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fmulq(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq); break; case 0x4d: /* fdivs */ - gen_clear_float_exceptions(); - gen_helper_fdivs(cpu_tmp32, cpu_env, cpu_fpr[rs1], - cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs); break; case 0x4e: /* fdivd */ - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fdivd(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd); break; case 0x4f: /* fdivq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(QFPREG(rs1)); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - 
gen_helper_fdivq(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq); break; case 0x69: /* fsmuld */ CHECK_FPU_FEATURE(dc, FSMULD); - gen_clear_float_exceptions(); - gen_helper_fsmuld(cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld); break; case 0x6e: /* fdmulq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fdmulq(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq); break; case 0xc4: /* fitos */ - gen_clear_float_exceptions(); - gen_helper_fitos(cpu_tmp32, cpu_env, cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FF(dc, rd, rs2, gen_helper_fitos); break; case 0xc6: /* fdtos */ - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fdtos(cpu_tmp32, cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FD(dc, rd, rs2, gen_helper_fdtos); break; case 0xc7: /* fqtos */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fqtos(cpu_tmp32, cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos); break; case 0xc8: /* fitod */ - gen_helper_fitod(cpu_env, cpu_fpr[rs2]); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod); break; case 0xc9: /* fstod */ - gen_helper_fstod(cpu_env, cpu_fpr[rs2]); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod); break; case 0xcb: /* fqtod */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fqtod(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod); break; case 0xcc: /* fitoq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_helper_fitoq(cpu_env, cpu_fpr[rs2]); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq); break; case 0xcd: /* fstoq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_helper_fstoq(cpu_env, cpu_fpr[rs2]); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq); break; case 0xce: /* fdtoq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fdtoq(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq); break; case 0xd1: /* fstoi */ - gen_clear_float_exceptions(); - gen_helper_fstoi(cpu_tmp32, cpu_env, cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FF(dc, rd, rs2, gen_helper_fstoi); break; case 0xd2: /* fdtoi */ - gen_op_load_fpr_DT1(DFPREG(rs2)); 
- gen_clear_float_exceptions(); - gen_helper_fdtoi(cpu_tmp32, cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi); break; case 0xd3: /* fqtoi */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fqtoi(cpu_tmp32, cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi); break; #ifdef TARGET_SPARC64 case 0x2: /* V9 fmovd */ - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)]); - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + gen_store_fpr_D(dc, rd, cpu_src1_64); break; case 0x3: /* V9 fmovq */ CHECK_FPU_FEATURE(dc, FLOAT128); - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], cpu_fpr[QFPREG(rs2)]); - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 1], - cpu_fpr[QFPREG(rs2) + 1]); - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 2], - cpu_fpr[QFPREG(rs2) + 2]); - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 3], - cpu_fpr[QFPREG(rs2) + 3]); - gen_update_fprs_dirty(QFPREG(rd)); + gen_move_Q(rd, rs2); break; case 0x6: /* V9 fnegd */ - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fnegd(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd); break; case 0x7: /* V9 fnegq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_helper_fnegq(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq); break; case 0xa: /* V9 fabsd */ - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fabsd(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd); break; case 0xb: /* V9 fabsq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_helper_fabsq(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq); break; case 0x81: /* V9 fstox */ - gen_clear_float_exceptions(); - gen_helper_fstox(cpu_env, cpu_fpr[rs2]); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DF(dc, rd, rs2, gen_helper_fstox); break; case 0x82: /* V9 fdtox */ - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fdtox(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DD(dc, rd, rs2, gen_helper_fdtox); break; case 0x83: /* V9 fqtox */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT1(QFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fqtox(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox); break; case 0x84: /* V9 fxtos */ - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fxtos(cpu_tmp32, cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_tmp32); - gen_update_fprs_dirty(rd); + gen_fop_FD(dc, rd, rs2, gen_helper_fxtos); break; case 0x88: /* V9 fxtod */ - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fxtod(cpu_env); - 
gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_fop_DD(dc, rd, rs2, gen_helper_fxtod); break; case 0x8c: /* V9 fxtoq */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_clear_float_exceptions(); - gen_helper_fxtoq(cpu_env); - gen_helper_check_ieee_exceptions(cpu_env); - gen_op_store_QT0_fpr(QFPREG(rd)); - gen_update_fprs_dirty(QFPREG(rd)); + gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq); break; #endif default: @@ -2738,8 +3037,8 @@ static void disas_sparc_insn(DisasContext * dc) cpu_src1 = get_src1(insn, cpu_src1); tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1, 0, l1); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + cpu_src1_32 = gen_load_fpr_F(dc, rs2); + gen_store_fpr_F(dc, rd, cpu_src1_32); gen_set_label(l1); break; } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr @@ -2750,9 +3049,8 @@ static void disas_sparc_insn(DisasContext * dc) cpu_src1 = get_src1(insn, cpu_src1); tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1, 0, l1); - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)]); - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1], cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + gen_store_fpr_D(dc, rd, cpu_src1_64); gen_set_label(l1); break; } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr @@ -2764,11 +3062,7 @@ static void disas_sparc_insn(DisasContext * dc) cpu_src1 = get_src1(insn, cpu_src1); tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1, 0, l1); - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], cpu_fpr[QFPREG(rs2)]); - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 1], cpu_fpr[QFPREG(rs2) + 1]); - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 2], cpu_fpr[QFPREG(rs2) + 2]); - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 3], cpu_fpr[QFPREG(rs2) + 3]); - gen_update_fprs_dirty(QFPREG(rd)); + gen_move_Q(rd, rs2); gen_set_label(l1); break; } @@ -2786,8 +3080,8 @@ static void disas_sparc_insn(DisasContext * dc) gen_fcond(r_cond, fcc, cond); \ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ 0, l1); \ - tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]); \ - gen_update_fprs_dirty(rd); \ + cpu_src1_32 = gen_load_fpr_F(dc, rs2); \ + gen_store_fpr_F(dc, rd, cpu_src1_32); \ gen_set_label(l1); \ tcg_temp_free(r_cond); \ } @@ -2802,11 +3096,8 @@ static void disas_sparc_insn(DisasContext * dc) gen_fcond(r_cond, fcc, cond); \ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ 0, l1); \ - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], \ - cpu_fpr[DFPREG(rs2)]); \ - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1], \ - cpu_fpr[DFPREG(rs2) + 1]); \ - gen_update_fprs_dirty(DFPREG(rd)); \ + cpu_src1_64 = gen_load_fpr_D(dc, rs2); \ + gen_store_fpr_D(dc, rd, cpu_src1_64); \ gen_set_label(l1); \ tcg_temp_free(r_cond); \ } @@ -2821,15 +3112,7 @@ static void disas_sparc_insn(DisasContext * dc) gen_fcond(r_cond, fcc, cond); \ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ 0, l1); \ - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], \ - cpu_fpr[QFPREG(rs2)]); \ - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 1], \ - cpu_fpr[QFPREG(rs2) + 1]); \ - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 2], \ - cpu_fpr[QFPREG(rs2) + 2]); \ - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 3], \ - cpu_fpr[QFPREG(rs2) + 3]); \ - gen_update_fprs_dirty(QFPREG(rd)); \ + gen_move_Q(rd, rs2); \ gen_set_label(l1); \ tcg_temp_free(r_cond); \ } @@ -2887,8 +3170,8 @@ static void disas_sparc_insn(DisasContext * dc) gen_cond(r_cond, icc, cond, dc); \ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ 0, l1); \ - tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]); \ - 
gen_update_fprs_dirty(rd); \ + cpu_src1_32 = gen_load_fpr_F(dc, rs2); \ + gen_store_fpr_F(dc, rd, cpu_src1_32); \ gen_set_label(l1); \ tcg_temp_free(r_cond); \ } @@ -2903,10 +3186,8 @@ static void disas_sparc_insn(DisasContext * dc) gen_cond(r_cond, icc, cond, dc); \ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ 0, l1); \ - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], \ - cpu_fpr[DFPREG(rs2)]); \ - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1], \ - cpu_fpr[DFPREG(rs2) + 1]); \ + cpu_src1_64 = gen_load_fpr_D(dc, rs2); \ + gen_store_fpr_D(dc, rd, cpu_src1_64); \ gen_update_fprs_dirty(DFPREG(rd)); \ gen_set_label(l1); \ tcg_temp_free(r_cond); \ @@ -2922,15 +3203,7 @@ static void disas_sparc_insn(DisasContext * dc) gen_cond(r_cond, icc, cond, dc); \ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ 0, l1); \ - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], \ - cpu_fpr[QFPREG(rs2)]); \ - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 1], \ - cpu_fpr[QFPREG(rs2) + 1]); \ - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 2], \ - cpu_fpr[QFPREG(rs2) + 2]); \ - tcg_gen_mov_i32(cpu_fpr[QFPREG(rd) + 3], \ - cpu_fpr[QFPREG(rs2) + 3]); \ - gen_update_fprs_dirty(QFPREG(rd)); \ + gen_move_Q(rd, rs2); \ gen_set_label(l1); \ tcg_temp_free(r_cond); \ } @@ -2960,12 +3233,14 @@ static void disas_sparc_insn(DisasContext * dc) #undef FMOVQCC #endif case 0x51: /* fcmps, V9 %fcc */ - gen_op_fcmps(rd & 3, cpu_fpr[rs1], cpu_fpr[rs2]); + cpu_src1_32 = gen_load_fpr_F(dc, rs1); + cpu_src2_32 = gen_load_fpr_F(dc, rs2); + gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32); break; case 0x52: /* fcmpd, V9 %fcc */ - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_op_fcmpd(rd & 3); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64); break; case 0x53: /* fcmpq, V9 %fcc */ CHECK_FPU_FEATURE(dc, FLOAT128); @@ -2974,12 +3249,14 @@ static void disas_sparc_insn(DisasContext * dc) gen_op_fcmpq(rd & 3); break; case 0x55: /* fcmpes, V9 %fcc */ - gen_op_fcmpes(rd & 3, cpu_fpr[rs1], cpu_fpr[rs2]); + cpu_src1_32 = gen_load_fpr_F(dc, rs1); + cpu_src2_32 = gen_load_fpr_F(dc, rs2); + gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32); break; case 0x56: /* fcmped, V9 %fcc */ - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_op_fcmped(rd & 3); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64); break; case 0x57: /* fcmpeq, V9 %fcc */ CHECK_FPU_FEATURE(dc, FLOAT128); @@ -3271,19 +3548,23 @@ static void disas_sparc_insn(DisasContext * dc) case 0xe: /* udiv */ CHECK_IU_FEATURE(dc, DIV); if (xop & 0x10) { - gen_helper_udiv_cc(cpu_dst, cpu_src1, cpu_src2); + gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1, + cpu_src2); dc->cc_op = CC_OP_DIV; } else { - gen_helper_udiv(cpu_dst, cpu_src1, cpu_src2); + gen_helper_udiv(cpu_dst, cpu_env, cpu_src1, + cpu_src2); } break; case 0xf: /* sdiv */ CHECK_IU_FEATURE(dc, DIV); if (xop & 0x10) { - gen_helper_sdiv_cc(cpu_dst, cpu_src1, cpu_src2); + gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1, + cpu_src2); dc->cc_op = CC_OP_DIV; } else { - gen_helper_sdiv(cpu_dst, cpu_src1, cpu_src2); + gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1, + cpu_src2); } break; default: @@ -3379,7 +3660,7 @@ static void disas_sparc_insn(DisasContext * dc) #else case 0x2: /* V9 wrccr */ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); - gen_helper_wrccr(cpu_dst); + gen_helper_wrccr(cpu_env, cpu_dst); tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); dc->cc_op = CC_OP_FLAGS; break; @@ -3412,19 +3693,19 
@@ static void disas_sparc_insn(DisasContext * dc) if (!supervisor(dc)) goto illegal_insn; tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2); - gen_helper_set_softint(cpu_tmp64); + gen_helper_set_softint(cpu_env, cpu_tmp64); break; case 0x15: /* Softint clear */ if (!supervisor(dc)) goto illegal_insn; tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2); - gen_helper_clear_softint(cpu_tmp64); + gen_helper_clear_softint(cpu_env, cpu_tmp64); break; case 0x16: /* Softint write */ if (!supervisor(dc)) goto illegal_insn; tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2); - gen_helper_write_softint(cpu_tmp64); + gen_helper_write_softint(cpu_env, cpu_tmp64); break; case 0x17: /* Tick compare */ #if !defined(CONFIG_USER_ONLY) @@ -3499,10 +3780,10 @@ static void disas_sparc_insn(DisasContext * dc) #ifdef TARGET_SPARC64 switch (rd) { case 0: - gen_helper_saved(); + gen_helper_saved(cpu_env); break; case 1: - gen_helper_restored(); + gen_helper_restored(cpu_env); break; case 2: /* UA2005 allclean */ case 3: /* UA2005 otherw */ @@ -3514,7 +3795,7 @@ static void disas_sparc_insn(DisasContext * dc) } #else tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); - gen_helper_wrpsr(cpu_dst); + gen_helper_wrpsr(cpu_env, cpu_dst); tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); dc->cc_op = CC_OP_FLAGS; save_state(dc, cpu_cond); @@ -3598,7 +3879,7 @@ static void disas_sparc_insn(DisasContext * dc) tcg_gen_mov_tl(r_tmp, cpu_tmp0); save_state(dc, cpu_cond); - gen_helper_wrpstate(r_tmp); + gen_helper_wrpstate(cpu_env, r_tmp); tcg_temp_free(r_tmp); dc->npc = DYNAMIC_PC; } @@ -3617,10 +3898,10 @@ static void disas_sparc_insn(DisasContext * dc) } break; case 8: // pil - gen_helper_wrpil(cpu_tmp0); + gen_helper_wrpil(cpu_env, cpu_tmp0); break; case 9: // cwp - gen_helper_wrcwp(cpu_tmp0); + gen_helper_wrcwp(cpu_env, cpu_tmp0); break; case 10: // cansave tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0); @@ -3815,31 +4096,101 @@ static void disas_sparc_insn(DisasContext * dc) switch (opf) { case 0x000: /* VIS I edge8cc */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x001: /* VIS II edge8n */ + CHECK_FPU_FEATURE(dc, VIS2); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x002: /* VIS I edge8lcc */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x003: /* VIS II edge8ln */ + CHECK_FPU_FEATURE(dc, VIS2); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x004: /* VIS I edge16cc */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x005: /* VIS II edge16n */ + CHECK_FPU_FEATURE(dc, VIS2); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x006: /* VIS I edge16lcc */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1); + gen_movl_TN_reg(rd, cpu_dst); + break; case 
0x007: /* VIS II edge16ln */ + CHECK_FPU_FEATURE(dc, VIS2); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x008: /* VIS I edge32cc */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x009: /* VIS II edge32n */ + CHECK_FPU_FEATURE(dc, VIS2); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x00a: /* VIS I edge32lcc */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x00b: /* VIS II edge32ln */ - // XXX - goto illegal_insn; + CHECK_FPU_FEATURE(dc, VIS2); + gen_movl_reg_TN(rs1, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x010: /* VIS I array8 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = get_src1(insn, cpu_src1); gen_movl_reg_TN(rs2, cpu_src2); - gen_helper_array8(cpu_dst, cpu_env, cpu_src1, cpu_src2); + gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); gen_movl_TN_reg(rd, cpu_dst); break; case 0x012: /* VIS I array16 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = get_src1(insn, cpu_src1); gen_movl_reg_TN(rs2, cpu_src2); - gen_helper_array8(cpu_dst, cpu_env, cpu_src1, cpu_src2); + gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); tcg_gen_shli_i64(cpu_dst, cpu_dst, 1); gen_movl_TN_reg(rd, cpu_dst); break; @@ -3847,7 +4198,7 @@ static void disas_sparc_insn(DisasContext * dc) CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = get_src1(insn, cpu_src1); gen_movl_reg_TN(rs2, cpu_src2); - gen_helper_array8(cpu_dst, cpu_env, cpu_src1, cpu_src2); + gen_helper_array8(cpu_dst, cpu_src1, cpu_src2); tcg_gen_shli_i64(cpu_dst, cpu_dst, 2); gen_movl_TN_reg(rd, cpu_dst); break; @@ -3855,424 +4206,317 @@ static void disas_sparc_insn(DisasContext * dc) CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = get_src1(insn, cpu_src1); gen_movl_reg_TN(rs2, cpu_src2); - gen_helper_alignaddr(cpu_dst, cpu_env, cpu_src1, cpu_src2); + gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0); gen_movl_TN_reg(rd, cpu_dst); break; - case 0x019: /* VIS II bmask */ case 0x01a: /* VIS I alignaddrl */ - // XXX - goto illegal_insn; + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = get_src1(insn, cpu_src1); + gen_movl_reg_TN(rs2, cpu_src2); + gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1); + gen_movl_TN_reg(rd, cpu_dst); + break; + case 0x019: /* VIS II bmask */ + CHECK_FPU_FEATURE(dc, VIS2); + cpu_src1 = get_src1(insn, cpu_src1); + cpu_src2 = get_src1(insn, cpu_src2); + tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2); + tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32); + gen_movl_TN_reg(rd, cpu_dst); + break; case 0x020: /* VIS I fcmple16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fcmple16(cpu_dst, cpu_env); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64); gen_movl_TN_reg(rd, cpu_dst); break; case 0x022: /* VIS I fcmpne16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fcmpne16(cpu_dst, cpu_env); + cpu_src1_64 = 
gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64); gen_movl_TN_reg(rd, cpu_dst); break; case 0x024: /* VIS I fcmple32 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fcmple32(cpu_dst, cpu_env); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64); gen_movl_TN_reg(rd, cpu_dst); break; case 0x026: /* VIS I fcmpne32 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fcmpne32(cpu_dst, cpu_env); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64); gen_movl_TN_reg(rd, cpu_dst); break; case 0x028: /* VIS I fcmpgt16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fcmpgt16(cpu_dst, cpu_env); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64); gen_movl_TN_reg(rd, cpu_dst); break; case 0x02a: /* VIS I fcmpeq16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fcmpeq16(cpu_dst, cpu_env); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64); gen_movl_TN_reg(rd, cpu_dst); break; case 0x02c: /* VIS I fcmpgt32 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fcmpgt32(cpu_dst, cpu_env); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64); gen_movl_TN_reg(rd, cpu_dst); break; case 0x02e: /* VIS I fcmpeq32 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fcmpeq32(cpu_dst, cpu_env); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64); gen_movl_TN_reg(rd, cpu_dst); break; case 0x031: /* VIS I fmul8x16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fmul8x16(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16); break; case 0x033: /* VIS I fmul8x16au */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fmul8x16au(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au); break; case 0x035: /* VIS I fmul8x16al */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fmul8x16al(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al); break; case 0x036: /* VIS I fmul8sux16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fmul8sux16(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16); break; case 0x037: /* VIS I fmul8ulx16 */ 
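The partitioned compares above (fcmple16 through fcmpeq32) now pass the two packed doublewords straight to the helper and collect the small result mask in an integer register, so each helper becomes a pure function of two 64-bit values. A hypothetical sketch of such a helper for a four-lane 16-bit equality compare; the helper name and the lane-to-bit ordering are assumptions of this sketch, not taken from the patch:

#include <stdint.h>

/* Compare the four 16-bit lanes of src1 and src2 and return a 4-bit
 * mask.  Which lane maps to which result bit is assumed here. */
static uint64_t fcmpeq16_model(uint64_t src1, uint64_t src2)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        uint16_t a = (uint16_t)(src1 >> (i * 16));
        uint16_t b = (uint16_t)(src2 >> (i * 16));

        if (a == b) {
            r |= 1ULL << i;
        }
    }
    return r;
}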
CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fmul8ulx16(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16); break; case 0x038: /* VIS I fmuld8sux16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fmuld8sux16(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16); break; case 0x039: /* VIS I fmuld8ulx16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fmuld8ulx16(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16); break; case 0x03a: /* VIS I fpack32 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32); + break; case 0x03b: /* VIS I fpack16 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + cpu_dst_32 = gen_dest_fpr_F(); + gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64); + gen_store_fpr_F(dc, rd, cpu_dst_32); + break; case 0x03d: /* VIS I fpackfix */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + cpu_dst_32 = gen_dest_fpr_F(); + gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64); + gen_store_fpr_F(dc, rd, cpu_dst_32); + break; case 0x03e: /* VIS I pdist */ - // XXX - goto illegal_insn; + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist); + break; case 0x048: /* VIS I faligndata */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_faligndata(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata); break; case 0x04b: /* VIS I fpmerge */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fpmerge(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge); break; case 0x04c: /* VIS II bshuffle */ - // XXX - goto illegal_insn; + CHECK_FPU_FEATURE(dc, VIS2); + gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle); + break; case 0x04d: /* VIS I fexpand */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fexpand(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand); break; case 0x050: /* VIS I fpadd16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fpadd16(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16); break; case 0x051: /* VIS I fpadd16s */ CHECK_FPU_FEATURE(dc, VIS1); - gen_helper_fpadd16s(cpu_fpr[rd], cpu_env, - cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s); break; case 0x052: /* VIS I fpadd32 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fpadd32(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32); 
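alignaddr, bmask and faligndata above all route through the %gsr global instead of helper calls: gen_alignaddr writes the 3-bit alignment field in GSR[2:0] (negating the sum for alignaddrl), the bmask case deposits the operand sum into GSR[63:32], and gen_faligndata reads GSR[2:0] back to stitch two doublewords together, splitting the right shift so no single TCG shift ever reaches 64. A scalar model of the three, with gsr passed explicitly and purely illustrative names:

#include <stdint.h>

/* GSR.align is bits [2:0], GSR.mask is bits [63:32] in this model. */

static uint64_t alignaddress(uint64_t *gsr, uint64_t s1, uint64_t s2,
                             int little)
{
    uint64_t sum = s1 + s2;
    uint64_t align = (little ? 0 - sum : sum) & 7;   /* alignaddrl negates */

    *gsr = (*gsr & ~7ULL) | align;
    return sum & ~7ULL;
}

static uint64_t bmask(uint64_t *gsr, uint64_t s1, uint64_t s2)
{
    uint64_t sum = s1 + s2;

    *gsr = (uint32_t)*gsr | (sum << 32);   /* deposit low word into [63:32] */
    return sum;
}

static uint64_t faligndata(uint64_t gsr, uint64_t s1, uint64_t s2)
{
    unsigned shift = (gsr & 7) * 8;

    /* Same trick as gen_faligndata(): shift right by (63 - shift) and
     * then by 1, so a byte offset of 0 never needs a 64-bit shift. */
    return (s1 << shift) | ((s2 >> (shift ^ 63)) >> 1);
}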
break; case 0x053: /* VIS I fpadd32s */ CHECK_FPU_FEATURE(dc, VIS1); - gen_helper_fpadd32s(cpu_fpr[rd], cpu_env, - cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32); break; case 0x054: /* VIS I fpsub16 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fpsub16(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16); break; case 0x055: /* VIS I fpsub16s */ CHECK_FPU_FEATURE(dc, VIS1); - gen_helper_fpsub16s(cpu_fpr[rd], cpu_env, - cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s); break; case 0x056: /* VIS I fpsub32 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs1)); - gen_op_load_fpr_DT1(DFPREG(rs2)); - gen_helper_fpsub32(cpu_env); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32); break; case 0x057: /* VIS I fpsub32s */ CHECK_FPU_FEATURE(dc, VIS1); - gen_helper_fpsub32s(cpu_fpr[rd], cpu_env, - cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32); break; case 0x060: /* VIS I fzero */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_movi_i32(cpu_fpr[DFPREG(rd)], 0); - tcg_gen_movi_i32(cpu_fpr[DFPREG(rd) + 1], 0); - gen_update_fprs_dirty(DFPREG(rd)); + cpu_dst_64 = gen_dest_fpr_D(); + tcg_gen_movi_i64(cpu_dst_64, 0); + gen_store_fpr_D(dc, rd, cpu_dst_64); break; case 0x061: /* VIS I fzeros */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_movi_i32(cpu_fpr[rd], 0); - gen_update_fprs_dirty(rd); + cpu_dst_32 = gen_dest_fpr_F(); + tcg_gen_movi_i32(cpu_dst_32, 0); + gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x062: /* VIS I fnor */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_nor_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)], - cpu_fpr[DFPREG(rs2)]); - tcg_gen_nor_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64); break; case 0x063: /* VIS I fnors */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_nor_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32); break; case 0x064: /* VIS I fandnot2 */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_andc_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)], - cpu_fpr[DFPREG(rs2)]); - tcg_gen_andc_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64); break; case 0x065: /* VIS I fandnot2s */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_andc_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32); break; case 0x066: /* VIS I fnot2 */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_not_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)]); - tcg_gen_not_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64); break; case 0x067: /* VIS I fnot2s */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_not_i32(cpu_fpr[rd], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32); break; case 0x068: /* VIS I fandnot1 */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_andc_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)], - 
cpu_fpr[DFPREG(rs1)]); - tcg_gen_andc_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs2) + 1], - cpu_fpr[DFPREG(rs1) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64); break; case 0x069: /* VIS I fandnot1s */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_andc_i32(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32); break; case 0x06a: /* VIS I fnot1 */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_not_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)]); - tcg_gen_not_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64); break; case 0x06b: /* VIS I fnot1s */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_not_i32(cpu_fpr[rd], cpu_fpr[rs1]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32); break; case 0x06c: /* VIS I fxor */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_xor_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)], - cpu_fpr[DFPREG(rs2)]); - tcg_gen_xor_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64); break; case 0x06d: /* VIS I fxors */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_xor_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32); break; case 0x06e: /* VIS I fnand */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_nand_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)], - cpu_fpr[DFPREG(rs2)]); - tcg_gen_nand_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64); break; case 0x06f: /* VIS I fnands */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_nand_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32); break; case 0x070: /* VIS I fand */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_and_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)], - cpu_fpr[DFPREG(rs2)]); - tcg_gen_and_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64); break; case 0x071: /* VIS I fands */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_and_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32); break; case 0x072: /* VIS I fxnor */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_xori_i32(cpu_tmp32, cpu_fpr[DFPREG(rs2)], -1); - tcg_gen_xor_i32(cpu_fpr[DFPREG(rd)], cpu_tmp32, - cpu_fpr[DFPREG(rs1)]); - tcg_gen_xori_i32(cpu_tmp32, cpu_fpr[DFPREG(rs2) + 1], -1); - tcg_gen_xor_i32(cpu_fpr[DFPREG(rd) + 1], cpu_tmp32, - cpu_fpr[DFPREG(rs1) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64); break; case 0x073: /* VIS I fxnors */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_xori_i32(cpu_tmp32, cpu_fpr[rs2], -1); - tcg_gen_xor_i32(cpu_fpr[rd], cpu_tmp32, cpu_fpr[rs1]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32); break; case 0x074: /* VIS I fsrc1 */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)]); - tcg_gen_mov_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + gen_store_fpr_D(dc, rd, 
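The bitwise VIS operations above no longer need helpers or paired 32-bit moves at all; each one becomes a single TCG op on the whole 64-bit register, and fxnor in particular collapses from an xori/xor sequence into tcg_gen_eqv_i64. A one-liner showing why that substitution is exact:

#include <stdint.h>
#include <assert.h>

/* eqv(a, b) is ~(a ^ b); the old fxnor sequence computed (b ^ -1) ^ a,
 * which is the same value for every a and b. */
static uint64_t eqv64(uint64_t a, uint64_t b)
{
    return ~(a ^ b);
}

int main(void)
{
    uint64_t a = 0x0123456789abcdefULL;
    uint64_t b = 0xfedcba9876543210ULL;

    assert(eqv64(a, b) == ((b ^ ~0ULL) ^ a));
    return 0;
}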
cpu_src1_64); break; case 0x075: /* VIS I fsrc1s */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs1]); - gen_update_fprs_dirty(rd); + cpu_src1_32 = gen_load_fpr_F(dc, rs1); + gen_store_fpr_F(dc, rd, cpu_src1_32); break; case 0x076: /* VIS I fornot2 */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_orc_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)], - cpu_fpr[DFPREG(rs2)]); - tcg_gen_orc_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64); break; case 0x077: /* VIS I fornot2s */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_orc_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32); break; case 0x078: /* VIS I fsrc2 */ CHECK_FPU_FEATURE(dc, VIS1); - gen_op_load_fpr_DT0(DFPREG(rs2)); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + gen_store_fpr_D(dc, rd, cpu_src1_64); break; case 0x079: /* VIS I fsrc2s */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + cpu_src1_32 = gen_load_fpr_F(dc, rs2); + gen_store_fpr_F(dc, rd, cpu_src1_32); break; case 0x07a: /* VIS I fornot1 */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_orc_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs2)], - cpu_fpr[DFPREG(rs1)]); - tcg_gen_orc_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs2) + 1], - cpu_fpr[DFPREG(rs1) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64); break; case 0x07b: /* VIS I fornot1s */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_orc_i32(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32); break; case 0x07c: /* VIS I for */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_or_i32(cpu_fpr[DFPREG(rd)], cpu_fpr[DFPREG(rs1)], - cpu_fpr[DFPREG(rs2)]); - tcg_gen_or_i32(cpu_fpr[DFPREG(rd) + 1], - cpu_fpr[DFPREG(rs1) + 1], - cpu_fpr[DFPREG(rs2) + 1]); - gen_update_fprs_dirty(DFPREG(rd)); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64); break; case 0x07d: /* VIS I fors */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_or_i32(cpu_fpr[rd], cpu_fpr[rs1], cpu_fpr[rs2]); - gen_update_fprs_dirty(rd); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32); break; case 0x07e: /* VIS I fone */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_movi_i32(cpu_fpr[DFPREG(rd)], -1); - tcg_gen_movi_i32(cpu_fpr[DFPREG(rd) + 1], -1); - gen_update_fprs_dirty(DFPREG(rd)); + cpu_dst_64 = gen_dest_fpr_D(); + tcg_gen_movi_i64(cpu_dst_64, -1); + gen_store_fpr_D(dc, rd, cpu_dst_64); break; case 0x07f: /* VIS I fones */ CHECK_FPU_FEATURE(dc, VIS1); - tcg_gen_movi_i32(cpu_fpr[rd], -1); - gen_update_fprs_dirty(rd); + cpu_dst_32 = gen_dest_fpr_F(); + tcg_gen_movi_i32(cpu_dst_32, -1); + gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x080: /* VIS I shutdown */ case 0x081: /* VIS II siam */ @@ -4307,7 +4551,7 @@ static void disas_sparc_insn(DisasContext * dc) } else tcg_gen_mov_tl(cpu_dst, cpu_src1); } - gen_helper_restore(); + gen_helper_restore(cpu_env); gen_mov_pc_npc(dc, cpu_cond); r_const = tcg_const_i32(3); gen_helper_check_align(cpu_dst, r_const); @@ -4359,7 +4603,7 @@ static void disas_sparc_insn(DisasContext * dc) tcg_temp_free_i32(r_const); tcg_gen_mov_tl(cpu_npc, cpu_dst); dc->npc = DYNAMIC_PC; - gen_helper_rett(); + gen_helper_rett(cpu_env); } goto jmp_insn; #endif @@ -4370,12 +4614,12 @@ static void disas_sparc_insn(DisasContext * dc) 
break; case 0x3c: /* save */ save_state(dc, cpu_cond); - gen_helper_save(); + gen_helper_save(cpu_env); gen_movl_TN_reg(rd, cpu_dst); break; case 0x3d: /* restore */ save_state(dc, cpu_cond); - gen_helper_restore(); + gen_helper_restore(cpu_env); gen_movl_TN_reg(rd, cpu_dst); break; #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64) @@ -4387,14 +4631,14 @@ static void disas_sparc_insn(DisasContext * dc) goto priv_insn; dc->npc = DYNAMIC_PC; dc->pc = DYNAMIC_PC; - gen_helper_done(); + gen_helper_done(cpu_env); goto jmp_insn; case 1: if (!supervisor(dc)) goto priv_insn; dc->npc = DYNAMIC_PC; dc->pc = DYNAMIC_PC; - gen_helper_retry(); + gen_helper_retry(cpu_env); goto jmp_insn; default: goto illegal_insn; @@ -4655,8 +4899,9 @@ static void disas_sparc_insn(DisasContext * dc) case 0x20: /* ldf, load fpreg */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx); - tcg_gen_trunc_tl_i32(cpu_fpr[rd], cpu_tmp0); - gen_update_fprs_dirty(rd); + cpu_dst_32 = gen_dest_fpr_F(); + tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0); + gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x21: /* ldfsr, V9 ldxfsr */ #ifdef TARGET_SPARC64 @@ -4690,16 +4935,10 @@ static void disas_sparc_insn(DisasContext * dc) } break; case 0x23: /* lddf, load double fpreg */ - { - TCGv_i32 r_const; - - r_const = tcg_const_i32(dc->mem_idx); - gen_address_mask(dc, cpu_addr); - gen_helper_lddf(cpu_addr, r_const); - tcg_temp_free_i32(r_const); - gen_op_store_DT0_fpr(DFPREG(rd)); - gen_update_fprs_dirty(DFPREG(rd)); - } + gen_address_mask(dc, cpu_addr); + cpu_dst_64 = gen_dest_fpr_D(); + tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx); + gen_store_fpr_D(dc, rd, cpu_dst_64); break; default: goto illegal_insn; @@ -4806,7 +5045,8 @@ static void disas_sparc_insn(DisasContext * dc) switch (xop) { case 0x24: /* stf, store fpreg */ gen_address_mask(dc, cpu_addr); - tcg_gen_ext_i32_tl(cpu_tmp0, cpu_fpr[rd]); + cpu_src1_32 = gen_load_fpr_F(dc, rd); + tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32); tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx); break; case 0x25: /* stfsr, V9 stxfsr */ @@ -4849,15 +5089,9 @@ static void disas_sparc_insn(DisasContext * dc) #endif #endif case 0x27: /* stdf, store double fpreg */ - { - TCGv_i32 r_const; - - gen_op_load_fpr_DT0(DFPREG(rd)); - r_const = tcg_const_i32(dc->mem_idx); - gen_address_mask(dc, cpu_addr); - gen_helper_stdf(cpu_addr, r_const); - tcg_temp_free_i32(r_const); - } + gen_address_mask(dc, cpu_addr); + cpu_src1_64 = gen_load_fpr_D(dc, rd); + tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx); break; default: goto illegal_insn; @@ -4992,6 +5226,13 @@ static void disas_sparc_insn(DisasContext * dc) egress: tcg_temp_free(cpu_tmp1); tcg_temp_free(cpu_tmp2); + if (dc->n_t32 != 0) { + int i; + for (i = dc->n_t32 - 1; i >= 0; --i) { + tcg_temp_free_i32(dc->t32[i]); + } + dc->n_t32 = 0; + } } static inline void gen_intermediate_code_internal(TranslationBlock * tb, @@ -5091,6 +5332,7 @@ static inline void gen_intermediate_code_internal(TranslationBlock * tb, tcg_temp_free_i64(cpu_tmp64); tcg_temp_free_i32(cpu_tmp32); tcg_temp_free(cpu_tmp0); + if (tb->cflags & CF_LAST_IO) gen_io_end(); if (!dc->is_br) { @@ -5155,15 +5397,11 @@ void gen_intermediate_code_init(CPUSPARCState *env) "g6", "g7", }; - static const char * const fregnames[64] = { - "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", - "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", - "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", - "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", - "f32", 
"f33", "f34", "f35", "f36", "f37", "f38", "f39", - "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47", - "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55", - "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63", + static const char * const fregnames[32] = { + "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14", + "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30", + "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", + "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", }; /* init various static tables */ @@ -5233,14 +5471,16 @@ void gen_intermediate_code_init(CPUSPARCState *env) cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, tbr), "tbr"); #endif - for (i = 1; i < 8; i++) + for (i = 1; i < 8; i++) { cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, gregs[i]), gregnames[i]); - for (i = 0; i < TARGET_FPREGS; i++) - cpu_fpr[i] = tcg_global_mem_new_i32(TCG_AREG0, + } + for (i = 0; i < TARGET_DPREGS; i++) { + cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, fpr[i]), fregnames[i]); + } /* register helpers */ diff --git a/target-sparc/vis_helper.c b/target-sparc/vis_helper.c index a22c10bb43..a992c293af 100644 --- a/target-sparc/vis_helper.c +++ b/target-sparc/vis_helper.c @@ -20,11 +20,6 @@ #include "cpu.h" #include "helper.h" -#define DT0 (env->dt0) -#define DT1 (env->dt1) -#define QT0 (env->qt0) -#define QT1 (env->qt1) - /* This function uses non-native bit order */ #define GET_FIELD(X, FROM, TO) \ ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1)) @@ -33,8 +28,7 @@ #define GET_FIELD_SP(X, FROM, TO) \ GET_FIELD(X, 63 - (TO), 63 - (FROM)) -target_ulong helper_array8(CPUState *env, target_ulong pixel_addr, - target_ulong cubesize) +target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize) { return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) | (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) | @@ -47,29 +41,6 @@ target_ulong helper_array8(CPUState *env, target_ulong pixel_addr, GET_FIELD_SP(pixel_addr, 11, 12); } -target_ulong helper_alignaddr(CPUState *env, target_ulong addr, - target_ulong offset) -{ - uint64_t tmp; - - tmp = addr + offset; - env->gsr &= ~7ULL; - env->gsr |= tmp & 7ULL; - return tmp & ~7ULL; -} - -void helper_faligndata(CPUState *env) -{ - uint64_t tmp; - - tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8); - /* on many architectures a shift of 64 does nothing */ - if ((env->gsr & 7) != 0) { - tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8); - } - *((uint64_t *)&DT0) = tmp; -} - #ifdef HOST_WORDS_BIGENDIAN #define VIS_B64(n) b[7 - (n)] #define VIS_W64(n) w[3 - (n)] @@ -102,12 +73,12 @@ typedef union { float32 f; } VIS32; -void helper_fpmerge(CPUState *env) +uint64_t helper_fpmerge(uint64_t src1, uint64_t src2) { VIS64 s, d; - s.d = DT0; - d.d = DT1; + s.ll = src1; + d.ll = src2; /* Reverse calculation order to handle overlap */ d.VIS_B64(7) = s.VIS_B64(3); @@ -119,16 +90,16 @@ void helper_fpmerge(CPUState *env) d.VIS_B64(1) = s.VIS_B64(0); /* d.VIS_B64(0) = d.VIS_B64(0); */ - DT0 = d.d; + return d.ll; } -void helper_fmul8x16(CPUState *env) +uint64_t helper_fmul8x16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; - s.d = DT0; - d.d = DT1; + s.ll = src1; + d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \ @@ -143,16 +114,16 @@ void helper_fmul8x16(CPUState *env) PMUL(3); #undef PMUL - DT0 = d.d; + return d.ll; } -void helper_fmul8x16al(CPUState *env) +uint64_t helper_fmul8x16al(uint64_t src1, uint64_t 
src2) { VIS64 s, d; uint32_t tmp; - s.d = DT0; - d.d = DT1; + s.ll = src1; + d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \ @@ -167,16 +138,16 @@ void helper_fmul8x16al(CPUState *env) PMUL(3); #undef PMUL - DT0 = d.d; + return d.ll; } -void helper_fmul8x16au(CPUState *env) +uint64_t helper_fmul8x16au(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; - s.d = DT0; - d.d = DT1; + s.ll = src1; + d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \ @@ -191,16 +162,16 @@ void helper_fmul8x16au(CPUState *env) PMUL(3); #undef PMUL - DT0 = d.d; + return d.ll; } -void helper_fmul8sux16(CPUState *env) +uint64_t helper_fmul8sux16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; - s.d = DT0; - d.d = DT1; + s.ll = src1; + d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \ @@ -215,16 +186,16 @@ void helper_fmul8sux16(CPUState *env) PMUL(3); #undef PMUL - DT0 = d.d; + return d.ll; } -void helper_fmul8ulx16(CPUState *env) +uint64_t helper_fmul8ulx16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; - s.d = DT0; - d.d = DT1; + s.ll = src1; + d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \ @@ -239,16 +210,16 @@ void helper_fmul8ulx16(CPUState *env) PMUL(3); #undef PMUL - DT0 = d.d; + return d.ll; } -void helper_fmuld8sux16(CPUState *env) +uint64_t helper_fmuld8sux16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; - s.d = DT0; - d.d = DT1; + s.ll = src1; + d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \ @@ -262,16 +233,16 @@ void helper_fmuld8sux16(CPUState *env) PMUL(0); #undef PMUL - DT0 = d.d; + return d.ll; } -void helper_fmuld8ulx16(CPUState *env) +uint64_t helper_fmuld8ulx16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; - s.d = DT0; - d.d = DT1; + s.ll = src1; + d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \ @@ -285,42 +256,41 @@ void helper_fmuld8ulx16(CPUState *env) PMUL(0); #undef PMUL - DT0 = d.d; + return d.ll; } -void helper_fexpand(CPUState *env) +uint64_t helper_fexpand(uint64_t src1, uint64_t src2) { VIS32 s; VIS64 d; - s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff); - d.d = DT1; + s.l = (uint32_t)src1; + d.ll = src2; d.VIS_W64(0) = s.VIS_B32(0) << 4; d.VIS_W64(1) = s.VIS_B32(1) << 4; d.VIS_W64(2) = s.VIS_B32(2) << 4; d.VIS_W64(3) = s.VIS_B32(3) << 4; - DT0 = d.d; + return d.ll; } #define VIS_HELPER(name, F) \ - void name##16(CPUState *env) \ + uint64_t name##16(uint64_t src1, uint64_t src2) \ { \ VIS64 s, d; \ \ - s.d = DT0; \ - d.d = DT1; \ + s.ll = src1; \ + d.ll = src2; \ \ d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \ d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \ d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \ d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \ \ - DT0 = d.d; \ + return d.ll; \ } \ \ - uint32_t name##16s(CPUState *env, uint32_t src1, \ - uint32_t src2) \ + uint32_t name##16s(uint32_t src1, uint32_t src2) \ { \ VIS32 s, d; \ \ @@ -333,21 +303,20 @@ void helper_fexpand(CPUState *env) return d.l; \ } \ \ - void name##32(CPUState *env) \ + uint64_t name##32(uint64_t src1, uint64_t src2) \ { \ VIS64 s, d; \ \ - s.d = DT0; \ - d.d = DT1; \ + s.ll = src1; \ + d.ll = src2; \ \ d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \ d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \ \ - DT0 = d.d; \ + return d.ll; \ } \ \ - uint32_t name##32s(CPUState *env, uint32_t src1, \ - uint32_t src2) 
\ + uint32_t name##32s(uint32_t src1, uint32_t src2) \ { \ VIS32 s, d; \ \ @@ -365,12 +334,12 @@ VIS_HELPER(helper_fpadd, FADD) VIS_HELPER(helper_fpsub, FSUB) #define VIS_CMPHELPER(name, F) \ - uint64_t name##16(CPUState *env) \ + uint64_t name##16(uint64_t src1, uint64_t src2) \ { \ VIS64 s, d; \ \ - s.d = DT0; \ - d.d = DT1; \ + s.ll = src1; \ + d.ll = src2; \ \ d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0; \ d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0; \ @@ -381,12 +350,12 @@ VIS_HELPER(helper_fpsub, FSUB) return d.ll; \ } \ \ - uint64_t name##32(CPUState *env) \ + uint64_t name##32(uint64_t src1, uint64_t src2) \ { \ VIS64 s, d; \ \ - s.d = DT0; \ - d.d = DT1; \ + s.ll = src1; \ + d.ll = src2; \ \ d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0; \ d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0; \ @@ -404,3 +373,117 @@ VIS_CMPHELPER(helper_fcmpgt, FCMPGT) VIS_CMPHELPER(helper_fcmpeq, FCMPEQ) VIS_CMPHELPER(helper_fcmple, FCMPLE) VIS_CMPHELPER(helper_fcmpne, FCMPNE) + +uint64_t helper_pdist(uint64_t sum, uint64_t src1, uint64_t src2) +{ + int i; + for (i = 0; i < 8; i++) { + int s1, s2; + + s1 = (src1 >> (56 - (i * 8))) & 0xff; + s2 = (src2 >> (56 - (i * 8))) & 0xff; + + /* Absolute value of difference. */ + s1 -= s2; + if (s1 < 0) { + s1 = -s1; + } + + sum += s1; + } + + return sum; +} + +uint32_t helper_fpack16(uint64_t gsr, uint64_t rs2) +{ + int scale = (gsr >> 3) & 0xf; + uint32_t ret = 0; + int byte; + + for (byte = 0; byte < 4; byte++) { + uint32_t val; + int16_t src = rs2 >> (byte * 16); + int32_t scaled = src << scale; + int32_t from_fixed = scaled >> 7; + + val = (from_fixed < 0 ? 0 : + from_fixed > 255 ? 255 : from_fixed); + + ret |= val << (8 * byte); + } + + return ret; +} + +uint64_t helper_fpack32(uint64_t gsr, uint64_t rs1, uint64_t rs2) +{ + int scale = (gsr >> 3) & 0x1f; + uint64_t ret = 0; + int word; + + ret = (rs1 << 8) & ~(0x000000ff000000ffULL); + for (word = 0; word < 2; word++) { + uint64_t val; + int32_t src = rs2 >> (word * 32); + int64_t scaled = (int64_t)src << scale; + int64_t from_fixed = scaled >> 23; + + val = (from_fixed < 0 ? 0 : + (from_fixed > 255) ? 255 : from_fixed); + + ret |= val << (32 * word); + } + + return ret; +} + +uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2) +{ + int scale = (gsr >> 3) & 0x1f; + uint32_t ret = 0; + int word; + + for (word = 0; word < 2; word++) { + uint32_t val; + int32_t src = rs2 >> (word * 32); + int64_t scaled = src << scale; + int64_t from_fixed = scaled >> 16; + + val = (from_fixed < -32768 ? -32768 : + from_fixed > 32767 ? 32767 : from_fixed); + + ret |= (val & 0xffff) << (word * 16); + } + + return ret; +} + +uint64 helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2) +{ + union { + uint64_t ll[2]; + uint8_t b[16]; + } s; + VIS64 r; + uint32_t i, mask, host; + + /* Set up S such that we can index across all of the bytes. */ +#ifdef HOST_WORDS_BIGENDIAN + s.ll[0] = src1; + s.ll[1] = src2; + host = 0; +#else + s.ll[1] = src1; + s.ll[0] = src2; + host = 15; +#endif + mask = gsr >> 32; + + for (i = 0; i < 8; ++i) { + unsigned e = (mask >> (28 - i*4)) & 0xf; + r.VIS_B64(i) = s.b[e ^ host]; + } + + return r.ll; +} diff --git a/target-sparc/win_helper.c b/target-sparc/win_helper.c index 8bf2123e9c..a68c649e7e 100644 --- a/target-sparc/win_helper.c +++ b/target-sparc/win_helper.c @@ -18,17 +18,8 @@ */ #include "cpu.h" -#include "dyngen-exec.h" #include "helper.h" - -//#define DEBUG_PSTATE - -#ifdef DEBUG_PSTATE -#define DPRINTF_PSTATE(fmt, ...) 
\ - do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_PSTATE(fmt, ...) do {} while (0) -#endif +#include "trace.h" static inline void memcpy32(target_ulong *dst, const target_ulong *src) { @@ -42,7 +33,7 @@ static inline void memcpy32(target_ulong *dst, const target_ulong *src) dst[7] = src[7]; } -static void set_cwp(int new_cwp) +void cpu_set_cwp(CPUState *env, int new_cwp) { /* put the modified wrap registers at their proper location */ if (env->cwp == env->nwindows - 1) { @@ -57,17 +48,7 @@ static void set_cwp(int new_cwp) env->regwptr = env->regbase + (new_cwp * 16); } -void cpu_set_cwp(CPUState *env1, int new_cwp) -{ - CPUState *saved_env; - - saved_env = env; - env = env1; - set_cwp(new_cwp); - env = saved_env; -} - -static target_ulong get_psr(void) +target_ulong cpu_get_psr(CPUState *env) { helper_compute_psr(env); @@ -83,19 +64,7 @@ static target_ulong get_psr(void) #endif } -target_ulong cpu_get_psr(CPUState *env1) -{ - CPUState *saved_env; - target_ulong ret; - - saved_env = env; - env = env1; - ret = get_psr(); - env = saved_env; - return ret; -} - -static void put_psr(target_ulong val) +void cpu_put_psr(CPUState *env, target_ulong val) { env->psr = val & PSR_ICC; #if !defined(TARGET_SPARC64) @@ -109,22 +78,12 @@ static void put_psr(target_ulong val) env->psrs = (val & PSR_S) ? 1 : 0; env->psrps = (val & PSR_PS) ? 1 : 0; env->psret = (val & PSR_ET) ? 1 : 0; - set_cwp(val & PSR_CWP); + cpu_set_cwp(env, val & PSR_CWP); #endif env->cc_op = CC_OP_FLAGS; } -void cpu_put_psr(CPUState *env1, target_ulong val) -{ - CPUState *saved_env; - - saved_env = env; - env = env1; - put_psr(val); - env = saved_env; -} - -static int cwp_inc(int cwp) +int cpu_cwp_inc(CPUState *env, int cwp) { if (unlikely(cwp >= env->nwindows)) { cwp -= env->nwindows; @@ -132,19 +91,7 @@ static int cwp_inc(int cwp) return cwp; } -int cpu_cwp_inc(CPUState *env1, int cwp) -{ - CPUState *saved_env; - target_ulong ret; - - saved_env = env; - env = env1; - ret = cwp_inc(cwp); - env = saved_env; - return ret; -} - -static int cwp_dec(int cwp) +int cpu_cwp_dec(CPUState *env, int cwp) { if (unlikely(cwp < 0)) { cwp += env->nwindows; @@ -152,20 +99,8 @@ static int cwp_dec(int cwp) return cwp; } -int cpu_cwp_dec(CPUState *env1, int cwp) -{ - CPUState *saved_env; - target_ulong ret; - - saved_env = env; - env = env1; - ret = cwp_dec(cwp); - env = saved_env; - return ret; -} - #ifndef TARGET_SPARC64 -void helper_rett(void) +void helper_rett(CPUState *env) { unsigned int cwp; @@ -174,39 +109,39 @@ void helper_rett(void) } env->psret = 1; - cwp = cwp_inc(env->cwp + 1) ; + cwp = cpu_cwp_inc(env, env->cwp + 1) ; if (env->wim & (1 << cwp)) { helper_raise_exception(env, TT_WIN_UNF); } - set_cwp(cwp); + cpu_set_cwp(env, cwp); env->psrs = env->psrps; } /* XXX: use another pointer for %iN registers to avoid slow wrapping handling ? 
*/ -void helper_save(void) +void helper_save(CPUState *env) { uint32_t cwp; - cwp = cwp_dec(env->cwp - 1); + cwp = cpu_cwp_dec(env, env->cwp - 1); if (env->wim & (1 << cwp)) { helper_raise_exception(env, TT_WIN_OVF); } - set_cwp(cwp); + cpu_set_cwp(env, cwp); } -void helper_restore(void) +void helper_restore(CPUState *env) { uint32_t cwp; - cwp = cwp_inc(env->cwp + 1); + cwp = cpu_cwp_inc(env, env->cwp + 1); if (env->wim & (1 << cwp)) { helper_raise_exception(env, TT_WIN_UNF); } - set_cwp(cwp); + cpu_set_cwp(env, cwp); } -void helper_wrpsr(target_ulong new_psr) +void helper_wrpsr(CPUState *env, target_ulong new_psr) { if ((new_psr & PSR_CWP) >= env->nwindows) { helper_raise_exception(env, TT_ILL_INSN); @@ -215,19 +150,19 @@ void helper_wrpsr(target_ulong new_psr) } } -target_ulong helper_rdpsr(void) +target_ulong helper_rdpsr(CPUState *env) { - return get_psr(); + return cpu_get_psr(env); } #else /* XXX: use another pointer for %iN registers to avoid slow wrapping handling ? */ -void helper_save(void) +void helper_save(CPUState *env) { uint32_t cwp; - cwp = cwp_dec(env->cwp - 1); + cwp = cpu_cwp_dec(env, env->cwp - 1); if (env->cansave == 0) { helper_raise_exception(env, TT_SPILL | (env->otherwin != 0 ? (TT_WOTHER | @@ -240,16 +175,16 @@ void helper_save(void) } else { env->cansave--; env->canrestore++; - set_cwp(cwp); + cpu_set_cwp(env, cwp); } } } -void helper_restore(void) +void helper_restore(CPUState *env) { uint32_t cwp; - cwp = cwp_inc(env->cwp + 1); + cwp = cpu_cwp_inc(env, env->cwp + 1); if (env->canrestore == 0) { helper_raise_exception(env, TT_FILL | (env->otherwin != 0 ? (TT_WOTHER | @@ -258,11 +193,11 @@ void helper_restore(void) } else { env->cansave++; env->canrestore--; - set_cwp(cwp); + cpu_set_cwp(env, cwp); } } -void helper_flushw(void) +void helper_flushw(CPUState *env) { if (env->cansave != env->nwindows - 2) { helper_raise_exception(env, TT_SPILL | (env->otherwin != 0 ? 
@@ -272,7 +207,7 @@ void helper_flushw(void) } } -void helper_saved(void) +void helper_saved(CPUState *env) { env->cansave++; if (env->otherwin == 0) { @@ -282,7 +217,7 @@ void helper_saved(void) } } -void helper_restored(void) +void helper_restored(CPUState *env) { env->canrestore++; if (env->cleanwin < env->nwindows - 1) { @@ -295,110 +230,62 @@ void helper_restored(void) } } -static target_ulong get_ccr(void) +target_ulong cpu_get_ccr(CPUState *env) { target_ulong psr; - psr = get_psr(); + psr = cpu_get_psr(env); return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20); } -target_ulong cpu_get_ccr(CPUState *env1) -{ - CPUState *saved_env; - target_ulong ret; - - saved_env = env; - env = env1; - ret = get_ccr(); - env = saved_env; - return ret; -} - -static void put_ccr(target_ulong val) +void cpu_put_ccr(CPUState *env, target_ulong val) { env->xcc = (val >> 4) << 20; env->psr = (val & 0xf) << 20; CC_OP = CC_OP_FLAGS; } -void cpu_put_ccr(CPUState *env1, target_ulong val) -{ - CPUState *saved_env; - - saved_env = env; - env = env1; - put_ccr(val); - env = saved_env; -} - -static target_ulong get_cwp64(void) +target_ulong cpu_get_cwp64(CPUState *env) { return env->nwindows - 1 - env->cwp; } -target_ulong cpu_get_cwp64(CPUState *env1) -{ - CPUState *saved_env; - target_ulong ret; - - saved_env = env; - env = env1; - ret = get_cwp64(); - env = saved_env; - return ret; -} - -static void put_cwp64(int cwp) +void cpu_put_cwp64(CPUState *env, int cwp) { if (unlikely(cwp >= env->nwindows || cwp < 0)) { cwp %= env->nwindows; } - set_cwp(env->nwindows - 1 - cwp); -} - -void cpu_put_cwp64(CPUState *env1, int cwp) -{ - CPUState *saved_env; - - saved_env = env; - env = env1; - put_cwp64(cwp); - env = saved_env; + cpu_set_cwp(env, env->nwindows - 1 - cwp); } -target_ulong helper_rdccr(void) +target_ulong helper_rdccr(CPUState *env) { - return get_ccr(); + return cpu_get_ccr(env); } -void helper_wrccr(target_ulong new_ccr) +void helper_wrccr(CPUState *env, target_ulong new_ccr) { - put_ccr(new_ccr); + cpu_put_ccr(env, new_ccr); } /* CWP handling is reversed in V9, but we still use the V8 register order. */ -target_ulong helper_rdcwp(void) +target_ulong helper_rdcwp(CPUState *env) { - return get_cwp64(); + return cpu_get_cwp64(env); } -void helper_wrcwp(target_ulong new_cwp) +void helper_wrcwp(CPUState *env, target_ulong new_cwp) { - put_cwp64(new_cwp); + cpu_put_cwp64(env, new_cwp); } -static inline uint64_t *get_gregset(uint32_t pstate) +static inline uint64_t *get_gregset(CPUState *env, uint32_t pstate) { switch (pstate) { default: - DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n", - pstate, - (pstate & PS_IG) ? " IG" : "", - (pstate & PS_MG) ? " MG" : "", - (pstate & PS_AG) ? 
" AG" : ""); + trace_win_helper_gregset_error(pstate); /* pass through to normal set of global registers */ case 0: return env->bgregs; @@ -411,7 +298,7 @@ static inline uint64_t *get_gregset(uint32_t pstate) } } -static inline void change_pstate(uint32_t new_pstate) +void cpu_change_pstate(CPUState *env, uint32_t new_pstate) { uint32_t pstate_regs, new_pstate_regs; uint64_t *src, *dst; @@ -425,23 +312,22 @@ static inline void change_pstate(uint32_t new_pstate) new_pstate_regs = new_pstate & 0xc01; if (new_pstate_regs != pstate_regs) { - DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n", - pstate_regs, new_pstate_regs); + trace_win_helper_switch_pstate(pstate_regs, new_pstate_regs); + /* Switch global register bank */ - src = get_gregset(new_pstate_regs); - dst = get_gregset(pstate_regs); + src = get_gregset(env, new_pstate_regs); + dst = get_gregset(env, pstate_regs); memcpy32(dst, env->gregs); memcpy32(env->gregs, src); } else { - DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n", - new_pstate_regs); + trace_win_helper_no_switch_pstate(new_pstate_regs); } env->pstate = new_pstate; } -void helper_wrpstate(target_ulong new_state) +void helper_wrpstate(CPUState *env, target_ulong new_state) { - change_pstate(new_state & 0xf3f); + cpu_change_pstate(env, new_state & 0xf3f); #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { @@ -450,21 +336,10 @@ void helper_wrpstate(target_ulong new_state) #endif } -void cpu_change_pstate(CPUState *env1, uint32_t new_pstate) -{ - CPUState *saved_env; - - saved_env = env; - env = env1; - change_pstate(new_pstate); - env = saved_env; -} - -void helper_wrpil(target_ulong new_pil) +void helper_wrpil(CPUState *env, target_ulong new_pil) { #if !defined(CONFIG_USER_ONLY) - DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n", - env->psrpil, (uint32_t)new_pil); + trace_win_helper_wrpil(env->psrpil, (uint32_t)new_pil); env->psrpil = new_pil; @@ -474,19 +349,19 @@ void helper_wrpil(target_ulong new_pil) #endif } -void helper_done(void) +void helper_done(CPUState *env) { trap_state *tsptr = cpu_tsptr(env); env->pc = tsptr->tnpc; env->npc = tsptr->tnpc + 4; - put_ccr(tsptr->tstate >> 32); + cpu_put_ccr(env, tsptr->tstate >> 32); env->asi = (tsptr->tstate >> 24) & 0xff; - change_pstate((tsptr->tstate >> 8) & 0xf3f); - put_cwp64(tsptr->tstate & 0xff); + cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); + cpu_put_cwp64(env, tsptr->tstate & 0xff); env->tl--; - DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl); + trace_win_helper_done(env->tl); #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { @@ -495,19 +370,19 @@ void helper_done(void) #endif } -void helper_retry(void) +void helper_retry(CPUState *env) { trap_state *tsptr = cpu_tsptr(env); env->pc = tsptr->tpc; env->npc = tsptr->tnpc; - put_ccr(tsptr->tstate >> 32); + cpu_put_ccr(env, tsptr->tstate >> 32); env->asi = (tsptr->tstate >> 24) & 0xff; - change_pstate((tsptr->tstate >> 8) & 0xf3f); - put_cwp64(tsptr->tstate & 0xff); + cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); + cpu_put_cwp64(env, tsptr->tstate & 0xff); env->tl--; - DPRINTF_PSTATE("... 
helper_retry tl=%d\n", env->tl); + trace_win_helper_retry(env->tl); #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h index fea5983669..24ec7fc128 100644 --- a/tcg/tcg-op.h +++ b/tcg/tcg-op.h @@ -2045,38 +2045,75 @@ static inline void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, unsigned int ofs, unsigned int len) { + uint32_t mask; + TCGv_i32 t1; + + if (ofs == 0 && len == 32) { + tcg_gen_mov_i32(ret, arg2); + return; + } if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len); - } else { - uint32_t mask = (1u << len) - 1; - TCGv_i32 t1 = tcg_temp_new_i32 (); + return; + } + + mask = (1u << len) - 1; + t1 = tcg_temp_new_i32(); + if (ofs + len < 32) { tcg_gen_andi_i32(t1, arg2, mask); tcg_gen_shli_i32(t1, t1, ofs); - tcg_gen_andi_i32(ret, arg1, ~(mask << ofs)); - tcg_gen_or_i32(ret, ret, t1); - - tcg_temp_free_i32(t1); + } else { + tcg_gen_shli_i32(t1, arg2, ofs); } + tcg_gen_andi_i32(ret, arg1, ~(mask << ofs)); + tcg_gen_or_i32(ret, ret, t1); + + tcg_temp_free_i32(t1); } static inline void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, unsigned int ofs, unsigned int len) { + uint64_t mask; + TCGv_i64 t1; + + if (ofs == 0 && len == 64) { + tcg_gen_mov_i64(ret, arg2); + return; + } if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) { tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len); - } else { - uint64_t mask = (1ull << len) - 1; - TCGv_i64 t1 = tcg_temp_new_i64 (); + return; + } +#if TCG_TARGET_REG_BITS == 32 + if (ofs >= 32) { + tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), + TCGV_LOW(arg2), ofs - 32, len); + return; + } + if (ofs + len <= 32) { + tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1), + TCGV_LOW(arg2), ofs, len); + return; + } +#endif + + mask = (1ull << len) - 1; + t1 = tcg_temp_new_i64(); + + if (ofs + len < 64) { tcg_gen_andi_i64(t1, arg2, mask); tcg_gen_shli_i64(t1, t1, ofs); - tcg_gen_andi_i64(ret, arg1, ~(mask << ofs)); - tcg_gen_or_i64(ret, ret, t1); - - tcg_temp_free_i64(t1); + } else { + tcg_gen_shli_i64(t1, arg2, ofs); } + tcg_gen_andi_i64(ret, arg1, ~(mask << ofs)); + tcg_gen_or_i64(ret, ret, t1); + + tcg_temp_free_i64(t1); } /***************************************/ diff --git a/trace-events b/trace-events index 7f9cec4071..7d05c82279 100644 --- a/trace-events +++ b/trace-events @@ -61,6 +61,7 @@ multiwrite_cb(void *mcb, int ret) "mcb %p ret %d" bdrv_aio_multiwrite(void *mcb, int num_callbacks, int num_reqs) "mcb %p num_callbacks %d num_reqs %d" bdrv_aio_multiwrite_earlyfail(void *mcb) "mcb %p" bdrv_aio_multiwrite_latefail(void *mcb, int i) "mcb %p i %d" +bdrv_aio_discard(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p" bdrv_aio_flush(void *bs, void *opaque) "bs %p opaque %p" bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p" bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p" @@ -554,7 +555,7 @@ open_eth_desc_read(uint32_t addr, uint32_t v) "DESC[%04x] -> %08x" open_eth_desc_write(uint32_t addr, uint32_t v) "DESC[%04x] <- %08x" # hw/9pfs/virtio-9p.c -complete_pdu(uint16_t tag, uint8_t id, int err) "tag %d id %d err %d" +v9fs_rerror(uint16_t tag, uint8_t id, int err) "tag %d id %d err %d" v9fs_version(uint16_t tag, uint8_t id, 
int32_t msize, char* version) "tag %d id %d msize %d version %s" v9fs_version_return(uint16_t tag, uint8_t id, int32_t msize, char* version) "tag %d id %d msize %d version %s" v9fs_attach(uint16_t tag, uint8_t id, int32_t fid, int32_t afid, char* uname, char* aname) "tag %u id %u fid %d afid %d uname %s aname %s" @@ -598,3 +599,28 @@ v9fs_xattrwalk_return(uint16_t tag, uint8_t id, int64_t size) "tag %d id %d size v9fs_xattrcreate(uint16_t tag, uint8_t id, int32_t fid, char* name, int64_t size, int flags) "tag %d id %d fid %d name %s size %"PRId64" flags %d" v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s" + +# target-sparc/mmu_helper.c +mmu_helper_dfault(uint64_t address, uint64_t context, int mmu_idx, uint32_t tl) "DFAULT at %"PRIx64" context %"PRIx64" mmu_idx=%d tl=%d" +mmu_helper_dprot(uint64_t address, uint64_t context, int mmu_idx, uint32_t tl) "DPROT at %"PRIx64" context %"PRIx64" mmu_idx=%d tl=%d" +mmu_helper_dmiss(uint64_t address, uint64_t context) "DMISS at %"PRIx64" context %"PRIx64"" +mmu_helper_tfault(uint64_t address, uint64_t context) "TFAULT at %"PRIx64" context %"PRIx64"" +mmu_helper_tmiss(uint64_t address, uint64_t context) "TMISS at %"PRIx64" context %"PRIx64"" +mmu_helper_get_phys_addr_code(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=%"PRIx64" secondary context=%"PRIx64" address=%"PRIx64"" +mmu_helper_get_phys_addr_data(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=%"PRIx64" secondary context=%"PRIx64" address=%"PRIx64"" +mmu_helper_mmu_fault(uint64_t address, uint64_t paddr, int mmu_idx, uint32_t tl, uint64_t prim_context, uint64_t sec_context) "Translate at %"PRIx64" -> %"PRIx64", mmu_idx=%d tl=%d primary context=%"PRIx64" secondary context=%"PRIx64"" + +# target-sparc/int_helper.c +int_helper_set_softint(uint32_t softint) "new %08x" +int_helper_clear_softint(uint32_t softint) "new %08x" +int_helper_write_softint(uint32_t softint) "new %08x" +int_helper_icache_freeze(void) "Instruction cache: freeze" +int_helper_dcache_freeze(void) "Data cache: freeze" + +# target-sparc/win_helper.c +win_helper_gregset_error(uint32_t pstate) "ERROR in get_gregset: active pstate bits=%x" +win_helper_switch_pstate(uint32_t pstate_regs, uint32_t new_pstate_regs) "change_pstate: switching regs old=%x new=%x" +win_helper_no_switch_pstate(uint32_t new_pstate_regs) "change_pstate: regs new=%x (unchanged)" +win_helper_wrpil(uint32_t psrpil, uint32_t new_pil) "old=%x new=%x" +win_helper_done(uint32_t tl) "tl=%d" +win_helper_retry(uint32_t tl) "tl=%d" diff --git a/ui/qemu-spice.h b/ui/qemu-spice.h index f34be69f52..c35b29c1f6 100644 --- a/ui/qemu-spice.h +++ b/ui/qemu-spice.h @@ -25,6 +25,7 @@ #include "qemu-option.h" #include "qemu-config.h" #include "qemu-char.h" +#include "monitor.h" extern int using_spice; @@ -37,7 +38,8 @@ int qemu_spice_set_passwd(const char *passwd, bool fail_if_connected, bool disconnect_if_connected); int qemu_spice_set_pw_expire(time_t expires); int qemu_spice_migrate_info(const char *hostname, int port, int tls_port, - const char *subject); + const char *subject, + MonitorCompletion cb, void *opaque); void do_info_spice_print(Monitor *mon, const QObject *data); void do_info_spice(Monitor *mon, QObject **ret_data); @@ -45,6 +47,7 @@ void do_info_spice(Monitor *mon, QObject **ret_data); int 
qemu_chr_open_spice(QemuOpts *opts, CharDriverState **_chr); #else /* CONFIG_SPICE */ +#include "monitor.h" #define using_spice 0 static inline int qemu_spice_set_passwd(const char *passwd, @@ -57,8 +60,13 @@ static inline int qemu_spice_set_pw_expire(time_t expires) { return -1; } -static inline int qemu_spice_migrate_info(const char *h, int p, int t, const char *s) -{ return -1; } +static inline int qemu_spice_migrate_info(const char *h, int p, int t, + const char *s, + MonitorCompletion cb, void *opaque) +{ + cb(opaque, NULL); + return -1; +} #endif /* CONFIG_SPICE */ diff --git a/ui/spice-core.c b/ui/spice-core.c index b33366e5d7..6d3dab6960 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -19,10 +19,10 @@ #include <spice-experimental.h> #include <netdb.h> -#include <pthread.h> #include "qemu-common.h" #include "qemu-spice.h" +#include "qemu-thread.h" #include "qemu-timer.h" #include "qemu-queue.h" #include "qemu-x509.h" @@ -45,7 +45,7 @@ static char *auth_passwd; static time_t auth_expires = TIME_MAX; int using_spice = 0; -static pthread_t me; +static QemuThread me; struct SpiceTimer { QEMUTimer *timer; @@ -133,7 +133,7 @@ static SpiceWatch *watch_add(int fd, int event_mask, SpiceWatchFunc func, void * static void watch_remove(SpiceWatch *watch) { - watch_update_mask(watch, 0); + qemu_set_fd_handler(watch->fd, NULL, NULL, NULL); QTAILQ_REMOVE(&watches, watch, next); g_free(watch); } @@ -229,7 +229,7 @@ static void channel_event(int event, SpiceChannelEventInfo *info) * thread and grab the iothread lock if so before calling qemu * functions. */ - bool need_lock = !pthread_equal(me, pthread_self()); + bool need_lock = !qemu_thread_is_self(&me); if (need_lock) { qemu_mutex_lock_iothread(); } @@ -288,6 +288,38 @@ static SpiceCoreInterface core_interface = { #endif }; +#ifdef SPICE_INTERFACE_MIGRATION +typedef struct SpiceMigration { + SpiceMigrateInstance sin; + struct { + MonitorCompletion *cb; + void *opaque; + } connect_complete; +} SpiceMigration; + +static void migrate_connect_complete_cb(SpiceMigrateInstance *sin); + +static const SpiceMigrateInterface migrate_interface = { + .base.type = SPICE_INTERFACE_MIGRATION, + .base.description = "migration", + .base.major_version = SPICE_INTERFACE_MIGRATION_MAJOR, + .base.minor_version = SPICE_INTERFACE_MIGRATION_MINOR, + .migrate_connect_complete = migrate_connect_complete_cb, + .migrate_end_complete = NULL, +}; + +static SpiceMigration spice_migrate; + +static void migrate_connect_complete_cb(SpiceMigrateInstance *sin) +{ + SpiceMigration *sm = container_of(sin, SpiceMigration, sin); + if (sm->connect_complete.cb) { + sm->connect_complete.cb(sm->connect_complete.opaque, NULL); + } + sm->connect_complete.cb = NULL; +} +#endif + /* config string parsing */ static int name2enum(const char *string, const char *table[], int entries) @@ -413,7 +445,7 @@ void do_info_spice(Monitor *mon, QObject **ret_data) int port, tls_port; char version_string[20]; /* 12 = |255.255.255\0| is the max */ - if (!spice_server) { + if (!spice_server || !opts) { *ret_data = qobject_from_jsonf("{ 'enabled': false }"); return; } @@ -449,18 +481,39 @@ static void migration_state_notifier(Notifier *notifier, void *data) { MigrationState *s = data; - if (migration_has_finished(s)) { + if (migration_is_active(s)) { +#ifdef SPICE_INTERFACE_MIGRATION + spice_server_migrate_start(spice_server); +#endif + } else if (migration_has_finished(s)) { #if SPICE_SERVER_VERSION >= 0x000701 /* 0.7.1 */ +#ifndef SPICE_INTERFACE_MIGRATION spice_server_migrate_switch(spice_server); 
+#else + spice_server_migrate_end(spice_server, true); + } else if (migration_has_failed(s)) { + spice_server_migrate_end(spice_server, false); +#endif #endif } } int qemu_spice_migrate_info(const char *hostname, int port, int tls_port, - const char *subject) + const char *subject, + MonitorCompletion *cb, void *opaque) { - return spice_server_migrate_info(spice_server, hostname, - port, tls_port, subject); + int ret; +#ifdef SPICE_INTERFACE_MIGRATION + spice_migrate.connect_complete.cb = cb; + spice_migrate.connect_complete.opaque = opaque; + ret = spice_server_migrate_connect(spice_server, hostname, + port, tls_port, subject); +#else + ret = spice_server_migrate_info(spice_server, hostname, + port, tls_port, subject); + cb(opaque, NULL); +#endif + return ret; } static int add_channel(const char *name, const char *value, void *opaque) @@ -503,7 +556,7 @@ void qemu_spice_init(void) spice_image_compression_t compression; spice_wan_compression_t wan_compr; - me = pthread_self(); + qemu_thread_get_self(&me); if (!opts) { return; @@ -650,6 +703,11 @@ void qemu_spice_init(void) migration_state.notify = migration_state_notifier; add_migration_state_change_notifier(&migration_state); +#ifdef SPICE_INTERFACE_MIGRATION + spice_migrate.sin.base.sif = &migrate_interface.base; + spice_migrate.connect_complete.cb = NULL; + qemu_spice_add_interface(&spice_migrate.sin.base); +#endif qemu_spice_input_init(); qemu_spice_audio_init(); |
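
The vis_helper.c hunks above change the VIS helpers from reading and writing the implicit DT0/DT1 temporaries in CPUState to pure functions of explicit 64-bit operands that return their result, so the translator can pass fpr values directly (the fsrc1/fsrc2 and lddf/stdf hunks show the gen_load_fpr_D()/gen_store_fpr_D() pattern). The new helper_pdist() added at the end of the file follows the same convention and threads the running sum through as its first argument. A minimal standalone sketch of that accumulation, in plain C outside the QEMU tree; the values in main() are only illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sum of absolute differences of the eight byte lanes of src1 and src2,
 * accumulated on top of the caller-supplied running sum, mirroring the
 * new helper_pdist() in target-sparc/vis_helper.c. */
static uint64_t pdist(uint64_t sum, uint64_t src1, uint64_t src2)
{
    int i;

    for (i = 0; i < 8; i++) {
        int s1 = (src1 >> (56 - i * 8)) & 0xff;
        int s2 = (src2 >> (56 - i * 8)) & 0xff;

        /* Absolute value of the per-byte difference. */
        s1 -= s2;
        if (s1 < 0) {
            s1 = -s1;
        }
        sum += s1;
    }
    return sum;
}

int main(void)
{
    /* Eight lanes, each |0x10 - 0x01| = 15, on top of an initial sum
     * of 2: prints 122. */
    printf("%" PRIu64 "\n",
           pdist(2, 0x1010101010101010ULL, 0x0101010101010101ULL));
    return 0;
}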
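
helper_fpack16(), also new in that hunk, converts four signed 16-bit fixed-point lanes of rs2 into unsigned bytes: each lane is shifted left by the scale field the helper reads from GSR bits 6:3, the seven fractional bits are dropped with an arithmetic right shift, and the result is clamped to 0..255. A sketch of that per-lane conversion, assuming (as the helper itself does) that right-shifting a negative int32_t is an arithmetic shift:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack four signed 16-bit fixed-point lanes of rs2 into four unsigned
 * bytes, using the 4-bit scale factor at GSR bits 6:3, as in the new
 * helper_fpack16(). */
static uint32_t fpack16(uint64_t gsr, uint64_t rs2)
{
    int scale = (gsr >> 3) & 0xf;
    uint32_t ret = 0;
    int byte;

    for (byte = 0; byte < 4; byte++) {
        int16_t src = rs2 >> (byte * 16);
        int32_t scaled = src << scale;     /* apply the GSR scale factor */
        int32_t from_fixed = scaled >> 7;  /* drop the fractional bits   */
        uint32_t val = from_fixed < 0 ? 0 :
                       from_fixed > 255 ? 255 : from_fixed;

        ret |= val << (8 * byte);          /* lane n goes to result byte n */
    }
    return ret;
}

int main(void)
{
    /* Scale factor 3: each 0x0100 lane becomes (256 << 3) >> 7 = 16,
     * so this prints 10101010. */
    printf("%08" PRIx32 "\n", fpack16(3u << 3, 0x0100010001000100ULL));
    return 0;
}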
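
helper_bshuffle() reads eight 4-bit selectors from the upper half of the GSR value and uses each one to pick a byte out of the 16-byte concatenation of src1 (the more significant half) and src2. The patch does this by indexing a 16-byte union with a host-endianness correction; the sketch below uses shifts instead, which should be equivalent and host-endian neutral, while keeping exactly the byte ordering the helper's loop implements:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Byte n of the src1:src2 concatenation, where n == 0 names the most
 * significant byte of src1 and n == 15 the least significant byte of
 * src2, matching the numbering the helper's union indexing produces. */
static uint8_t concat_byte(uint64_t src1, uint64_t src2, unsigned n)
{
    uint64_t word = (n < 8) ? src1 : src2;

    return (word >> (56 - (n & 7) * 8)) & 0xff;
}

/* bshuffle: byte i of the result (counting up from the least significant
 * byte) is the concatenated-source byte selected by the 4-bit field of
 * GSR.mask starting at bit 28 - 4*i, as in the new helper_bshuffle(). */
static uint64_t bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2)
{
    uint32_t mask = gsr >> 32;
    uint64_t r = 0;
    unsigned i;

    for (i = 0; i < 8; i++) {
        unsigned e = (mask >> (28 - i * 4)) & 0xf;

        r |= (uint64_t)concat_byte(src1, src2, e) << (8 * i);
    }
    return r;
}

int main(void)
{
    /* Selector fields 8..15 pick the eight bytes of src2; with the
     * mapping above this prints ffeeddccbbaa9988. */
    uint64_t gsr = (uint64_t)0x89abcdefu << 32;

    printf("%016" PRIx64 "\n",
           bshuffle(gsr, 0x0011223344556677ULL, 0x8899aabbccddeeffULL));
    return 0;
}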
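
In win_helper.c the window helpers now receive the CPU state as an explicit argument, and the old static set_cwp()/cwp_inc()/cwp_dec() wrappers collapse into the public cpu_set_cwp()/cpu_cwp_inc()/cpu_cwp_dec() functions. The 32-bit helper_save() and helper_restore() still do the same bookkeeping: step the current window pointer down or up modulo the number of windows and trap if the target window is marked invalid in WIM. A toy model of just that arithmetic, with plain variables standing in for the CPUState fields and return codes standing in for the overflow/underflow traps:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the sparc32 window bookkeeping in helper_save()/
 * helper_restore(): wrap the window index modulo nwindows and check the
 * window-invalid mask.  The real helpers operate on CPUState and raise
 * TT_WIN_OVF/TT_WIN_UNF instead of returning a flag. */
struct win_state {
    int nwindows;   /* number of register windows */
    int cwp;        /* current window pointer     */
    uint32_t wim;   /* window invalid mask        */
};

static int cwp_inc(const struct win_state *s, int cwp)
{
    return cwp >= s->nwindows ? cwp - s->nwindows : cwp;
}

static int cwp_dec(const struct win_state *s, int cwp)
{
    return cwp < 0 ? cwp + s->nwindows : cwp;
}

/* save: move to the next-lower window; report overflow if it is invalid. */
static int do_save(struct win_state *s)
{
    int cwp = cwp_dec(s, s->cwp - 1);

    if (s->wim & (1u << cwp)) {
        return -1;              /* window overflow trap in the helper */
    }
    s->cwp = cwp;
    return 0;
}

/* restore: move back to the next-higher window; report underflow. */
static int do_restore(struct win_state *s)
{
    int cwp = cwp_inc(s, s->cwp + 1);

    if (s->wim & (1u << cwp)) {
        return -1;              /* window underflow trap in the helper */
    }
    s->cwp = cwp;
    return 0;
}

int main(void)
{
    struct win_state s = { .nwindows = 8, .cwp = 0, .wim = 1u << 7 };

    printf("save from cwp 0: %s\n", do_save(&s) ? "overflow" : "ok");
    s.cwp = 6;
    printf("save from cwp 6: %s, cwp now %d\n",
           do_save(&s) ? "overflow" : "ok", s.cwp);
    printf("restore: %s, cwp now %d\n",
           do_restore(&s) ? "underflow" : "ok", s.cwp);
    return 0;
}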
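
The tcg-op.h hunk reworks tcg_gen_deposit_i32()/_i64(): a whole-register deposit becomes a plain move, a 64-bit deposit confined to one half of the register is done on that half alone on 32-bit hosts, and the generic expansion skips the pre-shift masking of arg2 when the field reaches the top bit, since the left shift already discards the bits above it. A scalar model of the 32-bit expansion; the deposit semantics themselves (insert the low len bits of arg2 into arg1 at bit ofs) are as in the existing code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Scalar model of the generic tcg_gen_deposit_i32() expansion: insert
 * the low 'len' bits of arg2 into arg1 starting at bit 'ofs'. */
static uint32_t deposit32(uint32_t arg1, uint32_t arg2,
                          unsigned ofs, unsigned len)
{
    uint32_t mask, t1;

    if (ofs == 0 && len == 32) {
        /* Whole-register deposit is just a move of arg2. */
        return arg2;
    }

    mask = (1u << len) - 1;
    if (ofs + len < 32) {
        /* Clear the bits of arg2 above the field before shifting. */
        t1 = (arg2 & mask) << ofs;
    } else {
        /* The field reaches the top bit: the shift alone already
         * discards everything above it, so skip the masking. */
        t1 = arg2 << ofs;
    }
    return (arg1 & ~(mask << ofs)) | t1;
}

int main(void)
{
    /* Insert 0xab into bits 15:8 of 0x11223344: prints 1122ab44. */
    printf("%08" PRIx32 "\n", deposit32(0x11223344u, 0xabu, 8, 8));
    return 0;
}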
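
Finally, qemu_spice_migrate_info() now takes a MonitorCompletion callback plus an opaque pointer: with a spice server that provides the migration interface, the callback is parked in the new SpiceMigration struct and fired from migrate_connect_complete_cb() when the connect attempt finishes, while on the fallback paths (older servers, or the !CONFIG_SPICE stub in ui/qemu-spice.h) it is invoked immediately so the monitor command still completes. The sketch below models only that completion contract; MonitorCompletion is reduced to a stand-in typedef, and migrate_info()/on_connect_complete() are illustrative names, not QEMU or libspice API:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for QEMU's MonitorCompletion callback type. */
typedef void MonitorCompletion(void *opaque, void *ret_data);

/* Parked completion, like the SpiceMigration.connect_complete fields
 * added to ui/spice-core.c. */
static struct {
    MonitorCompletion *cb;
    void *opaque;
} connect_complete;

/* Called when the (hypothetical) asynchronous connect finishes; fires
 * the parked completion once, like migrate_connect_complete_cb(). */
static void on_connect_complete(void)
{
    if (connect_complete.cb) {
        connect_complete.cb(connect_complete.opaque, NULL);
    }
    connect_complete.cb = NULL;
}

/* Illustrative entry point modelled on qemu_spice_migrate_info(): either
 * park the completion until the async path calls back, or, when no async
 * interface is available, complete immediately so the caller never hangs. */
static int migrate_info(int have_async_interface,
                        MonitorCompletion *cb, void *opaque)
{
    if (have_async_interface) {
        connect_complete.cb = cb;
        connect_complete.opaque = opaque;
        return 0;               /* on_connect_complete() fires later */
    }
    cb(opaque, NULL);           /* synchronous fallback path */
    return 0;
}

static void done(void *opaque, void *ret_data)
{
    (void)ret_data;
    printf("completed: %s\n", (const char *)opaque);
}

int main(void)
{
    migrate_info(0, done, "sync fallback");
    migrate_info(1, done, "async path");
    on_connect_complete();      /* simulate the server callback */
    return 0;
}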