51 files changed, 2448 insertions, 449 deletions
diff --git a/CODING_STYLE b/CODING_STYLE
index f53180bf3f..2fa0c0b65b 100644
--- a/CODING_STYLE
+++ b/CODING_STYLE
@@ -116,3 +116,10 @@ if (a == 1) {
 Rationale: Yoda conditions (as in 'if (1 == a)') are awkward to read.
 Besides, good compilers already warn users when '==' is mis-typed as '=',
 even when the constant is on the right.
+
+7. Comment style
+
+We use traditional C-style /* */ comments and avoid // comments.
+
+Rationale: The // form is valid in C99, so this is purely a matter of
+consistency of style. The checkpatch script will warn you about this.
diff --git a/block/curl.c b/block/curl.c
index 2939cc77e9..e83dcd8f50 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -135,6 +135,7 @@ typedef struct BDRVCURLState {
     char *cookie;
     bool accept_range;
     AioContext *aio_context;
+    QemuMutex mutex;
     char *username;
     char *password;
     char *proxyusername;
@@ -333,6 +334,7 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
     return FIND_RET_NONE;
 }
 
+/* Called with s->mutex held. */
 static void curl_multi_check_completion(BDRVCURLState *s)
 {
     int msgs_in_queue;
@@ -374,7 +376,9 @@ static void curl_multi_check_completion(BDRVCURLState *s)
                 continue;
             }
 
+            qemu_mutex_unlock(&s->mutex);
             acb->common.cb(acb->common.opaque, -EPROTO);
+            qemu_mutex_lock(&s->mutex);
             qemu_aio_unref(acb);
             state->acb[i] = NULL;
         }
@@ -386,6 +390,7 @@ static void curl_multi_check_completion(BDRVCURLState *s)
     }
 }
 
+/* Called with s->mutex held. */
 static void curl_multi_do_locked(CURLState *s)
 {
     CURLSocket *socket, *next_socket;
@@ -409,19 +414,19 @@ static void curl_multi_do(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }
 
 static void curl_multi_read(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
     curl_multi_check_completion(s->s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }
 
 static void curl_multi_timeout_do(void *arg)
@@ -434,11 +439,11 @@ static void curl_multi_timeout_do(void *arg)
         return;
     }
 
-    aio_context_acquire(s->aio_context);
+    qemu_mutex_lock(&s->mutex);
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
     curl_multi_check_completion(s);
-    aio_context_release(s->aio_context);
+    qemu_mutex_unlock(&s->mutex);
 #else
     abort();
 #endif
@@ -771,6 +776,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     curl_easy_cleanup(state->curl);
     state->curl = NULL;
 
+    qemu_mutex_init(&s->mutex);
     curl_attach_aio_context(bs, bdrv_get_aio_context(bs));
 
     qemu_opts_del(opts);
@@ -801,12 +807,11 @@ static void curl_readv_bh_cb(void *p)
     CURLAIOCB *acb = p;
     BlockDriverState *bs = acb->common.bs;
     BDRVCURLState *s = bs->opaque;
-    AioContext *ctx = bdrv_get_aio_context(bs);
 
     size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
     size_t end;
 
-    aio_context_acquire(ctx);
+    qemu_mutex_lock(&s->mutex);
 
     // In case we have the requested data already (e.g. read-ahead),
     // we can just call the callback and be done.
@@ -854,7 +859,7 @@ static void curl_readv_bh_cb(void *p)
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
 out:
-    aio_context_release(ctx);
+    qemu_mutex_unlock(&s->mutex);
     if (ret != -EINPROGRESS) {
         acb->common.cb(acb->common.opaque, ret);
         qemu_aio_unref(acb);
@@ -883,6 +888,7 @@ static void curl_close(BlockDriverState *bs)
     DPRINTF("CURL: Close\n");
 
     curl_detach_aio_context(bs);
+    qemu_mutex_destroy(&s->mutex);
 
     g_free(s->cookie);
     g_free(s->url);
diff --git a/block/iscsi.c b/block/iscsi.c
index c4f813bfd2..76319a1a6e 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -58,6 +58,7 @@ typedef struct IscsiLun {
     int events;
    QEMUTimer *nop_timer;
     QEMUTimer *event_timer;
+    QemuMutex mutex;
     struct scsi_inquiry_logical_block_provisioning lbp;
     struct scsi_inquiry_block_limits bl;
     unsigned char *zeroblock;
@@ -252,6 +253,7 @@ static int iscsi_translate_sense(struct scsi_sense *sense)
     return ret;
 }
 
+/* Called (via iscsi_service) with QemuMutex held. */
 static void
 iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                     void *command_data, void *opaque)
@@ -352,6 +354,7 @@ static const AIOCBInfo iscsi_aiocb_info = {
 static void iscsi_process_read(void *arg);
 static void iscsi_process_write(void *arg);
 
+/* Called with QemuMutex held. */
 static void
 iscsi_set_events(IscsiLun *iscsilun)
 {
@@ -395,10 +398,10 @@ iscsi_process_read(void *arg)
 {
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_service(iscsi, POLLIN);
     iscsi_set_events(iscsilun);
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void
@@ -407,10 +410,10 @@ iscsi_process_write(void *arg)
 {
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_service(iscsi, POLLOUT);
     iscsi_set_events(iscsilun);
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
@@ -589,6 +592,7 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     uint64_t lba;
     uint32_t num_sectors;
     bool fua = flags & BDRV_REQ_FUA;
+    int r = 0;
 
     if (fua) {
         assert(iscsilun->dpofua);
@@ -604,6 +608,7 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     lba = sector_qemu2lun(sector_num, iscsilun);
     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsilun->use_16_for_rw) {
 #if LIBISCSI_API_VERSION >= (20160603)
@@ -640,7 +645,9 @@ retry:
 #endif
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -655,12 +662,15 @@ retry:
 
     if (iTask.status != SCSI_STATUS_GOOD) {
         iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
 
@@ -693,18 +703,21 @@ static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
         goto out;
     }
 
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
                                   sector_qemu2lun(sector_num, iscsilun),
                                   8 + 16, iscsi_co_generic_cb,
                                   &iTask) == NULL) {
         ret = -ENOMEM;
-        goto out;
+        goto out_unlock;
     }
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.do_retry) {
@@ -721,20 +734,20 @@ retry:
          * because the device is busy or the cmd is not
          * supported) we pretend all blocks are allocated
          * for backwards compatibility */
-        goto out;
+        goto out_unlock;
     }
 
     lbas = scsi_datain_unmarshall(iTask.task);
     if (lbas == NULL) {
         ret = -EIO;
-        goto out;
+        goto out_unlock;
     }
 
     lbasd = &lbas->descriptors[0];
 
     if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
         ret = -EIO;
-        goto out;
+        goto out_unlock;
     }
 
     *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);
@@ -756,6 +769,8 @@ retry:
     if (*pnum > nb_sectors) {
         *pnum = nb_sectors;
     }
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
 out:
     if (iTask.task != NULL) {
         scsi_free_scsi_task(iTask.task);
@@ -818,6 +833,7 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsilun->use_16_for_rw) {
 #if LIBISCSI_API_VERSION >= (20160603)
@@ -855,7 +871,9 @@ retry:
 #endif
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -867,6 +885,7 @@ retry:
         iTask.complete = 0;
         goto retry;
     }
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     if (iTask.status != SCSI_STATUS_GOOD) {
         return iTask.err_code;
@@ -881,6 +900,7 @@ static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
     struct IscsiTask iTask;
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0,
                                       0, iscsi_co_generic_cb, &iTask) == NULL) {
@@ -889,7 +909,9 @@ retry:
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -901,6 +923,7 @@ retry:
         iTask.complete = 0;
         goto retry;
     }
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     if (iTask.status != SCSI_STATUS_GOOD) {
         return iTask.err_code;
@@ -910,6 +933,7 @@ retry:
 }
 
 #ifdef __linux__
+/* Called (via iscsi_service) with QemuMutex held. */
 static void
 iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
                    void *command_data, void *opaque)
@@ -1034,6 +1058,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     acb->task->expxferlen = acb->ioh->dxfer_len;
 
     data.size = 0;
+    qemu_mutex_lock(&iscsilun->mutex);
     if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
         if (acb->ioh->iovec_count == 0) {
             data.data = acb->ioh->dxferp;
@@ -1049,6 +1074,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
                                  iscsi_aio_ioctl_cb,
                                  (data.size > 0) ? &data : NULL,
                                  acb) != 0) {
+        qemu_mutex_unlock(&iscsilun->mutex);
         scsi_free_scsi_task(acb->task);
         qemu_aio_unref(acb);
         return NULL;
@@ -1068,6 +1094,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     }
 
     iscsi_set_events(iscsilun);
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     return &acb->common;
 }
@@ -1092,6 +1119,7 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     IscsiLun *iscsilun = bs->opaque;
     struct IscsiTask iTask;
     struct unmap_list list;
+    int r = 0;
 
     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
         return -ENOTSUP;
@@ -1106,15 +1134,19 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     list.num = count / iscsilun->block_size;
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1,
                          iscsi_co_generic_cb, &iTask) == NULL) {
-        return -ENOMEM;
+        r = -ENOMEM;
+        goto out_unlock;
     }
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -1131,17 +1163,20 @@ retry:
         /* the target might fail with a check condition if
          * it is not happy with the alignment of the UNMAP request
          * we silently fail in this case */
-        return 0;
+        goto out_unlock;
     }
 
     if (iTask.status != SCSI_STATUS_GOOD) {
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
                                count >> BDRV_SECTOR_BITS);
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
 static int
@@ -1153,6 +1188,7 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
     uint64_t lba;
     uint32_t nb_blocks;
     bool use_16_for_ws = iscsilun->use_16_for_rw;
+    int r = 0;
 
     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
         return -ENOTSUP;
@@ -1186,6 +1222,7 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
         }
     }
 
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_co_init_iscsitask(iscsilun, &iTask);
 retry:
     if (use_16_for_ws) {
@@ -1205,7 +1242,9 @@ retry:
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.status == SCSI_STATUS_CHECK_CONDITION &&
@@ -1215,7 +1254,8 @@ retry:
         /* WRITE SAME is not supported by the target */
         iscsilun->has_write_same = false;
         scsi_free_scsi_task(iTask.task);
-        return -ENOTSUP;
+        r = -ENOTSUP;
+        goto out_unlock;
     }
 
     if (iTask.task != NULL) {
@@ -1231,7 +1271,8 @@ retry:
     if (iTask.status != SCSI_STATUS_GOOD) {
         iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
                                    count >> BDRV_SECTOR_BITS);
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     if (flags & BDRV_REQ_MAY_UNMAP) {
@@ -1242,7 +1283,9 @@ retry:
                                       count >> BDRV_SECTOR_BITS);
     }
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
 static void apply_chap(struct iscsi_context *iscsi, QemuOpts *opts,
@@ -1331,7 +1374,7 @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
@@ -1344,7 +1387,7 @@ static void iscsi_nop_timed_event(void *opaque)
     iscsi_set_events(iscsilun);
 
 out:
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
@@ -1890,6 +1933,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
     scsi_free_scsi_task(task);
     task = NULL;
 
+    qemu_mutex_init(&iscsilun->mutex);
     iscsi_attach_aio_context(bs, iscsilun->aio_context);
 
     /* Guess the internal cluster (page) size of the iscsi target by the means
@@ -1935,6 +1979,7 @@ static void iscsi_close(BlockDriverState *bs)
     iscsi_destroy_context(iscsi);
     g_free(iscsilun->zeroblock);
     iscsi_allocmap_free(iscsilun);
+    qemu_mutex_destroy(&iscsilun->mutex);
     memset(iscsilun, 0, sizeof(IscsiLun));
 }
 
diff --git a/block/mirror.c b/block/mirror.c
index 3d50857300..1b34b366d0 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -386,7 +386,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
                    nb_chunks * sectors_per_chunk);
     bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
     while (nb_chunks > 0 && sector_num < end) {
-        int ret;
+        int64_t ret;
         int io_sectors, io_sectors_acct;
         BlockDriverState *file;
         enum MirrorMethod {
diff --git a/block/nfs.c b/block/nfs.c
index ffb54be065..890d5d4aff 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -54,6 +54,7 @@ typedef struct NFSClient {
     int events;
     bool has_zero_init;
     AioContext *aio_context;
+    QemuMutex mutex;
     blkcnt_t st_blocks;
     bool cache_used;
     NFSServer *server;
@@ -191,6 +192,7 @@ static void nfs_parse_filename(const char *filename, QDict *options,
 static void nfs_process_read(void *arg);
 static void nfs_process_write(void *arg);
 
+/* Called with QemuMutex held. */
 static void nfs_set_events(NFSClient *client)
 {
     int ev = nfs_which_events(client->context);
@@ -209,20 +211,20 @@ static void nfs_process_read(void *arg)
 {
     NFSClient *client = arg;
 
-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLIN);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }
 
 static void nfs_process_write(void *arg)
 {
     NFSClient *client = arg;
 
-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLOUT);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }
 
 static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
@@ -242,6 +244,7 @@ static void nfs_co_generic_bh_cb(void *opaque)
     aio_co_wake(task->co);
 }
 
+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
                   void *private_data)
@@ -273,12 +276,15 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_pread_async(client->context, client->fh,
                         offset, bytes, nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -317,9 +323,11 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_pwrite_async(client->context, client->fh,
                          offset, bytes, buf,
                          nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         if (my_buffer) {
             g_free(buf);
         }
@@ -327,6 +335,7 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -349,12 +358,15 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
 
     nfs_co_init_task(bs, &task);
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
                         &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
        return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -440,6 +452,7 @@ static void nfs_file_close(BlockDriverState *bs)
 {
     NFSClient *client = bs->opaque;
     nfs_client_close(client);
+    qemu_mutex_destroy(&client->mutex);
 }
 
 static NFSServer *nfs_config(QDict *options, Error **errp)
@@ -647,6 +660,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
     if (ret < 0) {
         return ret;
     }
+    qemu_mutex_init(&client->mutex);
     bs->total_sectors = ret;
     ret = 0;
     return ret;
@@ -702,6 +716,7 @@ static int nfs_has_zero_init(BlockDriverState *bs)
     return client->has_zero_init;
 }
 
+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
                                void *private_data)
@@ -5894,6 +5894,7 @@ case "$target_name" in
     TARGET_BASE_ARCH=i386
   ;;
   alpha)
+    mttcg="yes"
   ;;
   arm|armeb)
     TARGET_ARCH=arm
@@ -769,14 +769,13 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
     mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
     if (memory_region_is_unassigned(mr)) {
-        CPUClass *cc = CPU_GET_CLASS(cpu);
-
-        if (cc->do_unassigned_access) {
-            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
-        } else {
-            report_bad_exec(cpu, addr);
-            exit(1);
-        }
+        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
+        /* The CPU's unassigned access hook might have longjumped out
+         * with an exception. If it didn't (or there was no hook) then
+         * we can't proceed further.
+ */ + report_bad_exec(cpu, addr); + exit(1); } p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend); return qemu_ram_addr_from_host_nofail(p); diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 485a006aa7..7af14e29aa 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -7492,7 +7492,7 @@ uint64_t float64_to_uint64_round_to_zero(float64 a, float_status *status) { signed char current_rounding_mode = status->float_rounding_mode; set_float_rounding_mode(float_round_to_zero, status); - int64_t v = float64_to_uint64(a, status); + uint64_t v = float64_to_uint64(a, status); set_float_rounding_mode(current_rounding_mode, status); return v; } diff --git a/hw/acpi/tco.c b/hw/acpi/tco.c index 8ce7daf23a..b4adac88cd 100644 --- a/hw/acpi/tco.c +++ b/hw/acpi/tco.c @@ -49,6 +49,7 @@ static inline void tco_timer_reload(TCOIORegs *tr) static inline void tco_timer_stop(TCOIORegs *tr) { tr->expire_time = -1; + timer_del(tr->tco_timer); } static void tco_timer_expired(void *opaque) diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index 2e641a3989..9ed22d5bc8 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -86,6 +86,11 @@ static void bcm2835_peripherals_init(Object *obj) object_property_add_const_link(OBJECT(&s->property), "dma-mr", OBJECT(&s->gpu_bus_mr), &error_abort); + /* Random Number Generator */ + object_initialize(&s->rng, sizeof(s->rng), TYPE_BCM2835_RNG); + object_property_add_child(obj, "rng", OBJECT(&s->rng), NULL); + qdev_set_parent_bus(DEVICE(&s->rng), sysbus_get_default()); + /* Extended Mass Media Controller */ object_initialize(&s->sdhci, sizeof(s->sdhci), TYPE_SYSBUS_SDHCI); object_property_add_child(obj, "sdhci", OBJECT(&s->sdhci), NULL); @@ -226,6 +231,16 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->property), 0, qdev_get_gpio_in(DEVICE(&s->mboxes), MBOX_CHAN_PROPERTY)); + /* Random Number Generator */ + object_property_set_bool(OBJECT(&s->rng), true, "realized", &err); + if (err) { + error_propagate(errp, err); + return; + } + + memory_region_add_subregion(&s->peri_mr, RNG_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rng), 0)); + /* Extended Mass Media Controller */ object_property_set_int(OBJECT(&s->sdhci), BCM2835_SDHC_CAPAREG, "capareg", &err); diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c index be3c96d21e..1d2b50cc4e 100644 --- a/hw/arm/exynos4210.c +++ b/hw/arm/exynos4210.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu-common.h" +#include "qemu/log.h" #include "cpu.h" #include "hw/boards.h" #include "sysemu/sysemu.h" @@ -74,6 +75,9 @@ /* PMU SFR base address */ #define EXYNOS4210_PMU_BASE_ADDR 0x10020000 +/* Clock controller SFR base address */ +#define EXYNOS4210_CLK_BASE_ADDR 0x10030000 + /* Display controllers (FIMD) */ #define EXYNOS4210_FIMD0_BASE_ADDR 0x11C00000 @@ -138,6 +142,16 @@ void exynos4210_write_secondary(ARMCPU *cpu, info->smp_loader_start); } +static uint64_t exynos4210_calc_affinity(int cpu) +{ + uint64_t mp_affinity; + + /* Exynos4210 has 0x9 as cluster ID */ + mp_affinity = (0x9 << ARM_AFF1_SHIFT) | cpu; + + return mp_affinity; +} + Exynos4210State *exynos4210_init(MemoryRegion *system_mem, unsigned long ram_size) { @@ -163,6 +177,8 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem, } s->cpu[n] = ARM_CPU(cpuobj); + object_property_set_int(cpuobj, exynos4210_calc_affinity(n), + "mp-affinity", &error_abort); object_property_set_int(cpuobj, 
EXYNOS4210_SMP_PRIVATE_BASE_ADDR, "reset-cbar", &error_abort); object_property_set_bool(cpuobj, true, "realized", &error_fatal); @@ -297,6 +313,8 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem, */ sysbus_create_simple("exynos4210.pmu", EXYNOS4210_PMU_BASE_ADDR, NULL); + sysbus_create_simple("exynos4210.clk", EXYNOS4210_CLK_BASE_ADDR, NULL); + /* PWM */ sysbus_create_varargs("exynos4210.pwm", EXYNOS4210_PWM_BASE_ADDR, s->irq_table[exynos4210_get_irq(22, 0)], diff --git a/hw/arm/virt.c b/hw/arm/virt.c index f3440f2ccb..5f62a0321e 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -535,7 +535,6 @@ static void create_v2m(VirtMachineState *vms, qemu_irq *pic) static void create_gic(VirtMachineState *vms, qemu_irq *pic) { /* We create a standalone GIC */ - VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); DeviceState *gicdev; SysBusDevice *gicbusdev; const char *gictype; @@ -605,7 +604,7 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic) fdt_add_gic_node(vms); - if (type == 3 && !vmc->no_its) { + if (type == 3 && vms->its) { create_its(vms, gicdev); } else if (type == 2) { create_v2m(vms, pic); @@ -1378,6 +1377,7 @@ static void machvirt_init(MachineState *machine) } object_property_set_bool(cpuobj, true, "realized", NULL); + object_unref(cpuobj); } fdt_add_timer_nodes(vms); fdt_add_cpu_nodes(vms); @@ -1480,6 +1480,20 @@ static void virt_set_highmem(Object *obj, bool value, Error **errp) vms->highmem = value; } +static bool virt_get_its(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->its; +} + +static void virt_set_its(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->its = value; +} + static char *virt_get_gic_version(Object *obj, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); @@ -1540,6 +1554,7 @@ type_init(machvirt_machine_init); static void virt_2_9_instance_init(Object *obj) { VirtMachineState *vms = VIRT_MACHINE(obj); + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); /* EL3 is disabled by default on virt: this makes us consistent * between KVM and TCG for this board, and it also allows us to @@ -1579,6 +1594,19 @@ static void virt_2_9_instance_init(Object *obj) "Set GIC version. " "Valid values are 2, 3 and host", NULL); + if (vmc->no_its) { + vms->its = false; + } else { + /* Default allows ITS instantiation */ + vms->its = true; + object_property_add_bool(obj, "its", virt_get_its, + virt_set_its, NULL); + object_property_set_description(obj, "its", + "Set on/off to enable/disable " + "ITS instantiation", + NULL); + } + vms->memmap = a15memmap; vms->irqmap = a15irqmap; } diff --git a/hw/core/or-irq.c b/hw/core/or-irq.c index 1ac090d1a4..1485d5b285 100644 --- a/hw/core/or-irq.c +++ b/hw/core/or-irq.c @@ -89,6 +89,9 @@ static void or_irq_class_init(ObjectClass *klass, void *data) dc->props = or_irq_properties; dc->realize = or_irq_realize; dc->vmsd = &vmstate_or_irq; + + /* Reason: Needs to be wired up to work, e.g. 
see stm32f205_soc.c */ + dc->cannot_instantiate_with_device_add_yet = true; } static const TypeInfo or_irq_type_info = { diff --git a/hw/core/register.c b/hw/core/register.c index 4bfbc508de..dc335a79a9 100644 --- a/hw/core/register.c +++ b/hw/core/register.c @@ -59,6 +59,15 @@ static inline uint64_t register_read_val(RegisterInfo *reg) return 0; /* unreachable */ } +static inline uint64_t register_enabled_mask(int data_size, unsigned size) +{ + if (data_size < size) { + size = data_size; + } + + return MAKE_64BIT_MASK(0, size * 8); +} + void register_write(RegisterInfo *reg, uint64_t val, uint64_t we, const char *prefix, bool debug) { @@ -192,11 +201,7 @@ void register_write_memory(void *opaque, hwaddr addr, } /* Generate appropriate write enable mask */ - if (reg->data_size < size) { - we = MAKE_64BIT_MASK(0, reg->data_size * 8); - } else { - we = MAKE_64BIT_MASK(0, size * 8); - } + we = register_enabled_mask(reg->data_size, size); register_write(reg, value, we, reg_array->prefix, reg_array->debug); @@ -208,6 +213,7 @@ uint64_t register_read_memory(void *opaque, hwaddr addr, RegisterInfoArray *reg_array = opaque; RegisterInfo *reg = NULL; uint64_t read_val; + uint64_t re; int i; for (i = 0; i < reg_array->num_elements; i++) { @@ -223,7 +229,10 @@ uint64_t register_read_memory(void *opaque, hwaddr addr, return 0; } - read_val = register_read(reg, size * 8, reg_array->prefix, + /* Generate appropriate read enable mask */ + re = register_enabled_mask(reg->data_size, size); + + read_val = register_read(reg, re, reg_array->prefix, reg_array->debug); return extract64(read_val, 0, size * 8); @@ -274,9 +283,18 @@ void register_finalize_block(RegisterInfoArray *r_array) g_free(r_array); } +static void register_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + /* Reason: needs to be wired up to work */ + dc->cannot_instantiate_with_device_add_yet = true; +} + static const TypeInfo register_info = { .name = TYPE_REGISTER, .parent = TYPE_DEVICE, + .class_init = register_class_init, }; static void register_register_types(void) diff --git a/hw/display/milkymist-tmu2.c b/hw/display/milkymist-tmu2.c index 7528665510..59120ddb67 100644 --- a/hw/display/milkymist-tmu2.c +++ b/hw/display/milkymist-tmu2.c @@ -293,7 +293,7 @@ static void tmu2_start(MilkymistTMU2State *s) cpu_physical_memory_unmap(mesh, mesh_len, 0, mesh_len); /* Write back the OpenGL framebuffer to the QEMU framebuffer */ - fb_len = 2 * s->regs[R_DSTHRES] * s->regs[R_DSTVRES]; + fb_len = 2ULL * s->regs[R_DSTHRES] * s->regs[R_DSTVRES]; fb = cpu_physical_memory_map(s->regs[R_DSTFBUF], &fb_len, 1); if (fb == NULL) { glDeleteTextures(1, &texture); diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c index 521aac3cc6..8e5a9d8a3e 100644 --- a/hw/intc/arm_gic.c +++ b/hw/intc/arm_gic.c @@ -156,17 +156,6 @@ static void gic_set_irq_11mpcore(GICState *s, int irq, int level, } } -static void gic_set_irq_nvic(GICState *s, int irq, int level, - int cm, int target) -{ - if (level) { - GIC_SET_LEVEL(irq, cm); - GIC_SET_PENDING(irq, target); - } else { - GIC_CLEAR_LEVEL(irq, cm); - } -} - static void gic_set_irq_generic(GICState *s, int irq, int level, int cm, int target) { @@ -214,8 +203,6 @@ static void gic_set_irq(void *opaque, int irq, int level) if (s->revision == REV_11MPCORE) { gic_set_irq_11mpcore(s, irq, level, cm, target); - } else if (s->revision == REV_NVIC) { - gic_set_irq_nvic(s, irq, level, cm, target); } else { gic_set_irq_generic(s, irq, level, cm, target); } @@ -367,7 +354,7 @@ uint32_t 
gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs) return 1023; } - if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { + if (s->revision == REV_11MPCORE) { /* Clear pending flags for both level and edge triggered interrupts. * Level triggered IRQs will be reasserted once they become inactive. */ @@ -589,11 +576,6 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) DPRINTF("Set %d pending mask %x\n", irq, cm); GIC_SET_PENDING(irq, cm); } - } else if (s->revision == REV_NVIC) { - if (GIC_TEST_LEVEL(irq, cm)) { - DPRINTF("Set nvic %d pending mask %x\n", irq, cm); - GIC_SET_PENDING(irq, cm); - } } group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm); @@ -768,7 +750,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) } else if (offset < 0xf10) { goto bad_reg; } else if (offset < 0xf30) { - if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { + if (s->revision == REV_11MPCORE) { goto bad_reg; } @@ -802,9 +784,6 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) case 2: res = gic_id_gicv2[(offset - 0xfd0) >> 2]; break; - case REV_NVIC: - /* Shouldn't be able to get here */ - abort(); default: res = 0; } @@ -1028,7 +1007,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, continue; /* Ignore Non-secure access of Group0 IRQ */ } - if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { + if (s->revision == REV_11MPCORE) { if (value & (1 << (i * 2))) { GIC_SET_MODEL(irq + i); } else { @@ -1046,7 +1025,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, goto bad_reg; } else if (offset < 0xf20) { /* GICD_CPENDSGIRn */ - if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { + if (s->revision == REV_11MPCORE) { goto bad_reg; } irq = (offset - 0xf10); @@ -1060,7 +1039,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, } } else if (offset < 0xf30) { /* GICD_SPENDSGIRn */ - if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { + if (s->revision == REV_11MPCORE) { goto bad_reg; } irq = (offset - 0xf20); diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index 4a8df44fb1..70f1134823 100644 --- a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -99,9 +99,7 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler, * [N+32..N+63] PPIs for CPU 1 * ... */ - if (s->revision != REV_NVIC) { - i += (GIC_INTERNAL * s->num_cpu); - } + i += (GIC_INTERNAL * s->num_cpu); qdev_init_gpio_in(DEVICE(s), handler, i); for (i = 0; i < s->num_cpu; i++) { @@ -121,16 +119,12 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler, memory_region_init_io(&s->iomem, OBJECT(s), ops, s, "gic_dist", 0x1000); sysbus_init_mmio(sbd, &s->iomem); - if (s->revision != REV_NVIC) { - /* This is the main CPU interface "for this core". It is always - * present because it is required by both software emulation and KVM. - * NVIC is not handled here because its CPU interface is different, - * neither it can use KVM. - */ - memory_region_init_io(&s->cpuiomem[0], OBJECT(s), ops ? &ops[1] : NULL, - s, "gic_cpu", s->revision == 2 ? 0x2000 : 0x100); - sysbus_init_mmio(sbd, &s->cpuiomem[0]); - } + /* This is the main CPU interface "for this core". It is always + * present because it is required by both software emulation and KVM. + */ + memory_region_init_io(&s->cpuiomem[0], OBJECT(s), ops ? &ops[1] : NULL, + s, "gic_cpu", s->revision == 2 ? 
0x2000 : 0x100); + sysbus_init_mmio(sbd, &s->cpuiomem[0]); } static void arm_gic_common_realize(DeviceState *dev, Error **errp) @@ -162,7 +156,7 @@ static void arm_gic_common_realize(DeviceState *dev, Error **errp) } if (s->security_extn && - (s->revision == REV_11MPCORE || s->revision == REV_NVIC)) { + (s->revision == REV_11MPCORE)) { error_setg(errp, "this GIC revision does not implement " "the security extensions"); return; @@ -255,7 +249,6 @@ static Property arm_gic_common_properties[] = { DEFINE_PROP_UINT32("num-irq", GICState, num_irq, 32), /* Revision can be 1 or 2 for GIC architecture specification * versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC. - * (Internally, 0xffffffff also indicates "not a GIC but an NVIC".) */ DEFINE_PROP_UINT32("revision", GICState, revision, 1), /* True if the GIC should implement the security extensions */ diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index fe5c303de9..76097b4830 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -17,47 +17,90 @@ #include "hw/sysbus.h" #include "qemu/timer.h" #include "hw/arm/arm.h" +#include "target/arm/cpu.h" #include "exec/address-spaces.h" -#include "gic_internal.h" #include "qemu/log.h" +#include "trace.h" + +/* IRQ number counting: + * + * the num-irq property counts the number of external IRQ lines + * + * NVICState::num_irq counts the total number of exceptions + * (external IRQs, the 15 internal exceptions including reset, + * and one for the unused exception number 0). + * + * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines. + * + * NVIC_MAX_VECTORS is the highest permitted number of exceptions. + * + * Iterating through all exceptions should typically be done with + * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0. + * + * The external qemu_irq lines are the NVIC's external IRQ lines, + * so line 0 is exception 16. + * + * In the terminology of the architecture manual, "interrupts" are + * a subcategory of exception referring to the external interrupts + * (which are exception numbers NVIC_FIRST_IRQ and upward). + * For historical reasons QEMU tends to use "interrupt" and + * "exception" more or less interchangeably. + */ +#define NVIC_FIRST_IRQ 16 +#define NVIC_MAX_VECTORS 512 +#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ) + +/* Effective running priority of the CPU when no exception is active + * (higher than the highest possible priority value) + */ +#define NVIC_NOEXC_PRIO 0x100 + +typedef struct VecInfo { + /* Exception priorities can range from -3 to 255; only the unmodifiable + * priority values for RESET, NMI and HardFault can be negative. + */ + int16_t prio; + uint8_t enabled; + uint8_t pending; + uint8_t active; + uint8_t level; /* exceptions <=15 never set level */ +} VecInfo; + +typedef struct NVICState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ -typedef struct { - GICState gic; ARMCPU *cpu; + + VecInfo vectors[NVIC_MAX_VECTORS]; + uint32_t prigroup; + + /* vectpending and exception_prio are both cached state that can + * be recalculated from the vectors[] array and the prigroup field. 
+ */ + unsigned int vectpending; /* highest prio pending enabled exception */ + int exception_prio; /* group prio of the highest prio active exception */ + struct { uint32_t control; uint32_t reload; int64_t tick; QEMUTimer *timer; } systick; + MemoryRegion sysregmem; - MemoryRegion gic_iomem_alias; MemoryRegion container; + uint32_t num_irq; + qemu_irq excpout; qemu_irq sysresetreq; -} nvic_state; +} NVICState; #define TYPE_NVIC "armv7m_nvic" -/** - * NVICClass: - * @parent_reset: the parent class' reset handler. - * - * A model of the v7M NVIC and System Controller - */ -typedef struct NVICClass { - /*< private >*/ - ARMGICClass parent_class; - /*< public >*/ - DeviceRealize parent_realize; - void (*parent_reset)(DeviceState *dev); -} NVICClass; - -#define NVIC_CLASS(klass) \ - OBJECT_CLASS_CHECK(NVICClass, (klass), TYPE_NVIC) -#define NVIC_GET_CLASS(obj) \ - OBJECT_GET_CLASS(NVICClass, (obj), TYPE_NVIC) + #define NVIC(obj) \ - OBJECT_CHECK(nvic_state, (obj), TYPE_NVIC) + OBJECT_CHECK(NVICState, (obj), TYPE_NVIC) static const uint8_t nvic_id[] = { 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 @@ -74,7 +117,7 @@ static const uint8_t nvic_id[] = { int system_clock_scale; /* Conversion factor from qemu timer to SysTick frequencies. */ -static inline int64_t systick_scale(nvic_state *s) +static inline int64_t systick_scale(NVICState *s) { if (s->systick.control & SYSTICK_CLKSOURCE) return system_clock_scale; @@ -82,7 +125,7 @@ static inline int64_t systick_scale(nvic_state *s) return 1000; } -static void systick_reload(nvic_state *s, int reset) +static void systick_reload(NVICState *s, int reset) { /* The Cortex-M3 Devices Generic User Guide says that "When the * ENABLE bit is set to 1, the counter loads the RELOAD value from the @@ -101,7 +144,7 @@ static void systick_reload(nvic_state *s, int reset) static void systick_timer_tick(void * opaque) { - nvic_state *s = (nvic_state *)opaque; + NVICState *s = (NVICState *)opaque; s->systick.control |= SYSTICK_COUNTFLAG; if (s->systick.control & SYSTICK_TICKINT) { /* Trigger the interrupt. */ @@ -114,7 +157,7 @@ static void systick_timer_tick(void * opaque) } } -static void systick_reset(nvic_state *s) +static void systick_reset(NVICState *s) { s->systick.control = 0; s->systick.reload = 0; @@ -122,47 +165,351 @@ static void systick_reset(nvic_state *s) timer_del(s->systick.timer); } -/* The external routines use the hardware vector numbering, ie. the first - IRQ is #16. The internal GIC routines use #32 as the first IRQ. */ +static int nvic_pending_prio(NVICState *s) +{ + /* return the priority of the current pending interrupt, + * or NVIC_NOEXC_PRIO if no interrupt is pending + */ + return s->vectpending ? s->vectors[s->vectpending].prio : NVIC_NOEXC_PRIO; +} + +/* Return the value of the ISCR RETTOBASE bit: + * 1 if there is exactly one active exception + * 0 if there is more than one active exception + * UNKNOWN if there are no active exceptions (we choose 1, + * which matches the choice Cortex-M3 is documented as making). + * + * NB: some versions of the documentation talk about this + * counting "active exceptions other than the one shown by IPSR"; + * this is only different in the obscure corner case where guest + * code has manually deactivated an exception and is about + * to fail an exception-return integrity check. The definition + * above is the one from the v8M ARM ARM and is also in line + * with the behaviour documented for the Cortex-M3. 
+ */ +static bool nvic_rettobase(NVICState *s) +{ + int irq, nhand = 0; + + for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) { + if (s->vectors[irq].active) { + nhand++; + if (nhand == 2) { + return 0; + } + } + } + + return 1; +} + +/* Return the value of the ISCR ISRPENDING bit: + * 1 if an external interrupt is pending + * 0 if no external interrupt is pending + */ +static bool nvic_isrpending(NVICState *s) +{ + int irq; + + /* We can shortcut if the highest priority pending interrupt + * happens to be external or if there is nothing pending. + */ + if (s->vectpending > NVIC_FIRST_IRQ) { + return true; + } + if (s->vectpending == 0) { + return false; + } + + for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) { + if (s->vectors[irq].pending) { + return true; + } + } + return false; +} + +/* Return a mask word which clears the subpriority bits from + * a priority value for an M-profile exception, leaving only + * the group priority. + */ +static inline uint32_t nvic_gprio_mask(NVICState *s) +{ + return ~0U << (s->prigroup + 1); +} + +/* Recompute vectpending and exception_prio */ +static void nvic_recompute_state(NVICState *s) +{ + int i; + int pend_prio = NVIC_NOEXC_PRIO; + int active_prio = NVIC_NOEXC_PRIO; + int pend_irq = 0; + + for (i = 1; i < s->num_irq; i++) { + VecInfo *vec = &s->vectors[i]; + + if (vec->enabled && vec->pending && vec->prio < pend_prio) { + pend_prio = vec->prio; + pend_irq = i; + } + if (vec->active && vec->prio < active_prio) { + active_prio = vec->prio; + } + } + + s->vectpending = pend_irq; + s->exception_prio = active_prio & nvic_gprio_mask(s); + + trace_nvic_recompute_state(s->vectpending, s->exception_prio); +} + +/* Return the current execution priority of the CPU + * (equivalent to the pseudocode ExecutionPriority function). + * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO. + */ +static inline int nvic_exec_prio(NVICState *s) +{ + CPUARMState *env = &s->cpu->env; + int running; + + if (env->daif & PSTATE_F) { /* FAULTMASK */ + running = -1; + } else if (env->daif & PSTATE_I) { /* PRIMASK */ + running = 0; + } else if (env->v7m.basepri > 0) { + running = env->v7m.basepri & nvic_gprio_mask(s); + } else { + running = NVIC_NOEXC_PRIO; /* lower than any possible priority */ + } + /* consider priority of active handler */ + return MIN(running, s->exception_prio); +} + +bool armv7m_nvic_can_take_pending_exception(void *opaque) +{ + NVICState *s = opaque; + + return nvic_exec_prio(s) > nvic_pending_prio(s); +} + +/* caller must call nvic_irq_update() after this */ +static void set_prio(NVICState *s, unsigned irq, uint8_t prio) +{ + assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */ + assert(irq < s->num_irq); + + s->vectors[irq].prio = prio; + + trace_nvic_set_prio(irq, prio); +} + +/* Recompute state and assert irq line accordingly. + * Must be called after changes to: + * vec->active, vec->enabled, vec->pending or vec->prio for any vector + * prigroup + */ +static void nvic_irq_update(NVICState *s) +{ + int lvl; + int pend_prio; + + nvic_recompute_state(s); + pend_prio = nvic_pending_prio(s); + + /* Raise NVIC output if this IRQ would be taken, except that we + * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which + * will be checked for in arm_v7m_cpu_exec_interrupt()); changes + * to those CPU registers don't cause us to recalculate the NVIC + * pending info. 
+ */ + lvl = (pend_prio < s->exception_prio); + trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl); + qemu_set_irq(s->excpout, lvl); +} + +static void armv7m_nvic_clear_pending(void *opaque, int irq) +{ + NVICState *s = (NVICState *)opaque; + VecInfo *vec; + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + + vec = &s->vectors[irq]; + trace_nvic_clear_pending(irq, vec->enabled, vec->prio); + if (vec->pending) { + vec->pending = 0; + nvic_irq_update(s); + } +} + void armv7m_nvic_set_pending(void *opaque, int irq) { - nvic_state *s = (nvic_state *)opaque; - if (irq >= 16) - irq += 16; - gic_set_pending_private(&s->gic, 0, irq); + NVICState *s = (NVICState *)opaque; + VecInfo *vec; + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + + vec = &s->vectors[irq]; + trace_nvic_set_pending(irq, vec->enabled, vec->prio); + + + if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) { + /* If a synchronous exception is pending then it may be + * escalated to HardFault if: + * * it is equal or lower priority to current execution + * * it is disabled + * (ie we need to take it immediately but we can't do so). + * Asynchronous exceptions (and interrupts) simply remain pending. + * + * For QEMU, we don't have any imprecise (asynchronous) faults, + * so we can assume that PREFETCH_ABORT and DATA_ABORT are always + * synchronous. + * Debug exceptions are awkward because only Debug exceptions + * resulting from the BKPT instruction should be escalated, + * but we don't currently implement any Debug exceptions other + * than those that result from BKPT, so we treat all debug exceptions + * as needing escalation. + * + * This all means we can identify whether to escalate based only on + * the exception number and don't (yet) need the caller to explicitly + * tell us whether this exception is synchronous or not. + */ + int running = nvic_exec_prio(s); + bool escalate = false; + + if (vec->prio >= running) { + trace_nvic_escalate_prio(irq, vec->prio, running); + escalate = true; + } else if (!vec->enabled) { + trace_nvic_escalate_disabled(irq); + escalate = true; + } + + if (escalate) { + if (running < 0) { + /* We want to escalate to HardFault but we can't take a + * synchronous HardFault at this point either. This is a + * Lockup condition due to a guest bug. We don't model + * Lockup, so report via cpu_abort() instead. + */ + cpu_abort(&s->cpu->parent_obj, + "Lockup: can't escalate %d to HardFault " + "(current priority %d)\n", irq, running); + } + + /* We can do the escalation, so we take HardFault instead */ + irq = ARMV7M_EXCP_HARD; + vec = &s->vectors[irq]; + s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK; + } + } + + if (!vec->pending) { + vec->pending = 1; + nvic_irq_update(s); + } } /* Make pending IRQ active. 
*/ -int armv7m_nvic_acknowledge_irq(void *opaque) +void armv7m_nvic_acknowledge_irq(void *opaque) { - nvic_state *s = (nvic_state *)opaque; - uint32_t irq; - - irq = gic_acknowledge_irq(&s->gic, 0, MEMTXATTRS_UNSPECIFIED); - if (irq == 1023) - hw_error("Interrupt but no vector\n"); - if (irq >= 32) - irq -= 16; - return irq; + NVICState *s = (NVICState *)opaque; + CPUARMState *env = &s->cpu->env; + const int pending = s->vectpending; + const int running = nvic_exec_prio(s); + int pendgroupprio; + VecInfo *vec; + + assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq); + + vec = &s->vectors[pending]; + + assert(vec->enabled); + assert(vec->pending); + + pendgroupprio = vec->prio & nvic_gprio_mask(s); + assert(pendgroupprio < running); + + trace_nvic_acknowledge_irq(pending, vec->prio); + + vec->active = 1; + vec->pending = 0; + + env->v7m.exception = s->vectpending; + + nvic_irq_update(s); } -void armv7m_nvic_complete_irq(void *opaque, int irq) +int armv7m_nvic_complete_irq(void *opaque, int irq) { - nvic_state *s = (nvic_state *)opaque; - if (irq >= 16) - irq += 16; - gic_complete_irq(&s->gic, 0, irq, MEMTXATTRS_UNSPECIFIED); + NVICState *s = (NVICState *)opaque; + VecInfo *vec; + int ret; + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + + vec = &s->vectors[irq]; + + trace_nvic_complete_irq(irq); + + if (!vec->active) { + /* Tell the caller this was an illegal exception return */ + return -1; + } + + ret = nvic_rettobase(s); + + vec->active = 0; + if (vec->level) { + /* Re-pend the exception if it's still held high; only + * happens for extenal IRQs + */ + assert(irq >= NVIC_FIRST_IRQ); + vec->pending = 1; + } + + nvic_irq_update(s); + + return ret; } -static uint32_t nvic_readl(nvic_state *s, uint32_t offset) +/* callback when external interrupt line is changed */ +static void set_irq_level(void *opaque, int n, int level) +{ + NVICState *s = opaque; + VecInfo *vec; + + n += NVIC_FIRST_IRQ; + + assert(n >= NVIC_FIRST_IRQ && n < s->num_irq); + + trace_nvic_set_irq_level(n, level); + + /* The pending status of an external interrupt is + * latched on rising edge and exception handler return. + * + * Pulsing the IRQ will always run the handler + * once, and the handler will re-run until the + * level is low when the handler completes. + */ + vec = &s->vectors[n]; + if (level != vec->level) { + vec->level = level; + if (level) { + armv7m_nvic_set_pending(s, n); + } + } +} + +static uint32_t nvic_readl(NVICState *s, uint32_t offset) { ARMCPU *cpu = s->cpu; uint32_t val; - int irq; switch (offset) { case 4: /* Interrupt Control Type. */ - return (s->num_irq / 32) - 1; + return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1; case 0x10: /* SysTick Control and Status. */ val = s->systick.control; s->systick.control &= ~SYSTICK_COUNTFLAG; @@ -192,38 +539,34 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset) case 0xd04: /* Interrupt Control State. 
*/ /* VECTACTIVE */ val = cpu->env.v7m.exception; - if (val == 1023) { - val = 0; - } else if (val >= 32) { - val -= 16; - } /* VECTPENDING */ - if (s->gic.current_pending[0] != 1023) - val |= (s->gic.current_pending[0] << 12); - /* ISRPENDING and RETTOBASE */ - for (irq = 32; irq < s->num_irq; irq++) { - if (s->gic.irq_state[irq].pending) { - val |= (1 << 22); - break; - } - if (irq != cpu->env.v7m.exception && s->gic.irq_state[irq].active) { - val |= (1 << 11); - } + val |= (s->vectpending & 0xff) << 12; + /* ISRPENDING - set if any external IRQ is pending */ + if (nvic_isrpending(s)) { + val |= (1 << 22); + } + /* RETTOBASE - set if only one handler is active */ + if (nvic_rettobase(s)) { + val |= (1 << 11); } /* PENDSTSET */ - if (s->gic.irq_state[ARMV7M_EXCP_SYSTICK].pending) + if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) { val |= (1 << 26); + } /* PENDSVSET */ - if (s->gic.irq_state[ARMV7M_EXCP_PENDSV].pending) + if (s->vectors[ARMV7M_EXCP_PENDSV].pending) { val |= (1 << 28); + } /* NMIPENDSET */ - if (s->gic.irq_state[ARMV7M_EXCP_NMI].pending) + if (s->vectors[ARMV7M_EXCP_NMI].pending) { val |= (1 << 31); + } + /* ISRPREEMPT not implemented */ return val; case 0xd08: /* Vector Table Offset. */ return cpu->env.v7m.vecbase; case 0xd0c: /* Application Interrupt/Reset Control. */ - return 0xfa050000; + return 0xfa050000 | (s->prigroup << 8); case 0xd10: /* System Control. */ /* TODO: Implement SLEEPONEXIT. */ return 0; @@ -231,20 +574,48 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset) return cpu->env.v7m.ccr; case 0xd24: /* System Handler Status. */ val = 0; - if (s->gic.irq_state[ARMV7M_EXCP_MEM].active) val |= (1 << 0); - if (s->gic.irq_state[ARMV7M_EXCP_BUS].active) val |= (1 << 1); - if (s->gic.irq_state[ARMV7M_EXCP_USAGE].active) val |= (1 << 3); - if (s->gic.irq_state[ARMV7M_EXCP_SVC].active) val |= (1 << 7); - if (s->gic.irq_state[ARMV7M_EXCP_DEBUG].active) val |= (1 << 8); - if (s->gic.irq_state[ARMV7M_EXCP_PENDSV].active) val |= (1 << 10); - if (s->gic.irq_state[ARMV7M_EXCP_SYSTICK].active) val |= (1 << 11); - if (s->gic.irq_state[ARMV7M_EXCP_USAGE].pending) val |= (1 << 12); - if (s->gic.irq_state[ARMV7M_EXCP_MEM].pending) val |= (1 << 13); - if (s->gic.irq_state[ARMV7M_EXCP_BUS].pending) val |= (1 << 14); - if (s->gic.irq_state[ARMV7M_EXCP_SVC].pending) val |= (1 << 15); - if (s->gic.irq_state[ARMV7M_EXCP_MEM].enabled) val |= (1 << 16); - if (s->gic.irq_state[ARMV7M_EXCP_BUS].enabled) val |= (1 << 17); - if (s->gic.irq_state[ARMV7M_EXCP_USAGE].enabled) val |= (1 << 18); + if (s->vectors[ARMV7M_EXCP_MEM].active) { + val |= (1 << 0); + } + if (s->vectors[ARMV7M_EXCP_BUS].active) { + val |= (1 << 1); + } + if (s->vectors[ARMV7M_EXCP_USAGE].active) { + val |= (1 << 3); + } + if (s->vectors[ARMV7M_EXCP_SVC].active) { + val |= (1 << 7); + } + if (s->vectors[ARMV7M_EXCP_DEBUG].active) { + val |= (1 << 8); + } + if (s->vectors[ARMV7M_EXCP_PENDSV].active) { + val |= (1 << 10); + } + if (s->vectors[ARMV7M_EXCP_SYSTICK].active) { + val |= (1 << 11); + } + if (s->vectors[ARMV7M_EXCP_USAGE].pending) { + val |= (1 << 12); + } + if (s->vectors[ARMV7M_EXCP_MEM].pending) { + val |= (1 << 13); + } + if (s->vectors[ARMV7M_EXCP_BUS].pending) { + val |= (1 << 14); + } + if (s->vectors[ARMV7M_EXCP_SVC].pending) { + val |= (1 << 15); + } + if (s->vectors[ARMV7M_EXCP_MEM].enabled) { + val |= (1 << 16); + } + if (s->vectors[ARMV7M_EXCP_BUS].enabled) { + val |= (1 << 17); + } + if (s->vectors[ARMV7M_EXCP_USAGE].enabled) { + val |= (1 << 18); + } return val; case 0xd28: /* Configurable 
Fault Status. */ return cpu->env.v7m.cfsr; @@ -294,7 +665,7 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset) } } -static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) +static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value) { ARMCPU *cpu = s->cpu; uint32_t oldval; @@ -338,14 +709,12 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) if (value & (1 << 28)) { armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV); } else if (value & (1 << 27)) { - s->gic.irq_state[ARMV7M_EXCP_PENDSV].pending = 0; - gic_update(&s->gic); + armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV); } if (value & (1 << 26)) { armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK); } else if (value & (1 << 25)) { - s->gic.irq_state[ARMV7M_EXCP_SYSTICK].pending = 0; - gic_update(&s->gic); + armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK); } break; case 0xd08: /* Vector Table Offset. */ @@ -357,14 +726,17 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) qemu_irq_pulse(s->sysresetreq); } if (value & 2) { - qemu_log_mask(LOG_UNIMP, "VECTCLRACTIVE unimplemented\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "Setting VECTCLRACTIVE when not in DEBUG mode " + "is UNPREDICTABLE\n"); } if (value & 1) { - qemu_log_mask(LOG_UNIMP, "AIRCR system reset unimplemented\n"); - } - if (value & 0x700) { - qemu_log_mask(LOG_UNIMP, "PRIGROUP unimplemented\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "Setting VECTRESET when not in DEBUG mode " + "is UNPREDICTABLE\n"); } + s->prigroup = extract32(value, 8, 3); + nvic_irq_update(s); } break; case 0xd10: /* System Control. */ @@ -383,11 +755,21 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) cpu->env.v7m.ccr = value; break; case 0xd24: /* System Handler Control. */ - /* TODO: Real hardware allows you to set/clear the active bits - under some circumstances. We don't implement this. */ - s->gic.irq_state[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; - s->gic.irq_state[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; - s->gic.irq_state[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0; + s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0; + s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0; + s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0; + s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0; + s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0; + s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0; + s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0; + s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0; + s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0; + s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0; + s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0; + s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; + s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; + s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0; + nvic_irq_update(s); break; case 0xd28: /* Configurable Fault Status. 
*/ cpu->env.v7m.cfsr &= ~value; /* W1C */ @@ -409,13 +791,16 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) "NVIC: Aux fault status registers unimplemented\n"); break; case 0xf00: /* Software Triggered Interrupt Register */ + { /* user mode can only write to STIR if CCR.USERSETMPEND permits it */ - if ((value & 0x1ff) < s->num_irq && + int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ; + if (excnum < s->num_irq && (arm_current_el(&cpu->env) || (cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK))) { - gic_set_pending_private(&s->gic, 0, value & 0x1ff); + armv7m_nvic_set_pending(s, excnum); } break; + } default: qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad write offset 0x%x\n", offset); @@ -425,46 +810,142 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) static uint64_t nvic_sysreg_read(void *opaque, hwaddr addr, unsigned size) { - nvic_state *s = (nvic_state *)opaque; + NVICState *s = (NVICState *)opaque; uint32_t offset = addr; - int i; + unsigned i, startvec, end; uint32_t val; switch (offset) { + /* reads of set and clear both return the status */ + case 0x100 ... 0x13f: /* NVIC Set enable */ + offset += 0x80; + /* fall through */ + case 0x180 ... 0x1bf: /* NVIC Clear enable */ + val = 0; + startvec = offset - 0x180 + NVIC_FIRST_IRQ; /* vector # */ + + for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + if (s->vectors[startvec + i].enabled) { + val |= (1 << i); + } + } + break; + case 0x200 ... 0x23f: /* NVIC Set pend */ + offset += 0x80; + /* fall through */ + case 0x280 ... 0x2bf: /* NVIC Clear pend */ + val = 0; + startvec = offset - 0x280 + NVIC_FIRST_IRQ; /* vector # */ + for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + if (s->vectors[startvec + i].pending) { + val |= (1 << i); + } + } + break; + case 0x300 ... 0x33f: /* NVIC Active */ + val = 0; + startvec = offset - 0x300 + NVIC_FIRST_IRQ; /* vector # */ + + for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + if (s->vectors[startvec + i].active) { + val |= (1 << i); + } + } + break; + case 0x400 ... 0x5ef: /* NVIC Priority */ + val = 0; + startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */ + + for (i = 0; i < size && startvec + i < s->num_irq; i++) { + val |= s->vectors[startvec + i].prio << (8 * i); + } + break; case 0xd18 ... 0xd23: /* System Handler Priority. */ val = 0; for (i = 0; i < size; i++) { - val |= s->gic.priority1[(offset - 0xd14) + i][0] << (i * 8); + val |= s->vectors[(offset - 0xd14) + i].prio << (i * 8); } - return val; + break; case 0xfe0 ... 0xfff: /* ID. */ if (offset & 3) { - return 0; + val = 0; + } else { + val = nvic_id[(offset - 0xfe0) >> 2]; + } + break; + default: + if (size == 4) { + val = nvic_readl(s, offset); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "NVIC: Bad read of size %d at offset 0x%x\n", + size, offset); + val = 0; } - return nvic_id[(offset - 0xfe0) >> 2]; - } - if (size == 4) { - return nvic_readl(s, offset); } - qemu_log_mask(LOG_GUEST_ERROR, - "NVIC: Bad read of size %d at offset 0x%x\n", size, offset); - return 0; + + trace_nvic_sysreg_read(addr, val, size); + return val; } static void nvic_sysreg_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { - nvic_state *s = (nvic_state *)opaque; + NVICState *s = (NVICState *)opaque; uint32_t offset = addr; - int i; + unsigned i, startvec, end; + unsigned setval = 0; + + trace_nvic_sysreg_write(addr, value, size); switch (offset) { + case 0x100 ... 
0x13f: /* NVIC Set enable */ + offset += 0x80; + setval = 1; + /* fall through */ + case 0x180 ... 0x1bf: /* NVIC Clear enable */ + startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; + + for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + if (value & (1 << i)) { + s->vectors[startvec + i].enabled = setval; + } + } + nvic_irq_update(s); + return; + case 0x200 ... 0x23f: /* NVIC Set pend */ + /* the special logic in armv7m_nvic_set_pending() + * is not needed since IRQs are never escalated + */ + offset += 0x80; + setval = 1; + /* fall through */ + case 0x280 ... 0x2bf: /* NVIC Clear pend */ + startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */ + + for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + if (value & (1 << i)) { + s->vectors[startvec + i].pending = setval; + } + } + nvic_irq_update(s); + return; + case 0x300 ... 0x33f: /* NVIC Active */ + return; /* R/O */ + case 0x400 ... 0x5ef: /* NVIC Priority */ + startvec = 8 * (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */ + + for (i = 0; i < size && startvec + i < s->num_irq; i++) { + set_prio(s, startvec + i, (value >> (i * 8)) & 0xff); + } + nvic_irq_update(s); + return; case 0xd18 ... 0xd23: /* System Handler Priority. */ for (i = 0; i < size; i++) { - s->gic.priority1[(offset - 0xd14) + i][0] = - (value >> (i * 8)) & 0xff; + unsigned hdlidx = (offset - 0xd14) + i; + set_prio(s, hdlidx, (value >> (i * 8)) & 0xff); } - gic_update(&s->gic); + nvic_irq_update(s); return; } if (size == 4) { @@ -481,61 +962,125 @@ static const MemoryRegionOps nvic_sysreg_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static const VMStateDescription vmstate_nvic = { - .name = "armv7m_nvic", +static int nvic_post_load(void *opaque, int version_id) +{ + NVICState *s = opaque; + unsigned i; + + /* Check for out of range priority settings */ + if (s->vectors[ARMV7M_EXCP_RESET].prio != -3 || + s->vectors[ARMV7M_EXCP_NMI].prio != -2 || + s->vectors[ARMV7M_EXCP_HARD].prio != -1) { + return 1; + } + for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) { + if (s->vectors[i].prio & ~0xff) { + return 1; + } + } + + nvic_recompute_state(s); + + return 0; +} + +static const VMStateDescription vmstate_VecInfo = { + .name = "armv7m_nvic_info", .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_UINT32(systick.control, nvic_state), - VMSTATE_UINT32(systick.reload, nvic_state), - VMSTATE_INT64(systick.tick, nvic_state), - VMSTATE_TIMER_PTR(systick.timer, nvic_state), + VMSTATE_INT16(prio, VecInfo), + VMSTATE_UINT8(enabled, VecInfo), + VMSTATE_UINT8(pending, VecInfo), + VMSTATE_UINT8(active, VecInfo), + VMSTATE_UINT8(level, VecInfo), VMSTATE_END_OF_LIST() } }; +static const VMStateDescription vmstate_nvic = { + .name = "armv7m_nvic", + .version_id = 3, + .minimum_version_id = 3, + .post_load = &nvic_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1, + vmstate_VecInfo, VecInfo), + VMSTATE_UINT32(systick.control, NVICState), + VMSTATE_UINT32(systick.reload, NVICState), + VMSTATE_INT64(systick.tick, NVICState), + VMSTATE_TIMER_PTR(systick.timer, NVICState), + VMSTATE_UINT32(prigroup, NVICState), + VMSTATE_END_OF_LIST() + } +}; + +static Property props_nvic[] = { + /* Number of external IRQ lines (so excluding the 16 internal exceptions) */ + DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64), + DEFINE_PROP_END_OF_LIST() +}; + static void armv7m_nvic_reset(DeviceState *dev) { - nvic_state *s = NVIC(dev); - NVICClass *nc = NVIC_GET_CLASS(s); 
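A side note on the PRIGROUP field written through AIRCR above: it splits each 8-bit priority that set_prio() stores into a group priority (bits [7:PRIGROUP+1]) and a subpriority (bits [PRIGROUP:0]), and only the group priority decides preemption. A minimal sketch of the grouping arithmetic, with a hypothetical helper name (the real computation lives in the priority-recomputation helpers of the rewritten armv7m_nvic.c):

    static int nvic_group_prio(int rawprio, unsigned prigroup)
    {
        if (rawprio < 0) {
            /* Reset/NMI/HardFault have fixed negative priorities
             * and are never subject to grouping.
             */
            return rawprio;
        }
        /* keep bits [7:prigroup+1], drop the subpriority bits */
        return rawprio & ~((1 << (prigroup + 1)) - 1);
    }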
- nc->parent_reset(dev); - /* Common GIC reset resets to disabled; the NVIC doesn't have - * per-CPU interfaces so mark our non-existent CPU interface - * as enabled by default, and with a priority mask which allows - * all interrupts through. + NVICState *s = NVIC(dev); + + s->vectors[ARMV7M_EXCP_NMI].enabled = 1; + s->vectors[ARMV7M_EXCP_HARD].enabled = 1; + /* MEM, BUS, and USAGE are enabled through + * the System Handler Control register + */ + s->vectors[ARMV7M_EXCP_SVC].enabled = 1; + s->vectors[ARMV7M_EXCP_DEBUG].enabled = 1; + s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1; + s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1; + + s->vectors[ARMV7M_EXCP_RESET].prio = -3; + s->vectors[ARMV7M_EXCP_NMI].prio = -2; + s->vectors[ARMV7M_EXCP_HARD].prio = -1; + + /* Strictly speaking the reset handler should be enabled. + * However, we don't simulate soft resets through the NVIC, + * and the reset vector should never be pended. + * So we leave it disabled to catch logic errors. */ - s->gic.cpu_ctlr[0] = GICC_CTLR_EN_GRP0; - s->gic.priority_mask[0] = 0x100; - /* The NVIC as a whole is always enabled. */ - s->gic.ctlr = 1; + + s->exception_prio = NVIC_NOEXC_PRIO; + s->vectpending = 0; + systick_reset(s); } static void armv7m_nvic_realize(DeviceState *dev, Error **errp) { - nvic_state *s = NVIC(dev); - NVICClass *nc = NVIC_GET_CLASS(s); - Error *local_err = NULL; + NVICState *s = NVIC(dev); s->cpu = ARM_CPU(qemu_get_cpu(0)); assert(s->cpu); - /* The NVIC always has only one CPU */ - s->gic.num_cpu = 1; - /* Tell the common code we're an NVIC */ - s->gic.revision = 0xffffffff; - s->num_irq = s->gic.num_irq; - nc->parent_realize(dev, &local_err); - if (local_err) { - error_propagate(errp, local_err); + + if (s->num_irq > NVIC_MAX_IRQ) { + error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq); return; } - gic_init_irqs_and_distributor(&s->gic); - /* The NVIC and system controller register area looks like this: - * 0..0xff : system control registers, including systick - * 0x100..0xcff : GIC-like registers - * 0xd00..0xfff : system control registers - * We use overlaying to put the GIC like registers - * over the top of the system control register region. + + qdev_init_gpio_in(dev, set_irq_level, s->num_irq); + + /* include space for internal exception vectors */ + s->num_irq += NVIC_FIRST_IRQ; + + /* The NVIC and System Control Space (SCS) starts at 0xe000e000 + * and looks like this: + * 0x004 - ICTR + * 0x010 - 0x1c - systick + * 0x100..0x7ec - NVIC + * 0x7f0..0xcff - Reserved + * 0xd00..0xd3c - SCS registers + * 0xd40..0xeff - Reserved or Not implemented + * 0xf00 - STIR + * + * At the moment there is only one thing in the container region, + * but we leave it in place to allow us to pull systick out into + * its own device object later. */ memory_region_init(&s->container, OBJECT(s), "nvic", 0x1000); /* The system register region goes at the bottom of the priority @@ -544,14 +1089,7 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp) memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s, "nvic_sysregs", 0x1000); memory_region_add_subregion(&s->container, 0, &s->sysregmem); - /* Alias the GIC region so we can get only the section of it - * we need, and layer it on top of the system register region. 
- */ - memory_region_init_alias(&s->gic_iomem_alias, OBJECT(s), - "nvic-gic", &s->gic.iomem, - 0x100, 0xc00); - memory_region_add_subregion_overlap(&s->container, 0x100, - &s->gic_iomem_alias, 1); + /* Map the whole thing into system memory at the location required * by the v7M architecture. */ @@ -567,36 +1105,31 @@ static void armv7m_nvic_instance_init(Object *obj) * any user-specified property setting, so just modify the * value in the GICState struct. */ - GICState *s = ARM_GIC_COMMON(obj); DeviceState *dev = DEVICE(obj); - nvic_state *nvic = NVIC(obj); - /* The ARM v7m may have anything from 0 to 496 external interrupt - * IRQ lines. We default to 64. Other boards may differ and should - * set the num-irq property appropriately. - */ - s->num_irq = 64; + NVICState *nvic = NVIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &nvic->excpout); qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1); } static void armv7m_nvic_class_init(ObjectClass *klass, void *data) { - NVICClass *nc = NVIC_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); - nc->parent_reset = dc->reset; - nc->parent_realize = dc->realize; dc->vmsd = &vmstate_nvic; + dc->props = props_nvic; dc->reset = armv7m_nvic_reset; dc->realize = armv7m_nvic_realize; } static const TypeInfo armv7m_nvic_info = { .name = TYPE_NVIC, - .parent = TYPE_ARM_GIC_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, .instance_init = armv7m_nvic_instance_init, - .instance_size = sizeof(nvic_state), + .instance_size = sizeof(NVICState), .class_init = armv7m_nvic_class_init, - .class_size = sizeof(NVICClass), + .class_size = sizeof(SysBusDeviceClass), }; static void armv7m_nvic_register_types(void) diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h index 3f311740da..7fe87b13de 100644 --- a/hw/intc/gic_internal.h +++ b/hw/intc/gic_internal.h @@ -25,9 +25,7 @@ #define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1))) -/* The NVIC has 16 internal vectors. However these are not exposed - through the normal GIC interface. */ -#define GIC_BASE_IRQ ((s->revision == REV_NVIC) ? 
32 : 0) +#define GIC_BASE_IRQ 0 #define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm) #define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm) @@ -75,7 +73,6 @@ /* The special cases for the revision property: */ #define REV_11MPCORE 0 -#define REV_NVIC 0xffffffff void gic_set_pending_private(GICState *s, int cpu, int irq); uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs); @@ -87,7 +84,7 @@ void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val, static inline bool gic_test_pending(GICState *s, int irq, int cm) { - if (s->revision == REV_NVIC || s->revision == REV_11MPCORE) { + if (s->revision == REV_11MPCORE) { return s->irq_state[irq].pending & cm; } else { /* Edge-triggered interrupts are marked pending on a rising edge, but diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 39a538d048..729c1288f1 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -161,3 +161,18 @@ gicv3_redist_write(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, gicv3_redist_badwrite(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 redistributor %x write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d: error" gicv3_redist_set_irq(uint32_t cpu, int irq, int level) "GICv3 redistributor %x interrupt %d level changed to %d" gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor %x pending SGI %d" + +# hw/intc/armv7m_nvic.c +nvic_recompute_state(int vectpending, int exception_prio) "NVIC state recomputed: vectpending %d exception_prio %d" +nvic_set_prio(int irq, uint8_t prio) "NVIC set irq %d priority %d" +nvic_irq_update(int vectpending, int pendprio, int exception_prio, int level) "NVIC vectpending %d pending prio %d exception_prio %d: setting irq line to %d" +nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to HardFault: insufficient priority %d >= %d" +nvic_escalate_disabled(int irq) "NVIC escalating irq %d to HardFault: disabled" +nvic_set_pending(int irq, int en, int prio) "NVIC set pending irq %d (enabled: %d priority %d)" +nvic_clear_pending(int irq, int en, int prio) "NVIC clear pending irq %d (enabled: %d priority %d)" +nvic_set_pending_level(int irq) "NVIC set pending: irq %d higher prio than vectpending: setting irq line to 1" +nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (prio %d)" +nvic_complete_irq(int irq) "NVIC complete IRQ %d" +nvic_set_irq_level(int irq, int level) "NVIC external irq %d level set to %d" +nvic_sysreg_read(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" +nvic_sysreg_write(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs index 898e4ccfb1..c8b489390f 100644 --- a/hw/misc/Makefile.objs +++ b/hw/misc/Makefile.objs @@ -26,7 +26,7 @@ obj-$(CONFIG_IVSHMEM) += ivshmem.o obj-$(CONFIG_REALVIEW) += arm_sysctl.o obj-$(CONFIG_NSERIES) += cbus.o obj-$(CONFIG_ECCMEMCTL) += eccmemctl.o -obj-$(CONFIG_EXYNOS4) += exynos4210_pmu.o +obj-$(CONFIG_EXYNOS4) += exynos4210_pmu.o exynos4210_clk.o obj-$(CONFIG_IMX) += imx_ccm.o obj-$(CONFIG_IMX) += imx31_ccm.o obj-$(CONFIG_IMX) += imx25_ccm.o @@ -42,6 +42,7 @@ obj-$(CONFIG_OMAP) += omap_sdrc.o obj-$(CONFIG_OMAP) += omap_tap.o obj-$(CONFIG_RASPI) += bcm2835_mbox.o obj-$(CONFIG_RASPI) += bcm2835_property.o +obj-$(CONFIG_RASPI) += bcm2835_rng.o obj-$(CONFIG_SLAVIO) += slavio_misc.o 
obj-$(CONFIG_ZYNQ) += zynq_slcr.o obj-$(CONFIG_ZYNQ) += zynq-xadc.o diff --git a/hw/misc/bcm2835_rng.c b/hw/misc/bcm2835_rng.c new file mode 100644 index 0000000000..4d62143b24 --- /dev/null +++ b/hw/misc/bcm2835_rng.c @@ -0,0 +1,149 @@ +/* + * BCM2835 Random Number Generator emulation + * + * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "crypto/random.h" +#include "hw/misc/bcm2835_rng.h" + +static uint32_t get_random_bytes(void) +{ + uint32_t res; + Error *err = NULL; + + if (qcrypto_random_bytes((uint8_t *)&res, sizeof(res), &err) < 0) { + /* On failure we don't want to return the guest a non-random + * value in case they're really using it for cryptographic + * purposes, so the best we can do is die here. + * This shouldn't happen unless something's broken. + * In theory we could implement this device's full FIFO + * and interrupt semantics and then just stop filling the + * FIFO. That's a lot of work, though, so we assume any + * errors are systematic problems and trust that if we didn't + * fail as the guest inited then we won't fail later on + * mid-run. + */ + error_report_err(err); + exit(1); + } + return res; +} + +static uint64_t bcm2835_rng_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835RngState *s = (BCM2835RngState *)opaque; + uint32_t res = 0; + + assert(size == 4); + + switch (offset) { + case 0x0: /* rng_ctrl */ + res = s->rng_ctrl; + break; + case 0x4: /* rng_status */ + res = s->rng_status | (1 << 24); + break; + case 0x8: /* rng_data */ + res = get_random_bytes(); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "bcm2835_rng_read: Bad offset %x\n", + (int)offset); + res = 0; + break; + } + + return res; +} + +static void bcm2835_rng_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835RngState *s = (BCM2835RngState *)opaque; + + assert(size == 4); + + switch (offset) { + case 0x0: /* rng_ctrl */ + s->rng_ctrl = value; + break; + case 0x4: /* rng_status */ + /* we shouldn't let the guest write to bits [31..20] */ + s->rng_status &= ~0xFFFFF; /* clear 20 lower bits */ + s->rng_status |= value & 0xFFFFF; /* set them to new value */ + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "bcm2835_rng_write: Bad offset %x\n", + (int)offset); + break; + } +} + +static const MemoryRegionOps bcm2835_rng_ops = { + .read = bcm2835_rng_read, + .write = bcm2835_rng_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_bcm2835_rng = { + .name = TYPE_BCM2835_RNG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(rng_ctrl, BCM2835RngState), + VMSTATE_UINT32(rng_status, BCM2835RngState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_rng_init(Object *obj) +{ + BCM2835RngState *s = BCM2835_RNG(obj); + + memory_region_init_io(&s->iomem, obj, &bcm2835_rng_ops, s, + TYPE_BCM2835_RNG, 0x10); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static void bcm2835_rng_reset(DeviceState *dev) +{ + BCM2835RngState *s = BCM2835_RNG(dev); + + s->rng_ctrl = 0; + s->rng_status = 0; +} + +static void bcm2835_rng_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2835_rng_reset; + dc->vmsd = &vmstate_bcm2835_rng; +} + +static TypeInfo bcm2835_rng_info = { + .name = 
TYPE_BCM2835_RNG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835RngState), + .class_init = bcm2835_rng_class_init, + .instance_init = bcm2835_rng_init, +}; + +static void bcm2835_rng_register_types(void) +{ + type_register_static(&bcm2835_rng_info); +} + +type_init(bcm2835_rng_register_types) diff --git a/hw/misc/exynos4210_clk.c b/hw/misc/exynos4210_clk.c new file mode 100644 index 0000000000..81862c0ada --- /dev/null +++ b/hw/misc/exynos4210_clk.c @@ -0,0 +1,164 @@ +/* + * Exynos4210 Clock Controller Emulation + * + * Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qemu/log.h" + +#define TYPE_EXYNOS4210_CLK "exynos4210.clk" +#define EXYNOS4210_CLK(obj) \ + OBJECT_CHECK(Exynos4210ClkState, (obj), TYPE_EXYNOS4210_CLK) + +#define CLK_PLL_LOCKED BIT(29) + +#define EXYNOS4210_CLK_REGS_MEM_SIZE 0x15104 + +typedef struct Exynos4210Reg { + const char *name; /* for debug only */ + uint32_t offset; + uint32_t reset_value; +} Exynos4210Reg; + +/* Clock controller register base: 0x10030000 */ +static const Exynos4210Reg exynos4210_clk_regs[] = { + {"EPLL_LOCK", 0xc010, 0x00000fff}, + {"VPLL_LOCK", 0xc020, 0x00000fff}, + {"EPLL_CON0", 0xc110, 0x00300301 | CLK_PLL_LOCKED}, + {"EPLL_CON1", 0xc114, 0x00000000}, + {"VPLL_CON0", 0xc120, 0x00240201 | CLK_PLL_LOCKED}, + {"VPLL_CON1", 0xc124, 0x66010464}, + {"APLL_LOCK", 0x14000, 0x00000fff}, + {"MPLL_LOCK", 0x14004, 0x00000fff}, + {"APLL_CON0", 0x14100, 0x00c80601 | CLK_PLL_LOCKED}, + {"APLL_CON1", 0x14104, 0x0000001c}, + {"MPLL_CON0", 0x14108, 0x00c80601 | CLK_PLL_LOCKED}, + {"MPLL_CON1", 0x1410c, 0x0000001c}, +}; + +#define EXYNOS4210_REGS_NUM ARRAY_SIZE(exynos4210_clk_regs) + +typedef struct Exynos4210ClkState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t reg[EXYNOS4210_REGS_NUM]; +} Exynos4210ClkState; + +static uint64_t exynos4210_clk_read(void *opaque, hwaddr offset, + unsigned size) +{ + const Exynos4210ClkState *s = (Exynos4210ClkState *)opaque; + const Exynos4210Reg *regs = exynos4210_clk_regs; + unsigned int i; + + for (i = 0; i < EXYNOS4210_REGS_NUM; i++) { + if (regs->offset == offset) { + return s->reg[i]; + } + regs++; + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad read offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; +} + +static void exynos4210_clk_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + Exynos4210ClkState *s = (Exynos4210ClkState *)opaque; + const Exynos4210Reg *regs = exynos4210_clk_regs; + unsigned int i; + + for (i = 0; i < EXYNOS4210_REGS_NUM; i++) { + if (regs->offset == offset) { + s->reg[i] = val; + return; + } + regs++; + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write offset 0x%04x\n", + __func__, (uint32_t)offset); +} + +static const MemoryRegionOps exynos4210_clk_ops = { + .read = exynos4210_clk_read, + .write = 
exynos4210_clk_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false + } +}; + +static void exynos4210_clk_reset(DeviceState *dev) +{ + Exynos4210ClkState *s = EXYNOS4210_CLK(dev); + unsigned int i; + + /* Set default values for registers */ + for (i = 0; i < EXYNOS4210_REGS_NUM; i++) { + s->reg[i] = exynos4210_clk_regs[i].reset_value; + } +} + +static void exynos4210_clk_init(Object *obj) +{ + Exynos4210ClkState *s = EXYNOS4210_CLK(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + /* memory mapping */ + memory_region_init_io(&s->iomem, obj, &exynos4210_clk_ops, s, + TYPE_EXYNOS4210_CLK, EXYNOS4210_CLK_REGS_MEM_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static const VMStateDescription exynos4210_clk_vmstate = { + .name = TYPE_EXYNOS4210_CLK, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, Exynos4210ClkState, EXYNOS4210_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void exynos4210_clk_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = exynos4210_clk_reset; + dc->vmsd = &exynos4210_clk_vmstate; +} + +static const TypeInfo exynos4210_clk_info = { + .name = TYPE_EXYNOS4210_CLK, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210ClkState), + .instance_init = exynos4210_clk_init, + .class_init = exynos4210_clk_class_init, +}; + +static void exynos4210_clk_register(void) +{ + qemu_log_mask(LOG_GUEST_ERROR, "Clock init\n"); + type_register_static(&exynos4210_clk_info); +} + +type_init(exynos4210_clk_register) diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index e99d4544a2..d4de8ad9f1 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -508,7 +508,7 @@ static void gem_update_int_status(CadenceGEMState *s) if ((s->num_priority_queues == 1) && s->regs[GEM_ISR]) { /* No priority queues, just trigger the interrupt */ - DB_PRINT("asserting int.\n", i); + DB_PRINT("asserting int.\n"); qemu_set_irq(s->irq[0], 1); return; } diff --git a/hw/sd/Makefile.objs b/hw/sd/Makefile.objs index 31c83308f2..c2b7664264 100644 --- a/hw/sd/Makefile.objs +++ b/hw/sd/Makefile.objs @@ -6,3 +6,4 @@ common-obj-$(CONFIG_SDHCI) += sdhci.o obj-$(CONFIG_MILKYMIST) += milkymist-memcard.o obj-$(CONFIG_OMAP) += omap_mmc.o obj-$(CONFIG_PXA2XX) += pxa2xx_mmci.o +obj-$(CONFIG_RASPI) += bcm2835_sdhost.o diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c new file mode 100644 index 0000000000..f7f4e656df --- /dev/null +++ b/hw/sd/bcm2835_sdhost.c @@ -0,0 +1,429 @@ +/* + * Raspberry Pi (BCM2835) SD Host Controller + * + * Copyright (c) 2017 Antfield SAS + * + * Authors: + * Clement Deschamps <clement.deschamps@antfield.fr> + * Luc Michel <luc.michel@antfield.fr> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "sysemu/blockdev.h" +#include "hw/sd/bcm2835_sdhost.h" + +#define TYPE_BCM2835_SDHOST_BUS "bcm2835-sdhost-bus" +#define BCM2835_SDHOST_BUS(obj) \ + OBJECT_CHECK(SDBus, (obj), TYPE_BCM2835_SDHOST_BUS) + +#define SDCMD 0x00 /* Command to SD card - 16 R/W */ +#define SDARG 0x04 /* Argument to SD card - 32 R/W */ +#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */ +#define SDCDIV 0x0c /* Start value for clock divider - 11 R/W */ +#define SDRSP0 0x10 /* SD card rsp (31:0) - 32 R */ +#define SDRSP1 0x14 /* SD card rsp (63:32) - 32 R */ +#define SDRSP2 0x18 /* SD card rsp (95:64) - 32 R */ +#define SDRSP3 0x1c /* SD card rsp (127:96) - 32 R */ +#define SDHSTS 0x20 /* SD host status - 11 R */ +#define SDVDD 0x30 /* SD card power control - 1 R/W */ +#define SDEDM 0x34 /* Emergency Debug Mode - 13 R/W */ +#define SDHCFG 0x38 /* Host configuration - 2 R/W */ +#define SDHBCT 0x3c /* Host byte count (debug) - 32 R/W */ +#define SDDATA 0x40 /* Data to/from SD card - 32 R/W */ +#define SDHBLC 0x50 /* Host block count (SDIO/SDHC) - 9 R/W */ + +#define SDCMD_NEW_FLAG 0x8000 +#define SDCMD_FAIL_FLAG 0x4000 +#define SDCMD_BUSYWAIT 0x800 +#define SDCMD_NO_RESPONSE 0x400 +#define SDCMD_LONG_RESPONSE 0x200 +#define SDCMD_WRITE_CMD 0x80 +#define SDCMD_READ_CMD 0x40 +#define SDCMD_CMD_MASK 0x3f + +#define SDCDIV_MAX_CDIV 0x7ff + +#define SDHSTS_BUSY_IRPT 0x400 +#define SDHSTS_BLOCK_IRPT 0x200 +#define SDHSTS_SDIO_IRPT 0x100 +#define SDHSTS_REW_TIME_OUT 0x80 +#define SDHSTS_CMD_TIME_OUT 0x40 +#define SDHSTS_CRC16_ERROR 0x20 +#define SDHSTS_CRC7_ERROR 0x10 +#define SDHSTS_FIFO_ERROR 0x08 +/* Reserved */ +/* Reserved */ +#define SDHSTS_DATA_FLAG 0x01 + +#define SDHCFG_BUSY_IRPT_EN (1 << 10) +#define SDHCFG_BLOCK_IRPT_EN (1 << 8) +#define SDHCFG_SDIO_IRPT_EN (1 << 5) +#define SDHCFG_DATA_IRPT_EN (1 << 4) +#define SDHCFG_SLOW_CARD (1 << 3) +#define SDHCFG_WIDE_EXT_BUS (1 << 2) +#define SDHCFG_WIDE_INT_BUS (1 << 1) +#define SDHCFG_REL_CMD_LINE (1 << 0) + +#define SDEDM_FORCE_DATA_MODE (1 << 19) +#define SDEDM_CLOCK_PULSE (1 << 20) +#define SDEDM_BYPASS (1 << 21) + +#define SDEDM_WRITE_THRESHOLD_SHIFT 9 +#define SDEDM_READ_THRESHOLD_SHIFT 14 +#define SDEDM_THRESHOLD_MASK 0x1f + +#define SDEDM_FSM_MASK 0xf +#define SDEDM_FSM_IDENTMODE 0x0 +#define SDEDM_FSM_DATAMODE 0x1 +#define SDEDM_FSM_READDATA 0x2 +#define SDEDM_FSM_WRITEDATA 0x3 +#define SDEDM_FSM_READWAIT 0x4 +#define SDEDM_FSM_READCRC 0x5 +#define SDEDM_FSM_WRITECRC 0x6 +#define SDEDM_FSM_WRITEWAIT1 0x7 +#define SDEDM_FSM_POWERDOWN 0x8 +#define SDEDM_FSM_POWERUP 0x9 +#define SDEDM_FSM_WRITESTART1 0xa +#define SDEDM_FSM_WRITESTART2 0xb +#define SDEDM_FSM_GENPULSES 0xc +#define SDEDM_FSM_WRITEWAIT2 0xd +#define SDEDM_FSM_STARTPOWDOWN 0xf + +#define SDDATA_FIFO_WORDS 16 + +static void bcm2835_sdhost_update_irq(BCM2835SDHostState *s) +{ + uint32_t irq = s->status & + (SDHSTS_BUSY_IRPT | SDHSTS_BLOCK_IRPT | SDHSTS_SDIO_IRPT); + qemu_set_irq(s->irq, !!irq); +} + +static void bcm2835_sdhost_send_command(BCM2835SDHostState *s) +{ + SDRequest request; + uint8_t rsp[16]; + int rlen; + + request.cmd = s->cmd & SDCMD_CMD_MASK; + request.arg = s->cmdarg; + + rlen = sdbus_do_command(&s->sdbus, &request, rsp); + if (rlen < 0) { + goto error; + } + if (!(s->cmd & SDCMD_NO_RESPONSE)) { +#define RWORD(n) (((uint32_t)rsp[n] << 24) | (rsp[n + 1] << 16) \ + | (rsp[n + 2] << 8) | rsp[n + 3]) + if (rlen == 0 || (rlen == 4 && (s->cmd & SDCMD_LONG_RESPONSE))) { + goto error; + } + if (rlen != 4 && rlen != 16) 
{ + goto error; + } + if (rlen == 4) { + s->rsp[0] = RWORD(0); + s->rsp[1] = s->rsp[2] = s->rsp[3] = 0; + } else { + s->rsp[0] = RWORD(12); + s->rsp[1] = RWORD(8); + s->rsp[2] = RWORD(4); + s->rsp[3] = RWORD(0); + } +#undef RWORD + } + return; + +error: + s->cmd |= SDCMD_FAIL_FLAG; + s->status |= SDHSTS_CMD_TIME_OUT; +} + +static void bcm2835_sdhost_fifo_push(BCM2835SDHostState *s, uint32_t value) +{ + int n; + + if (s->fifo_len == BCM2835_SDHOST_FIFO_LEN) { + /* FIFO overflow */ + return; + } + n = (s->fifo_pos + s->fifo_len) & (BCM2835_SDHOST_FIFO_LEN - 1); + s->fifo_len++; + s->fifo[n] = value; +} + +static uint32_t bcm2835_sdhost_fifo_pop(BCM2835SDHostState *s) +{ + uint32_t value; + + if (s->fifo_len == 0) { + /* FIFO underflow */ + return 0; + } + value = s->fifo[s->fifo_pos]; + s->fifo_len--; + s->fifo_pos = (s->fifo_pos + 1) & (BCM2835_SDHOST_FIFO_LEN - 1); + return value; +} + +static void bcm2835_sdhost_fifo_run(BCM2835SDHostState *s) +{ + uint32_t value = 0; + int n; + int is_read; + + is_read = (s->cmd & SDCMD_READ_CMD) != 0; + if (s->datacnt != 0 && (!is_read || sdbus_data_ready(&s->sdbus))) { + if (is_read) { + n = 0; + while (s->datacnt && s->fifo_len < BCM2835_SDHOST_FIFO_LEN) { + value |= (uint32_t)sdbus_read_data(&s->sdbus) << (n * 8); + s->datacnt--; + n++; + if (n == 4) { + bcm2835_sdhost_fifo_push(s, value); + n = 0; + value = 0; + } + } + if (n != 0) { + bcm2835_sdhost_fifo_push(s, value); + } + } else { /* write */ + n = 0; + while (s->datacnt > 0 && (s->fifo_len > 0 || n > 0)) { + if (n == 0) { + value = bcm2835_sdhost_fifo_pop(s); + n = 4; + } + n--; + s->datacnt--; + sdbus_write_data(&s->sdbus, value & 0xff); + value >>= 8; + } + } + } + if (s->datacnt == 0) { + s->status |= SDHSTS_DATA_FLAG; + + s->edm &= ~0xf; + s->edm |= SDEDM_FSM_DATAMODE; + + if (s->config & SDHCFG_DATA_IRPT_EN) { + s->status |= SDHSTS_SDIO_IRPT; + } + + if ((s->cmd & SDCMD_BUSYWAIT) && (s->config & SDHCFG_BUSY_IRPT_EN)) { + s->status |= SDHSTS_BUSY_IRPT; + } + + if ((s->cmd & SDCMD_WRITE_CMD) && (s->config & SDHCFG_BLOCK_IRPT_EN)) { + s->status |= SDHSTS_BLOCK_IRPT; + } + + bcm2835_sdhost_update_irq(s); + } + + s->edm &= ~(0x1f << 4); + s->edm |= ((s->fifo_len & 0x1f) << 4); +} + +static uint64_t bcm2835_sdhost_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835SDHostState *s = (BCM2835SDHostState *)opaque; + uint32_t res = 0; + + switch (offset) { + case SDCMD: + res = s->cmd; + break; + case SDHSTS: + res = s->status; + break; + case SDRSP0: + res = s->rsp[0]; + break; + case SDRSP1: + res = s->rsp[1]; + break; + case SDRSP2: + res = s->rsp[2]; + break; + case SDRSP3: + res = s->rsp[3]; + break; + case SDEDM: + res = s->edm; + break; + case SDVDD: + res = s->vdd; + break; + case SDDATA: + res = bcm2835_sdhost_fifo_pop(s); + bcm2835_sdhost_fifo_run(s); + break; + case SDHBCT: + res = s->hbct; + break; + case SDHBLC: + res = s->hblc; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + res = 0; + break; + } + + return res; +} + +static void bcm2835_sdhost_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835SDHostState *s = (BCM2835SDHostState *)opaque; + + switch (offset) { + case SDCMD: + s->cmd = value; + if (value & SDCMD_NEW_FLAG) { + bcm2835_sdhost_send_command(s); + bcm2835_sdhost_fifo_run(s); + s->cmd &= ~SDCMD_NEW_FLAG; + } + break; + case SDTOUT: + break; + case SDCDIV: + break; + case SDHSTS: + s->status &= ~value; + bcm2835_sdhost_update_irq(s); + break; + case SDARG: + s->cmdarg = 
value; + break; + case SDEDM: + if ((value & 0xf) == 0xf) { + /* power down */ + value &= ~0xf; + } + s->edm = value; + break; + case SDHCFG: + s->config = value; + bcm2835_sdhost_fifo_run(s); + break; + case SDVDD: + s->vdd = value; + break; + case SDDATA: + bcm2835_sdhost_fifo_push(s, value); + bcm2835_sdhost_fifo_run(s); + break; + case SDHBCT: + s->hbct = value; + break; + case SDHBLC: + s->hblc = value; + s->datacnt = s->hblc * s->hbct; + bcm2835_sdhost_fifo_run(s); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + break; + } +} + +static const MemoryRegionOps bcm2835_sdhost_ops = { + .read = bcm2835_sdhost_read, + .write = bcm2835_sdhost_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_bcm2835_sdhost = { + .name = TYPE_BCM2835_SDHOST, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cmd, BCM2835SDHostState), + VMSTATE_UINT32(cmdarg, BCM2835SDHostState), + VMSTATE_UINT32(status, BCM2835SDHostState), + VMSTATE_UINT32_ARRAY(rsp, BCM2835SDHostState, 4), + VMSTATE_UINT32(config, BCM2835SDHostState), + VMSTATE_UINT32(edm, BCM2835SDHostState), + VMSTATE_UINT32(vdd, BCM2835SDHostState), + VMSTATE_UINT32(hbct, BCM2835SDHostState), + VMSTATE_UINT32(hblc, BCM2835SDHostState), + VMSTATE_INT32(fifo_pos, BCM2835SDHostState), + VMSTATE_INT32(fifo_len, BCM2835SDHostState), + VMSTATE_UINT32_ARRAY(fifo, BCM2835SDHostState, BCM2835_SDHOST_FIFO_LEN), + VMSTATE_UINT32(datacnt, BCM2835SDHostState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_sdhost_init(Object *obj) +{ + BCM2835SDHostState *s = BCM2835_SDHOST(obj); + + qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), + TYPE_BCM2835_SDHOST_BUS, DEVICE(s), "sd-bus"); + + memory_region_init_io(&s->iomem, obj, &bcm2835_sdhost_ops, s, + TYPE_BCM2835_SDHOST, 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); +} + +static void bcm2835_sdhost_reset(DeviceState *dev) +{ + BCM2835SDHostState *s = BCM2835_SDHOST(dev); + + s->cmd = 0; + s->cmdarg = 0; + s->edm = 0x0000c60f; + s->config = 0; + s->hbct = 0; + s->hblc = 0; + s->datacnt = 0; + s->fifo_pos = 0; + s->fifo_len = 0; +} + +static void bcm2835_sdhost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2835_sdhost_reset; + dc->vmsd = &vmstate_bcm2835_sdhost; +} + +static TypeInfo bcm2835_sdhost_info = { + .name = TYPE_BCM2835_SDHOST, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835SDHostState), + .class_init = bcm2835_sdhost_class_init, + .instance_init = bcm2835_sdhost_init, +}; + +static const TypeInfo bcm2835_sdhost_bus_info = { + .name = TYPE_BCM2835_SDHOST_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), +}; + +static void bcm2835_sdhost_register_types(void) +{ + type_register_static(&bcm2835_sdhost_info); + type_register_static(&bcm2835_sdhost_bus_info); +} + +type_init(bcm2835_sdhost_register_types) diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index da32b5f709..6d6a791ee9 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -119,6 +119,7 @@ (SDHC_CAPAB_BASECLKFREQ << 8) | (SDHC_CAPAB_TOUNIT << 7) | \ (SDHC_CAPAB_TOCLKFREQ)) +#define MASK_TRNMOD 0x0037 #define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val)) static uint8_t sdhci_slotint(SDHCIState *s) @@ -486,6 +487,11 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) uint32_t boundary_chk = 1 << (((s->blksize & 0xf000) >> 12) + 12); 
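    /* Not part of the patch, an explanatory note: bits [14:12] of the
     * Block Size register encode the SDMA buffer boundary as
     * 4 KiB * 2^n, so boundary_chk above is one of 4 KiB ... 512 KiB,
     * and boundary_count (computed next) is how many bytes remain
     * before SDMASYSAD crosses that boundary, the point at which a DMA
     * interrupt is signalled so the driver can supply the next buffer
     * address.  The new guard above rejects the "infinite transfer"
     * mode (block count disabled or zero), since the multi-block loop
     * relies on blkcnt to terminate.
     */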
uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk); + if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) { + qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n"); + return; + } + /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for * possible stop at page boundary if initial address is not page aligned, * allow them to work properly */ @@ -564,7 +570,6 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) } /* single block SDMA transfer */ - static void sdhci_sdma_transfer_single_block(SDHCIState *s) { int n; @@ -583,10 +588,7 @@ static void sdhci_sdma_transfer_single_block(SDHCIState *s) sdbus_write_data(&s->sdbus, s->fifo_buffer[n]); } } - - if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { - s->blkcnt--; - } + s->blkcnt--; sdhci_end_transfer(s); } @@ -797,11 +799,6 @@ static void sdhci_data_transfer(void *opaque) if (s->trnmod & SDHC_TRNS_DMA) { switch (SDHC_DMA_TYPE(s->hostctl)) { case SDHC_CTRL_SDMA: - if ((s->trnmod & SDHC_TRNS_MULTI) && - (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || s->blkcnt == 0)) { - break; - } - if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) { sdhci_sdma_transfer_single_block(s); } else { @@ -1022,7 +1019,11 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) /* Writing to last byte of sdmasysad might trigger transfer */ if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt && s->blksize && SDHC_DMA_TYPE(s->hostctl) == SDHC_CTRL_SDMA) { - sdhci_sdma_transfer_multi_blocks(s); + if (s->trnmod & SDHC_TRNS_MULTI) { + sdhci_sdma_transfer_multi_blocks(s); + } else { + sdhci_sdma_transfer_single_block(s); + } } break; case SDHC_BLKSIZE: @@ -1050,7 +1051,7 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) if (!(s->capareg & SDHC_CAN_DO_DMA)) { value &= ~SDHC_TRNS_DMA; } - MASKED_WRITE(s->trnmod, mask, value); + MASKED_WRITE(s->trnmod, mask, value & MASK_TRNMOD); MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); /* Writing to the upper byte of CMDREG triggers SD command generation */ diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c index 010ccbf207..4b9b54bf2e 100644 --- a/hw/timer/imx_gpt.c +++ b/hw/timer/imx_gpt.c @@ -296,18 +296,23 @@ static uint64_t imx_gpt_read(void *opaque, hwaddr offset, unsigned size) return reg_value; } -static void imx_gpt_reset(DeviceState *dev) -{ - IMXGPTState *s = IMX_GPT(dev); +static void imx_gpt_reset_common(IMXGPTState *s, bool is_soft_reset) +{ /* stop timer */ ptimer_stop(s->timer); - /* - * Soft reset doesn't touch some bits; hard reset clears them + /* Soft reset and hard reset differ only in their handling of the CR + * register -- soft reset preserves the values of some bits there. */ - s->cr &= ~(GPT_CR_EN|GPT_CR_ENMOD|GPT_CR_STOPEN|GPT_CR_DOZEN| - GPT_CR_WAITEN|GPT_CR_DBGEN); + if (is_soft_reset) { + /* Clear all CR bits except those that are preserved by soft reset. 
*/ + s->cr &= GPT_CR_EN | GPT_CR_ENMOD | GPT_CR_STOPEN | GPT_CR_DOZEN | + GPT_CR_WAITEN | GPT_CR_DBGEN | + (GPT_CR_CLKSRC_MASK << GPT_CR_CLKSRC_SHIFT); + } else { + s->cr = 0; + } s->sr = 0; s->pr = 0; s->ir = 0; @@ -333,6 +338,18 @@ static void imx_gpt_reset(DeviceState *dev) } } +static void imx_gpt_soft_reset(DeviceState *dev) +{ + IMXGPTState *s = IMX_GPT(dev); + imx_gpt_reset_common(s, true); +} + +static void imx_gpt_reset(DeviceState *dev) +{ + IMXGPTState *s = IMX_GPT(dev); + imx_gpt_reset_common(s, false); +} + static void imx_gpt_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { @@ -348,7 +365,7 @@ static void imx_gpt_write(void *opaque, hwaddr offset, uint64_t value, s->cr = value & ~0x7c14; if (s->cr & GPT_CR_SWR) { /* force reset */ /* handle the reset */ - imx_gpt_reset(DEVICE(s)); + imx_gpt_soft_reset(DEVICE(s)); } else { /* set our freq, as the source might have changed */ imx_gpt_set_freq(s); diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h index e12ae3721a..31241c799d 100644 --- a/include/hw/arm/bcm2835_peripherals.h +++ b/include/hw/arm/bcm2835_peripherals.h @@ -19,6 +19,7 @@ #include "hw/dma/bcm2835_dma.h" #include "hw/intc/bcm2835_ic.h" #include "hw/misc/bcm2835_property.h" +#include "hw/misc/bcm2835_rng.h" #include "hw/misc/bcm2835_mbox.h" #include "hw/sd/sdhci.h" @@ -41,6 +42,7 @@ typedef struct BCM2835PeripheralState { BCM2835DMAState dma; BCM2835ICState ic; BCM2835PropertyState property; + BCM2835RngState rng; BCM2835MboxState mboxes; SDHCIState sdhci; } BCM2835PeripheralState; diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index 58ce74e0e5..33b0ff3892 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -93,6 +93,7 @@ typedef struct { FWCfgState *fw_cfg; bool secure; bool highmem; + bool its; bool virt; int32_t gic_version; struct arm_boot_info bootinfo; diff --git a/include/hw/misc/bcm2835_rng.h b/include/hw/misc/bcm2835_rng.h new file mode 100644 index 0000000000..41a531bce7 --- /dev/null +++ b/include/hw/misc/bcm2835_rng.h @@ -0,0 +1,27 @@ +/* + * BCM2835 Random Number Generator emulation + * + * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef BCM2835_RNG_H +#define BCM2835_RNG_H + +#include "hw/sysbus.h" + +#define TYPE_BCM2835_RNG "bcm2835-rng" +#define BCM2835_RNG(obj) \ + OBJECT_CHECK(BCM2835RngState, (obj), TYPE_BCM2835_RNG) + +typedef struct { + SysBusDevice busdev; + MemoryRegion iomem; + + uint32_t rng_ctrl; + uint32_t rng_status; +} BCM2835RngState; + +#endif diff --git a/include/hw/sd/bcm2835_sdhost.h b/include/hw/sd/bcm2835_sdhost.h new file mode 100644 index 0000000000..7520dd6507 --- /dev/null +++ b/include/hw/sd/bcm2835_sdhost.h @@ -0,0 +1,48 @@ +/* + * Raspberry Pi (BCM2835) SD Host Controller + * + * Copyright (c) 2017 Antfield SAS + * + * Authors: + * Clement Deschamps <clement.deschamps@antfield.fr> + * Luc Michel <luc.michel@antfield.fr> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef BCM2835_SDHOST_H +#define BCM2835_SDHOST_H + +#include "hw/sysbus.h" +#include "hw/sd/sd.h" + +#define TYPE_BCM2835_SDHOST "bcm2835-sdhost" +#define BCM2835_SDHOST(obj) \ + OBJECT_CHECK(BCM2835SDHostState, (obj), TYPE_BCM2835_SDHOST) + +#define BCM2835_SDHOST_FIFO_LEN 16 + +typedef struct { + SysBusDevice busdev; + SDBus sdbus; + MemoryRegion iomem; + + uint32_t cmd; + uint32_t cmdarg; + uint32_t status; + uint32_t rsp[4]; + uint32_t config; + uint32_t edm; + uint32_t vdd; + uint32_t hbct; + uint32_t hblc; + int32_t fifo_pos; + int32_t fifo_len; + uint32_t fifo[BCM2835_SDHOST_FIFO_LEN]; + uint32_t datacnt; + + qemu_irq irq; +} BCM2835SDHostState; + +#endif diff --git a/include/qemu-common.h b/include/qemu-common.h index 1430390eb6..d218821c14 100644 --- a/include/qemu-common.h +++ b/include/qemu-common.h @@ -19,7 +19,7 @@ #include "qemu/option.h" /* Copyright string for -version arguments, About dialogs, etc */ -#define QEMU_COPYRIGHT "Copyright (c) 2003-2016 " \ +#define QEMU_COPYRIGHT "Copyright (c) 2003-2017 " \ "Fabrice Bellard and the QEMU Project developers" /* main function, renamed */ diff --git a/linux-user/main.c b/linux-user/main.c index 9645122aa6..10a3bb3a12 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -574,6 +574,7 @@ void cpu_loop(CPUARMState *env) switch(trapnr) { case EXCP_UDEF: case EXCP_NOCP: + case EXCP_INVSTATE: { TaskState *ts = cs->opaque; uint32_t opcode; diff --git a/linux-user/signal.c b/linux-user/signal.c index 8209539555..a67db04e1a 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -254,7 +254,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) } #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \ - !defined(TARGET_X86_64) && !defined(TARGET_NIOS2) + !defined(TARGET_NIOS2) /* Just set the guest's signal mask to the specified value; the * caller is assumed to have called block_signals() already. */ @@ -512,7 +512,7 @@ void signal_init(void) } } -#if !(defined(TARGET_X86_64) || defined(TARGET_UNICORE32)) +#ifndef TARGET_UNICORE32 /* Force a synchronously taken signal. The kernel force_sig() function * also forces the signal to "not blocked, not ignored", but for QEMU * that work is done in process_pending_signals(). 
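The signal.c hunks below extend what was previously i386-only signal frame code to TARGET_X86_64. As a sketch of the frame placement they implement (hypothetical helper name; the real logic is in get_sigframe() further down): the 128-byte System V red zone below %rsp must be skipped, and the frame must leave the stack 16-byte aligned minus 8, as it would be just after a call instruction.

    static abi_ulong place_rt_frame_64(abi_ulong rsp, size_t frame_size)
    {
        rsp -= 128;                               /* skip the red zone */
        return ((rsp - frame_size) & ~15ul) - 8;  /* call-site alignment */
    }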
@@ -819,9 +819,8 @@ int do_sigaction(int sig, const struct target_sigaction *act, return ret; } -#if defined(TARGET_I386) && TARGET_ABI_BITS == 32 - -/* from the Linux kernel */ +#if defined(TARGET_I386) +/* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */ struct target_fpreg { uint16_t significand[4]; @@ -835,58 +834,120 @@ struct target_fpxreg { }; struct target_xmmreg { - abi_ulong element[4]; + uint32_t element[4]; }; -struct target_fpstate { +struct target_fpstate_32 { /* Regular FPU environment */ - abi_ulong cw; - abi_ulong sw; - abi_ulong tag; - abi_ulong ipoff; - abi_ulong cssel; - abi_ulong dataoff; - abi_ulong datasel; - struct target_fpreg _st[8]; + uint32_t cw; + uint32_t sw; + uint32_t tag; + uint32_t ipoff; + uint32_t cssel; + uint32_t dataoff; + uint32_t datasel; + struct target_fpreg st[8]; uint16_t status; uint16_t magic; /* 0xffff = regular FPU data only */ /* FXSR FPU environment */ - abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */ - abi_ulong mxcsr; - abi_ulong reserved; - struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ - struct target_xmmreg _xmm[8]; - abi_ulong padding[56]; + uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */ + uint32_t mxcsr; + uint32_t reserved; + struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */ + struct target_xmmreg xmm[8]; + uint32_t padding[56]; }; -#define X86_FXSR_MAGIC 0x0000 +struct target_fpstate_64 { + /* FXSAVE format */ + uint16_t cw; + uint16_t sw; + uint16_t twd; + uint16_t fop; + uint64_t rip; + uint64_t rdp; + uint32_t mxcsr; + uint32_t mxcsr_mask; + uint32_t st_space[32]; + uint32_t xmm_space[64]; + uint32_t reserved[24]; +}; -struct target_sigcontext { +#ifndef TARGET_X86_64 +# define target_fpstate target_fpstate_32 +#else +# define target_fpstate target_fpstate_64 +#endif + +struct target_sigcontext_32 { uint16_t gs, __gsh; uint16_t fs, __fsh; uint16_t es, __esh; uint16_t ds, __dsh; - abi_ulong edi; - abi_ulong esi; - abi_ulong ebp; - abi_ulong esp; - abi_ulong ebx; - abi_ulong edx; - abi_ulong ecx; - abi_ulong eax; - abi_ulong trapno; - abi_ulong err; - abi_ulong eip; + uint32_t edi; + uint32_t esi; + uint32_t ebp; + uint32_t esp; + uint32_t ebx; + uint32_t edx; + uint32_t ecx; + uint32_t eax; + uint32_t trapno; + uint32_t err; + uint32_t eip; uint16_t cs, __csh; - abi_ulong eflags; - abi_ulong esp_at_signal; + uint32_t eflags; + uint32_t esp_at_signal; uint16_t ss, __ssh; - abi_ulong fpstate; /* pointer */ - abi_ulong oldmask; - abi_ulong cr2; + uint32_t fpstate; /* pointer */ + uint32_t oldmask; + uint32_t cr2; +}; + +struct target_sigcontext_64 { + uint64_t r8; + uint64_t r9; + uint64_t r10; + uint64_t r11; + uint64_t r12; + uint64_t r13; + uint64_t r14; + uint64_t r15; + + uint64_t rdi; + uint64_t rsi; + uint64_t rbp; + uint64_t rbx; + uint64_t rdx; + uint64_t rax; + uint64_t rcx; + uint64_t rsp; + uint64_t rip; + + uint64_t eflags; + + uint16_t cs; + uint16_t gs; + uint16_t fs; + uint16_t ss; + + uint64_t err; + uint64_t trapno; + uint64_t oldmask; + uint64_t cr2; + + uint64_t fpstate; /* pointer */ + uint64_t padding[8]; }; +#ifndef TARGET_X86_64 +# define target_sigcontext target_sigcontext_32 +#else +# define target_sigcontext target_sigcontext_64 +#endif + +/* see Linux/include/uapi/asm-generic/ucontext.h */ struct target_ucontext { abi_ulong tuc_flags; abi_ulong tuc_link; @@ -895,8 +956,8 @@ struct target_ucontext { target_sigset_t tuc_sigmask; /* mask last for extensibility */ }; -struct sigframe -{ +#ifndef TARGET_X86_64 +struct sigframe { abi_ulong 
pretcode; int sig; struct target_sigcontext sc; @@ -905,8 +966,7 @@ struct sigframe char retcode[8]; }; -struct rt_sigframe -{ +struct rt_sigframe { abi_ulong pretcode; int sig; abi_ulong pinfo; @@ -917,6 +977,17 @@ struct rt_sigframe char retcode[8]; }; +#else + +struct rt_sigframe { + abi_ulong pretcode; + struct target_ucontext uc; + struct target_siginfo info; + struct target_fpstate fpstate; +}; + +#endif + /* * Set up a signal frame. */ @@ -927,6 +998,7 @@ static void setup_sigcontext(struct target_sigcontext *sc, abi_ulong fpstate_addr) { CPUState *cs = CPU(x86_env_get_cpu(env)); +#ifndef TARGET_X86_64 uint16_t magic; /* already locked in setup_frame() */ @@ -959,6 +1031,44 @@ static void setup_sigcontext(struct target_sigcontext *sc, /* non-iBCS2 extensions.. */ __put_user(mask, &sc->oldmask); __put_user(env->cr[2], &sc->cr2); +#else + __put_user(env->regs[R_EDI], &sc->rdi); + __put_user(env->regs[R_ESI], &sc->rsi); + __put_user(env->regs[R_EBP], &sc->rbp); + __put_user(env->regs[R_ESP], &sc->rsp); + __put_user(env->regs[R_EBX], &sc->rbx); + __put_user(env->regs[R_EDX], &sc->rdx); + __put_user(env->regs[R_ECX], &sc->rcx); + __put_user(env->regs[R_EAX], &sc->rax); + + __put_user(env->regs[8], &sc->r8); + __put_user(env->regs[9], &sc->r9); + __put_user(env->regs[10], &sc->r10); + __put_user(env->regs[11], &sc->r11); + __put_user(env->regs[12], &sc->r12); + __put_user(env->regs[13], &sc->r13); + __put_user(env->regs[14], &sc->r14); + __put_user(env->regs[15], &sc->r15); + + __put_user(cs->exception_index, &sc->trapno); + __put_user(env->error_code, &sc->err); + __put_user(env->eip, &sc->rip); + + __put_user(env->eflags, &sc->eflags); + __put_user(env->segs[R_CS].selector, &sc->cs); + __put_user((uint16_t)0, &sc->gs); + __put_user((uint16_t)0, &sc->fs); + __put_user(env->segs[R_SS].selector, &sc->ss); + + __put_user(mask, &sc->oldmask); + __put_user(env->cr[2], &sc->cr2); + + /* fpstate_addr must be 16 byte aligned for fxsave */ + assert(!(fpstate_addr & 0xf)); + + cpu_x86_fxsave(env, fpstate_addr); + __put_user(fpstate_addr, &sc->fpstate); +#endif } /* @@ -972,23 +1082,34 @@ get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) /* Default to using normal stack */ esp = env->regs[R_ESP]; +#ifdef TARGET_X86_64 + esp -= 128; /* this is the redzone */ +#endif + /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa_flags & TARGET_SA_ONSTACK) { if (sas_ss_flags(esp) == 0) { esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; } } else { - +#ifndef TARGET_X86_64 /* This is the legacy signal stack switching. 
*/ if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && !(ka->sa_flags & TARGET_SA_RESTORER) && ka->sa_restorer) { esp = (unsigned long) ka->sa_restorer; } +#endif } + +#ifndef TARGET_X86_64 return (esp - frame_size) & -8ul; +#else + return ((esp - frame_size) & (~15ul)) - 8; +#endif } +#ifndef TARGET_X86_64 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ static void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUX86State *env) @@ -1029,7 +1150,6 @@ static void setup_frame(int sig, struct target_sigaction *ka, __put_user(val16, (uint16_t *)(frame->retcode+6)); } - /* Set up registers for signal handler */ env->regs[R_ESP] = frame_addr; env->eip = ka->_sa_handler; @@ -1047,13 +1167,17 @@ static void setup_frame(int sig, struct target_sigaction *ka, give_sigsegv: force_sigsegv(sig); } +#endif -/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */ +/* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */ static void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUX86State *env) { - abi_ulong frame_addr, addr; + abi_ulong frame_addr; +#ifndef TARGET_X86_64 + abi_ulong addr; +#endif struct rt_sigframe *frame; int i; @@ -1063,12 +1187,17 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka, if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) goto give_sigsegv; + /* These fields are only in rt_sigframe on 32 bit */ +#ifndef TARGET_X86_64 __put_user(sig, &frame->sig); addr = frame_addr + offsetof(struct rt_sigframe, info); __put_user(addr, &frame->pinfo); addr = frame_addr + offsetof(struct rt_sigframe, uc); __put_user(addr, &frame->puc); - tswap_siginfo(&frame->info, info); +#endif + if (ka->sa_flags & TARGET_SA_SIGINFO) { + tswap_siginfo(&frame->info, info); + } /* Create the ucontext. */ __put_user(0, &frame->uc.tuc_flags); @@ -1087,6 +1216,7 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ +#ifndef TARGET_X86_64 if (ka->sa_flags & TARGET_SA_RESTORER) { __put_user(ka->sa_restorer, &frame->pretcode); } else { @@ -1099,15 +1229,31 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka, val16 = 0x80cd; __put_user(val16, (uint16_t *)(frame->retcode+5)); } +#else + /* XXX: Would be slightly better to return -EFAULT here if test fails + assert(ka->sa_flags & TARGET_SA_RESTORER); */ + __put_user(ka->sa_restorer, &frame->pretcode); +#endif /* Set up registers for signal handler */ env->regs[R_ESP] = frame_addr; env->eip = ka->_sa_handler; +#ifndef TARGET_X86_64 + env->regs[R_EAX] = sig; + env->regs[R_EDX] = (unsigned long)&frame->info; + env->regs[R_ECX] = (unsigned long)&frame->uc; +#else + env->regs[R_EAX] = 0; + env->regs[R_EDI] = sig; + env->regs[R_ESI] = (unsigned long)&frame->info; + env->regs[R_EDX] = (unsigned long)&frame->uc; +#endif + cpu_x86_load_seg(env, R_DS, __USER_DS); cpu_x86_load_seg(env, R_ES, __USER_DS); - cpu_x86_load_seg(env, R_SS, __USER_DS); cpu_x86_load_seg(env, R_CS, __USER_CS); + cpu_x86_load_seg(env, R_SS, __USER_DS); env->eflags &= ~TF_MASK; unlock_user_struct(frame, frame_addr, 1); @@ -1125,6 +1271,7 @@ restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) abi_ulong fpstate_addr; unsigned int tmpflags; +#ifndef TARGET_X86_64 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs)); cpu_x86_load_seg(env, R_FS, tswap16(sc->fs)); cpu_x86_load_seg(env, R_ES, tswap16(sc->es)); @@ -1138,7 +1285,29 @@ restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) env->regs[R_EDX] = tswapl(sc->edx); env->regs[R_ECX] = tswapl(sc->ecx); env->regs[R_EAX] = tswapl(sc->eax); + env->eip = tswapl(sc->eip); +#else + env->regs[8] = tswapl(sc->r8); + env->regs[9] = tswapl(sc->r9); + env->regs[10] = tswapl(sc->r10); + env->regs[11] = tswapl(sc->r11); + env->regs[12] = tswapl(sc->r12); + env->regs[13] = tswapl(sc->r13); + env->regs[14] = tswapl(sc->r14); + env->regs[15] = tswapl(sc->r15); + + env->regs[R_EDI] = tswapl(sc->rdi); + env->regs[R_ESI] = tswapl(sc->rsi); + env->regs[R_EBP] = tswapl(sc->rbp); + env->regs[R_EBX] = tswapl(sc->rbx); + env->regs[R_EDX] = tswapl(sc->rdx); + env->regs[R_EAX] = tswapl(sc->rax); + env->regs[R_ECX] = tswapl(sc->rcx); + env->regs[R_ESP] = tswapl(sc->rsp); + + env->eip = tswapl(sc->rip); +#endif cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3); cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3); @@ -1152,7 +1321,11 @@ restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) if (!access_ok(VERIFY_READ, fpstate_addr, sizeof(struct target_fpstate))) goto badframe; +#ifndef TARGET_X86_64 cpu_x86_frstor(env, fpstate_addr, 1); +#else + cpu_x86_fxrstor(env, fpstate_addr); +#endif } return err; @@ -1160,6 +1333,8 @@ badframe: return 1; } +/* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */ +#ifndef TARGET_X86_64 long do_sigreturn(CPUX86State *env) { struct sigframe *frame; @@ -1191,6 +1366,7 @@ badframe: force_sig(TARGET_SIGSEGV); return -TARGET_QEMU_ESIGRETURN; } +#endif long do_rt_sigreturn(CPUX86State *env) { @@ -1198,7 +1374,7 @@ long do_rt_sigreturn(CPUX86State *env) struct rt_sigframe *frame; sigset_t set; - frame_addr = env->regs[R_ESP] - 4; + frame_addr = env->regs[R_ESP] - sizeof(abi_ulong); trace_user_do_rt_sigreturn(env, frame_addr); if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; @@ -5500,6 +5676,7 @@ static inline int target_rt_setup_ucontext(struct target_ucontext *uc, CPUM68KState *env) { target_greg_t *gregs = uc->tuc_mcontext.gregs; + uint32_t sr = 
cpu_m68k_get_ccr(env); __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); __put_user(env->dregs[0], &gregs[0]); @@ -5519,7 +5696,7 @@ static inline int target_rt_setup_ucontext(struct target_ucontext *uc, __put_user(env->aregs[6], &gregs[14]); __put_user(env->aregs[7], &gregs[15]); __put_user(env->pc, &gregs[16]); - __put_user(env->sr, &gregs[17]); + __put_user(sr, &gregs[17]); return 0; } @@ -5553,7 +5730,7 @@ static inline int target_rt_restore_ucontext(CPUM68KState *env, __get_user(env->aregs[7], &gregs[15]); __get_user(env->pc, &gregs[16]); __get_user(temp, &gregs[17]); - env->sr = (env->sr & 0xff00) | (temp & 0xff); + cpu_m68k_set_ccr(env, temp); return 0; @@ -5674,14 +5851,13 @@ long do_rt_sigreturn(CPUM68KState *env) { struct target_rt_sigframe *frame; abi_ulong frame_addr = env->aregs[7] - 4; - target_sigset_t target_set; sigset_t set; trace_user_do_rt_sigreturn(env, frame_addr); if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; - target_to_host_sigset_internal(&set, &target_set); + target_to_host_sigset(&set, &frame->uc.tuc_sigmask); set_sigmask(&set); /* restore registers */ @@ -6418,7 +6594,7 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig, #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \ || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \ || defined(TARGET_PPC64) || defined(TARGET_HPPA) \ - || defined(TARGET_NIOS2) + || defined(TARGET_NIOS2) || defined(TARGET_X86_64) /* These targets do not have traditional signals. */ setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); #else diff --git a/linux-user/syscall.c b/linux-user/syscall.c index f569f827fc..cec8428589 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -57,6 +57,8 @@ int __clone2(int (*fn)(void *), void *child_stack_base, #include <netinet/tcp.h> #include <linux/wireless.h> #include <linux/icmp.h> +#include <linux/icmpv6.h> +#include <linux/errqueue.h> #include "qemu-common.h" #ifdef CONFIG_TIMERFD #include <sys/timerfd.h> @@ -1634,6 +1636,11 @@ static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr; target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex); target_ll->sll_hatype = tswap16(target_ll->sll_hatype); + } else if (addr->sa_family == AF_INET6 && + len >= sizeof(struct target_sockaddr_in6)) { + struct target_sockaddr_in6 *target_in6 = + (struct target_sockaddr_in6 *)target_saddr; + target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id); } unlock_user(target_saddr, target_addr, len); @@ -1839,6 +1846,78 @@ static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, } break; + case SOL_IP: + switch (cmsg->cmsg_type) { + case IP_TTL: + { + uint32_t *v = (uint32_t *)data; + uint32_t *t_int = (uint32_t *)target_data; + + __put_user(*v, t_int); + break; + } + case IP_RECVERR: + { + struct errhdr_t { + struct sock_extended_err ee; + struct sockaddr_in offender; + }; + struct errhdr_t *errh = (struct errhdr_t *)data; + struct errhdr_t *target_errh = + (struct errhdr_t *)target_data; + + __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); + __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); + __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); + __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); + __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); + __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); + __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); + 
host_to_target_sockaddr((unsigned long) &target_errh->offender, + (void *) &errh->offender, sizeof(errh->offender)); + break; + } + default: + goto unimplemented; + } + break; + + case SOL_IPV6: + switch (cmsg->cmsg_type) { + case IPV6_HOPLIMIT: + { + uint32_t *v = (uint32_t *)data; + uint32_t *t_int = (uint32_t *)target_data; + + __put_user(*v, t_int); + break; + } + case IPV6_RECVERR: + { + struct errhdr6_t { + struct sock_extended_err ee; + struct sockaddr_in6 offender; + }; + struct errhdr6_t *errh = (struct errhdr6_t *)data; + struct errhdr6_t *target_errh = + (struct errhdr6_t *)target_data; + + __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); + __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); + __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); + __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); + __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); + __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); + __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); + host_to_target_sockaddr((unsigned long) &target_errh->offender, + (void *) &errh->offender, sizeof(errh->offender)); + break; + } + default: + goto unimplemented; + } + break; + default: unimplemented: gemu_log("Unsupported ancillary data: %d/%d\n", @@ -2768,6 +2847,7 @@ static abi_long do_setsockopt(int sockfd, int level, int optname, case IP_PKTINFO: case IP_MTU_DISCOVER: case IP_RECVERR: + case IP_RECVTTL: case IP_RECVTOS: #ifdef IP_FREEBIND case IP_FREEBIND: @@ -2817,6 +2897,11 @@ static abi_long do_setsockopt(int sockfd, int level, int optname, case IPV6_MTU: case IPV6_V6ONLY: case IPV6_RECVPKTINFO: + case IPV6_UNICAST_HOPS: + case IPV6_RECVERR: + case IPV6_RECVHOPLIMIT: + case IPV6_2292HOPLIMIT: + case IPV6_CHECKSUM: val = 0; if (optlen < sizeof(uint32_t)) { return -TARGET_EINVAL; @@ -2827,6 +2912,50 @@ static abi_long do_setsockopt(int sockfd, int level, int optname, ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); break; + case IPV6_PKTINFO: + { + struct in6_pktinfo pki; + + if (optlen < sizeof(pki)) { + return -TARGET_EINVAL; + } + + if (copy_from_user(&pki, optval_addr, sizeof(pki))) { + return -TARGET_EFAULT; + } + + pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex); + + ret = get_errno(setsockopt(sockfd, level, optname, + &pki, sizeof(pki))); + break; + } + default: + goto unimplemented; + } + break; + case SOL_ICMPV6: + switch (optname) { + case ICMPV6_FILTER: + { + struct icmp6_filter icmp6f; + + if (optlen > sizeof(icmp6f)) { + optlen = sizeof(icmp6f); + } + + if (copy_from_user(&icmp6f, optval_addr, optlen)) { + return -TARGET_EFAULT; + } + + for (val = 0; val < 8; val++) { + icmp6f.data[val] = tswap32(icmp6f.data[val]); + } + + ret = get_errno(setsockopt(sockfd, level, optname, + &icmp6f, optlen)); + break; + } default: goto unimplemented; } @@ -2834,7 +2963,8 @@ static abi_long do_setsockopt(int sockfd, int level, int optname, case SOL_RAW: switch (optname) { case ICMP_FILTER: - /* struct icmp_filter takes an u32 value */ + case IPV6_CHECKSUM: + /* those take an u32 value */ if (optlen < sizeof(uint32_t)) { return -TARGET_EINVAL; } @@ -7680,7 +7810,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, break; #ifdef TARGET_NR_fork case TARGET_NR_fork: - ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); + ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); break; #endif #ifdef TARGET_NR_waitpid @@ -10490,7 +10620,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, #endif #ifdef TARGET_NR_vfork case TARGET_NR_vfork: - 
ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, + ret = get_errno(do_fork(cpu_env, + CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 0, 0, 0, 0)); break; #endif @@ -11063,11 +11194,16 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, case TARGET_NR_mincore: { void *a; + ret = -TARGET_ENOMEM; + a = lock_user(VERIFY_READ, arg1, arg2, 0); + if (!a) { + goto fail; + } ret = -TARGET_EFAULT; - if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) - goto efault; - if (!(p = lock_user_string(arg3))) + p = lock_user_string(arg3); + if (!p) { goto mincore_fail; + } ret = get_errno(mincore(a, arg2, p)); unlock_user(p, arg3, ret); mincore_fail: diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index 72ca5b11d6..40c5027e93 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -164,6 +164,14 @@ struct target_sockaddr_in { sizeof(struct target_in_addr)]; }; +struct target_sockaddr_in6 { + uint16_t sin6_family; + uint16_t sin6_port; /* big endian */ + uint32_t sin6_flowinfo; /* big endian */ + struct in6_addr sin6_addr; /* IPv6 address, big endian */ + uint32_t sin6_scope_id; +}; + struct target_sock_filter { abi_ushort code; uint8_t jt; diff --git a/qapi/block-core.json b/qapi/block-core.json index 5f82d35fab..cf24c04242 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -2625,29 +2625,29 @@ ## # @BlockdevOptionsIscsi: # -# @transport The iscsi transport type +# @transport: The iscsi transport type # -# @portal The address of the iscsi portal +# @portal: The address of the iscsi portal # -# @target The target iqn name +# @target: The target iqn name # -# @lun #optional LUN to connect to. Defaults to 0. +# @lun: #optional LUN to connect to. Defaults to 0. # -# @user #optional User name to log in with. If omitted, no CHAP +# @user: #optional User name to log in with. If omitted, no CHAP # authentication is performed. # -# @password-secret #optional The ID of a QCryptoSecret object providing +# @password-secret: #optional The ID of a QCryptoSecret object providing # the password for the login. This option is required if # @user is specified. # -# @initiator-name #optional The iqn name we want to identify to the target +# @initiator-name: #optional The iqn name we want to identify to the target # as. If this option is not specified, an initiator name is # generated automatically. # -# @header-digest #optional The desired header digest. Defaults to +# @header-digest: #optional The desired header digest. Defaults to # none-crc32c. # -# @timeout #optional Timeout in seconds after which a request will +# @timeout: #optional Timeout in seconds after which a request will # timeout. 0 means no timeout and is the default. 
# # Driver specific block device options for iscsi diff --git a/qemu-options.hx b/qemu-options.hx index 82528804c3..c85f77d1d8 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -2151,7 +2151,7 @@ Example: @example qemu -m 512 -object memory-backend-file,id=mem,size=512M,mem-path=/hugetlbfs,share=on \ -numa node,memdev=mem \ - -chardev socket,path=/path/to/socket \ + -chardev socket,id=chr0,path=/path/to/socket \ -netdev type=vhost-user,id=net0,chardev=chr0 \ -device virtio-net-pci,netdev=net0 @end example diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index b08d1601d1..691ac00c0b 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -28,6 +28,9 @@ #define CPUArchState struct CPUAlphaState +/* Alpha processors have a weak memory model */ +#define TCG_GUEST_DEFAULT_MO (0) + #include "exec/cpu-defs.h" #include "fpu/softfloat.h" diff --git a/target/arm/cpu.c b/target/arm/cpu.c index f7157dc0e5..04b062cb7e 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -338,13 +338,6 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) CPUARMState *env = &cpu->env; bool ret = false; - - if (interrupt_request & CPU_INTERRUPT_FIQ - && !(env->daif & PSTATE_F)) { - cs->exception_index = EXCP_FIQ; - cc->do_interrupt(cs); - ret = true; - } /* ARMv7-M interrupt return works by loading a magic value * into the PC. On real hardware the load causes the * return to occur. The qemu implementation performs the @@ -354,9 +347,16 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) * the stack if an interrupt occurred at the wrong time. * We avoid this by disabling interrupts when * pc contains a magic address. + * + * ARMv7-M interrupt masking works differently than -A or -R. + * There is no FIQ/IRQ distinction. Instead of I and F bits + * masking FIQ and IRQ interrupts, an exception is taken only + * if it is higher priority than the current execution priority + * (which depends on state like BASEPRI, FAULTMASK and the + * currently active exception). */ if (interrupt_request & CPU_INTERRUPT_HARD - && !(env->daif & PSTATE_I) + && (armv7m_nvic_can_take_pending_exception(env->nvic)) && (env->regs[15] < 0xfffffff0)) { cs->exception_index = EXCP_IRQ; cc->do_interrupt(cs); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 38a8e00908..9e7b2dfc83 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -57,6 +57,7 @@ #define EXCP_VFIQ 15 #define EXCP_SEMIHOST 16 /* semihosting call */ #define EXCP_NOCP 17 /* v7M NOCP UsageFault */ +#define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */ #define ARMV7M_EXCP_RESET 1 #define ARMV7M_EXCP_NMI 2 @@ -1356,9 +1357,27 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint32_t cur_el, bool secure); /* Interface between CPU and Interrupt controller. 
*/ +#ifndef CONFIG_USER_ONLY +bool armv7m_nvic_can_take_pending_exception(void *opaque); +#else +static inline bool armv7m_nvic_can_take_pending_exception(void *opaque) +{ + return true; +} +#endif void armv7m_nvic_set_pending(void *opaque, int irq); -int armv7m_nvic_acknowledge_irq(void *opaque); -void armv7m_nvic_complete_irq(void *opaque, int irq); +void armv7m_nvic_acknowledge_irq(void *opaque); +/** + * armv7m_nvic_complete_irq: complete specified interrupt or exception + * @opaque: the NVIC + * @irq: the exception number to complete + * + * Returns: -1 if the irq was not active + * 1 if completing this irq brought us back to base (no active irqs) + * 0 if there is still an irq active after this one was completed + * (Ignoring -1, this is the same as the RETTOBASE value before completion.) + */ +int armv7m_nvic_complete_irq(void *opaque, int irq); /* Interface for defining coprocessor registers. * Registers are defined in tables of arm_cp_reginfo structs diff --git a/target/arm/helper.c b/target/arm/helper.c index bcedb4a808..3f4211b572 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -6002,22 +6002,165 @@ static void switch_v7m_sp(CPUARMState *env, bool new_spsel) } } -static void do_v7m_exception_exit(CPUARMState *env) +static uint32_t arm_v7m_load_vector(ARMCPU *cpu) { + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + MemTxResult result; + hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4; + uint32_t addr; + + addr = address_space_ldl(cs->as, vec, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + /* Architecturally this should cause a HardFault setting HSFR.VECTTBL, + * which would then be immediately followed by our failing to load + * the entry vector for that HardFault, which is a Lockup case. + * Since we don't model Lockup, we just report this guest error + * via cpu_abort(). + */ + cpu_abort(cs, "Failed to read from exception vector table " + "entry %08x\n", (unsigned)vec); + } + return addr; +} + +static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr) +{ + /* Do the "take the exception" parts of exception entry, + * but not the pushing of state to the stack. This is + * similar to the pseudocode ExceptionTaken() function. + */ + CPUARMState *env = &cpu->env; + uint32_t addr; + + armv7m_nvic_acknowledge_irq(env->nvic); + switch_v7m_sp(env, 0); + /* Clear IT bits */ + env->condexec_bits = 0; + env->regs[14] = lr; + addr = arm_v7m_load_vector(cpu); + env->regs[15] = addr & 0xfffffffe; + env->thumb = addr & 1; +} + +static void v7m_push_stack(ARMCPU *cpu) +{ + /* Do the "set up stack frame" part of exception entry, + * similar to pseudocode PushStack(). + */ + CPUARMState *env = &cpu->env; + uint32_t xpsr = xpsr_read(env); + + /* Align stack pointer if the guest wants that */ + if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) { + env->regs[13] -= 4; + xpsr |= 0x200; + } + /* Switch to the handler mode. 
*/ + v7m_push(env, xpsr); + v7m_push(env, env->regs[15]); + v7m_push(env, env->regs[14]); + v7m_push(env, env->regs[12]); + v7m_push(env, env->regs[3]); + v7m_push(env, env->regs[2]); + v7m_push(env, env->regs[1]); + v7m_push(env, env->regs[0]); +} + +static void do_v7m_exception_exit(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; uint32_t type; uint32_t xpsr; - + bool ufault = false; + bool return_to_sp_process = false; + bool return_to_handler = false; + bool rettobase = false; + + /* We can only get here from an EXCP_EXCEPTION_EXIT, and + * arm_v7m_do_unassigned_access() enforces the architectural rule + * that jumps to magic addresses don't have magic behaviour unless + * we're in Handler mode (compare pseudocode BXWritePC()). + */ + assert(env->v7m.exception != 0); + + /* In the spec pseudocode ExceptionReturn() is called directly + * from BXWritePC() and gets the full target PC value including + * bit zero. In QEMU's implementation we treat it as a normal + * jump-to-register (which is then caught later on), and so split + * the target value up between env->regs[15] and env->thumb in + * gen_bx(). Reconstitute it. + */ type = env->regs[15]; + if (env->thumb) { + type |= 1; + } + + qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32 + " previous exception %d\n", + type, env->v7m.exception); + + if (extract32(type, 5, 23) != extract32(-1, 5, 23)) { + qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception " + "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n", type); + } + if (env->v7m.exception != ARMV7M_EXCP_NMI) { /* Auto-clear FAULTMASK on return from other than NMI */ env->daif &= ~PSTATE_F; } - if (env->v7m.exception != 0) { - armv7m_nvic_complete_irq(env->nvic, env->v7m.exception); + + switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) { + case -1: + /* attempt to exit an exception that isn't active */ + ufault = true; + break; + case 0: + /* still an irq active now */ + break; + case 1: + /* we returned to base exception level, no nesting. + * (In the pseudocode this is written using "NestedActivation != 1" + * where we have 'rettobase == false'.) + */ + rettobase = true; + break; + default: + g_assert_not_reached(); + } + + switch (type & 0xf) { + case 1: /* Return to Handler */ + return_to_handler = true; + break; + case 13: /* Return to Thread using Process stack */ + return_to_sp_process = true; + /* fall through */ + case 9: /* Return to Thread using Main stack */ + if (!rettobase && + !(env->v7m.ccr & R_V7M_CCR_NONBASETHRDENA_MASK)) { + ufault = true; + } + break; + default: + ufault = true; + } + + if (ufault) { + /* Bad exception return: instead of popping the exception + * stack, directly take a usage fault on the current stack. + */ + env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); + v7m_exception_taken(cpu, type | 0xf0000000); + qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " + "stackframe: failed exception return integrity check\n"); + return; } /* Switch to the target stack. */ - switch_v7m_sp(env, (type & 4) != 0); + switch_v7m_sp(env, return_to_sp_process); /* Pop registers. */ env->regs[0] = v7m_pop(env); env->regs[1] = v7m_pop(env); @@ -6041,11 +6184,24 @@ static void do_v7m_exception_exit(CPUARMState *env) /* Undo stack alignment. */ if (xpsr & 0x200) env->regs[13] |= 4; - /* ??? The exception return type specifies Thread/Handler mode. However - this is also implied by the xPSR value. Not sure what to do - if there is a mismatch. */ - /* ??? 
Likewise for mismatches between the CONTROL register and the stack - pointer. */ + + /* The restored xPSR exception field will be zero if we're + * resuming in Thread mode. If that doesn't match what the + * exception return type specified then this is a UsageFault. + */ + if (return_to_handler == (env->v7m.exception == 0)) { + /* Take an INVPC UsageFault by pushing the stack again. */ + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); + env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK; + v7m_push_stack(cpu); + v7m_exception_taken(cpu, type | 0xf0000000); + qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " + "failed exception return integrity check\n"); + return; + } + + /* Otherwise, we have a successful exception exit. */ + qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); } static void arm_log_exception(int idx) @@ -6063,37 +6219,11 @@ static void arm_log_exception(int idx) } } -static uint32_t arm_v7m_load_vector(ARMCPU *cpu) - -{ - CPUState *cs = CPU(cpu); - CPUARMState *env = &cpu->env; - MemTxResult result; - hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4; - uint32_t addr; - - addr = address_space_ldl(cs->as, vec, - MEMTXATTRS_UNSPECIFIED, &result); - if (result != MEMTX_OK) { - /* Architecturally this should cause a HardFault setting HSFR.VECTTBL, - * which would then be immediately followed by our failing to load - * the entry vector for that HardFault, which is a Lockup case. - * Since we don't model Lockup, we just report this guest error - * via cpu_abort(). - */ - cpu_abort(cs, "Failed to read from exception vector table " - "entry %08x\n", (unsigned)vec); - } - return addr; -} - void arm_v7m_cpu_do_interrupt(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; - uint32_t xpsr = xpsr_read(env); uint32_t lr; - uint32_t addr; arm_log_exception(cs->exception_index); @@ -6106,28 +6236,30 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) /* For exceptions we just mark as pending on the NVIC, and let that handle it. */ - /* TODO: Need to escalate if the current priority is higher than the - one we're raising. */ switch (cs->exception_index) { case EXCP_UDEF: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); env->v7m.cfsr |= R_V7M_CFSR_UNDEFINSTR_MASK; - return; + break; case EXCP_NOCP: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); env->v7m.cfsr |= R_V7M_CFSR_NOCP_MASK; - return; + break; + case EXCP_INVSTATE: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); + env->v7m.cfsr |= R_V7M_CFSR_INVSTATE_MASK; + break; case EXCP_SWI: /* The PC already points to the next instruction. */ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC); - return; + break; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: /* TODO: if we implemented the MPU registers, this is where we * should set the MMFAR, etc from exception.fsr and exception.vaddress. */ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM); - return; + break; case EXCP_BKPT: if (semihosting_enabled()) { int nr; @@ -6142,39 +6274,20 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) } } armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG); - return; + break; case EXCP_IRQ: - env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic); break; case EXCP_EXCEPTION_EXIT: - do_v7m_exception_exit(env); + do_v7m_exception_exit(cpu); return; default: cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); return; /* Never happens. Keep compiler happy. 
*/ } - /* Align stack pointer if the guest wants that */ - if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) { - env->regs[13] -= 4; - xpsr |= 0x200; - } - /* Switch to the handler mode. */ - v7m_push(env, xpsr); - v7m_push(env, env->regs[15]); - v7m_push(env, env->regs[14]); - v7m_push(env, env->regs[12]); - v7m_push(env, env->regs[3]); - v7m_push(env, env->regs[2]); - v7m_push(env, env->regs[1]); - v7m_push(env, env->regs[0]); - switch_v7m_sp(env, 0); - /* Clear IT bits */ - env->condexec_bits = 0; - env->regs[14] = lr; - addr = arm_v7m_load_vector(cpu); - env->regs[15] = addr & 0xfffffffe; - env->thumb = addr & 1; + v7m_push_stack(cpu); + v7m_exception_taken(cpu, lr); + qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception); } /* Function used to synchronize QEMU's AArch64 register set with AArch32 diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index e15eae6d41..24de30d92c 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -10933,6 +10933,10 @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn) return; } + if (!fp_access_check(s)) { + return; + } + /* Note that we convert the Vx register indexes into the * index within the vfp.regs[] array, so we can share the * helper with the AArch32 instructions. @@ -10997,6 +11001,10 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) return; } + if (!fp_access_check(s)) { + return; + } + tcg_rd_regno = tcg_const_i32(rd << 1); tcg_rn_regno = tcg_const_i32(rn << 1); tcg_rm_regno = tcg_const_i32(rm << 1); @@ -11060,6 +11068,10 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) return; } + if (!fp_access_check(s)) { + return; + } + tcg_rd_regno = tcg_const_i32(rd << 1); tcg_rn_regno = tcg_const_i32(rn << 1); diff --git a/target/arm/translate.c b/target/arm/translate.c index abc1f77ee4..b859f10755 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -7990,9 +7990,13 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) TCGv_i32 addr; TCGv_i64 tmp64; - /* M variants do not implement ARM mode. */ + /* M variants do not implement ARM mode; this must raise the INVSTATE + * UsageFault exception. + */ if (arm_dc_feature(s, ARM_FEATURE_M)) { - goto illegal_op; + gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(), + default_exception_el(s)); + return; } cond = insn >> 28; if (cond == 0xf){ diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 8df124f332..573f2aa988 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1417,6 +1417,8 @@ floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper); void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); +void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); +void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); /* you can call this signal handler from your SIGBUS and SIGSEGV signal handlers to inform the virtual CPU of exceptions. 
non zero diff --git a/target/i386/fpu_helper.c b/target/i386/fpu_helper.c index 66474ad98e..69ea33a5c2 100644 --- a/target/i386/fpu_helper.c +++ b/target/i386/fpu_helper.c @@ -1377,6 +1377,18 @@ void helper_fxrstor(CPUX86State *env, target_ulong ptr) } } +#if defined(CONFIG_USER_ONLY) +void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr) +{ + helper_fxsave(env, ptr); +} + +void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr) +{ + helper_fxrstor(env, ptr); +} +#endif + void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm) { uintptr_t ra = GETPC(); diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index 5b66d3325d..2a894eec65 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -671,7 +671,7 @@ static S390CPUModel *get_max_cpu_model(Error **errp) if (kvm_enabled()) { kvm_s390_get_host_cpu_model(&max_model, errp); } else { - /* TCG enulates a z900 */ + /* TCG emulates a z900 */ max_model.def = &s390_cpu_defs[0]; bitmap_copy(max_model.features, max_model.def->default_feat, S390_FEAT_MAX); diff --git a/tests/Makefile.include b/tests/Makefile.include index e60bb6ce58..3310c170a3 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -308,8 +308,7 @@ check-qtest-sparc-y = tests/prom-env-test$(EXESUF) check-qtest-sparc64-y = tests/endianness-test$(EXESUF) #check-qtest-sparc64-y += tests/m48t59-test$(EXESUF) #gcov-files-sparc64-y += hw/timer/m48t59.c -#Disabled for now, triggers a TCG bug on 32-bit hosts -#check-qtest-sparc64-y += tests/prom-env-test$(EXESUF) +check-qtest-sparc64-y += tests/prom-env-test$(EXESUF) check-qtest-arm-y = tests/tmp105-test$(EXESUF) check-qtest-arm-y += tests/ds1338-test$(EXESUF) diff --git a/tests/ide-test.c b/tests/ide-test.c index fb541f88b5..b57c2b1676 100644 --- a/tests/ide-test.c +++ b/tests/ide-test.c @@ -544,6 +544,7 @@ static void make_dirty(uint8_t device) guest_buf = guest_alloc(guest_malloc, len); buf = g_malloc(len); + memset(buf, rand() % 255 + 1, len); g_assert(guest_buf); g_assert(buf); diff --git a/tests/prom-env-test.c b/tests/prom-env-test.c index bd33bc353d..eac207b30e 100644 --- a/tests/prom-env-test.c +++ b/tests/prom-env-test.c @@ -76,7 +76,7 @@ static void add_tests(const char *machines[]) int main(int argc, char *argv[]) { const char *sparc_machines[] = { "SPARCbook", "Voyager", "SS-20", NULL }; - const char *sparc64_machines[] = { "sun4u", "sun4v", NULL }; + const char *sparc64_machines[] = { "sun4u", NULL }; const char *ppc_machines[] = { "mac99", "g3beige", NULL }; const char *ppc64_machines[] = { "mac99", "g3beige", "pseries", NULL }; const char *arch = qtest_get_arch(); |
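The SOL_IP/SOL_IPV6 cases added to host_to_target_cmsg() translate the MSG_ERRQUEUE payload (a struct sock_extended_err followed by the offending address) that the kernel queues once IP_RECVERR or IPV6_RECVERR is enabled. As a rough sketch of the guest-side pattern those cases have to service, here is a small C program that enables IPV6_RECVERR, provokes a local port-unreachable error and walks the returned control messages; the destination port and the short wait are arbitrary choices for the example, and whether an error is actually queued depends on the local network stack.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/errqueue.h>     /* struct sock_extended_err, SO_EE_OFFENDER() */

#ifndef IPV6_RECVERR
#define IPV6_RECVERR 25         /* Linux UAPI value, in case libc lacks it */
#endif

int main(void)
{
    int s = socket(AF_INET6, SOCK_DGRAM, 0);
    int on = 1;
    struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
                                .sin6_port = htons(33434) };
    char payload = 0;
    char buf[64], ctrl[512];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                          .msg_control = ctrl,
                          .msg_controllen = sizeof(ctrl) };
    struct cmsghdr *cmsg;

    if (s < 0) {
        perror("socket");
        return 1;
    }
    setsockopt(s, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));

    /* Send to a loopback port that is most likely closed, then give the
     * stack a moment to queue the resulting port-unreachable error.
     */
    dst.sin6_addr = in6addr_loopback;
    sendto(s, &payload, sizeof(payload), 0,
           (struct sockaddr *)&dst, sizeof(dst));
    usleep(100 * 1000);

    if (recvmsg(s, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0) {
        perror("recvmsg(MSG_ERRQUEUE)");   /* nothing queued (yet) */
        return 0;
    }
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == IPPROTO_IPV6 &&
            cmsg->cmsg_type == IPV6_RECVERR) {
            struct sock_extended_err *ee =
                (struct sock_extended_err *)CMSG_DATA(cmsg);
            struct sockaddr_in6 *offender =
                (struct sockaddr_in6 *)SO_EE_OFFENDER(ee);

            printf("queued error: %s (origin %u type %u code %u), "
                   "offender scope %u\n",
                   strerror(ee->ee_errno), (unsigned)ee->ee_origin,
                   (unsigned)ee->ee_type, (unsigned)ee->ee_code,
                   offender->sin6_scope_id);
        }
    }
    close(s);
    return 0;
}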
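The new IPV6_PKTINFO branch in do_setsockopt() copies a struct in6_pktinfo in from the guest and byte-swaps ipi6_ifindex before calling the host setsockopt(). A hypothetical guest-side use of that "sticky" option looks roughly like the following; the interface name "eth0" is only a placeholder, and struct in6_pktinfo needs _GNU_SOURCE with glibc.

#define _GNU_SOURCE             /* struct in6_pktinfo in <netinet/in.h> */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>             /* if_nametoindex() */
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
    int s = socket(AF_INET6, SOCK_DGRAM, 0);
    struct in6_pktinfo pki;

    if (s < 0) {
        perror("socket");
        return 1;
    }
    memset(&pki, 0, sizeof(pki));
    /* "eth0" is only an example; 0 is returned if it does not exist. */
    pki.ipi6_ifindex = if_nametoindex("eth0");

    /* Pin the outgoing interface for every datagram sent on this socket.
     * The ifindex in this struct is the field QEMU has to swap between
     * guest and host byte order.
     */
    if (setsockopt(s, IPPROTO_IPV6, IPV6_PKTINFO, &pki, sizeof(pki)) < 0) {
        perror("setsockopt(IPV6_PKTINFO)");
    }
    close(s);
    return 0;
}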
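The SOL_ICMPV6/ICMPV6_FILTER case swaps the eight 32-bit words of the ICMPv6 type filter before passing it on. The guest code it enables is the classic ping6-style "block everything, then pass echo replies" setup sketched below; the raw ICMPv6 socket requires CAP_NET_RAW, so an unprivileged run will only print the socket() error.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>      /* struct icmp6_filter and the filter macros */

int main(void)
{
    int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
    struct icmp6_filter filt;

    if (s < 0) {
        perror("socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6)");
        return 1;
    }

    /* Start from "drop all ICMPv6 types", then let echo replies through.
     * The filter is just eight 32-bit words (one bit per ICMPv6 type),
     * which is why the new setsockopt wrapper swaps it word by word.
     */
    ICMP6_FILTER_SETBLOCKALL(&filt);
    ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);

    if (setsockopt(s, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt)) < 0) {
        perror("setsockopt(ICMP6_FILTER)");
    }
    close(s);
    return 0;
}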
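The reworked TARGET_NR_mincore path now returns -TARGET_ENOMEM when the probed buffer itself cannot be locked and -TARGET_EFAULT only for a bad result vector, which matches the host syscall's behaviour. For reference, a minimal host-side mincore(2) call looks like this, with one status byte per page of the probed range; the four-page mapping is just an example size.

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t len = 4 * page;
    unsigned char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    unsigned char vec[4];       /* one status byte per page */

    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    buf[0] = 1;                 /* touch only the first page */

    if (mincore(buf, len, vec) < 0) {
        perror("mincore");      /* ENOMEM if part of the range is unmapped */
        return 1;
    }
    for (size_t i = 0; i < len / page; i++) {
        printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");
    }
    munmap(buf, len);
    return 0;
}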
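do_v7m_exception_exit() now decodes the low nibble of the EXC_RETURN magic value: 0x1 returns to Handler mode, 0x9 to Thread mode on the main stack, 0xD to Thread mode on the process stack, and anything else feeds the INVPC UsageFault path. The standalone decoder below mirrors that switch; the struct and function names are invented for the sketch and are not QEMU's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct exc_return_info {
    bool valid;                 /* false => INVPC UsageFault */
    bool to_handler;            /* return to Handler mode */
    bool use_process_stack;     /* pop the frame from PSP rather than MSP */
};

static struct exc_return_info decode_exc_return(uint32_t exc_return)
{
    struct exc_return_info info = { .valid = true };

    /* Architecturally, bits [31:5] should read as all-ones; the patch only
     * logs a guest error for other values, so this sketch ignores them and
     * decodes the low nibble exactly as the switch in
     * do_v7m_exception_exit() does.
     */
    switch (exc_return & 0xf) {
    case 0x1:                   /* return to Handler mode, MSP */
        info.to_handler = true;
        break;
    case 0x9:                   /* return to Thread mode, MSP */
        break;
    case 0xd:                   /* return to Thread mode, PSP */
        info.use_process_stack = true;
        break;
    default:                    /* reserved encoding */
        info.valid = false;
        break;
    }
    return info;
}

int main(void)
{
    static const uint32_t samples[] = { 0xfffffff1, 0xfffffff9, 0xfffffffd,
                                        0xfffffff0 };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        struct exc_return_info r = decode_exc_return(samples[i]);
        printf("0x%08x: valid=%d handler=%d psp=%d\n",
               samples[i], r.valid, r.to_handler, r.use_process_stack);
    }
    return 0;
}

In the patch, reserved encodings, attempts to complete an exception that is not active, and Thread-mode returns taken while other exceptions are still active without CCR.NONBASETHRDENA all funnel into the same "failed exception return integrity check" UsageFault.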
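v7m_push_stack() factors exception entry's eight-word frame (r0-r3, r12, LR, return address, xPSR) out of arm_v7m_cpu_do_interrupt(), including the optional CCR.STKALIGN padding recorded in bit 9 of the saved xPSR. Below is a self-contained toy model of that frame layout; the toy_cpu structure and constants are made up for illustration and deliberately ignore everything else CPUARMState tracks.

#include <stdint.h>
#include <stdio.h>

#define CCR_STKALIGN        (1u << 9)   /* CCR.STKALIGN */
#define XPSR_FRAME_ALIGNED  (1u << 9)   /* padding flag in the pushed xPSR */

/* Toy register file, not QEMU's CPUARMState. */
struct toy_cpu {
    uint32_t r[16];     /* r[13] = SP, r[14] = LR, r[15] = PC */
    uint32_t xpsr;
    uint32_t ccr;
};

/* Build the v7-M exception entry frame and return the new stack pointer.
 * frame[0] ends up at the lowest address, matching the push order in
 * v7m_push_stack(): xPSR, PC, LR, r12, r3, r2, r1, r0.
 */
static uint32_t push_exception_frame(const struct toy_cpu *cpu,
                                     uint32_t frame[8])
{
    uint32_t sp = cpu->r[13];
    uint32_t xpsr = cpu->xpsr;

    /* If SP is only 4-aligned and CCR.STKALIGN is set, skip one word and
     * note that in bit 9 of the saved xPSR, as the patch does with 0x200.
     */
    if ((sp & 4) && (cpu->ccr & CCR_STKALIGN)) {
        sp -= 4;
        xpsr |= XPSR_FRAME_ALIGNED;
    }
    sp -= 8 * 4;

    frame[0] = cpu->r[0];
    frame[1] = cpu->r[1];
    frame[2] = cpu->r[2];
    frame[3] = cpu->r[3];
    frame[4] = cpu->r[12];
    frame[5] = cpu->r[14];      /* LR */
    frame[6] = cpu->r[15];      /* return address */
    frame[7] = xpsr;
    return sp;
}

int main(void)
{
    struct toy_cpu cpu = { .r = { [13] = 0x2000fffc }, .xpsr = 0x01000000,
                           .ccr = CCR_STKALIGN };
    uint32_t frame[8];
    uint32_t new_sp = push_exception_frame(&cpu, frame);

    printf("old sp 0x%08x -> new sp 0x%08x, saved xPSR 0x%08x\n",
           cpu.r[13], new_sp, frame[7]);
    return 0;
}

Keeping the frame push separate from v7m_exception_taken() is what lets the INVPC path in do_v7m_exception_exit() push a fresh frame on the current stack before re-entering an exception when the return integrity check fails.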