-rw-r--r--  block/throttle-groups.c          |    9
-rw-r--r--  fpu/softfloat.c                  |   33
-rw-r--r--  hmp.c                            |    2
-rw-r--r--  include/block/throttle-groups.h  |    5
-rw-r--r--  include/fpu/softfloat-macros.h   |    2
-rw-r--r--  include/migration/register.h     |    2
-rw-r--r--  migration/channel.c              |   11
-rw-r--r--  migration/migration.c            |   51
-rw-r--r--  migration/migration.h            |   12
-rw-r--r--  migration/postcopy-ram.c         |    1
-rw-r--r--  migration/ram.c                  |   34
-rw-r--r--  migration/ram.h                  |    4
-rw-r--r--  migration/rdma.c                 |    1
-rw-r--r--  migration/savevm.c               |    5
-rw-r--r--  qapi/migration.json              |    5
-rwxr-xr-x  scripts/archive-source.sh        |    2
-rw-r--r--  target/xtensa/Makefile.objs      |    5
-rw-r--r--  target/xtensa/cpu.h              |   32
-rw-r--r--  target/xtensa/dbg_helper.c       |  129
-rw-r--r--  target/xtensa/exc_helper.c       |  258
-rw-r--r--  target/xtensa/fpu_helper.c       |  166
-rw-r--r--  target/xtensa/helper.c           |  635
-rw-r--r--  target/xtensa/helper.h           |    3
-rw-r--r--  target/xtensa/mmu_helper.c       |  818
-rw-r--r--  target/xtensa/op_helper.c        |  894
-rw-r--r--  target/xtensa/overlay_tool.h     |    1
-rw-r--r--  target/xtensa/translate.c        |   53
-rw-r--r--  target/xtensa/win_helper.c       |  222
-rw-r--r--  tests/Makefile.include           |  137
-rw-r--r--  tests/fp/Makefile                |    3
m---------  tests/fp/berkeley-testfloat-3    |    0
-rw-r--r--  tests/fp/fp-bench.c              |   15
-rw-r--r--  tests/fp/fp-test.c               |    4
-rw-r--r--  tests/fp/platform.h              |    1
-rwxr-xr-x  tests/qemu-iotests/238           |   47
-rw-r--r--  tests/qemu-iotests/238.out       |    6
-rw-r--r--  tests/qemu-iotests/group         |    1
-rw-r--r--  tests/test-vmstate.c             |   50
-rw-r--r--  util/main-loop.c                 |    8
-rw-r--r--  util/qemu-coroutine-sleep.c      |   27
40 files changed, 2084 insertions(+), 1610 deletions(-)
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 5d8213a443..a5a2037924 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -415,6 +415,9 @@ static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
}
g_free(data);
+
+ atomic_dec(&tgm->restart_pending);
+ aio_wait_kick();
}
static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
@@ -430,6 +433,8 @@ static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write
* be no timer pending on this tgm at this point */
assert(!timer_pending(tgm->throttle_timers.timers[is_write]));
+ atomic_inc(&tgm->restart_pending);
+
co = qemu_coroutine_create(throttle_group_restart_queue_entry, rd);
aio_co_enter(tgm->aio_context, co);
}
@@ -538,6 +543,7 @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
tgm->throttle_state = ts;
tgm->aio_context = ctx;
+ atomic_set(&tgm->restart_pending, 0);
qemu_mutex_lock(&tg->lock);
/* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
@@ -584,6 +590,9 @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
return;
}
+ /* Wait for throttle_group_restart_queue_entry() coroutines to finish */
+ AIO_WAIT_WHILE(tgm->aio_context, atomic_read(&tgm->restart_pending) > 0);
+
qemu_mutex_lock(&tg->lock);
for (i = 0; i < 2; i++) {
assert(tgm->pending_reqs[i] == 0);
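Note: the throttle-groups hunks above pair an atomic in-flight counter (restart_pending) with AIO_WAIT_WHILE() so that unregistering a member waits for all restart coroutines to drain. A minimal standalone sketch of the same drain pattern, using C11 atomics and POSIX threads as stand-ins for QEMU's atomic_*() helpers, coroutines and AIO_WAIT_WHILE() (the names here are hypothetical, not QEMU API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <pthread.h>
    #include <unistd.h>

    /* In-flight work counter, analogous to tgm->restart_pending. */
    static atomic_uint restart_pending;

    static void *restart_entry(void *opaque)
    {
        usleep(1000);                           /* the queued restart work */
        /* Last step, as in throttle_group_restart_queue_entry(): drop the
         * counter so the waiter can make progress. */
        atomic_fetch_sub(&restart_pending, 1);
        return NULL;
    }

    static void restart_queue(pthread_t *thr)
    {
        /* Count the work *before* starting it, as in
         * throttle_group_restart_queue(). */
        atomic_fetch_add(&restart_pending, 1);
        pthread_create(thr, NULL, restart_entry, NULL);
    }

    int main(void)
    {
        pthread_t thr[4];

        for (int i = 0; i < 4; i++) {
            restart_queue(&thr[i]);
        }
        /* Stand-in for AIO_WAIT_WHILE(ctx, atomic_read(&restart_pending) > 0). */
        while (atomic_load(&restart_pending) > 0) {
            usleep(100);
        }
        for (int i = 0; i < 4; i++) {
            pthread_join(thr[i], NULL);
        }
        printf("all restart coroutines drained\n");
        return 0;
    }
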
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 59eac97d10..9132d7a0b0 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1542,6 +1542,8 @@ soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
return float64_round_pack_canonical(pr, status);
}
+static bool force_soft_fma;
+
float32 QEMU_FLATTEN
float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
{
@@ -1562,6 +1564,11 @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
goto soft;
}
+
+ if (unlikely(force_soft_fma)) {
+ goto soft;
+ }
+
/*
* When (a || b) == 0, there's no need to check for under/over flow,
* since we know the addend is (normal || 0) and the product is 0.
@@ -1623,6 +1630,11 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
goto soft;
}
+
+ if (unlikely(force_soft_fma)) {
+ goto soft;
+ }
+
/*
* When (a || b) == 0, there's no need to check for under/over flow,
* since we know the addend is (normal || 0) and the product is 0.
@@ -7974,3 +7986,24 @@ float128 float128_scalbn(float128 a, int n, float_status *status)
, status);
}
+
+static void __attribute__((constructor)) softfloat_init(void)
+{
+ union_float64 ua, ub, uc, ur;
+
+ if (QEMU_NO_HARDFLOAT) {
+ return;
+ }
+ /*
+ * Test that the host's FMA is not obviously broken. For example,
+ * glibc < 2.23 can perform an incorrect FMA on certain hosts; see
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=13304
+ */
+ ua.s = 0x0020000000000001ULL;
+ ub.s = 0x3ca0000000000000ULL;
+ uc.s = 0x0020000000000000ULL;
+ ur.h = fma(ua.h, ub.h, uc.h);
+ if (ur.s != 0x0020000000000001ULL) {
+ force_soft_fma = true;
+ }
+}
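Note: the constructor added above probes the host libm once at startup and, if the probe fails, forces the softfloat FMA path. A standalone reproduction of the same probe, assuming a C99 host with fma() from <math.h> (link with -lm); the union mirrors union_float64's raw/double view:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    union f64 {
        uint64_t s;   /* raw bit pattern */
        double   h;   /* host double     */
    };

    int main(void)
    {
        union f64 ua, ub, uc, ur;

        /* Inputs that trip the glibc < 2.23 fma() bug (sourceware bug 13304);
         * the correctly rounded result is 0x0020000000000001. */
        ua.s = 0x0020000000000001ULL;
        ub.s = 0x3ca0000000000000ULL;
        uc.s = 0x0020000000000000ULL;
        ur.h = fma(ua.h, ub.h, uc.h);

        printf("fma result: 0x%016llx (%s)\n",
               (unsigned long long)ur.s,
               ur.s == 0x0020000000000001ULL ? "ok" : "broken, use softfloat");
        return 0;
    }
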
diff --git a/hmp.c b/hmp.c
index 8da5fd8760..b2a2b1f84e 100644
--- a/hmp.c
+++ b/hmp.c
@@ -236,6 +236,8 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
info->ram->page_size >> 10);
monitor_printf(mon, "multifd bytes: %" PRIu64 " kbytes\n",
info->ram->multifd_bytes >> 10);
+ monitor_printf(mon, "pages-per-second: %" PRIu64 "\n",
+ info->ram->pages_per_second);
if (info->ram->dirty_pages_rate) {
monitor_printf(mon, "dirty pages rate: %" PRIu64 " pages\n",
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
index e2fd0513c4..712a8e64b4 100644
--- a/include/block/throttle-groups.h
+++ b/include/block/throttle-groups.h
@@ -43,6 +43,11 @@ typedef struct ThrottleGroupMember {
*/
unsigned int io_limits_disabled;
+ /* Number of pending throttle_group_restart_queue_entry() coroutines.
+ * Accessed with atomic operations.
+ */
+ unsigned int restart_pending;
+
/* The following fields are protected by the ThrottleGroup lock.
* See the ThrottleGroup documentation for details.
* throttle_state tells us if I/O limits are configured. */
diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
index b1d772e6d4..bd5b6418e3 100644
--- a/include/fpu/softfloat-macros.h
+++ b/include/fpu/softfloat-macros.h
@@ -641,7 +641,7 @@ static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
uint64_t q;
asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
return q;
-#elif defined(__s390x__)
+#elif defined(__s390x__) && !defined(__clang__)
/* Need to use a TImode type to get an even register pair for DLGR. */
unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
asm("dlgr %0, %1" : "+r"(n) : "r"(d));
diff --git a/include/migration/register.h b/include/migration/register.h
index d287f4c317..3d0b9833c6 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -72,7 +72,7 @@ int register_savevm_live(DeviceState *dev,
const char *idstr,
int instance_id,
int version_id,
- SaveVMHandlers *ops,
+ const SaveVMHandlers *ops,
void *opaque);
void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
diff --git a/migration/channel.c b/migration/channel.c
index 33e0e9b82f..20e4c8e2dc 100644
--- a/migration/channel.c
+++ b/migration/channel.c
@@ -30,6 +30,7 @@
void migration_channel_process_incoming(QIOChannel *ioc)
{
MigrationState *s = migrate_get_current();
+ Error *local_err = NULL;
trace_migration_set_incoming_channel(
ioc, object_get_typename(OBJECT(ioc)));
@@ -38,13 +39,13 @@ void migration_channel_process_incoming(QIOChannel *ioc)
*s->parameters.tls_creds &&
!object_dynamic_cast(OBJECT(ioc),
TYPE_QIO_CHANNEL_TLS)) {
- Error *local_err = NULL;
migration_tls_channel_process_incoming(s, ioc, &local_err);
- if (local_err) {
- error_report_err(local_err);
- }
} else {
- migration_ioc_process_incoming(ioc);
+ migration_ioc_process_incoming(ioc, &local_err);
+ }
+
+ if (local_err) {
+ error_report_err(local_err);
}
}
diff --git a/migration/migration.c b/migration/migration.c
index ffc4d9e556..37e06b76dc 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -438,15 +438,13 @@ static void process_incoming_migration_co(void *opaque)
/* Make sure all file formats flush their mutable metadata */
bdrv_invalidate_cache_all(&local_err);
if (local_err) {
- migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
- MIGRATION_STATUS_FAILED);
error_report_err(local_err);
- exit(EXIT_FAILURE);
+ goto fail;
}
if (colo_init_ram_cache() < 0) {
error_report("Init ram cache failed");
- exit(EXIT_FAILURE);
+ goto fail;
}
qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
@@ -461,20 +459,22 @@ static void process_incoming_migration_co(void *opaque)
}
if (ret < 0) {
- Error *local_err = NULL;
-
- migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
- MIGRATION_STATUS_FAILED);
error_report("load of migration failed: %s", strerror(-ret));
- qemu_fclose(mis->from_src_file);
- if (multifd_load_cleanup(&local_err) != 0) {
- error_report_err(local_err);
- }
- exit(EXIT_FAILURE);
+ goto fail;
}
mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
qemu_bh_schedule(mis->bh);
mis->migration_incoming_co = NULL;
+ return;
+fail:
+ local_err = NULL;
+ migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
+ MIGRATION_STATUS_FAILED);
+ qemu_fclose(mis->from_src_file);
+ if (multifd_load_cleanup(&local_err) != 0) {
+ error_report_err(local_err);
+ }
+ exit(EXIT_FAILURE);
}
static void migration_incoming_setup(QEMUFile *f)
@@ -541,7 +541,7 @@ void migration_fd_process_incoming(QEMUFile *f)
migration_incoming_process();
}
-void migration_ioc_process_incoming(QIOChannel *ioc)
+void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
MigrationIncomingState *mis = migration_incoming_get_current();
bool start_migration;
@@ -563,9 +563,14 @@ void migration_ioc_process_incoming(QIOChannel *ioc)
*/
start_migration = !migrate_use_multifd();
} else {
+ Error *local_err = NULL;
/* Multiple connections */
assert(migrate_use_multifd());
- start_migration = multifd_recv_new_channel(ioc);
+ start_migration = multifd_recv_new_channel(ioc, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
}
if (start_migration) {
@@ -777,6 +782,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
info->ram->postcopy_requests = ram_counters.postcopy_requests;
info->ram->page_size = qemu_target_page_size();
info->ram->multifd_bytes = ram_counters.multifd_bytes;
+ info->ram->pages_per_second = s->pages_per_second;
if (migrate_use_xbzrle()) {
info->has_xbzrle_cache = true;
@@ -1381,7 +1387,6 @@ static void migrate_fd_cleanup(void *opaque)
qemu_savevm_state_cleanup();
if (s->to_dst_file) {
- Error *local_err = NULL;
QEMUFile *tmp;
trace_migrate_fd_cleanup();
@@ -1392,9 +1397,7 @@ static void migrate_fd_cleanup(void *opaque)
}
qemu_mutex_lock_iothread();
- if (multifd_save_cleanup(&local_err) != 0) {
- error_report_err(local_err);
- }
+ multifd_save_cleanup();
qemu_mutex_lock(&s->qemu_file_lock);
tmp = s->to_dst_file;
s->to_dst_file = NULL;
@@ -1563,6 +1566,7 @@ void migrate_init(MigrationState *s)
s->rp_state.from_dst_file = NULL;
s->rp_state.error = false;
s->mbps = 0.0;
+ s->pages_per_second = 0.0;
s->downtime = 0;
s->expected_downtime = 0;
s->setup_time = 0;
@@ -2881,7 +2885,7 @@ static void migration_calculate_complete(MigrationState *s)
static void migration_update_counters(MigrationState *s,
int64_t current_time)
{
- uint64_t transferred, time_spent;
+ uint64_t transferred, transferred_pages, time_spent;
uint64_t current_bytes; /* bytes transferred since the beginning */
double bandwidth;
@@ -2898,6 +2902,11 @@ static void migration_update_counters(MigrationState *s,
s->mbps = (((double) transferred * 8.0) /
((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
+ transferred_pages = ram_get_total_transferred_pages() -
+ s->iteration_initial_pages;
+ s->pages_per_second = (double) transferred_pages /
+ (((double) time_spent / 1000.0));
+
/*
* if we haven't sent anything, we don't want to
* recalculate. 10000 is a small enough number for our purposes
@@ -2910,6 +2919,7 @@ static void migration_update_counters(MigrationState *s,
s->iteration_start_time = current_time;
s->iteration_initial_bytes = current_bytes;
+ s->iteration_initial_pages = ram_get_total_transferred_pages();
trace_migrate_transferred(transferred, time_spent,
bandwidth, s->threshold_size);
@@ -3314,6 +3324,7 @@ static void migration_instance_init(Object *obj)
ms->state = MIGRATION_STATUS_NONE;
ms->mbps = -1;
+ ms->pages_per_second = -1;
qemu_sem_init(&ms->pause_sem, 0);
qemu_mutex_init(&ms->error_mutex);
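Note: the counters hunk above derives pages-per-second from the pages transferred during the current iteration divided by the elapsed time, which QEMU tracks in milliseconds. A worked sketch of that arithmetic (function and variable names are hypothetical; QEMU feeds it ram_get_total_transferred_pages() and its millisecond iteration clock):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * One update interval:
     *   transferred_pages = total_pages_now - pages_at_iteration_start
     *   pages_per_second  = transferred_pages / (time_spent_ms / 1000.0)
     */
    static double update_pages_per_second(uint64_t total_pages_now,
                                          uint64_t pages_at_iteration_start,
                                          uint64_t time_spent_ms)
    {
        uint64_t transferred_pages = total_pages_now - pages_at_iteration_start;

        return (double)transferred_pages / ((double)time_spent_ms / 1000.0);
    }

    int main(void)
    {
        /* e.g. 262144 pages (1 GiB of 4 KiB pages) moved in 2500 ms */
        printf("%.1f pages/s\n",
               update_pages_per_second(262144 + 1000, 1000, 2500));
        return 0;
    }
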
diff --git a/migration/migration.h b/migration/migration.h
index e413d4d8b6..dcd05d9f87 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -126,7 +126,13 @@ struct MigrationState
*/
QemuSemaphore rate_limit_sem;
- /* bytes already send at the beggining of current interation */
+ /* pages already sent at the beginning of current iteration */
+ uint64_t iteration_initial_pages;
+
+ /* pages transferred per second */
+ double pages_per_second;
+
+ /* bytes already sent at the beginning of current iteration */
uint64_t iteration_initial_bytes;
/* time at the start of current iteration */
int64_t iteration_start_time;
@@ -229,7 +235,7 @@ struct MigrationState
void migrate_set_state(int *state, int old_state, int new_state);
void migration_fd_process_incoming(QEMUFile *f);
-void migration_ioc_process_incoming(QIOChannel *ioc);
+void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);
bool migration_has_all_channels(void);
@@ -271,6 +277,8 @@ bool migrate_use_block_incremental(void);
int migrate_max_cpu_throttle(void);
bool migrate_use_return_path(void);
+uint64_t ram_get_total_transferred_pages(void);
+
bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index e5c02a32c5..fa09dba534 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -1117,6 +1117,7 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
/* Mark so that we get notified of accesses to unwritten areas */
if (qemu_ram_foreach_migratable_block(ram_block_enable_notify, mis)) {
+ error_report("ram_block_enable_notify failed");
return -1;
}
diff --git a/migration/ram.c b/migration/ram.c
index 1849979fed..59191c1ed2 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -917,13 +917,12 @@ static void multifd_send_terminate_threads(Error *err)
}
}
-int multifd_save_cleanup(Error **errp)
+void multifd_save_cleanup(void)
{
int i;
- int ret = 0;
if (!migrate_use_multifd()) {
- return 0;
+ return;
}
multifd_send_terminate_threads(NULL);
for (i = 0; i < migrate_multifd_channels(); i++) {
@@ -953,7 +952,6 @@ int multifd_save_cleanup(Error **errp)
multifd_send_state->pages = NULL;
g_free(multifd_send_state);
multifd_send_state = NULL;
- return ret;
}
static void multifd_send_sync_main(void)
@@ -1071,9 +1069,8 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
Error *local_err = NULL;
if (qio_task_propagate_error(task, &local_err)) {
- if (multifd_save_cleanup(&local_err) != 0) {
- migrate_set_error(migrate_get_current(), local_err);
- }
+ migrate_set_error(migrate_get_current(), local_err);
+ multifd_save_cleanup();
} else {
p->c = QIO_CHANNEL(sioc);
qio_channel_set_delay(p->c, false);
@@ -1322,8 +1319,13 @@ bool multifd_recv_all_channels_created(void)
return thread_count == atomic_read(&multifd_recv_state->count);
}
-/* Return true if multifd is ready for the migration, otherwise false */
-bool multifd_recv_new_channel(QIOChannel *ioc)
+/*
+ * Try to receive all multifd channels to get ready for the migration.
+ * - Return true and do not set @errp when correctly receiving all channels;
+ * - Return false and do not set @errp when correctly receiving the current one;
+ * - Return false and set @errp when failing to receive the current channel.
+ */
+bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
{
MultiFDRecvParams *p;
Error *local_err = NULL;
@@ -1332,6 +1334,10 @@ bool multifd_recv_new_channel(QIOChannel *ioc)
id = multifd_recv_initial_packet(ioc, &local_err);
if (id < 0) {
multifd_recv_terminate_threads(local_err);
+ error_propagate_prepend(errp, local_err,
+ "failed to receive packet"
+ " via multifd channel %d: ",
+ atomic_read(&multifd_recv_state->count));
return false;
}
@@ -1340,6 +1346,7 @@ bool multifd_recv_new_channel(QIOChannel *ioc)
error_setg(&local_err, "multifd: received id '%d' already setup'",
id);
multifd_recv_terminate_threads(local_err);
+ error_propagate(errp, local_err);
return false;
}
p->c = ioc;
@@ -1351,7 +1358,8 @@ bool multifd_recv_new_channel(QIOChannel *ioc)
qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
QEMU_THREAD_JOINABLE);
atomic_inc(&multifd_recv_state->count);
- return multifd_recv_state->count == migrate_multifd_channels();
+ return atomic_read(&multifd_recv_state->count) ==
+ migrate_multifd_channels();
}
/**
@@ -1593,6 +1601,12 @@ uint64_t ram_pagesize_summary(void)
return summary;
}
+uint64_t ram_get_total_transferred_pages(void)
+{
+ return ram_counters.normal + ram_counters.duplicate +
+ compression_counters.pages + xbzrle_counters.pages;
+}
+
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
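Note: the new multifd_recv_new_channel() signature has a three-outcome contract: true with @errp unset means all channels are ready, false with @errp unset means the current channel was accepted and more are expected, and false with @errp set means the current channel failed. A small standalone sketch of that contract; the Error type and error_setg_() below are simplified stand-ins, not QEMU's qapi/error.h API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Error {
        char msg[128];                       /* simplified Error object */
    } Error;

    static void error_setg_(Error **errp, const char *msg)
    {
        if (errp) {
            *errp = malloc(sizeof(**errp));
            snprintf((*errp)->msg, sizeof((*errp)->msg), "%s", msg);
        }
    }

    enum { NCHANNELS = 4 };
    static int channels_ready;

    /* Same contract as multifd_recv_new_channel(). */
    static bool recv_new_channel(int channel_ok, Error **errp)
    {
        if (!channel_ok) {
            error_setg_(errp, "failed to receive packet via multifd channel");
            return false;
        }
        channels_ready++;
        return channels_ready == NCHANNELS;
    }

    int main(void)
    {
        for (int i = 0; i < NCHANNELS; i++) {
            Error *local_err = NULL;
            bool start = recv_new_channel(1, &local_err);

            if (local_err) {
                fprintf(stderr, "%s\n", local_err->msg);
                free(local_err);
                return 1;
            }
            if (start) {
                printf("all %d channels ready, start migration\n", NCHANNELS);
            }
        }
        return 0;
    }
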
diff --git a/migration/ram.h b/migration/ram.h
index 83ff1bc11a..936177b3e9 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -43,11 +43,11 @@ uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_total(void);
int multifd_save_setup(void);
-int multifd_save_cleanup(Error **errp);
+void multifd_save_cleanup(void);
int multifd_load_setup(void);
int multifd_load_cleanup(Error **errp);
bool multifd_recv_all_channels_created(void);
-bool multifd_recv_new_channel(QIOChannel *ioc);
+bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp);
uint64_t ram_pagesize_summary(void);
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
diff --git a/migration/rdma.c b/migration/rdma.c
index 9b2e7e10aa..54a3c11540 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -2321,6 +2321,7 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
rdma->connected = false;
}
+ qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
g_free(rdma->dest_blocks);
rdma->dest_blocks = NULL;
diff --git a/migration/savevm.c b/migration/savevm.c
index 9e45fb4f3f..322660438d 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -303,7 +303,7 @@ typedef struct SaveStateEntry {
int section_id;
/* section id read from the stream */
int load_section_id;
- SaveVMHandlers *ops;
+ const SaveVMHandlers *ops;
const VMStateDescription *vmsd;
void *opaque;
CompatEntry *compat;
@@ -614,7 +614,7 @@ int register_savevm_live(DeviceState *dev,
const char *idstr,
int instance_id,
int version_id,
- SaveVMHandlers *ops,
+ const SaveVMHandlers *ops,
void *opaque)
{
SaveStateEntry *se;
@@ -1729,6 +1729,7 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
*/
if (migrate_postcopy_ram()) {
if (postcopy_ram_enable_notify(mis)) {
+ postcopy_ram_incoming_cleanup(mis);
return -1;
}
}
diff --git a/qapi/migration.json b/qapi/migration.json
index 31b589ec26..7a795ecc16 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -41,6 +41,9 @@
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
+# @pages-per-second: the number of memory pages transferred per second
+# (Since 4.0)
+#
# Since: 0.14.0
##
{ 'struct': 'MigrationStats',
@@ -49,7 +52,7 @@
'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
'mbps' : 'number', 'dirty-sync-count' : 'int',
'postcopy-requests' : 'int', 'page-size' : 'int',
- 'multifd-bytes' : 'uint64' } }
+ 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64' } }
##
# @XBZRLECacheStats:
diff --git a/scripts/archive-source.sh b/scripts/archive-source.sh
index 62bd22578b..6eed2a29bd 100755
--- a/scripts/archive-source.sh
+++ b/scripts/archive-source.sh
@@ -26,7 +26,7 @@ vroot_dir="${tar_file}.vroot"
# independent of what the developer currently has initialized
# in their checkout, because the build environment is completely
# different to the host OS.
-submodules="dtc ui/keycodemapdb"
+submodules="dtc ui/keycodemapdb tests/fp/berkeley-softfloat-3 tests/fp/berkeley-testfloat-3"
trap "status=$?; rm -rf \"$list_file\" \"$vroot_dir\"; exit \$status" 0 1 2 3 15
diff --git a/target/xtensa/Makefile.objs b/target/xtensa/Makefile.objs
index f63352cc04..808f7e3fce 100644
--- a/target/xtensa/Makefile.objs
+++ b/target/xtensa/Makefile.objs
@@ -7,4 +7,9 @@ obj-y += core-test_kc705_be.o
obj-$(CONFIG_SOFTMMU) += monitor.o xtensa-semi.o
obj-y += xtensa-isa.o
obj-y += translate.o op_helper.o helper.o cpu.o
+obj-$(CONFIG_SOFTMMU) += dbg_helper.o
+obj-y += exc_helper.o
+obj-y += fpu_helper.o
obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += mmu_helper.o
+obj-y += win_helper.o
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index 34e5ccd9f1..bf6f9a09b6 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -400,6 +400,7 @@ struct XtensaConfig {
int excm_level;
int ndepc;
unsigned inst_fetch_width;
+ unsigned max_insn_size;
uint32_t vecbase;
uint32_t exception_vector[EXC_MAX];
unsigned ninterrupt;
@@ -695,6 +696,11 @@ static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
#define XTENSA_TBFLAG_CALLINC_MASK 0x180000
#define XTENSA_TBFLAG_CALLINC_SHIFT 19
+#define XTENSA_CSBASE_LEND_MASK 0x0000ffff
+#define XTENSA_CSBASE_LEND_SHIFT 0
+#define XTENSA_CSBASE_LBEG_OFF_MASK 0x00ff0000
+#define XTENSA_CSBASE_LBEG_OFF_SHIFT 16
+
static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
@@ -706,6 +712,32 @@ static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
*flags |= xtensa_get_ring(env);
if (env->sregs[PS] & PS_EXCM) {
*flags |= XTENSA_TBFLAG_EXCM;
+ } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) {
+ target_ulong lend_dist =
+ env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS));
+
+ /*
+ * 0 in the csbase_lend field means that there may not be a loopback
+ * for any instruction that starts inside this page. Any other value
+ * means that an instruction that ends at this offset from the page
+ * start may loop back and will need loopback code to be generated.
+ *
+ * lend_dist is 0 when LEND points to the start of the page, but
+ * no instruction that starts inside this page may end at offset 0,
+ * so it's still correct.
+ *
+ * When an instruction ends at a page boundary it may only start in
+ * the previous page. lend_dist will be encoded as TARGET_PAGE_SIZE
+ * for the TB that contains this instruction.
+ */
+ if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) {
+ target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
+
+ *cs_base = lend_dist;
+ if (lbeg_off < 256) {
+ *cs_base |= lbeg_off << XTENSA_CSBASE_LBEG_OFF_SHIFT;
+ }
+ }
}
if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) &&
(env->sregs[LITBASE] & 1)) {
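Note: the cpu.h hunk above packs the distance from the page start to LEND and the LBEG-to-LEND offset into the TB's cs_base using the XTENSA_CSBASE_* fields. A small sketch of the corresponding decode, with the field layout copied from the hunk (decode_loop_csbase() is a hypothetical helper; the translator does the equivalent when deciding whether to emit loopback code):

    #include <stdint.h>
    #include <stdio.h>

    /* Field layout from the hunk above. */
    #define XTENSA_CSBASE_LEND_MASK      0x0000ffff
    #define XTENSA_CSBASE_LEND_SHIFT     0
    #define XTENSA_CSBASE_LBEG_OFF_MASK  0x00ff0000
    #define XTENSA_CSBASE_LBEG_OFF_SHIFT 16

    static void decode_loop_csbase(uint32_t cs_base)
    {
        uint32_t lend_dist = (cs_base & XTENSA_CSBASE_LEND_MASK)
                             >> XTENSA_CSBASE_LEND_SHIFT;
        uint32_t lbeg_off = (cs_base & XTENSA_CSBASE_LBEG_OFF_MASK)
                            >> XTENSA_CSBASE_LBEG_OFF_SHIFT;

        if (lend_dist == 0) {
            printf("no instruction starting in this page can loop back\n");
            return;
        }
        printf("LEND is %u bytes past the page start", lend_dist);
        if (lbeg_off) {
            printf(", LBEG is %u bytes before LEND\n", lbeg_off);
        } else {
            printf(", LBEG offset not encoded (>= 256 bytes)\n");
        }
    }

    int main(void)
    {
        decode_loop_csbase(0);            /* no loopback possible        */
        decode_loop_csbase(0x00030040);   /* LEND at +0x40, LBEG 3 before */
        return 0;
    }
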
diff --git a/target/xtensa/dbg_helper.c b/target/xtensa/dbg_helper.c
new file mode 100644
index 0000000000..cd8fbd653a
--- /dev/null
+++ b/target/xtensa/dbg_helper.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/address-spaces.h"
+
+static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
+{
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
+ &paddr, &page_size, &access);
+ if (ret == 0) {
+ tb_invalidate_phys_addr(&address_space_memory, paddr,
+ MEMTXATTRS_UNSPECIFIED);
+ }
+}
+
+void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
+{
+ uint32_t change = v ^ env->sregs[IBREAKENABLE];
+ unsigned i;
+
+ for (i = 0; i < env->config->nibreak; ++i) {
+ if (change & (1 << i)) {
+ tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+ }
+ }
+ env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
+}
+
+void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
+ tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+ tb_invalidate_virtual_addr(env, v);
+ }
+ env->sregs[IBREAKA + i] = v;
+}
+
+static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
+ uint32_t dbreakc)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+ uint32_t mask = dbreakc | ~DBREAKC_MASK;
+
+ if (env->cpu_watchpoint[i]) {
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
+ }
+ if (dbreakc & DBREAKC_SB) {
+ flags |= BP_MEM_WRITE;
+ }
+ if (dbreakc & DBREAKC_LB) {
+ flags |= BP_MEM_READ;
+ }
+ /* contiguous mask after inversion is one less than some power of 2 */
+ if ((~mask + 1) & ~mask) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
+ /* cut mask after the first zero bit */
+ mask = 0xffffffff << (32 - clo32(mask));
+ }
+ if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
+ flags, &env->cpu_watchpoint[i])) {
+ env->cpu_watchpoint[i] = NULL;
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Failed to set data breakpoint at 0x%08x/%d\n",
+ dbreaka & mask, ~mask + 1);
+ }
+}
+
+void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ uint32_t dbreakc = env->sregs[DBREAKC + i];
+
+ if ((dbreakc & DBREAKC_SB_LB) &&
+ env->sregs[DBREAKA + i] != v) {
+ set_dbreak(env, i, v, dbreakc);
+ }
+ env->sregs[DBREAKA + i] = v;
+}
+
+void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
+ if (v & DBREAKC_SB_LB) {
+ set_dbreak(env, i, env->sregs[DBREAKA + i], v);
+ } else {
+ if (env->cpu_watchpoint[i]) {
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
+ env->cpu_watchpoint[i] = NULL;
+ }
+ }
+ }
+ env->sregs[DBREAKC + i] = v;
+}
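Note: set_dbreak() above relies on the identity that the inverted DBREAKC mask is contiguous exactly when (~mask + 1) & ~mask is zero, i.e. ~mask is one less than a power of two. A small check demonstrating the test and the fallback truncation; count_leading_ones() open-codes what QEMU's clo32() from qemu/host-utils.h provides:

    #include <stdint.h>
    #include <stdio.h>

    /* Count leading one bits of a 32-bit value (clo32() in QEMU). */
    static unsigned count_leading_ones(uint32_t v)
    {
        unsigned n = 0;

        while (v & 0x80000000u) {
            n++;
            v <<= 1;
        }
        return n;
    }

    int main(void)
    {
        /* mask as built in set_dbreak(): DBREAKC bits OR'ed with ~DBREAKC_MASK */
        uint32_t masks[] = { 0xffffffc0u,   /* contiguous: watch 64 bytes    */
                             0xffffffd5u }; /* holes: invalid DBREAKC mask   */

        for (unsigned i = 0; i < 2; i++) {
            uint32_t mask = masks[i];

            /* contiguous mask after inversion is one less than a power of 2 */
            if ((~mask + 1) & ~mask) {
                /* cut mask after the first zero bit, as set_dbreak() does */
                uint32_t fixed = 0xffffffffu << (32 - count_leading_ones(mask));
                printf("0x%08x not contiguous, truncated to 0x%08x\n",
                       mask, fixed);
            } else {
                printf("0x%08x ok, watches %u bytes\n", mask, ~mask + 1);
            }
        }
        return 0;
    }
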
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
new file mode 100644
index 0000000000..371a32ba5a
--- /dev/null
+++ b/target/xtensa/exc_helper.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+
+void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ cs->exception_index = excp;
+ if (excp == EXCP_YIELD) {
+ env->yield_needed = 0;
+ }
+ if (excp == EXCP_DEBUG) {
+ env->exception_taken = 0;
+ }
+ cpu_loop_exit(cs);
+}
+
+void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
+{
+ uint32_t vector;
+
+ env->pc = pc;
+ if (env->sregs[PS] & PS_EXCM) {
+ if (env->config->ndepc) {
+ env->sregs[DEPC] = pc;
+ } else {
+ env->sregs[EPC1] = pc;
+ }
+ vector = EXC_DOUBLE;
+ } else {
+ env->sregs[EPC1] = pc;
+ vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
+ }
+
+ env->sregs[EXCCAUSE] = cause;
+ env->sregs[PS] |= PS_EXCM;
+
+ HELPER(exception)(env, vector);
+}
+
+void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
+ uint32_t pc, uint32_t cause, uint32_t vaddr)
+{
+ env->sregs[EXCVADDR] = vaddr;
+ HELPER(exception_cause)(env, pc, cause);
+}
+
+void debug_exception_env(CPUXtensaState *env, uint32_t cause)
+{
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ HELPER(debug_exception)(env, env->pc, cause);
+ }
+}
+
+void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
+{
+ unsigned level = env->config->debug_level;
+
+ env->pc = pc;
+ env->sregs[DEBUGCAUSE] = cause;
+ env->sregs[EPC1 + level - 1] = pc;
+ env->sregs[EPS2 + level - 2] = env->sregs[PS];
+ env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
+ (level << PS_INTLEVEL_SHIFT);
+ HELPER(exception)(env, EXC_DEBUG);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
+{
+ CPUState *cpu;
+
+ env->pc = pc;
+ env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
+ (intlevel << PS_INTLEVEL_SHIFT);
+
+ qemu_mutex_lock_iothread();
+ check_interrupts(env);
+ qemu_mutex_unlock_iothread();
+
+ if (env->pending_irq_level) {
+ cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
+ return;
+ }
+
+ cpu = CPU(xtensa_env_get_cpu(env));
+ cpu->halted = 1;
+ HELPER(exception)(env, EXCP_HLT);
+}
+
+void HELPER(check_interrupts)(CPUXtensaState *env)
+{
+ qemu_mutex_lock_iothread();
+ check_interrupts(env);
+ qemu_mutex_unlock_iothread();
+}
+
+static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
+{
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_RELOCATABLE_VECTOR)) {
+ return vector - env->config->vecbase + env->sregs[VECBASE];
+ } else {
+ return vector;
+ }
+}
+
+/*!
+ * Handle pending IRQ.
+ * For the high priority interrupt jump to the corresponding interrupt vector.
+ * For the level-1 interrupt convert it to either user, kernel or double
+ * exception with the 'level-1 interrupt' exception cause.
+ */
+static void handle_interrupt(CPUXtensaState *env)
+{
+ int level = env->pending_irq_level;
+
+ if (level > xtensa_get_cintlevel(env) &&
+ level <= env->config->nlevel &&
+ (env->config->level_mask[level] &
+ env->sregs[INTSET] &
+ env->sregs[INTENABLE])) {
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ if (level > 1) {
+ env->sregs[EPC1 + level - 1] = env->pc;
+ env->sregs[EPS2 + level - 2] = env->sregs[PS];
+ env->sregs[PS] =
+ (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
+ env->pc = relocated_vector(env,
+ env->config->interrupt_vector[level]);
+ } else {
+ env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;
+
+ if (env->sregs[PS] & PS_EXCM) {
+ if (env->config->ndepc) {
+ env->sregs[DEPC] = env->pc;
+ } else {
+ env->sregs[EPC1] = env->pc;
+ }
+ cs->exception_index = EXC_DOUBLE;
+ } else {
+ env->sregs[EPC1] = env->pc;
+ cs->exception_index =
+ (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
+ }
+ env->sregs[PS] |= PS_EXCM;
+ }
+ env->exception_taken = 1;
+ }
+}
+
+/* Called from cpu_handle_interrupt with BQL held */
+void xtensa_cpu_do_interrupt(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (cs->exception_index == EXC_IRQ) {
+ qemu_log_mask(CPU_LOG_INT,
+ "%s(EXC_IRQ) level = %d, cintlevel = %d, "
+ "pc = %08x, a0 = %08x, ps = %08x, "
+ "intset = %08x, intenable = %08x, "
+ "ccount = %08x\n",
+ __func__, env->pending_irq_level,
+ xtensa_get_cintlevel(env),
+ env->pc, env->regs[0], env->sregs[PS],
+ env->sregs[INTSET], env->sregs[INTENABLE],
+ env->sregs[CCOUNT]);
+ handle_interrupt(env);
+ }
+
+ switch (cs->exception_index) {
+ case EXC_WINDOW_OVERFLOW4:
+ case EXC_WINDOW_UNDERFLOW4:
+ case EXC_WINDOW_OVERFLOW8:
+ case EXC_WINDOW_UNDERFLOW8:
+ case EXC_WINDOW_OVERFLOW12:
+ case EXC_WINDOW_UNDERFLOW12:
+ case EXC_KERNEL:
+ case EXC_USER:
+ case EXC_DOUBLE:
+ case EXC_DEBUG:
+ qemu_log_mask(CPU_LOG_INT, "%s(%d) "
+ "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
+ __func__, cs->exception_index,
+ env->pc, env->regs[0], env->sregs[PS],
+ env->sregs[CCOUNT]);
+ if (env->config->exception_vector[cs->exception_index]) {
+ uint32_t vector;
+
+ vector = env->config->exception_vector[cs->exception_index];
+ env->pc = relocated_vector(env, vector);
+ env->exception_taken = 1;
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "%s(pc = %08x) bad exception_index: %d\n",
+ __func__, env->pc, cs->exception_index);
+ }
+ break;
+
+ case EXC_IRQ:
+ break;
+
+ default:
+ qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
+ __func__, env->pc, cs->exception_index);
+ break;
+ }
+ check_interrupts(env);
+}
+#else
+void xtensa_cpu_do_interrupt(CPUState *cs)
+{
+}
+#endif
+
+bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ cs->exception_index = EXC_IRQ;
+ xtensa_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
diff --git a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c
new file mode 100644
index 0000000000..f8bbb6cdd8
--- /dev/null
+++ b/target/xtensa/fpu_helper.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+
+void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
+{
+ static const int rounding_mode[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down,
+ };
+
+ env->uregs[FCR] = v & 0xfffff07f;
+ set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
+}
+
+float32 HELPER(abs_s)(float32 v)
+{
+ return float32_abs(v);
+}
+
+float32 HELPER(neg_s)(float32 v)
+{
+ return float32_chs(v);
+}
+
+float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_add(a, b, &env->fp_status);
+}
+
+float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_sub(a, b, &env->fp_status);
+}
+
+float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_mul(a, b, &env->fp_status);
+}
+
+float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
+{
+ return float32_muladd(b, c, a, 0, &env->fp_status);
+}
+
+float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
+{
+ return float32_muladd(b, c, a, float_muladd_negate_product,
+ &env->fp_status);
+}
+
+uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = {0};
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+ return float32_to_int32(float32_scalbn(v, scale, &fp_status), &fp_status);
+}
+
+uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = {0};
+ float32 res;
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+
+ res = float32_scalbn(v, scale, &fp_status);
+
+ if (float32_is_neg(v) && !float32_is_any_nan(v)) {
+ return float32_to_int32(res, &fp_status);
+ } else {
+ return float32_to_uint32(res, &fp_status);
+ }
+}
+
+float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float32_scalbn(int32_to_float32(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float32_scalbn(uint32_to_float32(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
+{
+ if (v) {
+ env->sregs[BR] |= br;
+ } else {
+ env->sregs[BR] &= ~br;
+ }
+}
+
+void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
+}
+
+void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v == float_relation_less || v == float_relation_unordered, br);
+}
+
+void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v != float_relation_greater, br);
+}
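Note: the comparison helpers above distinguish ordered predicates (OEQ/OLT/OLE), which are false when either operand is a NaN, from their unordered counterparts (UEQ/ULT/ULE), which are true in that case; ULE reduces to "not greater", exactly as ule_s computes it. A host-side illustration of that mapping using the quiet comparison macros from <math.h> (plain host float arithmetic, not softfloat):

    #include <math.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Ordered: false if either operand is NaN. */
    static bool oeq(float a, float b) { return !isunordered(a, b) && a == b; }
    static bool olt(float a, float b) { return isless(a, b); }

    /* Unordered: true if either operand is NaN. */
    static bool ueq(float a, float b) { return isunordered(a, b) || a == b; }
    static bool ult(float a, float b) { return isunordered(a, b) || isless(a, b); }

    /* ULE == "not greater". */
    static bool ule(float a, float b) { return !isgreater(a, b); }

    int main(void)
    {
        float qnan = nanf("");

        printf("oeq(1, NaN)=%d  ueq(1, NaN)=%d\n",
               oeq(1.0f, qnan), ueq(1.0f, qnan));
        printf("olt(1, NaN)=%d  ult(1, NaN)=%d  ule(1, NaN)=%d\n",
               olt(1.0f, qnan), ult(1.0f, qnan), ule(1.0f, qnan));
        return 0;
    }
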
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index 501082f55b..323c47a7fb 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -26,14 +26,11 @@
*/
#include "qemu/osdep.h"
-#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#if !defined(CONFIG_USER_ONLY)
-#include "hw/loader.h"
-#endif
static struct XtensaConfigList *xtensa_cores;
@@ -170,155 +167,6 @@ void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
}
}
-hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
-{
-#ifndef CONFIG_USER_ONLY
- XtensaCPU *cpu = XTENSA_CPU(cs);
- uint32_t paddr;
- uint32_t page_size;
- unsigned access;
-
- if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
- &paddr, &page_size, &access) == 0) {
- return paddr;
- }
- if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
- &paddr, &page_size, &access) == 0) {
- return paddr;
- }
- return ~0;
-#else
- return addr;
-#endif
-}
-
-#ifndef CONFIG_USER_ONLY
-
-static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
-{
- if (xtensa_option_enabled(env->config,
- XTENSA_OPTION_RELOCATABLE_VECTOR)) {
- return vector - env->config->vecbase + env->sregs[VECBASE];
- } else {
- return vector;
- }
-}
-
-/*!
- * Handle penging IRQ.
- * For the high priority interrupt jump to the corresponding interrupt vector.
- * For the level-1 interrupt convert it to either user, kernel or double
- * exception with the 'level-1 interrupt' exception cause.
- */
-static void handle_interrupt(CPUXtensaState *env)
-{
- int level = env->pending_irq_level;
-
- if (level > xtensa_get_cintlevel(env) &&
- level <= env->config->nlevel &&
- (env->config->level_mask[level] &
- env->sregs[INTSET] &
- env->sregs[INTENABLE])) {
- CPUState *cs = CPU(xtensa_env_get_cpu(env));
-
- if (level > 1) {
- env->sregs[EPC1 + level - 1] = env->pc;
- env->sregs[EPS2 + level - 2] = env->sregs[PS];
- env->sregs[PS] =
- (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
- env->pc = relocated_vector(env,
- env->config->interrupt_vector[level]);
- } else {
- env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;
-
- if (env->sregs[PS] & PS_EXCM) {
- if (env->config->ndepc) {
- env->sregs[DEPC] = env->pc;
- } else {
- env->sregs[EPC1] = env->pc;
- }
- cs->exception_index = EXC_DOUBLE;
- } else {
- env->sregs[EPC1] = env->pc;
- cs->exception_index =
- (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
- }
- env->sregs[PS] |= PS_EXCM;
- }
- env->exception_taken = 1;
- }
-}
-
-/* Called from cpu_handle_interrupt with BQL held */
-void xtensa_cpu_do_interrupt(CPUState *cs)
-{
- XtensaCPU *cpu = XTENSA_CPU(cs);
- CPUXtensaState *env = &cpu->env;
-
- if (cs->exception_index == EXC_IRQ) {
- qemu_log_mask(CPU_LOG_INT,
- "%s(EXC_IRQ) level = %d, cintlevel = %d, "
- "pc = %08x, a0 = %08x, ps = %08x, "
- "intset = %08x, intenable = %08x, "
- "ccount = %08x\n",
- __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
- env->pc, env->regs[0], env->sregs[PS],
- env->sregs[INTSET], env->sregs[INTENABLE],
- env->sregs[CCOUNT]);
- handle_interrupt(env);
- }
-
- switch (cs->exception_index) {
- case EXC_WINDOW_OVERFLOW4:
- case EXC_WINDOW_UNDERFLOW4:
- case EXC_WINDOW_OVERFLOW8:
- case EXC_WINDOW_UNDERFLOW8:
- case EXC_WINDOW_OVERFLOW12:
- case EXC_WINDOW_UNDERFLOW12:
- case EXC_KERNEL:
- case EXC_USER:
- case EXC_DOUBLE:
- case EXC_DEBUG:
- qemu_log_mask(CPU_LOG_INT, "%s(%d) "
- "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
- __func__, cs->exception_index,
- env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
- if (env->config->exception_vector[cs->exception_index]) {
- env->pc = relocated_vector(env,
- env->config->exception_vector[cs->exception_index]);
- env->exception_taken = 1;
- } else {
- qemu_log_mask(CPU_LOG_INT, "%s(pc = %08x) bad exception_index: %d\n",
- __func__, env->pc, cs->exception_index);
- }
- break;
-
- case EXC_IRQ:
- break;
-
- default:
- qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
- __func__, env->pc, cs->exception_index);
- break;
- }
- check_interrupts(env);
-}
-#else
-void xtensa_cpu_do_interrupt(CPUState *cs)
-{
-}
-#endif
-
-bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- cs->exception_index = EXC_IRQ;
- xtensa_cpu_do_interrupt(cs);
- return true;
- }
- return false;
-}
-
#ifdef CONFIG_USER_ONLY
int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
@@ -338,468 +186,61 @@ int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
#else
-static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
- const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
-{
- unsigned wi, ei;
-
- for (wi = 0; wi < tlb->nways; ++wi) {
- for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
- entry[wi][ei].asid = 0;
- entry[wi][ei].variable = true;
- }
- }
-}
-
-static void reset_tlb_mmu_ways56(CPUXtensaState *env,
- const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
-{
- if (!tlb->varway56) {
- static const xtensa_tlb_entry way5[] = {
- {
- .vaddr = 0xd0000000,
- .paddr = 0,
- .asid = 1,
- .attr = 7,
- .variable = false,
- }, {
- .vaddr = 0xd8000000,
- .paddr = 0,
- .asid = 1,
- .attr = 3,
- .variable = false,
- }
- };
- static const xtensa_tlb_entry way6[] = {
- {
- .vaddr = 0xe0000000,
- .paddr = 0xf0000000,
- .asid = 1,
- .attr = 7,
- .variable = false,
- }, {
- .vaddr = 0xf0000000,
- .paddr = 0xf0000000,
- .asid = 1,
- .attr = 3,
- .variable = false,
- }
- };
- memcpy(entry[5], way5, sizeof(way5));
- memcpy(entry[6], way6, sizeof(way6));
- } else {
- uint32_t ei;
- for (ei = 0; ei < 8; ++ei) {
- entry[6][ei].vaddr = ei << 29;
- entry[6][ei].paddr = ei << 29;
- entry[6][ei].asid = 1;
- entry[6][ei].attr = 3;
- }
- }
-}
-
-static void reset_tlb_region_way0(CPUXtensaState *env,
- xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
-{
- unsigned ei;
-
- for (ei = 0; ei < 8; ++ei) {
- entry[0][ei].vaddr = ei << 29;
- entry[0][ei].paddr = ei << 29;
- entry[0][ei].asid = 1;
- entry[0][ei].attr = 2;
- entry[0][ei].variable = true;
- }
-}
-
-void reset_mmu(CPUXtensaState *env)
-{
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
- env->sregs[RASID] = 0x04030201;
- env->sregs[ITLBCFG] = 0;
- env->sregs[DTLBCFG] = 0;
- env->autorefill_idx = 0;
- reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
- reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
- reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
- reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
- } else {
- reset_tlb_region_way0(env, env->itlb);
- reset_tlb_region_way0(env, env->dtlb);
- }
-}
-
-static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
+void xtensa_cpu_do_unaligned_access(CPUState *cs,
+ vaddr addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
{
- unsigned i;
- for (i = 0; i < 4; ++i) {
- if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
- return i;
- }
- }
- return 0xff;
-}
-
-/*!
- * Lookup xtensa TLB for the given virtual address.
- * See ISA, 4.6.2.2
- *
- * \param pwi: [out] way index
- * \param pei: [out] entry index
- * \param pring: [out] access ring
- * \return 0 if ok, exception cause code otherwise
- */
-int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
- uint32_t *pwi, uint32_t *pei, uint8_t *pring)
-{
- const xtensa_tlb *tlb = dtlb ?
- &env->config->dtlb : &env->config->itlb;
- const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
- env->dtlb : env->itlb;
-
- int nhits = 0;
- unsigned wi;
-
- for (wi = 0; wi < tlb->nways; ++wi) {
- uint32_t vpn;
- uint32_t ei;
- split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
- if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
- unsigned ring = get_ring(env, entry[wi][ei].asid);
- if (ring < 4) {
- if (++nhits > 1) {
- return dtlb ?
- LOAD_STORE_TLB_MULTI_HIT_CAUSE :
- INST_TLB_MULTI_HIT_CAUSE;
- }
- *pwi = wi;
- *pei = ei;
- *pring = ring;
- }
- }
- }
- return nhits ? 0 :
- (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
-}
-
-/*!
- * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
- * See ISA, 4.6.5.10
- */
-static unsigned mmu_attr_to_access(uint32_t attr)
-{
- unsigned access = 0;
-
- if (attr < 12) {
- access |= PAGE_READ;
- if (attr & 0x1) {
- access |= PAGE_EXEC;
- }
- if (attr & 0x2) {
- access |= PAGE_WRITE;
- }
-
- switch (attr & 0xc) {
- case 0:
- access |= PAGE_CACHE_BYPASS;
- break;
-
- case 4:
- access |= PAGE_CACHE_WB;
- break;
-
- case 8:
- access |= PAGE_CACHE_WT;
- break;
- }
- } else if (attr == 13) {
- access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
- }
- return access;
-}
-
-/*!
- * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
- * See ISA, 4.6.3.3
- */
-static unsigned region_attr_to_access(uint32_t attr)
-{
- static const unsigned access[16] = {
- [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
- [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
- [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
- [3] = PAGE_EXEC | PAGE_CACHE_WB,
- [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
- [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
- [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
- };
-
- return access[attr & 0xf];
-}
-
-/*!
- * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
- * See ISA, A.2.14 The Cache Attribute Register
- */
-static unsigned cacheattr_attr_to_access(uint32_t attr)
-{
- static const unsigned access[16] = {
- [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
- [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
- [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
- [3] = PAGE_EXEC | PAGE_CACHE_WB,
- [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
- [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
- };
-
- return access[attr & 0xf];
-}
-
-static bool is_access_granted(unsigned access, int is_write)
-{
- switch (is_write) {
- case 0:
- return access & PAGE_READ;
-
- case 1:
- return access & PAGE_WRITE;
-
- case 2:
- return access & PAGE_EXEC;
-
- default:
- return 0;
- }
-}
-
-static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
-
-static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
- uint32_t vaddr, int is_write, int mmu_idx,
- uint32_t *paddr, uint32_t *page_size, unsigned *access,
- bool may_lookup_pt)
-{
- bool dtlb = is_write != 2;
- uint32_t wi;
- uint32_t ei;
- uint8_t ring;
- uint32_t vpn;
- uint32_t pte;
- const xtensa_tlb_entry *entry = NULL;
- xtensa_tlb_entry tmp_entry;
- int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);
-
- if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
- may_lookup_pt && get_pte(env, vaddr, &pte)) {
- ring = (pte >> 4) & 0x3;
- wi = 0;
- split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);
-
- if (update_tlb) {
- wi = ++env->autorefill_idx & 0x3;
- xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
- env->sregs[EXCVADDR] = vaddr;
- qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
- __func__, vaddr, vpn, pte);
- } else {
- xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
- entry = &tmp_entry;
- }
- ret = 0;
- }
- if (ret != 0) {
- return ret;
- }
-
- if (entry == NULL) {
- entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
- }
-
- if (ring < mmu_idx) {
- return dtlb ?
- LOAD_STORE_PRIVILEGE_CAUSE :
- INST_FETCH_PRIVILEGE_CAUSE;
- }
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
- *access = mmu_attr_to_access(entry->attr) &
- ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
- if (!is_access_granted(*access, is_write)) {
- return dtlb ?
- (is_write ?
- STORE_PROHIBITED_CAUSE :
- LOAD_PROHIBITED_CAUSE) :
- INST_FETCH_PROHIBITED_CAUSE;
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
+ !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
+ cpu_restore_state(CPU(cpu), retaddr, true);
+ HELPER(exception_cause_vaddr)(env,
+ env->pc, LOAD_STORE_ALIGNMENT_CAUSE,
+ addr);
}
-
- *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
- *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
-
- return 0;
}
-static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
+void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
- CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
uint32_t paddr;
uint32_t page_size;
unsigned access;
- uint32_t pt_vaddr =
- (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
- int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
- &paddr, &page_size, &access, false);
+ int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
+ &paddr, &page_size, &access);
- if (ret == 0) {
- qemu_log_mask(CPU_LOG_MMU,
- "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
- __func__, vaddr, pt_vaddr, paddr);
- } else {
- qemu_log_mask(CPU_LOG_MMU,
- "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
- __func__, vaddr, pt_vaddr, ret);
- }
+ qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
+ __func__, vaddr, access_type, mmu_idx, paddr, ret);
if (ret == 0) {
- MemTxResult result;
-
- *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
- &result);
- if (result != MEMTX_OK) {
- qemu_log_mask(CPU_LOG_MMU,
- "%s: couldn't load PTE: transaction failed (%u)\n",
- __func__, (unsigned)result);
- ret = 1;
- }
- }
- return ret == 0;
-}
-
-static int get_physical_addr_region(CPUXtensaState *env,
- uint32_t vaddr, int is_write, int mmu_idx,
- uint32_t *paddr, uint32_t *page_size, unsigned *access)
-{
- bool dtlb = is_write != 2;
- uint32_t wi = 0;
- uint32_t ei = (vaddr >> 29) & 0x7;
- const xtensa_tlb_entry *entry =
- xtensa_tlb_get_entry(env, dtlb, wi, ei);
-
- *access = region_attr_to_access(entry->attr);
- if (!is_access_granted(*access, is_write)) {
- return dtlb ?
- (is_write ?
- STORE_PROHIBITED_CAUSE :
- LOAD_PROHIBITED_CAUSE) :
- INST_FETCH_PROHIBITED_CAUSE;
- }
-
- *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
- *page_size = ~REGION_PAGE_MASK + 1;
-
- return 0;
-}
-
-/*!
- * Convert virtual address to physical addr.
- * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
- *
- * \return 0 if ok, exception cause code otherwise
- */
-int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
- uint32_t vaddr, int is_write, int mmu_idx,
- uint32_t *paddr, uint32_t *page_size, unsigned *access)
-{
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
- return get_physical_addr_mmu(env, update_tlb,
- vaddr, is_write, mmu_idx, paddr, page_size, access, true);
- } else if (xtensa_option_bits_enabled(env->config,
- XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
- XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
- return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
- paddr, page_size, access);
+ tlb_set_page(cs,
+ vaddr & TARGET_PAGE_MASK,
+ paddr & TARGET_PAGE_MASK,
+ access, mmu_idx, page_size);
} else {
- *paddr = vaddr;
- *page_size = TARGET_PAGE_SIZE;
- *access = cacheattr_attr_to_access(
- env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
- return 0;
+ cpu_restore_state(cs, retaddr, true);
+ HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
}
}
-static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
- CPUXtensaState *env, bool dtlb)
+void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
{
- unsigned wi, ei;
- const xtensa_tlb *conf =
- dtlb ? &env->config->dtlb : &env->config->itlb;
- unsigned (*attr_to_access)(uint32_t) =
- xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
- mmu_attr_to_access : region_attr_to_access;
-
- for (wi = 0; wi < conf->nways; ++wi) {
- uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
- const char *sz_text;
- bool print_header = true;
-
- if (sz >= 0x100000) {
- sz /= MiB;
- sz_text = "MB";
- } else {
- sz /= KiB;
- sz_text = "KB";
- }
-
- for (ei = 0; ei < conf->way_size[wi]; ++ei) {
- const xtensa_tlb_entry *entry =
- xtensa_tlb_get_entry(env, dtlb, wi, ei);
-
- if (entry->asid) {
- static const char * const cache_text[8] = {
- [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
- [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
- [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
- [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
- };
- unsigned access = attr_to_access(entry->attr);
- unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
- PAGE_CACHE_SHIFT;
-
- if (print_header) {
- print_header = false;
- cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
- cpu_fprintf(f,
- "\tVaddr Paddr ASID Attr RWX Cache\n"
- "\t---------- ---------- ---- ---- --- -------\n");
- }
- cpu_fprintf(f,
- "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
- entry->vaddr,
- entry->paddr,
- entry->asid,
- entry->attr,
- (access & PAGE_READ) ? 'R' : '-',
- (access & PAGE_WRITE) ? 'W' : '-',
- (access & PAGE_EXEC) ? 'X' : '-',
- cache_text[cache_idx] ? cache_text[cache_idx] :
- "Invalid");
- }
- }
- }
-}
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
-void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
-{
- if (xtensa_option_bits_enabled(env->config,
- XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
- XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
- XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {
-
- cpu_fprintf(f, "ITLB:\n");
- dump_tlb(f, cpu_fprintf, env, false);
- cpu_fprintf(f, "\nDTLB:\n");
- dump_tlb(f, cpu_fprintf, env, true);
- } else {
- cpu_fprintf(f, "No TLB for this CPU core\n");
- }
+ cpu_restore_state(cs, retaddr, true);
+ HELPER(exception_cause_vaddr)(env, env->pc,
+ access_type == MMU_INST_FETCH ?
+ INSTR_PIF_ADDR_ERROR_CAUSE :
+ LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
+ addr);
}
void xtensa_runstall(CPUXtensaState *env, bool runstall)
diff --git a/target/xtensa/helper.h b/target/xtensa/helper.h
index 10153c2453..89eb97e265 100644
--- a/target/xtensa/helper.h
+++ b/target/xtensa/helper.h
@@ -12,12 +12,9 @@ DEF_HELPER_2(rotw, void, env, i32)
DEF_HELPER_3(window_check, noreturn, env, i32, i32)
DEF_HELPER_1(restore_owb, void, env)
DEF_HELPER_2(movsp, void, env, i32)
-DEF_HELPER_2(wsr_lbeg, void, env, i32)
-DEF_HELPER_2(wsr_lend, void, env, i32)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(simcall, void, env)
#endif
-DEF_HELPER_1(dump_state, void, env)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(waiti, void, env, i32, i32)
diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c
new file mode 100644
index 0000000000..2096fbbd9f
--- /dev/null
+++ b/target/xtensa/mmu_helper.c
@@ -0,0 +1,818 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/units.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
+{
+ /*
+ * Attempt the memory load; we don't care about the result but
+ * only the side-effects (ie any MMU or other exception)
+ */
+ cpu_ldub_code_ra(env, vaddr, GETPC());
+}
+
+void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
+ v = (v & 0xffffff00) | 0x1;
+ if (v != env->sregs[RASID]) {
+ env->sregs[RASID] = v;
+ tlb_flush(CPU(cpu));
+ }
+}
+
+static uint32_t get_page_size(const CPUXtensaState *env,
+ bool dtlb, uint32_t way)
+{
+ uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];
+
+ switch (way) {
+ case 4:
+ return (tlbcfg >> 16) & 0x3;
+
+ case 5:
+ return (tlbcfg >> 20) & 0x1;
+
+ case 6:
+ return (tlbcfg >> 24) & 0x1;
+
+ default:
+ return 0;
+ }
+}
+
+/*!
+ * Get bit mask for the virtual address bits translated by the TLB way
+ */
+uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
+ bool dtlb, uint32_t way)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ switch (way) {
+ case 4:
+ return 0xfff00000 << get_page_size(env, dtlb, way) * 2;
+
+ case 5:
+ if (varway56) {
+ return 0xf8000000 << get_page_size(env, dtlb, way);
+ } else {
+ return 0xf8000000;
+ }
+
+ case 6:
+ if (varway56) {
+ return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
+ } else {
+ return 0xf0000000;
+ }
+
+ default:
+ return 0xfffff000;
+ }
+ } else {
+ return REGION_PAGE_MASK;
+ }
+}
+
+/*!
+ * Get bit mask for the 'VPN without index' field.
+ * See ISA, 4.6.5.6, data format for RxTLB0
+ */
+static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
+{
+ if (way < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ return is32 ? 0xffff8000 : 0xffffc000;
+ } else if (way == 4) {
+ return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
+ } else if (way <= 6) {
+ uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (varway56) {
+ return mask << (way == 5 ? 2 : 3);
+ } else {
+ return mask << 1;
+ }
+ } else {
+ return 0xfffff000;
+ }
+}
+
+/*!
+ * Split virtual address into VPN (with index) and entry index
+ * for the given TLB way
+ */
+void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t wi, uint32_t *ei)
+{
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (!dtlb) {
+ wi &= 7;
+ }
+
+ if (wi < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
+ } else {
+ switch (wi) {
+ case 4:
+ {
+ uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
+ *ei = (v >> eibase) & 0x3;
+ }
+ break;
+
+ case 5:
+ if (varway56) {
+ uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x3;
+ } else {
+ *ei = (v >> 27) & 0x1;
+ }
+ break;
+
+ case 6:
+ if (varway56) {
+ uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x7;
+ } else {
+ *ei = (v >> 28) & 0x1;
+ }
+ break;
+
+ default:
+ *ei = 0;
+ break;
+ }
+ }
+ *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+}
+
+/*!
+ * Split TLB address into TLB way, entry index and VPN (with index).
+ * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
+ */
+static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t *wi, uint32_t *ei)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ *wi = v & (dtlb ? 0xf : 0x7);
+ split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
+ } else {
+ *vpn = v & REGION_PAGE_MASK;
+ *wi = 0;
+ *ei = (v >> 29) & 0x7;
+ }
+}
+
+static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
+ uint32_t v, bool dtlb, uint32_t *pwi)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+
+ split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
+ if (pwi) {
+ *pwi = wi;
+ }
+ return xtensa_tlb_get_entry(env, dtlb, wi, ei);
+}
+
+uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
+ return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
+ } else {
+ return v & REGION_PAGE_MASK;
+ }
+}
+
+uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
+ return entry->paddr | entry->attr;
+}
+
+void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
+ if (entry->variable && entry->asid) {
+ tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
+ entry->asid = 0;
+ }
+ }
+}
+
+uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);
+
+ switch (res) {
+ case 0:
+ if (ring >= xtensa_get_ring(env)) {
+ return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
+ }
+ break;
+
+ case INST_TLB_MULTI_HIT_CAUSE:
+ case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
+ HELPER(exception_cause_vaddr)(env, env->pc, res, v);
+ break;
+ }
+ return 0;
+ } else {
+ return (v & REGION_PAGE_MASK) | 0x1;
+ }
+}
+
+void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
+ xtensa_tlb_entry *entry, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn,
+ uint32_t pte)
+{
+ entry->vaddr = vpn;
+ entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+ entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
+ entry->attr = pte & 0xf;
+}
+
+void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ if (entry->variable) {
+ if (entry->asid) {
+ tlb_flush_page(cs, entry->vaddr);
+ }
+ xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
+ tlb_flush_page(cs, entry->vaddr);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s %d, %d, %d trying to set immutable entry\n",
+ __func__, dtlb, wi, ei);
+ }
+ } else {
+ tlb_flush_page(cs, entry->vaddr);
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_REGION_TRANSLATION)) {
+ entry->paddr = pte & REGION_PAGE_MASK;
+ }
+ entry->attr = pte & 0xf;
+ }
+}
+
+void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+ split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
+}
+
+hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+
+ if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
+ &paddr, &page_size, &access) == 0) {
+ return paddr;
+ }
+ if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
+ &paddr, &page_size, &access) == 0) {
+ return paddr;
+ }
+ return ~0;
+}
+
+static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
+ const xtensa_tlb *tlb,
+ xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ unsigned wi, ei;
+
+ for (wi = 0; wi < tlb->nways; ++wi) {
+ for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
+ entry[wi][ei].asid = 0;
+ entry[wi][ei].variable = true;
+ }
+ }
+}
+
+static void reset_tlb_mmu_ways56(CPUXtensaState *env,
+ const xtensa_tlb *tlb,
+ xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ if (!tlb->varway56) {
+ static const xtensa_tlb_entry way5[] = {
+ {
+ .vaddr = 0xd0000000,
+ .paddr = 0,
+ .asid = 1,
+ .attr = 7,
+ .variable = false,
+ }, {
+ .vaddr = 0xd8000000,
+ .paddr = 0,
+ .asid = 1,
+ .attr = 3,
+ .variable = false,
+ }
+ };
+ static const xtensa_tlb_entry way6[] = {
+ {
+ .vaddr = 0xe0000000,
+ .paddr = 0xf0000000,
+ .asid = 1,
+ .attr = 7,
+ .variable = false,
+ }, {
+ .vaddr = 0xf0000000,
+ .paddr = 0xf0000000,
+ .asid = 1,
+ .attr = 3,
+ .variable = false,
+ }
+ };
+ memcpy(entry[5], way5, sizeof(way5));
+ memcpy(entry[6], way6, sizeof(way6));
+ } else {
+ uint32_t ei;
+ for (ei = 0; ei < 8; ++ei) {
+ entry[6][ei].vaddr = ei << 29;
+ entry[6][ei].paddr = ei << 29;
+ entry[6][ei].asid = 1;
+ entry[6][ei].attr = 3;
+ }
+ }
+}
+
+static void reset_tlb_region_way0(CPUXtensaState *env,
+ xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ unsigned ei;
+
+ for (ei = 0; ei < 8; ++ei) {
+ entry[0][ei].vaddr = ei << 29;
+ entry[0][ei].paddr = ei << 29;
+ entry[0][ei].asid = 1;
+ entry[0][ei].attr = 2;
+ entry[0][ei].variable = true;
+ }
+}
+
+void reset_mmu(CPUXtensaState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ env->sregs[RASID] = 0x04030201;
+ env->sregs[ITLBCFG] = 0;
+ env->sregs[DTLBCFG] = 0;
+ env->autorefill_idx = 0;
+ reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
+ reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
+ reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
+ reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
+ } else {
+ reset_tlb_region_way0(env, env->itlb);
+ reset_tlb_region_way0(env, env->dtlb);
+ }
+}
+
+static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
+{
+ unsigned i;
+ for (i = 0; i < 4; ++i) {
+ if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
+ return i;
+ }
+ }
+ return 0xff;
+}
+
+/*!
+ * Lookup xtensa TLB for the given virtual address.
+ * See ISA, 4.6.2.2
+ *
+ * \param pwi: [out] way index
+ * \param pei: [out] entry index
+ * \param pring: [out] access ring
+ * \return 0 if ok, exception cause code otherwise
+ */
+int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
+ uint32_t *pwi, uint32_t *pei, uint8_t *pring)
+{
+ const xtensa_tlb *tlb = dtlb ?
+ &env->config->dtlb : &env->config->itlb;
+ const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
+ env->dtlb : env->itlb;
+
+ int nhits = 0;
+ unsigned wi;
+
+ for (wi = 0; wi < tlb->nways; ++wi) {
+ uint32_t vpn;
+ uint32_t ei;
+ split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
+ if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
+ unsigned ring = get_ring(env, entry[wi][ei].asid);
+ if (ring < 4) {
+ if (++nhits > 1) {
+ return dtlb ?
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE :
+ INST_TLB_MULTI_HIT_CAUSE;
+ }
+ *pwi = wi;
+ *pei = ei;
+ *pring = ring;
+ }
+ }
+ }
+ return nhits ? 0 :
+ (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
+}
+
+/*!
+ * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, 4.6.5.10
+ */
+static unsigned mmu_attr_to_access(uint32_t attr)
+{
+ unsigned access = 0;
+
+ if (attr < 12) {
+ access |= PAGE_READ;
+ if (attr & 0x1) {
+ access |= PAGE_EXEC;
+ }
+ if (attr & 0x2) {
+ access |= PAGE_WRITE;
+ }
+
+ switch (attr & 0xc) {
+ case 0:
+ access |= PAGE_CACHE_BYPASS;
+ break;
+
+ case 4:
+ access |= PAGE_CACHE_WB;
+ break;
+
+ case 8:
+ access |= PAGE_CACHE_WT;
+ break;
+ }
+ } else if (attr == 13) {
+ access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
+ }
+ return access;
+}
+
+/*!
+ * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, 4.6.3.3
+ */
+static unsigned region_attr_to_access(uint32_t attr)
+{
+ static const unsigned access[16] = {
+ [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
+ [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
+ [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
+ [3] = PAGE_EXEC | PAGE_CACHE_WB,
+ [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
+ };
+
+ return access[attr & 0xf];
+}
+
+/*!
+ * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, A.2.14 The Cache Attribute Register
+ */
+static unsigned cacheattr_attr_to_access(uint32_t attr)
+{
+ static const unsigned access[16] = {
+ [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
+ [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
+ [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
+ [3] = PAGE_EXEC | PAGE_CACHE_WB,
+ [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
+ };
+
+ return access[attr & 0xf];
+}
+
+static bool is_access_granted(unsigned access, int is_write)
+{
+ switch (is_write) {
+ case 0:
+ return access & PAGE_READ;
+
+ case 1:
+ return access & PAGE_WRITE;
+
+ case 2:
+ return access & PAGE_EXEC;
+
+ default:
+ return 0;
+ }
+}
+
+static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
+
+static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size,
+ unsigned *access, bool may_lookup_pt)
+{
+ bool dtlb = is_write != 2;
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ uint32_t vpn;
+ uint32_t pte;
+ const xtensa_tlb_entry *entry = NULL;
+ xtensa_tlb_entry tmp_entry;
+ int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);
+
+ if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
+ may_lookup_pt && get_pte(env, vaddr, &pte)) {
+ ring = (pte >> 4) & 0x3;
+ wi = 0;
+ split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);
+
+ if (update_tlb) {
+ wi = ++env->autorefill_idx & 0x3;
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
+ env->sregs[EXCVADDR] = vaddr;
+ qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
+ __func__, vaddr, vpn, pte);
+ } else {
+ xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
+ entry = &tmp_entry;
+ }
+ ret = 0;
+ }
+ if (ret != 0) {
+ return ret;
+ }
+
+ if (entry == NULL) {
+ entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+ }
+
+ if (ring < mmu_idx) {
+ return dtlb ?
+ LOAD_STORE_PRIVILEGE_CAUSE :
+ INST_FETCH_PRIVILEGE_CAUSE;
+ }
+
+ *access = mmu_attr_to_access(entry->attr) &
+ ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
+ if (!is_access_granted(*access, is_write)) {
+ return dtlb ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+
+ *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
+ *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
+
+ return 0;
+}
+
+static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ uint32_t pt_vaddr =
+ (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
+ int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
+ &paddr, &page_size, &access, false);
+
+ if (ret == 0) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
+ __func__, vaddr, pt_vaddr, paddr);
+ } else {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
+ __func__, vaddr, pt_vaddr, ret);
+ }
+
+ if (ret == 0) {
+ MemTxResult result;
+
+ *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
+ &result);
+ if (result != MEMTX_OK) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: couldn't load PTE: transaction failed (%u)\n",
+ __func__, (unsigned)result);
+ ret = 1;
+ }
+ }
+ return ret == 0;
+}
+
+static int get_physical_addr_region(CPUXtensaState *env,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size,
+ unsigned *access)
+{
+ bool dtlb = is_write != 2;
+ uint32_t wi = 0;
+ uint32_t ei = (vaddr >> 29) & 0x7;
+ const xtensa_tlb_entry *entry =
+ xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ *access = region_attr_to_access(entry->attr);
+ if (!is_access_granted(*access, is_write)) {
+ return dtlb ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+
+ *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
+ *page_size = ~REGION_PAGE_MASK + 1;
+
+ return 0;
+}
+
+/*!
+ * Convert virtual address to physical addr.
+ * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
+ *
+ * \return 0 if ok, exception cause code otherwise
+ */
+int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size,
+ unsigned *access)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ return get_physical_addr_mmu(env, update_tlb,
+ vaddr, is_write, mmu_idx, paddr,
+ page_size, access, true);
+ } else if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
+ return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
+ paddr, page_size, access);
+ } else {
+ *paddr = vaddr;
+ *page_size = TARGET_PAGE_SIZE;
+ *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
+ ((vaddr & 0xe0000000) >> 27));
+ return 0;
+ }
+}
+
+static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
+ CPUXtensaState *env, bool dtlb)
+{
+ unsigned wi, ei;
+ const xtensa_tlb *conf =
+ dtlb ? &env->config->dtlb : &env->config->itlb;
+ unsigned (*attr_to_access)(uint32_t) =
+ xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
+ mmu_attr_to_access : region_attr_to_access;
+
+ for (wi = 0; wi < conf->nways; ++wi) {
+ uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
+ const char *sz_text;
+ bool print_header = true;
+
+ if (sz >= 0x100000) {
+ sz /= MiB;
+ sz_text = "MB";
+ } else {
+ sz /= KiB;
+ sz_text = "KB";
+ }
+
+ for (ei = 0; ei < conf->way_size[wi]; ++ei) {
+ const xtensa_tlb_entry *entry =
+ xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (entry->asid) {
+ static const char * const cache_text[8] = {
+ [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
+ [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
+ [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
+ [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
+ };
+ unsigned access = attr_to_access(entry->attr);
+ unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
+ PAGE_CACHE_SHIFT;
+
+ if (print_header) {
+ print_header = false;
+ cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
+ cpu_fprintf(f,
+ "\tVaddr Paddr ASID Attr RWX Cache\n"
+ "\t---------- ---------- ---- ---- --- -------\n");
+ }
+ cpu_fprintf(f,
+ "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
+ entry->vaddr,
+ entry->paddr,
+ entry->asid,
+ entry->attr,
+ (access & PAGE_READ) ? 'R' : '-',
+ (access & PAGE_WRITE) ? 'W' : '-',
+ (access & PAGE_EXEC) ? 'X' : '-',
+ cache_text[cache_idx] ?
+ cache_text[cache_idx] : "Invalid");
+ }
+ }
+ }
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
+{
+ if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {
+
+ cpu_fprintf(f, "ITLB:\n");
+ dump_tlb(f, cpu_fprintf, env, false);
+ cpu_fprintf(f, "\nDTLB:\n");
+ dump_tlb(f, cpu_fprintf, env, true);
+ } else {
+ cpu_fprintf(f, "No TLB for this CPU core\n");
+ }
+}
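
For reference, the region-protection path that get_physical_addr_region() above handles (no full MMU) simply uses the top three virtual-address bits to pick one of eight fixed 512 MB regions; the per-region attribute only grants or denies access, and the physical address is the region base plus the untranslated offset. The following is a minimal standalone sketch of that rule, not code from the patch; the PAGE_* bit values, the all-RWX attribute table and the identity mapping are illustrative assumptions, not the QEMU definitions.

/* Standalone model of Xtensa region-protection translation
 * (cf. get_physical_addr_region() above). Constants are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define REGION_PAGE_MASK 0xe0000000u          /* eight 512 MB regions */
#define PAGE_READ  1u
#define PAGE_WRITE 2u
#define PAGE_EXEC  4u

struct region_entry {
    uint32_t paddr;    /* region base after translation */
    unsigned access;   /* PAGE_* bits granted by the region attribute */
};

static struct region_entry regions[8];

static int region_translate(uint32_t vaddr, unsigned need, uint32_t *paddr)
{
    unsigned ei = (vaddr >> 29) & 0x7;        /* region index */
    const struct region_entry *e = &regions[ei];

    if ((e->access & need) != need) {
        return -1;                            /* access prohibited */
    }
    *paddr = e->paddr | (vaddr & ~REGION_PAGE_MASK);
    return 0;
}

int main(void)
{
    uint32_t paddr;

    for (unsigned i = 0; i < 8; ++i) {
        regions[i].paddr = i << 29;           /* identity mapping for the demo */
        regions[i].access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    }
    if (region_translate(0xd0001234u, PAGE_READ, &paddr) == 0) {
        printf("0xd0001234 -> 0x%08x\n", (unsigned)paddr);
    }
    return 0;
}
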
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
index e4b42ab3e5..1865f46c4b 100644
--- a/target/xtensa/op_helper.c
+++ b/target/xtensa/op_helper.c
@@ -34,390 +34,9 @@
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"
-#include "fpu/softfloat.h"
#ifndef CONFIG_USER_ONLY
-void xtensa_cpu_do_unaligned_access(CPUState *cs,
- vaddr addr, MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- XtensaCPU *cpu = XTENSA_CPU(cs);
- CPUXtensaState *env = &cpu->env;
-
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
- !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
- cpu_restore_state(CPU(cpu), retaddr, true);
- HELPER(exception_cause_vaddr)(env,
- env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
- }
-}
-
-void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- XtensaCPU *cpu = XTENSA_CPU(cs);
- CPUXtensaState *env = &cpu->env;
- uint32_t paddr;
- uint32_t page_size;
- unsigned access;
- int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
- &paddr, &page_size, &access);
-
- qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
- __func__, vaddr, access_type, mmu_idx, paddr, ret);
-
- if (ret == 0) {
- tlb_set_page(cs,
- vaddr & TARGET_PAGE_MASK,
- paddr & TARGET_PAGE_MASK,
- access, mmu_idx, page_size);
- } else {
- cpu_restore_state(cs, retaddr, true);
- HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
- }
-}
-
-void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
- unsigned size, MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response, uintptr_t retaddr)
-{
- XtensaCPU *cpu = XTENSA_CPU(cs);
- CPUXtensaState *env = &cpu->env;
-
- cpu_restore_state(cs, retaddr, true);
- HELPER(exception_cause_vaddr)(env, env->pc,
- access_type == MMU_INST_FETCH ?
- INSTR_PIF_ADDR_ERROR_CAUSE :
- LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
- addr);
-}
-
-static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
-{
- uint32_t paddr;
- uint32_t page_size;
- unsigned access;
- int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
- &paddr, &page_size, &access);
- if (ret == 0) {
- tb_invalidate_phys_addr(&address_space_memory, paddr,
- MEMTXATTRS_UNSPECIFIED);
- }
-}
-
-#else
-
-static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
-{
- tb_invalidate_phys_addr(vaddr);
-}
-
-#endif
-
-void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
-{
- CPUState *cs = CPU(xtensa_env_get_cpu(env));
-
- cs->exception_index = excp;
- if (excp == EXCP_YIELD) {
- env->yield_needed = 0;
- }
- if (excp == EXCP_DEBUG) {
- env->exception_taken = 0;
- }
- cpu_loop_exit(cs);
-}
-
-void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
-{
- uint32_t vector;
-
- env->pc = pc;
- if (env->sregs[PS] & PS_EXCM) {
- if (env->config->ndepc) {
- env->sregs[DEPC] = pc;
- } else {
- env->sregs[EPC1] = pc;
- }
- vector = EXC_DOUBLE;
- } else {
- env->sregs[EPC1] = pc;
- vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
- }
-
- env->sregs[EXCCAUSE] = cause;
- env->sregs[PS] |= PS_EXCM;
-
- HELPER(exception)(env, vector);
-}
-
-void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
- uint32_t pc, uint32_t cause, uint32_t vaddr)
-{
- env->sregs[EXCVADDR] = vaddr;
- HELPER(exception_cause)(env, pc, cause);
-}
-
-void debug_exception_env(CPUXtensaState *env, uint32_t cause)
-{
- if (xtensa_get_cintlevel(env) < env->config->debug_level) {
- HELPER(debug_exception)(env, env->pc, cause);
- }
-}
-
-void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
-{
- unsigned level = env->config->debug_level;
-
- env->pc = pc;
- env->sregs[DEBUGCAUSE] = cause;
- env->sregs[EPC1 + level - 1] = pc;
- env->sregs[EPS2 + level - 2] = env->sregs[PS];
- env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
- (level << PS_INTLEVEL_SHIFT);
- HELPER(exception)(env, EXC_DEBUG);
-}
-
-static void copy_window_from_phys(CPUXtensaState *env,
- uint32_t window, uint32_t phys, uint32_t n)
-{
- assert(phys < env->config->nareg);
- if (phys + n <= env->config->nareg) {
- memcpy(env->regs + window, env->phys_regs + phys,
- n * sizeof(uint32_t));
- } else {
- uint32_t n1 = env->config->nareg - phys;
- memcpy(env->regs + window, env->phys_regs + phys,
- n1 * sizeof(uint32_t));
- memcpy(env->regs + window + n1, env->phys_regs,
- (n - n1) * sizeof(uint32_t));
- }
-}
-
-static void copy_phys_from_window(CPUXtensaState *env,
- uint32_t phys, uint32_t window, uint32_t n)
-{
- assert(phys < env->config->nareg);
- if (phys + n <= env->config->nareg) {
- memcpy(env->phys_regs + phys, env->regs + window,
- n * sizeof(uint32_t));
- } else {
- uint32_t n1 = env->config->nareg - phys;
- memcpy(env->phys_regs + phys, env->regs + window,
- n1 * sizeof(uint32_t));
- memcpy(env->phys_regs, env->regs + window + n1,
- (n - n1) * sizeof(uint32_t));
- }
-}
-
-
-static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
-{
- return a & (env->config->nareg / 4 - 1);
-}
-
-static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
-{
- return 1 << windowbase_bound(a, env);
-}
-
-void xtensa_sync_window_from_phys(CPUXtensaState *env)
-{
- copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
-}
-
-void xtensa_sync_phys_from_window(CPUXtensaState *env)
-{
- copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
-}
-
-static void xtensa_rotate_window_abs(CPUXtensaState *env, uint32_t position)
-{
- xtensa_sync_phys_from_window(env);
- env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
- xtensa_sync_window_from_phys(env);
-}
-
-void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta)
-{
- xtensa_rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
-}
-
-void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
-{
- xtensa_rotate_window_abs(env, v);
-}
-
-void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
-{
- int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
-
- env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - imm;
- xtensa_rotate_window(env, callinc);
- env->sregs[WINDOW_START] |=
- windowstart_bit(env->sregs[WINDOW_BASE], env);
-}
-
-void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
-{
- uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
- uint32_t windowstart = xtensa_replicate_windowstart(env) >>
- (env->sregs[WINDOW_BASE] + 1);
- uint32_t n = ctz32(windowstart) + 1;
-
- assert(n <= w);
-
- xtensa_rotate_window(env, n);
- env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
- (windowbase << PS_OWB_SHIFT) | PS_EXCM;
- env->sregs[EPC1] = env->pc = pc;
-
- switch (ctz32(windowstart >> n)) {
- case 0:
- HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
- break;
- case 1:
- HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
- break;
- default:
- HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
- break;
- }
-}
-
-void HELPER(test_ill_retw)(CPUXtensaState *env, uint32_t pc)
-{
- int n = (env->regs[0] >> 30) & 0x3;
- int m = 0;
- uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
- uint32_t windowstart = env->sregs[WINDOW_START];
-
- if (windowstart & windowstart_bit(windowbase - 1, env)) {
- m = 1;
- } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
- m = 2;
- } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
- m = 3;
- }
-
- if (n == 0 || (m != 0 && m != n)) {
- qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
- "PS = %08x, m = %d, n = %d\n",
- pc, env->sregs[PS], m, n);
- HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
- }
-}
-
-void HELPER(test_underflow_retw)(CPUXtensaState *env, uint32_t pc)
-{
- int n = (env->regs[0] >> 30) & 0x3;
-
- if (!(env->sregs[WINDOW_START] &
- windowstart_bit(env->sregs[WINDOW_BASE] - n, env))) {
- uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
-
- xtensa_rotate_window(env, -n);
- /* window underflow */
- env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
- (windowbase << PS_OWB_SHIFT) | PS_EXCM;
- env->sregs[EPC1] = env->pc = pc;
-
- if (n == 1) {
- HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
- } else if (n == 2) {
- HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
- } else if (n == 3) {
- HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
- }
- }
-}
-
-uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
-{
- int n = (env->regs[0] >> 30) & 0x3;
- uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
- uint32_t ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);
-
- xtensa_rotate_window(env, -n);
- env->sregs[WINDOW_START] &= ~windowstart_bit(windowbase, env);
- return ret_pc;
-}
-
-void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
-{
- xtensa_rotate_window(env, imm4);
-}
-
-void xtensa_restore_owb(CPUXtensaState *env)
-{
- xtensa_rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
-}
-
-void HELPER(restore_owb)(CPUXtensaState *env)
-{
- xtensa_restore_owb(env);
-}
-
-void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
-{
- if ((env->sregs[WINDOW_START] &
- (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
- windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
- windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
- HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
- }
-}
-
-void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
-{
- if (env->sregs[LBEG] != v) {
- tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
- env->sregs[LBEG] = v;
- }
-}
-
-void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
-{
- if (env->sregs[LEND] != v) {
- tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
- env->sregs[LEND] = v;
- tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
- }
-}
-
-void HELPER(dump_state)(CPUXtensaState *env)
-{
- XtensaCPU *cpu = xtensa_env_get_cpu(env);
-
- cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
-}
-
-#ifndef CONFIG_USER_ONLY
-
-void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
-{
- CPUState *cpu;
-
- env->pc = pc;
- env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
- (intlevel << PS_INTLEVEL_SHIFT);
-
- qemu_mutex_lock_iothread();
- check_interrupts(env);
- qemu_mutex_unlock_iothread();
-
- if (env->pending_irq_level) {
- cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
- return;
- }
-
- cpu = CPU(xtensa_env_get_cpu(env));
- cpu->halted = 1;
- HELPER(exception)(env, EXCP_HLT);
-}
-
void HELPER(update_ccount)(CPUXtensaState *env)
{
uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
@@ -450,22 +69,6 @@ void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
env->yield_needed = 1;
}
-void HELPER(check_interrupts)(CPUXtensaState *env)
-{
- qemu_mutex_lock_iothread();
- check_interrupts(env);
- qemu_mutex_unlock_iothread();
-}
-
-void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
-{
- /*
- * Attempt the memory load; we don't care about the result but
- * only the side-effects (ie any MMU or other exception)
- */
- cpu_ldub_code_ra(env, vaddr, GETPC());
-}
-
/*!
* Check vaddr accessibility/cache attributes and raise an exception if
* specified by the ATOMCTL SR.
@@ -549,505 +152,8 @@ void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
env->sregs[MEMCTL] = v & env->config->memctl_mask;
}
-void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
-{
- XtensaCPU *cpu = xtensa_env_get_cpu(env);
-
- v = (v & 0xffffff00) | 0x1;
- if (v != env->sregs[RASID]) {
- env->sregs[RASID] = v;
- tlb_flush(CPU(cpu));
- }
-}
-
-static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
-{
- uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];
-
- switch (way) {
- case 4:
- return (tlbcfg >> 16) & 0x3;
-
- case 5:
- return (tlbcfg >> 20) & 0x1;
-
- case 6:
- return (tlbcfg >> 24) & 0x1;
-
- default:
- return 0;
- }
-}
-
-/*!
- * Get bit mask for the virtual address bits translated by the TLB way
- */
-uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
-{
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
- bool varway56 = dtlb ?
- env->config->dtlb.varway56 :
- env->config->itlb.varway56;
-
- switch (way) {
- case 4:
- return 0xfff00000 << get_page_size(env, dtlb, way) * 2;
-
- case 5:
- if (varway56) {
- return 0xf8000000 << get_page_size(env, dtlb, way);
- } else {
- return 0xf8000000;
- }
-
- case 6:
- if (varway56) {
- return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
- } else {
- return 0xf0000000;
- }
-
- default:
- return 0xfffff000;
- }
- } else {
- return REGION_PAGE_MASK;
- }
-}
-
-/*!
- * Get bit mask for the 'VPN without index' field.
- * See ISA, 4.6.5.6, data format for RxTLB0
- */
-static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
-{
- if (way < 4) {
- bool is32 = (dtlb ?
- env->config->dtlb.nrefillentries :
- env->config->itlb.nrefillentries) == 32;
- return is32 ? 0xffff8000 : 0xffffc000;
- } else if (way == 4) {
- return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
- } else if (way <= 6) {
- uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
- bool varway56 = dtlb ?
- env->config->dtlb.varway56 :
- env->config->itlb.varway56;
-
- if (varway56) {
- return mask << (way == 5 ? 2 : 3);
- } else {
- return mask << 1;
- }
- } else {
- return 0xfffff000;
- }
-}
-
-/*!
- * Split virtual address into VPN (with index) and entry index
- * for the given TLB way
- */
-void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
- uint32_t *vpn, uint32_t wi, uint32_t *ei)
-{
- bool varway56 = dtlb ?
- env->config->dtlb.varway56 :
- env->config->itlb.varway56;
-
- if (!dtlb) {
- wi &= 7;
- }
-
- if (wi < 4) {
- bool is32 = (dtlb ?
- env->config->dtlb.nrefillentries :
- env->config->itlb.nrefillentries) == 32;
- *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
- } else {
- switch (wi) {
- case 4:
- {
- uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
- *ei = (v >> eibase) & 0x3;
- }
- break;
-
- case 5:
- if (varway56) {
- uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
- *ei = (v >> eibase) & 0x3;
- } else {
- *ei = (v >> 27) & 0x1;
- }
- break;
-
- case 6:
- if (varway56) {
- uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
- *ei = (v >> eibase) & 0x7;
- } else {
- *ei = (v >> 28) & 0x1;
- }
- break;
-
- default:
- *ei = 0;
- break;
- }
- }
- *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
-}
-
-/*!
- * Split TLB address into TLB way, entry index and VPN (with index).
- * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
- */
-static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
- uint32_t *vpn, uint32_t *wi, uint32_t *ei)
-{
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
- *wi = v & (dtlb ? 0xf : 0x7);
- split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
- } else {
- *vpn = v & REGION_PAGE_MASK;
- *wi = 0;
- *ei = (v >> 29) & 0x7;
- }
-}
-
-static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
- uint32_t v, bool dtlb, uint32_t *pwi)
-{
- uint32_t vpn;
- uint32_t wi;
- uint32_t ei;
-
- split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
- if (pwi) {
- *pwi = wi;
- }
- return xtensa_tlb_get_entry(env, dtlb, wi, ei);
-}
-
-uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
-{
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
- uint32_t wi;
- const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
- return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
- } else {
- return v & REGION_PAGE_MASK;
- }
-}
-
-uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
-{
- const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
- return entry->paddr | entry->attr;
-}
-
-void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
-{
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
- uint32_t wi;
- xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
- if (entry->variable && entry->asid) {
- tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
- entry->asid = 0;
- }
- }
-}
-
-uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
-{
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
- uint32_t wi;
- uint32_t ei;
- uint8_t ring;
- int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);
-
- switch (res) {
- case 0:
- if (ring >= xtensa_get_ring(env)) {
- return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
- }
- break;
-
- case INST_TLB_MULTI_HIT_CAUSE:
- case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
- HELPER(exception_cause_vaddr)(env, env->pc, res, v);
- break;
- }
- return 0;
- } else {
- return (v & REGION_PAGE_MASK) | 0x1;
- }
-}
-
-void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
- xtensa_tlb_entry *entry, bool dtlb,
- unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
-{
- entry->vaddr = vpn;
- entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
- entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
- entry->attr = pte & 0xf;
-}
-
-void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
- unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
-{
- XtensaCPU *cpu = xtensa_env_get_cpu(env);
- CPUState *cs = CPU(cpu);
- xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
-
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
- if (entry->variable) {
- if (entry->asid) {
- tlb_flush_page(cs, entry->vaddr);
- }
- xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
- tlb_flush_page(cs, entry->vaddr);
- } else {
- qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
- __func__, dtlb, wi, ei);
- }
- } else {
- tlb_flush_page(cs, entry->vaddr);
- if (xtensa_option_enabled(env->config,
- XTENSA_OPTION_REGION_TRANSLATION)) {
- entry->paddr = pte & REGION_PAGE_MASK;
- }
- entry->attr = pte & 0xf;
- }
-}
-
-void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
-{
- uint32_t vpn;
- uint32_t wi;
- uint32_t ei;
- split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
- xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
-}
-
-
-void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
-{
- uint32_t change = v ^ env->sregs[IBREAKENABLE];
- unsigned i;
-
- for (i = 0; i < env->config->nibreak; ++i) {
- if (change & (1 << i)) {
- tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
- }
- }
- env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
-}
-
-void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
-{
- if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
- tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
- tb_invalidate_virtual_addr(env, v);
- }
- env->sregs[IBREAKA + i] = v;
-}
-
-static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
- uint32_t dbreakc)
-{
- CPUState *cs = CPU(xtensa_env_get_cpu(env));
- int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
- uint32_t mask = dbreakc | ~DBREAKC_MASK;
-
- if (env->cpu_watchpoint[i]) {
- cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
- }
- if (dbreakc & DBREAKC_SB) {
- flags |= BP_MEM_WRITE;
- }
- if (dbreakc & DBREAKC_LB) {
- flags |= BP_MEM_READ;
- }
- /* contiguous mask after inversion is one less than some power of 2 */
- if ((~mask + 1) & ~mask) {
- qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
- /* cut mask after the first zero bit */
- mask = 0xffffffff << (32 - clo32(mask));
- }
- if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
- flags, &env->cpu_watchpoint[i])) {
- env->cpu_watchpoint[i] = NULL;
- qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
- dbreaka & mask, ~mask + 1);
- }
-}
-
-void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
-{
- uint32_t dbreakc = env->sregs[DBREAKC + i];
-
- if ((dbreakc & DBREAKC_SB_LB) &&
- env->sregs[DBREAKA + i] != v) {
- set_dbreak(env, i, v, dbreakc);
- }
- env->sregs[DBREAKA + i] = v;
-}
-
-void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
-{
- if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
- if (v & DBREAKC_SB_LB) {
- set_dbreak(env, i, env->sregs[DBREAKA + i], v);
- } else {
- if (env->cpu_watchpoint[i]) {
- CPUState *cs = CPU(xtensa_env_get_cpu(env));
-
- cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
- env->cpu_watchpoint[i] = NULL;
- }
- }
- }
- env->sregs[DBREAKC + i] = v;
-}
#endif
-void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
-{
- static const int rounding_mode[] = {
- float_round_nearest_even,
- float_round_to_zero,
- float_round_up,
- float_round_down,
- };
-
- env->uregs[FCR] = v & 0xfffff07f;
- set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
-}
-
-float32 HELPER(abs_s)(float32 v)
-{
- return float32_abs(v);
-}
-
-float32 HELPER(neg_s)(float32 v)
-{
- return float32_chs(v);
-}
-
-float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
-{
- return float32_add(a, b, &env->fp_status);
-}
-
-float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
-{
- return float32_sub(a, b, &env->fp_status);
-}
-
-float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
-{
- return float32_mul(a, b, &env->fp_status);
-}
-
-float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
-{
- return float32_muladd(b, c, a, 0,
- &env->fp_status);
-}
-
-float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
-{
- return float32_muladd(b, c, a, float_muladd_negate_product,
- &env->fp_status);
-}
-
-uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
-{
- float_status fp_status = {0};
-
- set_float_rounding_mode(rounding_mode, &fp_status);
- return float32_to_int32(
- float32_scalbn(v, scale, &fp_status), &fp_status);
-}
-
-uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
-{
- float_status fp_status = {0};
- float32 res;
-
- set_float_rounding_mode(rounding_mode, &fp_status);
-
- res = float32_scalbn(v, scale, &fp_status);
-
- if (float32_is_neg(v) && !float32_is_any_nan(v)) {
- return float32_to_int32(res, &fp_status);
- } else {
- return float32_to_uint32(res, &fp_status);
- }
-}
-
-float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
-{
- return float32_scalbn(int32_to_float32(v, &env->fp_status),
- (int32_t)scale, &env->fp_status);
-}
-
-float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
-{
- return float32_scalbn(uint32_to_float32(v, &env->fp_status),
- (int32_t)scale, &env->fp_status);
-}
-
-static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
-{
- if (v) {
- env->sregs[BR] |= br;
- } else {
- env->sregs[BR] &= ~br;
- }
-}
-
-void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
-{
- set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
-}
-
-void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
-{
- set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
-}
-
-void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
-{
- int v = float32_compare_quiet(a, b, &env->fp_status);
- set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
-}
-
-void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
-{
- set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
-}
-
-void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
-{
- int v = float32_compare_quiet(a, b, &env->fp_status);
- set_br(env, v == float_relation_less || v == float_relation_unordered, br);
-}
-
-void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
-{
- set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
-}
-
-void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
-{
- int v = float32_compare_quiet(a, b, &env->fp_status);
- set_br(env, v != float_relation_greater, br);
-}
-
uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
diff --git a/target/xtensa/overlay_tool.h b/target/xtensa/overlay_tool.h
index ee37a04a17..12609a0d0c 100644
--- a/target/xtensa/overlay_tool.h
+++ b/target/xtensa/overlay_tool.h
@@ -457,6 +457,7 @@
.nareg = XCHAL_NUM_AREGS, \
.ndepc = (XCHAL_XEA_VERSION >= 2), \
.inst_fetch_width = XCHAL_INST_FETCH_WIDTH, \
+ .max_insn_size = XCHAL_MAX_INSTRUCTION_SIZE, \
EXCEPTIONS_SECTION, \
INTERRUPTS_SECTION, \
TLB_SECTION, \
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 46e1338448..a435d9c36c 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -53,7 +53,7 @@ struct DisasContext {
uint32_t pc;
int cring;
int ring;
- uint32_t lbeg;
+ uint32_t lbeg_off;
uint32_t lend;
bool sar_5bit;
@@ -390,11 +390,9 @@ static void gen_jump(DisasContext *dc, TCGv dest)
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
-#ifndef CONFIG_USER_ONLY
if (((dc->base.pc_first ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
-#endif
gen_jump_slot(dc, tmp, slot);
tcg_temp_free(tmp);
}
@@ -420,25 +418,25 @@ static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
-#ifndef CONFIG_USER_ONLY
if (((dc->base.pc_first ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
-#endif
gen_callw_slot(dc, callinc, tmp, slot);
tcg_temp_free(tmp);
}
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
- if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
- !(dc->base.tb->flags & XTENSA_TBFLAG_EXCM) &&
- dc->base.pc_next == dc->lend) {
+ if (dc->base.pc_next == dc->lend) {
TCGLabel *label = gen_new_label();
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
- gen_jumpi(dc, dc->lbeg, slot);
+ if (dc->lbeg_off) {
+ gen_jumpi(dc, dc->base.pc_next - dc->lbeg_off, slot);
+ } else {
+ gen_jump(dc, cpu_SR[LBEG]);
+ }
gen_set_label(label);
gen_jumpi(dc, dc->base.pc_next, -1);
return true;
@@ -534,16 +532,6 @@ static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
}
}
-static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
-{
- gen_helper_wsr_lbeg(cpu_env, s);
-}
-
-static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
-{
- gen_helper_wsr_lend(cpu_env, s);
-}
-
static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
@@ -743,8 +731,6 @@ static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
static void (* const wsr_handler[256])(DisasContext *dc,
uint32_t sr, TCGv_i32 v) = {
- [LBEG] = gen_wsr_lbeg,
- [LEND] = gen_wsr_lend,
[SAR] = gen_wsr_sar,
[BR] = gen_wsr_br,
[LITBASE] = gen_wsr_litbase,
@@ -906,13 +892,6 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
}
dc->base.pc_next = dc->pc + len;
- if (xtensa_option_enabled(dc->config, XTENSA_OPTION_LOOP) &&
- dc->lbeg == dc->pc &&
- ((dc->pc ^ (dc->base.pc_next - 1)) & -dc->config->inst_fetch_width)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "unaligned first instruction of a loop (pc = %08x)\n",
- dc->pc);
- }
for (i = 1; i < len; ++i) {
b[i] = cpu_ldub_code(env, dc->pc + i);
}
@@ -1097,8 +1076,10 @@ static void xtensa_tr_init_disas_context(DisasContextBase *dcbase,
dc->pc = dc->base.pc_first;
dc->ring = tb_flags & XTENSA_TBFLAG_RING_MASK;
dc->cring = (tb_flags & XTENSA_TBFLAG_EXCM) ? 0 : dc->ring;
- dc->lbeg = env->sregs[LBEG];
- dc->lend = env->sregs[LEND];
+ dc->lbeg_off = (dc->base.tb->cs_base & XTENSA_CSBASE_LBEG_OFF_MASK) >>
+ XTENSA_CSBASE_LBEG_OFF_SHIFT;
+ dc->lend = (dc->base.tb->cs_base & XTENSA_CSBASE_LEND_MASK) +
+ (dc->base.pc_first & TARGET_PAGE_MASK);
dc->debug = tb_flags & XTENSA_TBFLAG_DEBUG;
dc->icount = tb_flags & XTENSA_TBFLAG_ICOUNT;
dc->cpenable = (tb_flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
@@ -1712,12 +1693,10 @@ static void translate_loop(DisasContext *dc, const uint32_t arg[],
const uint32_t par[])
{
uint32_t lend = arg[1];
- TCGv_i32 tmp = tcg_const_i32(lend);
tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[arg[0]], 1);
tcg_gen_movi_i32(cpu_SR[LBEG], dc->base.pc_next);
- gen_helper_wsr_lend(cpu_env, tmp);
- tcg_temp_free(tmp);
+ tcg_gen_movi_i32(cpu_SR[LEND], lend);
if (par[0] != TCG_COND_NEVER) {
TCGLabel *label = gen_new_label();
@@ -4609,7 +4588,7 @@ static const XtensaOpcodeOps core_ops[] = {
.translate = translate_wsr,
.test_ill = test_ill_wsr,
.par = (const uint32_t[]){LBEG},
- .op_flags = XTENSA_OP_EXIT_TB_0,
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
.windowed_register_op = 0x1,
}, {
.name = "wsr.lcount",
@@ -4622,7 +4601,7 @@ static const XtensaOpcodeOps core_ops[] = {
.translate = translate_wsr,
.test_ill = test_ill_wsr,
.par = (const uint32_t[]){LEND},
- .op_flags = XTENSA_OP_EXIT_TB_0,
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
.windowed_register_op = 0x1,
}, {
.name = "wsr.litbase",
@@ -5183,7 +5162,7 @@ static const XtensaOpcodeOps core_ops[] = {
.translate = translate_xsr,
.test_ill = test_ill_xsr,
.par = (const uint32_t[]){LBEG},
- .op_flags = XTENSA_OP_EXIT_TB_0,
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
.windowed_register_op = 0x1,
}, {
.name = "xsr.lcount",
@@ -5196,7 +5175,7 @@ static const XtensaOpcodeOps core_ops[] = {
.translate = translate_xsr,
.test_ill = test_ill_xsr,
.par = (const uint32_t[]){LEND},
- .op_flags = XTENSA_OP_EXIT_TB_0,
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
.windowed_register_op = 0x1,
}, {
.name = "xsr.litbase",
diff --git a/target/xtensa/win_helper.c b/target/xtensa/win_helper.c
new file mode 100644
index 0000000000..7d793d4f9c
--- /dev/null
+++ b/target/xtensa/win_helper.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+
+static void copy_window_from_phys(CPUXtensaState *env,
+ uint32_t window, uint32_t phys, uint32_t n)
+{
+ assert(phys < env->config->nareg);
+ if (phys + n <= env->config->nareg) {
+ memcpy(env->regs + window, env->phys_regs + phys,
+ n * sizeof(uint32_t));
+ } else {
+ uint32_t n1 = env->config->nareg - phys;
+ memcpy(env->regs + window, env->phys_regs + phys,
+ n1 * sizeof(uint32_t));
+ memcpy(env->regs + window + n1, env->phys_regs,
+ (n - n1) * sizeof(uint32_t));
+ }
+}
+
+static void copy_phys_from_window(CPUXtensaState *env,
+ uint32_t phys, uint32_t window, uint32_t n)
+{
+ assert(phys < env->config->nareg);
+ if (phys + n <= env->config->nareg) {
+ memcpy(env->phys_regs + phys, env->regs + window,
+ n * sizeof(uint32_t));
+ } else {
+ uint32_t n1 = env->config->nareg - phys;
+ memcpy(env->phys_regs + phys, env->regs + window,
+ n1 * sizeof(uint32_t));
+ memcpy(env->phys_regs, env->regs + window + n1,
+ (n - n1) * sizeof(uint32_t));
+ }
+}
+
+static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
+{
+ return a & (env->config->nareg / 4 - 1);
+}
+
+static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
+{
+ return 1 << windowbase_bound(a, env);
+}
+
+void xtensa_sync_window_from_phys(CPUXtensaState *env)
+{
+ copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
+}
+
+void xtensa_sync_phys_from_window(CPUXtensaState *env)
+{
+ copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
+}
+
+static void xtensa_rotate_window_abs(CPUXtensaState *env, uint32_t position)
+{
+ xtensa_sync_phys_from_window(env);
+ env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
+ xtensa_sync_window_from_phys(env);
+}
+
+void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta)
+{
+ xtensa_rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
+}
+
+void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
+{
+ xtensa_rotate_window_abs(env, v);
+}
+
+void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
+{
+ int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
+
+ env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - imm;
+ xtensa_rotate_window(env, callinc);
+ env->sregs[WINDOW_START] |=
+ windowstart_bit(env->sregs[WINDOW_BASE], env);
+}
+
+void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
+{
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t n = ctz32(windowstart) + 1;
+
+ assert(n <= w);
+
+ xtensa_rotate_window(env, n);
+ env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
+ (windowbase << PS_OWB_SHIFT) | PS_EXCM;
+ env->sregs[EPC1] = env->pc = pc;
+
+ switch (ctz32(windowstart >> n)) {
+ case 0:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
+ break;
+ case 1:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
+ break;
+ default:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
+ break;
+ }
+}
+
+void HELPER(test_ill_retw)(CPUXtensaState *env, uint32_t pc)
+{
+ int n = (env->regs[0] >> 30) & 0x3;
+ int m = 0;
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t windowstart = env->sregs[WINDOW_START];
+
+ if (windowstart & windowstart_bit(windowbase - 1, env)) {
+ m = 1;
+ } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
+ m = 2;
+ } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
+ m = 3;
+ }
+
+ if (n == 0 || (m != 0 && m != n)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
+ "PS = %08x, m = %d, n = %d\n",
+ pc, env->sregs[PS], m, n);
+ HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
+ }
+}
+
+void HELPER(test_underflow_retw)(CPUXtensaState *env, uint32_t pc)
+{
+ int n = (env->regs[0] >> 30) & 0x3;
+
+ if (!(env->sregs[WINDOW_START] &
+ windowstart_bit(env->sregs[WINDOW_BASE] - n, env))) {
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+
+ xtensa_rotate_window(env, -n);
+ /* window underflow */
+ env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
+ (windowbase << PS_OWB_SHIFT) | PS_EXCM;
+ env->sregs[EPC1] = env->pc = pc;
+
+ if (n == 1) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
+ } else if (n == 2) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
+ } else if (n == 3) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
+ }
+ }
+}
+
+uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
+{
+ int n = (env->regs[0] >> 30) & 0x3;
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);
+
+ xtensa_rotate_window(env, -n);
+ env->sregs[WINDOW_START] &= ~windowstart_bit(windowbase, env);
+ return ret_pc;
+}
+
+void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
+{
+ xtensa_rotate_window(env, imm4);
+}
+
+void xtensa_restore_owb(CPUXtensaState *env)
+{
+ xtensa_rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
+}
+
+void HELPER(restore_owb)(CPUXtensaState *env)
+{
+ xtensa_restore_owb(env);
+}
+
+void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
+{
+ if ((env->sregs[WINDOW_START] &
+ (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
+ windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
+ windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
+ HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
+ }
+}
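
The window helpers above treat the physical register file as a ring: windowbase_bound() masks the window base modulo nareg/4, and copy_window_from_phys()/copy_phys_from_window() split the memcpy when a 16-register window wraps past the end of the array. A minimal standalone sketch of that wrap-around copy, assuming a hypothetical 64-entry register file (the real size comes from env->config->nareg):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NAREG 64    /* illustrative only; the real value is env->config->nareg */

static void copy_wrapped(uint32_t *dst, const uint32_t *phys_regs,
                         uint32_t phys, uint32_t n)
{
    assert(phys < NAREG);
    if (phys + n <= NAREG) {
        memcpy(dst, phys_regs + phys, n * sizeof(uint32_t));
    } else {
        uint32_t n1 = NAREG - phys;                  /* entries before the wrap */
        memcpy(dst, phys_regs + phys, n1 * sizeof(uint32_t));
        memcpy(dst + n1, phys_regs, (n - n1) * sizeof(uint32_t));
    }
}

int main(void)
{
    uint32_t phys_regs[NAREG], window[16];

    for (int i = 0; i < NAREG; i++) {
        phys_regs[i] = i;
    }
    /* A window starting at physical register 56 wraps past the end. */
    copy_wrapped(window, phys_regs, 56, 16);
    printf("%u %u %u\n", (unsigned)window[0], (unsigned)window[7],
           (unsigned)window[8]);                     /* prints: 56 63 0 */
    return 0;
}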
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 4eea38ae99..f5e6eb5152 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -12,6 +12,7 @@ check-help:
@echo " $(MAKE) check-qapi-schema Run QAPI schema tests"
@echo " $(MAKE) check-block Run block tests"
@echo " $(MAKE) check-tcg Run TCG tests"
+ @echo " $(MAKE) check-softfloat Run FPU emulation tests"
@echo " $(MAKE) check-acceptance Run all acceptance (functional) tests"
@echo
@echo " $(MAKE) check-report.html Generates an HTML test report"
@@ -880,6 +881,138 @@ check-report-unit.tap: $(check-unit-y)
check-report.tap: $(patsubst %,check-report-qtest-%.tap, $(QTEST_TARGETS)) check-report-unit.tap
$(call quiet-command,./scripts/tap-merge.py $^ > $@,"GEN","$@")
+# FPU Emulation tests (aka softfloat)
+#
+# As we still have some places that need fixing, the rules are a little
+# more complex than they need to be and have to override some of the
+# generic Makefile expansions. Once we are cleanly passing all
+# the tests, we can simplify the make syntax.
+
+FP_TEST_BIN=$(BUILD_DIR)/tests/fp/fp-test
+
+# the build dir is created by configure
+.PHONY: $(FP_TEST_BIN)
+$(FP_TEST_BIN):
+ $(call quiet-command, \
+ $(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" $(notdir $@), \
+ "BUILD", "$(notdir $@)")
+
+# The full test suite can take a bit of time, so we default to a quick run
+ifeq ($(SPEED), quick)
+FP_TL=-l 1
+else
+FP_TL=-l 2 -r all
+endif
+
+# $1 = tests, $2 = description
+test-softfloat = $(call quiet-command, \
+ cd $(BUILD_DIR)/tests/fp && \
+ ./fp-test -s $(FP_TL) $1 > $2.out 2>&1 || \
+ (cat $2.out && exit 1;), \
+ "FLOAT TEST", $2)
+
+# Conversion Routines:
+# FIXME: i32_to_extF80 (broken), i64_to_extF80 (broken)
+# ui32_to_f128 (not implemented), f128_to_ui32 (not implemented)
+# extF80_roundToInt (broken)
+#
+check-softfloat-conv: $(FP_TEST_BIN)
+ $(call test-softfloat, \
+ i32_to_f16 i64_to_f16 \
+ i32_to_f32 i64_to_f32 \
+ i32_to_f64 i64_to_f64 \
+ i32_to_f128 i64_to_f128, int-to-float)
+ $(call test-softfloat, \
+ ui32_to_f16 ui64_to_f16 \
+ ui32_to_f32 ui64_to_f32 \
+ ui32_to_f64 ui64_to_f64 \
+ ui64_to_f128, uint-to-float)
+ $(call test-softfloat, \
+ f16_to_i32 f16_to_i32_r_minMag \
+ f32_to_i32 f32_to_i32_r_minMag \
+ f64_to_i32 f64_to_i32_r_minMag \
+ extF80_to_i32 extF80_to_i32_r_minMag \
+ f128_to_i32 f128_to_i32_r_minMag \
+ f16_to_i64 f16_to_i64_r_minMag \
+ f32_to_i64 f32_to_i64_r_minMag \
+ f64_to_i64 f64_to_i64_r_minMag \
+ extF80_to_i64 extF80_to_i64_r_minMag \
+ f128_to_i64 f128_to_i64_r_minMag, \
+ float-to-int)
+ $(call test-softfloat, \
+ f16_to_ui32 f16_to_ui32_r_minMag \
+ f32_to_ui32 f32_to_ui32_r_minMag \
+ f64_to_ui32 f64_to_ui32_r_minMag \
+ f16_to_ui64 f16_to_ui64_r_minMag \
+ f32_to_ui64 f32_to_ui64_r_minMag \
+ f64_to_ui64 f64_to_ui64_r_minMag, \
+ float-to-uint)
+ $(call test-softfloat, \
+ f16_roundToInt f32_roundToInt \
+ f64_roundToInt f128_roundToInt, \
+ round-to-integer)
+
+# Generic rule for all float operations
+#
+# Some patterns are overridden due to broken or missing tests.
+# Hopefully these can be removed over time.
+
+check-softfloat-%: $(FP_TEST_BIN)
+ $(call test-softfloat, f16_$* f32_$* f64_$* extF80_$* f128_$*, $*)
+
+# Float Compare routines
+SF_COMPARE_OPS=eq eq_signaling le le_quiet lt_quiet
+SF_COMPARE_RULES=$(patsubst %,check-softfloat-%, $(SF_COMPARE_OPS))
+
+# FIXME: extF80_le_quiet (broken)
+check-softfloat-le_quiet: $(FP_TEST_BIN)
+ $(call test-softfloat, \
+ f16_le_quiet f32_le_quiet f64_le_quiet \
+ f128_le_quiet, \
+ le_quiet)
+
+# FIXME: extF80_lt_quiet (broken)
+check-softfloat-lt_quiet: $(FP_TEST_BIN)
+ $(call test-softfloat, \
+ f16_lt_quiet f32_lt_quiet f64_lt_quiet \
+ f128_lt_quiet, \
+ lt_quiet)
+
+.PHONY: check-softfloat-compare
+check-softfloat-compare: $(SF_COMPARE_RULES)
+
+# Math Operations
+
+# FIXME: extF80_mulAdd (missing)
+check-softfloat-mulAdd: $(FP_TEST_BIN)
+ $(call test-softfloat, \
+ f16_mulAdd f32_mulAdd f64_mulAdd f128_mulAdd, \
+ mulAdd)
+
+# FIXME: extF80_rem (broken)
+check-softfloat-rem: $(FP_TEST_BIN)
+ $(call test-softfloat, \
+ f16_rem f32_rem f64_rem f128_rem, \
+ rem)
+
+SF_MATH_OPS=add sub mul mulAdd div rem sqrt
+SF_MATH_RULES=$(patsubst %,check-softfloat-%, $(SF_MATH_OPS))
+
+.PHONY: check-softfloat-ops
+check-softfloat-ops: $(SF_MATH_RULES)
+
+# Finally, a generic rule to test all of softfloat. If TCG isn't
+# enabled, we define a null operation which skips the tests.
+
+.PHONY: check-softfloat
+ifeq ($(CONFIG_TCG),y)
+check-softfloat: check-softfloat-conv check-softfloat-compare check-softfloat-ops
+else
+check-softfloat:
+ $(call quiet-command, /bin/true, "FLOAT TEST", \
+ "SKIPPED for non-TCG builds")
+endif
+
# Per guest TCG tests
LINUX_USER_TARGETS=$(filter %-linux-user,$(TARGET_DIRS))
@@ -912,7 +1045,7 @@ clean-tcg-tests-%:
build-tcg: $(BUILD_TCG_TARGET_RULES)
.PHONY: check-tcg
-check-tcg: $(RUN_TCG_TARGET_RULES)
+check-tcg: check-softfloat $(RUN_TCG_TARGET_RULES)
.PHONY: clean-tcg
clean-tcg: $(CLEAN_TCG_TARGET_RULES)
@@ -993,7 +1126,7 @@ check-acceptance: check-venv $(TESTS_RESULTS_DIR)
check-qapi-schema: $(patsubst %,check-%, $(check-qapi-schema-y)) check-tests/qapi-schema/doc-good.texi
check-qtest: $(patsubst %,check-qtest-%, $(QTEST_TARGETS))
check-block: $(patsubst %,check-%, $(check-block-y))
-check: check-qapi-schema check-unit check-qtest check-decodetree
+check: check-qapi-schema check-unit check-softfloat check-qtest check-decodetree
check-clean:
rm -rf $(check-unit-y) tests/*.o $(QEMU_IOTESTS_HELPERS-y)
rm -rf $(sort $(foreach target,$(SYSEMU_TARGET_LIST), $(check-qtest-$(target)-y)) $(check-qtest-generic-y))
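
Most of the conversion tests listed above come in pairs such as f32_to_i32 and f32_to_i32_r_minMag: in the SoftFloat/TestFloat naming, "_r_minMag" selects rounding to minimum magnitude, i.e. toward zero, while the unsuffixed variant is exercised with the rounding mode under test (round-to-nearest-even by default). A small C illustration of the difference (not part of the test suite):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = -2.7;

    /* minMag: round toward zero, dropping the fraction */
    printf("minMag  -> %.0f\n", trunc(x));      /* -2 */
    /* round-to-nearest (the default FP rounding mode) */
    printf("nearest -> %.0f\n", nearbyint(x));  /* -3 */
    return 0;
}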
diff --git a/tests/fp/Makefile b/tests/fp/Makefile
index 5019dcdca0..5a35e7c210 100644
--- a/tests/fp/Makefile
+++ b/tests/fp/Makefile
@@ -65,8 +65,7 @@ QEMU_CFLAGS += $(TF_OPTS)
TF_CFLAGS :=
TF_CFLAGS += -Wno-strict-prototypes
TF_CFLAGS += -Wno-unknown-pragmas
-TF_CFLAGS += -Wno-discarded-qualifiers
-TF_CFLAGS += -Wno-maybe-uninitialized
+TF_CFLAGS += -Wno-uninitialized
TF_CFLAGS += -Wno-missing-prototypes
TF_CFLAGS += -Wno-return-type
TF_CFLAGS += -Wno-unused-function
diff --git a/tests/fp/berkeley-testfloat-3 b/tests/fp/berkeley-testfloat-3
-Subproject ca9fa2ba05625ba929958f163b01747e07dd39c
+Subproject 5a59dcec19327396a011a17fd924aed4fec416b
diff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c
index f5bc5edebf..4ba5e1d2d4 100644
--- a/tests/fp/fp-bench.c
+++ b/tests/fp/fp-bench.c
@@ -143,15 +143,20 @@ static void update_random_ops(int n_ops, enum precision prec)
for (i = 0; i < n_ops; i++) {
uint64_t r = random_ops[i];
- if (prec == PREC_SINGLE || PREC_FLOAT32) {
+ switch (prec) {
+ case PREC_SINGLE:
+ case PREC_FLOAT32:
do {
r = xorshift64star(r);
} while (!float32_is_normal(r));
- } else if (prec == PREC_DOUBLE || PREC_FLOAT64) {
+ break;
+ case PREC_DOUBLE:
+ case PREC_FLOAT64:
do {
r = xorshift64star(r);
} while (!float64_is_normal(r));
- } else {
+ break;
+ default:
g_assert_not_reached();
}
random_ops[i] = r;
@@ -171,8 +176,6 @@ static void fill_random(union fp *ops, int n_ops, enum precision prec,
if (no_neg && float32_is_neg(ops[i].f32)) {
ops[i].f32 = float32_chs(ops[i].f32);
}
- /* raise the exponent to limit the frequency of denormal results */
- ops[i].f32 |= 0x40000000;
break;
case PREC_DOUBLE:
case PREC_FLOAT64:
@@ -180,8 +183,6 @@ static void fill_random(union fp *ops, int n_ops, enum precision prec,
if (no_neg && float64_is_neg(ops[i].f64)) {
ops[i].f64 = float64_chs(ops[i].f64);
}
- /* raise the exponent to limit the frequency of denormal results */
- ops[i].f64 |= LIT64(0x4000000000000000);
break;
default:
g_assert_not_reached();
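
The update_random_ops() hunk above replaces if (prec == PREC_SINGLE || PREC_FLOAT32) with a switch. The old condition parses as (prec == PREC_SINGLE) || PREC_FLOAT32, so whenever PREC_FLOAT32 is a non-zero enumerator the test is always true and the double-precision branch is unreachable. A minimal sketch of the pattern, with illustrative enumerator values (the real enum lives in fp-bench.c):

#include <stdio.h>

/* Illustrative values only; not the actual fp-bench.c enum. */
enum precision { PREC_SINGLE, PREC_FLOAT32, PREC_DOUBLE, PREC_FLOAT64 };

int main(void)
{
    enum precision prec = PREC_DOUBLE;

    if (prec == PREC_SINGLE || PREC_FLOAT32) {          /* (prec == PREC_SINGLE) || 1 */
        printf("buggy check: taken even for PREC_DOUBLE\n");
    }
    if (prec == PREC_SINGLE || prec == PREC_FLOAT32) {  /* the intended comparison */
        printf("not printed for PREC_DOUBLE\n");
    }
    return 0;
}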
diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c
index fca576309c..2a35ef601d 100644
--- a/tests/fp/fp-test.c
+++ b/tests/fp/fp-test.c
@@ -789,7 +789,7 @@ static int set_init_flags(const char *flags)
return 0;
}
-static uint8_t slow_clear_flags(void)
+static uint_fast8_t slow_clear_flags(void)
{
uint8_t prev = slowfloat_exceptionFlags;
@@ -797,7 +797,7 @@ static uint8_t slow_clear_flags(void)
return prev;
}
-static uint8_t qemu_clear_flags(void)
+static uint_fast8_t qemu_clear_flags(void)
{
uint8_t prev = qemu_flags_to_sf(qsf.float_exception_flags);
diff --git a/tests/fp/platform.h b/tests/fp/platform.h
index f8c423dde3..c20ba70baa 100644
--- a/tests/fp/platform.h
+++ b/tests/fp/platform.h
@@ -29,6 +29,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "config-host.h"
#ifndef HOST_WORDS_BIGENDIAN
#define LITTLEENDIAN 1
diff --git a/tests/qemu-iotests/238 b/tests/qemu-iotests/238
new file mode 100755
index 0000000000..f81ee1112f
--- /dev/null
+++ b/tests/qemu-iotests/238
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+#
+# Regression test for throttle group member unregister segfault with iothread
+#
+# Copyright (c) 2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+import iotests
+from iotests import log
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'scripts'))
+
+from qemu import QEMUMachine
+
+if iotests.qemu_default_machine == 's390-ccw-virtio':
+ virtio_scsi_device = 'virtio-scsi-ccw'
+else:
+ virtio_scsi_device = 'virtio-scsi-pci'
+
+vm = QEMUMachine(iotests.qemu_prog)
+vm.add_args('-machine', 'accel=kvm')
+vm.launch()
+
+log(vm.qmp('blockdev-add', node_name='hd0', driver='null-co'))
+log(vm.qmp('object-add', qom_type='iothread', id='iothread0'))
+log(vm.qmp('device_add', id='scsi0', driver=virtio_scsi_device, iothread='iothread0'))
+log(vm.qmp('device_add', id='scsi-hd0', driver='scsi-hd', drive='hd0'))
+log(vm.qmp('block_set_io_throttle', id='scsi-hd0', bps=0, bps_rd=0, bps_wr=0,
+ iops=1000, iops_rd=0, iops_wr=0, conv_keys=False))
+log(vm.qmp('device_del', id='scsi-hd0'))
+
+vm.shutdown()
diff --git a/tests/qemu-iotests/238.out b/tests/qemu-iotests/238.out
new file mode 100644
index 0000000000..4de840ba8c
--- /dev/null
+++ b/tests/qemu-iotests/238.out
@@ -0,0 +1,6 @@
+{"return": {}}
+{"return": {}}
+{"return": {}}
+{"return": {}}
+{"return": {}}
+{"return": {}}
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index f6b245917a..0f1c3f9cdf 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -234,3 +234,4 @@
234 auto quick migration
235 auto quick
236 auto quick
+238 auto quick
diff --git a/tests/test-vmstate.c b/tests/test-vmstate.c
index 0ab29a8216..fc8ce62471 100644
--- a/tests/test-vmstate.c
+++ b/tests/test-vmstate.c
@@ -284,6 +284,55 @@ static void test_simple_primitive(void)
FIELD_EQUAL(i64_2);
}
+typedef struct TestSimpleArray {
+ uint16_t u16_1[3];
+} TestSimpleArray;
+
+/* Object instantiation; we are going to use it in more than one test */
+
+TestSimpleArray obj_simple_arr = {
+ .u16_1 = { 0x42, 0x43, 0x44 },
+};
+
+/* Description of the values. If you add an array type
+ you are expected to add a test here */
+
+static const VMStateDescription vmstate_simple_arr = {
+ .name = "simple/array",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16_ARRAY(u16_1, TestSimpleArray, 3),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+uint8_t wire_simple_arr[] = {
+ /* u16_1 */ 0x00, 0x42,
+ /* u16_1 */ 0x00, 0x43,
+ /* u16_1 */ 0x00, 0x44,
+ QEMU_VM_EOF, /* just to ensure we won't get EOF reported prematurely */
+};
+
+static void obj_simple_arr_copy(void *target, void *source)
+{
+ memcpy(target, source, sizeof(TestSimpleArray));
+}
+
+static void test_simple_array(void)
+{
+ TestSimpleArray obj, obj_clone;
+
+ memset(&obj, 0, sizeof(obj));
+ save_vmstate(&vmstate_simple_arr, &obj_simple_arr);
+
+ compare_vmstate(wire_simple_arr, sizeof(wire_simple_arr));
+
+ SUCCESS(load_vmstate(&vmstate_simple_arr, &obj, &obj_clone,
+ obj_simple_arr_copy, 1, wire_simple_arr,
+ sizeof(wire_simple_arr)));
+}
+
typedef struct TestStruct {
uint32_t a, b, c, e;
uint64_t d, f;
@@ -863,6 +912,7 @@ int main(int argc, char **argv)
g_test_init(&argc, &argv, NULL);
g_test_add_func("/vmstate/simple/primitive", test_simple_primitive);
+ g_test_add_func("/vmstate/simple/array", test_simple_array);
g_test_add_func("/vmstate/versioned/load/v1", test_load_v1);
g_test_add_func("/vmstate/versioned/load/v2", test_load_v2);
g_test_add_func("/vmstate/field_exists/load/noskip", test_load_noskip);
diff --git a/util/main-loop.c b/util/main-loop.c
index affe0403c5..443cb4cfe8 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -71,7 +71,7 @@ static void sigfd_handler(void *opaque)
}
}
-static int qemu_signal_init(void)
+static int qemu_signal_init(Error **errp)
{
int sigfd;
sigset_t set;
@@ -96,7 +96,7 @@ static int qemu_signal_init(void)
sigdelset(&set, SIG_IPI);
sigfd = qemu_signalfd(&set);
if (sigfd == -1) {
- fprintf(stderr, "failed to create signalfd\n");
+ error_setg_errno(errp, errno, "failed to create signalfd");
return -errno;
}
@@ -109,7 +109,7 @@ static int qemu_signal_init(void)
#else /* _WIN32 */
-static int qemu_signal_init(void)
+static int qemu_signal_init(Error **errp)
{
return 0;
}
@@ -148,7 +148,7 @@ int qemu_init_main_loop(Error **errp)
init_clocks(qemu_timer_notify_cb);
- ret = qemu_signal_init();
+ ret = qemu_signal_init(errp);
if (ret) {
return ret;
}
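
The qemu_signal_init() change above stops printing to stderr and instead fills the caller-supplied Error object with error_setg_errno(), so qemu_init_main_loop() callers receive a structured error they can report as they see fit. A generic, self-contained C analogue of that pattern (not the QEMU Error API):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* On failure, report through the caller-provided buffer and return -errno;
 * the caller decides whether and how to print it. */
static int open_file(const char *path, FILE **out, char *err, size_t errlen)
{
    *out = fopen(path, "r");
    if (!*out) {
        int saved = errno;
        snprintf(err, errlen, "failed to open %s: %s", path, strerror(saved));
        return -saved;
    }
    return 0;
}

int main(void)
{
    FILE *f;
    char err[256];

    if (open_file("/nonexistent", &f, err, sizeof(err)) < 0) {
        fprintf(stderr, "%s\n", err);       /* the caller chooses how to report */
        return 1;
    }
    fclose(f);
    return 0;
}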
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index afb678fbe5..4bfdd30cbf 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -17,38 +17,31 @@
#include "qemu/timer.h"
#include "block/aio.h"
-typedef struct CoSleepCB {
- QEMUTimer *ts;
- Coroutine *co;
-} CoSleepCB;
-
static void co_sleep_cb(void *opaque)
{
- CoSleepCB *sleep_cb = opaque;
+ Coroutine *co = opaque;
/* Write of schedule protected by barrier write in aio_co_schedule */
- atomic_set(&sleep_cb->co->scheduled, NULL);
- aio_co_wake(sleep_cb->co);
+ atomic_set(&co->scheduled, NULL);
+ aio_co_wake(co);
}
void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
{
AioContext *ctx = qemu_get_current_aio_context();
- CoSleepCB sleep_cb = {
- .co = qemu_coroutine_self(),
- };
+ QEMUTimer *ts;
+ Coroutine *co = qemu_coroutine_self();
- const char *scheduled = atomic_cmpxchg(&sleep_cb.co->scheduled, NULL,
- __func__);
+ const char *scheduled = atomic_cmpxchg(&co->scheduled, NULL, __func__);
if (scheduled) {
fprintf(stderr,
"%s: Co-routine was already scheduled in '%s'\n",
__func__, scheduled);
abort();
}
- sleep_cb.ts = aio_timer_new(ctx, type, SCALE_NS, co_sleep_cb, &sleep_cb);
- timer_mod(sleep_cb.ts, qemu_clock_get_ns(type) + ns);
+ ts = aio_timer_new(ctx, type, SCALE_NS, co_sleep_cb, co);
+ timer_mod(ts, qemu_clock_get_ns(type) + ns);
qemu_coroutine_yield();
- timer_del(sleep_cb.ts);
- timer_free(sleep_cb.ts);
+ timer_del(ts);
+ timer_free(ts);
}
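
The qemu-coroutine-sleep.c rewrite above drops the CoSleepCB wrapper: the timer now lives in a local variable, and since the coroutine pointer is the only state the callback needs, it is passed directly as the opaque argument. A generic sketch of that opaque-pointer simplification (plain C, no QEMU types):

#include <stdio.h>

typedef struct Coro { const char *name; } Coro;       /* stand-in for Coroutine */

/* The callback receives the object it needs directly, no wrapper struct. */
static void wake_cb(void *opaque)
{
    Coro *co = opaque;
    printf("waking %s\n", co->name);
}

static void fire(void (*cb)(void *), void *opaque)    /* stand-in for the timer */
{
    cb(opaque);
}

int main(void)
{
    Coro co = { .name = "main-co" };
    fire(wake_cb, &co);
    return 0;
}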