author     Peter Maydell <peter.maydell@linaro.org>  2017-06-22 10:25:03 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2017-06-22 10:25:03 +0100
commit     db7a99cdc1d0f4d8cbf7c41ce9e570dce04f0a11 (patch)
tree       47fd7ac1842e033dc163ed63788bc818af0d5a7e /accel/tcg
parent     8dfaf23ae1f2273a9730a9b309cc8471269bb524 (diff)
parent     8da54b2507c1cabf60c2de904cf0383b23239231 (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20170619' into staging
Queued TCG patches

# gpg: Signature made Mon 19 Jun 2017 19:12:06 BST
# gpg:                using RSA key 0xAD1270CC4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"
# Primary key fingerprint: 9CB1 8DDA F8E8 49AD 2AFC 16A4 AD12 70CC 4DD0 279B

* remotes/rth/tags/pull-tcg-20170619:
  target/arm: Exit after clearing aarch64 interrupt mask
  target/s390x: Exit after changing PSW mask
  target/alpha: Use tcg_gen_lookup_and_goto_ptr
  tcg: Increase hit rate of lookup_tb_ptr
  tcg/arm: Use ldr (literal) for goto_tb
  tcg/arm: Try pc-relative addresses for movi
  tcg/arm: Remove limit on code buffer size
  tcg/arm: Use indirect branch for goto_tb
  tcg/aarch64: Use ADR in tcg_out_movi
  translate-all: consolidate tb init in tb_gen_code
  tcg: allocate TB structs before the corresponding translated code
  util: add cacheinfo

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
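The change to this file is driven by "tcg: allocate TB structs before the corresponding translated code": rather than drawing TranslationBlock structs from a fixed-size array bounded by code_gen_max_blocks, each struct is now carved out of the code generation buffer itself, cache-line aligned, immediately ahead of its own translated code. A minimal sketch of such a bump allocator, assuming the code_gen_ptr/code_gen_highwater fields visible in this diff and the qemu_icache_linesize value added by the "util: add cacheinfo" patch (the real tcg_tb_alloc lives in tcg/tcg.c and may differ in detail):

    TranslationBlock *tcg_tb_alloc(TCGContext *s)
    {
        uintptr_t align = qemu_icache_linesize;
        TranslationBlock *tb;
        void *next;

        /* Align the struct to an icache line so the translated code
           that follows it starts on a line of its own.  */
        tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
        next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

        if (unlikely(next > s->code_gen_highwater)) {
            return NULL;        /* buffer exhausted: caller flushes all TBs */
        }
        s->code_gen_ptr = next; /* translated code will be emitted here */
        return tb;
    }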
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/translate-all.c | 46
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 7b25a16244..f6ad46b613 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -523,8 +523,6 @@ static inline PageDesc *page_find(tb_page_addr_t index)
# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
-#elif defined(__arm__)
-# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
#elif defined(__s390x__)
/* We have a +- 4GB range on the branches; leave some slop. */
# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
@@ -781,12 +779,13 @@ static inline void code_gen_alloc(size_t tb_size)
exit(1);
}
- /* Estimate a good size for the number of TBs we can support. We
- still haven't deducted the prologue from the buffer size here,
- but that's minimal and won't affect the estimate much. */
- tcg_ctx.code_gen_max_blocks
- = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
- tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
+ /* size this conservatively -- realloc later if needed */
+ tcg_ctx.tb_ctx.tbs_size =
+ tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
+ if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
+ tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
+ }
+ tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);
qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
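The initial sizing above is a heuristic, not a limit. For illustration, taking CODE_GEN_AVG_BLOCK_SIZE as 400 bytes (a value it has had in softmmu builds; treat the exact number as an assumption), a 512 MiB code buffer gives 512 MiB / 400 / 8, roughly 168k initial slots. Since tbs now holds TranslationBlock pointers rather than the structs themselves, that is about 1.3 MiB of host memory on a 64-bit host, instead of the full struct array the old g_new(TranslationBlock, ...) allocated up front.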
@@ -828,16 +827,20 @@ bool tcg_enabled(void)
static TranslationBlock *tb_alloc(target_ulong pc)
{
TranslationBlock *tb;
+ TBContext *ctx;
assert_tb_locked();
- if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
+ tb = tcg_tb_alloc(&tcg_ctx);
+ if (unlikely(tb == NULL)) {
return NULL;
}
- tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
- tb->pc = pc;
- tb->cflags = 0;
- tb->invalid = false;
+ ctx = &tcg_ctx.tb_ctx;
+ if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
+ ctx->tbs_size *= 2;
+ ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
+ }
+ ctx->tbs[ctx->nb_tbs++] = tb;
return tb;
}
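Growth in tb_alloc is the standard amortized-doubling scheme: when the pointer array fills, its size doubles via g_renew. Starting from the conservative estimate above, reaching N live TBs costs at most about log2(N / initial size) reallocations and O(N) total copying, so appends are amortized O(1). This is what lets the fixed code_gen_max_blocks count limit go away; the only remaining limit is code buffer space, signalled by tcg_tb_alloc returning NULL.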
@@ -850,8 +853,10 @@ void tb_free(TranslationBlock *tb)
Ignore the hard cases and just back up if this TB happens to
be the last one generated. */
if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
- tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
- tcg_ctx.code_gen_ptr = tb->tc_ptr;
+ tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
+ size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);
+
+ tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
tcg_ctx.tb_ctx.nb_tbs--;
}
}
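The extra struct_size subtraction follows from the new layout: each allocation in the code buffer is now [TB struct, padded to an instruction-cache line][translated code], and tb->tc_ptr points at the code, not at the struct. A sketch of the layout, assuming a 64-byte icache line:

    ... used ... | TB struct + pad (64B) | translated code | <- code_gen_ptr
                 ^                       ^
                 tb                      tb->tc_ptr

Rewinding code_gen_ptr to tb->tc_ptr - ROUND_UP(sizeof(*tb), qemu_icache_linesize) therefore reclaims both the code and the struct in one step.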
@@ -1279,9 +1284,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx.code_gen_ptr;
tb->tc_ptr = gen_code_buf;
+ tb->pc = pc;
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
+ tb->invalid = false;
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count1++; /* includes aborted translations because of
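Moving the tb->pc and tb->invalid assignments here, alongside cs_base, flags and cflags, is the "translate-all: consolidate tb init in tb_gen_code" patch: tb_alloc above now only carves the struct out of the buffer and records its pointer, so tb_gen_code becomes the single place where a freshly allocated TB is initialized.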
@@ -1666,7 +1673,7 @@ static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
while (m_min <= m_max) {
m = (m_min + m_max) >> 1;
- tb = &tcg_ctx.tb_ctx.tbs[m];
+ tb = tcg_ctx.tb_ctx.tbs[m];
v = (uintptr_t)tb->tc_ptr;
if (v == tc_ptr) {
return tb;
@@ -1676,7 +1683,7 @@ static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
m_min = m + 1;
}
}
- return &tcg_ctx.tb_ctx.tbs[m_max];
+ return tcg_ctx.tb_ctx.tbs[m_max];
}
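tb_find_pc is the consumer of the pointer array maintained by tb_alloc: because TBs come from a bump allocator, tbs[] is naturally sorted by tc_ptr, and a host PC inside generated code can be mapped back to its TB by binary search. The typical caller is the state-restore path in this same file; roughly (a sketch, with details such as CF_NOCACHE handling omitted):

    bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
    {
        /* retaddr is a host address inside generated code, e.g. the
           return address captured on a guest memory fault.  */
        TranslationBlock *tb = tb_find_pc(retaddr);

        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, retaddr);
            return true;
        }
        return false;
    }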
#if !defined(CONFIG_USER_ONLY)
@@ -1874,7 +1881,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
direct_jmp_count = 0;
direct_jmp2_count = 0;
for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
- tb = &tcg_ctx.tb_ctx.tbs[i];
+ tb = tcg_ctx.tb_ctx.tbs[i];
target_code_size += tb->size;
if (tb->size > max_target_code_size) {
max_target_code_size = tb->size;
@@ -1894,8 +1901,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
cpu_fprintf(f, "gen code size %td/%zd\n",
tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
- cpu_fprintf(f, "TB count %d/%d\n",
- tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
+ cpu_fprintf(f, "TB count %d\n", tcg_ctx.tb_ctx.nb_tbs);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
tcg_ctx.tb_ctx.nb_tbs : 0,