path: root/tcg/tcg-op.c
author     Emilio G. Cota <cota@braap.org>                   2017-07-12 17:15:52 -0400
committer  Richard Henderson <richard.henderson@linaro.org>  2017-10-24 13:53:42 -0700
commit     b1311c4acf503dc9c1a310cc40b64f05b08833dc (patch)
tree       744c9db497b0e4ee946b1fd6c85910f27b18ae5a /tcg/tcg-op.c
parent     44ded3d04821bec57407cc26a8b4db620da2be04 (diff)
tcg: define tcg_init_ctx and make tcg_ctx a pointer
Groundwork for supporting multiple TCG contexts.

The core of this patch is this change to tcg/tcg.h:

> -extern TCGContext tcg_ctx;
> +extern TCGContext tcg_init_ctx;
> +extern TCGContext *tcg_ctx;

Note that for now we set *tcg_ctx to whatever TCGContext is passed
to tcg_context_init -- in this case &tcg_init_ctx.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
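For reference, a minimal sketch of what the core change amounts to, assuming a heavily simplified TCGContext; the real structure in tcg/tcg.h carries far more state, and only tcg_init_ctx, tcg_ctx and tcg_context_init below are real identifiers, everything else is illustrative:

#include <stdint.h>

/* Sketch only -- a stand-in for the real TCGContext in tcg/tcg.h. */
typedef struct TCGContext {
    uint32_t tb_cflags;   /* illustrative member */
} TCGContext;

/* Before this patch:
 *     extern TCGContext tcg_ctx;        // single global instance
 *     ... tcg_ctx.tb_cflags ...
 *
 * After this patch:
 *     extern TCGContext tcg_init_ctx;   // the statically allocated context
 *     extern TCGContext *tcg_ctx;       // pointer to the current context
 *     ... tcg_ctx->tb_cflags ...
 */
TCGContext tcg_init_ctx;
TCGContext *tcg_ctx;

void tcg_context_init(TCGContext *s)
{
    /* ... initialize *s as before ... */

    /* For now the pointer simply tracks whichever context was passed in,
     * i.e. &tcg_init_ctx; later work can point it at per-thread contexts. */
    tcg_ctx = s;
}

Callers then switch from tcg_ctx.field to tcg_ctx->field, which is the mechanical change making up the rest of this diff.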
Diffstat (limited to 'tcg/tcg-op.c')
-rw-r--r--  tcg/tcg-op.c | 46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 8c7668de60..ba603281d3 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -48,7 +48,7 @@ extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
static inline TCGOp *tcg_emit_op(TCGOpcode opc)
{
- TCGContext *ctx = &tcg_ctx;
+ TCGContext *ctx = tcg_ctx;
int oi = ctx->gen_next_op_idx;
int ni = oi + 1;
int pi = oi - 1;
@@ -121,7 +121,7 @@ void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
void tcg_gen_mb(TCGBar mb_type)
{
- if (tcg_ctx.tb_cflags & CF_PARALLEL) {
+ if (tcg_ctx->tb_cflags & CF_PARALLEL) {
tcg_gen_op1(INDEX_op_mb, mb_type);
}
}
@@ -2552,8 +2552,8 @@ void tcg_gen_goto_tb(unsigned idx)
tcg_debug_assert(idx <= 1);
#ifdef CONFIG_DEBUG_TCG
 /* Verify that we haven't seen this numbered exit before. */
- tcg_debug_assert((tcg_ctx.goto_tb_issue_mask & (1 << idx)) == 0);
- tcg_ctx.goto_tb_issue_mask |= 1 << idx;
+ tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
+ tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif
tcg_gen_op1i(INDEX_op_goto_tb, idx);
}
@@ -2562,7 +2562,7 @@ void tcg_gen_lookup_and_goto_ptr(void)
{
if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
TCGv_ptr ptr = tcg_temp_new_ptr();
- gen_helper_lookup_tb_ptr(ptr, tcg_ctx.tcg_env);
+ gen_helper_lookup_tb_ptr(ptr, tcg_ctx->tcg_env);
tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
} else {
@@ -2648,7 +2648,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
- trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, tcg_ctx->tcg_env,
addr, trace_mem_get_info(memop, 0));
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
}
@@ -2657,7 +2657,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
- trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, tcg_ctx->tcg_env,
addr, trace_mem_get_info(memop, 1));
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
}
@@ -2676,7 +2676,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
memop = tcg_canonicalize_memop(memop, 1, 0);
- trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, tcg_ctx->tcg_env,
addr, trace_mem_get_info(memop, 0));
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
}
@@ -2690,7 +2690,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
memop = tcg_canonicalize_memop(memop, 1, 1);
- trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, tcg_ctx->tcg_env,
addr, trace_mem_get_info(memop, 1));
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
}
@@ -2780,7 +2780,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
{
memop = tcg_canonicalize_memop(memop, 0, 0);
- if (!(tcg_ctx.tb_cflags & CF_PARALLEL)) {
+ if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
@@ -2806,11 +2806,11 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
#ifdef CONFIG_SOFTMMU
{
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
- gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
+ gen(retv, tcg_ctx->tcg_env, addr, cmpv, newv, oi);
tcg_temp_free_i32(oi);
}
#else
- gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
+ gen(retv, tcg_ctx->tcg_env, addr, cmpv, newv);
#endif
if (memop & MO_SIGN) {
@@ -2824,7 +2824,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
{
memop = tcg_canonicalize_memop(memop, 1, 0);
- if (!(tcg_ctx.tb_cflags & CF_PARALLEL)) {
+ if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -2851,14 +2851,14 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
#ifdef CONFIG_SOFTMMU
{
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
- gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
+ gen(retv, tcg_ctx->tcg_env, addr, cmpv, newv, oi);
tcg_temp_free_i32(oi);
}
#else
- gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
+ gen(retv, tcg_ctx->tcg_env, addr, cmpv, newv);
#endif
#else
- gen_helper_exit_atomic(tcg_ctx.tcg_env);
+ gen_helper_exit_atomic(tcg_ctx->tcg_env);
/* Produce a result, so that we have a well-formed opcode stream
with respect to uses of the result in the (dead) code following. */
tcg_gen_movi_i64(retv, 0);
@@ -2914,11 +2914,11 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
#ifdef CONFIG_SOFTMMU
{
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
- gen(ret, tcg_ctx.tcg_env, addr, val, oi);
+ gen(ret, tcg_ctx->tcg_env, addr, val, oi);
tcg_temp_free_i32(oi);
}
#else
- gen(ret, tcg_ctx.tcg_env, addr, val);
+ gen(ret, tcg_ctx->tcg_env, addr, val);
#endif
if (memop & MO_SIGN) {
@@ -2959,14 +2959,14 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
#ifdef CONFIG_SOFTMMU
{
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
- gen(ret, tcg_ctx.tcg_env, addr, val, oi);
+ gen(ret, tcg_ctx->tcg_env, addr, val, oi);
tcg_temp_free_i32(oi);
}
#else
- gen(ret, tcg_ctx.tcg_env, addr, val);
+ gen(ret, tcg_ctx->tcg_env, addr, val);
#endif
#else
- gen_helper_exit_atomic(tcg_ctx.tcg_env);
+ gen_helper_exit_atomic(tcg_ctx->tcg_env);
/* Produce a result, so that we have a well-formed opcode stream
with respect to uses of the result in the (dead) code following. */
tcg_gen_movi_i64(ret, 0);
@@ -3001,7 +3001,7 @@ static void * const table_##NAME[16] = { \
void tcg_gen_atomic_##NAME##_i32 \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
{ \
- if (tcg_ctx.tb_cflags & CF_PARALLEL) { \
+ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
@@ -3011,7 +3011,7 @@ void tcg_gen_atomic_##NAME##_i32 \
void tcg_gen_atomic_##NAME##_i64 \
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
{ \
- if (tcg_ctx.tb_cflags & CF_PARALLEL) { \
+ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \