path: root/accel/tcg
author     Richard Henderson <richard.henderson@linaro.org>   2019-03-28 11:54:23 -1000
committer  Richard Henderson <richard.henderson@linaro.org>   2019-06-10 07:03:42 -0700
commit     5e1401969b25f676fee6b1c564441759cf967a43 (patch)
tree       7ad13ea2a9043d2586c1e6f57ea5055e01354cf4 /accel/tcg
parent     5b146dc716cfd247f99556c04e6e46fbd67565a0 (diff)
cpu: Move icount_decr to CPUNegativeOffsetState
Amusingly, we had already ignored the comment to keep this value
at the end of CPUState.  This restores the minimum negative offset
from TCG_AREG0 for code generation.

For the couple of uses within qom/cpu.c, without NEED_CPU_H, add
a pointer from the CPUState object to the IcountDecr object within
CPUNegativeOffsetState.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
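For orientation, here is a minimal sketch of the data layout this change relies on. It is paraphrased, not the verbatim QEMU headers: the names IcountDecr, CPUNegativeOffsetState and the cpu_neg() accessor appear in the hunks below, while the icount_decr_ptr field name and the exact struct contents are assumptions shown only for illustration.

#include <stdint.h>

/* Sketch only: paraphrased from this commit, not the verbatim QEMU headers. */

typedef union IcountDecr {
    uint32_t u32;            /* read as a whole in cpu_loop_exec_tb() */
    struct {
        uint16_t low;        /* instructions left in the current icount slice */
        uint16_t high;       /* set to -1 to make generated code exit promptly */
    } u16;                   /* (the real union orders low/high by host endianness) */
} IcountDecr;

/*
 * Sits at a small negative offset from the architectural env pointer
 * (TCG_AREG0), so generated code can reach icount_decr cheaply; the
 * cpu_neg(cpu) helper used in the hunks below returns this object.
 */
typedef struct CPUNegativeOffsetState {
    /* ...other fields elided... */
    IcountDecr icount_decr;
} CPUNegativeOffsetState;

/*
 * Code built without NEED_CPU_H (e.g. qom/cpu.c) cannot use cpu_neg(),
 * so per the commit message CPUState gains a plain pointer to the same
 * IcountDecr; the struct and field names below are illustrative.
 */
typedef struct CPUStateSketch {
    /* ...other fields elided... */
    IcountDecr *icount_decr_ptr;   /* points at cpu_neg(cpu)->icount_decr */
} CPUStateSketch;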
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/cpu-exec.c       23
-rw-r--r--  accel/tcg/tcg-all.c         6
-rw-r--r--  accel/tcg/translate-all.c   8
3 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 45ef41ebb2..032a62672e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -54,7 +54,7 @@ typedef struct SyncClocks {
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
-static void align_clocks(SyncClocks *sc, const CPUState *cpu)
+static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
int64_t cpu_icount;
@@ -62,7 +62,7 @@ static void align_clocks(SyncClocks *sc, const CPUState *cpu)
return;
}
- cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
+ cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
sc->last_cpu_icount = cpu_icount;
@@ -105,15 +105,15 @@ static void print_delay(const SyncClocks *sc)
}
}
-static void init_delay_params(SyncClocks *sc,
- const CPUState *cpu)
+static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
if (!icount_align_option) {
return;
}
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
- sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
+ sc->last_cpu_icount
+ = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
if (sc->diff_clk < max_delay) {
max_delay = sc->diff_clk;
}
@@ -467,7 +467,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
if (replay_has_exception()
- && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
+ && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
}
@@ -525,7 +525,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* Ensure zeroing happens before reading cpu->exit_request or
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/
- atomic_mb_set(&cpu->icount_decr.u16.high, 0);
+ atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
if (unlikely(atomic_read(&cpu->interrupt_request))) {
int interrupt_request;
@@ -596,8 +596,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
}
/* Finally, check if we need to exit to the main loop. */
- if (unlikely(atomic_read(&cpu->exit_request)
- || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
+ if (unlikely(atomic_read(&cpu->exit_request))
+ || (use_icount
+ && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
atomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT;
@@ -624,7 +625,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
}
*last_tb = NULL;
- insns_left = atomic_read(&cpu->icount_decr.u32);
+ insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
if (insns_left < 0) {
/* Something asked us to stop executing chained TBs; just
* continue round the main loop. Whatever requested the exit
@@ -643,7 +644,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
cpu_update_icount(cpu);
/* Refill decrementer and continue execution. */
insns_left = MIN(0xffff, cpu->icount_budget);
- cpu->icount_decr.u16.low = insns_left;
+ cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
if (!cpu->icount_extra) {
/* Execute any remaining instructions, then let the main loop
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index 3d25bdcc17..9b215dcc5a 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -28,13 +28,12 @@
#include "sysemu/sysemu.h"
#include "qom/object.h"
#include "qemu-common.h"
-#include "qom/cpu.h"
+#include "cpu.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
unsigned long tcg_tb_size;
-#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
@@ -51,7 +50,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
} else {
- atomic_set(&cpu->icount_decr.u16.high, -1);
+ atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
if (use_icount &&
!cpu->can_do_io
&& (mask & ~old_mask) != 0) {
@@ -59,7 +58,6 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
}
}
}
-#endif
static int tcg_init(MachineState *ms)
{
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 52d94facf0..e24ee3a172 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -364,7 +364,7 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
assert(use_icount);
/* Reset the cycle counter to the start of the block
and shift it to the number of actually executed instructions */
- cpu->icount_decr.u16.low += num_insns - i;
+ cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
}
restore_state_to_opc(env, tb, data);
@@ -2200,7 +2200,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
if ((env->hflags & MIPS_HFLAG_BMASK) != 0
&& env->active_tc.PC != tb->pc) {
env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
- cpu->icount_decr.u16.low++;
+ cpu_neg(cpu)->icount_decr.u16.low++;
env->hflags &= ~MIPS_HFLAG_BMASK;
n = 2;
}
@@ -2208,7 +2208,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
&& env->pc != tb->pc) {
env->pc -= 2;
- cpu->icount_decr.u16.low++;
+ cpu_neg(cpu)->icount_decr.u16.low++;
env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
n = 2;
}
@@ -2382,7 +2382,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
{
g_assert(qemu_mutex_iothread_locked());
cpu->interrupt_request |= mask;
- atomic_set(&cpu->icount_decr.u16.high, -1);
+ atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
/*