Diffstat (limited to 'vl.c')
 vl.c | 280
 1 file changed, 151 insertions(+), 129 deletions(-)
diff --git a/vl.c b/vl.c
index 3d1cde65a2..a0dcdea96e 100644
--- a/vl.c
+++ b/vl.c
@@ -273,7 +273,7 @@ uint64_t node_cpumask[MAX_NODES];
 
 static CPUState *cur_cpu;
 static CPUState *next_cpu;
-static int event_pending = 1;
+static int timer_alarm_pending = 1;
 /* Conversion factor from emulated instructions to virtual clock ticks.  */
 static int icount_time_shift;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
@@ -1360,7 +1360,7 @@ static void host_alarm_handler(int host_signum)
             }
 #endif
         }
-        event_pending = 1;
+        timer_alarm_pending = 1;
         qemu_notify_event();
     }
 }
@@ -3879,153 +3879,175 @@ void main_loop_wait(int timeout)
 }
 
-static int main_loop(void)
+static int qemu_cpu_exec(CPUState *env)
 {
-    int ret, timeout;
+    int ret;
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
-    CPUState *env;
 
-    cur_cpu = first_cpu;
-    next_cpu = cur_cpu->next_cpu ?: first_cpu;
-    for(;;) {
-        if (vm_running) {
-
-            for(;;) {
-                /* get next cpu */
-                env = next_cpu;
 #ifdef CONFIG_PROFILER
-                ti = profile_getclock();
+    ti = profile_getclock();
 #endif
-                if (use_icount) {
-                    int64_t count;
-                    int decr;
-                    qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
-                    env->icount_decr.u16.low = 0;
-                    env->icount_extra = 0;
-                    count = qemu_next_deadline();
-                    count = (count + (1 << icount_time_shift) - 1)
-                            >> icount_time_shift;
-                    qemu_icount += count;
-                    decr = (count > 0xffff) ? 0xffff : count;
-                    count -= decr;
-                    env->icount_decr.u16.low = decr;
-                    env->icount_extra = count;
-                }
-                ret = cpu_exec(env);
+    if (use_icount) {
+        int64_t count;
+        int decr;
+        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
+        env->icount_decr.u16.low = 0;
+        env->icount_extra = 0;
+        count = qemu_next_deadline();
+        count = (count + (1 << icount_time_shift) - 1)
+                >> icount_time_shift;
+        qemu_icount += count;
+        decr = (count > 0xffff) ? 0xffff : count;
+        count -= decr;
+        env->icount_decr.u16.low = decr;
+        env->icount_extra = count;
+    }
+    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
-                qemu_time += profile_getclock() - ti;
+    qemu_time += profile_getclock() - ti;
 #endif
-                if (use_icount) {
-                    /* Fold pending instructions back into the
-                       instruction counter, and clear the interrupt flag.  */
-                    qemu_icount -= (env->icount_decr.u16.low
-                                    + env->icount_extra);
-                    env->icount_decr.u32 = 0;
-                    env->icount_extra = 0;
-                }
-                next_cpu = env->next_cpu ?: first_cpu;
-                if (event_pending && likely(ret != EXCP_DEBUG)) {
-                    ret = EXCP_INTERRUPT;
-                    event_pending = 0;
-                    break;
-                }
-                if (ret == EXCP_HLT) {
-                    /* Give the next CPU a chance to run.  */
-                    cur_cpu = env;
-                    continue;
-                }
-                if (ret != EXCP_HALTED)
+    if (use_icount) {
+        /* Fold pending instructions back into the
+           instruction counter, and clear the interrupt flag.  */
+        qemu_icount -= (env->icount_decr.u16.low
+                        + env->icount_extra);
+        env->icount_decr.u32 = 0;
+        env->icount_extra = 0;
+    }
+    return ret;
+}
+
+static int cpu_has_work(CPUState *env)
+{
+    if (!env->halted)
+        return 1;
+    if (qemu_cpu_has_work(env))
+        return 1;
+    return 0;
+}
+
+static int tcg_has_work(void)
+{
+    CPUState *env;
+
+    for (env = first_cpu; env != NULL; env = env->next_cpu)
+        if (cpu_has_work(env))
+            return 1;
+    return 0;
+}
+
+static int qemu_calculate_timeout(void)
+{
+    int timeout;
+
+    if (!vm_running)
+        timeout = 5000;
+    else if (tcg_has_work())
+        timeout = 0;
+    else if (!use_icount)
+        timeout = 5000;
+    else {
+        /* XXX: use timeout computed from timers */
+        int64_t add;
+        int64_t delta;
+        /* Advance virtual time to the next event.  */
+        if (use_icount == 1) {
+            /* When not using an adaptive execution frequency
+               we tend to get badly out of sync with real time,
+               so just delay for a reasonable amount of time.  */
+            delta = 0;
+        } else {
+            delta = cpu_get_icount() - cpu_get_clock();
+        }
+        if (delta > 0) {
+            /* If virtual time is ahead of real time then just
+               wait for IO.  */
+            timeout = (delta / 1000000) + 1;
+        } else {
+            /* Wait for either IO to occur or the next
+               timer event.  */
+            add = qemu_next_deadline();
+            /* We advance the timer before checking for IO.
+               Limit the amount we advance so that early IO
+               activity won't get the guest too far ahead.  */
+            if (add > 10000000)
+                add = 10000000;
+            delta += add;
+            add = (add + (1 << icount_time_shift) - 1)
+                  >> icount_time_shift;
+            qemu_icount += add;
+            timeout = delta / 1000000;
+            if (timeout < 0)
+                timeout = 0;
+        }
+    }
+
+    return timeout;
+}
+
+static int vm_can_run(void)
+{
+    if (powerdown_requested)
+        return 0;
+    if (reset_requested)
+        return 0;
+    if (shutdown_requested)
+        return 0;
+    return 1;
+}
+
+static void main_loop(void)
+{
+    int ret = 0;
+#ifdef CONFIG_PROFILER
+    int64_t ti;
+#endif
+
+    for (;;) {
+        do {
+            if (next_cpu == NULL)
+                next_cpu = first_cpu;
+            for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
+                CPUState *env = cur_cpu = next_cpu;
+
+                if (!vm_running)
                     break;
-                /* all CPUs are halted ? */
-                if (env == cur_cpu)
+                if (timer_alarm_pending) {
+                    timer_alarm_pending = 0;
                     break;
-            }
-            cur_cpu = env;
-
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                if (no_shutdown) {
-                    vm_stop(0);
-                    no_shutdown = 0;
                 }
-                else
+                ret = qemu_cpu_exec(env);
+                if (ret == EXCP_DEBUG) {
+                    gdb_set_stop_cpu(env);
                     break;
-            }
-            if (reset_requested) {
-                reset_requested = 0;
-                qemu_system_reset();
-                ret = EXCP_INTERRUPT;
-            }
-            if (powerdown_requested) {
-                powerdown_requested = 0;
-                qemu_system_powerdown();
-                ret = EXCP_INTERRUPT;
-            }
-            if (unlikely(ret == EXCP_DEBUG)) {
-                gdb_set_stop_cpu(cur_cpu);
-                vm_stop(EXCP_DEBUG);
-            }
-            /* If all cpus are halted then wait until the next IRQ */
-            /* XXX: use timeout computed from timers */
-            if (ret == EXCP_HALTED) {
-                if (use_icount) {
-                    int64_t add;
-                    int64_t delta;
-                    /* Advance virtual time to the next event.  */
-                    if (use_icount == 1) {
-                        /* When not using an adaptive execution frequency
-                           we tend to get badly out of sync with real time,
-                           so just delay for a reasonable amount of time.  */
-                        delta = 0;
-                    } else {
-                        delta = cpu_get_icount() - cpu_get_clock();
-                    }
-                    if (delta > 0) {
-                        /* If virtual time is ahead of real time then just
-                           wait for IO.  */
-                        timeout = (delta / 1000000) + 1;
-                    } else {
-                        /* Wait for either IO to occur or the next
-                           timer event.  */
-                        add = qemu_next_deadline();
-                        /* We advance the timer before checking for IO.
-                           Limit the amount we advance so that early IO
-                           activity won't get the guest too far ahead.  */
-                        if (add > 10000000)
-                            add = 10000000;
-                        delta += add;
-                        add = (add + (1 << icount_time_shift) - 1)
-                              >> icount_time_shift;
-                        qemu_icount += add;
-                        timeout = delta / 1000000;
-                        if (timeout < 0)
-                            timeout = 0;
-                    }
-                } else {
-                    timeout = 5000;
                 }
-            } else {
-                timeout = 0;
             }
-        } else {
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                break;
-            }
-            timeout = 5000;
-        }
 #ifdef CONFIG_PROFILER
-        ti = profile_getclock();
+            ti = profile_getclock();
 #endif
-        main_loop_wait(timeout);
+            main_loop_wait(qemu_calculate_timeout());
 #ifdef CONFIG_PROFILER
-        dev_time += profile_getclock() - ti;
+            dev_time += profile_getclock() - ti;
 #endif
+        } while (ret != EXCP_DEBUG && vm_can_run());
+
+        if (ret == EXCP_DEBUG)
+            vm_stop(EXCP_DEBUG);
+
+        if (qemu_shutdown_requested()) {
+            if (no_shutdown) {
+                vm_stop(0);
+                no_shutdown = 0;
+            } else
+                break;
+        }
+        if (qemu_reset_requested())
+            qemu_system_reset();
+        if (qemu_powerdown_requested())
+            qemu_system_powerdown();
     }
-    cpu_disable_ticks();
-    return ret;
 }
 
 static void version(void)
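
Taken together, the patch splits the old monolithic main_loop() into qemu_cpu_exec(), cpu_has_work()/tcg_has_work(), qemu_calculate_timeout() and vm_can_run(), leaving main_loop() itself as a plain round-robin scheduler that persists next_cpu across iterations. The stand-alone C sketch below mimics that scheduler's control flow; it is not QEMU code, and every type and stub in it (the minimal CPUState, qemu_cpu_exec_stub, main_loop_wait_stub, the two-CPU list, the fixed iteration count standing in for vm_can_run()) is invented for illustration:

/*
 * Stand-alone sketch (not QEMU code) of the control flow this patch
 * gives main_loop(): run CPUs round-robin, break out of the inner loop
 * when the timer alarm fires, and only then wait for I/O.
 */
#include <stdio.h>

typedef struct CPUState {
    int id;
    struct CPUState *next_cpu;
} CPUState;

static CPUState cpus[2] = { { 0, &cpus[1] }, { 1, NULL } };
static CPUState *first_cpu = &cpus[0];
static CPUState *next_cpu;
static int timer_alarm_pending = 1;   /* starts set, as in the patch */

static void qemu_cpu_exec_stub(CPUState *env)
{
    printf("executing CPU %d\n", env->id);
}

static void main_loop_wait_stub(int timeout)
{
    static int calls;
    printf("waiting for I/O (timeout=%d ms)\n", timeout);
    if (++calls % 2 == 0)
        timer_alarm_pending = 1;      /* pretend the alarm fired */
}

int main(void)
{
    int iterations = 3;               /* stand-in for vm_can_run() */

    while (iterations--) {
        if (next_cpu == NULL)
            next_cpu = first_cpu;
        /* next_cpu persists across outer iterations, so a CPU that was
           preempted by the alarm is the first to run afterwards. */
        for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
            if (timer_alarm_pending) {
                timer_alarm_pending = 0;
                break;                /* service timers before running on */
            }
            qemu_cpu_exec_stub(next_cpu);
        }
        main_loop_wait_stub(0);
    }
    return 0;
}

The point the sketch makes explicit is fairness: because next_cpu is left where the alarm interrupted it, the CPU that was about to run gets the first slot after timers are serviced, instead of execution restarting from first_cpu.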