author | Peter Maydell <peter.maydell@linaro.org> | 2018-01-16 15:45:15 +0000 |
---|---|---|
committer | Peter Maydell <peter.maydell@linaro.org> | 2018-01-16 15:45:15 +0000 |
commit | c1d5b9add7b04661bedef9a3379a8b82547b53db (patch) | |
tree | 7c064b14e4a3645aedbd7d7f0009b9ee71ad9952 /cpus.c | |
parent | aae39d24a387a273deab3eb930dbf730aa379e22 (diff) | |
parent | b5976c2e46e86b36b01d8ac380a182e22209a7cd (diff) | |
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* QemuMutex tracing improvements (Alex)
* ram_addr_t optimization (David)
* SCSI fixes (Fam, Stefan, me)
* do {} while (0) fixes (Eric)
* KVM fix for PMU (Jan)
* memory leak fixes from ASAN (Marc-André)
* migration fix for HPET, icount, loadvm (Maria, Pavel)
* hflags fixes (me, Tao)
* block/iscsi uninitialized variable (Peter L.)
* full support for GMainContexts in character devices (Peter Xu)
* more boot-serial-test (Thomas)
* Memory leak fix (Zhecheng)
# gpg: Signature made Tue 16 Jan 2018 14:15:45 GMT
# gpg: using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83
* remotes/bonzini/tags/for-upstream: (51 commits)
scripts/analyse-locks-simpletrace.py: script to analyse lock times
util/qemu-thread-*: add qemu_lock, locked and unlock trace events
cpu: flush TB cache when loading VMState
block/iscsi: fix initialization of iTask in iscsi_co_get_block_status
find_ram_offset: Align ram_addr_t allocation on long boundaries
find_ram_offset: Add comments and tracing
cpu_physical_memory_sync_dirty_bitmap: Another alignment fix
checkpatch: Enforce proper do/while (0) style
maint: Fix macros with broken 'do/while(0); ' usage
tests: Avoid 'do/while(false); ' in vhost-user-bridge
chardev: Clean up previous patch indentation
chardev: Use goto/label instead of do/break/while(0)
mips: Tweak location of ';' in macros
net: Drop unusual use of do { } while (0);
irq: fix memory leak
cpus: unify qemu_*_wait_io_event
icount: fixed saving/restoring of icount warp timers
scripts/qemu-gdb/timers.py: new helper to dump timer state
scripts/qemu-gdb: add simple tcg lock status helper
target-i386: update hflags on Hypervisor.framework
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'cpus.c')
-rw-r--r-- | cpus.c | 134 |
1 file changed, 83 insertions(+), 51 deletions(-)
@@ -120,16 +120,11 @@ static bool all_cpu_threads_idle(void)
 /* Protected by TimersState seqlock */
 
 static bool icount_sleep = true;
-static int64_t vm_clock_warp_start = -1;
 /* Conversion factor from emulated instructions to virtual clock ticks. */
 static int icount_time_shift;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
 #define MAX_ICOUNT_SHIFT 10
 
-static QEMUTimer *icount_rt_timer;
-static QEMUTimer *icount_vm_timer;
-static QEMUTimer *icount_warp_timer;
-
 typedef struct TimersState {
     /* Protected by BQL. */
     int64_t cpu_ticks_prev;
@@ -147,6 +142,11 @@ typedef struct TimersState {
     int64_t qemu_icount_bias;
     /* Only written by TCG thread */
     int64_t qemu_icount;
+    /* for adjusting icount */
+    int64_t vm_clock_warp_start;
+    QEMUTimer *icount_rt_timer;
+    QEMUTimer *icount_vm_timer;
+    QEMUTimer *icount_warp_timer;
 } TimersState;
 
 static TimersState timers_state;
@@ -432,14 +432,14 @@ static void icount_adjust(void)
 
 static void icount_adjust_rt(void *opaque)
 {
-    timer_mod(icount_rt_timer,
+    timer_mod(timers_state.icount_rt_timer,
               qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
     icount_adjust();
 }
 
 static void icount_adjust_vm(void *opaque)
 {
-    timer_mod(icount_vm_timer,
+    timer_mod(timers_state.icount_vm_timer,
               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
               NANOSECONDS_PER_SECOND / 10);
     icount_adjust();
@@ -460,7 +460,7 @@ static void icount_warp_rt(void)
      */
     do {
         seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
-        warp_start = vm_clock_warp_start;
+        warp_start = timers_state.vm_clock_warp_start;
     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
 
     if (warp_start == -1) {
@@ -473,7 +473,7 @@ static void icount_warp_rt(void)
                                      cpu_get_clock_locked());
         int64_t warp_delta;
 
-        warp_delta = clock - vm_clock_warp_start;
+        warp_delta = clock - timers_state.vm_clock_warp_start;
         if (use_icount == 2) {
             /*
              * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
@@ -485,7 +485,7 @@ static void icount_warp_rt(void)
         }
         timers_state.qemu_icount_bias += warp_delta;
     }
-    vm_clock_warp_start = -1;
+    timers_state.vm_clock_warp_start = -1;
     seqlock_write_end(&timers_state.vm_clock_seqlock);
 
     if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
@@ -594,11 +594,13 @@ void qemu_start_warp_timer(void)
              * every 100ms.
              */
             seqlock_write_begin(&timers_state.vm_clock_seqlock);
-            if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
-                vm_clock_warp_start = clock;
+            if (timers_state.vm_clock_warp_start == -1
+                || timers_state.vm_clock_warp_start > clock) {
+                timers_state.vm_clock_warp_start = clock;
             }
             seqlock_write_end(&timers_state.vm_clock_seqlock);
-            timer_mod_anticipate(icount_warp_timer, clock + deadline);
+            timer_mod_anticipate(timers_state.icount_warp_timer,
+                                 clock + deadline);
         }
     } else if (deadline == 0) {
         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -623,7 +625,7 @@ static void qemu_account_warp_timer(void)
         return;
     }
 
-    timer_del(icount_warp_timer);
+    timer_del(timers_state.icount_warp_timer);
     icount_warp_rt();
 }
 
@@ -632,6 +634,45 @@ static bool icount_state_needed(void *opaque)
     return use_icount;
 }
 
+static bool warp_timer_state_needed(void *opaque)
+{
+    TimersState *s = opaque;
+    return s->icount_warp_timer != NULL;
+}
+
+static bool adjust_timers_state_needed(void *opaque)
+{
+    TimersState *s = opaque;
+    return s->icount_rt_timer != NULL;
+}
+
+/*
+ * Subsection for warp timer migration is optional, because may not be created
+ */
+static const VMStateDescription icount_vmstate_warp_timer = {
+    .name = "timer/icount/warp_timer",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = warp_timer_state_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(vm_clock_warp_start, TimersState),
+        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription icount_vmstate_adjust_timers = {
+    .name = "timer/icount/timers",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = adjust_timers_state_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
+        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 /*
  * This is a subsection for icount migration.
  */
@@ -644,6 +685,11 @@ static const VMStateDescription icount_vmstate_timers = {
         VMSTATE_INT64(qemu_icount_bias, TimersState),
         VMSTATE_INT64(qemu_icount, TimersState),
         VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription*[]) {
+        &icount_vmstate_warp_timer,
+        &icount_vmstate_adjust_timers,
+        NULL
     }
 };
 
@@ -754,7 +800,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
 
     icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
     if (icount_sleep) {
-        icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
+        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                          icount_timer_cb, NULL);
     }
 
@@ -788,13 +834,14 @@ void configure_icount(QemuOpts *opts, Error **errp)
        the virtual time trigger catches emulated time passing too fast.
        Realtime triggers occur even when idle, so use them less frequently
        than VM triggers.  */
-    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+    timers_state.vm_clock_warp_start = -1;
+    timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                    icount_adjust_rt, NULL);
-    timer_mod(icount_rt_timer,
+    timer_mod(timers_state.icount_rt_timer,
                    qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
-    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+    timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         icount_adjust_vm, NULL);
-    timer_mod(icount_vm_timer,
+    timer_mod(timers_state.icount_vm_timer,
                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                    NANOSECONDS_PER_SECOND / 10);
 }
@@ -862,7 +909,8 @@ static void kick_tcg_thread(void *opaque)
 
 static void start_tcg_kick_timer(void)
 {
-    if (!mttcg_enabled && !tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
+    assert(!mttcg_enabled);
+    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
         tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                            kick_tcg_thread, NULL);
         timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
@@ -871,6 +919,7 @@ static void start_tcg_kick_timer(void)
 
 static void stop_tcg_kick_timer(void)
 {
+    assert(!mttcg_enabled);
     if (tcg_kick_vcpu_timer) {
         timer_del(tcg_kick_vcpu_timer);
         tcg_kick_vcpu_timer = NULL;
@@ -1090,18 +1139,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
     process_queued_cpu_work(cpu);
 }
 
-static bool qemu_tcg_should_sleep(CPUState *cpu)
+static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
 {
-    if (mttcg_enabled) {
-        return cpu_thread_is_idle(cpu);
-    } else {
-        return all_cpu_threads_idle();
-    }
-}
-
-static void qemu_tcg_wait_io_event(CPUState *cpu)
-{
-    while (qemu_tcg_should_sleep(cpu)) {
+    while (all_cpu_threads_idle()) {
         stop_tcg_kick_timer();
         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
     }
@@ -1111,20 +1151,18 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
     qemu_wait_io_event_common(cpu);
 }
 
-static void qemu_kvm_wait_io_event(CPUState *cpu)
+static void qemu_wait_io_event(CPUState *cpu)
 {
     while (cpu_thread_is_idle(cpu)) {
         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
     }
 
-    qemu_wait_io_event_common(cpu);
-}
-
-static void qemu_hvf_wait_io_event(CPUState *cpu)
-{
-    while (cpu_thread_is_idle(cpu)) {
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+#ifdef _WIN32
+    /* Eat dummy APC queued by qemu_cpu_kick_thread. */
+    if (!tcg_enabled()) {
+        SleepEx(0, TRUE);
     }
+#endif
     qemu_wait_io_event_common(cpu);
 }
 
@@ -1160,7 +1198,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
                 cpu_handle_guest_debug(cpu);
             }
         }
-        qemu_kvm_wait_io_event(cpu);
+        qemu_wait_io_event(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     qemu_kvm_destroy_vcpu(cpu);
@@ -1206,7 +1244,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
             exit(1);
        }
         qemu_mutex_lock_iothread();
-        qemu_wait_io_event_common(cpu);
+        qemu_wait_io_event(cpu);
     }
 
     return NULL;
@@ -1423,7 +1461,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
             atomic_mb_set(&cpu->exit_request, 0);
         }
 
-        qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
+        qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
         deal_with_unplugged_cpus();
     }
 
@@ -1454,13 +1492,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
             }
         }
 
-        while (cpu_thread_is_idle(cpu)) {
-            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
-        }
-#ifdef _WIN32
-        SleepEx(0, TRUE);
-#endif
-        qemu_wait_io_event_common(cpu);
+        qemu_wait_io_event(cpu);
     }
     return NULL;
 }
@@ -1497,7 +1529,7 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
                 cpu_handle_guest_debug(cpu);
             }
         }
-        qemu_hvf_wait_io_event(cpu);
+        qemu_wait_io_event(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     hvf_vcpu_destroy(cpu);
@@ -1576,7 +1608,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
         }
 
         atomic_mb_set(&cpu->exit_request, 0);
-        qemu_tcg_wait_io_event(cpu);
+        qemu_wait_io_event(cpu);
     }
 
     return NULL;