author    Avi Kivity <avi@redhat.com>            2011-10-18 19:43:12 +0200
committer Marcelo Tosatti <mtosatti@redhat.com>  2011-10-24 21:26:53 -0200
commit    1cae88b9f4121c9af0bf677435c6129e643280fd (patch)
tree      19405c55e15389570311cf7a633d5ce03f152fb5
parent    626c427624ac1d6b5dd245cb37988f046cec5f03 (diff)
kvm: avoid reentering kvm_flush_coalesced_mmio_buffer()
mmio callbacks invoked by kvm_flush_coalesced_mmio_buffer() may themselves
indirectly call kvm_flush_coalesced_mmio_buffer(). Prevent reentering the
function by checking a flag that indicates we're processing coalesced mmio
requests.

Signed-off-by: Avi Kivity <avi@redhat.com>
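For reference, a minimal standalone sketch of the reentrancy-guard pattern the
patch introduces: a boolean flag is set while the buffer is being drained, and
any indirect call back into the flush routine returns immediately instead of
recursing. The names flush_coalesced_mmio_buffer() and mmio_callback() below
are illustrative stand-ins, not the actual QEMU functions.

/* Sketch only: a guard flag that turns indirect reentry into a no-op. */
#include <stdbool.h>
#include <stdio.h>

static bool flush_in_progress;

static void mmio_callback(void);

static void flush_coalesced_mmio_buffer(void)
{
    /* A callback invoked below may call back into this function;
     * bail out early instead of re-walking the buffer recursively. */
    if (flush_in_progress) {
        return;
    }
    flush_in_progress = true;

    mmio_callback();            /* stands in for draining one ring entry */

    flush_in_progress = false;
}

static void mmio_callback(void)
{
    printf("handling one coalesced mmio entry\n");
    flush_coalesced_mmio_buffer();  /* indirect reentry, now a no-op */
}

int main(void)
{
    flush_coalesced_mmio_buffer();
    return 0;
}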
-rw-r--r--  kvm-all.c  10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/kvm-all.c b/kvm-all.c
index e7faf5cba9..c09ddf7ac5 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -64,6 +64,7 @@ struct KVMState
     int vmfd;
     int coalesced_mmio;
     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+    bool coalesced_flush_in_progress;
     int broken_set_mem_region;
     int migration_log;
     int vcpu_events;
@@ -876,6 +877,13 @@ static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
 void kvm_flush_coalesced_mmio_buffer(void)
 {
     KVMState *s = kvm_state;
+
+    if (s->coalesced_flush_in_progress) {
+        return;
+    }
+
+    s->coalesced_flush_in_progress = true;
+
     if (s->coalesced_mmio_ring) {
         struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
         while (ring->first != ring->last) {
@@ -888,6 +896,8 @@ void kvm_flush_coalesced_mmio_buffer(void)
             ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
         }
     }
+
+    s->coalesced_flush_in_progress = false;
 }

 static void do_kvm_cpu_synchronize_state(void *_env)