author    Huang Ying <ying.huang@intel.com>  2011-03-02 08:56:20 +0100
committer Marcelo Tosatti <mtosatti@redhat.com>  2011-03-15 01:19:06 -0300
commit    3c85e74fbf9e5a39d8d13ef91a5f3dd91f0bc8a8 (patch)
tree      c9e4334842cbf7a95ecc802722f4595b99bf9cf2 /target-i386
parent    cd19cfa23609dc1a35dd34f0b7554a8462337fde (diff)
KVM, MCE, unpoison memory address across reboot
In the Linux kernel's HWPoison handling, the virtual address in any process mapping the faulty physical memory page is marked as HWPoison, so that further accesses to that virtual address kill the corresponding process with SIGBUS. If the faulty physical page is used by a KVM guest, the SIGBUS is delivered to QEMU, and QEMU simulates an MCE to report the memory error to the guest OS. If the guest OS cannot recover from the error (for example, because the page is accessed by kernel code), it reboots the system. But because the underlying host virtual address backing the guest physical memory is still poisoned, any guest access to the corresponding guest physical memory after the reboot raises SIGBUS again and another MCE is simulated. In other words, the guest cannot recover by rebooting.

In fact, the contents of the guest physical memory page need not be preserved across a reboot; we can allocate a new host physical page to back the corresponding guest physical address. This patch fixes the issue in QEMU-KVM by calling qemu_ram_remap() to clear the corresponding page table entry, making it possible to allocate a fresh page and recover.

[ Jan: rebasing and tiny cleanups ]

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
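The recovery hinges on the fact that remapping the poisoned host virtual address discards the bad page and lets the kernel back it with a fresh one on the next fault. The following is a minimal, standalone sketch of that idea, not QEMU's actual qemu_ram_remap() (which also has to handle file-backed and pre-registered regions); the helper name remap_anonymous() and the hard-coded 4 KiB page size are assumptions for illustration only.

#include <stdio.h>
#include <sys/mman.h>

/* Sketch: throw away the (possibly poisoned) page backing 'addr' and
 * replace it with a freshly allocated anonymous page.  'addr' and 'size'
 * are assumed to be page-aligned. */
static int remap_anonymous(void *addr, size_t size)
{
    void *p = mmap(addr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return -1;
    }
    /* The new mapping is zero-filled, mirroring a reboot where the old
     * page contents are irrelevant. */
    return 0;
}

int main(void)
{
    size_t pagesz = 4096;            /* assume 4 KiB pages for the sketch */
    char *buf = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    buf[0] = 42;                     /* pretend this page went bad */
    remap_anonymous(buf, pagesz);
    printf("after remap: %d\n", buf[0]);  /* reads 0 from the fresh page */
    return 0;
}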
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/kvm.c | 36
1 file changed, 36 insertions, 0 deletions
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 15741861a1..f7995bd877 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -173,7 +173,40 @@ static int get_para_features(CPUState *env)
}
#endif /* CONFIG_KVM_PARA */
+typedef struct HWPoisonPage {
+ ram_addr_t ram_addr;
+ QLIST_ENTRY(HWPoisonPage) list;
+} HWPoisonPage;
+
+static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
+ QLIST_HEAD_INITIALIZER(hwpoison_page_list);
+
+static void kvm_unpoison_all(void *param)
+{
+ HWPoisonPage *page, *next_page;
+
+ QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
+ QLIST_REMOVE(page, list);
+ qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
+ qemu_free(page);
+ }
+}
+
#ifdef KVM_CAP_MCE
+static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
+{
+ HWPoisonPage *page;
+
+ QLIST_FOREACH(page, &hwpoison_page_list, list) {
+ if (page->ram_addr == ram_addr) {
+ return;
+ }
+ }
+ page = qemu_malloc(sizeof(HWPoisonPage));
+ page->ram_addr = ram_addr;
+ QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
+}
+
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
int *max_banks)
{
@@ -233,6 +266,7 @@ int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
hardware_memory_error();
}
}
+ kvm_hwpoison_page_add(ram_addr);
kvm_mce_inject(env, paddr, code);
} else
#endif /* KVM_CAP_MCE */
@@ -263,6 +297,7 @@ int kvm_arch_on_sigbus(int code, void *addr)
"QEMU itself instead of guest system!: %p\n", addr);
return 0;
}
+ kvm_hwpoison_page_add(ram_addr);
kvm_mce_inject(first_cpu, paddr, code);
} else
#endif /* KVM_CAP_MCE */
@@ -571,6 +606,7 @@ int kvm_arch_init(KVMState *s)
fprintf(stderr, "e820_add_entry() table is full\n");
return ret;
}
+ qemu_register_reset(kvm_unpoison_all, NULL);
return 0;
}