Diffstat (limited to 'kvm-all.c')
-rw-r--r--   kvm-all.c   243
1 file changed, 125 insertions, 118 deletions
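
The KVMMemoryListener type used throughout this patch is not defined in kvm-all.c itself; for orientation, here is a sketch of what presumably lands in the new "sysemu/kvm_int.h" header, reconstructed from the KVMSlot definition removed below and from the kml->slots, kml->as_id and kml->listener accesses in the hunks that follow:

/* Sketch only, not part of this diff: slot/listener types as they
 * presumably appear in "sysemu/kvm_int.h", inferred from the fields
 * removed from kvm-all.c and the accesses in the hunks below. */
typedef struct KVMSlot
{
    hwaddr start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct KVMMemoryListener {
    MemoryListener listener;
    KVMSlot *slots;
    int as_id;
} KVMMemoryListener;
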
diff --git a/kvm-all.c b/kvm-all.c
index cb8318707a..06e06f2b3f 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -24,13 +24,11 @@
#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/accel.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
-#include "sysemu/kvm.h"
+#include "sysemu/kvm_int.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
@@ -61,22 +59,10 @@
#define KVM_MSI_HASHTAB_SIZE 256
-typedef struct KVMSlot
-{
- hwaddr start_addr;
- ram_addr_t memory_size;
- void *ram;
- int slot;
- int flags;
-} KVMSlot;
-
-typedef struct kvm_dirty_log KVMDirtyLog;
-
struct KVMState
{
AccelState parent_obj;
- KVMSlot *slots;
int nr_slots;
int fd;
int vmfd;
@@ -108,13 +94,9 @@ struct KVMState
QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
bool direct_msi;
#endif
+ KVMMemoryListener memory_listener;
};
-#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
-
-#define KVM_STATE(obj) \
- OBJECT_CHECK(KVMState, (obj), TYPE_KVM_ACCEL)
-
KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
@@ -135,13 +117,14 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
KVM_CAP_LAST_INFO
};
-static KVMSlot *kvm_get_free_slot(KVMState *s)
+static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
+ KVMState *s = kvm_state;
int i;
for (i = 0; i < s->nr_slots; i++) {
- if (s->slots[i].memory_size == 0) {
- return &s->slots[i];
+ if (kml->slots[i].memory_size == 0) {
+ return &kml->slots[i];
}
}
@@ -150,12 +133,14 @@ static KVMSlot *kvm_get_free_slot(KVMState *s)
bool kvm_has_free_slot(MachineState *ms)
{
- return kvm_get_free_slot(KVM_STATE(ms->accelerator));
+ KVMState *s = KVM_STATE(ms->accelerator);
+
+ return kvm_get_free_slot(&s->memory_listener);
}
-static KVMSlot *kvm_alloc_slot(KVMState *s)
+static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
- KVMSlot *slot = kvm_get_free_slot(s);
+ KVMSlot *slot = kvm_get_free_slot(kml);
if (slot) {
return slot;
@@ -165,14 +150,15 @@ static KVMSlot *kvm_alloc_slot(KVMState *s)
abort();
}
-static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
+static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
hwaddr start_addr,
hwaddr end_addr)
{
+ KVMState *s = kvm_state;
int i;
for (i = 0; i < s->nr_slots; i++) {
- KVMSlot *mem = &s->slots[i];
+ KVMSlot *mem = &kml->slots[i];
if (start_addr == mem->start_addr &&
end_addr == mem->start_addr + mem->memory_size) {
@@ -186,15 +172,16 @@ static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
/*
* Find overlapping slot with lowest start address
*/
-static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
+static KVMSlot *kvm_lookup_overlapping_slot(KVMMemoryListener *kml,
hwaddr start_addr,
hwaddr end_addr)
{
+ KVMState *s = kvm_state;
KVMSlot *found = NULL;
int i;
for (i = 0; i < s->nr_slots; i++) {
- KVMSlot *mem = &s->slots[i];
+ KVMSlot *mem = &kml->slots[i];
if (mem->memory_size == 0 ||
(found && found->start_addr < mem->start_addr)) {
@@ -213,10 +200,11 @@ static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
hwaddr *phys_addr)
{
+ KVMMemoryListener *kml = &s->memory_listener;
int i;
for (i = 0; i < s->nr_slots; i++) {
- KVMSlot *mem = &s->slots[i];
+ KVMSlot *mem = &kml->slots[i];
if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
*phys_addr = mem->start_addr + (ram - mem->ram);
@@ -227,11 +215,12 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
return 0;
}
-static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
+static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
{
+ KVMState *s = kvm_state;
struct kvm_userspace_memory_region mem;
- mem.slot = slot->slot;
+ mem.slot = slot->slot | (kml->as_id << 16);
mem.guest_phys_addr = slot->start_addr;
mem.userspace_addr = (unsigned long)slot->ram;
mem.flags = slot->flags;
@@ -293,45 +282,47 @@ err:
* dirty pages logging control
*/
-static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
+static int kvm_mem_flags(MemoryRegion *mr)
{
+ bool readonly = mr->readonly || memory_region_is_romd(mr);
int flags = 0;
- flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
+
+ if (memory_region_get_dirty_log_mask(mr) != 0) {
+ flags |= KVM_MEM_LOG_DIRTY_PAGES;
+ }
if (readonly && kvm_readonly_mem_allowed) {
flags |= KVM_MEM_READONLY;
}
return flags;
}
-static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
+static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
+ MemoryRegion *mr)
{
- KVMState *s = kvm_state;
- int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
int old_flags;
old_flags = mem->flags;
-
- flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
- mem->flags = flags;
+ mem->flags = kvm_mem_flags(mr);
/* If nothing changed effectively, no need to issue ioctl */
- if (flags == old_flags) {
+ if (mem->flags == old_flags) {
return 0;
}
- return kvm_set_user_memory_region(s, mem);
+ return kvm_set_user_memory_region(kml, mem);
}
-static int kvm_dirty_pages_log_change(hwaddr phys_addr,
- ram_addr_t size, bool log_dirty)
+static int kvm_section_update_flags(KVMMemoryListener *kml,
+ MemoryRegionSection *section)
{
- KVMState *s = kvm_state;
- KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
+ hwaddr phys_addr = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ KVMSlot *mem = kvm_lookup_matching_slot(kml, phys_addr, phys_addr + size);
if (mem == NULL) {
return 0;
} else {
- return kvm_slot_dirty_pages_log_change(mem, log_dirty);
+ return kvm_slot_update_flags(kml, mem, section->mr);
}
}
@@ -339,14 +330,14 @@ static void kvm_log_start(MemoryListener *listener,
MemoryRegionSection *section,
int old, int new)
{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
if (old != 0) {
return;
}
- r = kvm_dirty_pages_log_change(section->offset_within_address_space,
- int128_get64(section->size), true);
+ r = kvm_section_update_flags(kml, section);
if (r < 0) {
abort();
}
@@ -356,14 +347,14 @@ static void kvm_log_stop(MemoryListener *listener,
MemoryRegionSection *section,
int old, int new)
{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
if (new != 0) {
return;
}
- r = kvm_dirty_pages_log_change(section->offset_within_address_space,
- int128_get64(section->size), false);
+ r = kvm_section_update_flags(kml, section);
if (r < 0) {
abort();
}
@@ -391,11 +382,12 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
* @start_add: start of logged region.
* @end_addr: end of logged region.
*/
-static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
+static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
+ MemoryRegionSection *section)
{
KVMState *s = kvm_state;
unsigned long size, allocated_size = 0;
- KVMDirtyLog d = {};
+ struct kvm_dirty_log d = {};
KVMSlot *mem;
int ret = 0;
hwaddr start_addr = section->offset_within_address_space;
@@ -403,7 +395,7 @@ static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
d.dirty_bitmap = NULL;
while (start_addr < end_addr) {
- mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
+ mem = kvm_lookup_overlapping_slot(kml, start_addr, end_addr);
if (mem == NULL) {
break;
}
@@ -430,8 +422,7 @@ static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
allocated_size = size;
memset(d.dirty_bitmap, 0, allocated_size);
- d.slot = mem->slot;
-
+ d.slot = mem->slot | (kml->as_id << 16);
if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
DPRINTF("ioctl failed %d\n", errno);
ret = -1;
@@ -634,15 +625,14 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
return NULL;
}
-static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
+static void kvm_set_phys_mem(KVMMemoryListener *kml,
+ MemoryRegionSection *section, bool add)
{
KVMState *s = kvm_state;
KVMSlot *mem, old;
int err;
MemoryRegion *mr = section->mr;
- bool log_dirty = memory_region_get_dirty_log_mask(mr) != 0;
bool writeable = !mr->readonly && !mr->rom_device;
- bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
hwaddr start_addr = section->offset_within_address_space;
ram_addr_t size = int128_get64(section->size);
void *ram = NULL;
@@ -676,7 +666,7 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
while (1) {
- mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
+ mem = kvm_lookup_overlapping_slot(kml, start_addr, start_addr + size);
if (!mem) {
break;
}
@@ -686,19 +676,19 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
(ram - start_addr == mem->ram - mem->start_addr)) {
/* The new slot fits into the existing one and comes with
* identical parameters - update flags and done. */
- kvm_slot_dirty_pages_log_change(mem, log_dirty);
+ kvm_slot_update_flags(kml, mem, mr);
return;
}
old = *mem;
if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
- kvm_physical_sync_dirty_bitmap(section);
+ kvm_physical_sync_dirty_bitmap(kml, section);
}
/* unregister the overlapping slot */
mem->memory_size = 0;
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
__func__, strerror(-err));
@@ -715,13 +705,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
* - and actually require a recent KVM version. */
if (s->broken_set_mem_region &&
old.start_addr == start_addr && old.memory_size < size && add) {
- mem = kvm_alloc_slot(s);
+ mem = kvm_alloc_slot(kml);
mem->memory_size = old.memory_size;
mem->start_addr = old.start_addr;
mem->ram = old.ram;
- mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+ mem->flags = kvm_mem_flags(mr);
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error updating slot: %s\n", __func__,
strerror(-err));
@@ -736,13 +726,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
/* register prefix slot */
if (old.start_addr < start_addr) {
- mem = kvm_alloc_slot(s);
+ mem = kvm_alloc_slot(kml);
mem->memory_size = start_addr - old.start_addr;
mem->start_addr = old.start_addr;
mem->ram = old.ram;
- mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+ mem->flags = kvm_mem_flags(mr);
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error registering prefix slot: %s\n",
__func__, strerror(-err));
@@ -759,14 +749,14 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
if (old.start_addr + old.memory_size > start_addr + size) {
ram_addr_t size_delta;
- mem = kvm_alloc_slot(s);
+ mem = kvm_alloc_slot(kml);
mem->start_addr = start_addr + size;
size_delta = mem->start_addr - old.start_addr;
mem->memory_size = old.memory_size - size_delta;
mem->ram = old.ram + size_delta;
- mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+ mem->flags = kvm_mem_flags(mr);
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error registering suffix slot: %s\n",
__func__, strerror(-err));
@@ -782,13 +772,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
if (!add) {
return;
}
- mem = kvm_alloc_slot(s);
+ mem = kvm_alloc_slot(kml);
mem->memory_size = size;
mem->start_addr = start_addr;
mem->ram = ram;
- mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+ mem->flags = kvm_mem_flags(mr);
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error registering slot: %s\n", __func__,
strerror(-err));
@@ -799,23 +789,28 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
static void kvm_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+
memory_region_ref(section->mr);
- kvm_set_phys_mem(section, true);
+ kvm_set_phys_mem(kml, section, true);
}
static void kvm_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
- kvm_set_phys_mem(section, false);
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+
+ kvm_set_phys_mem(kml, section, false);
memory_region_unref(section->mr);
}
static void kvm_log_sync(MemoryListener *listener,
MemoryRegionSection *section)
{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
- r = kvm_physical_sync_dirty_bitmap(section);
+ r = kvm_physical_sync_dirty_bitmap(kml, section);
if (r < 0) {
abort();
}
@@ -890,18 +885,27 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener,
}
}
-static MemoryListener kvm_memory_listener = {
- .region_add = kvm_region_add,
- .region_del = kvm_region_del,
- .log_start = kvm_log_start,
- .log_stop = kvm_log_stop,
- .log_sync = kvm_log_sync,
- .eventfd_add = kvm_mem_ioeventfd_add,
- .eventfd_del = kvm_mem_ioeventfd_del,
- .coalesced_mmio_add = kvm_coalesce_mmio_region,
- .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
- .priority = 10,
-};
+void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
+ AddressSpace *as, int as_id)
+{
+ int i;
+
+ kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
+ kml->as_id = as_id;
+
+ for (i = 0; i < s->nr_slots; i++) {
+ kml->slots[i].slot = i;
+ }
+
+ kml->listener.region_add = kvm_region_add;
+ kml->listener.region_del = kvm_region_del;
+ kml->listener.log_start = kvm_log_start;
+ kml->listener.log_stop = kvm_log_stop;
+ kml->listener.log_sync = kvm_log_sync;
+ kml->listener.priority = 10;
+
+ memory_listener_register(&kml->listener, as);
+}
static MemoryListener kvm_io_listener = {
.eventfd_add = kvm_io_ioeventfd_add,
@@ -1373,27 +1377,31 @@ void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}
-static int kvm_irqchip_create(MachineState *machine, KVMState *s)
+static void kvm_irqchip_create(MachineState *machine, KVMState *s)
{
int ret;
- if (!machine_kernel_irqchip_allowed(machine) ||
- (!kvm_check_extension(s, KVM_CAP_IRQCHIP) &&
- (kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0) < 0))) {
- return 0;
+ if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
+ ;
+ } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
+ exit(1);
+ }
+ } else {
+ return;
}
/* First probe and see if there's a arch-specific hook to create the
* in-kernel irqchip for us */
ret = kvm_arch_irqchip_create(s);
- if (ret < 0) {
- return ret;
- } else if (ret == 0) {
+ if (ret == 0) {
ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
- if (ret < 0) {
- fprintf(stderr, "Create kernel irqchip failed\n");
- return ret;
- }
+ }
+ if (ret < 0) {
+ fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
+ exit(1);
}
kvm_kernel_irqchip = true;
@@ -1406,8 +1414,6 @@ static int kvm_irqchip_create(MachineState *machine, KVMState *s)
kvm_init_irq_routing(s);
s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
-
- return 0;
}
/* Find number of supported CPUs using the recommended
@@ -1444,7 +1450,7 @@ static int kvm_init(MachineState *ms)
KVMState *s;
const KVMCapabilityInfo *missing_cap;
int ret;
- int i, type = 0;
+ int type = 0;
const char *kvm_type;
s = KVM_STATE(ms->accelerator);
@@ -1493,12 +1499,6 @@ static int kvm_init(MachineState *ms)
s->nr_slots = 32;
}
- s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
-
- for (i = 0; i < s->nr_slots; i++) {
- s->slots[i].slot = i;
- }
-
/* check the vcpu limits */
soft_vcpus_limit = kvm_recommended_vcpus(s);
hard_vcpus_limit = kvm_max_vcpus(s);
@@ -1630,14 +1630,21 @@ static int kvm_init(MachineState *ms)
goto err;
}
- ret = kvm_irqchip_create(ms, s);
- if (ret < 0) {
- goto err;
+ if (machine_kernel_irqchip_allowed(ms)) {
+ kvm_irqchip_create(ms, s);
}
kvm_state = s;
- memory_listener_register(&kvm_memory_listener, &address_space_memory);
- memory_listener_register(&kvm_io_listener, &address_space_io);
+
+ s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
+ s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
+ s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region;
+ s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region;
+
+ kvm_memory_listener_register(s, &s->memory_listener,
+ &address_space_memory, 0);
+ memory_listener_register(&kvm_io_listener,
+ &address_space_io);
s->many_ioeventfds = kvm_check_many_ioeventfds();
@@ -1653,7 +1660,7 @@ err:
if (s->fd != -1) {
close(s->fd);
}
- g_free(s->slots);
+ g_free(s->memory_listener.slots);
return ret;
}
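
To illustrate how the refactored API could be used beyond the default address space wired up in kvm_init() above, here is a hypothetical sketch (not part of this patch): a second KVMMemoryListener registered with a non-zero as_id, which kvm_set_user_memory_region() and the dirty-log ioctl then encode into bits 16 and up of the slot id. The address-space variable and the helper name are illustrative assumptions only.

/* Hypothetical usage sketch, not part of this patch: register an extra
 * KVMMemoryListener for a second address space.  "smram_address_space"
 * and this helper are illustrative names; as_id 1 is what ends up in
 * bits 16+ of mem.slot for the slots owned by this listener. */
static KVMMemoryListener smram_listener;

static void kvm_smram_init(KVMState *s, AddressSpace *smram_address_space)
{
    kvm_memory_listener_register(s, &smram_listener, smram_address_space, 1);
}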