Diffstat (limited to 'accel')
-rw-r--r--  accel/kvm/kvm-all.c  392
1 file changed, 392 insertions, 0 deletions
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index a4c4863f53..7cc9e33bab 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -47,6 +47,7 @@
#include "kvm-cpus.h"
#include "hw/boards.h"
+#include "monitor/stats.h"
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
@@ -2310,6 +2311,9 @@ bool kvm_dirty_ring_enabled(void)
return kvm_state->kvm_dirty_ring_size ? true : false;
}
+static void query_stats_cb(StatsResultList **result, StatsTarget target, Error **errp);
+static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
+
static int kvm_init(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@@ -2638,6 +2642,10 @@ static int kvm_init(MachineState *ms)
}
}
+ if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
+ add_stats_callbacks(query_stats_cb, query_stats_schemas_cb);
+ }
+
return 0;
err:
@@ -3697,3 +3705,387 @@ static void kvm_type_init(void)
}
type_init(kvm_type_init);
+
+typedef struct StatsArgs {
+ union StatsResultsType {
+ StatsResultList **stats;
+ StatsSchemaList **schema;
+ } result;
+ Error **errp;
+} StatsArgs;
+
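+/*
+ * Convert one KVM stats descriptor and its data into a Stats element and
+ * prepend it to 'stats_list'; descriptors with unknown flags are skipped.
+ */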
+static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
+ uint64_t *stats_data,
+ StatsList *stats_list,
+ Error **errp)
+{
+
+ Stats *stats;
+ uint64List *val_list = NULL;
+
+ /* Only add stats that we understand. */
+ switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
+ case KVM_STATS_TYPE_CUMULATIVE:
+ case KVM_STATS_TYPE_INSTANT:
+ case KVM_STATS_TYPE_PEAK:
+ case KVM_STATS_TYPE_LINEAR_HIST:
+ case KVM_STATS_TYPE_LOG_HIST:
+ break;
+ default:
+ return stats_list;
+ }
+
+ switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
+ case KVM_STATS_UNIT_NONE:
+ case KVM_STATS_UNIT_BYTES:
+ case KVM_STATS_UNIT_CYCLES:
+ case KVM_STATS_UNIT_SECONDS:
+ break;
+ default:
+ return stats_list;
+ }
+
+ switch (pdesc->flags & KVM_STATS_BASE_MASK) {
+ case KVM_STATS_BASE_POW10:
+ case KVM_STATS_BASE_POW2:
+ break;
+ default:
+ return stats_list;
+ }
+
+ /* Alloc and populate data list */
+ stats = g_new0(Stats, 1);
+ stats->name = g_strdup(pdesc->name);
+    stats->value = g_new0(StatsValue, 1);
+
+ if (pdesc->size == 1) {
+ stats->value->u.scalar = *stats_data;
+ stats->value->type = QTYPE_QNUM;
+ } else {
+ int i;
+ for (i = 0; i < pdesc->size; i++) {
+ QAPI_LIST_PREPEND(val_list, stats_data[i]);
+ }
+ stats->value->u.list = val_list;
+ stats->value->type = QTYPE_QLIST;
+ }
+
+ QAPI_LIST_PREPEND(stats_list, stats);
+ return stats_list;
+}
+
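+/*
+ * Convert one KVM stats descriptor into a StatsSchemaValue entry and
+ * prepend it to 'list'; descriptors with unknown flags are skipped.
+ */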
+static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
+ StatsSchemaValueList *list,
+ Error **errp)
+{
+ StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
+ schema_entry->value = g_new0(StatsSchemaValue, 1);
+
+ switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
+ case KVM_STATS_TYPE_CUMULATIVE:
+ schema_entry->value->type = STATS_TYPE_CUMULATIVE;
+ break;
+ case KVM_STATS_TYPE_INSTANT:
+ schema_entry->value->type = STATS_TYPE_INSTANT;
+ break;
+ case KVM_STATS_TYPE_PEAK:
+ schema_entry->value->type = STATS_TYPE_PEAK;
+ break;
+ case KVM_STATS_TYPE_LINEAR_HIST:
+ schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
+ schema_entry->value->bucket_size = pdesc->bucket_size;
+ schema_entry->value->has_bucket_size = true;
+ break;
+ case KVM_STATS_TYPE_LOG_HIST:
+ schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
+ break;
+ default:
+ goto exit;
+ }
+
+ switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
+ case KVM_STATS_UNIT_NONE:
+ break;
+ case KVM_STATS_UNIT_BYTES:
+ schema_entry->value->has_unit = true;
+ schema_entry->value->unit = STATS_UNIT_BYTES;
+ break;
+ case KVM_STATS_UNIT_CYCLES:
+ schema_entry->value->has_unit = true;
+ schema_entry->value->unit = STATS_UNIT_CYCLES;
+ break;
+ case KVM_STATS_UNIT_SECONDS:
+ schema_entry->value->has_unit = true;
+ schema_entry->value->unit = STATS_UNIT_SECONDS;
+ break;
+ default:
+ goto exit;
+ }
+
+ schema_entry->value->exponent = pdesc->exponent;
+ if (pdesc->exponent) {
+ switch (pdesc->flags & KVM_STATS_BASE_MASK) {
+ case KVM_STATS_BASE_POW10:
+ schema_entry->value->has_base = true;
+ schema_entry->value->base = 10;
+ break;
+ case KVM_STATS_BASE_POW2:
+ schema_entry->value->has_base = true;
+ schema_entry->value->base = 2;
+ break;
+ default:
+ goto exit;
+ }
+ }
+
+ schema_entry->value->name = g_strdup(pdesc->name);
+ schema_entry->next = list;
+ return schema_entry;
+exit:
+ g_free(schema_entry->value);
+ g_free(schema_entry);
+ return list;
+}
+
+/* Cached stats descriptors */
+typedef struct StatsDescriptors {
+ const char *ident; /* cache key, currently the StatsTarget */
+ struct kvm_stats_desc *kvm_stats_desc;
+ struct kvm_stats_header *kvm_stats_header;
+ QTAILQ_ENTRY(StatsDescriptors) next;
+} StatsDescriptors;
+
+static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
+ QTAILQ_HEAD_INITIALIZER(stats_descriptors);
+
+/*
+ * Return the descriptors for 'target', which either have already been read
+ * or are retrieved from 'stats_fd'.
+ */
+static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
+ Error **errp)
+{
+ StatsDescriptors *descriptors;
+ const char *ident;
+ struct kvm_stats_desc *kvm_stats_desc;
+ struct kvm_stats_header *kvm_stats_header;
+ size_t size_desc;
+ ssize_t ret;
+
+ ident = StatsTarget_str(target);
+ QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
+ if (g_str_equal(descriptors->ident, ident)) {
+ return descriptors;
+ }
+ }
+
+ descriptors = g_new0(StatsDescriptors, 1);
+
+ /* Read stats header */
+ kvm_stats_header = g_malloc(sizeof(*kvm_stats_header));
+ ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
+    if (ret != sizeof(*kvm_stats_header)) {
+        error_setg(errp, "KVM stats: failed to read stats header: "
+                   "expected %zu actual %zd",
+                   sizeof(*kvm_stats_header), ret);
+        g_free(kvm_stats_header);
+        g_free(descriptors);
+        return NULL;
+    }
+ size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
+
+ /* Read stats descriptors */
+ kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
+ ret = pread(stats_fd, kvm_stats_desc,
+ size_desc * kvm_stats_header->num_desc,
+ kvm_stats_header->desc_offset);
+
+    if (ret != size_desc * kvm_stats_header->num_desc) {
+        error_setg(errp, "KVM stats: failed to read stats descriptors: "
+                   "expected %zu actual %zd",
+                   size_desc * kvm_stats_header->num_desc, ret);
+        g_free(kvm_stats_desc);
+        g_free(kvm_stats_header);
+        g_free(descriptors);
+        return NULL;
+    }
+ descriptors->kvm_stats_header = kvm_stats_header;
+ descriptors->kvm_stats_desc = kvm_stats_desc;
+ descriptors->ident = ident;
+ QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
+ return descriptors;
+}
+
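+/* Read the stats for 'target' from 'stats_fd' and add them to 'result'. */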
+static void query_stats(StatsResultList **result, StatsTarget target,
+ int stats_fd, Error **errp)
+{
+ struct kvm_stats_desc *kvm_stats_desc;
+ struct kvm_stats_header *kvm_stats_header;
+ StatsDescriptors *descriptors;
+ g_autofree uint64_t *stats_data = NULL;
+ struct kvm_stats_desc *pdesc;
+ StatsList *stats_list = NULL;
+ size_t size_desc, size_data = 0;
+ ssize_t ret;
+ int i;
+
+ descriptors = find_stats_descriptors(target, stats_fd, errp);
+ if (!descriptors) {
+ return;
+ }
+
+ kvm_stats_header = descriptors->kvm_stats_header;
+ kvm_stats_desc = descriptors->kvm_stats_desc;
+ size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
+
+    /* Tally the total data size */
+ for (i = 0; i < kvm_stats_header->num_desc; ++i) {
+ pdesc = (void *)kvm_stats_desc + i * size_desc;
+ size_data += pdesc->size * sizeof(*stats_data);
+ }
+
+ stats_data = g_malloc0(size_data);
+ ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
+
+ if (ret != size_data) {
+ error_setg(errp, "KVM stats: failed to read data: "
+                   "expected %zu actual %zd", size_data, ret);
+ return;
+ }
+
+ for (i = 0; i < kvm_stats_header->num_desc; ++i) {
+ uint64_t *stats;
+ pdesc = (void *)kvm_stats_desc + i * size_desc;
+
+ /* Add entry to the list */
+ stats = (void *)stats_data + pdesc->offset;
+ stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
+ }
+
+ if (!stats_list) {
+ return;
+ }
+
+ switch (target) {
+ case STATS_TARGET_VM:
+ add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
+ break;
+ case STATS_TARGET_VCPU:
+ add_stats_entry(result, STATS_PROVIDER_KVM,
+ current_cpu->parent_obj.canonical_path,
+ stats_list);
+ break;
+ default:
+ break;
+ }
+}
+
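+/* Build the schema for 'target' from 'stats_fd' and add it to 'result'. */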
+static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
+ int stats_fd, Error **errp)
+{
+ struct kvm_stats_desc *kvm_stats_desc;
+ struct kvm_stats_header *kvm_stats_header;
+ StatsDescriptors *descriptors;
+ struct kvm_stats_desc *pdesc;
+ StatsSchemaValueList *stats_list = NULL;
+ size_t size_desc;
+ int i;
+
+ descriptors = find_stats_descriptors(target, stats_fd, errp);
+ if (!descriptors) {
+ return;
+ }
+
+ kvm_stats_header = descriptors->kvm_stats_header;
+ kvm_stats_desc = descriptors->kvm_stats_desc;
+ size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
+
+    /* Build a schema entry for each stats descriptor */
+ for (i = 0; i < kvm_stats_header->num_desc; ++i) {
+ pdesc = (void *)kvm_stats_desc + i * size_desc;
+ stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
+ }
+
+ add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
+}
+
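+/* run_on_cpu callback: query the stats of one vCPU from its own thread. */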
+static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
+{
+ StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
+ int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+ Error *local_err = NULL;
+
+    if (stats_fd < 0) {
+ error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
+ error_propagate(kvm_stats_args->errp, local_err);
+ return;
+ }
+ query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU, stats_fd,
+ kvm_stats_args->errp);
+ close(stats_fd);
+}
+
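+/* run_on_cpu callback: query the stats schema from a vCPU thread. */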
+static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
+{
+ StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
+ int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+ Error *local_err = NULL;
+
+    if (stats_fd < 0) {
+ error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
+ error_propagate(kvm_stats_args->errp, local_err);
+ return;
+ }
+ query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
+ kvm_stats_args->errp);
+ close(stats_fd);
+}
+
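+/* Stats query callback registered with add_stats_callbacks() in kvm_init(). */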
+static void query_stats_cb(StatsResultList **result, StatsTarget target, Error **errp)
+{
+ KVMState *s = kvm_state;
+ CPUState *cpu;
+ int stats_fd;
+
+ switch (target) {
+ case STATS_TARGET_VM:
+ {
+ stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
+        if (stats_fd < 0) {
+ error_setg_errno(errp, errno, "KVM stats: ioctl failed");
+ return;
+ }
+ query_stats(result, target, stats_fd, errp);
+ close(stats_fd);
+ break;
+ }
+ case STATS_TARGET_VCPU:
+ {
+ StatsArgs stats_args;
+ stats_args.result.stats = result;
+ stats_args.errp = errp;
+ CPU_FOREACH(cpu) {
+ run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
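+/* Schema query callback registered with add_stats_callbacks() in kvm_init(). */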
+static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
+{
+ StatsArgs stats_args;
+ KVMState *s = kvm_state;
+ int stats_fd;
+
+ stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
+    if (stats_fd < 0) {
+ error_setg_errno(errp, errno, "KVM stats: ioctl failed");
+ return;
+ }
+ query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
+ close(stats_fd);
+
+ stats_args.result.schema = result;
+ stats_args.errp = errp;
+ run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
+}