-rw-r--r--  MAINTAINERS                          |   1
-rw-r--r--  hw/intc/loongarch_extioi.c           |   4
-rw-r--r--  hw/intc/loongarch_ipi.c              |  86
-rw-r--r--  hw/intc/trace-events                 |   1
-rw-r--r--  hw/loongarch/virt.c                  |  25
-rw-r--r--  include/exec/target_page.h           |   1
-rw-r--r--  include/hw/intc/loongarch_extioi.h   |  10
-rw-r--r--  include/hw/intc/loongarch_ipi.h      |  10
-rw-r--r--  include/hw/loongarch/virt.h          |   3
-rw-r--r--  migration/block.c                    |   4
-rw-r--r--  migration/dirtyrate.c                |  64
-rw-r--r--  migration/meson.build                |   4
-rw-r--r--  migration/migration.c                |  14
-rw-r--r--  migration/options.c                  |   4
-rw-r--r--  migration/qemu-file.c                |  20
-rw-r--r--  migration/qemu-file.h                |  16
-rw-r--r--  migration/savevm.c                   |   6
-rw-r--r--  migration/trace-events               |   4
-rw-r--r--  migration/vmstate.c                  |   5
-rw-r--r--  softmmu/dirtylimit.c                 |  11
-rw-r--r--  softmmu/physmem.c                    |  11
-rw-r--r--  tests/avocado/machine_loongarch.py   |  58
22 files changed, 233 insertions, 129 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index ff2aa53bb9..50585117a0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -245,6 +245,7 @@ M: Xiaojuan Yang <yangxiaojuan@loongson.cn>
 S: Maintained
 F: target/loongarch/
 F: tests/tcg/loongarch64/
+F: tests/avocado/machine_loongarch.py
 
 M68K TCG CPUs
 M: Laurent Vivier <laurent@vivier.eu>
diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c
index 4b8ec3f28a..0e7a3e32f3 100644
--- a/hw/intc/loongarch_extioi.c
+++ b/hw/intc/loongarch_extioi.c
@@ -254,7 +254,7 @@ static const VMStateDescription vmstate_loongarch_extioi = {
     .minimum_version_id = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT),
-        VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, LOONGARCH_MAX_VCPUS,
+        VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, EXTIOI_CPUS,
                                EXTIOI_IRQS_GROUP_COUNT),
         VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI,
                              EXTIOI_IRQS_NODETYPE_COUNT / 2),
@@ -281,7 +281,7 @@ static void loongarch_extioi_instance_init(Object *obj)
 
     qdev_init_gpio_in(DEVICE(obj), extioi_setirq, EXTIOI_IRQS);
 
-    for (cpu = 0; cpu < LOONGARCH_MAX_VCPUS; cpu++) {
+    for (cpu = 0; cpu < EXTIOI_CPUS; cpu++) {
         memory_region_init_io(&s->extioi_iocsr_mem[cpu], OBJECT(s), &extioi_ops,
                               s, "extioi_iocsr", 0x900);
         sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->extioi_iocsr_mem[cpu]);
diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c
index bdba0f8107..d6ab91721e 100644
--- a/hw/intc/loongarch_ipi.c
+++ b/hw/intc/loongarch_ipi.c
@@ -77,31 +77,42 @@ static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr)
 
 static void ipi_send(uint64_t val)
 {
-    int cpuid, data;
+    uint32_t cpuid;
+    uint8_t vector;
     CPULoongArchState *env;
     CPUState *cs;
     LoongArchCPU *cpu;
 
-    cpuid = (val >> 16) & 0x3ff;
+    cpuid = extract32(val, 16, 10);
+    if (cpuid >= LOONGARCH_MAX_CPUS) {
+        trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid);
+        return;
+    }
+
     /* IPI status vector */
-    data = 1 << (val & 0x1f);
+    vector = extract8(val, 0, 5);
+
     cs = qemu_get_cpu(cpuid);
     cpu = LOONGARCH_CPU(cs);
     env = &cpu->env;
     address_space_stl(&env->address_space_iocsr, 0x1008,
-                      data, MEMTXATTRS_UNSPECIFIED, NULL);
-
+                      BIT(vector), MEMTXATTRS_UNSPECIFIED, NULL);
 }
 
 static void mail_send(uint64_t val)
 {
-    int cpuid;
+    uint32_t cpuid;
     hwaddr addr;
     CPULoongArchState *env;
     CPUState *cs;
     LoongArchCPU *cpu;
 
-    cpuid = (val >> 16) & 0x3ff;
+    cpuid = extract32(val, 16, 10);
+    if (cpuid >= LOONGARCH_MAX_CPUS) {
+        trace_loongarch_ipi_unsupported_cpuid("IOCSR_MAIL_SEND", cpuid);
+        return;
+    }
+
     addr = 0x1020 + (val & 0x1c);
     cs = qemu_get_cpu(cpuid);
     cpu = LOONGARCH_CPU(cs);
@@ -111,14 +122,21 @@ static void mail_send(uint64_t val)
 
 static void any_send(uint64_t val)
 {
-    int cpuid;
+    uint32_t cpuid;
     hwaddr addr;
     CPULoongArchState *env;
+    CPUState *cs;
+    LoongArchCPU *cpu;
+
+    cpuid = extract32(val, 16, 10);
+    if (cpuid >= LOONGARCH_MAX_CPUS) {
+        trace_loongarch_ipi_unsupported_cpuid("IOCSR_ANY_SEND", cpuid);
+        return;
+    }
 
-    cpuid = (val >> 16) & 0x3ff;
     addr = val & 0xffff;
-    CPUState *cs = qemu_get_cpu(cpuid);
-    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+    cs = qemu_get_cpu(cpuid);
+    cpu = LOONGARCH_CPU(cs);
     env = &cpu->env;
     send_ipi_data(env, val, addr);
 }
@@ -201,51 +219,43 @@ static const MemoryRegionOps loongarch_ipi64_ops = {
 
 static void loongarch_ipi_init(Object *obj)
 {
-    int cpu;
-    LoongArchMachineState *lams;
     LoongArchIPI *s = LOONGARCH_IPI(obj);
     SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
-    Object *machine = qdev_get_machine();
-    ObjectClass *mc = object_get_class(machine);
-
-    /* 'lams' should be initialized */
-    if (!strcmp(MACHINE_CLASS(mc)->name, "none")) {
-        return;
-    }
-    lams = LOONGARCH_MACHINE(machine);
-    for (cpu = 0; cpu < MAX_IPI_CORE_NUM; cpu++) {
-        memory_region_init_io(&s->ipi_iocsr_mem[cpu], obj, &loongarch_ipi_ops,
-                            &lams->ipi_core[cpu], "loongarch_ipi_iocsr", 0x48);
-        sysbus_init_mmio(sbd, &s->ipi_iocsr_mem[cpu]);
-
-        memory_region_init_io(&s->ipi64_iocsr_mem[cpu], obj, &loongarch_ipi64_ops,
-                              &lams->ipi_core[cpu], "loongarch_ipi64_iocsr", 0x118);
-        sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem[cpu]);
-        qdev_init_gpio_out(DEVICE(obj), &lams->ipi_core[cpu].irq, 1);
-    }
+
+    memory_region_init_io(&s->ipi_iocsr_mem, obj, &loongarch_ipi_ops,
+                          &s->ipi_core, "loongarch_ipi_iocsr", 0x48);
+
+    /* loongarch_ipi_iocsr performs re-entrant IO through ipi_send */
+    s->ipi_iocsr_mem.disable_reentrancy_guard = true;
+
+    sysbus_init_mmio(sbd, &s->ipi_iocsr_mem);
+
+    memory_region_init_io(&s->ipi64_iocsr_mem, obj, &loongarch_ipi64_ops,
+                          &s->ipi_core, "loongarch_ipi64_iocsr", 0x118);
+    sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem);
+    qdev_init_gpio_out(DEVICE(obj), &s->ipi_core.irq, 1);
 }
 
 static const VMStateDescription vmstate_ipi_core = {
     .name = "ipi-single",
-    .version_id = 0,
-    .minimum_version_id = 0,
+    .version_id = 1,
+    .minimum_version_id = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32(status, IPICore),
         VMSTATE_UINT32(en, IPICore),
         VMSTATE_UINT32(set, IPICore),
         VMSTATE_UINT32(clear, IPICore),
-        VMSTATE_UINT32_ARRAY(buf, IPICore, MAX_IPI_MBX_NUM * 2),
+        VMSTATE_UINT32_ARRAY(buf, IPICore, 2),
        VMSTATE_END_OF_LIST()
     }
 };
 
 static const VMStateDescription vmstate_loongarch_ipi = {
     .name = TYPE_LOONGARCH_IPI,
-    .version_id = 0,
-    .minimum_version_id = 0,
+    .version_id = 1,
+    .minimum_version_id = 1,
     .fields = (VMStateField[]) {
-        VMSTATE_STRUCT_ARRAY(ipi_core, LoongArchMachineState,
-                             MAX_IPI_CORE_NUM, 0,
-                             vmstate_ipi_core, IPICore),
+        VMSTATE_STRUCT(ipi_core, LoongArchIPI, 0, vmstate_ipi_core, IPICore),
         VMSTATE_END_OF_LIST()
     }
 };
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 50cadfb996..5c6094c457 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -292,6 +292,7 @@ sh_intc_set(int id, int enable) "setting interrupt group %d to %d"
 # loongarch_ipi.c
 loongarch_ipi_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64
 loongarch_ipi_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64
+loongarch_ipi_unsupported_cpuid(const char *s, uint32_t cpuid) "%s unsupported cpuid 0x%" PRIx32
 
 # loongarch_pch_pic.c
 loongarch_pch_pic_irq_handler(int irq, int level) "irq %d level %d"
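The reworked ipi_send() above decodes the IOCSR_IPI_SEND value with extract32()/extract8() and drops requests whose cpuid is out of range before storing BIT(vector) to the target CPU. Below is a minimal standalone sketch of that decode, for illustration only: the example value, the plain shift/mask arithmetic standing in for QEMU's extract32()/BIT() helpers, and the main() wrapper are assumptions; only the bit layout and the LOONGARCH_MAX_CPUS bound come from the hunks above.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LOONGARCH_MAX_CPUS 256            /* mirrors include/hw/loongarch/virt.h */

int main(void)
{
    uint64_t val = 0x0003000a;            /* hypothetical IOCSR_IPI_SEND value */
    uint32_t cpuid = (val >> 16) & 0x3ff; /* what extract32(val, 16, 10) returns */
    unsigned vector = val & 0x1f;         /* what extract8(val, 0, 5) returns */

    if (cpuid >= LOONGARCH_MAX_CPUS) {    /* out-of-range ids are now traced and dropped */
        fprintf(stderr, "unsupported cpuid 0x%" PRIx32 "\n", cpuid);
        return 1;
    }

    /* BIT(vector) is the status bit stored to the target CPU's IPI status register */
    printf("cpuid %" PRIu32 ", vector %u, status bit 0x%08x\n",
           cpuid, vector, 1u << vector);
    return 0;
}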
diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c
index f4bf14c1c8..2b7588e32a 100644
--- a/hw/loongarch/virt.c
+++ b/hw/loongarch/virt.c
@@ -565,9 +565,6 @@ static void loongarch_irq_init(LoongArchMachineState *lams)
     CPUState *cpu_state;
     int cpu, pin, i, start, num;
 
-    ipi = qdev_new(TYPE_LOONGARCH_IPI);
-    sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal);
-
     extioi = qdev_new(TYPE_LOONGARCH_EXTIOI);
     sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal);
 
@@ -598,17 +595,25 @@ static void loongarch_irq_init(LoongArchMachineState *lams)
         lacpu = LOONGARCH_CPU(cpu_state);
         env = &(lacpu->env);
 
+        ipi = qdev_new(TYPE_LOONGARCH_IPI);
+        sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal);
+
         /* connect ipi irq to cpu irq */
-        qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI));
+        qdev_connect_gpio_out(ipi, 0, qdev_get_gpio_in(cpudev, IRQ_IPI));
         /* IPI iocsr memory region */
         memory_region_add_subregion(&env->system_iocsr, SMP_IPI_MAILBOX,
                                     sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi),
-                                    cpu * 2));
+                                    0));
         memory_region_add_subregion(&env->system_iocsr, MAIL_SEND_ADDR,
                                     sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi),
-                                    cpu * 2 + 1));
-        /* extioi iocsr memory region */
-        memory_region_add_subregion(&env->system_iocsr, APIC_BASE,
+                                    1));
+        /*
+         * extioi iocsr memory region
+         * only one extioi is added on loongarch virt machine
+         * external device interrupt can only be routed to cpu 0-3
+         */
+        if (cpu < EXTIOI_CPUS)
+            memory_region_add_subregion(&env->system_iocsr, APIC_BASE,
                                 sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi),
                                 cpu));
     }
@@ -617,7 +622,7 @@ static void loongarch_irq_init(LoongArchMachineState *lams)
      * connect ext irq to the cpu irq
      * cpu_pin[9:2] <= intc_pin[7:0]
      */
-    for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
+    for (cpu = 0; cpu < MIN(ms->smp.cpus, EXTIOI_CPUS); cpu++) {
         cpudev = DEVICE(qemu_get_cpu(cpu));
         for (pin = 0; pin < LS3A_INTC_IP; pin++) {
             qdev_connect_gpio_out(extioi, (cpu * 8 + pin),
@@ -1026,7 +1031,7 @@ static void loongarch_class_init(ObjectClass *oc, void *data)
     mc->default_ram_size = 1 * GiB;
     mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464");
     mc->default_ram_id = "loongarch.ram";
-    mc->max_cpus = LOONGARCH_MAX_VCPUS;
+    mc->max_cpus = LOONGARCH_MAX_CPUS;
     mc->is_default = 1;
     mc->default_kernel_irqchip_split = false;
     mc->block_default_type = IF_VIRTIO;
diff --git a/include/exec/target_page.h b/include/exec/target_page.h
index 96726c36a4..bbf37aea17 100644
--- a/include/exec/target_page.h
+++ b/include/exec/target_page.h
@@ -18,4 +18,5 @@
 size_t qemu_target_page_size(void);
 int qemu_target_page_bits(void);
 int qemu_target_page_bits_min(void);
+size_t qemu_target_pages_to_MiB(size_t pages);
 #endif
diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h
index 15b8c999f6..fbdef9a7b3 100644
--- a/include/hw/intc/loongarch_extioi.h
+++ b/include/hw/intc/loongarch_extioi.h
@@ -14,6 +14,8 @@
 #define LS3A_INTC_IP 8
 #define EXTIOI_IRQS (256)
 #define EXTIOI_IRQS_BITMAP_SIZE (256 / 8)
+/* irq from EXTIOI is routed to no more than 4 cpus */
+#define EXTIOI_CPUS (4)
 /* map to ipnum per 32 irqs */
 #define EXTIOI_IRQS_IPMAP_SIZE (256 / 32)
 #define EXTIOI_IRQS_COREMAP_SIZE 256
@@ -46,17 +48,17 @@ struct LoongArchExtIOI {
     uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2];
     uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT];
     uint32_t isr[EXTIOI_IRQS / 32];
-    uint32_t coreisr[LOONGARCH_MAX_VCPUS][EXTIOI_IRQS_GROUP_COUNT];
+    uint32_t coreisr[EXTIOI_CPUS][EXTIOI_IRQS_GROUP_COUNT];
     uint32_t enable[EXTIOI_IRQS / 32];
     uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4];
     uint32_t coremap[EXTIOI_IRQS / 4];
     uint32_t sw_pending[EXTIOI_IRQS / 32];
-    DECLARE_BITMAP(sw_isr[LOONGARCH_MAX_VCPUS][LS3A_INTC_IP], EXTIOI_IRQS);
+    DECLARE_BITMAP(sw_isr[EXTIOI_CPUS][LS3A_INTC_IP], EXTIOI_IRQS);
     uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE];
     uint8_t sw_coremap[EXTIOI_IRQS];
-    qemu_irq parent_irq[LOONGARCH_MAX_VCPUS][LS3A_INTC_IP];
+    qemu_irq parent_irq[EXTIOI_CPUS][LS3A_INTC_IP];
     qemu_irq irq[EXTIOI_IRQS];
-    MemoryRegion extioi_iocsr_mem[LOONGARCH_MAX_VCPUS];
+    MemoryRegion extioi_iocsr_mem[EXTIOI_CPUS];
     MemoryRegion extioi_system_mem;
 };
 #endif /* LOONGARCH_EXTIOI_H */
diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h
index 0ee48fca55..664e050b92 100644
--- a/include/hw/intc/loongarch_ipi.h
+++ b/include/hw/intc/loongarch_ipi.h
@@ -28,9 +28,6 @@
 #define MAIL_SEND_OFFSET 0
 #define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND)
 
-#define MAX_IPI_CORE_NUM 4
-#define MAX_IPI_MBX_NUM 4
-
 #define TYPE_LOONGARCH_IPI "loongarch_ipi"
 OBJECT_DECLARE_SIMPLE_TYPE(LoongArchIPI, LOONGARCH_IPI)
 
@@ -40,14 +37,15 @@ typedef struct IPICore {
     uint32_t set;
     uint32_t clear;
     /* 64bit buf divide into 2 32bit buf */
-    uint32_t buf[MAX_IPI_MBX_NUM * 2];
+    uint32_t buf[2];
     qemu_irq irq;
 } IPICore;
 
 struct LoongArchIPI {
     SysBusDevice parent_obj;
-    MemoryRegion ipi_iocsr_mem[MAX_IPI_CORE_NUM];
-    MemoryRegion ipi64_iocsr_mem[MAX_IPI_CORE_NUM];
+    MemoryRegion ipi_iocsr_mem;
+    MemoryRegion ipi64_iocsr_mem;
+    IPICore ipi_core;
 };
 
 #endif
diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h
index 7ae8a91229..f1659655c6 100644
--- a/include/hw/loongarch/virt.h
+++ b/include/hw/loongarch/virt.h
@@ -14,7 +14,7 @@
 #include "hw/intc/loongarch_ipi.h"
 #include "hw/block/flash.h"
 
-#define LOONGARCH_MAX_VCPUS     4
+#define LOONGARCH_MAX_CPUS      256
 
 #define VIRT_ISA_IO_BASE        0x18000000UL
 #define VIRT_ISA_IO_SIZE        0x0004000
@@ -36,7 +36,6 @@ struct LoongArchMachineState {
     /*< private >*/
     MachineState parent_obj;
 
-    IPICore ipi_core[MAX_IPI_CORE_NUM];
     MemoryRegion lowmem;
     MemoryRegion highmem;
     MemoryRegion isa_io;
diff --git a/migration/block.c b/migration/block.c
index a37678ce95..12617b4152 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -747,7 +747,7 @@ static int block_save_setup(QEMUFile *f, void *opaque)
 static int block_save_iterate(QEMUFile *f, void *opaque)
 {
     int ret;
-    uint64_t last_bytes = qemu_file_total_transferred(f);
+    uint64_t last_bytes = qemu_file_transferred(f);
 
     trace_migration_block_save("iterate", block_mig_state.submitted,
                                block_mig_state.transferred);
@@ -799,7 +799,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
     }
 
     qemu_put_be64(f, BLK_MIG_FLAG_EOS);
-    uint64_t delta_bytes = qemu_file_total_transferred(f) - last_bytes;
+    uint64_t delta_bytes = qemu_file_transferred(f) - last_bytes;
     return (delta_bytes > 0);
 }
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 388337a332..c06f12c39d 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -14,9 +14,8 @@
 #include "qemu/error-report.h"
 #include <zlib.h>
 #include "qapi/error.h"
-#include "cpu.h"
 #include "exec/ramblock.h"
-#include "exec/ram_addr.h"
+#include "exec/target_page.h"
 #include "qemu/rcu_queue.h"
 #include "qemu/main-loop.h"
 #include "qapi/qapi-commands-migration.h"
@@ -29,6 +28,7 @@
 #include "sysemu/kvm.h"
 #include "sysemu/runstate.h"
 #include "exec/memory.h"
+#include "qemu/xxhash.h"
 
 /*
  * total_dirty_pages is procted by BQL and is used
@@ -74,13 +74,11 @@ static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
 static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                       int64_t calc_time_ms)
 {
-    uint64_t memory_size_MB;
     uint64_t increased_dirty_pages =
         dirty_pages.end_pages - dirty_pages.start_pages;
+    uint64_t memory_size_MiB = qemu_target_pages_to_MiB(increased_dirty_pages);
 
-    memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20;
-
-    return memory_size_MB * 1000 / calc_time_ms;
+    return memory_size_MiB * 1000 / calc_time_ms;
 }
 
 void global_dirty_log_change(unsigned int flag, bool start)
@@ -291,8 +289,8 @@ static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
     DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
     DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
     /* size of total pages in MB */
-    DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages *
-                                                   TARGET_PAGE_SIZE) >> 20;
+    DirtyStat.page_sampling.total_block_mem_MB +=
+                                 qemu_target_pages_to_MiB(info->ramblock_pages);
 }
 
 static void update_dirtyrate(uint64_t msec)
@@ -309,19 +307,47 @@ static void update_dirtyrate(uint64_t msec)
 }
 
 /*
+ * Compute hash of a single page of size TARGET_PAGE_SIZE.
+ */
+static uint32_t compute_page_hash(void *ptr)
+{
+    size_t page_size = qemu_target_page_size();
+    uint32_t i;
+    uint64_t v1, v2, v3, v4;
+    uint64_t res;
+    const uint64_t *p = ptr;
+
+    v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
+    v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
+    v3 = QEMU_XXHASH_SEED + 0;
+    v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
+    for (i = 0; i < page_size / 8; i += 4) {
+        v1 = XXH64_round(v1, p[i + 0]);
+        v2 = XXH64_round(v2, p[i + 1]);
+        v3 = XXH64_round(v3, p[i + 2]);
+        v4 = XXH64_round(v4, p[i + 3]);
+    }
+    res = XXH64_mergerounds(v1, v2, v3, v4);
+    res += page_size;
+    res = XXH64_avalanche(res);
+    return (uint32_t)(res & UINT32_MAX);
+}
+
+
+/*
  * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
  * in ramblock, which starts from ramblock base address.
  */
 static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                       uint64_t vfn)
 {
-    uint32_t crc;
+    uint32_t hash;
 
-    crc = crc32(0, (info->ramblock_addr +
-                    vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
+    hash = compute_page_hash(info->ramblock_addr +
+                             vfn * qemu_target_page_size());
 
-    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
-    return crc;
+    trace_get_ramblock_vfn_hash(info->idstr, vfn, hash);
+    return hash;
 }
 
 static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
@@ -373,7 +399,7 @@ static void get_ramblock_dirty_info(RAMBlock *block,
                                       sample_pages_per_gigabytes) >> 30;
     /* Right shift TARGET_PAGE_BITS to calc page count */
     info->ramblock_pages = qemu_ram_get_used_length(block) >>
-                           TARGET_PAGE_BITS;
+                           qemu_target_page_bits();
     info->ramblock_addr = qemu_ram_get_host_addr(block);
     strcpy(info->idstr, qemu_ram_get_idstr(block));
 }
@@ -454,13 +480,13 @@ out:
 
 static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
 {
-    uint32_t crc;
+    uint32_t hash;
     int i;
 
     for (i = 0; i < info->sample_pages_count; i++) {
-        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
-        if (crc != info->hash_result[i]) {
-            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
+        hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
+        if (hash != info->hash_result[i]) {
+            trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]);
             info->sample_dirty_count++;
         }
     }
@@ -484,7 +510,7 @@ find_block_matched(RAMBlock *block, int count,
 
     if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
         infos[i].ramblock_pages !=
-            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
+            (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) {
         trace_find_page_matched(block->idstr);
         return NULL;
     }
diff --git a/migration/meson.build b/migration/meson.build
index eb41b77db9..dc8b1daef5 100644
--- a/migration/meson.build
+++ b/migration/meson.build
@@ -13,6 +13,7 @@ softmmu_ss.add(files(
   'block-dirty-bitmap.c',
   'channel.c',
   'channel-block.c',
+  'dirtyrate.c',
   'exec.c',
   'fd.c',
   'global_state.c',
@@ -42,6 +43,5 @@ endif
 softmmu_ss.add(when: zstd, if_true: files('multifd-zstd.c'))
 
 specific_ss.add(when: 'CONFIG_SOFTMMU',
-                if_true: files('dirtyrate.c',
-                               'ram.c',
+                if_true: files('ram.c',
                                'target.c'))
diff --git a/migration/migration.c b/migration/migration.c
index 439e8651df..00d8ba8da0 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2140,12 +2140,7 @@ static int postcopy_start(MigrationState *ms)
      * will notice we're in POSTCOPY_ACTIVE and not actually
      * wrap their state up here
      */
-    /* 0 max-postcopy-bandwidth means unlimited */
-    if (!bandwidth) {
-        qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
-    } else {
-        qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO);
-    }
+    qemu_file_set_rate_limit(ms->to_dst_file, bandwidth);
     if (migrate_postcopy_ram()) {
         /* Ping just for debugging, helps line traces up */
         qemu_savevm_send_ping(ms->to_dst_file, 2);
@@ -2653,7 +2648,7 @@ static MigThrError migration_detect_error(MigrationState *s)
 /* How many bytes have we transferred since the beginning of the migration */
 static uint64_t migration_total_bytes(MigrationState *s)
 {
-    return qemu_file_total_transferred(s->to_dst_file) +
+    return qemu_file_transferred(s->to_dst_file) +
            stat64_get(&mig_stats.multifd_bytes);
 }
 
@@ -3236,11 +3231,10 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
 
     if (resume) {
         /* This is a resumed migration */
-        rate_limit = migrate_max_postcopy_bandwidth() /
-            XFER_LIMIT_RATIO;
+        rate_limit = migrate_max_postcopy_bandwidth();
     } else {
         /* This is a fresh new migration */
-        rate_limit = migrate_max_bandwidth() / XFER_LIMIT_RATIO;
+        rate_limit = migrate_max_bandwidth();
 
         /* Notify before starting migration thread */
         notifier_list_notify(&migration_state_notifiers, s);
diff --git a/migration/options.c b/migration/options.c
index 7ed88b7b32..c2a278ee2d 100644
--- a/migration/options.c
+++ b/migration/options.c
@@ -1243,7 +1243,7 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
         s->parameters.max_bandwidth = params->max_bandwidth;
         if (s->to_dst_file && !migration_in_postcopy()) {
             qemu_file_set_rate_limit(s->to_dst_file,
-                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
+                                     s->parameters.max_bandwidth);
         }
     }
 
@@ -1273,7 +1273,7 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
         s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth;
         if (s->to_dst_file && migration_in_postcopy()) {
             qemu_file_set_rate_limit(s->to_dst_file,
-                        s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO);
+                                     s->parameters.max_postcopy_bandwidth);
         }
     }
     if (params->has_max_cpu_throttle) {
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index 61fb580342..597054759d 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -29,6 +29,7 @@
 #include "migration.h"
 #include "qemu-file.h"
 #include "trace.h"
+#include "options.h"
 #include "qapi/error.h"
 
 #define IO_BUF_SIZE 32768
@@ -43,12 +44,12 @@ struct QEMUFile {
      * Maximum amount of data in bytes to transfer during one
      * rate limiting time window
      */
-    int64_t rate_limit_max;
+    uint64_t rate_limit_max;
     /*
      * Total amount of data in bytes queued for transfer
      * during this rate limiting time window
      */
-    int64_t rate_limit_used;
+    uint64_t rate_limit_used;
 
     /* The sum of bytes transferred on the wire */
     uint64_t total_transferred;
@@ -708,7 +709,7 @@ int coroutine_mixed_fn qemu_get_byte(QEMUFile *f)
     return result;
 }
 
-uint64_t qemu_file_total_transferred_fast(QEMUFile *f)
+uint64_t qemu_file_transferred_fast(QEMUFile *f)
 {
     uint64_t ret = f->total_transferred;
     int i;
@@ -720,7 +721,7 @@ uint64_t qemu_file_total_transferred_fast(QEMUFile *f)
     return ret;
 }
 
-uint64_t qemu_file_total_transferred(QEMUFile *f)
+uint64_t qemu_file_transferred(QEMUFile *f)
 {
     qemu_fflush(f);
     return f->total_transferred;
@@ -737,14 +738,17 @@ int qemu_file_rate_limit(QEMUFile *f)
     return 0;
 }
 
-int64_t qemu_file_get_rate_limit(QEMUFile *f)
+uint64_t qemu_file_get_rate_limit(QEMUFile *f)
 {
     return f->rate_limit_max;
 }
 
-void qemu_file_set_rate_limit(QEMUFile *f, int64_t limit)
+void qemu_file_set_rate_limit(QEMUFile *f, uint64_t limit)
 {
-    f->rate_limit_max = limit;
+    /*
+     * 'limit' is per second.  But we check it each 100 miliseconds.
+     */
+    f->rate_limit_max = limit / XFER_LIMIT_RATIO;
 }
 
 void qemu_file_reset_rate_limit(QEMUFile *f)
@@ -752,7 +756,7 @@ void qemu_file_reset_rate_limit(QEMUFile *f)
     f->rate_limit_used = 0;
 }
 
-void qemu_file_acct_rate_limit(QEMUFile *f, int64_t len)
+void qemu_file_acct_rate_limit(QEMUFile *f, uint64_t len)
 {
     f->rate_limit_used += len;
 }
diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index 4ee58a87dd..bcc39081f2 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -68,7 +68,7 @@ void qemu_file_set_hooks(QEMUFile *f, const QEMUFileHooks *hooks);
 int qemu_fclose(QEMUFile *f);
 
 /*
- * qemu_file_total_transferred:
+ * qemu_file_transferred:
  *
  * Report the total number of bytes transferred with
  * this file.
@@ -83,19 +83,19 @@ int qemu_fclose(QEMUFile *f);
  *
  * Returns: the total bytes transferred
  */
-uint64_t qemu_file_total_transferred(QEMUFile *f);
+uint64_t qemu_file_transferred(QEMUFile *f);
 
 /*
- * qemu_file_total_transferred_fast:
+ * qemu_file_transferred_fast:
  *
- * As qemu_file_total_transferred except for writable
+ * As qemu_file_transferred except for writable
  * files, where no flush is performed and the reported
  * amount will include the size of any queued buffers,
  * on top of the amount actually transferred.
 *
 * Returns: the total bytes transferred and queued
 */
-uint64_t qemu_file_total_transferred_fast(QEMUFile *f);
+uint64_t qemu_file_transferred_fast(QEMUFile *f);
 
 /*
  * put_buffer without copying the buffer.
@@ -138,9 +138,9 @@ void qemu_file_reset_rate_limit(QEMUFile *f);
 * out of band from the main file object I/O methods, and
 * need to be applied to the rate limiting calcuations
 */
-void qemu_file_acct_rate_limit(QEMUFile *f, int64_t len);
-void qemu_file_set_rate_limit(QEMUFile *f, int64_t new_rate);
-int64_t qemu_file_get_rate_limit(QEMUFile *f);
+void qemu_file_acct_rate_limit(QEMUFile *f, uint64_t len);
+void qemu_file_set_rate_limit(QEMUFile *f, uint64_t new_rate);
+uint64_t qemu_file_get_rate_limit(QEMUFile *f);
 int qemu_file_get_error_obj(QEMUFile *f, Error **errp);
 int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp);
 void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err);
diff --git a/migration/savevm.c b/migration/savevm.c
index 032044b1d5..e33788343a 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -927,9 +927,9 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se)
 static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se,
                                    JSONWriter *vmdesc)
 {
-    uint64_t old_offset = qemu_file_total_transferred_fast(f);
+    uint64_t old_offset = qemu_file_transferred_fast(f);
     se->ops->save_state(f, se->opaque);
-    uint64_t size = qemu_file_total_transferred_fast(f) - old_offset;
+    uint64_t size = qemu_file_transferred_fast(f) - old_offset;
 
     if (vmdesc) {
         json_writer_int64(vmdesc, "size", size);
@@ -2956,7 +2956,7 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
         goto the_end;
     }
     ret = qemu_savevm_state(f, errp);
-    vm_state_size = qemu_file_total_transferred(f);
+    vm_state_size = qemu_file_transferred(f);
     ret2 = qemu_fclose(f);
     if (ret < 0) {
         goto the_end;
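A note on the qemu-file.c/qemu-file.h hunks above: qemu_file_set_rate_limit() now takes the requested bandwidth in plain bytes per second and performs the division by XFER_LIMIT_RATIO itself, so the callers in migration.c and options.c no longer divide. The sketch below only illustrates that bookkeeping and is not the QEMU implementation; the value 10 for XFER_LIMIT_RATIO (one check per 100 ms, per the new comment in the hunk), the helper names and the example numbers are assumptions.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define XFER_LIMIT_RATIO 10          /* assumed: the limit is checked every 100 ms */

static uint64_t rate_limit_max;      /* bytes allowed per checking window */
static uint64_t rate_limit_used;     /* bytes queued in the current window */

/* Callers now pass plain bytes/second; the division happens in one place. */
static void set_rate_limit(uint64_t bytes_per_second)
{
    rate_limit_max = bytes_per_second / XFER_LIMIT_RATIO;
}

static int rate_limit_exceeded(void)
{
    return rate_limit_used > rate_limit_max;
}

int main(void)
{
    set_rate_limit(100 * 1024 * 1024);   /* ask for 100 MiB/s */
    rate_limit_used = 12 * 1024 * 1024;  /* pretend 12 MiB were queued this window */
    printf("window budget %" PRIu64 " bytes, exceeded: %d\n",
           rate_limit_max, rate_limit_exceeded());
    return 0;
}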
diff --git a/migration/trace-events b/migration/trace-events
index 92161eeac5..f39818c329 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -342,8 +342,8 @@ dirty_bitmap_load_success(void) ""
 # dirtyrate.c
 dirtyrate_set_state(const char *new_state) "new state %s"
 query_dirty_rate_info(const char *new_state) "current state %s"
-get_ramblock_vfn_hash(const char *idstr, uint64_t vfn, uint32_t crc) "ramblock name: %s, vfn: %"PRIu64 ", crc: %" PRIu32
-calc_page_dirty_rate(const char *idstr, uint32_t new_crc, uint32_t old_crc) "ramblock name: %s, new crc: %" PRIu32 ", old crc: %" PRIu32
+get_ramblock_vfn_hash(const char *idstr, uint64_t vfn, uint32_t hash) "ramblock name: %s, vfn: %"PRIu64 ", hash: %" PRIu32
+calc_page_dirty_rate(const char *idstr, uint32_t new_hash, uint32_t old_hash) "ramblock name: %s, new hash: %" PRIu32 ", old hash: %" PRIu32
 skip_sample_ramblock(const char *idstr, uint64_t ramblock_size) "ramblock name: %s, ramblock size: %" PRIu64
 find_page_matched(const char *idstr) "ramblock %s addr or size changed"
 dirtyrate_calculate(int64_t dirtyrate) "dirty rate: %" PRIi64 " MB/s"
diff --git a/migration/vmstate.c b/migration/vmstate.c
index 351f56104e..af01d54b6f 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -361,7 +361,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
             void *curr_elem = first_elem + size * i;
 
             vmsd_desc_field_start(vmsd, vmdesc_loop, field, i, n_elems);
-            old_offset = qemu_file_total_transferred_fast(f);
+            old_offset = qemu_file_transferred_fast(f);
             if (field->flags & VMS_ARRAY_OF_POINTER) {
                 assert(curr_elem);
                 curr_elem = *(void **)curr_elem;
@@ -391,8 +391,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
                 return ret;
             }
 
-            written_bytes = qemu_file_total_transferred_fast(f) -
-                            old_offset;
+            written_bytes = qemu_file_transferred_fast(f) - old_offset;
             vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes, i);
 
             /* Compressed arrays only care about the first element */
diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index 71bf6dc7a4..015a9038d1 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -235,20 +235,15 @@ bool dirtylimit_vcpu_index_valid(int cpu_index)
 static uint64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate)
 {
     static uint64_t max_dirtyrate;
-    unsigned target_page_bits = qemu_target_page_bits();
-    uint64_t dirty_ring_size_MB;
+    uint64_t dirty_ring_size_MiB;
 
-    /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */
-    assert(target_page_bits < 20);
-
-    /* Convert ring size (pages) to MiB (2**20). */
-    dirty_ring_size_MB = kvm_dirty_ring_size() >> (20 - target_page_bits);
+    dirty_ring_size_MiB = qemu_target_pages_to_MiB(kvm_dirty_ring_size());
 
     if (max_dirtyrate < dirtyrate) {
         max_dirtyrate = dirtyrate;
     }
 
-    return dirty_ring_size_MB * 1000000 / max_dirtyrate;
+    return dirty_ring_size_MiB * 1000000 / max_dirtyrate;
 }
 
 static inline bool dirtylimit_done(uint64_t quota,
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 0e0182d9f2..efaed36773 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3357,6 +3357,17 @@ int qemu_target_page_bits_min(void)
     return TARGET_PAGE_BITS_MIN;
 }
 
+/* Convert target pages to MiB (2**20). */
+size_t qemu_target_pages_to_MiB(size_t pages)
+{
+    int page_bits = TARGET_PAGE_BITS;
+
+    /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */
+    g_assert(page_bits < 20);
+
+    return pages >> (20 - page_bits);
+}
+
 bool cpu_physical_memory_is_io(hwaddr phys_addr)
 {
     MemoryRegion*mr;
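The qemu_target_pages_to_MiB() helper added to softmmu/physmem.c above centralises the page-count-to-MiB conversion that dirtyrate.c and dirtylimit.c previously open-coded: shift right by (20 - TARGET_PAGE_BITS). A small worked example of that arithmetic under assumed page sizes (4 KiB and 16 KiB; the real shift uses the per-target TARGET_PAGE_BITS), using a hypothetical pages_to_MiB() stand-in rather than the QEMU function:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static size_t pages_to_MiB(size_t pages, int page_bits)
{
    /* same guard as the new helper: largest (non-huge) target page is 64 KiB */
    assert(page_bits < 20);
    return pages >> (20 - page_bits);
}

int main(void)
{
    printf("%zu MiB\n", pages_to_MiB(262144, 12));  /* 262144 * 4 KiB  = 1024 MiB */
    printf("%zu MiB\n", pages_to_MiB(262144, 14));  /* 262144 * 16 KiB = 4096 MiB */
    return 0;
}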
diff --git a/tests/avocado/machine_loongarch.py b/tests/avocado/machine_loongarch.py
new file mode 100644
index 0000000000..7d8a3c1fa5
--- /dev/null
+++ b/tests/avocado/machine_loongarch.py
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# LoongArch virt test.
+#
+# Copyright (c) 2023 Loongson Technology Corporation Limited
+#
+
+from avocado_qemu import QemuSystemTest
+from avocado_qemu import exec_command_and_wait_for_pattern
+from avocado_qemu import wait_for_console_pattern
+
+class LoongArchMachine(QemuSystemTest):
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+    timeout = 120
+
+    def wait_for_console_pattern(self, success_message, vm=None):
+        wait_for_console_pattern(self, success_message,
+                                 failure_message='Kernel panic - not syncing',
+                                 vm=vm)
+
+    def test_loongarch64_devices(self):
+
+        """
+        :avocado: tags=arch:loongarch64
+        :avocado: tags=machine:virt
+        """
+
+        kernel_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+                      'releases/download/binary-files/vmlinuz.efi')
+        kernel_hash = '951b485b16e3788b6db03a3e1793c067009e31a2'
+        kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
+
+        initrd_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+                      'releases/download/binary-files/ramdisk')
+        initrd_hash = 'c67658d9b2a447ce7db2f73ba3d373c9b2b90ab2'
+        initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
+
+        bios_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+                    'releases/download/binary-files/QEMU_EFI.fd')
+        bios_hash = ('dfc1bfba4853cd763b9d392d0031827e8addbca8')
+        bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash)
+
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'root=/dev/ram rdinit=/sbin/init console=ttyS0,115200')
+        self.vm.add_args('-nographic',
+                         '-smp', '4',
+                         '-m', '1024',
+                         '-cpu', 'la464',
+                         '-kernel', kernel_path,
+                         '-initrd', initrd_path,
+                         '-bios', bios_path,
+                         '-append', kernel_command_line)
+        self.vm.launch()
+        self.wait_for_console_pattern('Run /sbin/init as init process')
+        exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+                                          'processor  : 3')