Diffstat (limited to 'include/exec')
-rw-r--r--  include/exec/memory.h   | 106
-rw-r--r--  include/exec/ram_addr.h |  15
2 files changed, 110 insertions, 11 deletions
diff --git a/include/exec/memory.h b/include/exec/memory.h
index e39256ad03..99e0f54d86 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -55,6 +55,8 @@ typedef enum {
     IOMMU_RW = 3,
 } IOMMUAccessFlags;
 
+#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
+
 struct IOMMUTLBEntry {
     AddressSpace    *target_as;
     hwaddr           iova;
@@ -77,13 +79,30 @@ typedef enum {
 
 #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
 
+struct IOMMUNotifier;
+typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
+                            IOMMUTLBEntry *data);
+
 struct IOMMUNotifier {
-    void (*notify)(struct IOMMUNotifier *notifier, IOMMUTLBEntry *data);
+    IOMMUNotify notify;
     IOMMUNotifierFlag notifier_flags;
+    /* Notify for address space range start <= addr <= end */
+    hwaddr start;
+    hwaddr end;
     QLIST_ENTRY(IOMMUNotifier) node;
 };
 typedef struct IOMMUNotifier IOMMUNotifier;
 
+static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
+                                       IOMMUNotifierFlag flags,
+                                       hwaddr start, hwaddr end)
+{
+    n->notify = fn;
+    n->notifier_flags = flags;
+    n->start = start;
+    n->end = end;
+}
+
 /* New-style MMIO accessors can indicate that the transaction failed.
  * A zero (MEMTX_OK) response means success; anything else is a failure
  * of some kind. The memory subsystem will bitwise-OR together results
@@ -174,6 +193,8 @@ struct MemoryRegionIOMMUOps {
     void (*notify_flag_changed)(MemoryRegion *iommu,
                                 IOMMUNotifierFlag old_flags,
                                 IOMMUNotifierFlag new_flags);
+    /* Set this up to provide customized IOMMU replay function */
+    void (*replay)(MemoryRegion *iommu, IOMMUNotifier *notifier);
 };
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
@@ -222,6 +243,9 @@ struct MemoryRegion {
     IOMMUNotifierFlag iommu_notify_flags;
 };
 
+#define IOMMU_NOTIFIER_FOREACH(n, mr) \
+    QLIST_FOREACH((n), &(mr)->iommu_notify, node)
+
 /**
  * MemoryListener: callbacks structure for updates to the physical memory map
  *
@@ -668,6 +692,21 @@ void memory_region_notify_iommu(MemoryRegion *mr,
                                 IOMMUTLBEntry entry);
 
 /**
+ * memory_region_notify_one: notify a change in an IOMMU translation
+ *                           entry to a single notifier
+ *
+ * This works just like memory_region_notify_iommu(), but it only
+ * notifies a specific notifier, not all of them.
+ *
+ * @notifier: the notifier to be notified
+ * @entry: the new entry in the IOMMU translation table.  The entry
+ *         replaces all old entries for the same virtual I/O address range.
+ *         Deleted entries have .@perm == 0.
+ */
+void memory_region_notify_one(IOMMUNotifier *notifier,
+                              IOMMUTLBEntry *entry);
+
+/**
  * memory_region_register_iommu_notifier: register a notifier for changes to
  * IOMMU translation entries.
  *
@@ -693,6 +732,14 @@ void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
                                 bool is_write);
 
 /**
+ * memory_region_iommu_replay_all: replay existing IOMMU translations
+ * to all the notifiers registered.
+ *
+ * @mr: the memory region to observe
+ */
+void memory_region_iommu_replay_all(MemoryRegion *mr);
+
+/**
  * memory_region_unregister_iommu_notifier: unregister a notifier for
  * changes to IOMMU translation entries.
  *
@@ -871,6 +918,53 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
  */
 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                         hwaddr size, unsigned client);
+
+/**
+ * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
+ *                                         bitmap and clear it.
+ *
+ * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
+ * returns the snapshot.  The snapshot can then be used to query dirty
+ * status, using memory_region_snapshot_get_dirty.  Unlike
+ * memory_region_test_and_clear_dirty this allows to query the same
+ * page multiple times, which is especially useful for display updates
+ * where the scanlines often are not page aligned.
+ *
+ * The dirty bitmap region which gets copyed into the snapshot (and
+ * cleared afterwards) can be larger than requested.  The boundaries
+ * are rounded up/down so complete bitmap longs (covering 64 pages on
+ * 64bit hosts) can be copied over into the bitmap snapshot.  Which
+ * isn't a problem for display updates as the extra pages are outside
+ * the visible area, and in case the visible area changes a full
+ * display redraw is due anyway.  Should other use cases for this
+ * function emerge we might have to revisit this implementation
+ * detail.
+ *
+ * Use g_free to release DirtyBitmapSnapshot.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
+ */
+DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
+                                                            hwaddr addr,
+                                                            hwaddr size,
+                                                            unsigned client);
+
+/**
+ * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
+ *                                   in the specified dirty bitmap snapshot.
+ *
+ * @mr: the memory region being queried.
+ * @snap: the dirty bitmap snapshot
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ */
+bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
+                                      DirtyBitmapSnapshot *snap,
+                                      hwaddr addr, hwaddr size);
+
 /**
  * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
  * any external TLBs (e.g. kvm)
@@ -1426,13 +1520,11 @@ void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
 
 struct MemoryRegionCache {
     hwaddr xlat;
-    void *ptr;
     hwaddr len;
-    MemoryRegion *mr;
-    bool is_write;
+    AddressSpace *as;
 };
 
-#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mr = NULL })
+#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
 
 /* address_space_cache_init: prepare for repeated access to a physical
  * memory region
@@ -1688,7 +1780,7 @@ address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, int len)
 {
     assert(addr < cache->len && len <= cache->len - addr);
-    memcpy(buf, cache->ptr + addr, len);
+    address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
 }
 
 /**
@@ -1704,7 +1796,7 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                            void *buf, int len)
 {
     assert(addr < cache->len && len <= cache->len - addr);
-    memcpy(cache->ptr + addr, buf, len);
+    address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
 }
 
 #endif
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 29647303b0..dbe2f08d47 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -53,7 +53,7 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
 }
 
 long qemu_getrampagesize(void);
-ram_addr_t last_ram_offset(void);
+unsigned long last_ram_page(void);
 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
@@ -343,6 +343,13 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                               ram_addr_t length,
                                               unsigned client);
 
+DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
+    (ram_addr_t start, ram_addr_t length, unsigned client);
+
+bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
+                                            ram_addr_t start,
+                                            ram_addr_t length);
+
 static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                          ram_addr_t length)
 {
@@ -354,11 +361,13 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
 
 static inline
 uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
+                                               RAMBlock *rb,
                                                ram_addr_t start,
                                                ram_addr_t length,
-                                               int64_t *real_dirty_pages)
+                                               uint64_t *real_dirty_pages)
 {
     ram_addr_t addr;
+    start = rb->offset + start;
     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
     uint64_t num_dirty = 0;
@@ -411,7 +420,5 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
 
     return num_dirty;
 }
-
-void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
 #endif
 #endif
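
The ranged notifier API added to memory.h above (iommu_notifier_init() together with the new start/end fields, IOMMU_NOTIFIER_ALL and memory_region_iommu_replay_all()) is meant to be used along these lines. This is a minimal sketch rather than code from the patch; iommu_mr, my_iommu_notify() and the 4 GiB window are placeholders.

#include "qemu/osdep.h"
#include "exec/memory.h"

/* Hypothetical callback: runs for MAP and UNMAP events whose iova falls
 * inside the [start, end] range passed to iommu_notifier_init(). */
static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    if (iotlb->perm == IOMMU_NONE) {
        /* [iova, iova + addr_mask] was invalidated. */
    } else {
        /* New translation: iova -> translated_addr, access rights in perm. */
    }
}

static IOMMUNotifier my_notifier;

/* Sketch: watch a 4 GiB window of the I/O address space behind a
 * hypothetical IOMMU memory region 'iommu_mr'. */
static void my_register_notifier(MemoryRegion *iommu_mr)
{
    iommu_notifier_init(&my_notifier, my_iommu_notify,
                        IOMMU_NOTIFIER_ALL,        /* MAP and UNMAP */
                        0, (1ULL << 32) - 1);      /* start <= addr <= end */
    memory_region_register_iommu_notifier(iommu_mr, &my_notifier);

    /* Replay the IOMMU's current mappings into every registered
     * notifier; this goes through the custom ->replay hook if the
     * region provides one. */
    memory_region_iommu_replay_all(iommu_mr);
}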
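
On the IOMMU-model side, the new MemoryRegionIOMMUOps.replay hook and memory_region_notify_one() let an implementation feed its current translations to a single notifier instead of broadcasting with memory_region_notify_iommu(). The sketch below assumes a single hypothetical 4 KiB identity mapping; a real model would walk its own page tables between n->start and n->end.

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

static void my_iommu_replay(MemoryRegion *iommu, IOMMUNotifier *n)
{
    /* Placeholder for a page-table walk: pretend one 4 KiB page at the
     * start of the notifier's window is currently mapped 1:1. */
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = n->start,
        .translated_addr = n->start,              /* identity map for the sketch */
        .addr_mask = 0xfff,                       /* 4 KiB page */
        .perm = IOMMU_ACCESS_FLAG(true, true),    /* IOMMU_RW */
    };

    /* Deliver the entry to this notifier only. */
    memory_region_notify_one(n, &entry);
}

static const MemoryRegionIOMMUOps my_iommu_ops = {
    /* .translate and friends omitted */
    .replay = my_iommu_replay,
};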
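
The dirty bitmap snapshot pair is aimed at display updates, where scanlines are queried individually and are rarely page aligned. A sketch of the intended pattern; vram, the geometry arguments and the commented-out redraw_scanline() are placeholders.

#include "qemu/osdep.h"
#include "exec/memory.h"

static void my_refresh_display(MemoryRegion *vram, int height, size_t stride)
{
    DirtyBitmapSnapshot *snap;
    int y;

    /* Grab and clear the VGA dirty bits for the whole framebuffer.  The
     * snapshot may cover a bit more than requested (whole bitmap longs),
     * which is harmless here. */
    snap = memory_region_snapshot_and_clear_dirty(vram, 0,
                                                  (hwaddr)height * stride,
                                                  DIRTY_MEMORY_VGA);

    /* Unlike memory_region_test_and_clear_dirty(), the same pages can be
     * queried as often as needed, once per scanline in this case. */
    for (y = 0; y < height; y++) {
        if (memory_region_snapshot_get_dirty(vram, snap,
                                             (hwaddr)y * stride, stride)) {
            /* redraw_scanline(y); */
        }
    }

    g_free(snap);
}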
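
With MemoryRegionCache reduced to an AddressSpace plus a translated base, the cached accessors above simply forward to address_space_read()/address_space_write(). A sketch of typical use; desc_gpa and the 16-byte descriptor are placeholders, and address_space_cache_init() is the existing initializer documented further up in memory.h.

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

static void my_read_descriptor(hwaddr desc_gpa)
{
    MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
    uint8_t desc[16];

    /* Set up the cache for repeated reads of one 16-byte region. */
    if (address_space_cache_init(&cache, &address_space_memory,
                                 desc_gpa, sizeof(desc), false)
        < (int64_t)sizeof(desc)) {
        return;                     /* failed or only partially mapped */
    }

    /* Offsets are relative to desc_gpa and must stay within the length
     * passed to address_space_cache_init(). */
    address_space_read_cached(&cache, 0, desc, sizeof(desc));

    address_space_cache_destroy(&cache);
}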