author    Peter Maydell <peter.maydell@linaro.org>  2016-09-28 23:02:56 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2016-09-28 23:02:56 +0100
commit    c640f2849ee8775fe1bbd7a2772610aa77816f9f (patch)
tree      023903f354052da654a366a42f40fb717c28673d /memory.c
parent    bc63afaf5f6d906ff56608b52d575ac8dbb09062 (diff)
parent    6d0ceb80ffe18ad4b28aab7356f440636c0be7be (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* thread-safe tb_flush (Fred, Alex, Sergey, me, Richard, Emilio,... :-)
* license clarification for compiler.h (Felipe)
* glib cflags improvement (Marc-André)
* checkpatch silencing (Paolo)
* SMRAM migration fix (Paolo)
* Replay improvements (Pavel)
* IOMMU notifier improvements (Peter)
* IOAPIC now defaults to version 0x20 (Peter)

# gpg: Signature made Tue 27 Sep 2016 10:57:40 BST
# gpg: using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (28 commits)
  replay: allow replay stopping and restarting
  replay: vmstate for replay module
  replay: move internal data to the structure
  cpus-common: lock-free fast path for cpu_exec_start/end
  tcg: Make tb_flush() thread safe
  cpus-common: Introduce async_safe_run_on_cpu()
  cpus-common: simplify locking for start_exclusive/end_exclusive
  cpus-common: remove redundant call to exclusive_idle()
  cpus-common: always defer async_run_on_cpu work items
  docs: include formal model for TCG exclusive sections
  cpus-common: move exclusive work infrastructure from linux-user
  cpus-common: fix uninitialized variable use in run_on_cpu
  cpus-common: move CPU work item management to common code
  cpus-common: move CPU list management to common code
  linux-user: Add qemu_cpu_is_self() and qemu_cpu_kick()
  linux-user: Use QemuMutex and QemuCond
  cpus: Rename flush_queued_work()
  cpus: Move common code out of {async_, }run_on_cpu()
  cpus: pass CPUState to run_on_cpu helpers
  build-sys: put glib_cflags in QEMU_CFLAGS
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'memory.c')
-rw-r--r--  memory.c | 106
1 file changed, 79 insertions(+), 27 deletions(-)
diff --git a/memory.c b/memory.c
index 1a1baf574c..58f92693e2 100644
--- a/memory.c
+++ b/memory.c
@@ -158,14 +158,10 @@ static bool memory_listener_match(MemoryListener *listener,
/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
- MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \
- .mr = (fr)->mr, \
- .address_space = (as), \
- .offset_within_region = (fr)->offset_in_region, \
- .size = (fr)->addr.size, \
- .offset_within_address_space = int128_get64((fr)->addr.start), \
- .readonly = (fr)->readonly, \
- }), ##_args)
+ do { \
+ MemoryRegionSection mrs = section_from_flat_range(fr, as); \
+ MEMORY_LISTENER_CALL(callback, dir, &mrs, ##_args); \
+ } while(0)
struct CoalescedMemoryRange {
AddrRange addr;
@@ -245,6 +241,19 @@ typedef struct AddressSpaceOps AddressSpaceOps;
#define FOR_EACH_FLAT_RANGE(var, view) \
for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
+static inline MemoryRegionSection
+section_from_flat_range(FlatRange *fr, AddressSpace *as)
+{
+ return (MemoryRegionSection) {
+ .mr = fr->mr,
+ .address_space = as,
+ .offset_within_region = fr->offset_in_region,
+ .size = fr->addr.size,
+ .offset_within_address_space = int128_get64(fr->addr.start),
+ .readonly = fr->readonly,
+ };
+}
+
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
return a->mr == b->mr
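The helper above lets the macro shrink to a two-line body, and the new do { ... } while (0) wrapper is what allows that body to declare a local variable while still expanding as a single statement at every call site. A minimal sketch of the failure mode the wrapper avoids (the EMIT_* macros and log_section() are hypothetical names, not part of this patch):

/* A bare braced body breaks "if"/"else" pairing at the call site: */
#define EMIT_BAD(fr, as) \
    { MemoryRegionSection mrs = section_from_flat_range(fr, as); log_section(&mrs); }
/* if (cond) EMIT_BAD(fr, as); else ...  -- the ';' closes the if, orphaning the else */

#define EMIT_OK(fr, as)                                                \
    do {                                                               \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);    \
        log_section(&mrs);                                             \
    } while (0)
/* if (cond) EMIT_OK(fr, as); else ...   -- expands to one statement, else pairs normally */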
@@ -1413,7 +1422,8 @@ void memory_region_init_iommu(MemoryRegion *mr,
memory_region_init(mr, owner, name, size);
mr->iommu_ops = ops,
mr->terminates = true; /* then re-forwards */
- notifier_list_init(&mr->iommu_notify);
+ QLIST_INIT(&mr->iommu_notify);
+ mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
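These two lines replace the generic NotifierList with a QLIST of IOMMUNotifier structures plus a cached summary of the registered flags. The hunks below lean on the companion definitions this series adds to include/exec/memory.h, which look roughly like this (a paraphrased sketch, not a verbatim quote of the header):

typedef enum {
    IOMMU_NOTIFIER_NONE  = 0,
    IOMMU_NOTIFIER_UNMAP = 0x1,  /* caller wants invalidation (unmap) events */
    IOMMU_NOTIFIER_MAP   = 0x2,  /* caller wants new-mapping events */
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier {
    void (*notify)(struct IOMMUNotifier *notifier, IOMMUTLBEntry *data);
    IOMMUNotifierFlag notifier_flags;  /* events this notifier subscribes to */
    QLIST_ENTRY(IOMMUNotifier) node;   /* linkage on mr->iommu_notify */
};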
@@ -1508,13 +1518,31 @@ bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
-void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
+static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
{
- if (mr->iommu_ops->notify_started &&
- QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
- mr->iommu_ops->notify_started(mr);
+ IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
+ IOMMUNotifier *iommu_notifier;
+
+ QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
+ flags |= iommu_notifier->notifier_flags;
+ }
+
+ if (flags != mr->iommu_notify_flags &&
+ mr->iommu_ops->notify_flag_changed) {
+ mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
+ flags);
}
- notifier_list_add(&mr->iommu_notify, n);
+
+ mr->iommu_notify_flags = flags;
+}
+
+void memory_region_register_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n)
+{
+ /* We need to register for at least one bitfield */
+ assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
+ QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
+ memory_region_update_iommu_notify_flags(mr);
}
uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
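A consumer now states which events it wants at registration time, and memory_region_update_iommu_notify_flags() ORs the per-notifier flags together so the IOMMU model is told, via notify_flag_changed, only when the aggregate changes. A hedged sketch of a caller (my_map_notify and my_notifier are illustrative names; real users such as vfio follow the same shape):

static void my_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    /* react to the mapping change described by *iotlb */
}

static IOMMUNotifier my_notifier = {
    .notify         = my_map_notify,
    .notifier_flags = IOMMU_NOTIFIER_ALL,  /* NONE would trip the assert above */
};

memory_region_register_iommu_notifier(iommu_mr, &my_notifier);
/* optionally replay the existing mappings into the new notifier: */
memory_region_iommu_replay(iommu_mr, &my_notifier, false);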
@@ -1526,7 +1554,8 @@ uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
return TARGET_PAGE_SIZE;
}
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
+void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
+ bool is_write)
{
hwaddr addr, granularity;
IOMMUTLBEntry iotlb;
@@ -1547,20 +1576,32 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
}
}
-void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n)
{
- notifier_remove(n);
- if (mr->iommu_ops->notify_stopped &&
- QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
- mr->iommu_ops->notify_stopped(mr);
- }
+ QLIST_REMOVE(n, node);
+ memory_region_update_iommu_notify_flags(mr);
}
void memory_region_notify_iommu(MemoryRegion *mr,
IOMMUTLBEntry entry)
{
+ IOMMUNotifier *iommu_notifier;
+ IOMMUNotifierFlag request_flags;
+
assert(memory_region_is_iommu(mr));
- notifier_list_notify(&mr->iommu_notify, &entry);
+
+ if (entry.perm & IOMMU_RW) {
+ request_flags = IOMMU_NOTIFIER_MAP;
+ } else {
+ request_flags = IOMMU_NOTIFIER_UNMAP;
+ }
+
+ QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
+ if (iommu_notifier->notifier_flags & request_flags) {
+ iommu_notifier->notify(iommu_notifier, &entry);
+ }
+ }
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
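Producers are unchanged at the call site; the filtering now happens inside memory_region_notify_iommu(), which fires MAP notifiers when the entry grants any access and UNMAP notifiers when it revokes all of it. A minimal producer sketch under assumed values (iommu_mr, iova and pa are placeholders):

IOMMUTLBEntry entry = {
    .target_as       = &address_space_memory,
    .iova            = iova & ~(hwaddr)0xfff,  /* page-aligned guest IOVA */
    .translated_addr = pa & ~(hwaddr)0xfff,
    .addr_mask       = 0xfff,                  /* 4 KiB page */
    .perm            = IOMMU_NONE,             /* no access => only UNMAP notifiers run */
};
memory_region_notify_iommu(iommu_mr, entry);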
@@ -2124,16 +2165,27 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr)
return mr && mr != container;
}
-void address_space_sync_dirty_bitmap(AddressSpace *as)
+void memory_global_dirty_log_sync(void)
{
+ MemoryListener *listener;
+ AddressSpace *as;
FlatView *view;
FlatRange *fr;
- view = address_space_get_flatview(as);
- FOR_EACH_FLAT_RANGE(fr, view) {
- MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
+ QTAILQ_FOREACH(listener, &memory_listeners, link) {
+ if (!listener->log_sync) {
+ continue;
+ }
+ /* Global listeners are being phased out. */
+ assert(listener->address_space_filter);
+ as = listener->address_space_filter;
+ view = address_space_get_flatview(as);
+ FOR_EACH_FLAT_RANGE(fr, view) {
+ MemoryRegionSection mrs = section_from_flat_range(fr, as);
+ listener->log_sync(listener, &mrs);
+ }
+ flatview_unref(view);
}
- flatview_unref(view);
}
void memory_global_dirty_log_start(void)
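The rewritten sync walks every registered MemoryListener and reaches each log_sync callback through that listener's address-space filter, instead of requiring the caller to name one address space. A sketch of the listener shape this loop expects (the callback body is illustrative; at this point in history memory_listener_register() took the filter AddressSpace as its second argument):

static void my_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    /* harvest dirty bits for the range starting at
     * section->offset_within_address_space,
     * length int128_get64(section->size) */
}

static MemoryListener my_listener = {
    .log_sync = my_log_sync,
};

memory_listener_register(&my_listener, &address_space_memory);  /* non-NULL filter keeps the assert happy */

/* later, e.g. from migration code: */
memory_global_dirty_log_sync();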